author    David Hendricks <dhendricks@fb.com>    2018-03-09 13:58:27 -0800
committer Patrick Georgi <pgeorgi@google.com>    2018-04-06 06:48:11 +0000
commit    2004b93aed993aa02bbc588b8d82c22418ac52ec (patch)
tree      cdd5e95a154e2e0139474288262835a7f5847665 /src/vendorcode/cavium
parent    71cbd71eb5c0e8e13b25b5d5dd2f495e7d2967eb (diff)
soc/cavium: import raw BDK sources
This imports common BDK sources that will be used in subsequent patches. The
BDK is licensed under BSD and will be reduced in size and optimized to
compile under coreboot.

Change-Id: Icb32ee670d9fa9e5c10f9abb298cebf616fa67ad
Signed-off-by: David Hendricks <dhendricks@fb.com>
Reviewed-on: https://review.coreboot.org/25524
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: David Hendricks <david.hendricks@gmail.com>
Diffstat (limited to 'src/vendorcode/cavium')
-rw-r--r-- src/vendorcode/cavium/bdk/libbdk-arch/bdk-csr.c 376
-rw-r--r-- src/vendorcode/cavium/bdk/libbdk-arch/bdk-model.c 927
-rw-r--r-- src/vendorcode/cavium/bdk/libbdk-arch/bdk-numa.c 91
-rw-r--r-- src/vendorcode/cavium/bdk/libbdk-arch/bdk-platform.c 59
-rw-r--r-- src/vendorcode/cavium/bdk/libbdk-boot/bdk-boot-status.c 81
-rw-r--r-- src/vendorcode/cavium/bdk/libbdk-boot/bdk-watchdog.c 108
-rw-r--r-- src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-address.c 183
-rw-r--r-- src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-config.c 163
-rw-r--r-- src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-size.c 213
-rw-r--r-- src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-addrbus.c 115
-rw-r--r-- src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-databus.c 252
-rw-r--r-- src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-fastscan.c 103
-rw-r--r-- src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-patfil.c 829
-rw-r--r-- src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test.c 860
-rw-r--r-- src/vendorcode/cavium/bdk/libbdk-driver/bdk-driver-rnm.c 124
-rw-r--r-- src/vendorcode/cavium/bdk/libbdk-hal/bdk-clock.c 221
-rw-r--r-- src/vendorcode/cavium/bdk/libbdk-hal/bdk-config.c 1946
-rw-r--r-- src/vendorcode/cavium/bdk/libbdk-hal/bdk-gpio.c 197
-rw-r--r-- src/vendorcode/cavium/bdk/libbdk-hal/bdk-l2c.c 270
-rw-r--r-- src/vendorcode/cavium/bdk/libbdk-hal/bdk-twsi.c 318
-rw-r--r-- src/vendorcode/cavium/bdk/libbdk-os/bdk-init.c 561
-rw-r--r-- src/vendorcode/cavium/bdk/libbdk-os/bdk-thread.c 384
-rw-r--r-- src/vendorcode/cavium/bdk/libbdk-trust/bdk-trust.c 286
-rw-r--r-- src/vendorcode/cavium/bdk/libdram/dram-csr.h 86
-rw-r--r-- src/vendorcode/cavium/bdk/libdram/dram-env.c 83
-rw-r--r-- src/vendorcode/cavium/bdk/libdram/dram-env.h 48
-rw-r--r-- src/vendorcode/cavium/bdk/libdram/dram-gpio.h 46
-rw-r--r-- src/vendorcode/cavium/bdk/libdram/dram-init-ddr3.c 8535
-rw-r--r-- src/vendorcode/cavium/bdk/libdram/dram-init-ddr3.h 97
-rw-r--r-- src/vendorcode/cavium/bdk/libdram/dram-internal.h 201
-rw-r--r-- src/vendorcode/cavium/bdk/libdram/dram-l2c.c 69
-rw-r--r-- src/vendorcode/cavium/bdk/libdram/dram-l2c.h 45
-rw-r--r-- src/vendorcode/cavium/bdk/libdram/dram-print.h 86
-rw-r--r-- src/vendorcode/cavium/bdk/libdram/dram-spd.c 583
-rw-r--r-- src/vendorcode/cavium/bdk/libdram/dram-spd.h 166
-rw-r--r-- src/vendorcode/cavium/bdk/libdram/dram-tune-ddr3.c 2012
-rw-r--r-- src/vendorcode/cavium/bdk/libdram/dram-util.h 96
-rw-r--r-- src/vendorcode/cavium/bdk/libdram/lib_octeon_shared.c 2165
-rw-r--r-- src/vendorcode/cavium/bdk/libdram/lib_octeon_shared.h 124
-rw-r--r-- src/vendorcode/cavium/bdk/libdram/libdram-config-load.c 262
-rw-r--r-- src/vendorcode/cavium/bdk/libdram/libdram.c 718
-rw-r--r-- src/vendorcode/cavium/include/bdk/bdk.h 80
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-arch.h 85
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-asm.h 94
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csr.h 324
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-ap.h 34851
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-fus.h 643
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-fusf.h 939
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-gpio.h 2995
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-gti.h 5352
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-l2c.h 2637
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-l2c_cbc.h 1282
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-l2c_tad.h 2749
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-lmc.h 21359
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-lmt.h 150
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-mio_boot.h 537
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-mio_fus.h 2365
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-mio_tws.h 1682
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-ocla.h 2756
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-ocx.h 4951
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-pccpf.h 4727
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-pem.h 14736
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-rnm.h 738
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-rst.h 6117
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-sgp.h 975
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-uaa.h 2149
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-fuse.h 117
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-lmt.h 100
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-model.h 170
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-numa.h 139
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-platform.h 82
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-require.h 107
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-swap.h 130
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-version.h 59
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-warn.h 104
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-bist/bist.h 43
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-bist/efuse-read.h 41
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-bgx.h 54
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-ccpi.h 54
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-dram.h 60
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-gpio.h 54
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-info.h 86
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-mdio.h 54
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-pcie.h 54
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-qlm.h 54
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-status.h 94
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-twsi.h 54
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-usb.h 54
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot.h 69
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-image.h 105
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-watchdog.h 81
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-xmodem.h 59
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-dram/bdk-dram-config.h 118
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-dram/bdk-dram-test.h 198
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-dram/bdk-dram.h 54
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-driver/bdk-driver-sgpio.h 153
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-driver/bdk-driver.h 71
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-access-native.h 155
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-access.h 133
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-atomic.h 541
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-clock.h 105
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-config.h 357
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-crc.h 53
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-ecam.h 97
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-error-report.h 62
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-fpa.h 162
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-gpio.h 111
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-hal.h 98
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-key.h 86
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-l2c.h 179
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-mdio.h 476
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-mmc.h 89
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-mpi.h 105
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-nic.h 107
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-nix.h 105
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pbus-flash.h 111
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pcie-flash.h 109
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pcie.h 236
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pki.h 83
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pko.h 126
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-power-burn.h 67
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-qlm.h 508
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-rng.h 79
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-rvu.h 66
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-sata.h 163
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-spinlock.h 146
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-sso.h 69
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-tns.h 109
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-twsi.h 102
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-usb.h 109
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-utils.h 206
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-vrm.h 66
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-hal/device/bdk-device.h 259
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-os/bdk-init.h 194
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-os/bdk-thread.h 122
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-trust/bdk-signed.h 94
-rw-r--r-- src/vendorcode/cavium/include/bdk/libbdk-trust/bdk-trust.h 136
-rw-r--r-- src/vendorcode/cavium/include/bdk/libdram/libdram-config.h 262
-rw-r--r-- src/vendorcode/cavium/include/bdk/libdram/libdram.h 51
139 files changed, 148528 insertions, 0 deletions
diff --git a/src/vendorcode/cavium/bdk/libbdk-arch/bdk-csr.c b/src/vendorcode/cavium/bdk/libbdk-arch/bdk-csr.c
new file mode 100644
index 0000000000..981ad231dc
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-arch/bdk-csr.c
@@ -0,0 +1,376 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include <stdio.h>
+#include "libbdk-arch/bdk-csrs-pccpf.h"
+#include "libbdk-arch/bdk-csrs-pem.h"
+
+#ifndef BDK_BUILD_HOST
+
+/**
+ * Read a slow CSR, not RSL or NCB.
+ *
+ * @param type Bus type the CSR is on
+ * @param busnum Bus number the CSR is on
+ * @param size Width of the CSR in bytes
+ * @param address The address of the CSR
+ *
+ * @return The value of the CSR
+ */
+uint64_t __bdk_csr_read_slow(bdk_node_t node, bdk_csr_type_t type, int busnum, int size, uint64_t address)
+{
+ switch (type)
+ {
+ case BDK_CSR_TYPE_DAB:
+ case BDK_CSR_TYPE_DAB32b:
+ case BDK_CSR_TYPE_NCB:
+ case BDK_CSR_TYPE_NCB32b:
+ case BDK_CSR_TYPE_PEXP_NCB:
+ case BDK_CSR_TYPE_RSL:
+ case BDK_CSR_TYPE_RSL32b:
+ case BDK_CSR_TYPE_RVU_PF_BAR0:
+ case BDK_CSR_TYPE_RVU_PF_BAR2:
+ case BDK_CSR_TYPE_RVU_PFVF_BAR2:
+ case BDK_CSR_TYPE_RVU_VF_BAR2:
+ /* Handled by inline code, we should never get here */
+ bdk_error("%s: Passed type that should be handled inline\n", __FUNCTION__);
+ break;
+
+ case BDK_CSR_TYPE_PCCBR:
+ case BDK_CSR_TYPE_PCCPF:
+ case BDK_CSR_TYPE_PCCVF:
+ case BDK_CSR_TYPE_PEXP:
+ case BDK_CSR_TYPE_MDSB:
+ case BDK_CSR_TYPE_PCICONFIGEP_SHADOW:
+ case BDK_CSR_TYPE_PCICONFIGEPVF:
+ bdk_error("%s: Register not supported\n", __FUNCTION__);
+ break;
+
+ case BDK_CSR_TYPE_SYSREG:
+ return bdk_sysreg_read(node, bdk_get_core_num(), address);
+
+ case BDK_CSR_TYPE_PCICONFIGRC:
+ {
+ /* Don't allow PCIe register access if PCIe wasn't linked in */
+ if (!bdk_pcie_config_read32)
+ bdk_fatal("PCIe CSR access not supported when PCIe not linked in\n");
+ union bdk_pcc_dev_con_s dev_con;
+ switch (busnum)
+ {
+ case 0:
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ dev_con.u = BDK_PCC_DEV_CON_E_PCIERC0_CN88XX;
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ dev_con.u = BDK_PCC_DEV_CON_E_PCIERC0_CN83XX;
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ dev_con.u = BDK_PCC_DEV_CON_E_PCIERC0_CN81XX;
+ else
+ bdk_fatal("Update PCICONFIG in %s\n", __FUNCTION__);
+ break;
+ case 1:
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ dev_con.u = BDK_PCC_DEV_CON_E_PCIERC1_CN88XX;
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ dev_con.u = BDK_PCC_DEV_CON_E_PCIERC1_CN83XX;
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ dev_con.u = BDK_PCC_DEV_CON_E_PCIERC1_CN81XX;
+ else
+ bdk_fatal("Update PCICONFIG in %s\n", __FUNCTION__);
+ break;
+ case 2:
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ dev_con.u = BDK_PCC_DEV_CON_E_PCIERC2_CN88XX;
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ dev_con.u = BDK_PCC_DEV_CON_E_PCIERC2_CN83XX;
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ dev_con.u = BDK_PCC_DEV_CON_E_PCIERC2_CN81XX;
+ else
+ bdk_fatal("Update PCICONFIG in %s\n", __FUNCTION__);
+ break;
+ case 3:
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ dev_con.u = BDK_PCC_DEV_CON_E_PCIERC3_CN88XX;
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ dev_con.u = BDK_PCC_DEV_CON_E_PCIERC3_CN83XX;
+ else
+ bdk_fatal("Update PCICONFIG in %s\n", __FUNCTION__);
+ break;
+ case 4:
+ dev_con.u = BDK_PCC_DEV_CON_E_PCIERC4;
+ break;
+ case 5:
+ dev_con.u = BDK_PCC_DEV_CON_E_PCIERC5;
+ break;
+ default:
+ bdk_error("%s: Illegal PCIe bus number\n", __FUNCTION__);
+ return -1;
+ }
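+ /* Descriptive note (inferred from the read below): dev_con.s.func
+ packs the ECAM device and function numbers, so func >> 3 selects
+ the PCI device and func & 7 the function */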
+ return bdk_pcie_config_read32(node, 100 + dev_con.cn8.ecam, dev_con.s.bus, dev_con.s.func >> 3, dev_con.s.func & 7, address);
+ }
+ case BDK_CSR_TYPE_PCICONFIGEP:
+ {
+ BDK_CSR_DEFINE(cfg_rd, BDK_PEMX_CFG_RD(busnum));
+ cfg_rd.u = 0;
+ cfg_rd.s.addr = address;
+ BDK_CSR_WRITE(node, BDK_PEMX_CFG_RD(busnum), cfg_rd.u);
+ cfg_rd.u = BDK_CSR_READ(node, BDK_PEMX_CFG_RD(busnum));
+ return cfg_rd.s.data;
+ }
+ }
+ return -1; /* Return -1 as this looks invalid in register dumps. Zero is too common as a good value */
+}
+
+
+/**
+ * Write a value to a slow CSR, not RSL or NCB.
+ *
+ * @param type Bus type the CSR is on
+ * @param busnum Bus number the CSR is on
+ * @param size Width of the CSR in bytes
+ * @param address The address of the CSR
+ * @param value Value to write to the CSR
+ */
+void __bdk_csr_write_slow(bdk_node_t node, bdk_csr_type_t type, int busnum, int size, uint64_t address, uint64_t value)
+{
+ switch (type)
+ {
+ case BDK_CSR_TYPE_DAB:
+ case BDK_CSR_TYPE_DAB32b:
+ case BDK_CSR_TYPE_NCB:
+ case BDK_CSR_TYPE_NCB32b:
+ case BDK_CSR_TYPE_PEXP_NCB:
+ case BDK_CSR_TYPE_RSL:
+ case BDK_CSR_TYPE_RSL32b:
+ case BDK_CSR_TYPE_RVU_PF_BAR0:
+ case BDK_CSR_TYPE_RVU_PF_BAR2:
+ case BDK_CSR_TYPE_RVU_PFVF_BAR2:
+ case BDK_CSR_TYPE_RVU_VF_BAR2:
+ /* Handled by inline code, we should never get here */
+ bdk_error("%s: Passed type that should be handled inline\n", __FUNCTION__);
+ break;
+
+ case BDK_CSR_TYPE_PCCBR:
+ case BDK_CSR_TYPE_PCCPF:
+ case BDK_CSR_TYPE_PCCVF:
+ case BDK_CSR_TYPE_PEXP:
+ case BDK_CSR_TYPE_MDSB:
+ case BDK_CSR_TYPE_PCICONFIGEP_SHADOW:
+ case BDK_CSR_TYPE_PCICONFIGEPVF:
+ bdk_error("%s: Register not supported\n", __FUNCTION__);
+ break;
+
+ case BDK_CSR_TYPE_SYSREG:
+ bdk_sysreg_write(node, bdk_get_core_num(), address, value);
+ break;
+
+ case BDK_CSR_TYPE_PCICONFIGRC:
+ {
+ /* Don't allow PCIe register access if PCIe wasn't linked in */
+ if (!bdk_pcie_config_write32)
+ bdk_fatal("PCIe CSR access not supported when PCIe not linked in\n");
+ union bdk_pcc_dev_con_s dev_con;
+ switch (busnum)
+ {
+ case 0:
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ dev_con.u = BDK_PCC_DEV_CON_E_PCIERC0_CN88XX;
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ dev_con.u = BDK_PCC_DEV_CON_E_PCIERC0_CN83XX;
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ dev_con.u = BDK_PCC_DEV_CON_E_PCIERC0_CN81XX;
+ else
+ bdk_fatal("Update PCICONFIG in %s\n", __FUNCTION__);
+ break;
+ case 1:
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ dev_con.u = BDK_PCC_DEV_CON_E_PCIERC1_CN88XX;
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ dev_con.u = BDK_PCC_DEV_CON_E_PCIERC1_CN83XX;
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ dev_con.u = BDK_PCC_DEV_CON_E_PCIERC1_CN81XX;
+ else
+ bdk_fatal("Update PCICONFIG in %s\n", __FUNCTION__);
+ break;
+ case 2:
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ dev_con.u = BDK_PCC_DEV_CON_E_PCIERC2_CN88XX;
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ dev_con.u = BDK_PCC_DEV_CON_E_PCIERC2_CN83XX;
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ dev_con.u = BDK_PCC_DEV_CON_E_PCIERC2_CN81XX;
+ else
+ bdk_fatal("Update PCICONFIG in %s\n", __FUNCTION__);
+ break;
+ case 3:
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ dev_con.u = BDK_PCC_DEV_CON_E_PCIERC3_CN88XX;
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ dev_con.u = BDK_PCC_DEV_CON_E_PCIERC3_CN83XX;
+ else
+ bdk_fatal("Update PCICONFIG in %s\n", __FUNCTION__);
+ break;
+ case 4:
+ dev_con.u = BDK_PCC_DEV_CON_E_PCIERC4;
+ break;
+ case 5:
+ dev_con.u = BDK_PCC_DEV_CON_E_PCIERC5;
+ break;
+ default:
+ bdk_error("%s: Illegal PCIe bus number\n", __FUNCTION__);
+ return;
+ }
+ bdk_pcie_config_write32(node, 100 + dev_con.cn8.ecam, dev_con.s.bus, dev_con.s.func >> 3, dev_con.s.func & 7, address, value);
+ break;
+ }
+ case BDK_CSR_TYPE_PCICONFIGEP:
+ {
+ BDK_CSR_DEFINE(cfg_wr, BDK_PEMX_CFG_WR(busnum));
+ cfg_wr.u = 0;
+ cfg_wr.s.addr = address;
+ cfg_wr.s.data = value;
+ BDK_CSR_WRITE(node, BDK_PEMX_CFG_WR(busnum), cfg_wr.u);
+ break;
+ }
+ }
+}
+
+#endif
+
+void __bdk_csr_fatal(const char *name, int num_args, unsigned long arg1, unsigned long arg2, unsigned long arg3, unsigned long arg4)
+{
+ switch (num_args)
+ {
+ case 0:
+ bdk_fatal("%s is invalid on this chip\n", name);
+ case 1:
+ bdk_fatal("%s(%lu) is invalid on this chip\n", name, arg1);
+ case 2:
+ bdk_fatal("%s(%lu,%lu) is invalid on this chip\n", name, arg1, arg2);
+ case 3:
+ bdk_fatal("%s(%lu,%lu,%lu) is invalid on this chip\n", name, arg1, arg2, arg3);
+ default:
+ bdk_fatal("%s(%lu,%lu,%lu,%lu) is invalid on this chip\n", name, arg1, arg2, arg3, arg4);
+ }
+}
+
+/**
+ * Read a core system register from a different node or core
+ *
+ * @param node Node to read from
+ * @param core Core to read
+ * @param regnum Register to read in MRS encoding
+ *
+ * @return Register value
+ */
+uint64_t bdk_sysreg_read(int node, int core, uint64_t regnum)
+{
+ BDK_CSR_INIT(pp_reset, node, BDK_RST_PP_RESET);
+ if (pp_reset.u & (1ull<<core))
+ {
+ bdk_error("Attempt to read system register for core in reset\n");
+ return -1;
+ }
+
+ /* Addresses indicate selects as follows:
+ select 3,4,14,2,3
+ == 0x03040e020300
+ | | | | |^--- 1 if it is an E2H duplicated register
+ | | | |^^-- fifth select
+ | | |^^-- fourth select
+ | |^^-- third select
+ |^^-- second select
+ ^^-- first select */
+ uint64_t first = (regnum >> 40) & 0xff;
+ uint64_t second = (regnum >> 32) & 0xff;
+ uint64_t third = (regnum >> 24) & 0xff;
+ uint64_t fourth = (regnum >> 16) & 0xff;
+ uint64_t fifth = (regnum >> 8) & 0xff;
+ uint64_t regid = ((first & 3) << 14) | (second << 11) | (third << 7) | (fourth << 3) | fifth;
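+ /* Worked example (a sketch based on the encoding comment above): for
+ select 3,4,14,2,3 (regnum 0x03040e020300), regid is
+ ((3 & 3) << 14) | (4 << 11) | (14 << 7) | (2 << 3) | 3 = 0xe713 */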
+
+ /* Note this requires DAP_IMP_DAR[caben] = 1 */
+ uint64_t address = 1ull<<47;
+ address |= 0x7Bull << 36;
+ address |= core << 19;
+ address |= regid << 3;
+ address = bdk_numa_get_address(node, address);
+ return bdk_read64_uint64(address);
+}
+
+/**
+ * Write a system register for a different node or core
+ *
+ * @param node Node to write to
+ * @param core Core to write
+ * @param regnum Register to write in MSR encoding
+ * @param value Value to write
+ */
+void bdk_sysreg_write(int node, int core, uint64_t regnum, uint64_t value)
+{
+ BDK_CSR_INIT(pp_reset, node, BDK_RST_PP_RESET);
+ if (pp_reset.u & (1ull<<core))
+ {
+ bdk_error("Attempt to write system register for core in reset\n");
+ return;
+ }
+
+ /* Addresses indicate selects as follows:
+ select 3,4,14,2,3
+ == 0x03040e020300
+ | | | | |^--- 1 if it is an E2H duplicated register
+ | | | |^^-- fifth select
+ | | |^^-- fourth select
+ | |^^-- third select
+ |^^-- second select
+ ^^-- first select */
+ uint64_t first = (regnum >> 40) & 0xff;
+ uint64_t second = (regnum >> 32) & 0xff;
+ uint64_t third = (regnum >> 24) & 0xff;
+ uint64_t fourth = (regnum >> 16) & 0xff;
+ uint64_t fifth = (regnum >> 8) & 0xff;
+ uint64_t regid = ((first & 3) << 14) | (second << 11) | (third << 7) | (fourth << 3) | fifth;
+
+ /* Note this requires DAP_IMP_DAR[caben] = 1 */
+ uint64_t address = 1ull<<47;
+ address |= 0x7Bull << 36;
+ address |= core << 19;
+ address |= regid << 3;
+ address = bdk_numa_get_address(node, address);
+ bdk_write64_uint64(address, value);
+}
+
diff --git a/src/vendorcode/cavium/bdk/libbdk-arch/bdk-model.c b/src/vendorcode/cavium/bdk/libbdk-arch/bdk-model.c
new file mode 100644
index 0000000000..f2b4a0c803
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-arch/bdk-model.c
@@ -0,0 +1,927 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include "libbdk-arch/bdk-csrs-ap.h"
+#include "libbdk-arch/bdk-csrs-mio_fus.h"
+#include "libbdk-arch/bdk-csrs-fus.h"
+#include "libbdk-arch/bdk-csrs-fusf.h"
+
+/*
+ Format of a SKU
+ CN8890-2000BG2601-AAP-G
+ CN8890-2000BG2601-AAP-PR-Y-G
+ CN XX XX X - XXX BG XXX - XX (- XX) (- X) - G
+ | | | | | | | | | | ^ RoHS Option, G=RoHS 6/6
+ | | | | | | | | | ^ Product Revision, blank for pass 1, Y=pass 2, W=pass 3, V=pass 4
+ | | | | | | | | ^ Product Phase, blank=production, PR=Prototype, ES=Engineering Sample
+ | | | | | | | ^ Marketing Segment Option (SC, SNT, etc)
+ | | | | | | ^ Number of balls on the package
+ | | | | | ^ Ball Grid Array
+ | | | | | ^ Frequency in MHz, 3 or 4 digits (300 - 2000)
+ | | | ^ Optional Customer Code, blank or A-Z
+ | | ^ Number of cores, see table below
+ | ^ Processor family, plus or minus for L2 sizes and such (88, 86, 83, 81, 80)
+ ^ Cavium Prefix, sometimes changed for customer specific parts
+
+ Table of Core to Model encoding
+ >= 48 shows xx90
+ >= 44 shows xx88
+ >= 42 shows xx85
+ >= 32 shows xx80
+ >= 24 shows xx70
+ >= 20 shows xx65
+ >= 16 shows xx60
+ = 15 shows xx58
+ = 14 shows xx55
+ = 13 shows xx52
+ = 12 shows xx50
+ = 11 shows xx48
+ = 10 shows xx45
+ = 9 shows xx42
+ = 8 shows xx40
+ = 7 shows xx38
+ = 6 shows xx34
+ = 5 shows xx32
+ = 4 shows xx30
+ = 3 shows xx25
+ = 2 shows xx20
+ = 1 shows xx10
+*/
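+
+/* Worked example, decoding the first sample SKU above (informal, derived
+ from the field descriptions): CN8890-2000BG2601-AAP-G is a Cavium "CN"
+ part, family 88, core code 90 (48 cores), no customer code, 2000 MHz,
+ ball grid array package with 2601 balls, "AAP" market segment, blank
+ product phase and revision (production pass 1.0), RoHS option G */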
+
+/* Definition of each SKU table entry for the different dies */
+typedef struct
+{
+ uint8_t fuse_index; /* Index programmed into PNAME fuses to match this entry. Must never change once fused parts ship */
+ const char prefix[4]; /* Prefix before model number, usually "CN". Third letter is customer code shown after the model */
+ uint8_t model_base; /* First two digits of the model number */
+ uint16_t num_balls; /* Number of balls on package, included in SKU */
+ const char segment[4]; /* Market segment SKU is for, 2-3 character string */
+ uint16_t fuses[12]; /* List of fuses required for operation of this SKU */
+} model_sku_info_t;
+
+/* In the model_sku_info_t.fuses[] array, we use a special value
+ FUSES_CHECK_FUSF to represent that we need to check FUSF_CTL bit
+ 6, checking for trusted boot */
+#define FUSES_CHECK_FUSF 0xffff
+
+/***************************************************/
+/* SKU table for t88 */
+/* From "Thunder Part Number fuse overview Rev 16.xlsx" */
+/***************************************************/
+static const model_sku_info_t t88_sku_info[] =
+{
+ /* Index zero reserved for no fuses programmed */
+ { 0x01, "CN", 88, 2601, "AAP", /* 48, 32 cores */
+ { /* List of fuses for this SKU */
+ 0 /* End of fuse list marker */
+ }
+ },
+ { 0x02, "CN", 88, 2601, "AAS", /* 24 cores */
+ { /* List of fuses for this SKU */
+ BDK_MIO_FUS_FUSE_NUM_E_OCX_DIS, /* Disable CCPI */
+ 0 /* End of fuse list marker */
+ }
+ },
+ { 0x03, "CN", 88, 2601, "ST", /* 48, 32 cores */
+ { /* List of fuses for this SKU */
+ BDK_MIO_FUS_FUSE_NUM_E_TNS_CRIPPLE, /* Disable TNS */
+ BDK_MIO_FUS_FUSE_NUM_E_PEM_DISX(0), /* Disable PEM0-1 */
+ BDK_MIO_FUS_FUSE_NUM_E_PEM_DISX(2), /* Disable PEM4-5 */
+ 0 /* End of fuse list marker */
+ }
+ },
+ { 0x04, "CN", 88, 2601, "STT", /* 48 cores */
+ { /* List of fuses for this SKU */
+ BDK_MIO_FUS_FUSE_NUM_E_PEM_DISX(0), /* Disable PEM0-1 */
+ BDK_MIO_FUS_FUSE_NUM_E_PEM_DISX(2), /* Disable PEM4-5 */
+ 0 /* End of fuse list marker */
+ }
+ },
+ { 0x05, "CN", 88, 2601, "STS", /* 24 cores */
+ { /* List of fuses for this SKU */
+ BDK_MIO_FUS_FUSE_NUM_E_LMC_DIS, /* Disable LMC2-3 */
+ BDK_MIO_FUS_FUSE_NUM_E_OCX_DIS, /* Disable CCPI */
+ BDK_MIO_FUS_FUSE_NUM_E_TNS_CRIPPLE, /* Disable TNS */
+ BDK_MIO_FUS_FUSE_NUM_E_PEM_DISX(0), /* Disable PEM0-1 */
+ BDK_MIO_FUS_FUSE_NUM_E_PEM_DISX(2), /* Disable PEM4-5 */
+ BDK_MIO_FUS_FUSE_NUM_E_BGX_DISX(1), /* Disable BGX1 */
+ 0 /* End of fuse list marker */
+ }
+ },
+ { 0x06, "CN", 88, 2601, "STP", /* 48, 32 cores */
+ { /* List of fuses for this SKU */
+ BDK_MIO_FUS_FUSE_NUM_E_TNS_CRIPPLE, /* Disable TNS */
+ 0 /* End of fuse list marker */
+ }
+ },
+ { 0x07, "CN", 88, 2601, "NT", /* 48, 32 cores */
+ { /* List of fuses for this SKU */
+ BDK_MIO_FUS_FUSE_NUM_E_SATA_DISX(0),/* Disable SATA0-3 */
+ BDK_MIO_FUS_FUSE_NUM_E_SATA_DISX(2),/* Disable SATA8-11 */
+ BDK_MIO_FUS_FUSE_NUM_E_SATA_DISX(3),/* Disable SATA12-15 */
+ 0 /* End of fuse list marker */
+ }
+ },
+ { 0x08, "CN", 88, 2601, "NTS", /* 24 cores */
+ { /* List of fuses for this SKU */
+ BDK_MIO_FUS_FUSE_NUM_E_LMC_DIS, /* Disable LMC2-3 */
+ BDK_MIO_FUS_FUSE_NUM_E_OCX_DIS, /* Disable CCPI */
+ BDK_MIO_FUS_FUSE_NUM_E_SATA_DISX(0),/* Disable SATA0-3 */
+ BDK_MIO_FUS_FUSE_NUM_E_SATA_DISX(2),/* Disable SATA8-11 */
+ BDK_MIO_FUS_FUSE_NUM_E_SATA_DISX(3),/* Disable SATA12-15 */
+ BDK_MIO_FUS_FUSE_NUM_E_BGX_DISX(1), /* Disable BGX1 */
+ 0 /* End of fuse list marker */
+ }
+ },
+ { 0x09, "CN", 88, 2601, "NTP", /* 48, 32 cores */
+ { /* List of fuses for this SKU */
+ BDK_MIO_FUS_FUSE_NUM_E_SATA_DISX(0),/* Disable SATA0-3 */
+ BDK_MIO_FUS_FUSE_NUM_E_SATA_DISX(1),/* Disable SATA4-7 */
+ BDK_MIO_FUS_FUSE_NUM_E_SATA_DISX(2),/* Disable SATA8-11 */
+ BDK_MIO_FUS_FUSE_NUM_E_SATA_DISX(3),/* Disable SATA12-15 */
+ 0 /* End of fuse list marker */
+ }
+ },
+ { 0x0a, "CN", 88, 2601, "CP", /* 48,32 cores */
+ { /* List of fuses for this SKU */
+ BDK_MIO_FUS_FUSE_NUM_E_NODFA_CP2, /* Disable HFA */
+ BDK_MIO_FUS_FUSE_NUM_E_RSVD134X(0), /* Disable HNA */
+ BDK_MIO_FUS_FUSE_NUM_E_NOZIP, /* Disable Compression */
+ BDK_MIO_FUS_FUSE_NUM_E_TNS_CRIPPLE, /* Disable TNS */
+ BDK_MIO_FUS_FUSE_NUM_E_SATA_DISX(0),/* Disable SATA0-3 */
+ BDK_MIO_FUS_FUSE_NUM_E_SATA_DISX(2),/* Disable SATA8-11 */
+ BDK_MIO_FUS_FUSE_NUM_E_SATA_DISX(3),/* Disable SATA12-15 */
+ 0 /* End of fuse list marker */
+ }
+ },
+ { 0x0b, "CN", 88, 2601, "CPS", /* 24 cores */
+ { /* List of fuses for this SKU */
+ BDK_MIO_FUS_FUSE_NUM_E_NODFA_CP2, /* Disable HFA */
+ BDK_MIO_FUS_FUSE_NUM_E_RSVD134X(0), /* Disable HNA */
+ BDK_MIO_FUS_FUSE_NUM_E_NOZIP, /* Disable Compression */
+ BDK_MIO_FUS_FUSE_NUM_E_LMC_DIS, /* Disable LMC2-3 */
+ BDK_MIO_FUS_FUSE_NUM_E_OCX_DIS, /* Disable CCPI */
+ BDK_MIO_FUS_FUSE_NUM_E_TNS_CRIPPLE, /* Disable TNS */
+ BDK_MIO_FUS_FUSE_NUM_E_SATA_DISX(0),/* Disable SATA0-3 */
+ BDK_MIO_FUS_FUSE_NUM_E_SATA_DISX(2),/* Disable SATA8-11 */
+ BDK_MIO_FUS_FUSE_NUM_E_SATA_DISX(3),/* Disable SATA12-15 */
+ BDK_MIO_FUS_FUSE_NUM_E_BGX_DISX(1), /* Disable BGX1 */
+ 0 /* End of fuse list marker */
+ }
+ },
+ { 0x0c, "CN", 88, 2601, "SNT", /* 48,32 cores, Nitrox connects to PEM2x8, QLM4-5 */
+ { /* List of fuses for this SKU */
+ BDK_MIO_FUS_FUSE_NUM_E_RSVD231X(0), /* Nitrox 3 is present */
+ 0 /* End of fuse list marker */
+ }
+ },
+ { 0x0d, "CN", 88, 2601, "SC", /* 48,32 cores, Nitrox connects to PEM2x8, QLM4-5 */
+ { /* List of fuses for this SKU */
+ BDK_MIO_FUS_FUSE_NUM_E_RSVD231X(0), /* Nitrox 3 is present */
+ BDK_MIO_FUS_FUSE_NUM_E_NODFA_CP2, /* Disable HFA */
+ BDK_MIO_FUS_FUSE_NUM_E_RSVD134X(0), /* Disable HNA */
+ BDK_MIO_FUS_FUSE_NUM_E_NOZIP, /* Disable Compression */
+ BDK_MIO_FUS_FUSE_NUM_E_TNS_CRIPPLE, /* Disable TNS */
+ 0 /* End of fuse list marker */
+ }
+ },
+ /* Index gap for adding more CN88 variants */
+ { 0x20, "CN", 86, 1676, "AAP", /* No part, match unfused CN86XX */
+ { /* List of fuses for this SKU */
+ BDK_MIO_FUS_FUSE_NUM_E_CHIP_IDX(6), /* Alternate package fuse */
+ 0 /* End of fuse list marker */
+ }
+ },
+ { 0x21, "CN", 86, 1676, "SCP", /* 8 cores */
+ { /* List of fuses for this SKU */
+ BDK_MIO_FUS_FUSE_NUM_E_CHIP_IDX(6), /* Alternate package fuse */
+ BDK_MIO_FUS_FUSE_NUM_E_L2C_CRIPX(1),/* L2C is half size */
+ BDK_MIO_FUS_FUSE_NUM_E_NODFA_CP2, /* Disable HFA */
+ BDK_MIO_FUS_FUSE_NUM_E_RSVD134X(0), /* Disable HNA */
+ BDK_MIO_FUS_FUSE_NUM_E_NOZIP, /* Disable Compression */
+ BDK_MIO_FUS_FUSE_NUM_E_LMC_DIS, /* Disable LMC2-3 */
+ BDK_MIO_FUS_FUSE_NUM_E_OCX_DIS, /* Disable CCPI */
+ BDK_MIO_FUS_FUSE_NUM_E_TNS_CRIPPLE, /* Disable TNS */
+ 0 /* End of fuse list marker */
+ }
+ },
+ {} /* End of SKU list marker */
+};
+
+/***************************************************/
+/* SKU table for t83 */
+/* From "Thunder Part Number fuse overview Rev 16.xlsx" */
+/***************************************************/
+static const model_sku_info_t t83_sku_info[] =
+{
+ /* Index zero reserved for no fuses programmed */
+ { 0x01, "CN", 83, 1676, "SCP", /* 24, 20, 16, 12, 8 cores */
+ { /* List of fuses for this SKU */
+ 0 /* End of fuse list marker */
+ }
+ },
+ { 0x02, "CN", 83, 1676, "CP", /* 24, 20, 16, 12, 8 cores */
+ { /* List of fuses for this SKU */
+ /* Disable all Nitrox cores, CPT0 and CPT1 */
+ BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(0), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(1), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(2), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(3), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(4), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(5), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(6), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(7), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(8), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(9), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(10), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(11), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(12), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(13), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(14), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(15), /* Nitrox */
+ BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(16), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(17), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(18), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(19), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(20), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(21), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(22), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(23), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(24), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(25), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(26), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(27), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(28), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(29), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(30), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(31), /* Nitrox */
+ BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(32), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(33), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(34), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(35), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(36), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(37), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(38), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(39), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(40), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(41), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(42), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(43), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(44), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(45), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(46), /* Nitrox */
+ BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(47), /* Nitrox */
+ BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(0), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(1), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(2), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(3), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(4), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(5), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(6), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(7), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(8), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(9), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(10), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(11), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(12), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(13), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(14), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(15), /* Nitrox */
+ BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(16), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(17), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(18), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(19), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(20), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(21), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(22), /* Nitrox */
+ BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(23), /* Nitrox */
+ 0 /* End of fuse list marker */
+ }
+ },
+ { 0x03, "CN", 83, 1676, "AUS", /* 24, 20, 16, 12, 8 cores */
+ { /* List of fuses for this SKU */
+ FUSES_CHECK_FUSF, /* Trusted boot */
+ 0 /* End of fuse list marker */
+ }
+ },
+ { 0x04, "CN", 82, 1676, "SCP", /* 12, 8 cores */
+ { /* List of fuses for this SKU */
+ BDK_MIO_FUS_FUSE_NUM_E_L2C_CRIPX(1),/* L2C is half size */
+ BDK_MIO_FUS_FUSE_NUM_E_LMC_DIS, /* Disable upper LMC */
+ /* Disable Nitrox cores CPT0[24-47] and CPT1[12-23] */
+ BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(24), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(25), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(26), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(27), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(28), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(29), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(30), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(31), /* Nitrox */
+ BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(32), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(33), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(34), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(35), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(36), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(37), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(38), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(39), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(40), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(41), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(42), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(43), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(44), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(45), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(46), /* Nitrox */
+ BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(47), /* Nitrox */
+ BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(12), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(13), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(14), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(15), /* Nitrox */
+ BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(16), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(17), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(18), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(19), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(20), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(21), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(22), /* Nitrox */
+ BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(23), /* Nitrox */
+ 0 /* End of fuse list marker */
+ }
+ },
+ { 0x05, "CN", 82, 1676, "CP", /* 12, 8 cores */
+ { /* List of fuses for this SKU */
+ BDK_MIO_FUS_FUSE_NUM_E_L2C_CRIPX(1),/* L2C is half size */
+ BDK_MIO_FUS_FUSE_NUM_E_LMC_DIS, /* Disable upper LMC */
+ /* Disable all Nitrox cores, CPT0 and CPT1 */
+ BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(0), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(1), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(2), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(3), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(4), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(5), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(6), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(7), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(8), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(9), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(10), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(11), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(12), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(13), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(14), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(15), /* Nitrox */
+ BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(16), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(17), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(18), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(19), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(20), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(21), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(22), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(23), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(24), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(25), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(26), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(27), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(28), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(29), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(30), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(31), /* Nitrox */
+ BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(32), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(33), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(34), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(35), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(36), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(37), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(38), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(39), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(40), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(41), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(42), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(43), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(44), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(45), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(46), /* Nitrox */
+ BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(47), /* Nitrox */
+ BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(0), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(1), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(2), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(3), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(4), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(5), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(6), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(7), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(8), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(9), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(10), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(11), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(12), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(13), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(14), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(15), /* Nitrox */
+ BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(16), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(17), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(18), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(19), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(20), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(21), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(22), /* Nitrox */
+ BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(23), /* Nitrox */
+ 0 /* End of fuse list marker */
+ }
+ },
+ {} /* End of SKU list marker */
+};
+
+/***************************************************/
+/* SKU table for t81 */
+/* From "Thunder Part Number fuse overview Rev 16.xlsx" */
+/***************************************************/
+static const model_sku_info_t t81_sku_info[] =
+{
+ /* Index zero reserved for no fuses programmed */
+ { 0x01, "CN", 81, 676, "SCP", /* 4, 2 cores */
+ { /* List of fuses for this SKU */
+ /* No fuses */
+ 0 /* End of fuse list marker */
+ }
+ },
+ { 0x02, "CN", 81, 676, "CP", /* 4, 2 cores */
+ { /* List of fuses for this SKU */
+ BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(1), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(2), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(3), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(4), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(5), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(6), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(7), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(8), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(9), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(10), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(11), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(12), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(13), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(14), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(15), /* Nitrox */
+ BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(16), /* Nitrox */
+ 0 /* End of fuse list marker */
+ }
+ },
+ { 0x07, "CN", 81, 676, "AUS", /* 4, 2 cores */
+ { /* List of fuses for this SKU */
+ FUSES_CHECK_FUSF, /* Trusted boot */
+ 0 /* End of fuse list marker */
+ }
+ },
+ { 0x08, "CN", 81, 676, "AUC", /* 4, 2 cores */
+ { /* List of fuses for this SKU */
+ FUSES_CHECK_FUSF, /* Trusted boot */
+ BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(1), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(2), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(3), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(4), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(5), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(6), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(7), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(8), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(9), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(10), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(11), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(12), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(13), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(14), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(15), /* Nitrox */
+ BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(16), /* Nitrox */
+ 0 /* End of fuse list marker */
+ }
+ },
+ { 0x03, "CN", 80, 676, "SCP", /* 4, 2 cores */
+ { /* List of fuses for this SKU */
+ /* Note that CHIP_ID(7) is supposed to be blown, but a few chips
+ have incorrect fuses. We allow CN80XX SKUs with or without
+ CHIP_ID(7) */
+ //BDK_MIO_FUS_FUSE_NUM_E_CHIP_IDX(7), /* Alternate package fuse 2? */
+ BDK_MIO_FUS_FUSE_NUM_E_L2C_CRIPX(1), /* L2C is half size */
+ BDK_MIO_FUS_FUSE_NUM_E_LMC_HALF, /* LMC is half width */
+ 0 /* End of fuse list marker */
+ }
+ },
+ { 0x04, "CN", 80, 676, "CP", /* 4, 2 cores */
+ { /* List of fuses for this SKU */
+ /* Note that CHIP_ID(7) is supposed to be blown, but a few chips
+ have incorrect fuses. We allow CN80XX SKUs with or without
+ CHIP_ID(7) */
+ //BDK_MIO_FUS_FUSE_NUM_E_CHIP_IDX(7), /* Alternate package fuse 2? */
+ BDK_MIO_FUS_FUSE_NUM_E_L2C_CRIPX(1), /* L2C is half size */
+ BDK_MIO_FUS_FUSE_NUM_E_LMC_HALF, /* LMC is half width */
+ BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(1), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(2), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(3), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(4), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(5), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(6), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(7), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(8), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(9), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(10), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(11), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(12), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(13), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(14), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(15), /* Nitrox */
+ BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(16), /* Nitrox */
+ 0 /* End of fuse list marker */
+ }
+ },
+ { 0x05, "CN", 80, 555, "SCP", /* 4, 2 cores */
+ { /* List of fuses for this SKU */
+ BDK_MIO_FUS_FUSE_NUM_E_CHIP_IDX(6), /* Alternate package fuse */
+ BDK_MIO_FUS_FUSE_NUM_E_L2C_CRIPX(1), /* L2C is half size */
+ BDK_MIO_FUS_FUSE_NUM_E_LMC_HALF, /* LMC is half width */
+ 0 /* End of fuse list marker */
+ }
+ },
+ { 0x06, "CN", 80, 555, "CP", /* 4, 2 cores */
+ { /* List of fuses for this SKU */
+ BDK_MIO_FUS_FUSE_NUM_E_CHIP_IDX(6), /* Alternate package fuse */
+ BDK_MIO_FUS_FUSE_NUM_E_L2C_CRIPX(1), /* L2C is half size */
+ BDK_MIO_FUS_FUSE_NUM_E_LMC_HALF, /* LMC is half width */
+ BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(1), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(2), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(3), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(4), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(5), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(6), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(7), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(8), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(9), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(10), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(11), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(12), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(13), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(14), /* Nitrox */
+ //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(15), /* Nitrox */
+ BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(16), /* Nitrox */
+ 0 /* End of fuse list marker */
+ }
+ },
+ {} /* End of SKU list marker */
+};
+
+/***************************************************/
+/* SKU table for t93 */
+/***************************************************/
+static const model_sku_info_t t93_sku_info[] =
+{
+ /* Index zero reserved for no fuses programmed */
+ { 0x01, "CN", 93, 1676, "SCP", /* 24, 20, 16, 12, 8 cores */
+ { /* List of fuses for this SKU */
+ /* No fuses */
+ 0 /* End of fuse list marker */
+ }
+ },
+ {} /* End of SKU list marker */
+};
+
+/**
+ * Given a core count, return the last two digits of a model number
+ *
+ * @param cores Number of cores
+ *
+ * @return Two digit model number
+ */
+static int model_digits_for_cores(int cores)
+{
+ /* If the number of cores falls between two model levels, use the
+ lower level, since a model guarantees a minimum number of cores.
+ Falling between levels should never happen, but you never know */
+ switch (cores)
+ {
+ case 1: return 10; /* CNxx10 = 1 core */
+ case 2: return 20; /* CNxx20 = 2 cores */
+ case 3: return 25; /* CNxx25 = 3 cores */
+ case 4: return 30; /* CNxx30 = 4 cores */
+ case 5: return 32; /* CNxx32 = 5 cores */
+ case 6: return 34; /* CNxx34 = 6 cores */
+ case 7: return 38; /* CNxx38 = 7 cores */
+ case 8: return 40; /* CNxx40 = 8 cores */
+ case 9: return 42; /* CNxx42 = 9 cores */
+ case 10: return 45; /* CNxx45 = 10 cores */
+ case 11: return 48; /* CNxx48 = 11 cores */
+ case 12: return 50; /* CNxx50 = 12 cores */
+ case 13: return 52; /* CNxx52 = 13 cores */
+ case 14: return 55; /* CNxx55 = 14 cores */
+ case 15: return 58; /* CNxx58 = 15 cores */
+ case 16 ... 19: return 60; /* CNxx60 = 16 cores */
+ case 20 ... 23: return 65; /* CNxx65 = 20 cores */
+ case 24 ... 31: return 70; /* CNxx70 = 24 cores */
+ case 32 ... 39: return 80; /* CNxx80 = 32 cores */
+ case 40 ... 43: return 85; /* CNxx85 = 40 cores */
+ case 44 ... 47: return 88; /* CNxx88 = 44 cores */
+ default: return 90; /* CNxx90 = 48 cores */
+ }
+}
+
+/**
+ * Return non-zero if the die is in an alternate package. The
+ * normal is_model() checks will treat alternate package parts
+ * as all the same, where this function can be used to detect
+ * them. The return value is the upper two bits of
+ * MIO_FUS_DAT2[chip_id]. Most alternate packages use bit 6,
+ * which will return 1 here. Parts with a second alternative
+ * will use bit 7, which will return 2.
+ *
+ * @param arg_model One of the CAVIUM_* constants for chip models and passes
+ *
+ * @return Non-zero if an alternate package
+ * 0 = Normal package
+ * 1 = Alternate package 1 (CN86XX, CN80XX with 555 balls)
+ * 2 = Alternate package 2 (CN80XX with 676 balls)
+ * 3 = Alternate package 3 (Currently unused)
+ */
+int cavium_is_altpkg(uint32_t arg_model)
+{
+ if (CAVIUM_IS_MODEL(arg_model))
+ {
+ BDK_CSR_INIT(mio_fus_dat2, bdk_numa_local(), BDK_MIO_FUS_DAT2);
+ /* Bits 7:6 are used for alternate packages. Return the exact
+ number so multiple alternate packages can be detected
+ (CN80XX is an example) */
+ int altpkg = mio_fus_dat2.s.chip_id >> 6;
+ if (altpkg)
+ return altpkg;
+ /* Due to a documentation mixup, some CN80XX parts do not have chip_id
+ bit 7 set. As a backup, use lmc_mode32 to find these parts. Both
+ bits are supposed to be fused, but some parts only have lmc_mode32 */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && mio_fus_dat2.s.lmc_mode32)
+ return 2;
+ return 0;
+ }
+ else
+ return 0;
+}
+
+/**
+ * Return the SKU string for a chip
+ *
+ * @param node Node to get SKU for
+ *
+ * @return Chip's SKU
+ */
+const char* bdk_model_get_sku(int node)
+{
+ /* Storage for SKU is per node. Static variable stores the value
+ so we don't decode on every call */
+ static char chip_sku[BDK_NUMA_MAX_NODES][32] = { { 0, }, };
+
+ /* Return the cached string if we've already filled it in */
+ if (chip_sku[node][0])
+ return chip_sku[node];
+
+ /* Figure out which SKU list to use */
+ const model_sku_info_t *sku_info;
+ uint64_t result;
+ asm ("mrs %[rd],MIDR_EL1" : [rd] "=r" (result));
+ result = bdk_extract(result, 4, 12);
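+ /* Bits 15:4 of MIDR_EL1 are the part number field; the values below
+ (0xa1, 0xa2, 0xa3, 0xb2) correspond to the CN88XX, CN81XX, CN83XX,
+ and CN93XX dies handled by the SKU tables above */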
+ switch (result)
+ {
+ case 0xa1:
+ sku_info = t88_sku_info;
+ break;
+ case 0xa2:
+ sku_info = t81_sku_info;
+ break;
+ case 0xa3:
+ sku_info = t83_sku_info;
+ break;
+ case 0xb2:
+ sku_info = t93_sku_info;
+ break;
+ default:
+ bdk_fatal("SKU detect: Unknown die\n");
+ }
+
+ /* Read the SKU index from the PNAME fuses */
+ int match_index = -1;
+ // FIXME: Implement PNAME reads
+
+ /* Search the SKU list for the best match, where all the fuses match.
+ Only needed if the PNAME fuses don't specify the index */
+ if (match_index == -1)
+ {
+ match_index = 0;
+ int match_score = -1;
+ int index = 0;
+ while (sku_info[index].fuse_index)
+ {
+ int score = 0;
+ int fuse_index = 0;
+ /* Count the number of fuses that match. A mismatch forces the worst
+ score (-1) */
+ while (sku_info[index].fuses[fuse_index])
+ {
+ int fuse;
+ /* FUSES_CHECK_FUSF is special for trusted parts */
+ if (sku_info[index].fuses[fuse_index] == FUSES_CHECK_FUSF)
+ {
+ BDK_CSR_INIT(fusf_ctl, node, BDK_FUSF_CTL);
+ fuse = (fusf_ctl.u >> 6) & 1;
+ }
+ else
+ {
+ fuse = bdk_fuse_read(node, sku_info[index].fuses[fuse_index]);
+ }
+ if (fuse)
+ {
+ /* Match, improve the score */
+ score++;
+ }
+ else
+ {
+ /* Mismatch, force score bad */
+ score = -1;
+ break;
+ }
+ fuse_index++;
+ }
+ /* If this score is better than the last match, use this index as the
+ match */
+ if (score > match_score)
+ {
+ match_score = score;
+ match_index = index;
+ }
+ index++;
+ }
+ }
+
+ /* Use the SKU table to determine the defaults for the SKU parts */
+ const char *prefix = sku_info[match_index].prefix;
+ int model = 100 * sku_info[match_index].model_base;
+ int cores = bdk_get_num_cores(node);
+ const char *customer_code = "";
+ int rclk_limit = bdk_clock_get_rate(node, BDK_CLOCK_RCLK) / 1000000;
+ const char *bg_str = "BG"; /* Default Ball Grid array */
+ int balls = sku_info[match_index].num_balls; /* Num package balls */
+ const char *segment = sku_info[match_index].segment; /* Market segment */
+ char prod_phase[4]; /* Blank = production, PR = Prototype, ES = Engineering sample */
+ char prod_rev[5]; /* Product revision */
+ const char *rohs_option = "G"; /* RoHS is always G for current parts */
+
+ /* Update the model number with the number of cores */
+ model = (model / 100) * 100 + model_digits_for_cores(cores);
+
+ /* Update the RCLK setting based on MIO_FUS_DAT3[core_pll_mul] */
+ uint64_t core_pll_mul;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ {
+ BDK_CSR_INIT(mio_fus_dat3, node, BDK_MIO_FUS_DAT3);
+ core_pll_mul = mio_fus_dat3.s.core_pll_mul;
+ }
+ else
+ core_pll_mul = bdk_fuse_read_range(bdk_numa_local(), BDK_FUS_FUSE_NUM_E_CORE_MAX_MULX(0), 7);
+
+ if (core_pll_mul)
+ {
+        /* CORE_PLL_MUL covers bits 5:1, so we need to multiply by 2. The
+           documentation doesn't mention this clearly: There is a 300MHz
+           addition to the base multiplier */
+ rclk_limit = core_pll_mul * 2 * 50 + 300;
+ }
+
+ /* FIXME: Hardcode production as there is no way to tell */
+ prod_phase[0] = 0;
+
+ /* Read the Pass information from fuses. Note that pass info in
+ MIO_FUS_DAT2[CHIP_ID] is encoded as
+ bit[7] = Unused, zero
+ bit[6] = Alternate package
+ bit[5..3] = Major pass
+ bit[2..0] = Minor pass */
+ int major_pass;
+ int minor_pass;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ {
+ BDK_CSR_INIT(mio_fus_dat2, node, BDK_MIO_FUS_DAT2);
+ major_pass = ((mio_fus_dat2.s.chip_id >> 3) & 7) + 1;
+ minor_pass = mio_fus_dat2.s.chip_id & 7;
+ }
+ else
+ {
+ /* FIXME: We don't support getting the pass for other node on CN9XXX */
+ bdk_ap_midr_el1_t midr_el1;
+ BDK_MRS(MIDR_EL1, midr_el1.u);
+ major_pass = (midr_el1.s.variant & 7) + 1;
+ minor_pass = midr_el1.s.revision;
+ }
+
+ if (major_pass == 1)
+ {
+ /* Pass 1.x is special in that we don't show the implied 'X' */
+ if (minor_pass == 0)
+ {
+ /* Completely blank for 1.0 */
+ prod_rev[0] = 0;
+ }
+ else
+ {
+ /* If we are production and not pass 1.0, the product phase
+ changes from blank to "-P". The product revision then
+ follows the product phase without a '-' */
+ if (prod_phase[0] == 0)
+ {
+ /* Change product phase to "-P" */
+ prod_phase[0] = '-';
+ prod_phase[1] = 'P';
+ prod_phase[2] = 0;
+ }
+ /* No separator between phase and revision */
+ prod_rev[0] = '1';
+ prod_rev[1] = '0' + minor_pass;
+ prod_rev[2] = 0;
+ }
+ }
+ else
+ {
+        /* Pass 2.0 and above use a letter code indexed by major pass
+           (1=X, 2=Y, 3=W, ...) */
+ const char pass_letter[8] = "XYWVUTSR";
+ prod_rev[0] = '-';
+ prod_rev[1] = pass_letter[major_pass-1];
+ if (minor_pass == 0)
+ {
+ /* Nothing after the letter code */
+ prod_rev[2] = 0;
+ }
+ else
+ {
+ /* Add major and minor after the letter code */
+ prod_rev[2] = '0' + major_pass;
+ prod_rev[3] = '0' + minor_pass;
+ prod_rev[4] = 0;
+ }
+ }
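+    /* Worked example of the encoding above: major_pass = 3, minor_pass = 2
+       gives pass_letter[2] = 'W' and prod_rev = "-W32"; major_pass = 2,
+       minor_pass = 0 gives just "-Y" */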
+
+ /* Special check for CN88XX pass 2.0 and 2.1. Documentation mistakenly
+       specified 2.0 as -PR and 2.1 as -Y. Rather than fix the docs, Ops
+       decided to special case this SKU */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (major_pass == 2))
+ {
+ if (minor_pass == 0)
+ {
+ prod_phase[0] = '-'; /* SKU ends with -PR-Y-G */
+ prod_phase[1] = 'P';
+ prod_phase[2] = 'R';
+ prod_phase[3] = 0;
+ }
+ else if (minor_pass == 1)
+ {
+ prod_rev[0] = '-'; /* SKU ends with -Y-G */
+ prod_rev[1] = 'Y';
+ prod_rev[2] = 0;
+ }
+ }
+
+ /* Read PNAME fuses, looking for SKU overrides */
+ // FIXME: Implement PNAME reads
+
+ /* Build the SKU string */
+ snprintf(chip_sku[node], sizeof(chip_sku[node]), "%s%d%s-%d%s%d-%s%s%s-%s",
+ prefix, model, customer_code, rclk_limit, bg_str, balls, segment,
+ prod_phase, prod_rev, rohs_option);
+
+ return chip_sku[node];
+}
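+
+/* A minimal usage sketch (hypothetical helper): print the SKU decoded above
+   for the local node */
+static inline void example_print_sku(void)
+{
+    printf("Chip SKU: %s\n", bdk_model_get_sku(bdk_numa_local()));
+}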
diff --git a/src/vendorcode/cavium/bdk/libbdk-arch/bdk-numa.c b/src/vendorcode/cavium/bdk/libbdk-arch/bdk-numa.c
new file mode 100644
index 0000000000..33d34ba669
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-arch/bdk-numa.c
@@ -0,0 +1,91 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include <stdio.h>
+
+int __bdk_numa_master_node = -1; /* Which node is the master */
+static int __bdk_numa_exists_mask = 0; /* Bitmask of nodes that exist */
+static bdk_spinlock_t __bdk_numa_lock;
+
+/**
+ * Get a bitmask of the nodes that exist
+ *
+ * @return bitmask
+ */
+uint64_t bdk_numa_get_exists_mask(void)
+{
+ return __bdk_numa_exists_mask;
+}
+
+/**
+ * Add a node to the exists mask
+ *
+ * @param node Node to add
+ */
+void bdk_numa_set_exists(bdk_node_t node)
+{
+ bdk_spinlock_lock(&__bdk_numa_lock);
+ __bdk_numa_exists_mask |= 1 << node;
+ if (__bdk_numa_master_node == -1)
+ __bdk_numa_master_node = node;
+ bdk_spinlock_unlock(&__bdk_numa_lock);
+}
+
+/**
+ * Return true if a node exists
+ *
+ * @param node Node to check
+ *
+ * @return Non zero if the node exists
+ */
+int bdk_numa_exists(bdk_node_t node)
+{
+ return __bdk_numa_exists_mask & (1 << node);
+}
+
+/**
+ * Return true if there is only one node
+ *
+ * @return Non-zero if only one node exists
+ */
+int bdk_numa_is_only_one(void)
+{
+ return __bdk_numa_exists_mask == 1;
+}
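+
+/* A minimal sketch (hypothetical helper, not part of this file): iterate over
+   every node reported by the exists mask */
+static inline void example_for_each_node(void (*fn)(bdk_node_t node))
+{
+    uint64_t mask = bdk_numa_get_exists_mask();
+    for (int node = 0; node < BDK_NUMA_MAX_NODES; node++)
+        if (mask & (1ull << node))
+            fn((bdk_node_t)node);
+}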
+
diff --git a/src/vendorcode/cavium/bdk/libbdk-arch/bdk-platform.c b/src/vendorcode/cavium/bdk/libbdk-arch/bdk-platform.c
new file mode 100644
index 0000000000..8cac04a214
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-arch/bdk-platform.c
@@ -0,0 +1,59 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include "libbdk-arch/bdk-csrs-ocla.h"
+
+bdk_platform_t __bdk_platform;
+
+void __bdk_platform_init()
+{
+ BDK_CSR_INIT(c, bdk_numa_master(), BDK_OCLAX_CONST(0));
+ if (c.u == 0)
+ {
+ __bdk_platform = BDK_PLATFORM_ASIM;
+ }
+ else
+ {
+ int plat2 = bdk_fuse_read(bdk_numa_master(), 197);
+ int plat1 = bdk_fuse_read(bdk_numa_master(), 196);
+ int plat0 = bdk_fuse_read(bdk_numa_master(), 195);
+ __bdk_platform = (plat2 << 2) | (plat1 << 1) | plat0;
+ }
+}
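+
+/* Worked example of the decode above (fuse numbers from the code, platform
+   values defined by bdk_platform_t): fuses {197, 196, 195} = {0, 1, 0} yield
+   __bdk_platform = (0 << 2) | (1 << 1) | 0 = 2 */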
+
diff --git a/src/vendorcode/cavium/bdk/libbdk-boot/bdk-boot-status.c b/src/vendorcode/cavium/bdk/libbdk-boot/bdk-boot-status.c
new file mode 100644
index 0000000000..83ab14cbc7
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-boot/bdk-boot-status.c
@@ -0,0 +1,81 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include "libbdk-arch/bdk-csrs-mio_tws.h"
+
+/**
+ * Report boot status to the BMC or whomever might care. This function
+ * will return quickly except for a status of "power cycle". In the power cycle
+ * case it is assumed the board is in a bad state and should not continue until
+ * a power cycle restarts us.
+ *
+ * @param status Status to report. Enumerated in bdk_boot_status_t
+ */
+void bdk_boot_status(bdk_boot_status_t status)
+{
+ bdk_node_t node = bdk_numa_master();
+ int twsi = bdk_config_get_int(BDK_CONFIG_BMC_TWSI);
+
+ /* Update status */
+ if (twsi != -1)
+ {
+ BDK_CSR_DEFINE(sw_twsi, BDK_MIO_TWSX_SW_TWSI(twsi));
+ sw_twsi.u = 0;
+ sw_twsi.s.v = 1; /* Valid data */
+ sw_twsi.s.slonly = 1; /* Slave only */
+ sw_twsi.s.data = status;
+ BDK_CSR_WRITE(node, BDK_MIO_TWSX_SW_TWSI(twsi), sw_twsi.u);
+ }
+
+ /* As a special case, power cycle will display a message and try a
+ soft reset if we can't power cycle in 5 seconds */
+ if (status == BDK_BOOT_STATUS_REQUEST_POWER_CYCLE)
+ {
+ if (twsi != -1)
+ {
+ printf("Requested power cycle\n");
+ bdk_wait_usec(5000000); /* 5 sec */
+ printf("Power cycle failed, trying soft reset\n");
+ }
+ else
+ printf("Performing soft reset\n");
+ bdk_reset_chip(node);
+ }
+}
+
diff --git a/src/vendorcode/cavium/bdk/libbdk-boot/bdk-watchdog.c b/src/vendorcode/cavium/bdk/libbdk-boot/bdk-watchdog.c
new file mode 100644
index 0000000000..48f955a7ef
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-boot/bdk-watchdog.c
@@ -0,0 +1,108 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include "libbdk-arch/bdk-csrs-gti.h"
+
+/**
+ * Setup the watchdog to expire in timeout_ms milliseconds. When the watchdog
+ * expires, three things happen on the chip:
+ * 1) Expire 1: interrupt that is ignored by the BDK
+ * 2) Expire 2: DEL3T interrupt, which is disabled and ignored
+ * 3) Expire 3: Soft reset of the chip
+ *
+ * Since we want a soft reset, we actually program the watchdog to expire at
+ * the timeout / 3.
+ *
+ * @param timeout_ms Timeout in milliseconds. If this is zero, the timeout is taken from the
+ * global configuration option BDK_BRD_CFG_WATCHDOG_TIMEOUT
+ */
+void bdk_watchdog_set(unsigned int timeout_ms)
+{
+ if (timeout_ms == 0)
+ timeout_ms = bdk_config_get_int(BDK_CONFIG_WATCHDOG_TIMEOUT);
+
+ if (timeout_ms > 0)
+ {
+ uint64_t sclk = bdk_clock_get_rate(bdk_numa_local(), BDK_CLOCK_SCLK);
+ uint64_t timeout_sclk = sclk * timeout_ms / 1000;
+ /* Per comment above, we want the watchdog to expire at 3x the rate specified */
+ timeout_sclk /= 3;
+ /* Watchdog counts in 1024 cycle steps */
+ uint64_t timeout_wdog = timeout_sclk >> 10;
+ /* We can only specify the upper 16 bits of a 24 bit value. Round up */
+ timeout_wdog = (timeout_wdog + 0xff) >> 8;
+ /* If the timeout overflows the hardware limit, set max */
+ if (timeout_wdog >= 0x10000)
+ timeout_wdog = 0xffff;
+
+ BDK_TRACE(INIT, "Watchdog: Set to expire %lu SCLK cycles\n", timeout_wdog << 18);
+ BDK_CSR_MODIFY(c, bdk_numa_local(), BDK_GTI_CWD_WDOGX(bdk_get_core_num()),
+ c.s.len = timeout_wdog;
+ c.s.mode = 3);
+ }
+}
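+
+/* Worked example of the math above, assuming SCLK = 800 MHz and
+   timeout_ms = 3000: timeout_sclk = 2.4e9 / 3 = 8e8 cycles, >>10 gives
+   781250 steps, and rounding up into the upper 16 bits gives
+   (781250 + 0xff) >> 8 = 3052. The hardware then counts 3052 << 18 SCLK
+   cycles (~1 second) per expire, so the third expire (soft reset) lands
+   near the requested 3 seconds */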
+
+/**
+ * Signal the watchdog that we are still running
+ */
+void bdk_watchdog_poke(void)
+{
+ BDK_CSR_WRITE(bdk_numa_local(), BDK_GTI_CWD_POKEX(bdk_get_core_num()), 0);
+}
+
+/**
+ * Disable the hardware watchdog
+ */
+void bdk_watchdog_disable(void)
+{
+ BDK_CSR_WRITE(bdk_numa_local(), BDK_GTI_CWD_WDOGX(bdk_get_core_num()), 0);
+ BDK_TRACE(INIT, "Watchdog: Disabled\n");
+}
+
+/**
+ * Return true if the watchdog is configured and running
+ *
+ * @return Non-zero if watchdog is running
+ */
+int bdk_watchdog_is_running(void)
+{
+ BDK_CSR_INIT(wdog, bdk_numa_local(), BDK_GTI_CWD_WDOGX(bdk_get_core_num()));
+ return wdog.s.mode != 0;
+}
+
diff --git a/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-address.c b/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-address.c
new file mode 100644
index 0000000000..94d7d76752
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-address.c
@@ -0,0 +1,183 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include "libbdk-arch/bdk-csrs-l2c.h"
+
+#define EXTRACT(v, lsb, width) (((v) >> (lsb)) & ((1ull << (width)) - 1))
+#define INSERT(a, v, lsb, width) a|=(((v) & ((1ull << (width)) - 1)) << (lsb))
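+
+/* Example of the helper macros: EXTRACT(0x1234, 4, 8) selects bits [11:4]
+   and yields 0x23; INSERT(a, 0x23, 4, 8) merges that value back into the
+   same bit field of a */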
+
+/**
+ * Given a physical DRAM address, extract information about the node, LMC, DIMM,
+ * prank, lrank, bank, row, and column that was accessed.
+ *
+ * @param address Physical address to decode
+ * @param node Node the address was for
+ * @param lmc LMC controller the address was for
+ * @param dimm DIMM the address was for
+ * @param prank Physical RANK on the DIMM
+ * @param lrank Logical RANK on the DIMM
+ * @param bank BANK on the DIMM
+ * @param row Row on the DIMM
+ * @param col Column on the DIMM
+ */
+void
+bdk_dram_address_extract_info(uint64_t address, int *node, int *lmc, int *dimm,
+ int *prank, int *lrank, int *bank, int *row, int *col)
+{
+ int bitno = CAVIUM_IS_MODEL(CAVIUM_CN83XX) ? 19 : 20;
+ *node = EXTRACT(address, 40, 2); /* Address bits [41:40] */
+ /* Determine the LMC controller */
+ BDK_CSR_INIT(l2c_ctl, *node, BDK_L2C_CTL);
+ int bank_lsb, xbits;
+
+ /* xbits depends on number of LMCs */
+ xbits = __bdk_dram_get_num_lmc(*node) >> 1; // 4->2; 2->1; 1->0
+ bank_lsb = 7 + xbits;
+
+ /* LMC number is probably aliased */
+ if (l2c_ctl.s.disidxalias)
+ *lmc = EXTRACT(address, 7, xbits);
+ else
+ *lmc = EXTRACT(address, 7, xbits) ^ EXTRACT(address, bitno, xbits) ^ EXTRACT(address, 12, xbits);
+
+ /* Figure out the bank field width */
+ BDK_CSR_INIT(lmcx_config, *node, BDK_LMCX_CONFIG(*lmc));
+ int bank_width = __bdk_dram_get_num_bank_bits(*node, *lmc);
+
+ /* Extract additional info from the LMC_CONFIG CSR */
+ BDK_CSR_INIT(ext_config, *node, BDK_LMCX_EXT_CONFIG(*lmc));
+ int dimm_lsb = 28 + lmcx_config.s.pbank_lsb + xbits;
+ int dimm_width = 40 - dimm_lsb;
+ int prank_lsb = dimm_lsb - lmcx_config.s.rank_ena;
+ int prank_width = dimm_lsb - prank_lsb;
+ int lrank_lsb = prank_lsb - ext_config.s.dimm0_cid;
+ int lrank_width = prank_lsb - lrank_lsb;
+ int row_lsb = 14 + lmcx_config.s.row_lsb + xbits;
+ int row_width = lrank_lsb - row_lsb;
+ int col_hi_lsb = bank_lsb + bank_width;
+ int col_hi_width= row_lsb - col_hi_lsb;
+
+ /* Extract the parts of the address */
+ *dimm = EXTRACT(address, dimm_lsb, dimm_width);
+ *prank = EXTRACT(address, prank_lsb, prank_width);
+ *lrank = EXTRACT(address, lrank_lsb, lrank_width);
+ *row = EXTRACT(address, row_lsb, row_width);
+
+ /* bank calculation may be aliased... */
+ BDK_CSR_INIT(lmcx_control, *node, BDK_LMCX_CONTROL(*lmc));
+ if (lmcx_control.s.xor_bank)
+ *bank = EXTRACT(address, bank_lsb, bank_width) ^ EXTRACT(address, 12 + xbits, bank_width);
+ else
+ *bank = EXTRACT(address, bank_lsb, bank_width);
+
+ /* LMC number already extracted */
+ int col_hi = EXTRACT(address, col_hi_lsb, col_hi_width);
+ *col = EXTRACT(address, 3, 4) | (col_hi << 4);
+ /* Bus byte is address bits [2:0]. Unused here */
+}
+
+/**
+ * Construct a physical address given the node, LMC, DIMM, prank, lrank, bank, row, and column.
+ *
+ * @param node Node the address was for
+ * @param lmc LMC controller the address was for
+ * @param dimm DIMM the address was for
+ * @param prank Physical RANK on the DIMM
+ * @param lrank Logical RANK on the DIMM
+ * @param bank BANK on the DIMM
+ * @param row Row on the DIMM
+ * @param col Column on the DIMM
+ */
+uint64_t
+bdk_dram_address_construct_info(bdk_node_t node, int lmc, int dimm,
+ int prank, int lrank, int bank, int row, int col)
+
+{
+ uint64_t address = 0;
+ int bitno = CAVIUM_IS_MODEL(CAVIUM_CN83XX) ? 19 : 20;
+
+ // insert node bits
+ INSERT(address, node, 40, 2); /* Address bits [41:40] */
+
+ /* xbits depends on number of LMCs */
+ int xbits = __bdk_dram_get_num_lmc(node) >> 1; // 4->2; 2->1; 1->0
+ int bank_lsb = 7 + xbits;
+
+ /* Figure out the bank field width */
+ int bank_width = __bdk_dram_get_num_bank_bits(node, lmc);
+
+ /* Extract additional info from the LMC_CONFIG CSR */
+ BDK_CSR_INIT(lmcx_config, node, BDK_LMCX_CONFIG(lmc));
+ BDK_CSR_INIT(ext_config, node, BDK_LMCX_EXT_CONFIG(lmc));
+ int dimm_lsb = 28 + lmcx_config.s.pbank_lsb + xbits;
+ int dimm_width = 40 - dimm_lsb;
+ int prank_lsb = dimm_lsb - lmcx_config.s.rank_ena;
+ int prank_width = dimm_lsb - prank_lsb;
+ int lrank_lsb = prank_lsb - ext_config.s.dimm0_cid;
+ int lrank_width = prank_lsb - lrank_lsb;
+ int row_lsb = 14 + lmcx_config.s.row_lsb + xbits;
+ int row_width = lrank_lsb - row_lsb;
+ int col_hi_lsb = bank_lsb + bank_width;
+ int col_hi_width = row_lsb - col_hi_lsb;
+
+ /* Insert some other parts of the address */
+ INSERT(address, dimm, dimm_lsb, dimm_width);
+ INSERT(address, prank, prank_lsb, prank_width);
+ INSERT(address, lrank, lrank_lsb, lrank_width);
+ INSERT(address, row, row_lsb, row_width);
+ INSERT(address, col >> 4, col_hi_lsb, col_hi_width);
+ INSERT(address, col, 3, 4);
+
+ /* bank calculation may be aliased... */
+ BDK_CSR_INIT(lmcx_control, node, BDK_LMCX_CONTROL(lmc));
+ int new_bank = bank;
+ if (lmcx_control.s.xor_bank)
+ new_bank ^= EXTRACT(address, 12 + xbits, bank_width);
+ INSERT(address, new_bank, bank_lsb, bank_width);
+
+ /* Determine the actual C bits from the input LMC controller arg */
+ /* The input LMC number was probably aliased with other fields */
+ BDK_CSR_INIT(l2c_ctl, node, BDK_L2C_CTL);
+ int new_lmc = lmc;
+ if (!l2c_ctl.s.disidxalias)
+ new_lmc ^= EXTRACT(address, bitno, xbits) ^ EXTRACT(address, 12, xbits);
+ INSERT(address, new_lmc, 7, xbits);
+
+ return address;
+}
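+
+/* A minimal sketch (hypothetical, illustration only): the two functions above
+   are inverses, so a decode/re-encode should round-trip */
+static inline int example_address_round_trip(uint64_t address)
+{
+    int node, lmc, dimm, prank, lrank, bank, row, col;
+    bdk_dram_address_extract_info(address, &node, &lmc, &dimm, &prank,
+                                  &lrank, &bank, &row, &col);
+    return bdk_dram_address_construct_info((bdk_node_t)node, lmc, dimm, prank,
+                                           lrank, bank, row, col) == address;
+}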
diff --git a/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-config.c b/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-config.c
new file mode 100644
index 0000000000..3465c5d98b
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-config.c
@@ -0,0 +1,163 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include <unistd.h>
+
+BDK_REQUIRE_DEFINE(DRAM_CONFIG);
+
+/**
+ * Lookup a DRAM configuration by name and initialize DRAM using it
+ *
+ * @param node Node to configure
+ * @param ddr_clock_override
+ * If non zero, override the DRAM frequency specified
+ * in the config with this value
+ *
+ * @return Amount of DRAM in MB, or negative on failure
+ */
+int bdk_dram_config(int node, int ddr_clock_override)
+{
+ const dram_config_t *config = libdram_config_load(node);
+ if (!config)
+ {
+ printf("N%d: No DRAM config specified, skipping DRAM init\n", node);
+ return 0;
+ }
+
+ BDK_TRACE(DRAM, "N%d: Starting DRAM init (config=%p, ddr_clock_override=%d)\n", node, config, ddr_clock_override);
+ int mbytes = libdram_config(node, config, ddr_clock_override);
+ BDK_TRACE(DRAM, "N%d: DRAM init returned %d\n", node, mbytes);
+ if (mbytes <= 0)
+ {
+ printf("ERROR: DDR initialization failed\n");
+ return -1;
+ }
+
+ return mbytes;
+}
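+
+/* A minimal usage sketch (hypothetical): initialize DRAM on node 0 using the
+   frequency from the config, then report the result */
+static inline void example_init_dram(void)
+{
+    int mbytes = bdk_dram_config(0, 0);
+    if (mbytes > 0)
+        printf("N0: %d MB of DRAM initialized\n", mbytes);
+}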
+
+/**
+ * Do DRAM configuration tuning
+ *
+ * @param node Node to tune
+ *
+ * @return Success or Fail
+ */
+int bdk_dram_tune(int node)
+{
+ int ret;
+ BDK_TRACE(DRAM, "N%d: Starting DRAM tuning\n", node);
+ ret = libdram_tune(node);
+ BDK_TRACE(DRAM, "N%d: DRAM tuning returned %d\n", node, ret);
+ return ret;
+}
+
+/**
+ * Do all the DRAM Margin tests
+ *
+ * @param node Node to test
+ *
+ * @return Success or Fail
+ */
+void bdk_dram_margin(int node)
+{
+ BDK_TRACE(DRAM, "N%d: Starting DRAM margining\n", node);
+ libdram_margin(node);
+ BDK_TRACE(DRAM, "N%d: Finished DRAM margining.\n", node);
+ return;
+}
+
+/**
+ * Return a string describing the DRAM configuration at the specified node.
+ * Note that this implementation always returns a string, never NULL.
+ *
+ * @param node node to retrieve
+ *
+ * @return Info string
+ */
+const char* bdk_dram_get_info_string(int node)
+{
+ #define INFO_STRING_LEN 40
+ static char info_string[INFO_STRING_LEN];
+ static const char *info_ptr = info_string;
+
+ snprintf(info_string, INFO_STRING_LEN,
+ " %ld MB, %ld MT/s, %s %s",
+ bdk_dram_get_size_mbytes(node),
+ bdk_config_get_int(BDK_CONFIG_DDR_SPEED, node),
+ (__bdk_dram_is_ddr4(node, 0)) ? "DDR4" : "DDR3",
+ (__bdk_dram_is_rdimm(node, 0)) ? "RDIMM" : "UDIMM");
+
+ return info_ptr;
+}
+
+
+/**
+ * Return the highest address currently used by the BDK. This address will
+ * be about 4MB above the top of the BDK to make sure small growths between the
+ * call and its use don't cause corruption. Any call to memory allocation can
+ * change this value.
+ *
+ * @return Size of the BDK in bytes
+ */
+uint64_t bdk_dram_get_top_of_bdk(void)
+{
+    /* Make sure the start address is higher than the BDK's active range.
+ *
+ * As sbrk() returns a node address, mask off the node portion of
+ * the address to make it a physical offset. Doing this simplifies the
+ * address checks and calculations which only work with physical offsets.
+ */
+ uint64_t top_of_bdk = (bdk_ptr_to_phys(sbrk(0)) & bdk_build_mask(40));
+ uint64_t l2_size = bdk_l2c_get_cache_size_bytes(bdk_numa_master());
+ if (top_of_bdk <= l2_size)
+ {
+ /* Early BDK code takes care of the first L2 sized area of memory */
+ top_of_bdk = l2_size;
+ }
+ else
+ {
+ /* Give 4MB of extra so the BDK has room to grow */
+ top_of_bdk += 4 << 20;
+ /* Align it on a 64KB boundary */
+ top_of_bdk >>= 16;
+ top_of_bdk <<= 16;
+ }
+ return top_of_bdk;
+}
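+
+/* Worked example of the rounding above (illustrative address): sbrk() at
+   physical offset 0x1234567 plus 4MB is 0x1634567; shifting >>16 then <<16
+   truncates that to 0x1630000, a 64KB boundary */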
diff --git a/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-size.c b/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-size.c
new file mode 100644
index 0000000000..122afb2a18
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-size.c
@@ -0,0 +1,213 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+
+/**
+ * Return the number of LMC controllers in use
+ *
+ * @param node Node to probe
+ *
+ * @return Number of LMC controllers (1 to 4, depending on the chip and mode)
+ */
+int __bdk_dram_get_num_lmc(bdk_node_t node)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ {
+ BDK_CSR_INIT(lmcx_dll_ctl2, node, BDK_LMCX_DLL_CTL2(2)); // sample LMC2
+ return (lmcx_dll_ctl2.s.intf_en) ? 4 : 2;
+ }
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ {
+ BDK_CSR_INIT(lmcx_dll_ctl1, node, BDK_LMCX_DLL_CTL2(1)); // sample LMC1
+ return (lmcx_dll_ctl1.s.intf_en) ? 2 : 1;
+ }
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ {
+ return 1;
+ }
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN93XX))
+ {
+ BDK_CSR_INIT(lmcx_dll_ctl1, node, BDK_LMCX_DLL_CTL2(2));
+ if (lmcx_dll_ctl1.s.intf_en)
+ return 3;
+ lmcx_dll_ctl1.u = BDK_CSR_READ(node, BDK_LMCX_DLL_CTL2(1));
+ return (lmcx_dll_ctl1.s.intf_en) ? 2 : 1;
+ }
+ bdk_error("__bdk_dram_get_num_lmc() needs update for this chip\n");
+ return 1;
+}
+
+/**
+ * Return whether the node/LMC is in DRESET
+ *
+ * @param node Node to probe
+ * @param lmc    LMC to probe
+ *
+ * @return 1 or 0
+ */
+static int __bdk_dram_is_lmc_in_dreset(bdk_node_t node, int lmc)
+{
+ BDK_CSR_INIT(lmcx_dll_ctl2, node, BDK_LMCX_DLL_CTL2(lmc)); // can always read this
+ return (lmcx_dll_ctl2.s.dreset != 0) ? 1 : 0;
+}
+
+/**
+ * Return a mask of the number of row bits in use
+ *
+ * @param node   Node to probe
+ * @param lmc    LMC to probe
+ *
+ * @return Mask of row bits, or 0 if the LMC is in reset
+ */
+uint32_t __bdk_dram_get_row_mask(bdk_node_t node, int lmc)
+{
+ // PROTECT!!!
+ if (__bdk_dram_is_lmc_in_dreset(node, lmc)) // check LMCn
+ return 0;
+ BDK_CSR_INIT(lmcx_config, node, BDK_LMCX_CONFIG(lmc)); // sample LMCn
+ int numbits = 14 + lmcx_config.s.pbank_lsb - lmcx_config.s.row_lsb - lmcx_config.s.rank_ena;
+ return ((1ul << numbits) - 1);
+}
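+
+/* Worked example with illustrative CSR field values: pbank_lsb = 5,
+   row_lsb = 2 and rank_ena = 1 give numbits = 14 + 5 - 2 - 1 = 16, so the
+   returned row mask is 0xffff */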
+
+/**
+ * Return a mask of the number of column bits in use
+ *
+ * @param node   Node to probe
+ * @param lmc    LMC to probe
+ *
+ * @return Mask of column bits, or 0 if the LMC is in reset
+ */
+uint32_t __bdk_dram_get_col_mask(bdk_node_t node, int lmc)
+{
+ // PROTECT!!!
+ if (__bdk_dram_is_lmc_in_dreset(node, lmc)) // check LMCn
+ return 0;
+ BDK_CSR_INIT(lmcx_config, node, BDK_LMCX_CONFIG(lmc)); // sample LMCn
+ int numbits = 11 + lmcx_config.s.row_lsb - __bdk_dram_get_num_bank_bits(node, lmc);
+ return ((1ul << numbits) - 1);
+}
+
+/**
+ * Return the number of bank bits in use
+ *
+ * @param node   Node to probe
+ * @param lmc    LMC to probe
+ *
+ * @return Number of bank bits (3 or 4), or 0 if the LMC is in reset
+ */
+// all DDR3, and DDR4 x16 today, use only 3 bank bits; DDR4 x4 and x8 always have 4 bank bits
+// NOTE: this will change in the future, when DDR4 x16 devices can come with 16 banks!! FIXME!!
+int __bdk_dram_get_num_bank_bits(bdk_node_t node, int lmc)
+{
+ // PROTECT!!!
+ if (__bdk_dram_is_lmc_in_dreset(node, lmc)) // check LMCn
+ return 0;
+ BDK_CSR_INIT(lmcx_config, node, BDK_LMCX_CONFIG(lmc)); // sample LMCn
+ int bank_width = (__bdk_dram_is_ddr4(node, lmc) && (lmcx_config.s.bg2_enable)) ? 4 : 3;
+ return bank_width;
+}
+
+/**
+ * Return whether the node has DDR3 or DDR4 DRAM
+ *
+ * @param node   Node to probe
+ * @param lmc    LMC to probe
+ *
+ * @return 0 (DDR3) or 1 (DDR4)
+ */
+int __bdk_dram_is_ddr4(bdk_node_t node, int lmc)
+{
+ // PROTECT!!!
+ if (__bdk_dram_is_lmc_in_dreset(node, lmc)) // check LMCn
+ return 0;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 1;
+ BDK_CSR_INIT(lmcx_ddr_pll_ctl, node, BDK_LMCX_DDR_PLL_CTL(lmc)); // sample LMCn
+ return (lmcx_ddr_pll_ctl.cn83xx.ddr4_mode != 0);
+}
+
+/**
+ * Return whether the node has Registered DIMMs or Unbuffered DIMMs
+ *
+ * @param node   Node to probe
+ * @param lmc    LMC to probe
+ *
+ * @return 0 (Unbuffered) or 1 (Registered)
+ */
+int __bdk_dram_is_rdimm(bdk_node_t node, int lmc)
+{
+ // PROTECT!!!
+ if (__bdk_dram_is_lmc_in_dreset(node, lmc)) // check LMCn
+ return 0;
+ BDK_CSR_INIT(lmcx_control, node, BDK_LMCX_CONTROL(lmc)); // sample LMCn
+ return (lmcx_control.s.rdimm_ena != 0);
+}
+
+/**
+ * Get the amount of DRAM configured for a node. This is read from the LMC
+ * controller after DRAM is setup.
+ *
+ * @param node Node to query
+ *
+ * @return Size in megabytes
+ */
+uint64_t bdk_dram_get_size_mbytes(int node)
+{
+ if (bdk_is_platform(BDK_PLATFORM_EMULATOR))
+        return 2 << 10; /* 2GB is available on t88 and t81;
+                        ** some t83 models have 8GB, but it takes too long to init */
+ /* Return zero if dram isn't enabled */
+ if (!__bdk_is_dram_enabled(node))
+ return 0;
+
+ uint64_t memsize = 0;
+ const int num_dram_controllers = __bdk_dram_get_num_lmc(node);
+ for (int lmc = 0; lmc < num_dram_controllers; lmc++)
+ {
+ if (bdk_is_platform(BDK_PLATFORM_ASIM))
+ {
+ /* Asim doesn't simulate the rank detection, fake 4GB per controller */
+ memsize += 4ull << 30;
+ }
+ else
+ {
+ // PROTECT!!!
+ if (__bdk_dram_is_lmc_in_dreset(node, lmc)) // check LMCn
+ return 0;
+ BDK_CSR_INIT(lmcx_config, node, BDK_LMCX_CONFIG(lmc));
+ int num_ranks = bdk_pop(lmcx_config.s.init_status);
+ uint64_t rank_size = 1ull << (28 + lmcx_config.s.pbank_lsb - lmcx_config.s.rank_ena);
+ memsize += rank_size * num_ranks;
+ }
+ }
+ return memsize >> 20;
+}
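+
+/* Worked example of the size math above (illustrative field values): with
+   pbank_lsb = 6, rank_ena = 1 and two ranks set in init_status, rank_size =
+   1 << (28 + 6 - 1) = 8 GB, memsize = 16 GB, and the function returns 16384 */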
+
diff --git a/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-addrbus.c b/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-addrbus.c
new file mode 100644
index 0000000000..9fe8570454
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-addrbus.c
@@ -0,0 +1,115 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include "bdk.h"
+
+/* Used for all memory reads/writes related to the test */
+#define READ64(address) __bdk_dram_read64(address)
+#define WRITE64(address, data) __bdk_dram_write64(address, data)
+
+/**
+ * Address bus test. This test writes a single value to each power of two in the
+ * area, looking for false aliases that would be created by address lines being
+ * shorted or tied together.
+ *
+ * @param area        Starting physical address
+ * @param max_address Ending physical address, exclusive
+ * @param bursts      Number of bursts (currently unused by this test)
+ *
+ * @return Number of errors, zero means success
+ */
+int __bdk_dram_test_mem_address_bus(uint64_t area, uint64_t max_address, int bursts)
+{
+ int failures = 0;
+
+ /* Clear our work area. Checking for aliases later could get false
+ positives if it matched stale data */
+ void *ptr = (area) ? bdk_phys_to_ptr(area) : NULL;
+ bdk_zero_memory(ptr, max_address - area);
+ __bdk_dram_flush_to_mem_range(area, max_address);
+
+    /* Each time we write, we'll write this pattern xored with the address it
+       is written to */
+ uint64_t pattern = 0x0fedcba987654321;
+
+ /* Walk through the region incrementing our offset by a power of two. The
+       first few writes will be to the same cache line (offset 0x8, 0x10, 0x20,
+       and 0x40). Offsets 0x80 and beyond will be to different cache lines */
+ uint64_t offset = 0x8;
+ while (area + offset < max_address)
+ {
+ uint64_t address = area + offset;
+ /* Write one location with pattern xor address */
+ uint64_t p = pattern ^ address;
+ WRITE64(address, p);
+ __bdk_dram_flush_to_mem(address);
+ offset <<= 1;
+ }
+
+ /* Read all of the area to make sure no other locations were written */
+ uint64_t a = area;
+ offset = 0x8;
+ uint64_t next_write = area + offset;
+ while (a < max_address)
+ {
+ if (a + 256 < max_address)
+ BDK_PREFETCH(a + 256, 0);
+ for (int i=0; i<16; i++)
+ {
+ uint64_t data = READ64(a);
+ uint64_t correct;
+ if (a == next_write)
+ {
+ correct = pattern ^ next_write;
+ offset <<= 1;
+ next_write = area + offset;
+ }
+ else
+ correct = 0;
+ if (bdk_unlikely(data != correct))
+ {
+ failures++;
+ __bdk_dram_report_error(a, data, correct, 0, -1);
+ }
+ a += 8;
+ }
+ }
+
+ return failures;
+}
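+
+/* Example of the fault this catches: if the address lines for bits 3 and 4
+   were shorted together, the write of pattern^(area+0x10) would also land at
+   area+0x8, so the readback of area+0x8 would not match pattern^(area+0x8)
+   and would be reported as a failure */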
+
diff --git a/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-databus.c b/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-databus.c
new file mode 100644
index 0000000000..c3fa1ffd8d
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-databus.c
@@ -0,0 +1,252 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include "bdk.h"
+
+/* Used for all memory reads/writes related to the test */
+#define READ64(address) __bdk_dram_read64(address)
+#define WRITE64(address, data) __bdk_dram_write64(address, data)
+
+/* Build a 64bit mask out of a single hex digit */
+#define REPEAT2(v) ((((uint64_t)v) << 4) | ((uint64_t)v))
+#define REPEAT4(v) ((REPEAT2(v) << 8) | REPEAT2(v))
+#define REPEAT8(v) ((REPEAT4(v) << 16) | REPEAT4(v))
+#define REPEAT16(v) ((REPEAT8(v) << 32) | REPEAT8(v))
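+
+/* Example of the macro expansion: REPEAT16(0x5) builds 0x5555555555555555 and
+   REPEAT16(0xa) builds 0xaaaaaaaaaaaaaaaa; the tests below write
+   REPEAT16(digit) for each hex digit 0x0..0xf */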
+
+/**
+ * Read memory and check that the data bus pattern is present. The pattern is a
+ * sequence of 16 dwords created from the 16 hex digits repeated in each word.
+ *
+ * @param address Physical address to read. This must be cache line aligned.
+ * @param bursts  Number of times to repeat the read test to verify stability
+ *
+ * @return Number of errors, zero means success
+ */
+static int read_data_bus_burst(uint64_t address, int bursts)
+{
+ int failures = 0;
+
+ /* Loop over the burst so people using a scope have time to capture
+ traces */
+ for (int burst = 0; burst < bursts; burst++)
+ {
+ /* Invalidate all caches so we must read from DRAM */
+ __bdk_dram_flush_to_mem(address);
+ BDK_DCACHE_INVALIDATE;
+
+ for (uint64_t digit = 0; digit < 16; digit++)
+ {
+ uint64_t a = address + digit * 8;
+ uint64_t data = READ64(a);
+ uint64_t correct = REPEAT16(digit);
+ if (data != correct)
+ {
+ failures++;
+ __bdk_dram_report_error(a, data, correct, burst, -1);
+ }
+ }
+ }
+ return failures;
+}
+
+/**
+ * Write memory with a data bus pattern and check that it can be read correctly.
+ * The pattern is a sequence of 16 dwords created from the 16 hex digits repeated
+ * in each word.
+ *
+ * @param address Physical address to write. This must be cache line aligned. 128 bytes will be
+ * written starting at this address.
+ * @param bursts  Number of times to repeat the write+read test to verify stability
+ *
+ * @return Number of errors, zero means success
+ */
+static int write_data_bus_burst(uint64_t address, int bursts)
+{
+ BDK_TRACE(DRAM_TEST, "[0x%016lx:0x%016lx] Writing incrementing digits\n",
+ address, address + 127);
+ /* Loop over the burst so people using a scope have time to capture
+ traces */
+ for (int burst = 0; burst < bursts; burst++)
+ {
+ /* Fill a cache line with an incrementing pattern. Each nibble
+ in the 64bit word increments from 0 to 0xf */
+ for (uint64_t digit = 0; digit < 16; digit++)
+ WRITE64(address + digit * 8, REPEAT16(digit));
+ /* Force the cache line out to memory */
+ __bdk_dram_flush_to_mem(address);
+ }
+ return read_data_bus_burst(address, bursts);
+}
+
+/**
+ * Read back the pattern written by write_data_bus_walk() and
+ * make sure it was stored properly.
+ *
+ * @param address Physical address to read. This must be cache line aligned.
+ * @param burst   Burst number, used only for error reporting
+ * @param pattern Pattern basis for writes. See
+ * write_data_bus_walk()
+ *
+ * @return Number of errors, zero means success
+ */
+static int read_data_bus_walk(uint64_t address, int burst, uint64_t pattern)
+{
+ int failures = 0;
+
+    /* Invalidate all caches so we must read from DRAM */
+ __bdk_dram_flush_to_mem(address);
+ BDK_DCACHE_INVALIDATE;
+
+ uint64_t correct = pattern;
+ for (uint64_t word = 0; word < 16; word++)
+ {
+ uint64_t a = address + word * 8;
+ uint64_t data = READ64(a);
+ if (data != correct)
+ {
+ failures++;
+ __bdk_dram_report_error(a, data, correct, burst, -1);
+ }
+ uint64_t tmp = correct >> 63; /* Save top bit */
+ correct <<= 1; /* Shift left one bit */
+ correct |= tmp; /* Restore the top bit as bit 0 */
+ }
+
+ return failures;
+}
+
+/**
+ * Write a pattern to a cache line, rotating it one bit for each DWORD. Read back
+ * the pattern and make sure it was stored properly. The input pattern is rotated
+ * left by one bit for each DWORD written.
+ *
+ * @param address Physical address to write. This must be cache line aligned. 128 bytes will be
+ * written starting at this address.
+ * @param burst   Burst number (currently unused by this function)
+ * @param pattern Pattern basis; rotated left one bit for each DWORD written
+ */
+static void write_data_bus_walk(uint64_t address, int burst, uint64_t pattern)
+{
+ BDK_TRACE(DRAM_TEST, "[0x%016lx:0x%016lx] Writing walking pattern 0x%016lx\n",
+ address, address + 127, pattern);
+
+ uint64_t a = address;
+ uint64_t d = pattern;
+
+    /* Fill a cache line with the pattern. Each 64bit word will have the
+       pattern rotated left one bit */
+ for (uint64_t word = 0; word < 16; word++)
+ {
+ WRITE64(a, d);
+ a += 8;
+ uint64_t tmp = d >> 63; /* Save top bit */
+ d <<= 1; /* Shift left one bit */
+ d |= tmp; /* Restore the top bit as bit 0 */
+ }
+ /* Force the cache line out to memory */
+ __bdk_dram_flush_to_mem(address);
+}
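+
+/* Example of the rotation above: pattern 0x8000000000000001 is written to
+   word 0, then becomes 0x0000000000000003 for word 1 (the top bit wraps to
+   bit 0 while every other bit shifts left) */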
+
+/**
+ * The goal of these tests is to toggle every DDR data pin, one at a time or in
+ * related groups, to isolate any short circuits between the data pins or open
+ * circuits where the pin is not connected to the DDR memory. A board which fails
+ * one of these tests has severe problems and will not be able to run any of the
+ * later test patterns.
+ *
+ * @param start_address
+ * Physical address of a cache line to
+ * use for the test. Only this cache line is
+ * written.
+ * @param end_address
+ * Top end of the address range. Currently unused
+ * @param bursts Number of times to repeat writes+reads to ensure stability
+ *
+ * @return Number of errors, zero means success
+ */
+int __bdk_dram_test_mem_data_bus(uint64_t start_address, uint64_t end_address, int bursts)
+{
+ int failures = 0;
+
+ /* Incrementing pattern: 0x0 - 0xf in each nibble */
+ failures += write_data_bus_burst(start_address, bursts);
+
+ /* Walking ones. Run with 1, 2, and 3 bits walking */
+ for (int bits = 1; bits <= 3; bits++)
+ {
+ for (int burst = 0; burst < bursts; burst++)
+ {
+            /* Each write_data_bus_walk() call writes 16 dwords, so step by 16 */
+ for (int i = 0; i < 64; i += 16)
+ {
+ uint64_t pattern = bdk_build_mask(bits) << i;
+ write_data_bus_walk(start_address + i*8, burst, pattern);
+ }
+            /* Each read_data_bus_walk() call reads 16 dwords, so step by 16 */
+ for (int i = 0; i < 64; i += 16)
+ {
+ uint64_t pattern = bdk_build_mask(bits) << i;
+ failures += read_data_bus_walk(start_address + i*8, burst, pattern);
+ }
+ }
+ }
+
+ /* Walking zeros. Run with 1, 2, and 3 bits walking */
+ for (int bits = 1; bits <= 3; bits++)
+ {
+ for (int burst = 0; burst < bursts; burst++)
+ {
+            /* Each write_data_bus_walk() call writes 16 dwords, so step by 16 */
+ for (int i = 0; i < 64; i += 16)
+ {
+ uint64_t pattern = ~(bdk_build_mask(bits) << i);
+ write_data_bus_walk(start_address + i*8, burst, pattern);
+ }
+            /* Each read_data_bus_walk() call reads 16 dwords, so step by 16 */
+ for (int i = 0; i < 64; i += 16)
+ {
+ uint64_t pattern = ~(bdk_build_mask(bits) << i);
+ failures += read_data_bus_walk(start_address + i*8, burst, pattern);
+ }
+ }
+ }
+ return failures;
+}
+
diff --git a/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-fastscan.c b/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-fastscan.c
new file mode 100644
index 0000000000..46e205dd80
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-fastscan.c
@@ -0,0 +1,103 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include "bdk.h"
+
+/* Used for all memory reads/writes related to the test */
+#define READ64(address) __bdk_dram_read64(address)
+#define WRITE64(address, data) __bdk_dram_write64(address, data)
+
+/**
+ * Fast scan test. This test is meant to find gross errors caused by read/write
+ * level failing on a single rank or dimm. The idea is to scan through all of
+ * memory in large steps. The large steps hit each rank multiple times, but not
+ * every byte. If the whole rank has errors, this should find it quickly. This test
+ * is suitable for an alive test during early boot.
+ *
+ * @param area Starting physical address
+ * @param max_address
+ * Ending physical address, exclusive
+ * @param bursts Number of bursts (currently unused by this test)
+ *
+ * @return Number of errors
+ */
+int __bdk_dram_test_fast_scan(uint64_t area, uint64_t max_address, int bursts)
+{
+ int failures = 0;
+ const uint64_t step = 0x10008; /* The 8 is so we walk through cache lines too */
+ const uint64_t pattern1 = 0xaaaaaaaaaaaaaaaa;
+ const uint64_t pattern2 = 0x5555555555555555;
+
+ /* Walk through the region incrementing our offset by STEP */
+ uint64_t a = area;
+ while (a + 16 <= max_address)
+ {
+ WRITE64(a, pattern1);
+ WRITE64(a+8, pattern2);
+ __bdk_dram_flush_to_mem_range(a, a + 16);
+ a += step;
+ }
+
+ /* Read back, checking the writes */
+ a = area;
+ while (a + 16 <= max_address)
+ {
+        /* Prefetch two steps ahead for better performance */
+ uint64_t pre = a + step * 2;
+ if (pre + 16 < max_address)
+ BDK_PREFETCH(pre, 0);
+ /* Check pattern 1 */
+ uint64_t data1 = READ64(a);
+ if (bdk_unlikely(data1 != pattern1))
+ {
+ failures++;
+ __bdk_dram_report_error(a, data1, pattern1, 0, -1);
+ }
+ /* Check pattern 2 */
+ uint64_t data2 = READ64(a+8);
+ if (bdk_unlikely(data2 != pattern2))
+ {
+ failures++;
+ __bdk_dram_report_error(a+8, data2, pattern2, 0, -1);
+ }
+ a += step;
+ }
+
+ return failures;
+}
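+
+/* Example of the stride: with step = 0x10008, consecutive probes land at
+   offsets 0x0, 0x10008, 0x20010, ... so the scan advances 64KB at a time
+   while also walking through different cache line offsets, touching every
+   rank quickly without reading every byte */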
+
diff --git a/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-patfil.c b/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-patfil.c
new file mode 100644
index 0000000000..e6c4b57721
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-patfil.c
@@ -0,0 +1,829 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include "bdk.h"
+
+// choose prediction-based algorithms for mem_xor and mem_rows tests
+#define USE_PREDICTION_CODE_VERSIONS 1 // change to 0 to go back to the original versions
+
+/* Used for all memory reads/writes related to the test */
+#define READ64(address) __bdk_dram_read64(address)
+#define WRITE64(address, data) __bdk_dram_write64(address, data)
+
+/**
+ * Fill a memory area with the address of each 64-bit word in the area.
+ * Reread to confirm the pattern.
+ *
+ * @param area Start of the physical memory area
+ * @param max_address
+ * End of the physical memory area (exclusive)
+ * @param bursts Number of times to repeat the test over the entire area
+ *
+ * @return Number of errors, zero on success
+ */
+int __bdk_dram_test_mem_self_addr(uint64_t area, uint64_t max_address, int bursts)
+{
+ int failures = 0;
+
+ for (int burst = 0; burst < bursts; burst++)
+ {
+ /* Write the pattern to memory. Each location receives the address
+ * of the location.
+ */
+ for (uint64_t address = area; address < max_address; address+=8)
+ WRITE64(address, address);
+ __bdk_dram_flush_to_mem_range(area, max_address);
+ BDK_DCACHE_INVALIDATE;
+
+ /* Read by ascending address the written memory and confirm that it
+ * has the expected data pattern.
+ */
+ for (uint64_t address = area; address < max_address; )
+ {
+ if (address + 256 < max_address)
+ BDK_PREFETCH(address + 256, 0);
+ for (int i=0; i<16; i++)
+ {
+ uint64_t data = READ64(address);
+ if (bdk_unlikely(data != address))
+ failures += __bdk_dram_retry_failure(burst, address, data, address);
+ address += 8;
+ }
+ }
+ __bdk_dram_flush_to_mem_range(area, max_address);
+ BDK_DCACHE_INVALIDATE;
+
+ /* Read by descending address the written memory and confirm that it
+ * has the expected data pattern.
+ */
+ uint64_t end = max_address - sizeof(uint64_t);
+ for (uint64_t address = end; address >= area; )
+ {
+ if (address - 256 >= area)
+ BDK_PREFETCH(address - 256, 0);
+ for (int i=0; i<16; i++)
+ {
+ uint64_t data = READ64(address);
+ if (bdk_unlikely(data != address))
+ failures += __bdk_dram_retry_failure(burst, address, data, address);
+ address -= 8;
+ }
+ }
+ __bdk_dram_flush_to_mem_range(area, max_address);
+ BDK_DCACHE_INVALIDATE;
+
+ /* Read from random addresses within the memory area.
+ */
+ uint64_t probes = (max_address - area) / 128;
+ uint64_t address_ahead1 = area;
+ uint64_t address_ahead2 = area;
+ for (uint64_t i = 0; i < probes; i++)
+ {
+ /* Create a pipeline of prefetches:
+ address = address read this loop
+ address_ahead1 = prefetch started last loop
+ address_ahead2 = prefetch started this loop */
+ uint64_t address = address_ahead1;
+ address_ahead1 = address_ahead2;
+ address_ahead2 = bdk_rng_get_random64() % (max_address - area);
+ address_ahead2 += area;
+ address_ahead2 &= -8;
+ BDK_PREFETCH(address_ahead2, 0);
+
+ uint64_t data = READ64(address);
+ if (bdk_unlikely(data != address))
+ failures += __bdk_dram_retry_failure(burst, address, data, address);
+ }
+ }
+ return failures;
+}
+
+/**
+ * Write "pattern" to every DWORD in the area and verify it was written
+ * properly. The pattern is inverted at the start of every odd-numbered
+ * pass, so later passes also exercise ~pattern.
+ *
+ * @param area Start physical address of memory
+ * @param max_address
+ * End of physical memory region
+ * @param pattern Pattern to write
+ * @param passes Number of times to repeat the test
+ *
+ * @return Number of errors, zero on success
+ */
+static uint32_t test_mem_pattern(uint64_t area, uint64_t max_address, uint64_t pattern,
+ int passes)
+{
+ int failures = 0;
+
+ for (int pass = 0; pass < passes; pass++)
+ {
+ if (pass & 0x1)
+ pattern = ~pattern;
+
+ for (uint64_t address = area; address < max_address; address += 8)
+ WRITE64(address, pattern);
+ __bdk_dram_flush_to_mem_range(area, max_address);
+ BDK_DCACHE_INVALIDATE;
+
+ /* Read the written memory and confirm that it has the expected
+ * data pattern.
+ */
+ uint64_t address = area;
+ while (address < max_address)
+ {
+ if (address + 256 < max_address)
+ BDK_PREFETCH(address + 256, 0);
+ for (int i=0; i<16; i++)
+ {
+ uint64_t data = READ64(address);
+ if (bdk_unlikely(data != pattern))
+ failures += __bdk_dram_retry_failure(pass, address, data, pattern);
+ address += 8;
+ }
+ }
+ }
+ return failures;
+}
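+
+/* The four walking-bit tests below feed shifted patterns into
+   test_mem_pattern(). For example, leftwalk1 fills memory with 0x1, then
+   0x2, 0x4, ... up to 1ull << 63, while leftwalk0 writes the complements,
+   so a single zero bit walks left through a field of ones */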
+
+/**
+ * Walking zero written to memory, left shift
+ *
+ * @param area Start of the physical memory area
+ * @param max_address
+ * End of the physical memory area
+ * @param bursts Number of times to repeat the test over the entire area
+ *
+ * @return Number of errors, zero on success
+ */
+int __bdk_dram_test_mem_leftwalk0(uint64_t area, uint64_t max_address, int bursts)
+{
+ int failures = 0;
+ for (int burst = 0; burst < bursts; burst++)
+ {
+ for (uint64_t pattern = 1; pattern != 0; pattern = pattern << 1)
+ failures += test_mem_pattern(area, max_address, ~pattern, 1);
+ }
+ return failures;
+}
+
+/**
+ * Walking one written to memory, left shift
+ *
+ * @param area Start of the physical memory area
+ * @param max_address
+ * End of the physical memory area
+ * @param bursts Number of times to repeat the test over the entire area
+ *
+ * @return Number of errors, zero on success
+ */
+int __bdk_dram_test_mem_leftwalk1(uint64_t area, uint64_t max_address, int bursts)
+{
+ int failures = 0;
+ for (int burst = 0; burst < bursts; burst++)
+ {
+ for (uint64_t pattern = 1; pattern != 0; pattern = pattern << 1)
+ failures += test_mem_pattern(area, max_address, pattern, 1);
+ }
+ return failures;
+}
+
+/**
+ * Walking zero written to memory, right shift
+ *
+ * @param area Start of the physical memory area
+ * @param max_address
+ * End of the physical memory area
+ * @param bursts Number of times to repeat the test over the entire area
+ *
+ * @return Number of errors, zero on success
+ */
+int __bdk_dram_test_mem_rightwalk0(uint64_t area, uint64_t max_address, int bursts)
+{
+ int failures = 0;
+ for (int burst = 0; burst < bursts; burst++)
+ {
+ for (uint64_t pattern = 1ull << 63; pattern != 0; pattern = pattern >> 1)
+ failures += test_mem_pattern(area, max_address, ~pattern, 1);
+ }
+ return failures;
+}
+
+/**
+ * Walking one written to memory, right shift
+ *
+ * @param area Start of the physical memory area
+ * @param max_address
+ * End of the physical memory area
+ * @param bursts Number of times to repeat the test over the entire area
+ *
+ * @return Number of errors, zero on success
+ */
+int __bdk_dram_test_mem_rightwalk1(uint64_t area, uint64_t max_address, int bursts)
+{
+ int failures = 0;
+ for (int burst = 0; burst < bursts; burst++)
+ {
+ for (uint64_t pattern = 1ull<<63; pattern != 0; pattern = pattern >> 1)
+ failures += test_mem_pattern(area, max_address, pattern, 1);
+ }
+ return failures;
+}
+
+/**
+ * Apply the March C- testing algorithm to the given memory area.
+ * 1) Write "pattern" to memory.
+ * 2) Verify "pattern" and write "~pattern".
+ * 3) Verify "~pattern" and write "pattern".
+ * 4) Verify "pattern" and write "~pattern".
+ * 5) Verify "~pattern" and write "pattern".
+ * 6) Verify "pattern".
+ *
+ * @param area Start of the physical memory area
+ * @param max_address
+ * End of the physical memory area
+ * @param pattern
+ *
+ * @return Number of errors, zero on success
+ */
+static int test_mem_march_c(uint64_t area, uint64_t max_address, uint64_t pattern)
+{
+ int failures = 0;
+
+ /* Pass 1 ascending addresses, fill memory with pattern. */
+ BDK_TRACE(DRAM_TEST, " [0x%016lx:0x%016lx] Phase1, address incrementing, pattern 0x%016lx\n", area, max_address-1, pattern);
+ for (uint64_t address = area; address < max_address; address += 8)
+ WRITE64(address, pattern);
+
+ __bdk_dram_flush_to_mem_range(area, max_address);
+ BDK_DCACHE_INVALIDATE;
+
+ /* Pass 2: ascending addresses, read pattern and write ~pattern */
+ BDK_TRACE(DRAM_TEST, " [0x%016lx:0x%016lx] Phase2, address incrementing, pattern 0x%016lx\n", area, max_address-1, ~pattern);
+ for (uint64_t address = area; address < max_address; address += 8)
+ {
+ uint64_t data = READ64(address);
+ if (bdk_unlikely(data != pattern))
+ failures += __bdk_dram_retry_failure(1, address, data, pattern);
+ WRITE64(address, ~pattern);
+ }
+
+ __bdk_dram_flush_to_mem_range(area, max_address);
+ BDK_DCACHE_INVALIDATE;
+
+ /* Pass 3: ascending addresses, read ~pattern and write pattern. */
+ BDK_TRACE(DRAM_TEST, " [0x%016lx:0x%016lx] Phase3, address incrementing, pattern 0x%016lx\n", area, max_address-1, pattern);
+ for (uint64_t address = area; address < max_address; address += 8)
+ {
+ uint64_t data = READ64(address);
+ if (bdk_unlikely(data != ~pattern))
+ failures += __bdk_dram_retry_failure(1, address, data, ~pattern);
+ WRITE64(address, pattern);
+ }
+
+ __bdk_dram_flush_to_mem_range(area, max_address);
+ BDK_DCACHE_INVALIDATE;
+
+ /* Pass 4: descending addresses, read pattern and write ~pattern. */
+ BDK_TRACE(DRAM_TEST, " [0x%016lx:0x%016lx] Phase4, address decrementing, pattern 0x%016lx\n", area, max_address-1, ~pattern);
+ uint64_t end = max_address - sizeof(uint64_t);
+ for (uint64_t address = end; address >= area; address -= 8)
+ {
+ uint64_t data = READ64(address);
+ if (bdk_unlikely(data != pattern))
+ failures += __bdk_dram_retry_failure(1, address, data, pattern);
+ WRITE64(address, ~pattern);
+ }
+
+ __bdk_dram_flush_to_mem_range(area, max_address);
+ BDK_DCACHE_INVALIDATE;
+
+ /* Pass 5: descending addresses, read ~pattern and write pattern. */
+ BDK_TRACE(DRAM_TEST, " [0x%016lx:0x%016lx] Phase5, address decrementing, pattern 0x%016lx\n", area, max_address-1, pattern);
+ for (uint64_t address = end; address >= area; address -= 8)
+ {
+ uint64_t data = READ64(address);
+ if (bdk_unlikely(data != ~pattern))
+ failures += __bdk_dram_retry_failure(1, address, data, ~pattern);
+ WRITE64(address, pattern);
+ }
+
+ __bdk_dram_flush_to_mem_range(area, max_address);
+ BDK_DCACHE_INVALIDATE;
+
+ /* Pass 6: ascending addresses, read pattern. */
+ BDK_TRACE(DRAM_TEST, " [0x%016lx:0x%016lx] Phase6, address incrementing\n", area, max_address-1);
+ for (uint64_t address = area; address < max_address; address += 8)
+ {
+ uint64_t data = READ64(address);
+ if (bdk_unlikely(data != pattern))
+ failures += __bdk_dram_retry_failure(1, address, data, pattern);
+ }
+
+ return failures;
+}
+
+/**
+ * Use test_mem_march_c() with an all-ones pattern
+ *
+ * @param area Start of the physical memory area
+ * @param max_address
+ * End of the physical memory area
+ * @param bursts Number of times to repeat the test over the entire area
+ *
+ * @return Number of errors, zero on success
+ */
+int __bdk_dram_test_mem_solid(uint64_t area, uint64_t max_address, int bursts)
+{
+ int failures = 0;
+ for (int burst = 0; burst < bursts; burst++)
+ failures += test_mem_march_c(area, max_address, -1);
+ return failures;
+}
+
+/**
+ * Use test_mem_march_c() with a 0x5555555555555555 checkerboard pattern
+ *
+ * @param area Start of the physical memory area
+ * @param max_address
+ * End of the physical memory area
+ * @param bursts Number of times to repeat the test over the entire area
+ *
+ * @return Number of errors, zero on success
+ */
+int __bdk_dram_test_mem_checkerboard(uint64_t area, uint64_t max_address, int bursts)
+{
+ int failures = 0;
+ for (int burst = 0; burst < bursts; burst++)
+ failures += test_mem_march_c(area, max_address, 0x5555555555555555L);
+ return failures;
+}
+
+/**
+ * Write a pseudo-random pattern to memory and verify it
+ *
+ * @param area Start of the physical memory area
+ * @param max_address
+ * End of the physical memory area
+ * @param bursts Number of times to repeat the test over the entire area
+ *
+ * @return Number of errors, zero on success
+ */
+int __bdk_dram_test_mem_random(uint64_t area, uint64_t max_address, int bursts)
+{
+ /* This constant is used to increment the pattern after every DWORD. This
+ makes only the first DWORD truly random, but saves us processing
+ power generating the random values */
+ const uint64_t INC = 0x1010101010101010ULL;
+
+ int failures = 0;
+ for (int burst = 0; burst < bursts; burst++)
+ {
+ const uint64_t init_pattern = bdk_rng_get_random64();
+ uint64_t pattern = init_pattern;
+
+        /* Fill the area with the pattern, incrementing it by INC after
+         * every 64-bit word so each location receives a distinct value.
+         */
+ for (uint64_t address = area; address < max_address; address += 8)
+ {
+ WRITE64(address, pattern);
+ pattern += INC;
+ }
+
+ __bdk_dram_flush_to_mem_range(area, max_address);
+ BDK_DCACHE_INVALIDATE;
+
+ /* Read the written memory and confirm that it has the expected
+ * data pattern.
+ */
+ pattern = init_pattern;
+ for (uint64_t address = area; address < max_address; address += 8)
+ {
+ uint64_t data = READ64(address);
+ if (bdk_unlikely(data != pattern))
+ failures += __bdk_dram_retry_failure(burst, address, data, pattern);
+ pattern += INC;
+ }
+ }
+ return failures;
+}
+
+#if !USE_PREDICTION_CODE_VERSIONS
+/**
+ * Fill both halves of the area with identical pseudo-random data, then on
+ * each burst XOR both halves with a new random value and verify they match.
+ *
+ * @param area Start of the physical memory area
+ * @param max_address
+ * End of the physical memory area
+ * @param bursts Number of times to repeat the test over the entire area
+ *
+ * @return Number of errors, zero on success
+ */
+int __bdk_dram_test_mem_xor(uint64_t area, uint64_t max_address, int bursts)
+{
+ int failures = 0;
+
+ uint64_t extent = max_address - area;
+ uint64_t count = (extent / sizeof(uint64_t)) / 2;
+
+ /* Fill both halves of the memory area with identical randomized data.
+ */
+ uint64_t address1 = area;
+ uint64_t address2 = area + count * sizeof(uint64_t);
+
+ uint64_t pattern = bdk_rng_get_random64();
+
+ for (uint64_t j = 0; j < count; j++)
+ {
+ uint64_t p = pattern * address1;
+ WRITE64(address1, p);
+ WRITE64(address2, p);
+ address1 += 8;
+ address2 += 8;
+ }
+ __bdk_dram_flush_to_mem_range(area, max_address);
+ BDK_DCACHE_INVALIDATE;
+
+ /* Make a series of passes over the memory areas. */
+ for (int burst = 0; burst < bursts; burst++)
+ {
+ /* XOR the data with a random value, applying the change to both
+ * memory areas.
+ */
+ address1 = area;
+ address2 = area + count * sizeof(uint64_t);
+
+ pattern = bdk_rng_get_random64();
+
+ for (uint64_t j = 0; j < count; j++)
+ {
+ if ((address1 & BDK_CACHE_LINE_MASK) == 0)
+ BDK_PREFETCH(address1, BDK_CACHE_LINE_SIZE);
+ if ((address2 & BDK_CACHE_LINE_MASK) == 0)
+ BDK_PREFETCH(address2, BDK_CACHE_LINE_SIZE);
+ WRITE64(address1, READ64(address1) ^ pattern);
+ WRITE64(address2, READ64(address2) ^ pattern);
+ address1 += 8;
+ address2 += 8;
+ }
+
+ __bdk_dram_flush_to_mem_range(area, max_address);
+ BDK_DCACHE_INVALIDATE;
+
+ /* Look for differences in the areas. If there is a mismatch, reset
+ * both memory locations with the same pattern. Failing to do so
+ * means that on all subsequent passes the pair of locations remain
+ * out of sync giving spurious errors.
+ */
+ address1 = area;
+ address2 = area + count * sizeof(uint64_t);
+ for (uint64_t j = 0; j < count; j++)
+ {
+ if ((address1 & BDK_CACHE_LINE_MASK) == 0)
+ BDK_PREFETCH(address1, BDK_CACHE_LINE_SIZE);
+ if ((address2 & BDK_CACHE_LINE_MASK) == 0)
+ BDK_PREFETCH(address2, BDK_CACHE_LINE_SIZE);
+ uint64_t d1 = READ64(address1);
+ uint64_t d2 = READ64(address2);
+ if (bdk_unlikely(d1 != d2))
+ {
+ failures += __bdk_dram_retry_failure2(burst, address1, d1, address2, d2);
+
+ // Synchronize the two areas, adjusting for the error.
+ WRITE64(address1, d2);
+ WRITE64(address2, d2);
+ }
+ address1 += 8;
+ address2 += 8;
+ }
+ }
+ return failures;
+}
+
+/**
+ * test_mem_rows
+ *
+ * Write a pattern of alternating 64-bit words of all one bits and then all 0
+ * bits. This pattern generates the maximum amount of simultaneous switching
+ * activity on the memory channels. Each pass flips the pattern with words
+ * going from all ones to all zeros and vice versa.
+ *
+ * @param area Start of the physical memory area
+ * @param max_address
+ * End of the physical memory area
+ * @param bursts Number of times to repeat the test over the entire area
+ *
+ * @return Number of errors, zero on success
+ */
+int __bdk_dram_test_mem_rows(uint64_t area, uint64_t max_address, int bursts)
+{
+ int failures = 0;
+ uint64_t pattern = 0x0;
+ uint64_t extent = (max_address - area);
+ uint64_t count = (extent / 2) / sizeof(uint64_t); // in terms of 64bit words
+
+    /* Fill both halves of the memory area with an identical data pattern:
+     * even address 64-bit words get the pattern, while odd address words
+     * get the inverted pattern.
+     */
+ uint64_t address1 = area;
+ uint64_t address2 = area + count * sizeof(uint64_t);
+
+ for (uint64_t j = 0; j < (count / 2); j++)
+ {
+ WRITE64(address1, pattern);
+ WRITE64(address2, pattern);
+ address1 += 8;
+ address2 += 8;
+ WRITE64(address1, ~pattern);
+ WRITE64(address2, ~pattern);
+ address1 += 8;
+ address2 += 8;
+ }
+ __bdk_dram_flush_to_mem_range(area, max_address);
+ BDK_DCACHE_INVALIDATE;
+
+ /* Make a series of passes over the memory areas. */
+ for (int burst = 0; burst < bursts; burst++)
+ {
+ /* Invert the data, applying the change to both memory areas. Thus on
+ * alternate passes, the data flips from 0 to 1 and vice versa.
+ */
+ address1 = area;
+ address2 = area + count * sizeof(uint64_t);
+ for (uint64_t j = 0; j < count; j++)
+ {
+ WRITE64(address1, ~READ64(address1));
+ WRITE64(address2, ~READ64(address2));
+ address1 += 8;
+ address2 += 8;
+ }
+ __bdk_dram_flush_to_mem_range(area, max_address);
+ BDK_DCACHE_INVALIDATE;
+
+ /* Look for differences in the areas. If there is a mismatch, reset
+ * both memory locations with the same pattern. Failing to do so
+ * means that on all subsequent passes the pair of locations remain
+ * out of sync giving spurious errors.
+ */
+ address1 = area;
+ address2 = area + count * sizeof(uint64_t);
+ for (uint64_t j = 0; j < count; j++)
+ {
+ uint64_t d1 = READ64(address1);
+ uint64_t d2 = READ64(address2);
+ if (bdk_unlikely(d1 != d2))
+ {
+ failures += __bdk_dram_retry_failure2(burst, address1, d1, address2, d2);
+
+ // Synchronize the two areas, adjusting for the error.
+ WRITE64(address1, d2);
+ WRITE64(address2, d2);
+ }
+ address1 += 8;
+ address2 += 8;
+ }
+ }
+ return failures;
+}
+#endif /* !USE_PREDICTION_CODE_VERSIONS */
+
+#if USE_PREDICTION_CODE_VERSIONS
+/* Prediction-based version of the XOR test: the expected contents are
+   computed from the address, so the verify pass needs no reference copy */
+
+int __bdk_dram_test_mem_xor(uint64_t area, uint64_t max_address, int bursts)
+{
+ int failures = 0;
+ int burst;
+
+ uint64_t extent = max_address - area;
+ uint64_t count = (extent / sizeof(uint64_t)) / 2;
+ uint64_t offset = count * sizeof(uint64_t);
+ uint64_t area2 = area + offset;
+
+ /* Fill both halves of the memory area with identical randomized data.
+ */
+ uint64_t address1 = area;
+
+ uint64_t pattern1 = bdk_rng_get_random64();
+ uint64_t pattern2 = 0;
+ uint64_t this_pattern;
+
+ uint64_t p;
+ uint64_t d1, d2;
+
+ // move the multiplies outside the loop
+ uint64_t pbase = address1 * pattern1;
+ uint64_t pincr = 8 * pattern1;
+ uint64_t ppred;
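+
+    /* The fill value at address a is a * pattern1, generated incrementally:
+       pbase seeds the first address and every 8-byte step adds pincr. The
+       verify pass can therefore predict the expected word at any address as
+       ppred ^ pattern2 without keeping a reference copy */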
+
+ p = pbase;
+ while (address1 < area2)
+ {
+ WRITE64(address1 , p);
+ WRITE64(address1 + offset, p);
+ address1 += 8;
+ p += pincr;
+ }
+ __bdk_dram_flush_to_mem_range(area, max_address);
+ BDK_DCACHE_INVALIDATE;
+
+ /* Make a series of passes over the memory areas. */
+ for (burst = 0; burst < bursts; burst++)
+ {
+ /* XOR the data with a random value, applying the change to both
+ * memory areas.
+ */
+ address1 = area;
+
+ this_pattern = bdk_rng_get_random64();
+ pattern2 ^= this_pattern;
+
+ while (address1 < area2)
+ {
+#if 1
+ if ((address1 & BDK_CACHE_LINE_MASK) == 0)
+ BDK_PREFETCH(address1, BDK_CACHE_LINE_SIZE);
+ if (((address1 + offset) & BDK_CACHE_LINE_MASK) == 0)
+ BDK_PREFETCH(address1 + offset, BDK_CACHE_LINE_SIZE);
+#endif
+ WRITE64(address1 , READ64(address1 ) ^ this_pattern);
+ WRITE64(address1 + offset, READ64(address1 + offset) ^ this_pattern);
+ address1 += 8;
+ }
+
+ __bdk_dram_flush_to_mem_range(area, max_address);
+ BDK_DCACHE_INVALIDATE;
+
+ /* Look for differences from the expected pattern in both areas.
+ * If there is a mismatch, reset the appropriate memory location
+ * with the correct pattern. Failing to do so
+ * means that on all subsequent passes the erroring locations
+ * will be out of sync, giving spurious errors.
+ */
+ address1 = area;
+ ppred = pbase;
+
+ while (address1 < area2)
+ {
+#if 1
+ if ((address1 & BDK_CACHE_LINE_MASK) == 0)
+ BDK_PREFETCH(address1, BDK_CACHE_LINE_SIZE);
+ if (((address1 + offset) & BDK_CACHE_LINE_MASK) == 0)
+ BDK_PREFETCH(address1 + offset, BDK_CACHE_LINE_SIZE);
+#endif
+ d1 = READ64(address1 );
+ d2 = READ64(address1 + offset);
+
+ p = ppred ^ pattern2;
+
+ if (bdk_unlikely(d1 != p)) {
+ failures += __bdk_dram_retry_failure(burst, address1, d1, p);
+ // Synchronize the area, adjusting for the error.
+ //WRITE64(address1, p); // retries should do this
+ }
+ if (bdk_unlikely(d2 != p)) {
+ failures += __bdk_dram_retry_failure(burst, address1 + offset, d2, p);
+ // Synchronize the area, adjusting for the error.
+ //WRITE64(address1 + offset, p); // retries should do this
+ }
+
+ address1 += 8;
+ ppred += pincr;
+
+ } /* while (address1 < area2) */
+ } /* for (int burst = 0; burst < bursts; burst++) */
+ return failures;
+}
+
+/* Prediction-based version of the rows test */
+
+int __bdk_dram_test_mem_rows(uint64_t area, uint64_t max_address, int bursts)
+{
+ int failures = 0;
+
+ uint64_t pattern1 = 0x0;
+ uint64_t extent = (max_address - area);
+ uint64_t count = (extent / 2) / sizeof(uint64_t); // in terms of 64bit words
+ uint64_t offset = count * sizeof(uint64_t);
+ uint64_t area2 = area + offset;
+ uint64_t pattern2;
+ uint64_t d1, d2;
+ int burst;
+
+    /* Fill both halves of the memory area with an identical data pattern:
+     * even address 64-bit words get the pattern, while odd address words
+     * get the inverted pattern.
+     */
+ uint64_t address1 = area;
+
+ pattern2 = pattern1; // start with original pattern
+
+ while (address1 < area2)
+ {
+ WRITE64(address1 , pattern2);
+ WRITE64(address1 + offset, pattern2);
+ address1 += 8;
+ pattern2 = ~pattern2; // flip for next slots
+ }
+
+ __bdk_dram_flush_to_mem_range(area, max_address);
+ BDK_DCACHE_INVALIDATE;
+
+ /* Make a series of passes over the memory areas. */
+ for (burst = 0; burst < bursts; burst++)
+ {
+ /* Invert the data, applying the change to both memory areas. Thus on
+ * alternate passes, the data flips from 0 to 1 and vice versa.
+ */
+ address1 = area;
+
+ while (address1 < area2)
+ {
+ if ((address1 & BDK_CACHE_LINE_MASK) == 0)
+ BDK_PREFETCH(address1 , BDK_CACHE_LINE_SIZE);
+ if (((address1 + offset) & BDK_CACHE_LINE_MASK) == 0)
+ BDK_PREFETCH(address1 + offset, BDK_CACHE_LINE_SIZE);
+
+ WRITE64(address1 , ~READ64(address1 ));
+ WRITE64(address1 + offset, ~READ64(address1 + offset));
+ address1 += 8;
+ }
+
+ __bdk_dram_flush_to_mem_range(area, max_address);
+ BDK_DCACHE_INVALIDATE;
+
+ /* Look for differences in the areas. If there is a mismatch, reset
+ * both memory locations with the same pattern. Failing to do so
+ * means that on all subsequent passes the pair of locations remain
+ * out of sync giving spurious errors.
+ */
+ address1 = area;
+ pattern1 = ~pattern1; // flip the starting pattern to match above loop
+ pattern2 = pattern1; // slots have been flipped by the above loop
+
+ while (address1 < area2)
+ {
+ if ((address1 & BDK_CACHE_LINE_MASK) == 0)
+ BDK_PREFETCH(address1 , BDK_CACHE_LINE_SIZE);
+ if (((address1 + offset) & BDK_CACHE_LINE_MASK) == 0)
+ BDK_PREFETCH(address1 + offset, BDK_CACHE_LINE_SIZE);
+
+ d1 = READ64(address1 );
+ d2 = READ64(address1 + offset);
+
+ if (bdk_unlikely(d1 != pattern2)) {
+ failures += __bdk_dram_retry_failure(burst, address1, d1, pattern2);
+ // Synchronize the area, adjusting for the error.
+ //WRITE64(address1, pattern2); // retries should do this
+ }
+ if (bdk_unlikely(d2 != pattern2)) {
+ failures += __bdk_dram_retry_failure(burst, address1 + offset, d2, pattern2);
+ // Synchronize the two areas, adjusting for the error.
+ //WRITE64(address1 + offset, pattern2); // retries should do this
+ }
+
+ address1 += 8;
+ pattern2 = ~pattern2; // flip for next pair of slots
+ }
+ }
+ return failures;
+}
+#endif /* USE_PREDICTION_CODE_VERSIONS */
diff --git a/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test.c b/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test.c
new file mode 100644
index 0000000000..53137502fc
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test.c
@@ -0,0 +1,860 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include "libbdk-arch/bdk-csrs-gti.h"
+#include "libbdk-arch/bdk-csrs-ocx.h"
+
+/* This code is an optional part of the BDK. It is only linked in
+ if BDK_REQUIRE() needs it */
+BDK_REQUIRE_DEFINE(DRAM_TEST);
+
+#define MAX_ERRORS_TO_REPORT 50
+#define RETRY_LIMIT 1000
+
+typedef struct
+{
+ const char * name; /* Friendly name for the test */
+ __bdk_dram_test_t test_func; /* Function to call */
+ int bursts; /* Bursts parameter to pass to the test */
+ int max_cores; /* Maximum number of cores the test should be run on in parallel. Zero means all */
+} dram_test_info_t;
+
+static const dram_test_info_t TEST_INFO[] = {
+ /* Name, Test function, Bursts, Max Cores */
+ { "Data Bus", __bdk_dram_test_mem_data_bus, 8, 1},
+ { "Address Bus", __bdk_dram_test_mem_address_bus, 0, 1},
+ { "Marching Rows", __bdk_dram_test_mem_rows, 16, 0},
+ { "Random Data", __bdk_dram_test_mem_random, 32, 0},
+ { "Random XOR (32 Burst)", __bdk_dram_test_mem_xor, 32, 0},
+ { "Self Address", __bdk_dram_test_mem_self_addr, 1, 0},
+ { "March C- Solid Bits", __bdk_dram_test_mem_solid, 1, 0},
+ { "March C- Checkerboard", __bdk_dram_test_mem_checkerboard, 1, 0},
+ { "Walking Ones Left", __bdk_dram_test_mem_leftwalk1, 1, 0},
+ { "Walking Ones Right", __bdk_dram_test_mem_rightwalk1, 1, 0},
+ { "Walking Zeros Left", __bdk_dram_test_mem_leftwalk0, 1, 0},
+ { "Walking Zeros Right", __bdk_dram_test_mem_rightwalk0, 1, 0},
+ { "Random XOR (224 Burst)", __bdk_dram_test_mem_xor, 224, 0},
+ { "Fast Scan", __bdk_dram_test_fast_scan, 0, 0},
+ { NULL, NULL, 0, 0}
+};
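+
+/* The "test" argument of bdk_dram_test() indexes this table directly: for
+   example, test 0 is "Data Bus" and test 3 is "Random Data". The NULL entry
+   terminates the table for bdk_dram_get_test_name() lookups */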
+
+/* These variables count the number of ECC errors. They should only be accessed atomically */
+int64_t __bdk_dram_ecc_single_bit_errors[BDK_MAX_MEM_CHANS];
+int64_t __bdk_dram_ecc_double_bit_errors[BDK_MAX_MEM_CHANS];
+
+static int64_t dram_test_thread_done;
+static int64_t dram_test_thread_errors;
+static uint64_t dram_test_thread_start;
+static uint64_t dram_test_thread_end;
+static uint64_t dram_test_thread_size;
+
+/**
+ * Force the cache line containing the address to be written back to DRAM
+ * and evicted from L2. L1 will be unaffected.
+ *
+ * @param address Physical memory location
+ */
+void __bdk_dram_flush_to_mem(uint64_t address)
+{
+ BDK_MB;
+ /* The DRAM code doesn't use the normal bdk_phys_to_ptr() because of the
+ NULL check in it. This greatly slows down the memory tests */
+ char *ptr = (void*)address;
+ BDK_CACHE_WBI_L2(ptr);
+}
+
+/**
+ * Force a memory region to be written to DRAM and evicted from L2
+ *
+ * @param area Start of the region
+ * @param max_address
+ * End of the region (exclusive)
+ */
+void __bdk_dram_flush_to_mem_range(uint64_t area, uint64_t max_address)
+{
+ /* The DRAM code doesn't use the normal bdk_phys_to_ptr() because of the
+ NULL check in it. This greatly slows down the memory tests */
+ char *ptr = (void*)area;
+ char *end = (void*)max_address;
+ BDK_MB;
+ while (ptr < end)
+ {
+ BDK_CACHE_WBI_L2(ptr);
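+        /* Advance one L2 cache line; lines are 128 bytes on these parts */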
+ ptr += 128;
+ }
+}
+
+/**
+ * Convert a test enumeration into a string
+ *
+ * @param test Test to convert
+ *
+ * @return String for display
+ */
+const char *bdk_dram_get_test_name(int test)
+{
+ if (test < (int)(sizeof(TEST_INFO) / sizeof(TEST_INFO[0])))
+ return TEST_INFO[test].name;
+ else
+ return NULL;
+}
+
+static bdk_dram_test_flags_t dram_test_flags; // FIXME: Don't use global
+/**
+ * This function is run as a thread to perform memory tests over multiple cores.
+ * Each thread gets a section of memory to work on, which is controlled by global
+ * variables at the beginning of this file.
+ *
+ * @param arg Number of the region we should check
+ * @param arg1 Pointer to the test_info structure
+ */
+static void dram_test_thread(int arg, void *arg1)
+{
+ const dram_test_info_t *test_info = arg1;
+ const int bursts = test_info->bursts;
+ const int range_number = arg;
+
+ /* Figure out our work memory range.
+ *
+ * Note start_address and end_address just provide the physical offset
+ * portion of the address and do not have the node bits set. This is
+ * to simplify address checks and calculations. Later, when about to run
+ * the memory test, the routine adds in the node bits to form the final
+ * addresses.
+ */
+ uint64_t start_address = dram_test_thread_start + dram_test_thread_size * range_number;
+ uint64_t end_address = start_address + dram_test_thread_size;
+ if (end_address > dram_test_thread_end)
+ end_address = dram_test_thread_end;
+
+ bdk_node_t test_node = bdk_numa_local();
+ if (dram_test_flags & BDK_DRAM_TEST_USE_CCPI)
+ test_node ^= 1;
+ /* Insert the node part of the address */
+ start_address = bdk_numa_get_address(test_node, start_address);
+ end_address = bdk_numa_get_address(test_node, end_address);
+ /* Test the region */
+ BDK_TRACE(DRAM_TEST, " Node %d, core %d, Testing [0x%011lx:0x%011lx]\n",
+ bdk_numa_local(), bdk_get_core_num() & 127, start_address, end_address - 1);
+ test_info->test_func(start_address, end_address, bursts);
+
+ /* Report that we're done */
+ BDK_TRACE(DRAM_TEST, "Thread %d on node %d done with memory test\n", range_number, bdk_numa_local());
+ bdk_atomic_add64_nosync(&dram_test_thread_done, 1);
+}
+
+/**
+ * Run the memory test.
+ *
+ * @param test_info
+ * @param start_address
+ * Physical address to start at
+ * @param length Length of memory block
+ * @param flags Flags to control memory test options. Zero defaults to testing all
+ * nodes with statistics and progress output.
+ *
+ * @return Number of errors found. Zero is success. Negative means the test
+ * did not run due to some other failure.
+ */
+static int __bdk_dram_run_test(const dram_test_info_t *test_info, uint64_t start_address,
+ uint64_t length, bdk_dram_test_flags_t flags)
+{
+    /* Figure out the address of the byte one off the top of memory */
+ uint64_t max_address = bdk_dram_get_size_mbytes(bdk_numa_local());
+ BDK_TRACE(DRAM_TEST, "DRAM available per node: %lu MB\n", max_address);
+ max_address <<= 20;
+
+ /* Make sure we have enough */
+ if (max_address < (16<<20))
+ {
+ bdk_error("DRAM size is too small\n");
+ return -1;
+ }
+
+ /* Make sure the amount is sane */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ {
+ if (max_address > (1ull << 40)) /* 40 bits in CN8XXX */
+ max_address = 1ull << 40;
+ }
+ else
+ {
+ if (max_address > (1ull << 43)) /* 43 bits in CN9XXX */
+ max_address = 1ull << 43;
+ }
+ BDK_TRACE(DRAM_TEST, "DRAM max address: 0x%011lx\n", max_address-1);
+
+ /* Make sure the start address is lower than the top of memory */
+ if (start_address >= max_address)
+ {
+ bdk_error("Start address is larger than the amount of memory: 0x%011lx versus 0x%011lx\n",
+ start_address, max_address);
+ return -1;
+ }
+ if (length == (uint64_t)-1)
+ length = max_address - start_address;
+
+ /* Final range checks */
+ uint64_t end_address = start_address + length;
+ if (end_address > max_address)
+ {
+ end_address = max_address;
+ length = end_address - start_address;
+ }
+ if (length == 0)
+ return 0;
+
+ /* Ready to run the test. Figure out how many cores we need */
+ int max_cores = test_info->max_cores;
+ int total_cores_all_nodes = max_cores;
+
+ /* Figure out the number of cores available in the system */
+ if (max_cores == 0)
+ {
+ max_cores += bdk_get_num_running_cores(bdk_numa_local());
+ /* Calculate the total number of cores being used. The per node number
+ is confusing to people */
+ for (bdk_node_t node = BDK_NODE_0; node < BDK_NUMA_MAX_NODES; node++)
+ if (flags & (1 << node))
+ {
+ if (flags & BDK_DRAM_TEST_USE_CCPI)
+ total_cores_all_nodes += bdk_get_num_running_cores(node ^ 1);
+ else
+ total_cores_all_nodes += bdk_get_num_running_cores(node);
+ }
+ }
+ if (!(flags & BDK_DRAM_TEST_NO_BANNERS))
+ printf("Starting Test \"%s\" for [0x%011lx:0x%011lx] using %d core(s)\n",
+ test_info->name, start_address, end_address - 1, total_cores_all_nodes);
+
+ /* Remember the LMC perf counters for stats after the test */
+ uint64_t start_dram_dclk[BDK_NUMA_MAX_NODES][4];
+ uint64_t start_dram_ops[BDK_NUMA_MAX_NODES][4];
+ uint64_t stop_dram_dclk[BDK_NUMA_MAX_NODES][4];
+ uint64_t stop_dram_ops[BDK_NUMA_MAX_NODES][4];
+ for (bdk_node_t node = BDK_NODE_0; node < BDK_NUMA_MAX_NODES; node++)
+ {
+ if (flags & (1 << node))
+ {
+ const int num_dram_controllers = __bdk_dram_get_num_lmc(node);
+ for (int i = 0; i < num_dram_controllers; i++)
+ {
+ start_dram_dclk[node][i] = BDK_CSR_READ(node, BDK_LMCX_DCLK_CNT(i));
+ start_dram_ops[node][i] = BDK_CSR_READ(node, BDK_LMCX_OPS_CNT(i));
+ }
+ }
+ }
+ /* Remember the CCPI link counters for stats after the test */
+ uint64_t start_ccpi_data[BDK_NUMA_MAX_NODES][3];
+ uint64_t start_ccpi_idle[BDK_NUMA_MAX_NODES][3];
+ uint64_t start_ccpi_err[BDK_NUMA_MAX_NODES][3];
+ uint64_t stop_ccpi_data[BDK_NUMA_MAX_NODES][3];
+ uint64_t stop_ccpi_idle[BDK_NUMA_MAX_NODES][3];
+ uint64_t stop_ccpi_err[BDK_NUMA_MAX_NODES][3];
+ if (!bdk_numa_is_only_one())
+ {
+ for (bdk_node_t node = BDK_NODE_0; node < BDK_NUMA_MAX_NODES; node++)
+ {
+ if (flags & (1 << node))
+ {
+ for (int link = 0; link < 3; link++)
+ {
+ start_ccpi_data[node][link] = BDK_CSR_READ(node, BDK_OCX_TLKX_STAT_DATA_CNT(link));
+ start_ccpi_idle[node][link] = BDK_CSR_READ(node, BDK_OCX_TLKX_STAT_IDLE_CNT(link));
+ start_ccpi_err[node][link] = BDK_CSR_READ(node, BDK_OCX_TLKX_STAT_ERR_CNT(link));
+ }
+ }
+ }
+ }
+
+ /* WARNING: This code assumes the same memory range is being tested on
+ all nodes. The same number of cores are used on each node to test
+ its local memory */
+ uint64_t work_address = start_address;
+ dram_test_flags = flags;
+ bdk_atomic_set64(&dram_test_thread_errors, 0);
+ while ((work_address < end_address) && ((dram_test_thread_errors == 0) || (flags & BDK_DRAM_TEST_NO_STOP_ERROR)))
+ {
+ /* Check at most MAX_CHUNK_SIZE across each iteration. We only report
+ progress between chunks, so keep them reasonably small */
+ const uint64_t MAX_CHUNK_SIZE = 1ull << 28; /* 256MB */
+ uint64_t size = end_address - work_address;
+ if (size > MAX_CHUNK_SIZE)
+ size = MAX_CHUNK_SIZE;
+
+ /* Divide memory evenly between the cores. Round the size up so that
+ all memory is covered. The last core may have slightly less memory to
+ test */
+ uint64_t thread_size = (size + (max_cores - 1)) / max_cores;
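+        /* Round thread_size up to a multiple of 128 bytes so chunk
+           boundaries fall on cache-line boundaries */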
+ thread_size += 127;
+ thread_size &= -128;
+ dram_test_thread_start = work_address;
+ dram_test_thread_end = work_address + size;
+ dram_test_thread_size = thread_size;
+ BDK_WMB;
+
+ /* Poke the watchdog */
+ BDK_CSR_WRITE(bdk_numa_local(), BDK_GTI_CWD_POKEX(0), 0);
+
+ /* disable progress output when batch mode is ON */
+ if (!(flags & BDK_DRAM_TEST_NO_PROGRESS)) {
+
+ /* Report progress percentage */
+ int percent_x10 = (work_address - start_address) * 1000 / (end_address - start_address);
+ printf(" %3d.%d%% complete, testing [0x%011lx:0x%011lx]\r",
+ percent_x10 / 10, percent_x10 % 10, work_address, work_address + size - 1);
+ fflush(stdout);
+ }
+
+ work_address += size;
+
+ /* Start threads for all the cores */
+ int total_count = 0;
+ bdk_atomic_set64(&dram_test_thread_done, 0);
+ for (bdk_node_t node = BDK_NODE_0; node < BDK_NUMA_MAX_NODES; node++)
+ {
+ if (flags & (1 << node))
+ {
+ const int num_cores = bdk_get_num_cores(node);
+ int per_node = 0;
+ for (int core = 0; core < num_cores; core++)
+ {
+ if (per_node >= max_cores)
+ break;
+ int run_node = (flags & BDK_DRAM_TEST_USE_CCPI) ? node ^ 1 : node;
+ BDK_TRACE(DRAM_TEST, "Starting thread %d on node %d for memory test\n", per_node, node);
+ if (bdk_thread_create(run_node, 0, dram_test_thread, per_node, (void *)test_info, 0))
+ {
+ bdk_error("Failed to create thread %d for memory test on node %d\n", per_node, node);
+ }
+ else
+ {
+ per_node++;
+ total_count++;
+ }
+ }
+ }
+ }
+
+#if 0
+ /* Wait for threads to finish */
+ while (bdk_atomic_get64(&dram_test_thread_done) < total_count)
+ bdk_thread_yield();
+#else
+#define TIMEOUT_SECS 30 // FIXME: long enough so multicore RXOR 224 should not print out
+ /* Wait for threads to finish, with progress */
+ int cur_count;
+ uint64_t cur_time;
+ uint64_t period = bdk_clock_get_rate(bdk_numa_local(), BDK_CLOCK_TIME) * TIMEOUT_SECS; // FIXME?
+ uint64_t timeout = bdk_clock_get_count(BDK_CLOCK_TIME) + period;
+ do {
+ bdk_thread_yield();
+ cur_count = bdk_atomic_get64(&dram_test_thread_done);
+ cur_time = bdk_clock_get_count(BDK_CLOCK_TIME);
+ if (cur_time >= timeout) {
+ BDK_TRACE(DRAM_TEST, "N%d: Waiting for %d cores\n",
+ bdk_numa_local(), total_count - cur_count);
+ timeout = cur_time + period;
+ }
+ } while (cur_count < total_count);
+#endif
+ }
+
+ /* Get the DRAM perf counters */
+ for (bdk_node_t node = BDK_NODE_0; node < BDK_NUMA_MAX_NODES; node++)
+ {
+ if (flags & (1 << node))
+ {
+ const int num_dram_controllers = __bdk_dram_get_num_lmc(node);
+ for (int i = 0; i < num_dram_controllers; i++)
+ {
+ stop_dram_dclk[node][i] = BDK_CSR_READ(node, BDK_LMCX_DCLK_CNT(i));
+ stop_dram_ops[node][i] = BDK_CSR_READ(node, BDK_LMCX_OPS_CNT(i));
+ }
+ }
+ }
+ /* Get the CCPI link counters */
+ if (!bdk_numa_is_only_one())
+ {
+ for (bdk_node_t node = BDK_NODE_0; node < BDK_NUMA_MAX_NODES; node++)
+ {
+ if (flags & (1 << node))
+ {
+ for (int link = 0; link < 3; link++)
+ {
+ stop_ccpi_data[node][link] = BDK_CSR_READ(node, BDK_OCX_TLKX_STAT_DATA_CNT(link));
+ stop_ccpi_idle[node][link] = BDK_CSR_READ(node, BDK_OCX_TLKX_STAT_IDLE_CNT(link));
+ stop_ccpi_err[node][link] = BDK_CSR_READ(node, BDK_OCX_TLKX_STAT_ERR_CNT(link));
+ }
+ }
+ }
+ }
+
+ /* disable progress output when batch mode is ON */
+ if (!(flags & BDK_DRAM_TEST_NO_PROGRESS)) {
+
+ /* Report progress percentage as complete */
+ printf(" %3d.%d%% complete, testing [0x%011lx:0x%011lx]\n",
+ 100, 0, start_address, end_address - 1);
+ fflush(stdout);
+ }
+
+ if (!(flags & BDK_DRAM_TEST_NO_STATS))
+ {
+ /* Display LMC load */
+ for (bdk_node_t node = BDK_NODE_0; node < BDK_NUMA_MAX_NODES; node++)
+ {
+ if (flags & (1 << node))
+ {
+ const int num_dram_controllers = __bdk_dram_get_num_lmc(node);
+ for (int i = 0; i < num_dram_controllers; i++)
+ {
+ uint64_t ops = stop_dram_ops[node][i] - start_dram_ops[node][i];
+ uint64_t dclk = stop_dram_dclk[node][i] - start_dram_dclk[node][i];
+ if (dclk == 0)
+ dclk = 1;
+ uint64_t percent_x10 = ops * 1000 / dclk;
+ printf(" Node %d, LMC%d: ops %lu, cycles %lu, used %lu.%lu%%\n",
+ node, i, ops, dclk, percent_x10 / 10, percent_x10 % 10);
+ }
+ }
+ }
+ if (flags & BDK_DRAM_TEST_USE_CCPI)
+ {
+ /* Display CCPI load */
+ for (bdk_node_t node = BDK_NODE_0; node < BDK_NUMA_MAX_NODES; node++)
+ {
+ if (flags & (1 << node))
+ {
+ for (int link = 0; link < 3; link++)
+ {
+ uint64_t busy = stop_ccpi_data[node][link] - start_ccpi_data[node][link];
+ busy += stop_ccpi_err[node][link] - start_ccpi_err[node][link];
+ uint64_t total = stop_ccpi_idle[node][link] - start_ccpi_idle[node][link];
+ total += busy;
+ if (total == 0)
+ continue;
+ uint64_t percent_x10 = busy * 1000 / total;
+ printf(" Node %d, CCPI%d: busy %lu, total %lu, used %lu.%lu%%\n",
+ node, link, busy, total, percent_x10 / 10, percent_x10 % 10);
+ }
+ }
+ }
+ }
+ }
+ return dram_test_thread_errors;
+}
+
+/**
+ * Perform a memory test.
+ *
+ * @param test Test type to run
+ * @param start_address
+ * Physical address to start at
+ * @param length Length of memory block
+ * @param flags Flags to control memory test options. Zero defaults to testing all
+ * nodes with statistics and progress output.
+ *
+ * @return Number of errors found. Zero is success. Negative means the test
+ * did not run due to some other failure.
+ */
+int bdk_dram_test(int test, uint64_t start_address, uint64_t length, bdk_dram_test_flags_t flags)
+{
+ /* These limits are arbitrary. They just make sure we aren't doing something
+       silly, like testing a non-cache-line-aligned memory region */
+ if (start_address & 0xffff)
+ {
+ bdk_error("DRAM test start address must be aligned on a 64KB boundary\n");
+ return -1;
+ }
+ if (length & 0xffff)
+ {
+ bdk_error("DRAM test length must be a multiple of 64KB\n");
+ return -1;
+ }
+
+ const char *name = bdk_dram_get_test_name(test);
+ if (name == NULL)
+ {
+ bdk_error("Invalid DRAM test number %d\n", test);
+ return -1;
+ }
+
+ /* If no nodes are selected assume the user meant all nodes */
+ if ((flags & (BDK_DRAM_TEST_NODE0 | BDK_DRAM_TEST_NODE1 | BDK_DRAM_TEST_NODE2 | BDK_DRAM_TEST_NODE3)) == 0)
+ flags |= BDK_DRAM_TEST_NODE0 | BDK_DRAM_TEST_NODE1 | BDK_DRAM_TEST_NODE2 | BDK_DRAM_TEST_NODE3;
+
+ /* Remove nodes from the flags that don't exist */
+ for (bdk_node_t node = BDK_NODE_0; node < BDK_NUMA_MAX_NODES; node++)
+ {
+ if (flags & BDK_DRAM_TEST_USE_CCPI)
+ {
+ if (!bdk_numa_exists(node ^ 1))
+ flags &= ~(1 << node);
+ }
+ else
+ {
+ if (!bdk_numa_exists(node))
+ flags &= ~(1 << node);
+ }
+ }
+
+
+    /* Make sure the start address is higher than the BDK's active range */
+ uint64_t top_of_bdk = bdk_dram_get_top_of_bdk();
+ if (start_address < top_of_bdk)
+ start_address = top_of_bdk;
+
+ /* Clear ECC error counters before starting the test */
+ for (int chan = 0; chan < BDK_MAX_MEM_CHANS; chan++) {
+ bdk_atomic_set64(&__bdk_dram_ecc_single_bit_errors[chan], 0);
+ bdk_atomic_set64(&__bdk_dram_ecc_double_bit_errors[chan], 0);
+ }
+
+ /* Make sure at least one core from each node is running */
+ for (bdk_node_t node = BDK_NODE_0; node < BDK_NUMA_MAX_NODES; node++)
+ {
+ if (flags & (1<<node))
+ {
+ int use_node = (flags & BDK_DRAM_TEST_USE_CCPI) ? node ^ 1 : node;
+ if (bdk_get_running_coremask(use_node) == 0)
+ bdk_init_cores(use_node, 1);
+ }
+ }
+
+ /* This returns any data compare errors found */
+ int errors = __bdk_dram_run_test(&TEST_INFO[test], start_address, length, flags);
+
+ /* Poll for any errors right now to make sure any ECC errors are reported */
+ for (bdk_node_t node = BDK_NODE_0; node < BDK_NUMA_MAX_NODES; node++)
+ {
+ if (bdk_numa_exists(node) && bdk_error_check)
+ bdk_error_check(node);
+ }
+
+ /* Check ECC error counters after the test */
+ int64_t ecc_single = 0;
+ int64_t ecc_double = 0;
+ int64_t ecc_single_errs[BDK_MAX_MEM_CHANS];
+ int64_t ecc_double_errs[BDK_MAX_MEM_CHANS];
+
+ for (int chan = 0; chan < BDK_MAX_MEM_CHANS; chan++) {
+ ecc_single += (ecc_single_errs[chan] = bdk_atomic_get64(&__bdk_dram_ecc_single_bit_errors[chan]));
+ ecc_double += (ecc_double_errs[chan] = bdk_atomic_get64(&__bdk_dram_ecc_double_bit_errors[chan]));
+ }
+
+ /* Always print any ECC errors */
+ if (ecc_single || ecc_double)
+ {
+ printf("Test \"%s\": ECC errors, %ld/%ld/%ld/%ld corrected, %ld/%ld/%ld/%ld uncorrected\n",
+ name,
+ ecc_single_errs[0], ecc_single_errs[1], ecc_single_errs[2], ecc_single_errs[3],
+ ecc_double_errs[0], ecc_double_errs[1], ecc_double_errs[2], ecc_double_errs[3]);
+ }
+ if (errors || ecc_double || ecc_single) {
+ printf("Test \"%s\": FAIL: %ld single, %ld double, %d compare errors\n",
+ name, ecc_single, ecc_double, errors);
+ }
+ else
+ BDK_TRACE(DRAM_TEST, "Test \"%s\": PASS\n", name);
+
+ return (errors + ecc_double + ecc_single);
+}
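+
+/* Typical use (illustrative only): run the "Random Data" test, index 3 in
+   TEST_INFO, over 256MB starting at 1MB on all nodes with default output:
+
+       int errors = bdk_dram_test(3, 1ull << 20, 1ull << 28, 0);
+       if (errors)
+           bdk_error("DRAM test found %d errors\n", errors);
+*/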
+
+/**
+ * Report a DRAM address in decoded format.
+ *
+ * @param address Physical address the error occurred at
+ *
+ */
+static void __bdk_dram_report_address_decode(uint64_t address, char *buffer, int len)
+{
+ int node, lmc, dimm, prank, lrank, bank, row, col;
+
+ bdk_dram_address_extract_info(address, &node, &lmc, &dimm, &prank, &lrank, &bank, &row, &col);
+
+ snprintf(buffer, len, "[0x%011lx] (N%d,LMC%d,DIMM%d,Rank%d/%d,Bank%02d,Row 0x%05x,Col 0x%04x)",
+ address, node, lmc, dimm, prank, lrank, bank, row, col);
+}
+
+/**
+ * Report a DRAM address in a new decoded format.
+ *
+ * @param address Physical address the error occurred at
+ * @param xor XOR of data read vs expected data
+ *
+ */
+static void __bdk_dram_report_address_decode_new(uint64_t address, uint64_t orig_xor, char *buffer, int len)
+{
+ int node, lmc, dimm, prank, lrank, bank, row, col;
+
+ int byte = 8; // means no byte-lanes in error, should not happen
+ uint64_t bits, print_bits = 0;
+ uint64_t xor = orig_xor;
+
+ // find the byte-lane(s) with errors
+ for (int i = 0; i < 8; i++) {
+ bits = xor & 0xffULL;
+ xor >>= 8;
+ if (bits) {
+ if (byte != 8) {
+ byte = 9; // means more than 1 byte-lane was present
+ print_bits = orig_xor; // print the full original
+ break; // quit now
+ } else {
+ byte = i; // keep checking
+ print_bits = bits;
+ }
+ }
+ }
+
+ bdk_dram_address_extract_info(address, &node, &lmc, &dimm, &prank, &lrank, &bank, &row, &col);
+
+ snprintf(buffer, len, "N%d.LMC%d: CMP byte %d xor 0x%02lx (DIMM%d,Rank%d/%d,Bank%02d,Row 0x%05x,Col 0x%04x)[0x%011lx]",
+ node, lmc, byte, print_bits, dimm, prank, lrank, bank, row, col, address);
+}
+
+/**
+ * Report a DRAM error. Errors are not shown after MAX_ERRORS_TO_REPORT is
+ * exceeded. Used when a single address is involved in the failure.
+ *
+ * @param address Physical address the error occurred at
+ * @param data Data read from memory
+ * @param correct Correct data
+ * @param burst Which burst this is from, informational only
+ * @param fails -1 for no retries done, >= 0 number of failures during retries
+ */
+void __bdk_dram_report_error(uint64_t address, uint64_t data, uint64_t correct, int burst, int fails)
+{
+ char buffer[128];
+ char failbuf[32];
+ int64_t errors = bdk_atomic_fetch_and_add64(&dram_test_thread_errors, 1);
+ uint64_t xor = data ^ correct;
+
+ if (errors < MAX_ERRORS_TO_REPORT)
+ {
+ if (fails < 0) {
+ snprintf(failbuf, sizeof(failbuf), " ");
+ } else {
+ int percent_x10 = fails * 1000 / RETRY_LIMIT;
+ snprintf(failbuf, sizeof(failbuf), ", retries failed %3d.%d%%",
+ percent_x10 / 10, percent_x10 % 10);
+ }
+
+ __bdk_dram_report_address_decode_new(address, xor, buffer, sizeof(buffer));
+ bdk_error("%s%s\n", buffer, failbuf);
+
+ if (errors == MAX_ERRORS_TO_REPORT-1)
+ bdk_error("No further DRAM errors will be reported\n");
+ }
+ return;
+}
+
+/**
+ * Report a DRAM error. Errors are not shown after MAX_ERRORS_TO_REPORT is
+ * exceeded. Used when two addresses might be involved in the failure.
+ *
+ * @param address1 First address involved in the failure
+ * @param data1 Data from the first address
+ * @param address2 Second address involved in the failure
+ * @param data2 Data from second address
+ * @param burst Which burst this is from, informational only
+ * @param fails -1 for no retries done, >= 0 number of failures during retries
+ */
+void __bdk_dram_report_error2(uint64_t address1, uint64_t data1, uint64_t address2, uint64_t data2,
+ int burst, int fails)
+{
+ int64_t errors = bdk_atomic_fetch_and_add64(&dram_test_thread_errors, 1);
+ if (errors < MAX_ERRORS_TO_REPORT)
+ {
+ char buffer1[80], buffer2[80];
+ char failbuf[32];
+
+ if (fails < 0) {
+ snprintf(failbuf, sizeof(failbuf), " ");
+ } else {
+ snprintf(failbuf, sizeof(failbuf), ", retried %d failed %d", RETRY_LIMIT, fails);
+ }
+ __bdk_dram_report_address_decode(address1, buffer1, sizeof(buffer1));
+ __bdk_dram_report_address_decode(address2, buffer2, sizeof(buffer2));
+
+ bdk_error("compare: data1: 0x%016lx, xor: 0x%016lx%s\n"
+ " %s\n %s\n",
+ data1, data1 ^ data2, failbuf,
+ buffer1, buffer2);
+
+ if (errors == MAX_ERRORS_TO_REPORT-1)
+ bdk_error("No further DRAM errors will be reported\n");
+ }
+ return;
+}
+
+/* Report the circumstances of a failure and try re-reading the memory
+ * location to see if the error is transient or permanent.
+ *
+ * Note: each re-read first evicts the address from the caches so the access goes to DRAM
+ */
+int __bdk_dram_retry_failure(int burst, uint64_t address, uint64_t data, uint64_t expected)
+{
+ int refail = 0;
+
+ // bypass the retries if we are already over the limit...
+ if (bdk_atomic_get64(&dram_test_thread_errors) < MAX_ERRORS_TO_REPORT) {
+
+ /* Try re-reading the memory location. A transient error may fail
+ * on one read and work on another. Keep on retrying even when a
+ * read succeeds.
+ */
+ for (int i = 0; i < RETRY_LIMIT; i++) {
+
+ __bdk_dram_flush_to_mem(address);
+ BDK_DCACHE_INVALIDATE;
+
+ uint64_t new = __bdk_dram_read64(address);
+
+ if (new != expected) {
+ refail++;
+ }
+ }
+ } else
+ refail = -1;
+
+ // this will increment the errors always, but maybe not print...
+ __bdk_dram_report_error(address, data, expected, burst, refail);
+
+ return 1;
+}
+
+/**
+ * Like __bdk_dram_retry_failure(), but for a pair of addresses that are
+ * expected to hold identical data.
+ *
+ * @param burst    Which burst this is from, informational only
+ * @param address1 First address involved in the failure
+ * @param data1    Data read from the first address
+ * @param address2 Second address involved in the failure
+ * @param data2    Data read from the second address
+ */
+int __bdk_dram_retry_failure2(int burst, uint64_t address1, uint64_t data1, uint64_t address2, uint64_t data2)
+{
+ int refail = 0;
+
+ // bypass the retries if we are already over the limit...
+ if (bdk_atomic_get64(&dram_test_thread_errors) < MAX_ERRORS_TO_REPORT) {
+
+ for (int i = 0; i < RETRY_LIMIT; i++) {
+ __bdk_dram_flush_to_mem(address1);
+ __bdk_dram_flush_to_mem(address2);
+ BDK_DCACHE_INVALIDATE;
+
+ uint64_t d1 = __bdk_dram_read64(address1);
+ uint64_t d2 = __bdk_dram_read64(address2);
+
+ if (d1 != d2) {
+ refail++;
+ }
+ }
+ } else
+ refail = -1;
+
+ // this will increment the errors always, but maybe not print...
+ __bdk_dram_report_error2(address1, data1, address2, data2, burst, refail);
+
+ return 1;
+}
+
+/**
+ * Inject a DRAM error at a specific address in memory. The injection can either
+ * be a single bit inside the byte, or a double bit error in the ECC byte. Double
+ * bit errors may corrupt memory, causing software to crash. The corruption is
+ * written to memory and will continue to exist until the cache line is written
+ * again. After a call to this function, the BDK should report an ECC error. Double
+ * bit errors corrupt bits 0-1.
+ *
+ * @param address Physical address to corrupt. Any byte alignment is supported
+ * @param bit Bit to corrupt in the byte (0-7), or -1 to create a double bit fault in the ECC
+ * byte.
+ */
+void bdk_dram_test_inject_error(uint64_t address, int bit)
+{
+ uint64_t aligned_address = address & -16;
+ int corrupt_bit = -1;
+ if (bit >= 0)
+ corrupt_bit = (address & 0xf) * 8 + bit;
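+
+    /* The LMC injects the error within one 128-bit (16-byte) beat: the byte
+       offset inside the beat and the bit number select one of bits 0-127,
+       split across CHAR_MASK0 (bits 0-63) and CHAR_MASK2 (bits 64-127)
+       below */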
+
+ /* Extract the DRAM controller information */
+ int node, lmc, dimm, prank, lrank, bank, row, col;
+ bdk_dram_address_extract_info(address, &node, &lmc, &dimm, &prank, &lrank, &bank, &row, &col);
+
+ /* Read the current data */
+ uint64_t data = __bdk_dram_read64(aligned_address);
+
+ /* Program LMC to inject the error */
+ if ((corrupt_bit >= 0) && (corrupt_bit < 64))
+ BDK_CSR_WRITE(node, BDK_LMCX_CHAR_MASK0(lmc), 1ull << corrupt_bit);
+ else if (bit == -1)
+ BDK_CSR_WRITE(node, BDK_LMCX_CHAR_MASK0(lmc), 3);
+ else
+ BDK_CSR_WRITE(node, BDK_LMCX_CHAR_MASK0(lmc), 0);
+ if (corrupt_bit >= 64)
+ BDK_CSR_WRITE(node, BDK_LMCX_CHAR_MASK2(lmc), 1ull << (corrupt_bit - 64));
+ else
+ BDK_CSR_WRITE(node, BDK_LMCX_CHAR_MASK2(lmc), 0);
+ BDK_CSR_MODIFY(c, node, BDK_LMCX_ECC_PARITY_TEST(lmc),
+ c.s.ecc_corrupt_idx = (address & 0x7f) >> 4;
+ c.s.ecc_corrupt_ena = 1);
+ BDK_CSR_READ(node, BDK_LMCX_ECC_PARITY_TEST(lmc));
+
+ /* Perform a write and push it to DRAM. This creates the error */
+ __bdk_dram_write64(aligned_address, data);
+ __bdk_dram_flush_to_mem(aligned_address);
+
+ /* Disable error injection */
+ BDK_CSR_MODIFY(c, node, BDK_LMCX_ECC_PARITY_TEST(lmc),
+ c.s.ecc_corrupt_ena = 0);
+ BDK_CSR_READ(node, BDK_LMCX_ECC_PARITY_TEST(lmc));
+ BDK_CSR_WRITE(node, BDK_LMCX_CHAR_MASK0(lmc), 0);
+ BDK_CSR_WRITE(node, BDK_LMCX_CHAR_MASK2(lmc), 0);
+
+ /* Read back the data, which should now cause an error */
+ printf("Loading the injected error address 0x%lx, node=%d, lmc=%d, dimm=%d, rank=%d/%d, bank=%d, row=%d, col=%d\n",
+ address, node, lmc, dimm, prank, lrank, bank, row, col);
+ __bdk_dram_read64(aligned_address);
+}
diff --git a/src/vendorcode/cavium/bdk/libbdk-driver/bdk-driver-rnm.c b/src/vendorcode/cavium/bdk/libbdk-driver/bdk-driver-rnm.c
new file mode 100644
index 0000000000..8394ad8c5e
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-driver/bdk-driver-rnm.c
@@ -0,0 +1,124 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include "libbdk-arch/bdk-csrs-pccpf.h"
+#include "libbdk-arch/bdk-csrs-rnm.h"
+
+BDK_REQUIRE_DEFINE(RNM);
+
+/**
+ * Reads 8 bits of random data from Random number generator
+ *
+ * @return random data
+ */
+uint8_t bdk_rng_get_random8(void)
+{
+ return bdk_read64_uint8(bdk_numa_get_address(bdk_numa_local(), BDK_RNM_RANDOM));
+}
+
+/**
+ * Reads 16 bits of random data from Random number generator
+ *
+ * @return random data
+ */
+uint16_t bdk_rng_get_random16(void)
+{
+ return bdk_read64_uint16(bdk_numa_get_address(bdk_numa_local(), BDK_RNM_RANDOM));
+}
+
+/**
+ * Reads 32 bits of random data from Random number generator
+ *
+ * @return random data
+ */
+uint32_t bdk_rng_get_random32(void)
+{
+ return bdk_read64_uint32(bdk_numa_get_address(bdk_numa_local(), BDK_RNM_RANDOM));
+}
+
+/**
+ * Reads 64 bits of random data from Random number generator
+ *
+ * @return random data
+ */
+uint64_t bdk_rng_get_random64(void)
+{
+ return bdk_read64_uint64(bdk_numa_get_address(bdk_numa_local(), BDK_RNM_RANDOM));
+}
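+
+/* Sketch (not part of the BDK API): deriving a bounded value from the raw
+   64-bit read. Plain modulo is slightly biased for ranges that do not
+   divide 2^64, which is usually acceptable for test patterns:
+
+       static inline uint64_t rng_below(uint64_t n)
+       {
+           return bdk_rng_get_random64() % n;
+       }
+*/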
+
+/**
+ * The RNM probe function
+ *
+ * @param device RNM to probe
+ *
+ * @return Zero on success, negative on failure
+ */
+static int probe(bdk_device_t *device)
+{
+ bdk_device_rename(device, "N%d.RNM%d", device->node, device->instance);
+ return 0;
+}
+
+/**
+ * RNM init() function
+ *
+ * @param device RNM to initialize
+ *
+ * @return Zero on success, negative on failure
+ */
+static int init(bdk_device_t *device)
+{
+ BDK_BAR_MODIFY(c, device, BDK_RNM_CTL_STATUS,
+ c.s.ent_en = 1;
+ c.s.rng_en = 1);
+ /* Read back after enable so we know it is done. Needed on t88 pass 2.0 emulator */
+ BDK_BAR_READ(device, BDK_RNM_CTL_STATUS);
+    /* Errata (RNM-22528): the first consecutive reads of RNM_RANDOM return
+       the same value. Read RNM_RANDOM at least once and discard the data
+       before using the random entropy */
+ bdk_rng_get_random64();
+ return 0;
+}
+
+bdk_driver_t __bdk_driver_rnm = {
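+    /* Match on product family in bits [31:24], device ID in [23:16], and
+       the Cavium vendor ID in the low 16 bits */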
+ .id = (BDK_PCC_PROD_E_GEN << 24) | BDK_PCC_VENDOR_E_CAVIUM | (BDK_PCC_DEV_IDL_E_RNM << 16),
+ .probe = probe,
+ .init = init,
+};
diff --git a/src/vendorcode/cavium/bdk/libbdk-hal/bdk-clock.c b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-clock.c
new file mode 100644
index 0000000000..f81285dffd
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-clock.c
@@ -0,0 +1,221 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include "libbdk-arch/bdk-csrs-gti.h"
+#include "libbdk-arch/bdk-csrs-ocx.h"
+
+/**
+ * Called in __bdk_init to setup the global timer
+ */
+void bdk_clock_setup(bdk_node_t node)
+{
+ const bdk_node_t local_node = bdk_numa_local();
+
+ /* Check if the counter was already setup */
+ BDK_CSR_INIT(cntcr, node, BDK_GTI_CC_CNTCR);
+ if (cntcr.s.en)
+ return;
+
+ /* Configure GTI to tick at BDK_GTI_RATE */
+ uint64_t sclk = bdk_clock_get_rate(node, BDK_CLOCK_SCLK);
+ uint64_t inc = (BDK_GTI_RATE << 32) / sclk;
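+    /* GTI_CC_CNTRATE is a 32.32 fixed-point increment applied every SCLK
+       cycle, so (BDK_GTI_RATE << 32) / sclk advances the counter at
+       BDK_GTI_RATE ticks per second */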
+ BDK_CSR_WRITE(node, BDK_GTI_CC_CNTRATE, inc);
+ BDK_CSR_WRITE(node, BDK_GTI_CTL_CNTFRQ, BDK_GTI_RATE);
+ cntcr.s.en = 1;
+ if (node != local_node)
+ {
+        /* Synchronize with the local node. This simple direct set of the
+           counter will be off by a little */
+ BDK_CSR_WRITE(node, BDK_GTI_CC_CNTCV, bdk_clock_get_count(BDK_CLOCK_TIME));
+ }
+ /* Enable the counter */
+ BDK_CSR_WRITE(node, BDK_GTI_CC_CNTCR, cntcr.u);
+ BDK_CSR_READ(node, BDK_GTI_CC_CNTCR);
+
+ if (node != local_node)
+ {
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X))
+ {
+ /* Assume the delay in each direction is the same, sync the counters */
+ int64_t local1 = bdk_clock_get_count(BDK_CLOCK_TIME);
+ int64_t remote = BDK_CSR_READ(node, BDK_GTI_CC_CNTCV);
+ int64_t local2 = bdk_clock_get_count(BDK_CLOCK_TIME);
+ int64_t expected = (local1 + local2) / 2;
+ BDK_CSR_WRITE(node, BDK_GTI_CC_CNTADD, expected - remote);
+ BDK_TRACE(INIT, "N%d.GTI: Clock synchronization with master\n"
+ " expected: %ld, remote %ld\n"
+ " Counter correction: %ld\n",
+ node, expected, remote, expected - remote);
+ }
+ else
+ {
+ /* Due to errata TBD, we need to use OCX_PP_CMD to write
+ GTI_CC_CNTMB in order for timestamps to update. These constants
+ are the addresses we need for both local and remote GTI_CC_CNTMB */
+ const uint64_t LOCAL_GTI_CC_CNTMB = bdk_numa_get_address(local_node, BDK_GTI_CC_CNTMB);
+ const uint64_t REMOTE_GTI_CC_CNTMB = bdk_numa_get_address(node, BDK_GTI_CC_CNTMB);
+ /* Build partial OCX_PP_CMD command used for writes. Address will
+ be filled later */
+ BDK_CSR_DEFINE(pp_cmd, BDK_OCX_PP_CMD);
+ pp_cmd.u = 0;
+ pp_cmd.s.wr_mask = 0xff;
+
+ const int NUM_AVERAGE = 16; /* Choose a power of two to avoid division */
+ int64_t local_to_remote_sum = 0;
+ int64_t local_to_remote_min = 1000000;
+ int64_t local_to_remote_max = -1000000;
+ int64_t remote_to_local_sum = 0;
+ int64_t remote_to_local_min = 1000000;
+ int64_t remote_to_local_max = -1000000;
+ for (int loop = 0; loop < NUM_AVERAGE; loop++)
+ {
+ /* Perform a write to the remote GTI_CC_CNTMB to cause timestamp
+ update. We don't care about the value actually written */
+ pp_cmd.s.addr = REMOTE_GTI_CC_CNTMB;
+ BDK_CSR_WRITE(local_node, BDK_OCX_PP_CMD, pp_cmd.u);
+ BDK_CSR_READ(local_node, BDK_OCX_PP_CMD);
+
+ int64_t remote = BDK_CSR_READ(node, BDK_GTI_CC_CNTMBTS);
+ int64_t local = BDK_CSR_READ(local_node, BDK_GTI_CC_CNTMBTS);
+ int64_t delta = remote - local;
+
+ local_to_remote_sum += delta;
+ if (delta < local_to_remote_min)
+ local_to_remote_min = delta;
+ if (delta > local_to_remote_max)
+ local_to_remote_max = delta;
+
+ /* Perform a write to the local GTI_CC_CNTMB to cause timestamp
+ update. We don't care about the value actually written */
+ pp_cmd.s.addr = LOCAL_GTI_CC_CNTMB;
+ BDK_CSR_WRITE(node, BDK_OCX_PP_CMD, pp_cmd.u);
+ BDK_CSR_READ(node, BDK_OCX_PP_CMD);
+
+ remote = BDK_CSR_READ(node, BDK_GTI_CC_CNTMBTS);
+ local = BDK_CSR_READ(local_node, BDK_GTI_CC_CNTMBTS);
+ delta = local - remote;
+
+ remote_to_local_sum += delta;
+ if (delta < remote_to_local_min)
+ remote_to_local_min = delta;
+ if (delta > remote_to_local_max)
+ remote_to_local_max = delta;
+ }
+ /* Calculate average, rounding to nearest */
+ int64_t local_to_remote = (local_to_remote_sum + NUM_AVERAGE/2) / NUM_AVERAGE;
+ int64_t remote_to_local = (remote_to_local_sum + NUM_AVERAGE/2) / NUM_AVERAGE;
+ /* Calculate remote node offset */
+ int64_t remote_offset = (remote_to_local - local_to_remote) / 2;
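+            /* Why this works: with one-way OCX delay d and the remote clock
+               leading the local clock by o, the local -> remote measurement
+               averages to d + o and the remote -> local measurement to d - o.
+               Their half difference is -o, which CNTADD applies to cancel
+               the remote skew */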
+ BDK_CSR_WRITE(node, BDK_GTI_CC_CNTADD, remote_offset);
+ BDK_TRACE(INIT, "N%d.GTI: Clock synchronization with master\n"
+ " local -> remote: min %ld, avg %ld, max %ld\n"
+ " remote -> local: min %ld, avg %ld, max %ld\n"
+ " Counter correction: %ld\n",
+ node,
+ local_to_remote_min, local_to_remote, local_to_remote_max,
+ remote_to_local_min, remote_to_local, remote_to_local_max,
+ remote_offset);
+ }
+ }
+}
+
+/**
+ * Get cycle count based on the clock type.
+ *
+ * @param clock - Enumeration of the clock type.
+ * @return - Get the number of cycles executed so far.
+ */
+uint64_t __bdk_clock_get_count_slow(bdk_clock_t clock)
+{
+ bdk_node_t node = bdk_numa_local();
+ BDK_CSR_INIT(rst_boot, node, BDK_RST_BOOT);
+ if (bdk_is_platform(BDK_PLATFORM_EMULATOR))
+ {
+ /* Force RCLK and SCLK to be 1GHz on emulator */
+ rst_boot.s.c_mul = 20;
+ rst_boot.s.pnr_mul = 20;
+ }
+ uint64_t ref_cntr = BDK_CSR_READ(node, BDK_RST_REF_CNTR);
+    switch (clock)
+ {
+ case BDK_CLOCK_TIME:
+ return 0; /* Handled in fast path */
+ case BDK_CLOCK_MAIN_REF:
+ return ref_cntr;
+ case BDK_CLOCK_RCLK:
+ return ref_cntr * rst_boot.s.c_mul;
+ case BDK_CLOCK_SCLK:
+ return ref_cntr * rst_boot.s.pnr_mul;
+ }
+ return 0;
+}
+
+/**
+ * Get clock rate based on the clock type.
+ *
+ * @param node Node to use in a Numa setup. Can be an exact ID or a special value.
+ * @param clock - Enumeration of the clock type.
+ * @return - return the clock rate.
+ */
+uint64_t __bdk_clock_get_rate_slow(bdk_node_t node, bdk_clock_t clock)
+{
+    /* This is currently defined to be 50 MHz */
+ const uint64_t REF_CLOCK = 50000000;
+
+ BDK_CSR_INIT(mio_rst_boot, node, BDK_RST_BOOT);
+ if (bdk_is_platform(BDK_PLATFORM_EMULATOR))
+ {
+ /* Force RCLK and SCLK to be 1GHz on emulator */
+ mio_rst_boot.s.c_mul = 20;
+ mio_rst_boot.s.pnr_mul = 20;
+ }
+ switch (clock)
+ {
+ case BDK_CLOCK_TIME:
+            return BDK_GTI_RATE; /* Programmed as part of setup */
+ case BDK_CLOCK_MAIN_REF:
+ return REF_CLOCK;
+ case BDK_CLOCK_RCLK:
+ return REF_CLOCK * mio_rst_boot.s.c_mul;
+ case BDK_CLOCK_SCLK:
+ return REF_CLOCK * mio_rst_boot.s.pnr_mul;
+ }
+ return 0;
+}
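+
+/* Worked example (illustrative multiplier): with the 50 MHz reference and
+   RST_BOOT[PNR_MUL] = 16, BDK_CLOCK_SCLK reports 50000000 * 16 = 800 MHz */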
+
diff --git a/src/vendorcode/cavium/bdk/libbdk-hal/bdk-config.c b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-config.c
new file mode 100644
index 0000000000..d4b412d439
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-config.c
@@ -0,0 +1,1946 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include <stdarg.h>
+#include <libfdt.h>
+#include <unistd.h>
+#include "libbdk-arch/bdk-csrs-mio_fus.h"
+#include "libbdk-arch/bdk-csrs-fus.h"
+
+/* Set this define to override the trace level the BDK uses. This is most
+   useful with trusted boot, when the setup menus are not able to
+   configure the trace level. A possible example: */
+//#define BDK_TRACE_OVERRIDE (1ull << BDK_TRACE_ENABLE_INIT)
+#define BDK_TRACE_OVERRIDE 0
+
+typedef enum
+{
+ BDK_CONFIG_TYPE_INT,
+ BDK_CONFIG_TYPE_STR,
+ BDK_CONFIG_TYPE_STR_LIST,
+ BDK_CONFIG_TYPE_BINARY,
+} bdk_config_type_t;
+
+typedef struct
+{
+ const char *format; /* Printf style format string to create the item name */
+ const bdk_config_type_t ctype;/* Type of this item */
+    int64_t default_value; /* Default value when not present. String defaults are cast to pointers from this */
+ const int64_t min_value;/* Minimum valid value for INT parameters. Unused for Strings */
+ const int64_t max_value;/* Maximum valid value for INT parameters. Unused for Strings */
+} bdk_config_info_t;
+
+static void config_set_defaults(void);
+
+/* Tracing defaults to the level specified here before config files are loaded */
+uint64_t bdk_trace_enables = BDK_TRACE_OVERRIDE;
+
+/* Global variables that contain the config inside a FDT */
+static void *config_fdt;
+static int config_node;
+
+static bdk_config_info_t config_info[__BDK_CONFIG_END] = {
+ /* Board manufacturing data */
+ [BDK_CONFIG_BOARD_MODEL] = {
+ .format = "BOARD-MODEL", /* String, No parameters */
+ .ctype = BDK_CONFIG_TYPE_STR,
+ .default_value = (long)"unknown",
+ },
+ [BDK_CONFIG_BOARD_REVISION] = {
+ .format = "BOARD-REVISION", /* String, No parameters */
+ .ctype = BDK_CONFIG_TYPE_STR,
+ .default_value = (long)"unknown",
+ },
+ [BDK_CONFIG_BOARD_SERIAL] = {
+ .format = "BOARD-SERIAL", /* String, No parameters */
+ .ctype = BDK_CONFIG_TYPE_STR,
+ .default_value = (long)"unknown",
+ },
+ [BDK_CONFIG_MAC_ADDRESS] = {
+ .format = "BOARD-MAC-ADDRESS", /* Int64, No parameters */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0, /* Default updated at boot based on fuses */
+ .min_value = 0,
+ .max_value = 0xffffffffffffll,
+ },
+ [BDK_CONFIG_MAC_ADDRESS_NUM] = {
+ .format = "BOARD-MAC-ADDRESS-NUM", /* Int, No parameters */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0,
+ .min_value = 0,
+ .max_value = 256,
+ },
+ [BDK_CONFIG_MAC_ADDRESS_NUM_OVERRIDE] = {
+ .format = "BOARD-MAC-ADDRESS-NUM-OVERRIDE", /* Int, No parameters */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = -1,
+ .min_value = -1,
+ .max_value = 256,
+ },
+
+ /* Board generic */
+ [BDK_CONFIG_BMC_TWSI] = {
+ .format = "BMC-TWSI", /* No parameters */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = -1, /* TWSI bus number, -1 = disabled */
+ .min_value = -1,
+ .max_value = 5,
+ },
+ [BDK_CONFIG_WATCHDOG_TIMEOUT] = {
+ .format = "WATCHDOG-TIMEOUT", /* No parameters */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0, /* 0 = disabled */
+ .min_value = 0,
+ .max_value = 10000,
+ },
+ [BDK_CONFIG_TWSI_WRITE] = {
+ .format = "TWSI-WRITE", /* No parameters */
+ .ctype = BDK_CONFIG_TYPE_STR_LIST,
+ },
+ [BDK_CONFIG_MDIO_WRITE] = {
+ .format = "MDIO-WRITE", /* No parameters */
+ .ctype = BDK_CONFIG_TYPE_STR_LIST,
+ },
+
+ /* Board wiring of network ports and PHYs */
+ [BDK_CONFIG_PHY_ADDRESS] = {
+ .format = "PHY-ADDRESS.N%d.BGX%d.P%d", /* Parameters: Node, Interface, Port */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = -1, /* Default to no PHY */
+ .min_value = -1,
+ .max_value = 0xffffffffll,
+ },
+ [BDK_CONFIG_BGX_ENABLE] = {
+ .format = "BGX-ENABLE.N%d.BGX%d.P%d", /* Parameters: Node, BGX, Port */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 1, /* 0 = disable, 1 = enable */
+ .min_value = 0,
+ .max_value = 1,
+ },
+    /* Non-EBB specific SFF8104 board and the like */
+    [BDK_CONFIG_AQUANTIA_PHY] = {
+        .format = "AQUANTIA-PHY.N%d.BGX%d.P%d", /* Parameters: Node, BGX, Port */
+        .ctype = BDK_CONFIG_TYPE_INT,
+        .default_value = 0,
+        .min_value = 0,
+        .max_value = 0xffffll,
+    },
+
+ /* BDK Configuration params */
+ [BDK_CONFIG_VERSION] = {
+ .format = "BDK-VERSION",
+ .ctype = BDK_CONFIG_TYPE_STR,
+ },
+ [BDK_CONFIG_NUM_PACKET_BUFFERS] = {
+ .format = "BDK-NUM-PACKET-BUFFERS",
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0, /* Default updated at boot */
+ .min_value = 0,
+ .max_value = 1000000,
+ },
+ [BDK_CONFIG_PACKET_BUFFER_SIZE] = {
+ .format = "BDK-PACKET-BUFFER-SIZE",
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 1024, /* bytes */
+ .min_value = 128,
+ .max_value = 32768,
+ },
+ [BDK_CONFIG_SHOW_LINK_STATUS] = {
+ .format = "BDK-SHOW-LINK-STATUS",
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 1, /* 0 = off, 1 = on */
+ .min_value = 0,
+ .max_value = 1,
+ },
+ [BDK_CONFIG_COREMASK] = {
+ .format = "BDK-COREMASK",
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0, /* Zero means all cores */
+ .min_value = 0,
+ .max_value = 0xffffffffffffll,
+ },
+ [BDK_CONFIG_BOOT_MENU_TIMEOUT] = {
+ .format = "BDK-BOOT-MENU-TIMEOUT",
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 10, /* seconds */
+ .min_value = 0,
+ .max_value = 300,
+ },
+ [BDK_CONFIG_BOOT_PATH_OPTION] = {
+ .format = "BDK-BOOT-PATH-OPTION",
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0, /* 0 = normal, 1 = diagnostics */
+ .min_value = 0,
+ .max_value = 1,
+ },
+ [BDK_CONFIG_BOOT_NEXT_STAGE] = {
+ .format = "BDK-CONFIG-BOOT-NEXT-STAGE-%s",
+ .ctype = BDK_CONFIG_TYPE_STR,
+ },
+ [BDK_CONFIG_TRACE] = {
+ .format = "BDK-CONFIG-TRACE",
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0, /* bitmask */
+ .min_value = 0,
+ .max_value = 0x7fffffffffffffffull,
+ },
+
+ /* Chip feature items */
+ [BDK_CONFIG_MULTI_NODE] = {
+ .format = "MULTI-NODE", /* No parameters */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 2, /* 2 = Auto */
+ .min_value = 0,
+ .max_value = 2,
+ },
+ [BDK_CONFIG_PCIE_EA] = {
+ .format = "PCIE-ENHANCED-ALLOCATION", /* No parameters */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 1, /* 1 = EA supported, 0 = EA not supported */
+ .min_value = 0,
+ .max_value = 1,
+ },
+ [BDK_CONFIG_PCIE_ORDERING] = {
+ .format = "PCIE-ORDERING", /* No parameters */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0, /* 1 = Wait for commit, 0 = Don't wait for commit */
+ .min_value = 0,
+ .max_value = 1,
+ },
+ [BDK_CONFIG_PCIE_PRESET_REQUEST_VECTOR] = {
+ .format = "PCIE-PRESET-REQUEST-VECTOR.N%d.PORT%d", /* Parameters: Node, Port */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0x593, /* Value for PCIERCX_CFG554[PRV] */
+ .min_value = 0,
+ .max_value = 0xffff,
+ },
+ [BDK_CONFIG_PCIE_WIDTH] = {
+ .format = "PCIE-WIDTH.N%d.PORT%d", /* Parameters: Node, Port */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = -1, /* Width override for PCIe links */
+ .min_value = -1,
+ .max_value = 16,
+ },
+ [BDK_CONFIG_PCIE_PHYSICAL_SLOT] = {
+ .format = "PCIE-PHYSICAL-SLOT.N%d.PORT%d", /* Parameters: Node, Port */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = -1, /* Define which physical slot we connect to on the board */
+ .min_value = -1,
+ .max_value = 8191,
+ },
+ [BDK_CONFIG_PCIE_FLASH] = {
+ .format = "PCIE-FLASH.N%d.PORT%d", /* Parameters: Node, Port */
+ .ctype = BDK_CONFIG_TYPE_STR_LIST,
+ },
+ [BDK_CONFIG_CCPI_LANE_REVERSE] = {
+ .format = "CCPI-LANE-REVERSE", /* No parameters */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0, /* 0 = No forced lane reversal, 1 = forced lane reversal */
+ .min_value = 0,
+ .max_value = 1,
+ },
+ [BDK_CONFIG_CHIP_SKU] = {
+ .format = "CHIP-SKU.NODE%d", /* Parameter: Node */
+ .ctype = BDK_CONFIG_TYPE_STR,
+ .default_value = (long)"TBD",
+ },
+ [BDK_CONFIG_CHIP_SERIAL] = {
+ .format = "CHIP-SERIAL.NODE%d", /* Parameter: Node */
+ .ctype = BDK_CONFIG_TYPE_STR,
+ .default_value = (long)"TBD",
+ },
+ [BDK_CONFIG_CHIP_UNIQUE_ID] = {
+ .format = "CHIP-UNIQUE-ID.NODE%d", /* Parameter: Node */
+ .ctype = BDK_CONFIG_TYPE_STR,
+ .default_value = (long)"TBD",
+ },
+
+ /* QLM related config */
+ [BDK_CONFIG_QLM_AUTO_CONFIG] = {
+ .format = "QLM-AUTO-CONFIG", /* Parameters: Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0, /* 0 = off, 1 = on */
+ .min_value = 0,
+ .max_value = 1,
+ },
+ /* SFF8104 related QLM config */
+ [BDK_CONFIG_QLM_DIP_AUTO_CONFIG] = {
+ .format = "QLM-DIP-AUTO-CONFIG", /* Parameters: Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0, /* 0 = off, 1 = on */
+ .min_value = 0,
+ .max_value = 1,
+ },
+
+ [BDK_CONFIG_QLM_MODE] = {
+ .format = "QLM-MODE.N%d.QLM%d", /* Parameters: Node, QLM */
+ .ctype = BDK_CONFIG_TYPE_STR,
+ },
+ [BDK_CONFIG_QLM_FREQ] = {
+ .format = "QLM-FREQ.N%d.QLM%d", /* Parameters: Node, QLM */
+ .ctype = BDK_CONFIG_TYPE_INT,
+        .default_value = 0, /* MHz */
+ .min_value = 0,
+ .max_value = 10312,
+ },
+ [BDK_CONFIG_QLM_CLK] = {
+ .format = "QLM-CLK.N%d.QLM%d", /* Parameters: Node, QLM */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 2, /* 2 = External */
+ .min_value = 0,
+ .max_value = 2,
+ },
+ [BDK_CONFIG_QLM_TUNING_TX_SWING] = {
+ .format = "QLM-TUNING-TX-SWING.N%d.QLM%d.LANE%d", /* Parameters: Node, QLM, Lane */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = -1, /* Default of no tuning */
+ .min_value = -1,
+ .max_value = 31,
+ },
+ [BDK_CONFIG_QLM_TUNING_TX_PREMPTAP] = {
+ .format = "QLM-TUNING-TX-PREMPTAP.N%d.QLM%d.LANE%d", /* Parameters: Node, QLM, Lane */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = -1, /* Default of no tuning */
+ .min_value = -1,
+ .max_value = 511,
+ },
+ [BDK_CONFIG_QLM_TUNING_TX_GAIN] = {
+ .format = "QLM-TUNING-TX-GAIN.N%d.QLM%d.LANE%d", /* Parameters: Node, QLM, Lane */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = -1, /* Default of no tuning */
+ .min_value = -1,
+ .max_value = 7,
+ },
+ [BDK_CONFIG_QLM_TUNING_TX_VBOOST] = {
+ .format = "QLM-TUNING-TX-VBOOST.N%d.QLM%d.LANE%d", /* Parameters: Node, QLM, Lane */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = -1, /* Default of no tuning */
+ .min_value = -1,
+ .max_value = 1,
+ },
+ [BDK_CONFIG_QLM_CHANNEL_LOSS] = {
+ .format = "QLM-CHANNEL-LOSS.N%d.QLM%d", /* Parameters: Node, QLM */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = -1, /* Default will use Cavium defaults */
+ .min_value = -1,
+ .max_value = 40,
+ },
+
+ /* DRAM configuration options */
+ [BDK_CONFIG_DDR_SPEED] = {
+ .format = "DDR-SPEED.N%d", /* Parameters: Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0, /* In MT/s */
+ .min_value = 0,
+ .max_value = 2400,
+ },
+ [BDK_CONFIG_DDR_ALT_REFCLK] = {
+ .format = "DDR-ALT-REFCLK.N%d", /* Parameters: Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+        .default_value = 0, /* MHz */
+ .min_value = 0,
+ .max_value = 100,
+ },
+ [BDK_CONFIG_DDR_SPD_ADDR] = {
+ .format = "DDR-CONFIG-SPD-ADDR.DIMM%d.LMC%d.N%d", /* Parameters: DIMM, LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0,
+ .min_value = 0,
+ .max_value = 0xffff,
+ },
+ [BDK_CONFIG_DDR_SPD_DATA] = {
+ .format = "DDR-CONFIG-SPD-DATA.DIMM%d.LMC%d.N%d", /* Parameters: DIMM, LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_BINARY,
+ },
+ [BDK_CONFIG_DDR_RANKS_DQX_CTL] = {
+ .format = "DDR-CONFIG-DQX-CTL.RANKS%d.DIMMS%d.LMC%d.N%d", /* Parameters: Num Ranks, Num DIMMs, LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0,
+ .min_value = 0,
+ .max_value = 0xf,
+ },
+ [BDK_CONFIG_DDR_RANKS_WODT_MASK] = {
+ .format = "DDR-CONFIG-WODT-MASK.RANKS%d.DIMMS%d.LMC%d.N%d", /* Parameters: Num Ranks, Num DIMMs, LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0,
+ .min_value = 0,
+ .max_value = 0xfffffff,
+ },
+ [BDK_CONFIG_DDR_RANKS_MODE1_PASR] = {
+ .format = "DDR-CONFIG-MODE1-PASR.RANKS%d.DIMMS%d.RANK%d.LMC%d.N%d", /* Parameters: Num Ranks, Num DIMMs, Rank, LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0,
+ .min_value = 0,
+ .max_value = 0x7,
+ },
+ [BDK_CONFIG_DDR_RANKS_MODE1_ASR] = {
+ .format = "DDR-CONFIG-MODE1-ASR.RANKS%d.DIMMS%d.RANK%d.LMC%d.N%d", /* Parameters: Num Ranks, Num DIMMs, Rank, LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0,
+ .min_value = 0,
+ .max_value = 1,
+ },
+ [BDK_CONFIG_DDR_RANKS_MODE1_SRT] = {
+ .format = "DDR-CONFIG-MODE1-SRT.RANKS%d.DIMMS%d.RANK%d.LMC%d.N%d", /* Parameters: Num Ranks, Num DIMMs, Rank, LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0,
+ .min_value = 0,
+ .max_value = 1,
+ },
+ [BDK_CONFIG_DDR_RANKS_MODE1_RTT_WR] = {
+ .format = "DDR-CONFIG-MODE1-RTT-WR.RANKS%d.DIMMS%d.RANK%d.LMC%d.N%d", /* Parameters: Num Ranks, Num DIMMs, Rank, LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT, // Split for extension bit
+ .default_value = 0,
+ .min_value = 0,
+ .max_value = 0x7,
+ },
+ [BDK_CONFIG_DDR_RANKS_MODE1_DIC] = {
+ .format = "DDR-CONFIG-MODE1-DIC.RANKS%d.DIMMS%d.RANK%d.LMC%d.N%d", /* Parameters: Num Ranks, Num DIMMs, Rank, LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0,
+ .min_value = 0,
+ .max_value = 0x3,
+ },
+ [BDK_CONFIG_DDR_RANKS_MODE1_RTT_NOM] = {
+ .format = "DDR-CONFIG-MODE1-RTT-NOM.RANKS%d.DIMMS%d.RANK%d.LMC%d.N%d", /* Parameters: Num Ranks, Num DIMMs, Rank, LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0,
+ .min_value = 0,
+ .max_value = 0x7,
+ },
+ [BDK_CONFIG_DDR_RANKS_MODE1_DB_OUTPUT_IMPEDANCE] = {
+ .format = "DDR-CONFIG-MODE1-DB-OUTPUT-IMPEDANCE.RANKS%d.DIMMS%d.LMC%d.N%d", /* Parameters: Num Ranks, Num DIMMs, LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT, // Not per RANK, only one
+ .default_value = 0,
+ .min_value = 0,
+ .max_value = 0x7,
+ },
+ [BDK_CONFIG_DDR_RANKS_MODE2_RTT_PARK] = {
+ .format = "DDR-CONFIG-MODE2-RTT-PARK.RANKS%d.DIMMS%d.RANK%d.LMC%d.N%d", /* Parameters: Num Ranks, Num DIMMs, Rank, LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0,
+ .min_value = 0,
+ .max_value = 0x7,
+ },
+ [BDK_CONFIG_DDR_RANKS_MODE2_VREF_VALUE] = {
+ .format = "DDR-CONFIG-MODE2-VREF-VALUE.RANKS%d.DIMMS%d.RANK%d.LMC%d.N%d", /* Parameters: Num Ranks, Num DIMMs, Rank, LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0,
+ .min_value = 0,
+ .max_value = 0x3f,
+ },
+ [BDK_CONFIG_DDR_RANKS_MODE2_VREF_RANGE] = {
+ .format = "DDR-CONFIG-MODE2-VREF-RANGE.RANKS%d.DIMMS%d.RANK%d.LMC%d.N%d", /* Parameters: Num Ranks, Num DIMMs, Rank, LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0,
+ .min_value = 0,
+ .max_value = 1,
+ },
+ [BDK_CONFIG_DDR_RANKS_MODE2_VREFDQ_TRAIN_EN] = {
+ .format = "DDR-CONFIG-MODE2-VREFDQ-TRAIN-EN.RANKS%d.DIMMS%d.LMC%d.N%d", /* Parameters: Num Ranks, Num DIMMs, LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT, // Not per RANK, only one
+ .default_value = 0,
+ .min_value = 0,
+ .max_value = 1,
+ },
+
+ [BDK_CONFIG_DDR_RANKS_RODT_CTL] = {
+ .format = "DDR-CONFIG-RODT-CTL.RANKS%d.DIMMS%d.LMC%d.N%d", /* Parameters: Num Ranks, Num DIMMs, LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0,
+ .min_value = 0,
+ .max_value = 0xf,
+ },
+ [BDK_CONFIG_DDR_RANKS_RODT_MASK] = {
+ .format = "DDR-CONFIG-RODT-MASK.RANKS%d.DIMMS%d.LMC%d.N%d", /* Parameters: Num Ranks, Num DIMMs, LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0,
+ .min_value = 0,
+ .max_value = 0xfffffff,
+ },
+ [BDK_CONFIG_DDR_CUSTOM_MIN_RTT_NOM_IDX] = {
+ .format = "DDR-CONFIG-CUSTOM-MIN-RTT-NOM-IDX.LMC%d.N%d", /* Parameters: LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 1,
+ .min_value = 0,
+ .max_value = 7,
+ },
+ [BDK_CONFIG_DDR_CUSTOM_MAX_RTT_NOM_IDX] = {
+ .format = "DDR-CONFIG-CUSTOM-MAX-RTT-NOM-IDX.LMC%d.N%d", /* Parameters: LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 5,
+ .min_value = 0,
+ .max_value = 7,
+ },
+ [BDK_CONFIG_DDR_CUSTOM_MIN_RODT_CTL] = {
+ .format = "DDR-CONFIG-CUSTOM-MIN-RODT-CTL.LMC%d.N%d", /* Parameters: LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 1,
+ .min_value = 0,
+ .max_value = 7,
+ },
+ [BDK_CONFIG_DDR_CUSTOM_MAX_RODT_CTL] = {
+ .format = "DDR-CONFIG-CUSTOM-MAX-RODT-CTL.LMC%d.N%d", /* Parameters: LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 5,
+ .min_value = 0,
+ .max_value = 7,
+ },
+ [BDK_CONFIG_DDR_CUSTOM_CK_CTL] = {
+ .format = "DDR-CONFIG-CUSTOM-CK-CTL.LMC%d.N%d", /* Parameters: LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0,
+ .min_value = 0,
+ .max_value = 0xffff,
+ },
+ [BDK_CONFIG_DDR_CUSTOM_CMD_CTL] = {
+ .format = "DDR-CONFIG-CUSTOM-CMD-CTL.LMC%d.N%d", /* Parameters: LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0,
+ .min_value = 0,
+ .max_value = 0xffff,
+ },
+ [BDK_CONFIG_DDR_CUSTOM_CTL_CTL] = {
+ .format = "DDR-CONFIG-CUSTOM-CTL-CTL.LMC%d.N%d", /* Parameters: LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0,
+ .min_value = 0,
+ .max_value = 0xf,
+ },
+ [BDK_CONFIG_DDR_CUSTOM_MIN_CAS_LATENCY] = {
+ .format = "DDR-CONFIG-CUSTOM-MIN-CAS-LATENCY.LMC%d.N%d", /* Parameters: LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0,
+ .min_value = 0,
+ .max_value = 0xffff,
+ },
+ [BDK_CONFIG_DDR_CUSTOM_OFFSET_EN] = {
+ .format = "DDR-CONFIG-CUSTOM-OFFSET-EN.LMC%d.N%d", /* Parameters: LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 1,
+ .min_value = 0,
+ .max_value = 1,
+ },
+ [BDK_CONFIG_DDR_CUSTOM_OFFSET] = {
+ .format = "DDR-CONFIG-CUSTOM-OFFSET.%s.LMC%d.N%d", /* Parameters: Type(UDIMM,RDIMM), LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT, // UDIMM or RDIMM
+ .default_value = 0,
+ .min_value = 0,
+ .max_value = 0xf,
+ },
+ [BDK_CONFIG_DDR_CUSTOM_RLEVEL_COMPUTE] = {
+ .format = "DDR-CONFIG-CUSTOM-RLEVEL-COMPUTE.LMC%d.N%d", /* Parameters: LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0,
+ .min_value = 0,
+ .max_value = 1,
+ },
+ [BDK_CONFIG_DDR_CUSTOM_RLEVEL_COMP_OFFSET] = {
+ .format = "DDR-CONFIG-CUSTOM-RLEVEL-COMP-OFFSET.%s.LMC%d.N%d", /* Parameters: Type(UDIMM,RDIMM), LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT, // UDIMM or RDIMM
+ .default_value = 2,
+ .min_value = 0,
+ .max_value = 0xffff,
+ },
+ [BDK_CONFIG_DDR_CUSTOM_DDR2T] = {
+ .format = "DDR-CONFIG-CUSTOM-DDR2T.%s.LMC%d.N%d", /* Parameters: Type(UDIMM,RDIMM), LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT, // UDIMM or RDIMM
+ .default_value = 0,
+ .min_value = 0,
+ .max_value = 1,
+ },
+ [BDK_CONFIG_DDR_CUSTOM_DISABLE_SEQUENTIAL_DELAY_CHECK] = {
+ .format = "DDR-CONFIG-CUSTOM-DISABLE-SEQUENTIAL-DELAY-CHECK.LMC%d.N%d", /* Parameters: LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0,
+ .min_value = 0,
+ .max_value = 1,
+ },
+ [BDK_CONFIG_DDR_CUSTOM_MAXIMUM_ADJACENT_RLEVEL_DELAY_INCREMENT] = {
+ .format = "DDR-CONFIG-CUSTOM-MAXIMUM-ADJACENT-RLEVEL-DELAY-INCREMENT.LMC%d.N%d", /* Parameters: LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0,
+ .min_value = 0,
+ .max_value = 0xffff,
+ },
+ [BDK_CONFIG_DDR_CUSTOM_PARITY] = {
+ .format = "DDR-CONFIG-CUSTOM-PARITY.LMC%d.N%d", /* Parameters: LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0,
+ .min_value = 0,
+ .max_value = 1,
+ },
+ [BDK_CONFIG_DDR_CUSTOM_FPRCH2] = {
+ .format = "DDR-CONFIG-CUSTOM-FPRCH2.LMC%d.N%d", /* Parameters: LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0,
+ .min_value = 0,
+ .max_value = 0xf,
+ },
+ [BDK_CONFIG_DDR_CUSTOM_MODE32B] = {
+ .format = "DDR-CONFIG-CUSTOM-MODE32B.LMC%d.N%d", /* Parameters: LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0,
+ .min_value = 0,
+ .max_value = 1,
+ },
+ [BDK_CONFIG_DDR_CUSTOM_MEASURED_VREF] = {
+ .format = "DDR-CONFIG-CUSTOM-MEASURED-VREF.LMC%d.N%d", /* Parameters: LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0,
+ .min_value = 0,
+ .max_value = 1,
+ },
+ [BDK_CONFIG_DDR_CUSTOM_DLL_WRITE_OFFSET] = {
+ .format = "DDR-CONFIG-CUSTOM-DLL-WRITE-OFFSET.BYTE%d.LMC%d.N%d", /* Parameters: Byte, LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0,
+ .min_value = -63,
+ .max_value = 63,
+ },
+ [BDK_CONFIG_DDR_CUSTOM_DLL_READ_OFFSET] = {
+ .format = "DDR-CONFIG-CUSTOM-DLL-READ-OFFSET.BYTE%d.LMC%d.N%d", /* Parameters: Byte, LMC, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0,
+ .min_value = -63,
+ .max_value = 63,
+ },
+
+ /* High level DRAM options */
+ [BDK_CONFIG_DRAM_VERBOSE] = {
+ .format = "DDR-VERBOSE", /* Parameters: Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0, /* 0 = off */
+ .min_value = 0,
+ .max_value = 255,
+ },
+ [BDK_CONFIG_DRAM_BOOT_TEST] = {
+ .format = "DDR-TEST-BOOT", /* No parameters */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0, /* 0 = off, 1 = on */
+ .min_value = 0,
+ .max_value = 1,
+ },
+ [BDK_CONFIG_DRAM_CONFIG_GPIO] = {
+ .format = "DDR-CONFIG-GPIO", /* No parameters */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = -1, /* -1 = disabled, otherwise GPIO number */
+ .min_value = -1,
+ .max_value = 63,
+ },
+ [BDK_CONFIG_DRAM_SCRAMBLE] = {
+ .format = "DDR-CONFIG-SCRAMBLE", /* No parameters */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 2, /* 0=off, 1=on, 2=trust on, non-trust off */
+ .min_value = 0,
+ .max_value = 2,
+ },
+
+ /* USB */
+ [BDK_CONFIG_USB_PWR_GPIO] = {
+ .format = "USB-PWR-GPIO.N%d.PORT%d", /* Parameters: Node, Port */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = -1, /* GPIO number, or -1 for none */
+ .min_value = -1,
+ .max_value = 49,
+ },
+ [BDK_CONFIG_USB_PWR_GPIO_POLARITY] = {
+ .format = "USB-PWR-GPIO-POLARITY.N%d.PORT%d", /* Parameters: Node, Port */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 1, /* GPIO polarity: 1=high, 0=low */
+ .min_value = 0,
+ .max_value = 1,
+ },
+ [BDK_CONFIG_USB_REFCLK_SRC] = {
+ .format = "USB-REFCLK-SRC.N%d.PORT%d", /* Parameters: Node, Port */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0, /* Clock Source (SS:HS)
+ ** 0 - SS(USB_REF_CLK) HS(USB_REF_CLK)
+ ** 1 - SS(DLMC_REF_CLK0) HS(DLMC_REF_CLK0)
+ ** 2 - SS(DLMC_REF_CLK1) HS(DLMC_REF_CLK1)
+ ** 3 - SS(USB_REF_CLK) HS(PLL_REF_CLK)
+ ** 4 - SS(DLMC_REF_CLK0) HS(PLL_REF_CLK)
+ ** 5 - SS(DLMC_REF_CLK1) HS(PLL_REF_CLK)
+ */
+ .min_value = 0,
+ .max_value = 5,
+ },
+
+ /* Nitrox reset - For CN88XX SC and SNT part. High drives Nitrox DC_OK high */
+ [BDK_CONFIG_NITROX_GPIO] = {
+ .format = "NITROX-GPIO.N%d", /* Parameters: Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = -1, /* GPIO number, or -1 for none */
+ .min_value = -1,
+ .max_value = 49,
+ },
+
+ /* How EYE diagrams are captured from a QLM */
+ [BDK_CONFIG_EYE_ZEROS] = {
+ .format = "QLM-EYE-NUM-ZEROS", /* No parameters */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 2,
+ .min_value = 1,
+ .max_value = 63,
+ },
+ [BDK_CONFIG_EYE_SAMPLE_TIME] = {
+ .format = "QLM-EYE-SAMPLE-TIME", /* No parameters */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 400, /* us */
+ .min_value = 20, /* us */
+ .max_value = 10000000, /* us */
+ },
+ [BDK_CONFIG_EYE_SETTLE_TIME] = {
+ .format = "QLM-EYE-SETTLE-TIME", /* No parameters */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 50, /* us */
+ .min_value = 20, /* us */
+ .max_value = 100000, /* us */
+ },
+
+ /* SGPIO */
+ [BDK_CONFIG_SGPIO_SCLOCK_FREQ] = {
+ .format = "SGPIO-SCLOCK-FREQ.N%d", /* Parameters: Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 10000, /* Hz */
+ .min_value = 128, /* Hz */
+ .max_value = 100000, /* Hz */
+ },
+ [BDK_CONFIG_SGPIO_PIN_POWER] = {
+ .format = "SGPIO-PIN-POWER.N%d", /* Parameters: Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = -1, /* GPIO number, or -1 for none */
+ .min_value = -1,
+ .max_value = 50,
+ },
+ [BDK_CONFIG_SGPIO_PIN_SCLOCK] = {
+ .format = "SGPIO-PIN-SCLOCK.N%d", /* Parameters: Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = -1, /* GPIO number, or -1 for none */
+ .min_value = -1,
+ .max_value = 50,
+ },
+ [BDK_CONFIG_SGPIO_PIN_SLOAD] = {
+ .format = "SGPIO-PIN-SLOAD.N%d", /* Parameters: Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = -1, /* GPIO number, or -1 for none */
+ .min_value = -1,
+ .max_value = 50,
+ },
+ [BDK_CONFIG_SGPIO_PIN_SDATAOUT] = {
+ .format = "SGPIO-PIN-SDATAOUT.N%d.D%d", /* Parameters: Node, Dataline */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = -1, /* GPIO number, or -1 for none */
+ .min_value = -1,
+ .max_value = 50,
+ },
+
+ /* VRM temperature throttling */
+ [BDK_CONFIG_VRM_TEMP_TRIP] = {
+ .format = "VRM-TEMP-TRIP.N%d", /* Parameters: Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 110, /* Degrees C */
+ .min_value = 0, /* Degrees C */
+ .max_value = 110, /* Degrees C. Max die temp plus 5 for uncertainty of measurement */
+ },
+ [BDK_CONFIG_VRM_TEMP_HIGH] = {
+ .format = "VRM-TEMP-HIGH.N%d", /* Parameters: Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 110, /* Degrees C */
+ .min_value = 0, /* Degrees C */
+ .max_value = 110, /* Degrees C. Max die temp plus 5 for uncertainty of measurement */
+ },
+ [BDK_CONFIG_VRM_TEMP_LOW] = {
+ .format = "VRM-TEMP-LOW.N%d", /* Parameters: Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 100, /* Degrees C */
+ .min_value = 0, /* Degrees C */
+ .max_value = 110, /* Degrees C. Max die temp plus 5 for uncertainty of measurement */
+ },
+ [BDK_CONFIG_VRM_THROTTLE_NORMAL] = {
+ .format = "VRM-THROTTLE-NORMAL.N%d", /* Parameters: Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 65, /* Percentage */
+ .min_value = 1, /* Percentage */
+ .max_value = 100, /* Percentage */
+ },
+ [BDK_CONFIG_VRM_THROTTLE_THERM] = {
+ .format = "VRM-THROTTLE-THERM.N%d", /* Parameters: Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 15, /* Percentage */
+ .min_value = 1, /* Percentage */
+ .max_value = 100, /* Percentage */
+ },
+
+ /* Generic GPIO, unrelated to a specific block */
+ [BDK_CONFIG_GPIO_PIN_SELECT] = {
+ .format = "GPIO-PIN-SELECT-GPIO%d.N%d", /* Parameters: GPIO, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = -1, /* Hardware default, normal GPIO pin */
+ .min_value = 0, /* GPIO_PIN_SEL_E enumeration */
+ .max_value = 65535, /* GPIO_PIN_SEL_E enumeration */
+ },
+ [BDK_CONFIG_GPIO_POLARITY] = {
+ .format = "GPIO-POLARITY-GPIO%d.N%d", /* Parameters: GPIO, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0, /* Hardware default, not inverted */
+ .min_value = 0, /* Not inverted */
+ .max_value = 1, /* Inverted */
+ },
+
+ /* PBUS */
+ [BDK_CONFIG_PBUS_CFG] = {
+ .format = "PBUS-CFG.REGION%d.N%d", /* Parameters: Region, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0, /* Hardware default */
+ .min_value = 0, /* No change */
+ .max_value = 0x0000ffffffffffffll, /* PBUS_REGX_CFG value */
+ },
+ [BDK_CONFIG_PBUS_TIM] = {
+ .format = "PBUS-TIM.REGION%d.N%d", /* Parameters: Region, Node */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0, /* Hardware default, not inverted */
+ .min_value = 0x8000000000000000ll, /* PBUS_REGX_TIM value, zero is no change */
+ .max_value = 0x7fffffffffffffffll, /* PBUS_REGX_TIM value */
+ },
+
+ /* Trusted boot information */
+ [BDK_CONFIG_TRUST_CSIB] = {
+ .format = "TRUST-CSIB", /* No parameters */
+ .ctype = BDK_CONFIG_TYPE_BINARY,
+ .default_value = 0, /* Hardware default */
+ },
+ [BDK_CONFIG_TRUST_ROT_ADDR] = {
+ .format = "TRUST-ROT-ADDR", /* No parameters */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0, /* Non-trusted */
+ .min_value = 0, /* No key */
+ .max_value = 0x0000ffffffffffffll, /* Address in key memory */
+ },
+ [BDK_CONFIG_TRUST_BSSK_ADDR] = {
+ .format = "TRUST-BSSK-ADDR", /* No parameters */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0, /* No HUK, so no BSSK */
+ .min_value = 0, /* No HUK, so no BSSK */
+ .max_value = 0x0000ffffffffffffll, /* Address in key memory */
+ },
+};
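+
+/* In the configuration device tree every item above becomes a string
+   property under the /cavium,bdk node, e.g. (illustrative values):
+
+       cavium,bdk {
+           BOARD-MODEL = "unknown";
+           QLM-FREQ.N0.QLM2 = "0x1388";
+       };
+*/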
+
+/**
+ * Look up a configuration item in the environment.
+ *
+ * @param name
+ *
+ * @return
+ */
+static const char *get_value(const char *name, int *blob_size)
+{
+ if (!config_fdt)
+ {
+ bdk_error("bdk-config asked for %s before configuration loaded\n", name);
+ return NULL;
+ }
+
+ char n[64];
+ strncpy(n, name, sizeof(n));
+ n[sizeof(n)-1] = '\0';
+
+ while (*n)
+ {
+ const char *val = fdt_getprop(config_fdt, config_node, n, blob_size);
+ if (val)
+ return val;
+
+ char *p = strrchr(n, '.');
+ if (p)
+ *p = '\0';
+ else
+ break;
+ }
+ return NULL;
+}
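+/* The lookup falls back hierarchically: a query for "QLM-FREQ.N0.QLM2"
+   tries "QLM-FREQ.N0.QLM2", then "QLM-FREQ.N0", then "QLM-FREQ" before
+   returning NULL */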
+
+/**
+ * Get an integer configuration item
+ *
+ * @param cfg_item Config item to get. If the item takes parameters (see bdk_config_t), then the
+ * parameters are listed following cfg_item.
+ *
+ * @return The value of the configuration item, or def_value if the item is not set
+ */
+int64_t bdk_config_get_int(bdk_config_t cfg_item, ...)
+{
+ /* Make sure the correct access function was called */
+ if (config_info[cfg_item].ctype != BDK_CONFIG_TYPE_INT)
+ bdk_fatal("bdk_config_get_int() called for %s, not an int\n",
+ config_info[cfg_item].format);
+
+ char name[64];
+ va_list args;
+ va_start(args, cfg_item);
+ vsnprintf(name, sizeof(name)-1, config_info[cfg_item].format, args);
+ va_end(args);
+
+ const char *val = get_value(name, NULL);
+ if (val)
+ {
+ int count;
+ int64_t tmp;
+ if ((val[0] == '0') && (val[1] == 'x'))
+ count = sscanf(val + 2, "%lx", &tmp);
+ else
+ count = sscanf(val, "%li", &tmp);
+ if (count == 1)
+ {
+ if ((tmp < config_info[cfg_item].min_value) || (tmp > config_info[cfg_item].max_value))
+ {
+ bdk_warn("Out of range for %s = \"%s\", using default\n", name, val);
+ return config_info[cfg_item].default_value;
+ }
+ return tmp;
+ }
+ else
+ {
+ bdk_warn("Failed to parse %s = \"%s\", using default\n", name, val);
+ return config_info[cfg_item].default_value;
+ }
+ }
+ else
+ return config_info[cfg_item].default_value;
+}
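+
+/* Usage sketch: the varargs fill the item's format string, so reading the
+   PHY address for node 0, BGX 1, port 2 looks like:
+
+       int64_t phy = bdk_config_get_int(BDK_CONFIG_PHY_ADDRESS, 0, 1, 2);
+*/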
+
+/**
+ * Get a string configuration item
+ *
+ * @param cfg_item Config item to get. If the item takes parameters (see bdk_config_t), then the
+ * parameters are listed following cfg_item.
+ *
+ * @return The value of the configuration item, or def_value if the item is not set
+ */
+const char *bdk_config_get_str(bdk_config_t cfg_item, ...)
+{
+ /* Make sure the correct access function was called */
+ if (config_info[cfg_item].ctype != BDK_CONFIG_TYPE_STR)
+ bdk_fatal("bdk_config_get_str() called for %s, not a str\n",
+ config_info[cfg_item].format);
+
+    char name[64];
+    va_list args;
+    va_start(args, cfg_item);
+    /* vsnprintf() consumes a va_list, so keep a copy for the legacy name */
+    va_list args_copy;
+    va_copy(args_copy, args);
+    vsnprintf(name, sizeof(name)-1, config_info[cfg_item].format, args);
+    va_end(args);
+
+    if (BDK_CONFIG_QLM_MODE == cfg_item)
+    {
+        char name2[64];
+        vsnprintf(name2, sizeof(name2)-1, "QLM-MODE.N%d.DLM%d", args_copy);
+        const char *val = get_value(name2, NULL);
+        if (val)
+            bdk_warn("%s: QLM-MODE.N%%d.DLM%%d format deprecated. Please use QLM-MODE.N%%d.QLM%%d instead\n", name2);
+    }
+    va_end(args_copy);
+
+ const char *val = get_value(name, NULL);
+ if (val)
+ return val;
+ else
+ return (const char *)config_info[cfg_item].default_value;
+}
+
+/**
+ * Get a binary blob
+ *
+ * @param blob_size Integer to receive the size of the blob
+ * @param cfg_item Config item to get. If the item takes parameters (see bdk_config_t), then the
+ * parameters are listed following cfg_item.
+ *
+ * @return The value of the configuration item, or def_value if the item is not set
+ */
+const void* bdk_config_get_blob(int *blob_size, bdk_config_t cfg_item, ...)
+{
+ char name[64];
+ va_list args;
+ va_start(args, cfg_item);
+ vsnprintf(name, sizeof(name)-1, config_info[cfg_item].format, args);
+ va_end(args);
+
+ const void *val = get_value(name, blob_size);
+ if (val)
+ return val;
+ else
+ return (const void *)config_info[cfg_item].default_value;
+}
+
+/**
+ * Set an integer configuration item. Note this only sets the item in memory,
+ * persistent storage is not updated. The optional parameters for the setting are
+ * not supplied, meaning this function only changes the global default.
+ *
+ * @param value Configuration item value
+ * @param cfg_item Config item to set. If the item takes parameters (see bdk_config_t), then the
+ * parameters are listed following cfg_item.
+ */
+void bdk_config_set_int_no_param(int64_t value, bdk_config_t cfg_item)
+{
+ /* Make sure the correct access function was called */
+ if (config_info[cfg_item].ctype != BDK_CONFIG_TYPE_INT)
+ bdk_fatal("bdk_config_set_int_no_param() called for %s, not an int\n",
+ config_info[cfg_item].format);
+
+ char name[64];
+ char valstr[20];
+ /* Create a name without the optional parameters */
+ strncpy(name, config_info[cfg_item].format, sizeof(name) - 1);
+ name[sizeof(name) - 1] = 0;
+ char *ptr = strchr(name, '.');
+ if (ptr)
+ *ptr = 0;
+
+ if (!config_fdt)
+ {
+ bdk_error("bdk-config set %s before configuration loaded\n", name);
+ return;
+ }
+ if ((value < config_info[cfg_item].min_value) || (value > config_info[cfg_item].max_value))
+ {
+ bdk_error("Set out of range for %s = \"0x%lx\", ignoring\n", name, value);
+ return;
+ }
+
+ if (value < 10)
+ snprintf(valstr, sizeof(valstr), "%ld", value);
+ else
+ snprintf(valstr, sizeof(valstr), "0x%lx", value);
+
+ int status = fdt_setprop_string(config_fdt, config_node, name, valstr);
+ if (status < 0)
+ bdk_fatal("Failed to set %s=%s in FDT\n", name, valstr);
+}
+
+/**
+ * Set an integer configuration item. Note this only sets the item in memory,
+ * persistent storage is not updated.
+ *
+ * @param value Configuration item value
+ * @param cfg_item Config item to set. If the item takes parameters (see bdk_config_t), then the
+ * parameters are listed following cfg_item.
+ */
+void bdk_config_set_int(int64_t value, bdk_config_t cfg_item, ...)
+{
+ /* Make sure the correct access function was called */
+ if (config_info[cfg_item].ctype != BDK_CONFIG_TYPE_INT)
+ bdk_fatal("bdk_config_set_int() called for %s, not an int\n",
+ config_info[cfg_item].format);
+
+ char name[64];
+ char valstr[20];
+ va_list args;
+ va_start(args, cfg_item);
+ vsnprintf(name, sizeof(name)-1, config_info[cfg_item].format, args);
+ va_end(args);
+
+ if (!config_fdt)
+ {
+ bdk_error("bdk-config set %s before configuration loaded\n", name);
+ return;
+ }
+ if ((value < config_info[cfg_item].min_value) || (value > config_info[cfg_item].max_value))
+ {
+ bdk_error("Set out of range for %s = \"0x%lx\", ignoring\n", name, value);
+ return;
+ }
+
+ if (value < 10)
+ snprintf(valstr, sizeof(valstr), "%ld", value);
+ else
+ snprintf(valstr, sizeof(valstr), "0x%lx", value);
+
+ int status = fdt_setprop_string(config_fdt, config_node, name, valstr);
+ if (status < 0)
+ bdk_fatal("Failed to set %s=%s in FDT\n", name, valstr);
+}
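+
+/* Usage sketch: items without parameters take no extra arguments, e.g.
+   setting the boot menu timeout to 5 seconds:
+
+       bdk_config_set_int(5, BDK_CONFIG_BOOT_MENU_TIMEOUT);
+*/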
+
+/**
+ * Set an integer configuration item. Note this only sets the item in memory,
+ * persistent storage is not updated.
+ *
+ * @param value Configuration item value
+ * @param cfg_item Config item to set. If the item takes parameters (see bdk_config_t), then the
+ * parameters are listed following cfg_item.
+ */
+void bdk_config_set_str(const char *value, bdk_config_t cfg_item, ...)
+{
+ /* Make sure the correct access function was called */
+ if (config_info[cfg_item].ctype != BDK_CONFIG_TYPE_STR)
+ bdk_fatal("bdk_config_set_str() called for %s, not a str\n",
+ config_info[cfg_item].format);
+
+ char name[64];
+ va_list args;
+
+ va_start(args, cfg_item);
+ vsnprintf(name, sizeof(name)-1, config_info[cfg_item].format, args);
+ va_end(args);
+
+ if (!config_fdt)
+ {
+ bdk_error("bdk-config set %s before configuration loaded\n", name);
+ return;
+ }
+
+ int status;
+ if (value)
+ status = fdt_setprop_string(config_fdt, config_node, name, value);
+ else
+ status = fdt_delprop(config_fdt, config_node, name);
+
+ if ((status < 0) && (status != -FDT_ERR_NOTFOUND))
+ bdk_fatal("Failed to set %s=%s in FDT\n", name, value);
+}
+
+/**
+ * Set a blob configuration item. Note this only sets the
+ * item in memory, persistent storage is not updated. The optional
+ * parameters for the setting are not supplied, meaning this function
+ * only changes the global default.
+ *
+ * @param size Size of the item in bytes. A size of zero removes the device tree field
+ * @param value Configuration item value
+ * @param cfg_item Config item to set. If the item takes parameters (see bdk_config_t), then the
+ * parameters are listed following cfg_item.
+ */
+void bdk_config_set_blob_no_param(int size, const void *value, bdk_config_t cfg_item)
+{
+ /* Make sure the correct access function was called */
+ if ((config_info[cfg_item].ctype != BDK_CONFIG_TYPE_BINARY) &&
+ (config_info[cfg_item].ctype != BDK_CONFIG_TYPE_STR_LIST))
+ bdk_fatal("bdk_config_set_blob() called for %s, not binary\n",
+ config_info[cfg_item].format);
+
+ char name[64];
+ /* Create a name without the optional parameters */
+ strncpy(name, config_info[cfg_item].format, sizeof(name) - 1);
+ name[sizeof(name) - 1] = 0;
+ char *ptr = strchr(name, '.');
+ if (ptr)
+ *ptr = 0;
+
+ if (!config_fdt)
+ {
+ bdk_error("bdk-config set %s before configuration loaded\n", name);
+ return;
+ }
+
+ int status;
+ if (size)
+ status = fdt_setprop(config_fdt, config_node, name, value, size);
+ else
+ status = fdt_delprop(config_fdt, config_node, name);
+
+ if ((status < 0) && (status != -FDT_ERR_NOTFOUND))
+ bdk_fatal("Failed to set %s in FDT\n", name);
+}
+
+/**
+ * Set a blob configuration item. Note this only sets the
+ * item in memory, persistent storage is not updated.
+ *
+ * @param size Size of the item in bytes. A size of zero removes the device tree field
+ * @param value Configuration item value
+ * @param cfg_item Config item to set. If the item takes parameters (see bdk_config_t), then the
+ * parameters are listed following cfg_item.
+ */
+void bdk_config_set_blob(int size, const void *value, bdk_config_t cfg_item, ...)
+{
+ /* Make sure the correct access function was called */
+ if ((config_info[cfg_item].ctype != BDK_CONFIG_TYPE_BINARY) &&
+ (config_info[cfg_item].ctype != BDK_CONFIG_TYPE_STR_LIST))
+ bdk_fatal("bdk_config_set_blob() called for %s, not binary\n",
+ config_info[cfg_item].format);
+
+ char name[64];
+ va_list args;
+
+ va_start(args, cfg_item);
+ vsnprintf(name, sizeof(name)-1, config_info[cfg_item].format, args);
+ va_end(args);
+
+ if (!config_fdt)
+ {
+ bdk_error("bdk-config set %s before configuration loaded\n", name);
+ return;
+ }
+
+ int status;
+ if (size)
+ status = fdt_setprop(config_fdt, config_node, name, value, size);
+ else
+ status = fdt_delprop(config_fdt, config_node, name);
+
+ if ((status < 0) && (status != -FDT_ERR_NOTFOUND))
+ bdk_fatal("Failed to set %s in FDT\n", name);
+}
+
+/**
+ * Multiple functions need to display the config item help string in a format
+ * suitable for inclusion in a device tree. This function displays the help
+ * message properly indented and such.
+ *
+ * @param cfg Config item to display help for
+ */
+static void display_help(bdk_config_t cfg)
+{
+ /* Print the help text as a comment before the entry */
+ /* Indent with tabs like Linux requires */
+ printf("\n");
+ printf("\t/* ");
+ const char *ptr = bdk_config_get_help(cfg);
+ while (*ptr)
+ {
+ putchar(*ptr);
+ if (*ptr == '\n')
+ putchar('\t');
+ ptr++;
+ }
+ printf(" */\n");
+    /* Print the parameter and its default value as a comment. This will be
+       a reference that is easy for the user to change */
+ printf("\t//%s = ", config_info[cfg].format);
+ switch (config_info[cfg].ctype)
+ {
+ case BDK_CONFIG_TYPE_INT:
+ if (config_info[cfg].default_value < 10)
+ printf("\"%ld\"", config_info[cfg].default_value);
+ else
+ printf("\"0x%lx\"", config_info[cfg].default_value);
+ break;
+ case BDK_CONFIG_TYPE_STR:
+ case BDK_CONFIG_TYPE_STR_LIST:
+ if (config_info[cfg].default_value)
+ printf("\"%s\"", (const char *)config_info[cfg].default_value);
+ else
+ printf("\"\"");
+ break;
+ case BDK_CONFIG_TYPE_BINARY:
+ printf("[]");
+ break;
+ }
+ printf(";\n");
+}
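+
+// Example output for an integer item (help text illustrative). Note that
+// defaults of 10 or more are printed in hex:
+//
+//     /* Timeout in seconds for the boot menu */
+//     //BDK-BOOT-MENU-TIMEOUT = "0xa";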
+
+/**
+ * Display the active configuration as a valid device tree
+ */
+void bdk_config_show(void)
+{
+ /* Output the standard DTS headers */
+ printf("/dts-v1/;\n");
+ printf("\n");
+ printf("/ {\n");
+ printf("cavium,bdk {\n");
+ for (bdk_config_t cfg = 0; cfg < __BDK_CONFIG_END; cfg++)
+ {
+ /* Show the help message */
+ display_help(cfg);
+
+ /* Figure out how much of the config item is fixed versus
+ the optional parameters */
+ const char *format = config_info[cfg].format;
+ const char *format_param = strchr(format, '.');
+ int format_length = 0;
+ if (format_param)
+ format_length = format_param - format;
+
+ /* Loop through all device tree entries displaying the ones that
+ match this format */
+ int offset = fdt_first_property_offset(config_fdt, config_node);
+ while (offset >= 0)
+ {
+ /* Get the device tree item */
+ const char *name = NULL;
+ int data_size = 0;
+ const char *data = fdt_getprop_by_offset(config_fdt, offset, &name, &data_size);
+ const char *data_end = data + data_size;
+ /* Find the first param */
+ const char *name_param = strchr(name, '.');
+ int name_length = 0;
+ if (name_param)
+ {
+ /* We want to compare up to the first param */
+ name_length = name_param - name;
+                /* If the lengths differ before the parameters,
+                   then force a full match, which will always fail */
+ if (name_length != format_length)
+ name_length = 0;
+ }
+ else /* No params, match base of format */
+ name_length = format_length;
+
+ /* Check if it matches the current config format */
+ int match;
+ if (name_length)
+ {
+ /* Check the prefix */
+ match = strncmp(name, format, name_length);
+ if (match == 0)
+ {
+ /* Prefix matched. We only really match if the next
+ character is the end of the string or a '.' */
+ if ((name[name_length] != 0) && (name[name_length] != '.'))
+ match = 1;
+ }
+ }
+ else
+ match = strcmp(name, format);
+ /* Print matching entries */
+ if (match == 0)
+ {
+ if (config_info[cfg].ctype == BDK_CONFIG_TYPE_BINARY)
+ {
+ printf("\t%s = [", name);
+ const char *ptr = data;
+ while (ptr < data_end)
+ {
+ printf(" %02x", (int)*ptr);
+ ptr++;
+ }
+ printf(" ]");
+ }
+ else
+ {
+ printf("\t%s = \"%s\"", name, data);
+ data += strlen(data) + 1;
+ while (data < data_end)
+ {
+ printf(",\n\t\t\"%s\"", data);
+ data += strlen(data) + 1;
+ }
+ }
+ printf(";\n");
+ }
+ offset = fdt_next_property_offset(config_fdt, offset);
+ }
+ }
+ /* Output the standard DTS footers */
+ printf("}; /* cavium,bdk */\n");
+ printf("}; /* / */\n");
+}
+
+/**
+ * Display a list of all possible config items with help text
+ */
+void bdk_config_help(void)
+{
+ /* Write out formatted as part of a device tree source (dts) file */
+ printf("/dts-v1/;\n");
+ printf("\n");
+ printf("/ {\n");
+ printf("cavium,bdk {\n");
+ for (bdk_config_t cfg = 0; cfg < __BDK_CONFIG_END; cfg++)
+ display_help(cfg);
+ printf("}; /* cavium,bdk */\n");
+ printf("}; /* / */\n");
+}
+
+
+/**
+ * Save the current configuration to flash
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_config_save(void)
+{
+ /* Pack the FDT so it uses less space */
+ int status = fdt_pack(config_fdt);
+ if (status < 0)
+ {
+ bdk_error("FDT error %d: %s\n", status, fdt_strerror(status));
+ return -1;
+ }
+
+ /* Calculate a CRC32 of the FDT */
+ int fdt_size = fdt_totalsize(config_fdt);
+ uint32_t crc32 = bdk_crc32(config_fdt, fdt_size, 0);
+
+ /* Open the output file */
+ FILE *outf = fopen("/fatfs/default.dtb", "wb");
+ if (!outf)
+ {
+ bdk_error("Failed to open flash");
+ return -1;
+ }
+
+ /* Write the FDT */
+ if (fwrite(config_fdt, fdt_size, 1, outf) != 1)
+ {
+ bdk_error("Failed to write FDT");
+ fclose(outf);
+ return -1;
+ }
+
+ /* Save the CRC32 in the same endianness as the FDT */
+ crc32 = cpu_to_fdt32(crc32);
+ if (fwrite(&crc32, sizeof(crc32), 1, outf) != 1)
+ {
+ bdk_error("Failed to write FDT CRC32");
+ fclose(outf);
+ return -1;
+ }
+
+ fclose(outf);
+ return 0;
+}
+
+/**
+ * Takes the current live device tree and exports it to a memory address suitable
+ * for passing to the next binary in register X1.
+ *
+ * @return Physical address of the device tree, or 0 on failure
+ */
+uint64_t __bdk_config_export_to_mem(void)
+{
+ void *end_ptr = sbrk(0);
+ bdk_node_t node = bdk_numa_master();
+ int fdt_size = fdt_totalsize(config_fdt);
+
+ /* Round size up to 4KB boundary, be sure to add 4 bytes for CRC32 */
+ int fdt_space = (fdt_size + 4 + 0xfff) & -4096;
+    /* First try 4MB - FDT size, as this keeps the FDT in the 4MB secure space
+       set up by ATF */
+ void *fdt_ptr = bdk_phys_to_ptr(0x400000 - fdt_space);
+ if (!__bdk_is_dram_enabled(node))
+ {
+ /* Address must be in L2 */
+ int l2_size = bdk_l2c_get_cache_size_bytes(node);
+ void *l2_ptr = bdk_phys_to_ptr(l2_size - fdt_space);
+ if (l2_ptr < fdt_ptr)
+ fdt_ptr = l2_ptr;
+ if (fdt_ptr < end_ptr)
+ {
+ bdk_error("No room for FDT to pass to next binary\n");
+ return 0;
+ }
+ }
+ else
+ {
+ /* We have DRAM, make sure we're past the end of this image */
+ if (fdt_ptr < end_ptr)
+ fdt_ptr = end_ptr;
+ }
+ uint32_t crc32 = bdk_crc32(config_fdt, fdt_size, 0);
+ fdt_move(config_fdt, fdt_ptr, fdt_size);
+ /* CRC32 is stored in same endianness as FDT at the end */
+ *(uint32_t *)((const char *)fdt_ptr + fdt_size) = cpu_to_fdt32(crc32);
+ BDK_TRACE(FDT_OS, "Exported device tree to memory %p, size 0x%x, CRC32 %08x\n",
+ fdt_ptr, fdt_size, crc32);
+ return bdk_ptr_to_phys(fdt_ptr);
+}
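+
+/* Illustrative layout produced above when DRAM is up (addresses are
+   examples only): the FDT lands at the higher of sbrk(0) and 4MB minus the
+   4KB-rounded FDT space, so the image, the FDT, and its CRC never overlap:
+
+       fdt_ptr ................ start of moved FDT (passed in X1)
+       fdt_ptr + fdt_size ..... CRC32, stored in FDT byte order */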
+
+/**
+ * Return a pointer to the device tree used for configuration
+ *
+ * @return FDT or NULL on failure
+ */
+void* bdk_config_get_fdt(void)
+{
+ return config_fdt;
+}
+
+/**
+ * Set the device tree used for configuration
+ *
+ * @param fdt Device tree to use. Memory is assumed to be from malloc() and bdk_config takes
+ * over ownership on success
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_config_set_fdt(void *fdt)
+{
+ int offset = fdt_path_offset(fdt, "/cavium,bdk"); /* Find our node */
+ if (offset < 0)
+ return -1;
+ free(config_fdt);
+ config_fdt = fdt;
+ config_node = offset;
+ return 0;
+}
+
+/**
+ * Write all default values to a FDT. Missing config items get defaults in the
+ * BDK config; this function adds those defaults to the FDT. This way other code
+ * gets the default value without needing special code.
+ *
+ * @param fdt FDT structure to fill defaults into
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_config_expand_defaults(void *fdt)
+{
+ const struct fdt_property *prop;
+
+ /* The best defaults may have changed while this image was running if DRAM
+ has been set up. Update the defaults before expanding them */
+ config_set_defaults();
+
+ int fdt_node = fdt_path_offset(fdt, "/cavium,bdk"); /* Find our node */
+ if (fdt_node < 0)
+ {
+ bdk_error("Failed to find top node, FDT error %d: %s\n",
+ fdt_node, fdt_strerror(fdt_node));
+ return -1;
+ }
+
+ /* Loop through all configuration items */
+ for (bdk_config_t cfg = 0; cfg < __BDK_CONFIG_END; cfg++)
+ {
+ /* Figure out the base name without any dot parameters */
+ const char *name = config_info[cfg].format;
+ const char *name_end = strchr(name, '.');
+ int name_len;
+ if (name_end)
+ name_len = name_end - name;
+ else
+ name_len = strlen(name);
+ /* Try and find the base name in the FDT */
+ prop = fdt_get_property_namelen(fdt, fdt_node, name, name_len, NULL);
+ /* If it wasn't found, then we need to add the default */
+ if (prop == NULL)
+ {
+ /* Create a copy of the name for use in FDT calls */
+ char temp_name[name_len + 1];
+ memcpy(temp_name, name, name_len);
+ temp_name[name_len] = 0;
+ /* Call the correct FDT call based on the type */
+ int status = 0;
+ switch (config_info[cfg].ctype)
+ {
+ case BDK_CONFIG_TYPE_INT:
+ {
+ char temp_value[20];
+ if (config_info[cfg].default_value < 10)
+ snprintf(temp_value, sizeof(temp_value), "%ld", config_info[cfg].default_value);
+ else
+ snprintf(temp_value, sizeof(temp_value), "0x%lx", config_info[cfg].default_value);
+ /* Store the default int value */
+ status = fdt_setprop_string(fdt, fdt_node, temp_name, temp_value);
+ break;
+ }
+ case BDK_CONFIG_TYPE_STR:
+ /* Store the default string value, if present */
+ if (config_info[cfg].default_value)
+ {
+ status = fdt_setprop_string(fdt, fdt_node, temp_name,
+ (const char *)config_info[cfg].default_value);
+ }
+ break;
+ case BDK_CONFIG_TYPE_STR_LIST:
+ /* Do nothing, string lists default to empty */
+ break;
+ case BDK_CONFIG_TYPE_BINARY:
+ /* Do nothing, binary defaults to empty */
+ break;
+ }
+ if (status < 0)
+ {
+ bdk_error("Failed to set default for %s, FDT error %d: %s\n",
+ temp_name, status, fdt_strerror(status));
+ return -1;
+ }
+ }
+ }
+ return 0;
+}
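+
+/* For example (hypothetical item name), if an integer item FOO-BAR defaults
+   to 4096 and is absent from the FDT, the loop above adds the equivalent
+   DTS property:
+
+       FOO-BAR = "0x1000";
+
+   Values below 10 are written as decimal strings, larger ones as hex. */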
+
+/**
+ * Some of the default config values can vary based on runtime parameters. This
+ * function sets those default parameters. It must be run before anyone calls
+ * bdk_config_get_*().
+ */
+static void config_set_defaults(void)
+{
+ bool isEmulation = bdk_is_platform(BDK_PLATFORM_EMULATOR);
+ /* This is Cavium's OUI with the local admin bit. We will use this as a
+ default as it won't collide with official addresses, but is sort of
+ part of the Cavium range. The lower three bytes will be updated with
+ the wafer info */
+ uint64_t mac_address = 0x020fb7000000ull;
+ /* Set the lower MAC address bits based on the chip manufacturing
+ information. This should give reasonable MAC address defaults
+ for production parts */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ {
+ BDK_CSR_INIT(fus_dat0, bdk_numa_local(), BDK_MIO_FUS_DAT0);
+ mac_address |= fus_dat0.u & 0xffffff;
+ }
+ else
+ {
+ mac_address |= bdk_fuse_read_range(bdk_numa_local(), BDK_FUS_FUSE_NUM_E_MFG_INFOX(0), 24);
+ }
+ config_info[BDK_CONFIG_MAC_ADDRESS].default_value = mac_address;
+
+ /* Set the number of packet buffers */
+ int num_packet_buffers = 4096;
+ /* If DRAM is set up, allocate 8K buffers for each of 16 ports plus some slop */
+ if (__bdk_is_dram_enabled(bdk_numa_master()))
+ num_packet_buffers = 8192 * 16 + 1024;
+ else if (isEmulation) {
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ num_packet_buffers = 4096 * 4;
+ }
+ config_info[BDK_CONFIG_NUM_PACKET_BUFFERS].default_value = num_packet_buffers;
+ config_info[BDK_CONFIG_PACKET_BUFFER_SIZE].default_value = 1024;
+
+ /* Asim doesn't scale to 48 cores well. Limit to 4 */
+ if (bdk_is_platform(BDK_PLATFORM_ASIM))
+ config_info[BDK_CONFIG_COREMASK].default_value = 0xf;
+ /* CN88XX pass 1.x doesn't support EA */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X))
+ config_info[BDK_CONFIG_PCIE_EA].default_value = 0;
+ /* Emulator only supports 4 cores */
+ if (isEmulation)
+ config_info[BDK_CONFIG_COREMASK].default_value = 0xf;
+}
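+
+/* Worked example (hypothetical fuse value): if the low 24 manufacturing
+   fuse bits read 0x123456, the default MAC computed above is
+   0x020fb7123456, i.e. 02:0f:b7:12:34:56 - the Cavium OUI 00:0f:b7 with
+   the locally administered bit set. */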
+
+/**
+ * BDK configuration items are stored in a device tree so they can be passed to
+ * other software later. This function creates the initial empty device tree
+ * used for BDK configuration items. The values will be populated as configuration
+ * files are read from flash.
+ */
+static void config_setup_fdt(void)
+{
+ const int FDT_SIZE = 0x10000;
+ config_fdt = calloc(1, FDT_SIZE);
+ if (!config_fdt)
+ bdk_fatal("Unable to allocate memory for config FDT\n");
+ if (fdt_create_empty_tree(config_fdt, FDT_SIZE) < 0)
+ bdk_fatal("Unable to create FDT for config\n");
+ config_node = fdt_add_subnode(config_fdt, 0, "cavium,bdk");
+ if (config_node < 0)
+ bdk_fatal("Unable to create cavium,bdk node in FDT\n");
+}
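+
+/* The empty tree created above is equivalent to this DTS skeleton:
+
+       /dts-v1/;
+       / {
+           cavium,bdk {
+           };
+       };
+*/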
+
+/**
+ * Parse a FDT and copy its properties to our configuration FDT
+ *
+ * @param fdt FDT to parse
+ * @param base_path Path of the node whose properties should be copied
+ *
+ * @return Zero on success, negative on failure
+ */
+static int config_parse_fdt(const void *fdt, const char *base_path)
+{
+ /* Check the FDT header */
+ int result = fdt_check_header(fdt);
+ if (result)
+ goto fail;
+
+ /* Find our node */
+ result = fdt_path_offset(fdt, base_path);
+ if (result < 0)
+ goto fail;
+
+ /* Copy all parameters to our in memory FDT */
+ int offset = fdt_first_property_offset(fdt, result);
+ while (offset >= 0)
+ {
+ const char *name = NULL;
+ int blob_size = 0;
+ const char *data = fdt_getprop_by_offset(fdt, offset, &name, &blob_size);
+ result = fdt_setprop(config_fdt, config_node, name, data, blob_size);
+ offset = fdt_next_property_offset(fdt, offset);
+ }
+ return 0;
+fail:
+ bdk_error("FDT error %d: %s\n", result, fdt_strerror(result));
+ return -1;
+}
+
+/**
+ * Load a FDT from a file and pull in its configuration properties
+ *
+ * @param filename File to read from
+ * @param offset Offset into the file to read from
+ *
+ * @return Zero on success, negative on failure
+ */
+static int config_load_file(const char *filename, uint64_t offset)
+{
+ uint64_t fdt_size = 0;
+ bdk_signed_flags_t sign_flags = BDK_SIGNED_FLAG_NONE;
+ if (offset)
+ sign_flags = BDK_SIGNED_FLAG_ALLOW_UNSIGNED | BDK_SIGNED_FLAG_NOT_ENCRYPTED;
+ void *fdt = bdk_signed_load(filename, offset, BDK_SIGNED_DTS, sign_flags, &fdt_size);
+ if (!fdt)
+ return -1;
+
+ /* Make sure the read succeeded */
+ if (ftd_size < (int)sizeof(struct fdt_header))
+ {
+ bdk_error("Invalid device tee %s\n", filename);
+ free(fdt);
+ return -1;
+ }
+
+ if (fdt_check_header(fdt))
+ {
+ bdk_error("Invalid FDT header read from %s\n", filename);
+ free(fdt);
+ return -1;
+ }
+
+ /* Make sure we read enough data to contain the FDT */
+ int correct_size = fdt_totalsize(fdt);
+ if ((int)ftd_size < correct_size)
+ {
+ bdk_error("Unable to read FDT from %s\n", filename);
+ free(fdt);
+ return -1;
+ }
+
+ /* Check if a CRC32 was added on the end of the FDT */
+ if ((int)ftd_size >= correct_size + 4)
+ {
+ uint32_t crc32 = bdk_crc32(fdt, correct_size, 0);
+ uint32_t correct_crc32 = *(uint32_t *)((const char *)fdt + correct_size);
+ /* CRC32 is stored in same endianness as FDT */
+ correct_crc32 = fdt32_to_cpu(correct_crc32);
+ if (crc32 != correct_crc32)
+ {
+ bdk_error("FDT failed CRC32 verification (%s)\n", filename);
+ free(fdt);
+ return -1;
+ }
+ //printf("PASS: FDT CRC32 verification (%s)\n", filename);
+ }
+
+ /* Parse the device tree, adding its configuration to ours */
+ if (config_parse_fdt(fdt, "/cavium,bdk"))
+ {
+ free(fdt);
+ return -1;
+ }
+
+ free(fdt);
+ return 0;
+}
+
+/**
+ * Internal BDK function to initialize the config system. Must be called before
+ * any configuration functions are called
+ */
+void __bdk_config_init(void)
+{
+ bool done_trust_init = false;
+ /* Set defaults that can vary dynamically at runtime */
+ config_set_defaults();
+
+ /* Register X1 is expected to be a device tree when we boot. Check that
+ the physical address seems correct, then load the device tree */
+ if ((__bdk_init_reg_x1 > 0) && /* Not zero */
+ (__bdk_init_reg_x1 < 0x1000000) && /* In the lower 16MB */
+ ((__bdk_init_reg_x1 & 0xfff) == 0)) /* Aligned on a 4KB boundary */
+ {
+ const void *fdt = (const void *)__bdk_init_reg_x1;
+ /* Check the FDT header */
+ int result = fdt_check_header(fdt);
+ if (result)
+ result = -1; /* Invalid tree */
+ else
+ {
+ int fdt_size = fdt_totalsize(fdt);
+ uint32_t crc32 = bdk_crc32(fdt, fdt_size, 0);
+ uint32_t correct_crc32 = *(uint32_t *)((const char *)fdt + fdt_size);
+ /* CRC32 is stored in same endianness as FDT */
+ correct_crc32 = fdt32_to_cpu(correct_crc32);
+ if (crc32 == correct_crc32)
+ {
+ //printf("Previous image FDT passed CRC32 verification(%p, size 0x%x, CRC32 %08x)\n", fdt, fdt_size, crc32);
+ result = fdt_path_offset(fdt, "/cavium,bdk"); /* Find our node */
+ }
+ else
+ {
+ bdk_error("Previous image FDT failed CRC32 verification(%p, size 0x%x)\n", fdt, fdt_size);
+ result = -1; /* Invalid tree */
+ }
+ }
+ /* If tree is valid so far, attempt to move it into our memory space */
+ if (result > 0)
+ {
+ /* 4KB extra room for growth */
+ const int fdt_size = fdt_totalsize(fdt) + 4096;
+ config_fdt = calloc(1, fdt_size);
+ if (config_fdt)
+ {
+ int result = fdt_move(fdt, config_fdt, fdt_size);
+ if (result == 0)
+ {
+ /* Find our node */
+ config_node = fdt_path_offset(config_fdt, "/cavium,bdk");
+ if (config_node > 0)
+ {
+ printf("Using configuration from previous image\n");
+ goto done;
+ }
+ else
+ {
+ bdk_error("Unable to find BDK node after move\n");
+ free(config_fdt);
+ config_node = 0;
+ config_fdt = NULL;
+ }
+ }
+ else
+ {
+ bdk_error("Unable to move passed device tree\n");
+ free(config_fdt);
+ config_fdt = NULL;
+ }
+ }
+ else
+ bdk_error("Failed to allocate memory for passed device tree (%d bytes)\n", fdt_size);
+ }
+ }
+
+ /* Create the global device tree used to store config items */
+ config_setup_fdt();
+ /* Setup trust level so reading device trees works */
+ __bdk_trust_init();
+ done_trust_init = true;
+
+ if (bdk_is_platform(BDK_PLATFORM_ASIM))
+ {
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ bdk_config_set_str("ASIM-CN88XX", BDK_CONFIG_BOARD_MODEL);
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ bdk_config_set_str("ASIM-CN83XX", BDK_CONFIG_BOARD_MODEL);
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ bdk_config_set_str("ASIM-CN81XX", BDK_CONFIG_BOARD_MODEL);
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN93XX))
+ bdk_config_set_str("ASIM-CN93XX", BDK_CONFIG_BOARD_MODEL);
+ }
+ else if (bdk_is_platform(BDK_PLATFORM_EMULATOR))
+ {
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ bdk_config_set_str("EMUL-CN88XX", BDK_CONFIG_BOARD_MODEL);
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ bdk_config_set_str("EMUL-CN83XX", BDK_CONFIG_BOARD_MODEL);
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ bdk_config_set_str("EMUL-CN81XX", BDK_CONFIG_BOARD_MODEL);
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN93XX))
+ bdk_config_set_str("EMUL-CN93XX", BDK_CONFIG_BOARD_MODEL);
+ }
+ else if (config_load_file("/rom/boardcfg.dtb", 0) == 0)
+ {
+ printf("Board manufacturing information loaded from ROM-FS\n");
+ }
+ /* Load manufacturing data from the top 64KB of flash */
+ else if (config_load_file("/boot", BDK_CONFIG_MANUFACTURING_ADDRESS) != 0)
+ {
+ printf("\33[1m"); /* Bold */
+ bdk_warn("\n");
+ bdk_warn("********************************************************\n");
+ bdk_warn("* Board manufacturing information not found. Program\n");
+ bdk_warn("* the board manufacturing information in the Setup menu.\n");
+ bdk_warn("********************************************************\n");
+ bdk_warn("\n");
+ printf("\33[0m"); /* Normal */
+ goto done;
+ }
+
+ const char *model = bdk_config_get_str(BDK_CONFIG_BOARD_MODEL);
+ const char *revision = bdk_config_get_str(BDK_CONFIG_BOARD_REVISION);
+
+ /* Load BOARD-REVISION.cfg if it is on ROM-FS */
+ if (model && revision)
+ {
+ char filename[64];
+ snprintf(filename, sizeof(filename), "/rom/%s-%s.dtb", model, revision);
+ if (config_load_file(filename, 0) == 0)
+ goto done;
+ }
+
+ /* Load BOARD.dtb if it is on ROM-FS */
+ if (model)
+ {
+ char filename[64];
+ snprintf(filename, sizeof(filename), "/rom/%s.dtb", model);
+ if (config_load_file(filename, 0) == 0)
+ goto done;
+ }
+
+ /* Load default.dtb if it is there */
+ if (config_load_file("/fatfs/default.dtb", 0) == 0)
+ goto done;
+
+ /* Load BOARD-REVISION.dtb if it is there */
+ if (model && revision)
+ {
+ char filename[64];
+ snprintf(filename, sizeof(filename), "/fatfs/%s-%s.dtb", model, revision);
+ if (config_load_file(filename, 0) == 0)
+ goto done;
+ }
+
+ /* Load BOARD.dtb if it is there */
+ if (model)
+ {
+ char filename[64];
+ snprintf(filename, sizeof(filename), "/fatfs/%s.dtb", model);
+ if (config_load_file(filename, 0) == 0)
+ goto done;
+ }
+
+ /* No board specific configuration was found. Warn the user */
+ printf("\33[1m"); /* Bold */
+ bdk_warn("\n");
+ bdk_warn("********************************************************\n");
+ bdk_warn("* Board configuration file not found. Either the board\n");
+ bdk_warn("* model is incorrect, or factory settings are not\n");
+ bdk_warn("* available. DTB file not found for board \"%s\".\n", model);
+ bdk_warn("********************************************************\n");
+ bdk_warn("\n");
+ printf("\33[0m"); /* Normal */
+
+done:
+ bdk_config_set_str(bdk_version_string(), BDK_CONFIG_VERSION);
+ /* Load the tracing level */
+ bdk_trace_enables = bdk_config_get_int(BDK_CONFIG_TRACE);
+ if (BDK_TRACE_OVERRIDE)
+ bdk_trace_enables = BDK_TRACE_OVERRIDE;
+ if (!done_trust_init)
+ __bdk_trust_init();
+}
diff --git a/src/vendorcode/cavium/bdk/libbdk-hal/bdk-gpio.c b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-gpio.c
new file mode 100644
index 0000000000..55f0dbf3f2
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-gpio.c
@@ -0,0 +1,197 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include "libbdk-arch/bdk-csrs-gpio.h"
+
+/* This code is an optional part of the BDK. It is only linked in
+ if BDK_REQUIRE() needs it */
+BDK_REQUIRE_DEFINE(GPIO);
+
+/**
+ * Initialize a single GPIO as either an input or output. If it is
+ * an output, also set its output value.
+ *
+ * @param node Node the GPIO is on
+ * @param gpio GPIO to initialize
+ * @param is_output Non zero if this GPIO should be an output
+ * @param output_value
+ * Value of the GPIO if it should be an output. Not used if the
+ * GPIO isn't an output.
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_gpio_initialize(bdk_node_t node, int gpio, int is_output, int output_value)
+{
+ if ((gpio >= 0) && (gpio < bdk_gpio_get_num()))
+ {
+ int gpio_group = gpio >> 6;
+ int gpio_index = gpio & 63;
+ if (output_value)
+ bdk_gpio_set(node, gpio_group, 1ull << gpio_index);
+ else
+ bdk_gpio_clear(node, gpio_group, 1ull << gpio_index);
+
+ BDK_CSR_DEFINE(cfg, BDK_GPIO_BIT_CFGX(gpio));
+ cfg.u = 0;
+ cfg.s.tx_oe = !!is_output;
+ BDK_CSR_WRITE(node, BDK_GPIO_BIT_CFGX(gpio), cfg.u);
+ }
+ else
+ {
+ bdk_error("bdk_gpio_initialize: Illegal GPIO\n");
+ return -1;
+ }
+ return 0;
+}
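+
+/* Example usage (illustrative): drive GPIO 4 high as an output and make
+   GPIO 5 an input:
+
+       bdk_gpio_initialize(node, 4, 1, 1);
+       bdk_gpio_initialize(node, 5, 0, 0);
+*/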
+
+
+/**
+ * GPIO Read Data
+ *
+ * @param node Node GPIO block is on
+ * @param gpio_block GPIO block to access. Each block contains up to 64 GPIOs
+ *
+ * @return Status of the GPIO pins for the given block
+ */
+uint64_t bdk_gpio_read(bdk_node_t node, int gpio_block)
+{
+ bdk_gpio_rx_dat_t gpio_rx_dat;
+ switch (gpio_block)
+ {
+ case 0:
+ gpio_rx_dat.u = BDK_CSR_READ(node, BDK_GPIO_RX_DAT);
+ break;
+ case 1:
+ gpio_rx_dat.u = BDK_CSR_READ(node, BDK_GPIO_RX1_DAT);
+ break;
+ default:
+ bdk_error("GPIO block %d not supported\n", gpio_block);
+ gpio_rx_dat.u = 0;
+ break;
+ }
+ return gpio_rx_dat.s.dat;
+}
+
+
+/**
+ * GPIO Clear pin
+ *
+ * @param node Node GPIO block is on
+ * @param gpio_block GPIO block to access. Each block contains up to 64 GPIOs
+ * @param clear_mask Bit mask to indicate which bits to drive to '0'.
+ */
+void bdk_gpio_clear(bdk_node_t node, int gpio_block, uint64_t clear_mask)
+{
+ switch (gpio_block)
+ {
+ case 0:
+ BDK_CSR_WRITE(node, BDK_GPIO_TX_CLR, clear_mask);
+ break;
+ case 1:
+ BDK_CSR_WRITE(node, BDK_GPIO_TX1_CLR, clear_mask);
+ break;
+ default:
+ bdk_error("GPIO block %d not supported\n", gpio_block);
+ break;
+ }
+}
+
+
+/**
+ * GPIO Set pin
+ *
+ * @param node Node GPIO block is on
+ * @param gpio_block GPIO block to access. Each block contains up to 64 GPIOs
+ * @param set_mask Bit mask to indicate which bits to drive to '1'.
+ */
+void bdk_gpio_set(bdk_node_t node, int gpio_block, uint64_t set_mask)
+{
+ switch (gpio_block)
+ {
+ case 0:
+ BDK_CSR_WRITE(node, BDK_GPIO_TX_SET, set_mask);
+ break;
+ case 1:
+ BDK_CSR_WRITE(node, BDK_GPIO_TX1_SET, set_mask);
+ break;
+ default:
+ bdk_error("GPIO block %d not supported\n", gpio_block);
+ break;
+ }
+}
+
+
+/** GPIO Select pin
+ *
+ * @param node CPU node
+ * @param gpio GPIO number
+ * @param pin Pin function select value written to GPIO_BIT_CFG(gpio)[PIN_SEL]
+ */
+void bdk_gpio_select_pin(bdk_node_t node, int gpio, int pin)
+{
+ if ((gpio < 0) || (gpio >= bdk_gpio_get_num()))
+ {
+ bdk_warn("bdk_gpio_select_pin: Illegal GPIO %d\n", gpio);
+ return;
+ }
+
+ BDK_CSR_MODIFY(c, node, BDK_GPIO_BIT_CFGX(gpio), c.s.pin_sel = pin);
+}
+
+
+/**
+ * Return the number of GPIO pins on this chip
+ *
+ * @return Number of GPIO pins
+ */
+int bdk_gpio_get_num(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 51;
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 48;
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 80;
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN93XX))
+ return 96;
+ else
+ {
+ bdk_error("bdk_gpio_get_num(): Unsupported chip");
+ return 0;
+ }
+}
diff --git a/src/vendorcode/cavium/bdk/libbdk-hal/bdk-l2c.c b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-l2c.c
new file mode 100644
index 0000000000..b1e2a88ce1
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-l2c.c
@@ -0,0 +1,270 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include "libbdk-arch/bdk-csrs-ap.h"
+#include "libbdk-arch/bdk-csrs-l2c.h"
+#include "libbdk-arch/bdk-csrs-l2c_cbc.h"
+#include "libbdk-arch/bdk-csrs-mio_fus.h"
+
+typedef struct
+{
+ int sets;
+ int ways;
+ bool is_locked;
+} l2_node_state_t;
+
+static l2_node_state_t l2_node_state[BDK_NUMA_MAX_NODES];
+
+/**
+ * Perform one time initialization of L2 for improved
+ * performance. This can be called after L2 is in use.
+ *
+ * @return Zero on success, negative on failure.
+ */
+int bdk_l2c_initialize(bdk_node_t node)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ {
+ /* Tell L2 to give the IOB statically higher priority compared to the
+ cores. This avoids conditions where IO blocks might be starved under
+ very high L2 loads */
+ BDK_CSR_MODIFY(c, node, BDK_L2C_CTL,
+ c.s.rsp_arb_mode = 1;
+ c.s.xmc_arb_mode = 0);
+ }
+
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X) && !bdk_is_platform(BDK_PLATFORM_ASIM))
+ {
+ /* Errata: (L2C-22279) RCAS/RSTC which hits S/S can use wrong compare data */
+ BDK_CSR_MODIFY(c, node, BDK_L2C_CTL,
+ c.s.dissblkdty = 1);
+ /* Errata: (L2C-22249) Broadcast invals can cause starvation on the INV bus */
+ for (int i = 0; i < 4; i++)
+ BDK_CSR_MODIFY(c, node, BDK_L2C_CBCX_SCRATCH(i),
+ c.s.invdly = 1);
+ }
+
+ // FIXME: Disable partial writes on pass 2 until it is debugged
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X) && !bdk_is_platform(BDK_PLATFORM_ASIM))
+ {
+ BDK_CSR_MODIFY(c, node, BDK_L2C_CTL,
+ c.s.dissblkdty = 1);
+ }
+
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX) && bdk_is_platform(BDK_PLATFORM_EMULATOR))
+ {
+ /* The emulator requires L2C_CTL[DISSBLKDTY] to be set */
+ BDK_CSR_MODIFY(c, node, BDK_L2C_CTL,
+ c.s.dissblkdty = 1);
+ }
+ return 0;
+}
+
+int bdk_l2c_get_core_way_partition(bdk_node_t node, int core)
+{
+ return (BDK_CSR_READ(node, BDK_L2C_WPAR_PPX(core)) & 0xffff);
+}
+
+int bdk_l2c_set_core_way_partition(bdk_node_t node, int core, uint32_t mask)
+{
+ uint32_t valid_mask = (1 << bdk_l2c_get_num_assoc(node)) - 1;
+ mask &= valid_mask;
+
+ BDK_CSR_WRITE(node, BDK_L2C_WPAR_PPX(core), mask);
+ return 0;
+}
+
+
+int bdk_l2c_set_hw_way_partition(bdk_node_t node, uint32_t mask)
+{
+ uint32_t valid_mask = (1 << bdk_l2c_get_num_assoc(node)) - 1;
+ mask &= valid_mask;
+
+ BDK_CSR_WRITE(node, BDK_L2C_WPAR_IOBX(0), mask);
+ return 0;
+}
+
+
+int bdk_l2c_get_hw_way_partition(bdk_node_t node)
+{
+ return (BDK_CSR_READ(node, BDK_L2C_WPAR_IOBX(0)) & 0xffff);
+}
+
+
+int bdk_l2c_lock_mem_region(bdk_node_t node, uint64_t start, uint64_t len)
+{
+ /* Round start/end to cache line boundaries */
+ len += start & BDK_CACHE_LINE_MASK;
+ start &= ~BDK_CACHE_LINE_MASK;
+ len = (len + BDK_CACHE_LINE_MASK) & ~BDK_CACHE_LINE_MASK;
+ void *ptr = (start) ? bdk_phys_to_ptr(start) : NULL;
+
+ while (len)
+ {
+ BDK_CACHE_LCK_L2(ptr);
+ ptr += BDK_CACHE_LINE_SIZE;
+ len -= BDK_CACHE_LINE_SIZE;
+ }
+ l2_node_state[node].is_locked = true;
+ return 0;
+}
+
+void bdk_l2c_flush(bdk_node_t node)
+{
+ /* The number of ways can be reduced with fuses, but the equations below
+ assume the max number of ways */
+ const int MAX_WAYS = 16;
+ int num_sets = bdk_l2c_get_num_sets(node);
+ int num_ways = bdk_l2c_get_num_assoc(node);
+
+ int is_rtg = 1; /* Clear remote tags */
+ for (int l2_way = 0; l2_way < num_ways; l2_way++)
+ {
+ for (int l2_set = 0; l2_set < num_sets; l2_set++)
+ {
+ uint64_t encoded = 128 * (l2_set + num_sets * (l2_way + (is_rtg * MAX_WAYS)));
+ BDK_CACHE_WBI_L2_INDEXED(encoded);
+ }
+ }
+
+ is_rtg = 0; /* Clear local tags */
+ for (int l2_way = 0; l2_way < num_ways; l2_way++)
+ {
+ for (int l2_set = 0; l2_set < num_sets; l2_set++)
+ {
+ uint64_t encoded = 128 * (l2_set + num_sets * (l2_way + (is_rtg * MAX_WAYS)));
+ BDK_CACHE_WBI_L2_INDEXED(encoded);
+ }
+ }
+ l2_node_state[node].is_locked = false;
+}
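+
+/* A sketch of the index encoding used above, assuming 128 byte cache lines:
+   bits [6:0] select the byte within a line, the set index sits directly
+   above them, then the way, with the RTG (remote tag) select above the
+   16-way field. Each WBI therefore writes back and invalidates exactly one
+   set/way tag at a time. */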
+
+int bdk_l2c_unlock_mem_region(bdk_node_t node, uint64_t start, uint64_t len)
+{
+ /* Round start/end to cache line boundaries */
+ len += start & BDK_CACHE_LINE_MASK;
+ start &= ~BDK_CACHE_LINE_MASK;
+ len = (len + BDK_CACHE_LINE_MASK) & ~BDK_CACHE_LINE_MASK;
+ void *ptr = (start) ? bdk_phys_to_ptr(start) : NULL;
+
+ while (len > 0)
+ {
+ /* Must use invalidate version to release lock */
+ BDK_CACHE_WBI_L2(ptr);
+ ptr += BDK_CACHE_LINE_SIZE;
+ len -= BDK_CACHE_LINE_SIZE;
+ }
+
+ l2_node_state[node].is_locked = false;
+ return 0;
+}
+
+
+int bdk_l2c_get_cache_size_bytes(bdk_node_t node)
+{
+ return bdk_l2c_get_num_sets(node) * bdk_l2c_get_num_assoc(node) * BDK_CACHE_LINE_SIZE;
+}
+
+/* Return the number of sets in the L2 Cache */
+int bdk_l2c_get_num_sets(bdk_node_t node)
+{
+ if (bdk_unlikely(l2_node_state[node].sets == 0))
+ {
+ /* Select the L2 cache */
+ bdk_ap_csselr_el1_t csselr_el1;
+ csselr_el1.u = 0;
+ csselr_el1.s.ind = 0;
+ csselr_el1.s.level = CAVIUM_IS_MODEL(CAVIUM_CN8XXX) ? 1 : 2;
+ BDK_MSR(CSSELR_EL1, csselr_el1.u);
+ /* Read its size */
+ bdk_ap_ccsidr_el1_t ccsidr_el1;
+ BDK_MRS(CCSIDR_EL1, ccsidr_el1.u);
+ /* Store it for use later */
+ l2_node_state[node].sets = ccsidr_el1.s.numsets + 1;
+ l2_node_state[node].ways = ccsidr_el1.s.associativity + 1;
+
+ /* Early chips didn't update the number of ways based on fusing */
+ if ((l2_node_state[node].ways == 16) && CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ {
+ /* The l2 can be reduced in 25% increments */
+ BDK_CSR_INIT(mio_fus_dat3, node, BDK_MIO_FUS_DAT3);
+ switch (mio_fus_dat3.s.l2c_crip)
+ {
+ case 3: /* 1/4 size */
+ l2_node_state[node].ways *= 1;
+ break;
+ case 2: /* 1/2 size */
+ l2_node_state[node].ways *= 2;
+ break;
+ case 1: /* 3/4 size */
+ l2_node_state[node].ways *= 3;
+ break;
+ default: /* Full size */
+ l2_node_state[node].ways *= 4;
+ break;
+ }
+ l2_node_state[node].ways /= 4;
+ }
+ }
+ return l2_node_state[node].sets;
+}
+
+/* Return the number of associations in the L2 Cache */
+int bdk_l2c_get_num_assoc(bdk_node_t node)
+{
+ /* Get the number of sets if the global sets/ways is not setup */
+ if (bdk_unlikely(l2_node_state[node].ways == 0))
+ bdk_l2c_get_num_sets(node);
+ return l2_node_state[node].ways;
+}
+
+/**
+ * Return true if the BDK has locked itself in L2
+ *
+ * @return
+ */
+int bdk_l2c_is_locked(bdk_node_t node)
+{
+ /* Determining the lock state of L2 requires reading exact tags from L2
+ which varies per chip. Rather than deal with that complexity, we just
+ keep a flag around saying if the L2 lock functions have been called.
+ This works for the BDK as its use of locking is very simple */
+ return l2_node_state[node].is_locked;
+}
+
diff --git a/src/vendorcode/cavium/bdk/libbdk-hal/bdk-twsi.c b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-twsi.c
new file mode 100644
index 0000000000..4fbb78a876
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-twsi.c
@@ -0,0 +1,318 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include "libbdk-arch/bdk-csrs-mio_tws.h"
+
+#define RECOVERY_UDELAY 5
+#define RECOVERY_CLK_CNT 9
+#define ARBLOST_UDELAY 5000 /* 5ms */
+
+/* This code is an optional part of the BDK. It is only linked in
+ if BDK_REQUIRE() needs it */
+BDK_REQUIRE_DEFINE(TWSI);
+
+/**
+ * Initialize the TWSI blocks. This just sets the clock rate.
+ * Many times stuff will work without calling this, but some
+ * TWSI devices will fail. This is normally called automatically
+ * in bdk-init-main.c.
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_twsix_initialize(bdk_node_t node)
+{
+ const int TWSI_BUS_FREQ = 100000; /* 100 KHz */
+ const int TWSI_THP = 24; /* TCLK half period (default 24) */
+ const int io_clock_hz = bdk_clock_get_rate(node, BDK_CLOCK_SCLK);
+ int N_divider;
+ int M_divider;
+
+ /* Set the TWSI clock to a conservative TWSI_BUS_FREQ. Compute the
+ clock's M divider based on the SCLK.
+ TWSI freq = (core freq) / (20 x (M+1) x (thp+1) x 2^N)
+ M = ((core freq) / (20 x (TWSI freq) x (thp+1) x 2^N)) - 1 */
+ for (N_divider = 0; N_divider < 8; N_divider++)
+ {
+ M_divider = (io_clock_hz / (20 * TWSI_BUS_FREQ * (TWSI_THP + 1) * (1 << N_divider))) - 1;
+ if (M_divider < 16)
+ break;
+ }
+
+ BDK_CSR_DEFINE(sw_twsi, BDK_MIO_TWSX_SW_TWSI(bus));
+ sw_twsi.u = 0;
+ sw_twsi.s.v = 1; /* Valid bit; hardware clears it when the operation completes */
+ sw_twsi.s.op = 0x6; /* See EOP field */
+ sw_twsi.s.r = 0; /* Select CLKCTL when R = 0 */
+ sw_twsi.s.eop_ia = 3; /* R=0 selects CLKCTL, R=1 selects STAT */
+ sw_twsi.s.data = ((M_divider & 0xf) << 3) | ((N_divider & 0x7) << 0);
+
+ int num_busses = 2;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ num_busses = 6;
+
+ for (int bus = 0; bus < num_busses; bus++)
+ {
+ /* Only init non-slave ports */
+ BDK_CSR_INIT(state, node, BDK_MIO_TWSX_SW_TWSI(bus));
+ if (!state.s.slonly)
+ BDK_CSR_WRITE(node, BDK_MIO_TWSX_SW_TWSI(bus), sw_twsi.u);
+ }
+ return 0;
+}
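+
+/* Worked example (assuming an 800 MHz SCLK): with THP = 24 the loop above
+   first tries N = 0:
+       M = 800000000 / (20 * 100000 * 25 * 1) - 1 = 15
+   Since 15 < 16, the search stops with N = 0, M = 15, giving
+       TWSI freq = 800 MHz / (20 * 16 * 25 * 1) = 100 kHz exactly. */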
+
+/**
+ * Do a twsi bus recovery in the case when the last transaction
+ * on the bus has been left unfinished.
+ *
+ * @param twsi_id which TWSI bus to use
+ */
+static void bdk_twsix_recover_bus(bdk_node_t node, int twsi_id)
+{
+ /* read TWSX_INT */
+ BDK_CSR_INIT(twsx_int, node, BDK_MIO_TWSX_INT(twsi_id));
+
+ for (int i = 0; i < RECOVERY_CLK_CNT * 2; i++)
+ {
+ if (!twsx_int.s.scl_ovr)
+ {
+ /* SCL shouldn't be low here */
+ if (!twsx_int.s.scl)
+ {
+ bdk_error("N%d.TWSI%d: SCL is stuck low\n", node, twsi_id);
+ return;
+ }
+
+ /* Break if SDA is high */
+ if (twsx_int.s.sda)
+ break;
+ }
+
+ twsx_int.s.scl_ovr = !twsx_int.s.scl_ovr;
+ BDK_CSR_WRITE(node, BDK_MIO_TWSX_INT(twsi_id), twsx_int.u);
+ bdk_wait_usec(RECOVERY_UDELAY);
+ }
+
+ /*
+ * Generate STOP condition using the register overrides
+ * in order to move the higher level controller out of
+ * the bad state. This is a workaround for the TWSI hardware.
+ */
+ twsx_int.s.scl_ovr = 1;
+ twsx_int.s.sda_ovr = 1;
+ BDK_CSR_WRITE(node, BDK_MIO_TWSX_INT(twsi_id), twsx_int.u);
+ bdk_wait_usec(RECOVERY_UDELAY);
+ twsx_int.s.scl_ovr = 0;
+ BDK_CSR_WRITE(node, BDK_MIO_TWSX_INT(twsi_id), twsx_int.u);
+ bdk_wait_usec(RECOVERY_UDELAY);
+ twsx_int.s.sda_ovr = 0;
+ BDK_CSR_WRITE(node, BDK_MIO_TWSX_INT(twsi_id), twsx_int.u);
+}
+
+/**
+ * Do a twsi read from a 7 bit device address using an (optional)
+ * internal address. Up to 4 bytes can be read at a time.
+ *
+ * @param twsi_id which TWSI bus to use
+ * @param dev_addr Device address (7 bit)
+ * @param internal_addr
+ * Internal address. Can be 0, 1 or 2 bytes in width
+ * @param num_bytes Number of data bytes to read (1-4)
+ * @param ia_width_bytes
+ * Internal address size in bytes (0, 1, or 2)
+ *
+ * @return Read data, or -1 on failure
+ */
+int64_t bdk_twsix_read_ia(bdk_node_t node, int twsi_id, uint8_t dev_addr, uint16_t internal_addr, int num_bytes, int ia_width_bytes)
+{
+ bdk_mio_twsx_sw_twsi_t sw_twsi_val;
+ bdk_mio_twsx_sw_twsi_ext_t twsi_ext;
+ int retry_limit = 5;
+
+ if (num_bytes < 1 || num_bytes > 4 || ia_width_bytes < 0 || ia_width_bytes > 2)
+ return -1;
+retry:
+ twsi_ext.u = 0;
+ sw_twsi_val.u = 0;
+ sw_twsi_val.s.v = 1;
+ sw_twsi_val.s.r = 1;
+ sw_twsi_val.s.sovr = 1;
+ sw_twsi_val.s.size = num_bytes - 1;
+ sw_twsi_val.s.addr = dev_addr;
+
+ if (ia_width_bytes > 0)
+ {
+ sw_twsi_val.s.op = 1;
+ sw_twsi_val.s.ia = (internal_addr >> 3) & 0x1f;
+ sw_twsi_val.s.eop_ia = internal_addr & 0x7;
+ if (ia_width_bytes == 2)
+ {
+ sw_twsi_val.s.eia = 1;
+ twsi_ext.s.ia = internal_addr >> 8;
+ BDK_CSR_WRITE(node, BDK_MIO_TWSX_SW_TWSI_EXT(twsi_id), twsi_ext.u);
+ }
+ }
+
+ BDK_CSR_WRITE(node, BDK_MIO_TWSX_SW_TWSI(twsi_id), sw_twsi_val.u);
+ if (BDK_CSR_WAIT_FOR_FIELD(node, BDK_MIO_TWSX_SW_TWSI(twsi_id), v, ==, 0, 10000))
+ {
+ bdk_warn("N%d.TWSI%d: Timeout waiting for read to complete...start recovering process\n",
+ node, twsi_id);
+ /* perform bus recovery */
+ bdk_twsix_recover_bus(node, twsi_id);
+ if (retry_limit-- > 0)
+ goto retry;
+
+ bdk_error("N%d.TWSI%d: Timeout waiting for operation to complete\n", node, twsi_id);
+ return -1;
+ }
+ sw_twsi_val.u = BDK_CSR_READ(node, BDK_MIO_TWSX_SW_TWSI(twsi_id));
+ if (!sw_twsi_val.s.r)
+ {
+ /* Check the reason for the failure. We may need to retry to handle multi-master
+ ** configurations.
+ ** Lost arbitration : 0x38, 0x68, 0xB0, 0x78
+ ** Core busy as slave: 0x80, 0x88, 0xA0, 0xA8, 0xB8, 0xC0, 0xC8
+ */
+ if (sw_twsi_val.s.data == 0x38
+ || sw_twsi_val.s.data == 0x68
+ || sw_twsi_val.s.data == 0xB0
+ || sw_twsi_val.s.data == 0x78
+ || sw_twsi_val.s.data == 0x80
+ || sw_twsi_val.s.data == 0x88
+ || sw_twsi_val.s.data == 0xA0
+ || sw_twsi_val.s.data == 0xA8
+ || sw_twsi_val.s.data == 0xB8
+ || sw_twsi_val.s.data == 0xC0
+ || sw_twsi_val.s.data == 0xC8)
+ {
+ /*
+ * One of the arbitration lost conditions is recognized.
+ * The TWSI hardware has switched to the slave mode and
+ * expects the STOP condition on the bus.
+ * Make a delay before next retry.
+ */
+ bdk_wait_usec(ARBLOST_UDELAY);
+ if (retry_limit-- > 0)
+ goto retry;
+ }
+ /* For all other errors, return an error code */
+ return -1;
+ }
+
+ return (sw_twsi_val.s.data & (0xFFFFFFFF >> (32 - num_bytes*8)));
+}
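+
+/* Example usage (illustrative, assuming an EEPROM at 7 bit address 0x50 on
+   bus 0): read one byte from internal address 0x10, using a 1 byte
+   internal address:
+
+       int64_t val = bdk_twsix_read_ia(node, 0, 0x50, 0x10, 1, 1);
+       if (val < 0)
+           bdk_error("TWSI read failed\n");
+*/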
+
+
+/**
+ * Write 1-8 bytes to a TWSI device using an internal address.
+ *
+ * @param twsi_id which TWSI interface to use
+ * @param dev_addr TWSI device address (7 bit only)
+ * @param internal_addr
+ * TWSI internal address (0, 8, or 16 bits)
+ * @param num_bytes Number of bytes to write (1-8)
+ * @param ia_width_bytes
+ * internal address width, in bytes (0, 1, 2)
+ * @param data Data to write. Data is written MSB first on the twsi bus, and
+ * only the lower num_bytes bytes of the argument are valid. (If
+ * a 2 byte write is done, only the low 2 bytes of the argument are
+ * used.)
+ *
+ * @return Zero on success, -1 on error
+ */
+int bdk_twsix_write_ia(bdk_node_t node, int twsi_id, uint8_t dev_addr, uint16_t internal_addr, int num_bytes, int ia_width_bytes, uint64_t data)
+{
+ bdk_mio_twsx_sw_twsi_t sw_twsi_val;
+ bdk_mio_twsx_sw_twsi_ext_t twsi_ext;
+ int retry_limit = 5;
+ int to;
+
+ if (num_bytes < 1 || num_bytes > 8 || ia_width_bytes < 0 || ia_width_bytes > 2)
+ return -1;
+
+retry:
+ twsi_ext.u = 0;
+ sw_twsi_val.u = 0;
+ sw_twsi_val.s.v = 1;
+ sw_twsi_val.s.sovr = 1;
+ sw_twsi_val.s.size = num_bytes - 1;
+ sw_twsi_val.s.addr = dev_addr;
+ sw_twsi_val.s.data = 0xFFFFFFFF & data;
+
+ if (ia_width_bytes > 0)
+ {
+ sw_twsi_val.s.op = 1;
+ sw_twsi_val.s.ia = (internal_addr >> 3) & 0x1f;
+ sw_twsi_val.s.eop_ia = internal_addr & 0x7;
+ }
+ if (ia_width_bytes == 2)
+ {
+ sw_twsi_val.s.eia = 1;
+ twsi_ext.s.ia = internal_addr >> 8;
+ }
+ if (num_bytes > 4)
+ twsi_ext.s.data = data >> 32;
+
+ BDK_CSR_WRITE(node, BDK_MIO_TWSX_SW_TWSI_EXT(twsi_id), twsi_ext.u);
+ BDK_CSR_WRITE(node, BDK_MIO_TWSX_SW_TWSI(twsi_id), sw_twsi_val.u);
+ if (BDK_CSR_WAIT_FOR_FIELD(node, BDK_MIO_TWSX_SW_TWSI(twsi_id), v, ==, 0, 10000))
+ {
+ bdk_warn("N%d.TWSI%d: Timeout waiting for write to complete...start recovering process\n",
+ node, twsi_id);
+ /* perform bus recovery */
+ bdk_twsix_recover_bus(node, twsi_id);
+ if (retry_limit-- > 0)
+ goto retry;
+
+ // Retries exhausted without success; report the error and return
+ bdk_error("N%d.TWSI%d: Timeout waiting for operation to complete\n", node, twsi_id);
+ return -1;
+ }
+
+ /* Poll until reads succeed, or polling times out */
+ to = 100;
+ while (to-- > 0)
+ {
+ if (bdk_twsix_read_ia(node, twsi_id, dev_addr, 0, 1, 0) >= 0)
+ break;
+ }
+ if (to <= 0)
+ return -1;
+
+ return 0;
+}
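+
+/* Example usage (illustrative, same hypothetical EEPROM as above): write
+   the single byte 0xAB to internal address 0x10:
+
+       if (bdk_twsix_write_ia(node, 0, 0x50, 0x10, 1, 1, 0xAB))
+           bdk_error("TWSI write failed\n");
+*/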
diff --git a/src/vendorcode/cavium/bdk/libbdk-os/bdk-init.c b/src/vendorcode/cavium/bdk/libbdk-os/bdk-init.c
new file mode 100644
index 0000000000..25d6b9eed3
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-os/bdk-init.c
@@ -0,0 +1,561 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include <stdio.h>
+#include <unistd.h>
+#include "libbdk-arch/bdk-csrs-ap.h"
+#include "libbdk-arch/bdk-csrs-l2c.h"
+#include "libbdk-arch/bdk-csrs-l2c_tad.h"
+#include "libbdk-arch/bdk-csrs-mio_boot.h"
+#include "libbdk-arch/bdk-csrs-rom.h"
+#include "libbdk-arch/bdk-csrs-uaa.h"
+
+uint64_t __bdk_init_reg_x0; /* The contents of X0 when this image started */
+uint64_t __bdk_init_reg_x1; /* The contents of X1 when this image started */
+uint64_t __bdk_init_reg_pc; /* The contents of PC when this image started */
+static int64_t __bdk_alive_coremask[BDK_NUMA_MAX_NODES];
+
+/**
+ * Set the baud rate on a UART
+ *
+ * @param uart uart to set
+ * @param baudrate Baud rate (9600, 19200, 115200, etc)
+ * @param use_flow_control
+ * Non zero if hardware flow control should be enabled
+ */
+void bdk_set_baudrate(bdk_node_t node, int uart, int baudrate, int use_flow_control)
+{
+ /* 1.2.1 Initialization Sequence (Power-On/Hard/Cold Reset) */
+ /* 1. Wait for IOI reset (srst_n) to deassert. */
+ /* 2. Assert all resets:
+ a. UAA reset: UCTL_CTL[UAA_RST] = 1
+ b. UCTL reset: UCTL_CTL[UCTL_RST] = 1 */
+ BDK_CSR_MODIFY(c, node, BDK_UAAX_UCTL_CTL(uart),
+ c.s.uaa_rst = 1;
+ c.s.uctl_rst = 1);
+
+ /* 3. Configure the HCLK:
+ a. Reset the clock dividers: UCTL_CTL[H_CLKDIV_RST] = 1.
+ b. Select the HCLK frequency
+ i. UCTL_CTL[H_CLKDIV] = desired value,
+ ii. UCTL_CTL[H_CLKDIV_EN] = 1 to enable the HCLK.
+ iii. Readback UCTL_CTL to ensure the values take effect.
+ c. Deassert the HCLK clock divider reset: UCTL_CTL[H_CLKDIV_RST] = 0. */
+ BDK_CSR_MODIFY(c, node, BDK_UAAX_UCTL_CTL(uart),
+ c.s.h_clkdiv_sel = 3; /* Run at SCLK / 6, matches emulator */
+ c.s.h_clk_byp_sel = 0;
+ c.s.h_clk_en = 1);
+ BDK_CSR_MODIFY(c, node, BDK_UAAX_UCTL_CTL(uart),
+ c.s.h_clkdiv_rst = 0);
+
+ /* 4. Wait 20 HCLK cycles from step 3 for HCLK to start and async fifo
+ to properly reset. */
+ bdk_wait(200); /* Overkill */
+
+ /* 5. Deassert UCTL and UAHC resets:
+ a. UCTL_CTL[UCTL_RST] = 0
+ b. Wait 10 HCLK cycles.
+ c. UCTL_CTL[UAHC_RST] = 0
+ d. You will have to wait 10 HCLK cycles before accessing any
+ HCLK-only registers. */
+ BDK_CSR_MODIFY(c, node, BDK_UAAX_UCTL_CTL(uart), c.s.uctl_rst = 0);
+ bdk_wait(100); /* Overkill */
+ BDK_CSR_MODIFY(c, node, BDK_UAAX_UCTL_CTL(uart), c.s.uaa_rst = 0);
+ bdk_wait(100); /* Overkill */
+
+ /* 6. Enable conditional SCLK of UCTL by writing UCTL_CTL[CSCLK_EN] = 1. */
+ BDK_CSR_MODIFY(c, node, BDK_UAAX_UCTL_CTL(uart), c.s.csclk_en = 1);
+
+ /* 7. Initialize the integer and fractional baud rate divider registers
+ UARTIBRD and UARTFBRD as follows:
+ a. Baud Rate Divisor = UARTCLK/(16xBaud Rate) = BRDI + BRDF
+ b. The fractional register BRDF, m is calculated as integer(BRDF x 64 + 0.5)
+ Example calculation:
+ If the required baud rate is 230400 and hclk = 4MHz then:
+ Baud Rate Divisor = (4x10^6)/(16x230400) = 1.085
+ This means BRDI = 1 and BRDF = 0.085.
+ Therefore, fractional part, BRDF = integer((0.085x64)+0.5) = 5
+ Generated baud rate divider = 1+5/64 = 1.078 */
+ uint64_t divisor_x_64 = bdk_clock_get_rate(node, BDK_CLOCK_SCLK) / (baudrate * 16 * 6 / 64);
+ if (bdk_is_platform(BDK_PLATFORM_EMULATOR))
+ {
+ /* The hardware emulator currently runs the UART at a fixed rate */
+ divisor_x_64 = 64;
+ }
+ BDK_CSR_MODIFY(c, node, BDK_UAAX_IBRD(uart),
+ c.s.baud_divint = divisor_x_64 >> 6);
+ BDK_CSR_MODIFY(c, node, BDK_UAAX_FBRD(uart),
+ c.s.baud_divfrac = divisor_x_64 & 0x3f);
+
+ /* 8. Program the line control register UAA(0..1)_LCR_H and the control
+ register UAA(0..1)_CR */
+ BDK_CSR_MODIFY(c, node, BDK_UAAX_LCR_H(uart),
+ c.s.sps = 0; /* No parity */
+ c.s.wlen = 3; /* 8 bits */
+ c.s.fen = 1; /* FIFOs enabled */
+ c.s.stp2 = 0; /* Use one stop bit, not two */
+ c.s.eps = 0; /* No parity */
+ c.s.pen = 0; /* No parity */
+ c.s.brk = 0); /* Don't send a break */
+ BDK_CSR_MODIFY(c, node, BDK_UAAX_CR(uart),
+ c.s.ctsen = use_flow_control;
+ c.s.rtsen = use_flow_control;
+ c.s.out1 = 1; /* Drive data carrier detect */
+ c.s.rts = 0; /* Don't override RTS */
+ c.s.dtr = 0; /* Don't override DTR */
+ c.s.rxe = 1; /* Enable receive */
+ c.s.txe = 1; /* Enable transmit */
+ c.s.lbe = 0; /* Disable loopback */
+ c.s.uarten = 1); /* Enable uart */
+}
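+
+/* Worked example (assuming an 800 MHz SCLK, so HCLK = SCLK / 6): for
+   115200 baud the computation above gives
+       divisor_x_64 = 800000000 / (115200 * 16 * 6 / 64) = 4629
+   so BAUD_DIVINT = 4629 >> 6 = 72 and BAUD_DIVFRAC = 4629 & 0x3f = 21,
+   i.e. a divider of 72 + 21/64 = 72.33, matching (800 MHz / 6) / (16 * 115200). */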
+
+/**
+ * First C code run when a BDK application starts. It is called by bdk-start.S.
+ *
+ * @param image_crc A CRC32 of the entire image before any variables might have been updated by C.
+ * This should match the CRC32 in the image header.
+ * @param reg_x0 The contents of the X0 register when the image started. In images loaded after
+ * the boot stub, this contains a "environment" string containing "BOARD=xxx". The
+ * use of this is deprecated as it has been replaced with a expandable device tree
+ * in X1.
+ * @param reg_x1 The contents of the X1 register when the image started. For all images after the
+ * boot stub, this contains a physical address of a device tree in memory. This
+ * should be used by all images to identify and configure the board we are running
+ * on.
+ * @param reg_pc This is the PC the code started at before relocation. This is useful for
+ * the first stage to determine if it started from trusted or non-trusted code.
+ */
+void __bdk_init(uint32_t image_crc, uint64_t reg_x0, uint64_t reg_x1, uint64_t reg_pc) __attribute((noreturn));
+void __bdk_init(uint32_t image_crc, uint64_t reg_x0, uint64_t reg_x1, uint64_t reg_pc)
+{
+ extern void __bdk_exception_current_el_sync_sp0();
+ BDK_MSR(VBAR_EL3, __bdk_exception_current_el_sync_sp0);
+ BDK_MSR(VBAR_EL2, __bdk_exception_current_el_sync_sp0);
+ BDK_MSR(VBAR_EL1, __bdk_exception_current_el_sync_sp0);
+
+ /* Use Cavium specific function to change memory to normal instead of
+ device attributes. DCVA47=1 makes unmapped addresses behave as
+ non-shared memory (not inner or outer shared in ARM speak) */
+ bdk_ap_cvmmemctl0_el1_t cvmmemctl0_el1;
+ BDK_MRS(s3_0_c11_c0_4, cvmmemctl0_el1.u);
+ cvmmemctl0_el1.s.dcva47 = 1;
+ BDK_MSR(s3_0_c11_c0_4, cvmmemctl0_el1.u);
+
+
+ /* Setup running with no mmu */
+ bdk_ap_sctlr_el3_t sctlr_el3;
+ BDK_MRS(SCTLR_EL3, sctlr_el3.u);
+ sctlr_el3.s.wxn = 0; /* No write perm changes */
+ sctlr_el3.s.i = 1; /* Enable Icache */
+ sctlr_el3.s.sa = 1; /* Enable stack alignment checking */
+ sctlr_el3.s.cc = 1; /* Enable Dcache */
+ sctlr_el3.s.aa = 0; /* Allow unaligned accesses */
+ sctlr_el3.s.m = 0; /* Disable MMU */
+ BDK_MSR(SCTLR_EL3, sctlr_el3.u);
+
+ bdk_node_t node = bdk_numa_local();
+ bdk_numa_set_exists(node);
+
+ /* Default color, Reset scroll region and goto bottom */
+ static const char BANNER_1[] = "\33[0m\33[1;r\33[100;1H"
+ "\n\n\nCavium SOC\n";
+ static const char BANNER_2[] = "Locking L2 cache\n";
+ static const char BANNER_CRC_RIGHT[] = "PASS: CRC32 verification\n";
+ static const char BANNER_CRC_WRONG[] = "FAIL: CRC32 verification\n";
+ static const char BANNER_3[] = "Transferring to thread scheduler\n";
+
+ BDK_MSR(TPIDR_EL3, 0);
+
+ if (bdk_is_boot_core())
+ {
+ /* Initialize the platform */
+ __bdk_platform_init();
+ if (!bdk_is_platform(BDK_PLATFORM_EMULATOR) && CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ {
+ BDK_CSR_INIT(l2c_oci_ctl, node, BDK_L2C_OCI_CTL);
+ if (l2c_oci_ctl.s.iofrcl)
+ {
+ /* CCPI isn't being used, so don't reset if the links change */
+ BDK_CSR_WRITE(node, BDK_RST_OCX, 0);
+ BDK_CSR_READ(node, BDK_RST_OCX);
+ /* Force CCPI links down so they aren't trying to run while
+ we're configuring the QLMs */
+ __bdk_init_ccpi_early(1);
+ }
+ }
+
+ /* AP-23192: The DAP in pass 1.0 has an issue where its state isn't cleared for
+ cores in reset. Put the DAPs in reset as their associated cores are
+ also in reset */
+ if (!bdk_is_platform(BDK_PLATFORM_EMULATOR) && CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_0))
+ BDK_CSR_WRITE(node, BDK_RST_DBG_RESET, BDK_CSR_READ(node, BDK_RST_PP_RESET));
+
+ /* Enable the timer */
+ BDK_MSR(CNTFRQ_EL0, BDK_GTI_RATE); /* Needed for Asim */
+ bdk_clock_setup(node);
+
+ /* Only setup the uarts if they haven't been already setup */
+ BDK_CSR_INIT(uctl_ctl0, node, BDK_UAAX_UCTL_CTL(0));
+ if (!uctl_ctl0.s.h_clk_en)
+ bdk_set_baudrate(node, 0, BDK_UART_BAUDRATE, 0);
+ BDK_CSR_INIT(uctl_ctl1, node, BDK_UAAX_UCTL_CTL(1));
+ if (!uctl_ctl1.s.h_clk_en)
+ bdk_set_baudrate(node, 1, BDK_UART_BAUDRATE, 0);
+
+ __bdk_fs_init_early();
+ if (BDK_SHOW_BOOT_BANNERS)
+ write(1, BANNER_1, sizeof(BANNER_1)-1);
+
+ /* Only lock L2 if DDR3 isn't initialized */
+ if (bdk_is_platform(BDK_PLATFORM_HW) && !__bdk_is_dram_enabled(node))
+ {
+ if (BDK_TRACE_ENABLE_INIT)
+ write(1, BANNER_2, sizeof(BANNER_2)-1);
+ /* Lock the entire cache for chips with less than 4MB of
+ L2/LLC. Larger chips can use the 1/4 of the cache to
+ speed up DRAM init and testing */
+ int lock_size = bdk_l2c_get_cache_size_bytes(node);
+ if (lock_size >= (4 << 20))
+ lock_size = lock_size * 3 / 4;
+ bdk_l2c_lock_mem_region(node, bdk_numa_get_address(node, 0), lock_size);
+ /* The locked region isn't considered dirty by L2. Do a
+ read/write of each cache line to force each to be dirty. This
+ is needed across the whole region to make sure the L2 dirty bits
+ are all up to date */
+ volatile uint64_t *ptr = bdk_phys_to_ptr(bdk_numa_get_address(node, 8));
+ /* The above pointer got address 8 to avoid NULL pointer checking
+ in bdk_phys_to_ptr(). Correct it here */
+ ptr--;
+ uint64_t *end = bdk_phys_to_ptr(bdk_numa_get_address(node, bdk_l2c_get_cache_size_bytes(node)));
+ while (ptr < end)
+ {
+ *ptr = *ptr;
+ ptr++;
+ }
+ /* The above locking will cause L2 to load zeros without DRAM setup.
+ This will cause L2C_TADX_INT[rddislmc], which we suppress below */
+ BDK_CSR_DEFINE(l2c_tadx_int, BDK_L2C_TADX_INT_W1C(0));
+ l2c_tadx_int.u = 0;
+ l2c_tadx_int.s.wrdislmc = 1;
+ l2c_tadx_int.s.rddislmc = 1;
+ l2c_tadx_int.s.rdnxm = 1;
+
+ BDK_CSR_WRITE(node, BDK_L2C_TADX_INT_W1C(0), l2c_tadx_int.u);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) || CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ {
+ BDK_CSR_WRITE(node, BDK_L2C_TADX_INT_W1C(1), l2c_tadx_int.u);
+ BDK_CSR_WRITE(node, BDK_L2C_TADX_INT_W1C(2), l2c_tadx_int.u);
+ BDK_CSR_WRITE(node, BDK_L2C_TADX_INT_W1C(3), l2c_tadx_int.u);
+ }
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ {
+ BDK_CSR_WRITE(node, BDK_L2C_TADX_INT_W1C(4), l2c_tadx_int.u);
+ BDK_CSR_WRITE(node, BDK_L2C_TADX_INT_W1C(5), l2c_tadx_int.u);
+ BDK_CSR_WRITE(node, BDK_L2C_TADX_INT_W1C(6), l2c_tadx_int.u);
+ BDK_CSR_WRITE(node, BDK_L2C_TADX_INT_W1C(7), l2c_tadx_int.u);
+ }
+ }
+
+ /* Validate the image CRC */
+ extern void _start();
+ uint32_t *ptr_crc32 = (uint32_t *)(_start + 16);
+ uint32_t correct_crc = bdk_le32_to_cpu(*ptr_crc32);
+ if (correct_crc == image_crc)
+ write(1, BANNER_CRC_RIGHT, sizeof(BANNER_CRC_RIGHT) - 1);
+ else
+ write(1, BANNER_CRC_WRONG, sizeof(BANNER_CRC_WRONG) - 1);
+
+ if (BDK_TRACE_ENABLE_INIT)
+ write(1, BANNER_3, sizeof(BANNER_3)-1);
+ bdk_thread_initialize();
+ }
+
+ /* Enable the core timer */
+ BDK_MSR(CNTFRQ_EL0, BDK_GTI_RATE); /* Needed for Asim */
+ bdk_ap_cntps_ctl_el1_t cntps_ctl_el1;
+ cntps_ctl_el1.u = 0;
+ cntps_ctl_el1.s.imask = 1;
+ cntps_ctl_el1.s.enable = 1;
+ BDK_MSR(CNTPS_CTL_EL1, cntps_ctl_el1.u);
+
+ /* Setup an exception stack in case we crash */
+ int EX_STACK_SIZE = 16384;
+ void *exception_stack = malloc(EX_STACK_SIZE);
+ extern void __bdk_init_exception_stack(void *ptr);
+ __bdk_init_exception_stack(exception_stack + EX_STACK_SIZE);
+
+ bdk_atomic_add64(&__bdk_alive_coremask[node], bdk_core_to_mask());
+
+ /* Record our input registers for use later */
+ __bdk_init_reg_x0 = reg_x0;
+ __bdk_init_reg_x1 = reg_x1;
+ __bdk_init_reg_pc = reg_pc;
+ bdk_thread_first(__bdk_init_main, 0, NULL, 0);
+}
+
+/**
+ * Call this function to take secondary cores out of reset and have
+ * them start running threads
+ *
+ * @param node Node to use in a Numa setup. Can be an exact ID or a special
+ * value.
+ * @param coremask Cores to start. Zero is a shortcut for all.
+ *
+ * @return Zero on success, negative on failure.
+ */
+int bdk_init_cores(bdk_node_t node, uint64_t coremask)
+{
+ extern void __bdk_start_cores();
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ {
+ /* Write the address of the main entry point */
+ BDK_TRACE(INIT, "N%d: Setting address for boot jump\n", node);
+ BDK_CSR_WRITE(node, BDK_MIO_BOOT_AP_JUMP, (uint64_t)__bdk_start_cores);
+ }
+ else
+ {
+ BDK_TRACE(INIT, "N%d: Setting ROM boot code\n", node);
+ /* Assembly for ROM memory:
+ d508711f ic ialluis
+ d503201f nop
+ 58000040 ldr x0, 328 <branch_addr>
+ d61f0000 br x0
+ branch_addr:
+ Memory is little endian, so 64 bit constants have the first
+ instruction in the low word */
+ BDK_CSR_WRITE(node, BDK_ROM_MEMX(0), 0xd503201fd508711f);
+ BDK_CSR_WRITE(node, BDK_ROM_MEMX(1), 0xd61f000058000040);
+ BDK_CSR_WRITE(node, BDK_ROM_MEMX(2), (uint64_t)__bdk_start_cores);
+ }
+
+ /* Choose all cores by default */
+ if (coremask == 0)
+ coremask = -1;
+
+ /* Limit to the cores that aren't already running */
+ coremask &= ~__bdk_alive_coremask[node];
+
+ /* Limit to the cores that are specified in configuration menu */
+ uint64_t config_coremask = bdk_config_get_int(BDK_CONFIG_COREMASK);
+ if (config_coremask)
+ coremask &= config_coremask;
+
+ /* Limit to the cores that exist */
+ coremask &= (1ull<<bdk_get_num_cores(node)) - 1;
+
+ uint64_t reset = BDK_CSR_READ(node, BDK_RST_PP_RESET);
+ BDK_TRACE(INIT, "N%d: Cores currently in reset: 0x%lx\n", node, reset);
+ uint64_t need_reset_off = reset & coremask;
+ if (need_reset_off)
+ {
+ BDK_TRACE(INIT, "N%d: Taking cores out of reset (0x%lx)\n", node, need_reset_off);
+ BDK_CSR_WRITE(node, BDK_RST_PP_RESET, reset & ~need_reset_off);
+ /* Wait for cores to finish coming out of reset */
+ bdk_wait_usec(1);
+ if (BDK_CSR_WAIT_FOR_FIELD(node, BDK_RST_PP_PENDING, pend, ==, 0, 100000))
+ bdk_error("Timeout wating for reset pending to clear");
+ /* AP-23192: The DAP in pass 1.0 has an issue where its state isn't cleared for
+ cores in reset. Put the DAPs in reset as their associated cores are
+ also in reset */
+ if (!bdk_is_platform(BDK_PLATFORM_EMULATOR) && CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_0))
+ BDK_CSR_WRITE(node, BDK_RST_DBG_RESET, reset & ~need_reset_off);
+ }
+
+ BDK_TRACE(INIT, "N%d: Wait up to 1s for the cores to boot\n", node);
+ uint64_t timeout = bdk_clock_get_rate(bdk_numa_local(), BDK_CLOCK_TIME) + bdk_clock_get_count(BDK_CLOCK_TIME);
+ while ((bdk_clock_get_count(BDK_CLOCK_TIME) < timeout) && ((bdk_atomic_get64(&__bdk_alive_coremask[node]) & coremask) != coremask))
+ {
+ /* Tight spin, no thread schedules */
+ }
+
+ if ((bdk_atomic_get64(&__bdk_alive_coremask[node]) & coremask) != coremask)
+ {
+ bdk_error("Node %d: Some cores failed to start. Alive mask 0x%lx, requested 0x%lx\n",
+ node, __bdk_alive_coremask[node], coremask);
+ return -1;
+ }
+ BDK_TRACE(INIT, "N%d: All cores booted\n", node);
+ return 0;
+}
+
+/**
+ * Put cores back in reset and power them down
+ *
+ * @param node Node to update
+ * @param coremask Each set bit is a core to put into reset. Cores already in reset are unaffected
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_reset_cores(bdk_node_t node, uint64_t coremask)
+{
+ extern void __bdk_reset_thread(int arg1, void *arg2);
+
+ /* Limit to the cores that exist */
+ coremask &= (1ull<<bdk_get_num_cores(node)) - 1;
+
+ /* Update which cores are in reset */
+ uint64_t reset = BDK_CSR_READ(node, BDK_RST_PP_RESET);
+ BDK_TRACE(INIT, "N%d: Cores currently in reset: 0x%lx\n", node, reset);
+ coremask &= ~reset;
+ BDK_TRACE(INIT, "N%d: Cores to put into reset: 0x%lx\n", node, coremask);
+
+ /* Check if everything is already done */
+ if (coremask == 0)
+ return 0;
+
+ int num_cores = bdk_get_num_cores(node);
+ for (int core = 0; core < num_cores; core++)
+ {
+ uint64_t my_mask = 1ull << core;
+ /* Skip cores not in mask */
+ if ((coremask & my_mask) == 0)
+ continue;
+ BDK_TRACE(INIT, "N%d: Telling core %d to go into reset\n", node, core);
+ if (bdk_thread_create(node, my_mask, __bdk_reset_thread, 0, NULL, 0))
+ {
+ bdk_error("Failed to create thread for putting core in reset");
+ continue;
+ }
+ /* Clear the core in the alive mask */
+ bdk_atomic_fetch_and_bclr64_nosync((uint64_t*)&__bdk_alive_coremask[node], my_mask);
+ }
+
+ BDK_TRACE(INIT, "N%d: Waiting for all reset bits to be set\n", node);
+ uint64_t timeout = bdk_clock_get_rate(bdk_numa_local(), BDK_CLOCK_TIME) + bdk_clock_get_count(BDK_CLOCK_TIME);
+ while (bdk_clock_get_count(BDK_CLOCK_TIME) < timeout)
+ {
+ reset = BDK_CSR_READ(node, BDK_RST_PP_RESET);
+ if ((reset & coremask) == coremask)
+ break;
+ bdk_thread_yield();
+ }
+ /* AP-23192: The DAP in pass 1.0 has an issue where its state isn't cleared for
+ cores in reset. Put the DAPs in reset as their associated cores are
+ also in reset */
+ if (!bdk_is_platform(BDK_PLATFORM_EMULATOR) && CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_0))
+ BDK_CSR_WRITE(node, BDK_RST_DBG_RESET, BDK_CSR_READ(node, BDK_RST_PP_RESET));
+
+ BDK_TRACE(INIT, "N%d: Cores now in reset: 0x%lx\n", node, reset);
+
+ return ((reset & coremask) == coremask) ? 0 : -1;
+}
+
+/**
+ * Call this function to take secondary nodes and cores out of
+ * reset and have them start running threads
+ *
+ * @param skip_cores If non-zero, cores are not started. Only the nodes are set up
+ * @param ccpi_sw_gbaud
+ * If CCPI is in software mode, this is the speed the CCPI QLMs will be configured
+ * for
+ *
+ * @return Zero on success, negative on failure.
+ */
+int bdk_init_nodes(int skip_cores, int ccpi_sw_gbaud)
+{
+ int result = 0;
+ int do_oci_init = (__bdk_init_ccpi_links != NULL);
+
+ /* Only init OCI/CCPI on chips that support it */
+ do_oci_init &= CAVIUM_IS_MODEL(CAVIUM_CN88XX);
+
+ /* Check that the BDK config says multi-node is enabled */
+ if (bdk_config_get_int(BDK_CONFIG_MULTI_NODE) == 0)
+ do_oci_init = 0;
+
+ /* Simulation under Asim is a special case. Multi-node is simulated, but
+ not the details of the low-level link */
+ if (do_oci_init && bdk_is_platform(BDK_PLATFORM_ASIM))
+ {
+ bdk_numa_set_exists(0);
+ bdk_numa_set_exists(1);
+ /* Skip the rest in simulation */
+ do_oci_init = 0;
+ }
+
+ if (do_oci_init)
+ {
+ if (__bdk_init_ccpi_links(ccpi_sw_gbaud) == 0)
+ {
+ /* Don't run node init if L2C_OCI_CTL shows that it has already
+ been done */
+ BDK_CSR_INIT(l2c_oci_ctl, bdk_numa_local(), BDK_L2C_OCI_CTL);
+ if (l2c_oci_ctl.s.enaoci == 0)
+ result |= __bdk_init_ccpi_multinode();
+ }
+ }
+
+ /* Start cores on all nodes unless disabled */
+ if (!skip_cores)
+ {
+ for (bdk_node_t node=0; node<BDK_NUMA_MAX_NODES; node++)
+ {
+ if (bdk_numa_exists(node))
+ result |= bdk_init_cores(node, 0);
+ }
+ }
+ return result;
+}
+
+/**
+ * Get the coremask of the cores actively running the BDK. Doesn't count cores
+ * that aren't booted.
+ *
+ * @param node Node to get the coremask for
+ *
+ * @return 64bit bitmask
+ */
+uint64_t bdk_get_running_coremask(bdk_node_t node)
+{
+ return __bdk_alive_coremask[node];
+}
+
+/**
+ * Return the number of cores actively running in the BDK for the given node.
+ * Not an inline so it can be called from LUA.
+ *
+ * @param node Node to get the core count for
+ *
+ * @return Number of cores running. Doesn't count cores that aren't booted
+ */
+int bdk_get_num_running_cores(bdk_node_t node)
+{
+ return __builtin_popcountl(bdk_get_running_coremask(node));
+}
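+
+/* Hedged usage sketch (illustrative, not part of the imported sources):
+ bring up all cores on the local node, then report what is running.
+ example_bring_up_cores is an assumed name used only for this example. */
+#if 0
+static void example_bring_up_cores(void)
+{
+ bdk_node_t node = bdk_numa_local(); /* the node we booted on */
+ if (bdk_init_cores(node, 0)) /* coremask 0 = all cores */
+ bdk_error("Core bring-up failed\n");
+ printf("Running cores: %d (mask 0x%lx)\n",
+ bdk_get_num_running_cores(node),
+ bdk_get_running_coremask(node));
+}
+#endif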
+
diff --git a/src/vendorcode/cavium/bdk/libbdk-os/bdk-thread.c b/src/vendorcode/cavium/bdk/libbdk-os/bdk-thread.c
new file mode 100644
index 0000000000..df1d02864b
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-os/bdk-thread.c
@@ -0,0 +1,384 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include <stdio.h>
+#include <malloc.h>
+
+#define STACK_CANARY 0x0BADBADBADBADBADull
+
+typedef struct bdk_thread
+{
+ struct bdk_thread *next;
+ uint64_t coremask;
+ uint64_t gpr[32]; /* Reg 31 is SP */
+ struct _reent lib_state;
+ uint64_t stack_canary;
+ uint64_t stack[0];
+} bdk_thread_t;
+
+typedef struct
+{
+ bdk_thread_t* head;
+ bdk_thread_t* tail;
+ bdk_spinlock_t lock;
+ int64_t __padding1[16-3]; /* Stats in different cache line for speed */
+ int64_t stat_num_threads;
+ int64_t stat_no_schedulable_threads;
+ int64_t stat_next_calls;
+ int64_t stat_next_walks;
+ int64_t __padding2[16-4];
+} bdk_thread_node_t;
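+
+/* A minimal layout check, assuming the 128-byte cache lines of these
+ parts (16 x int64_t per line); the paddings above exist so the queue
+ pointers and the statistics never share a line. Sketch only: */
+#if 0
+_Static_assert(sizeof(bdk_thread_node_t) == 2 * 128,
+ "head/tail/lock and the stats should occupy one cache line each");
+#endif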
+
+static bdk_thread_node_t bdk_thread_node[BDK_NUMA_MAX_NODES];
+
+extern void __bdk_thread_switch(bdk_thread_t* next_context, int delete_old);
+
+/**
+ * Main thread body for all threads
+ *
+ * @param func User function to call
+ * @param arg0 First argument to the user function
+ * @param arg1 Second argument to the user function
+ */
+static void __bdk_thread_body(bdk_thread_func_t func, int arg0, void *arg1)
+{
+ func(arg0, arg1);
+ bdk_thread_destroy();
+}
+
+
+/**
+ * Initialize the BDK thread library
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_thread_initialize(void)
+{
+ bdk_zero_memory(bdk_thread_node, sizeof(bdk_thread_node));
+ _REENT_INIT_PTR(&__bdk_thread_global_reent);
+ return 0;
+}
+
+static bdk_thread_t *__bdk_thread_next(void)
+{
+ bdk_thread_node_t *t_node = &bdk_thread_node[bdk_numa_local()];
+ uint64_t coremask = bdk_core_to_mask();
+
+ bdk_atomic_add64_nosync(&t_node->stat_next_calls, 1);
+ bdk_thread_t *prev = NULL;
+ bdk_thread_t *next = t_node->head;
+ int walks = 0;
+ while (next && !(next->coremask & coremask))
+ {
+ prev = next;
+ next = next->next;
+ walks++;
+ }
+ if (walks)
+ bdk_atomic_add64_nosync(&t_node->stat_next_walks, walks);
+
+ if (next)
+ {
+ if (t_node->tail == next)
+ t_node->tail = prev;
+ if (prev)
+ prev->next = next->next;
+ else
+ t_node->head = next->next;
+ next->next = NULL;
+ }
+ else
+ bdk_atomic_add64_nosync(&t_node->stat_no_schedulable_threads, 1);
+
+ return next;
+}
+
+/**
+ * Yield the current thread and run a new one
+ */
+void bdk_thread_yield(void)
+{
+ if (BDK_DBG_MAGIC_ENABLE && (bdk_numa_local() == bdk_numa_master()))
+ bdk_dbg_check_magic();
+ bdk_thread_node_t *t_node = &bdk_thread_node[bdk_numa_local()];
+ bdk_thread_t *current;
+ BDK_MRS_NV(TPIDR_EL3, current);
+
+ /* Yield can be called without a thread context during core init. The
+ cores call bdk_wait_usec(), which yields. In this case yielding
+ does nothing */
+ if (bdk_unlikely(!current))
+ return;
+
+ if (bdk_unlikely(current->stack_canary != STACK_CANARY))
+ bdk_fatal("bdk_thread_yield() detected a stack overflow\n");
+
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ bdk_sso_process_work();
+
+ if (t_node->head == NULL)
+ return;
+
+ bdk_spinlock_lock(&t_node->lock);
+
+ /* Find the first thread that can run on this core */
+ bdk_thread_t *next = __bdk_thread_next();
+
+ /* If next is NULL then there are no other threads ready to run and we
+ will continue without doing anything */
+ if (next)
+ {
+ __bdk_thread_switch(next, 0);
+ /* Unlock performed in __bdk_thread_switch_complete */
+ return;
+ }
+ bdk_spinlock_unlock(&t_node->lock);
+}
+
+
+/**
+ * Create a new thread and return it. The thread will not be scheduled
+ * as it isn't put in the thread list.
+ *
+ * @param coremask Mask of cores the thread can run on. Each set bit is an allowed
+ * core. Zero and -1 are both shortcuts for all cores.
+ * @param func Function to run as a thread
+ * @param arg0 First argument to the function
+ * @param arg1 Second argument to the function
+ * @param stack_size Stack size for the new thread. Set to zero for the system default.
+ *
+ * @return Thread or NULL on failure
+ */
+static void *__bdk_thread_create(uint64_t coremask, bdk_thread_func_t func, int arg0, void *arg1, int stack_size)
+{
+ bdk_thread_t *thread;
+ if (!stack_size)
+ stack_size = BDK_THREAD_DEFAULT_STACK_SIZE;
+
+ thread = memalign(16, sizeof(bdk_thread_t) + stack_size);
+ if (thread == NULL)
+ {
+ bdk_error("Unable to allocate memory for new thread\n");
+ return NULL;
+ }
+ memset(thread, 0, sizeof(bdk_thread_t) + stack_size);
+ if (coremask == 0)
+ coremask = -1;
+ thread->coremask = coremask;
+ thread->gpr[0] = (uint64_t)func; /* x0 = Argument 0 to __bdk_thread_body */
+ thread->gpr[1] = arg0; /* x1 = Argument 1 to __bdk_thread_body */
+ thread->gpr[2] = (uint64_t)arg1; /* x2 = Argument 2 to __bdk_thread_body */
+ thread->gpr[29] = 0; /* x29 = Frame pointer */
+ thread->gpr[30] = (uint64_t)__bdk_thread_body; /* x30 = Link register */
+ thread->gpr[31] = (uint64_t)thread->stack + stack_size; /* x31 = Stack pointer */
+ if (thread->gpr[31] & 0xf)
+ bdk_fatal("Stack not aligned 0x%lx\n", thread->gpr[31]);
+ _REENT_INIT_PTR(&thread->lib_state);
+ extern void __sinit(struct _reent *);
+ __sinit(&thread->lib_state);
+ thread->stack_canary = STACK_CANARY;
+ thread->next = NULL;
+ return thread;
+}
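+
+/* Note on the register image above: the context-switch code is expected
+ to restore x0-x2 and return through x30, so a brand-new thread "returns"
+ into __bdk_thread_body(func, arg0, arg1) on its own 16-byte-aligned
+ stack. */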
+
+
+/**
+ * Create a new thread. The thread may be scheduled to any of the
+ * cores supplied in the coremask. Note that a single thread is
+ * created and may only run on one core at a time. The thread may
+ * not start executing until the next yield call if all cores in
+ * the coremask are currently busy.
+ *
+ * @param node Node to use in a Numa setup. Can be an exact ID or a
+ * special value.
+ * @param coremask Mask of cores the thread can run on. Each set bit is an allowed
+ * core. Zero and -1 are both shortcuts for all cores.
+ * @param func Function to run as a thread
+ * @param arg0 First argument to the function
+ * @param arg1 Second argument to the function
+ * @param stack_size Stack size for the new thread. Set to zero for the system default.
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_thread_create(bdk_node_t node, uint64_t coremask, bdk_thread_func_t func, int arg0, void *arg1, int stack_size)
+{
+ bdk_thread_node_t *t_node = &bdk_thread_node[node];
+ bdk_thread_t *thread = __bdk_thread_create(coremask, func, arg0, arg1, stack_size);
+ if (thread == NULL)
+ return -1;
+
+ bdk_atomic_add64_nosync(&t_node->stat_num_threads, 1);
+ bdk_spinlock_lock(&t_node->lock);
+ if (t_node->tail)
+ t_node->tail->next = thread;
+ else
+ t_node->head = thread;
+ t_node->tail = thread;
+ bdk_spinlock_unlock(&t_node->lock);
+ BDK_SEV;
+ return 0;
+}
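+
+/* Hedged usage sketch (illustrative only): queue a worker that may run on
+ any core of the local node. worker_fn and its arguments are assumptions
+ made up for this example, not BDK symbols. */
+#if 0
+static void worker_fn(int arg0, void *arg1)
+{
+ printf("worker started, arg0=%d\n", arg0);
+}
+
+static void example_spawn(void)
+{
+ /* coremask 0 = any core; stack_size 0 = system default */
+ if (bdk_thread_create(bdk_numa_local(), 0, worker_fn, 42, NULL, 0))
+ bdk_error("Thread create failed\n");
+}
+#endif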
+
+
+/**
+ * Destroy the currently running thread. This never returns.
+ */
+void bdk_thread_destroy(void)
+{
+ bdk_thread_node_t *t_node = &bdk_thread_node[bdk_numa_local()];
+ bdk_thread_t *current;
+ BDK_MRS_NV(TPIDR_EL3, current);
+ if (bdk_unlikely(!current))
+ bdk_fatal("bdk_thread_destroy() called without thread context\n");
+ if (bdk_unlikely(current->stack_canary != STACK_CANARY))
+ bdk_fatal("bdk_thread_destroy() detected a stack overflow\n");
+
+ fflush(NULL);
+ bdk_atomic_add64_nosync(&t_node->stat_num_threads, -1);
+
+ while (1)
+ {
+ if (BDK_DBG_MAGIC_ENABLE && (bdk_numa_local() == bdk_numa_master()))
+ bdk_dbg_check_magic();
+ if (t_node->head)
+ {
+ bdk_spinlock_lock(&t_node->lock);
+ /* Find the first thread that can run on this core */
+ bdk_thread_t *next = __bdk_thread_next();
+
+ /* If next is NULL then there are no other threads ready to run and we
+ will continue without doing anything */
+ if (next)
+ {
+ __bdk_thread_switch(next, 1);
+ bdk_fatal("bdk_thread_destroy() should never get here\n");
+ }
+ bdk_spinlock_unlock(&t_node->lock);
+ }
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ bdk_sso_process_work();
+ BDK_WFE;
+ }
+}
+
+struct _reent __bdk_thread_global_reent;
+struct _reent *__bdk_thread_getreent(void)
+{
+ bdk_thread_t *current;
+ BDK_MRS_NV(TPIDR_EL3, current);
+ if (current)
+ return &current->lib_state;
+ else
+ return &__bdk_thread_global_reent;
+}
+
+void __bdk_thread_switch_complete(bdk_thread_t* old_context, int delete_old)
+{
+ bdk_thread_node_t *t_node = &bdk_thread_node[bdk_numa_local()];
+ if (bdk_unlikely(delete_old))
+ {
+ bdk_spinlock_unlock(&t_node->lock);
+ free(old_context);
+ }
+ else
+ {
+ if (bdk_likely(old_context))
+ {
+ if (t_node->tail)
+ t_node->tail->next = old_context;
+ else
+ t_node->head = old_context;
+ t_node->tail = old_context;
+ }
+ bdk_spinlock_unlock(&t_node->lock);
+ if (bdk_likely(old_context))
+ BDK_SEV;
+ }
+}
+
+
+/**
+ * Called to create the initial thread for a CPU. Must be called
+ * once for each CPU.
+ *
+ * @param func Function to run as new thread. It is guaranteed that this will
+ * be the next thread run by the core.
+ * @param arg0 First thread argument
+ * @param arg1 Second thread argument
+ * @param stack_size Initial stack size, or zero for the default
+ */
+void bdk_thread_first(bdk_thread_func_t func, int arg0, void *arg1, int stack_size)
+{
+ bdk_thread_node_t *t_node = &bdk_thread_node[bdk_numa_local()];
+ void *thread = __bdk_thread_create(bdk_core_to_mask(), func, arg0, arg1, stack_size);
+ if (thread)
+ {
+ bdk_atomic_add64_nosync(&t_node->stat_num_threads, 1);
+ bdk_spinlock_lock(&t_node->lock);
+ __bdk_thread_switch(thread, 0);
+ }
+ bdk_fatal("Create of __bdk_init_main thread failed\n");
+}
+
+/**
+ * Display statistics about the number of threads and scheduling
+ */
+void bdk_thread_show_stats()
+{
+ for (bdk_node_t node = BDK_NODE_0; node < BDK_NUMA_MAX_NODES; node++)
+ {
+ if (!bdk_numa_exists(node))
+ continue;
+ bdk_thread_node_t *t_node = &bdk_thread_node[node];
+ printf("Node %d\n", node);
+ printf(" Active threads: %ld\n", t_node->stat_num_threads);
+ printf(" Schedule checks: %ld\n", t_node->stat_next_calls);
+ int64_t div = t_node->stat_next_calls;
+ if (!div)
+ div = 1;
+ printf(" Average walk depth: %ld\n",
+ t_node->stat_next_walks / div);
+ printf(" Not switching: %ld (%ld%%)\n",
+ t_node->stat_no_schedulable_threads,
+ t_node->stat_no_schedulable_threads * 100 / div);
+ bdk_atomic_set64(&t_node->stat_next_calls, 0);
+ bdk_atomic_set64(&t_node->stat_next_walks, 0);
+ bdk_atomic_set64(&t_node->stat_no_schedulable_threads, 0);
+ }
+}
diff --git a/src/vendorcode/cavium/bdk/libbdk-trust/bdk-trust.c b/src/vendorcode/cavium/bdk/libbdk-trust/bdk-trust.c
new file mode 100644
index 0000000000..27c3294479
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-trust/bdk-trust.c
@@ -0,0 +1,286 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include "libbdk-arch/bdk-csrs-fusf.h"
+#include "libbdk-arch/bdk-csrs-rom.h"
+
+/* The define BDK_TRUST_HARD_BLOW_NV controls whether the BDK will
+ hard blow the secure NV counter on boot. This is needed for a
+ production system, but can be dangerous in a development
+ environment. The default value of 0 is to prevent bricking of
+ chips due to CSIB[NVCOUNT] mistakes. BDK_TRUST_HARD_BLOW_NV must
+ be changed to a 1 for production. The code below will display a
+ warning if BDK_TRUST_HARD_BLOW_NV=0 in a trusted boot to remind
+ you */
+#define BDK_TRUST_HARD_BLOW_NV 0
+
+/* The CSIB used to boot will be stored here by bsk-start.S */
+union bdk_rom_csib_s __bdk_trust_csib __attribute__((section("init")));
+static bdk_trust_level_t __bdk_trust_level = BDK_TRUST_LEVEL_BROKEN;
+
+/**
+ * Update the fused secure NV counter to reflect the CSIB[NVCOUNT] value. In
+ * production systems, be sure to set BDK_TRUST_HARD_BLOW_NV=1.
+ */
+static void __bdk_program_nv_counter(void)
+{
+ int hw_nv = bdk_trust_get_nv_counter();
+ int csib_nv = __bdk_trust_csib.s.nvcnt;
+
+ if (!BDK_TRUST_HARD_BLOW_NV)
+ {
+ printf("\33[1m"); /* Bold */
+ bdk_warn("\n");
+ bdk_warn("********************************************************\n");
+ bdk_warn("* Configured for soft blow of secure NV counter. This\n");
+ bdk_warn("* build is not suitable for production trusted boot.\n");
+ bdk_warn("********************************************************\n");
+ bdk_warn("\n");
+ printf("\33[0m"); /* Normal */
+ }
+
+ /* Check if the CSIB NV counter is less than the HW fused values.
+ This means the image is an old rollback. Refuse to run */
+ if (csib_nv < hw_nv)
+ bdk_fatal("CSIB[NVCOUNT] is less than FUSF_CTL[ROM_T_CNT]. Image rollback not allowed\n");
+ /* If the CSIB NV counter matches the HW fuses, everything is
+ good */
+ if (csib_nv == hw_nv)
+ return;
+ /* CSIB NV counter is larger than the HW fuses. We must blow
+ fuses to move the hardware counter forward, protecting from
+ image rollback */
+ if (BDK_TRUST_HARD_BLOW_NV)
+ {
+ BDK_TRACE(INIT, "Trust: Hard blow secure NV counter to %d\n", csib_nv);
+ uint64_t v = 1ull << BDK_FUSF_FUSE_NUM_E_ROM_T_CNTX(csib_nv - 1);
+ bdk_fuse_field_hard_blow(bdk_numa_master(), BDK_FUSF_FUSE_NUM_E_FUSF_LCK, v, 0);
+ }
+ else
+ {
+ BDK_TRACE(INIT, "Trust: Soft blow secure NV counter to %d\n", csib_nv);
+ bdk_fuse_field_soft_blow(bdk_numa_master(), BDK_FUSF_FUSE_NUM_E_ROM_T_CNTX(csib_nv - 1));
+ }
+}
+
+/**
+ * Called by boot stub (TBL1FW) to initialize the state of trust
+ */
+void __bdk_trust_init(void)
+{
+ extern uint64_t __bdk_init_reg_pc; /* The contents of PC when this image started */
+ const bdk_node_t node = bdk_numa_local();
+ volatile uint64_t *huk = bdk_phys_to_ptr(bdk_numa_get_address(node, BDK_FUSF_HUKX(0)));
+
+ /* Non-trusted boot address */
+ if (__bdk_init_reg_pc == 0x120000)
+ {
+ __bdk_trust_level = BDK_TRUST_LEVEL_NONE;
+ if (huk[0] | huk[1])
+ {
+ BDK_TRACE(INIT, "Trust: Initial image, Non-trusted boot with HUK\n");
+ goto fail_trust;
+ }
+ else
+ {
+ BDK_TRACE(INIT, "Trust: Initial image, Non-trusted boot without HUK\n");
+ goto skip_trust;
+ }
+ }
+
+ if (__bdk_init_reg_pc != 0x150000)
+ {
+ /* Not the first image */
+ BDK_CSR_INIT(rst_boot, node, BDK_RST_BOOT);
+ if (!rst_boot.s.trusted_mode)
+ {
+ __bdk_trust_level = BDK_TRUST_LEVEL_NONE;
+ BDK_TRACE(INIT, "Trust: Secondary image, non-trusted boot\n");
+ goto skip_trust;
+ }
+ int csibsize = 0;
+ const union bdk_rom_csib_s *csib = bdk_config_get_blob(&csibsize, BDK_CONFIG_TRUST_CSIB);
+ if (!csib)
+ {
+ __bdk_trust_level = BDK_TRUST_LEVEL_NONE;
+ BDK_TRACE(INIT, "Trust: Secondary image, non-trusted boot\n");
+ goto skip_trust;
+ }
+ if (csibsize != sizeof(__bdk_trust_csib))
+ {
+ BDK_TRACE(INIT, "Trust: Secondary image, Trusted boot with corrupt CSIB, trust broken\n");
+ goto fail_trust;
+ }
+ /* Record our trust level */
+ switch (csib->s.crypt)
+ {
+ case 0:
+ __bdk_trust_level = BDK_TRUST_LEVEL_SIGNED;
+ BDK_TRACE(INIT, "Trust: Secondary image, Trused boot, no encryption\n");
+ goto success_trust;
+ case 1:
+ __bdk_trust_level = BDK_TRUST_LEVEL_SIGNED_SSK;
+ BDK_TRACE(INIT, "Trust: Secondary image, Trused boot, SSK encryption\n");
+ goto success_trust;
+ case 2:
+ __bdk_trust_level = BDK_TRUST_LEVEL_SIGNED_BSSK;
+ BDK_TRACE(INIT, "Trust: Secondary image, Trused boot, BSSK encryption\n");
+ goto success_trust;
+ default:
+ __bdk_trust_level = BDK_TRUST_LEVEL_BROKEN;
+ BDK_TRACE(INIT, "Trust: Secondary image, Trusted boot, Corrupt CSIB[crypt], trust broken\n");
+ goto fail_trust;
+ }
+ }
+
+ /* Copy the Root of Trust public key out of the CSIB */
+ volatile uint64_t *rot_pub_key = bdk_key_alloc(node, 64);
+ if (!rot_pub_key)
+ {
+ __bdk_trust_level = BDK_TRUST_LEVEL_BROKEN;
+ BDK_TRACE(INIT, "Trust: Failed to allocate ROT memory, trust broken\n");
+ goto fail_trust;
+ }
+ rot_pub_key[0] = bdk_le64_to_cpu(__bdk_trust_csib.s.rotpk0);
+ rot_pub_key[1] = bdk_le64_to_cpu(__bdk_trust_csib.s.rotpk1);
+ rot_pub_key[2] = bdk_le64_to_cpu(__bdk_trust_csib.s.rotpk2);
+ rot_pub_key[3] = bdk_le64_to_cpu(__bdk_trust_csib.s.rotpk3);
+ rot_pub_key[4] = bdk_le64_to_cpu(__bdk_trust_csib.s.rotpk4);
+ rot_pub_key[5] = bdk_le64_to_cpu(__bdk_trust_csib.s.rotpk5);
+ rot_pub_key[6] = bdk_le64_to_cpu(__bdk_trust_csib.s.rotpk6);
+ rot_pub_key[7] = bdk_le64_to_cpu(__bdk_trust_csib.s.rotpk7);
+ bdk_config_set_int(bdk_ptr_to_phys((void*)rot_pub_key), BDK_CONFIG_TRUST_ROT_ADDR);
+ BDK_TRACE(INIT, "Trust: ROT %016lx %016lx %016lx %016lx %016lx %016lx %016lx %016lx\n",
+ bdk_cpu_to_be64(rot_pub_key[0]), bdk_cpu_to_be64(rot_pub_key[1]),
+ bdk_cpu_to_be64(rot_pub_key[2]), bdk_cpu_to_be64(rot_pub_key[3]),
+ bdk_cpu_to_be64(rot_pub_key[4]), bdk_cpu_to_be64(rot_pub_key[5]),
+ bdk_cpu_to_be64(rot_pub_key[6]), bdk_cpu_to_be64(rot_pub_key[7]));
+
+ /* Update the secure NV counter with the value in the CSIB */
+ __bdk_program_nv_counter();
+
+ /* Create the BSSK */
+ if (huk[0] | huk[1])
+ {
+ uint64_t iv[2] = {0, 0};
+ volatile uint64_t *bssk = bdk_key_alloc(node, 16);
+ if (!bssk)
+ {
+ __bdk_trust_level = BDK_TRUST_LEVEL_BROKEN;
+ BDK_TRACE(INIT, "Trust: Failed to allocate BSSK memory, trust broken\n");
+ goto fail_trust;
+ }
+ BDK_TRACE(INIT, "Trust: Calculating BSSK\n");
+ uint64_t tmp_bssk[2];
+ tmp_bssk[0] = __bdk_trust_csib.s.fs0;
+ tmp_bssk[1] = __bdk_trust_csib.s.fs1;
+ bdk_aes128cbc_decrypt((void*)huk, (void*)tmp_bssk, 16, iv);
+ bssk[0] = tmp_bssk[0];
+ bssk[1] = tmp_bssk[1];
+ tmp_bssk[0] = 0;
+ tmp_bssk[1] = 0;
+ bdk_config_set_int(bdk_ptr_to_phys((void*)bssk), BDK_CONFIG_TRUST_BSSK_ADDR);
+ //BDK_TRACE(INIT, "Trust: BSSK %016lx %016lx\n", bdk_cpu_to_be64(bssk[0]), bdk_cpu_to_be64(bssk[1]));
+ }
+
+ /* Record our trust level */
+ switch (__bdk_trust_csib.s.crypt)
+ {
+ case 0:
+ __bdk_trust_level = BDK_TRUST_LEVEL_SIGNED;
+ BDK_TRACE(INIT, "Trust: Trused boot, no encryption\n");
+ break;
+ case 1:
+ __bdk_trust_level = BDK_TRUST_LEVEL_SIGNED_SSK;
+ BDK_TRACE(INIT, "Trust: Trused boot, SSK encryption\n");
+ break;
+ case 2:
+ __bdk_trust_level = BDK_TRUST_LEVEL_SIGNED_BSSK;
+ BDK_TRACE(INIT, "Trust: Trused boot, BSSK encryption\n");
+ break;
+ default:
+ __bdk_trust_level = BDK_TRUST_LEVEL_BROKEN;
+ goto fail_trust;
+ }
+
+ /* We started at the trusted boot address, CSIB should be
+ valid */
+ bdk_config_set_blob(sizeof(__bdk_trust_csib), &__bdk_trust_csib, BDK_CONFIG_TRUST_CSIB);
+success_trust:
+ bdk_signed_load_public();
+ return;
+
+fail_trust:
+ /* Hide secrets */
+ BDK_CSR_MODIFY(c, node, BDK_RST_BOOT,
+ c.s.dis_huk = 1);
+ BDK_TRACE(INIT, "Trust: Secrets Hidden\n");
+skip_trust:
+ /* Erase CSIB as it is invalid */
+ memset(&__bdk_trust_csib, 0, sizeof(__bdk_trust_csib));
+ bdk_config_set_blob(0, NULL, BDK_CONFIG_TRUST_CSIB);
+}
+
+/**
+ * Returns the current level of trust. Must be called after
+ * __bdk_trust_init()
+ *
+ * @return Enumerated trust level, see bdk_trust_level_t
+ */
+bdk_trust_level_t bdk_trust_get_level(void)
+{
+ return __bdk_trust_level;
+}
+
+/**
+ * Return the current secure NV counter stored in the fuses
+ *
+ * @return NV counter (0-31)
+ */
+int bdk_trust_get_nv_counter(void)
+{
+ /* Count leading zeros in FUSF_CTL[ROM_T_CNT] to determine the
+ hardware NV value */
+ BDK_CSR_INIT(fusf_ctl, bdk_numa_master(), BDK_FUSF_CTL);
+ int hw_nv = 0;
+ if (fusf_ctl.s.rom_t_cnt)
+ hw_nv = 32 - __builtin_clz(fusf_ctl.s.rom_t_cnt);
+ return hw_nv;
+}
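+
+/* The fuse field is a thermometer code: blowing counter value N sets bit
+ N-1, so ROM_T_CNT = 0x7 decodes as 32 - __builtin_clz(0x7) = 3. A
+ hedged sketch of the decode in isolation: */
+#if 0
+static int example_decode_rom_t_cnt(uint32_t rom_t_cnt)
+{
+ /* zero means no fuses blown; otherwise the count is the index of the
+ highest set bit, plus one */
+ return rom_t_cnt ? 32 - __builtin_clz(rom_t_cnt) : 0;
+}
+#endif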
+
diff --git a/src/vendorcode/cavium/bdk/libdram/dram-csr.h b/src/vendorcode/cavium/bdk/libdram/dram-csr.h
new file mode 100644
index 0000000000..ffe1472a0b
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libdram/dram-csr.h
@@ -0,0 +1,86 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * Functions and macros for libdram access to CSR. These build
+ * on the normal BDK functions to allow logging of CSRs based on
+ * the libdram verbosity level. Internal use only.
+ */
+
+/**
+ * Write a CSR, possibly logging it based on the verbosity
+ * level. You should use DRAM_CSR_WRITE() as a convenient
+ * wrapper.
+ *
+ * @param node
+ * @param csr_name
+ * @param type
+ * @param busnum
+ * @param size
+ * @param address
+ * @param value
+ */
+#ifdef DRAM_CSR_WRITE_INLINE
+static inline void dram_csr_write(bdk_node_t node, const char *csr_name, bdk_csr_type_t type, int busnum, int size, uint64_t address, uint64_t value) __attribute__((always_inline));
+static inline void dram_csr_write(bdk_node_t node, const char *csr_name, bdk_csr_type_t type, int busnum, int size, uint64_t address, uint64_t value)
+{
+ VB_PRT(VBL_CSRS, "N%d: DDR Config %s[%016lx] => %016lx\n", node, csr_name, address, value);
+ bdk_csr_write(node, type, busnum, size, address, value);
+}
+#else
+extern void dram_csr_write(bdk_node_t node, const char *csr_name, bdk_csr_type_t type, int busnum, int size, uint64_t address, uint64_t value);
+#endif
+
+/**
+ * Macro to write a CSR, logging if necessary
+ */
+#define DRAM_CSR_WRITE(node, csr, value) \
+ dram_csr_write(node, basename_##csr, bustype_##csr, busnum_##csr, sizeof(typedef_##csr), csr, value)
+
+/**
+ * Macro to make a read, modify, and write sequence easy. The "code_block"
+ * should be replaced with a C code block or a comma separated list of
+ * "name.s.field = value", without the quotes.
+ */
+#define DRAM_CSR_MODIFY(name, node, csr, code_block) do { \
+ typedef_##csr name = {.u = bdk_csr_read(node, bustype_##csr, busnum_##csr, sizeof(typedef_##csr), csr)}; \
+ code_block; \
+ dram_csr_write(node, basename_##csr, bustype_##csr, busnum_##csr, sizeof(typedef_##csr), csr, name.u); \
+ } while (0)
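+
+/* Hedged usage sketch: a read-modify-write of LMC(0)_CONFIG through the
+ macro above. The ecc_ena field appears elsewhere in these sources; the
+ wrapper function itself is illustrative. */
+#if 0
+static inline void example_enable_ecc(bdk_node_t node)
+{
+ DRAM_CSR_MODIFY(c, node, BDK_LMCX_CONFIG(0),
+ c.s.ecc_ena = 1); /* one or more comma-separated field updates */
+}
+#endif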
+
diff --git a/src/vendorcode/cavium/bdk/libdram/dram-env.c b/src/vendorcode/cavium/bdk/libdram/dram-env.c
new file mode 100644
index 0000000000..f25e6bdb26
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libdram/dram-env.c
@@ -0,0 +1,83 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include "dram-internal.h"
+
+const char* lookup_env_parameter(const char *format, ...)
+{
+ const char *s;
+ unsigned long value;
+ va_list args;
+ char buffer[64];
+
+ va_start(args, format);
+ vsnprintf(buffer, sizeof(buffer)-1, format, args);
+ buffer[sizeof(buffer)-1] = '\0';
+ va_end(args);
+
+ if ((s = getenv(buffer)) != NULL)
+ {
+ value = strtoul(s, NULL, 0);
+ error_print("Parameter found in environment: %s = \"%s\" 0x%lx (%ld)\n",
+ buffer, s, value, value);
+ }
+ return s;
+}
+
+const char* lookup_env_parameter_ull(const char *format, ...)
+{
+ const char *s;
+ unsigned long long value;
+ va_list args;
+ char buffer[64];
+
+ va_start(args, format);
+ vsnprintf(buffer, sizeof(buffer)-1, format, args);
+ buffer[sizeof(buffer)-1] = '\0';
+ va_end(args);
+
+ if ((s = getenv(buffer)) != NULL)
+ {
+ value = strtoull(s, NULL, 0);
+ error_print("Parameter found in environment: %s = 0x%016llx\n",
+ buffer, value);
+ }
+ return s;
+}
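+
+/* Hedged usage sketch: callers probe a formatted variable name and keep
+ their computed default when it is absent. The variable name pattern and
+ rtt_nom are assumptions made up for this example. */
+#if 0
+static int example_env_override(int lmc, int rtt_nom)
+{
+ const char *s = lookup_env_parameter("ddr%d_rtt_nom", lmc);
+ if (s != NULL)
+ rtt_nom = strtoul(s, NULL, 0); /* environment wins over the default */
+ return rtt_nom;
+}
+#endif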
+
diff --git a/src/vendorcode/cavium/bdk/libdram/dram-env.h b/src/vendorcode/cavium/bdk/libdram/dram-env.h
new file mode 100644
index 0000000000..0f100e1b25
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libdram/dram-env.h
@@ -0,0 +1,48 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * Functions for accessing the environment for DRAM tweaking.
+ * Internal use only.
+ */
+
+
+extern const char *lookup_env_parameter(const char *format, ...) __attribute__ ((format(printf, 1, 2)));
+extern const char *lookup_env_parameter_ull(const char *format, ...) __attribute__ ((format(printf, 1, 2)));
+
diff --git a/src/vendorcode/cavium/bdk/libdram/dram-gpio.h b/src/vendorcode/cavium/bdk/libdram/dram-gpio.h
new file mode 100644
index 0000000000..62c9a5c190
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libdram/dram-gpio.h
@@ -0,0 +1,46 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * Functions for reporting DRAM init status through GPIOs.
+ * Useful for triggering scopes and such. Internal use only.
+ */
+
+extern void pulse_gpio_pin(bdk_node_t node, int pin, int usecs);
+
diff --git a/src/vendorcode/cavium/bdk/libdram/dram-init-ddr3.c b/src/vendorcode/cavium/bdk/libdram/dram-init-ddr3.c
new file mode 100644
index 0000000000..edb42312f1
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libdram/dram-init-ddr3.c
@@ -0,0 +1,8535 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include "libbdk-arch/bdk-csrs-l2c_tad.h"
+#include "libbdk-arch/bdk-csrs-mio_fus.h"
+#include "dram-internal.h"
+
+#define WODT_MASK_2R_1S 1 // FIXME: did not seem to make much difference with #152 1-slot?
+
+#define DESKEW_RODT_CTL 1
+
+// Set to 1 to use the feature whenever possible automatically.
+// When 0, however, the feature is still available, and it can
+// be enabled via envvar override "ddr_enable_write_deskew=1".
+#define ENABLE_WRITE_DESKEW_DEFAULT 0
+
+#define ENABLE_COMPUTED_VREF_ADJUSTMENT 1
+
+#define RLEXTRAS_PATCH 1 // write to unused RL rank entries
+#define WLEXTRAS_PATCH 1 // write to unused WL rank entries
+#define ADD_48_OHM_SKIP 1
+#define NOSKIP_40_48_OHM 1
+#define NOSKIP_48_STACKED 1
+#define NOSKIP_FOR_MINI 1
+#define NOSKIP_FOR_2S_1R 1
+#define MAJORITY_OVER_AVG 1
+#define RANK_MAJORITY MAJORITY_OVER_AVG && 1
+#define SW_WL_CHECK_PATCH 1 // check validity after SW adjust
+#define HW_WL_MAJORITY 1
+#define SWL_TRY_HWL_ALT HW_WL_MAJORITY && 1 // try HW WL base alternate if available when SW WL fails
+#define DISABLE_SW_WL_PASS_2 1
+
+#define HWL_BY_BYTE 0 // FIXME? set to 1 to do HWL a byte at a time (seemed to work better earlier?)
+
+#define USE_ORIG_TEST_DRAM_BYTE 1
+
+// collect and print LMC utilization using SWL software algorithm
+#define ENABLE_SW_WLEVEL_UTILIZATION 0
+
+#define COUNT_RL_CANDIDATES 1
+
+#define LOOK_FOR_STUCK_BYTE 0
+#define ENABLE_STUCK_BYTE_RESET 0
+
+#define FAILSAFE_CHECK 1
+
+#define PERFECT_BITMASK_COUNTING 1
+
+#define DAC_OVERRIDE_EARLY 1
+
+#define SWL_WITH_HW_ALTS_CHOOSE_SW 0 // FIXME: allow override?
+
+#define DEBUG_VALIDATE_BITMASK 0
+#if DEBUG_VALIDATE_BITMASK
+#define debug_bitmask_print ddr_print
+#else
+#define debug_bitmask_print(...)
+#endif
+
+#define ENABLE_SLOT_CTL_ACCESS 0
+#undef ENABLE_CUSTOM_RLEVEL_TABLE
+
+#define ENABLE_DISPLAY_MPR_PAGE 0
+#if ENABLE_DISPLAY_MPR_PAGE
+static void Display_MPR_Page_Location(bdk_node_t node, int rank,
+ int ddr_interface_num, int dimm_count,
+ int page, int location, uint64_t *mpr_data);
+#endif
+
+#define USE_L2_WAYS_LIMIT 1
+
+/* Read out Deskew Settings for DDR */
+
+typedef struct {
+ uint16_t bits[8];
+} deskew_bytes_t;
+typedef struct {
+ deskew_bytes_t bytes[9];
+} deskew_data_t;
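+
+/* Each bits[] entry holds the raw 10-bit PHY readout: bits [2:0] are the
+ lock/saturation flags and bits [9:3] the deskew setting, as unpacked by
+ the display and validation routines below. */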
+
+static void
+Get_Deskew_Settings(bdk_node_t node, int ddr_interface_num, deskew_data_t *dskdat)
+{
+ bdk_lmcx_phy_ctl_t phy_ctl;
+ bdk_lmcx_config_t lmc_config;
+ int bit_num, bit_index;
+ int byte_lane, byte_limit;
+ // NOTE: these are for pass 2.x
+ int is_t88p2 = !CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X); // added 81xx and 83xx
+ int bit_end = (is_t88p2) ? 9 : 8;
+
+ lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(ddr_interface_num));
+ byte_limit = ((lmc_config.s.mode32b) ? 4 : 8) + lmc_config.s.ecc_ena;
+
+ memset(dskdat, 0, sizeof(*dskdat));
+
+ BDK_CSR_MODIFY(phy_ctl, node, BDK_LMCX_PHY_CTL(ddr_interface_num),
+ phy_ctl.s.dsk_dbg_clk_scaler = 3);
+
+ for (byte_lane = 0; byte_lane < byte_limit; byte_lane++) {
+ bit_index = 0;
+ for (bit_num = 0; bit_num <= bit_end; ++bit_num) { // NOTE: this is for pass 2.x
+
+ if (bit_num == 4) continue;
+ if ((bit_num == 5) && is_t88p2) continue; // NOTE: this is for pass 2.x
+
+ // set byte lane and bit to read
+ BDK_CSR_MODIFY(phy_ctl, node, BDK_LMCX_PHY_CTL(ddr_interface_num),
+ (phy_ctl.s.dsk_dbg_bit_sel = bit_num,
+ phy_ctl.s.dsk_dbg_byte_sel = byte_lane));
+
+ // start read sequence
+ BDK_CSR_MODIFY(phy_ctl, node, BDK_LMCX_PHY_CTL(ddr_interface_num),
+ phy_ctl.s.dsk_dbg_rd_start = 1);
+
+ // poll for read sequence to complete
+ do {
+ phy_ctl.u = BDK_CSR_READ(node, BDK_LMCX_PHY_CTL(ddr_interface_num));
+ } while (phy_ctl.s.dsk_dbg_rd_complete != 1);
+
+ // record the data
+ dskdat->bytes[byte_lane].bits[bit_index] = phy_ctl.s.dsk_dbg_rd_data & 0x3ff;
+ bit_index++;
+
+ } /* for (bit_num = 0; bit_num <= bit_end; ++bit_num) */
+ } /* for (byte_lane = 0; byte_lane < byte_limit; byte_lane++) */
+
+ return;
+}
+
+static void
+Display_Deskew_Data(bdk_node_t node, int ddr_interface_num,
+ deskew_data_t *dskdat, int print_enable)
+{
+ int byte_lane;
+ int bit_num;
+ uint16_t flags, deskew;
+ bdk_lmcx_config_t lmc_config;
+ int byte_limit;
+ const char *fc = " ?-=+*#&";
+
+ lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(ddr_interface_num));
+ byte_limit = ((lmc_config.s.mode32b) ? 4 : 8) + lmc_config.s.ecc_ena;
+
+ if (print_enable) {
+ VB_PRT(print_enable, "N%d.LMC%d: Deskew Data: Bit => :",
+ node, ddr_interface_num);
+ for (bit_num = 7; bit_num >= 0; --bit_num)
+ VB_PRT(print_enable, " %3d ", bit_num);
+ VB_PRT(print_enable, "\n");
+ }
+
+ for (byte_lane = 0; byte_lane < byte_limit; byte_lane++) {
+ if (print_enable)
+ VB_PRT(print_enable, "N%d.LMC%d: Bit Deskew Byte %d %s :",
+ node, ddr_interface_num, byte_lane,
+ (print_enable >= VBL_TME) ? "FINAL" : " ");
+
+ for (bit_num = 7; bit_num >= 0; --bit_num) {
+
+ flags = dskdat->bytes[byte_lane].bits[bit_num] & 7;
+ deskew = dskdat->bytes[byte_lane].bits[bit_num] >> 3;
+
+ if (print_enable)
+ VB_PRT(print_enable, " %3d %c", deskew, fc[flags^1]);
+
+ } /* for (bit_num = 7; bit_num >= 0; --bit_num) */
+
+ if (print_enable)
+ VB_PRT(print_enable, "\n");
+
+ } /* for (byte_lane = 0; byte_lane < byte_limit; byte_lane++) */
+
+ return;
+}
+
+static int
+change_wr_deskew_ena(bdk_node_t node, int ddr_interface_num, int new_state)
+{
+ bdk_lmcx_dll_ctl3_t ddr_dll_ctl3;
+ int saved_wr_deskew_ena;
+
+ // return original WR_DESKEW_ENA setting
+ ddr_dll_ctl3.u = BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(ddr_interface_num));
+ saved_wr_deskew_ena = !!GET_DDR_DLL_CTL3(wr_deskew_ena);
+ if (saved_wr_deskew_ena != !!new_state) { // write it only when changing it
+ SET_DDR_DLL_CTL3(wr_deskew_ena, !!new_state);
+ DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(ddr_interface_num), ddr_dll_ctl3.u);
+ }
+ return saved_wr_deskew_ena;
+}
+
+typedef struct {
+ int saturated; // number saturated
+ int unlocked; // number unlocked
+ int nibrng_errs; // nibble range errors
+ int nibunl_errs; // nibble unlocked errors
+ //int nibsat_errs; // nibble saturation errors
+ int bitval_errs; // bit value errors
+#if LOOK_FOR_STUCK_BYTE
+ int bytes_stuck; // byte(s) stuck
+#endif
+} deskew_counts_t;
+
+#define MIN_BITVAL 17
+#define MAX_BITVAL 110
+
+static deskew_counts_t deskew_training_results;
+static int deskew_validation_delay = 10000; // FIXME: make this a var for overriding
+
+static void
+Validate_Read_Deskew_Training(bdk_node_t node, int rank_mask, int ddr_interface_num,
+ deskew_counts_t *counts, int print_enable)
+{
+ int byte_lane, bit_num, nib_num;
+ int nibrng_errs, nibunl_errs, bitval_errs;
+ //int nibsat_errs;
+ bdk_lmcx_config_t lmc_config;
+ int16_t nib_min[2], nib_max[2], nib_unl[2]/*, nib_sat[2]*/;
+ // NOTE: these are for pass 2.x
+ int is_t88p2 = !CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X); // added 81xx and 83xx
+ int bit_start = (is_t88p2) ? 9 : 8;
+ int byte_limit;
+#if LOOK_FOR_STUCK_BYTE
+ uint64_t bl_mask[2]; // enough for 128 values
+ int bit_values;
+#endif
+ deskew_data_t dskdat;
+ int bit_index;
+ int16_t flags, deskew;
+ const char *fc = " ?-=+*#&";
+ int saved_wr_deskew_ena;
+ int bit_last;
+
+ // save original WR_DESKEW_ENA setting, and disable it for read deskew
+ saved_wr_deskew_ena = change_wr_deskew_ena(node, ddr_interface_num, 0);
+
+ lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(ddr_interface_num));
+ byte_limit = ((!lmc_config.s.mode32b) ? 8 : 4) + lmc_config.s.ecc_ena;
+
+ memset(counts, 0, sizeof(deskew_counts_t));
+
+ Get_Deskew_Settings(node, ddr_interface_num, &dskdat);
+
+ if (print_enable) {
+ VB_PRT(print_enable, "N%d.LMC%d: Deskew Settings: Bit => :",
+ node, ddr_interface_num);
+ for (bit_num = 7; bit_num >= 0; --bit_num)
+ VB_PRT(print_enable, " %3d ", bit_num);
+ VB_PRT(print_enable, "\n");
+ }
+
+ for (byte_lane = 0; byte_lane < byte_limit; byte_lane++) {
+ if (print_enable)
+ VB_PRT(print_enable, "N%d.LMC%d: Bit Deskew Byte %d %s :",
+ node, ddr_interface_num, byte_lane,
+ (print_enable >= VBL_TME) ? "FINAL" : " ");
+
+ nib_min[0] = 127; nib_min[1] = 127;
+ nib_max[0] = 0; nib_max[1] = 0;
+ nib_unl[0] = 0; nib_unl[1] = 0;
+ //nib_sat[0] = 0; nib_sat[1] = 0;
+
+#if LOOK_FOR_STUCK_BYTE
+ bl_mask[0] = bl_mask[1] = 0;
+#endif
+
+ if ((lmc_config.s.mode32b == 1) && (byte_lane == 4)) {
+ bit_index = 3;
+ bit_last = 3;
+ if (print_enable)
+ VB_PRT(print_enable, " ");
+ } else {
+ bit_index = 7;
+ bit_last = bit_start;
+ }
+
+ for (bit_num = bit_last; bit_num >= 0; --bit_num) { // NOTE: this is for pass 2.x
+ if (bit_num == 4) continue;
+ if ((bit_num == 5) && is_t88p2) continue; // NOTE: this is for pass 2.x
+
+ nib_num = (bit_num > 4) ? 1 : 0;
+
+ flags = dskdat.bytes[byte_lane].bits[bit_index] & 7;
+ deskew = dskdat.bytes[byte_lane].bits[bit_index] >> 3;
+ bit_index--;
+
+ counts->saturated += !!(flags & 6);
+ counts->unlocked += !(flags & 1);
+
+ nib_unl[nib_num] += !(flags & 1);
+ //nib_sat[nib_num] += !!(flags & 6);
+
+ if (flags & 1) { // FIXME? only do range when locked
+ nib_min[nib_num] = min(nib_min[nib_num], deskew);
+ nib_max[nib_num] = max(nib_max[nib_num], deskew);
+ }
+
+#if LOOK_FOR_STUCK_BYTE
+ bl_mask[(deskew >> 6) & 1] |= 1UL << (deskew & 0x3f);
+#endif
+
+ if (print_enable)
+ VB_PRT(print_enable, " %3d %c", deskew, fc[flags^1]);
+
+ } /* for (bit_num = bit_last; bit_num >= 0; --bit_num) */
+
+ /*
+ Now look for nibble errors:
+
+ For bit 55, it looks like a bit deskew problem. When the upper nibble of byte 6
+ needs to go to saturation, bit 7 of byte 6 locks prematurely at 64.
+ For DIMMs with raw card A and B, can we reset the deskew training when we encounter this case?
+ The reset criteria should be looking at one nibble at a time for raw card A and B;
+ if the bit-deskew setting within a nibble is different by > 33, we'll issue a reset
+ to the bit deskew training.
+
+ LMC0 Bit Deskew Byte(6): 64 0 - 0 - 0 - 26 61 35 64
+ */
+ // upper nibble range, then lower nibble range
+ nibrng_errs = ((nib_max[1] - nib_min[1]) > 33) ? 1 : 0;
+ nibrng_errs |= ((nib_max[0] - nib_min[0]) > 33) ? 1 : 0;
+
+ // check for nibble all unlocked
+ nibunl_errs = ((nib_unl[0] == 4) || (nib_unl[1] == 4)) ? 1 : 0;
+
+ // check for nibble all saturated
+ //nibsat_errs = ((nib_sat[0] == 4) || (nib_sat[1] == 4)) ? 1 : 0;
+
+ // check for bit value errors, ie < 17 or > 110
+ // FIXME? assume max always > MIN_BITVAL and min < MAX_BITVAL
+ bitval_errs = ((nib_max[1] > MAX_BITVAL) || (nib_max[0] > MAX_BITVAL)) ? 1 : 0;
+ bitval_errs |= ((nib_min[1] < MIN_BITVAL) || (nib_min[0] < MIN_BITVAL)) ? 1 : 0;
+
+ if (((nibrng_errs != 0) || (nibunl_errs != 0) /*|| (nibsat_errs != 0)*/ || (bitval_errs != 0))
+ && print_enable)
+ {
+ VB_PRT(print_enable, " %c%c%c%c",
+ (nibrng_errs)?'R':' ',
+ (nibunl_errs)?'U':' ',
+ (bitval_errs)?'V':' ',
+ /*(nibsat_errs)?'S':*/' ');
+ }
+
+#if LOOK_FOR_STUCK_BYTE
+ bit_values = __builtin_popcountl(bl_mask[0]) + __builtin_popcountl(bl_mask[1]);
+ if (bit_values < 3) {
+ counts->bytes_stuck |= (1 << byte_lane);
+ if (print_enable)
+ VB_PRT(print_enable, "X");
+ }
+#endif
+ if (print_enable)
+ VB_PRT(print_enable, "\n");
+
+ counts->nibrng_errs |= (nibrng_errs << byte_lane);
+ counts->nibunl_errs |= (nibunl_errs << byte_lane);
+ //counts->nibsat_errs |= (nibsat_errs << byte_lane);
+ counts->bitval_errs |= (bitval_errs << byte_lane);
+
+#if LOOK_FOR_STUCK_BYTE
+ // just for completeness, allow print of the stuck values bitmask after the bytelane print
+ if ((bit_values < 3) && print_enable) {
+ VB_PRT(VBL_DEV, "N%d.LMC%d: Deskew byte %d STUCK on value 0x%016lx.%016lx\n",
+ node, ddr_interface_num, byte_lane,
+ bl_mask[1], bl_mask[0]);
+ }
+#endif
+
+ } /* for (byte_lane = 0; byte_lane < byte_limit; byte_lane++) */
+
+ // restore original WR_DESKEW_ENA setting
+ change_wr_deskew_ena(node, ddr_interface_num, saved_wr_deskew_ena);
+
+ return;
+}
+
+unsigned short load_dac_override(int node, int ddr_interface_num,
+ int dac_value, int byte)
+{
+ bdk_lmcx_dll_ctl3_t ddr_dll_ctl3;
+ int bytex = (byte == 0x0A) ? byte : byte + 1; // single bytelanes incr by 1; A is for ALL
+
+ ddr_dll_ctl3.u = BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(ddr_interface_num));
+
+ SET_DDR_DLL_CTL3(byte_sel, bytex);
+ SET_DDR_DLL_CTL3(offset, dac_value >> 1); // only 7-bit field, use MS bits
+
+ ddr_dll_ctl3.s.bit_select = 0x9; /* No-op */
+ DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(ddr_interface_num), ddr_dll_ctl3.u);
+
+ ddr_dll_ctl3.s.bit_select = 0xC; /* Vref bypass setting load */
+ DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(ddr_interface_num), ddr_dll_ctl3.u);
+
+ ddr_dll_ctl3.s.bit_select = 0xD; /* Vref bypass on. */
+ DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(ddr_interface_num), ddr_dll_ctl3.u);
+
+ ddr_dll_ctl3.s.bit_select = 0x9; /* No-op */
+ DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(ddr_interface_num), ddr_dll_ctl3.u);
+
+ return ((unsigned short) GET_DDR_DLL_CTL3(offset));
+}
+
+// arg dac_or_dbi is 1 for DAC, 0 for DBI
+// returns 9 entries (bytelanes 0 through 8) in settings[]
+// returns 0 if OK, -1 if a problem
+int read_DAC_DBI_settings(int node, int ddr_interface_num,
+ int dac_or_dbi, int *settings)
+{
+ bdk_lmcx_phy_ctl_t phy_ctl;
+ int byte_lane, bit_num;
+ int deskew;
+ int dac_value;
+ int is_t88p2 = !CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X); // added 81xx and 83xx
+
+ phy_ctl.u = BDK_CSR_READ(node, BDK_LMCX_PHY_CTL(ddr_interface_num));
+ phy_ctl.s.dsk_dbg_clk_scaler = 3;
+ DRAM_CSR_WRITE(node, BDK_LMCX_PHY_CTL(ddr_interface_num), phy_ctl.u);
+
+ bit_num = (dac_or_dbi) ? 4 : 5;
+ if ((bit_num == 5) && !is_t88p2) { // NOTE: this is for pass 1.x
+ return -1;
+ }
+
+ for (byte_lane = 8; byte_lane >= 0 ; --byte_lane) { // FIXME: always assume ECC is available
+
+ //set byte lane and bit to read
+ phy_ctl.s.dsk_dbg_bit_sel = bit_num;
+ phy_ctl.s.dsk_dbg_byte_sel = byte_lane;
+ DRAM_CSR_WRITE(node, BDK_LMCX_PHY_CTL(ddr_interface_num), phy_ctl.u);
+
+ //start read sequence
+ phy_ctl.u = BDK_CSR_READ(node, BDK_LMCX_PHY_CTL(ddr_interface_num));
+ phy_ctl.s.dsk_dbg_rd_start = 1;
+ DRAM_CSR_WRITE(node, BDK_LMCX_PHY_CTL(ddr_interface_num), phy_ctl.u);
+
+ //poll for read sequence to complete
+ do {
+ phy_ctl.u = BDK_CSR_READ(node, BDK_LMCX_PHY_CTL(ddr_interface_num));
+ } while (phy_ctl.s.dsk_dbg_rd_complete != 1);
+
+ deskew = phy_ctl.s.dsk_dbg_rd_data /*>> 3*/; // leave the flag bits for DBI
+ dac_value = phy_ctl.s.dsk_dbg_rd_data & 0xff;
+
+ settings[byte_lane] = (dac_or_dbi) ? dac_value : deskew;
+
+ } /* for (byte_lane = 8; byte_lane >= 0 ; --byte_lane) { */
+
+ return 0;
+}
+
+// print out the DBI settings array
+// arg dac_or_dbi is 1 for DAC, 0 for DBI
+void
+display_DAC_DBI_settings(int node, int lmc, int dac_or_dbi,
+ int ecc_ena, int *settings, char *title)
+{
+ int byte;
+ int flags;
+ int deskew;
+ const char *fc = " ?-=+*#&";
+
+ ddr_print("N%d.LMC%d: %s %s Deskew Settings %d:0 :",
+ node, lmc, title, (dac_or_dbi)?"DAC":"DBI", 7+ecc_ena);
+ for (byte = (7+ecc_ena); byte >= 0; --byte) { // FIXME: what about 32-bit mode?
+ if (dac_or_dbi) { // DAC
+ flags = 1; // say its locked to get blank
+ deskew = settings[byte] & 0xff;
+ } else { // DBI
+ flags = settings[byte] & 7;
+ deskew = (settings[byte] >> 3) & 0x7f;
+ }
+ ddr_print(" %3d %c", deskew, fc[flags^1]);
+ }
+ ddr_print("\n");
+}
+
+// Evaluate the DAC settings array
+static int
+evaluate_DAC_settings(int ddr_interface_64b, int ecc_ena, int *settings)
+{
+ int byte, dac;
+ int last = (ddr_interface_64b) ? 7 : 3;
+
+ // this looks only for DAC values that are EVEN
+ for (byte = (last+ecc_ena); byte >= 0; --byte) {
+ dac = settings[byte] & 0xff;
+ if ((dac & 1) == 0)
+ return 1;
+ }
+ return 0;
+}
+
+static void
+Perform_Offset_Training(bdk_node_t node, int rank_mask, int ddr_interface_num)
+{
+ bdk_lmcx_phy_ctl_t lmc_phy_ctl;
+ uint64_t orig_phy_ctl;
+ const char *s;
+
+ /*
+ * 6.9.8 LMC Offset Training
+ *
+ * LMC requires input-receiver offset training.
+ *
+ * 1. Write LMC(0)_PHY_CTL[DAC_ON] = 1
+ */
+ lmc_phy_ctl.u = BDK_CSR_READ(node, BDK_LMCX_PHY_CTL(ddr_interface_num));
+ orig_phy_ctl = lmc_phy_ctl.u;
+ lmc_phy_ctl.s.dac_on = 1;
+
+ // allow full CSR override
+ if ((s = lookup_env_parameter_ull("ddr_phy_ctl")) != NULL) {
+ lmc_phy_ctl.u = strtoull(s, NULL, 0);
+ }
+
+ // do not print or write if CSR does not change...
+ if (lmc_phy_ctl.u != orig_phy_ctl) {
+ ddr_print("PHY_CTL : 0x%016lx\n", lmc_phy_ctl.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_PHY_CTL(ddr_interface_num), lmc_phy_ctl.u);
+ }
+
+#if 0
+ // FIXME? do we really need to show RODT here?
+ bdk_lmcx_comp_ctl2_t lmc_comp_ctl2;
+ lmc_comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num));
+ ddr_print("Read ODT_CTL : 0x%x (%d ohms)\n",
+ lmc_comp_ctl2.s.rodt_ctl, imp_values->rodt_ohms[lmc_comp_ctl2.s.rodt_ctl]);
+#endif
+
+ /*
+ * 2. Write LMC(0)_SEQ_CTL[SEQ_SEL] = 0x0B and
+ * LMC(0)_SEQ_CTL[INIT_START] = 1.
+ *
+ * 3. Wait for LMC(0)_SEQ_CTL[SEQ_COMPLETE] to be set to 1.
+ */
+ perform_octeon3_ddr3_sequence(node, rank_mask, ddr_interface_num, 0x0B); /* Offset training sequence */
+
+}
+
+static void
+Perform_Internal_VREF_Training(bdk_node_t node, int rank_mask, int ddr_interface_num)
+{
+ bdk_lmcx_ext_config_t ext_config;
+
+ /*
+ * 6.9.9 LMC Internal Vref Training
+ *
+ * LMC requires input-reference-voltage training.
+ *
+ * 1. Write LMC(0)_EXT_CONFIG[VREFINT_SEQ_DESKEW] = 0.
+ */
+ ext_config.u = BDK_CSR_READ(node, BDK_LMCX_EXT_CONFIG(ddr_interface_num));
+ ext_config.s.vrefint_seq_deskew = 0;
+
+ VB_PRT(VBL_SEQ, "N%d.LMC%d: Performing LMC sequence: vrefint_seq_deskew = %d\n",
+ node, ddr_interface_num, ext_config.s.vrefint_seq_deskew);
+
+ DRAM_CSR_WRITE(node, BDK_LMCX_EXT_CONFIG(ddr_interface_num), ext_config.u);
+
+ /*
+ * 2. Write LMC(0)_SEQ_CTL[SEQ_SEL] = 0x0a and
+ * LMC(0)_SEQ_CTL[INIT_START] = 1.
+ *
+ * 3. Wait for LMC(0)_SEQ_CTL[SEQ_COMPLETE] to be set to 1.
+ */
+ perform_octeon3_ddr3_sequence(node, rank_mask, ddr_interface_num, 0x0A); /* LMC Internal Vref Training */
+}
+
+#define dbg_avg(format, ...) VB_PRT(VBL_DEV, format, ##__VA_ARGS__)
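+// Reduce one byte lane's samples to a single value: compute the full
+// average (savg), then drop the min and max as outliers and compute both
+// the rounded (sadj) and truncated (trunc) averages of the rest; the
+// result prefers an odd value: trunc if odd, else sadj if odd, else trunc+1.
+// Worked example (illustrative): samples {10, 12, 11, 13, 11} give
+// sum=57, smin=10, smax=13, asum=34, savg=11.4, sadj=11.3, trunc=11;
+// trunc is odd, so the result is 11.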
+static int
+process_samples_average(int16_t *bytes, int num_samples, int lmc, int lane_no)
+{
+ int i, savg, sadj, sum = 0, rng, ret, asum, trunc;
+ int16_t smin = 32767, smax = -32768;
+
+ dbg_avg("DBG_AVG%d.%d: ", lmc, lane_no);
+
+ for (i = 0; i < num_samples; i++) {
+ sum += bytes[i];
+ if (bytes[i] < smin) smin = bytes[i];
+ if (bytes[i] > smax) smax = bytes[i];
+ dbg_avg(" %3d", bytes[i]);
+ }
+ rng = smax - smin + 1;
+
+ dbg_avg(" (%3d, %3d, %2d)", smin, smax, rng);
+
+ asum = sum - smin - smax;
+
+ savg = divide_nint(sum * 10, num_samples);
+
+ sadj = divide_nint(asum * 10, (num_samples - 2));
+
+ trunc = asum / (num_samples - 2);
+
+ dbg_avg(" [%3d.%d, %3d.%d, %3d]", savg/10, savg%10, sadj/10, sadj%10, trunc);
+
+ sadj = divide_nint(sadj, 10);
+ if (trunc & 1)
+ ret = trunc;
+ else if (sadj & 1)
+ ret = sadj;
+ else
+ ret = trunc + 1;
+
+ dbg_avg(" -> %3d\n", ret);
+
+ return ret;
+}
+
+
+#define DEFAULT_SAT_RETRY_LIMIT 11 // 1 + 10 retries
+static int default_lock_retry_limit = 20; // 20 retries // FIXME: make a var for overriding
+
+static int
+Perform_Read_Deskew_Training(bdk_node_t node, int rank_mask, int ddr_interface_num,
+ int spd_rawcard_AorB, int print_flags, int ddr_interface_64b)
+{
+ int unsaturated, locked;
+ //int nibble_sat;
+ int sat_retries, lock_retries, lock_retries_total, lock_retries_limit;
+ int print_first;
+ int print_them_all;
+ deskew_counts_t dsk_counts;
+ uint64_t saved_wr_deskew_ena;
+#if DESKEW_RODT_CTL
+ bdk_lmcx_comp_ctl2_t comp_ctl2;
+ int save_deskew_rodt_ctl = -1;
+#endif
+ int is_t88p2 = !CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X); // added 81xx and 83xx
+
+ VB_PRT(VBL_FAE, "N%d.LMC%d: Performing Read Deskew Training.\n", node, ddr_interface_num);
+
+ // save original WR_DESKEW_ENA setting, and disable it for read deskew
+ saved_wr_deskew_ena = change_wr_deskew_ena(node, ddr_interface_num, 0);
+
+ sat_retries = 0;
+ lock_retries_total = 0;
+ unsaturated = 0;
+ print_first = VBL_FAE; // print the first one, FAE and above
+ print_them_all = dram_is_verbose(VBL_DEV4); // set to true for printing all normal deskew attempts
+
+ int loops, normal_loops = 1; // default to 1 NORMAL deskew training op...
+ const char *s;
+ if ((s = getenv("ddr_deskew_normal_loops")) != NULL) {
+ normal_loops = strtoul(s, NULL, 0);
+ }
+
+#if LOOK_FOR_STUCK_BYTE
+ // provide override for STUCK BYTE RESETS
+ int do_stuck_reset = ENABLE_STUCK_BYTE_RESET;
+ if ((s = getenv("ddr_enable_stuck_byte_reset")) != NULL) {
+ do_stuck_reset = !!strtoul(s, NULL, 0);
+ }
+#endif
+
+#if DESKEW_RODT_CTL
+ if ((s = getenv("ddr_deskew_rodt_ctl")) != NULL) {
+ int deskew_rodt_ctl = strtoul(s, NULL, 0);
+ comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num));
+ save_deskew_rodt_ctl = comp_ctl2.s.rodt_ctl;
+ comp_ctl2.s.rodt_ctl = deskew_rodt_ctl;
+ DRAM_CSR_WRITE(node, BDK_LMCX_COMP_CTL2(ddr_interface_num), comp_ctl2.u);
+ }
+#endif
+
+ lock_retries_limit = default_lock_retry_limit;
+ if (! CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X)) // added 81xx and 83xx
+ lock_retries_limit *= 2; // give pass 2.0 twice as many
+
+ do { /* while (sat_retries < sat_retry_limit) */
+
+ /*
+ * 6.9.10 LMC Deskew Training
+ *
+ * LMC requires input-read-data deskew training.
+ *
+ * 1. Write LMC(0)_EXT_CONFIG[VREFINT_SEQ_DESKEW] = 1.
+ */
+ VB_PRT(VBL_SEQ, "N%d.LMC%d: Performing LMC sequence: Set vrefint_seq_deskew = 1\n",
+ node, ddr_interface_num);
+ DRAM_CSR_MODIFY(ext_config, node, BDK_LMCX_EXT_CONFIG(ddr_interface_num),
+ ext_config.s.vrefint_seq_deskew = 1); /* Set Deskew sequence */
+
+ /*
+ * 2. Write LMC(0)_SEQ_CTL[SEQ_SEL] = 0x0A and
+ * LMC(0)_SEQ_CTL[INIT_START] = 1.
+ *
+ * 3. Wait for LMC(0)_SEQ_CTL[SEQ_COMPLETE] to be set to 1.
+ */
+ DRAM_CSR_MODIFY(phy_ctl, node, BDK_LMCX_PHY_CTL(ddr_interface_num),
+ phy_ctl.s.phy_dsk_reset = 1); /* RESET Deskew sequence */
+ perform_octeon3_ddr3_sequence(node, rank_mask, ddr_interface_num, 0x0A); /* LMC Deskew Training */
+
+ lock_retries = 0;
+
+ perform_read_deskew_training:
+ // maybe perform the NORMAL deskew training sequence multiple times before looking at lock status
+ for (loops = 0; loops < normal_loops; loops++) {
+ DRAM_CSR_MODIFY(phy_ctl, node, BDK_LMCX_PHY_CTL(ddr_interface_num),
+ phy_ctl.s.phy_dsk_reset = 0); /* Normal Deskew sequence */
+ perform_octeon3_ddr3_sequence(node, rank_mask, ddr_interface_num, 0x0A); /* LMC Deskew Training */
+ }
+ // Moved this from Validate_Read_Deskew_Training
+ /* Allow deskew results to stabilize before evaluating them. */
+ bdk_wait_usec(deskew_validation_delay);
+
+ // Now go look at lock and saturation status...
+ Validate_Read_Deskew_Training(node, rank_mask, ddr_interface_num, &dsk_counts, print_first);
+ if (print_first && !print_them_all) // after printing the first one, stop unless printing them all
+ print_first = 0;
+
+ unsaturated = (dsk_counts.saturated == 0);
+ locked = (dsk_counts.unlocked == 0);
+ //nibble_sat = (dsk_counts.nibsat_errs != 0);
+
+ // only do locking retries if unsaturated or rawcard A or B, otherwise full SAT retry
+ if (unsaturated || (spd_rawcard_AorB && !is_t88p2 /*&& !nibble_sat*/)) {
+ if (!locked) { // and not locked
+ lock_retries++;
+ lock_retries_total++;
+ if (lock_retries <= lock_retries_limit) {
+ goto perform_read_deskew_training;
+ } else {
+ VB_PRT(VBL_TME, "N%d.LMC%d: LOCK RETRIES failed after %d retries\n",
+ node, ddr_interface_num, lock_retries_limit);
+ }
+ } else {
+ if (lock_retries_total > 0) // only print if we did try
+ VB_PRT(VBL_TME, "N%d.LMC%d: LOCK RETRIES successful after %d retries\n",
+ node, ddr_interface_num, lock_retries);
+ }
+ } /* if (unsaturated || spd_rawcard_AorB) */
+
+ ++sat_retries;
+
+#if LOOK_FOR_STUCK_BYTE
+ // FIXME: this is a bit of a hack at the moment...
+ // We want to force a Deskew RESET hopefully to unstick the bytes values
+ // and then resume normal deskew training as usual.
+ // For now, do only if it is all locked...
+ if (locked && (dsk_counts.bytes_stuck != 0)) {
+ BDK_CSR_INIT(lmc_config, node, BDK_LMCX_CONFIG(ddr_interface_num));
+ if (do_stuck_reset && lmc_config.s.mode_x4dev) { // FIXME: only when x4!!
+ unsaturated = 0; // to always make sure the while continues
+ VB_PRT(VBL_TME, "N%d.LMC%d: STUCK BYTE (0x%x), forcing deskew RESET\n",
+ node, ddr_interface_num, dsk_counts.bytes_stuck);
+ continue; // bypass the rest to get back to the RESET
+ } else {
+ VB_PRT(VBL_TME, "N%d.LMC%d: STUCK BYTE (0x%x), ignoring deskew RESET\n",
+ node, ddr_interface_num, dsk_counts.bytes_stuck);
+ }
+ }
+#endif
+ /*
+ * At this point, check for a DDR4 RDIMM that will not benefit from SAT retries; if so, no retries
+ */
+ if (spd_rawcard_AorB && !is_t88p2 /*&& !nibble_sat*/) {
+ VB_PRT(VBL_TME, "N%d.LMC%d: Read Deskew Training Loop: Exiting for RAWCARD == A or B.\n",
+ node, ddr_interface_num);
+ break; // no sat or lock retries
+ }
+
+ } while (!unsaturated && (sat_retries < DEFAULT_SAT_RETRY_LIMIT));
+
+#if DESKEW_RODT_CTL
+ if (save_deskew_rodt_ctl != -1) {
+ comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num));
+ comp_ctl2.s.rodt_ctl = save_deskew_rodt_ctl;
+ DRAM_CSR_WRITE(node, BDK_LMCX_COMP_CTL2(ddr_interface_num), comp_ctl2.u);
+ }
+#endif
+
+ VB_PRT(VBL_FAE, "N%d.LMC%d: Read Deskew Training %s. %d sat-retries, %d lock-retries\n",
+ node, ddr_interface_num,
+ (sat_retries >= DEFAULT_SAT_RETRY_LIMIT) ? "Timed Out" : "Completed",
+ sat_retries-1, lock_retries_total);
+
+ // restore original WR_DESKEW_ENA setting
+ change_wr_deskew_ena(node, ddr_interface_num, saved_wr_deskew_ena);
+
+ if ((dsk_counts.nibrng_errs != 0) || (dsk_counts.nibunl_errs != 0)) {
+ debug_print("N%d.LMC%d: NIBBLE ERROR(S) found, returning FAULT\n",
+ node, ddr_interface_num);
+ return -1; // we did retry locally, they did not help
+ }
+
+ // NOTE: we (currently) always print one last training validation before starting Read Leveling...
+
+ return 0;
+}
+
+static void
+do_write_deskew_op(bdk_node_t node, int ddr_interface_num,
+ int bit_sel, int byte_sel, int ena)
+{
+ bdk_lmcx_dll_ctl3_t ddr_dll_ctl3;
+
+ ddr_dll_ctl3.u = BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(ddr_interface_num));
+ SET_DDR_DLL_CTL3(bit_select, bit_sel);
+ SET_DDR_DLL_CTL3(byte_sel, byte_sel);
+ SET_DDR_DLL_CTL3(wr_deskew_ena, ena);
+ DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(ddr_interface_num), ddr_dll_ctl3.u);
+
+ ddr_dll_ctl3.u = BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(ddr_interface_num));
+}
+
+static void
+set_write_deskew_offset(bdk_node_t node, int ddr_interface_num,
+ int bit_sel, int byte_sel, int offset)
+{
+ bdk_lmcx_dll_ctl3_t ddr_dll_ctl3;
+
+ ddr_dll_ctl3.u = BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(ddr_interface_num));
+ SET_DDR_DLL_CTL3(bit_select, bit_sel);
+ SET_DDR_DLL_CTL3(byte_sel, byte_sel);
+ SET_DDR_DLL_CTL3(offset, offset);
+ DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(ddr_interface_num), ddr_dll_ctl3.u);
+
+ ddr_dll_ctl3.u = BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(ddr_interface_num));
+ SET_DDR_DLL_CTL3(wr_deskew_ld, 1);
+ DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(ddr_interface_num), ddr_dll_ctl3.u);
+
+ ddr_dll_ctl3.u = BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(ddr_interface_num));
+}
+
+static void
+Update_Write_Deskew_Settings(bdk_node_t node, int ddr_interface_num, deskew_data_t *dskdat)
+{
+ bdk_lmcx_config_t lmc_config;
+ int bit_num;
+ int byte_lane, byte_limit;
+
+ lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(ddr_interface_num));
+ byte_limit = ((lmc_config.s.mode32b) ? 4 : 8) + lmc_config.s.ecc_ena;
+
+ for (byte_lane = 0; byte_lane < byte_limit; byte_lane++) {
+ for (bit_num = 0; bit_num <= 7; ++bit_num) {
+
+ set_write_deskew_offset(node, ddr_interface_num, bit_num, byte_lane + 1,
+ dskdat->bytes[byte_lane].bits[bit_num]);
+
+ } /* for (bit_num = 0; bit_num <= 7; ++bit_num) */
+ } /* for (byte_lane = 0; byte_lane < byte_limit; byte_lane++) */
+
+ return;
+}
+
+#define ALL_BYTES 0x0A
+#define BS_NOOP 0x09
+#define BS_RESET 0x0F
+#define BS_REUSE 0x0A
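+// BIT_SELECT op-codes written to LMC(x)_DLL_CTL3, as used below: 0x09 is
+// a no-op, 0x0F resets the write deskew settings, and 0x0A reuses the
+// read deskew settings; note that 0x0A in BYTE_SEL instead addresses all
+// byte lanes at once.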
+
+// set all entries to the same value (used during training)
+static void
+Set_Write_Deskew_Settings(bdk_node_t node, int ddr_interface_num, int value)
+{
+ bdk_lmcx_dll_ctl3_t ddr_dll_ctl3;
+ int bit_num;
+
+ VB_PRT(VBL_DEV2, "N%d.LMC%d: SetWriteDeskew: WRITE %d\n", node, ddr_interface_num, value);
+
+ for (bit_num = 0; bit_num <= 7; ++bit_num) {
+
+ // write a bit-deskew value to all bit-lanes of all bytes
+ ddr_dll_ctl3.u = BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(ddr_interface_num));
+ SET_DDR_DLL_CTL3(bit_select, bit_num);
+ SET_DDR_DLL_CTL3(byte_sel, ALL_BYTES); // FIXME? will this work in 32-bit mode?
+ SET_DDR_DLL_CTL3(offset, value);
+ DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(ddr_interface_num), ddr_dll_ctl3.u);
+
+ ddr_dll_ctl3.u = BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(ddr_interface_num));
+ SET_DDR_DLL_CTL3(wr_deskew_ld, 1);
+ DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(ddr_interface_num), ddr_dll_ctl3.u);
+
+ } /* for (bit_num = 0; bit_num <= 7; ++bit_num) */
+
+#if 0
+ // FIXME: for debug use only
+ Get_Deskew_Settings(node, ddr_interface_num, &dskdat);
+ Display_Deskew_Data(node, ddr_interface_num, &dskdat, VBL_NORM);
+#endif
+
+ return;
+}
+
+typedef struct {
+ uint8_t count[8];
+ uint8_t start[8];
+ uint8_t best_count[8];
+ uint8_t best_start[8];
+} deskew_bytelane_t;
+typedef struct {
+ deskew_bytelane_t bytes[9];
+} deskew_rank_t;
+
+static deskew_rank_t deskew_history[4];
+
+#define DSKVAL_INCR 4
+
+static void
+Neutral_Write_Deskew_Setup(bdk_node_t node, int ddr_interface_num)
+{
+ // first: NO-OP, Select all bytes, Disable write bit-deskew
+ ddr_print("N%d.LMC%d: NEUTRAL Write Deskew Setup: first: NOOP\n", node, ddr_interface_num);
+ do_write_deskew_op(node, ddr_interface_num, BS_NOOP, ALL_BYTES, 0);
+ //Get_Deskew_Settings(node, ddr_interface_num, &dskdat);
+ //Display_Deskew_Data(node, ddr_interface_num, &dskdat, VBL_NORM);
+
+ // enable write bit-deskew and RESET the settings
+ ddr_print("N%d.LMC%d: NEUTRAL Write Deskew Setup: wr_ena: RESET\n", node, ddr_interface_num);
+ do_write_deskew_op(node, ddr_interface_num, BS_RESET, ALL_BYTES, 1);
+ //Get_Deskew_Settings(node, ddr_interface_num, &dskdat);
+ //Display_Deskew_Data(node, ddr_interface_num, &dskdat, VBL_NORM);
+}
+
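+// Write bit-deskew training, a sketch of the algorithm below: sweep the
+// deskew value 0..127 in DSKVAL_INCR steps; at each value, hardware-test
+// every active rank and record, per bit, the start and length of the
+// longest passing run; finally, intersect the best windows across ranks
+// and program the midpoint of the merged window for each bit.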
+static void
+Perform_Write_Deskew_Training(bdk_node_t node, int ddr_interface_num)
+{
+ deskew_data_t dskdat;
+ int byte, bit_num;
+ int dskval, rankx, rank_mask, active_ranks, errors, bit_errs;
+ uint64_t hw_rank_offset;
+ uint64_t bad_bits[2];
+ uint64_t phys_addr;
+ deskew_rank_t *dhp;
+ int num_lmcs = __bdk_dram_get_num_lmc(node);
+
+ BDK_CSR_INIT(lmcx_config, node, BDK_LMCX_CONFIG(ddr_interface_num));
+ rank_mask = lmcx_config.s.init_status; // FIXME: is this right when we run?
+
+ // this should be correct for 1 or 2 ranks, 1 or 2 DIMMs
+ hw_rank_offset = 1ull << (28 + lmcx_config.s.pbank_lsb - lmcx_config.s.rank_ena + (num_lmcs/2));
+
+ VB_PRT(VBL_FAE, "N%d.LMC%d: Performing Write Deskew Training.\n", node, ddr_interface_num);
+
+ // first: NO-OP, Select all bytes, Disable write bit-deskew
+ ddr_print("N%d.LMC%d: WriteDeskewConfig: first: NOOP\n", node, ddr_interface_num);
+ do_write_deskew_op(node, ddr_interface_num, BS_NOOP, ALL_BYTES, 0);
+ //Get_Deskew_Settings(node, ddr_interface_num, &dskdat);
+ //Display_Deskew_Data(node, ddr_interface_num, &dskdat, VBL_NORM);
+
+ // enable write bit-deskew and RESET the settings
+ ddr_print("N%d.LMC%d: WriteDeskewConfig: wr_ena: RESET\n", node, ddr_interface_num);
+ do_write_deskew_op(node, ddr_interface_num, BS_RESET, ALL_BYTES, 1);
+ //Get_Deskew_Settings(node, ddr_interface_num, &dskdat);
+ //Display_Deskew_Data(node, ddr_interface_num, &dskdat, VBL_NORM);
+
+#if 0
+ // enable write bit-deskew and REUSE read bit-deskew settings
+ ddr_print("N%d.LMC%d: WriteDeskewConfig: wr_ena: REUSE\n", node, ddr_interface_num);
+ do_write_deskew_op(node, ddr_interface_num, BS_REUSE, ALL_BYTES, 1);
+ Get_Deskew_Settings(node, ddr_interface_num, &dskdat);
+ Display_Deskew_Data(node, ddr_interface_num, &dskdat, VBL_NORM);
+#endif
+
+#if 1
+ memset(deskew_history, 0, sizeof(deskew_history));
+
+ for (dskval = 0; dskval < 128; dskval += DSKVAL_INCR) {
+
+ Set_Write_Deskew_Settings(node, ddr_interface_num, dskval);
+
+ active_ranks = 0;
+ for (rankx = 0; rankx < 4; rankx++) {
+ if (!(rank_mask & (1 << rankx)))
+ continue;
+ dhp = &deskew_history[rankx];
+ phys_addr = hw_rank_offset * active_ranks;
+ active_ranks++;
+
+ errors = test_dram_byte_hw(node, ddr_interface_num, phys_addr, 0, bad_bits);
+
+ for (byte = 0; byte <= 8; byte++) { // do bytelane(s)
+
+ // check errors
+ if (errors & (1 << byte)) { // yes, error(s) in the byte lane in this rank
+ bit_errs = ((byte == 8) ? bad_bits[1] : bad_bits[0] >> (8 * byte)) & 0xFFULL;
+
+ VB_PRT(VBL_DEV2, "N%d.LMC%d.R%d: Byte %d Value %d: Address 0x%012lx errors 0x%x/0x%x\n",
+ node, ddr_interface_num, rankx, byte,
+ dskval, phys_addr, errors, bit_errs);
+
+ for (bit_num = 0; bit_num <= 7; bit_num++) {
+ if (!(bit_errs & (1 << bit_num)))
+ continue;
+ if (dhp->bytes[byte].count[bit_num] > 0) { // had started run
+ VB_PRT(VBL_DEV2, "N%d.LMC%d.R%d: Byte %d Bit %d Value %d: stopping a run here\n",
+ node, ddr_interface_num, rankx, byte, bit_num, dskval);
+ dhp->bytes[byte].count[bit_num] = 0; // stop now
+ }
+ } /* for (bit_num = 0; bit_num <= 7; bit_num++) */
+
+ // FIXME: else had not started run - nothing else to do?
+ } else { // no error in the byte lane
+ for (bit_num = 0; bit_num <= 7; bit_num++) {
+ if (dhp->bytes[byte].count[bit_num] == 0) { // first success, set run start
+ VB_PRT(VBL_DEV2, "N%d.LMC%d.R%d: Byte %d Bit %d Value %d: starting a run here\n",
+ node, ddr_interface_num, rankx, byte, bit_num, dskval);
+ dhp->bytes[byte].start[bit_num] = dskval;
+ }
+ dhp->bytes[byte].count[bit_num] += DSKVAL_INCR; // bump run length
+
+ // is this now the biggest window?
+ if (dhp->bytes[byte].count[bit_num] > dhp->bytes[byte].best_count[bit_num]) {
+ dhp->bytes[byte].best_count[bit_num] = dhp->bytes[byte].count[bit_num];
+ dhp->bytes[byte].best_start[bit_num] = dhp->bytes[byte].start[bit_num];
+ VB_PRT(VBL_DEV2, "N%d.LMC%d.R%d: Byte %d Bit %d Value %d: updating best to %d/%d\n",
+ node, ddr_interface_num, rankx, byte, bit_num, dskval,
+ dhp->bytes[byte].best_start[bit_num],
+ dhp->bytes[byte].best_count[bit_num]);
+ }
+ } /* for (bit_num = 0; bit_num <= 7; bit_num++) */
+ } /* error in the byte lane */
+ } /* for (byte = 0; byte <= 8; byte++) */
+ } /* for (rankx = 0; rankx < 4; rankx++) */
+ } /* for (dskval = 0; dskval < 128; dskval++) */
+
+
+ for (byte = 0; byte <= 8; byte++) { // do bytelane(s)
+
+ for (bit_num = 0; bit_num <= 7; bit_num++) { // do bits
+ int bit_beg, bit_end;
+
+ bit_beg = 0;
+ bit_end = 128;
+
+ for (rankx = 0; rankx < 4; rankx++) { // merge ranks
+ int rank_beg, rank_end, rank_count;
+ if (!(rank_mask & (1 << rankx)))
+ continue;
+
+ dhp = &deskew_history[rankx];
+ rank_beg = dhp->bytes[byte].best_start[bit_num];
+ rank_count = dhp->bytes[byte].best_count[bit_num];
+
+ if (!rank_count) {
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: Byte %d Bit %d: EMPTY\n",
+ node, ddr_interface_num, rankx, byte, bit_num);
+ continue;
+ }
+
+ bit_beg = max(bit_beg, rank_beg);
+ rank_end = rank_beg + rank_count - DSKVAL_INCR;
+ bit_end = min(bit_end, rank_end);
+
+ } /* for (rankx = 0; rankx < 4; rankx++) */
+
+ dskdat.bytes[byte].bits[bit_num] = (bit_end + bit_beg) / 2;
+
+ } /* for (bit_num = 0; bit_num <= 7; bit_num++) */
+ } /* for (byte = 0; byte <= 8; byte++) */
+
+#endif
+
+ // update the write bit-deskew settings with final settings
+ ddr_print("N%d.LMC%d: WriteDeskewConfig: wr_ena: UPDATE\n", node, ddr_interface_num);
+ Update_Write_Deskew_Settings(node, ddr_interface_num, &dskdat);
+ Get_Deskew_Settings(node, ddr_interface_num, &dskdat);
+ Display_Deskew_Data(node, ddr_interface_num, &dskdat, VBL_NORM);
+
+ // last: NO-OP, Select all bytes, MUST leave write bit-deskew enabled
+ ddr_print("N%d.LMC%d: WriteDeskewConfig: last: wr_ena: NOOP\n", node, ddr_interface_num);
+ do_write_deskew_op(node, ddr_interface_num, BS_NOOP, ALL_BYTES, 1);
+ //Get_Deskew_Settings(node, ddr_interface_num, &dskdat);
+ //Display_Deskew_Data(node, ddr_interface_num, &dskdat, VBL_NORM);
+
+#if 0
+ // FIXME: disable/delete this when write bit-deskew works...
+ // final: NO-OP, Select all bytes, do NOT leave write bit-deskew enabled
+ ddr_print("N%d.LMC%d: WriteDeskewConfig: final: read: NOOP\n", node, ddr_interface_num);
+ do_write_deskew_op(node, ddr_interface_num, BS_NOOP, ALL_BYTES, 0);
+ Get_Deskew_Settings(node, ddr_interface_num, &dskdat);
+ Display_Deskew_Data(node, ddr_interface_num, &dskdat, VBL_NORM);
+#endif
+}
+
+#define SCALING_FACTOR (1000)
+#define Dprintf debug_print // make this "ddr_print" for extra debug output below
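+// Model the DQ net as a resistor divider to estimate the Vref the
+// receiver should sample at: Reff is RTT_WR in parallel with RTT_PARK,
+// the driver side is Rser (15 ohms) plus DQX_CTL, and Vdd is 1.2V. The
+// percentage is then encoded like a DDR4 MR6 VrefDQ value: range 1 has a
+// 60% base, range 2 (bit A6 set) a 45% base, with 0.65% steps.
+// Worked example (illustrative): rtt_wr=240, rtt_park=120, dqx_ctl=34
+// gives Reff=80, Vrefpc=~68.9%, range 1, Vref_value=14 (0x0E).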
+static int compute_Vref_1slot_2rank(int rtt_wr, int rtt_park, int dqx_ctl, int rank_count)
+{
+ uint64_t Reff_s;
+ uint64_t Rser_s = 15;
+ uint64_t Vdd = 1200;
+ uint64_t Vref;
+ //uint64_t Vl;
+ uint64_t rtt_wr_s = (((rtt_wr == 0) || (rtt_wr == 99)) ? 1*1024*1024 : rtt_wr); // 99 == HiZ
+ uint64_t rtt_park_s = (((rtt_park == 0) || ((rank_count == 1) && (rtt_wr != 0))) ? 1*1024*1024 : rtt_park);
+ uint64_t dqx_ctl_s = (dqx_ctl == 0 ? 1*1024*1024 : dqx_ctl);
+ int Vref_value;
+ uint64_t Rangepc = 6000; // range1 base is 60%
+ uint64_t Vrefpc;
+ int Vref_range = 0;
+
+ Dprintf("rtt_wr = %d, rtt_park = %d, dqx_ctl = %d\n", rtt_wr, rtt_park, dqx_ctl);
+ Dprintf("rtt_wr_s = %d, rtt_park_s = %d, dqx_ctl_s = %d\n", rtt_wr_s, rtt_park_s, dqx_ctl_s);
+
+ Reff_s = divide_nint((rtt_wr_s * rtt_park_s) , (rtt_wr_s + rtt_park_s));
+ Dprintf("Reff_s = %d\n", Reff_s);
+
+ //Vl = (((Rser_s + dqx_ctl_s) * SCALING_FACTOR) / (Rser_s + dqx_ctl_s + Reff_s)) * Vdd / SCALING_FACTOR;
+ //printf("Vl = %d\n", Vl);
+
+ Vref = (((Rser_s + dqx_ctl_s) * SCALING_FACTOR) / (Rser_s + dqx_ctl_s + Reff_s)) + SCALING_FACTOR;
+ Dprintf("Vref = %d\n", Vref);
+
+ Vref = (Vref * Vdd) / 2 / SCALING_FACTOR;
+ Dprintf("Vref = %d\n", Vref);
+
+ Vrefpc = (Vref * 100 * 100) / Vdd;
+ Dprintf("Vrefpc = %d\n", Vrefpc);
+
+ if (Vrefpc < Rangepc) { // < range1 base, use range2
+ Vref_range = 1 << 6; // set bit A6 for range2
+ Rangepc = 4500; // range2 base is 45%
+ }
+
+ Vref_value = divide_nint(Vrefpc - Rangepc, 65);
+ if (Vref_value < 0)
+ Vref_value = Vref_range; // set to base of range as lowest value
+ else
+ Vref_value |= Vref_range;
+ Dprintf("Vref_value = %d (0x%02x)\n", Vref_value, Vref_value);
+
+ debug_print("rtt_wr:%d, rtt_park:%d, dqx_ctl:%d, Vref_value:%d (0x%x)\n",
+ rtt_wr, rtt_park, dqx_ctl, Vref_value, Vref_value);
+
+ return Vref_value;
+}
+static int compute_Vref_2slot_2rank(int rtt_wr, int rtt_park_00, int rtt_park_01, int dqx_ctl, int rtt_nom)
+{
+ //uint64_t Rser = 15;
+ uint64_t Vdd = 1200;
+ //uint64_t Vref;
+ uint64_t Vl, Vlp, Vcm;
+ uint64_t Rd0, Rd1, Rpullup;
+ uint64_t rtt_wr_s = (((rtt_wr == 0) || (rtt_wr == 99)) ? 1*1024*1024 : rtt_wr); // 99 == HiZ
+ uint64_t rtt_park_00_s = (rtt_park_00 == 0 ? 1*1024*1024 : rtt_park_00);
+ uint64_t rtt_park_01_s = (rtt_park_01 == 0 ? 1*1024*1024 : rtt_park_01);
+ uint64_t dqx_ctl_s = (dqx_ctl == 0 ? 1*1024*1024 : dqx_ctl);
+ uint64_t rtt_nom_s = (rtt_nom == 0 ? 1*1024*1024 : rtt_nom);
+ int Vref_value;
+ uint64_t Rangepc = 6000; // range1 base is 60%
+ uint64_t Vrefpc;
+ int Vref_range = 0;
+
+ // Rd0 = (RTT_NOM /*parallel*/ RTT_WR) + 15 = ((RTT_NOM * RTT_WR) / (RTT_NOM + RTT_WR)) + 15
+ Rd0 = divide_nint((rtt_nom_s * rtt_wr_s), (rtt_nom_s + rtt_wr_s)) + 15;
+ //printf("Rd0 = %ld\n", Rd0);
+
+ // Rd1 = (RTT_PARK_00 /*parallel*/ RTT_PARK_01) + 15 = ((RTT_PARK_00 * RTT_PARK_01) / (RTT_PARK_00 + RTT_PARK_01)) + 15
+ Rd1 = divide_nint((rtt_park_00_s * rtt_park_01_s), (rtt_park_00_s + rtt_park_01_s)) + 15;
+ //printf("Rd1 = %ld\n", Rd1);
+
+ // Rpullup = Rd0 /*parallel*/ Rd1 = (Rd0 * Rd1) / (Rd0 + Rd1)
+ Rpullup = divide_nint((Rd0 * Rd1), (Rd0 + Rd1));
+ //printf("Rpullup = %ld\n", Rpullup);
+
+ // Vl = (DQX_CTL / (DQX_CTL + Rpullup)) * 1.2
+ Vl = divide_nint((dqx_ctl_s * Vdd), (dqx_ctl_s + Rpullup));
+ //printf("Vl = %ld\n", Vl);
+
+ // Vlp = ((15 / Rd0) * (1.2 - Vl)) + Vl
+ Vlp = divide_nint((15 * (Vdd - Vl)), Rd0) + Vl;
+ //printf("Vlp = %ld\n", Vlp);
+
+ // Vcm = (Vlp + 1.2) / 2
+ Vcm = divide_nint((Vlp + Vdd), 2);
+ //printf("Vcm = %ld\n", Vcm);
+
+ // Vrefpc = (Vcm / 1.2) * 100
+ Vrefpc = divide_nint((Vcm * 100 * 100), Vdd);
+ //printf("Vrefpc = %ld\n", Vrefpc);
+
+ if (Vrefpc < Rangepc) { // < range1 base, use range2
+ Vref_range = 1 << 6; // set bit A6 for range2
+ Rangepc = 4500; // range2 base is 45%
+ }
+
+ Vref_value = divide_nint(Vrefpc - Rangepc, 65);
+ if (Vref_value < 0)
+ Vref_value = Vref_range; // set to base of range as lowest value
+ else
+ Vref_value |= Vref_range;
+ //printf("Vref_value = %d (0x%02x)\n", Vref_value, Vref_value);
+
+ debug_print("rtt_wr:%d, rtt_park_00:%d, rtt_park_01:%d, dqx_ctl:%d, rtt_nom:%d, Vref_value:%d (0x%x)\n",
+ rtt_wr, rtt_park_00, rtt_park_01, dqx_ctl, rtt_nom, Vref_value, Vref_value);
+
+ return Vref_value;
+}
+
+// NOTE: only call this for DIMMs with 1 or 2 ranks, not 4.
+int
+compute_vref_value(bdk_node_t node, int ddr_interface_num,
+ int rankx, int dimm_count, int rank_count,
+ impedence_values_t *imp_values, int is_stacked_die)
+{
+ int computed_final_vref_value = 0;
+
+ /* Calculate an override of the measured Vref value
+ but only for configurations we know how to...*/
+ // we have code for 2-rank DIMMs in both 1-slot or 2-slot configs,
+ // and can use the 2-rank 1-slot code for 1-rank DIMMs in 1-slot configs
+ // and can use the 2-rank 2-slot code for 1-rank DIMMs in 2-slot configs
+
+ int rtt_wr, dqx_ctl, rtt_nom, index;
+ bdk_lmcx_modereg_params1_t lmc_modereg_params1;
+ bdk_lmcx_modereg_params2_t lmc_modereg_params2;
+ bdk_lmcx_comp_ctl2_t comp_ctl2;
+
+ lmc_modereg_params1.u = BDK_CSR_READ(node, BDK_LMCX_MODEREG_PARAMS1(ddr_interface_num));
+ lmc_modereg_params2.u = BDK_CSR_READ(node, BDK_LMCX_MODEREG_PARAMS2(ddr_interface_num));
+ comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num));
+ dqx_ctl = imp_values->dqx_strength[comp_ctl2.s.dqx_ctl];
+
+ // WR always comes from the current rank
+ index = (lmc_modereg_params1.u >> (rankx * 12 + 5)) & 0x03;
+ if (!CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X)) {
+ index |= (lmc_modereg_params1.u >> (51+rankx-2)) & 0x04;
+ }
+ rtt_wr = imp_values->rtt_wr_ohms [index];
+
+ // separate calculations for 1 vs 2 DIMMs per LMC
+ if (dimm_count == 1) {
+ // PARK comes from this rank if 1-rank, otherwise other rank
+ index = (lmc_modereg_params2.u >> ((rankx ^ (rank_count - 1)) * 10 + 0)) & 0x07;
+ int rtt_park = imp_values->rtt_nom_ohms[index];
+ computed_final_vref_value = compute_Vref_1slot_2rank(rtt_wr, rtt_park, dqx_ctl, rank_count);
+ } else {
+ // get both PARK values from the other DIMM
+ index = (lmc_modereg_params2.u >> ((rankx ^ 0x02) * 10 + 0)) & 0x07;
+ int rtt_park_00 = imp_values->rtt_nom_ohms[index];
+ index = (lmc_modereg_params2.u >> ((rankx ^ 0x03) * 10 + 0)) & 0x07;
+ int rtt_park_01 = imp_values->rtt_nom_ohms[index];
+ // NOM comes from this rank if 1-rank, otherwise other rank
+ index = (lmc_modereg_params1.u >> ((rankx ^ (rank_count - 1)) * 12 + 9)) & 0x07;
+ rtt_nom = imp_values->rtt_nom_ohms[index];
+ computed_final_vref_value = compute_Vref_2slot_2rank(rtt_wr, rtt_park_00, rtt_park_01, dqx_ctl, rtt_nom);
+ }
+
+#if ENABLE_COMPUTED_VREF_ADJUSTMENT
+ {
+ int saved_final_vref_value = computed_final_vref_value;
+ BDK_CSR_INIT(lmc_config, node, BDK_LMCX_CONFIG(ddr_interface_num));
+ /*
+ New computed Vref = existing computed Vref - X
+
+ The value of X depends on the configuration. Both #122 and #139 are
+ 2Rx4 RDIMMs, while #124 is a stacked-die 2Rx4, so the results fall
+ into two cases:
+
+ 1. Stacked Die: 2Rx4
+ 1-slot: offset = 7, i.e., New computed Vref = existing computed Vref - 7
+ 2-slot: offset = 6
+
+ 2. Regular: 2Rx4
+ 1-slot: offset = 3
+ 2-slot: offset = 2
+ */
+ // we know we never get called unless DDR4, so test just the other conditions
+ if((!!__bdk_dram_is_rdimm(node, 0)) &&
+ (rank_count == 2) &&
+ (lmc_config.s.mode_x4dev))
+ { // it must first be RDIMM and 2-rank and x4
+ if (is_stacked_die) { // now do according to stacked die or not...
+ computed_final_vref_value -= (dimm_count == 1) ? 7 : 6;
+ } else {
+ computed_final_vref_value -= (dimm_count == 1) ? 3 : 2;
+ }
+ // we have adjusted it, so print it out if verbosity is right
+ VB_PRT(VBL_TME, "N%d.LMC%d.R%d: adjusting computed vref from %2d (0x%02x) to %2d (0x%02x)\n",
+ node, ddr_interface_num, rankx,
+ saved_final_vref_value, saved_final_vref_value,
+ computed_final_vref_value, computed_final_vref_value);
+ }
+ }
+#endif
+ return computed_final_vref_value;
+}
+
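+// MODEREG_PARAMS1 stores RTT_WR for rank x as a split field: the low two
+// bits sit at bit (x*12 + 5) and the DDR4 extension bit at bit (51 + x).
+// EXTR_WR reassembles the 3-bit value; INSRT_WR is its inverse, clearing
+// both pieces before inserting the new value.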
+static unsigned int EXTR_WR(uint64_t u, int x)
+{
+ return (unsigned int)(((u >> (x*12+5)) & 0x3UL) | ((u >> (51+x-2)) & 0x4UL));
+}
+static void INSRT_WR(uint64_t *up, int x, int v)
+{
+ uint64_t u = *up;
+ u &= ~(((0x3UL) << (x*12+5)) | ((0x1UL) << (51+x)));
+ *up = (u | ((v & 0x3UL) << (x*12+5)) | ((v & 0x4UL) << (51+x-2)));
+ return;
+}
+
+static int encode_row_lsb_ddr3(int row_lsb, int ddr_interface_wide)
+{
+ int encoded_row_lsb;
+ int row_lsb_start = 14;
+
+ /* Decoding for row_lsb */
+ /* 000: row_lsb = mem_adr[14] */
+ /* 001: row_lsb = mem_adr[15] */
+ /* 010: row_lsb = mem_adr[16] */
+ /* 011: row_lsb = mem_adr[17] */
+ /* 100: row_lsb = mem_adr[18] */
+ /* 101: row_lsb = mem_adr[19] */
+ /* 110: row_lsb = mem_adr[20] */
+ /* 111: RESERVED */
+
+ encoded_row_lsb = row_lsb - row_lsb_start;
+
+ return encoded_row_lsb;
+}
+
+static int encode_pbank_lsb_ddr3(int pbank_lsb, int ddr_interface_wide)
+{
+ int encoded_pbank_lsb;
+
+ /* Decoding for pbank_lsb */
+ /* 0000:DIMM = mem_adr[28] / rank = mem_adr[27] (if RANK_ENA) */
+ /* 0001:DIMM = mem_adr[29] / rank = mem_adr[28] " */
+ /* 0010:DIMM = mem_adr[30] / rank = mem_adr[29] " */
+ /* 0011:DIMM = mem_adr[31] / rank = mem_adr[30] " */
+ /* 0100:DIMM = mem_adr[32] / rank = mem_adr[31] " */
+ /* 0101:DIMM = mem_adr[33] / rank = mem_adr[32] " */
+ /* 0110:DIMM = mem_adr[34] / rank = mem_adr[33] " */
+ /* 0111:DIMM = 0 / rank = mem_adr[34] " */
+ /* 1000-1111: RESERVED */
+
+ int pbank_lsb_start = 28;
+
+ encoded_pbank_lsb = pbank_lsb - pbank_lsb_start;
+
+ return encoded_pbank_lsb;
+}
+
+static uint64_t octeon_read_lmcx_ddr3_rlevel_dbg(bdk_node_t node, int ddr_interface_num, int idx)
+{
+ DRAM_CSR_MODIFY(c, node, BDK_LMCX_RLEVEL_CTL(ddr_interface_num),
+ c.s.byte = idx);
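+ /* dummy read back, presumably to ensure the byte-select write lands before sampling RLEVEL_DBG */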
+ BDK_CSR_READ(node, BDK_LMCX_RLEVEL_CTL(ddr_interface_num));
+ BDK_CSR_INIT(rlevel_dbg, node, BDK_LMCX_RLEVEL_DBG(ddr_interface_num));
+ return rlevel_dbg.s.bitmask;
+}
+
+static uint64_t octeon_read_lmcx_ddr3_wlevel_dbg(bdk_node_t node, int ddr_interface_num, int idx)
+{
+ bdk_lmcx_wlevel_dbg_t wlevel_dbg;
+
+ wlevel_dbg.u = 0;
+ wlevel_dbg.s.byte = idx;
+
+ DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_DBG(ddr_interface_num), wlevel_dbg.u);
+ BDK_CSR_READ(node, BDK_LMCX_WLEVEL_DBG(ddr_interface_num));
+
+ wlevel_dbg.u = BDK_CSR_READ(node, BDK_LMCX_WLEVEL_DBG(ddr_interface_num));
+ return wlevel_dbg.s.bitmask;
+}
+
+
+/*
+ * Apply a filter to the BITMASK results returned from Octeon
+ * read-leveling to determine the most likely delay result. This
+ * computed delay may be used to qualify the delay result returned by
+ * Octeon. Accumulate an error penalty for invalid characteristics of
+ * the bitmask so that they can be used to select the most reliable
+ * results.
+ *
+ * The algorithm searches for the largest contiguous MASK within a
+ * maximum RANGE of bits beginning with the MSB.
+ *
+ * 1. a MASK with a WIDTH less than 4 will be penalized
+ * 2. Bubbles in the bitmask that occur before or after the MASK
+ * will be penalized
+ * 3. If there are no trailing bubbles then extra bits that occur
+ * beyond the maximum RANGE will be penalized.
+ *
+ * +++++++++++++++++++++++++++++++++++++++++++++++++++
+ * + +
+ * + e.g. bitmask = 27B00 +
+ * + +
+ * + 63 +--- mstart 0 +
+ * + | | | +
+ * + | +---------+ +--- fb | +
+ * + | | range | | | +
+ * + V V V V V +
+ * + +
+ * + 0 0 ... 1 0 0 1 1 1 1 0 1 1 0 0 0 0 0 0 0 0 +
+ * + +
+ * + ^ ^ ^ +
+ * + | | mask| +
+ * + lb ---+ +-----+ +
+ * + width +
+ * + +
+ * +++++++++++++++++++++++++++++++++++++++++++++++++++
+ */
+#define RLEVEL_BITMASK_TRAILING_BITS_ERROR 5
+#define RLEVEL_BITMASK_BUBBLE_BITS_ERROR 11 // FIXME? now less than TOOLONG
+#define RLEVEL_BITMASK_NARROW_ERROR 6
+#define RLEVEL_BITMASK_BLANK_ERROR 100
+#define RLEVEL_BITMASK_TOOLONG_ERROR 12
+
+#define MASKRANGE_BITS 6
+#define MASKRANGE ((1 << MASKRANGE_BITS) - 1)
+
+static int
+validate_ddr3_rlevel_bitmask(rlevel_bitmask_t *rlevel_bitmask_p, int ddr_type)
+{
+ int i;
+ int errors = 0;
+ uint64_t mask = 0; /* Used in 64-bit comparisons */
+ int8_t mstart = 0;
+ uint8_t width = 0;
+ uint8_t firstbit = 0;
+ uint8_t lastbit = 0;
+ uint8_t bubble = 0;
+ uint8_t tbubble = 0;
+ uint8_t blank = 0;
+ uint8_t narrow = 0;
+ uint8_t trailing = 0;
+ uint64_t bitmask = rlevel_bitmask_p->bm;
+ uint8_t extras = 0;
+ uint8_t toolong = 0;
+ uint64_t temp;
+
+ if (bitmask == 0) {
+ blank += RLEVEL_BITMASK_BLANK_ERROR;
+ } else {
+
+ /* Look for fb, the first bit */
+ temp = bitmask;
+ while (!(temp & 1)) {
+ firstbit++;
+ temp >>= 1;
+ }
+
+ /* Look for lb, the last bit */
+ lastbit = firstbit;
+ while ((temp >>= 1))
+ lastbit++;
+
+ /* Start with the max range to try to find the largest mask within the bitmask data */
+ width = MASKRANGE_BITS;
+ for (mask = MASKRANGE; mask > 0; mask >>= 1, --width) {
+ for (mstart = lastbit - width + 1; mstart >= firstbit; --mstart) {
+ temp = mask << mstart;
+ if ((bitmask & temp) == temp)
+ goto done_now;
+ }
+ }
+ done_now:
+ /* look for any more contiguous 1's to the right of mstart */
+ if (width == MASKRANGE_BITS) { // only when maximum mask
+ while ((bitmask >> (mstart - 1)) & 1) { // slide right over more 1's
+ --mstart;
+ if (ddr_type == DDR4_DRAM) // only for DDR4
+ extras++; // count the number of extra bits
+ }
+ }
+
+ /* Penalize any extra 1's beyond the maximum desired mask */
+ if (extras > 0)
+ toolong = RLEVEL_BITMASK_TOOLONG_ERROR * ((1 << extras) - 1);
+
+ /* Detect if bitmask is too narrow. */
+ if (width < 4)
+ narrow = (4 - width) * RLEVEL_BITMASK_NARROW_ERROR;
+
+ /* detect leading bubble bits, that is, any 0's between first and mstart */
+ temp = bitmask >> (firstbit + 1);
+ i = mstart - firstbit - 1;
+ while (--i >= 0) {
+ if ((temp & 1) == 0)
+ bubble += RLEVEL_BITMASK_BUBBLE_BITS_ERROR;
+ temp >>= 1;
+ }
+
+ temp = bitmask >> (mstart + width + extras);
+ i = lastbit - (mstart + width + extras - 1);
+ while (--i >= 0) {
+ if (temp & 1) { /* Detect 1 bits after the trailing end of the mask, including last. */
+ trailing += RLEVEL_BITMASK_TRAILING_BITS_ERROR;
+ } else { /* Detect trailing bubble bits, that is, any 0's between end-of-mask and last */
+ tbubble += RLEVEL_BITMASK_BUBBLE_BITS_ERROR;
+ }
+ temp >>= 1;
+ }
+ }
+
+ errors = bubble + tbubble + blank + narrow + trailing + toolong;
+
+ /* Pass out useful statistics */
+ rlevel_bitmask_p->mstart = mstart;
+ rlevel_bitmask_p->width = width;
+
+ VB_PRT(VBL_DEV2, "bm:%08lx mask:%02lx, width:%2u, mstart:%2d, fb:%2u, lb:%2u"
+ " (bu:%2d, tb:%2d, bl:%2d, n:%2d, t:%2d, x:%2d) errors:%3d %s\n",
+ (unsigned long) bitmask, mask, width, mstart,
+ firstbit, lastbit, bubble, tbubble, blank, narrow,
+ trailing, toolong, errors, (errors) ? "=> invalid" : "");
+
+ return errors;
+}
+
+static int compute_ddr3_rlevel_delay(uint8_t mstart, uint8_t width, bdk_lmcx_rlevel_ctl_t rlevel_ctl)
+{
+ int delay;
+
+ debug_bitmask_print(" offset_en:%d", rlevel_ctl.cn8.offset_en);
+
+ if (rlevel_ctl.s.offset_en) {
+ delay = max(mstart, mstart + width - 1 - rlevel_ctl.s.offset);
+ } else {
+ /* if (rlevel_ctl.s.offset) { */ /* Experimental */
+ if (0) {
+ delay = max(mstart + rlevel_ctl.s.offset, mstart + 1);
+ /* Insure that the offset delay falls within the bitmask */
+ delay = min(delay, mstart + width-1);
+ } else {
+ delay = (width - 1) / 2 + mstart; /* Round down */
+ /* delay = (width/2) + mstart; */ /* Round up */
+ }
+ }
+
+ return delay;
+}
+
+#define WLEVEL_BYTE_BITS 5
+#define WLEVEL_BYTE_MSK ((1UL << WLEVEL_BYTE_BITS) - 1)
+
+static void update_wlevel_rank_struct(bdk_lmcx_wlevel_rankx_t *lmc_wlevel_rank,
+ int byte, int delay)
+{
+ bdk_lmcx_wlevel_rankx_t temp_wlevel_rank;
+ if (byte >= 0 && byte <= 8) {
+ temp_wlevel_rank.u = lmc_wlevel_rank->u;
+ temp_wlevel_rank.u &= ~(WLEVEL_BYTE_MSK << (WLEVEL_BYTE_BITS * byte));
+ temp_wlevel_rank.u |= ((delay & WLEVEL_BYTE_MSK) << (WLEVEL_BYTE_BITS * byte));
+ lmc_wlevel_rank->u = temp_wlevel_rank.u;
+ }
+}
+
+static int get_wlevel_rank_struct(bdk_lmcx_wlevel_rankx_t *lmc_wlevel_rank,
+ int byte)
+{
+ int delay = 0;
+ if (byte >= 0 && byte <= 8) {
+ delay = ((lmc_wlevel_rank->u) >> (WLEVEL_BYTE_BITS * byte)) & WLEVEL_BYTE_MSK;
+ }
+ return delay;
+}
+
+#if 0
+// entry = 1 is valid, entry = 0 is invalid
+static int
+validity_matrix[4][4] = {[0] {1,1,1,0}, // valid pairs when cv == 0: 0,0 + 0,1 + 0,2 == "7"
+ [1] {0,1,1,1}, // valid pairs when cv == 1: 1,1 + 1,2 + 1,3 == "E"
+ [2] {1,0,1,1}, // valid pairs when cv == 2: 2,2 + 2,3 + 2,0 == "D"
+ [3] {1,1,0,1}}; // valid pairs when cv == 3: 3,3 + 3,0 + 3,1 == "B"
+#endif
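+// The disabled matrix above packs into the magic number below, one nibble
+// per current value cv (low nibble is cv==0): 0x7, 0xE, 0xD, 0xB => 0xBDE7,
+// indexed by (current << 2) | next.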
+static int
+validate_seq(int *wl, int *seq)
+{
+ int seqx; // sequence index, step through the sequence array
+ int bitnum;
+ seqx = 0;
+ while (seq[seqx+1] >= 0) { // stop on next seq entry == -1
+ // but now, check current versus next
+#if 0
+ if ( !validity_matrix [wl[seq[seqx]]] [wl[seq[seqx+1]]] )
+ return 1;
+#else
+ bitnum = (wl[seq[seqx]] << 2) | wl[seq[seqx+1]];
+ if (!((1 << bitnum) & 0xBDE7)) // magic validity number (see matrix above)
+ return 1;
+#endif
+ seqx++;
+ }
+ return 0;
+}
+
+static int
+Validate_HW_WL_Settings(bdk_node_t node, int ddr_interface_num,
+ bdk_lmcx_wlevel_rankx_t *lmc_wlevel_rank,
+ int ecc_ena)
+{
+ int wl[9], byte, errors;
+
+ // arrange the sequences so
+ int useq[] = { 0,1,2,3,8,4,5,6,7,-1 }; // index 0 has byte 0, etc, ECC in middle
+ int rseq1[] = { 8,3,2,1,0,-1 }; // index 0 is ECC, then go down
+ int rseq2[] = { 4,5,6,7,-1 }; // index 0 has byte 4, then go up
+ int useqno[] = { 0,1,2,3,4,5,6,7,-1 }; // index 0 has byte 0, etc, no ECC
+ int rseq1no[] = { 3,2,1,0,-1 }; // index 0 is byte 3, then go down, no ECC
+
+ // in the CSR, bytes 0-7 are always data, byte 8 is ECC
+ for (byte = 0; byte < 8+ecc_ena; byte++) {
+ wl[byte] = (get_wlevel_rank_struct(lmc_wlevel_rank, byte) >> 1) & 3; // preprocess :-)
+ }
+
+ errors = 0;
+ if (__bdk_dram_is_rdimm(node, 0) != 0) { // RDIMM order
+ errors = validate_seq(wl, (ecc_ena) ? rseq1 : rseq1no);
+ errors += validate_seq(wl, rseq2);
+ } else { // UDIMM order
+ errors = validate_seq(wl, (ecc_ena) ? useq : useqno);
+ }
+
+ return errors;
+}
+
+#define RLEVEL_BYTE_BITS 6
+#define RLEVEL_BYTE_MSK ((1UL << RLEVEL_BYTE_BITS) - 1)
+
+static void update_rlevel_rank_struct(bdk_lmcx_rlevel_rankx_t *lmc_rlevel_rank,
+ int byte, int delay)
+{
+ bdk_lmcx_rlevel_rankx_t temp_rlevel_rank;
+ if (byte >= 0 && byte <= 8) {
+ temp_rlevel_rank.u = lmc_rlevel_rank->u & ~(RLEVEL_BYTE_MSK << (RLEVEL_BYTE_BITS * byte));
+ temp_rlevel_rank.u |= ((delay & RLEVEL_BYTE_MSK) << (RLEVEL_BYTE_BITS * byte));
+ lmc_rlevel_rank->u = temp_rlevel_rank.u;
+ }
+}
+
+#if RLEXTRAS_PATCH || !DISABLE_SW_WL_PASS_2
+static int get_rlevel_rank_struct(bdk_lmcx_rlevel_rankx_t *lmc_rlevel_rank,
+ int byte)
+{
+ int delay = 0;
+ if (byte >= 0 && byte <= 8) {
+ delay = ((lmc_rlevel_rank->u) >> (RLEVEL_BYTE_BITS * byte)) & RLEVEL_BYTE_MSK;
+ }
+ return delay;
+}
+#endif
+
+static void unpack_rlevel_settings(int ddr_interface_bytemask, int ecc_ena,
+ rlevel_byte_data_t *rlevel_byte,
+ bdk_lmcx_rlevel_rankx_t lmc_rlevel_rank)
+{
+ if ((ddr_interface_bytemask & 0xff) == 0xff) {
+ if (ecc_ena) {
+ rlevel_byte[8].delay = lmc_rlevel_rank.cn83xx.byte7;
+ rlevel_byte[7].delay = lmc_rlevel_rank.cn83xx.byte6;
+ rlevel_byte[6].delay = lmc_rlevel_rank.cn83xx.byte5;
+ rlevel_byte[5].delay = lmc_rlevel_rank.cn83xx.byte4;
+ rlevel_byte[4].delay = lmc_rlevel_rank.cn83xx.byte8; /* ECC */
+ } else {
+ rlevel_byte[7].delay = lmc_rlevel_rank.cn83xx.byte7;
+ rlevel_byte[6].delay = lmc_rlevel_rank.cn83xx.byte6;
+ rlevel_byte[5].delay = lmc_rlevel_rank.cn83xx.byte5;
+ rlevel_byte[4].delay = lmc_rlevel_rank.cn83xx.byte4;
+ }
+ } else {
+ rlevel_byte[8].delay = lmc_rlevel_rank.cn83xx.byte8; /* unused */
+ rlevel_byte[7].delay = lmc_rlevel_rank.cn83xx.byte7; /* unused */
+ rlevel_byte[6].delay = lmc_rlevel_rank.cn83xx.byte6; /* unused */
+ rlevel_byte[5].delay = lmc_rlevel_rank.cn83xx.byte5; /* unused */
+ rlevel_byte[4].delay = lmc_rlevel_rank.cn83xx.byte4; /* ECC */
+ }
+ rlevel_byte[3].delay = lmc_rlevel_rank.cn83xx.byte3;
+ rlevel_byte[2].delay = lmc_rlevel_rank.cn83xx.byte2;
+ rlevel_byte[1].delay = lmc_rlevel_rank.cn83xx.byte1;
+ rlevel_byte[0].delay = lmc_rlevel_rank.cn83xx.byte0;
+}
+
+static void pack_rlevel_settings(int ddr_interface_bytemask, int ecc_ena,
+ rlevel_byte_data_t *rlevel_byte,
+ bdk_lmcx_rlevel_rankx_t *final_rlevel_rank)
+{
+ bdk_lmcx_rlevel_rankx_t lmc_rlevel_rank = *final_rlevel_rank;
+
+ if ((ddr_interface_bytemask & 0xff) == 0xff) {
+ if (ecc_ena) {
+ lmc_rlevel_rank.cn83xx.byte7 = rlevel_byte[8].delay;
+ lmc_rlevel_rank.cn83xx.byte6 = rlevel_byte[7].delay;
+ lmc_rlevel_rank.cn83xx.byte5 = rlevel_byte[6].delay;
+ lmc_rlevel_rank.cn83xx.byte4 = rlevel_byte[5].delay;
+ lmc_rlevel_rank.cn83xx.byte8 = rlevel_byte[4].delay; /* ECC */
+ } else {
+ lmc_rlevel_rank.cn83xx.byte7 = rlevel_byte[7].delay;
+ lmc_rlevel_rank.cn83xx.byte6 = rlevel_byte[6].delay;
+ lmc_rlevel_rank.cn83xx.byte5 = rlevel_byte[5].delay;
+ lmc_rlevel_rank.cn83xx.byte4 = rlevel_byte[4].delay;
+ }
+ } else {
+ lmc_rlevel_rank.cn83xx.byte8 = rlevel_byte[8].delay;
+ lmc_rlevel_rank.cn83xx.byte7 = rlevel_byte[7].delay;
+ lmc_rlevel_rank.cn83xx.byte6 = rlevel_byte[6].delay;
+ lmc_rlevel_rank.cn83xx.byte5 = rlevel_byte[5].delay;
+ lmc_rlevel_rank.cn83xx.byte4 = rlevel_byte[4].delay;
+ }
+ lmc_rlevel_rank.cn83xx.byte3 = rlevel_byte[3].delay;
+ lmc_rlevel_rank.cn83xx.byte2 = rlevel_byte[2].delay;
+ lmc_rlevel_rank.cn83xx.byte1 = rlevel_byte[1].delay;
+ lmc_rlevel_rank.cn83xx.byte0 = rlevel_byte[0].delay;
+
+ *final_rlevel_rank = lmc_rlevel_rank;
+}
+
+#if !DISABLE_SW_WL_PASS_2
+static void rlevel_to_wlevel(bdk_lmcx_rlevel_rankx_t *lmc_rlevel_rank,
+ bdk_lmcx_wlevel_rankx_t *lmc_wlevel_rank, int byte)
+{
+ int byte_delay = get_rlevel_rank_struct(lmc_rlevel_rank, byte);
+
+ debug_print("Estimating Wlevel delay byte %d: ", byte);
+ debug_print("Rlevel=%d => ", byte_delay);
+ byte_delay = divide_roundup(byte_delay,2) & 0x1e;
+ debug_print("Wlevel=%d\n", byte_delay);
+ update_wlevel_rank_struct(lmc_wlevel_rank, byte, byte_delay);
+}
+#endif /* !DISABLE_SW_WL_PASS_2 */
+
+/* Delay trend: constant=0, decreasing=-1, increasing=1 */
+static int calc_delay_trend(int v)
+{
+ if (v == 0)
+ return (0);
+ if (v < 0)
+ return (-1);
+ return 1;
+}
+
+/* Evaluate delay sequence across the whole range of byte delays while
+** keeping track of the overall delay trend, increasing or decreasing.
+** If the trend changes, charge an error amount to the score.
+*/
+
+// NOTE: "max_adj_delay_inc" argument is, by default, 1 for DDR3 and 2 for DDR4
+
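+// Illustrative trace with delays {8, 9, 7, 8} and max_adj_delay_inc=1:
+// (8,9) sets the trend to increasing; (9,7) reverses it (one
+// nonsequential-delay error) and moves by 2 (one adjacent-delay error);
+// (7,8) reverses the trend again (a second nonsequential-delay error).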
+static int nonsequential_delays(rlevel_byte_data_t *rlevel_byte,
+ int start, int end, int max_adj_delay_inc)
+{
+ int error = 0;
+ int delay_trend, prev_trend = 0;
+ int byte_idx;
+ int delay_inc;
+ int delay_diff;
+ int byte_err;
+
+ for (byte_idx = start; byte_idx < end; ++byte_idx) {
+ byte_err = 0;
+
+ delay_diff = rlevel_byte[byte_idx+1].delay - rlevel_byte[byte_idx].delay;
+ delay_trend = calc_delay_trend(delay_diff);
+
+ debug_bitmask_print("Byte %d: %2d, Byte %d: %2d, delay_trend: %2d, prev_trend: %2d",
+ byte_idx+0, rlevel_byte[byte_idx+0].delay,
+ byte_idx+1, rlevel_byte[byte_idx+1].delay,
+ delay_trend, prev_trend);
+
+ /* Increment error each time the trend changes to the opposite direction.
+ */
+ if ((prev_trend != 0) && (delay_trend != 0) && (prev_trend != delay_trend)) {
+ byte_err += RLEVEL_NONSEQUENTIAL_DELAY_ERROR;
+ prev_trend = delay_trend;
+ debug_bitmask_print(" => Nonsequential byte delay");
+ }
+
+ delay_inc = _abs(delay_diff); // how big was the delay change, if any
+
+ /* Even if the trend did not change to the opposite direction, check for
+ the magnitude of the change, and scale the penalty by the amount that
+ the size is larger than the provided limit.
+ */
+ if ((max_adj_delay_inc != 0) && (delay_inc > max_adj_delay_inc)) {
+ byte_err += (delay_inc - max_adj_delay_inc) * RLEVEL_ADJACENT_DELAY_ERROR;
+ debug_bitmask_print(" => Adjacent delay error");
+ }
+
+ debug_bitmask_print("\n");
+ if (delay_trend != 0)
+ prev_trend = delay_trend;
+
+ rlevel_byte[byte_idx+1].sqerrs = byte_err;
+ error += byte_err;
+ }
+ return error;
+}
+
+static int roundup_ddr3_wlevel_bitmask(int bitmask)
+{
+ int shifted_bitmask;
+ int leader;
+ int delay;
+
+ for (leader=0; leader<8; ++leader) {
+ shifted_bitmask = (bitmask>>leader);
+ if ((shifted_bitmask&1) == 0)
+ break;
+ }
+
+ for (/*leader=leader*/; leader<16; ++leader) {
+ shifted_bitmask = (bitmask>>(leader%8));
+ if (shifted_bitmask&1)
+ break;
+ }
+
+ delay = (leader & 1) ? leader + 1 : leader;
+ delay = delay % 8;
+
+ return delay;
+}
+
+/* Check to see if any custom offset values are provided */
+static int is_dll_offset_provided(const int8_t *dll_offset_table)
+{
+ int i;
+ if (dll_offset_table != NULL) {
+ for (i=0; i<9; ++i) {
+ if (dll_offset_table[i] != 0)
+ return (1);
+ }
+ }
+ return (0);
+}
+
+/////////////////// These are the RLEVEL settings display routines
+
+// flags
+#define WITH_NOTHING 0
+#define WITH_SCORE 1
+#define WITH_AVERAGE 2
+#define WITH_FINAL 4
+#define WITH_COMPUTE 8
+static void do_display_RL(bdk_node_t node, int ddr_interface_num,
+ bdk_lmcx_rlevel_rankx_t lmc_rlevel_rank,
+ int rank, int flags, int score)
+{
+ char score_buf[16];
+ if (flags & WITH_SCORE)
+ snprintf(score_buf, sizeof(score_buf), "(%d)", score);
+ else {
+ score_buf[0] = ' '; score_buf[1] = 0;
+ }
+
+ const char *msg_buf;
+ char hex_buf[20];
+ if (flags & WITH_AVERAGE) {
+ msg_buf = " DELAY AVERAGES ";
+ } else if (flags & WITH_FINAL) {
+ msg_buf = " FINAL SETTINGS ";
+ } else if (flags & WITH_COMPUTE) {
+ msg_buf = " COMPUTED DELAYS ";
+ } else {
+ snprintf(hex_buf, sizeof(hex_buf), "0x%016lX", lmc_rlevel_rank.u);
+ msg_buf = hex_buf;
+ }
+
+ ddr_print("N%d.LMC%d.R%d: Rlevel Rank %#4x, %s : %5d %5d %5d %5d %5d %5d %5d %5d %5d %s\n",
+ node, ddr_interface_num, rank,
+ lmc_rlevel_rank.s.status,
+ msg_buf,
+ lmc_rlevel_rank.cn83xx.byte8,
+ lmc_rlevel_rank.cn83xx.byte7,
+ lmc_rlevel_rank.cn83xx.byte6,
+ lmc_rlevel_rank.cn83xx.byte5,
+ lmc_rlevel_rank.cn83xx.byte4,
+ lmc_rlevel_rank.cn83xx.byte3,
+ lmc_rlevel_rank.cn83xx.byte2,
+ lmc_rlevel_rank.cn83xx.byte1,
+ lmc_rlevel_rank.cn83xx.byte0,
+ score_buf
+ );
+}
+
+static inline void
+display_RL(bdk_node_t node, int ddr_interface_num, bdk_lmcx_rlevel_rankx_t lmc_rlevel_rank, int rank)
+{
+ do_display_RL(node, ddr_interface_num, lmc_rlevel_rank, rank, 0, 0);
+}
+
+static inline void
+display_RL_with_score(bdk_node_t node, int ddr_interface_num, bdk_lmcx_rlevel_rankx_t lmc_rlevel_rank, int rank, int score)
+{
+ do_display_RL(node, ddr_interface_num, lmc_rlevel_rank, rank, 1, score);
+}
+
+#if !PICK_BEST_RANK_SCORE_NOT_AVG
+static inline void
+display_RL_with_average(bdk_node_t node, int ddr_interface_num, bdk_lmcx_rlevel_rankx_t lmc_rlevel_rank, int rank, int score)
+{
+ do_display_RL(node, ddr_interface_num, lmc_rlevel_rank, rank, 3, score);
+}
+#endif
+
+static inline void
+display_RL_with_final(bdk_node_t node, int ddr_interface_num, bdk_lmcx_rlevel_rankx_t lmc_rlevel_rank, int rank)
+{
+ do_display_RL(node, ddr_interface_num, lmc_rlevel_rank, rank, 4, 0);
+}
+
+static inline void
+display_RL_with_computed(bdk_node_t node, int ddr_interface_num, bdk_lmcx_rlevel_rankx_t lmc_rlevel_rank, int rank, int score)
+{
+ do_display_RL(node, ddr_interface_num, lmc_rlevel_rank, rank, 9, score);
+}
+
+// flag values
+#define WITH_RODT_BLANK 0
+#define WITH_RODT_SKIPPING 1
+#define WITH_RODT_BESTROW 2
+#define WITH_RODT_BESTSCORE 3
+// control
+#define SKIP_SKIPPING 1
+
+static const char *with_rodt_canned_msgs[4] = { " ", "SKIPPING ", "BEST ROW ", "BEST SCORE" };
+
+static void display_RL_with_RODT(bdk_node_t node, int ddr_interface_num,
+ bdk_lmcx_rlevel_rankx_t lmc_rlevel_rank, int rank, int score,
+ int nom_ohms, int rodt_ohms, int flag)
+{
+ const char *msg_buf;
+ char set_buf[20];
+#if SKIP_SKIPPING
+ if (flag == WITH_RODT_SKIPPING) return;
+#endif
+ msg_buf = with_rodt_canned_msgs[flag];
+ if (nom_ohms < 0) {
+ snprintf(set_buf, sizeof(set_buf), " RODT %3d ", rodt_ohms);
+ } else {
+ snprintf(set_buf, sizeof(set_buf), "NOM %3d RODT %3d", nom_ohms, rodt_ohms);
+ }
+
+ VB_PRT(VBL_TME, "N%d.LMC%d.R%d: Rlevel %s %s : %5d %5d %5d %5d %5d %5d %5d %5d %5d (%d)\n",
+ node, ddr_interface_num, rank,
+ set_buf, msg_buf,
+ lmc_rlevel_rank.cn83xx.byte8,
+ lmc_rlevel_rank.cn83xx.byte7,
+ lmc_rlevel_rank.cn83xx.byte6,
+ lmc_rlevel_rank.cn83xx.byte5,
+ lmc_rlevel_rank.cn83xx.byte4,
+ lmc_rlevel_rank.cn83xx.byte3,
+ lmc_rlevel_rank.cn83xx.byte2,
+ lmc_rlevel_rank.cn83xx.byte1,
+ lmc_rlevel_rank.cn83xx.byte0,
+ score
+ );
+
+ // FIXME: does this help make the output a little easier to focus?
+ if (flag == WITH_RODT_BESTSCORE) {
+ VB_PRT(VBL_DEV, "-----------\n");
+ }
+}
+
+static void
+do_display_WL(bdk_node_t node, int ddr_interface_num, bdk_lmcx_wlevel_rankx_t lmc_wlevel_rank, int rank, int flags)
+{
+ const char *msg_buf;
+ char hex_buf[20];
+ int vbl;
+ if (flags & WITH_FINAL) {
+ msg_buf = " FINAL SETTINGS ";
+ vbl = VBL_NORM;
+ } else {
+ snprintf(hex_buf, sizeof(hex_buf), "0x%016lX", lmc_wlevel_rank.u);
+ msg_buf = hex_buf;
+ vbl = VBL_FAE;
+ }
+
+ VB_PRT(vbl, "N%d.LMC%d.R%d: Wlevel Rank %#4x, %s : %5d %5d %5d %5d %5d %5d %5d %5d %5d\n",
+ node, ddr_interface_num, rank,
+ lmc_wlevel_rank.s.status,
+ msg_buf,
+ lmc_wlevel_rank.s.byte8,
+ lmc_wlevel_rank.s.byte7,
+ lmc_wlevel_rank.s.byte6,
+ lmc_wlevel_rank.s.byte5,
+ lmc_wlevel_rank.s.byte4,
+ lmc_wlevel_rank.s.byte3,
+ lmc_wlevel_rank.s.byte2,
+ lmc_wlevel_rank.s.byte1,
+ lmc_wlevel_rank.s.byte0
+ );
+}
+
+static inline void
+display_WL(bdk_node_t node, int ddr_interface_num, bdk_lmcx_wlevel_rankx_t lmc_wlevel_rank, int rank)
+{
+ do_display_WL(node, ddr_interface_num, lmc_wlevel_rank, rank, WITH_NOTHING);
+}
+
+static inline void
+display_WL_with_final(bdk_node_t node, int ddr_interface_num, bdk_lmcx_wlevel_rankx_t lmc_wlevel_rank, int rank)
+{
+ do_display_WL(node, ddr_interface_num, lmc_wlevel_rank, rank, WITH_FINAL);
+}
+
+// pretty-print bitmask adjuster
+static uint64_t
+PPBM(uint64_t bm)
+{
+ if (bm != 0ul) {
+ while ((bm & 0x0fful) == 0ul)
+ bm >>= 4;
+ }
+ return bm;
+}
+
+// xlate PACKED index to UNPACKED index to use with rlevel_byte
+#define XPU(i,e) (((i) < 4)?(i):((i)<8)?(i)+(e):4)
+// xlate UNPACKED index to PACKED index to use with rlevel_bitmask
+#define XUP(i,e) (((i) < 4)?(i):((i)>4)?(i)-(e):8)
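+// e.g. with e=ecc_ena=1, PACKED indices {0,1,2,3,4,5,6,7,8=ECC} map to
+// UNPACKED {0,1,2,3,5,6,7,8,4}, matching the layout produced by
+// unpack_rlevel_settings() above; XUP() inverts the mapping.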
+
+// flag values
+#define WITH_WL_BITMASKS 0
+#define WITH_RL_BITMASKS 1
+#define WITH_RL_MASK_SCORES 2
+#define WITH_RL_SEQ_SCORES 3
+static void
+do_display_BM(bdk_node_t node, int ddr_interface_num, int rank, void *bm, int flags, int ecc_ena)
+{
+ int ecc = !!ecc_ena;
+ if (flags == WITH_WL_BITMASKS) { // wlevel_bitmask array in PACKED index order, so just print them
+ int *bitmasks = (int *)bm;
+
+ ddr_print("N%d.LMC%d.R%d: Wlevel Debug Results : %05x %05x %05x %05x %05x %05x %05x %05x %05x\n",
+ node, ddr_interface_num, rank,
+ bitmasks[8],
+ bitmasks[7],
+ bitmasks[6],
+ bitmasks[5],
+ bitmasks[4],
+ bitmasks[3],
+ bitmasks[2],
+ bitmasks[1],
+ bitmasks[0]
+ );
+ } else
+ if (flags == WITH_RL_BITMASKS) { // rlevel_bitmask array in PACKED index order, so just print them
+ rlevel_bitmask_t *rlevel_bitmask = (rlevel_bitmask_t *)bm;
+ ddr_print("N%d.LMC%d.R%d: Rlevel Debug Bitmasks 8:0 : %05lx %05lx %05lx %05lx %05lx %05lx %05lx %05lx %05lx\n",
+ node, ddr_interface_num, rank,
+ PPBM(rlevel_bitmask[8].bm),
+ PPBM(rlevel_bitmask[7].bm),
+ PPBM(rlevel_bitmask[6].bm),
+ PPBM(rlevel_bitmask[5].bm),
+ PPBM(rlevel_bitmask[4].bm),
+ PPBM(rlevel_bitmask[3].bm),
+ PPBM(rlevel_bitmask[2].bm),
+ PPBM(rlevel_bitmask[1].bm),
+ PPBM(rlevel_bitmask[0].bm)
+ );
+ } else
+ if (flags == WITH_RL_MASK_SCORES) { // rlevel_bitmask array in PACKED index order, so just print them
+ rlevel_bitmask_t *rlevel_bitmask = (rlevel_bitmask_t *)bm;
+ ddr_print("N%d.LMC%d.R%d: Rlevel Debug Bitmask Scores 8:0 : %5d %5d %5d %5d %5d %5d %5d %5d %5d\n",
+ node, ddr_interface_num, rank,
+ rlevel_bitmask[8].errs,
+ rlevel_bitmask[7].errs,
+ rlevel_bitmask[6].errs,
+ rlevel_bitmask[5].errs,
+ rlevel_bitmask[4].errs,
+ rlevel_bitmask[3].errs,
+ rlevel_bitmask[2].errs,
+ rlevel_bitmask[1].errs,
+ rlevel_bitmask[0].errs
+ );
+ } else
+ if (flags == WITH_RL_SEQ_SCORES) { // rlevel_byte array in UNPACKED index order, so xlate and print them
+ rlevel_byte_data_t *rlevel_byte = (rlevel_byte_data_t *)bm;
+ ddr_print("N%d.LMC%d.R%d: Rlevel Debug Non-seq Scores 8:0 : %5d %5d %5d %5d %5d %5d %5d %5d %5d\n",
+ node, ddr_interface_num, rank,
+ rlevel_byte[XPU(8,ecc)].sqerrs,
+ rlevel_byte[XPU(7,ecc)].sqerrs,
+ rlevel_byte[XPU(6,ecc)].sqerrs,
+ rlevel_byte[XPU(5,ecc)].sqerrs,
+ rlevel_byte[XPU(4,ecc)].sqerrs,
+ rlevel_byte[XPU(3,ecc)].sqerrs,
+ rlevel_byte[XPU(2,ecc)].sqerrs,
+ rlevel_byte[XPU(1,ecc)].sqerrs,
+ rlevel_byte[XPU(0,ecc)].sqerrs
+ );
+ }
+}
+
+static inline void
+display_WL_BM(bdk_node_t node, int ddr_interface_num, int rank, int *bitmasks)
+{
+ do_display_BM(node, ddr_interface_num, rank, (void *)bitmasks, WITH_WL_BITMASKS, 0);
+}
+
+static inline void
+display_RL_BM(bdk_node_t node, int ddr_interface_num, int rank, rlevel_bitmask_t *bitmasks, int ecc_ena)
+{
+ do_display_BM(node, ddr_interface_num, rank, (void *)bitmasks, WITH_RL_BITMASKS, ecc_ena);
+}
+
+static inline void
+display_RL_BM_scores(bdk_node_t node, int ddr_interface_num, int rank, rlevel_bitmask_t *bitmasks, int ecc_ena)
+{
+ do_display_BM(node, ddr_interface_num, rank, (void *)bitmasks, WITH_RL_MASK_SCORES, ecc_ena);
+}
+
+static inline void
+display_RL_SEQ_scores(bdk_node_t node, int ddr_interface_num, int rank, rlevel_byte_data_t *bytes, int ecc_ena)
+{
+ do_display_BM(node, ddr_interface_num, rank, (void *)bytes, WITH_RL_SEQ_SCORES, ecc_ena);
+}
+
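+// Load one DLL offset; per process_custom_dll_offsets() below, mode 2
+// selects the read offsets and other mode values the write offsets. The
+// offset field is sign-magnitude: bits [5:0] magnitude, bit 6 sign
+// (assuming _sign() yields 1 for negative values).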
+unsigned short load_dll_offset(bdk_node_t node, int ddr_interface_num,
+ int dll_offset_mode, int byte_offset, int byte)
+{
+ bdk_lmcx_dll_ctl3_t ddr_dll_ctl3;
+ /* byte_sel:
+ 0x1 = byte 0, ..., 0x9 = byte 8
+ 0xA = all bytes */
+ int byte_sel = (byte == 10) ? byte : byte + 1;
+
+ ddr_dll_ctl3.u = BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(ddr_interface_num));
+ SET_DDR_DLL_CTL3(load_offset, 0);
+ DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(ddr_interface_num), ddr_dll_ctl3.u);
+ ddr_dll_ctl3.u = BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(ddr_interface_num));
+
+ SET_DDR_DLL_CTL3(mode_sel, dll_offset_mode);
+ SET_DDR_DLL_CTL3(offset, (_abs(byte_offset)&0x3f) | (_sign(byte_offset) << 6)); /* Always 6-bit field? */
+ SET_DDR_DLL_CTL3(byte_sel, byte_sel);
+ DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(ddr_interface_num), ddr_dll_ctl3.u);
+ ddr_dll_ctl3.u = BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(ddr_interface_num));
+
+ SET_DDR_DLL_CTL3(load_offset, 1);
+ DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(ddr_interface_num), ddr_dll_ctl3.u);
+ ddr_dll_ctl3.u = BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(ddr_interface_num));
+
+ return ((unsigned short) GET_DDR_DLL_CTL3(offset));
+}
+
+void change_dll_offset_enable(bdk_node_t node, int ddr_interface_num, int change)
+{
+ bdk_lmcx_dll_ctl3_t ddr_dll_ctl3;
+
+ ddr_dll_ctl3.u = BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(ddr_interface_num));
+ SET_DDR_DLL_CTL3(offset_ena, !!change);
+ DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(ddr_interface_num), ddr_dll_ctl3.u);
+ ddr_dll_ctl3.u = BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(ddr_interface_num));
+}
+
+static void process_custom_dll_offsets(bdk_node_t node, int ddr_interface_num, const char *enable_str,
+ const int8_t *offsets, const char *byte_str, int mode)
+{
+ const char *s;
+ int enabled;
+ int provided;
+
+ if ((s = lookup_env_parameter("%s", enable_str)) != NULL) {
+ enabled = !!strtol(s, NULL, 0);
+ } else
+ enabled = -1;
+
+ // enabled == -1: no override, do only configured offsets if provided
+ // enabled == 0: override OFF, do NOT do it even if configured offsets provided
+ // enabled == 1: override ON, do it for overrides plus configured offsets
+
+ if (enabled == 0)
+ return;
+
+ provided = is_dll_offset_provided(offsets);
+
+ if (enabled < 0 && !provided)
+ return;
+
+ int byte_offset;
+ unsigned short offset[9] = {0};
+ int byte;
+
+ // offsets need to be disabled while loading
+ change_dll_offset_enable(node, ddr_interface_num, 0);
+
+ for (byte = 0; byte < 9; ++byte) {
+
+ // always take the provided, if available
+ byte_offset = (provided) ? offsets[byte] : 0;
+
+ // then, if enabled, use any overrides present
+ if (enabled > 0) {
+ if ((s = lookup_env_parameter(byte_str, ddr_interface_num, byte)) != NULL) {
+ byte_offset = strtol(s, NULL, 0);
+ }
+ }
+
+ offset[byte] = load_dll_offset(node, ddr_interface_num, mode, byte_offset, byte);
+ }
+
+ // re-enable offsets after loading
+ change_dll_offset_enable(node, ddr_interface_num, 1);
+
+ ddr_print("N%d.LMC%d: DLL %s Offset 8:0 :"
+ " 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
+ node, ddr_interface_num, (mode == 2) ? "Read " : "Write",
+ offset[8], offset[7], offset[6], offset[5], offset[4],
+ offset[3], offset[2], offset[1], offset[0]);
+}
+
+void perform_octeon3_ddr3_sequence(bdk_node_t node, int rank_mask, int ddr_interface_num, int sequence)
+{
+ /*
+ * 3. Without changing any other fields in LMC(0)_CONFIG, write
+ * LMC(0)_CONFIG[RANKMASK] then write both
+ * LMC(0)_SEQ_CTL[SEQ_SEL,INIT_START] = 1 with a single CSR write
+ * operation. LMC(0)_CONFIG[RANKMASK] bits should be set to indicate
+ * the ranks that will participate in the sequence.
+ *
+ * The LMC(0)_SEQ_CTL[SEQ_SEL] value should select power-up/init or
+ * self-refresh exit, depending on whether the DRAM parts are in
+ * self-refresh and whether their contents should be preserved. While
+ * LMC performs these sequences, it will not perform any other DDR3
+ * transactions. When the sequence is complete, hardware sets the
+ * LMC(0)_CONFIG[INIT_STATUS] bits for the ranks that have been
+ * initialized.
+ *
+ * If power-up/init is selected immediately following a DRESET
+ * assertion, LMC executes the sequence described in the "Reset and
+ * Initialization Procedure" section of the JEDEC DDR3
+ * specification. This includes activating CKE, writing all four DDR3
+ * mode registers on all selected ranks, and issuing the required ZQCL
+ * command. The LMC(0)_CONFIG[RANKMASK] value should select all ranks
+ * with attached DRAM in this case. If LMC(0)_CONTROL[RDIMM_ENA] = 1,
+ * LMC writes the JEDEC standard SSTE32882 control words selected by
+ * LMC(0)_DIMM_CTL[DIMM*_WMASK] between DDR_CKE* signal assertion and
+ * the first DDR3 mode register write operation.
+ * LMC(0)_DIMM_CTL[DIMM*_WMASK] should be cleared to 0 if the
+ * corresponding DIMM is not present.
+ *
+ * If self-refresh exit is selected, LMC executes the required SRX
+ * command followed by a refresh and ZQ calibration. Section 4.5
+ * describes behavior of a REF + ZQCS. LMC does not write the DDR3
+ * mode registers as part of this sequence, and the mode register
+ * parameters must match at self-refresh entry and exit times.
+ *
+ * 4. Read LMC(0)_SEQ_CTL and wait for LMC(0)_SEQ_CTL[SEQ_COMPLETE] to be
+ * set.
+ *
+ * 5. Read LMC(0)_CONFIG[INIT_STATUS] and confirm that all ranks have
+ * been initialized.
+ */
+
+ const char *s;
+ static const char *sequence_str[] = {
+ "Power-up/init",
+ "Read-leveling",
+ "Self-refresh entry",
+ "Self-refresh exit",
+ "Illegal",
+ "Illegal",
+ "Write-leveling",
+ "Init Register Control Words",
+ "Mode Register Write",
+ "MPR Register Access",
+ "LMC Deskew/Internal Vref training",
+ "Offset Training"
+ };
+
+ bdk_lmcx_seq_ctl_t seq_ctl;
+ bdk_lmcx_config_t lmc_config;
+
+ lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(ddr_interface_num));
+ lmc_config.s.rankmask = rank_mask;
+ DRAM_CSR_WRITE(node, BDK_LMCX_CONFIG(ddr_interface_num), lmc_config.u);
+
+ seq_ctl.u = 0;
+
+ seq_ctl.s.init_start = 1;
+ seq_ctl.s.seq_sel = sequence;
+
+ VB_PRT(VBL_SEQ, "N%d.LMC%d: Performing LMC sequence=%x: rank_mask=0x%02x, %s\n",
+ node, ddr_interface_num, sequence, rank_mask, sequence_str[sequence]);
+
+ if ((s = lookup_env_parameter("ddr_trigger_sequence%d", sequence)) != NULL) {
+ int trigger = strtoul(s, NULL, 0);
+ if (trigger)
+ pulse_gpio_pin(node, 1, 2);
+ }
+
+ DRAM_CSR_WRITE(node, BDK_LMCX_SEQ_CTL(ddr_interface_num), seq_ctl.u);
+ BDK_CSR_READ(node, BDK_LMCX_SEQ_CTL(ddr_interface_num));
+
+ /* Wait 100us minimum before checking for sequence complete */
+ bdk_wait_usec(100);
+ if (!bdk_is_platform(BDK_PLATFORM_ASIM) &&
+ BDK_CSR_WAIT_FOR_FIELD(node, BDK_LMCX_SEQ_CTL(ddr_interface_num), seq_complete, ==, 1, 1000000))
+ {
+ error_print("N%d.LMC%d: Timeout waiting for LMC sequence=%x, rank_mask=0x%02x, ignoring...\n",
+ node, ddr_interface_num, sequence, rank_mask);
+ }
+ else {
+ VB_PRT(VBL_SEQ, "N%d.LMC%d: LMC sequence=%x: Completed.\n", node, ddr_interface_num, sequence);
+ }
+}
+
+void ddr4_mrw(bdk_node_t node, int ddr_interface_num, int rank,
+ int mr_wr_addr, int mr_wr_sel, int mr_wr_bg1)
+{
+ bdk_lmcx_mr_mpr_ctl_t lmc_mr_mpr_ctl;
+
+ lmc_mr_mpr_ctl.u = 0;
+ lmc_mr_mpr_ctl.s.mr_wr_addr = (mr_wr_addr == -1) ? 0 : mr_wr_addr;
+ lmc_mr_mpr_ctl.s.mr_wr_sel = mr_wr_sel;
+ lmc_mr_mpr_ctl.s.mr_wr_rank = rank;
+ //lmc_mr_mpr_ctl.s.mr_wr_pda_mask =
+ //lmc_mr_mpr_ctl.s.mr_wr_pda_enable =
+ //lmc_mr_mpr_ctl.s.mpr_loc =
+ //lmc_mr_mpr_ctl.s.mpr_wr =
+ //lmc_mr_mpr_ctl.s.mpr_bit_select =
+ //lmc_mr_mpr_ctl.s.mpr_byte_select =
+ //lmc_mr_mpr_ctl.s.mpr_whole_byte_enable =
+ lmc_mr_mpr_ctl.s.mr_wr_use_default_value = (mr_wr_addr == -1) ? 1 : 0;
+ lmc_mr_mpr_ctl.s.mr_wr_bg1 = mr_wr_bg1;
+ DRAM_CSR_WRITE(node, BDK_LMCX_MR_MPR_CTL(ddr_interface_num), lmc_mr_mpr_ctl.u);
+
+ /* Mode Register Write */
+ perform_octeon3_ddr3_sequence(node, 1 << rank, ddr_interface_num, 0x8);
+}
+
+#define InvA0_17(x) (x ^ 0x22bf8)
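+ /* The 0x22bf8 mask above flips address bits A3..A9, A11, A13 and A17,
+ which appear to be the address outputs a DDR4 RCD inverts on its
+ B-side; bank-group inversion is handled separately via the
+ mr_wr_bg1 argument. */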
+static void set_mpr_mode (bdk_node_t node, int rank_mask,
+ int ddr_interface_num, int dimm_count, int mpr, int bg1)
+{
+ int rankx;
+
+ ddr_print("All Ranks: Set mpr mode = %x %c-side\n",
+ mpr, (bg1==0) ? 'A' : 'B');
+
+ for (rankx = 0; rankx < dimm_count*4; rankx++) {
+ if (!(rank_mask & (1 << rankx)))
+ continue;
+ if (bg1 == 0)
+ ddr4_mrw(node, ddr_interface_num, rankx, mpr<<2, 3, bg1); /* MR3 A-side */
+ else
+ ddr4_mrw(node, ddr_interface_num, rankx, InvA0_17(mpr<<2), ~3, bg1); /* MR3 B-side */
+ }
+}
+
+#if ENABLE_DISPLAY_MPR_PAGE
+static void do_ddr4_mpr_read(bdk_node_t node, int ddr_interface_num, int rank,
+ int page, int location)
+{
+ bdk_lmcx_mr_mpr_ctl_t lmc_mr_mpr_ctl;
+
+ lmc_mr_mpr_ctl.u = BDK_CSR_READ(node, BDK_LMCX_MR_MPR_CTL(ddr_interface_num));
+
+ lmc_mr_mpr_ctl.s.mr_wr_addr = 0;
+ lmc_mr_mpr_ctl.s.mr_wr_sel = page; /* Page */
+ lmc_mr_mpr_ctl.s.mr_wr_rank = rank;
+ //lmc_mr_mpr_ctl.s.mr_wr_pda_mask =
+ //lmc_mr_mpr_ctl.s.mr_wr_pda_enable =
+ lmc_mr_mpr_ctl.s.mpr_loc = location;
+ lmc_mr_mpr_ctl.s.mpr_wr = 0; /* Read=0, Write=1 */
+ //lmc_mr_mpr_ctl.s.mpr_bit_select =
+ //lmc_mr_mpr_ctl.s.mpr_byte_select =
+ //lmc_mr_mpr_ctl.s.mpr_whole_byte_enable =
+ //lmc_mr_mpr_ctl.s.mr_wr_use_default_value =
+ //lmc_mr_mpr_ctl.s.mr_wr_bg1 =
+
+ DRAM_CSR_WRITE(node, BDK_LMCX_MR_MPR_CTL(ddr_interface_num), lmc_mr_mpr_ctl.u);
+
+ /* MPR register access sequence */
+ perform_octeon3_ddr3_sequence(node, 1 << rank, ddr_interface_num, 0x9);
+
+ debug_print("LMC_MR_MPR_CTL : 0x%016lx\n", lmc_mr_mpr_ctl.u);
+ debug_print("lmc_mr_mpr_ctl.s.mr_wr_addr: 0x%02x\n", lmc_mr_mpr_ctl.s.mr_wr_addr);
+ debug_print("lmc_mr_mpr_ctl.s.mr_wr_sel : 0x%02x\n", lmc_mr_mpr_ctl.s.mr_wr_sel);
+ debug_print("lmc_mr_mpr_ctl.s.mpr_loc : 0x%02x\n", lmc_mr_mpr_ctl.s.mpr_loc);
+ debug_print("lmc_mr_mpr_ctl.s.mpr_wr : 0x%02x\n", lmc_mr_mpr_ctl.s.mpr_wr);
+
+}
+#endif
+
+int set_rdimm_mode(bdk_node_t node, int ddr_interface_num, int enable)
+{
+ bdk_lmcx_control_t lmc_control;
+ int save_rdimm_mode;
+
+ lmc_control.u = BDK_CSR_READ(node, BDK_LMCX_CONTROL(ddr_interface_num));
+ save_rdimm_mode = lmc_control.s.rdimm_ena;
+ lmc_control.s.rdimm_ena = enable;
+ VB_PRT(VBL_FAE, "Setting RDIMM_ENA = %x\n", enable);
+ DRAM_CSR_WRITE(node, BDK_LMCX_CONTROL(ddr_interface_num), lmc_control.u);
+
+ return (save_rdimm_mode);
+}
+
+#if ENABLE_DISPLAY_MPR_PAGE
+static void ddr4_mpr_read(bdk_node_t node, int ddr_interface_num, int rank,
+ int page, int location, uint64_t *mpr_data)
+{
+ do_ddr4_mpr_read(node, ddr_interface_num, rank, page, location);
+
+ mpr_data[0] = BDK_CSR_READ(node, BDK_LMCX_MPR_DATA0(ddr_interface_num));
+ mpr_data[1] = BDK_CSR_READ(node, BDK_LMCX_MPR_DATA1(ddr_interface_num));
+ mpr_data[2] = BDK_CSR_READ(node, BDK_LMCX_MPR_DATA2(ddr_interface_num));
+
+ debug_print("MPR Read %016lx.%016lx.%016lx\n", mpr_data[2], mpr_data[1], mpr_data[0]);
+}
+
+/* Display MPR values for Page Location */
+static void Display_MPR_Page_Location(bdk_node_t node, int rank,
+ int ddr_interface_num, int dimm_count,
+ int page, int location, uint64_t *mpr_data)
+{
+ ddr4_mpr_read(node, ddr_interface_num, rank, page, location, mpr_data);
+ ddr_print("MPR Page %d, Loc %d %016lx.%016lx.%016lx\n",
+ page, location, mpr_data[2], mpr_data[1], mpr_data[0]);
+}
+
+/* Display MPR values for Page */
+static void Display_MPR_Page(bdk_node_t node, int rank_mask,
+ int ddr_interface_num, int dimm_count, int page)
+{
+ int rankx;
+ uint64_t mpr_data[3];
+
+ for (rankx = 0; rankx < dimm_count * 4;rankx++) {
+ if (!(rank_mask & (1 << rankx)))
+ continue;
+
+ ddr_print("Rank %d: MPR values for Page %d\n", rankx, page);
+ for (int location = 0; location < 4; location++) {
+ Display_MPR_Page_Location(node, rankx, ddr_interface_num, dimm_count,
+ page, location, &mpr_data[0]);
+ }
+
+ } /* for (rankx = 0; rankx < dimm_count * 4; rankx++) */
+}
+#endif
+
+void ddr4_mpr_write(bdk_node_t node, int ddr_interface_num, int rank,
+ int page, int location, uint8_t mpr_data)
+{
+ bdk_lmcx_mr_mpr_ctl_t lmc_mr_mpr_ctl;
+
+ lmc_mr_mpr_ctl.u = 0;
+ lmc_mr_mpr_ctl.s.mr_wr_addr = mpr_data;
+ lmc_mr_mpr_ctl.s.mr_wr_sel = page; /* Page */
+ lmc_mr_mpr_ctl.s.mr_wr_rank = rank;
+ //lmc_mr_mpr_ctl.s.mr_wr_pda_mask =
+ //lmc_mr_mpr_ctl.s.mr_wr_pda_enable =
+ lmc_mr_mpr_ctl.s.mpr_loc = location;
+ lmc_mr_mpr_ctl.s.mpr_wr = 1; /* Read=0, Write=1 */
+ //lmc_mr_mpr_ctl.s.mpr_bit_select =
+ //lmc_mr_mpr_ctl.s.mpr_byte_select =
+ //lmc_mr_mpr_ctl.s.mpr_whole_byte_enable =
+ //lmc_mr_mpr_ctl.s.mr_wr_use_default_value =
+ //lmc_mr_mpr_ctl.s.mr_wr_bg1 =
+ DRAM_CSR_WRITE(node, BDK_LMCX_MR_MPR_CTL(ddr_interface_num), lmc_mr_mpr_ctl.u);
+
+ /* MPR register access sequence */
+ perform_octeon3_ddr3_sequence(node, (1 << rank), ddr_interface_num, 0x9);
+
+ debug_print("LMC_MR_MPR_CTL : 0x%016lx\n", lmc_mr_mpr_ctl.u);
+ debug_print("lmc_mr_mpr_ctl.s.mr_wr_addr: 0x%02x\n", lmc_mr_mpr_ctl.s.mr_wr_addr);
+ debug_print("lmc_mr_mpr_ctl.s.mr_wr_sel : 0x%02x\n", lmc_mr_mpr_ctl.s.mr_wr_sel);
+ debug_print("lmc_mr_mpr_ctl.s.mpr_loc : 0x%02x\n", lmc_mr_mpr_ctl.s.mpr_loc);
+ debug_print("lmc_mr_mpr_ctl.s.mpr_wr : 0x%02x\n", lmc_mr_mpr_ctl.s.mpr_wr);
+}
+
+void set_vref(bdk_node_t node, int ddr_interface_num, int rank,
+ int range, int value)
+{
+ bdk_lmcx_mr_mpr_ctl_t lmc_mr_mpr_ctl;
+ bdk_lmcx_modereg_params3_t lmc_modereg_params3;
+ int mr_wr_addr = 0;
+
+ lmc_mr_mpr_ctl.u = 0;
+ lmc_modereg_params3.u = BDK_CSR_READ(node, BDK_LMCX_MODEREG_PARAMS3(ddr_interface_num));
+
+ mr_wr_addr |= lmc_modereg_params3.s.tccd_l<<10; /* A12:A10 tCCD_L */
+ mr_wr_addr |= 1<<7; /* A7 1 = Enable(Training Mode) */
+ mr_wr_addr |= range<<6; /* A6 VrefDQ Training Range */
+ mr_wr_addr |= value<<0; /* A5:A0 VrefDQ Training Value */
+
+ lmc_mr_mpr_ctl.s.mr_wr_addr = mr_wr_addr;
+ lmc_mr_mpr_ctl.s.mr_wr_sel = 6; /* Write MR6 */
+ lmc_mr_mpr_ctl.s.mr_wr_rank = rank;
+ //lmc_mr_mpr_ctl.s.mr_wr_pda_mask =
+ //lmc_mr_mpr_ctl.s.mr_wr_pda_enable =
+ //lmc_mr_mpr_ctl.s.mpr_loc = location;
+ //lmc_mr_mpr_ctl.s.mpr_wr = 0; /* Read=0, Write=1 */
+ //lmc_mr_mpr_ctl.s.mpr_bit_select =
+ //lmc_mr_mpr_ctl.s.mpr_byte_select =
+ //lmc_mr_mpr_ctl.s.mpr_whole_byte_enable =
+ //lmc_mr_mpr_ctl.s.mr_wr_use_default_value =
+ //lmc_mr_mpr_ctl.s.mr_wr_bg1 =
+ DRAM_CSR_WRITE(node, BDK_LMCX_MR_MPR_CTL(ddr_interface_num), lmc_mr_mpr_ctl.u);
+
+ /* 0x8 = Mode Register Write */
+ perform_octeon3_ddr3_sequence(node, 1<<rank, ddr_interface_num, 0x8);
+
+ /* It is vendor specific whether Vref_value is captured with A7=1.
+ A subsequent MRS might be necessary. */
+ perform_octeon3_ddr3_sequence(node, 1<<rank, ddr_interface_num, 0x8);
+
+ mr_wr_addr &= ~(1<<7); /* A7 0 = Disable(Training Mode) */
+ lmc_mr_mpr_ctl.s.mr_wr_addr = mr_wr_addr;
+ DRAM_CSR_WRITE(node, BDK_LMCX_MR_MPR_CTL(ddr_interface_num), lmc_mr_mpr_ctl.u);
+}
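+
+ /* MR6 encoding sketch for set_vref() above, using hypothetical
+ arguments (tccd_l = 0, range = 1, value = 0x19):
+ mr_wr_addr = (0 << 10) | (1 << 7) | (1 << 6) | 0x19 = 0xd9,
+ i.e. training mode enabled, VrefDQ Range 2, step code 0x19. */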
+
+static void set_DRAM_output_inversion (bdk_node_t node,
+ int ddr_interface_num,
+ int dimm_count,
+ int rank_mask,
+ int inversion)
+{
+ bdk_lmcx_ddr4_dimm_ctl_t lmc_ddr4_dimm_ctl;
+ bdk_lmcx_dimmx_params_t lmc_dimmx_params;
+ bdk_lmcx_dimm_ctl_t lmc_dimm_ctl;
+ int dimm_no;
+
+ lmc_ddr4_dimm_ctl.u = 0; /* Don't touch extended register control words */
+ DRAM_CSR_WRITE(node, BDK_LMCX_DDR4_DIMM_CTL(ddr_interface_num), lmc_ddr4_dimm_ctl.u);
+
+ ddr_print("All DIMMs: Register Control Word RC0 : %x\n", (inversion & 1));
+
+ for (dimm_no = 0; dimm_no < dimm_count; ++dimm_no) {
+ lmc_dimmx_params.u = BDK_CSR_READ(node, BDK_LMCX_DIMMX_PARAMS(ddr_interface_num, dimm_no));
+ lmc_dimmx_params.s.rc0 = (lmc_dimmx_params.s.rc0 & ~1) | (inversion & 1);
+ DRAM_CSR_WRITE(node, BDK_LMCX_DIMMX_PARAMS(ddr_interface_num, dimm_no), lmc_dimmx_params.u);
+ }
+
+ /* LMC0_DIMM_CTL */
+ lmc_dimm_ctl.u = BDK_CSR_READ(node, BDK_LMCX_DIMM_CTL(ddr_interface_num));
+ lmc_dimm_ctl.s.dimm0_wmask = 0x1;
+ lmc_dimm_ctl.s.dimm1_wmask = (dimm_count > 1) ? 0x0001 : 0x0000;
+
+ ddr_print("LMC DIMM_CTL : 0x%016lx\n",
+ lmc_dimm_ctl.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_DIMM_CTL(ddr_interface_num), lmc_dimm_ctl.u);
+
+ perform_octeon3_ddr3_sequence(node, rank_mask, ddr_interface_num, 0x7 ); /* Init RCW */
+}
+
+static void write_mpr_page0_pattern (bdk_node_t node, int rank_mask,
+ int ddr_interface_num, int dimm_count, int pattern, int location_mask)
+{
+ int rankx;
+ int location;
+
+ for (rankx = 0; rankx < dimm_count*4; rankx++) {
+ if (!(rank_mask & (1 << rankx)))
+ continue;
+ for (location = 0; location < 4; ++location) {
+ if (!(location_mask & (1 << location)))
+ continue;
+
+ ddr4_mpr_write(node, ddr_interface_num, rankx,
+ /* page */ 0, /* location */ location, pattern);
+ }
+ }
+}
+
+static void change_rdimm_mpr_pattern (bdk_node_t node, int rank_mask,
+ int ddr_interface_num, int dimm_count)
+{
+ int save_ref_zqcs_int;
+ bdk_lmcx_config_t lmc_config;
+
+ /*
+ Okay, here is the latest sequence. This should work for all
+ chips and passes (78,88,73,etc). This sequence should be run
+ immediately after DRAM INIT. The basic idea is to write the
+ same pattern into each of the 4 MPR locations in the DRAM, so
+ that the same value is returned when doing MPR reads regardless
+ of the inversion state. My advice is to put this into a
+ function, change_rdimm_mpr_pattern or something like that, so
+ that it can be called multiple times, as I think David wants a
+ clock-like pattern for OFFSET training, but does not want a
+ clock pattern for Bit-Deskew. You should then be able to call
+ this at any point in the init sequence (after DRAM init) to
+ change the pattern to a new value.
+ Mike
+
+ A correction: PHY doesn't need any pattern during offset
+ training, but needs a clock-like pattern for internal vref and
+ bit-deskew training. So for that reason, these steps below have
+ to be conducted before those trainings to pre-condition
+ the pattern. David
+
+ Note: Steps 3, 4, 8 and 9 have to be done through the RDIMM
+ sequence. If you issue an MRW sequence to do an RCW write (in o78 pass
+ 1 at least), LMC will still do two commands because
+ CONTROL[RDIMM_ENA] is still set high. We don't want it to have
+ any unintentional mode register writes, so it's best to do what
+ Mike is doing here.
+ Andrew
+ */
+
+
+ /* 1) Disable refresh (REF_ZQCS_INT = 0) */
+
+ debug_print("1) Disable refresh (REF_ZQCS_INT = 0)\n");
+
+ lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(ddr_interface_num));
+ save_ref_zqcs_int = lmc_config.s.ref_zqcs_int;
+ lmc_config.s.ref_zqcs_int = 0;
+ DRAM_CSR_WRITE(node, BDK_LMCX_CONFIG(ddr_interface_num), lmc_config.u);
+
+
+ /* 2) Put all devices in MPR mode (Run MRW sequence (sequence=8)
+ with MODEREG_PARAMS0[MPRLOC]=0,
+ MODEREG_PARAMS0[MPR]=1, MR_MPR_CTL[MR_WR_SEL]=3, and
+ MR_MPR_CTL[MR_WR_USE_DEFAULT_VALUE]=1) */
+
+ debug_print("2) Put all devices in MPR mode (Run MRW sequence (sequence=8)\n");
+
+ set_mpr_mode(node, rank_mask, ddr_interface_num, dimm_count, /* mpr */ 1, /* bg1 */ 0); /* A-side */
+ set_mpr_mode(node, rank_mask, ddr_interface_num, dimm_count, /* mpr */ 1, /* bg1 */ 1); /* B-side */
+
+ /* a. Or you can set MR_MPR_CTL[MR_WR_USE_DEFAULT_VALUE]=0 and set
+ the value you would like directly into
+ MR_MPR_CTL[MR_WR_ADDR] */
+
+ /* 3) Disable RCD Parity (if previously enabled) - parity does not
+ work if inversion disabled */
+
+ debug_print("3) Disable RCD Parity\n");
+
+ /* 4) Disable Inversion in the RCD. */
+ /* a. I did (3&4) via the RDIMM sequence (seq_sel=7), but it
+ may be easier to use the MRW sequence (seq_sel=8). Just set
+ MR_MPR_CTL[MR_WR_SEL]=7, MR_MPR_CTL[MR_WR_ADDR][3:0]=data,
+ MR_MPR_CTL[MR_WR_ADDR][7:4]=RCD reg */
+
+ debug_print("4) Disable Inversion in the RCD.\n");
+
+ set_DRAM_output_inversion(node, ddr_interface_num, dimm_count, rank_mask,
+ 1 /* 1=disable output inversion*/);
+
+ /* 5) Disable CONTROL[RDIMM_ENA] so that MR sequence goes out
+ non-inverted. */
+
+ debug_print("5) Disable CONTROL[RDIMM_ENA]\n");
+
+ set_rdimm_mode(node, ddr_interface_num, 0);
+
+ /* 6) Write all 4 MPR registers with the desired pattern (have to
+ do this for all enabled ranks) */
+ /* a. MR_MPR_CTL.MPR_WR=1, MR_MPR_CTL.MPR_LOC=0..3,
+ MR_MPR_CTL.MR_WR_SEL=0, MR_MPR_CTL.MR_WR_ADDR[7:0]=pattern */
+
+ debug_print("6) Write all 4 MPR page 0 Training Patterns\n");
+
+ write_mpr_page0_pattern(node, rank_mask,
+ ddr_interface_num, dimm_count, 0x55, 0x8);
+
+ /* 7) Re-enable RDIMM_ENA */
+
+ debug_print("7) Re-enable RDIMM_ENA\n");
+
+ set_rdimm_mode(node, ddr_interface_num, 1);
+
+ /* 8) Re-enable RDIMM inversion */
+
+ debug_print("8) Re-enable RDIMM inversion\n");
+
+ set_DRAM_output_inversion(node, ddr_interface_num, dimm_count, rank_mask,
+ 0 /* 0=re-enable output inversion*/);
+
+ /* 9) Re-enable RDIMM parity (if desired) */
+
+ debug_print("9) Re-enable RDIMM parity (if desired)\n");
+
+ /* 10)Take B-side devices out of MPR mode (Run MRW sequence
+ (sequence=8) with MODEREG_PARAMS0[MPRLOC]=0,
+ MODEREG_PARAMS0[MPR]=0, MR_MPR_CTL[MR_WR_SEL]=3, and
+ MR_MPR_CTL[MR_WR_USE_DEFAULT_VALUE]=1) */
+
+ debug_print("10)Take B-side devices out of MPR mode\n");
+
+ set_mpr_mode(node, rank_mask, ddr_interface_num, dimm_count, /* mpr */ 0, /* bg1 */ 1);
+
+ /* a. Or you can set MR_MPR_CTL[MR_WR_USE_DEFAULT_VALUE]=0 and
+ set the value you would like directly into
+ MR_MPR_CTL[MR_WR_ADDR] */
+
+ /* 11)Re-enable refresh (REF_ZQCS_INT=previous value) */
+
+ debug_print("11)Re-enable refresh (REF_ZQCS_INT=previous value)\n");
+
+ lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(ddr_interface_num));
+ lmc_config.s.ref_zqcs_int = save_ref_zqcs_int;
+ DRAM_CSR_WRITE(node, BDK_LMCX_CONFIG(ddr_interface_num), lmc_config.u);
+
+}
+
+static unsigned char ddr4_rodt_ohms [RODT_OHMS_COUNT ] = { 0, 40, 60, 80, 120, 240, 34, 48 };
+static unsigned char ddr4_rtt_nom_ohms [RTT_NOM_OHMS_COUNT ] = { 0, 60, 120, 40, 240, 48, 80, 34 };
+static unsigned char ddr4_rtt_nom_table [RTT_NOM_TABLE_COUNT ] = { 0, 4, 2, 6, 1, 5, 3, 7 };
+static unsigned char ddr4_rtt_wr_ohms [RTT_WR_OHMS_COUNT ] = { 0, 120, 240, 99, 80 }; // setting HiZ ohms to 99 for computed vref
+static unsigned char ddr4_dic_ohms [DIC_OHMS_COUNT ] = { 34, 48 };
+static short ddr4_drive_strength[DRIVE_STRENGTH_COUNT] = { 0, 0, 26, 30, 34, 40, 48, 68, 0,0,0,0,0,0,0 };
+static short ddr4_dqx_strength [DRIVE_STRENGTH_COUNT] = { 0, 24, 27, 30, 34, 40, 48, 60, 0,0,0,0,0,0,0 };
+
+impedence_values_t ddr4_impedence_values = {
+ .rodt_ohms = ddr4_rodt_ohms ,
+ .rtt_nom_ohms = ddr4_rtt_nom_ohms ,
+ .rtt_nom_table = ddr4_rtt_nom_table ,
+ .rtt_wr_ohms = ddr4_rtt_wr_ohms ,
+ .dic_ohms = ddr4_dic_ohms ,
+ .drive_strength = ddr4_drive_strength,
+ .dqx_strength = ddr4_dqx_strength ,
+};
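+
+ /* The arrays above are indexed by the corresponding register/MR
+ encodings; e.g. an RTT_NOM code of 3 looks up
+ ddr4_rtt_nom_ohms[3] = 40 ohms, and a RODT_CTL code of 4 looks up
+ ddr4_rodt_ohms[4] = 120 ohms. */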
+
+static unsigned char ddr3_rodt_ohms [RODT_OHMS_COUNT ] = { 0, 20, 30, 40, 60, 120, 0, 0 };
+static unsigned char ddr3_rtt_nom_ohms [RTT_NOM_OHMS_COUNT ] = { 0, 60, 120, 40, 20, 30, 0, 0 };
+static unsigned char ddr3_rtt_nom_table [RTT_NOM_TABLE_COUNT ] = { 0, 2, 1, 3, 5, 4, 0, 0 };
+static unsigned char ddr3_rtt_wr_ohms [RTT_WR_OHMS_COUNT ] = { 0, 60, 120 };
+static unsigned char ddr3_dic_ohms [DIC_OHMS_COUNT ] = { 40, 34 };
+static short ddr3_drive_strength[DRIVE_STRENGTH_COUNT] = { 0, 24, 27, 30, 34, 40, 48, 60, 0,0,0,0,0,0,0 };
+static impedence_values_t ddr3_impedence_values = {
+ .rodt_ohms = ddr3_rodt_ohms ,
+ .rtt_nom_ohms = ddr3_rtt_nom_ohms ,
+ .rtt_nom_table = ddr3_rtt_nom_table ,
+ .rtt_wr_ohms = ddr3_rtt_wr_ohms ,
+ .dic_ohms = ddr3_dic_ohms ,
+ .drive_strength = ddr3_drive_strength,
+ .dqx_strength = ddr3_drive_strength,
+};
+
+
+uint64_t
+hertz_to_psecs(uint64_t hertz)
+{
+ return divide_nint((uint64_t) 1000*1000*1000*1000, hertz); /* Clock in psecs */
+}
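+
+ /* e.g. hertz_to_psecs(1200000000) = divide_nint(10^12, 1.2*10^9)
+ = 833 ps for a 1200 MHz DDR clock (2400 MT/s). */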
+
+#define DIVIDEND_SCALE 1000 /* Scale to avoid rounding error. */
+uint64_t
+psecs_to_mts(uint64_t psecs)
+{
+ //ddr_print("psecs %ld, divisor %ld\n", psecs, divide_nint((uint64_t)(2 * 1000000 * DIVIDEND_SCALE), psecs));
+ return divide_nint(divide_nint((uint64_t)(2 * 1000000 * DIVIDEND_SCALE), psecs), DIVIDEND_SCALE);
+}
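+
+ /* e.g. psecs_to_mts(833) = divide_nint(divide_nint(2*10^9, 833), 1000)
+ = divide_nint(2400960, 1000) = 2401 MT/s -- the integer rounding
+ artifact that compute_rc3x() below special-cases back to 2400. */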
+
+#define WITHIN(v,b,m) (((v)>=((b)-(m)))&&((v)<=((b)+(m))))
+
+// pretty-print version, only works with what comes from the SPD: tCKmin or tCKAVGmin
+unsigned long
+pretty_psecs_to_mts(uint64_t psecs)
+{
+ uint64_t ret = 0; // default to error
+ if (WITHIN(psecs, 1250, 1))
+ ret = 1600;
+ else if (WITHIN(psecs, 1071, 1))
+ ret = 1866;
+ else if (WITHIN(psecs, 937, 1))
+ ret = 2133;
+ else if (WITHIN(psecs, 833, 1))
+ ret = 2400;
+ else if (WITHIN(psecs, 750, 1))
+ ret = 2666;
+ return ret;
+}
+
+uint64_t
+mts_to_hertz(uint64_t mts)
+{
+ return ((mts * 1000 * 1000) / 2);
+}
+
+#define DEBUG_RC3X_COMPUTE 0
+#define rc3x_print(...) \
+ do { if (DEBUG_RC3X_COMPUTE) printf(__VA_ARGS__); } while (0)
+
+static int compute_rc3x (int64_t tclk_psecs)
+{
+ long speed;
+ long tclk_psecs_min, tclk_psecs_max;
+ long data_rate_mhz, data_rate_mhz_min, data_rate_mhz_max;
+ int rc3x;
+
+#define ENCODING_BASE 1240
+
+ data_rate_mhz = psecs_to_mts(tclk_psecs);
+
+ /* 2400 MT/s is a special case. Using integer arithmetic, it rounds
+ from 833 psecs to 2401 MT/s. Force it to 2400 to pick the
+ proper setting from the table. */
+ if (tclk_psecs == 833)
+ data_rate_mhz = 2400;
+
+ for (speed = ENCODING_BASE; speed < 3200; speed += 20) {
+ int error = 0;
+
+ tclk_psecs_min = hertz_to_psecs(mts_to_hertz(speed + 00)); /* Clock in psecs */
+ tclk_psecs_max = hertz_to_psecs(mts_to_hertz(speed + 18)); /* Clock in psecs */
+
+ data_rate_mhz_min = psecs_to_mts(tclk_psecs_min);
+ data_rate_mhz_max = psecs_to_mts(tclk_psecs_max);
+
+ /* Force alignment to a multiple of 20 to avoid rounding errors. */
+ data_rate_mhz_min = ((data_rate_mhz_min + 18) / 20) * 20;
+ data_rate_mhz_max = ((data_rate_mhz_max + 18) / 20) * 20;
+
+ error += (speed + 00 != data_rate_mhz_min);
+ error += (speed + 20 != data_rate_mhz_max);
+
+ rc3x = (speed - ENCODING_BASE) / 20;
+
+ rc3x_print("rc3x: %02x speed: %4ld MT/s < f <= %4ld MT/s, psec: %3ld:%3ld %4ld:%4ld %s\n",
+ rc3x,
+ speed, speed + 20,
+ tclk_psecs_min, tclk_psecs_max,
+ data_rate_mhz_min, data_rate_mhz_max,
+ error ? "****" : "");
+
+ if (data_rate_mhz <= (speed + 20)) {
+ rc3x_print("rc3x: %4ld MT/s <= %4ld MT/s\n", data_rate_mhz, speed + 20);
+ break;
+ }
+ }
+ return rc3x;
+}
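+
+ /* Worked example: tclk_psecs = 833 is forced to 2400 MT/s; the loop
+ exits when 2400 <= speed + 20, i.e. at speed = 2380, giving
+ rc3x = (2380 - 1240) / 20 = 57 (0x39). */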
+
+static const int rlevel_separate_ab = 1;
+
+int init_octeon3_ddr3_interface(bdk_node_t node,
+ const ddr_configuration_t *ddr_configuration,
+ uint32_t ddr_hertz,
+ uint32_t cpu_hertz,
+ uint32_t ddr_ref_hertz,
+ int board_type,
+ int board_rev_maj,
+ int board_rev_min,
+ int ddr_interface_num,
+ uint32_t ddr_interface_mask
+ )
+{
+ const char *s;
+
+ const dimm_odt_config_t *odt_1rank_config = ddr_configuration->odt_1rank_config;
+ const dimm_odt_config_t *odt_2rank_config = ddr_configuration->odt_2rank_config;
+ const dimm_odt_config_t *odt_4rank_config = ddr_configuration->odt_4rank_config;
+ const dimm_config_t *dimm_config_table = ddr_configuration->dimm_config_table;
+ const dimm_odt_config_t *odt_config;
+ const ddr3_custom_config_t *custom_lmc_config = &ddr_configuration->custom_lmc_config;
+ int odt_idx;
+
+ /*
+ ** Compute clock rates to the nearest picosecond.
+ */
+ uint64_t tclk_psecs = hertz_to_psecs(ddr_hertz); /* Clock in psecs */
+ uint64_t eclk_psecs = hertz_to_psecs(cpu_hertz); /* Clock in psecs */
+
+ int row_bits, col_bits, num_banks, num_ranks, dram_width;
+ int dimm_count = 0;
+ int fatal_error = 0; /* Accumulate and report all the errors before giving up */
+
+ int safe_ddr_flag = 0; /* Flag that indicates safe DDR settings should be used */
+ int ddr_interface_64b = 1; /* THUNDER default: 64-bit interface width */
+ int ddr_interface_bytemask;
+ uint32_t mem_size_mbytes = 0;
+ unsigned int didx;
+ int bank_bits = 0;
+ int bunk_enable;
+ int rank_mask;
+ int column_bits_start = 1;
+ int row_lsb;
+ int pbank_lsb;
+ int use_ecc = 1;
+ int mtb_psec = 0; /* quiet */
+ short ftb_Dividend;
+ short ftb_Divisor;
+ int tAAmin;
+ int tCKmin;
+ int CL, min_cas_latency = 0, max_cas_latency = 0, override_cas_latency = 0;
+ int ddr_rtt_nom_auto, ddr_rodt_ctl_auto;
+ int i;
+
+ int spd_addr;
+ int spd_org;
+ int spd_banks;
+ int spd_rdimm;
+ int spd_dimm_type;
+ int spd_ecc;
+ uint32_t spd_cas_latency;
+ int spd_mtb_dividend;
+ int spd_mtb_divisor;
+ int spd_tck_min;
+ int spd_taa_min;
+ int spd_twr;
+ int spd_trcd;
+ int spd_trrd;
+ int spd_trp;
+ int spd_tras;
+ int spd_trc;
+ int spd_trfc;
+ int spd_twtr;
+ int spd_trtp;
+ int spd_tfaw;
+ int spd_addr_mirror;
+ int spd_package = 0;
+ int spd_rawcard = 0;
+ int spd_rawcard_AorB = 0;
+ int is_stacked_die = 0;
+ int disable_stacked_die = 0;
+ int is_3ds_dimm = 0; // 3DS
+ int lranks_per_prank = 1; // 3DS: logical ranks per package rank
+ int lranks_bits = 0; // 3DS: logical ranks bits
+ int die_capacity = 0; // in Mbits; only used for 3DS
+
+ /* FTB values are two's complement ranging from +127 to -128. */
+ typedef signed char SC_t;
+
+ int twr;
+ int trcd;
+ int trrd;
+ int trp;
+ int tras;
+ int trc;
+ int trfc;
+ int twtr;
+ int trtp = 0; /* quiet */
+ int tfaw;
+
+ int wlevel_bitmask_errors = 0;
+ int wlevel_loops;
+ int default_rtt_nom[4];
+ int dyn_rtt_nom_mask = 0;
+
+ ddr_type_t ddr_type;
+ int ddr4_tCKAVGmin = 0; /* quiet */
+ int ddr4_tCKAVGmax = 0; /* quiet */
+ int ddr4_tRCDmin = 0; /* quiet */
+ int ddr4_tRPmin = 0; /* quiet */
+ int ddr4_tRASmin = 0; /* quiet */
+ int ddr4_tRCmin = 0; /* quiet */
+ int ddr4_tRFC1min = 0; /* quiet */
+ int ddr4_tRFC2min = 0; /* quiet */
+ int ddr4_tRFC4min = 0; /* quiet */
+ int ddr4_tFAWmin = 0; /* quiet */
+ int ddr4_tRRD_Smin = 0; /* quiet */
+ int ddr4_tRRD_Lmin;
+ int ddr4_tCCD_Lmin;
+ impedence_values_t *imp_values;
+ int default_rodt_ctl;
+ // default to disabled (ie, LMC restart, not chip reset)
+ int ddr_disable_chip_reset = 1;
+ int disable_deskew_training = 0;
+ const char *dimm_type_name;
+
+ /* Allow the Write bit-deskew feature to be enabled when desired. */
+ // NOTE: THUNDER pass 2.x only, 81xx, 83xx
+ int enable_write_deskew = ENABLE_WRITE_DESKEW_DEFAULT;
+
+#if SWL_TRY_HWL_ALT
+ typedef struct {
+ uint16_t hwl_alt_mask; // mask of bytelanes with alternate
+ uint16_t hwl_alt_delay[9]; // bytelane alternate avail if mask=1
+ } hwl_alt_by_rank_t;
+ hwl_alt_by_rank_t hwl_alts[4];
+ memset(hwl_alts, 0, sizeof(hwl_alts));
+#endif /* SWL_TRY_HWL_ALT */
+
+ bdk_lmcx_config_t lmc_config;
+
+ /* Initialize these to shut up the compiler. They are configured
+ and used only for DDR4 */
+ ddr4_tRRD_Lmin = 6000;
+ ddr4_tCCD_Lmin = 6000;
+
+ ddr_print("\nInitializing node %d DDR interface %d, DDR Clock %d, DDR Reference Clock %d\n",
+ node, ddr_interface_num, ddr_hertz, ddr_ref_hertz);
+
+ if (dimm_config_table[0].spd_addr == 0 && !dimm_config_table[0].spd_ptr) {
+ error_print("ERROR: No dimms specified in the dimm_config_table.\n");
+ return (-1);
+ }
+
+ // allow some overrides to be done
+
+ // this one controls whether chip RESET is done, or LMC init restarted from step 6.9.6
+ if ((s = lookup_env_parameter("ddr_disable_chip_reset")) != NULL) {
+ ddr_disable_chip_reset = !!strtoul(s, NULL, 0);
+ }
+ // this one controls whether Deskew Training is performed
+ if ((s = lookup_env_parameter("ddr_disable_deskew_training")) != NULL) {
+ disable_deskew_training = !!strtoul(s, NULL, 0);
+ }
+ // this one is in Validate_Read_Deskew_Training and controls a preliminary delay
+ if ((s = lookup_env_parameter("ddr_deskew_validation_delay")) != NULL) {
+ deskew_validation_delay = strtoul(s, NULL, 0);
+ }
+ // this one is in Perform_Read_Deskew_Training and controls lock retries
+ if ((s = lookup_env_parameter("ddr_lock_retries")) != NULL) {
+ default_lock_retry_limit = strtoul(s, NULL, 0);
+ }
+ // this one controls whether stacked die status can affect processing
+ // disabling it will affect computed vref adjustment, and rodt_row_skip_mask
+ if ((s = lookup_env_parameter("ddr_disable_stacked_die")) != NULL) {
+ disable_stacked_die = !!strtoul(s, NULL, 0);
+ }
+
+ // setup/override for write bit-deskew feature
+ if (! CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X)) { // added 81xx and 83xx
+ // FIXME: allow override
+ if ((s = lookup_env_parameter("ddr_enable_write_deskew")) != NULL) {
+ enable_write_deskew = !!strtoul(s, NULL, 0);
+ } // else take default setting
+ } else { // not pass 2.x
+ enable_write_deskew = 0; // force disabled
+ }
+
+#if 0 // FIXME: do we really need this anymore?
+ if (dram_is_verbose(VBL_NORM)) {
+ printf("DDR SPD Table:");
+ for (didx = 0; didx < DDR_CFG_T_MAX_DIMMS; ++didx) {
+ if (dimm_config_table[didx].spd_addr == 0) break;
+ printf(" --ddr%dspd=0x%02x", ddr_interface_num, dimm_config_table[didx].spd_addr);
+ }
+ printf("\n");
+ }
+#endif
+
+ /*
+ ** Walk the DRAM Socket Configuration Table to see what is installed.
+ */
+ for (didx = 0; didx < DDR_CFG_T_MAX_DIMMS; ++didx)
+ {
+ /* Check for lower DIMM socket populated */
+ if (validate_dimm(node, &dimm_config_table[didx]) == 1) {
+ // NOTE: DIMM info printing is now done later when more details are available
+ ++dimm_count;
+ } else { break; } /* Finished when there is no lower DIMM */
+ }
+
+
+ initialize_ddr_clock(node,
+ ddr_configuration,
+ cpu_hertz,
+ ddr_hertz,
+ ddr_ref_hertz,
+ ddr_interface_num,
+ ddr_interface_mask);
+
+ if (!odt_1rank_config)
+ odt_1rank_config = disable_odt_config;
+ if (!odt_2rank_config)
+ odt_2rank_config = disable_odt_config;
+ if (!odt_4rank_config)
+ odt_4rank_config = disable_odt_config;
+
+ if ((s = lookup_env_parameter("ddr_safe")) != NULL) {
+ safe_ddr_flag = !!strtoul(s, NULL, 0);
+ }
+
+
+ if (dimm_count == 0) {
+ error_print("ERROR: DIMM 0 not detected.\n");
+ return(-1);
+ }
+
+ // look for 32-bit mode specified in the config
+ if (custom_lmc_config->mode32b) {
+ ddr_interface_64b = 0;
+ }
+
+ if (ddr_interface_64b == 0) { // check if 32-bit mode is bad
+ if (!CAVIUM_IS_MODEL(CAVIUM_CN81XX)) {
+ error_print("32-bit interface width is NOT supported for this Thunder model\n");
+ ddr_interface_64b = 1; // force to 64-bit
+ }
+ } else { // check if 64-bit mode is bad
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX)) { // check the fuses on 81XX for forced 32-bit mode
+ BDK_CSR_INIT(mio_fus_dat2, node, BDK_MIO_FUS_DAT2);
+ if (mio_fus_dat2.s.lmc_mode32) {
+ error_print("32-bit interface width is ONLY supported for this Thunder model\n");
+ ddr_interface_64b = 0; // force to 32-bit
+ }
+ }
+ }
+
+ // finally, say we are in 32-bit mode when it has been validated
+ if (ddr_interface_64b == 0) {
+ ddr_print("N%d.LMC%d: Setting 32-bit data width\n",
+ node, ddr_interface_num);
+ }
+
+ /* ddr_type only indicates DDR4 or DDR3 */
+ ddr_type = get_ddr_type(node, &dimm_config_table[0]);
+ debug_print("DRAM Device Type: DDR%d\n", ddr_type);
+
+ spd_dimm_type = get_dimm_module_type(node, &dimm_config_table[0], ddr_type);
+
+ if (ddr_type == DDR4_DRAM) {
+ int spd_module_type;
+ int asymmetric;
+ const char *signal_load[4] = {"", "MLS", "3DS", "RSV"};
+
+ imp_values = &ddr4_impedence_values;
+ dimm_type_name = ddr4_dimm_types[spd_dimm_type];
+
+ spd_addr = read_spd(node, &dimm_config_table[0], DDR4_SPD_ADDRESSING_ROW_COL_BITS);
+ spd_org = read_spd(node, &dimm_config_table[0], DDR4_SPD_MODULE_ORGANIZATION);
+ spd_banks = 0xFF & read_spd(node, &dimm_config_table[0], DDR4_SPD_DENSITY_BANKS);
+
+ bank_bits = (2 + ((spd_banks >> 4) & 0x3)) + ((spd_banks >> 6) & 0x3);
+ bank_bits = min((int)bank_bits, 4); /* Controller can only address 4 bits. */
+
+ spd_package = 0XFF & read_spd(node, &dimm_config_table[0], DDR4_SPD_PACKAGE_TYPE);
+ if (spd_package & 0x80) { // non-monolithic device
+ is_stacked_die = (!disable_stacked_die) ? ((spd_package & 0x73) == 0x11) : 0;
+ ddr_print("DDR4: Package Type 0x%x (%s), %d die\n", spd_package,
+ signal_load[(spd_package & 3)], ((spd_package >> 4) & 7) + 1);
+ is_3ds_dimm = ((spd_package & 3) == 2); // is it 3DS?
+ if (is_3ds_dimm) { // is it 3DS?
+ lranks_per_prank = ((spd_package >> 4) & 7) + 1;
+ // FIXME: should make sure it is only 2H or 4H or 8H?
+ lranks_bits = lranks_per_prank >> 1;
+ if (lranks_bits == 4) lranks_bits = 3;
+ }
+ } else if (spd_package != 0) {
+ // FIXME: print non-zero monolithic device definition
+ ddr_print("DDR4: Package Type MONOLITHIC: %d die, signal load %d\n",
+ ((spd_package >> 4) & 7) + 1, (spd_package & 3));
+ }
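+
+ /* Decode sketch with a hypothetical SPD byte: spd_package = 0xB2
+ has bit 7 set (non-monolithic), signal load 2 (3DS) and
+ ((0xB2 >> 4) & 7) + 1 = 4 die, so lranks_per_prank = 4 and
+ lranks_bits = 2. */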
+
+ asymmetric = (spd_org >> 6) & 1;
+ if (asymmetric) {
+ int spd_secondary_pkg = read_spd(node, &dimm_config_table[0],
+ DDR4_SPD_SECONDARY_PACKAGE_TYPE);
+ ddr_print("DDR4: Module Organization: ASYMMETRICAL: Secondary Package Type 0x%x\n",
+ spd_secondary_pkg);
+ } else {
+ uint64_t bus_width = 8 << (0x07 & read_spd(node, &dimm_config_table[0],
+ DDR4_SPD_MODULE_MEMORY_BUS_WIDTH));
+ uint64_t ddr_width = 4 << ((spd_org >> 0) & 0x7);
+ uint64_t module_cap;
+ int shift = (spd_banks & 0x0F);
+ die_capacity = (shift < 8) ? (256UL << shift) : ((12UL << (shift & 1)) << 10);
+ ddr_print("DDR4: Module Organization: SYMMETRICAL: capacity per die %d %cbit\n",
+ (die_capacity > 512) ? (die_capacity >> 10) : die_capacity,
+ (die_capacity > 512) ? 'G' : 'M');
+ module_cap = ((uint64_t)die_capacity << 20) / 8UL * bus_width / ddr_width *
+ /* no. pkg ranks*/(1UL + ((spd_org >> 3) & 0x7));
+ if (is_3ds_dimm) // is it 3DS?
+ module_cap *= /* die_count */(uint64_t)(((spd_package >> 4) & 7) + 1);
+ ddr_print("DDR4: Module Organization: SYMMETRICAL: capacity per module %ld GB\n",
+ module_cap >> 30);
+ }
+
+ spd_rawcard = 0xFF & read_spd(node, &dimm_config_table[0], DDR4_SPD_REFERENCE_RAW_CARD);
+ ddr_print("DDR4: Reference Raw Card 0x%x \n", spd_rawcard);
+
+ spd_module_type = read_spd(node, &dimm_config_table[0], DDR4_SPD_KEY_BYTE_MODULE_TYPE);
+ if (spd_module_type & 0x80) { // HYBRID module
+ ddr_print("DDR4: HYBRID module, type %s\n",
+ ((spd_module_type & 0x70) == 0x10) ? "NVDIMM" : "UNKNOWN");
+ }
+
+ spd_dimm_type = spd_module_type & 0x0F;
+ spd_rdimm = (spd_dimm_type == 1) || (spd_dimm_type == 5) || (spd_dimm_type == 8);
+ if (spd_rdimm) {
+ int spd_mfgr_id = read_spd(node, &dimm_config_table[0], DDR4_SPD_REGISTER_MANUFACTURER_ID_LSB) |
+ (read_spd(node, &dimm_config_table[0], DDR4_SPD_REGISTER_MANUFACTURER_ID_MSB) << 8);
+ int spd_register_rev = read_spd(node, &dimm_config_table[0], DDR4_SPD_REGISTER_REVISION_NUMBER);
+ ddr_print("DDR4: RDIMM Register Manufacturer ID 0x%x Revision 0x%x\n",
+ spd_mfgr_id, spd_register_rev);
+
+ // RAWCARD A or B must be bit 7=0 and bits 4-0 either 00000(A) or 00001(B)
+ spd_rawcard_AorB = ((spd_rawcard & 0x9fUL) <= 1);
+ }
+ } else {
+ imp_values = &ddr3_impedence_values;
+ dimm_type_name = ddr3_dimm_types[spd_dimm_type];
+
+ spd_addr = read_spd(node, &dimm_config_table[0], DDR3_SPD_ADDRESSING_ROW_COL_BITS);
+ spd_org = read_spd(node, &dimm_config_table[0], DDR3_SPD_MODULE_ORGANIZATION);
+ spd_banks = read_spd(node, &dimm_config_table[0], DDR3_SPD_DENSITY_BANKS) & 0xff;
+
+ bank_bits = 3 + ((spd_banks >> 4) & 0x7);
+ bank_bits = min((int)bank_bits, 3); /* Controller can only address 3 bits. */
+
+ spd_rdimm = (spd_dimm_type == 1) || (spd_dimm_type == 5) || (spd_dimm_type == 9);
+ }
+
+#if 0 // FIXME: why should this be possible OR needed?
+ if ((s = lookup_env_parameter("ddr_rdimm_ena")) != NULL) {
+ spd_rdimm = !!strtoul(s, NULL, 0);
+ }
+#endif
+
+ debug_print("spd_addr : %#06x\n", spd_addr );
+ debug_print("spd_org : %#06x\n", spd_org );
+ debug_print("spd_banks : %#06x\n", spd_banks );
+
+ row_bits = 12 + ((spd_addr >> 3) & 0x7);
+ col_bits = 9 + ((spd_addr >> 0) & 0x7);
+
+ num_ranks = 1 + ((spd_org >> 3) & 0x7);
+ dram_width = 4 << ((spd_org >> 0) & 0x7);
+ num_banks = 1 << bank_bits;
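+ /* Decode sketch with hypothetical SPD bytes: spd_addr = 0x21 gives
+ row_bits = 12 + 4 = 16 and col_bits = 9 + 1 = 10; spd_org = 0x09
+ gives num_ranks = 1 + 1 = 2 and dram_width = 4 << 1 = 8,
+ i.e. a 2Rx8 module. */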
+
+ if ((s = lookup_env_parameter("ddr_num_ranks")) != NULL) {
+ num_ranks = strtoul(s, NULL, 0);
+ }
+
+ /* FIX
+ ** Check that values are within some theoretical limits.
+ ** col_bits(min) = row_lsb(min) - bank_bits(max) - bus_bits(max) = 14 - 3 - 4 = 7
+ ** col_bits(max) = row_lsb(max) - bank_bits(min) - bus_bits(min) = 18 - 2 - 3 = 13
+ */
+ if ((col_bits > 13) || (col_bits < 7)) {
+ error_print("Unsupported number of Col Bits: %d\n", col_bits);
+ ++fatal_error;
+ }
+
+ /* FIX
+ ** Check that values are within some theoretical limits.
+ ** row_bits(min) = pbank_lsb(min) - row_lsb(max) - rank_bits = 26 - 18 - 1 = 7
+ ** row_bits(max) = pbank_lsb(max) - row_lsb(min) - rank_bits = 33 - 14 - 1 = 18
+ */
+ if ((row_bits > 18) || (row_bits < 7)) {
+ error_print("Unsupported number of Row Bits: %d\n", row_bits);
+ ++fatal_error;
+ }
+
+ if (bdk_is_platform(BDK_PLATFORM_ASIM))
+ wlevel_loops = 0;
+ else {
+ wlevel_loops = WLEVEL_LOOPS_DEFAULT;
+ // accept generic or interface-specific override but not for ASIM...
+ if ((s = lookup_env_parameter("ddr_wlevel_loops")) == NULL)
+ s = lookup_env_parameter("ddr%d_wlevel_loops", ddr_interface_num);
+ if (s != NULL) {
+ wlevel_loops = strtoul(s, NULL, 0);
+ }
+ }
+
+ bunk_enable = (num_ranks > 1);
+
+ column_bits_start = 3;
+
+ row_lsb = column_bits_start + col_bits + bank_bits - (! ddr_interface_64b);
+ debug_print("row_lsb = column_bits_start + col_bits + bank_bits = %d\n", row_lsb);
+
+ pbank_lsb = row_lsb + row_bits + bunk_enable;
+ debug_print("pbank_lsb = row_lsb + row_bits + bunk_enable = %d\n", pbank_lsb);
+
+ if (lranks_per_prank > 1) {
+ pbank_lsb = row_lsb + row_bits + lranks_bits + bunk_enable;
+ ddr_print("DDR4: 3DS: pbank_lsb = (%d row_lsb) + (%d row_bits) + (%d lranks_bits) + (%d bunk_enable) = %d\n",
+ row_lsb, row_bits, lranks_bits, bunk_enable, pbank_lsb);
+ }
+
+ mem_size_mbytes = dimm_count * ((1ull << pbank_lsb) >> 20);
+ if (num_ranks == 4) {
+ /* Quad rank dimm capacity is equivalent to two dual-rank dimms. */
+ mem_size_mbytes *= 2;
+ }
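+
+ /* Address-map sketch continuing the hypothetical 2Rx8 example
+ (col_bits = 10, bank_bits = 4, row_bits = 16, 64-bit interface):
+ row_lsb = 3 + 10 + 4 = 17, pbank_lsb = 17 + 16 + 1 = 34, so one
+ dual-rank DIMM contributes (1ull << 34) >> 20 = 16384 MB. */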
+
+ /* Mask with 1 bits set for each active rank, allowing 2 bits per dimm.
+ ** This makes later calculations simpler, as a variety of CSRs use this layout.
+ ** This init needs to be updated for dual configs (i.e., non-identical DIMMs).
+ ** Bit 0 = dimm0, rank 0
+ ** Bit 1 = dimm0, rank 1
+ ** Bit 2 = dimm1, rank 0
+ ** Bit 3 = dimm1, rank 1
+ ** ...
+ */
+ rank_mask = 0x1;
+ if (num_ranks > 1)
+ rank_mask = 0x3;
+ if (num_ranks > 2)
+ rank_mask = 0xf;
+
+ for (i = 1; i < dimm_count; i++)
+ rank_mask |= ((rank_mask & 0x3) << (2*i));
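+
+ /* e.g. two dual-rank DIMMs: num_ranks = 2 sets rank_mask = 0x3,
+ then the loop ORs in (0x3 << 2) for DIMM 1, yielding 0xf. */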
+
+
+#ifdef CAVIUM_ONLY
+ /* Special request: mismatched DIMM support. Slot 0: 2-Rank, Slot 1: 1-Rank */
+ if (0)
+ {
+ /*
+ ** Calculate the total memory size in terms of the total
+ ** number of ranks instead of the number of dimms. The usual
+ ** requirement is for both dimms to be identical. This check
+ ** works around that requirement to allow one exception. The
+ ** dimm in the second slot may now have fewer ranks than the
+ ** first slot.
+ */
+ int spd_org_dimm1;
+ int num_ranks_dimm1;
+ int rank_count;
+ int rank_mask_dimm1;
+
+ if (dimm_count > 1) {
+ spd_org_dimm1 = read_spd(node, &dimm_config_table[1] /* dimm 1*/,
+ DDR3_SPD_MODULE_ORGANIZATION);
+ num_ranks_dimm1 = 1 + ((spd_org_dimm1 >> 3) & 0x7);
+ rank_count = num_ranks/* dimm 0 */ + num_ranks_dimm1 /* dimm 1 */;
+
+ if (num_ranks != num_ranks_dimm1) {
+ mem_size_mbytes = rank_count * ((1ull << (pbank_lsb-bunk_enable)) >> 20);
+ rank_mask = 1 | ((num_ranks > 1) << 1);
+ rank_mask_dimm1 = 1 | ((num_ranks_dimm1 > 1) << 1);
+ rank_mask |= ((rank_mask_dimm1 & 0x3) << 2);
+ ddr_print("DIMM 1 - ranks: %d, size: %d MB\n",
+ num_ranks_dimm1, num_ranks_dimm1 * ((1ull << (pbank_lsb-bunk_enable)) >> 20));
+ }
+ }
+ }
+#endif /* CAVIUM_ONLY */
+
+ spd_ecc = get_dimm_ecc(node, &dimm_config_table[0], ddr_type);
+
+ VB_PRT(VBL_DEV, "Summary: - %d %s%s %dRx%d %s, row bits=%d, col bits=%d, bank bits=%d\n",
+ dimm_count, dimm_type_name, (dimm_count > 1) ? "s" : "",
+ num_ranks, dram_width, (spd_ecc) ? "ECC" : "non-ECC",
+ row_bits, col_bits, bank_bits);
+
+ // always print out the useful DIMM information...
+ for (i = 0; i < DDR_CFG_T_MAX_DIMMS; i++) {
+ if (i < dimm_count)
+ report_dimm(node, &dimm_config_table[i], i, ddr_interface_num,
+ num_ranks, dram_width, mem_size_mbytes / dimm_count);
+ else
+ if (validate_dimm(node, &dimm_config_table[i]) == 0) // only if there is a slot
+ printf("N%d.LMC%d.DIMM%d: Not Present\n", node, ddr_interface_num, i);
+ }
+
+ if (ddr_type == DDR4_DRAM) {
+ spd_cas_latency = ((0xff & read_spd(node, &dimm_config_table[0], DDR4_SPD_CAS_LATENCIES_BYTE0)) << 0);
+ spd_cas_latency |= ((0xff & read_spd(node, &dimm_config_table[0], DDR4_SPD_CAS_LATENCIES_BYTE1)) << 8);
+ spd_cas_latency |= ((0xff & read_spd(node, &dimm_config_table[0], DDR4_SPD_CAS_LATENCIES_BYTE2)) << 16);
+ spd_cas_latency |= ((0xff & read_spd(node, &dimm_config_table[0], DDR4_SPD_CAS_LATENCIES_BYTE3)) << 24);
+ } else {
+ spd_cas_latency = 0xff & read_spd(node, &dimm_config_table[0], DDR3_SPD_CAS_LATENCIES_LSB);
+ spd_cas_latency |= ((0xff & read_spd(node, &dimm_config_table[0], DDR3_SPD_CAS_LATENCIES_MSB)) << 8);
+ }
+ debug_print("spd_cas_latency : %#06x\n", spd_cas_latency );
+
+ if (ddr_type == DDR4_DRAM) {
+
+ /* No other values for DDR4 MTB and FTB are specified at the
+ * current time so don't bother reading them. Can't speculate how
+ * new values will be represented.
+ */
+ int spdMTB = 125;
+ int spdFTB = 1;
+
+ tAAmin
+ = spdMTB * read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_CAS_LATENCY_TAAMIN)
+ + spdFTB * (SC_t) read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_CAS_LATENCY_FINE_TAAMIN);
+
+ ddr4_tCKAVGmin
+ = spdMTB * read_spd(node, &dimm_config_table[0], DDR4_SPD_MINIMUM_CYCLE_TIME_TCKAVGMIN)
+ + spdFTB * (SC_t) read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_CYCLE_TIME_FINE_TCKAVGMIN);
+
+ ddr4_tCKAVGmax
+ = spdMTB * read_spd(node, &dimm_config_table[0], DDR4_SPD_MAXIMUM_CYCLE_TIME_TCKAVGMAX)
+ + spdFTB * (SC_t) read_spd(node, &dimm_config_table[0], DDR4_SPD_MAX_CYCLE_TIME_FINE_TCKAVGMAX);
+
+ ddr4_tRCDmin
+ = spdMTB * read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_RAS_CAS_DELAY_TRCDMIN)
+ + spdFTB * (SC_t) read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_RAS_TO_CAS_DELAY_FINE_TRCDMIN);
+
+ ddr4_tRPmin
+ = spdMTB * read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_ROW_PRECHARGE_DELAY_TRPMIN)
+ + spdFTB * (SC_t) read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_ROW_PRECHARGE_DELAY_FINE_TRPMIN);
+
+ ddr4_tRASmin
+ = spdMTB * (((read_spd(node, &dimm_config_table[0], DDR4_SPD_UPPER_NIBBLES_TRAS_TRC) & 0xf) << 8) +
+ ( read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_ACTIVE_PRECHARGE_LSB_TRASMIN) & 0xff));
+
+ ddr4_tRCmin
+ = spdMTB * ((((read_spd(node, &dimm_config_table[0], DDR4_SPD_UPPER_NIBBLES_TRAS_TRC) >> 4) & 0xf) << 8) +
+ ( read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_ACTIVE_REFRESH_LSB_TRCMIN) & 0xff))
+ + spdFTB * (SC_t) read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_ACT_TO_ACT_REFRESH_DELAY_FINE_TRCMIN);
+
+ ddr4_tRFC1min
+ = spdMTB * (((read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_REFRESH_RECOVERY_MSB_TRFC1MIN) & 0xff) << 8) +
+ ( read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_REFRESH_RECOVERY_LSB_TRFC1MIN) & 0xff));
+
+ ddr4_tRFC2min
+ = spdMTB * (((read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_REFRESH_RECOVERY_MSB_TRFC2MIN) & 0xff) << 8) +
+ ( read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_REFRESH_RECOVERY_LSB_TRFC2MIN) & 0xff));
+
+ ddr4_tRFC4min
+ = spdMTB * (((read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_REFRESH_RECOVERY_MSB_TRFC4MIN) & 0xff) << 8) +
+ ( read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_REFRESH_RECOVERY_LSB_TRFC4MIN) & 0xff));
+
+ ddr4_tFAWmin
+ = spdMTB * (((read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_FOUR_ACTIVE_WINDOW_MSN_TFAWMIN) & 0xf) << 8) +
+ ( read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_FOUR_ACTIVE_WINDOW_LSB_TFAWMIN) & 0xff));
+
+ ddr4_tRRD_Smin
+ = spdMTB * read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_ROW_ACTIVE_DELAY_SAME_TRRD_SMIN)
+ + spdFTB * (SC_t) read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_ACT_TO_ACT_DELAY_DIFF_FINE_TRRD_SMIN);
+
+ ddr4_tRRD_Lmin
+ = spdMTB * read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_ROW_ACTIVE_DELAY_DIFF_TRRD_LMIN)
+ + spdFTB * (SC_t) read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_ACT_TO_ACT_DELAY_SAME_FINE_TRRD_LMIN);
+
+ ddr4_tCCD_Lmin
+ = spdMTB * read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_CAS_TO_CAS_DELAY_TCCD_LMIN)
+ + spdFTB * (SC_t) read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_CAS_TO_CAS_DELAY_FINE_TCCD_LMIN);
+
+ ddr_print("%-45s : %6d ps\n", "Medium Timebase (MTB)", spdMTB);
+ ddr_print("%-45s : %6d ps\n", "Fine Timebase (FTB)", spdFTB);
+
+ #define DDR4_TWR 15000
+ #define DDR4_TWTR_S 2500
+
+
+ tCKmin = ddr4_tCKAVGmin;
+ twr = DDR4_TWR;
+ trcd = ddr4_tRCDmin;
+ trrd = ddr4_tRRD_Smin;
+ trp = ddr4_tRPmin;
+ tras = ddr4_tRASmin;
+ trc = ddr4_tRCmin;
+ trfc = ddr4_tRFC1min;
+ twtr = DDR4_TWTR_S;
+ tfaw = ddr4_tFAWmin;
+
+ if (spd_rdimm) {
+ spd_addr_mirror = read_spd(node, &dimm_config_table[0], DDR4_SPD_RDIMM_ADDR_MAPPING_FROM_REGISTER_TO_DRAM) & 0x1;
+ } else {
+ spd_addr_mirror = read_spd(node, &dimm_config_table[0], DDR4_SPD_UDIMM_ADDR_MAPPING_FROM_EDGE) & 0x1;
+ }
+ debug_print("spd_addr_mirror : %#06x\n", spd_addr_mirror );
+
+ } else { /* if (ddr_type == DDR4_DRAM) */
+ spd_mtb_dividend = 0xff & read_spd(node, &dimm_config_table[0], DDR3_SPD_MEDIUM_TIMEBASE_DIVIDEND);
+ spd_mtb_divisor = 0xff & read_spd(node, &dimm_config_table[0], DDR3_SPD_MEDIUM_TIMEBASE_DIVISOR);
+ spd_tck_min = 0xff & read_spd(node, &dimm_config_table[0], DDR3_SPD_MINIMUM_CYCLE_TIME_TCKMIN);
+ spd_taa_min = 0xff & read_spd(node, &dimm_config_table[0], DDR3_SPD_MIN_CAS_LATENCY_TAAMIN);
+
+ spd_twr = 0xff & read_spd(node, &dimm_config_table[0], DDR3_SPD_MIN_WRITE_RECOVERY_TWRMIN);
+ spd_trcd = 0xff & read_spd(node, &dimm_config_table[0], DDR3_SPD_MIN_RAS_CAS_DELAY_TRCDMIN);
+ spd_trrd = 0xff & read_spd(node, &dimm_config_table[0], DDR3_SPD_MIN_ROW_ACTIVE_DELAY_TRRDMIN);
+ spd_trp = 0xff & read_spd(node, &dimm_config_table[0], DDR3_SPD_MIN_ROW_PRECHARGE_DELAY_TRPMIN);
+ spd_tras = 0xff & read_spd(node, &dimm_config_table[0], DDR3_SPD_MIN_ACTIVE_PRECHARGE_LSB_TRASMIN);
+ spd_tras |= ((0xff & read_spd(node, &dimm_config_table[0], DDR3_SPD_UPPER_NIBBLES_TRAS_TRC)&0xf) << 8);
+ spd_trc = 0xff & read_spd(node, &dimm_config_table[0], DDR3_SPD_MIN_ACTIVE_REFRESH_LSB_TRCMIN);
+ spd_trc |= ((0xff & read_spd(node, &dimm_config_table[0], DDR3_SPD_UPPER_NIBBLES_TRAS_TRC)&0xf0) << 4);
+ spd_trfc = 0xff & read_spd(node, &dimm_config_table[0], DDR3_SPD_MIN_REFRESH_RECOVERY_LSB_TRFCMIN);
+ spd_trfc |= ((0xff & read_spd(node, &dimm_config_table[0], DDR3_SPD_MIN_REFRESH_RECOVERY_MSB_TRFCMIN)) << 8);
+ spd_twtr = 0xff & read_spd(node, &dimm_config_table[0], DDR3_SPD_MIN_INTERNAL_WRITE_READ_CMD_TWTRMIN);
+ spd_trtp = 0xff & read_spd(node, &dimm_config_table[0], DDR3_SPD_MIN_INTERNAL_READ_PRECHARGE_CMD_TRTPMIN);
+ spd_tfaw = 0xff & read_spd(node, &dimm_config_table[0], DDR3_SPD_MIN_FOUR_ACTIVE_WINDOW_TFAWMIN);
+ spd_tfaw |= ((0xff & read_spd(node, &dimm_config_table[0], DDR3_SPD_UPPER_NIBBLE_TFAW)&0xf) << 8);
+ spd_addr_mirror = 0xff & read_spd(node, &dimm_config_table[0], DDR3_SPD_ADDRESS_MAPPING) & 0x1;
+ spd_addr_mirror = spd_addr_mirror && !spd_rdimm; /* Only address mirror unbuffered dimms. */
+ ftb_Dividend = read_spd(node, &dimm_config_table[0], DDR3_SPD_FINE_TIMEBASE_DIVIDEND_DIVISOR) >> 4;
+ ftb_Divisor = read_spd(node, &dimm_config_table[0], DDR3_SPD_FINE_TIMEBASE_DIVIDEND_DIVISOR) & 0xf;
+ ftb_Divisor = (ftb_Divisor == 0) ? 1 : ftb_Divisor; /* Make sure that it is not 0 */
+
+ debug_print("spd_twr : %#06x\n", spd_twr );
+ debug_print("spd_trcd : %#06x\n", spd_trcd);
+ debug_print("spd_trrd : %#06x\n", spd_trrd);
+ debug_print("spd_trp : %#06x\n", spd_trp );
+ debug_print("spd_tras : %#06x\n", spd_tras);
+ debug_print("spd_trc : %#06x\n", spd_trc );
+ debug_print("spd_trfc : %#06x\n", spd_trfc);
+ debug_print("spd_twtr : %#06x\n", spd_twtr);
+ debug_print("spd_trtp : %#06x\n", spd_trtp);
+ debug_print("spd_tfaw : %#06x\n", spd_tfaw);
+ debug_print("spd_addr_mirror : %#06x\n", spd_addr_mirror);
+
+ mtb_psec = spd_mtb_dividend * 1000 / spd_mtb_divisor;
+ tAAmin = mtb_psec * spd_taa_min;
+ tAAmin += ftb_Dividend * (SC_t) read_spd(node, &dimm_config_table[0], DDR3_SPD_MIN_CAS_LATENCY_FINE_TAAMIN) / ftb_Divisor;
+ tCKmin = mtb_psec * spd_tck_min;
+ tCKmin += ftb_Dividend * (SC_t) read_spd(node, &dimm_config_table[0], DDR3_SPD_MINIMUM_CYCLE_TIME_FINE_TCKMIN) / ftb_Divisor;
+
+ twr = spd_twr * mtb_psec;
+ trcd = spd_trcd * mtb_psec;
+ trrd = spd_trrd * mtb_psec;
+ trp = spd_trp * mtb_psec;
+ tras = spd_tras * mtb_psec;
+ trc = spd_trc * mtb_psec;
+ trfc = spd_trfc * mtb_psec;
+ twtr = spd_twtr * mtb_psec;
+ trtp = spd_trtp * mtb_psec;
+ tfaw = spd_tfaw * mtb_psec;
+
+ } /* if (ddr_type == DDR4_DRAM) */
+
+ if (ddr_type == DDR4_DRAM) {
+ ddr_print("%-45s : %6d ps (%ld MT/s)\n", "SDRAM Minimum Cycle Time (tCKAVGmin)",ddr4_tCKAVGmin,
+ pretty_psecs_to_mts(ddr4_tCKAVGmin));
+ ddr_print("%-45s : %6d ps\n", "SDRAM Maximum Cycle Time (tCKAVGmax)", ddr4_tCKAVGmax);
+ ddr_print("%-45s : %6d ps\n", "Minimum CAS Latency Time (tAAmin)", tAAmin);
+ ddr_print("%-45s : %6d ps\n", "Minimum RAS to CAS Delay Time (tRCDmin)", ddr4_tRCDmin);
+ ddr_print("%-45s : %6d ps\n", "Minimum Row Precharge Delay Time (tRPmin)", ddr4_tRPmin);
+ ddr_print("%-45s : %6d ps\n", "Minimum Active to Precharge Delay (tRASmin)", ddr4_tRASmin);
+ ddr_print("%-45s : %6d ps\n", "Minimum Active to Active/Refr. Delay (tRCmin)", ddr4_tRCmin);
+ ddr_print("%-45s : %6d ps\n", "Minimum Refresh Recovery Delay (tRFC1min)", ddr4_tRFC1min);
+ ddr_print("%-45s : %6d ps\n", "Minimum Refresh Recovery Delay (tRFC2min)", ddr4_tRFC2min);
+ ddr_print("%-45s : %6d ps\n", "Minimum Refresh Recovery Delay (tRFC4min)", ddr4_tRFC4min);
+ ddr_print("%-45s : %6d ps\n", "Minimum Four Activate Window Time (tFAWmin)", ddr4_tFAWmin);
+ ddr_print("%-45s : %6d ps\n", "Minimum Act. to Act. Delay (tRRD_Smin)", ddr4_tRRD_Smin);
+ ddr_print("%-45s : %6d ps\n", "Minimum Act. to Act. Delay (tRRD_Lmin)", ddr4_tRRD_Lmin);
+ ddr_print("%-45s : %6d ps\n", "Minimum CAS to CAS Delay Time (tCCD_Lmin)", ddr4_tCCD_Lmin);
+ } else {
+ ddr_print("Medium Timebase (MTB) : %6d ps\n", mtb_psec);
+ ddr_print("Minimum Cycle Time (tCKmin) : %6d ps (%ld MT/s)\n", tCKmin,
+ pretty_psecs_to_mts(tCKmin));
+ ddr_print("Minimum CAS Latency Time (tAAmin) : %6d ps\n", tAAmin);
+ ddr_print("Write Recovery Time (tWR) : %6d ps\n", twr);
+ ddr_print("Minimum RAS to CAS delay (tRCD) : %6d ps\n", trcd);
+ ddr_print("Minimum Row Active to Row Active delay (tRRD) : %6d ps\n", trrd);
+ ddr_print("Minimum Row Precharge Delay (tRP) : %6d ps\n", trp);
+ ddr_print("Minimum Active to Precharge (tRAS) : %6d ps\n", tras);
+ ddr_print("Minimum Active to Active/Refresh Delay (tRC) : %6d ps\n", trc);
+ ddr_print("Minimum Refresh Recovery Delay (tRFC) : %6d ps\n", trfc);
+ ddr_print("Internal write to read command delay (tWTR) : %6d ps\n", twtr);
+ ddr_print("Min Internal Rd to Precharge Cmd Delay (tRTP) : %6d ps\n", trtp);
+ ddr_print("Minimum Four Activate Window Delay (tFAW) : %6d ps\n", tfaw);
+ }
+
+
+ /* When the cycle time is within 1 psec of the minimum, accept it
+ as a slight rounding error and adjust it to exactly the minimum
+ cycle time. This avoids an unnecessary warning. */
+ if (_abs(tclk_psecs - tCKmin) < 2)
+ tclk_psecs = tCKmin;
+
+ if (tclk_psecs < (uint64_t)tCKmin) {
+ ddr_print("WARNING!!!!: DDR Clock Rate (tCLK: %ld) exceeds DIMM specifications (tCKmin: %ld)!!!!\n",
+ tclk_psecs, (uint64_t)tCKmin);
+ }
+
+
+ ddr_print("DDR Clock Rate (tCLK) : %6lu ps\n", tclk_psecs);
+ ddr_print("Core Clock Rate (eCLK) : %6lu ps\n", eclk_psecs);
+
+ if ((s = lookup_env_parameter("ddr_use_ecc")) != NULL) {
+ use_ecc = !!strtoul(s, NULL, 0);
+ }
+ use_ecc = use_ecc && spd_ecc;
+
+ ddr_interface_bytemask = ddr_interface_64b
+ ? (use_ecc ? 0x1ff : 0xff)
+ : (use_ecc ? 0x01f : 0x0f); // FIXME? 81xx does diff from 70xx
+
+ ddr_print("DRAM Interface width: %d bits %s bytemask 0x%x\n",
+ ddr_interface_64b ? 64 : 32, use_ecc ? "+ECC" : "",
+ ddr_interface_bytemask);
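+
+ /* i.e. 64-bit + ECC enables all 9 byte lanes (0x1ff); 32-bit + ECC
+ enables lanes 0..3 plus the ECC lane (0x01f). */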
+
+ ddr_print("\n------ Board Custom Configuration Settings ------\n");
+ ddr_print("%-45s : %d\n", "MIN_RTT_NOM_IDX ", custom_lmc_config->min_rtt_nom_idx);
+ ddr_print("%-45s : %d\n", "MAX_RTT_NOM_IDX ", custom_lmc_config->max_rtt_nom_idx);
+ ddr_print("%-45s : %d\n", "MIN_RODT_CTL ", custom_lmc_config->min_rodt_ctl);
+ ddr_print("%-45s : %d\n", "MAX_RODT_CTL ", custom_lmc_config->max_rodt_ctl);
+ ddr_print("%-45s : %d\n", "MIN_CAS_LATENCY ", custom_lmc_config->min_cas_latency);
+ ddr_print("%-45s : %d\n", "OFFSET_EN ", custom_lmc_config->offset_en);
+ ddr_print("%-45s : %d\n", "OFFSET_UDIMM ", custom_lmc_config->offset_udimm);
+ ddr_print("%-45s : %d\n", "OFFSET_RDIMM ", custom_lmc_config->offset_rdimm);
+ ddr_print("%-45s : %d\n", "DDR_RTT_NOM_AUTO ", custom_lmc_config->ddr_rtt_nom_auto);
+ ddr_print("%-45s : %d\n", "DDR_RODT_CTL_AUTO ", custom_lmc_config->ddr_rodt_ctl_auto);
+ if (spd_rdimm)
+ ddr_print("%-45s : %d\n", "RLEVEL_COMP_OFFSET", custom_lmc_config->rlevel_comp_offset_rdimm);
+ else
+ ddr_print("%-45s : %d\n", "RLEVEL_COMP_OFFSET", custom_lmc_config->rlevel_comp_offset_udimm);
+ ddr_print("%-45s : %d\n", "RLEVEL_COMPUTE ", custom_lmc_config->rlevel_compute);
+ ddr_print("%-45s : %d\n", "DDR2T_UDIMM ", custom_lmc_config->ddr2t_udimm);
+ ddr_print("%-45s : %d\n", "DDR2T_RDIMM ", custom_lmc_config->ddr2t_rdimm);
+ ddr_print("%-45s : %d\n", "FPRCH2 ", custom_lmc_config->fprch2);
+ ddr_print("-------------------------------------------------\n");
+
+
+ CL = divide_roundup(tAAmin, tclk_psecs);
+
+ ddr_print("Desired CAS Latency : %6d\n", CL);
+
+ min_cas_latency = custom_lmc_config->min_cas_latency;
+
+
+ if ((s = lookup_env_parameter("ddr_min_cas_latency")) != NULL) {
+ min_cas_latency = strtoul(s, NULL, 0);
+ }
+
+ {
+ int base_CL;
+ ddr_print("CAS Latencies supported in DIMM :");
+ base_CL = (ddr_type == DDR4_DRAM) ? 7 : 4;
+ for (i=0; i<32; ++i) {
+ if ((spd_cas_latency >> i) & 1) {
+ ddr_print(" %d", i+base_CL);
+ max_cas_latency = i+base_CL;
+ if (min_cas_latency == 0)
+ min_cas_latency = i+base_CL;
+ }
+ }
+ ddr_print("\n");
+
+ /* Use relaxed timing when running slower than the minimum
+ supported speed. Adjust timing to match the smallest supported
+ CAS Latency. */
+ if (CL < min_cas_latency) {
+ uint64_t adjusted_tclk = tAAmin / min_cas_latency;
+ CL = min_cas_latency;
+ ddr_print("Slow clock speed. Adjusting timing: tClk = %lu, Adjusted tClk = %ld\n",
+ tclk_psecs, adjusted_tclk);
+ tclk_psecs = adjusted_tclk;
+ }
+
+ if ((s = lookup_env_parameter("ddr_cas_latency")) != NULL) {
+ override_cas_latency = strtoul(s, NULL, 0);
+ }
+
+ /* Make sure that the selected cas latency is legal */
+ for (i=(CL-base_CL); i<32; ++i) {
+ if ((spd_cas_latency >> i) & 1) {
+ CL = i+base_CL;
+ break;
+ }
+ }
+ }
+
+ if (CL > max_cas_latency)
+ CL = max_cas_latency;
+
+ if (override_cas_latency != 0) {
+ CL = override_cas_latency;
+ }
+
+ ddr_print("CAS Latency : %6d\n", CL);
+
+ if ((CL * tCKmin) > 20000)
+ {
+ ddr_print("(CLactual * tCKmin) = %d exceeds 20 ns\n", (CL * tCKmin));
+ }
+
+ if ((num_banks != 4) && (num_banks != 8) && (num_banks != 16))
+ {
+ error_print("Unsupported number of banks %d. Must be 4 or 8 or 16.\n", num_banks);
+ ++fatal_error;
+ }
+
+ if ((num_ranks != 1) && (num_ranks != 2) && (num_ranks != 4))
+ {
+ error_print("Unsupported number of ranks: %d\n", num_ranks);
+ ++fatal_error;
+ }
+
+ if (! CAVIUM_IS_MODEL(CAVIUM_CN81XX)) { // 88XX or 83XX, but not 81XX
+ if ((dram_width != 8) && (dram_width != 16) && (dram_width != 4)) {
+ error_print("Unsupported SDRAM Width, x%d. Must be x4, x8 or x16.\n", dram_width);
+ ++fatal_error;
+ }
+ } else if ((dram_width != 8) && (dram_width != 16)) { // 81XX can only do x8 or x16
+ error_print("Unsupported SDRAM Width, x%d. Must be x8 or x16.\n", dram_width);
+ ++fatal_error;
+ }
+
+
+ /*
+ ** Bail out here if things are not copacetic.
+ */
+ if (fatal_error)
+ return(-1);
+
+ /*
+ * 6.9.6 LMC RESET Initialization
+ *
+ * The purpose of this step is to assert/deassert the RESET# pin at the
+ * DDR3/DDR4 parts.
+ *
+ * This LMC RESET step is done for all enabled LMCs.
+ */
+ perform_lmc_reset(node, ddr_interface_num);
+
+ // Make sure scrambling is disabled during init...
+ {
+ bdk_lmcx_control_t lmc_control;
+
+ lmc_control.u = BDK_CSR_READ(node, BDK_LMCX_CONTROL(ddr_interface_num));
+ lmc_control.s.scramble_ena = 0;
+ DRAM_CSR_WRITE(node, BDK_LMCX_CONTROL(ddr_interface_num), lmc_control.u);
+
+ DRAM_CSR_WRITE(node, BDK_LMCX_SCRAMBLE_CFG0(ddr_interface_num), 0);
+ DRAM_CSR_WRITE(node, BDK_LMCX_SCRAMBLE_CFG1(ddr_interface_num), 0);
+ DRAM_CSR_WRITE(node, BDK_LMCX_SCRAMBLE_CFG2(ddr_interface_num), 0);
+ }
+
+
+ odt_idx = dimm_count - 1;
+
+ switch (num_ranks) {
+ case 1:
+ odt_config = odt_1rank_config;
+ break;
+ case 2:
+ odt_config = odt_2rank_config;
+ break;
+ case 4:
+ odt_config = odt_4rank_config;
+ break;
+ default:
+ odt_config = disable_odt_config;
+ error_print("Unsupported number of ranks: %d\n", num_ranks);
+ ++fatal_error;
+ }
+
+
+ /* Parameters from DDR3 Specifications */
+#define DDR3_tREFI 7800000 /* 7.8 us */
+#define DDR3_ZQCS 80000ull /* 80 ns */
+#define DDR3_ZQCS_Interval 1280000000 /* 128ms/100 */
+#define DDR3_tCKE 5000 /* 5 ns */
+#define DDR3_tMRD 4 /* 4 nCK */
+#define DDR3_tDLLK 512 /* 512 nCK */
+#define DDR3_tMPRR 1 /* 1 nCK */
+#define DDR3_tWLMRD 40 /* 40 nCK */
+#define DDR3_tWLDQSEN 25 /* 25 nCK */
+
+ /* Parameters from DDR4 Specifications */
+#define DDR4_tMRD 8 /* 8 nCK */
+#define DDR4_tDLLK 768 /* 768 nCK */
+
+ /*
+ * 6.9.7 Early LMC Initialization
+ *
+ * All of DDR PLL, LMC CK, and LMC DRESET initializations must be
+ * completed prior to starting this LMC initialization sequence.
+ *
+ * Perform the following five substeps for early LMC initialization:
+ *
+ * 1. Software must ensure there are no pending DRAM transactions.
+ *
+ * 2. Write LMC(0)_CONFIG, LMC(0)_CONTROL, LMC(0)_TIMING_PARAMS0,
+ * LMC(0)_TIMING_PARAMS1, LMC(0)_MODEREG_PARAMS0,
+ * LMC(0)_MODEREG_PARAMS1, LMC(0)_DUAL_MEMCFG, LMC(0)_NXM,
+ * LMC(0)_WODT_MASK, LMC(0)_RODT_MASK, LMC(0)_COMP_CTL2,
+ * LMC(0)_PHY_CTL, LMC(0)_DIMM0/1_PARAMS, and LMC(0)_DIMM_CTL with
+ * appropriate values. All sections in this chapter can be used to
+ * derive proper register settings.
+ */
+
+ /* LMC(0)_CONFIG */
+ {
+ lmc_config.u = 0;
+
+ lmc_config.s.ecc_ena = use_ecc;
+ lmc_config.s.row_lsb = encode_row_lsb_ddr3(row_lsb, ddr_interface_64b);
+ lmc_config.s.pbank_lsb = encode_pbank_lsb_ddr3(pbank_lsb, ddr_interface_64b);
+
+ lmc_config.s.idlepower = 0; /* Disabled */
+
+ if ((s = lookup_env_parameter("ddr_idlepower")) != NULL) {
+ lmc_config.s.idlepower = strtoul(s, NULL, 0);
+ }
+
+ lmc_config.s.forcewrite = 0; /* Disabled */
+ lmc_config.s.ecc_adr = 1; /* Include memory reference address in the ECC */
+
+ if ((s = lookup_env_parameter("ddr_ecc_adr")) != NULL) {
+ lmc_config.s.ecc_adr = strtoul(s, NULL, 0);
+ }
+
+ lmc_config.s.reset = 0;
+
+ /*
+ * Program LMC0_CONFIG[24:18], ref_zqcs_int(6:0) to
+ * RND-DN(tREFI/clkPeriod/512). Program LMC0_CONFIG[36:25],
+ * ref_zqcs_int(18:7) to
+ * RND-DN(ZQCS_Interval/clkPeriod/(512*128)). Note that this
+ * value should always be greater than 32, to account for
+ * resistor calibration delays.
+ */
+
+ lmc_config.s.ref_zqcs_int = ((DDR3_tREFI/tclk_psecs/512) & 0x7f);
+ lmc_config.s.ref_zqcs_int |= ((max(33ull, (DDR3_ZQCS_Interval/(tclk_psecs/100)/(512*128))) & 0xfff) << 7);
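+ /*
+ * Worked example of the two fields (integer arithmetic, assuming
+ * tclk_psecs = 1250, i.e. DDR4-1600):
+ * bits <6:0> : 7800000/1250/512 = 12
+ * bits <18:7> : 1280000000/(1250/100)/(512*128) = 1627 (above the 33 floor)
+ * so ref_zqcs_int = 12 | (1627 << 7).
+ */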
+
+
+ lmc_config.s.early_dqx = 1; /* Default to enabled */
+
+ if ((s = lookup_env_parameter("ddr_early_dqx")) == NULL)
+ s = lookup_env_parameter("ddr%d_early_dqx", ddr_interface_num);
+ if (s != NULL) {
+ lmc_config.s.early_dqx = strtoul(s, NULL, 0);
+ }
+
+ lmc_config.s.sref_with_dll = 0;
+
+ lmc_config.s.rank_ena = bunk_enable;
+ lmc_config.s.rankmask = rank_mask; /* Set later */
+ lmc_config.s.mirrmask = (spd_addr_mirror << 1 | spd_addr_mirror << 3) & rank_mask;
+ lmc_config.s.init_status = rank_mask; /* Set once and don't change it. */
+ lmc_config.s.early_unload_d0_r0 = 0;
+ lmc_config.s.early_unload_d0_r1 = 0;
+ lmc_config.s.early_unload_d1_r0 = 0;
+ lmc_config.s.early_unload_d1_r1 = 0;
+ lmc_config.s.scrz = 0;
+ // set 32-bit mode for real only when selected AND 81xx...
+ if (!ddr_interface_64b && CAVIUM_IS_MODEL(CAVIUM_CN81XX)) {
+ lmc_config.s.mode32b = 1;
+ }
+ VB_PRT(VBL_DEV, "%-45s : %d\n", "MODE32B (init)", lmc_config.s.mode32b);
+ lmc_config.s.mode_x4dev = (dram_width == 4) ? 1 : 0;
+ lmc_config.s.bg2_enable = ((ddr_type == DDR4_DRAM) && (dram_width == 16)) ? 0 : 1;
+
+ if ((s = lookup_env_parameter_ull("ddr_config")) != NULL) {
+ lmc_config.u = strtoull(s, NULL, 0);
+ }
+ ddr_print("LMC_CONFIG : 0x%016lx\n", lmc_config.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_CONFIG(ddr_interface_num), lmc_config.u);
+ }
+
+ /* LMC(0)_CONTROL */
+ {
+ bdk_lmcx_control_t lmc_control;
+ lmc_control.u = BDK_CSR_READ(node, BDK_LMCX_CONTROL(ddr_interface_num));
+ lmc_control.s.rdimm_ena = spd_rdimm;
+ lmc_control.s.bwcnt = 0; /* Clear counter later */
+ if (spd_rdimm)
+ lmc_control.s.ddr2t = (safe_ddr_flag ? 1 : custom_lmc_config->ddr2t_rdimm );
+ else
+ lmc_control.s.ddr2t = (safe_ddr_flag ? 1 : custom_lmc_config->ddr2t_udimm );
+ lmc_control.s.pocas = 0;
+ lmc_control.s.fprch2 = (safe_ddr_flag ? 2 : custom_lmc_config->fprch2 );
+ lmc_control.s.throttle_rd = safe_ddr_flag ? 1 : 0;
+ lmc_control.s.throttle_wr = safe_ddr_flag ? 1 : 0;
+ lmc_control.s.inorder_rd = safe_ddr_flag ? 1 : 0;
+ lmc_control.s.inorder_wr = safe_ddr_flag ? 1 : 0;
+ lmc_control.cn81xx.elev_prio_dis = safe_ddr_flag ? 1 : 0;
+ lmc_control.s.nxm_write_en = 0; /* discards writes to
+ addresses that don't exist
+ in the DRAM */
+ lmc_control.s.max_write_batch = 8;
+ lmc_control.s.xor_bank = 1;
+ lmc_control.s.auto_dclkdis = 1;
+ lmc_control.s.int_zqcs_dis = 0;
+ lmc_control.s.ext_zqcs_dis = 0;
+ lmc_control.s.bprch = 1;
+ lmc_control.s.wodt_bprch = 1;
+ lmc_control.s.rodt_bprch = 1;
+
+ if ((s = lookup_env_parameter("ddr_xor_bank")) != NULL) {
+ lmc_control.s.xor_bank = strtoul(s, NULL, 0);
+ }
+
+ if ((s = lookup_env_parameter("ddr_2t")) != NULL) {
+ lmc_control.s.ddr2t = strtoul(s, NULL, 0);
+ }
+
+ if ((s = lookup_env_parameter("ddr_fprch2")) != NULL) {
+ lmc_control.s.fprch2 = strtoul(s, NULL, 0);
+ }
+
+ if ((s = lookup_env_parameter("ddr_bprch")) != NULL) {
+ lmc_control.s.bprch = strtoul(s, NULL, 0);
+ }
+
+ if ((s = lookup_env_parameter("ddr_wodt_bprch")) != NULL) {
+ lmc_control.s.wodt_bprch = strtoul(s, NULL, 0);
+ }
+
+ if ((s = lookup_env_parameter("ddr_rodt_bprch")) != NULL) {
+ lmc_control.s.rodt_bprch = strtoul(s, NULL, 0);
+ }
+
+ if ((s = lookup_env_parameter("ddr_int_zqcs_dis")) != NULL) {
+ lmc_control.s.int_zqcs_dis = strtoul(s, NULL, 0);
+ }
+
+ if ((s = lookup_env_parameter("ddr_ext_zqcs_dis")) != NULL) {
+ lmc_control.s.ext_zqcs_dis = strtoul(s, NULL, 0);
+ }
+
+ if ((s = lookup_env_parameter_ull("ddr_control")) != NULL) {
+ lmc_control.u = strtoull(s, NULL, 0);
+ }
+ ddr_print("LMC_CONTROL : 0x%016lx\n", lmc_control.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_CONTROL(ddr_interface_num), lmc_control.u);
+ }
+
+ /* LMC(0)_TIMING_PARAMS0 */
+ {
+ unsigned trp_value;
+ bdk_lmcx_timing_params0_t lmc_timing_params0;
+ lmc_timing_params0.u = BDK_CSR_READ(node, BDK_LMCX_TIMING_PARAMS0(ddr_interface_num));
+
+ trp_value = divide_roundup(trp, tclk_psecs) - 1;
+ ddr_print("TIMING_PARAMS0[TRP]: NEW 0x%x, OLD 0x%x\n", trp_value,
+ trp_value + (unsigned)(divide_roundup(max(4*tclk_psecs, 7500ull), tclk_psecs)) - 4);
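+ /*
+ * e.g., trp = 13750 ps at tclk_psecs = 1250 encodes as
+ * divide_roundup(13750, 1250) - 1 = 10; the "OLD" form adds
+ * divide_roundup(max(4*1250, 7500), 1250) - 4 = 2 on top of that.
+ */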
+#if 1
+ if ((s = lookup_env_parameter_ull("ddr_use_old_trp")) != NULL) {
+ if (!!strtoull(s, NULL, 0)) {
+ trp_value += divide_roundup(max(4*tclk_psecs, 7500ull), tclk_psecs) - 4;
+ ddr_print("TIMING_PARAMS0[trp]: USING OLD 0x%x\n", trp_value);
+ }
+ }
+#endif
+
+ lmc_timing_params0.s.txpr = divide_roundup(max(5*tclk_psecs, trfc+10000ull), 16*tclk_psecs);
+ lmc_timing_params0.s.tzqinit = divide_roundup(max(512*tclk_psecs, 640000ull), (256*tclk_psecs));
+ lmc_timing_params0.s.trp = trp_value & 0x1f;
+ lmc_timing_params0.s.tcksre = divide_roundup(max(5*tclk_psecs, 10000ull), tclk_psecs) - 1;
+
+ if (ddr_type == DDR4_DRAM) {
+ lmc_timing_params0.s.tzqcs = divide_roundup(128*tclk_psecs, (16*tclk_psecs)); /* Always 8. */
+ lmc_timing_params0.s.tcke = divide_roundup(max(3*tclk_psecs, (uint64_t) DDR3_tCKE), tclk_psecs) - 1;
+ lmc_timing_params0.s.tmrd = divide_roundup((DDR4_tMRD*tclk_psecs), tclk_psecs) - 1;
+ //lmc_timing_params0.s.tmod = divide_roundup(max(24*tclk_psecs, 15000ull), tclk_psecs) - 1;
+ lmc_timing_params0.s.tmod = 25; /* 25 is the max allowed */
+ lmc_timing_params0.s.tdllk = divide_roundup(DDR4_tDLLK, 256);
+ } else {
+ lmc_timing_params0.s.tzqcs = divide_roundup(max(64*tclk_psecs, DDR3_ZQCS), (16*tclk_psecs));
+ lmc_timing_params0.s.tcke = divide_roundup(DDR3_tCKE, tclk_psecs) - 1;
+ lmc_timing_params0.s.tmrd = divide_roundup((DDR3_tMRD*tclk_psecs), tclk_psecs) - 1;
+ lmc_timing_params0.s.tmod = divide_roundup(max(12*tclk_psecs, 15000ull), tclk_psecs) - 1;
+ lmc_timing_params0.s.tdllk = divide_roundup(DDR3_tDLLK, 256);
+ }
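+ /*
+ * Example encodings at tclk_psecs = 1250 (illustrative):
+ * tzqinit = divide_roundup(max(512*1250, 640000), 256*1250) = 2
+ * tcksre = divide_roundup(max(5*1250, 10000), 1250) - 1 = 7
+ */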
+
+ if ((s = lookup_env_parameter_ull("ddr_timing_params0")) != NULL) {
+ lmc_timing_params0.u = strtoull(s, NULL, 0);
+ }
+ ddr_print("TIMING_PARAMS0 : 0x%016lx\n", lmc_timing_params0.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_TIMING_PARAMS0(ddr_interface_num), lmc_timing_params0.u);
+ }
+
+ /* LMC(0)_TIMING_PARAMS1 */
+ {
+ int txp, temp_trcd, trfc_dlr;
+ bdk_lmcx_timing_params1_t lmc_timing_params1;
+ lmc_timing_params1.u = BDK_CSR_READ(node, BDK_LMCX_TIMING_PARAMS1(ddr_interface_num));
+
+ lmc_timing_params1.s.tmprr = divide_roundup(DDR3_tMPRR*tclk_psecs, tclk_psecs) - 1;
+
+ lmc_timing_params1.s.tras = divide_roundup(tras, tclk_psecs) - 1;
+
+ // NOTE: this is reworked for pass 2.x
+ temp_trcd = divide_roundup(trcd, tclk_psecs);
+#if 1
+ if (temp_trcd > 15)
+ ddr_print("TIMING_PARAMS1[trcd]: need extension bit for 0x%x\n", temp_trcd);
+#endif
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X) && (temp_trcd > 15)) {
+ /* Let .trcd=0 serve as a flag that the field has
+ overflowed. Must use Additive Latency mode as a
+ workaround. */
+ temp_trcd = 0;
+ }
+ lmc_timing_params1.s.trcd = temp_trcd & 0x0f;
+ lmc_timing_params1.s.trcd_ext = (temp_trcd >> 4) & 1;
+
+ lmc_timing_params1.s.twtr = divide_roundup(twtr, tclk_psecs) - 1;
+ lmc_timing_params1.s.trfc = divide_roundup(trfc, 8*tclk_psecs);
+
+ // workaround needed for all THUNDER chips thru T88 Pass 2.0,
+ // but not 81xx and 83xx...
+ if ((ddr_type == DDR4_DRAM) && CAVIUM_IS_MODEL(CAVIUM_CN88XX)) {
+ /* Workaround bug 24006. Use Trrd_l. */
+ lmc_timing_params1.s.trrd = divide_roundup(ddr4_tRRD_Lmin, tclk_psecs) - 2;
+ } else
+ lmc_timing_params1.s.trrd = divide_roundup(trrd, tclk_psecs) - 2;
+
+ /*
+ ** tXP = max( 3nCK, 7.5 ns) DDR3-800 tCLK = 2500 psec
+ ** tXP = max( 3nCK, 7.5 ns) DDR3-1066 tCLK = 1875 psec
+ ** tXP = max( 3nCK, 6.0 ns) DDR3-1333 tCLK = 1500 psec
+ ** tXP = max( 3nCK, 6.0 ns) DDR3-1600 tCLK = 1250 psec
+ ** tXP = max( 3nCK, 6.0 ns) DDR3-1866 tCLK = 1071 psec
+ ** tXP = max( 3nCK, 6.0 ns) DDR3-2133 tCLK = 937 psec
+ */
+ txp = (tclk_psecs < 1875) ? 6000 : 7500;
+ // NOTE: this is reworked for pass 2.x
+ int temp_txp = divide_roundup(max(3*tclk_psecs, (unsigned)txp), tclk_psecs) - 1;
+#if 1
+ if (temp_txp > 7)
+ ddr_print("TIMING_PARAMS1[txp]: need extension bit for 0x%x\n", temp_txp);
+#endif
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X) && (temp_txp > 7)) {
+ temp_txp = 7; // max it out
+ }
+ lmc_timing_params1.s.txp = temp_txp & 7;
+ lmc_timing_params1.s.txp_ext = (temp_txp >> 3) & 1;
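+ /*
+ * e.g., at tclk_psecs = 1250: txp = 6000 ps, so
+ * temp_txp = divide_roundup(max(3750, 6000), 1250) - 1 = 4,
+ * giving TXP = 4 with TXP_EXT = 0.
+ */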
+
+ lmc_timing_params1.s.twlmrd = divide_roundup(DDR3_tWLMRD*tclk_psecs, 4*tclk_psecs);
+ lmc_timing_params1.s.twldqsen = divide_roundup(DDR3_tWLDQSEN*tclk_psecs, 4*tclk_psecs);
+ lmc_timing_params1.s.tfaw = divide_roundup(tfaw, 4*tclk_psecs);
+ lmc_timing_params1.s.txpdll = divide_roundup(max(10*tclk_psecs, 24000ull), tclk_psecs) - 1;
+
+ if ((ddr_type == DDR4_DRAM) && is_3ds_dimm) {
+ /*
+ 4 Gb: tRFC_DLR = 90 ns
+ 8 Gb: tRFC_DLR = 120 ns
+ 16 Gb: tRFC_DLR = 190 ns FIXME?
+ */
+ // RNDUP[tRFC_DLR(ns) / (8 * TCYC(ns))]
+ if (die_capacity == 0x1000) // 4 Gbit
+ trfc_dlr = 90;
+ else if (die_capacity == 0x2000) // 8 Gbit
+ trfc_dlr = 120;
+ else if (die_capacity == 0x4000) // 16 Gbit
+ trfc_dlr = 190;
+ else
+ trfc_dlr = 0;
+
+ if (trfc_dlr == 0) {
+ ddr_print("N%d.LMC%d: ERROR: tRFC_DLR: die_capacity %u Mbit is illegal\n",
+ node, ddr_interface_num, die_capacity);
+ } else {
+ lmc_timing_params1.s.trfc_dlr = divide_roundup(trfc_dlr * 1000UL, 8*tclk_psecs);
+ ddr_print("N%d.LMC%d: TIMING_PARAMS1[trfc_dlr] set to %u\n",
+ node, ddr_interface_num, lmc_timing_params1.s.trfc_dlr);
+ }
+ }
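+ /*
+ * e.g., an 8 Gbit die (tRFC_DLR = 120 ns) at tclk_psecs = 1250:
+ * divide_roundup(120 * 1000, 8 * 1250) = 12.
+ */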
+
+ if ((s = lookup_env_parameter_ull("ddr_timing_params1")) != NULL) {
+ lmc_timing_params1.u = strtoull(s, NULL, 0);
+ }
+ ddr_print("TIMING_PARAMS1 : 0x%016lx\n", lmc_timing_params1.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_TIMING_PARAMS1(ddr_interface_num), lmc_timing_params1.u);
+ }
+
+ /* LMC(0)_TIMING_PARAMS2 */
+ if (ddr_type == DDR4_DRAM) {
+ bdk_lmcx_timing_params1_t lmc_timing_params1;
+ bdk_lmcx_timing_params2_t lmc_timing_params2;
+ lmc_timing_params1.u = BDK_CSR_READ(node, BDK_LMCX_TIMING_PARAMS1(ddr_interface_num));
+ lmc_timing_params2.u = BDK_CSR_READ(node, BDK_LMCX_TIMING_PARAMS2(ddr_interface_num));
+ ddr_print("TIMING_PARAMS2 : 0x%016lx\n", lmc_timing_params2.u);
+
+ //lmc_timing_params2.s.trrd_l = divide_roundup(ddr4_tRRD_Lmin, tclk_psecs) - 1;
+ // NOTE: this is reworked for pass 2.x
+ int temp_trrd_l = divide_roundup(ddr4_tRRD_Lmin, tclk_psecs) - 2;
+#if 1
+ if (temp_trrd_l > 7)
+ ddr_print("TIMING_PARAMS2[trrd_l]: need extension bit for 0x%x\n", temp_trrd_l);
+#endif
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X) && (temp_trrd_l > 7)) {
+ temp_trrd_l = 7; // max it out
+ }
+ lmc_timing_params2.s.trrd_l = temp_trrd_l & 7;
+ lmc_timing_params2.s.trrd_l_ext = (temp_trrd_l >> 3) & 1;
+
+ lmc_timing_params2.s.twtr_l = divide_nint(max(4*tclk_psecs, 7500ull), tclk_psecs) - 1; // correct for 1600-2400
+ lmc_timing_params2.s.t_rw_op_max = 7;
+ lmc_timing_params2.s.trtp = divide_roundup(max(4*tclk_psecs, 7500ull), tclk_psecs) - 1;
+
+ ddr_print("TIMING_PARAMS2 : 0x%016lx\n", lmc_timing_params2.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_TIMING_PARAMS2(ddr_interface_num), lmc_timing_params2.u);
+
+ /* Workaround Errata 25823 - LMC: Possible DDR4 tWTR_L not met
+ for Write-to-Read operations to the same Bank Group */
+ if (lmc_timing_params1.s.twtr < (lmc_timing_params2.s.twtr_l - 4)) {
+ lmc_timing_params1.s.twtr = lmc_timing_params2.s.twtr_l - 4;
+ ddr_print("ERRATA 25823: NEW: TWTR: %d, TWTR_L: %d\n", lmc_timing_params1.s.twtr, lmc_timing_params2.s.twtr_l);
+ ddr_print("TIMING_PARAMS1 : 0x%016lx\n", lmc_timing_params1.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_TIMING_PARAMS1(ddr_interface_num), lmc_timing_params1.u);
+ }
+ }
+
+ /* LMC(0)_MODEREG_PARAMS0 */
+ {
+ bdk_lmcx_modereg_params0_t lmc_modereg_params0;
+ int param;
+
+ lmc_modereg_params0.u = BDK_CSR_READ(node, BDK_LMCX_MODEREG_PARAMS0(ddr_interface_num));
+
+ if (ddr_type == DDR4_DRAM) {
+ lmc_modereg_params0.s.cwl = 0; /* 1600 (1250ps) */
+ if (tclk_psecs < 1250)
+ lmc_modereg_params0.s.cwl = 1; /* 1866 (1072ps) */
+ if (tclk_psecs < 1072)
+ lmc_modereg_params0.s.cwl = 2; /* 2133 (938ps) */
+ if (tclk_psecs < 938)
+ lmc_modereg_params0.s.cwl = 3; /* 2400 (833ps) */
+ if (tclk_psecs < 833)
+ lmc_modereg_params0.s.cwl = 4; /* 2666 (750ps) */
+ if (tclk_psecs < 750)
+ lmc_modereg_params0.s.cwl = 5; /* 3200 (625ps) */
+ } else {
+ /*
+ ** CSR CWL CAS write Latency
+ ** === === =================================
+ ** 0 5 ( tCK(avg) >= 2.5 ns)
+ ** 1 6 (2.5 ns > tCK(avg) >= 1.875 ns)
+ ** 2 7 (1.875 ns > tCK(avg) >= 1.5 ns)
+ ** 3 8 (1.5 ns > tCK(avg) >= 1.25 ns)
+ ** 4 9 (1.25 ns > tCK(avg) >= 1.07 ns)
+ ** 5 10 (1.07 ns > tCK(avg) >= 0.935 ns)
+ ** 6 11 (0.935 ns > tCK(avg) >= 0.833 ns)
+ ** 7 12 (0.833 ns > tCK(avg) >= 0.75 ns)
+ */
+
+ lmc_modereg_params0.s.cwl = 0;
+ if (tclk_psecs < 2500)
+ lmc_modereg_params0.s.cwl = 1;
+ if (tclk_psecs < 1875)
+ lmc_modereg_params0.s.cwl = 2;
+ if (tclk_psecs < 1500)
+ lmc_modereg_params0.s.cwl = 3;
+ if (tclk_psecs < 1250)
+ lmc_modereg_params0.s.cwl = 4;
+ if (tclk_psecs < 1070)
+ lmc_modereg_params0.s.cwl = 5;
+ if (tclk_psecs < 935)
+ lmc_modereg_params0.s.cwl = 6;
+ if (tclk_psecs < 833)
+ lmc_modereg_params0.s.cwl = 7;
+ }
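+ /*
+ * Example: at tclk_psecs = 833 (DDR4-2400) the cascade above
+ * stops at cwl = 3, since 833 < 938 but !(833 < 833); the
+ * printout below maps CSR value 3 back to CWL = 12 nCK.
+ */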
+
+ if ((s = lookup_env_parameter("ddr_cwl")) != NULL) {
+ lmc_modereg_params0.s.cwl = strtoul(s, NULL, 0) - 5;
+ }
+
+ if (ddr_type == DDR4_DRAM) {
+ ddr_print("%-45s : %d, [0x%x]\n", "CAS Write Latency CWL, [CSR]",
+ lmc_modereg_params0.s.cwl + 9
+ + ((lmc_modereg_params0.s.cwl>2) ? (lmc_modereg_params0.s.cwl-3) * 2 : 0),
+ lmc_modereg_params0.s.cwl);
+ } else {
+ ddr_print("%-45s : %d, [0x%x]\n", "CAS Write Latency CWL, [CSR]",
+ lmc_modereg_params0.s.cwl + 5,
+ lmc_modereg_params0.s.cwl);
+ }
+
+ lmc_modereg_params0.s.mprloc = 0;
+ lmc_modereg_params0.s.mpr = 0;
+ lmc_modereg_params0.s.dll = (ddr_type == DDR4_DRAM)?1:0; /* disable(0) for DDR3 and enable(1) for DDR4 */
+ lmc_modereg_params0.s.al = 0;
+ lmc_modereg_params0.s.wlev = 0; /* Read Only */
+ lmc_modereg_params0.s.tdqs = ((ddr_type == DDR4_DRAM) || (dram_width != 8))?0:1; /* disable(0) for DDR4 and x4/x16 DDR3 */
+ lmc_modereg_params0.s.qoff = 0;
+ //lmc_modereg_params0.s.bl = 0; /* Don't touch block dirty logic */
+
+ if ((s = lookup_env_parameter("ddr_cl")) != NULL) {
+ CL = strtoul(s, NULL, 0);
+ ddr_print("CAS Latency : %6d\n", CL);
+ }
+
+ if (ddr_type == DDR4_DRAM) {
+ lmc_modereg_params0.s.cl = 0x0;
+ if (CL > 9)
+ lmc_modereg_params0.s.cl = 0x1;
+ if (CL > 10)
+ lmc_modereg_params0.s.cl = 0x2;
+ if (CL > 11)
+ lmc_modereg_params0.s.cl = 0x3;
+ if (CL > 12)
+ lmc_modereg_params0.s.cl = 0x4;
+ if (CL > 13)
+ lmc_modereg_params0.s.cl = 0x5;
+ if (CL > 14)
+ lmc_modereg_params0.s.cl = 0x6;
+ if (CL > 15)
+ lmc_modereg_params0.s.cl = 0x7;
+ if (CL > 16)
+ lmc_modereg_params0.s.cl = 0x8;
+ if (CL > 18)
+ lmc_modereg_params0.s.cl = 0x9;
+ if (CL > 20)
+ lmc_modereg_params0.s.cl = 0xA;
+ if (CL > 24)
+ lmc_modereg_params0.s.cl = 0xB;
+ } else {
+ lmc_modereg_params0.s.cl = 0x2;
+ if (CL > 5)
+ lmc_modereg_params0.s.cl = 0x4;
+ if (CL > 6)
+ lmc_modereg_params0.s.cl = 0x6;
+ if (CL > 7)
+ lmc_modereg_params0.s.cl = 0x8;
+ if (CL > 8)
+ lmc_modereg_params0.s.cl = 0xA;
+ if (CL > 9)
+ lmc_modereg_params0.s.cl = 0xC;
+ if (CL > 10)
+ lmc_modereg_params0.s.cl = 0xE;
+ if (CL > 11)
+ lmc_modereg_params0.s.cl = 0x1;
+ if (CL > 12)
+ lmc_modereg_params0.s.cl = 0x3;
+ if (CL > 13)
+ lmc_modereg_params0.s.cl = 0x5;
+ if (CL > 14)
+ lmc_modereg_params0.s.cl = 0x7;
+ if (CL > 15)
+ lmc_modereg_params0.s.cl = 0x9;
+ }
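+ /*
+ * Tracing the cascade (illustrative): DDR4 CL = 16 lands on
+ * cl = 0x7 (16 > 15 but not > 16); DDR3 CL = 11 lands on
+ * cl = 0xE (11 > 10 but not > 11).
+ */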
+
+ lmc_modereg_params0.s.rbt = 0; /* Read Only. */
+ lmc_modereg_params0.s.tm = 0;
+ lmc_modereg_params0.s.dllr = 0;
+
+ param = divide_roundup(twr, tclk_psecs);
+
+ if (ddr_type == DDR4_DRAM) { /* DDR4 */
+ lmc_modereg_params0.s.wrp = 1;
+ if (param > 12)
+ lmc_modereg_params0.s.wrp = 2;
+ if (param > 14)
+ lmc_modereg_params0.s.wrp = 3;
+ if (param > 16)
+ lmc_modereg_params0.s.wrp = 4;
+ if (param > 18)
+ lmc_modereg_params0.s.wrp = 5;
+ if (param > 20)
+ lmc_modereg_params0.s.wrp = 6;
+ if (param > 24) /* RESERVED in DDR4 spec */
+ lmc_modereg_params0.s.wrp = 7;
+ } else { /* DDR3 */
+ lmc_modereg_params0.s.wrp = 1;
+ if (param > 5)
+ lmc_modereg_params0.s.wrp = 2;
+ if (param > 6)
+ lmc_modereg_params0.s.wrp = 3;
+ if (param > 7)
+ lmc_modereg_params0.s.wrp = 4;
+ if (param > 8)
+ lmc_modereg_params0.s.wrp = 5;
+ if (param > 10)
+ lmc_modereg_params0.s.wrp = 6;
+ if (param > 12)
+ lmc_modereg_params0.s.wrp = 7;
+ }
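+ /*
+ * e.g., twr = 15000 ps at tclk_psecs = 1250 gives param = 12:
+ * DDR4 keeps wrp = 1 (12 is not > 12), while the DDR3 cascade
+ * walks down to wrp = 6 (12 > 10 but not > 12).
+ */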
+
+ lmc_modereg_params0.s.ppd = 0;
+
+ if ((s = lookup_env_parameter("ddr_wrp")) != NULL) {
+ lmc_modereg_params0.s.wrp = strtoul(s, NULL, 0);
+ }
+
+ ddr_print("%-45s : %d, [0x%x]\n", "Write recovery for auto precharge WRP, [CSR]",
+ param, lmc_modereg_params0.s.wrp);
+
+ if ((s = lookup_env_parameter_ull("ddr_modereg_params0")) != NULL) {
+ lmc_modereg_params0.u = strtoull(s, NULL, 0);
+ }
+ ddr_print("MODEREG_PARAMS0 : 0x%016lx\n", lmc_modereg_params0.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_MODEREG_PARAMS0(ddr_interface_num), lmc_modereg_params0.u);
+ }
+
+ /* LMC(0)_MODEREG_PARAMS1 */
+ {
+ bdk_lmcx_modereg_params1_t lmc_modereg_params1;
+
+ lmc_modereg_params1.u = odt_config[odt_idx].odt_mask1.u;
+
+#ifdef CAVIUM_ONLY
+ /* Special request: mismatched DIMM support. Slot 0: 2-Rank, Slot 1: 1-Rank */
+ if (rank_mask == 0x7) { /* 2-Rank, 1-Rank */
+ lmc_modereg_params1.s.rtt_nom_00 = 0;
+ lmc_modereg_params1.s.rtt_nom_01 = 3; /* rttnom_40ohm */
+ lmc_modereg_params1.s.rtt_nom_10 = 3; /* rttnom_40ohm */
+ lmc_modereg_params1.s.rtt_nom_11 = 0;
+ dyn_rtt_nom_mask = 0x6;
+ }
+#endif /* CAVIUM_ONLY */
+
+ if ((s = lookup_env_parameter("ddr_rtt_nom_mask")) != NULL) {
+ dyn_rtt_nom_mask = strtoul(s, NULL, 0);
+ }
+
+
+ /* Save the original rtt_nom settings before sweeping through settings. */
+ default_rtt_nom[0] = lmc_modereg_params1.s.rtt_nom_00;
+ default_rtt_nom[1] = lmc_modereg_params1.s.rtt_nom_01;
+ default_rtt_nom[2] = lmc_modereg_params1.s.rtt_nom_10;
+ default_rtt_nom[3] = lmc_modereg_params1.s.rtt_nom_11;
+
+ ddr_rtt_nom_auto = custom_lmc_config->ddr_rtt_nom_auto;
+
+ for (i=0; i<4; ++i) {
+ uint64_t value;
+ if ((s = lookup_env_parameter("ddr_rtt_nom_%1d%1d", !!(i&2), !!(i&1))) == NULL)
+ s = lookup_env_parameter("ddr%d_rtt_nom_%1d%1d", ddr_interface_num, !!(i&2), !!(i&1));
+ if (s != NULL) {
+ value = strtoul(s, NULL, 0);
+ lmc_modereg_params1.u &= ~((uint64_t)0x7 << (i*12+9));
+ lmc_modereg_params1.u |= ( (value & 0x7) << (i*12+9));
+ default_rtt_nom[i] = value;
+ ddr_rtt_nom_auto = 0;
+ }
+ }
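+ /*
+ * Each rank's 3-bit RTT_NOM field sits at bit offset i*12+9 of
+ * MODEREG_PARAMS1; e.g., i = 1 (rank 01) masks and refills
+ * bits <23:21>.
+ */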
+
+ if ((s = lookup_env_parameter("ddr_rtt_nom")) == NULL)
+ s = lookup_env_parameter("ddr%d_rtt_nom", ddr_interface_num);
+ if (s != NULL) {
+ uint64_t value;
+ value = strtoul(s, NULL, 0);
+
+ if (dyn_rtt_nom_mask & 1)
+ default_rtt_nom[0] = lmc_modereg_params1.s.rtt_nom_00 = value;
+ if (dyn_rtt_nom_mask & 2)
+ default_rtt_nom[1] = lmc_modereg_params1.s.rtt_nom_01 = value;
+ if (dyn_rtt_nom_mask & 4)
+ default_rtt_nom[2] = lmc_modereg_params1.s.rtt_nom_10 = value;
+ if (dyn_rtt_nom_mask & 8)
+ default_rtt_nom[3] = lmc_modereg_params1.s.rtt_nom_11 = value;
+
+ ddr_rtt_nom_auto = 0;
+ }
+
+ if ((s = lookup_env_parameter("ddr_rtt_wr")) != NULL) {
+ uint64_t value = strtoul(s, NULL, 0);
+ for (i=0; i<4; ++i) {
+ INSRT_WR(&lmc_modereg_params1.u, i, value);
+ }
+ }
+
+ for (i = 0; i < 4; ++i) {
+ uint64_t value;
+ if ((s = lookup_env_parameter("ddr_rtt_wr_%1d%1d", !!(i&2), !!(i&1))) == NULL)
+ s = lookup_env_parameter("ddr%d_rtt_wr_%1d%1d", ddr_interface_num, !!(i&2), !!(i&1));
+ if (s != NULL) {
+ value = strtoul(s, NULL, 0);
+ INSRT_WR(&lmc_modereg_params1.u, i, value);
+ }
+ }
+
+ // Make sure pass 1 has valid RTT_WR settings, because
+ // configuration files may be set up for pass 2, and
+ // pass 1 supports no RTT_WR extension bits
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X)) {
+ for (i = 0; i < 4; ++i) {
+ if (EXTR_WR(lmc_modereg_params1.u, i) > 3) { // if 80 or undefined
+ INSRT_WR(&lmc_modereg_params1.u, i, 1); // FIXME? always insert 120
+ ddr_print("RTT_WR_%d%d set to 120 for CN88XX pass 1\n", !!(i&2), i&1);
+ }
+ }
+ }
+ if ((s = lookup_env_parameter("ddr_dic")) != NULL) {
+ uint64_t value = strtoul(s, NULL, 0);
+ for (i=0; i<4; ++i) {
+ lmc_modereg_params1.u &= ~((uint64_t)0x3 << (i*12+7));
+ lmc_modereg_params1.u |= ( (value & 0x3) << (i*12+7));
+ }
+ }
+
+ for (i=0; i<4; ++i) {
+ uint64_t value;
+ if ((s = lookup_env_parameter("ddr_dic_%1d%1d", !!(i&2), !!(i&1))) != NULL) {
+ value = strtoul(s, NULL, 0);
+ lmc_modereg_params1.u &= ~((uint64_t)0x3 << (i*12+7));
+ lmc_modereg_params1.u |= ( (value & 0x3) << (i*12+7));
+ }
+ }
+
+ if ((s = lookup_env_parameter_ull("ddr_modereg_params1")) != NULL) {
+ lmc_modereg_params1.u = strtoull(s, NULL, 0);
+ }
+
+ ddr_print("RTT_NOM %3d, %3d, %3d, %3d ohms : %x,%x,%x,%x\n",
+ imp_values->rtt_nom_ohms[lmc_modereg_params1.s.rtt_nom_11],
+ imp_values->rtt_nom_ohms[lmc_modereg_params1.s.rtt_nom_10],
+ imp_values->rtt_nom_ohms[lmc_modereg_params1.s.rtt_nom_01],
+ imp_values->rtt_nom_ohms[lmc_modereg_params1.s.rtt_nom_00],
+ lmc_modereg_params1.s.rtt_nom_11,
+ lmc_modereg_params1.s.rtt_nom_10,
+ lmc_modereg_params1.s.rtt_nom_01,
+ lmc_modereg_params1.s.rtt_nom_00);
+
+ ddr_print("RTT_WR %3d, %3d, %3d, %3d ohms : %x,%x,%x,%x\n",
+ imp_values->rtt_wr_ohms[EXTR_WR(lmc_modereg_params1.u, 3)],
+ imp_values->rtt_wr_ohms[EXTR_WR(lmc_modereg_params1.u, 2)],
+ imp_values->rtt_wr_ohms[EXTR_WR(lmc_modereg_params1.u, 1)],
+ imp_values->rtt_wr_ohms[EXTR_WR(lmc_modereg_params1.u, 0)],
+ EXTR_WR(lmc_modereg_params1.u, 3),
+ EXTR_WR(lmc_modereg_params1.u, 2),
+ EXTR_WR(lmc_modereg_params1.u, 1),
+ EXTR_WR(lmc_modereg_params1.u, 0));
+
+ ddr_print("DIC %3d, %3d, %3d, %3d ohms : %x,%x,%x,%x\n",
+ imp_values->dic_ohms[lmc_modereg_params1.s.dic_11],
+ imp_values->dic_ohms[lmc_modereg_params1.s.dic_10],
+ imp_values->dic_ohms[lmc_modereg_params1.s.dic_01],
+ imp_values->dic_ohms[lmc_modereg_params1.s.dic_00],
+ lmc_modereg_params1.s.dic_11,
+ lmc_modereg_params1.s.dic_10,
+ lmc_modereg_params1.s.dic_01,
+ lmc_modereg_params1.s.dic_00);
+
+ ddr_print("MODEREG_PARAMS1 : 0x%016lx\n", lmc_modereg_params1.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_MODEREG_PARAMS1(ddr_interface_num), lmc_modereg_params1.u);
+
+ } /* LMC(0)_MODEREG_PARAMS1 */
+
+ /* LMC(0)_MODEREG_PARAMS2 */
+ if (ddr_type == DDR4_DRAM) {
+ bdk_lmcx_modereg_params2_t lmc_modereg_params2;
+ lmc_modereg_params2.u = odt_config[odt_idx].odt_mask2.u;
+
+ for (i=0; i<4; ++i) {
+ uint64_t value;
+ if ((s = lookup_env_parameter("ddr_rtt_park_%1d%1d", !!(i&2), !!(i&1))) != NULL) {
+ value = strtoul(s, NULL, 0);
+ lmc_modereg_params2.u &= ~((uint64_t)0x7 << (i*10+0));
+ lmc_modereg_params2.u |= ( (value & 0x7) << (i*10+0));
+ }
+ }
+
+ if ((s = lookup_env_parameter("ddr_rtt_park")) != NULL) {
+ uint64_t value = strtoul(s, NULL, 0);
+ for (i=0; i<4; ++i) {
+ lmc_modereg_params2.u &= ~((uint64_t)0x7 << (i*10+0));
+ lmc_modereg_params2.u |= ( (value & 0x7) << (i*10+0));
+ }
+ }
+
+ if ((s = lookup_env_parameter_ull("ddr_modereg_params2")) != NULL) {
+ lmc_modereg_params2.u = strtoull(s, NULL, 0);
+ }
+
+ ddr_print("RTT_PARK %3d, %3d, %3d, %3d ohms : %x,%x,%x,%x\n",
+ imp_values->rtt_nom_ohms[lmc_modereg_params2.s.rtt_park_11],
+ imp_values->rtt_nom_ohms[lmc_modereg_params2.s.rtt_park_10],
+ imp_values->rtt_nom_ohms[lmc_modereg_params2.s.rtt_park_01],
+ imp_values->rtt_nom_ohms[lmc_modereg_params2.s.rtt_park_00],
+ lmc_modereg_params2.s.rtt_park_11,
+ lmc_modereg_params2.s.rtt_park_10,
+ lmc_modereg_params2.s.rtt_park_01,
+ lmc_modereg_params2.s.rtt_park_00);
+
+ ddr_print("%-45s : 0x%x,0x%x,0x%x,0x%x\n", "VREF_RANGE",
+ lmc_modereg_params2.s.vref_range_11,
+ lmc_modereg_params2.s.vref_range_10,
+ lmc_modereg_params2.s.vref_range_01,
+ lmc_modereg_params2.s.vref_range_00);
+
+ ddr_print("%-45s : 0x%x,0x%x,0x%x,0x%x\n", "VREF_VALUE",
+ lmc_modereg_params2.s.vref_value_11,
+ lmc_modereg_params2.s.vref_value_10,
+ lmc_modereg_params2.s.vref_value_01,
+ lmc_modereg_params2.s.vref_value_00);
+
+ ddr_print("MODEREG_PARAMS2 : 0x%016lx\n", lmc_modereg_params2.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_MODEREG_PARAMS2(ddr_interface_num), lmc_modereg_params2.u);
+
+ } /* LMC(0)_MODEREG_PARAMS2 */
+
+ /* LMC(0)_MODEREG_PARAMS3 */
+ if (ddr_type == DDR4_DRAM) {
+ bdk_lmcx_modereg_params3_t lmc_modereg_params3;
+
+ lmc_modereg_params3.u = BDK_CSR_READ(node, BDK_LMCX_MODEREG_PARAMS3(ddr_interface_num));
+
+ //lmc_modereg_params3.s.max_pd =
+ //lmc_modereg_params3.s.tc_ref =
+ //lmc_modereg_params3.s.vref_mon =
+ //lmc_modereg_params3.s.cal =
+ //lmc_modereg_params3.s.sre_abort =
+ //lmc_modereg_params3.s.rd_preamble =
+ //lmc_modereg_params3.s.wr_preamble =
+ //lmc_modereg_params3.s.par_lat_mode =
+ //lmc_modereg_params3.s.odt_pd =
+ //lmc_modereg_params3.s.ca_par_pers =
+ //lmc_modereg_params3.s.dm =
+ //lmc_modereg_params3.s.wr_dbi =
+ //lmc_modereg_params3.s.rd_dbi =
+ lmc_modereg_params3.s.tccd_l = max(divide_roundup(ddr4_tCCD_Lmin, tclk_psecs), 5ull) - 4;
+ //lmc_modereg_params3.s.lpasr =
+ //lmc_modereg_params3.s.crc =
+ //lmc_modereg_params3.s.gd =
+ //lmc_modereg_params3.s.pda =
+ //lmc_modereg_params3.s.temp_sense =
+ //lmc_modereg_params3.s.fgrm =
+ //lmc_modereg_params3.s.wr_cmd_lat =
+ //lmc_modereg_params3.s.mpr_fmt =
+
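+ /*
+ * TCCD_L stores tCCD_L minus 4 nCK; e.g., assuming
+ * ddr4_tCCD_Lmin = 6250 ps at tclk_psecs = 1250:
+ * max(divide_roundup(6250, 1250), 5) - 4 = 1, i.e. tCCD_L = 5 nCK.
+ */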
+ if (!CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X)) {
+ int delay = 0;
+ if ((lranks_per_prank == 4) && (ddr_hertz >= 1000000000))
+ delay = 1;
+ lmc_modereg_params3.s.xrank_add_tccd_l = delay;
+ lmc_modereg_params3.s.xrank_add_tccd_s = delay;
+ }
+
+ ddr_print("MODEREG_PARAMS3 : 0x%016lx\n", lmc_modereg_params3.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_MODEREG_PARAMS3(ddr_interface_num), lmc_modereg_params3.u);
+ } /* LMC(0)_MODEREG_PARAMS3 */
+
+ /* LMC(0)_NXM */
+ {
+ bdk_lmcx_nxm_t lmc_nxm;
+ int num_bits = row_lsb + row_bits + lranks_bits - 26;
+ lmc_nxm.u = BDK_CSR_READ(node, BDK_LMCX_NXM(ddr_interface_num));
+
+ if (rank_mask & 0x1)
+ lmc_nxm.s.mem_msb_d0_r0 = num_bits;
+ if (rank_mask & 0x2)
+ lmc_nxm.s.mem_msb_d0_r1 = num_bits;
+ if (rank_mask & 0x4)
+ lmc_nxm.s.mem_msb_d1_r0 = num_bits;
+ if (rank_mask & 0x8)
+ lmc_nxm.s.mem_msb_d1_r1 = num_bits;
+
+ lmc_nxm.s.cs_mask = ~rank_mask & 0xff; /* Set the mask for non-existent ranks. */
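+ /*
+ * Illustrative sizing: with row_lsb = 16, row_bits = 15 and no
+ * 3DS logical ranks, num_bits = 16 + 15 + 0 - 26 = 5, i.e. the
+ * rank's top address bit is 26 + 5 = 31 (a 2 GB rank).
+ */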
+
+ if ((s = lookup_env_parameter_ull("ddr_nxm")) != NULL) {
+ lmc_nxm.u = strtoull(s, NULL, 0);
+ }
+ ddr_print("LMC_NXM : 0x%016lx\n", lmc_nxm.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_NXM(ddr_interface_num), lmc_nxm.u);
+ }
+
+ /* LMC(0)_WODT_MASK */
+ {
+ bdk_lmcx_wodt_mask_t lmc_wodt_mask;
+ lmc_wodt_mask.u = odt_config[odt_idx].odt_mask;
+
+ if ((s = lookup_env_parameter_ull("ddr_wodt_mask")) != NULL) {
+ lmc_wodt_mask.u = strtoull(s, NULL, 0);
+ }
+
+ ddr_print("WODT_MASK : 0x%016lx\n", lmc_wodt_mask.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_WODT_MASK(ddr_interface_num), lmc_wodt_mask.u);
+ }
+
+ /* LMC(0)_RODT_MASK */
+ {
+ int rankx;
+ bdk_lmcx_rodt_mask_t lmc_rodt_mask;
+ lmc_rodt_mask.u = odt_config[odt_idx].rodt_ctl;
+
+ if ((s = lookup_env_parameter_ull("ddr_rodt_mask")) != NULL) {
+ lmc_rodt_mask.u = strtoull(s, NULL, 0);
+ }
+
+ ddr_print("%-45s : 0x%016lx\n", "RODT_MASK", lmc_rodt_mask.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_RODT_MASK(ddr_interface_num), lmc_rodt_mask.u);
+
+ dyn_rtt_nom_mask = 0;
+ for (rankx = 0; rankx < dimm_count * 4;rankx++) {
+ if (!(rank_mask & (1 << rankx)))
+ continue;
+ dyn_rtt_nom_mask |= ((lmc_rodt_mask.u >> (8*rankx)) & 0xff);
+ }
+ if (num_ranks == 4) {
+ /* Normally ODT1 is wired to rank 1. For quad-ranked DIMMs
+ ODT1 is wired to the third rank (rank 2). The mask,
+ dyn_rtt_nom_mask, is used to indicate for which ranks
+ to sweep RTT_NOM during read-leveling. Shift the bit
+ from the ODT1 position over to the "ODT2" position so
+ that the read-leveling analysis comes out right. */
+ int odt1_bit = dyn_rtt_nom_mask & 2;
+ dyn_rtt_nom_mask &= ~2;
+ dyn_rtt_nom_mask |= odt1_bit<<1;
+ }
+ ddr_print("%-45s : 0x%02x\n", "DYN_RTT_NOM_MASK", dyn_rtt_nom_mask);
+ }
+
+ /* LMC(0)_COMP_CTL2 */
+ {
+ bdk_lmcx_comp_ctl2_t comp_ctl2;
+
+ comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num));
+
+ comp_ctl2.s.dqx_ctl = odt_config[odt_idx].odt_ena;
+ comp_ctl2.s.ck_ctl = (custom_lmc_config->ck_ctl == 0) ? 4 : custom_lmc_config->ck_ctl; /* Default 4=34.3 ohm */
+ comp_ctl2.s.cmd_ctl = (custom_lmc_config->cmd_ctl == 0) ? 4 : custom_lmc_config->cmd_ctl; /* Default 4=34.3 ohm */
+ comp_ctl2.s.control_ctl = (custom_lmc_config->ctl_ctl == 0) ? 4 : custom_lmc_config->ctl_ctl; /* Default 4=34.3 ohm */
+
+ // NOTE: these are now done earlier, in Step 6.9.3
+ // comp_ctl2.s.ntune_offset = 0;
+ // comp_ctl2.s.ptune_offset = 0;
+
+ ddr_rodt_ctl_auto = custom_lmc_config->ddr_rodt_ctl_auto;
+ if ((s = lookup_env_parameter("ddr_rodt_ctl_auto")) != NULL) {
+ ddr_rodt_ctl_auto = !!strtoul(s, NULL, 0);
+ }
+
+ default_rodt_ctl = odt_config[odt_idx].qs_dic;
+ if ((s = lookup_env_parameter("ddr_rodt_ctl")) == NULL)
+ s = lookup_env_parameter("ddr%d_rodt_ctl", ddr_interface_num);
+ if (s != NULL) {
+ default_rodt_ctl = strtoul(s, NULL, 0);
+ ddr_rodt_ctl_auto = 0;
+ }
+
+ comp_ctl2.s.rodt_ctl = default_rodt_ctl;
+
+ // if DDR4, force CK_CTL to 26 ohms if it is currently 34 ohms, and DCLK speed is 1 GHz or more...
+ if ((ddr_type == DDR4_DRAM) && (comp_ctl2.s.ck_ctl == ddr4_driver_34_ohm) && (ddr_hertz >= 1000000000)) {
+ comp_ctl2.s.ck_ctl = ddr4_driver_26_ohm; // lowest for DDR4 is 26 ohms
+ ddr_print("Forcing DDR4 COMP_CTL2[CK_CTL] to %d, %d ohms\n", comp_ctl2.s.ck_ctl,
+ imp_values->drive_strength[comp_ctl2.s.ck_ctl]);
+ }
+
+ if ((s = lookup_env_parameter("ddr_ck_ctl")) != NULL) {
+ comp_ctl2.s.ck_ctl = strtoul(s, NULL, 0);
+ }
+
+ if ((s = lookup_env_parameter("ddr_cmd_ctl")) != NULL) {
+ comp_ctl2.s.cmd_ctl = strtoul(s, NULL, 0);
+ }
+
+ if ((s = lookup_env_parameter("ddr_control_ctl")) != NULL) {
+ comp_ctl2.s.control_ctl = strtoul(s, NULL, 0);
+ }
+
+ if ((s = lookup_env_parameter("ddr_dqx_ctl")) != NULL) {
+ comp_ctl2.s.dqx_ctl = strtoul(s, NULL, 0);
+ }
+
+ ddr_print("%-45s : %d, %d ohms\n", "DQX_CTL ", comp_ctl2.s.dqx_ctl,
+ imp_values->dqx_strength [comp_ctl2.s.dqx_ctl ]);
+ ddr_print("%-45s : %d, %d ohms\n", "CK_CTL ", comp_ctl2.s.ck_ctl,
+ imp_values->drive_strength[comp_ctl2.s.ck_ctl ]);
+ ddr_print("%-45s : %d, %d ohms\n", "CMD_CTL ", comp_ctl2.s.cmd_ctl,
+ imp_values->drive_strength[comp_ctl2.s.cmd_ctl ]);
+ ddr_print("%-45s : %d, %d ohms\n", "CONTROL_CTL ", comp_ctl2.s.control_ctl,
+ imp_values->drive_strength[comp_ctl2.s.control_ctl]);
+ ddr_print("Read ODT_CTL : 0x%x (%d ohms)\n",
+ comp_ctl2.s.rodt_ctl, imp_values->rodt_ohms[comp_ctl2.s.rodt_ctl]);
+
+ DRAM_CSR_WRITE(node, BDK_LMCX_COMP_CTL2(ddr_interface_num), comp_ctl2.u);
+ }
+
+ /* LMC(0)_PHY_CTL */
+ {
+ bdk_lmcx_phy_ctl_t lmc_phy_ctl;
+ lmc_phy_ctl.u = BDK_CSR_READ(node, BDK_LMCX_PHY_CTL(ddr_interface_num));
+ lmc_phy_ctl.s.ts_stagger = 0;
+
+ if (!CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X) && (lranks_per_prank > 1)) {
+ lmc_phy_ctl.cn81xx.c0_sel = lmc_phy_ctl.cn81xx.c1_sel = 2; // C0 is TEN, C1 is A17
+ ddr_print("N%d.LMC%d: 3DS: setting PHY_CTL[cx_csel] = %d\n",
+ node, ddr_interface_num, lmc_phy_ctl.cn81xx.c1_sel);
+ }
+
+ ddr_print("PHY_CTL : 0x%016lx\n", lmc_phy_ctl.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_PHY_CTL(ddr_interface_num), lmc_phy_ctl.u);
+ }
+
+ /* LMC(0)_DIMM0/1_PARAMS */
+ if (spd_rdimm) {
+ bdk_lmcx_dimm_ctl_t lmc_dimm_ctl;
+
+ for (didx = 0; didx < (unsigned)dimm_count; ++didx) {
+ bdk_lmcx_dimmx_params_t lmc_dimmx_params;
+ int dimm = didx;
+ int rc;
+
+ lmc_dimmx_params.u = BDK_CSR_READ(node, BDK_LMCX_DIMMX_PARAMS(ddr_interface_num, dimm));
+
+
+ if (ddr_type == DDR4_DRAM) {
+
+ bdk_lmcx_dimmx_ddr4_params0_t lmc_dimmx_ddr4_params0;
+ bdk_lmcx_dimmx_ddr4_params1_t lmc_dimmx_ddr4_params1;
+ bdk_lmcx_ddr4_dimm_ctl_t lmc_ddr4_dimm_ctl;
+
+ lmc_dimmx_params.s.rc0 = 0;
+ lmc_dimmx_params.s.rc1 = 0;
+ lmc_dimmx_params.s.rc2 = 0;
+
+ rc = read_spd(node, &dimm_config_table[didx], DDR4_SPD_RDIMM_REGISTER_DRIVE_STRENGTH_CTL);
+ lmc_dimmx_params.s.rc3 = (rc >> 4) & 0xf;
+ lmc_dimmx_params.s.rc4 = ((rc >> 0) & 0x3) << 2;
+ lmc_dimmx_params.s.rc4 |= ((rc >> 2) & 0x3) << 0;
+
+ rc = read_spd(node, &dimm_config_table[didx], DDR4_SPD_RDIMM_REGISTER_DRIVE_STRENGTH_CK);
+ lmc_dimmx_params.s.rc5 = ((rc >> 0) & 0x3) << 2;
+ lmc_dimmx_params.s.rc5 |= ((rc >> 2) & 0x3) << 0;
+
+ lmc_dimmx_params.s.rc6 = 0;
+ lmc_dimmx_params.s.rc7 = 0;
+ lmc_dimmx_params.s.rc8 = 0;
+ lmc_dimmx_params.s.rc9 = 0;
+
+ /*
+ ** rc10 DDR4 RDIMM Operating Speed
+ ** ==== =========================================================
+ ** 0 tclk_psecs >= 1250 psec DDR4-1600 (1250 ps)
+ ** 1 1250 psec > tclk_psecs >= 1071 psec DDR4-1866 (1071 ps)
+ ** 2 1071 psec > tclk_psecs >= 938 psec DDR4-2133 ( 938 ps)
+ ** 3 938 psec > tclk_psecs >= 833 psec DDR4-2400 ( 833 ps)
+ ** 4 833 psec > tclk_psecs >= 750 psec DDR4-2666 ( 750 ps)
+ ** 5 750 psec > tclk_psecs >= 625 psec DDR4-3200 ( 625 ps)
+ */
+ lmc_dimmx_params.s.rc10 = 0;
+ if (1250 > tclk_psecs)
+ lmc_dimmx_params.s.rc10 = 1;
+ if (1071 > tclk_psecs)
+ lmc_dimmx_params.s.rc10 = 2;
+ if (938 > tclk_psecs)
+ lmc_dimmx_params.s.rc10 = 3;
+ if (833 > tclk_psecs)
+ lmc_dimmx_params.s.rc10 = 4;
+ if (750 > tclk_psecs)
+ lmc_dimmx_params.s.rc10 = 5;
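+ /*
+ * e.g., at exactly 833 ps (DDR4-2400) the cascade stops at
+ * rc10 = 3, since 938 > 833 but !(833 > 833).
+ */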
+
+ lmc_dimmx_params.s.rc11 = 0;
+ lmc_dimmx_params.s.rc12 = 0;
+ lmc_dimmx_params.s.rc13 = (spd_dimm_type == 4) ? 0 : 4; /* bit 2: 0=LRDIMM, 1=RDIMM */
+ lmc_dimmx_params.s.rc13 |= (ddr_type == DDR4_DRAM) ? (spd_addr_mirror << 3) : 0;
+ lmc_dimmx_params.s.rc14 = 0;
+ //lmc_dimmx_params.s.rc15 = 4; /* 0 nCK latency adder */
+ lmc_dimmx_params.s.rc15 = 0; /* 1 nCK latency adder */
+
+ lmc_dimmx_ddr4_params0.u = 0;
+
+ lmc_dimmx_ddr4_params0.s.rc8x = 0;
+ lmc_dimmx_ddr4_params0.s.rc7x = 0;
+ lmc_dimmx_ddr4_params0.s.rc6x = 0;
+ lmc_dimmx_ddr4_params0.s.rc5x = 0;
+ lmc_dimmx_ddr4_params0.s.rc4x = 0;
+
+ lmc_dimmx_ddr4_params0.s.rc3x = compute_rc3x(tclk_psecs);
+
+ lmc_dimmx_ddr4_params0.s.rc2x = 0;
+ lmc_dimmx_ddr4_params0.s.rc1x = 0;
+
+ lmc_dimmx_ddr4_params1.u = 0;
+
+ lmc_dimmx_ddr4_params1.s.rcbx = 0;
+ lmc_dimmx_ddr4_params1.s.rcax = 0;
+ lmc_dimmx_ddr4_params1.s.rc9x = 0;
+
+ lmc_ddr4_dimm_ctl.u = 0;
+ lmc_ddr4_dimm_ctl.s.ddr4_dimm0_wmask = 0x004;
+ lmc_ddr4_dimm_ctl.s.ddr4_dimm1_wmask = (dimm_count > 1) ? 0x004 : 0x0000;
+
+ /*
+ * Handle any overrides from envvars here...
+ */
+ if ((s = lookup_env_parameter("ddr_ddr4_params0")) != NULL) {
+ lmc_dimmx_ddr4_params0.u = strtoul(s, NULL, 0);
+ }
+
+ if ((s = lookup_env_parameter("ddr_ddr4_params1")) != NULL) {
+ lmc_dimmx_ddr4_params1.u = strtoul(s, NULL, 0);
+ }
+
+ if ((s = lookup_env_parameter("ddr_ddr4_dimm_ctl")) != NULL) {
+ lmc_ddr4_dimm_ctl.u = strtoul(s, NULL, 0);
+ }
+
+ for (i=0; i<11; ++i) {
+ uint64_t value;
+ if ((s = lookup_env_parameter("ddr_ddr4_rc%1xx", i+1)) != NULL) {
+ value = strtoul(s, NULL, 0);
+ if (i < 8) {
+ lmc_dimmx_ddr4_params0.u &= ~((uint64_t)0xff << (i*8));
+ lmc_dimmx_ddr4_params0.u |= (value << (i*8));
+ } else {
+ lmc_dimmx_ddr4_params1.u &= ~((uint64_t)0xff << ((i-8)*8));
+ lmc_dimmx_ddr4_params1.u |= (value << ((i-8)*8));
+ }
+ }
+ }
+
+ /*
+ * write the final CSR values
+ */
+ DRAM_CSR_WRITE(node, BDK_LMCX_DIMMX_DDR4_PARAMS0(ddr_interface_num, dimm), lmc_dimmx_ddr4_params0.u);
+
+ DRAM_CSR_WRITE(node, BDK_LMCX_DDR4_DIMM_CTL(ddr_interface_num), lmc_ddr4_dimm_ctl.u);
+
+ DRAM_CSR_WRITE(node, BDK_LMCX_DIMMX_DDR4_PARAMS1(ddr_interface_num, dimm), lmc_dimmx_ddr4_params1.u);
+
+ ddr_print("DIMM%d Register Control Words RCBx:RC1x : %x %x %x %x %x %x %x %x %x %x %x\n",
+ dimm,
+ lmc_dimmx_ddr4_params1.s.rcbx,
+ lmc_dimmx_ddr4_params1.s.rcax,
+ lmc_dimmx_ddr4_params1.s.rc9x,
+ lmc_dimmx_ddr4_params0.s.rc8x,
+ lmc_dimmx_ddr4_params0.s.rc7x,
+ lmc_dimmx_ddr4_params0.s.rc6x,
+ lmc_dimmx_ddr4_params0.s.rc5x,
+ lmc_dimmx_ddr4_params0.s.rc4x,
+ lmc_dimmx_ddr4_params0.s.rc3x,
+ lmc_dimmx_ddr4_params0.s.rc2x,
+ lmc_dimmx_ddr4_params0.s.rc1x );
+
+ } else { /* if (ddr_type == DDR4_DRAM) */
+ rc = read_spd(node, &dimm_config_table[didx], 69);
+ lmc_dimmx_params.s.rc0 = (rc >> 0) & 0xf;
+ lmc_dimmx_params.s.rc1 = (rc >> 4) & 0xf;
+
+ rc = read_spd(node, &dimm_config_table[didx], 70);
+ lmc_dimmx_params.s.rc2 = (rc >> 0) & 0xf;
+ lmc_dimmx_params.s.rc3 = (rc >> 4) & 0xf;
+
+ rc = read_spd(node, &dimm_config_table[didx], 71);
+ lmc_dimmx_params.s.rc4 = (rc >> 0) & 0xf;
+ lmc_dimmx_params.s.rc5 = (rc >> 4) & 0xf;
+
+ rc = read_spd(node, &dimm_config_table[didx], 72);
+ lmc_dimmx_params.s.rc6 = (rc >> 0) & 0xf;
+ lmc_dimmx_params.s.rc7 = (rc >> 4) & 0xf;
+
+ rc = read_spd(node, &dimm_config_table[didx], 73);
+ lmc_dimmx_params.s.rc8 = (rc >> 0) & 0xf;
+ lmc_dimmx_params.s.rc9 = (rc >> 4) & 0xf;
+
+ rc = read_spd(node, &dimm_config_table[didx], 74);
+ lmc_dimmx_params.s.rc10 = (rc >> 0) & 0xf;
+ lmc_dimmx_params.s.rc11 = (rc >> 4) & 0xf;
+
+ rc = read_spd(node, &dimm_config_table[didx], 75);
+ lmc_dimmx_params.s.rc12 = (rc >> 0) & 0xf;
+ lmc_dimmx_params.s.rc13 = (rc >> 4) & 0xf;
+
+ rc = read_spd(node, &dimm_config_table[didx], 76);
+ lmc_dimmx_params.s.rc14 = (rc >> 0) & 0xf;
+ lmc_dimmx_params.s.rc15 = (rc >> 4) & 0xf;
+
+
+ if ((s = lookup_env_parameter("ddr_clk_drive")) != NULL) {
+ if (strcmp(s,"light") == 0) {
+ lmc_dimmx_params.s.rc5 = 0x0; /* Light Drive */
+ }
+ if (strcmp(s,"moderate") == 0) {
+ lmc_dimmx_params.s.rc5 = 0x5; /* Moderate Drive */
+ }
+ if (strcmp(s,"strong") == 0) {
+ lmc_dimmx_params.s.rc5 = 0xA; /* Strong Drive */
+ }
+ }
+
+ if ((s = lookup_env_parameter("ddr_cmd_drive")) != NULL) {
+ if (strcmp(s,"light") == 0) {
+ lmc_dimmx_params.s.rc3 = 0x0; /* Light Drive */
+ }
+ if (strcmp(s,"moderate") == 0) {
+ lmc_dimmx_params.s.rc3 = 0x5; /* Moderate Drive */
+ }
+ if (strcmp(s,"strong") == 0) {
+ lmc_dimmx_params.s.rc3 = 0xA; /* Strong Drive */
+ }
+ }
+
+ if ((s = lookup_env_parameter("ddr_ctl_drive")) != NULL) {
+ if (strcmp(s,"light") == 0) {
+ lmc_dimmx_params.s.rc4 = 0x0; /* Light Drive */
+ }
+ if (strcmp(s,"moderate") == 0) {
+ lmc_dimmx_params.s.rc4 = 0x5; /* Moderate Drive */
+ }
+ }
+
+
+ /*
+ ** rc10 DDR3 RDIMM Operating Speed
+ ** ==== =========================================================
+ ** 0 tclk_psecs >= 2500 psec DDR3/DDR3L-800 (default)
+ ** 1 2500 psec > tclk_psecs >= 1875 psec DDR3/DDR3L-1066
+ ** 2 1875 psec > tclk_psecs >= 1500 psec DDR3/DDR3L-1333
+ ** 3 1500 psec > tclk_psecs >= 1250 psec DDR3/DDR3L-1600
+ ** 4 1250 psec > tclk_psecs >= 1071 psec DDR3-1866
+ */
+ lmc_dimmx_params.s.rc10 = 0;
+ if (2500 > tclk_psecs)
+ lmc_dimmx_params.s.rc10 = 1;
+ if (1875 > tclk_psecs)
+ lmc_dimmx_params.s.rc10 = 2;
+ if (1500 > tclk_psecs)
+ lmc_dimmx_params.s.rc10 = 3;
+ if (1250 > tclk_psecs)
+ lmc_dimmx_params.s.rc10 = 4;
+
+ } /* if (ddr_type == DDR4_DRAM) */
+
+ if ((s = lookup_env_parameter("ddr_dimmx_params")) != NULL) {
+ lmc_dimmx_params.u = strtoul(s, NULL, 0);
+ }
+
+ for (i=0; i<16; ++i) {
+ uint64_t value;
+ if ((s = lookup_env_parameter("ddr_rc%d", i)) != NULL) {
+ value = strtoul(s, NULL, 0);
+ lmc_dimmx_params.u &= ~((uint64_t)0xf << (i*4));
+ lmc_dimmx_params.u |= ( value << (i*4));
+ }
+ }
+
+ DRAM_CSR_WRITE(node, BDK_LMCX_DIMMX_PARAMS(ddr_interface_num, dimm), lmc_dimmx_params.u);
+
+ ddr_print("DIMM%d Register Control Words RC15:RC0 : %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x\n",
+ dimm,
+ lmc_dimmx_params.s.rc15,
+ lmc_dimmx_params.s.rc14,
+ lmc_dimmx_params.s.rc13,
+ lmc_dimmx_params.s.rc12,
+ lmc_dimmx_params.s.rc11,
+ lmc_dimmx_params.s.rc10,
+ lmc_dimmx_params.s.rc9 ,
+ lmc_dimmx_params.s.rc8 ,
+ lmc_dimmx_params.s.rc7 ,
+ lmc_dimmx_params.s.rc6 ,
+ lmc_dimmx_params.s.rc5 ,
+ lmc_dimmx_params.s.rc4 ,
+ lmc_dimmx_params.s.rc3 ,
+ lmc_dimmx_params.s.rc2 ,
+ lmc_dimmx_params.s.rc1 ,
+ lmc_dimmx_params.s.rc0 );
+ } /* for didx */
+
+ if (ddr_type == DDR4_DRAM) {
+
+ /* LMC0_DIMM_CTL */
+ lmc_dimm_ctl.u = BDK_CSR_READ(node, BDK_LMCX_DIMM_CTL(ddr_interface_num));
+ lmc_dimm_ctl.s.dimm0_wmask = 0xdf3f;
+ lmc_dimm_ctl.s.dimm1_wmask = (dimm_count > 1) ? 0xdf3f : 0x0000;
+ lmc_dimm_ctl.s.tcws = 0x4e0;
+ lmc_dimm_ctl.cn88xx.parity = custom_lmc_config->parity;
+
+ if ((s = lookup_env_parameter("ddr_dimm0_wmask")) != NULL) {
+ lmc_dimm_ctl.s.dimm0_wmask = strtoul(s, NULL, 0);
+ }
+
+ if ((s = lookup_env_parameter("ddr_dimm1_wmask")) != NULL) {
+ lmc_dimm_ctl.s.dimm1_wmask = strtoul(s, NULL, 0);
+ }
+
+ if ((s = lookup_env_parameter("ddr_dimm_ctl_parity")) != NULL) {
+ lmc_dimm_ctl.cn88xx.parity = strtoul(s, NULL, 0);
+ }
+
+ if ((s = lookup_env_parameter("ddr_dimm_ctl_tcws")) != NULL) {
+ lmc_dimm_ctl.s.tcws = strtoul(s, NULL, 0);
+ }
+
+ ddr_print("LMC DIMM_CTL : 0x%016lx\n", lmc_dimm_ctl.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_DIMM_CTL(ddr_interface_num), lmc_dimm_ctl.u);
+
+ perform_octeon3_ddr3_sequence(node, rank_mask,
+ ddr_interface_num, 0x7 ); /* Init RCW */
+
+ /* Write RC0D last */
+ lmc_dimm_ctl.s.dimm0_wmask = 0x2000;
+ lmc_dimm_ctl.s.dimm1_wmask = (dimm_count > 1) ? 0x2000 : 0x0000;
+ ddr_print("LMC DIMM_CTL : 0x%016lx\n", lmc_dimm_ctl.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_DIMM_CTL(ddr_interface_num), lmc_dimm_ctl.u);
+
+ /* Don't write any extended registers the second time */
+ DRAM_CSR_WRITE(node, BDK_LMCX_DDR4_DIMM_CTL(ddr_interface_num), 0);
+
+ perform_octeon3_ddr3_sequence(node, rank_mask,
+ ddr_interface_num, 0x7 ); /* Init RCW */
+ } else {
+
+ /* LMC0_DIMM_CTL */
+ lmc_dimm_ctl.u = BDK_CSR_READ(node, BDK_LMCX_DIMM_CTL(ddr_interface_num));
+ lmc_dimm_ctl.s.dimm0_wmask = 0xffff;
+ lmc_dimm_ctl.s.dimm1_wmask = (dimm_count > 1) ? 0xffff : 0x0000;
+ lmc_dimm_ctl.s.tcws = 0x4e0;
+ lmc_dimm_ctl.cn88xx.parity = custom_lmc_config->parity;
+
+ if ((s = lookup_env_parameter("ddr_dimm0_wmask")) != NULL) {
+ lmc_dimm_ctl.s.dimm0_wmask = strtoul(s, NULL, 0);
+ }
+
+ if ((s = lookup_env_parameter("ddr_dimm1_wmask")) != NULL) {
+ lmc_dimm_ctl.s.dimm1_wmask = strtoul(s, NULL, 0);
+ }
+
+ if ((s = lookup_env_parameter("ddr_dimm_ctl_parity")) != NULL) {
+ lmc_dimm_ctl.cn88xx.parity = strtoul(s, NULL, 0);
+ }
+
+ if ((s = lookup_env_parameter("ddr_dimm_ctl_tcws")) != NULL) {
+ lmc_dimm_ctl.s.tcws = strtoul(s, NULL, 0);
+ }
+
+ ddr_print("LMC DIMM_CTL : 0x%016lx\n", lmc_dimm_ctl.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_DIMM_CTL(ddr_interface_num), lmc_dimm_ctl.u);
+
+ perform_octeon3_ddr3_sequence(node, rank_mask,
+ ddr_interface_num, 0x7 ); /* Init RCW */
+ }
+ } else { /* if (spd_rdimm) */
+ /* Disable register control writes for unbuffered */
+ bdk_lmcx_dimm_ctl_t lmc_dimm_ctl;
+ lmc_dimm_ctl.u = BDK_CSR_READ(node, BDK_LMCX_DIMM_CTL(ddr_interface_num));
+ lmc_dimm_ctl.s.dimm0_wmask = 0;
+ lmc_dimm_ctl.s.dimm1_wmask = 0;
+ DRAM_CSR_WRITE(node, BDK_LMCX_DIMM_CTL(ddr_interface_num), lmc_dimm_ctl.u);
+ } /* if (spd_rdimm) */
+
+ /*
+ * Comments (steps 3 through 5) continue in perform_octeon3_ddr3_sequence()
+ */
+ {
+ bdk_lmcx_modereg_params0_t lmc_modereg_params0;
+
+ if (ddr_memory_preserved(node)) {
+ /* Contents are being preserved. Take DRAM out of
+ self-refresh first. Then init steps can proceed
+ normally */
+ perform_octeon3_ddr3_sequence(node, rank_mask,
+ ddr_interface_num, 3); /* self-refresh exit */
+ }
+
+ lmc_modereg_params0.u = BDK_CSR_READ(node, BDK_LMCX_MODEREG_PARAMS0(ddr_interface_num));
+
+ lmc_modereg_params0.s.dllr = 1; /* Set during first init sequence */
+ DRAM_CSR_WRITE(node, BDK_LMCX_MODEREG_PARAMS0(ddr_interface_num), lmc_modereg_params0.u);
+
+ perform_ddr_init_sequence(node, rank_mask, ddr_interface_num);
+
+ lmc_modereg_params0.s.dllr = 0; /* Clear for normal operation */
+ DRAM_CSR_WRITE(node, BDK_LMCX_MODEREG_PARAMS0(ddr_interface_num), lmc_modereg_params0.u);
+ }
+
+ // NOTE: this must be done for pass 2.x and pass 1.x
+ if ((spd_rdimm) && (ddr_type == DDR4_DRAM)) {
+ VB_PRT(VBL_FAE, "Running init sequence 1\n");
+ change_rdimm_mpr_pattern(node, rank_mask, ddr_interface_num, dimm_count);
+ }
+
+#define DEFAULT_INTERNAL_VREF_TRAINING_LIMIT 5
+ int internal_retries = 0;
+ int deskew_training_errors;
+ int dac_eval_retries;
+ int dac_settings[9];
+ int num_samples;
+ int sample, lane;
+ int last_lane = ((ddr_interface_64b) ? 8 : 4) + use_ecc;
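+ /* e.g., a 64-bit interface with ECC scans 9 byte lanes (0-8);
+ a 32-bit interface without ECC scans only 4. */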
+
+#define DEFAULT_DAC_SAMPLES 7 // originally was 5
+#define DAC_RETRIES_LIMIT 2
+
+ typedef struct {
+ int16_t bytes[DEFAULT_DAC_SAMPLES];
+ } bytelane_sample_t;
+ bytelane_sample_t lanes[9];
+
+ memset(lanes, 0, sizeof(lanes));
+
+ if ((ddr_type == DDR4_DRAM) && !CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X)) {
+ num_samples = DEFAULT_DAC_SAMPLES;
+ } else {
+ num_samples = 1; // if DDR3 or no ability to write DAC values
+ }
+
+ perform_internal_vref_training:
+
+ for (sample = 0; sample < num_samples; sample++) {
+
+ dac_eval_retries = 0;
+
+ do { // make offset and internal vref training repeatable
+
+ /* 6.9.8 LMC Offset Training
+ LMC requires input-receiver offset training. */
+ Perform_Offset_Training(node, rank_mask, ddr_interface_num);
+
+ /* 6.9.9 LMC Internal Vref Training
+ LMC requires input-reference-voltage training. */
+ Perform_Internal_VREF_Training(node, rank_mask, ddr_interface_num);
+
+ // read and maybe display the DAC values for a sample
+ read_DAC_DBI_settings(node, ddr_interface_num, /*DAC*/1, dac_settings);
+ if ((num_samples == 1) || dram_is_verbose(VBL_DEV)) {
+ display_DAC_DBI_settings(node, ddr_interface_num, /*DAC*/1, use_ecc,
+ dac_settings, "Internal VREF");
+ }
+
+ // for DDR4, evaluate the DAC settings and retry if any issues
+ if (ddr_type == DDR4_DRAM) {
+ if (evaluate_DAC_settings(ddr_interface_64b, use_ecc, dac_settings)) {
+ if (++dac_eval_retries > DAC_RETRIES_LIMIT) {
+ ddr_print("N%d.LMC%d: DDR4 internal VREF DAC settings: retries exhausted; continuing...\n",
+ node, ddr_interface_num);
+ } else {
+ ddr_print("N%d.LMC%d: DDR4 internal VREF DAC settings inconsistent; retrying....\n",
+ node, ddr_interface_num); // FIXME? verbosity!!!
+ continue;
+ }
+ }
+ if (num_samples > 1) { // taking multiple samples, otherwise do nothing
+ // good sample or exhausted retries, record it
+ for (lane = 0; lane < last_lane; lane++) {
+ lanes[lane].bytes[sample] = dac_settings[lane];
+ }
+ }
+ }
+ break; // done if DDR3, or good sample, or exhausted retries
+
+ } while (1);
+
+ } /* for (sample = 0; sample < num_samples; sample++) */
+
+ if (num_samples > 1) {
+ debug_print("N%d.LMC%d: DDR4 internal VREF DAC settings: processing multiple samples...\n",
+ node, ddr_interface_num);
+
+ for (lane = 0; lane < last_lane; lane++) {
+ dac_settings[lane] = process_samples_average(&lanes[lane].bytes[0], num_samples,
+ ddr_interface_num, lane);
+ }
+ display_DAC_DBI_settings(node, ddr_interface_num, /*DAC*/1, use_ecc, dac_settings, "Averaged VREF");
+
+ // finally, write the final DAC values
+ for (lane = 0; lane < last_lane; lane++) {
+ load_dac_override(node, ddr_interface_num, dac_settings[lane], lane);
+ }
+ }
+
+#if DAC_OVERRIDE_EARLY
+ // as a second step, after internal VREF training, before starting deskew training:
+ // for DDR3 and THUNDER pass 2.x, override the DAC setting to 127
+ if ((ddr_type == DDR3_DRAM) && !CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X)) { // added 81xx and 83xx
+ load_dac_override(node, ddr_interface_num, 127, /* all */0x0A);
+ ddr_print("N%d.LMC%d: Overriding DDR3 internal VREF DAC settings to 127 (early).\n",
+ node, ddr_interface_num);
+ }
+#endif
+
+ /*
+ * 6.9.10 LMC Read Deskew Training
+ * LMC requires input-read-data deskew training.
+ */
+ if (! disable_deskew_training) {
+
+ deskew_training_errors = Perform_Read_Deskew_Training(node, rank_mask, ddr_interface_num,
+ spd_rawcard_AorB, 0, ddr_interface_64b);
+
+ // All the Deskew lock and saturation retries (may) have been done,
+ // but we ended up with nibble errors; so, as a last-ditch effort,
+ // enable retries of the Internal Vref Training...
+ if (deskew_training_errors) {
+ if (internal_retries < DEFAULT_INTERNAL_VREF_TRAINING_LIMIT) {
+ internal_retries++;
+ VB_PRT(VBL_FAE, "N%d.LMC%d: Deskew training results still unsettled - retrying internal Vref training (%d)\n",
+ node, ddr_interface_num, internal_retries);
+ goto perform_internal_vref_training;
+ } else {
+ VB_PRT(VBL_FAE, "N%d.LMC%d: Deskew training incomplete - %d retries exhausted, but continuing...\n",
+ node, ddr_interface_num, internal_retries);
+ }
+ }
+
+ // FIXME: treat this as the final DSK print from now on, and print if VBL_NORM or above
+ // also, save the results of the original training
+ Validate_Read_Deskew_Training(node, rank_mask, ddr_interface_num, &deskew_training_results, VBL_NORM);
+
+ // setup write bit-deskew if enabled...
+ if (enable_write_deskew) {
+ ddr_print("N%d.LMC%d: WRITE BIT-DESKEW feature enabled- going NEUTRAL.\n",
+ node, ddr_interface_num);
+ Neutral_Write_Deskew_Setup(node, ddr_interface_num);
+ } /* if (enable_write_deskew) */
+
+ } /* if (! disable_deskew_training) */
+
+#if !DAC_OVERRIDE_EARLY
+ // as a final step in internal VREF training, after deskew training but before HW WL:
+ // for DDR3 and THUNDER pass 2.x, override the DAC setting to 127
+ if ((ddr_type == DDR3_DRAM) && !CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X)) { // added 81xx and 83xx
+ load_dac_override(node, ddr_interface_num, 127, /* all */0x0A);
+ ddr_print("N%d.LMC%d, Overriding DDR3 internal VREF DAC settings to 127 (late).\n",
+ node, ddr_interface_num);
+ }
+#endif
+
+
+ /* LMC(0)_EXT_CONFIG */
+ {
+ bdk_lmcx_ext_config_t ext_config;
+ ext_config.u = BDK_CSR_READ(node, BDK_LMCX_EXT_CONFIG(ddr_interface_num));
+ ext_config.s.vrefint_seq_deskew = 0;
+ ext_config.s.read_ena_bprch = 1;
+ ext_config.s.read_ena_fprch = 1;
+ ext_config.s.drive_ena_fprch = 1;
+ ext_config.s.drive_ena_bprch = 1;
+ ext_config.s.invert_data = 0; // make sure this is OFF for all current chips
+
+ if ((s = lookup_env_parameter("ddr_read_fprch")) != NULL) {
+ ext_config.s.read_ena_fprch = strtoul(s, NULL, 0);
+ }
+ if ((s = lookup_env_parameter("ddr_read_bprch")) != NULL) {
+ ext_config.s.read_ena_bprch = strtoul(s, NULL, 0);
+ }
+ if ((s = lookup_env_parameter("ddr_drive_fprch")) != NULL) {
+ ext_config.s.drive_ena_fprch = strtoul(s, NULL, 0);
+ }
+ if ((s = lookup_env_parameter("ddr_drive_bprch")) != NULL) {
+ ext_config.s.drive_ena_bprch = strtoul(s, NULL, 0);
+ }
+
+ if (!CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X) && (lranks_per_prank > 1)) {
+ ext_config.s.dimm0_cid = ext_config.s.dimm1_cid = lranks_bits;
+ ddr_print("N%d.LMC%d: 3DS: setting EXT_CONFIG[dimmx_cid] = %d\n",
+ node, ddr_interface_num, ext_config.s.dimm0_cid);
+ }
+
+ DRAM_CSR_WRITE(node, BDK_LMCX_EXT_CONFIG(ddr_interface_num), ext_config.u);
+ ddr_print("%-45s : 0x%016lx\n", "EXT_CONFIG", ext_config.u);
+ }
+
+
+ {
+ int save_ref_zqcs_int;
+ uint64_t temp_delay_usecs;
+
+ lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(ddr_interface_num));
+
+ /* Temporarily select the minimum ZQCS interval and wait
+ long enough for a few ZQCS calibrations to occur. This
+ should ensure that the calibration circuitry is
+ stabilized before read/write leveling occurs. */
+ save_ref_zqcs_int = lmc_config.s.ref_zqcs_int;
+ lmc_config.s.ref_zqcs_int = 1 | (32<<7); /* set smallest interval */
+
+ DRAM_CSR_WRITE(node, BDK_LMCX_CONFIG(ddr_interface_num), lmc_config.u);
+ BDK_CSR_READ(node, BDK_LMCX_CONFIG(ddr_interface_num));
+
+ /* Compute an appropriate delay based on the current ZQCS
+ interval. The delay should be long enough for the
+ current ZQCS delay counter to expire plus ten of the
+ minimum intervals to ensure that some calibrations
+ occur. */
+ temp_delay_usecs = (((uint64_t)save_ref_zqcs_int >> 7)
+ * tclk_psecs * 100 * 512 * 128) / (10000*10000)
+ + 10 * ((uint64_t)32 * tclk_psecs * 100 * 512 * 128) / (10000*10000);
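+ /*
+ * Units check (illustrative): one ZQCS interval unit is
+ * 512*128 = 65536 dclks, so the scaling above collapses to
+ * tclk_psecs * 65536 / 10^6 usecs per unit -- about 81.9 usecs
+ * at tclk_psecs = 1250. A saved interval field of 1627 then
+ * waits roughly 1627*81.9 + 10*32*81.9 ~= 159000 usecs.
+ */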
+
+ VB_PRT(VBL_FAE, "N%d.LMC%d: Waiting %ld usecs for ZQCS calibrations to start\n",
+ node, ddr_interface_num, temp_delay_usecs);
+ bdk_wait_usec(temp_delay_usecs);
+
+ lmc_config.s.ref_zqcs_int = save_ref_zqcs_int; /* Restore computed interval */
+
+ DRAM_CSR_WRITE(node, BDK_LMCX_CONFIG(ddr_interface_num), lmc_config.u);
+ BDK_CSR_READ(node, BDK_LMCX_CONFIG(ddr_interface_num));
+ }
+
+ /*
+ * 6.9.11 LMC Write Leveling
+ *
+ * LMC supports an automatic write leveling like that described in the
+ * JEDEC DDR3 specifications separately per byte-lane.
+ *
+ * All of DDR PLL, LMC CK, LMC DRESET, and early LMC initializations must
+ * be completed prior to starting this LMC write-leveling sequence.
+ *
+ * There are many possible procedures that will write-level all the
+ * attached DDR3 DRAM parts. One possibility is for software to simply
+ * write the desired values into LMC(0)_WLEVEL_RANK(0..3). This section
+ * describes one possible sequence that uses LMC's auto write-leveling
+ * capabilities.
+ *
+ * 1. If the DQS/DQ delays on the board may be more than the ADD/CMD
+ * delays, then ensure that LMC(0)_CONFIG[EARLY_DQX] is set at this
+ * point.
+ *
+ * Do the remaining steps 2-7 separately for each rank i with attached
+ * DRAM.
+ *
+ * 2. Write LMC(0)_WLEVEL_RANKi = 0.
+ *
+ * 3. For ×8 parts:
+ *
+ * Without changing any other fields in LMC(0)_WLEVEL_CTL, write
+ * LMC(0)_WLEVEL_CTL[LANEMASK] to select all byte lanes with attached
+ * DRAM.
+ *
+ * For ×16 parts:
+ *
+ * Without changing any other fields in LMC(0)_WLEVEL_CTL, write
+ * LMC(0)_WLEVEL_CTL[LANEMASK] to select all even byte lanes with
+ * attached DRAM.
+ *
+ * 4. Without changing any other fields in LMC(0)_CONFIG,
+ *
+ * o write LMC(0)_SEQ_CTL[SEQ_SEL] to select write-leveling
+ *
+ * o write LMC(0)_CONFIG[RANKMASK] = (1 << i)
+ *
+ * o write LMC(0)_SEQ_CTL[INIT_START] = 1
+ *
+ * LMC will initiate write-leveling at this point. Assuming
+ * LMC(0)_WLEVEL_CTL [SSET] = 0, LMC first enables write-leveling on
+ * the selected DRAM rank via a DDR3 MR1 write, then sequences through
+ * and accumulates write-leveling results for eight different delay
+ * settings twice, starting at a delay of zero in this case since
+ * LMC(0)_WLEVEL_RANKi[BYTE*<4:3>] = 0, increasing by 1/8 CK each
+ * setting, covering a total distance of one CK, then disables the
+ * write-leveling via another DDR3 MR1 write.
+ *
+ * After the sequence through 16 delay settings is complete:
+ *
+ * o LMC sets LMC(0)_WLEVEL_RANKi[STATUS] = 3
+ *
+ * o LMC sets LMC(0)_WLEVEL_RANKi[BYTE*<2:0>] (for all ranks selected
+ * by LMC(0)_WLEVEL_CTL[LANEMASK]) to indicate the first
+ * write-leveling result of 1 that followed a result of 0 during the
+ * sequence, except that the LMC always writes
+ * LMC(0)_WLEVEL_RANKi[BYTE*<0>]=0.
+ *
+ * o Software can read the eight write-leveling results from the first
+ * pass through the delay settings by reading
+ * LMC(0)_WLEVEL_DBG[BITMASK] (after writing
+ * LMC(0)_WLEVEL_DBG[BYTE]). (LMC does not retain the write-leveling
+ * results from the second pass through the eight delay
+ * settings. They should often be identical to the
+ * LMC(0)_WLEVEL_DBG[BITMASK] results, though.)
+ *
+ * 5. Wait until LMC(0)_WLEVEL_RANKi[STATUS] != 2.
+ *
+ * LMC will have updated LMC(0)_WLEVEL_RANKi[BYTE*<2:0>] for all byte
+ * lanes selected by LMC(0)_WLEVEL_CTL[LANEMASK] at this point.
+ * LMC(0)_WLEVEL_RANKi[BYTE*<4:3>] will still be the value that
+ * software wrote in substep 2 above, which is 0.
+ *
+ * 6. For ×16 parts:
+ *
+ * Without changing any other fields in LMC(0)_WLEVEL_CTL, write
+ * LMC(0)_WLEVEL_CTL[LANEMASK] to select all odd byte lanes with
+ * attached DRAM.
+ *
+ * Repeat substeps 4 and 5 with this new LMC(0)_WLEVEL_CTL[LANEMASK]
+ * setting. Skip to substep 7 if this has already been done.
+ *
+ * For ×8 parts:
+ *
+ * Skip this substep. Go to substep 7.
+ *
+ * 7. Calculate LMC(0)_WLEVEL_RANKi[BYTE*<4:3>] settings for all byte
+ * lanes on all ranks with attached DRAM.
+ *
+ * At this point, all byte lanes on rank i with attached DRAM should
+ * have been write-leveled, and LMC(0)_WLEVEL_RANKi[BYTE*<2:0>] has
+ * the result for each byte lane.
+ *
+ * But note that the DDR3 write-leveling sequence will only determine
+ * the delay modulo the CK cycle time, and cannot determine how many
+ * additional CK cycles of delay are present. Software must calculate
+ * the number of CK cycles, or equivalently, the
+ * LMC(0)_WLEVEL_RANKi[BYTE*<4:3>] settings.
+ *
+ * This BYTE*<4:3> calculation is system/board specific.
+ *
+ * Many techniques can be used to calculate write-leveling BYTE*<4:3> values,
+ * including:
+ *
+ * o Known values for some byte lanes.
+ *
+ * o Relative values for some byte lanes relative to others.
+ *
+ * For example, suppose lane X is likely to require a larger
+ * write-leveling delay than lane Y. A BYTEX<2:0> value that is much
+ * smaller than the BYTEY<2:0> value may then indicate that the
+ * required lane X delay wrapped into the next CK, so BYTEX<4:3>
+ * should be set to BYTEY<4:3>+1.
+ *
+ * When ECC DRAM is not present (i.e. when DRAM is not attached to the
+ * DDR_CBS_0_* and DDR_CB<7:0> chip signals, or the DDR_DQS_<4>_* and
+ * DDR_DQ<35:32> chip signals), write LMC(0)_WLEVEL_RANK*[BYTE8] =
+ * LMC(0)_WLEVEL_RANK*[BYTE0], using the final calculated BYTE0 value.
+ * Write LMC(0)_WLEVEL_RANK*[BYTE4] = LMC(0)_WLEVEL_RANK*[BYTE0],
+ * using the final calculated BYTE0 value.
+ *
+ * 8. Initialize LMC(0)_WLEVEL_RANK* values for all unused ranks.
+ *
+ * Let rank i be a rank with attached DRAM.
+ *
+ * For all ranks j that do not have attached DRAM, set
+ * LMC(0)_WLEVEL_RANKj = LMC(0)_WLEVEL_RANKi.
+ */
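+ /*
+ * A compact illustration of substeps 2-5 above for one rank i (a
+ * sketch only, using the same accessors as the code that follows;
+ * sequence number 6 selects write-leveling):
+ *
+ * DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, i), 0);
+ * wlevel_ctl.s.lanemask = lanemask;
+ * DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_CTL(ddr_interface_num), wlevel_ctl.u);
+ * perform_octeon3_ddr3_sequence(node, 1 << i, ddr_interface_num, 6);
+ * BDK_CSR_WAIT_FOR_FIELD(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, i),
+ * status, ==, 3, 1000000);
+ */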
+ { // Start HW write-leveling block
+#pragma pack(push,1)
+ bdk_lmcx_wlevel_ctl_t wlevel_ctl;
+ bdk_lmcx_wlevel_rankx_t lmc_wlevel_rank;
+ int rankx = 0;
+ int wlevel_bitmask[9];
+ int byte_idx;
+ int ecc_ena;
+ int ddr_wlevel_roundup = 0;
+ int ddr_wlevel_printall = (dram_is_verbose(VBL_FAE)); // or default to 1 to print all HW WL samples
+ int disable_hwl_validity = 0;
+ int default_wlevel_rtt_nom;
+#if WODT_MASK_2R_1S
+ uint64_t saved_wodt_mask = 0;
+#endif
+#pragma pack(pop)
+
+ if (wlevel_loops)
+ ddr_print("N%d.LMC%d: Performing Hardware Write-Leveling\n", node, ddr_interface_num);
+ else {
+ wlevel_bitmask_errors = 1; /* Force software write-leveling to run */
+ ddr_print("N%d.LMC%d: Forcing software Write-Leveling\n", node, ddr_interface_num);
+ }
+
+ default_wlevel_rtt_nom = (ddr_type == DDR3_DRAM) ? rttnom_20ohm : ddr4_rttnom_40ohm; /* FIXME? */
+
+#if WODT_MASK_2R_1S
+ if ((ddr_type == DDR4_DRAM) && (num_ranks == 2) && (dimm_count == 1)) {
+ /* LMC(0)_WODT_MASK */
+ bdk_lmcx_wodt_mask_t lmc_wodt_mask;
+ // always save original so we can always restore later
+ saved_wodt_mask = BDK_CSR_READ(node, BDK_LMCX_WODT_MASK(ddr_interface_num));
+ if ((s = lookup_env_parameter_ull("ddr_hwl_wodt_mask")) != NULL) {
+ lmc_wodt_mask.u = strtoull(s, NULL, 0);
+ if (lmc_wodt_mask.u != saved_wodt_mask) { // print/store only when diff
+ ddr_print("WODT_MASK : 0x%016lx\n", lmc_wodt_mask.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_WODT_MASK(ddr_interface_num), lmc_wodt_mask.u);
+ }
+ }
+ }
+#endif /* WODT_MASK_2R_1S */
+
+ lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(ddr_interface_num));
+ ecc_ena = lmc_config.s.ecc_ena;
+
+ if ((s = lookup_env_parameter("ddr_wlevel_roundup")) != NULL) {
+ ddr_wlevel_roundup = strtoul(s, NULL, 0);
+ }
+ if ((s = lookup_env_parameter("ddr_wlevel_printall")) != NULL) {
+ ddr_wlevel_printall = strtoul(s, NULL, 0);
+ }
+
+ if ((s = lookup_env_parameter("ddr_disable_hwl_validity")) != NULL) {
+ disable_hwl_validity = !!strtoul(s, NULL, 0);
+ }
+
+ if ((s = lookup_env_parameter("ddr_wlevel_rtt_nom")) != NULL) {
+ default_wlevel_rtt_nom = strtoul(s, NULL, 0);
+ }
+
+ // For DDR3, we leave the WLEVEL_CTL fields at default settings
+ // For DDR4, we touch WLEVEL_CTL fields OR_DIS or BITMASK here
+ if (ddr_type == DDR4_DRAM) {
+ int default_or_dis = 1;
+ int default_bitmask = 0xFF;
+
+ // when x4, use only the lower nibble bits
+ if (dram_width == 4) {
+ default_bitmask = 0x0F;
+ VB_PRT(VBL_DEV, "N%d.LMC%d: WLEVEL_CTL: default bitmask is 0x%2x for DDR4 x4\n",
+ node, ddr_interface_num, default_bitmask);
+ }
+
+ wlevel_ctl.u = BDK_CSR_READ(node, BDK_LMCX_WLEVEL_CTL(ddr_interface_num));
+ wlevel_ctl.s.or_dis = default_or_dis;
+ wlevel_ctl.s.bitmask = default_bitmask;
+
+ // allow overrides
+ if ((s = lookup_env_parameter("ddr_wlevel_ctl_or_dis")) != NULL) {
+ wlevel_ctl.s.or_dis = !!strtoul(s, NULL, 0);
+ }
+ if ((s = lookup_env_parameter("ddr_wlevel_ctl_bitmask")) != NULL) {
+ wlevel_ctl.s.bitmask = strtoul(s, NULL, 0);
+ }
+
+ // print only if not defaults
+ if ((wlevel_ctl.s.or_dis != default_or_dis) || (wlevel_ctl.s.bitmask != default_bitmask)) {
+ ddr_print("N%d.LMC%d: WLEVEL_CTL: or_dis=%d, bitmask=0x%02x\n",
+ node, ddr_interface_num, wlevel_ctl.s.or_dis, wlevel_ctl.s.bitmask);
+ }
+ // always write
+ DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_CTL(ddr_interface_num), wlevel_ctl.u);
+ }
+
+ // Start the hardware write-leveling loop per rank
+ for (rankx = 0; rankx < dimm_count * 4; rankx++) {
+
+ if (!(rank_mask & (1 << rankx)))
+ continue;
+
+#if HW_WL_MAJORITY
+ // array to collect counts of byte-lane values
+ // assume low-order 3 bits and even, so really only 2-bit values
+ int wlevel_bytes[9][4];
+ memset(wlevel_bytes, 0, sizeof(wlevel_bytes));
+#endif
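+ /* Example of the mapping assumed above: hardware write-leveling
+ returns even delays 0/2/4/6, so (delay >> 1) & 3 buckets each
+ sample into wlevel_bytes[byte][0..3] for the majority vote. */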
+
+ // restructure the looping so we can keep trying until we get the samples we want
+ //for (int wloop = 0; wloop < wlevel_loops; wloop++) {
+ int wloop = 0;
+ int wloop_retries = 0; // retries per sample for HW-related issues with bitmasks or values
+ int wloop_retries_total = 0;
+ int wloop_retries_exhausted = 0;
+#define WLOOP_RETRIES_DEFAULT 5
+ int wlevel_validity_errors;
+ int wlevel_bitmask_errors_rank = 0;
+ int wlevel_validity_errors_rank = 0;
+
+ while (wloop < wlevel_loops) {
+
+ wlevel_ctl.u = BDK_CSR_READ(node, BDK_LMCX_WLEVEL_CTL(ddr_interface_num));
+
+ wlevel_ctl.s.rtt_nom = (default_wlevel_rtt_nom > 0) ? (default_wlevel_rtt_nom - 1) : 7;
+
+
+ DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx), 0); /* Clear write-level delays */
+
+ wlevel_bitmask_errors = 0; /* Reset error counters */
+ wlevel_validity_errors = 0;
+
+ for (byte_idx=0; byte_idx<9; ++byte_idx) {
+ wlevel_bitmask[byte_idx] = 0; /* Reset bitmasks */
+ }
+
+#if HWL_BY_BYTE // FIXME???
+ /* Make a separate pass for each byte to reduce power. */
+ for (byte_idx=0; byte_idx<(8+ecc_ena); ++byte_idx) {
+
+ if (!(ddr_interface_bytemask&(1<<byte_idx)))
+ continue;
+
+ wlevel_ctl.s.lanemask = (1<<byte_idx);
+
+ DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_CTL(ddr_interface_num), wlevel_ctl.u);
+
+ /* Read and write values back in order to update the
+ status field. This ensures that we read the updated
+ values after write-leveling has completed. */
+ DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx),
+ BDK_CSR_READ(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx)));
+
+ perform_octeon3_ddr3_sequence(node, 1 << rankx, ddr_interface_num, 6); /* write-leveling */
+
+ if (!bdk_is_platform(BDK_PLATFORM_ASIM) &&
+ BDK_CSR_WAIT_FOR_FIELD(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx),
+ status, ==, 3, 1000000))
+ {
+ error_print("ERROR: Timeout waiting for WLEVEL\n");
+ }
+ lmc_wlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx));
+
+ wlevel_bitmask[byte_idx] = octeon_read_lmcx_ddr3_wlevel_dbg(node, ddr_interface_num, byte_idx);
+ if (wlevel_bitmask[byte_idx] == 0)
+ ++wlevel_bitmask_errors;
+ } /* for (byte_idx=0; byte_idx<(8+ecc_ena); ++byte_idx) */
+
+ wlevel_ctl.s.lanemask = /*0x1ff*/ddr_interface_bytemask; // restore for RL
+ DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_CTL(ddr_interface_num), wlevel_ctl.u);
+#else
+ // do all the byte-lanes at the same time
+ wlevel_ctl.s.lanemask = /*0x1ff*/ddr_interface_bytemask; // FIXME?
+
+ DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_CTL(ddr_interface_num), wlevel_ctl.u);
+
+ /* Read and write values back in order to update the
+ status field. This ensures that we read the updated
+ values after write-leveling has completed. */
+ DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx),
+ BDK_CSR_READ(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx)));
+
+ perform_octeon3_ddr3_sequence(node, 1 << rankx, ddr_interface_num, 6); /* write-leveling */
+
+ if (!bdk_is_platform(BDK_PLATFORM_ASIM) &&
+ BDK_CSR_WAIT_FOR_FIELD(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx),
+ status, ==, 3, 1000000))
+ {
+ error_print("ERROR: Timeout waiting for WLEVEL\n");
+ }
+
+ lmc_wlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx));
+
+ for (byte_idx=0; byte_idx<(8+ecc_ena); ++byte_idx) {
+ if (!(ddr_interface_bytemask&(1<<byte_idx)))
+ continue;
+ wlevel_bitmask[byte_idx] = octeon_read_lmcx_ddr3_wlevel_dbg(node, ddr_interface_num, byte_idx);
+ if (wlevel_bitmask[byte_idx] == 0)
+ ++wlevel_bitmask_errors;
+ } /* for (byte_idx=0; byte_idx<(8+ecc_ena); ++byte_idx) */
+#endif
+
+ // check validity only if no bitmask errors
+ if (wlevel_bitmask_errors == 0) {
+ if ((spd_dimm_type != 5) &&
+ (spd_dimm_type != 6) &&
+ (spd_dimm_type != 8) &&
+ (spd_dimm_type != 9) &&
+ (dram_width != 16) &&
+ (ddr_interface_64b) &&
+ !(disable_hwl_validity))
+ { // bypass if mini-[RU]DIMM or x16 or 32-bit or SO-[RU]DIMM
+ wlevel_validity_errors =
+ Validate_HW_WL_Settings(node, ddr_interface_num,
+ &lmc_wlevel_rank, ecc_ena);
+ wlevel_validity_errors_rank += (wlevel_validity_errors != 0);
+ }
+ } else
+ wlevel_bitmask_errors_rank++;
+
+ // before we print, if we had bitmask or validity errors, do a retry...
+ if ((wlevel_bitmask_errors != 0) || (wlevel_validity_errors != 0)) {
+ // VBL must be high to show the bad bitmaps or delays here also
+ if (dram_is_verbose(VBL_DEV2)) {
+ display_WL_BM(node, ddr_interface_num, rankx, wlevel_bitmask);
+ display_WL(node, ddr_interface_num, lmc_wlevel_rank, rankx);
+ }
+ if (wloop_retries < WLOOP_RETRIES_DEFAULT) {
+ wloop_retries++;
+ wloop_retries_total++;
+ // this printout is per-retry: only when VBL is high enough (DEV2?)
+ VB_PRT(VBL_DEV2, "N%d.LMC%d.R%d: H/W Write-Leveling had %s errors - retrying...\n",
+ node, ddr_interface_num, rankx,
+ (wlevel_bitmask_errors) ? "Bitmask" : "Validity");
+ continue; // this takes us back to the top without counting a sample
+ } else { // ran out of retries for this sample
+ // retries exhausted, do not print at normal VBL
+ VB_PRT(VBL_DEV2, "N%d.LMC%d.R%d: H/W Write-Leveling issues: %s errors\n",
+ node, ddr_interface_num, rankx,
+ (wlevel_bitmask_errors) ? "Bitmask" : "Validity");
+ wloop_retries_exhausted++;
+ }
+ }
+ // no errors or exhausted retries, use this sample
+ wloop_retries = 0; // reset for next sample
+
+ // when only 1 sample or forced, print the bitmasks first and current HW WL
+ if ((wlevel_loops == 1) || ddr_wlevel_printall) {
+ display_WL_BM(node, ddr_interface_num, rankx, wlevel_bitmask);
+ display_WL(node, ddr_interface_num, lmc_wlevel_rank, rankx);
+ }
+
+ if (ddr_wlevel_roundup) { /* Round up odd bitmask delays */
+ for (byte_idx=0; byte_idx<(8+ecc_ena); ++byte_idx) {
+ if (!(ddr_interface_bytemask&(1<<byte_idx)))
+ continue;
+ update_wlevel_rank_struct(&lmc_wlevel_rank,
+ byte_idx,
+ roundup_ddr3_wlevel_bitmask(wlevel_bitmask[byte_idx]));
+ } /* for (byte_idx=0; byte_idx<(8+ecc_ena); ++byte_idx) */
+ DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx), lmc_wlevel_rank.u);
+ display_WL(node, ddr_interface_num, lmc_wlevel_rank, rankx);
+ }
+
+#if HW_WL_MAJORITY
+ // OK, we have a decent sample, no bitmask or validity errors
+ for (byte_idx=0; byte_idx<(8+ecc_ena); ++byte_idx) {
+ if (!(ddr_interface_bytemask&(1<<byte_idx)))
+ continue;
+ // increment count of byte-lane value
+ int ix = (get_wlevel_rank_struct(&lmc_wlevel_rank, byte_idx) >> 1) & 3; // only 4 values
+ wlevel_bytes[byte_idx][ix]++;
+ } /* for (byte_idx=0; byte_idx<(8+ecc_ena); ++byte_idx) */
+#endif
+
+ wloop++; // if we get here, we have taken a decent sample
+
+ } /* while (wloop < wlevel_loops) */
+
+#if HW_WL_MAJORITY
+ // if we did sample more than once, try to pick a majority vote
+ if (wlevel_loops > 1) {
+ // look for the majority in each byte-lane
+ for (byte_idx = 0; byte_idx < (8+ecc_ena); ++byte_idx) {
+ int mx = -1, mc = 0, xc = 0, cc = 0;
+ int ix, ic;
+ if (!(ddr_interface_bytemask&(1<<byte_idx)))
+ continue;
+ for (ix = 0; ix < 4; ix++) {
+ ic = wlevel_bytes[byte_idx][ix];
+ // make a bitmask of the ones with a count
+ if (ic > 0) {
+ mc |= (1 << ix);
+ cc++; // count how many had non-zero counts
+ }
+ // find the majority
+ if (ic > xc) { // new max?
+ xc = ic; // yes
+ mx = ix; // set its index
+ }
+ }
+#if SWL_TRY_HWL_ALT
+ // see if there was an alternate
+ int alts = (mc & ~(1 << mx)); // take out the majority choice
+ if (alts != 0) {
+ for (ix = 0; ix < 4; ix++) {
+ if (alts & (1 << ix)) { // FIXME: could be done multiple times? bad if so
+ hwl_alts[rankx].hwl_alt_mask |= (1 << byte_idx); // set the mask
+ hwl_alts[rankx].hwl_alt_delay[byte_idx] = ix << 1; // record the value
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: SWL_TRY_HWL_ALT: Byte %d maj %d (%d) alt %d (%d).\n",
+ node, ddr_interface_num, rankx, byte_idx, mx << 1, xc,
+ ix << 1, wlevel_bytes[byte_idx][ix]);
+ }
+ }
+ } else {
+ debug_print("N%d.LMC%d.R%d: SWL_TRY_HWL_ALT: Byte %d maj %d alt NONE.\n",
+ node, ddr_interface_num, rankx, byte_idx, mx << 1);
+ }
+#endif /* SWL_TRY_HWL_ALT */
+ if (cc > 2) { // unlikely, but...
+ // assume: counts for 3 indices are all 1
+ // possibilities are: 0/2/4, 2/4/6, 0/4/6, 0/2/6
+ // and the desired?: 2 , 4 , 6, 0
+ // we choose the middle, assuming one of the outliers is bad
+ // NOTE: this is an ugly hack at the moment; there must be a better way
+ switch (mc) {
+ case 0x7: mx = 1; break; // was 0/2/4, choose 2
+ case 0xb: mx = 0; break; // was 0/2/6, choose 0
+ case 0xd: mx = 3; break; // was 0/4/6, choose 6
+ case 0xe: mx = 2; break; // was 2/4/6, choose 4
+ default:
+ case 0xf: mx = 1; break; // was 0/2/4/6, choose 2?
+ }
+ error_print("N%d.LMC%d.R%d: HW WL MAJORITY: bad byte-lane %d (0x%x), using %d.\n",
+ node, ddr_interface_num, rankx, byte_idx, mc, mx << 1);
+ }
+ update_wlevel_rank_struct(&lmc_wlevel_rank, byte_idx, mx << 1);
+ } /* for (byte_idx=0; byte_idx<(8+ecc_ena); ++byte_idx) */
+
+ DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx), lmc_wlevel_rank.u);
+ display_WL_with_final(node, ddr_interface_num, lmc_wlevel_rank, rankx);
+ } /* if (wlevel_loops > 1) */
+#endif /* HW_WL_MAJORITY */
+ // maybe print an error summary for the rank
+ if ((wlevel_bitmask_errors_rank != 0) || (wlevel_validity_errors_rank != 0)) {
+ VB_PRT(VBL_FAE, "N%d.LMC%d.R%d: H/W Write-Leveling errors - %d bitmask, %d validity, %d retries, %d exhausted\n",
+ node, ddr_interface_num, rankx,
+ wlevel_bitmask_errors_rank, wlevel_validity_errors_rank,
+ wloop_retries_total, wloop_retries_exhausted);
+ }
+
+ } /* for (rankx = 0; rankx < dimm_count * 4;rankx++) */
+
+#if WODT_MASK_2R_1S
+ if ((ddr_type == DDR4_DRAM) && (num_ranks == 2) && (dimm_count == 1)) {
+ /* LMC(0)_WODT_MASK */
+ bdk_lmcx_wodt_mask_t lmc_wodt_mask;
+ // always read current so we can see if it's different from saved
+ lmc_wodt_mask.u = BDK_CSR_READ(node, BDK_LMCX_WODT_MASK(ddr_interface_num));
+ if (lmc_wodt_mask.u != saved_wodt_mask) { // always restore what was saved if diff
+ lmc_wodt_mask.u = saved_wodt_mask;
+ ddr_print("WODT_MASK : 0x%016lx\n", lmc_wodt_mask.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_WODT_MASK(ddr_interface_num), lmc_wodt_mask.u);
+ }
+ }
+#endif /* WODT_MASK_2R_1S */
+
+ } // End HW write-leveling block
+
+ // At the end of HW Write Leveling, check on some things...
+ if (! disable_deskew_training) {
+
+ deskew_counts_t dsk_counts;
+ int retry_count = 0;
+
+ VB_PRT(VBL_FAE, "N%d.LMC%d: Check Deskew Settings before Read-Leveling.\n", node, ddr_interface_num);
+
+ do {
+ Validate_Read_Deskew_Training(node, rank_mask, ddr_interface_num, &dsk_counts, VBL_FAE);
+
+ // RAWCARD A or B will not benefit from retraining if there is only saturation;
+ // any rawcard will benefit if there is a nibble error
+ if ((!spd_rawcard_AorB && dsk_counts.saturated > 0) ||
+ ((dsk_counts.nibrng_errs != 0) || (dsk_counts.nibunl_errs != 0)))
+ {
+ retry_count++;
+ VB_PRT(VBL_FAE, "N%d.LMC%d: Deskew Status indicates saturation or nibble errors - retry %d Training.\n",
+ node, ddr_interface_num, retry_count);
+ Perform_Read_Deskew_Training(node, rank_mask, ddr_interface_num,
+ spd_rawcard_AorB, 0, ddr_interface_64b);
+ } else
+ break;
+ } while (retry_count < 5);
+
+ // print the last setting only if we had to do retries here
+ if (retry_count > 0)
+ Validate_Read_Deskew_Training(node, rank_mask, ddr_interface_num, &dsk_counts, VBL_NORM);
+ }
+
+ /*
+ * 6.9.12 LMC Read Leveling
+ *
+ * LMC supports automatic read-leveling, separately per byte-lane, using
+ * the DDR3 multipurpose register predefined pattern for system
+ * calibration defined in the JEDEC DDR3 specifications.
+ *
+ * All of DDR PLL, LMC CK, and LMC DRESET, and early LMC initializations
+ * must be completed prior to starting this LMC read-leveling sequence.
+ *
+ * Software could simply write the desired read-leveling values into
+ * LMC(0)_RLEVEL_RANK(0..3). This section describes a sequence that uses
+ * LMC's auto read-leveling capabilities.
+ *
+ * When LMC does the read-leveling sequence for a rank, it first enables
+ * the DDR3 multipurpose register predefined pattern for system
+ * calibration on the selected DRAM rank via a DDR3 MR3 write, then
+ * executes 64 RD operations at different internal delay settings, then
+ * disables the predefined pattern via another DDR3 MR3 write
+ * operation. LMC determines the pass or fail of each of the 64 settings
+ * independently for each byte lane, then writes appropriate
+ * LMC(0)_RLEVEL_RANK(0..3)[BYTE*] values for the rank.
+ *
+ * After read-leveling for a rank, software can read the 64 pass/fail
+ * indications for one byte lane via LMC(0)_RLEVEL_DBG[BITMASK]. Software
+ * can observe all pass/fail results for all byte lanes in a rank via
+ * separate read-leveling sequences on the rank with different
+ * LMC(0)_RLEVEL_CTL[BYTE] values.
+ *
+ * The 64 pass/fail results will typically have failures for the low
+ * delays, followed by a run of some passing settings, followed by more
+ * failures in the remaining high delays. LMC sets
+ * LMC(0)_RLEVEL_RANK(0..3)[BYTE*] to one of the passing settings.
+ * First, LMC selects the longest run of successes in the 64 results. (In
+ * the unlikely event that there is more than one longest run, LMC
+ * selects the first one.) Then if LMC(0)_RLEVEL_CTL[OFFSET_EN] = 1 and
+ * the selected run has more than LMC(0)_RLEVEL_CTL[OFFSET] successes,
+ * LMC selects the last passing setting in the run minus
+ * LMC(0)_RLEVEL_CTL[OFFSET]. Otherwise LMC selects the middle setting in
+ * the run (rounding earlier when necessary). We expect the read-leveling
+ * sequence to produce good results with the reset values
+ * LMC(0)_RLEVEL_CTL[OFFSET_EN] = 1, LMC(0)_RLEVEL_CTL[OFFSET] = 2.
+ *
+ * The read-leveling sequence has the following steps:
+ *
+ * 1. Select desired LMC(0)_RLEVEL_CTL[OFFSET_EN,OFFSET,BYTE] settings.
+ * Do the remaining substeps 2-4 separately for each rank i with
+ * attached DRAM.
+ *
+ * 2. Without changing any other fields in LMC(0)_CONFIG,
+ *
+ * o write LMC(0)_SEQ_CTL[SEQ_SEL] to select read-leveling
+ *
+ * o write LMC(0)_CONFIG[RANKMASK] = (1 << i)
+ *
+ * o write LMC(0)_SEQ_CTL[INIT_START] = 1
+ *
+ * This initiates the previously-described read-leveling.
+ *
+ * 3. Wait until LMC(0)_RLEVEL_RANKi[STATUS] != 2
+ *
+ * LMC will have updated LMC(0)_RLEVEL_RANKi[BYTE*] for all byte lanes
+ * at this point.
+ *
+ * If ECC DRAM is not present (i.e. when DRAM is not attached to the
+ * DDR_CBS_0_* and DDR_CB<7:0> chip signals, or the DDR_DQS_<4>_* and
+ * DDR_DQ<35:32> chip signals), write LMC(0)_RLEVEL_RANK*[BYTE8] =
+ * LMC(0)_RLEVEL_RANK*[BYTE0]. Write LMC(0)_RLEVEL_RANK*[BYTE4] =
+ * LMC(0)_RLEVEL_RANK*[BYTE0].
+ *
+ * 4. If desired, consult LMC(0)_RLEVEL_DBG[BITMASK] and compare to
+ * LMC(0)_RLEVEL_RANKi[BYTE*] for the lane selected by
+ * LMC(0)_RLEVEL_CTL[BYTE]. If desired, modify LMC(0)_RLEVEL_CTL[BYTE]
+ * to a new value and repeat so that all BITMASKs can be observed.
+ *
+ * 5. Initialize LMC(0)_RLEVEL_RANK* values for all unused ranks.
+ *
+ * Let rank i be a rank with attached DRAM.
+ *
+ * For all ranks j that do not have attached DRAM, set
+ * LMC(0)_RLEVEL_RANKj = LMC(0)_RLEVEL_RANKi.
+ *
+ * This read-leveling sequence can help select the proper CN70XX ODT
+ * resistance value (LMC(0)_COMP_CTL2[RODT_CTL]). A hardware-generated
+ * LMC(0)_RLEVEL_RANKi[BYTEj] value (for a used byte lane j) that is
+ * drastically different from a neighboring LMC(0)_RLEVEL_RANKi[BYTEk]
+ * (for a used byte lane k) can indicate that the CN70XX ODT value is
+ * bad. It is possible to simultaneously optimize both
+ * LMC(0)_COMP_CTL2[RODT_CTL] and LMC(0)_RLEVEL_RANKn[BYTE*] values by
+ * performing this read-leveling sequence for several
+ * LMC(0)_COMP_CTL2[RODT_CTL] values and selecting the one with the best
+ * LMC(0)_RLEVEL_RANKn[BYTE*] profile for the ranks.
+ */
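+ /*
+ * A sketch of the hardware's setting choice described above
+ * (illustrative only; this selection happens inside LMC, not in
+ * software): find the longest run [start, end] of passing settings
+ * among the 64 results, then
+ *
+ * if (OFFSET_EN && (end - start + 1) > OFFSET)
+ * byte_delay = end - OFFSET;
+ * else
+ * byte_delay = (start + end) / 2; // rounding earlier
+ */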
+
+ {
+#pragma pack(push,4)
+ bdk_lmcx_rlevel_rankx_t lmc_rlevel_rank;
+ bdk_lmcx_comp_ctl2_t lmc_comp_ctl2;
+ bdk_lmcx_rlevel_ctl_t rlevel_ctl;
+ bdk_lmcx_control_t lmc_control;
+ bdk_lmcx_modereg_params1_t lmc_modereg_params1;
+ unsigned char rodt_ctl;
+ unsigned char rankx = 0;
+ int rlevel_rodt_errors = 0;
+ unsigned char ecc_ena;
+ unsigned char rtt_nom;
+ unsigned char rtt_idx;
+ int min_rtt_nom_idx;
+ int max_rtt_nom_idx;
+ int min_rodt_ctl;
+ int max_rodt_ctl;
+ int rlevel_debug_loops = 1;
+ unsigned char save_ddr2t;
+ int rlevel_avg_loops;
+ int ddr_rlevel_compute;
+ int saved_ddr__ptune, saved_ddr__ntune, rlevel_comp_offset;
+ int saved_int_zqcs_dis = 0;
+ int disable_sequential_delay_check = 0;
+ int maximum_adjacent_rlevel_delay_increment = 0;
+ struct {
+ uint64_t setting;
+ int score;
+ } rlevel_scoreboard[RTT_NOM_OHMS_COUNT][RODT_OHMS_COUNT][4];
+ int print_nom_ohms;
+#if PERFECT_BITMASK_COUNTING
+ typedef struct {
+ uint8_t count[9][32]; // 8+ECC by 32 values
+ uint8_t total[9]; // 8+ECC
+ } rank_perfect_t;
+ rank_perfect_t rank_perfect_counts[4];
+#endif
+
+#pragma pack(pop)
+
+#if PERFECT_BITMASK_COUNTING
+ memset(rank_perfect_counts, 0, sizeof(rank_perfect_counts));
+#endif /* PERFECT_BITMASK_COUNTING */
+
+ lmc_control.u = BDK_CSR_READ(node, BDK_LMCX_CONTROL(ddr_interface_num));
+ save_ddr2t = lmc_control.s.ddr2t;
+
+ lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(ddr_interface_num));
+ ecc_ena = lmc_config.s.ecc_ena;
+
+#if 0
+ {
+ int save_ref_zqcs_int;
+ uint64_t temp_delay_usecs;
+
+ /* Temporarily select the minimum ZQCS interval and wait
+ long enough for a few ZQCS calibrations to occur. This
+ should ensure that the calibration circuitry is
+ stabilized before read-leveling occurs. */
+ save_ref_zqcs_int = lmc_config.s.ref_zqcs_int;
+ lmc_config.s.ref_zqcs_int = 1 | (32<<7); /* set smallest interval */
+ DRAM_CSR_WRITE(node, BDK_LMCX_CONFIG(ddr_interface_num), lmc_config.u);
+ BDK_CSR_READ(node, BDK_LMCX_CONFIG(ddr_interface_num));
+
+ /* Compute an appropriate delay based on the current ZQCS
+ interval. The delay should be long enough for the
+ current ZQCS delay counter to expire plus ten of the
+ minimum intervals to ensure that some calibrations
+ occur. */
+ temp_delay_usecs = (((uint64_t)save_ref_zqcs_int >> 7)
+ * tclk_psecs * 100 * 512 * 128) / (10000*10000)
+ + 10 * ((uint64_t)32 * tclk_psecs * 100 * 512 * 128) / (10000*10000);
+
+ ddr_print ("Waiting %lu usecs for ZQCS calibrations to start\n",
+ temp_delay_usecs);
+ bdk_wait_usec(temp_delay_usecs);
+
+ lmc_config.s.ref_zqcs_int = save_ref_zqcs_int; /* Restore computed interval */
+ DRAM_CSR_WRITE(node, BDK_LMCX_CONFIG(ddr_interface_num), lmc_config.u);
+ BDK_CSR_READ(node, BDK_LMCX_CONFIG(ddr_interface_num));
+ }
+#endif
+
+ if ((s = lookup_env_parameter("ddr_rlevel_2t")) != NULL) {
+ lmc_control.s.ddr2t = strtoul(s, NULL, 0);
+ }
+
+ DRAM_CSR_WRITE(node, BDK_LMCX_CONTROL(ddr_interface_num), lmc_control.u);
+
+ ddr_print("N%d.LMC%d: Performing Read-Leveling\n", node, ddr_interface_num);
+
+ rlevel_ctl.u = BDK_CSR_READ(node, BDK_LMCX_RLEVEL_CTL(ddr_interface_num));
+
+ rlevel_avg_loops = custom_lmc_config->rlevel_average_loops;
+ if (rlevel_avg_loops == 0) {
+ rlevel_avg_loops = RLEVEL_AVG_LOOPS_DEFAULT;
+ if ((dimm_count == 1) || (num_ranks == 1)) // up the samples for these cases
+ rlevel_avg_loops = rlevel_avg_loops * 2 + 1;
+ }
+
+ ddr_rlevel_compute = custom_lmc_config->rlevel_compute;
+ rlevel_ctl.s.offset_en = custom_lmc_config->offset_en;
+ rlevel_ctl.s.offset = spd_rdimm
+ ? custom_lmc_config->offset_rdimm
+ : custom_lmc_config->offset_udimm;
+
+ rlevel_ctl.s.delay_unload_0 = 1; /* should normally be set */
+ rlevel_ctl.s.delay_unload_1 = 1; /* should normally be set */
+ rlevel_ctl.s.delay_unload_2 = 1; /* should normally be set */
+ rlevel_ctl.s.delay_unload_3 = 1; /* should normally be set */
+
+ rlevel_ctl.s.or_dis = 1; // default to get best bitmasks
+ if ((s = lookup_env_parameter("ddr_rlevel_or_dis")) != NULL) {
+ rlevel_ctl.s.or_dis = !!strtoul(s, NULL, 0);
+ }
+ rlevel_ctl.s.bitmask = 0xff; // should work in 32b mode also
+ if ((s = lookup_env_parameter("ddr_rlevel_ctl_bitmask")) != NULL) {
+ rlevel_ctl.s.bitmask = strtoul(s, NULL, 0);
+ }
+ debug_print("N%d.LMC%d: RLEVEL_CTL: or_dis=%d, bitmask=0x%02x\n",
+ node, ddr_interface_num,
+ rlevel_ctl.s.or_dis, rlevel_ctl.s.bitmask);
+
+ rlevel_comp_offset = spd_rdimm
+ ? custom_lmc_config->rlevel_comp_offset_rdimm
+ : custom_lmc_config->rlevel_comp_offset_udimm;
+
+ if ((s = lookup_env_parameter("ddr_rlevel_offset")) != NULL) {
+ rlevel_ctl.s.offset = strtoul(s, NULL, 0);
+ }
+
+ if ((s = lookup_env_parameter("ddr_rlevel_offset_en")) != NULL) {
+ rlevel_ctl.s.offset_en = strtoul(s, NULL, 0);
+ }
+ if ((s = lookup_env_parameter("ddr_rlevel_ctl")) != NULL) {
+ rlevel_ctl.u = strtoul(s, NULL, 0);
+ }
+
+ DRAM_CSR_WRITE(node, BDK_LMCX_RLEVEL_CTL(ddr_interface_num), rlevel_ctl.u);
+
+ if (bdk_is_platform(BDK_PLATFORM_ASIM))
+ rlevel_debug_loops = 0;
+
+ if ((s = lookup_env_parameter("ddr%d_rlevel_debug_loops", ddr_interface_num)) != NULL) {
+ rlevel_debug_loops = strtoul(s, NULL, 0);
+ }
+
+ if ((s = lookup_env_parameter("ddr_rtt_nom_auto")) != NULL) {
+ ddr_rtt_nom_auto = !!strtoul(s, NULL, 0);
+ }
+
+ if ((s = lookup_env_parameter("ddr_rlevel_average")) != NULL) {
+ rlevel_avg_loops = strtoul(s, NULL, 0);
+ }
+
+ if ((s = lookup_env_parameter("ddr_rlevel_compute")) != NULL) {
+ ddr_rlevel_compute = strtoul(s, NULL, 0);
+ }
+
+ ddr_print("RLEVEL_CTL : 0x%016lx\n", rlevel_ctl.u);
+ ddr_print("RLEVEL_OFFSET : %6d\n", rlevel_ctl.s.offset);
+ ddr_print("RLEVEL_OFFSET_EN : %6d\n", rlevel_ctl.s.offset_en);
+
+ /* The purpose for the indexed table is to sort the settings
+ ** by the ohm value to simplify the testing when incrementing
+ ** through the settings. (index => ohms) 1=120, 2=60, 3=40,
+ ** 4=30, 5=20 */
+ min_rtt_nom_idx = (custom_lmc_config->min_rtt_nom_idx == 0) ? 1 : custom_lmc_config->min_rtt_nom_idx;
+ max_rtt_nom_idx = (custom_lmc_config->max_rtt_nom_idx == 0) ? 5 : custom_lmc_config->max_rtt_nom_idx;
+
+ min_rodt_ctl = (custom_lmc_config->min_rodt_ctl == 0) ? 1 : custom_lmc_config->min_rodt_ctl;
+ max_rodt_ctl = (custom_lmc_config->max_rodt_ctl == 0) ? 5 : custom_lmc_config->max_rodt_ctl;
+
+ if ((s = lookup_env_parameter("ddr_min_rodt_ctl")) != NULL) {
+ min_rodt_ctl = strtoul(s, NULL, 0);
+ }
+ if ((s = lookup_env_parameter("ddr_max_rodt_ctl")) != NULL) {
+ max_rodt_ctl = strtoul(s, NULL, 0);
+ }
+ if ((s = lookup_env_parameter("ddr_min_rtt_nom_idx")) != NULL) {
+ min_rtt_nom_idx = strtoul(s, NULL, 0);
+ }
+ if ((s = lookup_env_parameter("ddr_max_rtt_nom_idx")) != NULL) {
+ max_rtt_nom_idx = strtoul(s, NULL, 0);
+ }
+
+#ifdef ENABLE_CUSTOM_RLEVEL_TABLE
+ if (custom_lmc_config->rlevel_table != NULL) {
+ char part_number[21];
+ /* Check for hard-coded read-leveling settings */
+ get_dimm_part_number(part_number, node, &dimm_config_table[0], 0, ddr_type);
+ for (rankx = 0; rankx < dimm_count * 4;rankx++) {
+ if (!(rank_mask & (1 << rankx)))
+ continue;
+
+ lmc_rlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx));
+
+ i = 0;
+ while (custom_lmc_config->rlevel_table[i].part != NULL) {
+ debug_print("DIMM part number:\"%s\", SPD: \"%s\"\n", custom_lmc_config->rlevel_table[i].part, part_number);
+ if ((strcmp(part_number, custom_lmc_config->rlevel_table[i].part) == 0)
+ && (_abs(custom_lmc_config->rlevel_table[i].speed - 2*ddr_hertz/(1000*1000)) < 10 ))
+ {
+ ddr_print("Using hard-coded read leveling for DIMM part number: \"%s\"\n", part_number);
+ lmc_rlevel_rank.u = custom_lmc_config->rlevel_table[i].rlevel_rank[ddr_interface_num][rankx];
+ DRAM_CSR_WRITE(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx), lmc_rlevel_rank.u);
+ lmc_rlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx));
+ display_RL(node, ddr_interface_num, lmc_rlevel_rank, rankx);
+ rlevel_debug_loops = 0; /* Disable h/w read-leveling */
+ break;
+ }
+ ++i;
+ }
+ }
+ }
+#endif /* ENABLE_CUSTOM_RLEVEL_TABLE */
+
+ while(rlevel_debug_loops--) {
+ /* Initialize the error scoreboard */
+ memset(rlevel_scoreboard, 0, sizeof(rlevel_scoreboard));
+
+ if ((s = lookup_env_parameter("ddr_rlevel_comp_offset")) != NULL) {
+ rlevel_comp_offset = strtoul(s, NULL, 0);
+ }
+
+ disable_sequential_delay_check = custom_lmc_config->disable_sequential_delay_check;
+
+ if ((s = lookup_env_parameter("ddr_disable_sequential_delay_check")) != NULL) {
+ disable_sequential_delay_check = strtoul(s, NULL, 0);
+ }
+
+ maximum_adjacent_rlevel_delay_increment = custom_lmc_config->maximum_adjacent_rlevel_delay_increment;
+
+ if ((s = lookup_env_parameter("ddr_maximum_adjacent_rlevel_delay_increment")) != NULL) {
+ maximum_adjacent_rlevel_delay_increment = strtoul(s, NULL, 0);
+ }
+
+ lmc_comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num));
+ saved_ddr__ptune = lmc_comp_ctl2.s.ddr__ptune;
+ saved_ddr__ntune = lmc_comp_ctl2.s.ddr__ntune;
+
+ /* Disable dynamic compensation settings */
+ if (rlevel_comp_offset != 0) {
+ lmc_comp_ctl2.s.ptune = saved_ddr__ptune;
+ lmc_comp_ctl2.s.ntune = saved_ddr__ntune;
+
+ /* Round up the ptune calculation to bias the odd cases toward ptune */
+ lmc_comp_ctl2.s.ptune += divide_roundup(rlevel_comp_offset, 2);
+ lmc_comp_ctl2.s.ntune -= rlevel_comp_offset/2;
+
+ lmc_control.u = BDK_CSR_READ(node, BDK_LMCX_CONTROL(ddr_interface_num));
+ saved_int_zqcs_dis = lmc_control.s.int_zqcs_dis;
+ lmc_control.s.int_zqcs_dis = 1; /* Disable ZQCS while in bypass. */
+ DRAM_CSR_WRITE(node, BDK_LMCX_CONTROL(ddr_interface_num), lmc_control.u);
+
+ lmc_comp_ctl2.s.byp = 1; /* Enable bypass mode */
+ DRAM_CSR_WRITE(node, BDK_LMCX_COMP_CTL2(ddr_interface_num), lmc_comp_ctl2.u);
+ BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num));
+ lmc_comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num)); /* Read again */
+ ddr_print("DDR__PTUNE/DDR__NTUNE : %d/%d\n",
+ lmc_comp_ctl2.s.ddr__ptune, lmc_comp_ctl2.s.ddr__ntune);
+ }
+
+ lmc_modereg_params1.u = BDK_CSR_READ(node, BDK_LMCX_MODEREG_PARAMS1(ddr_interface_num));
+
+ for (rtt_idx = min_rtt_nom_idx; rtt_idx <= max_rtt_nom_idx; ++rtt_idx) {
+ rtt_nom = imp_values->rtt_nom_table[rtt_idx];
+
+ /* When the read ODT mask is zero, the dyn_rtt_nom_mask is
+ zero and RTT_NOM will not be changing during
+ read-leveling. Since the value is fixed we only need
+ to test it once. */
+ if (dyn_rtt_nom_mask == 0) {
+ print_nom_ohms = -1; // flag not to print NOM ohms
+ if (rtt_idx != min_rtt_nom_idx)
+ continue;
+ } else {
+ if (dyn_rtt_nom_mask & 1) lmc_modereg_params1.s.rtt_nom_00 = rtt_nom;
+ if (dyn_rtt_nom_mask & 2) lmc_modereg_params1.s.rtt_nom_01 = rtt_nom;
+ if (dyn_rtt_nom_mask & 4) lmc_modereg_params1.s.rtt_nom_10 = rtt_nom;
+ if (dyn_rtt_nom_mask & 8) lmc_modereg_params1.s.rtt_nom_11 = rtt_nom;
+ // FIXME? rank 0 ohms always for the printout?
+ print_nom_ohms = imp_values->rtt_nom_ohms[lmc_modereg_params1.s.rtt_nom_00];
+ }
+
+ DRAM_CSR_WRITE(node, BDK_LMCX_MODEREG_PARAMS1(ddr_interface_num), lmc_modereg_params1.u);
+ VB_PRT(VBL_TME, "\n");
+ VB_PRT(VBL_TME, "RTT_NOM %3d, %3d, %3d, %3d ohms : %x,%x,%x,%x\n",
+ imp_values->rtt_nom_ohms[lmc_modereg_params1.s.rtt_nom_11],
+ imp_values->rtt_nom_ohms[lmc_modereg_params1.s.rtt_nom_10],
+ imp_values->rtt_nom_ohms[lmc_modereg_params1.s.rtt_nom_01],
+ imp_values->rtt_nom_ohms[lmc_modereg_params1.s.rtt_nom_00],
+ lmc_modereg_params1.s.rtt_nom_11,
+ lmc_modereg_params1.s.rtt_nom_10,
+ lmc_modereg_params1.s.rtt_nom_01,
+ lmc_modereg_params1.s.rtt_nom_00);
+
+ perform_ddr_init_sequence(node, rank_mask, ddr_interface_num);
+
+ // Try RANK outside RODT to rearrange the output...
+ for (rankx = 0; rankx < dimm_count * 4; rankx++) {
+ int byte_idx;
+ rlevel_byte_data_t rlevel_byte[9];
+ int average_loops;
+ int rlevel_rank_errors, rlevel_bitmask_errors, rlevel_nonseq_errors;
+ rlevel_bitmask_t rlevel_bitmask[9];
+#if PICK_BEST_RANK_SCORE_NOT_AVG
+ int rlevel_best_rank_score;
+#endif
+
+ if (!(rank_mask & (1 << rankx)))
+ continue;
+
+ for (rodt_ctl = max_rodt_ctl; rodt_ctl >= min_rodt_ctl; --rodt_ctl) {
+#if PICK_BEST_RANK_SCORE_NOT_AVG
+ rlevel_best_rank_score = DEFAULT_BEST_RANK_SCORE;
+#endif
+ rlevel_rodt_errors = 0;
+ lmc_comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num));
+ lmc_comp_ctl2.s.rodt_ctl = rodt_ctl;
+ DRAM_CSR_WRITE(node, BDK_LMCX_COMP_CTL2(ddr_interface_num), lmc_comp_ctl2.u);
+ lmc_comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num));
+ bdk_wait_usec(1); /* Give it a little time to take effect */
+ VB_PRT(VBL_DEV, "Read ODT_CTL : 0x%x (%d ohms)\n",
+ lmc_comp_ctl2.s.rodt_ctl, imp_values->rodt_ohms[lmc_comp_ctl2.s.rodt_ctl]);
+
+ memset(rlevel_byte, 0, sizeof(rlevel_byte));
+
+ for (average_loops = 0; average_loops < rlevel_avg_loops; average_loops++) {
+ rlevel_bitmask_errors = 0;
+
+ if (! (rlevel_separate_ab && spd_rdimm && (ddr_type == DDR4_DRAM))) {
+ /* Clear read-level delays */
+ DRAM_CSR_WRITE(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx), 0);
+
+ /* read-leveling */
+ perform_octeon3_ddr3_sequence(node, 1 << rankx, ddr_interface_num, 1);
+
+ if (!bdk_is_platform(BDK_PLATFORM_ASIM) &&
+ BDK_CSR_WAIT_FOR_FIELD(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx),
+ status, ==, 3, 1000000))
+ {
+ error_print("ERROR: Timeout waiting for RLEVEL\n");
+ }
+ }
+
+ lmc_rlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx));
+
+ { // start bitmask interpretation block
+ int redoing_nonseq_errs = 0;
+
+ memset(rlevel_bitmask, 0, sizeof(rlevel_bitmask));
+
+ if (rlevel_separate_ab && spd_rdimm && (ddr_type == DDR4_DRAM)) {
+ bdk_lmcx_rlevel_rankx_t lmc_rlevel_rank_aside;
+ bdk_lmcx_modereg_params0_t lmc_modereg_params0;
+
+ /* A-side */
+ lmc_modereg_params0.u = BDK_CSR_READ(node, BDK_LMCX_MODEREG_PARAMS0(ddr_interface_num));
+ lmc_modereg_params0.s.mprloc = 0; /* MPR Page 0 Location 0 */
+ DRAM_CSR_WRITE(node, BDK_LMCX_MODEREG_PARAMS0(ddr_interface_num), lmc_modereg_params0.u);
+
+ /* Clear read-level delays */
+ DRAM_CSR_WRITE(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx), 0);
+
+ perform_octeon3_ddr3_sequence(node, 1 << rankx, ddr_interface_num, 1); /* read-leveling */
+
+ if (!bdk_is_platform(BDK_PLATFORM_ASIM) &&
+ BDK_CSR_WAIT_FOR_FIELD(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx),
+ status, ==, 3, 1000000))
+ {
+ error_print("ERROR: Timeout waiting for RLEVEL\n");
+
+ }
+ lmc_rlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx));
+
+ lmc_rlevel_rank_aside.u = lmc_rlevel_rank.u;
+
+ rlevel_bitmask[0].bm = octeon_read_lmcx_ddr3_rlevel_dbg(node, ddr_interface_num, 0);
+ rlevel_bitmask[1].bm = octeon_read_lmcx_ddr3_rlevel_dbg(node, ddr_interface_num, 1);
+ rlevel_bitmask[2].bm = octeon_read_lmcx_ddr3_rlevel_dbg(node, ddr_interface_num, 2);
+ rlevel_bitmask[3].bm = octeon_read_lmcx_ddr3_rlevel_dbg(node, ddr_interface_num, 3);
+ rlevel_bitmask[8].bm = octeon_read_lmcx_ddr3_rlevel_dbg(node, ddr_interface_num, 8);
+ /* A-side complete */
+
+
+ /* B-side */
+ lmc_modereg_params0.u = BDK_CSR_READ(node, BDK_LMCX_MODEREG_PARAMS0(ddr_interface_num));
+ lmc_modereg_params0.s.mprloc = 3; /* MPR Page 0 Location 3 */
+ DRAM_CSR_WRITE(node, BDK_LMCX_MODEREG_PARAMS0(ddr_interface_num), lmc_modereg_params0.u);
+
+ /* Clear read-level delays */
+ DRAM_CSR_WRITE(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx), 0);
+
+ perform_octeon3_ddr3_sequence(node, 1 << rankx, ddr_interface_num, 1); /* read-leveling */
+
+ if (!bdk_is_platform(BDK_PLATFORM_ASIM) &&
+ BDK_CSR_WAIT_FOR_FIELD(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx),
+ status, ==, 3, 1000000))
+ {
+ error_print("ERROR: Timeout waiting for RLEVEL\n");
+ }
+ lmc_rlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx));
+
+ rlevel_bitmask[4].bm = octeon_read_lmcx_ddr3_rlevel_dbg(node, ddr_interface_num, 4);
+ rlevel_bitmask[5].bm = octeon_read_lmcx_ddr3_rlevel_dbg(node, ddr_interface_num, 5);
+ rlevel_bitmask[6].bm = octeon_read_lmcx_ddr3_rlevel_dbg(node, ddr_interface_num, 6);
+ rlevel_bitmask[7].bm = octeon_read_lmcx_ddr3_rlevel_dbg(node, ddr_interface_num, 7);
+ /* B-side complete */
+
+
+ update_rlevel_rank_struct(&lmc_rlevel_rank, 0, lmc_rlevel_rank_aside.cn83xx.byte0);
+ update_rlevel_rank_struct(&lmc_rlevel_rank, 1, lmc_rlevel_rank_aside.cn83xx.byte1);
+ update_rlevel_rank_struct(&lmc_rlevel_rank, 2, lmc_rlevel_rank_aside.cn83xx.byte2);
+ update_rlevel_rank_struct(&lmc_rlevel_rank, 3, lmc_rlevel_rank_aside.cn83xx.byte3);
+ update_rlevel_rank_struct(&lmc_rlevel_rank, 8, lmc_rlevel_rank_aside.cn83xx.byte8); /* ECC A-side */
+
+ lmc_modereg_params0.u = BDK_CSR_READ(node, BDK_LMCX_MODEREG_PARAMS0(ddr_interface_num));
+ lmc_modereg_params0.s.mprloc = 0; /* MPR Page 0 Location 0 */
+ DRAM_CSR_WRITE(node, BDK_LMCX_MODEREG_PARAMS0(ddr_interface_num), lmc_modereg_params0.u);
+
+ } /* if (rlevel_separate_ab && spd_rdimm && (ddr_type == DDR4_DRAM)) */
+
+ /*
+ * Evaluate the quality of the read-leveling delays from the bitmasks.
+ * Also save off a software computed read-leveling mask that may be
+ * used later to qualify the delay results from Octeon.
+ */
+ for (byte_idx = 0; byte_idx < (8+ecc_ena); ++byte_idx) {
+ int bmerr;
+ if (!(ddr_interface_bytemask&(1<<byte_idx)))
+ continue;
+ if (! (rlevel_separate_ab && spd_rdimm && (ddr_type == DDR4_DRAM))) {
+ rlevel_bitmask[byte_idx].bm =
+ octeon_read_lmcx_ddr3_rlevel_dbg(node, ddr_interface_num, byte_idx);
+ }
+ bmerr = validate_ddr3_rlevel_bitmask(&rlevel_bitmask[byte_idx], ddr_type);
+ rlevel_bitmask[byte_idx].errs = bmerr;
+ rlevel_bitmask_errors += bmerr;
+#if PERFECT_BITMASK_COUNTING
+ if ((ddr_type == DDR4_DRAM) && !bmerr) { // count only the "perfect" bitmasks
+ // FIXME: could optimize this a bit?
+ int delay = get_rlevel_rank_struct(&lmc_rlevel_rank, byte_idx);
+ rank_perfect_counts[rankx].count[byte_idx][delay] += 1;
+ rank_perfect_counts[rankx].total[byte_idx] += 1;
+ }
+#endif /* PERFECT_BITMASK_COUNTING */
+ }
+
+ /* Set delays for unused bytes to match byte 0. */
+ for (byte_idx = 0; byte_idx < 9; ++byte_idx) {
+ if (ddr_interface_bytemask & (1 << byte_idx))
+ continue;
+ update_rlevel_rank_struct(&lmc_rlevel_rank, byte_idx, lmc_rlevel_rank.cn83xx.byte0);
+ }
+
+ /* Save a copy of the byte delays in physical
+ order for sequential evaluation. */
+ unpack_rlevel_settings(ddr_interface_bytemask, ecc_ena, rlevel_byte, lmc_rlevel_rank);
+ redo_nonseq_errs:
+
+ rlevel_nonseq_errors = 0;
+
+ if (! disable_sequential_delay_check) {
+ if ((ddr_interface_bytemask & 0xff) == 0xff) {
+ /* Evaluate delay sequence across the whole range of bytes for standard dimms. */
+ if ((spd_dimm_type == 1) || (spd_dimm_type == 5)) { /* 1=RDIMM, 5=Mini-RDIMM */
+ int register_adjacent_delay = _abs(rlevel_byte[4].delay - rlevel_byte[5].delay);
+ /* Registered dimm topology routes from the center. */
+ rlevel_nonseq_errors += nonsequential_delays(rlevel_byte, 0, 3+ecc_ena,
+ maximum_adjacent_rlevel_delay_increment);
+ rlevel_nonseq_errors += nonsequential_delays(rlevel_byte, 5, 7+ecc_ena,
+ maximum_adjacent_rlevel_delay_increment);
+ // byte 5 sqerrs never gets cleared for RDIMMs
+ rlevel_byte[5].sqerrs = 0;
+ if (register_adjacent_delay > 1) {
+ /* Assess proximity of bytes on opposite sides of register */
+ rlevel_nonseq_errors += (register_adjacent_delay-1) * RLEVEL_ADJACENT_DELAY_ERROR;
+ // update byte 5 error
+ rlevel_byte[5].sqerrs += (register_adjacent_delay-1) * RLEVEL_ADJACENT_DELAY_ERROR;
+ }
+ }
+ if ((spd_dimm_type == 2) || (spd_dimm_type == 6)) { /* 2=UDIMM, 6=Mini-UDIMM */
+ /* Unbuffered dimm topology routes from end to end. */
+ rlevel_nonseq_errors += nonsequential_delays(rlevel_byte, 0, 7+ecc_ena,
+ maximum_adjacent_rlevel_delay_increment);
+ }
+ } else {
+ rlevel_nonseq_errors += nonsequential_delays(rlevel_byte, 0, 3+ecc_ena,
+ maximum_adjacent_rlevel_delay_increment);
+ }
+ } /* if (! disable_sequential_delay_check) */
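+ /* Note: for an RDIMM the register sits between bytes 4 and 5, so
+ the check above scores two separate runs (bytes 0..3+ecc and
+ 5..7+ecc) plus a proximity penalty across the register gap
+ between bytes 4 and 5; a UDIMM is scored as one end-to-end run. */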
+
+#if 0
+ // FIXME FIXME: disabled for now, it was too much...
+
+ // Calculate total errors for the rank:
+ // we do NOT add nonsequential errors if mini-[RU]DIMM or x16;
+ // mini-DIMMs and x16 devices have unusual sequence geometries.
+ // Make the final scores for them depend only on the bitmasks...
+ rlevel_rank_errors = rlevel_bitmask_errors;
+ if ((spd_dimm_type != 5) &&
+ (spd_dimm_type != 6) &&
+ (dram_width != 16))
+ {
+ rlevel_rank_errors += rlevel_nonseq_errors;
+ }
+#else
+ rlevel_rank_errors = rlevel_bitmask_errors + rlevel_nonseq_errors;
+#endif
+
+ // print original sample here only if we are not really averaging or picking best
+ // also do not print if we were redoing the NONSEQ score for using COMPUTED
+ if (!redoing_nonseq_errs && ((rlevel_avg_loops < 2) || dram_is_verbose(VBL_DEV2))) {
+ display_RL_BM(node, ddr_interface_num, rankx, rlevel_bitmask, ecc_ena);
+ display_RL_BM_scores(node, ddr_interface_num, rankx, rlevel_bitmask, ecc_ena);
+ display_RL_SEQ_scores(node, ddr_interface_num, rankx, rlevel_byte, ecc_ena);
+ display_RL_with_score(node, ddr_interface_num, lmc_rlevel_rank, rankx, rlevel_rank_errors);
+ }
+
+ if (ddr_rlevel_compute) {
+ if (!redoing_nonseq_errs) {
+ /* Recompute the delays based on the bitmask */
+ for (byte_idx = 0; byte_idx < (8+ecc_ena); ++byte_idx) {
+ if (!(ddr_interface_bytemask & (1 << byte_idx)))
+ continue;
+ update_rlevel_rank_struct(&lmc_rlevel_rank, byte_idx,
+ compute_ddr3_rlevel_delay(rlevel_bitmask[byte_idx].mstart,
+ rlevel_bitmask[byte_idx].width,
+ rlevel_ctl));
+ }
+
+ /* Override the copy of byte delays with the computed results. */
+ unpack_rlevel_settings(ddr_interface_bytemask, ecc_ena, rlevel_byte, lmc_rlevel_rank);
+
+ redoing_nonseq_errs = 1;
+ goto redo_nonseq_errs;
+
+ } else {
+ /* now print this if already printed the original sample */
+ if ((rlevel_avg_loops < 2) || dram_is_verbose(VBL_DEV2)) {
+ display_RL_with_computed(node, ddr_interface_num,
+ lmc_rlevel_rank, rankx,
+ rlevel_rank_errors);
+ }
+ }
+ } /* if (ddr_rlevel_compute) */
+
+ } // end bitmask interpretation block
+
+#if PICK_BEST_RANK_SCORE_NOT_AVG
+
+ // if it is a better (lower) score, then keep it
+ if (rlevel_rank_errors < rlevel_best_rank_score) {
+ rlevel_best_rank_score = rlevel_rank_errors;
+
+ // save the new best delays and best errors
+ for (byte_idx = 0; byte_idx < 9; ++byte_idx) {
+ rlevel_byte[byte_idx].best = rlevel_byte[byte_idx].delay;
+ rlevel_byte[byte_idx].bestsq = rlevel_byte[byte_idx].sqerrs;
+ // save bitmasks and their scores as well
+ // xlate UNPACKED index to PACKED index to get from rlevel_bitmask
+ rlevel_byte[byte_idx].bm = rlevel_bitmask[XUP(byte_idx, !!ecc_ena)].bm;
+ rlevel_byte[byte_idx].bmerrs = rlevel_bitmask[XUP(byte_idx, !!ecc_ena)].errs;
+ }
+ }
+#else /* PICK_BEST_RANK_SCORE_NOT_AVG */
+
+ /* Accumulate the total score across averaging loops for this setting */
+ debug_print("rlevel_scoreboard[rtt_nom=%d][rodt_ctl=%d][rankx=%d].score: %d [%d]\n",
+ rtt_nom, rodt_ctl, rankx, rlevel_rank_errors, average_loops);
+ rlevel_scoreboard[rtt_nom][rodt_ctl][rankx].score += rlevel_rank_errors;
+
+ /* Accumulate the delay totals and loop counts
+ necessary to compute average delay results */
+ for (byte_idx = 0; byte_idx < 9; ++byte_idx) {
+ if (rlevel_byte[byte_idx].delay != 0) { /* Don't include delay=0 in the average */
+ ++rlevel_byte[byte_idx].loop_count;
+ rlevel_byte[byte_idx].loop_total += rlevel_byte[byte_idx].delay;
+ }
+ } /* for (byte_idx = 0; byte_idx < 9; ++byte_idx) */
+#endif /* PICK_BEST_RANK_SCORE_NOT_AVG */
+
+ rlevel_rodt_errors += rlevel_rank_errors;
+
+ } /* for (average_loops = 0; average_loops < rlevel_avg_loops; average_loops++) */
+
+#if PICK_BEST_RANK_SCORE_NOT_AVG
+
+ /* We recorded the best score across the averaging loops */
+ rlevel_scoreboard[rtt_nom][rodt_ctl][rankx].score = rlevel_best_rank_score;
+
+ /* Restore the delays from the best fields that go with the best score */
+ for (byte_idx = 0; byte_idx < 9; ++byte_idx) {
+ rlevel_byte[byte_idx].delay = rlevel_byte[byte_idx].best;
+ rlevel_byte[byte_idx].sqerrs = rlevel_byte[byte_idx].bestsq;
+ }
+#else /* PICK_BEST_RANK_SCORE_NOT_AVG */
+
+ /* Compute the average score across averaging loops */
+ rlevel_scoreboard[rtt_nom][rodt_ctl][rankx].score =
+ divide_nint(rlevel_scoreboard[rtt_nom][rodt_ctl][rankx].score, rlevel_avg_loops);
+
+ /* Compute the average delay results */
+ for (byte_idx=0; byte_idx < 9; ++byte_idx) {
+ if (rlevel_byte[byte_idx].loop_count == 0)
+ rlevel_byte[byte_idx].loop_count = 1;
+ rlevel_byte[byte_idx].delay = divide_nint(rlevel_byte[byte_idx].loop_total,
+ rlevel_byte[byte_idx].loop_count);
+ }
+#endif /* PICK_BEST_RANK_SCORE_NOT_AVG */
+
+ lmc_rlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx));
+
+ pack_rlevel_settings(ddr_interface_bytemask, ecc_ena, rlevel_byte, &lmc_rlevel_rank);
+
+ if (rlevel_avg_loops > 1) {
+#if PICK_BEST_RANK_SCORE_NOT_AVG
+ // restore the "best" bitmasks and their scores for printing
+ for (byte_idx = 0; byte_idx < 9; ++byte_idx) {
+ if ((ddr_interface_bytemask & (1 << byte_idx)) == 0)
+ continue;
+ // xlate PACKED index to UNPACKED index to get from rlevel_byte
+ rlevel_bitmask[byte_idx].bm = rlevel_byte[XPU(byte_idx, !!ecc_ena)].bm;
+ rlevel_bitmask[byte_idx].errs = rlevel_byte[XPU(byte_idx, !!ecc_ena)].bmerrs;
+ }
+ // print bitmasks/scores here only for DEV // FIXME? lower VBL?
+ if (dram_is_verbose(VBL_DEV)) {
+ display_RL_BM(node, ddr_interface_num, rankx, rlevel_bitmask, ecc_ena);
+ display_RL_BM_scores(node, ddr_interface_num, rankx, rlevel_bitmask, ecc_ena);
+ display_RL_SEQ_scores(node, ddr_interface_num, rankx, rlevel_byte, ecc_ena);
+ }
+
+ display_RL_with_RODT(node, ddr_interface_num, lmc_rlevel_rank, rankx,
+ rlevel_scoreboard[rtt_nom][rodt_ctl][rankx].score,
+ print_nom_ohms, imp_values->rodt_ohms[rodt_ctl],
+ WITH_RODT_BESTSCORE);
+
+#else /* PICK_BEST_RANK_SCORE_NOT_AVG */
+ display_RL_with_average(node, ddr_interface_num, lmc_rlevel_rank, rankx,
+ rlevel_scoreboard[rtt_nom][rodt_ctl][rankx].score);
+#endif /* PICK_BEST_RANK_SCORE_NOT_AVG */
+
+ } /* if (rlevel_avg_loops > 1) */
+
+ rlevel_scoreboard[rtt_nom][rodt_ctl][rankx].setting = lmc_rlevel_rank.u;
+
+ } /* for (rodt_ctl = max_rodt_ctl; rodt_ctl >= min_rodt_ctl; --rodt_ctl) */
+ } /* for (rankx = 0; rankx < dimm_count*4; rankx++) */
+ } /* for (rtt_idx=min_rtt_nom_idx; rtt_idx<max_rtt_nom_idx; ++rtt_idx) */
+
+
+ /* Re-enable dynamic compensation settings. */
+ if (rlevel_comp_offset != 0) {
+ lmc_comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num));
+
+ lmc_comp_ctl2.s.ptune = 0;
+ lmc_comp_ctl2.s.ntune = 0;
+ lmc_comp_ctl2.s.byp = 0; /* Disable bypass mode */
+ DRAM_CSR_WRITE(node, BDK_LMCX_COMP_CTL2(ddr_interface_num), lmc_comp_ctl2.u);
+ BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num)); /* Read once */
+
+ lmc_comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num)); /* Read again */
+ ddr_print("DDR__PTUNE/DDR__NTUNE : %d/%d\n",
+ lmc_comp_ctl2.s.ddr__ptune, lmc_comp_ctl2.s.ddr__ntune);
+
+ lmc_control.u = BDK_CSR_READ(node, BDK_LMCX_CONTROL(ddr_interface_num));
+ lmc_control.s.int_zqcs_dis = saved_int_zqcs_dis; /* Restore original setting */
+ DRAM_CSR_WRITE(node, BDK_LMCX_CONTROL(ddr_interface_num), lmc_control.u);
+
+ }
+
+
+ {
+ int override_compensation = 0;
+ if ((s = lookup_env_parameter("ddr__ptune")) != NULL) {
+ saved_ddr__ptune = strtoul(s, NULL, 0);
+ override_compensation = 1;
+ }
+ if ((s = lookup_env_parameter("ddr__ntune")) != NULL) {
+ saved_ddr__ntune = strtoul(s, NULL, 0);
+ override_compensation = 1;
+ }
+ if (override_compensation) {
+ lmc_comp_ctl2.s.ptune = saved_ddr__ptune;
+ lmc_comp_ctl2.s.ntune = saved_ddr__ntune;
+
+ lmc_control.u = BDK_CSR_READ(node, BDK_LMCX_CONTROL(ddr_interface_num));
+ saved_int_zqcs_dis = lmc_control.s.int_zqcs_dis;
+ lmc_control.s.int_zqcs_dis = 1; /* Disable ZQCS while in bypass. */
+ DRAM_CSR_WRITE(node, BDK_LMCX_CONTROL(ddr_interface_num), lmc_control.u);
+
+ lmc_comp_ctl2.s.byp = 1; /* Enable bypass mode */
+ DRAM_CSR_WRITE(node, BDK_LMCX_COMP_CTL2(ddr_interface_num), lmc_comp_ctl2.u);
+ lmc_comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num)); /* Read again */
+
+ ddr_print("DDR__PTUNE/DDR__NTUNE : %d/%d\n",
+ lmc_comp_ctl2.s.ptune, lmc_comp_ctl2.s.ntune);
+ }
+ }
+ { /* Evaluation block */
+ int best_rodt_score = DEFAULT_BEST_RANK_SCORE; /* Start with an arbitrarily high score */
+ int auto_rodt_ctl = 0;
+ int auto_rtt_nom = 0;
+ int rodt_score;
+ int rodt_row_skip_mask = 0;
+
+ // just add specific RODT rows to the skip mask for DDR4 at this time...
+ if (ddr_type == DDR4_DRAM) {
+ rodt_row_skip_mask |= (1 << ddr4_rodt_ctl_34_ohm); // skip RODT row 34 ohms for all DDR4 types
+ rodt_row_skip_mask |= (1 << ddr4_rodt_ctl_40_ohm); // skip RODT row 40 ohms for all DDR4 types
+#if ADD_48_OHM_SKIP
+ rodt_row_skip_mask |= (1 << ddr4_rodt_ctl_48_ohm); // skip RODT row 48 ohms for all DDR4 types
+#endif /* ADD_48_OHM_SKIP */
+#if NOSKIP_40_48_OHM
+ // For now, do not skip RODT row 40 or 48 ohm when ddr_hertz is above 1075 MHz
+ if (ddr_hertz > 1075000000) {
+ rodt_row_skip_mask &= ~(1 << ddr4_rodt_ctl_40_ohm); // noskip RODT row 40 ohms
+ rodt_row_skip_mask &= ~(1 << ddr4_rodt_ctl_48_ohm); // noskip RODT row 48 ohms
+ }
+#endif /* NOSKIP_40_48_OHM */
+#if NOSKIP_48_STACKED
+ // For now, do not skip RODT row 48 ohm for 2Rx4 stacked die DIMMs
+ if ((is_stacked_die) && (num_ranks == 2) && (dram_width == 4)) {
+ rodt_row_skip_mask &= ~(1 << ddr4_rodt_ctl_48_ohm); // noskip RODT row 48 ohms
+ }
+#endif /* NOSKIP_48_STACKED */
+#if NOSKIP_FOR_MINI
+ // for now, leave all rows eligible when we have mini-DIMMs...
+ if ((spd_dimm_type == 5) || (spd_dimm_type == 6)) {
+ rodt_row_skip_mask = 0;
+ }
+#endif /* NOSKIP_FOR_MINI */
+#if NOSKIP_FOR_2S_1R
+ // for now, leave all rows eligible when we have a 2-slot 1-rank config
+ if ((dimm_count == 2) && (num_ranks == 1)) {
+ rodt_row_skip_mask = 0;
+ }
+#endif /* NOSKIP_FOR_2S_1R */
+ }
+
+ VB_PRT(VBL_DEV, "Evaluating Read-Leveling Scoreboard for AUTO settings.\n");
+ for (rtt_idx=min_rtt_nom_idx; rtt_idx<=max_rtt_nom_idx; ++rtt_idx) {
+ rtt_nom = imp_values->rtt_nom_table[rtt_idx];
+
+ /* When the read ODT mask is zero, the dyn_rtt_nom_mask is
+ zero and RTT_NOM will not be changing during
+ read-leveling. Since the value is fixed we only need
+ to test it once. */
+ if ((dyn_rtt_nom_mask == 0) && (rtt_idx != min_rtt_nom_idx))
+ continue;
+
+ for (rodt_ctl=max_rodt_ctl; rodt_ctl>=min_rodt_ctl; --rodt_ctl) {
+ rodt_score = 0;
+ for (rankx = 0; rankx < dimm_count * 4;rankx++) {
+ if (!(rank_mask & (1 << rankx)))
+ continue;
+ debug_print("rlevel_scoreboard[rtt_nom=%d][rodt_ctl=%d][rankx=%d].score:%d\n",
+ rtt_nom, rodt_ctl, rankx, rlevel_scoreboard[rtt_nom][rodt_ctl][rankx].score);
+ rodt_score += rlevel_scoreboard[rtt_nom][rodt_ctl][rankx].score;
+ }
+ // FIXME: do we need to skip RODT rows here, like we do below in the by-RANK settings?
+
+ /* When using automatic ODT settings use the ODT
+ settings associated with the best score for
+ all of the tested ODT combinations. */
+
+ if ((rodt_score < best_rodt_score) || // always take lower score, OR
+ ((rodt_score == best_rodt_score) && // take same score if RODT ohms are higher
+ (imp_values->rodt_ohms[rodt_ctl] > imp_values->rodt_ohms[auto_rodt_ctl])))
+ {
+ debug_print("AUTO: new best score for rodt:%d (%3d), new score:%d, previous score:%d\n",
+ rodt_ctl, imp_values->rodt_ohms[rodt_ctl], rodt_score, best_rodt_score);
+ best_rodt_score = rodt_score;
+ auto_rodt_ctl = rodt_ctl;
+ auto_rtt_nom = rtt_nom;
+ }
+ } /* for (rodt_ctl=max_rodt_ctl; rodt_ctl>=min_rodt_ctl; --rodt_ctl) */
+ } /* for (rtt_idx=min_rtt_nom_idx; rtt_idx<=max_rtt_nom_idx; ++rtt_idx) */
+
+ lmc_modereg_params1.u = BDK_CSR_READ(node, BDK_LMCX_MODEREG_PARAMS1(ddr_interface_num));
+
+ if (ddr_rtt_nom_auto) {
+ /* Store the automatically set RTT_NOM value */
+ if (dyn_rtt_nom_mask & 1) lmc_modereg_params1.s.rtt_nom_00 = auto_rtt_nom;
+ if (dyn_rtt_nom_mask & 2) lmc_modereg_params1.s.rtt_nom_01 = auto_rtt_nom;
+ if (dyn_rtt_nom_mask & 4) lmc_modereg_params1.s.rtt_nom_10 = auto_rtt_nom;
+ if (dyn_rtt_nom_mask & 8) lmc_modereg_params1.s.rtt_nom_11 = auto_rtt_nom;
+ } else {
+ /* restore the manual settings to the register */
+ lmc_modereg_params1.s.rtt_nom_00 = default_rtt_nom[0];
+ lmc_modereg_params1.s.rtt_nom_01 = default_rtt_nom[1];
+ lmc_modereg_params1.s.rtt_nom_10 = default_rtt_nom[2];
+ lmc_modereg_params1.s.rtt_nom_11 = default_rtt_nom[3];
+ }
+
+ DRAM_CSR_WRITE(node, BDK_LMCX_MODEREG_PARAMS1(ddr_interface_num), lmc_modereg_params1.u);
+ VB_PRT(VBL_DEV, "RTT_NOM %3d, %3d, %3d, %3d ohms : %x,%x,%x,%x\n",
+ imp_values->rtt_nom_ohms[lmc_modereg_params1.s.rtt_nom_11],
+ imp_values->rtt_nom_ohms[lmc_modereg_params1.s.rtt_nom_10],
+ imp_values->rtt_nom_ohms[lmc_modereg_params1.s.rtt_nom_01],
+ imp_values->rtt_nom_ohms[lmc_modereg_params1.s.rtt_nom_00],
+ lmc_modereg_params1.s.rtt_nom_11,
+ lmc_modereg_params1.s.rtt_nom_10,
+ lmc_modereg_params1.s.rtt_nom_01,
+ lmc_modereg_params1.s.rtt_nom_00);
+
+ VB_PRT(VBL_DEV, "RTT_WR %3d, %3d, %3d, %3d ohms : %x,%x,%x,%x\n",
+ imp_values->rtt_wr_ohms[EXTR_WR(lmc_modereg_params1.u, 3)],
+ imp_values->rtt_wr_ohms[EXTR_WR(lmc_modereg_params1.u, 2)],
+ imp_values->rtt_wr_ohms[EXTR_WR(lmc_modereg_params1.u, 1)],
+ imp_values->rtt_wr_ohms[EXTR_WR(lmc_modereg_params1.u, 0)],
+ EXTR_WR(lmc_modereg_params1.u, 3),
+ EXTR_WR(lmc_modereg_params1.u, 2),
+ EXTR_WR(lmc_modereg_params1.u, 1),
+ EXTR_WR(lmc_modereg_params1.u, 0));
+
+ VB_PRT(VBL_DEV, "DIC %3d, %3d, %3d, %3d ohms : %x,%x,%x,%x\n",
+ imp_values->dic_ohms[lmc_modereg_params1.s.dic_11],
+ imp_values->dic_ohms[lmc_modereg_params1.s.dic_10],
+ imp_values->dic_ohms[lmc_modereg_params1.s.dic_01],
+ imp_values->dic_ohms[lmc_modereg_params1.s.dic_00],
+ lmc_modereg_params1.s.dic_11,
+ lmc_modereg_params1.s.dic_10,
+ lmc_modereg_params1.s.dic_01,
+ lmc_modereg_params1.s.dic_00);
+
+ if (ddr_type == DDR4_DRAM) {
+ bdk_lmcx_modereg_params2_t lmc_modereg_params2;
+ /*
+ * We must read the CSR, and not depend on odt_config[odt_idx].odt_mask2,
+ * since we could have overridden values with envvars.
+ * NOTE: this corrects the printout, since the CSR is not written with the old values...
+ */
+ lmc_modereg_params2.u = BDK_CSR_READ(node, BDK_LMCX_MODEREG_PARAMS2(ddr_interface_num));
+
+ VB_PRT(VBL_DEV, "RTT_PARK %3d, %3d, %3d, %3d ohms : %x,%x,%x,%x\n",
+ imp_values->rtt_nom_ohms[lmc_modereg_params2.s.rtt_park_11],
+ imp_values->rtt_nom_ohms[lmc_modereg_params2.s.rtt_park_10],
+ imp_values->rtt_nom_ohms[lmc_modereg_params2.s.rtt_park_01],
+ imp_values->rtt_nom_ohms[lmc_modereg_params2.s.rtt_park_00],
+ lmc_modereg_params2.s.rtt_park_11,
+ lmc_modereg_params2.s.rtt_park_10,
+ lmc_modereg_params2.s.rtt_park_01,
+ lmc_modereg_params2.s.rtt_park_00);
+
+ VB_PRT(VBL_DEV, "%-45s : 0x%x,0x%x,0x%x,0x%x\n", "VREF_RANGE",
+ lmc_modereg_params2.s.vref_range_11,
+ lmc_modereg_params2.s.vref_range_10,
+ lmc_modereg_params2.s.vref_range_01,
+ lmc_modereg_params2.s.vref_range_00);
+
+ VB_PRT(VBL_DEV, "%-45s : 0x%x,0x%x,0x%x,0x%x\n", "VREF_VALUE",
+ lmc_modereg_params2.s.vref_value_11,
+ lmc_modereg_params2.s.vref_value_10,
+ lmc_modereg_params2.s.vref_value_01,
+ lmc_modereg_params2.s.vref_value_00);
+ }
+
+ lmc_comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num));
+ if (ddr_rodt_ctl_auto)
+ lmc_comp_ctl2.s.rodt_ctl = auto_rodt_ctl;
+ else
+ lmc_comp_ctl2.s.rodt_ctl = default_rodt_ctl; // back to the original setting
+ DRAM_CSR_WRITE(node, BDK_LMCX_COMP_CTL2(ddr_interface_num), lmc_comp_ctl2.u);
+ lmc_comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num));
+ VB_PRT(VBL_DEV, "Read ODT_CTL : 0x%x (%d ohms)\n",
+ lmc_comp_ctl2.s.rodt_ctl, imp_values->rodt_ohms[lmc_comp_ctl2.s.rodt_ctl]);
+
+ ////////////////// this is the start of the RANK MAJOR LOOP
+
+ for (rankx = 0; rankx < dimm_count * 4; rankx++) {
+ int best_rank_score = DEFAULT_BEST_RANK_SCORE; /* Start with an arbitrarily high score */
+ int best_rank_rtt_nom = 0;
+ //int best_rank_nom_ohms = 0;
+ int best_rank_ctl = 0;
+ int best_rank_ohms = 0;
+ int best_rankx = 0;
+
+ if (!(rank_mask & (1 << rankx)))
+ continue;
+
+ /* Use the delays associated with the best score for each individual rank */
+ VB_PRT(VBL_TME, "Evaluating Read-Leveling Scoreboard for Rank %d settings.\n", rankx);
+
+ // some of the rank-related loops below need to operate only on the ranks of a single DIMM,
+ // so create a mask for their use here
+ int dimm_rank_mask;
+ if (num_ranks == 4)
+ dimm_rank_mask = rank_mask; // should be 1111
+ else {
+ dimm_rank_mask = rank_mask & 3; // should be 01 or 11
+ if (rankx >= 2)
+ dimm_rank_mask <<= 2; // doing a rank on the second DIMM, should be 0100 or 1100
+ }
+ debug_print("DIMM rank mask: 0x%x, rank mask: 0x%x, rankx: %d\n", dimm_rank_mask, rank_mask, rankx);
+
+ ////////////////// this is the start of the BEST ROW SCORE LOOP
+
+ for (rtt_idx = min_rtt_nom_idx; rtt_idx <= max_rtt_nom_idx; ++rtt_idx) {
+ //int rtt_nom_ohms;
+ rtt_nom = imp_values->rtt_nom_table[rtt_idx];
+ //rtt_nom_ohms = imp_values->rtt_nom_ohms[rtt_nom];
+
+ /* When the read ODT mask is zero, the dyn_rtt_nom_mask is
+ zero and RTT_NOM will not be changing during
+ read-leveling. Since the value is fixed we only need
+ to test it once. */
+ if ((dyn_rtt_nom_mask == 0) && (rtt_idx != min_rtt_nom_idx))
+ continue;
+
+ debug_print("N%d.LMC%d.R%d: starting RTT_NOM %d (%d)\n",
+ node, ddr_interface_num, rankx, rtt_nom, rtt_nom_ohms);
+
+ for (rodt_ctl = max_rodt_ctl; rodt_ctl >= min_rodt_ctl; --rodt_ctl) {
+ int next_ohms = imp_values->rodt_ohms[rodt_ctl];
+
+ // skip RODT rows in mask, but *NOT* rows with too high a score;
+ // we will not use the skipped ones for printing or evaluating, but
+ // we need to allow all the non-skipped ones to be candidates for "best"
+ if (((1 << rodt_ctl) & rodt_row_skip_mask) != 0) {
+ debug_print("N%d.LMC%d.R%d: SKIPPING rodt:%d (%d) with rank_score:%d\n",
+ node, ddr_interface_num, rankx, rodt_ctl, next_ohms, next_score);
+ continue;
+ }
+ for (int orankx = 0; orankx < dimm_count * 4; orankx++) { // this is ROFFIX-0528
+ if (!(dimm_rank_mask & (1 << orankx))) // stay on the same DIMM
+ continue;
+
+ int next_score = rlevel_scoreboard[rtt_nom][rodt_ctl][orankx].score;
+
+ if (next_score > best_rank_score) // always skip a higher score
+ continue;
+ if (next_score == best_rank_score) { // if scores are equal
+ if (next_ohms < best_rank_ohms) // always skip lower ohms
+ continue;
+ if (next_ohms == best_rank_ohms) { // if same ohms
+ if (orankx != rankx) // always skip the other rank(s)
+ continue;
+ }
+ // else next_ohms are greater, always choose it
+ }
+ // else next_score is less than current best, so always choose it
+ VB_PRT(VBL_DEV2, "N%d.LMC%d.R%d: new best score: rank %d, rodt %d(%3d), new best %d, previous best %d(%d)\n",
+ node, ddr_interface_num, rankx, orankx, rodt_ctl, next_ohms, next_score,
+ best_rank_score, best_rank_ohms);
+ best_rank_score = next_score;
+ best_rank_rtt_nom = rtt_nom;
+ //best_rank_nom_ohms = rtt_nom_ohms;
+ best_rank_ctl = rodt_ctl;
+ best_rank_ohms = next_ohms;
+ best_rankx = orankx;
+ lmc_rlevel_rank.u = rlevel_scoreboard[rtt_nom][rodt_ctl][orankx].setting;
+
+ } /* for (int orankx = 0; orankx < dimm_count * 4; orankx++) */
+ } /* for (rodt_ctl = max_rodt_ctl; rodt_ctl >= min_rodt_ctl; --rodt_ctl) */
+ } /* for (rtt_idx = min_rtt_nom_idx; rtt_idx <= max_rtt_nom_idx; ++rtt_idx) */
+
+ ////////////////// this is the end of the BEST ROW SCORE LOOP
+
+ // DANGER, Will Robinson!! Abort now if we did not find a best score at all...
+ if (best_rank_score == DEFAULT_BEST_RANK_SCORE) {
+ error_print("WARNING: no best rank score found for N%d.LMC%d.R%d - resetting node...\n",
+ node, ddr_interface_num, rankx);
+ bdk_wait_usec(500000);
+ bdk_reset_chip(node);
+ }
+
+ // FIXME: relative now, but still arbitrary...
+ // halve the range if 2 DIMMs unless they are single rank...
+ int MAX_RANK_SCORE = best_rank_score;
+ MAX_RANK_SCORE += (MAX_RANK_SCORE_LIMIT / ((num_ranks > 1) ? dimm_count : 1));
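+ // For example, with two dual-rank DIMMs (dimm_count == 2, num_ranks == 2)
+ // the allowance above the best score is MAX_RANK_SCORE_LIMIT / 2; with a
+ // single DIMM, or any number of single-rank DIMMs, it is the full limit.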
+
+ if (!ecc_ena) {
+ lmc_rlevel_rank.cn83xx.byte8 = lmc_rlevel_rank.cn83xx.byte0; /* ECC is not used */
+ }
+
+ // at the end, write the best row settings to the current rank
+ DRAM_CSR_WRITE(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx), lmc_rlevel_rank.u);
+ lmc_rlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx));
+
+ bdk_lmcx_rlevel_rankx_t saved_rlevel_rank;
+ saved_rlevel_rank.u = lmc_rlevel_rank.u;
+
+ ////////////////// this is the start of the PRINT LOOP
+
+ // for pass==0, print the current rank; for pass==1, print the other rank(s)
+ // this is done because we want to show each rank's RODT values together, not interlaced
+#if COUNT_RL_CANDIDATES
+ // keep separate counts per pass - pass=0 target rank, pass=1 other rank(s) on DIMM
+ int mask_skipped[2] = {0,0};
+ int score_skipped[2] = {0,0};
+ int selected_rows[2] = {0,0};
+ int zero_scores[2] = {0,0};
+#endif /* COUNT_RL_CANDIDATES */
+ for (int pass = 0; pass < 2; pass++ ) {
+ for (int orankx = 0; orankx < dimm_count * 4; orankx++) {
+ if (!(dimm_rank_mask & (1 << orankx))) // stay on the same DIMM
+ continue;
+
+ if (((pass == 0) && (orankx != rankx)) || ((pass != 0) && (orankx == rankx)))
+ continue;
+
+ for (rtt_idx = min_rtt_nom_idx; rtt_idx <= max_rtt_nom_idx; ++rtt_idx) {
+ rtt_nom = imp_values->rtt_nom_table[rtt_idx];
+ if (dyn_rtt_nom_mask == 0) {
+ print_nom_ohms = -1;
+ if (rtt_idx != min_rtt_nom_idx)
+ continue;
+ } else {
+ print_nom_ohms = imp_values->rtt_nom_ohms[rtt_nom];
+ }
+
+ // cycle through all the RODT values...
+ for (rodt_ctl = max_rodt_ctl; rodt_ctl >= min_rodt_ctl; --rodt_ctl) {
+ bdk_lmcx_rlevel_rankx_t temp_rlevel_rank;
+ int temp_score = rlevel_scoreboard[rtt_nom][rodt_ctl][orankx].score;
+ temp_rlevel_rank.u = rlevel_scoreboard[rtt_nom][rodt_ctl][orankx].setting;
+
+ // skip RODT rows in mask, or rows with too high a score;
+ // we will not use them for printing or evaluating...
+#if COUNT_RL_CANDIDATES
+ int skip_row;
+ if ((1 << rodt_ctl) & rodt_row_skip_mask) {
+ skip_row = WITH_RODT_SKIPPING;
+ ++mask_skipped[pass];
+ } else if (temp_score > MAX_RANK_SCORE) {
+ skip_row = WITH_RODT_SKIPPING;
+ ++score_skipped[pass];
+ } else {
+ skip_row = WITH_RODT_BLANK;
+ ++selected_rows[pass];
+ if (temp_score == 0)
+ ++zero_scores[pass];
+ }
+
+#else /* COUNT_RL_CANDIDATES */
+ int skip_row = (((1 << rodt_ctl) & rodt_row_skip_mask) || (temp_score > MAX_RANK_SCORE))
+ ? WITH_RODT_SKIPPING: WITH_RODT_BLANK;
+#endif /* COUNT_RL_CANDIDATES */
+
+ // identify and print the BEST ROW when it comes up
+ if ((skip_row == WITH_RODT_BLANK) &&
+ (best_rankx == orankx) &&
+ (best_rank_rtt_nom == rtt_nom) &&
+ (best_rank_ctl == rodt_ctl))
+ {
+ skip_row = WITH_RODT_BESTROW;
+ }
+
+ display_RL_with_RODT(node, ddr_interface_num,
+ temp_rlevel_rank, orankx, temp_score,
+ print_nom_ohms,
+ imp_values->rodt_ohms[rodt_ctl],
+ skip_row);
+
+ } /* for (rodt_ctl = max_rodt_ctl; rodt_ctl >= min_rodt_ctl; --rodt_ctl) */
+ } /* for (rtt_idx=min_rtt_nom_idx; rtt_idx<=max_rtt_nom_idx; ++rtt_idx) */
+ } /* for (int orankx = 0; orankx < dimm_count * 4; orankx++) { */
+ } /* for (int pass = 0; pass < 2; pass++ ) */
+#if COUNT_RL_CANDIDATES
+ VB_PRT(VBL_TME, "N%d.LMC%d.R%d: RLROWS: selected %d+%d, zero_scores %d+%d, mask_skipped %d+%d, score_skipped %d+%d\n",
+ node, ddr_interface_num, rankx,
+ selected_rows[0], selected_rows[1],
+ zero_scores[0], zero_scores[1],
+ mask_skipped[0], mask_skipped[1],
+ score_skipped[0], score_skipped[1]);
+#endif /* COUNT_RL_CANDIDATES */
+
+ ////////////////// this is the end of the PRINT LOOP
+
+ // now evaluate which bytes need adjusting
+ uint64_t byte_msk = 0x3f; // 6-bit fields
+ uint64_t best_byte, new_byte, temp_byte, orig_best_byte;
+
+ uint64_t rank_best_bytes[9]; // collect the new byte values; first init with current best for neighbor use
+ for (int byte_idx = 0, byte_sh = 0; byte_idx < 8+ecc_ena; byte_idx++, byte_sh += 6) {
+ rank_best_bytes[byte_idx] = (lmc_rlevel_rank.u >> byte_sh) & byte_msk;
+ }
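+ // Each per-byte delay is a 6-bit field packed into the rank CSR:
+ // byte_idx 0 sits in bits 5:0, byte_idx 1 in bits 11:6, and so on,
+ // e.g. rank_best_bytes[2] = (lmc_rlevel_rank.u >> 12) & 0x3f.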
+
+ ////////////////// this is the start of the BEST BYTE LOOP
+
+ for (int byte_idx = 0, byte_sh = 0; byte_idx < 8+ecc_ena; byte_idx++, byte_sh += 6) {
+ best_byte = orig_best_byte = rank_best_bytes[byte_idx];
+
+ ////////////////// this is the start of the BEST BYTE AVERAGING LOOP
+
+ // validate the initial "best" byte by looking at the average of the unskipped byte-column entries
+ // we want to do this before we go further, so we can try to start with a better initial value
+ // this is the so-called "BESTBUY" patch set
+ int sum = 0, count = 0;
+
+ for (rtt_idx = min_rtt_nom_idx; rtt_idx <= max_rtt_nom_idx; ++rtt_idx) {
+ rtt_nom = imp_values->rtt_nom_table[rtt_idx];
+ if ((dyn_rtt_nom_mask == 0) && (rtt_idx != min_rtt_nom_idx))
+ continue;
+
+ for (rodt_ctl = max_rodt_ctl; rodt_ctl >= min_rodt_ctl; --rodt_ctl) {
+ bdk_lmcx_rlevel_rankx_t temp_rlevel_rank;
+ int temp_score;
+ for (int orankx = 0; orankx < dimm_count * 4; orankx++) { // average over all the ranks
+ if (!(dimm_rank_mask & (1 << orankx))) // stay on the same DIMM
+ continue;
+ temp_score = rlevel_scoreboard[rtt_nom][rodt_ctl][orankx].score;
+ // skip RODT rows in mask, or rows with too high a score;
+ // we will not use them for printing or evaluating...
+
+ if (!((1 << rodt_ctl) & rodt_row_skip_mask) &&
+ (temp_score <= MAX_RANK_SCORE))
+ {
+ temp_rlevel_rank.u = rlevel_scoreboard[rtt_nom][rodt_ctl][orankx].setting;
+ temp_byte = (temp_rlevel_rank.u >> byte_sh) & byte_msk;
+ sum += temp_byte;
+ count++;
+ }
+ } /* for (int orankx = 0; orankx < dimm_count * 4; orankx++) */
+ } /* for (rodt_ctl = max_rodt_ctl; rodt_ctl >= min_rodt_ctl; --rodt_ctl) */
+ } /* for (rtt_idx=min_rtt_nom_idx; rtt_idx<=max_rtt_nom_idx; ++rtt_idx) */
+
+ ////////////////// this is the end of the BEST BYTE AVERAGING LOOP
+
+
+ uint64_t avg_byte = divide_nint(sum, count); // FIXME: validate count and sum??
+ int avg_diff = (int)best_byte - (int)avg_byte;
+ new_byte = best_byte;
+ if (avg_diff != 0) {
+ // bump best up/dn by 1, not necessarily all the way to avg
+ new_byte = best_byte + ((avg_diff > 0) ? -1: 1);
+ }
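+ // Worked example: best_byte 12 and avg_byte 14 give avg_diff -2,
+ // so new_byte = 12 + 1 = 13; each adjustment moves one step toward
+ // the average, never all the way in one jump.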
+
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: START: Byte %d: best %d is different by %d from average %d, using %d.\n",
+ node, ddr_interface_num, rankx,
+ byte_idx, (int)best_byte, avg_diff, (int)avg_byte, (int)new_byte);
+ best_byte = new_byte;
+
+ // At this point best_byte is either:
+ // 1. the original byte-column value from the best scoring RODT row, OR
+ // 2. that value bumped toward the average of all the byte-column values
+ //
+ // best_byte will not change from here on...
+
+ ////////////////// this is the start of the BEST BYTE COUNTING LOOP
+
+ // NOTE: we do this next loop separately from above, because we count relative to "best_byte"
+ // which may have been modified by the above averaging operation...
+ //
+ // Also, the above only moves toward the average by +- 1, so that we will always have a count
+ // of at least 1 for the original best byte, even if all the others are further away and not counted;
+ // this ensures we will go back to the original if no others are counted...
+ // FIXME: this could cause issues if the range of values for a byte-lane is too disparate...
+ int count_less = 0, count_same = 0, count_more = 0;
+#if FAILSAFE_CHECK
+ uint64_t count_byte = new_byte; // save the value we will count around
+#endif /* FAILSAFE_CHECK */
+#if RANK_MAJORITY
+ int rank_less = 0, rank_same = 0, rank_more = 0;
+#endif /* RANK_MAJORITY */
+
+ for (rtt_idx = min_rtt_nom_idx; rtt_idx <= max_rtt_nom_idx; ++rtt_idx) {
+ rtt_nom = imp_values->rtt_nom_table[rtt_idx];
+ if ((dyn_rtt_nom_mask == 0) && (rtt_idx != min_rtt_nom_idx))
+ continue;
+
+ for (rodt_ctl = max_rodt_ctl; rodt_ctl >= min_rodt_ctl; --rodt_ctl) {
+ bdk_lmcx_rlevel_rankx_t temp_rlevel_rank;
+ int temp_score;
+ for (int orankx = 0; orankx < dimm_count * 4; orankx++) { // count over all the ranks
+ if (!(dimm_rank_mask & (1 << orankx))) // stay on the same DIMM
+ continue;
+ temp_score = rlevel_scoreboard[rtt_nom][rodt_ctl][orankx].score;
+ // skip RODT rows in mask, or rows with too high a score;
+ // we will not use them for printing or evaluating...
+ if (((1 << rodt_ctl) & rodt_row_skip_mask) ||
+ (temp_score > MAX_RANK_SCORE))
+ {
+ continue;
+ }
+ temp_rlevel_rank.u = rlevel_scoreboard[rtt_nom][rodt_ctl][orankx].setting;
+ temp_byte = (temp_rlevel_rank.u >> byte_sh) & byte_msk;
+
+ if (temp_byte == 0) // do not count it if illegal
+ continue;
+ else if (temp_byte == best_byte)
+ count_same++;
+ else if (temp_byte == best_byte - 1)
+ count_less++;
+ else if (temp_byte == best_byte + 1)
+ count_more++;
+ // else do not count anything more than 1 away from the best
+#if RANK_MAJORITY
+ // FIXME? count is relative to best_byte; should it be rank-based?
+ if (orankx != rankx) // rank counts only on main rank
+ continue;
+ else if (temp_byte == best_byte)
+ rank_same++;
+ else if (temp_byte == best_byte - 1)
+ rank_less++;
+ else if (temp_byte == best_byte + 1)
+ rank_more++;
+#endif /* RANK_MAJORITY */
+ } /* for (int orankx = 0; orankx < dimm_count * 4; orankx++) */
+ } /* for (rodt_ctl = max_rodt_ctl; rodt_ctl >= min_rodt_ctl; --rodt_ctl) */
+ } /* for (rtt_idx=min_rtt_nom_idx; rtt_idx<=max_rtt_nom_idx; ++rtt_idx) */
+
+#if RANK_MAJORITY
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: COUNT: Byte %d: orig %d now %d, more %d same %d less %d (%d/%d/%d)\n",
+ node, ddr_interface_num, rankx,
+ byte_idx, (int)orig_best_byte, (int)best_byte,
+ count_more, count_same, count_less,
+ rank_more, rank_same, rank_less);
+#else /* RANK_MAJORITY */
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: COUNT: Byte %d: orig %d now %d, more %d same %d less %d\n",
+ node, ddr_interface_num, rankx,
+ byte_idx, (int)orig_best_byte, (int)best_byte,
+ count_more, count_same, count_less);
+#endif /* RANK_MAJORITY */
+ ////////////////// this is the end of the BEST BYTE COUNTING LOOP
+
+ // choose the new byte value
+ // we need to check that there is no gap greater than 2 between adjacent bytes
+ // (adjacency depends on DIMM type)
+ // use the neighbor value to help decide
+ // initially, the rank_best_bytes[] will contain values from the chosen lowest score rank
+ new_byte = 0;
+
+ // neighbor is index-1 unless we are index 0 or index 8 (ECC)
+ int neighbor = (byte_idx == 8) ? 3 : ((byte_idx == 0) ? 1 : byte_idx - 1);
+ uint64_t neigh_byte = rank_best_bytes[neighbor];
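+ // e.g. byte 0 is compared against byte 1, byte 5 against byte 4,
+ // and the ECC lane (byte 8) against byte 3; all other lanes use
+ // the lane just below them.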
+
+
+ // can go up or down or stay the same, so look at a numeric average to help
+ new_byte = divide_nint(((count_more * (best_byte + 1)) +
+ (count_same * (best_byte + 0)) +
+ (count_less * (best_byte - 1))),
+ max(1, (count_more + count_same + count_less)));
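+ // Worked example: best_byte 10 with count_more 2, count_same 3,
+ // count_less 1 gives (2*11 + 3*10 + 1*9) / 6 = 61/6, which
+ // divide_nint() rounds to 10, so new_byte stays at 10.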
+
+ // use neighbor to help choose with average
+ if ((byte_idx > 0) && (_abs((int)neigh_byte - (int)new_byte) > 2)) // but not for byte 0
+ {
+ uint64_t avg_pick = new_byte;
+ if ((new_byte - best_byte) != 0)
+ new_byte = best_byte; // back to best, average did not get better
+ else // avg was the same, still too far, now move it towards the neighbor
+ new_byte += (neigh_byte > new_byte) ? 1 : -1;
+
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: AVERAGE: Byte %d: neighbor %d too different %d from average %d, picking %d.\n",
+ node, ddr_interface_num, rankx,
+ byte_idx, neighbor, (int)neigh_byte, (int)avg_pick, (int)new_byte);
+ }
+#if MAJORITY_OVER_AVG
+ // NOTE:
+ // For now, we let the neighbor processing above trump the new simple majority processing here.
+ // This is mostly because we have seen no smoking gun for a bad neighbor choice (yet?).
+ // Also note that we will ALWAYS be using byte 0 majority, because of the if clause above.
+ else {
+ // majority is dependent on the counts, which are relative to best_byte, so start there
+ uint64_t maj_byte = best_byte;
+ if ((count_more > count_same) && (count_more > count_less)) {
+ maj_byte++;
+ } else if ((count_less > count_same) && (count_less > count_more)) {
+ maj_byte--;
+ }
+ if (maj_byte != new_byte) {
+ // print only when majority choice is different from average
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: MAJORTY: Byte %d: picking majority of %d over average %d.\n",
+ node, ddr_interface_num, rankx,
+ byte_idx, (int)maj_byte, (int)new_byte);
+ new_byte = maj_byte;
+ } else {
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: AVERAGE: Byte %d: picking average of %d.\n",
+ node, ddr_interface_num, rankx,
+ byte_idx, (int)new_byte);
+ }
+#if RANK_MAJORITY
+ // rank majority is dependent on the rank counts, which are relative to best_byte,
+ // so start there, and adjust according to the rank counts majority
+ uint64_t rank_maj = best_byte;
+ if ((rank_more > rank_same) && (rank_more > rank_less)) {
+ rank_maj++;
+ } else if ((rank_less > rank_same) && (rank_less > rank_more)) {
+ rank_maj--;
+ }
+ int rank_sum = rank_more + rank_same + rank_less;
+
+ // now, let rank majority possibly rule over the current new_byte however we got it
+ if (rank_maj != new_byte) { // only if different
+ // Here is where we decide whether to completely apply RANK_MAJORITY or not
+ // FIXME: For the moment, we do it ONLY when running 2-slot configs
+ // FIXME? or when rank_sum is big enough?
+ if ((dimm_count > 1) || (rank_sum > 2)) {
+ // print only when rank majority choice is selected
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: RANKMAJ: Byte %d: picking %d over %d.\n",
+ node, ddr_interface_num, rankx,
+ byte_idx, (int)rank_maj, (int)new_byte);
+ new_byte = rank_maj;
+ } else { // FIXME: print some info when we could have chosen RANKMAJ but did not
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: RANKMAJ: Byte %d: NOT using %d over %d (best=%d,sum=%d).\n",
+ node, ddr_interface_num, rankx,
+ byte_idx, (int)rank_maj, (int)new_byte,
+ (int)best_byte, rank_sum);
+ }
+ }
+#endif /* RANK_MAJORITY */
+ }
+#else
+ else {
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: AVERAGE: Byte %d: picking average of %d.\n",
+ node, ddr_interface_num, rankx,
+ byte_idx, (int)new_byte);
+ }
+#endif
+#if FAILSAFE_CHECK
+ // one last check:
+ // if new_byte is still count_byte, BUT there was no count for that value, DO SOMETHING!!!
+ // FIXME: go back to original best byte from the best row
+ if ((new_byte == count_byte) && (count_same == 0)) {
+ new_byte = orig_best_byte;
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: FAILSAF: Byte %d: going back to original %d.\n",
+ node, ddr_interface_num, rankx,
+ byte_idx, (int)new_byte);
+ }
+#endif /* FAILSAFE_CHECK */
+#if PERFECT_BITMASK_COUNTING
+ // Look at counts for "perfect" bitmasks if we had any for this byte-lane.
+ // Remember, we only counted for DDR4, so zero means none or DDR3, and we bypass this...
+ if (rank_perfect_counts[rankx].total[byte_idx] > 0) {
+ // FIXME: should be more error checking, look for ties, etc...
+ int i, delay_count, delay_value, delay_max;
+ uint32_t ties;
+ delay_value = -1;
+ delay_max = 0;
+ ties = 0;
+
+ for (i = 0; i < 32; i++) {
+ delay_count = rank_perfect_counts[rankx].count[byte_idx][i];
+ if (delay_count > 0) { // only look closer if there are any...
+ if (delay_count > delay_max) {
+ delay_max = delay_count;
+ delay_value = i;
+ ties = 0; // reset ties to none
+ } else if (delay_count == delay_max) {
+ if (ties == 0)
+ ties = 1UL << delay_value; // put in original value
+ ties |= 1UL << i; // add new value
+ }
+ }
+ } /* for (i = 0; i < 32; i++) */
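+ // Example: if delays 5 and 9 both reach the same maximum count,
+ // delay_value holds the first one seen (5) and
+ // ties == (1UL << 5) | (1UL << 9); the tie is resolved below.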
+
+ if (delay_value >= 0) {
+ if (ties != 0) {
+ if (ties & (1UL << (int)new_byte)) {
+ // leave choice as new_byte if any tied one is the same...
+
+
+ delay_value = (int)new_byte;
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: PERFECT: Byte %d: TIES (0x%x) INCLUDED %d (%d)\n",
+ node, ddr_interface_num, rankx, byte_idx, ties, (int)new_byte, delay_max);
+ } else {
+ // FIXME: should choose a perfect one!!!
+ // FIXME: for now, leave the choice as new_byte
+ delay_value = (int)new_byte;
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: PERFECT: Byte %d: TIES (0x%x) OMITTED %d (%d)\n",
+ node, ddr_interface_num, rankx, byte_idx, ties, (int)new_byte, delay_max);
+ }
+ } /* if (ties != 0) */
+
+ if (delay_value != (int)new_byte) {
+ delay_count = rank_perfect_counts[rankx].count[byte_idx][(int)new_byte];
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: PERFECT: Byte %d: DIFF from %d (%d), USING %d (%d)\n",
+ node, ddr_interface_num, rankx, byte_idx, (int)new_byte,
+ delay_count, delay_value, delay_max);
+ new_byte = (uint64_t)delay_value; // FIXME: make this optional via envvar?
+ } else {
+ debug_print("N%d.LMC%d.R%d: PERFECT: Byte %d: SAME as %d (%d)\n",
+ node, ddr_interface_num, rankx, byte_idx, new_byte, delay_max);
+ }
+ }
+ } /* if (rank_perfect_counts[rankx].total[byte_idx] > 0) */
+ else {
+ if (ddr_type == DDR4_DRAM) { // only report when DDR4
+ // FIXME: remove or increase VBL for this output...
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: PERFECT: Byte %d: ZERO perfect bitmasks\n",
+ node, ddr_interface_num, rankx, byte_idx);
+ }
+ } /* if (rank_perfect_counts[rankx].total[byte_idx] > 0) */
+#endif /* PERFECT_BITMASK_COUNTING */
+
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: SUMMARY: Byte %d: %s: orig %d now %d, more %d same %d less %d, using %d\n",
+ node, ddr_interface_num, rankx,
+ byte_idx, "AVG", (int)orig_best_byte,
+ (int)best_byte, count_more, count_same, count_less, (int)new_byte);
+
+ // update the byte with the new value (NOTE: orig value in the CSR may not be current "best")
+ lmc_rlevel_rank.u &= ~(byte_msk << byte_sh);
+ lmc_rlevel_rank.u |= (new_byte << byte_sh);
+
+ rank_best_bytes[byte_idx] = new_byte; // save new best for neighbor use
+
+ } /* for (byte_idx = 0; byte_idx < 8+ecc_ena; byte_idx++) */
+
+ ////////////////// this is the end of the BEST BYTE LOOP
+
+ if (saved_rlevel_rank.u != lmc_rlevel_rank.u) {
+ DRAM_CSR_WRITE(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx), lmc_rlevel_rank.u);
+ lmc_rlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx));
+ debug_print("Adjusting Read-Leveling per-RANK settings.\n");
+ } else {
+ debug_print("Not Adjusting Read-Leveling per-RANK settings.\n");
+ }
+ display_RL_with_final(node, ddr_interface_num, lmc_rlevel_rank, rankx);
+
+#if RLEXTRAS_PATCH
+#define RLEVEL_RANKX_EXTRAS_INCR 4
+ if ((rank_mask & 0x0F) != 0x0F) { // if there are unused entries to be filled
+ bdk_lmcx_rlevel_rankx_t temp_rlevel_rank = lmc_rlevel_rank; // copy the current rank
+ int byte, delay;
+ if (rankx < 3) {
+ debug_print("N%d.LMC%d.R%d: checking for RLEVEL_RANK unused entries.\n",
+ node, ddr_interface_num, rankx);
+ for (byte = 0; byte < 9; byte++) { // modify the copy in prep for writing to empty slot(s)
+ delay = get_rlevel_rank_struct(&temp_rlevel_rank, byte) + RLEVEL_RANKX_EXTRAS_INCR;
+ if (delay > (int)RLEVEL_BYTE_MSK) delay = RLEVEL_BYTE_MSK;
+ update_rlevel_rank_struct(&temp_rlevel_rank, byte, delay);
+ }
+ if (rankx == 0) { // if rank 0, write rank 1 and rank 2 here if empty
+ if (!(rank_mask & (1<<1))) { // check that rank 1 is empty
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: writing RLEVEL_RANK unused entry R%d.\n",
+ node, ddr_interface_num, rankx, 1);
+ DRAM_CSR_WRITE(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, 1), temp_rlevel_rank.u);
+ }
+ if (!(rank_mask & (1<<2))) { // check that rank 2 is empty
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: writing RLEVEL_RANK unused entry R%d.\n",
+ node, ddr_interface_num, rankx, 2);
+ DRAM_CSR_WRITE(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, 2), temp_rlevel_rank.u);
+ }
+ }
+ // if ranks 0, 1 or 2, write rank 3 here if empty
+ if (!(rank_mask & (1<<3))) { // check that rank 3 is empty
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: writing RLEVEL_RANK unused entry R%d.\n",
+ node, ddr_interface_num, rankx, 3);
+ DRAM_CSR_WRITE(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, 3), temp_rlevel_rank.u);
+ }
+ }
+ }
+#endif /* RLEXTRAS_PATCH */
+ } /* for (rankx = 0; rankx < dimm_count * 4; rankx++) */
+
+ ////////////////// this is the end of the RANK MAJOR LOOP
+
+ } /* Evaluation block */
+ } /* while(rlevel_debug_loops--) */
+
+ lmc_control.s.ddr2t = save_ddr2t;
+ DRAM_CSR_WRITE(node, BDK_LMCX_CONTROL(ddr_interface_num), lmc_control.u);
+ lmc_control.u = BDK_CSR_READ(node, BDK_LMCX_CONTROL(ddr_interface_num));
+ ddr_print("%-45s : %6d\n", "DDR2T", lmc_control.s.ddr2t); /* Display final 2T value */
+
+
+ perform_ddr_init_sequence(node, rank_mask, ddr_interface_num);
+
+ for (rankx = 0; rankx < dimm_count * 4; rankx++) {
+ uint64_t value;
+ int parameter_set = 0;
+ if (!(rank_mask & (1 << rankx)))
+ continue;
+
+ lmc_rlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx));
+
+ for (i=0; i<9; ++i) {
+ if ((s = lookup_env_parameter("ddr%d_rlevel_rank%d_byte%d", ddr_interface_num, rankx, i)) != NULL) {
+ parameter_set |= 1;
+ value = strtoul(s, NULL, 0);
+
+ update_rlevel_rank_struct(&lmc_rlevel_rank, i, value);
+ }
+ }
+
+ if ((s = lookup_env_parameter_ull("ddr%d_rlevel_rank%d", ddr_interface_num, rankx)) != NULL) {
+ parameter_set |= 1;
+ value = strtoull(s, NULL, 0);
+ lmc_rlevel_rank.u = value;
+ }
+
+
+ if (bdk_is_platform(BDK_PLATFORM_ASIM)) {
+ parameter_set |= 1;
+
+ lmc_rlevel_rank.cn83xx.byte8 = 3;
+ lmc_rlevel_rank.cn83xx.byte7 = 3;
+ lmc_rlevel_rank.cn83xx.byte6 = 3;
+ lmc_rlevel_rank.cn83xx.byte5 = 3;
+ lmc_rlevel_rank.cn83xx.byte4 = 3;
+ lmc_rlevel_rank.cn83xx.byte3 = 3;
+ lmc_rlevel_rank.cn83xx.byte2 = 3;
+ lmc_rlevel_rank.cn83xx.byte1 = 3;
+ lmc_rlevel_rank.cn83xx.byte0 = 3;
+ }
+
+ if (parameter_set) {
+ DRAM_CSR_WRITE(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx), lmc_rlevel_rank.u);
+ display_RL(node, ddr_interface_num, lmc_rlevel_rank, rankx);
+ }
+ }
+ }
+
+ /* Workaround Trcd overflow by using Additive latency. */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X))
+ {
+ bdk_lmcx_modereg_params0_t lmc_modereg_params0;
+ bdk_lmcx_timing_params1_t lmc_timing_params1;
+ bdk_lmcx_control_t lmc_control;
+ int rankx;
+
+ lmc_timing_params1.u = BDK_CSR_READ(node, BDK_LMCX_TIMING_PARAMS1(ddr_interface_num));
+ lmc_modereg_params0.u = BDK_CSR_READ(node, BDK_LMCX_MODEREG_PARAMS0(ddr_interface_num));
+ lmc_control.u = BDK_CSR_READ(node, BDK_LMCX_CONTROL(ddr_interface_num));
+
+ if (lmc_timing_params1.s.trcd == 0) {
+ ddr_print("Workaround Trcd overflow by using Additive latency.\n");
+ lmc_timing_params1.s.trcd = 12; /* Hard code this to 12 and enable additive latency */
+ lmc_modereg_params0.s.al = 2; /* CL-2 */
+ lmc_control.s.pocas = 1;
+
+ ddr_print("MODEREG_PARAMS0 : 0x%016lx\n", lmc_modereg_params0.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_MODEREG_PARAMS0(ddr_interface_num), lmc_modereg_params0.u);
+ ddr_print("TIMING_PARAMS1 : 0x%016lx\n", lmc_timing_params1.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_TIMING_PARAMS1(ddr_interface_num), lmc_timing_params1.u);
+
+ ddr_print("LMC_CONTROL : 0x%016lx\n", lmc_control.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_CONTROL(ddr_interface_num), lmc_control.u);
+
+ for (rankx = 0; rankx < dimm_count * 4; rankx++) {
+ if (!(rank_mask & (1 << rankx)))
+ continue;
+
+ ddr4_mrw(node, ddr_interface_num, rankx, -1, 1, 0); /* MR1 */
+ }
+ }
+ }
+
+ // this is here just for output, to allow a check of the Deskew settings one last time...
+ if (! disable_deskew_training) {
+ deskew_counts_t dsk_counts;
+ VB_PRT(VBL_TME, "N%d.LMC%d: Check Deskew Settings before software Write-Leveling.\n",
+ node, ddr_interface_num);
+ Validate_Read_Deskew_Training(node, rank_mask, ddr_interface_num, &dsk_counts, VBL_TME); // TME for FINAL
+ }
+
+
+ /* Workaround Errata 26304 (T88@2.0)
+
+ When the CSRs LMCX_DLL_CTL3[WR_DESKEW_ENA] = 1 AND
+ LMCX_PHY_CTL2[DQS[0..8]_DSK_ADJ] > 4, set
+ LMCX_EXT_CONFIG[DRIVE_ENA_BPRCH] = 1.
+ */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X)) { // only for 88XX pass 2, not 81xx or 83xx
+ bdk_lmcx_dll_ctl3_t dll_ctl3;
+ bdk_lmcx_phy_ctl2_t phy_ctl2;
+ bdk_lmcx_ext_config_t ext_config;
+ int increased_dsk_adj = 0;
+ int byte;
+
+ phy_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_PHY_CTL2(ddr_interface_num));
+ ext_config.u = BDK_CSR_READ(node, BDK_LMCX_EXT_CONFIG(ddr_interface_num));
+ dll_ctl3.u = BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(ddr_interface_num));
+
+ for (byte = 0; byte < 8; ++byte) {
+ if (!(ddr_interface_bytemask&(1<<byte)))
+ continue;
+ increased_dsk_adj |= (((phy_ctl2.u >> (byte*3)) & 0x7) > 4);
+ }
+
+ if ((dll_ctl3.s.wr_deskew_ena == 1) && increased_dsk_adj) {
+ ext_config.s.drive_ena_bprch = 1;
+ DRAM_CSR_WRITE(node, BDK_LMCX_EXT_CONFIG(ddr_interface_num),
+ ext_config.u);
+ }
+ }
+
+ /*
+ * 6.9.13 DRAM Vref Training for DDR4
+ *
+ * This includes software write-leveling
+ */
+
+ { // Software Write-Leveling block
+
+ /* Try to determine/optimize write-level delays experimentally. */
+#pragma pack(push,1)
+ bdk_lmcx_wlevel_rankx_t lmc_wlevel_rank;
+ bdk_lmcx_wlevel_rankx_t lmc_wlevel_rank_hw_results;
+ int byte;
+ int delay;
+ int rankx = 0;
+ int active_rank;
+#if !DISABLE_SW_WL_PASS_2
+ bdk_lmcx_rlevel_rankx_t lmc_rlevel_rank;
+ int sw_wlevel_offset = 1;
+#endif
+ int sw_wlevel_enable = 1; /* FIX... Should be customizable. */
+ int interfaces;
+ int measured_vref_flag;
+ typedef enum {
+ WL_ESTIMATED = 0, /* HW/SW wleveling failed. Results
+ estimated. */
+ WL_HARDWARE = 1, /* H/W wleveling succeeded */
+ WL_SOFTWARE = 2, /* S/W wleveling passed 2 contiguous
+ settings. */
+ WL_SOFTWARE1 = 3, /* S/W wleveling passed 1 marginal
+ setting. */
+ } sw_wl_status_t;
+
+ static const char *wl_status_strings[] = {
+ "(e)",
+ " ",
+ " ",
+ "(1)"
+ };
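+ // indexed by sw_wl_status_t: "(e)" marks an estimated delay, "(1)" a
+ // single marginal setting; hardware and 2-setting software results get
+ // a blank suffix in the per-byte Wlevel Rank summary printed below.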
+ int sw_wlevel_hw_default = 1; // FIXME: make H/W assist the default now
+#pragma pack(pop)
+
+ if ((s = lookup_env_parameter("ddr_sw_wlevel_hw")) != NULL) {
+ sw_wlevel_hw_default = !!strtoul(s, NULL, 0);
+ }
+
+ // cannot use hw-assist when doing 32-bit
+ if (! ddr_interface_64b) {
+ sw_wlevel_hw_default = 0;
+ }
+
+ if ((s = lookup_env_parameter("ddr_software_wlevel")) != NULL) {
+ sw_wlevel_enable = strtoul(s, NULL, 0);
+ }
+
+#if SWL_WITH_HW_ALTS_CHOOSE_SW
+ // Choose the SW algo for SWL if any HWL alternates were found
+ // NOTE: we have to do this here, and for all ranks, since HW-assist including ECC requires ECC to be enabled
+ for (rankx = 0; rankx < dimm_count * 4; rankx++) {
+ if (!sw_wlevel_enable)
+ break;
+ if (!(rank_mask & (1 << rankx)))
+ continue;
+
+ // if we are doing HW-assist, and there are alternates, switch to SW-algorithm for all
+ if (sw_wlevel_hw && hwl_alts[rankx].hwl_alt_mask) {
+ ddr_print("N%d.LMC%d.R%d: Using SW algorithm for write-leveling this rank\n",
+ node, ddr_interface_num, rankx);
+ sw_wlevel_hw_default = 0;
+ break;
+ }
+ } /* for (rankx = 0; rankx < dimm_count * 4; rankx++) */
+#endif
+
+ /* Get the measured_vref setting from the config, check for an override... */
+ /* NOTE: measured_vref=1 (ON) means force use of MEASURED Vref... */
+ // NOTE: measured VREF can only be done for DDR4
+ if (ddr_type == DDR4_DRAM) {
+ measured_vref_flag = custom_lmc_config->measured_vref;
+ if ((s = lookup_env_parameter("ddr_measured_vref")) != NULL) {
+ measured_vref_flag = !!strtoul(s, NULL, 0);
+ }
+ } else {
+ measured_vref_flag = 0; // OFF for DDR3
+ }
+
+ /* Ensure ECC is disabled for DRAM tests when using the SW algo; otherwise leave it untouched */
+ if (!sw_wlevel_hw_default) {
+ lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(ddr_interface_num));
+ lmc_config.s.ecc_ena = 0;
+ DRAM_CSR_WRITE(node, BDK_LMCX_CONFIG(ddr_interface_num), lmc_config.u);
+ }
+
+#if USE_L2_WAYS_LIMIT
+ limit_l2_ways(node, 0, 0); /* Disable l2 sets for DRAM testing */
+#endif
+
+ /* We need to track absolute rank number, as well as how many
+ ** active ranks we have. Two single rank DIMMs show up as
+ ** ranks 0 and 2, but only 2 ranks are active. */
+ active_rank = 0;
+
+ interfaces = __builtin_popcount(ddr_interface_mask);
+
+#define VREF_RANGE1_LIMIT 0x33 // range1 is valid for 0x00 - 0x32
+#define VREF_RANGE2_LIMIT 0x18 // range2 is valid for 0x00 - 0x17
+// full window is valid for 0x00 to 0x4A
+// let 0x00 - 0x17 be range2, 0x18 - 0x4a be range 1
+#define VREF_LIMIT (VREF_RANGE1_LIMIT + VREF_RANGE2_LIMIT)
+#define VREF_FINAL (VREF_LIMIT - 1)
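+// Example mapping of the combined index used below: vref_value 0x10
+// (< 0x18) selects range 2 with value 0x10; vref_value 0x20 selects
+// range 1 with value 0x20 - 0x18 = 0x08. The loop runs one extra
+// iteration at VREF_FINAL (0x4A) to apply the final chosen setting.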
+
+ for (rankx = 0; rankx < dimm_count * 4; rankx++) {
+ uint64_t rank_addr;
+ int vref_value, final_vref_value, final_vref_range = 0;
+ int start_vref_value = 0, computed_final_vref_value = -1;
+ char best_vref_values_count, vref_values_count;
+ char best_vref_values_start, vref_values_start;
+
+ int bytes_failed;
+ sw_wl_status_t byte_test_status[9];
+ sw_wl_status_t sw_wl_rank_status = WL_HARDWARE;
+ int sw_wl_failed = 0;
+ int sw_wlevel_hw = sw_wlevel_hw_default;
+
+ if (!sw_wlevel_enable)
+ break;
+
+ if (!(rank_mask & (1 << rankx)))
+ continue;
+
+ ddr_print("N%d.LMC%d.R%d: Performing Software Write-Leveling %s\n",
+ node, ddr_interface_num, rankx,
+ (sw_wlevel_hw) ? "with H/W assist" : "with S/W algorithm");
+
+ if ((ddr_type == DDR4_DRAM) && (num_ranks != 4)) {
+ // always compute when we can...
+ computed_final_vref_value = compute_vref_value(node, ddr_interface_num, rankx,
+ dimm_count, num_ranks, imp_values,
+ is_stacked_die);
+ if (!measured_vref_flag) // but only use it if allowed
+ start_vref_value = VREF_FINAL; // skip all the measured Vref processing, just the final setting
+ }
+
+ /* Save off the h/w wl results */
+ lmc_wlevel_rank_hw_results.u = BDK_CSR_READ(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx));
+
+ vref_values_count = 0;
+ vref_values_start = 0;
+ best_vref_values_count = 0;
+ best_vref_values_start = 0;
+
+ /* Loop one extra time using the Final Vref value. */
+ for (vref_value = start_vref_value; vref_value < VREF_LIMIT; ++vref_value) {
+ if (ddr_type == DDR4_DRAM) {
+ if (vref_value < VREF_FINAL) {
+ int vrange, vvalue;
+ if (vref_value < VREF_RANGE2_LIMIT) {
+ vrange = 1; vvalue = vref_value;
+ } else {
+ vrange = 0; vvalue = vref_value - VREF_RANGE2_LIMIT;
+ }
+ set_vref(node, ddr_interface_num, rankx,
+ vrange, vvalue);
+ } else { /* if (vref_value < VREF_FINAL) */
+ /* Print the final Vref value first. */
+
+ /* Always print the computed first if it's valid */
+ if (computed_final_vref_value >= 0) {
+ ddr_print("N%d.LMC%d.R%d: Vref Computed Summary :"
+ " %2d (0x%02x)\n",
+ node, ddr_interface_num,
+ rankx, computed_final_vref_value,
+ computed_final_vref_value);
+ }
+ if (!measured_vref_flag) { // setup to use the computed
+ best_vref_values_count = 1;
+ final_vref_value = computed_final_vref_value;
+ } else { // setup to use the measured
+ if (best_vref_values_count > 0) {
+ best_vref_values_count = max(best_vref_values_count, 2);
+#if 0
+ // NOTE: this already adjusts VREF via calculating 40% rather than 50%
+ final_vref_value = best_vref_values_start + divide_roundup((best_vref_values_count-1)*4,10);
+ ddr_print("N%d.LMC%d.R%d: Vref Training Summary :"
+ " %2d <----- %2d (0x%02x) -----> %2d range: %2d\n",
+ node, ddr_interface_num, rankx, best_vref_values_start,
+ final_vref_value, final_vref_value,
+ best_vref_values_start+best_vref_values_count-1,
+ best_vref_values_count-1);
+#else
+ final_vref_value = best_vref_values_start + divide_nint(best_vref_values_count - 1, 2);
+ if (final_vref_value < VREF_RANGE2_LIMIT) {
+ final_vref_range = 1;
+ } else {
+ final_vref_range = 0; final_vref_value -= VREF_RANGE2_LIMIT;
+ }
+ {
+ int vvlo = best_vref_values_start;
+ int vrlo;
+ if (vvlo < VREF_RANGE2_LIMIT) {
+ vrlo = 2;
+ } else {
+ vrlo = 1; vvlo -= VREF_RANGE2_LIMIT;
+ }
+
+ int vvhi = best_vref_values_start + best_vref_values_count - 1;
+ int vrhi;
+ if (vvhi < VREF_RANGE2_LIMIT) {
+ vrhi = 2;
+ } else {
+ vrhi = 1; vvhi -= VREF_RANGE2_LIMIT;
+ }
+ ddr_print("N%d.LMC%d.R%d: Vref Training Summary :"
+ " 0x%02x/%1d <----- 0x%02x/%1d -----> 0x%02x/%1d, range: %2d\n",
+ node, ddr_interface_num, rankx,
+ vvlo, vrlo,
+ final_vref_value, final_vref_range + 1,
+ vvhi, vrhi,
+ best_vref_values_count-1);
+ }
+#endif
+
+ } else {
+ /* If nothing passed use the default Vref value for this rank */
+ bdk_lmcx_modereg_params2_t lmc_modereg_params2;
+ lmc_modereg_params2.u = BDK_CSR_READ(node, BDK_LMCX_MODEREG_PARAMS2(ddr_interface_num));
+ final_vref_value = (lmc_modereg_params2.u >> (rankx * 10 + 3)) & 0x3f;
+ final_vref_range = (lmc_modereg_params2.u >> (rankx * 10 + 9)) & 0x01;
+
+ ddr_print("N%d.LMC%d.R%d: Vref Using Default :"
+ " %2d <----- %2d (0x%02x) -----> %2d, range%1d\n",
+ node, ddr_interface_num, rankx,
+ final_vref_value, final_vref_value,
+ final_vref_value, final_vref_value, final_vref_range+1);
+ }
+ }
+
+ // allow override
+ if ((s = lookup_env_parameter("ddr%d_vref_value_%1d%1d",
+ ddr_interface_num, !!(rankx&2), !!(rankx&1))) != NULL) {
+ final_vref_value = strtoul(s, NULL, 0);
+ }
+
+ set_vref(node, ddr_interface_num, rankx, final_vref_range, final_vref_value);
+
+ } /* if (vref_value < VREF_FINAL) */
+ } /* if (ddr_type == DDR4_DRAM) */
+
+ lmc_wlevel_rank.u = lmc_wlevel_rank_hw_results.u; /* Restore the saved value */
+
+ for (byte = 0; byte < 9; ++byte)
+ byte_test_status[byte] = WL_ESTIMATED;
+
+ if (wlevel_bitmask_errors == 0) {
+
+ /* Determine address of DRAM to test for pass 1 of software write leveling. */
+ rank_addr = active_rank * (1ull << (pbank_lsb - bunk_enable + (interfaces/2)));
+ // FIXME: these now put in by test_dram_byte()
+ //rank_addr |= (ddr_interface_num<<7); /* Map address into proper interface */
+ //rank_addr = bdk_numa_get_address(node, rank_addr);
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: Active Rank %d Address: 0x%lx\n",
+ node, ddr_interface_num, rankx, active_rank, rank_addr);
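+ // Example (hypothetical geometry): with pbank_lsb 33, bunk_enable 1
+ // and interfaces 2, the shift is 33 - 1 + 1 = 33, so active_rank 1
+ // tests address 1ull << 33 (8 GiB) before the interface and node
+ // bits are folded in by the test routine.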
+
+ { // start parallel write-leveling block for delay high-order bits
+ int errors = 0;
+ int byte_delay[9];
+ uint64_t bytemask;
+ int bytes_todo;
+
+ if (ddr_interface_64b) {
+ bytes_todo = (sw_wlevel_hw) ? ddr_interface_bytemask : 0xFF;
+ bytemask = ~0ULL;
+ } else { // 32-bit, must be using SW algo, only data bytes
+ bytes_todo = 0x0f;
+ bytemask = 0x00000000ffffffffULL;
+ }
+
+ for (byte = 0; byte < 9; ++byte) {
+ if (!(bytes_todo & (1 << byte))) {
+ byte_delay[byte] = 0;
+ } else {
+ byte_delay[byte] = get_wlevel_rank_struct(&lmc_wlevel_rank, byte);
+ }
+ } /* for (byte = 0; byte < 9; ++byte) */
+
+#define WL_MIN_NO_ERRORS_COUNT 3 // FIXME? three passes without errors
+ int no_errors_count = 0;
+
+ // Change verbosity if using measured vs computed VREF or DDR3
+ // measured goes many times through SWL, computed and DDR3 only once
+ // so we want the EXHAUSTED messages at NORM for computed and DDR3,
+ // and at DEV2 for measured, just for completeness
+ int vbl_local = (measured_vref_flag) ? VBL_DEV2 : VBL_NORM;
+ uint64_t bad_bits[2];
+#if ENABLE_SW_WLEVEL_UTILIZATION
+ uint64_t sum_dram_dclk = 0, sum_dram_ops = 0;
+ uint64_t start_dram_dclk, stop_dram_dclk;
+ uint64_t start_dram_ops, stop_dram_ops;
+#endif
+ do {
+ // write the current set of WL delays
+ DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx), lmc_wlevel_rank.u);
+ lmc_wlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx));
+
+ bdk_watchdog_poke();
+
+ // do the test
+ if (sw_wlevel_hw) {
+ errors = run_best_hw_patterns(node, ddr_interface_num, rank_addr,
+ DBTRAIN_TEST, bad_bits);
+ errors &= bytes_todo; // keep only the ones we are still doing
+ } else {
+#if ENABLE_SW_WLEVEL_UTILIZATION
+ start_dram_dclk = BDK_CSR_READ(node, BDK_LMCX_DCLK_CNT(ddr_interface_num));
+ start_dram_ops = BDK_CSR_READ(node, BDK_LMCX_OPS_CNT(ddr_interface_num));
+#endif
+#if USE_ORIG_TEST_DRAM_BYTE
+ errors = test_dram_byte(node, ddr_interface_num, rank_addr, bytemask, bad_bits);
+#else
+ errors = dram_tuning_mem_xor(node, ddr_interface_num, rank_addr, bytemask, bad_bits);
+#endif
+#if ENABLE_SW_WLEVEL_UTILIZATION
+ stop_dram_dclk = BDK_CSR_READ(node, BDK_LMCX_DCLK_CNT(ddr_interface_num));
+ stop_dram_ops = BDK_CSR_READ(node, BDK_LMCX_OPS_CNT(ddr_interface_num));
+ sum_dram_dclk += stop_dram_dclk - start_dram_dclk;
+ sum_dram_ops += stop_dram_ops - start_dram_ops;
+#endif
+ }
+
+ VB_PRT(VBL_DEV2, "N%d.LMC%d.R%d: S/W write-leveling TEST: returned 0x%x\n",
+ node, ddr_interface_num, rankx, errors);
+
+ // remember, errors will not be returned for byte-lanes that have maxed out...
+ if (errors == 0) {
+ no_errors_count++; // bump
+ if (no_errors_count > 1) // bypass check/update completely
+ continue; // to end of do-while
+ } else
+ no_errors_count = 0; // reset
+
+ // check errors by byte
+ for (byte = 0; byte < 9; ++byte) {
+ if (!(bytes_todo & (1 << byte)))
+ continue;
+
+ delay = byte_delay[byte];
+ if (errors & (1 << byte)) { // yes, an error in this byte lane
+ debug_print(" byte %d delay %2d Errors\n", byte, delay);
+ // since this byte had an error, we move to the next delay value, unless maxed out
+ delay += 8; // incr by 8 to do only delay high-order bits
+ if (delay < 32) {
+ update_wlevel_rank_struct(&lmc_wlevel_rank, byte, delay);
+ debug_print(" byte %d delay %2d New\n", byte, delay);
+ byte_delay[byte] = delay;
+ } else { // reached max delay, maybe really done with this byte
+#if SWL_TRY_HWL_ALT
+ if (!measured_vref_flag && // consider an alt only for computed VREF and
+ (hwl_alts[rankx].hwl_alt_mask & (1 << byte))) // if an alt exists...
+ {
+ int bad_delay = delay & 0x6; // just orig low-order bits (2:1)
+ delay = hwl_alts[rankx].hwl_alt_delay[byte]; // yes, use it
+ hwl_alts[rankx].hwl_alt_mask &= ~(1 << byte); // clear that flag
+ update_wlevel_rank_struct(&lmc_wlevel_rank, byte, delay);
+ byte_delay[byte] = delay;
+ debug_print(" byte %d delay %2d ALTERNATE\n", byte, delay);
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: SWL: Byte %d: %d FAIL, trying ALTERNATE %d\n",
+ node, ddr_interface_num, rankx, byte, bad_delay, delay);
+
+ } else
+#endif /* SWL_TRY_HWL_ALT */
+ {
+ unsigned bits_bad;
+ if (byte < 8) {
+ bytemask &= ~(0xffULL << (8*byte)); // test no longer, remove from byte mask
+ bits_bad = (unsigned)((bad_bits[0] >> (8 * byte)) & 0xffUL);
+ } else {
+ bits_bad = (unsigned)(bad_bits[1] & 0xffUL);
+ }
+ bytes_todo &= ~(1 << byte); // remove from bytes to do
+ byte_test_status[byte] = WL_ESTIMATED; // make sure this is set for this case
+ debug_print(" byte %d delay %2d Exhausted\n", byte, delay);
+ VB_PRT(vbl_local, "N%d.LMC%d.R%d: SWL: Byte %d (0x%02x): delay %d EXHAUSTED \n",
+ node, ddr_interface_num, rankx, byte, bits_bad, delay);
+ }
+ }
+ } else { // no error, stay with current delay, but keep testing it...
+ debug_print(" byte %d delay %2d Passed\n", byte, delay);
+ byte_test_status[byte] = WL_HARDWARE; // change status
+ }
+
+ } /* for (byte = 0; byte < 9; ++byte) */
+
+ } while (no_errors_count < WL_MIN_NO_ERRORS_COUNT);
+
+#if ENABLE_SW_WLEVEL_UTILIZATION
+ if (! sw_wlevel_hw) {
+ uint64_t percent_x10;
+ if (sum_dram_dclk == 0)
+ sum_dram_dclk = 1;
+ percent_x10 = sum_dram_ops * 1000 / sum_dram_dclk;
+ ddr_print("N%d.LMC%d.R%d: ops %lu, cycles %lu, used %lu.%lu%%\n",
+ node, ddr_interface_num, rankx, sum_dram_ops, sum_dram_dclk,
+ percent_x10 / 10, percent_x10 % 10);
+ }
+#endif
+ if (errors) {
+ debug_print("End WLEV_64 while loop: vref_value %d(0x%x), errors 0x%02x\n",
+ vref_value, vref_value, errors);
+ }
+ } // end parallel write-leveling block for delay high-order bits
+
+ if (sw_wlevel_hw) { // if we used HW-assist, we did the ECC byte when approp.
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: HW-assist SWL - no ECC estimate!!!\n",
+ node, ddr_interface_num, rankx);
+ goto no_ecc_estimate;
+ }
+
+ if ((ddr_interface_bytemask & 0xff) == 0xff) {
+ if (use_ecc) {
+ int save_byte8 = lmc_wlevel_rank.s.byte8; // save original HW delay
+ byte_test_status[8] = WL_HARDWARE; /* say it is H/W delay value */
+
+ if ((save_byte8 != lmc_wlevel_rank.s.byte3) &&
+ (save_byte8 != lmc_wlevel_rank.s.byte4))
+ {
+ // try to calculate a workable delay using SW bytes 3 and 4 and HW byte 8
+ int test_byte8 = save_byte8;
+ int test_byte8_error;
+ int byte8_error = 0x1f;
+ int adder;
+ int avg_bytes = divide_nint(lmc_wlevel_rank.s.byte3+lmc_wlevel_rank.s.byte4, 2);
+ for (adder = 0; adder <= 32; adder += 8) {
+ test_byte8_error = _abs((adder+save_byte8) - avg_bytes);
+ if (test_byte8_error < byte8_error) {
+ byte8_error = test_byte8_error;
+ test_byte8 = save_byte8 + adder;
+ }
+ }
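+ // Worked example: save_byte8 4, byte3 20, byte4 18 give avg_bytes 19;
+ // the candidates 4, 12, 20, 28, 36 have errors 15, 7, 1, 9, 17, so
+ // test_byte8 becomes 20 (save_byte8 plus adder 16).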
+
+#if SW_WL_CHECK_PATCH
+ // only do the check if we are not using measured VREF
+ if (!measured_vref_flag) {
+ test_byte8 &= ~1; /* Use only even settings, rounding down... */
+
+ // do validity check on the calculated ECC delay value
+ // this depends on the DIMM type
+ if (spd_rdimm) { // RDIMM
+ if (spd_dimm_type != 5) { // but not mini-RDIMM
+ // it can be > byte4, but should never be > byte3
+ if (test_byte8 > lmc_wlevel_rank.s.byte3) {
+ byte_test_status[8] = WL_ESTIMATED; /* say it is still estimated */
+ }
+ }
+ } else { // UDIMM
+ if ((test_byte8 < lmc_wlevel_rank.s.byte3) ||
+ (test_byte8 > lmc_wlevel_rank.s.byte4))
+ { // should never be outside the byte 3-4 range
+ byte_test_status[8] = WL_ESTIMATED; /* say it is still estimated */
+ }
+ }
+ /*
+ * Report whenever the calculation appears bad.
+ * This happens if some of the original values were off, or unexpected geometry
+ * from DIMM type, or custom circuitry (NIC225E, I am looking at you!).
+ * We will trust the calculated value, and depend on later testing to catch
+ * any instances when that value is truly bad.
+ */
+ if (byte_test_status[8] == WL_ESTIMATED) { // ESTIMATED means there may be an issue
+ ddr_print("N%d.LMC%d.R%d: SWL: (%cDIMM): calculated ECC delay unexpected (%d/%d/%d)\n",
+ node, ddr_interface_num, rankx, (spd_rdimm?'R':'U'),
+ lmc_wlevel_rank.s.byte4, test_byte8, lmc_wlevel_rank.s.byte3);
+ byte_test_status[8] = WL_HARDWARE;
+ }
+ }
+#endif /* SW_WL_CHECK_PATCH */
+ lmc_wlevel_rank.s.byte8 = test_byte8 & ~1; /* Use only even settings */
+ }
+
+ if (lmc_wlevel_rank.s.byte8 != save_byte8) {
+ /* Change the status if s/w adjusted the delay */
+ byte_test_status[8] = WL_SOFTWARE; /* Estimated delay */
+ }
+ } else {
+ byte_test_status[8] = WL_HARDWARE; /* H/W delay value */
+ lmc_wlevel_rank.s.byte8 = lmc_wlevel_rank.s.byte0; /* ECC is not used */
+ }
+ } else { /* if ((ddr_interface_bytemask & 0xff) == 0xff) */
+ if (use_ecc) {
+ /* Estimate the ECC byte delay */
+ lmc_wlevel_rank.s.byte4 |= (lmc_wlevel_rank.s.byte3 & 0x38); // add hi-order to b4
+ if ((lmc_wlevel_rank.s.byte4 & 0x06) < (lmc_wlevel_rank.s.byte3 & 0x06)) // orig b4 < orig b3
+ lmc_wlevel_rank.s.byte4 += 8; // must be next clock
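+ // e.g. byte3 0x1A and byte4 0x04: byte4 |= (0x1A & 0x38) gives 0x1C;
+ // its fine bits (0x1C & 0x06 = 4) are not below byte3's (2), so no
+ // extra clock bump is applied.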
+ } else {
+ lmc_wlevel_rank.s.byte4 = lmc_wlevel_rank.s.byte0; /* ECC is not used */
+ }
+ /* Change the status if s/w adjusted the delay */
+ byte_test_status[4] = WL_SOFTWARE; /* Estimated delay */
+ } /* if ((ddr_interface_bytemask & 0xff) == 0xff) */
+ } /* if (wlevel_bitmask_errors == 0) */
+
+ no_ecc_estimate:
+
+ bytes_failed = 0;
+ for (byte = 0; byte < 9; ++byte) {
+ /* Don't accumulate errors for untested bytes. */
+ if (!(ddr_interface_bytemask & (1 << byte)))
+ continue;
+ bytes_failed += (byte_test_status[byte] == WL_ESTIMATED);
+ }
+
+ /* Vref training loop is only used for DDR4 */
+ if (ddr_type != DDR4_DRAM)
+ break;
+
+ if (bytes_failed == 0) {
+ if (vref_values_count == 0) {
+ vref_values_start = vref_value;
+ }
+ ++vref_values_count;
+ if (vref_values_count > best_vref_values_count) {
+ best_vref_values_count = vref_values_count;
+ best_vref_values_start = vref_values_start;
+ debug_print("N%d.LMC%d.R%d: Vref Training (%2d) : 0x%02x <----- ???? -----> 0x%02x\n",
+ node, ddr_interface_num,
+ rankx, vref_value, best_vref_values_start,
+ best_vref_values_start+best_vref_values_count-1);
+ }
+ } else {
+ vref_values_count = 0;
+ debug_print("N%d.LMC%d.R%d: Vref Training (%2d) : failed\n",
+ node, ddr_interface_num,
+ rankx, vref_value);
+ }
+ } /* for (vref_value=0; vref_value<VREF_LIMIT; ++vref_value) */
+
+ /* Determine address of DRAM to test for pass 2 and final test of software write leveling. */
+ rank_addr = active_rank * (1ull << (pbank_lsb - bunk_enable + (interfaces/2)));
+ rank_addr |= (ddr_interface_num<<7); /* Map address into proper interface */
+ rank_addr = bdk_numa_get_address(node, rank_addr);
+ debug_print("N%d.LMC%d.R%d: Active Rank %d Address: 0x%lx\n",
+ node, ddr_interface_num, rankx, active_rank, rank_addr);
+
+ int errors;
+
+ if (bytes_failed) {
+
+#if !DISABLE_SW_WL_PASS_2
+
+ ddr_print("N%d.LMC%d.R%d: Starting SW Write-leveling pass 2\n",
+ node, ddr_interface_num, rankx);
+ sw_wl_rank_status = WL_SOFTWARE;
+
+ /* If previous s/w fixups failed then retry using s/w write-leveling. */
+ if (wlevel_bitmask_errors == 0) {
+ /* h/w succeeded but previous s/w fixups failed. So retry s/w. */
+ debug_print("N%d.LMC%d.R%d: Retrying software Write-Leveling.\n",
+ node, ddr_interface_num, rankx);
+ }
+
+ { // start parallel write-leveling block for delay low-order bits
+ int byte_delay[8];
+ int byte_passed[8];
+ uint64_t bytemask;
+ uint64_t bitmask;
+ int wl_offset;
+ int bytes_todo;
+
+ for (byte = 0; byte < 8; ++byte) {
+ byte_passed[byte] = 0;
+ }
+
+ bytes_todo = ddr_interface_bytemask;
+
+ for (wl_offset = sw_wlevel_offset; wl_offset >= 0; --wl_offset) {
+ debug_print("Starting wl_offset for-loop: %d\n", wl_offset);
+
+ bytemask = 0;
+
+ for (byte = 0; byte < 8; ++byte) {
+ byte_delay[byte] = 0;
+ if (!(bytes_todo & (1 << byte))) // this does not contain fully passed bytes
+ continue;
+
+ byte_passed[byte] = 0; // reset across passes if not fully passed
+ update_wlevel_rank_struct(&lmc_wlevel_rank, byte, 0); // all delays start at 0
+ bitmask = ((!ddr_interface_64b) && (byte == 4)) ? 0x0f: 0xff;
+ bytemask |= bitmask << (8*byte); // set the bytes bits in the bytemask
+ } /* for (byte = 0; byte < 8; ++byte) */
+
+ while (bytemask != 0) { // start a pass if there is any byte lane to test
+
+ debug_print("Starting bytemask while-loop: 0x%lx\n", bytemask);
+
+ // write this set of WL delays
+ DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx), lmc_wlevel_rank.u);
+ lmc_wlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx));
+
+ bdk_watchdog_poke();
+
+ // do the test
+ if (sw_wlevel_hw)
+ errors = run_best_hw_patterns(node, ddr_interface_num, rank_addr,
+ DBTRAIN_TEST, NULL);
+ else
+ errors = test_dram_byte(node, ddr_interface_num, rank_addr, bytemask, NULL);
+
+ debug_print("SWL pass 2: test_dram_byte returned 0x%x\n", errors);
+
+ // check errors by byte
+ for (byte = 0; byte < 8; ++byte) {
+ if (!(bytes_todo & (1 << byte)))
+ continue;
+
+ delay = byte_delay[byte];
+ if (errors & (1 << byte)) { // yes, an error
+ debug_print(" byte %d delay %2d Errors\n", byte, delay);
+ byte_passed[byte] = 0;
+ } else { // no error
+ byte_passed[byte] += 1;
+ if (byte_passed[byte] == (1 + wl_offset)) { /* Look for consecutive working settings */
+ debug_print(" byte %d delay %2d FULLY Passed\n", byte, delay);
+ if (wl_offset == 1) {
+ byte_test_status[byte] = WL_SOFTWARE;
+ } else if (wl_offset == 0) {
+ byte_test_status[byte] = WL_SOFTWARE1;
+ }
+ bytemask &= ~(0xffULL << (8*byte)); // test no longer, remove from byte mask this pass
+ bytes_todo &= ~(1 << byte); // remove completely from concern
+ continue; // on to the next byte, bypass delay updating!!
+ } else {
+ debug_print(" byte %d delay %2d Passed\n", byte, delay);
+ }
+ }
+ // error or no, here we move to the next delay value for this byte, unless done all delays
+ // only a byte that has "fully passed" will bypass around this,
+ delay += 2;
+ if (delay < 32) {
+ update_wlevel_rank_struct(&lmc_wlevel_rank, byte, delay);
+ debug_print(" byte %d delay %2d New\n", byte, delay);
+ byte_delay[byte] = delay;
+ } else {
+ // reached max delay, done with this byte
+ debug_print(" byte %d delay %2d Exhausted\n", byte, delay);
+ bytemask &= ~(0xffULL << (8*byte)); // test no longer, remove from byte mask this pass
+ }
+ } /* for (byte = 0; byte < 8; ++byte) */
+ debug_print("End of for-loop: bytemask 0x%lx\n", bytemask);
+
+ } /* while (bytemask != 0) */
+ } /* for (wl_offset = sw_wlevel_offset; wl_offset >= 0; --wl_offset) */
+
+ for (byte = 0; byte < 8; ++byte) {
+ // any bytes left in bytes_todo did not pass
+ if (bytes_todo & (1 << byte)) {
+ /* Last resort. Use Rlevel settings to estimate
+ Wlevel if software write-leveling fails */
+ debug_print("Using RLEVEL as WLEVEL estimate for byte %d\n", byte);
+ lmc_rlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx));
+ rlevel_to_wlevel(&lmc_rlevel_rank, &lmc_wlevel_rank, byte);
+ }
+ } /* for (byte = 0; byte < 8; ++byte) */
+
+ sw_wl_failed = (bytes_todo != 0);
+
+ } // end parallel write-leveling block for delay low-order bits
+
+ if (use_ecc) {
+ /* ECC byte has to be estimated. Take the average of the two surrounding bytes. */
+ int test_byte8 = divide_nint(lmc_wlevel_rank.s.byte3
+ + lmc_wlevel_rank.s.byte4
+ + 2 /* round-up*/ , 2);
+ lmc_wlevel_rank.s.byte8 = test_byte8 & ~1; /* Use only even settings */
+ byte_test_status[8] = WL_ESTIMATED; /* Estimated delay */
+ } else {
+ byte_test_status[8] = WL_HARDWARE; /* H/W delay value */
+ lmc_wlevel_rank.s.byte8 = lmc_wlevel_rank.s.byte0; /* ECC is not used */
+ }
+
+ /* Set delays for unused bytes to match byte 0. */
+ for (byte=0; byte<8; ++byte) {
+ if ((ddr_interface_bytemask & (1 << byte)))
+ continue;
+ update_wlevel_rank_struct(&lmc_wlevel_rank, byte,
+ lmc_wlevel_rank.s.byte0);
+ byte_test_status[byte] = WL_SOFTWARE;
+ }
+#else /* !DISABLE_SW_WL_PASS_2 */
+ // FIXME? the big hammer, did not even try SW WL pass2, assume only chip reset will help
+ ddr_print("N%d.LMC%d.R%d: S/W write-leveling pass 1 failed\n",
+ node, ddr_interface_num, rankx);
+ sw_wl_failed = 1;
+#endif /* !DISABLE_SW_WL_PASS_2 */
+
+ } else { /* if (bytes_failed) */
+
+ // SW WL pass 1 was OK, write the settings
+ DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx), lmc_wlevel_rank.u);
+ lmc_wlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx));
+
+#if SW_WL_CHECK_PATCH
+ // do validity check on the delay values by running the test 1 more time...
+ // FIXME: we really need to check the ECC byte setting here as well,
+ // so we need to enable ECC for this test!!!
+ // if there are any errors, claim SW WL failure
+ {
+ uint64_t datamask = (ddr_interface_64b) ? 0xffffffffffffffffULL : 0x00000000ffffffffULL;
+
+ // do the test
+ if (sw_wlevel_hw) {
+ errors = run_best_hw_patterns(node, ddr_interface_num, rank_addr,
+ DBTRAIN_TEST, NULL) & 0x0ff;
+ } else {
+#if USE_ORIG_TEST_DRAM_BYTE
+ errors = test_dram_byte(node, ddr_interface_num, rank_addr, datamask, NULL);
+#else
+ errors = dram_tuning_mem_xor(node, ddr_interface_num, rank_addr, datamask, NULL);
+#endif
+ }
+
+ if (errors) {
+ ddr_print("N%d.LMC%d.R%d: Wlevel Rank Final Test errors 0x%x\n",
+ node, ddr_interface_num, rankx, errors);
+ sw_wl_failed = 1;
+ }
+ }
+#endif /* SW_WL_CHECK_PATCH */
+
+ } /* if (bytes_failed) */
+
+ // FIXME? dump the WL settings, so we get more of a clue as to what happened where
+ ddr_print("N%d.LMC%d.R%d: Wlevel Rank %#4x, 0x%016lX : %2d%3s %2d%3s %2d%3s %2d%3s %2d%3s %2d%3s %2d%3s %2d%3s %2d%3s %s\n",
+ node, ddr_interface_num, rankx,
+ lmc_wlevel_rank.s.status,
+ lmc_wlevel_rank.u,
+ lmc_wlevel_rank.s.byte8, wl_status_strings[byte_test_status[8]],
+ lmc_wlevel_rank.s.byte7, wl_status_strings[byte_test_status[7]],
+ lmc_wlevel_rank.s.byte6, wl_status_strings[byte_test_status[6]],
+ lmc_wlevel_rank.s.byte5, wl_status_strings[byte_test_status[5]],
+ lmc_wlevel_rank.s.byte4, wl_status_strings[byte_test_status[4]],
+ lmc_wlevel_rank.s.byte3, wl_status_strings[byte_test_status[3]],
+ lmc_wlevel_rank.s.byte2, wl_status_strings[byte_test_status[2]],
+ lmc_wlevel_rank.s.byte1, wl_status_strings[byte_test_status[1]],
+ lmc_wlevel_rank.s.byte0, wl_status_strings[byte_test_status[0]],
+ (sw_wl_rank_status == WL_HARDWARE) ? "" : "(s)"
+ );
+
+ // finally, check for fatal conditions: either chip reset right here, or return error flag
+ if (((ddr_type == DDR4_DRAM) && (best_vref_values_count == 0)) || sw_wl_failed) {
+ if (!ddr_disable_chip_reset) { // do chip RESET
+ error_print("INFO: Short memory test indicates a retry is needed on N%d.LMC%d.R%d. Resetting node...\n",
+ node, ddr_interface_num, rankx);
+ bdk_wait_usec(500000);
+ bdk_reset_chip(node);
+ } else { // return error flag so LMC init can be retried...
+ ddr_print("INFO: Short memory test indicates a retry is needed on N%d.LMC%d.R%d. Restarting LMC init...\n",
+ node, ddr_interface_num, rankx);
+ return 0; // 0 indicates restart possible...
+ }
+ }
+
+ active_rank++;
+ } /* for (rankx = 0; rankx < dimm_count * 4; rankx++) */
+
+ // Finalize the write-leveling settings
+ for (rankx = 0; rankx < dimm_count * 4;rankx++) {
+ uint64_t value;
+ int parameter_set = 0;
+ if (!(rank_mask & (1 << rankx)))
+ continue;
+
+ lmc_wlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx));
+
+ if (bdk_is_platform(BDK_PLATFORM_ASIM)) {
+ parameter_set |= 1;
+
+ lmc_wlevel_rank.s.byte8 = 0;
+ lmc_wlevel_rank.s.byte7 = 0;
+ lmc_wlevel_rank.s.byte6 = 0;
+ lmc_wlevel_rank.s.byte5 = 0;
+ lmc_wlevel_rank.s.byte4 = 0;
+ lmc_wlevel_rank.s.byte3 = 0;
+ lmc_wlevel_rank.s.byte2 = 0;
+ lmc_wlevel_rank.s.byte1 = 0;
+ lmc_wlevel_rank.s.byte0 = 0;
+ }
+
+ for (i=0; i<9; ++i) {
+ if ((s = lookup_env_parameter("ddr%d_wlevel_rank%d_byte%d", ddr_interface_num, rankx, i)) != NULL) {
+ parameter_set |= 1;
+ value = strtoul(s, NULL, 0);
+
+ update_wlevel_rank_struct(&lmc_wlevel_rank, i, value);
+ }
+ }
+
+ if ((s = lookup_env_parameter_ull("ddr%d_wlevel_rank%d", ddr_interface_num, rankx)) != NULL) {
+ parameter_set |= 1;
+ value = strtoull(s, NULL, 0);
+ lmc_wlevel_rank.u = value;
+ }
+
+ if (parameter_set) {
+ DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx), lmc_wlevel_rank.u);
+ lmc_wlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx));
+ display_WL(node, ddr_interface_num, lmc_wlevel_rank, rankx);
+ }
+#if WLEXTRAS_PATCH
+ if ((rank_mask & 0x0F) != 0x0F) { // if there are unused entries to be filled
+ if (rankx < 3) {
+ debug_print("N%d.LMC%d.R%d: checking for WLEVEL_RANK unused entries.\n",
+ node, ddr_interface_num, rankx);
+ if (rankx == 0) { // if rank 0, write ranks 1 and 2 here if empty
+ if (!(rank_mask & (1<<1))) { // check that rank 1 is empty
+ DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, 1), lmc_wlevel_rank.u);
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: writing WLEVEL_RANK unused entry R%d.\n",
+ node, ddr_interface_num, rankx, 1);
+ }
+ if (!(rank_mask & (1<<2))) { // check that rank 2 is empty
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: writing WLEVEL_RANK unused entry R%d.\n",
+ node, ddr_interface_num, rankx, 2);
+ DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, 2), lmc_wlevel_rank.u);
+ }
+ }
+ // if rank 0, 1 or 2, write rank 3 here if empty
+ if (!(rank_mask & (1<<3))) { // check that rank 3 is empty
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: writing WLEVEL_RANK unused entry R%d.\n",
+ node, ddr_interface_num, rankx, 3);
+ DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, 3), lmc_wlevel_rank.u);
+ }
+ }
+ }
+#endif /* WLEXTRAS_PATCH */
+
+ } /* for (rankx = 0; rankx < dimm_count * 4;rankx++) */
+
+ /* Restore the ECC configuration */
+ if (!sw_wlevel_hw_default) {
+ lmc_config.s.ecc_ena = use_ecc;
+ DRAM_CSR_WRITE(node, BDK_LMCX_CONFIG(ddr_interface_num), lmc_config.u);
+ }
+
+#if USE_L2_WAYS_LIMIT
+ /* Restore the l2 set configuration */
+ if ((s = lookup_env_parameter("limit_l2_ways")) != NULL) {
+ int ways = strtoul(s, NULL, 10);
+ limit_l2_ways(node, ways, 1);
+ } else {
+ limit_l2_ways(node, bdk_l2c_get_num_assoc(node), 0);
+ }
+#endif
+
+ } // End Software Write-Leveling block
+
+#if ENABLE_DISPLAY_MPR_PAGE
+ if (ddr_type == DDR4_DRAM) {
+ Display_MPR_Page(node, rank_mask, ddr_interface_num, dimm_count, 2);
+ Display_MPR_Page(node, rank_mask, ddr_interface_num, dimm_count, 0);
+ }
+#endif
+
+#if 1 // was #ifdef CAVIUM_ONLY
+ {
+ int i;
+ int setting[9];
+ bdk_lmcx_dll_ctl3_t ddr_dll_ctl3;
+ ddr_dll_ctl3.u = BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(ddr_interface_num));
+
+ for (i=0; i<9; ++i) {
+ SET_DDR_DLL_CTL3(dll90_byte_sel, ENCODE_DLL90_BYTE_SEL(i));
+ DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(ddr_interface_num), ddr_dll_ctl3.u);
+ BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(ddr_interface_num));
+ ddr_dll_ctl3.u = BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(ddr_interface_num));
+ setting[i] = GET_DDR_DLL_CTL3(dll90_setting);
+ debug_print("%d. LMC%d_DLL_CTL3[%d] = %016lx %d\n", i, ddr_interface_num,
+ GET_DDR_DLL_CTL3(dll90_byte_sel), ddr_dll_ctl3.u, setting[i]);
+ }
+
+ VB_PRT(VBL_DEV, "N%d.LMC%d: %-36s : %5d %5d %5d %5d %5d %5d %5d %5d %5d\n",
+ node, ddr_interface_num, "DLL90 Setting 8:0",
+ setting[8], setting[7], setting[6], setting[5], setting[4],
+ setting[3], setting[2], setting[1], setting[0]);
+
+ //BDK_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(ddr_interface_num), save_ddr_dll_ctl3.u);
+ }
+#endif /* CAVIUM_ONLY */
+
+ // any custom DLL read or write offsets, install them
+ // FIXME: no need to do these if we are going to auto-tune... ???
+
+ process_custom_dll_offsets(node, ddr_interface_num, "ddr_dll_write_offset",
+ custom_lmc_config->dll_write_offset, "ddr%d_dll_write_offset_byte%d", 1);
+ process_custom_dll_offsets(node, ddr_interface_num, "ddr_dll_read_offset",
+ custom_lmc_config->dll_read_offset, "ddr%d_dll_read_offset_byte%d", 2);
+
+ // we want to train write bit-deskew here...
+ if (! disable_deskew_training) {
+ if (enable_write_deskew) {
+ ddr_print("N%d.LMC%d: WRITE BIT-DESKEW feature training begins.\n",
+ node, ddr_interface_num);
+ Perform_Write_Deskew_Training(node, ddr_interface_num);
+ } /* if (enable_write_deskew) */
+ } /* if (! disable_deskew_training) */
+
+ /*
+ * 6.9.14 Final LMC Initialization
+ *
+ * Early LMC initialization, LMC write-leveling, and LMC read-leveling
+ * must be completed prior to starting this final LMC initialization.
+ *
+ * LMC hardware updates the LMC(0)_SLOT_CTL0, LMC(0)_SLOT_CTL1,
+ * LMC(0)_SLOT_CTL2 CSRs with minimum values based on the selected
+ * readleveling and write-leveling settings. Software should not write
+ * the final LMC(0)_SLOT_CTL0, LMC(0)_SLOT_CTL1, and LMC(0)_SLOT_CTL2
+ * values until after the final read-leveling and write-leveling settings
+ * are written.
+ *
+ * Software must ensure the LMC(0)_SLOT_CTL0, LMC(0)_SLOT_CTL1, and
+ * LMC(0)_SLOT_CTL2 CSR values are appropriate for this step. These CSRs
+ * select the minimum gaps between read operations and write operations
+ * of various types.
+ *
+ * Software must not reduce the values in these CSR fields below the
+ * values previously selected by the LMC hardware (during write-leveling
+ * and read-leveling steps above).
+ *
+ * All sections in this chapter may be used to derive proper settings for
+ * these registers.
+ *
+ * For minimal read latency, L2C_CTL[EF_ENA,EF_CNT] should be programmed
+ * properly. This should be done prior to the first read.
+ */
+
+#if ENABLE_SLOT_CTL_ACCESS
+ {
+ bdk_lmcx_slot_ctl0_t lmc_slot_ctl0;
+ bdk_lmcx_slot_ctl1_t lmc_slot_ctl1;
+ bdk_lmcx_slot_ctl2_t lmc_slot_ctl2;
+ bdk_lmcx_slot_ctl3_t lmc_slot_ctl3;
+
+ lmc_slot_ctl0.u = BDK_CSR_READ(node, BDK_LMCX_SLOT_CTL0(ddr_interface_num));
+ lmc_slot_ctl1.u = BDK_CSR_READ(node, BDK_LMCX_SLOT_CTL1(ddr_interface_num));
+ lmc_slot_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_SLOT_CTL2(ddr_interface_num));
+ lmc_slot_ctl3.u = BDK_CSR_READ(node, BDK_LMCX_SLOT_CTL3(ddr_interface_num));
+
+ ddr_print("%-45s : 0x%016lx\n", "LMC_SLOT_CTL0", lmc_slot_ctl0.u);
+ ddr_print("%-45s : 0x%016lx\n", "LMC_SLOT_CTL1", lmc_slot_ctl1.u);
+ ddr_print("%-45s : 0x%016lx\n", "LMC_SLOT_CTL2", lmc_slot_ctl2.u);
+ ddr_print("%-45s : 0x%016lx\n", "LMC_SLOT_CTL3", lmc_slot_ctl3.u);
+
+ // for now, look only for SLOT_CTL1 envvar for override of contents
+ if ((s = lookup_env_parameter("ddr%d_slot_ctl1", ddr_interface_num)) != NULL) {
+ int slot_ctl1_incr = strtoul(s, NULL, 0);
+ // validate the value
+ if ((slot_ctl1_incr < 0) || (slot_ctl1_incr > 3)) { // allow 0 for printing only
+ error_print("ddr%d_slot_ctl1 illegal value (%d); must be 0-3\n",
+ ddr_interface_num, slot_ctl1_incr);
+ } else {
+
+#define INCR(csr, chip, field, incr) \
+ csr.chip.field = (csr.chip.field < (64 - incr)) ? (csr.chip.field + incr) : 63
+
+ // only print original when we are changing it!
+ if (slot_ctl1_incr)
+ ddr_print("%-45s : 0x%016lx\n", "LMC_SLOT_CTL1", lmc_slot_ctl1.u);
+
+ // modify all the SLOT_CTL1 fields by the increment, for now...
+ // but make sure the value will not overflow!!!
+ INCR(lmc_slot_ctl1, s, r2r_xrank_init, slot_ctl1_incr);
+ INCR(lmc_slot_ctl1, s, r2w_xrank_init, slot_ctl1_incr);
+ INCR(lmc_slot_ctl1, s, w2r_xrank_init, slot_ctl1_incr);
+ INCR(lmc_slot_ctl1, s, w2w_xrank_init, slot_ctl1_incr);
+ DRAM_CSR_WRITE(node, BDK_LMCX_SLOT_CTL1(ddr_interface_num), lmc_slot_ctl1.u);
+ lmc_slot_ctl1.u = BDK_CSR_READ(node, BDK_LMCX_SLOT_CTL1(ddr_interface_num));
+
+ // always print when we are changing it!
+ printf("%-45s : 0x%016lx\n", "LMC_SLOT_CTL1", lmc_slot_ctl1.u);
+ }
+ }
+ }
+#endif /* ENABLE_SLOT_CTL_ACCESS */
+ {
+ /* Clear any residual ECC errors */
+ int num_tads = 1;
+ int tad;
+
+ DRAM_CSR_WRITE(node, BDK_LMCX_INT(ddr_interface_num), -1ULL);
+ BDK_CSR_READ(node, BDK_LMCX_INT(ddr_interface_num));
+
+ for (tad=0; tad<num_tads; tad++)
+ DRAM_CSR_WRITE(node, BDK_L2C_TADX_INT_W1C(tad), BDK_CSR_READ(node, BDK_L2C_TADX_INT_W1C(tad)));
+
+ ddr_print("%-45s : 0x%08lx\n", "LMC_INT",
+ BDK_CSR_READ(node, BDK_LMCX_INT(ddr_interface_num)));
+
+#if 0
+ // NOTE: this must be done for pass 2.x
+ // must enable ECC interrupts to get ECC error info in LMCX_INT
+ if (! CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X)) { // added 81xx and 83xx
+ DRAM_CSR_WRITE(node, BDK_LMCX_INT_ENA_W1S(ddr_interface_num), -1ULL);
+ BDK_CSR_INIT(lmc_int_ena_w1s, node, BDK_LMCX_INT_ENA_W1S(ddr_interface_num));
+ ddr_print("%-45s : 0x%08lx\n", "LMC_INT_ENA_W1S", lmc_int_ena_w1s.u);
+ }
+#endif
+ }
+
+ // Now we can enable scrambling if desired...
+ {
+ bdk_lmcx_control_t lmc_control;
+ bdk_lmcx_scramble_cfg0_t lmc_scramble_cfg0;
+ bdk_lmcx_scramble_cfg1_t lmc_scramble_cfg1;
+ bdk_lmcx_scramble_cfg2_t lmc_scramble_cfg2;
+ bdk_lmcx_ns_ctl_t lmc_ns_ctl;
+
+ lmc_control.u = BDK_CSR_READ(node, BDK_LMCX_CONTROL(ddr_interface_num));
+ lmc_scramble_cfg0.u = BDK_CSR_READ(node, BDK_LMCX_SCRAMBLE_CFG0(ddr_interface_num));
+ lmc_scramble_cfg1.u = BDK_CSR_READ(node, BDK_LMCX_SCRAMBLE_CFG1(ddr_interface_num));
+ lmc_scramble_cfg2.u = BDK_CSR_READ(node, BDK_LMCX_SCRAMBLE_CFG2(ddr_interface_num));
+ lmc_ns_ctl.u = BDK_CSR_READ(node, BDK_LMCX_NS_CTL(ddr_interface_num));
+
+ /* Read the scramble setting from the config and see if we
+ need scrambling */
+ int use_scramble = bdk_config_get_int(BDK_CONFIG_DRAM_SCRAMBLE);
+ if (use_scramble == 2)
+ {
+ if (bdk_trust_get_level() >= BDK_TRUST_LEVEL_SIGNED)
+ use_scramble = 1;
+ else
+ use_scramble = 0;
+ }
+
+ /* Generate random values if scrambling is needed */
+ if (use_scramble)
+ {
+ lmc_scramble_cfg0.u = bdk_rng_get_random64();
+ lmc_scramble_cfg1.u = bdk_rng_get_random64();
+ lmc_scramble_cfg2.u = bdk_rng_get_random64();
+ lmc_ns_ctl.s.ns_scramble_dis = 0;
+ lmc_ns_ctl.s.adr_offset = 0;
+ lmc_control.s.scramble_ena = 1;
+ }
+
+ if ((s = lookup_env_parameter_ull("ddr_scramble_cfg0")) != NULL) {
+ lmc_scramble_cfg0.u = strtoull(s, NULL, 0);
+ lmc_control.s.scramble_ena = 1;
+ }
+ ddr_print("%-45s : 0x%016lx\n", "LMC_SCRAMBLE_CFG0", lmc_scramble_cfg0.u);
+
+ DRAM_CSR_WRITE(node, BDK_LMCX_SCRAMBLE_CFG0(ddr_interface_num), lmc_scramble_cfg0.u);
+
+ if ((s = lookup_env_parameter_ull("ddr_scramble_cfg1")) != NULL) {
+ lmc_scramble_cfg1.u = strtoull(s, NULL, 0);
+ lmc_control.s.scramble_ena = 1;
+ }
+ ddr_print("%-45s : 0x%016lx\n", "LMC_SCRAMBLE_CFG1", lmc_scramble_cfg1.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_SCRAMBLE_CFG1(ddr_interface_num), lmc_scramble_cfg1.u);
+
+ if ((s = lookup_env_parameter_ull("ddr_scramble_cfg2")) != NULL) {
+ lmc_scramble_cfg2.u = strtoull(s, NULL, 0);
+ lmc_control.s.scramble_ena = 1;
+ }
+ ddr_print("%-45s : 0x%016lx\n", "LMC_SCRAMBLE_CFG2", lmc_scramble_cfg2.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_SCRAMBLE_CFG2(ddr_interface_num), lmc_scramble_cfg2.u);
+
+ if ((s = lookup_env_parameter_ull("ddr_ns_ctl")) != NULL) {
+ lmc_ns_ctl.u = strtoull(s, NULL, 0);
+ }
+ ddr_print("%-45s : 0x%016lx\n", "LMC_NS_CTL", lmc_ns_ctl.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_NS_CTL(ddr_interface_num), lmc_ns_ctl.u);
+
+ DRAM_CSR_WRITE(node, BDK_LMCX_CONTROL(ddr_interface_num), lmc_control.u);
+
+ }
+
+ return(mem_size_mbytes);
+}
diff --git a/src/vendorcode/cavium/bdk/libdram/dram-init-ddr3.h b/src/vendorcode/cavium/bdk/libdram/dram-init-ddr3.h
new file mode 100644
index 0000000000..ba1060e5e0
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libdram/dram-init-ddr3.h
@@ -0,0 +1,97 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * Functions for DDR3/DDR4 init. Internal use only.
+ */
+
+extern void perform_octeon3_ddr3_sequence(bdk_node_t node, int rank_mask,
+ int ddr_interface_num, int sequence);
+extern void perform_ddr_init_sequence(bdk_node_t node, int rank_mask,
+ int ddr_interface_num);
+extern int ddr_memory_preserved(bdk_node_t node);
+
+extern int init_octeon3_ddr3_interface(bdk_node_t node,
+ const ddr_configuration_t *ddr_configuration, uint32_t ddr_hertz,
+ uint32_t cpu_hertz, uint32_t ddr_ref_hertz, int board_type,
+ int board_rev_maj, int board_rev_min, int ddr_interface_num,
+ uint32_t ddr_interface_mask);
+
+extern void
+set_vref(bdk_node_t node, int ddr_interface_num, int rank,
+ int range, int value);
+
+typedef struct {
+ unsigned char *rodt_ohms;
+ unsigned char *rtt_nom_ohms;
+ unsigned char *rtt_nom_table;
+ unsigned char *rtt_wr_ohms;
+ unsigned char *dic_ohms;
+ short *drive_strength;
+ short *dqx_strength;
+} impedence_values_t;
+
+extern impedence_values_t ddr4_impedence_values;
+
+extern int
+compute_vref_value(bdk_node_t node, int ddr_interface_num,
+ int rankx, int dimm_count, int rank_count,
+ impedence_values_t *imp_values, int is_stacked_die);
+
+extern unsigned short
+load_dac_override(int node, int ddr_interface_num,
+ int dac_value, int byte);
+extern int
+read_DAC_DBI_settings(int node, int ddr_interface_num,
+ int dac_or_dbi, int *settings);
+extern void
+display_DAC_DBI_settings(int node, int ddr_interface_num, int dac_or_dbi,
+ int ecc_ena, int *settings, char *title);
+
+#define RODT_OHMS_COUNT 8
+#define RTT_NOM_OHMS_COUNT 8
+#define RTT_NOM_TABLE_COUNT 8
+#define RTT_WR_OHMS_COUNT 8
+#define DIC_OHMS_COUNT 3
+#define DRIVE_STRENGTH_COUNT 15
+
+extern uint64_t hertz_to_psecs(uint64_t hertz);
+extern uint64_t psecs_to_mts(uint64_t psecs);
+extern uint64_t mts_to_hertz(uint64_t mts);
+extern uint64_t pretty_psecs_to_mts(uint64_t psecs);
diff --git a/src/vendorcode/cavium/bdk/libdram/dram-internal.h b/src/vendorcode/cavium/bdk/libdram/dram-internal.h
new file mode 100644
index 0000000000..07fdbcbf54
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libdram/dram-internal.h
@@ -0,0 +1,201 @@
+#ifndef __DRAM_INTERNAL_H__
+#define __DRAM_INTERNAL_H__
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * This header defines all internal API for libdram. None
+ * of these functions should be called by users of the library.
+ * This is the only header that DRAM files should include
+ * from the libdram directory.
+ */
+
+#include "libdram.h"
+#include "lib_octeon_shared.h"
+#include "dram-print.h"
+#include "dram-util.h"
+#include "dram-csr.h"
+#include "dram-env.h"
+#include "dram-gpio.h"
+#include "dram-spd.h"
+#include "dram-l2c.h"
+#include "dram-init-ddr3.h"
+
+#undef DRAM_CSR_WRITE_INLINE
+
+// define how many HW WL samples to take for majority voting
+// MUST BE odd!!
+// assume there should only be 2 possible values that will show up,
+// so treat ties as a problem!!!
+#define WLEVEL_LOOPS_DEFAULT 5 // NOTE: do not change this without checking the code!!!
+
+// define how many HW RL samples per rank to take
+// multiple samples will allow either:
+// 1. looking for the best sample score
+// 2. averaging the samples into a composite score
+// symbol PICK_BEST_RANK_SCORE_NOT_AVG is used to choose
+// (see dram-init-ddr3.c:
+#define RLEVEL_AVG_LOOPS_DEFAULT 3
+#define PICK_BEST_RANK_SCORE_NOT_AVG 1
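+
+#if 0 /* Illustrative sketch only; wl_majority_vote() is not part of these
+         sources. With an odd sample count and (per the assumption above) at
+         most two distinct values observed, counting matches against the
+         first sample decides the majority and a tie cannot occur. */
+static inline int wl_majority_vote(const int *samples, int n /* odd */)
+{
+    int matches = 0;
+    for (int i = 0; i < n; i++)
+        if (samples[i] == samples[0])
+            matches++;
+    if (2 * matches > n)
+        return samples[0];
+    for (int i = 1; i < n; i++)     /* return the other observed value */
+        if (samples[i] != samples[0])
+            return samples[i];
+    return samples[0];              /* only one value was ever observed */
+}
+#endif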
+
+typedef struct {
+ int delay;
+ int loop_total;
+ int loop_count;
+ int best;
+ uint64_t bm;
+ int bmerrs;
+ int sqerrs;
+ int bestsq;
+} rlevel_byte_data_t;
+
+typedef struct {
+ uint64_t bm;
+ uint8_t mstart;
+ uint8_t width;
+ int errs;
+} rlevel_bitmask_t;
+
+#define SET_DDR_DLL_CTL3(field, expr) \
+ do { \
+ ddr_dll_ctl3.cn81xx.field = (expr); \
+ } while (0)
+
+#define ENCODE_DLL90_BYTE_SEL(byte_sel) ((byte_sel)+1)
+
+#define GET_DDR_DLL_CTL3(field) \
+ (ddr_dll_ctl3.cn81xx.field)
+
+
+#define RLEVEL_NONSEQUENTIAL_DELAY_ERROR 50
+#define RLEVEL_ADJACENT_DELAY_ERROR 30
+
+#define TWO_LMC_MASK 0x03
+#define FOUR_LMC_MASK 0x0f
+#define ONE_DIMM_MASK 0x01
+#define TWO_DIMM_MASK 0x03
+
+extern int initialize_ddr_clock(bdk_node_t node,
+ const ddr_configuration_t *ddr_configuration, uint32_t cpu_hertz,
+ uint32_t ddr_hertz, uint32_t ddr_ref_hertz, int ddr_interface_num,
+ uint32_t ddr_interface_mask);
+
+extern int test_dram_byte(bdk_node_t node, int ddr_interface_num, uint64_t p,
+ uint64_t bitmask, uint64_t *xor_data);
+extern int dram_tuning_mem_xor(bdk_node_t node, int ddr_interface_num, uint64_t p,
+ uint64_t bitmask, uint64_t *xor_data);
+
+// "mode" arg
+#define DBTRAIN_TEST 0
+#define DBTRAIN_DBI 1
+#define DBTRAIN_LFSR 2
+extern int test_dram_byte_hw(bdk_node_t node, int ddr_interface_num,
+ uint64_t p, int mode, uint64_t *xor_data);
+extern int run_best_hw_patterns(bdk_node_t node, int ddr_interface_num,
+ uint64_t p, int mode, uint64_t *xor_data);
+
+extern int get_dimm_part_number(char *buffer, bdk_node_t node,
+ const dimm_config_t *dimm_config,
+ int ddr_type);
+extern uint32_t get_dimm_serial_number(bdk_node_t node,
+ const dimm_config_t *dimm_config,
+ int ddr_type);
+
+extern int octeon_ddr_initialize(bdk_node_t node, uint32_t cpu_hertz,
+ uint32_t ddr_hertz, uint32_t ddr_ref_hertz, uint32_t ddr_interface_mask,
+ const ddr_configuration_t *ddr_configuration, uint32_t *measured_ddr_hertz,
+ int board_type, int board_rev_maj, int board_rev_min);
+
+extern uint64_t divide_nint(uint64_t dividend, uint64_t divisor);
+
+typedef enum {
+ DDR3_DRAM = 3,
+ DDR4_DRAM = 4,
+} ddr_type_t;
+
+static inline int get_ddr_type(bdk_node_t node, const dimm_config_t *dimm_config)
+{
+ int spd_ddr_type;
+
+#define DEVICE_TYPE DDR4_SPD_KEY_BYTE_DEVICE_TYPE // same for DDR3 and DDR4
+ spd_ddr_type = read_spd(node, dimm_config, DEVICE_TYPE);
+
+ debug_print("%s:%d spd_ddr_type=0x%02x\n", __FUNCTION__, __LINE__, spd_ddr_type);
+
+ /* we return only DDR4 or DDR3 */
+ return (spd_ddr_type == 0x0C) ? DDR4_DRAM : DDR3_DRAM;
+}
+
+static inline int get_dimm_ecc(bdk_node_t node, const dimm_config_t *dimm_config, int ddr_type)
+{
+#define BUS_WIDTH(t) (((t) == DDR4_DRAM) ? DDR4_SPD_MODULE_MEMORY_BUS_WIDTH : DDR3_SPD_MEMORY_BUS_WIDTH)
+
+ return !!(read_spd(node, dimm_config, BUS_WIDTH(ddr_type)) & 8);
+}
+
+static inline int get_dimm_module_type(bdk_node_t node, const dimm_config_t *dimm_config, int ddr_type)
+{
+#define MODULE_TYPE DDR4_SPD_KEY_BYTE_MODULE_TYPE // same for DDR3 and DDR4
+
+ return (read_spd(node, dimm_config, MODULE_TYPE) & 0x0F);
+}
+
+extern int common_ddr4_fixups(dram_config_t *cfg, uint32_t default_udimm_speed);
+
+#define DEFAULT_BEST_RANK_SCORE 9999999
+#define MAX_RANK_SCORE_LIMIT 99 // is this OK?
+
+unsigned short load_dll_offset(bdk_node_t node, int ddr_interface_num,
+ int dll_offset_mode, int byte_offset, int byte);
+void change_dll_offset_enable(bdk_node_t node, int ddr_interface_num, int change);
+
+extern int perform_dll_offset_tuning(bdk_node_t node, int dll_offset_mode, int do_tune);
+extern int perform_HW_dll_offset_tuning(bdk_node_t node, int dll_offset_mode, int bytelane);
+
+extern int perform_margin_write_voltage(bdk_node_t node);
+extern int perform_margin_read_voltage(bdk_node_t node);
+
+#define LMC_DDR3_RESET_ASSERT 0
+#define LMC_DDR3_RESET_DEASSERT 1
+extern void cn88xx_lmc_ddr3_reset(bdk_node_t node, int ddr_interface_num, int reset);
+extern void perform_lmc_reset(bdk_node_t node, int ddr_interface_num);
+extern void ddr4_mrw(bdk_node_t node, int ddr_interface_num, int rank,
+ int mr_wr_addr, int mr_wr_sel, int mr_wr_bg1);
+#endif /* __DRAM_INTERNAL_H__ */
+
diff --git a/src/vendorcode/cavium/bdk/libdram/dram-l2c.c b/src/vendorcode/cavium/bdk/libdram/dram-l2c.c
new file mode 100644
index 0000000000..11112955b2
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libdram/dram-l2c.c
@@ -0,0 +1,69 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include "dram-internal.h"
+
+int limit_l2_ways(bdk_node_t node, int ways, int verbose)
+{
+ int ways_max = bdk_l2c_get_num_assoc(node);
+ int ways_min = 0;
+ int errors = 0;
+
+ if (ways >= ways_min && ways <= ways_max)
+ {
+ uint32_t valid_mask = (0x1 << ways_max) - 1;
+ uint32_t mask = (valid_mask << ways) & valid_mask;
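+        /* Example: ways_max = 16 and ways = 4 give valid_mask = 0xffff and
+           mask = 0xfff0, i.e. the upper 12 ways are blocked and 4 remain
+           usable. */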
+ if (verbose)
+ printf("Limiting L2 to %d ways\n", ways);
+ for (int i = 0; i < (int)bdk_get_num_cores(node); i++)
+ errors += bdk_l2c_set_core_way_partition(node, i, mask);
+ errors += bdk_l2c_set_hw_way_partition(node, mask);
+ }
+ else
+ {
+ errors++;
+ printf("ERROR: invalid limit_l2_ways %d, must be between %d and %d\n",
+ ways, ways_min, ways_max);
+ }
+ if (errors)
+ puts("ERROR limiting L2 cache ways\n");
+
+ return errors;
+}
+
diff --git a/src/vendorcode/cavium/bdk/libdram/dram-l2c.h b/src/vendorcode/cavium/bdk/libdram/dram-l2c.h
new file mode 100644
index 0000000000..5d2840884b
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libdram/dram-l2c.h
@@ -0,0 +1,45 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * Functions for controlling L2C. Internal use only.
+ */
+
+extern int limit_l2_ways(bdk_node_t node, int ways, int verbose);
+
diff --git a/src/vendorcode/cavium/bdk/libdram/dram-print.h b/src/vendorcode/cavium/bdk/libdram/dram-print.h
new file mode 100644
index 0000000000..94cdf92fbf
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libdram/dram-print.h
@@ -0,0 +1,86 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * Functions for displaying output in libdram. Internal use only.
+ */
+
+typedef enum {
+ // low 4 bits are verbosity level
+ VBL_OFF = 0, // use this only to init dram_verbosity
+ VBL_ALL = 0, // use this only in VBL_PR() to get printf equiv
+ VBL_NORM = 1,
+ VBL_FAE = 2,
+ VBL_TME = 3,
+ VBL_DEV = 4,
+ VBL_DEV2 = 5,
+ VBL_DEV3 = 6,
+ VBL_DEV4 = 7,
+ VBL_NONE = 15, // use this only in VBL_PR() to get no printing
+ // upper 4 bits are special verbosities
+ VBL_SEQ = 16,
+ VBL_CSRS = 32,
+ VBL_SPECIAL = 48,
+ // force at least 8 bits for enum
+ VBL_LAST = 255
+} dram_verbosity_t;
+
+extern dram_verbosity_t dram_verbosity;
+
+// "level" should be 1-7, or only one of the special bits
+// let the compiler optimize the test for verbosity
+#define is_verbosity_level(level) ((int)(dram_verbosity & 0x0f) >= (level))
+#define is_verbosity_special(level) (((int)(dram_verbosity & 0xf0) & (level)) != 0)
+#define dram_is_verbose(level) (((level) & VBL_SPECIAL) ? is_verbosity_special(level) : is_verbosity_level(level))
+
+#define VB_PRT(level, format, ...) \
+ do { \
+ if (dram_is_verbose(level)) \
+ printf(format, ##__VA_ARGS__); \
+ } while (0)
+
+#define ddr_print(format, ...) VB_PRT(VBL_NORM, format, ##__VA_ARGS__)
+
+#define error_print(format, ...) printf(format, ##__VA_ARGS__)
+
+#ifdef DEBUG_DEBUG_PRINT
+ #define debug_print(format, ...) printf(format, ##__VA_ARGS__)
+#else
+ #define debug_print(format, ...) do {} while (0)
+#endif
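+
+#if 0 /* Illustrative usage only; node, lmc, byte and sequence are assumed
+         to be in scope at a real call site. */
+VB_PRT(VBL_DEV, "N%d.LMC%d: tuning byte %d\n", node, lmc, byte);
+VB_PRT(VBL_SEQ, "starting init sequence 0x%x\n", sequence);
+ddr_print("this prints at normal verbosity (VBL_NORM) and up\n");
+#endif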
diff --git a/src/vendorcode/cavium/bdk/libdram/dram-spd.c b/src/vendorcode/cavium/bdk/libdram/dram-spd.c
new file mode 100644
index 0000000000..3717ca1109
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libdram/dram-spd.c
@@ -0,0 +1,583 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include <ctype.h>
+#include "dram-internal.h"
+
+/**
+ * Read the entire contents of a DIMM SPD and store it in the device tree. The
+ * current DRAM config is also updated, so future SPD accesses use the cached
+ * copy.
+ *
+ * @param node Node the DRAM config is for
+ * @param cfg Current DRAM config. Updated with SPD data
+ * @param lmc LMC to read DIMM for
+ * @param dimm DIMM slot for SPD to read
+ *
+ * @return Zero on success, negative on failure
+ */
+int read_entire_spd(bdk_node_t node, dram_config_t *cfg, int lmc, int dimm)
+{
+ /* If pointer to data is provided, use it, otherwise read from SPD over twsi */
+ if (cfg->config[lmc].dimm_config_table[dimm].spd_ptr)
+ return 0;
+ if (!cfg->config[lmc].dimm_config_table[dimm].spd_addr)
+ return -1;
+
+ /* Figure out how to access the SPD */
+ int spd_addr = cfg->config[lmc].dimm_config_table[dimm].spd_addr;
+ int bus = spd_addr >> 12;
+ int address = spd_addr & 0x7f;
+
+ /* Figure out the size we will read */
+ int64_t dev_type = bdk_twsix_read_ia(node, bus, address, DDR4_SPD_KEY_BYTE_DEVICE_TYPE, 1, 1);
+ if (dev_type < 0)
+ return -1; /* No DIMM */
+ int spd_size = (dev_type == 0x0c) ? 512 : 256;
+
+ /* Allocate storage */
+ uint32_t *spd_buf = malloc(spd_size);
+ if (!spd_buf)
+ return -1;
+ uint32_t *ptr = spd_buf;
+
+ for (int bank = 0; bank < (spd_size >> 8); bank++)
+ {
+ /* this should only happen for DDR4, which has a second bank of 256 bytes */
+ if (bank)
+ bdk_twsix_write_ia(node, bus, 0x36 | bank, 0, 2, 1, 0);
+ int bank_size = 256;
+ for (int i = 0; i < bank_size; i += 4)
+ {
+ int64_t data = bdk_twsix_read_ia(node, bus, address, i, 4, 1);
+ if (data < 0)
+ {
+ free(spd_buf);
+ bdk_error("Failed to read SPD data at 0x%x\n", i + (bank << 8));
+ /* Restore the bank to zero */
+ if (bank)
+ bdk_twsix_write_ia(node, bus, 0x36 | 0, 0, 2, 1, 0);
+ return -1;
+ }
+ else
+ *ptr++ = bdk_be32_to_cpu(data);
+ }
+ /* Restore the bank to zero */
+ if (bank)
+ bdk_twsix_write_ia(node, bus, 0x36 | 0, 0, 2, 1, 0);
+ }
+
+ /* Store the SPD in the device tree */
+ bdk_config_set_blob(spd_size, spd_buf, BDK_CONFIG_DDR_SPD_DATA, dimm, lmc, node);
+ cfg->config[lmc].dimm_config_table[dimm].spd_ptr = (void*)spd_buf;
+
+ return 0;
+}
+
+/* Read a DIMM SPD value, either using TWSI to read it from the DIMM, or
+ * from a provided array.
+ */
+int read_spd(bdk_node_t node, const dimm_config_t *dimm_config, int spd_field)
+{
+ /* If pointer to data is provided, use it, otherwise read from SPD over twsi */
+ if (dimm_config->spd_ptr)
+ return dimm_config->spd_ptr[spd_field];
+ else if (dimm_config->spd_addr)
+ {
+ int data;
+ int bus = dimm_config->spd_addr >> 12;
+ int address = dimm_config->spd_addr & 0x7f;
+
+ /* this should only happen for DDR4, which has a second bank of 256 bytes */
+ int bank = (spd_field >> 8) & 1;
+ if (bank) {
+ bdk_twsix_write_ia(node, bus, 0x36 | bank, 0, 2, 1, 0);
+ spd_field %= 256;
+ }
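+        /* Example: spd_field = DDR4_SPD_MODULE_SERIAL_NUMBER (325) gives
+           bank = 1, so page-select address 0x36 | 1 = 0x37 (SPA1) is written
+           and the byte is then read at offset 325 % 256 = 69 in that page. */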
+
+ data = bdk_twsix_read_ia(node, bus, address, spd_field, 1, 1);
+
+ /* Restore the bank to zero */
+ if (bank) {
+ bdk_twsix_write_ia(node, bus, 0x36 | 0, 0, 2, 1, 0);
+ }
+
+ return data;
+ }
+ else
+ return -1;
+}
+
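+/* CRC-16 with polynomial 0x1021 and initial value 0 (CRC-16/XMODEM),
+   as defined by the JEDEC SPD specifications for DDR3 and DDR4. */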
+static uint16_t ddr3_crc16(uint8_t *ptr, int count)
+{
+ /* From DDR3 spd specification */
+ int crc, i;
+ crc = 0;
+ while (--count >= 0)
+ {
+ crc = crc ^ (int)*ptr++ << 8;
+ for (i = 0; i < 8; ++i)
+ if (crc & 0x8000)
+ crc = crc << 1 ^ 0x1021;
+ else
+ crc = crc << 1;
+ }
+ return crc & 0xFFFF;
+}
+
+static int validate_spd_checksum_ddr3(bdk_node_t node, int twsi_addr, int silent)
+{
+ uint8_t spd_data[128];
+ int crc_bytes = 126;
+ uint16_t crc_comp;
+ int i;
+ int rv;
+ int ret = 1;
+ for (i = 0; i < 128; i++)
+ {
+ rv = bdk_twsix_read_ia(node, twsi_addr >> 12, twsi_addr & 0x7f, i, 1, 1);
+ if (rv < 0)
+ return 0; /* TWSI read error */
+ spd_data[i] = (uint8_t)rv;
+ }
+ /* Check byte 0 to see how many bytes checksum is over */
+ if (spd_data[0] & 0x80)
+ crc_bytes = 117;
+
+ crc_comp = ddr3_crc16(spd_data, crc_bytes);
+
+ if (spd_data[DDR3_SPD_CYCLICAL_REDUNDANCY_CODE_LOWER_NIBBLE] != (crc_comp & 0xff) ||
+ spd_data[DDR3_SPD_CYCLICAL_REDUNDANCY_CODE_UPPER_NIBBLE] != (crc_comp >> 8))
+ {
+ if (!silent) {
+ printf("DDR3 SPD CRC error, spd addr: 0x%x, calculated crc: 0x%04x, read crc: 0x%02x%02x\n",
+ twsi_addr, crc_comp,
+ spd_data[DDR3_SPD_CYCLICAL_REDUNDANCY_CODE_UPPER_NIBBLE],
+ spd_data[DDR3_SPD_CYCLICAL_REDUNDANCY_CODE_LOWER_NIBBLE]);
+ }
+ ret = 0;
+ }
+ return ret;
+}
+
+static int validate_spd_checksum(bdk_node_t node, int twsi_addr, int silent)
+{
+ int rv;
+
+ debug_print("Validating DIMM at address 0x%x\n", twsi_addr);
+
+ if (!twsi_addr) return 1; /* return OK if we are not doing real DIMMs */
+
+ /* Look up module type to determine if DDR3 or DDR4 */
+ rv = bdk_twsix_read_ia(node, twsi_addr >> 12, twsi_addr & 0x7f, 2, 1, 1);
+
+ if (rv >= 0xB && rv <= 0xC) /* this is DDR3 or DDR4, do same */
+ return validate_spd_checksum_ddr3(node, twsi_addr, silent);
+
+ if (!silent)
+ printf("Unrecognized DIMM type: 0x%x at spd address: 0x%x\n",
+ rv, twsi_addr);
+
+ return 0;
+}
+
+
+int validate_dimm(bdk_node_t node, const dimm_config_t *dimm_config)
+{
+ int spd_addr;
+
+ spd_addr = dimm_config->spd_addr;
+
+ debug_print("Validating dimm spd addr: 0x%02x spd ptr: %x\n",
+ spd_addr, dimm_config->spd_ptr);
+
+ // if the slot is not possible
+ if (!spd_addr && !dimm_config->spd_ptr)
+ return -1;
+
+ {
+ int val0, val1;
+ int ddr_type = get_ddr_type(node, dimm_config);
+
+ switch (ddr_type)
+ {
+ case DDR3_DRAM: /* DDR3 */
+ case DDR4_DRAM: /* DDR4 */
+
+ debug_print("Validating DDR%d DIMM\n", ((dimm_type >> 2) & 3) + 1);
+
+#define DENSITY_BANKS DDR4_SPD_DENSITY_BANKS // same for DDR3 and DDR4
+#define ROW_COL_BITS DDR4_SPD_ADDRESSING_ROW_COL_BITS // same for DDR3 and DDR4
+
+ val0 = read_spd(node, dimm_config, DENSITY_BANKS);
+ val1 = read_spd(node, dimm_config, ROW_COL_BITS);
+ if (val0 < 0 && val1 < 0) {
+ debug_print("Error reading SPD for DIMM\n");
+ return 0; /* Failed to read dimm */
+ }
+ if (val0 == 0xff && val1 == 0xff) {
+ ddr_print("Blank or unreadable SPD for DIMM\n");
+ return 0; /* Blank SPD or otherwise unreadable device */
+ }
+
+ /* Don't treat bad checksums as fatal. */
+ validate_spd_checksum(node, spd_addr, 0);
+ break;
+
+ case 0x00: /* Terminator detected. Fail silently. */
+ return 0;
+
+ default:
+ debug_print("Unknown DIMM type 0x%x for DIMM @ 0x%x\n",
+                        ddr_type, dimm_config->spd_addr);
+ return 0; /* Failed to read dimm */
+ }
+ }
+
+ return 1;
+}
+
+int get_dimm_part_number(char *buffer, bdk_node_t node,
+ const dimm_config_t *dimm_config,
+ int ddr_type)
+{
+ int i;
+ int c;
+ int skipping = 1;
+ int strlen = 0;
+
+#define PART_LIMIT(t) (((t) == DDR4_DRAM) ? 19 : 18)
+#define PART_NUMBER(t) (((t) == DDR4_DRAM) ? DDR4_SPD_MODULE_PART_NUMBER : DDR3_SPD_MODULE_PART_NUMBER)
+
+ int limit = PART_LIMIT(ddr_type);
+ int offset = PART_NUMBER(ddr_type);
+
+ for (i = 0; i < limit; ++i) {
+
+ c = (read_spd(node, dimm_config, offset+i) & 0xff);
+ if (c == 0) // any null, we are done
+ break;
+
+ /* Skip leading spaces. */
+ if (skipping) {
+ if (isspace(c))
+ continue;
+ else
+ skipping = 0;
+ }
+
+ /* Put non-null non-leading-space-skipped char into buffer */
+ buffer[strlen] = c;
+ ++strlen;
+ }
+
+ if (strlen > 0) {
+ i = strlen - 1; // last char put into buf
+ while (i >= 0 && isspace((int)buffer[i])) { // still in buf and a space
+ --i;
+ --strlen;
+ }
+ }
+    buffer[strlen] = 0; /* Ensure that the string is terminated */
+
+ return strlen;
+}
+
+uint32_t get_dimm_serial_number(bdk_node_t node, const dimm_config_t *dimm_config, int ddr_type)
+{
+ uint32_t serial_number = 0;
+ int offset;
+
+#define SERIAL_NUMBER(t) (((t) == DDR4_DRAM) ? DDR4_SPD_MODULE_SERIAL_NUMBER : DDR3_SPD_MODULE_SERIAL_NUMBER)
+
+ offset = SERIAL_NUMBER(ddr_type);
+
+ for (int i = 0, j = 24; i < 4; ++i, j -= 8) {
+ serial_number |= ((read_spd(node, dimm_config, offset + i) & 0xff) << j);
+ }
+
+ return serial_number;
+}
+
+static uint32_t get_dimm_checksum(bdk_node_t node, const dimm_config_t *dimm_config, int ddr_type)
+{
+ uint32_t spd_chksum;
+
+#define LOWER_NIBBLE(t) (((t) == DDR4_DRAM) ? DDR4_SPD_CYCLICAL_REDUNDANCY_CODE_LOWER_NIBBLE : DDR3_SPD_CYCLICAL_REDUNDANCY_CODE_LOWER_NIBBLE)
+#define UPPER_NIBBLE(t) (((t) == DDR4_DRAM) ? DDR4_SPD_CYCLICAL_REDUNDANCY_CODE_UPPER_NIBBLE : DDR3_SPD_CYCLICAL_REDUNDANCY_CODE_UPPER_NIBBLE)
+
+ spd_chksum = 0xff & read_spd(node, dimm_config, LOWER_NIBBLE(ddr_type));
+ spd_chksum |= ((0xff & read_spd(node, dimm_config, UPPER_NIBBLE(ddr_type))) << 8);
+
+ return spd_chksum;
+}
+
+static
+void report_common_dimm(bdk_node_t node, const dimm_config_t *dimm_config, int dimm,
+ const char **dimm_types, int ddr_type, char *volt_str,
+ int ddr_interface_num, int num_ranks, int dram_width, int dimm_size_mb)
+{
+ int spd_ecc;
+ unsigned spd_module_type;
+ uint32_t serial_number;
+ char part_number[21]; /* 20 bytes plus string terminator is big enough for either */
+ char *sn_str;
+
+ spd_module_type = get_dimm_module_type(node, dimm_config, ddr_type);
+ spd_ecc = get_dimm_ecc(node, dimm_config, ddr_type);
+
+ (void) get_dimm_part_number(part_number, node, dimm_config, ddr_type);
+
+ serial_number = get_dimm_serial_number(node, dimm_config, ddr_type);
+ if ((serial_number != 0) && (serial_number != 0xffffffff)) {
+ sn_str = "s/n";
+ } else {
+ serial_number = get_dimm_checksum(node, dimm_config, ddr_type);
+ sn_str = "chksum";
+ }
+
+ // FIXME: add output of DIMM rank/width, as in: 2Rx4, 1Rx8, etc
+ printf("N%d.LMC%d.DIMM%d: %d MB, DDR%d %s %dRx%d %s, p/n: %s, %s: %u, %s\n",
+ node, ddr_interface_num, dimm, dimm_size_mb, ddr_type,
+ dimm_types[spd_module_type], num_ranks, dram_width,
+ (spd_ecc ? "ECC" : "non-ECC"), part_number,
+ sn_str, serial_number, volt_str);
+}
+
+const char *ddr3_dimm_types[16] = {
+ /* 0000 */ "Undefined",
+ /* 0001 */ "RDIMM",
+ /* 0010 */ "UDIMM",
+ /* 0011 */ "SO-DIMM",
+ /* 0100 */ "Micro-DIMM",
+ /* 0101 */ "Mini-RDIMM",
+ /* 0110 */ "Mini-UDIMM",
+ /* 0111 */ "Mini-CDIMM",
+ /* 1000 */ "72b-SO-UDIMM",
+ /* 1001 */ "72b-SO-RDIMM",
+ /* 1010 */ "72b-SO-CDIMM"
+ /* 1011 */ "LRDIMM",
+ /* 1100 */ "16b-SO-DIMM",
+ /* 1101 */ "32b-SO-DIMM",
+ /* 1110 */ "Reserved",
+ /* 1111 */ "Reserved"
+};
+
+static
+void report_ddr3_dimm(bdk_node_t node, const dimm_config_t *dimm_config,
+ int dimm, int ddr_interface_num, int num_ranks,
+ int dram_width, int dimm_size_mb)
+{
+ int spd_voltage;
+    char *volt_str = "unknown"; /* default, so the pointer is never used uninitialized */
+
+ spd_voltage = read_spd(node, dimm_config, DDR3_SPD_NOMINAL_VOLTAGE);
+ if ((spd_voltage == 0) || (spd_voltage & 3))
+ volt_str = "1.5V";
+ if (spd_voltage & 2)
+ volt_str = "1.35V";
+ if (spd_voltage & 4)
+ volt_str = "1.2xV";
+
+ report_common_dimm(node, dimm_config, dimm, ddr3_dimm_types,
+ DDR3_DRAM, volt_str, ddr_interface_num,
+ num_ranks, dram_width, dimm_size_mb);
+}
+
+const char *ddr4_dimm_types[16] = {
+ /* 0000 */ "Extended",
+ /* 0001 */ "RDIMM",
+ /* 0010 */ "UDIMM",
+ /* 0011 */ "SO-DIMM",
+ /* 0100 */ "LRDIMM",
+ /* 0101 */ "Mini-RDIMM",
+ /* 0110 */ "Mini-UDIMM",
+ /* 0111 */ "Reserved",
+ /* 1000 */ "72b-SO-RDIMM",
+ /* 1001 */ "72b-SO-UDIMM",
+ /* 1010 */ "Reserved",
+ /* 1011 */ "Reserved",
+ /* 1100 */ "16b-SO-DIMM",
+ /* 1101 */ "32b-SO-DIMM",
+ /* 1110 */ "Reserved",
+ /* 1111 */ "Reserved"
+};
+
+static
+void report_ddr4_dimm(bdk_node_t node, const dimm_config_t *dimm_config,
+ int dimm, int ddr_interface_num, int num_ranks,
+ int dram_width, int dimm_size_mb)
+{
+ int spd_voltage;
+    char *volt_str = "unknown"; /* default, so the pointer is never used uninitialized */
+
+ spd_voltage = read_spd(node, dimm_config, DDR4_SPD_MODULE_NOMINAL_VOLTAGE);
+ if ((spd_voltage == 0x01) || (spd_voltage & 0x02))
+ volt_str = "1.2V";
+ if ((spd_voltage == 0x04) || (spd_voltage & 0x08))
+ volt_str = "TBD1 V";
+ if ((spd_voltage == 0x10) || (spd_voltage & 0x20))
+ volt_str = "TBD2 V";
+
+ report_common_dimm(node, dimm_config, dimm, ddr4_dimm_types,
+ DDR4_DRAM, volt_str, ddr_interface_num,
+ num_ranks, dram_width, dimm_size_mb);
+}
+
+void report_dimm(bdk_node_t node, const dimm_config_t *dimm_config,
+ int dimm, int ddr_interface_num, int num_ranks,
+ int dram_width, int dimm_size_mb)
+{
+ int ddr_type;
+
+ /* ddr_type only indicates DDR4 or DDR3 */
+ ddr_type = get_ddr_type(node, dimm_config);
+
+ if (ddr_type == DDR4_DRAM)
+ report_ddr4_dimm(node, dimm_config, dimm, ddr_interface_num,
+ num_ranks, dram_width, dimm_size_mb);
+ else
+ report_ddr3_dimm(node, dimm_config, dimm, ddr_interface_num,
+ num_ranks, dram_width, dimm_size_mb);
+}
+
+static int
+get_ddr4_spd_speed(bdk_node_t node, const dimm_config_t *dimm_config)
+{
+ int spdMTB = 125;
+ int spdFTB = 1;
+
+ int tCKAVGmin
+ = spdMTB * read_spd(node, dimm_config, DDR4_SPD_MINIMUM_CYCLE_TIME_TCKAVGMIN)
+ + spdFTB * (signed char) read_spd(node, dimm_config, DDR4_SPD_MIN_CYCLE_TIME_FINE_TCKAVGMIN);
+
+ return pretty_psecs_to_mts(tCKAVGmin);
+}
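+
+/* Worked example (illustrative): a DDR4-2400 SPD typically encodes
+   tCKAVGmin as 7 MTB units plus a fine offset of -42 FTB units:
+   125 * 7 + 1 * (-42) = 833 ps, which pretty_psecs_to_mts() reports
+   as 2400 MT/s. */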
+
+static int
+get_ddr3_spd_speed(bdk_node_t node, const dimm_config_t *dimm_config)
+{
+ int spd_mtb_dividend = 0xff & read_spd(node, dimm_config, DDR3_SPD_MEDIUM_TIMEBASE_DIVIDEND);
+ int spd_mtb_divisor = 0xff & read_spd(node, dimm_config, DDR3_SPD_MEDIUM_TIMEBASE_DIVISOR);
+ int spd_tck_min = 0xff & read_spd(node, dimm_config, DDR3_SPD_MINIMUM_CYCLE_TIME_TCKMIN);
+
+ short ftb_Dividend = read_spd(node, dimm_config, DDR3_SPD_FINE_TIMEBASE_DIVIDEND_DIVISOR) >> 4;
+ short ftb_Divisor = read_spd(node, dimm_config, DDR3_SPD_FINE_TIMEBASE_DIVIDEND_DIVISOR) & 0xf;
+
+ ftb_Divisor = (ftb_Divisor == 0) ? 1 : ftb_Divisor; /* Make sure that it is not 0 */
+
+ int mtb_psec = spd_mtb_dividend * 1000 / spd_mtb_divisor;
+ int tCKmin = mtb_psec * spd_tck_min;
+ tCKmin += ftb_Dividend *
+ (signed char) read_spd(node, dimm_config, DDR3_SPD_MINIMUM_CYCLE_TIME_FINE_TCKMIN)
+ / ftb_Divisor;
+
+ return pretty_psecs_to_mts(tCKmin);
+}
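+
+/* Worked example (illustrative): DDR3-1600 commonly encodes MTB as
+   1/8 ns (dividend 1, divisor 8, i.e. 125 ps) and tCKmin as 10 MTB
+   units: 125 * 10 = 1250 ps, i.e. 1600 MT/s. */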
+
+static int
+speed_bin_down(int speed)
+{
+ if (speed == 2133)
+ return 1866;
+ else if (speed == 1866)
+ return 1600;
+ else
+ return speed;
+}
+
+int
+dram_get_default_spd_speed(bdk_node_t node, const ddr_configuration_t *ddr_config)
+{
+ int lmc, dimm;
+ int speed, ret_speed = 0;
+ int ddr_type = get_ddr_type(node, &ddr_config[0].dimm_config_table[0]);
+ int dimm_speed[8], dimm_count = 0;
+ int dimms_per_lmc = 0;
+
+ for (lmc = 0; lmc < 4; lmc++) {
+ for (dimm = 0; dimm < DDR_CFG_T_MAX_DIMMS; dimm++) {
+ const dimm_config_t *dimm_config = &ddr_config[lmc].dimm_config_table[dimm];
+ if (/*dimm_config->spd_addr ||*/ dimm_config->spd_ptr)
+ {
+ speed = (ddr_type == DDR4_DRAM)
+ ? get_ddr4_spd_speed(node, dimm_config)
+ : get_ddr3_spd_speed(node, dimm_config);
+ //printf("N%d.LMC%d.DIMM%d: SPD speed %d\n", node, lmc, dimm, speed);
+ dimm_speed[dimm_count] = speed;
+ dimm_count++;
+ if (lmc == 0)
+ dimms_per_lmc++;
+ }
+ }
+ }
+
+ // all DIMMs must be same speed
+ speed = dimm_speed[0];
+ for (dimm = 1; dimm < dimm_count; dimm++) {
+ if (dimm_speed[dimm] != speed) {
+ ret_speed = -1;
+ goto finish_up;
+ }
+ }
+
+ // if 2400 or greater, use 2133
+ if (speed >= 2400)
+ speed = 2133;
+
+ // use next speed down if 2DPC...
+ if (dimms_per_lmc > 1)
+ speed = speed_bin_down(speed);
+
+ // Update the in memory config to match the automatically calculated speed
+ bdk_config_set_int(speed, BDK_CONFIG_DDR_SPEED, node);
+
+ // do filtering for our jittery PLL
+ if (speed == 2133)
+ speed = 2100;
+ else if (speed == 1866)
+ speed = 1880;
+
+ // OK, return what we have...
+ ret_speed = mts_to_hertz(speed);
+
+ finish_up:
+ //printf("N%d: Returning default SPD speed %d\n", node, ret_speed);
+ return ret_speed;
+}
diff --git a/src/vendorcode/cavium/bdk/libdram/dram-spd.h b/src/vendorcode/cavium/bdk/libdram/dram-spd.h
new file mode 100644
index 0000000000..df229f4959
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libdram/dram-spd.h
@@ -0,0 +1,166 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * Functions, enumerations, and structures related to DIMM SPDs.
+ * Everything in this file is internal to libdram.
+ */
+
+/* data field addresses in the DDR3 SPD eeprom */
+typedef enum ddr3_spd_addrs {
+ DDR3_SPD_BYTES_PROGRAMMED = 0,
+ DDR3_SPD_REVISION = 1,
+ DDR3_SPD_KEY_BYTE_DEVICE_TYPE = 2,
+ DDR3_SPD_KEY_BYTE_MODULE_TYPE = 3,
+ DDR3_SPD_DENSITY_BANKS = 4,
+ DDR3_SPD_ADDRESSING_ROW_COL_BITS = 5,
+ DDR3_SPD_NOMINAL_VOLTAGE = 6,
+ DDR3_SPD_MODULE_ORGANIZATION = 7,
+ DDR3_SPD_MEMORY_BUS_WIDTH = 8,
+ DDR3_SPD_FINE_TIMEBASE_DIVIDEND_DIVISOR = 9,
+ DDR3_SPD_MEDIUM_TIMEBASE_DIVIDEND = 10,
+ DDR3_SPD_MEDIUM_TIMEBASE_DIVISOR = 11,
+ DDR3_SPD_MINIMUM_CYCLE_TIME_TCKMIN = 12,
+ DDR3_SPD_CAS_LATENCIES_LSB = 14,
+ DDR3_SPD_CAS_LATENCIES_MSB = 15,
+ DDR3_SPD_MIN_CAS_LATENCY_TAAMIN = 16,
+ DDR3_SPD_MIN_WRITE_RECOVERY_TWRMIN = 17,
+ DDR3_SPD_MIN_RAS_CAS_DELAY_TRCDMIN = 18,
+ DDR3_SPD_MIN_ROW_ACTIVE_DELAY_TRRDMIN = 19,
+ DDR3_SPD_MIN_ROW_PRECHARGE_DELAY_TRPMIN = 20,
+ DDR3_SPD_UPPER_NIBBLES_TRAS_TRC = 21,
+ DDR3_SPD_MIN_ACTIVE_PRECHARGE_LSB_TRASMIN = 22,
+ DDR3_SPD_MIN_ACTIVE_REFRESH_LSB_TRCMIN = 23,
+ DDR3_SPD_MIN_REFRESH_RECOVERY_LSB_TRFCMIN = 24,
+ DDR3_SPD_MIN_REFRESH_RECOVERY_MSB_TRFCMIN = 25,
+ DDR3_SPD_MIN_INTERNAL_WRITE_READ_CMD_TWTRMIN = 26,
+ DDR3_SPD_MIN_INTERNAL_READ_PRECHARGE_CMD_TRTPMIN = 27,
+ DDR3_SPD_UPPER_NIBBLE_TFAW = 28,
+ DDR3_SPD_MIN_FOUR_ACTIVE_WINDOW_TFAWMIN = 29,
+ DDR3_SPD_MINIMUM_CYCLE_TIME_FINE_TCKMIN = 34,
+ DDR3_SPD_MIN_CAS_LATENCY_FINE_TAAMIN = 35,
+ DDR3_SPD_MIN_RAS_CAS_DELAY_FINE_TRCDMIN = 36,
+ DDR3_SPD_MIN_ROW_PRECHARGE_DELAY_FINE_TRPMIN = 37,
+ DDR3_SPD_MIN_ACTIVE_REFRESH_LSB_FINE_TRCMIN = 38,
+ DDR3_SPD_ADDRESS_MAPPING = 63,
+ DDR3_SPD_MODULE_SERIAL_NUMBER = 122,
+ DDR3_SPD_CYCLICAL_REDUNDANCY_CODE_LOWER_NIBBLE = 126,
+ DDR3_SPD_CYCLICAL_REDUNDANCY_CODE_UPPER_NIBBLE = 127,
+ DDR3_SPD_MODULE_PART_NUMBER = 128
+} ddr3_spd_addr_t;
+
+/* data field addresses in the DDR4 SPD eeprom */
+typedef enum ddr4_spd_addrs {
+ DDR4_SPD_BYTES_PROGRAMMED = 0,
+ DDR4_SPD_REVISION = 1,
+ DDR4_SPD_KEY_BYTE_DEVICE_TYPE = 2,
+ DDR4_SPD_KEY_BYTE_MODULE_TYPE = 3,
+ DDR4_SPD_DENSITY_BANKS = 4,
+ DDR4_SPD_ADDRESSING_ROW_COL_BITS = 5,
+ DDR4_SPD_PACKAGE_TYPE = 6,
+ DDR4_SPD_OPTIONAL_FEATURES = 7,
+ DDR4_SPD_THERMAL_REFRESH_OPTIONS = 8,
+ DDR4_SPD_OTHER_OPTIONAL_FEATURES = 9,
+ DDR4_SPD_SECONDARY_PACKAGE_TYPE = 10,
+ DDR4_SPD_MODULE_NOMINAL_VOLTAGE = 11,
+ DDR4_SPD_MODULE_ORGANIZATION = 12,
+ DDR4_SPD_MODULE_MEMORY_BUS_WIDTH = 13,
+ DDR4_SPD_MODULE_THERMAL_SENSOR = 14,
+ DDR4_SPD_RESERVED_BYTE15 = 15,
+ DDR4_SPD_RESERVED_BYTE16 = 16,
+ DDR4_SPD_TIMEBASES = 17,
+ DDR4_SPD_MINIMUM_CYCLE_TIME_TCKAVGMIN = 18,
+ DDR4_SPD_MAXIMUM_CYCLE_TIME_TCKAVGMAX = 19,
+ DDR4_SPD_CAS_LATENCIES_BYTE0 = 20,
+ DDR4_SPD_CAS_LATENCIES_BYTE1 = 21,
+ DDR4_SPD_CAS_LATENCIES_BYTE2 = 22,
+ DDR4_SPD_CAS_LATENCIES_BYTE3 = 23,
+ DDR4_SPD_MIN_CAS_LATENCY_TAAMIN = 24,
+ DDR4_SPD_MIN_RAS_CAS_DELAY_TRCDMIN = 25,
+ DDR4_SPD_MIN_ROW_PRECHARGE_DELAY_TRPMIN = 26,
+ DDR4_SPD_UPPER_NIBBLES_TRAS_TRC = 27,
+ DDR4_SPD_MIN_ACTIVE_PRECHARGE_LSB_TRASMIN = 28,
+ DDR4_SPD_MIN_ACTIVE_REFRESH_LSB_TRCMIN = 29,
+ DDR4_SPD_MIN_REFRESH_RECOVERY_LSB_TRFC1MIN = 30,
+ DDR4_SPD_MIN_REFRESH_RECOVERY_MSB_TRFC1MIN = 31,
+ DDR4_SPD_MIN_REFRESH_RECOVERY_LSB_TRFC2MIN = 32,
+ DDR4_SPD_MIN_REFRESH_RECOVERY_MSB_TRFC2MIN = 33,
+ DDR4_SPD_MIN_REFRESH_RECOVERY_LSB_TRFC4MIN = 34,
+ DDR4_SPD_MIN_REFRESH_RECOVERY_MSB_TRFC4MIN = 35,
+ DDR4_SPD_MIN_FOUR_ACTIVE_WINDOW_MSN_TFAWMIN = 36,
+ DDR4_SPD_MIN_FOUR_ACTIVE_WINDOW_LSB_TFAWMIN = 37,
+ DDR4_SPD_MIN_ROW_ACTIVE_DELAY_SAME_TRRD_SMIN = 38,
+ DDR4_SPD_MIN_ROW_ACTIVE_DELAY_DIFF_TRRD_LMIN = 39,
+ DDR4_SPD_MIN_CAS_TO_CAS_DELAY_TCCD_LMIN = 40,
+ DDR4_SPD_MIN_CAS_TO_CAS_DELAY_FINE_TCCD_LMIN = 117,
+ DDR4_SPD_MIN_ACT_TO_ACT_DELAY_SAME_FINE_TRRD_LMIN = 118,
+ DDR4_SPD_MIN_ACT_TO_ACT_DELAY_DIFF_FINE_TRRD_SMIN = 119,
+ DDR4_SPD_MIN_ACT_TO_ACT_REFRESH_DELAY_FINE_TRCMIN = 120,
+ DDR4_SPD_MIN_ROW_PRECHARGE_DELAY_FINE_TRPMIN = 121,
+ DDR4_SPD_MIN_RAS_TO_CAS_DELAY_FINE_TRCDMIN = 122,
+ DDR4_SPD_MIN_CAS_LATENCY_FINE_TAAMIN = 123,
+ DDR4_SPD_MAX_CYCLE_TIME_FINE_TCKAVGMAX = 124,
+ DDR4_SPD_MIN_CYCLE_TIME_FINE_TCKAVGMIN = 125,
+ DDR4_SPD_CYCLICAL_REDUNDANCY_CODE_LOWER_NIBBLE = 126,
+ DDR4_SPD_CYCLICAL_REDUNDANCY_CODE_UPPER_NIBBLE = 127,
+ DDR4_SPD_REFERENCE_RAW_CARD = 130,
+ DDR4_SPD_UDIMM_ADDR_MAPPING_FROM_EDGE = 131,
+ DDR4_SPD_REGISTER_MANUFACTURER_ID_LSB = 133,
+ DDR4_SPD_REGISTER_MANUFACTURER_ID_MSB = 134,
+ DDR4_SPD_REGISTER_REVISION_NUMBER = 135,
+ DDR4_SPD_RDIMM_ADDR_MAPPING_FROM_REGISTER_TO_DRAM = 136,
+ DDR4_SPD_RDIMM_REGISTER_DRIVE_STRENGTH_CTL = 137,
+ DDR4_SPD_RDIMM_REGISTER_DRIVE_STRENGTH_CK = 138,
+ DDR4_SPD_MODULE_SERIAL_NUMBER = 325,
+ DDR4_SPD_MODULE_PART_NUMBER = 329
+} ddr4_spd_addr_t;
+
+extern int read_entire_spd(bdk_node_t node, dram_config_t *cfg, int lmc, int dimm);
+extern int read_spd(bdk_node_t node, const dimm_config_t *dimm_config, int spd_field);
+
+extern int validate_dimm(bdk_node_t node, const dimm_config_t *dimm_config);
+
+extern void report_dimm(bdk_node_t node, const dimm_config_t *dimm_config,
+ int dimm, int ddr_interface_num, int num_ranks,
+ int dram_width, int dimm_size_mb);
+
+extern int dram_get_default_spd_speed(bdk_node_t node, const ddr_configuration_t *ddr_config);
+
+extern const char *ddr3_dimm_types[];
+extern const char *ddr4_dimm_types[];
diff --git a/src/vendorcode/cavium/bdk/libdram/dram-tune-ddr3.c b/src/vendorcode/cavium/bdk/libdram/dram-tune-ddr3.c
new file mode 100644
index 0000000000..e0e9d4442c
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libdram/dram-tune-ddr3.c
@@ -0,0 +1,2012 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include "dram-internal.h"
+
+// if enhanced verbosity levels are defined, use them
+#if defined(VB_PRT)
+#define ddr_print2(format, ...) VB_PRT(VBL_FAE, format, ##__VA_ARGS__)
+#define ddr_print3(format, ...) VB_PRT(VBL_TME, format, ##__VA_ARGS__)
+#define ddr_print4(format, ...) VB_PRT(VBL_DEV, format, ##__VA_ARGS__)
+#define ddr_print5(format, ...) VB_PRT(VBL_DEV3, format, ##__VA_ARGS__)
+#else
+#define ddr_print2 ddr_print
+#define ddr_print3 ddr_print
+#define ddr_print4 ddr_print
+#define ddr_print5 ddr_print
+#endif
+
+static int64_t test_dram_byte_threads_done;
+static uint64_t test_dram_byte_threads_errs;
+static uint64_t test_dram_byte_lmc_errs[4];
+
+#if 0
+/*
+ * Suggested testing patterns.
+ */
+static const uint64_t test_pattern_2[] = {
+ 0xFFFFFFFFFFFFFFFFULL,
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0xFFFFFFFFFFFFFFFFULL,
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0x5555555555555555ULL,
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0xFFFFFFFFFFFFFFFFULL,
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0xFFFFFFFFFFFFFFFFULL,
+ 0x5555555555555555ULL,
+ 0xFFFFFFFFFFFFFFFFULL,
+ 0x5555555555555555ULL,
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0x5555555555555555ULL,
+ 0xFFFFFFFFFFFFFFFFULL,
+ 0x5555555555555555ULL,
+};
+ /*
+ * or possibly
+ */
+static const uint64_t test_pattern_3[] = {
+ 0xFDFDFDFDFDFDFDFDULL,
+ 0x8787878787878787ULL,
+ 0xFEFEFEFEFEFEFEFEULL,
+ 0xC3C3C3C3C3C3C3C3ULL,
+ 0x7F7F7F7F7F7F7F7FULL,
+ 0xE1E1E1E1E1E1E1E1ULL,
+ 0xBFBFBFBFBFBFBFBFULL,
+ 0xF0F0F0F0F0F0F0F0ULL,
+ 0xDFDFDFDFDFDFDFDFULL,
+ 0x7878787878787878ULL,
+ 0xEFEFEFEFEFEFEFEFULL,
+ 0x3C3C3C3C3C3C3C3CULL,
+ 0xF7F7F7F7F7F7F7F7ULL,
+ 0x1E1E1E1E1E1E1E1EULL,
+ 0xFBFBFBFBFBFBFBFBULL,
+ 0x0F0F0F0F0F0F0F0FULL,
+};
+
+static const uint64_t test_pattern_1[] = {
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0x5555555555555555ULL,
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0x5555555555555555ULL,
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0x5555555555555555ULL,
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0x5555555555555555ULL,
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0x5555555555555555ULL,
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0x5555555555555555ULL,
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0x5555555555555555ULL,
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0x5555555555555555ULL,
+#if 0 // only need a cacheline size
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0x5555555555555555ULL,
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0x5555555555555555ULL,
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0x5555555555555555ULL,
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0x5555555555555555ULL,
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0x5555555555555555ULL,
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0x5555555555555555ULL,
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0x5555555555555555ULL,
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0x5555555555555555ULL,
+#endif
+};
+
+// setup default for test pattern array
+static const uint64_t *dram_tune_test_pattern = test_pattern_1;
+#endif
+
+// set this to 1 to shorten the testing to exit when all byte lanes have errors
+// having this at 0 forces the testing to take place over the entire range every iteration,
+// hopefully ensuring an even load on the memory subsystem
+#define EXIT_WHEN_ALL_LANES_HAVE_ERRORS 0
+
+#define DEFAULT_TEST_BURSTS 5 // FIXME: this is what works so far... (was 7)
+int dram_tune_use_bursts = DEFAULT_TEST_BURSTS;
+
+// dram_tune_rank_offset is used to offset the second area used in test_dram_mem_xor.
+//
+// For a single-rank DIMM, the offset will be 256MB from the start of the first area,
+// which is more than enough for the restricted looping/address range actually tested...
+//
+// For a 2-rank DIMM, the offset will be the size of a rank's address space, so the effect
+// will be to have the first and second areas in different ranks on the same DIMM.
+//
+// So, we default this to single-rank, and it will be overridden when 2 ranks are detected.
+//
+
+// FIXME: ASSUME that we have DIMMs no less than 4GB in size
+
+// offset to first area that avoids any boot stuff in low range (below 256MB)
+#define AREA_BASE_OFFSET (1ULL << 28) // bit 28 always ON
+
+// offset to duplicate area; may coincide with rank 1 base address for 2-rank 4GB DIMM
+#define AREA_DUPE_OFFSET (1ULL << 31) // bit 31 always ON
+
+// defaults to DUPE, but will be set elsewhere to offset to next RANK if multi-rank DIMM
+static uint64_t dram_tune_rank_offset = AREA_DUPE_OFFSET; // default
+
+// defaults to 0, but will be set elsewhere to the address offset to next DIMM if multi-slot
+static uint64_t dram_tune_dimm_offset = 0; // default
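+
+// Illustrative sketch (not part of the original BDK sources): how the two
+// offsets above end up being set once LMC0's geometry is known. This mirrors
+// the assignments made later in auto_set_dll_offset(); the CSR fields are
+// real, the wrapper function itself is hypothetical.
+#if 0
+static void example_compute_tune_offsets(bdk_node_t node, int num_lmcs)
+{
+    BDK_CSR_INIT(lmcx_config, node, BDK_LMCX_CONFIG(0));
+    if (lmcx_config.s.rank_ena) // more than 1 rank: offset to the next rank
+        dram_tune_rank_offset = 1ull << (28 + lmcx_config.s.pbank_lsb -
+                                         lmcx_config.s.rank_ena + (num_lmcs/2));
+    if (lmcx_config.s.init_status & 0x0c) // bit 2 or 3 set: 2 DIMMs present
+        dram_tune_dimm_offset = 1ull << (28 + lmcx_config.s.pbank_lsb +
+                                         (num_lmcs/2));
+}
+#endif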
+
+
+static int speed_bin_offset[3] = {25, 20, 15};
+static int speed_bin_winlen[3] = {70, 60, 60};
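+// speed bins (see get_speed_bin() below): 0 = under 1700 MT/s, 1 = 1700-1999 MT/s, 2 = 2000 MT/s and up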
+
+static int
+get_speed_bin(bdk_node_t node, int lmc)
+{
+ uint32_t mts_speed = (libdram_get_freq_from_pll(node, lmc) / 1000000) * 2;
+ int ret = 0;
+
+ // FIXME: is this reasonable speed "binning"?
+ if (mts_speed >= 1700) {
+ if (mts_speed >= 2000)
+ ret = 2;
+ else
+ ret = 1;
+ }
+
+ debug_print("N%d.LMC%d: %s: returning bin %d for MTS %d\n",
+ node, lmc, __FUNCTION__, ret, mts_speed);
+
+ return ret;
+}
+
+static int is_low_risk_offset(int speed_bin, int offset)
+{
+ return (_abs(offset) <= speed_bin_offset[speed_bin]);
+}
+static int is_low_risk_winlen(int speed_bin, int winlen)
+{
+ return (winlen >= speed_bin_winlen[speed_bin]);
+}
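+// e.g. for bin 1 (1700-1999 MT/s), an offset within +/-20 and a window of at
+// least 60 taps count as low risk, per the tables above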
+
+#define ENABLE_PREFETCH 0
+#define ENABLE_WBIL2 1
+#define ENABLE_SBLKDTY 0
+
+#define BDK_SYS_CVMCACHE_INV_L2 "#0,c11,c1,#1" // L2 Cache Invalidate
+#define BDK_CACHE_INV_L2(address) { asm volatile ("sys " BDK_SYS_CVMCACHE_INV_L2 ", %0" : : "r" (address)); }
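+// Note: unlike BDK_CACHE_WBI_L2 (writeback + invalidate), this discards the
+// cached copy without writing it back, forcing the next read to come from DRAM.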
+
+int dram_tuning_mem_xor(bdk_node_t node, int lmc, uint64_t p, uint64_t bitmask, uint64_t *xor_data)
+{
+ uint64_t p1, p2, d1, d2;
+ uint64_t v, v1;
+ uint64_t p2offset = 0x10000000/* was: dram_tune_rank_offset; */; // FIXME?
+ uint64_t datamask;
+ uint64_t xor;
+ uint64_t i, j, k;
+ uint64_t ii;
+ int errors = 0;
+ //uint64_t index;
+ uint64_t pattern1 = bdk_rng_get_random64();
+ uint64_t pattern2 = 0;
+ uint64_t bad_bits[2] = {0,0};
+
+#if ENABLE_SBLKDTY
+ BDK_CSR_MODIFY(c, node, BDK_L2C_CTL, c.s.dissblkdty = 0);
+#endif
+
+ // Byte lanes may be clear in the mask to indicate no testing on that lane.
+ datamask = bitmask;
+
+ // final address must include LMC and node
+ p |= (lmc<<7); /* Map address into proper interface */
+ p = bdk_numa_get_address(node, p); /* Map to node */
+
+ /* Add offset to both test regions to not clobber boot stuff
+ * when running from L2 for NAND boot.
+ */
+ p += AREA_BASE_OFFSET; // make sure base is out of the way of boot
+
+#define II_INC (1ULL << 29)
+#define II_MAX (1ULL << 31)
+#define K_INC (1ULL << 14)
+#define K_MAX (1ULL << 20)
+#define J_INC (1ULL << 9)
+#define J_MAX (1ULL << 12)
+#define I_INC (1ULL << 3)
+#define I_MAX (1ULL << 7)
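+
+// These strides sample one 128-byte cacheline (I) out of every 512 bytes (J),
+// within the first 4KB of each 16KB block (K), across four 512MB quadrants (II),
+// so the test touches a sparse subset of the first 2GB rather than every byte.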
+
+ debug_print("N%d.LMC%d: dram_tuning_mem_xor: phys_addr=0x%lx\n",
+ node, lmc, p);
+
+#if 0
+ int ix;
+ // add this loop to fill memory with the test pattern first
+ // loops are ordered so that only entire cachelines are written
+ for (ii = 0; ii < II_MAX; ii += II_INC) { // FIXME? extend the range of memory tested!!
+ for (k = 0; k < K_MAX; k += K_INC) {
+ for (j = 0; j < J_MAX; j += J_INC) {
+ p1 = p + ii + k + j;
+ p2 = p1 + p2offset;
+ for (i = 0, ix = 0; i < I_MAX; i += I_INC, ix++) {
+
+ v = dram_tune_test_pattern[ix];
+ v1 = v; // write the same thing to both areas
+
+ __bdk_dram_write64(p1 + i, v);
+ __bdk_dram_write64(p2 + i, v1);
+
+ }
+#if ENABLE_WBIL2
+ BDK_CACHE_WBI_L2(p1);
+ BDK_CACHE_WBI_L2(p2);
+#endif
+ }
+ }
+ } /* for (ii = 0; ii < (1ULL << 31); ii += (1ULL << 29)) */
+#endif
+
+#if ENABLE_PREFETCH
+ BDK_PREFETCH(p , BDK_CACHE_LINE_SIZE);
+ BDK_PREFETCH(p + p2offset, BDK_CACHE_LINE_SIZE);
+#endif
+
+ // loops are ordered so that only a single 64-bit slot is written to each cacheline at one time,
+ // then the cachelines are forced out; this should maximize read/write traffic
+ for (ii = 0; ii < II_MAX; ii += II_INC) { // FIXME? extend the range of memory tested!!
+ for (k = 0; k < K_MAX; k += K_INC) {
+ for (i = 0; i < I_MAX; i += I_INC) {
+ for (j = 0; j < J_MAX; j += J_INC) {
+
+ p1 = p + ii + k + j;
+ p2 = p1 + p2offset;
+
+#if ENABLE_PREFETCH
+ if (j < (J_MAX - J_INC)) {
+ BDK_PREFETCH(p1 + J_INC, BDK_CACHE_LINE_SIZE);
+ BDK_PREFETCH(p2 + J_INC, BDK_CACHE_LINE_SIZE);
+ }
+#endif
+
+ v = pattern1 * (p1 + i);
+ v1 = v; // write the same thing to both areas
+
+ __bdk_dram_write64(p1 + i, v);
+ __bdk_dram_write64(p2 + i, v1);
+
+#if ENABLE_WBIL2
+ BDK_CACHE_WBI_L2(p1);
+ BDK_CACHE_WBI_L2(p2);
+#endif
+ }
+ }
+ }
+ } /* for (ii = 0; ii < (1ULL << 31); ii += (1ULL << 29)) */
+
+ BDK_DCACHE_INVALIDATE;
+
+ debug_print("N%d.LMC%d: dram_tuning_mem_xor: done INIT loop\n",
+ node, lmc);
+
+ /* Make a series of passes over the memory areas. */
+
+ for (int burst = 0; burst < 1/* was: dram_tune_use_bursts*/; burst++)
+ {
+ uint64_t this_pattern = bdk_rng_get_random64();
+ pattern2 ^= this_pattern;
+
+ /* XOR the data with a random value, applying the change to both
+ * memory areas.
+ */
+#if ENABLE_PREFETCH
+ BDK_PREFETCH(p , BDK_CACHE_LINE_SIZE);
+ BDK_PREFETCH(p + p2offset, BDK_CACHE_LINE_SIZE);
+#endif
+
+ for (ii = 0; ii < II_MAX; ii += II_INC) { // FIXME? extend the range of memory tested!!
+ for (k = 0; k < K_MAX; k += K_INC) {
+ for (i = 0; i < I_MAX; i += I_INC) { // FIXME: rearranged, did not make much difference?
+ for (j = 0; j < J_MAX; j += J_INC) {
+
+ p1 = p + ii + k + j;
+ p2 = p1 + p2offset;
+
+#if ENABLE_PREFETCH
+ if (j < (J_MAX - J_INC)) {
+ BDK_PREFETCH(p1 + J_INC, BDK_CACHE_LINE_SIZE);
+ BDK_PREFETCH(p2 + J_INC, BDK_CACHE_LINE_SIZE);
+ }
+#endif
+
+ v = __bdk_dram_read64(p1 + i) ^ this_pattern;
+ v1 = __bdk_dram_read64(p2 + i) ^ this_pattern;
+
+#if ENABLE_WBIL2
+ BDK_CACHE_INV_L2(p1);
+ BDK_CACHE_INV_L2(p2);
+#endif
+
+ __bdk_dram_write64(p1 + i, v);
+ __bdk_dram_write64(p2 + i, v1);
+
+#if ENABLE_WBIL2
+ BDK_CACHE_WBI_L2(p1);
+ BDK_CACHE_WBI_L2(p2);
+#endif
+ }
+ }
+ }
+ } /* for (ii = 0; ii < (1ULL << 31); ii += (1ULL << 29)) */
+
+ BDK_DCACHE_INVALIDATE;
+
+ debug_print("N%d.LMC%d: dram_tuning_mem_xor: done MODIFY loop\n",
+ node, lmc);
+
+#if ENABLE_PREFETCH
+ BDK_PREFETCH(p , BDK_CACHE_LINE_SIZE);
+ BDK_PREFETCH(p + p2offset, BDK_CACHE_LINE_SIZE);
+#endif
+
+ /* Look for differences in the areas. If there is a mismatch, reset
+ * both memory locations with the same pattern. Failing to do so
+ * means that on all subsequent passes the pair of locations remain
+ * out of sync giving spurious errors.
+ */
+ // FIXME: change the loop order so that an entire cache line is compared at one time
+ // FIXME: this is so that a read error that occurs *anywhere* on the cacheline will be caught,
+ // FIXME: rather than comparing only 1 cacheline slot at a time, where an error on a different
+ // FIXME: slot will be missed that time around
+ // Does the above make sense?
+
+ for (ii = 0; ii < II_MAX; ii += II_INC) { // FIXME? extend the range of memory tested!!
+ for (k = 0; k < K_MAX; k += K_INC) {
+ for (j = 0; j < J_MAX; j += J_INC) {
+
+ p1 = p + ii + k + j;
+ p2 = p1 + p2offset;
+
+#if ENABLE_PREFETCH
+ if (j < (J_MAX - J_INC)) {
+ BDK_PREFETCH(p1 + J_INC, BDK_CACHE_LINE_SIZE);
+ BDK_PREFETCH(p2 + J_INC, BDK_CACHE_LINE_SIZE);
+ }
+#endif
+
+ // process entire cachelines in the innermost loop
+ for (i = 0; i < I_MAX; i += I_INC) {
+
+ v = ((p1 + i) * pattern1) ^ pattern2; // FIXME: this should predict what we find...???
+ d1 = __bdk_dram_read64(p1 + i);
+ d2 = __bdk_dram_read64(p2 + i);
+
+ xor = ((d1 ^ v) | (d2 ^ v)) & datamask; // union of error bits only in active byte lanes
+
+ if (!xor)
+ continue;
+
+ // accumulate bad bits
+ bad_bits[0] |= xor;
+ //bad_bits[1] |= ~mpr_data1 & 0xffUL; // cannot do ECC here
+
+ int bybit = 1;
+ uint64_t bymsk = 0xffULL; // start in byte lane 0
+ while (xor != 0) {
+ debug_print("ERROR(%03d): [0x%016lX] [0x%016lX] expected 0x%016lX d1 %016lX d2 %016lX\n",
+ burst, p1, p2, v, d1, d2);
+ if (xor & bymsk) { // error(s) in this lane
+ errors |= bybit; // set the byte error bit
+ xor &= ~bymsk; // clear byte lane in error bits
+ datamask &= ~bymsk; // clear the byte lane in the mask
+#if EXIT_WHEN_ALL_LANES_HAVE_ERRORS
+ if (datamask == 0) { // nothing left to do
+ return errors; // completely done when errors found in all byte lanes in datamask
+ }
+#endif /* EXIT_WHEN_ALL_LANES_HAVE_ERRORS */
+ }
+ bymsk <<= 8; // move mask into next byte lane
+ bybit <<= 1; // move bit into next byte position
+ }
+ }
+#if ENABLE_WBIL2
+ BDK_CACHE_WBI_L2(p1);
+ BDK_CACHE_WBI_L2(p2);
+#endif
+ }
+ }
+ } /* for (ii = 0; ii < (1ULL << 31); ii += (1ULL << 29)) */
+
+ debug_print("N%d.LMC%d: dram_tuning_mem_xor: done TEST loop\n",
+ node, lmc);
+
+ } /* for (int burst = 0; burst < dram_tune_use_bursts; burst++) */
+
+ if (xor_data != NULL) { // send the bad bits back...
+ xor_data[0] = bad_bits[0];
+ xor_data[1] = bad_bits[1]; // let it be zeroed
+ }
+
+#if ENABLE_SBLKDTY
+ BDK_CSR_MODIFY(c, node, BDK_L2C_CTL, c.s.dissblkdty = 1);
+#endif
+
+ return errors;
+}
+
+#undef II_INC
+#undef II_MAX
+
+#define EXTRACT(v, lsb, width) (((v) >> (lsb)) & ((1ull << (width)) - 1))
+#define LMCNO(address, xbits) (EXTRACT(address, 7, xbits) ^ EXTRACT(address, 20, xbits) ^ EXTRACT(address, 12, xbits))
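+// Example: with 4 LMCs (xbits = 2), LMCNO() XORs the address bit fields
+// <8:7>, <21:20> and <13:12> to recover which LMC an address hashes to.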
+
+static int dram_tuning_mem_xor2(uint64_t p, uint64_t bitmask, int xbits)
+{
+ uint64_t p1, p2, d1, d2;
+ uint64_t v, vpred;
+ uint64_t p2offset = dram_tune_rank_offset; // FIXME?
+ uint64_t datamask;
+ uint64_t xor;
+ uint64_t ii;
+ uint64_t pattern1 = bdk_rng_get_random64();
+ uint64_t pattern2 = 0;
+ int errors = 0;
+ int errs_by_lmc[4] = { 0,0,0,0 };
+ int lmc;
+ uint64_t vbase, vincr;
+
+ // Byte lanes may be clear in the mask to indicate no testing on that lane.
+ datamask = bitmask;
+
+ /* Add offset to both test regions to not clobber boot stuff
+ * when running from L2 for NAND boot.
+ */
+ p += AREA_BASE_OFFSET; // make sure base is out of the way of boot
+
+ // move the multiplies outside the loop
+ vbase = p * pattern1;
+ vincr = 8 * pattern1;
+
+#define II_INC (1ULL << 3)
+#define II_MAX (1ULL << 22) // stop where the core ID bits start
+
+ // walk the memory areas by 8-byte words
+ v = vbase;
+ for (ii = 0; ii < II_MAX; ii += II_INC) {
+
+ p1 = p + ii;
+ p2 = p1 + p2offset;
+
+ __bdk_dram_write64(p1, v);
+ __bdk_dram_write64(p2, v);
+
+ v += vincr;
+ }
+
+ __bdk_dram_flush_to_mem_range(p , p + II_MAX);
+ __bdk_dram_flush_to_mem_range(p + p2offset, p + p2offset + II_MAX);
+ BDK_DCACHE_INVALIDATE;
+
+ /* Make a series of passes over the memory areas. */
+
+ for (int burst = 0; burst < dram_tune_use_bursts; burst++)
+ {
+ uint64_t this_pattern = bdk_rng_get_random64();
+ pattern2 ^= this_pattern;
+
+ /* XOR the data with a random value, applying the change to both
+ * memory areas.
+ */
+#if 0
+ BDK_PREFETCH(p , BDK_CACHE_LINE_SIZE);
+ BDK_PREFETCH(p + p2offset, BDK_CACHE_LINE_SIZE);
+#endif
+ for (ii = 0; ii < II_MAX; ii += II_INC) { // FIXME? extend the range of memory tested!!
+
+ p1 = p + ii;
+ p2 = p1 + p2offset;
+
+ d1 = __bdk_dram_read64(p1) ^ this_pattern;
+ d2 = __bdk_dram_read64(p2) ^ this_pattern;
+
+ __bdk_dram_write64(p1, d1);
+ __bdk_dram_write64(p2, d2);
+
+ }
+ __bdk_dram_flush_to_mem_range(p , p + II_MAX);
+ __bdk_dram_flush_to_mem_range(p + p2offset, p + p2offset + II_MAX);
+ BDK_DCACHE_INVALIDATE;
+
+ /* Look for differences in the areas. If there is a mismatch, reset
+ * both memory locations with the same pattern. Failing to do so
+ * means that on all subsequent passes the pair of locations remain
+ * out of sync giving spurious errors.
+ */
+#if 0
+ BDK_PREFETCH(p , BDK_CACHE_LINE_SIZE);
+ BDK_PREFETCH(p + p2offset, BDK_CACHE_LINE_SIZE);
+#endif
+ vpred = vbase;
+ for (ii = 0; ii < II_MAX; ii += II_INC) {
+
+ p1 = p + ii;
+ p2 = p1 + p2offset;
+
+ v = vpred ^ pattern2; // this should predict what we find...
+ d1 = __bdk_dram_read64(p1);
+ d2 = __bdk_dram_read64(p2);
+ vpred += vincr;
+
+ xor = ((d1 ^ v) | (d2 ^ v)) & datamask; // union of error bits only in active byte lanes
+ if (!xor) // no errors
+ continue;
+
+ lmc = LMCNO(p1, xbits); // FIXME: LMC should be SAME for p1 and p2!!!
+ if (lmc != (int)LMCNO(p2, xbits)) {
+                printf("ERROR: LMCs for addresses [0x%016lX] (%lu) and [0x%016lX] (%lu) differ!!!\n",
+                       p1, LMCNO(p1, xbits), p2, LMCNO(p2, xbits));
+ }
+ int bybit = 1;
+ uint64_t bymsk = 0xffULL; // start in byte lane 0
+ while (xor != 0) {
+ debug_print("ERROR(%03d): [0x%016lX] [0x%016lX] expected 0x%016lX d1 %016lX d2 %016lX\n",
+ burst, p1, p2, v, d1, d2);
+ if (xor & bymsk) { // error(s) in this lane
+ errs_by_lmc[lmc] |= bybit; // set the byte error bit in the LMCs errors
+ errors |= bybit; // set the byte error bit
+ xor &= ~bymsk; // clear byte lane in error bits
+ //datamask &= ~bymsk; // clear the byte lane in the mask
+ }
+ bymsk <<= 8; // move mask into next byte lane
+ bybit <<= 1; // move bit into next byte position
+ } /* while (xor != 0) */
+ } /* for (ii = 0; ii < II_MAX; ii += II_INC) */
+ } /* for (int burst = 0; burst < dram_tune_use_bursts; burst++) */
+
+ // update the global LMC error states
+ for (lmc = 0; lmc < 4; lmc++) {
+ if (errs_by_lmc[lmc]) {
+ bdk_atomic_fetch_and_bset64_nosync(&test_dram_byte_lmc_errs[lmc], errs_by_lmc[lmc]);
+ }
+ }
+
+ return errors;
+}
+
+#if 0
+static int dram_tuning_mem_rows(uint64_t p, uint64_t bitmask)
+{
+ uint64_t p1, p2, d1, d2;
+ uint64_t v, v1;
+ uint64_t p2offset = dram_tune_rank_offset; // FIXME?
+ uint64_t datamask;
+ uint64_t xor;
+ int i, j, k, ii;
+ int errors = 0;
+ int index;
+ uint64_t pattern1 = 0; // FIXME: maybe this could be from a table?
+ uint64_t pattern2;
+
+ // Byte lanes may be clear in the mask to indicate no testing on that lane.
+ datamask = bitmask;
+
+ /* Add offset to both test regions to not clobber boot stuff
+ * when running from L2 for NAND boot.
+ */
+ p += 0x10000000; // FIXME? was: 0x4000000; // make sure base is out of the way of cores for tuning
+
+ pattern2 = pattern1;
+ for (k = 0; k < (1 << 20); k += (1 << 14)) {
+ for (j = 0; j < (1 << 12); j += (1 << 9)) {
+ for (i = 0; i < (1 << 7); i += 8) {
+ index = i + j + k;
+ p1 = p + index;
+ p2 = p1 + p2offset;
+
+ v = pattern2;
+ v1 = v; // write the same thing to same slot in both cachelines
+ pattern2 = ~pattern2; // flip bits for next slots
+
+ __bdk_dram_write64(p1, v);
+ __bdk_dram_write64(p2, v1);
+ }
+#if 1
+ BDK_CACHE_WBI_L2(p1);
+ BDK_CACHE_WBI_L2(p2);
+#endif
+ }
+ }
+
+#if 0
+ __bdk_dram_flush_to_mem_range(p, p + (1ULL << 20)); // max_addr is start + where k stops...
+ __bdk_dram_flush_to_mem_range(p + p2offset, p + p2offset + (1ULL << 20)); // max_addr is start + where k stops...
+#endif
+ BDK_DCACHE_INVALIDATE;
+
+ /* Make a series of passes over the memory areas. */
+
+ for (int burst = 0; burst < dram_tune_use_bursts; burst++)
+ {
+ /* just read and flip the bits applying the change to both
+ * memory areas.
+ */
+ for (k = 0; k < (1 << 20); k += (1 << 14)) {
+ for (j = 0; j < (1 << 12); j += (1 << 9)) {
+ for (i = 0; i < (1 << 7); i += 8) {
+ index = i + j + k;
+ p1 = p + index;
+ p2 = p1 + p2offset;
+
+ v = ~__bdk_dram_read64(p1);
+ v1 = ~__bdk_dram_read64(p2);
+
+ __bdk_dram_write64(p1, v);
+ __bdk_dram_write64(p2, v1);
+ }
+#if 1
+ BDK_CACHE_WBI_L2(p1);
+ BDK_CACHE_WBI_L2(p2);
+#endif
+ }
+ }
+
+#if 0
+ __bdk_dram_flush_to_mem_range(p, p + (1ULL << 20)); // max_addr is start + where k stops...
+ __bdk_dram_flush_to_mem_range(p + p2offset, p + p2offset + (1ULL << 20)); // max_addr is start + where k stops...
+#endif
+ BDK_DCACHE_INVALIDATE;
+
+ /* Look for differences in the areas. If there is a mismatch, reset
+ * both memory locations with the same pattern. Failing to do so
+ * means that on all subsequent passes the pair of locations remain
+ * out of sync giving spurious errors.
+ */
+
+ // FIXME: change the loop order so that an entire cache line is compared at one time
+ // FIXME: this is so that a read error that occurs *anywhere* on the cacheline will be caught,
+ // FIXME: rather than comparing only 1 cacheline slot at a time, where an error on a different
+ // FIXME: slot will be missed that time around
+ // Does the above make sense?
+
+ pattern2 = ~pattern1; // slots have been flipped by the above loop
+
+ for (k = 0; k < (1 << 20); k += (1 << 14)) {
+ for (j = 0; j < (1 << 12); j += (1 << 9)) {
+ for (i = 0; i < (1 << 7); i += 8) {
+ index = i + j + k;
+ p1 = p + index;
+ p2 = p1 + p2offset;
+
+ v = pattern2; // FIXME: this should predict what we find...???
+ d1 = __bdk_dram_read64(p1);
+ d2 = __bdk_dram_read64(p2);
+ pattern2 = ~pattern2; // flip for next slot
+
+ xor = ((d1 ^ v) | (d2 ^ v)) & datamask; // union of error bits only in active byte lanes
+
+ int bybit = 1;
+ uint64_t bymsk = 0xffULL; // start in byte lane 0
+ while (xor != 0) {
+ debug_print("ERROR(%03d): [0x%016lX] [0x%016lX] expected 0x%016lX d1 %016lX d2 %016lX\n",
+ burst, p1, p2, v, d1, d2);
+ if (xor & bymsk) { // error(s) in this lane
+ errors |= bybit; // set the byte error bit
+ xor &= ~bymsk; // clear byte lane in error bits
+ datamask &= ~bymsk; // clear the byte lane in the mask
+#if EXIT_WHEN_ALL_LANES_HAVE_ERRORS
+ if (datamask == 0) { // nothing left to do
+ return errors; // completely done when errors found in all byte lanes in datamask
+ }
+#endif /* EXIT_WHEN_ALL_LANES_HAVE_ERRORS */
+ }
+ bymsk <<= 8; // move mask into next byte lane
+ bybit <<= 1; // move bit into next byte position
+ }
+ }
+ }
+ }
+ pattern1 = ~pattern1; // flip the starting pattern for the next burst
+
+ } /* for (int burst = 0; burst < dram_tune_use_bursts; burst++) */
+ return errors;
+}
+#endif
+
+// cores to use
+#define DEFAULT_USE_CORES 44 // FIXME: was (1 << CORE_BITS)
+int dram_tune_use_cores = DEFAULT_USE_CORES; // max cores to use; may be overridden via environment
+int dram_tune_max_cores; // max cores available on a node
+#define CORE_SHIFT 22 // FIXME: offset into rank_address passed to test_dram_byte
+
+typedef void (*__dram_tuning_thread_t)(int arg, void *arg1);
+
+typedef struct
+{
+ bdk_node_t node;
+ int64_t num_lmcs;
+ uint64_t byte_mask;
+} test_dram_byte_info_t;
+
+static void dram_tuning_thread(int arg, void *arg1)
+{
+ test_dram_byte_info_t *test_info = arg1;
+ int core = arg;
+ uint64_t errs;
+ bdk_node_t node = test_info->node;
+ int num_lmcs, lmc;
+#if 0
+ num_lmcs = test_info->num_lmcs;
+ // map core numbers into hopefully equal groups per LMC
+ lmc = core % num_lmcs;
+#else
+ // FIXME: this code should allow running all the cores on a single LMC...
+ // if incoming num_lmcs > 0, then use as normal; if < 0 remap to a single LMC
+ if (test_info->num_lmcs >= 0) {
+ num_lmcs = test_info->num_lmcs;
+ // map core numbers into hopefully equal groups per LMC
+ lmc = core % num_lmcs;
+ } else {
+ num_lmcs = 1;
+ // incoming num_lmcs is (desired LMC - 10)
+ lmc = 10 + test_info->num_lmcs;
+ }
+#endif
+ uint64_t base_address = 0/* was: (lmc << 7); now done by callee */;
+ uint64_t bytemask = test_info->byte_mask;
+
+ /* Figure out our work memory range.
+ *
+ * Note: base_address above just provides the physical offset which determines
+ * specific LMC portions of the address space and does not have the node bits set.
+ */
+ //was: base_address = bdk_numa_get_address(node, base_address); // map to node // now done by callee
+ base_address |= (core << CORE_SHIFT); // FIXME: also put full core into address
+ if (dram_tune_dimm_offset) { // if multi-slot in some way, choose a DIMM for the core
+ base_address |= (core & (1 << (num_lmcs >> 1))) ? dram_tune_dimm_offset : 0;
+ }
+
+ debug_print("Node %d, core %d, Testing area 1 at 0x%011lx, area 2 at 0x%011lx\n",
+ node, core, base_address + AREA_BASE_OFFSET,
+ base_address + AREA_BASE_OFFSET + dram_tune_rank_offset);
+
+ errs = dram_tuning_mem_xor(node, lmc, base_address, bytemask, NULL);
+ //errs = dram_tuning_mem_rows(base_address, bytemask);
+
+ /* Report that we're done */
+ debug_print("Core %d on LMC %d node %d done with test_dram_byte with 0x%lx errs\n",
+ core, lmc, node, errs);
+
+ if (errs) {
+ bdk_atomic_fetch_and_bset64_nosync(&test_dram_byte_threads_errs, errs);
+ bdk_atomic_fetch_and_bset64_nosync(&test_dram_byte_lmc_errs[lmc], errs);
+ }
+
+ bdk_atomic_add64_nosync(&test_dram_byte_threads_done, 1);
+
+ return;
+}
+
+static void dram_tuning_thread2(int arg, void *arg1)
+{
+ test_dram_byte_info_t *test_info = arg1;
+ int core = arg;
+ uint64_t errs;
+ bdk_node_t node = test_info->node;
+ int num_lmcs = test_info->num_lmcs;
+
+ uint64_t base_address = 0; //
+ uint64_t bytemask = test_info->byte_mask;
+
+ /* Figure out our work memory range.
+ *
+ * Note: base_address above just provides the physical offset which determines
+ * specific portions of the address space and does not have the node bits set.
+ */
+ base_address = bdk_numa_get_address(node, base_address); // map to node
+ base_address |= (core << CORE_SHIFT); // FIXME: also put full core into address
+ if (dram_tune_dimm_offset) { // if multi-slot in some way, choose a DIMM for the core
+ base_address |= (core & 1) ? dram_tune_dimm_offset : 0;
+ }
+
+ debug_print("Node %d, core %d, Testing area 1 at 0x%011lx, area 2 at 0x%011lx\n",
+ node, core, base_address + AREA_BASE_OFFSET,
+ base_address + AREA_BASE_OFFSET + dram_tune_rank_offset);
+
+ errs = dram_tuning_mem_xor2(base_address, bytemask, (num_lmcs >> 1)); // 4->2, 2->1, 1->0
+ //errs = dram_tuning_mem_rows(base_address, bytemask);
+
+ /* Report that we're done */
+    debug_print("Core %d on node %d done with test_dram_byte with 0x%lx errs\n",
+                core, node, errs);
+
+ if (errs) {
+ bdk_atomic_fetch_and_bset64_nosync(&test_dram_byte_threads_errs, errs);
+ // FIXME: this will have been done already in the called test routine
+ //bdk_atomic_fetch_and_bset64_nosync(&test_dram_byte_lmc_errs[lmc], errs);
+ }
+
+ bdk_atomic_add64_nosync(&test_dram_byte_threads_done, 1);
+
+ return;
+}
+
+static int dram_tune_use_xor2 = 1; // FIXME: do NOT default to original mem_xor (LMC-based) code
+
+static int
+run_dram_tuning_threads(bdk_node_t node, int num_lmcs, uint64_t bytemask)
+{
+ test_dram_byte_info_t test_dram_byte_info;
+ test_dram_byte_info_t *test_info = &test_dram_byte_info;
+ int total_count = 0;
+ __dram_tuning_thread_t thread_p = (dram_tune_use_xor2) ? dram_tuning_thread2 : dram_tuning_thread;
+
+ test_info->node = node;
+ test_info->num_lmcs = num_lmcs;
+ test_info->byte_mask = bytemask;
+
+ // init some global data
+ bdk_atomic_set64(&test_dram_byte_threads_done, 0);
+ bdk_atomic_set64((int64_t *)&test_dram_byte_threads_errs, 0);
+ bdk_atomic_set64((int64_t *)&test_dram_byte_lmc_errs[0], 0);
+ bdk_atomic_set64((int64_t *)&test_dram_byte_lmc_errs[1], 0);
+ bdk_atomic_set64((int64_t *)&test_dram_byte_lmc_errs[2], 0);
+ bdk_atomic_set64((int64_t *)&test_dram_byte_lmc_errs[3], 0);
+
+ /* Start threads for cores on the node */
+ if (bdk_numa_exists(node)) {
+ debug_print("Starting %d threads for test_dram_byte\n", dram_tune_use_cores);
+ for (int core = 0; core < dram_tune_use_cores; core++) {
+ if (bdk_thread_create(node, 0, thread_p, core, (void *)test_info, 0)) {
+ bdk_error("Failed to create thread %d for test_dram_byte\n", core);
+ } else {
+ total_count++;
+ }
+ }
+ }
+
+#if 0
+ /* Wait for threads to finish */
+ while (bdk_atomic_get64(&test_dram_byte_threads_done) < total_count)
+ bdk_thread_yield();
+#else
+#define TIMEOUT_SECS 5 // FIXME: long enough so a pass for a given setting will not print
+ /* Wait for threads to finish, with progress */
+ int cur_count;
+ uint64_t cur_time;
+ uint64_t period = bdk_clock_get_rate(bdk_numa_local(), BDK_CLOCK_TIME) * TIMEOUT_SECS; // FIXME?
+ uint64_t timeout = bdk_clock_get_count(BDK_CLOCK_TIME) + period;
+ do {
+ bdk_thread_yield();
+ cur_count = bdk_atomic_get64(&test_dram_byte_threads_done);
+ cur_time = bdk_clock_get_count(BDK_CLOCK_TIME);
+ if (cur_time >= timeout) {
+ printf("Waiting for %d cores\n", total_count - cur_count);
+ timeout = cur_time + period;
+ }
+ } while (cur_count < total_count);
+#endif
+
+ // NOTE: this is the summary of errors across all LMCs
+ return (int)bdk_atomic_get64((int64_t *)&test_dram_byte_threads_errs);
+}
+
+/* These variables count the number of ECC errors. They should only be accessed atomically */
+extern int64_t __bdk_dram_ecc_single_bit_errors[];
+extern int64_t __bdk_dram_ecc_double_bit_errors[];
+
+#if 0
+// make the tuning test callable as a standalone
+int
+bdk_run_dram_tuning_test(int node)
+{
+ int num_lmcs = __bdk_dram_get_num_lmc(node);
+ const char *s;
+ int lmc, byte;
+ int errors;
+ uint64_t start_dram_dclk[4], start_dram_ops[4];
+ int save_use_bursts;
+
+ // check for the cores on this node, abort if not more than 1 // FIXME?
+ dram_tune_max_cores = bdk_get_num_running_cores(node);
+ if (dram_tune_max_cores < 2) {
+ //bdk_init_cores(node, 0);
+ printf("N%d: ERROR: not enough cores to run the DRAM tuning test.\n", node);
+ return 0;
+ }
+
+ // but use only a certain number of cores, at most what is available
+ if ((s = getenv("ddr_tune_use_cores")) != NULL) {
+ dram_tune_use_cores = strtoul(s, NULL, 0);
+ if (dram_tune_use_cores <= 0) // allow 0 or negative to mean all
+ dram_tune_use_cores = dram_tune_max_cores;
+ }
+ if (dram_tune_use_cores > dram_tune_max_cores)
+ dram_tune_use_cores = dram_tune_max_cores;
+
+ // save the original bursts, so we can replace it with a better number for just testing
+ save_use_bursts = dram_tune_use_bursts;
+ dram_tune_use_bursts = 1500; // FIXME: hard code bursts for the test here...
+
+ // allow override of the test repeats (bursts) per thread create
+ if ((s = getenv("ddr_tune_use_bursts")) != NULL) {
+ dram_tune_use_bursts = strtoul(s, NULL, 10);
+ }
+
+ // allow override of the test mem_xor algorithm
+ if ((s = getenv("ddr_tune_use_xor2")) != NULL) {
+ dram_tune_use_xor2 = !!strtoul(s, NULL, 10);
+ }
+
+ // FIXME? consult LMC0 only
+ BDK_CSR_INIT(lmcx_config, node, BDK_LMCX_CONFIG(0));
+ if (lmcx_config.s.rank_ena) { // replace the default offset when there is more than 1 rank...
+ dram_tune_rank_offset = 1ull << (28 + lmcx_config.s.pbank_lsb - lmcx_config.s.rank_ena + (num_lmcs/2));
+ ddr_print("N%d: run_dram_tuning_test: changing rank offset to 0x%lx\n", node, dram_tune_rank_offset);
+ }
+ if (lmcx_config.s.init_status & 0x0c) { // bit 2 or 3 set indicates 2 DIMMs
+ dram_tune_dimm_offset = 1ull << (28 + lmcx_config.s.pbank_lsb + (num_lmcs/2));
+ ddr_print("N%d: run_dram_tuning_test: changing dimm offset to 0x%lx\n", node, dram_tune_dimm_offset);
+ }
+ int ddr_interface_64b = !lmcx_config.s.mode32b;
+
+ // construct the bytemask
+ int bytes_todo = (ddr_interface_64b) ? 0xff : 0x0f; // FIXME: hack?
+ uint64_t bytemask = 0;
+ for (byte = 0; byte < 8; ++byte) {
+ uint64_t bitmask;
+ if (bytes_todo & (1 << byte)) {
+ bitmask = ((!ddr_interface_64b) && (byte == 4)) ? 0x0f: 0xff;
+ bytemask |= bitmask << (8*byte); // set the bytes bits in the bytemask
+ }
+ } /* for (byte = 0; byte < 8; ++byte) */
+
+ // print current working values
+ ddr_print("N%d: run_dram_tuning_test: max %d cores, use %d cores, use %d bursts.\n",
+ node, dram_tune_max_cores, dram_tune_use_cores, dram_tune_use_bursts);
+
+ // do the setup on active LMCs
+ for (lmc = 0; lmc < num_lmcs; lmc++) {
+ // record start cycle CSRs here for utilization measure
+ start_dram_dclk[lmc] = BDK_CSR_READ(node, BDK_LMCX_DCLK_CNT(lmc));
+ start_dram_ops[lmc] = BDK_CSR_READ(node, BDK_LMCX_OPS_CNT(lmc));
+#if 0
+ bdk_atomic_set64(&__bdk_dram_ecc_single_bit_errors[lmc], 0);
+ bdk_atomic_set64(&__bdk_dram_ecc_double_bit_errors[lmc], 0);
+#else
+ __bdk_dram_ecc_single_bit_errors[lmc] = 0;
+ __bdk_dram_ecc_double_bit_errors[lmc] = 0;
+#endif
+ } /* for (lmc = 0; lmc < num_lmcs; lmc++) */
+
+ bdk_watchdog_poke();
+
+ // run the test(s)
+ // only 1 call should be enough, let the bursts, etc, control the load...
+ errors = run_dram_tuning_threads(node, num_lmcs, bytemask);
+
+ /* Check ECC error counters after the test */
+ int64_t ecc_single = 0;
+ int64_t ecc_double = 0;
+ int64_t ecc_single_errs[4];
+ int64_t ecc_double_errs[4];
+
+ // finally, print the utilizations all together, and sum the ECC errors
+ for (lmc = 0; lmc < num_lmcs; lmc++) {
+ uint64_t dclk_diff = BDK_CSR_READ(node, BDK_LMCX_DCLK_CNT(lmc)) - start_dram_dclk[lmc];
+ uint64_t ops_diff = BDK_CSR_READ(node, BDK_LMCX_OPS_CNT(lmc)) - start_dram_ops[lmc];
+ uint64_t percent_x10 = ops_diff * 1000 / dclk_diff;
+ printf("N%d.LMC%d: ops %lu, cycles %lu, used %lu.%lu%%\n",
+ node, lmc, ops_diff, dclk_diff, percent_x10 / 10, percent_x10 % 10);
+
+ ecc_single += (ecc_single_errs[lmc] = bdk_atomic_get64(&__bdk_dram_ecc_single_bit_errors[lmc]));
+ ecc_double += (ecc_double_errs[lmc] = bdk_atomic_get64(&__bdk_dram_ecc_double_bit_errors[lmc]));
+ } /* for (lmc = 0; lmc < num_lmcs; lmc++) */
+
+ /* Always print any ECC errors */
+ if (ecc_single || ecc_double) {
+ printf("Test \"%s\": ECC errors, %ld/%ld/%ld/%ld corrected, %ld/%ld/%ld/%ld uncorrected\n",
+ "DRAM Tuning Test",
+ ecc_single_errs[0], ecc_single_errs[1], ecc_single_errs[2], ecc_single_errs[3],
+ ecc_double_errs[0], ecc_double_errs[1], ecc_double_errs[2], ecc_double_errs[3]);
+ }
+ if (errors || ecc_double || ecc_single) {
+ printf("Test \"%s\": FAIL: %ld single, %ld double, %d compare errors\n",
+ "DRAM Tuning Test", ecc_single, ecc_double, errors);
+ }
+
+ // restore bursts
+ dram_tune_use_bursts = save_use_bursts;
+
+ return (errors + ecc_double + ecc_single);
+}
+#endif /* 0 */
+
+#define DEFAULT_SAMPLE_GRAN 3 // sample for errors every N offset values
+#define MIN_BYTE_OFFSET -63
+#define MAX_BYTE_OFFSET +63
+int dram_tune_use_gran = DEFAULT_SAMPLE_GRAN;
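+// e.g. with the default granularity of 3, offsets -63, -60, ..., -3, 0, 3, ..., 60, 63 are sampled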
+
+static int
+auto_set_dll_offset(bdk_node_t node, int dll_offset_mode,
+ int num_lmcs, int ddr_interface_64b,
+ int do_tune)
+{
+ int byte_offset;
+ //unsigned short result[9];
+ int byte;
+ int byte_delay_start[4][9];
+ int byte_delay_count[4][9];
+ uint64_t byte_delay_windows [4][9];
+ int byte_delay_best_start[4][9];
+ int byte_delay_best_count[4][9];
+ //int this_rodt;
+ uint64_t ops_sum[4], dclk_sum[4];
+ uint64_t start_dram_dclk[4], stop_dram_dclk[4];
+ uint64_t start_dram_ops[4], stop_dram_ops[4];
+ int errors, tot_errors;
+ int lmc;
+ char *mode_str = (dll_offset_mode == 2) ? "Read" : "Write";
+ int mode_is_read = (dll_offset_mode == 2);
+ char *mode_blk = (dll_offset_mode == 2) ? " " : "";
+ int start_offset, end_offset, incr_offset;
+
+ int speed_bin = get_speed_bin(node, 0); // FIXME: just get from LMC0?
+ int low_risk_count = 0, needs_review_count = 0;
+
+ if (dram_tune_use_gran != DEFAULT_SAMPLE_GRAN) {
+ ddr_print2("N%d: Changing sample granularity from %d to %d\n",
+ node, DEFAULT_SAMPLE_GRAN, dram_tune_use_gran);
+ }
+ // ensure sample is taken at 0
+ start_offset = MIN_BYTE_OFFSET - (MIN_BYTE_OFFSET % dram_tune_use_gran);
+ end_offset = MAX_BYTE_OFFSET - (MAX_BYTE_OFFSET % dram_tune_use_gran);
+ incr_offset = dram_tune_use_gran;
+
+ memset(ops_sum, 0, sizeof(ops_sum));
+ memset(dclk_sum, 0, sizeof(dclk_sum));
+ memset(byte_delay_start, 0, sizeof(byte_delay_start));
+ memset(byte_delay_count, 0, sizeof(byte_delay_count));
+ memset(byte_delay_windows, 0, sizeof(byte_delay_windows));
+ memset(byte_delay_best_start, 0, sizeof(byte_delay_best_start));
+ memset(byte_delay_best_count, 0, sizeof(byte_delay_best_count));
+
+ // FIXME? consult LMC0 only
+ BDK_CSR_INIT(lmcx_config, node, BDK_LMCX_CONFIG(0));
+ if (lmcx_config.s.rank_ena) { // replace the default offset when there is more than 1 rank...
+ dram_tune_rank_offset = 1ull << (28 + lmcx_config.s.pbank_lsb - lmcx_config.s.rank_ena + (num_lmcs/2));
+ ddr_print2("N%d: Tuning multiple ranks per DIMM (rank offset 0x%lx).\n", node, dram_tune_rank_offset);
+ }
+ if (lmcx_config.s.init_status & 0x0c) { // bit 2 or 3 set indicates 2 DIMMs
+ dram_tune_dimm_offset = 1ull << (28 + lmcx_config.s.pbank_lsb + (num_lmcs/2));
+ ddr_print2("N%d: Tuning multiple DIMMs per channel (DIMM offset 0x%lx)\n", node, dram_tune_dimm_offset);
+ }
+
+ // FIXME? do this for LMC0 only
+ //BDK_CSR_INIT(comp_ctl2, node, BDK_LMCX_COMP_CTL2(0));
+ //this_rodt = comp_ctl2.s.rodt_ctl;
+
+ // construct the bytemask
+ int bytes_todo = (ddr_interface_64b) ? 0xff : 0x0f;
+ uint64_t bytemask = 0;
+ for (byte = 0; byte < 8; ++byte) {
+ if (bytes_todo & (1 << byte)) {
+ bytemask |= 0xfful << (8*byte); // set the bytes bits in the bytemask
+ }
+ } /* for (byte = 0; byte < 8; ++byte) */
+
+ // now loop through selected legal values for the DLL byte offset...
+
+ for (byte_offset = start_offset; byte_offset <= end_offset; byte_offset += incr_offset) {
+
+ // do the setup on active LMCs
+ for (lmc = 0; lmc < num_lmcs; lmc++) {
+ change_dll_offset_enable(node, lmc, 0);
+
+ // set all byte lanes at once
+ load_dll_offset(node, lmc, dll_offset_mode, byte_offset, 10 /* All bytes at once */);
+ // but then clear the ECC byte lane so it should be neutral for the test...
+ load_dll_offset(node, lmc, dll_offset_mode, 0, 8);
+
+ change_dll_offset_enable(node, lmc, 1);
+
+ // record start cycle CSRs here for utilization measure
+ start_dram_dclk[lmc] = BDK_CSR_READ(node, BDK_LMCX_DCLK_CNT(lmc));
+ start_dram_ops[lmc] = BDK_CSR_READ(node, BDK_LMCX_OPS_CNT(lmc));
+ } /* for (lmc = 0; lmc < num_lmcs; lmc++) */
+
+ bdk_watchdog_poke();
+
+ // run the test(s)
+ // only 1 call should be enough, let the bursts, etc, control the load...
+ tot_errors = run_dram_tuning_threads(node, num_lmcs, bytemask);
+
+ for (lmc = 0; lmc < num_lmcs; lmc++) {
+ // record stop cycle CSRs here for utilization measure
+ stop_dram_dclk[lmc] = BDK_CSR_READ(node, BDK_LMCX_DCLK_CNT(lmc));
+ stop_dram_ops[lmc] = BDK_CSR_READ(node, BDK_LMCX_OPS_CNT(lmc));
+
+ // accumulate...
+ ops_sum[lmc] += stop_dram_ops[lmc] - start_dram_ops[lmc];
+ dclk_sum[lmc] += stop_dram_dclk[lmc] - start_dram_dclk[lmc];
+
+ errors = test_dram_byte_lmc_errs[lmc];
+
+ // check errors by byte, but not ECC
+ for (byte = 0; byte < 8; ++byte) {
+ if (!(bytes_todo & (1 << byte))) // is this byte lane to be done
+ continue; // no
+
+ byte_delay_windows[lmc][byte] <<= 1; // always put in a zero
+ if (errors & (1 << byte)) { // yes, an error in this byte lane
+ byte_delay_count[lmc][byte] = 0; // stop now always
+ } else { // no error in this byte lane
+ if (byte_delay_count[lmc][byte] == 0) { // first success, set run start
+ byte_delay_start[lmc][byte] = byte_offset;
+ }
+ byte_delay_count[lmc][byte] += incr_offset; // bump run length
+
+ if (byte_delay_count[lmc][byte] > byte_delay_best_count[lmc][byte]) {
+ byte_delay_best_count[lmc][byte] = byte_delay_count[lmc][byte];
+ byte_delay_best_start[lmc][byte] = byte_delay_start[lmc][byte];
+ }
+ byte_delay_windows[lmc][byte] |= 1ULL; // for pass, put in a 1
+ }
+ } /* for (byte = 0; byte < 8; ++byte) */
+
+ // only print when there are errors and verbose...
+ if (errors) {
+ debug_print("DLL %s Offset Test %3d: errors 0x%x\n",
+ mode_str, byte_offset, errors);
+ }
+ } /* for (lmc = 0; lmc < num_lmcs; lmc++) */
+
+ } /* for (byte_offset=-63; byte_offset<63; byte_offset += incr_offset) */
+
+ // done with testing, load up and/or print out the offsets we found...
+
+ // only when margining...
+ if (!do_tune) {
+ printf(" \n");
+ printf("-------------------------------------\n");
+#if 0
+ uint32_t mts_speed = (libdram_get_freq_from_pll(node, 0) * 2) / 1000000; // FIXME: sample LMC0
+ printf("N%d: Starting %s Timing Margining for %d MT/s.\n", node, mode_str, mts_speed);
+#else
+ printf("N%d: Starting %s Timing Margining.\n", node, mode_str);
+#endif
+ printf(" \n");
+ } /* if (!do_tune) */
+
+ for (lmc = 0; lmc < num_lmcs; lmc++) {
+#if 1
+ // FIXME FIXME
+ // FIXME: this just makes ECC always show 0
+ byte_delay_best_start[lmc][8] = start_offset;
+ byte_delay_best_count[lmc][8] = end_offset - start_offset + incr_offset;
+#endif
+
+ // disable offsets while we load...
+ change_dll_offset_enable(node, lmc, 0);
+
+ // only when margining...
+ if (!do_tune) {
+ // print the heading
+ printf(" \n");
+ printf("N%d.LMC%d: %s Timing Margin %s : ", node, lmc, mode_str, mode_blk);
+ printf(" ECC/8 ");
+ for (byte = 7; byte >= 0; byte--) {
+ printf(" Byte %d ", byte);
+ }
+ printf("\n");
+ } /* if (!do_tune) */
+
+ // print and load the offset values
+ // print the windows bit arrays
+ // only when margining...
+ if (!do_tune) {
+ printf("N%d.LMC%d: DLL %s Offset Amount %s : ", node, lmc, mode_str, mode_blk);
+ } else {
+ ddr_print("N%d.LMC%d: SW DLL %s Offset Amount %s : ", node, lmc, mode_str, mode_blk);
+ }
+ for (byte = 8; byte >= 0; --byte) { // print in "normal" reverse index order
+
+ int count = byte_delay_best_count[lmc][byte];
+ if (count == 0)
+ count = incr_offset; // should make non-tested ECC byte come out 0
+
+ byte_offset = byte_delay_best_start[lmc][byte] +
+ ((count - incr_offset) / 2); // adj by incr
+
+ if (!do_tune) { // do counting and special flag if margining
+ int will_need_review = !is_low_risk_winlen(speed_bin, (count - incr_offset)) &&
+ !is_low_risk_offset(speed_bin, byte_offset);
+
+ printf("%10d%c", byte_offset, (will_need_review) ? '<' :' ');
+
+ if (will_need_review)
+ needs_review_count++;
+ else
+ low_risk_count++;
+ } else { // if just tuning, make the printout less lengthy
+ ddr_print("%5d ", byte_offset);
+ }
+
+ // FIXME? should we be able to override this?
+ if (mode_is_read) // for READ offsets, always store what we found
+ load_dll_offset(node, lmc, dll_offset_mode, byte_offset, byte);
+ else // for WRITE offsets, always store 0
+ load_dll_offset(node, lmc, dll_offset_mode, 0, byte);
+
+ }
+ if (!do_tune) {
+ printf("\n");
+ } else {
+ ddr_print("\n");
+ }
+
+
+ // re-enable the offsets now that we are done loading
+ change_dll_offset_enable(node, lmc, 1);
+
+ // only when margining...
+ if (!do_tune) {
+ // print the window sizes
+ printf("N%d.LMC%d: DLL %s Window Length %s : ", node, lmc, mode_str, mode_blk);
+ for (byte = 8; byte >= 0; --byte) { // print in "normal" reverse index order
+ int count = byte_delay_best_count[lmc][byte];
+ if (count == 0)
+ count = incr_offset; // should make non-tested ECC byte come out 0
+
+ // do this again since the "needs review" test is an AND...
+ byte_offset = byte_delay_best_start[lmc][byte] +
+ ((count - incr_offset) / 2); // adj by incr
+
+ int will_need_review = !is_low_risk_winlen(speed_bin, (count - incr_offset)) &&
+ !is_low_risk_offset(speed_bin, byte_offset);
+
+ printf("%10d%c", count - incr_offset, (will_need_review) ? '<' :' ');
+ }
+ printf("\n");
+
+ // print the window extents
+ printf("N%d.LMC%d: DLL %s Window Bounds %s : ", node, lmc, mode_str, mode_blk);
+ for (byte = 8; byte >= 0; --byte) { // print in "normal" reverse index order
+ int start = byte_delay_best_start[lmc][byte];
+ int count = byte_delay_best_count[lmc][byte];
+ if (count == 0)
+ count = incr_offset; // should make non-tested ECC byte come out 0
+ printf(" %3d to%3d ", start,
+ start + count - incr_offset);
+ }
+ printf("\n");
+#if 0
+ // FIXME: should have a way to force these out...
+ // print the windows bit arrays
+ printf("N%d.LMC%d: DLL %s Window Bitmap%s : ", node, lmc, mode_str, mode_blk);
+ for (byte = 8; byte >= 0; --byte) { // print in "normal" reverse index order
+ printf("%010lx ", byte_delay_windows[lmc][byte]);
+ }
+ printf("\n");
+#endif
+ } /* if (!do_tune) */
+ } /* for (lmc = 0; lmc < num_lmcs; lmc++) */
+
+ // only when margining...
+ if (!do_tune) {
+ // print the Summary line(s) here
+ printf(" \n");
+ printf("N%d: %s Timing Margining Summary : %s ", node, mode_str,
+ (needs_review_count > 0) ? "Needs Review" : "Low Risk");
+ if (needs_review_count > 0)
+ printf("(%d)", needs_review_count);
+ printf("\n");
+
+ // FIXME??? want to print here: "N0: %s Offsets have been applied already"
+
+ printf("-------------------------------------\n");
+ printf(" \n");
+ } /* if (!do_tune) */
+
+ // FIXME: we probably want this only when doing verbose...
+ // finally, print the utilizations all together
+ for (lmc = 0; lmc < num_lmcs; lmc++) {
+ uint64_t percent_x10 = ops_sum[lmc] * 1000 / dclk_sum[lmc];
+ ddr_print2("N%d.LMC%d: ops %lu, cycles %lu, used %lu.%lu%%\n",
+ node, lmc, ops_sum[lmc], dclk_sum[lmc], percent_x10 / 10, percent_x10 % 10);
+ } /* for (lmc = 0; lmc < num_lmcs; lmc++) */
+
+ // FIXME: only when verbose, or only when there are errors?
+ // run the test one last time
+ // print whether there are errors or not, but only when verbose...
+ bdk_watchdog_poke();
+ debug_print("N%d: %s: Start running test one last time\n", node, __FUNCTION__);
+ tot_errors = run_dram_tuning_threads(node, num_lmcs, bytemask);
+ debug_print("N%d: %s: Finished running test one last time\n", node, __FUNCTION__);
+ if (tot_errors)
+ ddr_print2("%s Timing Final Test: errors 0x%x\n", mode_str, tot_errors);
+
+ return (do_tune) ? tot_errors : !!(needs_review_count > 0);
+}
+
+#define USE_L2_WAYS_LIMIT 0 // non-zero to enable L2 ways limiting
+
+/*
+ * Automatically adjust the DLL offset for the data bytes
+ */
+int perform_dll_offset_tuning(bdk_node_t node, int dll_offset_mode, int do_tune)
+{
+ int ddr_interface_64b;
+ int save_ecc_ena[4];
+ bdk_lmcx_config_t lmc_config;
+ int lmc, num_lmcs = __bdk_dram_get_num_lmc(node);
+ const char *s;
+#if USE_L2_WAYS_LIMIT
+ int ways, ways_print = 0;
+#endif
+#if 0
+ int dram_tune_use_rodt = -1, save_rodt[4];
+ bdk_lmcx_comp_ctl2_t comp_ctl2;
+#endif
+ int loops = 1, loop;
+ uint64_t orig_coremask;
+ int errs = 0;
+
+ // enable any non-running cores on this node
+ orig_coremask = bdk_get_running_coremask(node);
+ ddr_print4("N%d: %s: Starting cores (mask was 0x%lx)\n",
+ node, __FUNCTION__, orig_coremask);
+ bdk_init_cores(node, ~0ULL & ~orig_coremask);
+ dram_tune_max_cores = bdk_get_num_running_cores(node);
+
+ // but use only a certain number of cores, at most what is available
+ if ((s = getenv("ddr_tune_use_cores")) != NULL) {
+ dram_tune_use_cores = strtoul(s, NULL, 0);
+ if (dram_tune_use_cores <= 0) // allow 0 or negative to mean all
+ dram_tune_use_cores = dram_tune_max_cores;
+ }
+ if (dram_tune_use_cores > dram_tune_max_cores)
+ dram_tune_use_cores = dram_tune_max_cores;
+
+ // see if we want to do the tuning more than once per LMC...
+ if ((s = getenv("ddr_tune_use_loops"))) {
+ loops = strtoul(s, NULL, 0);
+ }
+
+ // see if we want to change the granularity of the byte_offset sampling
+ if ((s = getenv("ddr_tune_use_gran"))) {
+ dram_tune_use_gran = strtoul(s, NULL, 0);
+ }
+
+ // allow override of the test repeats (bursts) per thread create
+ if ((s = getenv("ddr_tune_use_bursts")) != NULL) {
+ dram_tune_use_bursts = strtoul(s, NULL, 10);
+ }
+
+#if 0
+ // allow override of Read ODT setting just during the tuning run(s)
+ if ((s = getenv("ddr_tune_use_rodt")) != NULL) {
+ int temp = strtoul(s, NULL, 10);
+ // validity check
+ if (temp >= 0 && temp <= 7)
+ dram_tune_use_rodt = temp;
+ }
+#endif
+
+#if 0
+ // allow override of the test pattern
+ // FIXME: a bit simplistic...
+ if ((s = getenv("ddr_tune_use_pattern")) != NULL) {
+ int patno = strtoul(s, NULL, 10);
+ if (patno == 2)
+ dram_tune_test_pattern = test_pattern_2;
+ else if (patno == 3)
+ dram_tune_test_pattern = test_pattern_3;
+ else // all other values use default
+ dram_tune_test_pattern = test_pattern_1;
+ }
+#endif
+
+ // allow override of the test mem_xor algorithm
+ if ((s = getenv("ddr_tune_use_xor2")) != NULL) {
+ dram_tune_use_xor2 = !!strtoul(s, NULL, 10);
+ }
+
+ // print current working values
+ ddr_print2("N%d: Tuning will use %d cores of max %d cores, and use %d repeats.\n",
+ node, dram_tune_use_cores, dram_tune_max_cores,
+ dram_tune_use_bursts);
+
+#if USE_L2_WAYS_LIMIT
+ // see if L2 ways are limited
+ if ((s = lookup_env_parameter("limit_l2_ways")) != NULL) {
+ ways = strtoul(s, NULL, 10);
+ ways_print = 1;
+ } else {
+ ways = bdk_l2c_get_num_assoc(node);
+ }
+#endif
+
+#if 0
+ // if RODT is to be overridden during tuning, note change
+ if (dram_tune_use_rodt >= 0) {
+ ddr_print("N%d: using RODT %d for tuning.\n",
+ node, dram_tune_use_rodt);
+ }
+#endif
+
+ // FIXME? get flag from LMC0 only
+ lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(0));
+ ddr_interface_64b = !lmc_config.s.mode32b;
+
+ // do setup for each active LMC
+ debug_print("N%d: %s: starting LMCs setup.\n", node, __FUNCTION__);
+ for (lmc = 0; lmc < num_lmcs; lmc++) {
+
+#if 0
+ // if RODT change, save old and set new here...
+ if (dram_tune_use_rodt >= 0) {
+ comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(lmc));
+ save_rodt[lmc] = comp_ctl2.s.rodt_ctl;
+ comp_ctl2.s.rodt_ctl = dram_tune_use_rodt;
+ DRAM_CSR_WRITE(node, BDK_LMCX_COMP_CTL2(lmc), comp_ctl2.u);
+ BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(lmc));
+ }
+#endif
+ /* Disable ECC for DRAM tests */
+ lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(lmc));
+ save_ecc_ena[lmc] = lmc_config.s.ecc_ena;
+ lmc_config.s.ecc_ena = 0;
+ DRAM_CSR_WRITE(node, BDK_LMCX_CONFIG(lmc), lmc_config.u);
+ lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(lmc));
+
+ } /* for (lmc = 0; lmc < num_lmcs; lmc++) */
+
+#if USE_L2_WAYS_LIMIT
+ /* Disable l2 sets for DRAM testing */
+ limit_l2_ways(node, 0, ways_print);
+#endif
+
+ // testing is done on all LMCs simultaneously
+ // FIXME: for now, loop here to show what happens multiple times
+ for (loop = 0; loop < loops; loop++) {
+ /* Perform DLL offset tuning */
+ errs = auto_set_dll_offset(node, dll_offset_mode, num_lmcs, ddr_interface_64b, do_tune);
+ }
+
+#if USE_L2_WAYS_LIMIT
+ /* Restore the l2 set configuration */
+ limit_l2_ways(node, ways, ways_print);
+#endif
+
+ // perform cleanup on all active LMCs
+ debug_print("N%d: %s: starting LMCs cleanup.\n", node, __FUNCTION__);
+ for (lmc = 0; lmc < num_lmcs; lmc++) {
+
+ /* Restore ECC for DRAM tests */
+ lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(lmc));
+ lmc_config.s.ecc_ena = save_ecc_ena[lmc];
+ DRAM_CSR_WRITE(node, BDK_LMCX_CONFIG(lmc), lmc_config.u);
+ lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(lmc));
+#if 0
+ // if RODT change, restore old here...
+ if (dram_tune_use_rodt >= 0) {
+ comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(lmc));
+ comp_ctl2.s.rodt_ctl = save_rodt[lmc];
+ DRAM_CSR_WRITE(node, BDK_LMCX_COMP_CTL2(lmc), comp_ctl2.u);
+ BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(lmc));
+ }
+#endif
+ // finally, see if there are any read offset overrides after tuning
+ // FIXME: provide a way to do write offsets also??
+ if (dll_offset_mode == 2) {
+ for (int by = 0; by < 9; by++) {
+ if ((s = lookup_env_parameter("ddr%d_tune_byte%d", lmc, by)) != NULL) {
+ int dllro = strtoul(s, NULL, 10);
+ change_dll_offset_enable(node, lmc, 0);
+ load_dll_offset(node, lmc, /* read */2, dllro, by);
+ change_dll_offset_enable(node, lmc, 1);
+ }
+ }
+ }
+ } /* for (lmc = 0; lmc < num_lmcs; lmc++) */
+
+ // finish up...
+
+#if 0
+ // if RODT was overridden during tuning, note restore
+ if (dram_tune_use_rodt >= 0) {
+ ddr_print("N%d: restoring RODT %d after tuning.\n",
+ node, save_rodt[0]); // FIXME? use LMC0
+ }
+#endif
+
+ // put any cores on this node, that were not running at the start, back into reset
+ uint64_t reset_coremask = bdk_get_running_coremask(node) & ~orig_coremask;
+ if (reset_coremask) {
+ ddr_print4("N%d: %s: Stopping cores 0x%lx\n", node, __FUNCTION__,
+ reset_coremask);
+ bdk_reset_cores(node, reset_coremask);
+ } else {
+ ddr_print4("N%d: %s: leaving cores set to 0x%lx\n", node, __FUNCTION__,
+ orig_coremask);
+ }
+
+ return errs;
+
+} /* perform_dll_offset_tuning */
+
+/////////////////////////////////////////////////////////////////////////////////////////////
+
+///// HW-assist byte DLL offset tuning //////
+
+#if 1
+// setup defaults for byte test pattern array
+// take these first two from the HRM section 6.9.13
+static const uint64_t byte_pattern_0[] = {
+ 0xFFAAFFFFFF55FFFFULL, // GP0
+ 0x55555555AAAAAAAAULL, // GP1
+ 0xAA55AAAAULL, // GP2
+};
+static const uint64_t byte_pattern_1[] = {
+ 0xFBF7EFDFBF7FFEFDULL, // GP0
+ 0x0F1E3C78F0E1C387ULL, // GP1
+ 0xF0E1BF7FULL, // GP2
+};
+// this is from Andrew via LFSR with PRBS=0xFFFFAAAA
+static const uint64_t byte_pattern_2[] = {
+ 0xEE55AADDEE55AADDULL, // GP0
+ 0x55AADDEE55AADDEEULL, // GP1
+ 0x55EEULL, // GP2
+};
+// this is from Mike via LFSR with PRBS=0x4A519909
+static const uint64_t byte_pattern_3[] = {
+ 0x0088CCEE0088CCEEULL, // GP0
+ 0xBB552211BB552211ULL, // GP1
+ 0xBB00ULL, // GP2
+};
+
+static const uint64_t *byte_patterns[] = {
+ byte_pattern_0, byte_pattern_1, byte_pattern_2, byte_pattern_3 // FIXME: use all we have
+};
+#define NUM_BYTE_PATTERNS ((int)(sizeof(byte_patterns)/sizeof(uint64_t *)))
+
+#define DEFAULT_BYTE_BURSTS 32 // FIXME: this is what the longest test usually has
+int dram_tune_byte_bursts = DEFAULT_BYTE_BURSTS;
+#endif
+
+static void
+setup_hw_pattern(bdk_node_t node, int lmc, const uint64_t *pattern_p)
+{
+ /*
+ 3) Setup GENERAL_PURPOSE[0-2] registers with the data pattern of choice.
+ a. GENERAL_PURPOSE0[DATA<63:0>] – sets the initial lower (rising edge) 64 bits of data.
+ b. GENERAL_PURPOSE1[DATA<63:0>] – sets the initial upper (falling edge) 64 bits of data.
+ c. GENERAL_PURPOSE2[DATA<15:0>] – sets the initial lower (rising edge <7:0>) and upper
+ (falling edge <15:8>) ECC data.
+ */
+ DRAM_CSR_WRITE(node, BDK_LMCX_GENERAL_PURPOSE0(lmc), pattern_p[0]);
+ DRAM_CSR_WRITE(node, BDK_LMCX_GENERAL_PURPOSE1(lmc), pattern_p[1]);
+ DRAM_CSR_WRITE(node, BDK_LMCX_GENERAL_PURPOSE2(lmc), pattern_p[2]);
+}
+
+#define DEFAULT_PRBS 0xFFFFAAAAUL /* FIXME: maybe try 0x4A519909UL */
+
+static void
+setup_lfsr_pattern(bdk_node_t node, int lmc, uint64_t data)
+{
+ uint32_t prbs;
+ const char *s;
+
+ if ((s = getenv("ddr_lfsr_prbs"))) {
+ prbs = strtoul(s, NULL, 0);
+ } else
+ prbs = DEFAULT_PRBS; // FIXME: from data arg?
+
+ /*
+ 2) DBTRAIN_CTL[LFSR_PATTERN_SEL] = 1
+ here data comes from the LFSR generating a PRBS pattern
+ CHAR_CTL.EN = 0
+ CHAR_CTL.SEL = 0; // for PRBS
+ CHAR_CTL.DR = 1;
+ CHAR_CTL.PRBS = setup for whatever type of PRBS to send
+ CHAR_CTL.SKEW_ON = 1;
+ */
+ BDK_CSR_INIT(char_ctl, node, BDK_LMCX_CHAR_CTL(lmc));
+ char_ctl.s.en = 0;
+ char_ctl.s.sel = 0;
+ char_ctl.s.dr = 1;
+ char_ctl.s.prbs = prbs;
+ char_ctl.s.skew_on = 1;
+ DRAM_CSR_WRITE(node, BDK_LMCX_CHAR_CTL(lmc), char_ctl.u);
+}
+
+int
+choose_best_hw_patterns(bdk_node_t node, int lmc, int mode)
+{
+ int new_mode = mode;
+ const char *s;
+
+ switch (mode) {
+ case DBTRAIN_TEST: // always choose LFSR if chip supports it
+ if (! CAVIUM_IS_MODEL(CAVIUM_CN88XX)) {
+ int lfsr_enable = 1;
+ if ((s = getenv("ddr_allow_lfsr"))) { // override?
+ lfsr_enable = !!strtoul(s, NULL, 0);
+ }
+ if (lfsr_enable)
+ new_mode = DBTRAIN_LFSR;
+ }
+ break;
+ case DBTRAIN_DBI: // possibly can allow LFSR use?
+ break;
+ case DBTRAIN_LFSR: // forced already
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX)) {
+ ddr_print("ERROR: illegal HW assist mode %d\n", mode);
+ new_mode = DBTRAIN_TEST;
+ }
+ break;
+ default:
+ ddr_print("ERROR: unknown HW assist mode %d\n", mode);
+ }
+
+ if (new_mode != mode)
+ VB_PRT(VBL_DEV2, "choose_best_hw_patterns: changing mode %d to %d\n", mode, new_mode);
+
+ return new_mode;
+}
+
+int
+run_best_hw_patterns(bdk_node_t node, int lmc, uint64_t phys_addr,
+ int mode, uint64_t *xor_data)
+{
+ int pattern;
+ const uint64_t *pattern_p;
+ int errs, errors = 0;
+
+ // FIXME? always choose LFSR if chip supports it???
+ mode = choose_best_hw_patterns(node, lmc, mode);
+
+ if (mode == DBTRAIN_LFSR) {
+ setup_lfsr_pattern(node, lmc, 0);
+ errors = test_dram_byte_hw(node, lmc, phys_addr, mode, xor_data);
+ VB_PRT(VBL_DEV2, "%s: LFSR at A:0x%012lx errors 0x%x\n",
+ __FUNCTION__, phys_addr, errors);
+ } else {
+ for (pattern = 0; pattern < NUM_BYTE_PATTERNS; pattern++) {
+ pattern_p = byte_patterns[pattern];
+ setup_hw_pattern(node, lmc, pattern_p);
+
+ errs = test_dram_byte_hw(node, lmc, phys_addr, mode, xor_data);
+
+ VB_PRT(VBL_DEV2, "%s: PATTERN %d at A:0x%012lx errors 0x%x\n",
+ __FUNCTION__, pattern, phys_addr, errs);
+
+ errors |= errs;
+ } /* for (pattern = 0; pattern < NUM_BYTE_PATTERNS; pattern++) */
+ }
+ return errors;
+}
+
+static void
+hw_assist_test_dll_offset(bdk_node_t node, int dll_offset_mode,
+ int lmc, int bytelane)
+{
+ int byte_offset, new_best_offset[9];
+ int rank_delay_start[4][9];
+ int rank_delay_count[4][9];
+ int rank_delay_best_start[4][9];
+ int rank_delay_best_count[4][9];
+ int errors[4], off_errors, tot_errors;
+ int num_lmcs = __bdk_dram_get_num_lmc(node);
+ int rank_mask, rankx, active_ranks;
+ int pattern;
+ const uint64_t *pattern_p;
+ int byte;
+ char *mode_str = (dll_offset_mode == 2) ? "Read" : "Write";
+ int pat_best_offset[9];
+ uint64_t phys_addr;
+ int pat_beg, pat_end;
+ int rank_beg, rank_end;
+ int byte_lo, byte_hi;
+ uint64_t hw_rank_offset;
+ // FIXME? always choose LFSR if chip supports it???
+ int mode = choose_best_hw_patterns(node, lmc, DBTRAIN_TEST);
+
+ if (bytelane == 0x0A) { // all bytelanes
+ byte_lo = 0;
+ byte_hi = 8;
+ } else { // just 1
+ byte_lo = byte_hi = bytelane;
+ }
+
+ BDK_CSR_INIT(lmcx_config, node, BDK_LMCX_CONFIG(lmc));
+ rank_mask = lmcx_config.s.init_status;
+ // this should be correct for 1 or 2 ranks, 1 or 2 DIMMs
+ hw_rank_offset = 1ull << (28 + lmcx_config.s.pbank_lsb - lmcx_config.s.rank_ena + (num_lmcs/2));
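+ // e.g. with illustrative values PBANK_LSB=5, RANK_ENA=1 and num_lmcs=4, this is 1ull << (28+5-1+2), i.e. 16GB between rank bases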
+
+ debug_print("N%d: %s: starting LMC%d with rank offset 0x%lx\n",
+ node, __FUNCTION__, lmc, hw_rank_offset);
+
+ // start of pattern loop
+ // we do the set of tests for each pattern supplied...
+
+ memset(new_best_offset, 0, sizeof(new_best_offset));
+ for (pattern = 0; pattern < NUM_BYTE_PATTERNS; pattern++) {
+
+ memset(pat_best_offset, 0, sizeof(pat_best_offset));
+
+ if (mode == DBTRAIN_TEST) {
+ pattern_p = byte_patterns[pattern];
+ setup_hw_pattern(node, lmc, pattern_p);
+ } else {
+ setup_lfsr_pattern(node, lmc, 0);
+ }
+
+ // now loop through all legal values for the DLL byte offset...
+
+#define BYTE_OFFSET_INCR 3 // FIXME: make this tunable?
+
+ tot_errors = 0;
+
+ memset(rank_delay_count, 0, sizeof(rank_delay_count));
+ memset(rank_delay_start, 0, sizeof(rank_delay_start));
+ memset(rank_delay_best_count, 0, sizeof(rank_delay_best_count));
+ memset(rank_delay_best_start, 0, sizeof(rank_delay_best_start));
+
+ for (byte_offset = -63; byte_offset < 64; byte_offset += BYTE_OFFSET_INCR) {
+
+ // do the setup on the active LMC
+ // set the bytelanes DLL offsets
+ change_dll_offset_enable(node, lmc, 0);
+ load_dll_offset(node, lmc, dll_offset_mode, byte_offset, bytelane); // FIXME? bytelane?
+ change_dll_offset_enable(node, lmc, 1);
+
+ bdk_watchdog_poke();
+
+ // run the test on each rank
+ // only 1 call per rank should be enough, let the bursts, loops, etc, control the load...
+
+ off_errors = 0; // errors for this byte_offset, all ranks
+
+ active_ranks = 0;
+
+ for (rankx = 0; rankx < 4; rankx++) {
+ if (!(rank_mask & (1 << rankx)))
+ continue;
+
+ phys_addr = hw_rank_offset * active_ranks;
+ // FIXME: now done by test_dram_byte_hw()
+ //phys_addr |= (lmc << 7);
+ //phys_addr = bdk_numa_get_address(node, phys_addr); // map to node
+
+ active_ranks++;
+
+ // NOTE: return is a now a bitmask of the erroring bytelanes..
+ errors[rankx] = test_dram_byte_hw(node, lmc, phys_addr, mode, NULL);
+
+ for (byte = byte_lo; byte <= byte_hi; byte++) { // do bytelane(s)
+
+ // check errors
+ if (errors[rankx] & (1 << byte)) { // yes, an error in the byte lane in this rank
+ off_errors |= (1 << byte);
+
+ ddr_print5("N%d.LMC%d.R%d: Bytelane %d DLL %s Offset Test %3d: Address 0x%012lx errors 0x%x\n",
+ node, lmc, rankx, bytelane, mode_str,
+ byte_offset, phys_addr, errors[rankx]);
+
+ if (rank_delay_count[rankx][byte] > 0) { // had started run
+ ddr_print5("N%d.LMC%d.R%d: Bytelane %d DLL %s Offset Test %3d: stopping a run here\n",
+ node, lmc, rankx, bytelane, mode_str, byte_offset);
+ rank_delay_count[rankx][byte] = 0; // stop now
+ }
+ // FIXME: else had not started run - nothing else to do?
+ } else { // no error in the byte lane
+ if (rank_delay_count[rankx][byte] == 0) { // first success, set run start
+ ddr_print5("N%d.LMC%d.R%d: Bytelane %d DLL %s Offset Test %3d: starting a run here\n",
+ node, lmc, rankx, bytelane, mode_str, byte_offset);
+ rank_delay_start[rankx][byte] = byte_offset;
+ }
+ rank_delay_count[rankx][byte] += BYTE_OFFSET_INCR; // bump run length
+
+ // is this now the biggest window?
+ if (rank_delay_count[rankx][byte] > rank_delay_best_count[rankx][byte]) {
+ rank_delay_best_count[rankx][byte] = rank_delay_count[rankx][byte];
+ rank_delay_best_start[rankx][byte] = rank_delay_start[rankx][byte];
+ debug_print("N%d.LMC%d.R%d: Bytelane %d DLL %s Offset Test %3d: updating best to %d/%d\n",
+ node, lmc, rankx, bytelane, mode_str, byte_offset,
+ rank_delay_best_start[rankx][byte], rank_delay_best_count[rankx][byte]);
+ }
+ }
+ } /* for (byte = byte_lo; byte <= byte_hi; byte++) */
+ } /* for (rankx = 0; rankx < 4; rankx++) */
+
+ tot_errors |= off_errors;
+
+ } /* for (byte_offset = -63; byte_offset < 64; byte_offset += BYTE_OFFSET_INCR) */
+
+ // now choose the best byte_offsets for this pattern according to the best windows of the tested ranks
+ // calculate the offset by intersecting the per-rank windows (max of starts, min of ends) and taking the midpoint
+ for (byte = byte_lo; byte <= byte_hi; byte++) {
+
+ pat_beg = -999;
+ pat_end = 999;
+
+ for (rankx = 0; rankx < 4; rankx++) {
+ if (!(rank_mask & (1 << rankx)))
+ continue;
+
+ rank_beg = rank_delay_best_start[rankx][byte];
+ pat_beg = max(pat_beg, rank_beg);
+ rank_end = rank_beg + rank_delay_best_count[rankx][byte] - BYTE_OFFSET_INCR;
+ pat_end = min(pat_end, rank_end);
+
+ ddr_print5("N%d.LMC%d.R%d: Bytelane %d DLL %s Offset Test: Rank Window %3d:%3d\n",
+ node, lmc, rankx, bytelane, mode_str, rank_beg, rank_end);
+
+ } /* for (rankx = 0; rankx < 4; rankx++) */
+
+ pat_best_offset[byte] = (pat_end + pat_beg) / 2;
+ ddr_print4("N%d.LMC%d: Bytelane %d DLL %s Offset Test: Pattern %d Average %3d\n",
+ node, lmc, byte, mode_str, pattern, pat_best_offset[byte]);
+
+#if 0
+ // FIXME: next print the window counts
+ sprintf(sbuffer, "N%d.LMC%d Pattern %d: DLL %s Offset Count ",
+ node, lmc, pattern, mode_str);
+ printf("%-45s : ", sbuffer);
+ printf(" %3d", byte_delay_best_count);
+ printf("\n");
+#endif
+
+ new_best_offset[byte] += pat_best_offset[byte]; // sum the pattern averages
+ } /* for (byte = byte_lo; byte <= byte_hi; byte++) */
+ } /* for (pattern = 0; pattern < NUM_BYTE_PATTERNS; pattern++) */
+ // end of pattern loop
+
+ ddr_print("N%d.LMC%d: HW DLL %s Offset Amount : ",
+ node, lmc, mode_str);
+
+ for (byte = byte_hi; byte >= byte_lo; --byte) { // print in descending byte index order
+ new_best_offset[byte] = divide_nint(new_best_offset[byte], NUM_BYTE_PATTERNS); // create the new average NINT
+
+ // print the best offsets from all patterns
+
+ if (bytelane == 0x0A) // print just the offset of all the bytes
+ ddr_print("%5d ", new_best_offset[byte]);
+ else
+ ddr_print("(byte %d) %5d ", byte, new_best_offset[byte]);
+
+
+#if 1
+ // done with testing, load up the best offsets we found...
+ change_dll_offset_enable(node, lmc, 0); // disable offsets while we load...
+ load_dll_offset(node, lmc, dll_offset_mode, new_best_offset[byte], byte);
+ change_dll_offset_enable(node, lmc, 1); // re-enable the offsets now that we are done loading
+#endif
+ } /* for (byte = byte_hi; byte >= byte_lo; --byte) */
+
+ ddr_print("\n");
+
+#if 0
+ // run the test one last time
+ // print whether there are errors or not, but only when verbose...
+ tot_errors = run_test_dram_byte_threads(node, num_lmcs, bytemask);
+ printf("N%d.LMC%d: Bytelane %d DLL %s Offset Final Test: errors 0x%x\n",
+ node, lmc, bytelane, mode_str, tot_errors);
+#endif
+}
+
+/*
+ * Automatically adjust the DLL offset for the selected bytelane using hardware-assist
+ */
+int perform_HW_dll_offset_tuning(bdk_node_t node, int dll_offset_mode, int bytelane)
+{
+ int save_ecc_ena[4];
+ bdk_lmcx_config_t lmc_config;
+ int lmc, num_lmcs = __bdk_dram_get_num_lmc(node);
+ const char *s;
+ //bdk_lmcx_comp_ctl2_t comp_ctl2;
+ int loops = 1, loop;
+
+ // see if we want to do the tuning more than once per LMC...
+ if ((s = getenv("ddr_tune_ecc_loops"))) {
+ loops = strtoul(s, NULL, 0);
+ }
+
+ // allow override of the test repeats (bursts)
+ if ((s = getenv("ddr_tune_byte_bursts")) != NULL) {
+ dram_tune_byte_bursts = strtoul(s, NULL, 10);
+ }
+
+ // print current working values
+ ddr_print2("N%d: H/W Tuning for bytelane %d will use %d loops, %d bursts, and %d patterns.\n",
+ node, bytelane, loops, dram_tune_byte_bursts,
+ NUM_BYTE_PATTERNS);
+
+ // FIXME? get flag from LMC0 only
+ lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(0));
+
+ // do once for each active LMC
+
+ for (lmc = 0; lmc < num_lmcs; lmc++) {
+
+ ddr_print4("N%d: H/W Tuning: starting LMC%d bytelane %d tune.\n", node, lmc, bytelane);
+
+ /* Enable ECC for the HW tests */
+ // NOTE: we do enable ECC, but the HW tests used will not generate "visible" errors
+ lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(lmc));
+ save_ecc_ena[lmc] = lmc_config.s.ecc_ena;
+ lmc_config.s.ecc_ena = 1;
+ DRAM_CSR_WRITE(node, BDK_LMCX_CONFIG(lmc), lmc_config.u);
+ lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(lmc));
+
+ // testing is done on a single LMC at a time
+ // FIXME: for now, loop here to show what happens multiple times
+ for (loop = 0; loop < loops; loop++) {
+ /* Perform DLL offset tuning */
+ //auto_set_dll_offset(node, 1 /* 1=write */, lmc, bytelane);
+ hw_assist_test_dll_offset(node, 2 /* 2=read */, lmc, bytelane);
+ }
+
+ // perform cleanup on active LMC
+ ddr_print4("N%d: H/W Tuning: finishing LMC%d bytelane %d tune.\n", node, lmc, bytelane);
+
+ /* Restore ECC for DRAM tests */
+ lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(lmc));
+ lmc_config.s.ecc_ena = save_ecc_ena[lmc];
+ DRAM_CSR_WRITE(node, BDK_LMCX_CONFIG(lmc), lmc_config.u);
+ lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(lmc));
+
+ // finally, see if there are any read offset overrides after tuning
+ for (int by = 0; by < 9; by++) {
+ if ((s = lookup_env_parameter("ddr%d_tune_byte%d", lmc, by)) != NULL) {
+ int dllro = strtoul(s, NULL, 10);
+ change_dll_offset_enable(node, lmc, 0);
+ load_dll_offset(node, lmc, 2 /* 2=read */, dllro, by);
+ change_dll_offset_enable(node, lmc, 1);
+ }
+ }
+
+ } /* for (lmc = 0; lmc < num_lmcs; lmc++) */
+
+ // finish up...
+
+ return 0;
+
+} /* perform_HW_dll_offset_tuning */
diff --git a/src/vendorcode/cavium/bdk/libdram/dram-util.h b/src/vendorcode/cavium/bdk/libdram/dram-util.h
new file mode 100644
index 0000000000..f8ab6c1552
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libdram/dram-util.h
@@ -0,0 +1,96 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * Small utility functions for use by libdram internally. These
+ * are not meant for users's of the libdram API.
+ */
+
+/**
+ * Standard min(a,b) macro
+ */
+#define min(X, Y) \
+ ({ typeof (X) __x = (X); \
+ typeof (Y) __y = (Y); \
+ (__x < __y) ? __x : __y; })
+
+/**
+ * Standard max(a,b) macro
+ */
+#define max(X, Y) \
+ ({ typeof (X) __x = (X); typeof(Y) __y = (Y); \
+ (__x > __y) ? __x : __y; })
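+
+/*
+ * Note: both macros are GCC statement expressions, so each argument is
+ * evaluated only once; e.g. min(i++, j) increments i exactly once.
+ */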
+
+/**
+ * Absolute value of an integer
+ *
+ * @param v
+ *
+ * @return
+ */
+static inline int64_t _abs(int64_t v)
+{
+ return (v < 0) ? -v : v;
+}
+
+/**
+ * Sign of an integer
+ *
+ * @param v
+ *
+ * @return
+ */
+static inline int64_t _sign(int64_t v)
+{
+ return v < 0;
+}
+
+/**
+ * Divide and round results up to the next higher integer.
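+ * For example, divide_roundup(7, 4) == 2, where plain integer division gives 1.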
+ *
+ * @param dividend
+ * @param divisor
+ *
+ * @return
+ */
+static inline uint64_t divide_roundup(uint64_t dividend, uint64_t divisor)
+{
+ return (dividend + divisor - 1) / divisor;
+}
+
diff --git a/src/vendorcode/cavium/bdk/libdram/lib_octeon_shared.c b/src/vendorcode/cavium/bdk/libdram/lib_octeon_shared.c
new file mode 100644
index 0000000000..cdc799744f
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libdram/lib_octeon_shared.c
@@ -0,0 +1,2165 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/* $Revision: 102369 $ */
+
+#include <bdk.h>
+#include "libbdk-arch/bdk-csrs-l2c.h"
+#include "dram-internal.h"
+
+/* Define DDR_DEBUG to debug the DDR interface. This also enables the
+** output necessary for review by Cavium Inc. */
+/* #define DDR_DEBUG */
+
+
+static int global_ddr_clock_initialized = 0;
+static int global_ddr_memory_preserved = 0;
+
+#if 1
+uint64_t max_p1 = 0UL;
+#endif
+
+/*
+ * SDRAM Physical Address (figure 6-2 from the HRM)
+ * 7 6 3 2 0
+ * +---------+----+----------------------+---------------+--------+---+------+-----+
+ * | Dimm |Rank| Row | Col | Bank | C | Col | Bus |
+ * +---------+----+----------------------+---------------+--------+---+------+-----+
+ * | ^ | | ^ | | |
+ * 0 or 1 | | 12-18 bits | 6-8 bits | 1 or 2 bits
+ * bit | 0 or 1 bit LMC_CONFIG[ROW_LSB]+X | (X=1 or 2, resp)
+ * | |
+ * LMC_CONFIG[PBANK_LSB]+X 3 or 4 bits
+ *
+ * Bus = Selects the byte on the 72-bit DDR3 bus
+ * Col = Column Address for the memory part (10-12 bits)
+ * C = Selects the LMC that services the reference
+ * (2 bits for 4 LMC mode, 1 bit for 2 LMC mode; X=width)
+ * Bank = Bank Address for the memory part (DDR3=3 bits, DDR4=3 or 4 bits)
+ * Row = Row Address for the memory part (12-18 bits)
+ * Rank = Optional Rank Address for dual-rank DIMMs
+ * (present when LMC_CONFIG[RANK_ENA] is set)
+ * Dimm = Optional DIMM address (present with more than 1 DIMM)
+ */
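+
+/*
+ * For example, the C field that selects the servicing LMC starts at
+ * address bit 7, which is why the test routines below steer traffic to
+ * a single controller with "p |= (lmc << 7)".
+ */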
+
+
+/**
+ * Divide and round results to the nearest integer.
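+ * For example, divide_nint(5, 4) == 1 and divide_nint(7, 4) == 2.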
+ *
+ * @param dividend
+ * @param divisor
+ *
+ * @return
+ */
+uint64_t divide_nint(uint64_t dividend, uint64_t divisor)
+{
+ uint64_t quotient, remainder;
+ quotient = dividend / divisor;
+ remainder = dividend % divisor;
+ return quotient + ((remainder * 2) >= divisor);
+}
+
+/* Sometimes the pass/fail results for all possible delay settings
+ * determined by the read-leveling sequence are too forgiving. This
+ * usually occurs for DCLK speeds below 300 MHz. As a result the
+ * passing range is exaggerated. This function accepts the bitmask
+ * results from the sequence and truncates the passing range to a
+ * reasonable range and recomputes the proper deskew setting.
+ */
+
+/* Default ODT config must disable ODT */
+/* Must be const (read only) so that the structure is in flash */
+const dimm_odt_config_t disable_odt_config[] = {
+ /* DDR4 needs an additional field in the struct (odt_mask2) */
+ /* DIMMS ODT_ENA ODT_MASK ODT_MASK1 ODT_MASK2 QS_DIC RODT_CTL */
+ /* ===== ======= ======== ========= ========= ====== ======== */
+ /* 1 */ { 0, 0x0000, {.u = 0x0000}, {.u = 0x0000}, 0, 0x0000 },
+ /* 2 */ { 0, 0x0000, {.u = 0x0000}, {.u = 0x0000}, 0, 0x0000 },
+ /* 3 */ { 0, 0x0000, {.u = 0x0000}, {.u = 0x0000}, 0, 0x0000 },
+ /* 4 */ { 0, 0x0000, {.u = 0x0000}, {.u = 0x0000}, 0, 0x0000 },
+};
+/* Memory controller setup function */
+static int init_octeon_dram_interface(bdk_node_t node,
+ const ddr_configuration_t *ddr_configuration,
+ uint32_t ddr_hertz,
+ uint32_t cpu_hertz,
+ uint32_t ddr_ref_hertz,
+ int board_type,
+ int board_rev_maj,
+ int board_rev_min,
+ int ddr_interface_num,
+ uint32_t ddr_interface_mask)
+{
+ uint32_t mem_size_mbytes = 0;
+ int lmc_restart_retries = 0;
+
+ const char *s;
+ if ((s = lookup_env_parameter("ddr_timing_hertz")) != NULL)
+ ddr_hertz = strtoul(s, NULL, 0);
+
+ restart_lmc_init:
+
+ /* Poke the watchdog timer so it doesn't expire during DRAM init */
+ bdk_watchdog_poke();
+
+ mem_size_mbytes = init_octeon3_ddr3_interface(node,
+ ddr_configuration,
+ ddr_hertz,
+ cpu_hertz,
+ ddr_ref_hertz,
+ board_type,
+ board_rev_maj,
+ board_rev_min,
+ ddr_interface_num,
+ ddr_interface_mask);
+#define DEFAULT_RESTART_RETRIES 3
+ if (mem_size_mbytes == 0) { // means restart is possible
+ if (lmc_restart_retries < DEFAULT_RESTART_RETRIES) {
+ lmc_restart_retries++;
+ ddr_print("N%d.LMC%d Configuration problem: attempting LMC reset and init restart %d\n",
+ node, ddr_interface_num, lmc_restart_retries);
+ // re-assert RESET first, as that is the assumption of the init code
+ if (!ddr_memory_preserved(node))
+ cn88xx_lmc_ddr3_reset(node, ddr_interface_num, LMC_DDR3_RESET_ASSERT);
+ goto restart_lmc_init;
+ } else {
+ error_print("INFO: N%d.LMC%d Configuration: fatal problem remains after %d LMC init retries - Resetting node...\n",
+ node, ddr_interface_num, lmc_restart_retries);
+ bdk_wait_usec(500000);
+ bdk_reset_chip(node);
+ }
+ }
+
+ error_print("N%d.LMC%d Configuration Completed: %d MB\n",
+ node, ddr_interface_num, mem_size_mbytes);
+ return mem_size_mbytes;
+}
+
+#define DO_LIKE_RANDOM_XOR 1
+
+#if !DO_LIKE_RANDOM_XOR
+/*
+ * Suggested testing patterns.
+ *
+ * 0xFFFF_FFFF_FFFF_FFFF
+ * 0xAAAA_AAAA_AAAA_AAAA
+ * 0xFFFF_FFFF_FFFF_FFFF
+ * 0xAAAA_AAAA_AAAA_AAAA
+ * 0x5555_5555_5555_5555
+ * 0xAAAA_AAAA_AAAA_AAAA
+ * 0xFFFF_FFFF_FFFF_FFFF
+ * 0xAAAA_AAAA_AAAA_AAAA
+ * 0xFFFF_FFFF_FFFF_FFFF
+ * 0x5555_5555_5555_5555
+ * 0xFFFF_FFFF_FFFF_FFFF
+ * 0x5555_5555_5555_5555
+ * 0xAAAA_AAAA_AAAA_AAAA
+ * 0x5555_5555_5555_5555
+ * 0xFFFF_FFFF_FFFF_FFFF
+ * 0x5555_5555_5555_5555
+ *
+ * or possibly
+ *
+ * 0xFDFD_FDFD_FDFD_FDFD
+ * 0x8787_8787_8787_8787
+ * 0xFEFE_FEFE_FEFE_FEFE
+ * 0xC3C3_C3C3_C3C3_C3C3
+ * 0x7F7F_7F7F_7F7F_7F7F
+ * 0xE1E1_E1E1_E1E1_E1E1
+ * 0xBFBF_BFBF_BFBF_BFBF
+ * 0xF0F0_F0F0_F0F0_F0F0
+ * 0xDFDF_DFDF_DFDF_DFDF
+ * 0x7878_7878_7878_7878
+ * 0xEFEF_EFEF_EFEF_EFEF
+ * 0x3C3C_3C3C_3C3C_3C3C
+ * 0xF7F7_F7F7_F7F7_F7F7
+ * 0x1E1E_1E1E_1E1E_1E1E
+ * 0xFBFB_FBFB_FBFB_FBFB
+ * 0x0F0F_0F0F_0F0F_0F0F
+ */
+
+static const uint64_t test_pattern[] = {
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0x5555555555555555ULL,
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0x5555555555555555ULL,
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0x5555555555555555ULL,
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0x5555555555555555ULL,
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0x5555555555555555ULL,
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0x5555555555555555ULL,
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0x5555555555555555ULL,
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0x5555555555555555ULL,
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0x5555555555555555ULL,
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0x5555555555555555ULL,
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0x5555555555555555ULL,
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0x5555555555555555ULL,
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0x5555555555555555ULL,
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0x5555555555555555ULL,
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0x5555555555555555ULL,
+ 0xAAAAAAAAAAAAAAAAULL,
+ 0x5555555555555555ULL,
+};
+#endif /* !DO_LIKE_RANDOM_XOR */
+
+int test_dram_byte(bdk_node_t node, int lmc, uint64_t p, uint64_t bitmask, uint64_t *xor_data)
+{
+ uint64_t p1, p2, d1, d2;
+ uint64_t v, v1;
+ uint64_t p2offset = 0x10000000;
+ uint64_t datamask;
+ uint64_t xor;
+ int i, j, k;
+ int errors = 0;
+ int index;
+#if DO_LIKE_RANDOM_XOR
+ uint64_t pattern1 = bdk_rng_get_random64();
+ uint64_t this_pattern;
+#endif
+ uint64_t bad_bits[2] = {0,0};
+
+ // When doing in parallel, the caller must provide full 8-byte bitmask.
+ // Byte lanes may be clear in the mask to indicate no testing on that lane.
+ datamask = bitmask;
+
+ // final address must include LMC and node
+ p |= (lmc<<7); /* Map address into proper interface */
+ p = bdk_numa_get_address(node, p); /* Map to node */
+
+ // Not on THUNDER: p |= 1ull<<63;
+
+ /* Add offset to both test regions to not clobber boot stuff
+ * when running from L2.
+ */
+ p += 0x10000000; // FIXME? was: 0x4000000; // make sure base is out of the way of boot
+
+ /* The loop ranges and increments walk through a range of addresses avoiding bits that alias
+ * to different memory interfaces (LMCs) on the CN88XX; i.e. we want to limit activity to a
+ * single memory channel.
+ */
+
+ /* Store something into each location first */
+ // NOTE: the ordering of loops is purposeful: fill full cachelines and flush
+ for (k = 0; k < (1 << 20); k += (1 << 14)) {
+ for (j = 0; j < (1 << 12); j += (1 << 9)) {
+ for (i = 0; i < (1 << 7); i += 8) {
+ index = i + j + k;
+ p1 = p + index;
+ p2 = p1 + p2offset;
+
+#if DO_LIKE_RANDOM_XOR
+ v = pattern1 * p1;
+ v1 = v; // write the same thing to both areas
+#else
+ v = 0ULL;
+ v1 = v;
+#endif
+ __bdk_dram_write64(p1, v);
+ __bdk_dram_write64(p2, v1);
+
+ /* Write back and invalidate the cache lines
+ *
+ * For OCX we cannot limit the number of L2 ways
+ * so instead we just write back and invalidate
+ * the L2 cache lines. This is not possible
+ * when booting remotely, however, so this is
+ * only enabled for U-Boot right now.
+ * Potentially the BDK can also take advantage
+ * of this.
+ */
+ BDK_CACHE_WBI_L2(p1);
+ BDK_CACHE_WBI_L2(p2);
+ }
+ }
+ }
+
+ BDK_DCACHE_INVALIDATE;
+
+#if DO_LIKE_RANDOM_XOR
+ this_pattern = bdk_rng_get_random64();
+#endif
+
+ // modify the contents of each location in some way
+ // NOTE: the ordering of loops is purposeful: modify full cachelines and flush
+ for (k = 0; k < (1 << 20); k += (1 << 14)) {
+ for (j = 0; j < (1 << 12); j += (1 << 9)) {
+ for (i = 0; i < (1 << 7); i += 8) {
+ index = i + j + k;
+ p1 = p + index;
+ p2 = p1 + p2offset;
+#if DO_LIKE_RANDOM_XOR
+ v = __bdk_dram_read64(p1) ^ this_pattern;
+ v1 = __bdk_dram_read64(p2) ^ this_pattern;
+#else
+ v = test_pattern[index%(sizeof(test_pattern)/sizeof(uint64_t))];
+ v &= datamask;
+ v1 = ~v;
+#endif
+
+ debug_print("[0x%016llX]: 0x%016llX, [0x%016llX]: 0x%016llX\n",
+ p1, v, p2, v1);
+
+ __bdk_dram_write64(p1, v);
+ __bdk_dram_write64(p2, v1);
+
+ /* Write back and invalidate the cache lines
+ *
+ * For OCX we cannot limit the number of L2 ways
+ * so instead we just write back and invalidate
+ * the L2 cache lines. This is not possible
+ * when booting remotely, however, so this is
+ * only enabled for U-Boot right now.
+ * Potentially the BDK can also take advantage
+ * of this.
+ */
+ BDK_CACHE_WBI_L2(p1);
+ BDK_CACHE_WBI_L2(p2);
+ }
+ }
+ }
+
+ BDK_DCACHE_INVALIDATE;
+
+ // test the contents of each location by predicting what should be there
+ // NOTE: the ordering of loops is purposeful: test full cachelines to detect
+ // an error occurring in any slot thereof
+ for (k = 0; k < (1 << 20); k += (1 << 14)) {
+ for (j = 0; j < (1 << 12); j += (1 << 9)) {
+ for (i = 0; i < (1 << 7); i += 8) {
+ index = i + j + k;
+ p1 = p + index;
+ p2 = p1 + p2offset;
+#if DO_LIKE_RANDOM_XOR
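+ // predicted value: the fill loop wrote pattern1 * p1, then the modify loop XORed in this_pattern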
+ v = (p1 * pattern1) ^ this_pattern; // FIXME: this should predict what we find...???
+ d1 = __bdk_dram_read64(p1);
+ d2 = __bdk_dram_read64(p2);
+#else
+ v = test_pattern[index%(sizeof(test_pattern)/sizeof(uint64_t))];
+ d1 = __bdk_dram_read64(p1);
+ d2 = ~__bdk_dram_read64(p2);
+#endif
+ debug_print("[0x%016llX]: 0x%016llX, [0x%016llX]: 0x%016llX\n",
+ p1, d1, p2, d2);
+
+ xor = ((d1 ^ v) | (d2 ^ v)) & datamask; // union of error bits only in active byte lanes
+
+ if (!xor)
+ continue;
+
+ // accumulate bad bits
+ bad_bits[0] |= xor;
+ //bad_bits[1] |= ~mpr_data1 & 0xffUL; // cannot do ECC here
+
+ int bybit = 1;
+ uint64_t bymsk = 0xffULL; // start in byte lane 0
+ while (xor != 0) {
+ debug_print("ERROR: [0x%016llX] [0x%016llX] expected 0x%016llX xor %016llX\n",
+ p1, p2, v, xor);
+ if (xor & bymsk) { // error(s) in this lane
+ errors |= bybit; // set the byte error bit
+ xor &= ~bymsk; // clear byte lane in error bits
+ datamask &= ~bymsk; // clear the byte lane in the mask
+ if (datamask == 0) { // nothing left to do
+ goto done_now; // completely done when errors found in all byte lanes in datamask
+ }
+ }
+ bymsk <<= 8; // move mask into next byte lane
+ bybit <<= 1; // move bit into next byte position
+ }
+ }
+ }
+ }
+
+ done_now:
+ if (xor_data != NULL) { // send the bad bits back...
+ xor_data[0] = bad_bits[0];
+ xor_data[1] = bad_bits[1]; // let it be zeroed
+ }
+ return errors;
+}
+
+// NOTE: "mode" argument:
+// DBTRAIN_TEST: for testing using GP patterns, includes ECC
+// DBTRAIN_DBI: for DBI deskew training behavior (uses GP patterns)
+// DBTRAIN_LFSR: for testing using LFSR patterns, includes ECC
+// NOTE: trust the caller to specify the correct/supported mode
+//
+int test_dram_byte_hw(bdk_node_t node, int ddr_interface_num,
+ uint64_t p, int mode, uint64_t *xor_data)
+{
+ uint64_t p1;
+ uint64_t k;
+ int errors = 0;
+
+ uint64_t mpr_data0, mpr_data1;
+ uint64_t bad_bits[2] = {0,0};
+
+ int node_address, lmc, dimm;
+ int prank, lrank;
+ int bank, row, col;
+ int save_or_dis;
+ int byte;
+ int ba_loop, ba_bits;
+
+ bdk_lmcx_rlevel_ctl_t rlevel_ctl;
+ bdk_lmcx_dbtrain_ctl_t dbtrain_ctl;
+
+ int bank_errs;
+
+ // FIXME: K iterations set to 4 for now.
+ // FIXME: decrement to increase iterations.
+ // FIXME: must be no less than 22 to stay above an LMC hash field.
+ int kshift = 26;
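+ // with kshift = 26, the loop below covers the 256MB (1 << 28) region in (1 << 28) >> 26 = 4 passes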
+ const char *s;
+
+ // allow override default setting for kshift
+ if ((s = getenv("ddr_tune_set_kshift")) != NULL) {
+ int temp = strtoul(s, NULL, 0);
+ if ((temp < 22) || (temp > 27)) {
+ ddr_print("N%d.LMC%d: ILLEGAL override of kshift to %d, using default %d\n",
+ node, ddr_interface_num, temp, kshift);
+ } else {
+ VB_PRT(VBL_DEV2, "N%d.LMC%d: overriding kshift (%d) to %d\n",
+ node, ddr_interface_num, kshift, temp);
+ kshift = temp;
+ }
+ }
+
+ /*
+ 1) Make sure that RLEVEL_CTL[OR_DIS] = 0.
+ */
+ rlevel_ctl.u = BDK_CSR_READ(node, BDK_LMCX_RLEVEL_CTL(ddr_interface_num));
+ save_or_dis = rlevel_ctl.s.or_dis;
+ rlevel_ctl.s.or_dis = 0; /* or_dis must be disabled for this sequence */
+ DRAM_CSR_WRITE(node, BDK_LMCX_RLEVEL_CTL(ddr_interface_num), rlevel_ctl.u);
+
+ /*
+ NOTE: this step done in the calling routine(s)
+ 3) Setup GENERAL_PURPOSE[0-2] registers with the data pattern of choice.
+ a. GENERAL_PURPOSE0[DATA<63:0>] – sets the initial lower (rising edge) 64 bits of data.
+ b. GENERAL_PURPOSE1[DATA<63:0>] – sets the initial upper (falling edge) 64 bits of data.
+ c. GENERAL_PURPOSE2[DATA<15:0>] – sets the initial lower (rising edge <7:0>) and upper
+ (falling edge <15:8>) ECC data.
+ */
+
+ // final address must include LMC and node
+ p |= (ddr_interface_num << 7); /* Map address into proper interface */
+ p = bdk_numa_get_address(node, p); /* Map to node */
+
+ /*
+ * Add base offset to both test regions to not clobber u-boot stuff
+ * when running from L2 for NAND boot.
+ */
+ p += 0x10000000; // offset to 256MB
+
+ errors = 0;
+
+ bdk_dram_address_extract_info(p, &node_address, &lmc, &dimm, &prank, &lrank, &bank, &row, &col);
+ VB_PRT(VBL_DEV2, "test_dram_byte_hw: START at A:0x%012lx, N%d L%d D%d R%d/%d B%1x Row:%05x Col:%05x\n",
+ p, node_address, lmc, dimm, prank, lrank, bank, row, col);
+
+ // only check once per call, and ignore if no match...
+ if ((int)node != node_address) {
+ error_print("ERROR: Node address mismatch; ignoring...\n");
+ return 0;
+ }
+ if (lmc != ddr_interface_num) {
+ error_print("ERROR: LMC address mismatch\n");
+ return 0;
+ }
+
+ /*
+ 7) Set PHY_CTL[PHY_RESET] = 1 (LMC automatically clears this as it’s a one-shot operation).
+ This is to get into the habit of resetting PHY’s SILO to the original 0 location.
+ */
+ BDK_CSR_MODIFY(phy_ctl, node, BDK_LMCX_PHY_CTL(ddr_interface_num),
+ phy_ctl.s.phy_reset = 1);
+
+ /* Walk through a range of addresses avoiding bits that alias
+ * interfaces on the CN88XX.
+ */
+
+ // FIXME: want to try to keep the K increment from affecting the LMC via hash,
+ // FIXME: so keep it above bit 21
+ // NOTE: we also want to keep k less than the base offset of bit 28 (256MB)
+
+ for (k = 0; k < (1UL << 28); k += (1UL << kshift)) {
+
+ // FIXME: the sequence will iterate over 1/2 cacheline
+ // FIXME: for each unit specified in "read_cmd_count",
+ // FIXME: so, we setup each sequence to do the max cachelines it can
+
+ p1 = p + k;
+
+ bdk_dram_address_extract_info(p1, &node_address, &lmc, &dimm, &prank, &lrank, &bank, &row, &col);
+ VB_PRT(VBL_DEV3, "test_dram_byte_hw: NEXT iteration at A:0x%012lx, N%d L%d D%d R%d/%d B%1x Row:%05x Col:%05x\n",
+ p1, node_address, lmc, dimm, prank, lrank, bank, row, col);
+
+ /*
+ 2) Setup the fields of the CSR DBTRAIN_CTL as follows:
+ a. COL, ROW, BA, BG, PRANK points to the starting point of the address.
+ You can just set them to all 0.
+ b. RW_TRAIN – set this to 1.
+ c. TCCD_L – set this to 0.
+ d. READ_CMD_COUNT – instructs the sequence how many writes/reads to perform.
+ It is a 5-bit field, so set it to 31 for the maximum number of r/w operations.
+ */
+ dbtrain_ctl.u = BDK_CSR_READ(node, BDK_LMCX_DBTRAIN_CTL(ddr_interface_num));
+ dbtrain_ctl.s.column_a = col;
+ dbtrain_ctl.s.row_a = row;
+ dbtrain_ctl.s.bg = (bank >> 2) & 3;
+ dbtrain_ctl.s.prank = (dimm * 2) + prank; // FIXME?
+ dbtrain_ctl.s.lrank = lrank; // FIXME?
+ dbtrain_ctl.s.activate = (mode == DBTRAIN_DBI);
+ dbtrain_ctl.s.write_ena = 1;
+ dbtrain_ctl.s.read_cmd_count = 31; // max count pass 1.x
+ if (! CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X)) // added 81xx and 83xx
+ dbtrain_ctl.s.cmd_count_ext = 3; // max count pass 2.x
+ else
+ dbtrain_ctl.s.cmd_count_ext = 0; // max count pass 1.x
+ dbtrain_ctl.s.rw_train = 1;
+ dbtrain_ctl.s.tccd_sel = (mode == DBTRAIN_DBI);
+
+ // LFSR should only be on when chip supports it...
+ dbtrain_ctl.s.lfsr_pattern_sel = (mode == DBTRAIN_LFSR) ? 1 : 0;
+
+ bank_errs = 0;
+
+ // for each address, iterate over the 4 "banks" in the BA
+ for (ba_loop = 0, ba_bits = bank & 3;
+ ba_loop < 4;
+ ba_loop++, ba_bits = (ba_bits + 1) & 3)
+ {
+ dbtrain_ctl.s.ba = ba_bits;
+ DRAM_CSR_WRITE(node, BDK_LMCX_DBTRAIN_CTL(ddr_interface_num), dbtrain_ctl.u);
+
+ VB_PRT(VBL_DEV3, "test_dram_byte_hw: DBTRAIN: Pr:%d Lr:%d Bg:%d Ba:%d Row:%05x Col:%05x\n",
+ dbtrain_ctl.s.prank, dbtrain_ctl.s.lrank,
+ dbtrain_ctl.s.bg, dbtrain_ctl.s.ba, row, col);
+ /*
+ 4) Kick off the sequence (SEQ_CTL[SEQ_SEL] = 14, SEQ_CTL[INIT_START] = 1).
+ 5) Poll on SEQ_CTL[SEQ_COMPLETE] for completion.
+ */
+ perform_octeon3_ddr3_sequence(node, prank, ddr_interface_num, 14);
+
+ /*
+ 6) Read MPR_DATA0 and MPR_DATA1 for results:
+ a. MPR_DATA0[MPR_DATA<63:0>] – comparison results for DQ63:DQ0.
+ (1 means MATCH, 0 means FAIL).
+ b. MPR_DATA1[MPR_DATA<7:0>] – comparison results for ECC bit7:0.
+ */
+ mpr_data0 = BDK_CSR_READ(node, BDK_LMCX_MPR_DATA0(ddr_interface_num));
+ mpr_data1 = BDK_CSR_READ(node, BDK_LMCX_MPR_DATA1(ddr_interface_num));
+
+ /*
+ 7) Set PHY_CTL[PHY_RESET] = 1 (LMC automatically clears this as it’s a one-shot operation).
+ This is to get into the habit of resetting PHY’s SILO to the original 0 location.
+ */
+ BDK_CSR_MODIFY(phy_ctl, node, BDK_LMCX_PHY_CTL(ddr_interface_num),
+ phy_ctl.s.phy_reset = 1);
+
+ if (mode == DBTRAIN_DBI)
+ continue; // bypass any error checking or updating when DBI mode
+
+ // data bytes
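+ // MPR_DATA0 reports 1 for MATCH, so ~mpr_data0 has a 1 in every DQ bit that failed comparison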
+ if (~mpr_data0) {
+ for (byte = 0; byte < 8; byte++) {
+ if ((~mpr_data0 >> (8 * byte)) & 0xffUL)
+ bank_errs |= (1 << byte);
+ }
+ // accumulate bad bits
+ bad_bits[0] |= ~mpr_data0;
+ }
+
+ // include ECC byte errors
+ if (~mpr_data1 & 0xffUL) {
+ bank_errs |= (1 << 8);
+ bad_bits[1] |= ~mpr_data1 & 0xffUL;
+ }
+
+ } /* for (int ba_loop = 0; ba_loop < 4; ba_loop++) */
+
+ errors |= bank_errs;
+
+ } /* end for (k=...) */
+
+ rlevel_ctl.s.or_dis = save_or_dis;
+ DRAM_CSR_WRITE(node, BDK_LMCX_RLEVEL_CTL(ddr_interface_num), rlevel_ctl.u);
+
+ if ((mode != DBTRAIN_DBI) && (xor_data != NULL)) { // send the bad bits back...
+ xor_data[0] = bad_bits[0];
+ xor_data[1] = bad_bits[1];
+ }
+
+ return errors;
+}
+
+static void set_ddr_memory_preserved(bdk_node_t node)
+{
+ global_ddr_memory_preserved |= 0x1 << node;
+}
+
+int ddr_memory_preserved(bdk_node_t node)
+{
+ return (global_ddr_memory_preserved & (0x1 << node)) != 0;
+}
+
+void perform_ddr_init_sequence(bdk_node_t node, int rank_mask,
+ int ddr_interface_num)
+{
+ const char *s;
+ int ddr_init_loops = 1;
+ int rankx;
+
+ if ((s = lookup_env_parameter("ddr%d_init_loops", ddr_interface_num)) != NULL)
+ ddr_init_loops = strtoul(s, NULL, 0);
+
+ while (ddr_init_loops--) {
+ for (rankx = 0; rankx < 8; rankx++) {
+ if (!(rank_mask & (1 << rankx)))
+ continue;
+
+ perform_octeon3_ddr3_sequence(node, (1 << rankx),
+ ddr_interface_num, 0); /* power-up/init */
+
+ bdk_wait_usec(1000); /* Wait a while. */
+
+ if ((s = lookup_env_parameter("ddr_sequence1")) != NULL) {
+ int sequence1;
+ sequence1 = strtoul(s, NULL, 0);
+ perform_octeon3_ddr3_sequence(node, (1 << rankx),
+ ddr_interface_num, sequence1);
+ }
+
+ if ((s = lookup_env_parameter("ddr_sequence2")) != NULL) {
+ int sequence2;
+ sequence2 = strtoul(s, NULL, 0);
+ perform_octeon3_ddr3_sequence(node, (1 << rankx),
+ ddr_interface_num, sequence2);
+ }
+ }
+ }
+}
+
+static void set_ddr_clock_initialized(bdk_node_t node, int ddr_interface, int inited_flag)
+{
+ int bit = node * 8 + ddr_interface;
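+ // one flag bit per interface, eight reserved per node: e.g. node 1, LMC 2 maps to bit 10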
+ if (inited_flag)
+ global_ddr_clock_initialized |= (0x1 << bit);
+ else
+ global_ddr_clock_initialized &= ~(0x1 << bit);
+}
+static int ddr_clock_initialized(bdk_node_t node, int ddr_interface)
+{
+ int bit = node * 8 + ddr_interface;
+ return (!!(global_ddr_clock_initialized & (0x1 << bit)));
+}
+
+
+static void cn78xx_lmc_dreset_init (bdk_node_t node, int ddr_interface_num)
+{
+ /*
+ * This is the embodiment of the 6.9.4 LMC DRESET Initialization section below.
+ *
+ * The remainder of this section describes the sequence for LMCn.
+ *
+ * 1. If not done already, write LMC(0..3)_DLL_CTL2 to its reset value
+ * (except without changing the LMC(0..3)_DLL_CTL2[INTF_EN] value from
+ * that set in the prior Step 3), including LMC(0..3)_DLL_CTL2[DRESET] = 1.
+ *
+ * 2. Without changing any other LMC(0..3)_DLL_CTL2 fields, write
+ * LMC(0..3)_DLL_CTL2[DLL_BRINGUP] = 1.
+ */
+
+ DRAM_CSR_MODIFY(c, node, BDK_LMCX_DLL_CTL2(ddr_interface_num),
+ c.s.dll_bringup = 1);
+
+ /*
+ * 3. Read LMC(0..3)_DLL_CTL2 and wait for the result.
+ */
+
+ BDK_CSR_READ(node, BDK_LMCX_DLL_CTL2(ddr_interface_num));
+
+ /*
+ * 4. Wait for a minimum of 10 LMC CK cycles.
+ */
+
+ bdk_wait_usec(1);
+
+ /*
+ * 5. Without changing any other fields in LMC(0..3)_DLL_CTL2, write
+ * LMC(0..3)_DLL_CTL2[QUAD_DLL_ENA] = 1.
+ * LMC(0..3)_DLL_CTL2[QUAD_DLL_ENA] must not change after this point
+ * without restarting the LMCn DRESET initialization sequence.
+ */
+
+ DRAM_CSR_MODIFY(c, node, BDK_LMCX_DLL_CTL2(ddr_interface_num),
+ c.s.quad_dll_ena = 1);
+
+ /*
+ * 6. Read LMC(0..3)_DLL_CTL2 and wait for the result.
+ */
+
+ BDK_CSR_READ(node, BDK_LMCX_DLL_CTL2(ddr_interface_num));
+
+ /*
+ * 7. Wait a minimum of 10 us.
+ */
+
+ bdk_wait_usec(10);
+
+ /*
+ * 8. Without changing any other fields in LMC(0..3)_DLL_CTL2, write
+ * LMC(0..3)_DLL_CTL2[DLL_BRINGUP] = 0.
+ * LMC(0..3)_DLL_CTL2[DLL_BRINGUP] must not change after this point
+ * without restarting the LMCn DRESET initialization sequence.
+ */
+
+ DRAM_CSR_MODIFY(c, node, BDK_LMCX_DLL_CTL2(ddr_interface_num),
+ c.s.dll_bringup = 0);
+
+ /*
+ * 9. Read LMC(0..3)_DLL_CTL2 and wait for the result.
+ */
+
+ BDK_CSR_READ(node, BDK_LMCX_DLL_CTL2(ddr_interface_num));
+
+ /*
+ * 10. Without changing any other fields in LMC(0..3)_DLL_CTL2, write
+ * LMC(0..3)_DLL_CTL2[DRESET] = 0.
+ * LMC(0..3)_DLL_CTL2[DRESET] must not change after this point without
+ * restarting the LMCn DRESET initialization sequence.
+ *
+ * After completing LMCn DRESET initialization, all LMC CSRs may be
+ * accessed. Prior to completing LMC DRESET initialization, only
+ * LMC(0..3)_DDR_PLL_CTL, LMC(0..3)_DLL_CTL2, LMC(0..3)_RESET_CTL, and
+ * LMC(0..3)_COMP_CTL2 LMC CSRs can be accessed.
+ */
+
+ DRAM_CSR_MODIFY(c, node, BDK_LMCX_DLL_CTL2(ddr_interface_num),
+ c.s.dreset = 0);
+
+ /*
+ * NEW STEP - necessary for O73, O78 P2.0, O75, and T88 P2.0
+ * McBuggin: #24821
+ *
+ * 11. Wait for a minimum of 10 LMC CK cycles.
+ */
+
+ bdk_wait_usec(1);
+}
+
+/*static*/ void cn88xx_lmc_ddr3_reset(bdk_node_t node, int ddr_interface_num, int reset)
+{
+ /*
+ * 4. Deassert DDRn_RESET_L pin by writing LMC(0..3)_RESET_CTL[DDR3RST] = 1
+ * without modifying any other LMC(0..3)_RESET_CTL fields.
+ * 5. Read LMC(0..3)_RESET_CTL and wait for the result.
+ * 6. Wait a minimum of 500us. This guarantees the necessary T = 500us
+ * delay between DDRn_RESET_L deassertion and DDRn_DIMM*_CKE* assertion.
+ */
+ ddr_print("LMC%d %s DDR_RESET_L\n", ddr_interface_num,
+ (reset == LMC_DDR3_RESET_DEASSERT) ? "De-asserting" : "Asserting");
+ DRAM_CSR_MODIFY(c, node, BDK_LMCX_RESET_CTL(ddr_interface_num),
+ c.cn8.ddr3rst = reset);
+ BDK_CSR_READ(node, BDK_LMCX_RESET_CTL(ddr_interface_num));
+ bdk_wait_usec(500);
+}
+
+int initialize_ddr_clock(bdk_node_t node,
+ const ddr_configuration_t *ddr_configuration,
+ uint32_t cpu_hertz,
+ uint32_t ddr_hertz,
+ uint32_t ddr_ref_hertz,
+ int ddr_interface_num,
+ uint32_t ddr_interface_mask
+ )
+{
+ const char *s;
+
+ if (ddr_clock_initialized(node, ddr_interface_num))
+ return 0;
+
+ if (!ddr_clock_initialized(node, 0)) { /* Do this once */
+ int i;
+ bdk_lmcx_reset_ctl_t reset_ctl;
+ /* Check to see if memory is to be preserved and set global flag */
+ for (i=3; i>=0; --i) {
+ if ((ddr_interface_mask & (1 << i)) == 0)
+ continue;
+ reset_ctl.u = BDK_CSR_READ(node, BDK_LMCX_RESET_CTL(i));
+ if (reset_ctl.s.ddr3psv == 1) {
+ ddr_print("LMC%d Preserving memory\n", i);
+ set_ddr_memory_preserved(node);
+
+ /* Re-initialize flags */
+ reset_ctl.cn8.ddr3pwarm = 0;
+ reset_ctl.cn8.ddr3psoft = 0;
+ reset_ctl.s.ddr3psv = 0;
+ DRAM_CSR_WRITE(node, BDK_LMCX_RESET_CTL(i), reset_ctl.u);
+ }
+ }
+ }
+
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX)) {
+
+ bdk_lmcx_ddr_pll_ctl_t ddr_pll_ctl;
+ const dimm_config_t *dimm_config_table = ddr_configuration->dimm_config_table;
+
+ /* ddr_type only indicates DDR4 or DDR3 */
+ int ddr_type = get_ddr_type(node, &dimm_config_table[0]);
+
+ /*
+ * 6.9 LMC Initialization Sequence
+ *
+ * There are 14 parts to the LMC initialization procedure:
+ *
+ * 1. LMC interface enable initialization
+ *
+ * 2. DDR PLL initialization
+ *
+ * 3. LMC CK initialization
+ *
+ * 4. LMC DRESET initialization
+ *
+ * 5. LMC CK local initialization
+ *
+ * 6. LMC RESET initialization
+ *
+ * 7. Early LMC initialization
+ *
+ * 8. LMC offset training
+ *
+ * 9. LMC internal Vref training
+ *
+ * 10. LMC deskew training
+ *
+ * 11. LMC write leveling
+ *
+ * 12. LMC read leveling
+ *
+ * 13. DRAM Vref Training for DDR4
+ *
+ * 14. Final LMC initialization
+ *
+ * CN88XX supports two modes:
+ *
+ * - two-LMC mode: both LMCs 2/3 must not be enabled
+ * (LMC2/3_DLL_CTL2[DRESET] must be set to 1 and LMC2/3_DLL_CTL2[INTF_EN]
+ * must be set to 0) and both LMCs 0/1 must be enabled.
+ *
+ * - four-LMC mode: all four LMCs 0..3 must be enabled.
+ *
+ * Steps 4 and 6..14 should each be performed for each enabled LMC (either
+ * twice or four times). Steps 1..3 and 5 are more global in nature and
+ * each must be executed exactly once (not once per LMC) each time the
+ * DDR PLL changes or is first brought up. Steps 1..3 and 5 need not be
+ * performed if the DDR PLL is stable.
+ *
+ * Generally, the steps are performed in order. The exception is that the
+ * CK local initialization (step 5) must be performed after some DRESET
+ * initializations (step 4) and before other DRESET initializations when
+ * the DDR PLL is brought up or changed. (The CK local initialization
+ * uses information from some LMCs to bring up the other local CKs.) The
+ * following text describes these ordering requirements in more detail.
+ *
+ * Following any chip reset, the DDR PLL must be brought up, and all 14
+ * steps should be executed. Subsequently, it is possible to execute only
+ * steps 4 and 6..14, or to execute only steps 8..14.
+ *
+ * The remainder of this section covers these initialization steps in
+ * sequence.
+ */
+
+ if (ddr_interface_num == 0) { /* Do this once */
+ bdk_lmcx_dll_ctl2_t dll_ctl2;
+ int loop_interface_num;
+
+ /*
+ * 6.9.1 LMC Interface-Enable Initialization
+ *
+ * LMC interface-enable initialization (Step 1) must be performed only
+ * once, not once per LMC in four-LMC mode. This step is not required
+ * in two-LMC mode.
+ *
+ * Perform the following three substeps for the LMC interface-enable
+ * initialization:
+ *
+ * 1. Without changing any other LMC2_DLL_CTL2 fields (LMC(0..3)_DLL_CTL2
+ * should be at their reset values after Step 1), write
+ * LMC2_DLL_CTL2[INTF_EN] = 1 if four-LMC mode is desired.
+ *
+ * 2. Without changing any other LMC3_DLL_CTL2 fields, write
+ * LMC3_DLL_CTL2[INTF_EN] = 1 if four-LMC mode is desired.
+ *
+ * 3. Read LMC2_DLL_CTL2 and wait for the result.
+ *
+ * The LMC2_DLL_CTL2[INTF_EN] and LMC3_DLL_CTL2[INTF_EN] values should
+ * not be changed by software from this point.
+ *
+ */
+
+ /* Put all LMCs into DRESET here; these are the reset values... */
+ for (loop_interface_num = 0; loop_interface_num < 4; ++loop_interface_num) {
+ if ((ddr_interface_mask & (1 << loop_interface_num)) == 0)
+ continue;
+
+ dll_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_DLL_CTL2(loop_interface_num));
+
+ dll_ctl2.s.byp_setting = 0;
+ dll_ctl2.s.byp_sel = 0;
+ dll_ctl2.s.quad_dll_ena = 0;
+ dll_ctl2.s.dreset = 1;
+ dll_ctl2.s.dll_bringup = 0;
+ dll_ctl2.s.intf_en = 0;
+
+ DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL2(loop_interface_num), dll_ctl2.u);
+ }
+
+ /* Now set INTF_EN for *ONLY* LMC2/3 if they are to be active on 88XX. */
+ /* Do *NOT* touch LMC0/1 INTF_EN=0 setting on 88XX. */
+ /* But we do have to set LMC1 INTF_EN=1 on 83XX if we want it active... */
+ /* Note that 81xx has only LMC0 so the mask should reflect that. */
+ for (loop_interface_num = (CAVIUM_IS_MODEL(CAVIUM_CN83XX)) ? 1 : 2;
+ loop_interface_num < 4; ++loop_interface_num) {
+ if ((ddr_interface_mask & (1 << loop_interface_num)) == 0)
+ continue;
+
+ DRAM_CSR_MODIFY(c, node, BDK_LMCX_DLL_CTL2(loop_interface_num),
+ c.s.intf_en = 1);
+ BDK_CSR_READ(node, BDK_LMCX_DLL_CTL2(loop_interface_num));
+ }
+
+ /*
+ * 6.9.2 DDR PLL Initialization
+ *
+ * DDR PLL initialization (Step 2) must be performed for each chip reset
+ * and whenever the DDR clock speed changes. This step needs to be
+ * performed only once, not once per LMC.
+ *
+ * Perform the following eight substeps to initialize the DDR PLL:
+ *
+ * 1. If not done already, write all fields in LMC(0..1)_DDR_PLL_CTL and
+ * LMC(0..1)_DLL_CTL2 to their reset values, including:
+ *
+ * .. LMC0_DDR_PLL_CTL[DDR_DIV_RESET] = 1
+ * .. LMC0_DLL_CTL2[DRESET] = 1
+ *
+ * This substep is not necessary after a chip reset.
+ *
+ */
+
+ ddr_pll_ctl.u = BDK_CSR_READ(node, BDK_LMCX_DDR_PLL_CTL(0));
+
+ ddr_pll_ctl.cn83xx.reset_n = 0;
+ ddr_pll_ctl.cn83xx.ddr_div_reset = 1;
+ ddr_pll_ctl.cn83xx.phy_dcok = 0;
+ ddr_pll_ctl.cn83xx.dclk_invert = 0;
+
+ // allow override of LMC0 desired setting for DCLK_INVERT
+ if ((s = lookup_env_parameter("ddr0_set_dclk_invert")) != NULL) {
+ ddr_pll_ctl.cn83xx.dclk_invert = !!strtoul(s, NULL, 0);
+ ddr_print("LMC0: override DDR_PLL_CTL[dclk_invert] to %d\n",
+ ddr_pll_ctl.cn83xx.dclk_invert);
+ }
+
+ // always write LMC0 CSR, it must be active
+ DRAM_CSR_WRITE(node, BDK_LMCX_DDR_PLL_CTL(0), ddr_pll_ctl.u);
+ ddr_print("%-45s : 0x%016lx\n", "LMC0: DDR_PLL_CTL", ddr_pll_ctl.u);
+
+ // only when LMC1 is active
+ // NOTE: 81xx has only 1 LMC, and 83xx can operate in 1-LMC mode
+ if (ddr_interface_mask & 0x2) {
+
+ ddr_pll_ctl.cn83xx.dclk_invert ^= 1; /* DEFAULT: Toggle dclk_invert from LMC0 */
+
+ // allow override of LMC1 desired setting for DCLK_INVERT
+ if ((s = lookup_env_parameter("ddr1_set_dclk_invert")) != NULL) {
+ ddr_pll_ctl.cn83xx.dclk_invert = !!strtoul(s, NULL, 0);
+ ddr_print("LMC1: override DDR_PLL_CTL[dclk_invert] to %d\n",
+ ddr_pll_ctl.cn83xx.dclk_invert);
+ }
+
+ // always write LMC1 CSR when it is active
+ DRAM_CSR_WRITE(node, BDK_LMCX_DDR_PLL_CTL(1), ddr_pll_ctl.u);
+ ddr_print("%-45s : 0x%016lx\n", "LMC1: DDR_PLL_CTL", ddr_pll_ctl.u);
+ }
+
+ /*
+ * 2. If the current DRAM contents are not preserved (see
+ * LMC(0..3)_RESET_CTL[DDR3PSV]), this is also an appropriate time to
+ * assert the RESET# pin of the DDR3/DDR4 DRAM parts. If desired, write
+ * LMC0_RESET_CTL[DDR3RST] = 0 without modifying any other
+ * LMC0_RESET_CTL fields to assert the DDR_RESET_L pin. No action is
+ * required here to assert DDR_RESET_L following a chip reset. Refer to
+ * Section 6.9.6. Do this for all enabled LMCs.
+ */
+
+ for (loop_interface_num = 0;
+ ( !ddr_memory_preserved(node)) && loop_interface_num < 4;
+ ++loop_interface_num)
+ {
+
+ if ((ddr_interface_mask & (1 << loop_interface_num)) == 0)
+ continue;
+
+ cn88xx_lmc_ddr3_reset(node, loop_interface_num, LMC_DDR3_RESET_ASSERT);
+ }
+
+ /*
+ * 3. Without changing any other LMC0_DDR_PLL_CTL values, write LMC0_DDR_
+ * PLL_CTL[CLKF] with a value that gives a desired DDR PLL speed. The
+ * LMC0_DDR_PLL_CTL[CLKF] value should be selected in conjunction with
+ * the post-scalar divider values for LMC (LMC0_DDR_PLL_CTL[DDR_PS_EN])
+ * so that the desired LMC CK speed is produced (all enabled LMCs
+ * must run the same speed). Section 5.14 describes
+ * LMC0_DDR_PLL_CTL[CLKF] and LMC0_DDR_PLL_CTL[DDR_PS_EN] programmings
+ * that produce the desired LMC CK speed. Section 6.9.3 describes LMC CK
+ * initialization, which can be done separately from the DDR PLL
+ * initialization described in this section.
+ *
+ * The LMC0_DDR_PLL_CTL[CLKF] value must not change after this point
+ * without restarting this SDRAM PLL initialization sequence.
+ */
+
+ {
+ /* CLKF = (DCLK * (CLKR+1) * EN(1, 2, 3, 4, 5, 6, 7, 8, 10, 12))/DREF - 1 */
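+ /* Illustrative example (assuming a 100 MHz reference clock): for a target
+ * DCLK of 800 MHz, CLKR = 0 and EN = 2 give CLKF = (800 * 1 * 2)/100 - 1 = 15,
+ * so the PLL runs at 100 * (15+1)/(0+1) = 1600 MHz and the post-scalar
+ * divider of 2 produces the 800 MHz LMC CK. */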
+ int en_idx, save_en_idx, best_en_idx=0;
+ uint64_t clkf, clkr, max_clkf = 127;
+ uint64_t best_clkf=0, best_clkr=0;
+ uint64_t best_pll_MHz = 0;
+ uint64_t pll_MHz;
+ uint64_t min_pll_MHz = 800;
+ uint64_t max_pll_MHz = 5000;
+ uint64_t error;
+ uint64_t best_error;
+ uint64_t best_calculated_ddr_hertz = 0;
+ uint64_t calculated_ddr_hertz = 0;
+ uint64_t orig_ddr_hertz = ddr_hertz;
+ static const int _en[] = {1, 2, 3, 4, 5, 6, 7, 8, 10, 12};
+ int override_pll_settings;
+ int new_bwadj;
+
+ error = best_error = ddr_hertz; /* Init to max error */
+
+ ddr_print("DDR Reference Hertz = %d\n", ddr_ref_hertz);
+
+ while (best_error == ddr_hertz) {
+
+ for (clkr = 0; clkr < 4; ++clkr) {
+ for (en_idx=sizeof(_en)/sizeof(int)-1; en_idx>=0; --en_idx) {
+ save_en_idx = en_idx;
+ clkf = ((ddr_hertz) * (clkr+1) * (_en[save_en_idx]));
+ clkf = divide_nint(clkf, ddr_ref_hertz) - 1;
+ pll_MHz = ddr_ref_hertz * (clkf+1) / (clkr+1) / 1000000;
+ calculated_ddr_hertz = ddr_ref_hertz * (clkf + 1) / ((clkr + 1) * (_en[save_en_idx]));
+ error = ddr_hertz - calculated_ddr_hertz;
+
+ if ((pll_MHz < min_pll_MHz) || (pll_MHz > max_pll_MHz)) continue;
+ if (clkf > max_clkf) continue; /* PLL requires clkf to be limited */
+ if (_abs(error) > _abs(best_error)) continue;
+
+ VB_PRT(VBL_TME, "clkr: %2lu, en[%d]: %2d, clkf: %4lu, pll_MHz: %4lu, ddr_hertz: %8lu, error: %8ld\n",
+ clkr, save_en_idx, _en[save_en_idx], clkf, pll_MHz, calculated_ddr_hertz, error);
+
+ /* Favor the highest PLL frequency. */
+ if ((_abs(error) < _abs(best_error)) || (pll_MHz > best_pll_MHz)) {
+ best_pll_MHz = pll_MHz;
+ best_calculated_ddr_hertz = calculated_ddr_hertz;
+ best_error = error;
+ best_clkr = clkr;
+ best_clkf = clkf;
+ best_en_idx = save_en_idx;
+ }
+ }
+ }
+
+ override_pll_settings = 0;
+
+ if ((s = lookup_env_parameter("ddr_pll_clkr")) != NULL) {
+ best_clkr = strtoul(s, NULL, 0);
+ override_pll_settings = 1;
+ }
+ if ((s = lookup_env_parameter("ddr_pll_clkf")) != NULL) {
+ best_clkf = strtoul(s, NULL, 0);
+ override_pll_settings = 1;
+ }
+ if ((s = lookup_env_parameter("ddr_pll_en_idx")) != NULL) {
+ best_en_idx = strtoul(s, NULL, 0);
+ override_pll_settings = 1;
+ }
+
+ if (override_pll_settings) {
+ best_pll_MHz = ddr_ref_hertz * (best_clkf+1) / (best_clkr+1) / 1000000;
+ best_calculated_ddr_hertz = ddr_ref_hertz * (best_clkf + 1) / ((best_clkr + 1) * (_en[best_en_idx]));
+ best_error = ddr_hertz - best_calculated_ddr_hertz;
+ }
+
+ ddr_print("clkr: %2lu, en[%d]: %2d, clkf: %4lu, pll_MHz: %4lu, ddr_hertz: %8lu, error: %8ld <==\n",
+ best_clkr, best_en_idx, _en[best_en_idx], best_clkf, best_pll_MHz,
+ best_calculated_ddr_hertz, best_error);
+
+ /* Try lowering the frequency if we can't get a working configuration */
+ if (best_error == ddr_hertz) {
+ if (ddr_hertz < orig_ddr_hertz - 10000000)
+ break;
+ ddr_hertz -= 1000000;
+ best_error = ddr_hertz;
+ }
+
+ } /* while (best_error == ddr_hertz) */
+
+
+ if (best_error == ddr_hertz) {
+ error_print("ERROR: Cannot compute a legal DDR clock speed configuration.\n");
+ return(-1);
+ }
+
+ new_bwadj = (best_clkf + 1) / 10;
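+ // e.g. best_clkf = 15 gives new_bwadj = 1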
+ VB_PRT(VBL_TME, "bwadj: %2d\n", new_bwadj);
+
+ if ((s = lookup_env_parameter("ddr_pll_bwadj")) != NULL) {
+ new_bwadj = strtoul(s, NULL, 0);
+ VB_PRT(VBL_TME, "bwadj: %2d\n", new_bwadj);
+ }
+
+ for (loop_interface_num = 0; loop_interface_num<2; ++loop_interface_num) {
+ if ((ddr_interface_mask & (1 << loop_interface_num)) == 0)
+ continue;
+
+ // make sure we preserve any settings already there
+ ddr_pll_ctl.u = BDK_CSR_READ(node, BDK_LMCX_DDR_PLL_CTL(loop_interface_num));
+ ddr_print("LMC%d: DDR_PLL_CTL : 0x%016lx\n",
+ loop_interface_num, ddr_pll_ctl.u);
+
+ ddr_pll_ctl.cn83xx.ddr_ps_en = best_en_idx;
+ ddr_pll_ctl.cn83xx.clkf = best_clkf;
+ ddr_pll_ctl.cn83xx.clkr = best_clkr;
+ ddr_pll_ctl.cn83xx.reset_n = 0;
+ ddr_pll_ctl.cn83xx.bwadj = new_bwadj;
+
+ DRAM_CSR_WRITE(node, BDK_LMCX_DDR_PLL_CTL(loop_interface_num), ddr_pll_ctl.u);
+ ddr_print("LMC%d: DDR_PLL_CTL : 0x%016lx\n",
+ loop_interface_num, ddr_pll_ctl.u);
+ }
+ }
+
+
+ for (loop_interface_num = 0; loop_interface_num<4; ++loop_interface_num) {
+ if ((ddr_interface_mask & (1 << loop_interface_num)) == 0)
+ continue;
+
+ /*
+ * 4. Read LMC0_DDR_PLL_CTL and wait for the result.
+ */
+
+ BDK_CSR_READ(node, BDK_LMCX_DDR_PLL_CTL(loop_interface_num));
+
+ /*
+ * 5. Wait a minimum of 3 us.
+ */
+
+ bdk_wait_usec(3); /* Wait 3 us */
+
+ /*
+ * 6. Write LMC0_DDR_PLL_CTL[RESET_N] = 1 without changing any other
+ * LMC0_DDR_PLL_CTL values.
+ */
+
+ DRAM_CSR_MODIFY(c, node, BDK_LMCX_DDR_PLL_CTL(loop_interface_num),
+ c.cn83xx.reset_n = 1);
+
+ /*
+ * 7. Read LMC0_DDR_PLL_CTL and wait for the result.
+ */
+
+ BDK_CSR_READ(node, BDK_LMCX_DDR_PLL_CTL(loop_interface_num));
+
+ /*
+ * 8. Wait a minimum of 25 us.
+ */
+
+ bdk_wait_usec(25); /* Wait 25 us */
+
+ } /* for (loop_interface_num = 0; loop_interface_num<4; ++loop_interface_num) */
+
+ for (loop_interface_num = 0; loop_interface_num<4; ++loop_interface_num) {
+ if ((ddr_interface_mask & (1 << loop_interface_num)) == 0)
+ continue;
+ /*
+ * 6.9.3 LMC CK Initialization
+ *
+ * DDR PLL initialization must be completed prior to starting LMC CK
+ * initialization.
+ *
+ * Perform the following substeps to initialize the LMC CK. Perform
+ * substeps 1..3 for both LMC0 and LMC1.
+ *
+ * 1. Without changing any other LMC(0..3)_DDR_PLL_CTL values, write
+ * LMC(0..3)_DDR_PLL_CTL[DDR_DIV_RESET] = 1 and
+ * LMC(0..3)_DDR_PLL_CTL[DDR_PS_EN] with the appropriate value to get the
+ * desired LMC CK speed. Section 5.14 discusses CLKF and DDR_PS_EN
+ * programmings. The LMC(0..3)_DDR_PLL_CTL[DDR_PS_EN] must not change
+ * after this point without restarting this LMC CK initialization
+ * sequence.
+ */
+
+ DRAM_CSR_MODIFY(c, node, BDK_LMCX_DDR_PLL_CTL(loop_interface_num),
+ c.cn83xx.ddr_div_reset = 1);
+
+ /*
+ * 2. Without changing any other fields in LMC(0..3)_DDR_PLL_CTL, write
+ * LMC(0..3)_DDR_PLL_CTL[DDR4_MODE] = 0.
+ */
+
+ DRAM_CSR_MODIFY(c, node, BDK_LMCX_DDR_PLL_CTL(loop_interface_num),
+ c.cn83xx.ddr4_mode = (ddr_type == DDR4_DRAM) ? 1 : 0);
+
+ /*
+ * 3. Read LMC(0..3)_DDR_PLL_CTL and wait for the result.
+ */
+
+ BDK_CSR_READ(node, BDK_LMCX_DDR_PLL_CTL(loop_interface_num));
+
+ /*
+ * 4. Wait a minimum of 1 us.
+ */
+
+ bdk_wait_usec(1); /* Wait 1 us */
+
+ /*
+ * 5. Without changing any other fields in LMC(0..3)_DDR_PLL_CTL, write
+ * LMC(0..3)_DDR_PLL_CTL[PHY_DCOK] = 1.
+ */
+
+ DRAM_CSR_MODIFY(c, node, BDK_LMCX_DDR_PLL_CTL(loop_interface_num),
+ c.cn83xx.phy_dcok = 1);
+
+ /*
+ * 6. Read LMC(0..3)_DDR_PLL_CTL and wait for the result.
+ */
+
+ BDK_CSR_READ(node, BDK_LMCX_DDR_PLL_CTL(loop_interface_num));
+
+ /*
+ * 7. Wait a minimum of 20 us.
+ */
+
+ bdk_wait_usec(20); /* Wait 20 us */
+
+ /*
+ * 8. Without changing any other LMC(0..3)_COMP_CTL2 values, write
+ * LMC(0..3)_COMP_CTL2[CK_CTL,CONTROL_CTL,CMD_CTL] to the desired
+ * DDR*_CK_*_P control and command signals drive strength.
+ */
+
+ {
+ bdk_lmcx_comp_ctl2_t comp_ctl2;
+ const ddr3_custom_config_t *custom_lmc_config = &ddr_configuration->custom_lmc_config;
+
+ comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(loop_interface_num));
+
+ comp_ctl2.s.dqx_ctl = 4; /* Default 4=34.3 ohm */
+ comp_ctl2.s.ck_ctl =
+ (custom_lmc_config->ck_ctl == 0) ? 4 : custom_lmc_config->ck_ctl; /* Default 4=34.3 ohm */
+ comp_ctl2.s.cmd_ctl =
+ (custom_lmc_config->cmd_ctl == 0) ? 4 : custom_lmc_config->cmd_ctl; /* Default 4=34.3 ohm */
+
+ comp_ctl2.s.rodt_ctl = 0x4; /* 60 ohm */
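+            /* For reference, the *_CTL drive-strength encodings match the
+               driver_*_ohm defines in lib_octeon_shared.h:
+               1=24, 2=27, 3=30, 4=34(.3), 5=40, 6=48, 7=60 ohms,
+               while RODT_CTL follows the rodt_ctl_*_ohm scale
+               (1=20, 2=30, 3=40, 4=60, 5=120 ohms). */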
+
+ // These need to be done here, not later in Step 6.9.7.
+ // NOTE: these are/will be specific to a chip; for now, set to 0
+ // should we provide overrides for these?
+ comp_ctl2.s.ntune_offset = 0;
+ comp_ctl2.s.ptune_offset = 0;
+
+ // now do any overrides...
+ if ((s = lookup_env_parameter("ddr_ck_ctl")) != NULL) {
+ comp_ctl2.s.ck_ctl = strtoul(s, NULL, 0);
+ }
+
+ if ((s = lookup_env_parameter("ddr_cmd_ctl")) != NULL) {
+ comp_ctl2.s.cmd_ctl = strtoul(s, NULL, 0);
+ }
+
+ if ((s = lookup_env_parameter("ddr_dqx_ctl")) != NULL) {
+ comp_ctl2.s.dqx_ctl = strtoul(s, NULL, 0);
+ }
+
+ DRAM_CSR_WRITE(node, BDK_LMCX_COMP_CTL2(loop_interface_num), comp_ctl2.u);
+ }
+
+ /*
+ * 9. Read LMC(0..3)_DDR_PLL_CTL and wait for the result.
+ */
+
+ BDK_CSR_READ(node, BDK_LMCX_DDR_PLL_CTL(loop_interface_num));
+
+ /*
+ * 10. Wait a minimum of 200 ns.
+ */
+
+ bdk_wait_usec(1); /* Wait 1 us */
+
+ /*
+ * 11. Without changing any other LMC(0..3)_DDR_PLL_CTL values, write
+ * LMC(0..3)_DDR_PLL_CTL[DDR_DIV_RESET] = 0.
+ */
+
+ DRAM_CSR_MODIFY(c, node, BDK_LMCX_DDR_PLL_CTL(loop_interface_num),
+ c.cn83xx.ddr_div_reset = 0);
+
+ /*
+ * 12. Read LMC(0..3)_DDR_PLL_CTL and wait for the result.
+ */
+
+ BDK_CSR_READ(node, BDK_LMCX_DDR_PLL_CTL(loop_interface_num));
+
+ /*
+ * 13. Wait a minimum of 200 ns.
+ */
+ bdk_wait_usec(1); /* Wait 1 us */
+
+ } /* for (loop_interface_num = 0; loop_interface_num<4; ++loop_interface_num) */
+
+ } /* if (ddr_interface_num == 0) */ /* Do this once */
+
+ if (ddr_interface_num == 0) { /* Do this once */
+ bdk_lmcx_dll_ctl3_t ddr_dll_ctl3;
+
+ /*
+ * 6.9.4 LMC DRESET Initialization
+ *
+ * All of the DDR PLL, LMC global CK, and LMC interface enable
+ * initializations must be completed prior to starting this LMC DRESET
+ * initialization (Step 4).
+ *
+ * This LMC DRESET step is done for all enabled LMCs.
+ *
+ * There are special constraints on the ordering of DRESET initialization
+ * (Steps 4) and CK local initialization (Step 5) whenever CK local
+ * initialization must be executed. CK local initialization must be
+ * executed whenever the DDR PLL is being brought up (for each chip reset
+ * and whenever the DDR clock speed changes).
+ *
+ * When Step 5 must be executed in the two-LMC mode case:
+         * - LMC0 DRESET initialization must occur before Step 5.
+         * - LMC1 DRESET initialization must occur after Step 5.
+         *
+         * When Step 5 must be executed in the four-LMC mode case:
+         * - LMC2 and LMC3 DRESET initialization must occur before Step 5.
+         * - LMC0 and LMC1 DRESET initialization must occur after Step 5.
+ */
+
+ if ((ddr_interface_mask == 0x1) || (ddr_interface_mask == 0x3)) {
+ /* ONE-LMC MODE FOR 81XX AND 83XX BEFORE STEP 5 */
+ /* TWO-LMC MODE BEFORE STEP 5 */
+ cn78xx_lmc_dreset_init(node, 0);
+
+ } else if (ddr_interface_mask == 0xf) {
+ /* FOUR-LMC MODE BEFORE STEP 5 */
+ cn78xx_lmc_dreset_init(node, 2);
+ cn78xx_lmc_dreset_init(node, 3);
+ }
+
+ /*
+ * 6.9.5 LMC CK Local Initialization
+ *
+ * All of DDR PLL, LMC global CK, and LMC interface-enable
+ * initializations must be completed prior to starting this LMC CK local
+ * initialization (Step 5).
+ *
+ * LMC CK Local initialization must be performed for each chip reset and
+ * whenever the DDR clock speed changes. This step needs to be performed
+ * only once, not once per LMC.
+ *
+ * There are special constraints on the ordering of DRESET initialization
+ * (Steps 4) and CK local initialization (Step 5) whenever CK local
+ * initialization must be executed. CK local initialization must be
+ * executed whenever the DDR PLL is being brought up (for each chip reset
+ * and whenever the DDR clock speed changes).
+ *
+ * When Step 5 must be executed in the two-LMC mode case:
+         * - LMC0 DRESET initialization must occur before Step 5.
+         * - LMC1 DRESET initialization must occur after Step 5.
+         *
+         * When Step 5 must be executed in the four-LMC mode case:
+         * - LMC2 and LMC3 DRESET initialization must occur before Step 5.
+         * - LMC0 and LMC1 DRESET initialization must occur after Step 5.
+ *
+ * LMC CK local initialization is different depending on whether two-LMC
+ * or four-LMC modes are desired.
+ */
+
+ if (ddr_interface_mask == 0x3) {
+ /*
+ * 6.9.5.1 LMC CK Local Initialization for Two-LMC Mode
+ *
+ * 1. Write LMC0_DLL_CTL3 to its reset value. (Note that
+ * LMC0_DLL_CTL3[DLL_90_BYTE_SEL] = 0x2 .. 0x8 should also work.)
+ */
+
+ ddr_dll_ctl3.u = 0;
+ ddr_dll_ctl3.s.dclk90_recal_dis = 1;
+ ddr_dll_ctl3.s.dll90_byte_sel = 1;
+ DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(0), ddr_dll_ctl3.u);
+
+ /*
+ * 2. Read LMC0_DLL_CTL3 and wait for the result.
+ */
+
+ BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(0));
+
+ /*
+ * 3. Without changing any other fields in LMC0_DLL_CTL3, write
+ * LMC0_DLL_CTL3[DCLK90_FWD] = 1. Writing LMC0_DLL_CTL3[DCLK90_FWD] = 1
+ * causes clock-delay information to be forwarded from LMC0 to LMC1.
+ */
+
+ ddr_dll_ctl3.s.dclk90_fwd = 1;
+ DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(0), ddr_dll_ctl3.u);
+
+ /*
+ * 4. Read LMC0_DLL_CTL3 and wait for the result.
+ */
+
+ BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(0));
+ } /* if (ddr_interface_mask == 0x3) */
+
+ if (ddr_interface_mask == 0xf) {
+ /*
+ * 6.9.5.2 LMC CK Local Initialization for Four-LMC Mode
+ *
+ * 1. Write LMC2_DLL_CTL3 to its reset value except
+ * LMC2_DLL_CTL3[DLL90_BYTE_SEL] = 0x7.
+ */
+
+ ddr_dll_ctl3.u = 0;
+ ddr_dll_ctl3.s.dclk90_recal_dis = 1;
+ ddr_dll_ctl3.s.dll90_byte_sel = 7;
+ DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(2), ddr_dll_ctl3.u);
+
+ /*
+ * 2. Write LMC3_DLL_CTL3 to its reset value except
+ * LMC3_DLL_CTL3[DLL90_BYTE_SEL] = 0x0.
+ */
+
+ ddr_dll_ctl3.u = 0;
+ ddr_dll_ctl3.s.dclk90_recal_dis = 1;
+ ddr_dll_ctl3.s.dll90_byte_sel = 0; /* HRM wants 0, not 2 */
+ DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(3), ddr_dll_ctl3.u); /* HRM wants LMC3 */
+
+ /*
+ * 3. Read LMC3_DLL_CTL3 and wait for the result.
+ */
+
+ BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(3));
+
+ /*
+ * 4. Without changing any other fields in LMC2_DLL_CTL3, write
+ * LMC2_DLL_CTL3[DCLK90_FWD] = 1 and LMC2_DLL_CTL3[DCLK90_RECAL_DIS] = 1.
+ * Writing LMC2_DLL_CTL3[DCLK90_FWD] = 1 causes LMC 2 to forward
+ * clock-delay information to LMC0. Setting
+ * LMC2_DLL_CTL3[DCLK90_RECAL_DIS] to 1 prevents LMC2 from periodically
+ * recalibrating this delay information.
+ */
+
+ DRAM_CSR_MODIFY(c, node, BDK_LMCX_DLL_CTL3(2),
+ c.s.dclk90_fwd = 1;
+ c.s.dclk90_recal_dis = 1);
+
+ /*
+ * 5. Without changing any other fields in LMC3_DLL_CTL3, write
+ * LMC3_DLL_CTL3[DCLK90_FWD] = 1 and LMC3_DLL_CTL3[DCLK90_RECAL_DIS] = 1.
+ * Writing LMC3_DLL_CTL3[DCLK90_FWD] = 1 causes LMC3 to forward
+ * clock-delay information to LMC1. Setting
+ * LMC3_DLL_CTL3[DCLK90_RECAL_DIS] to 1 prevents LMC3 from periodically
+ * recalibrating this delay information.
+ */
+
+ DRAM_CSR_MODIFY(c, node, BDK_LMCX_DLL_CTL3(3),
+ c.s.dclk90_fwd = 1;
+ c.s.dclk90_recal_dis = 1);
+
+ /*
+ * 6. Read LMC3_DLL_CTL3 and wait for the result.
+ */
+
+ BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(3));
+ } /* if (ddr_interface_mask == 0xf) */
+
+
+ /* ONE-LMC MODE AFTER STEP 5 - NOTHING */
+
+ /* TWO-LMC MODE AFTER STEP 5 */
+ if (ddr_interface_mask == 0x3) {
+ cn78xx_lmc_dreset_init(node, 1);
+ }
+
+ /* FOUR-LMC MODE AFTER STEP 5 */
+ if (ddr_interface_mask == 0xf) {
+ cn78xx_lmc_dreset_init(node, 0);
+ cn78xx_lmc_dreset_init(node, 1);
+
+            /* Enable periodic recalibration of the DDR90 delay line in LMC0 and LMC1. */
+ DRAM_CSR_MODIFY(c, node, BDK_LMCX_DLL_CTL3(0),
+ c.s.dclk90_recal_dis = 0);
+ DRAM_CSR_MODIFY(c, node, BDK_LMCX_DLL_CTL3(1),
+ c.s.dclk90_recal_dis = 0);
+ }
+
+
+ /* Enable fine tune mode for all LMCs */
+ for (int lmc = 0; lmc<4; ++lmc) {
+ if ((ddr_interface_mask & (1 << lmc)) == 0)
+ continue;
+ DRAM_CSR_MODIFY(c, node, BDK_LMCX_DLL_CTL3(lmc),
+ c.s.fine_tune_mode = 1);
+ }
+
+ /* Enable the trim circuit on the appropriate channels to
+ adjust the DDR clock duty cycle for chips that support
+ it. */
+ if (! CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X)) { // added 81xx and 83xx
+ bdk_lmcx_phy_ctl_t lmc_phy_ctl;
+ int loop_interface_num;
+
+ for (loop_interface_num = 0; loop_interface_num<4; ++loop_interface_num) {
+ if ((ddr_interface_mask & (1 << loop_interface_num)) == 0)
+ continue;
+
+ lmc_phy_ctl.u = BDK_CSR_READ(node, BDK_LMCX_PHY_CTL(loop_interface_num));
+ lmc_phy_ctl.cn83xx.lv_mode = (~loop_interface_num) & 1; /* Odd LMCs = 0, Even LMCs = 1 */
+
+ ddr_print("LMC%d: PHY_CTL : 0x%016lx\n",
+ loop_interface_num, lmc_phy_ctl.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_PHY_CTL(loop_interface_num), lmc_phy_ctl.u);
+ }
+ }
+
+ } /* Do this once */
+
+ } /* if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX)) */
+
+ set_ddr_clock_initialized(node, ddr_interface_num, 1);
+ return(0);
+}
+void
+perform_lmc_reset(bdk_node_t node, int ddr_interface_num)
+{
+ /*
+ * 6.9.6 LMC RESET Initialization
+ *
+ * The purpose of this step is to assert/deassert the RESET# pin at the
+ * DDR3/DDR4 parts.
+ *
+ * This LMC RESET step is done for all enabled LMCs.
+ *
+ * It may be appropriate to skip this step if the DDR3/DDR4 DRAM parts
+ * are in self refresh and are currently preserving their
+ * contents. (Software can determine this via
+ * LMC(0..3)_RESET_CTL[DDR3PSV] in some circumstances.) The remainder of
+ * this section assumes that the DRAM contents need not be preserved.
+ *
+ * The remainder of this section assumes that the CN78XX DDRn_RESET_L pin
+ * is attached to the RESET# pin of the attached DDR3/DDR4 parts, as will
+ * be appropriate in many systems.
+ *
+ * (In other systems, such as ones that can preserve DDR3/DDR4 part
+ * contents while CN78XX is powered down, it will not be appropriate to
+ * directly attach the CN78XX DDRn_RESET_L pin to DRESET# of the
+ * DDR3/DDR4 parts, and this section may not apply.)
+ *
+ * The remainder of this section describes the sequence for LMCn.
+ *
+ * Perform the following six substeps for LMC reset initialization:
+ *
+ * 1. If not done already, assert DDRn_RESET_L pin by writing
+ * LMC(0..3)_RESET_ CTL[DDR3RST] = 0 without modifying any other
+ * LMC(0..3)_RESET_CTL fields.
+ */
+
+ if ( !ddr_memory_preserved(node)) {
+ /*
+ * 2. Read LMC(0..3)_RESET_CTL and wait for the result.
+ */
+
+ BDK_CSR_READ(node, BDK_LMCX_RESET_CTL(ddr_interface_num));
+
+ /*
+ * 3. Wait until RESET# assertion-time requirement from JEDEC DDR3/DDR4
+ * specification is satisfied (200 us during a power-on ramp, 100ns when
+ * power is already stable).
+ */
+
+ bdk_wait_usec(200);
+
+ /*
+ * 4. Deassert DDRn_RESET_L pin by writing LMC(0..3)_RESET_CTL[DDR3RST] = 1
+ * without modifying any other LMC(0..3)_RESET_CTL fields.
+ * 5. Read LMC(0..3)_RESET_CTL and wait for the result.
+ * 6. Wait a minimum of 500us. This guarantees the necessary T = 500us
+ * delay between DDRn_RESET_L deassertion and DDRn_DIMM*_CKE* assertion.
+ */
+ cn88xx_lmc_ddr3_reset(node, ddr_interface_num, LMC_DDR3_RESET_DEASSERT);
+
+ /* Toggle Reset Again */
+ /* That is, assert, then de-assert, one more time */
+ cn88xx_lmc_ddr3_reset(node, ddr_interface_num, LMC_DDR3_RESET_ASSERT);
+ cn88xx_lmc_ddr3_reset(node, ddr_interface_num, LMC_DDR3_RESET_DEASSERT);
+
+ } /* if ( !ddr_memory_preserved(node)) */
+}
+
+///////////////////////////////////////////////////////////
+// start of DBI switchover
+
+/* first pattern example:
+ GENERAL_PURPOSE0.DATA == 64'h00ff00ff00ff00ff;
+ GENERAL_PURPOSE1.DATA == 64'h00ff00ff00ff00ff;
+   GENERAL_PURPOSE2.DATA == 16'h0000;
+*/
+const uint64_t dbi_pattern[3] = { 0x00ff00ff00ff00ffULL, 0x00ff00ff00ff00ffULL, 0x0000ULL };
+
+// Perform switchover to DBI
+static void dbi_switchover_interface(int node, int lmc)
+{
+ bdk_lmcx_modereg_params0_t modereg_params0;
+ bdk_lmcx_modereg_params3_t modereg_params3;
+ bdk_lmcx_phy_ctl_t phy_ctl;
+ bdk_lmcx_config_t lmcx_config;
+ bdk_lmcx_ddr_pll_ctl_t ddr_pll_ctl;
+ int rank_mask, rankx, active_ranks;
+ uint64_t phys_addr, rank_offset;
+ int num_lmcs, errors;
+ int dbi_settings[9], byte, unlocked, retries;
+ int ecc_ena;
+ int rank_max = 1; // FIXME: make this 4 to try all the ranks
+
+ ddr_pll_ctl.u = BDK_CSR_READ(node, BDK_LMCX_DDR_PLL_CTL(0));
+
+ lmcx_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(lmc));
+ rank_mask = lmcx_config.s.init_status;
+ ecc_ena = lmcx_config.s.ecc_ena;
+
+ // FIXME: must filter out any non-supported configs
+ // ie, no DDR3, no x4 devices, no 81XX
+ if ((ddr_pll_ctl.cn83xx.ddr4_mode == 0) ||
+ (lmcx_config.s.mode_x4dev == 1) ||
+ CAVIUM_IS_MODEL(CAVIUM_CN81XX) )
+ {
+ ddr_print("N%d.LMC%d: DBI switchover: inappropriate device; EXITING...\n",
+ node, lmc);
+ return;
+ }
+
+ // this should be correct for 1 or 2 ranks, 1 or 2 DIMMs
+ num_lmcs = __bdk_dram_get_num_lmc(node);
+ rank_offset = 1ull << (28 + lmcx_config.s.pbank_lsb - lmcx_config.s.rank_ena + (num_lmcs/2));
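+    /* Worked example (values illustrative only): pbank_lsb = 5, rank_ena = 1,
+       num_lmcs = 2  ->  rank_offset = 1ull << (28 + 5 - 1 + 1) = 1ull << 33 = 8 GB */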
+
+ ddr_print("N%d.LMC%d: DBI switchover: rank mask 0x%x, rank size 0x%016llx.\n",
+ node, lmc, rank_mask, (unsigned long long)rank_offset);
+
+ /* 1. conduct the current init sequence as usual all the way
+ after software write leveling.
+ */
+
+ read_DAC_DBI_settings(node, lmc, /*DBI*/0, dbi_settings);
+
+ display_DAC_DBI_settings(node, lmc, /* DBI */0, ecc_ena, dbi_settings, " INIT");
+
+ /* 2. set DBI related CSRs as below and issue MR write.
+ MODEREG_PARAMS3.WR_DBI=1
+ MODEREG_PARAMS3.RD_DBI=1
+ PHY_CTL.DBI_MODE_ENA=1
+ */
+ modereg_params0.u = BDK_CSR_READ(node, BDK_LMCX_MODEREG_PARAMS0(lmc));
+
+ modereg_params3.u = BDK_CSR_READ(node, BDK_LMCX_MODEREG_PARAMS3(lmc));
+ modereg_params3.s.wr_dbi = 1;
+ modereg_params3.s.rd_dbi = 1;
+ DRAM_CSR_WRITE(node, BDK_LMCX_MODEREG_PARAMS3(lmc), modereg_params3.u);
+
+ phy_ctl.u = BDK_CSR_READ(node, BDK_LMCX_PHY_CTL(lmc));
+ phy_ctl.s.dbi_mode_ena = 1;
+ DRAM_CSR_WRITE(node, BDK_LMCX_PHY_CTL(lmc), phy_ctl.u);
+
+ /*
+      there are two options for data to send. Let's start with (1) and could move
+      to (2) in the future (a sketch of (2) follows the register writes below):
+
+      1) DBTRAIN_CTL[LFSR_PATTERN_SEL] = 0 (or for older chips where this does not exist)
+         set data directly in these registers. This will yield a clk/2 pattern:
+         GENERAL_PURPOSE0.DATA == 64'h00ff00ff00ff00ff;
+         GENERAL_PURPOSE1.DATA == 64'h00ff00ff00ff00ff;
+         GENERAL_PURPOSE2.DATA == 16'h0000;
+ 2) DBTRAIN_CTL[LFSR_PATTERN_SEL] = 1
+ here data comes from the LFSR generating a PRBS pattern
+ CHAR_CTL.EN = 0
+ CHAR_CTL.SEL = 0; // for PRBS
+ CHAR_CTL.DR = 1;
+ CHAR_CTL.PRBS = setup for whatever type of PRBS to send
+ CHAR_CTL.SKEW_ON = 1;
+ */
+ DRAM_CSR_WRITE(node, BDK_LMCX_GENERAL_PURPOSE0(lmc), dbi_pattern[0]);
+ DRAM_CSR_WRITE(node, BDK_LMCX_GENERAL_PURPOSE1(lmc), dbi_pattern[1]);
+ DRAM_CSR_WRITE(node, BDK_LMCX_GENERAL_PURPOSE2(lmc), dbi_pattern[2]);
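+
+    /* A sketch of option (2) above, kept for reference; it is not exercised
+       here. The CHAR_CTL field names follow the comment above and may need
+       adjusting to the actual bdk_lmcx_char_ctl_t layout for the target chip. */
+#if 0
+    bdk_lmcx_char_ctl_t char_ctl;
+    char_ctl.u = BDK_CSR_READ(node, BDK_LMCX_CHAR_CTL(lmc));
+    char_ctl.s.en      = 0;
+    char_ctl.s.sel     = 0; // for PRBS
+    char_ctl.s.dr      = 1;
+    char_ctl.s.prbs    = 0; // set up for whatever type of PRBS to send
+    char_ctl.s.skew_on = 1;
+    DRAM_CSR_WRITE(node, BDK_LMCX_CHAR_CTL(lmc), char_ctl.u);
+#endif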
+
+ /*
+ 3. adjust cas_latency (only necessary if RD_DBI is set).
+ here is my code for doing this:
+
+ if (csr_model.MODEREG_PARAMS3.RD_DBI.value == 1) begin
+ case (csr_model.MODEREG_PARAMS0.CL.value)
+ 0,1,2,3,4: csr_model.MODEREG_PARAMS0.CL.value += 2; // CL 9-13 -> 11-15
+ 5: begin
+           // CL=14, CWL=10,12 gets +2, CWL=11,14 gets +3
+ if((csr_model.MODEREG_PARAMS0.CWL.value==1 || csr_model.MODEREG_PARAMS0.CWL.value==3))
+ csr_model.MODEREG_PARAMS0.CL.value = 7; // 14->16
+ else
+ csr_model.MODEREG_PARAMS0.CL.value = 13; // 14->17
+ end
+ 6: csr_model.MODEREG_PARAMS0.CL.value = 8; // 15->18
+ 7: csr_model.MODEREG_PARAMS0.CL.value = 14; // 16->19
+ 8: csr_model.MODEREG_PARAMS0.CL.value = 15; // 18->21
+ default:
+ `cn_fatal(("Error mem_cfg (%s) CL (%d) with RD_DBI=1, I am not sure what to do.",
+ mem_cfg, csr_model.MODEREG_PARAMS3.RD_DBI.value))
+ endcase
+ end
+ */
+ if (modereg_params3.s.rd_dbi == 1) {
+ int old_cl, new_cl, old_cwl;
+
+ old_cl = modereg_params0.s.cl;
+ old_cwl = modereg_params0.s.cwl;
+
+ switch (old_cl) {
+ case 0: case 1: case 2: case 3: case 4: new_cl = old_cl + 2; break; // 9-13->11-15
+        // CL=14, CWL=10,12 gets +2, CWL=11,14 gets +3
+ case 5: new_cl = ((old_cwl == 1) || (old_cwl == 3)) ? 7 : 13; break;
+ case 6: new_cl = 8; break; // 15->18
+ case 7: new_cl = 14; break; // 16->19
+ case 8: new_cl = 15; break; // 18->21
+ default:
+ error_print("ERROR: Bad CL value (%d) for DBI switchover.\n", old_cl);
+ // FIXME: need to error exit here...
+ old_cl = -1;
+ new_cl = -1;
+ break;
+ }
+ ddr_print("N%d.LMC%d: DBI switchover: CL ADJ: old_cl 0x%x, old_cwl 0x%x, new_cl 0x%x.\n",
+ node, lmc, old_cl, old_cwl, new_cl);
+ modereg_params0.s.cl = new_cl;
+ DRAM_CSR_WRITE(node, BDK_LMCX_MODEREG_PARAMS0(lmc), modereg_params0.u);
+ }
+
+ /*
+ 4. issue MRW to MR0 (CL) and MR5 (DBI), using LMC sequence SEQ_CTL[SEQ_SEL] = MRW.
+ */
+ // Use the default values, from the CSRs fields
+ // also, do B-sides for RDIMMs...
+
+ for (rankx = 0; rankx < 4; rankx++) {
+ if (!(rank_mask & (1 << rankx)))
+ continue;
+
+ // for RDIMMs, B-side writes should get done automatically when the A-side is written
+ ddr4_mrw(node, lmc, rankx, -1/* use_default*/, 0/*MRreg*/, 0 /*A-side*/); /* MR0 */
+ ddr4_mrw(node, lmc, rankx, -1/* use_default*/, 5/*MRreg*/, 0 /*A-side*/); /* MR5 */
+
+ } /* for (rankx = 0; rankx < 4; rankx++) */
+
+ /*
+ 5. conduct DBI bit deskew training via the General Purpose R/W sequence (dbtrain).
+ may need to run this over and over to get a lock (I need up to 5 in simulation):
+ SEQ_CTL[SEQ_SEL] = RW_TRAINING (15)
+ DBTRAIN_CTL.CMD_COUNT_EXT = all 1's
+ DBTRAIN_CTL.READ_CMD_COUNT = all 1's
+ DBTRAIN_CTL.TCCD_SEL = set according to MODEREG_PARAMS3[TCCD_L]
+ DBTRAIN_CTL.RW_TRAIN = 1
+        DBTRAIN_CTL.READ_DQ_COUNT = don't care
+ DBTRAIN_CTL.WRITE_ENA = 1;
+ DBTRAIN_CTL.ACTIVATE = 1;
+ DBTRAIN_CTL LRANK, PRANK, ROW_A, BG, BA, COLUMN_A = set to a valid address
+ */
+
+ // NOW - do the training
+ ddr_print("N%d.LMC%d: DBI switchover: TRAINING begins...\n",
+ node, lmc);
+
+ active_ranks = 0;
+ for (rankx = 0; rankx < rank_max; rankx++) {
+ if (!(rank_mask & (1 << rankx)))
+ continue;
+
+ phys_addr = rank_offset * active_ranks;
+ // FIXME: now done by test_dram_byte_hw()
+ //phys_addr |= (lmc << 7);
+ //phys_addr = bdk_numa_get_address(node, phys_addr); // map to node
+
+ active_ranks++;
+
+ retries = 0;
+
+#if 0
+ phy_ctl.u = BDK_CSR_READ(node, BDK_LMCX_PHY_CTL(lmc));
+ phy_ctl.s.phy_reset = 1; // FIXME: this may reset too much?
+ DRAM_CSR_WRITE(node, BDK_LMCX_PHY_CTL(lmc), phy_ctl.u);
+#endif
+
+restart_training:
+
+ // NOTE: return is a bitmask of the erroring bytelanes - we only print it
+ errors = test_dram_byte_hw(node, lmc, phys_addr, DBTRAIN_DBI, NULL);
+
+ ddr_print("N%d.LMC%d: DBI switchover: TEST: rank %d, phys_addr 0x%lx, errors 0x%x.\n",
+ node, lmc, rankx, phys_addr, errors);
+
+ // NEXT - check for locking
+ unlocked = 0;
+ read_DAC_DBI_settings(node, lmc, /*DBI*/0, dbi_settings);
+
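+        // Bit 0 of each per-byte DBI setting appears to be the lock indicator;
+        // count the bytelanes that have not locked yet.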
+ for (byte = 0; byte < (8+ecc_ena); byte++) {
+ unlocked += (dbi_settings[byte] & 1) ^ 1;
+ }
+
+ // FIXME: print out the DBI settings array after each rank?
+ if (rank_max > 1) // only when doing more than 1 rank
+ display_DAC_DBI_settings(node, lmc, /* DBI */0, ecc_ena, dbi_settings, " RANK");
+
+ if (unlocked > 0) {
+ ddr_print("N%d.LMC%d: DBI switchover: LOCK: %d still unlocked.\n",
+ node, lmc, unlocked);
+
+ retries++;
+ if (retries < 10) {
+ goto restart_training;
+ } else {
+ ddr_print("N%d.LMC%d: DBI switchover: LOCK: %d retries exhausted.\n",
+ node, lmc, retries);
+ }
+ }
+ } /* for (rankx = 0; rankx < rank_max; rankx++) */
+
+ // print out the final DBI settings array
+ display_DAC_DBI_settings(node, lmc, /* DBI */0, ecc_ena, dbi_settings, "FINAL");
+}
+// end of DBI switchover
+///////////////////////////////////////////////////////////
+
+uint32_t measure_octeon_ddr_clock(bdk_node_t node,
+ const ddr_configuration_t *ddr_configuration,
+ uint32_t cpu_hertz,
+ uint32_t ddr_hertz,
+ uint32_t ddr_ref_hertz,
+ int ddr_interface_num,
+ uint32_t ddr_interface_mask)
+{
+ uint64_t core_clocks;
+ uint64_t ddr_clocks;
+ uint64_t calc_ddr_hertz;
+
+ if (ddr_configuration) {
+ if (initialize_ddr_clock(node,
+ ddr_configuration,
+ cpu_hertz,
+ ddr_hertz,
+ ddr_ref_hertz,
+ ddr_interface_num,
+ ddr_interface_mask) != 0)
+ return 0;
+ }
+
+ /* Dynamically determine the DDR clock speed */
+ core_clocks = bdk_clock_get_count(BDK_CLOCK_TIME);
+ ddr_clocks = BDK_CSR_READ(node, BDK_LMCX_DCLK_CNT(ddr_interface_num));
+ bdk_wait_usec(100000); /* 100ms */
+ ddr_clocks = BDK_CSR_READ(node, BDK_LMCX_DCLK_CNT(ddr_interface_num)) - ddr_clocks;
+ core_clocks = bdk_clock_get_count(BDK_CLOCK_TIME) - core_clocks;
+ calc_ddr_hertz = ddr_clocks * bdk_clock_get_rate(bdk_numa_local(), BDK_CLOCK_TIME) / core_clocks;
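+    /* Worked example (values illustrative only): with a 100 MHz time base,
+       the ~100 ms window gives core_clocks ~= 10000000; if ddr_clocks = 80000000
+       over the same window, calc_ddr_hertz = 80e6 * 100e6 / 10e6 = 800 MHz. */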
+
+ /* Asim doesn't have a DDR clock, force the measurement to be correct */
+ if (bdk_is_platform(BDK_PLATFORM_ASIM))
+ calc_ddr_hertz = ddr_hertz;
+
+ ddr_print("LMC%d: Measured DDR clock: %lu, cpu clock: %u, ddr clocks: %lu\n",
+ ddr_interface_num, calc_ddr_hertz, cpu_hertz, ddr_clocks);
+
+ /* Check for unreasonable settings. */
+ if (calc_ddr_hertz == 0) {
+ error_print("DDR clock misconfigured. Exiting.\n");
+ exit(1);
+ }
+ return calc_ddr_hertz;
+}
+
+int octeon_ddr_initialize(bdk_node_t node,
+ uint32_t cpu_hertz,
+ uint32_t ddr_hertz,
+ uint32_t ddr_ref_hertz,
+ uint32_t ddr_interface_mask,
+ const ddr_configuration_t *ddr_configuration,
+ uint32_t *measured_ddr_hertz,
+ int board_type,
+ int board_rev_maj,
+ int board_rev_min)
+{
+ uint32_t ddr_config_valid_mask = 0;
+ int memsize_mbytes = 0;
+ const char *s;
+ int retval;
+ int interface_index;
+ uint32_t ddr_max_speed = 1210000000; /* needs to be this high for DDR4 */
+ uint32_t calc_ddr_hertz = -1;
+
+#ifndef OCTEON_SDK_VERSION_STRING
+# define OCTEON_SDK_VERSION_STRING "Development Build"
+#endif
+
+ ddr_print(OCTEON_SDK_VERSION_STRING": $Revision: 102369 $\n");
+
+#ifdef CAVIUM_ONLY
+ /* Override speed restrictions to support internal testing. */
+ ddr_max_speed = 1210000000;
+#endif /* CAVIUM_ONLY */
+
+ if (ddr_hertz > ddr_max_speed) {
+ error_print("DDR clock speed %u exceeds maximum speed supported by "
+ "processor, reducing to %uHz\n",
+ ddr_hertz, ddr_max_speed);
+ ddr_hertz = ddr_max_speed;
+ }
+
+ // Do this earlier so we can return without doing unnecessary things...
+ /* Check for DIMM 0 socket populated for each LMC present */
+ for (interface_index = 0; interface_index < 4; ++interface_index) {
+ if ((ddr_interface_mask & (1 << interface_index)) &&
+ (validate_dimm(node, &ddr_configuration[interface_index].dimm_config_table[0])) == 1)
+ {
+ ddr_config_valid_mask |= (1 << interface_index);
+ }
+ }
+
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX)) {
+ int four_lmc_mode = 1;
+
+ // Validate that it can only be 2-LMC mode or 4-LMC mode
+ if ((ddr_config_valid_mask != 0x03) && (ddr_config_valid_mask != 0x0f)) {
+ puts("ERROR: Invalid LMC configuration detected.\n");
+ return -1;
+ }
+
+ if ((s = lookup_env_parameter("ddr_four_lmc")) != NULL)
+ four_lmc_mode = !!strtoul(s, NULL, 0);
+
+ if (!four_lmc_mode) {
+ puts("Forcing two-LMC Mode.\n");
+ ddr_config_valid_mask &= ~(3<<2); /* Invalidate LMC[2:3] */
+ }
+ }
+
+ if (!ddr_config_valid_mask) {
+ puts("ERROR: No valid DIMMs detected on any DDR interface.\n");
+ return -1;
+ }
+
+ {
+ /*
+
+ rdf_cnt: Defines the sample point of the LMC response data in
+ the DDR-clock/core-clock crossing. For optimal
+ performance set to 10 * (DDR-clock period/core-clock
+ period) - 1. To disable set to 0. All other values
+ are reserved.
+ */
+
+ uint64_t rdf_cnt;
+ BDK_CSR_INIT(l2c_ctl, node, BDK_L2C_CTL);
+ /* It is more convenient to compute the ratio using clock
+ frequencies rather than clock periods. */
+ rdf_cnt = (((uint64_t) 10 * cpu_hertz) / ddr_hertz) - 1;
+ rdf_cnt = rdf_cnt<256 ? rdf_cnt : 255;
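+        /* e.g. (illustrative): cpu_hertz = 1200 MHz, ddr_hertz = 800 MHz
+           -> rdf_cnt = 10*1200/800 - 1 = 14 */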
+ l2c_ctl.s.rdf_cnt = rdf_cnt;
+
+ if ((s = lookup_env_parameter("early_fill_count")) != NULL)
+ l2c_ctl.s.rdf_cnt = strtoul(s, NULL, 0);
+
+ ddr_print("%-45s : %d, cpu_hertz:%u, ddr_hertz:%u\n", "EARLY FILL COUNT ",
+ l2c_ctl.s.rdf_cnt, cpu_hertz, ddr_hertz);
+ DRAM_CSR_WRITE(node, BDK_L2C_CTL, l2c_ctl.u);
+ }
+
+ /* Check to see if we should limit the number of L2 ways. */
+ if ((s = lookup_env_parameter("limit_l2_ways")) != NULL) {
+ int ways = strtoul(s, NULL, 10);
+ limit_l2_ways(node, ways, 1);
+ }
+
+ /* We measure the DDR frequency by counting DDR clocks. We can
+ * confirm or adjust the expected frequency as necessary. We use
+ * the measured frequency to make accurate timing calculations
+ * used to configure the controller.
+ */
+ for (interface_index = 0; interface_index < 4; ++interface_index) {
+ uint32_t tmp_hertz;
+
+ if (! (ddr_config_valid_mask & (1 << interface_index)))
+ continue;
+
+ try_again:
+ // if we are LMC0
+ if (interface_index == 0) {
+ // if we are asking for 100 MHz refclk, we can only get it via alternate, so switch to it
+ if (ddr_ref_hertz == 100000000) {
+ DRAM_CSR_MODIFY(c, node, BDK_LMCX_DDR_PLL_CTL(0), c.s.dclk_alt_refclk_sel = 1);
+ bdk_wait_usec(1000); // wait 1 msec
+ } else {
+ // if we are NOT asking for 100MHz, then reset to (assumed) 50MHz and go on
+ DRAM_CSR_MODIFY(c, node, BDK_LMCX_DDR_PLL_CTL(0), c.s.dclk_alt_refclk_sel = 0);
+ bdk_wait_usec(1000); // wait 1 msec
+ }
+ }
+
+ tmp_hertz = measure_octeon_ddr_clock(node,
+ &ddr_configuration[interface_index],
+ cpu_hertz,
+ ddr_hertz,
+ ddr_ref_hertz,
+ interface_index,
+ ddr_config_valid_mask);
+
+ // if we are LMC0 and we are asked for 100 MHz refclk,
+ // we must be sure it is available
+ // If not, we print an error message, set to 50MHz, and go on...
+ if ((interface_index == 0) && (ddr_ref_hertz == 100000000)) {
+ // validate that the clock returned is close enough to the clock desired
+ // FIXME: is 5% close enough?
+ int hertz_diff = _abs((int)tmp_hertz - (int)ddr_hertz);
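+            // e.g. (illustrative): ddr_hertz = 800 MHz allows at most 40 MHz of
+            // error before we conclude the 100 MHz refclk did not take effect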
+            if (hertz_diff > ((int)ddr_hertz * 5 / 100)) { // nope, diff is greater than 5%
+ ddr_print("N%d: DRAM init: requested 100 MHz refclk NOT FOUND\n", node);
+ ddr_ref_hertz = bdk_clock_get_rate(node, BDK_CLOCK_MAIN_REF);
+ set_ddr_clock_initialized(node, 0, 0); // clear the flag before trying again!!
+ goto try_again;
+ } else {
+ ddr_print("N%d: DRAM Init: requested 100 MHz refclk FOUND and SELECTED.\n", node);
+ }
+ }
+
+ if (tmp_hertz > 0)
+ calc_ddr_hertz = tmp_hertz;
+
+ } /* for (interface_index = 0; interface_index < 4; ++interface_index) */
+
+ if (measured_ddr_hertz)
+ *measured_ddr_hertz = calc_ddr_hertz;
+
+ memsize_mbytes = 0;
+ for (interface_index = 0; interface_index < 4; ++interface_index) {
+ if (! (ddr_config_valid_mask & (1 << interface_index))) { // if LMC has no DIMMs found
+ if (ddr_interface_mask & (1 << interface_index)) { // but the LMC is present
+ for (int i = 0; i < DDR_CFG_T_MAX_DIMMS; i++) {
+ // check for slot presence
+ if (validate_dimm(node, &ddr_configuration[interface_index].dimm_config_table[i]) == 0)
+ printf("N%d.LMC%d.DIMM%d: Not Present\n", node, interface_index, i);
+ }
+ error_print("N%d.LMC%d Configuration Completed: 0 MB\n", node, interface_index);
+ }
+ continue;
+ }
+
+ retval = init_octeon_dram_interface(node,
+ &ddr_configuration[interface_index],
+ calc_ddr_hertz, /* Configure using measured value */
+ cpu_hertz,
+ ddr_ref_hertz,
+ board_type,
+ board_rev_maj,
+ board_rev_min,
+ interface_index,
+ ddr_config_valid_mask);
+ if (retval > 0)
+ memsize_mbytes += retval;
+ }
+
+ if (memsize_mbytes == 0)
+ /* All interfaces failed to initialize, so return error */
+ return -1;
+
+ // switch over to DBI mode only for chips that support it, and enabled by envvar
+ if (! CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X)) { // added 81xx and 83xx
+ int do_dbi = 0;
+ if ((s = lookup_env_parameter("ddr_dbi_switchover")) != NULL) {
+ do_dbi = !!strtoul(s, NULL, 10);
+ }
+ if (do_dbi) {
+ ddr_print("DBI Switchover starting...\n");
+ for (interface_index = 0; interface_index < 4; ++interface_index) {
+ if (! (ddr_config_valid_mask & (1 << interface_index)))
+ continue;
+ dbi_switchover_interface(node, interface_index);
+ }
+ printf("DBI Switchover finished.\n");
+ }
+ }
+
+ // limit memory size if desired...
+ if ((s = lookup_env_parameter("limit_dram_mbytes")) != NULL) {
+ unsigned int mbytes = strtoul(s, NULL, 10);
+ if (mbytes > 0) {
+ memsize_mbytes = mbytes;
+ printf("Limiting DRAM size to %d MBytes based on limit_dram_mbytes env. variable\n",
+ mbytes);
+ }
+ }
+
+ return memsize_mbytes;
+}
+
diff --git a/src/vendorcode/cavium/bdk/libdram/lib_octeon_shared.h b/src/vendorcode/cavium/bdk/libdram/lib_octeon_shared.h
new file mode 100644
index 0000000000..b691e5286b
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libdram/lib_octeon_shared.h
@@ -0,0 +1,124 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+extern const dimm_odt_config_t disable_odt_config[];
+
+#define rttnom_none 0 /* Rtt_Nom disabled */
+#define rttnom_60ohm 1 /* RZQ/4 = 240/4 = 60 ohms */
+#define rttnom_120ohm 2 /* RZQ/2 = 240/2 = 120 ohms */
+#define rttnom_40ohm 3 /* RZQ/6 = 240/6 = 40 ohms */
+#define rttnom_20ohm 4 /* RZQ/12 = 240/12 = 20 ohms */
+#define rttnom_30ohm 5 /* RZQ/8 = 240/8 = 30 ohms */
+#define rttnom_rsrv1 6 /* Reserved */
+#define rttnom_rsrv2 7 /* Reserved */
+
+#define rttwr_none 0 /* Dynamic ODT off */
+#define rttwr_60ohm 1 /* RZQ/4 = 240/4 = 60 ohms */
+#define rttwr_120ohm 2 /* RZQ/2 = 240/2 = 120 ohms */
+#define rttwr_rsrv1 3 /* Reserved */
+
+#define dic_40ohm 0 /* RZQ/6 = 240/6 = 40 ohms */
+#define dic_34ohm 1 /* RZQ/7 = 240/7 = 34 ohms */
+
+#define driver_24_ohm 1
+#define driver_27_ohm 2
+#define driver_30_ohm 3
+#define driver_34_ohm 4
+#define driver_40_ohm 5
+#define driver_48_ohm 6
+#define driver_60_ohm 7
+
+#define rodt_ctl_none 0
+#define rodt_ctl_20_ohm 1
+#define rodt_ctl_30_ohm 2
+#define rodt_ctl_40_ohm 3
+#define rodt_ctl_60_ohm 4
+#define rodt_ctl_120_ohm 5
+
+#define ddr4_rttnom_none 0 /* Rtt_Nom disabled */
+#define ddr4_rttnom_60ohm 1 /* RZQ/4 = 240/4 = 60 ohms */
+#define ddr4_rttnom_120ohm 2 /* RZQ/2 = 240/2 = 120 ohms */
+#define ddr4_rttnom_40ohm 3 /* RZQ/6 = 240/6 = 40 ohms */
+#define ddr4_rttnom_240ohm 4 /* RZQ/1 = 240/1 = 240 ohms */
+#define ddr4_rttnom_48ohm 5 /* RZQ/5 = 240/5 = 48 ohms */
+#define ddr4_rttnom_80ohm 6 /* RZQ/3 = 240/3 = 80 ohms */
+#define ddr4_rttnom_34ohm 7 /* RZQ/7 = 240/7 = 34 ohms */
+
+#define ddr4_rttwr_none 0 /* Dynamic ODT off */
+#define ddr4_rttwr_120ohm 1 /* RZQ/2 = 240/2 = 120 ohms */
+#define ddr4_rttwr_240ohm 2 /* RZQ/1 = 240/1 = 240 ohms */
+#define ddr4_rttwr_HiZ 3 /* HiZ */
+/* This setting will be available for cn78xx cn88xx pass 2 and cn73xx
+ pass 1. It is disabled for now. */
+//#define ddr4_rttwr_80ohm 4 /* RZQ/3 = 240/3 = 80 ohms */
+
+#define ddr4_dic_34ohm 0 /* RZQ/7 = 240/7 = 34 ohms */
+#define ddr4_dic_48ohm 1 /* RZQ/5 = 240/5 = 48 ohms */
+
+#define ddr4_rttpark_none 0 /* Rtt_Park disabled */
+#define ddr4_rttpark_60ohm 1 /* RZQ/4 = 240/4 = 60 ohms */
+#define ddr4_rttpark_120ohm 2 /* RZQ/2 = 240/2 = 120 ohms */
+#define ddr4_rttpark_40ohm 3 /* RZQ/6 = 240/6 = 40 ohms */
+#define ddr4_rttpark_240ohm 4 /* RZQ/1 = 240/1 = 240 ohms */
+#define ddr4_rttpark_48ohm 5 /* RZQ/5 = 240/5 = 48 ohms */
+#define ddr4_rttpark_80ohm 6 /* RZQ/3 = 240/3 = 80 ohms */
+#define ddr4_rttpark_34ohm 7 /* RZQ/7 = 240/7 = 34 ohms */
+
+#define ddr4_driver_26_ohm 2
+#define ddr4_driver_30_ohm 3
+#define ddr4_driver_34_ohm 4
+#define ddr4_driver_40_ohm 5
+#define ddr4_driver_48_ohm 6
+
+#define ddr4_dqx_driver_24_ohm 1
+#define ddr4_dqx_driver_27_ohm 2
+#define ddr4_dqx_driver_30_ohm 3
+#define ddr4_dqx_driver_34_ohm 4
+#define ddr4_dqx_driver_40_ohm 5
+#define ddr4_dqx_driver_48_ohm 6
+#define ddr4_dqx_driver_60_ohm 7
+
+#define ddr4_rodt_ctl_none 0
+#define ddr4_rodt_ctl_40_ohm 1
+#define ddr4_rodt_ctl_60_ohm 2
+#define ddr4_rodt_ctl_80_ohm 3
+#define ddr4_rodt_ctl_120_ohm 4
+#define ddr4_rodt_ctl_240_ohm 5
+#define ddr4_rodt_ctl_34_ohm 6
+#define ddr4_rodt_ctl_48_ohm 7
diff --git a/src/vendorcode/cavium/bdk/libdram/libdram-config-load.c b/src/vendorcode/cavium/bdk/libdram/libdram-config-load.c
new file mode 100644
index 0000000000..5173290187
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libdram/libdram-config-load.c
@@ -0,0 +1,262 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+
+/**
+ * Load an "odt_*rank_config" structure
+ *
+ * @param cfg       Config to fill
+ * @param ranks     Number of ranks we're loading (1,2,4)
+ * @param num_dimms Number of DIMMs this entry is for (1 based)
+ * @param lmc       Which LMC this is for
+ * @param node      Node we're loading for
+ */
+static void load_rank_data(dram_config_t *cfg, int ranks, int num_dimms, int lmc, bdk_node_t node)
+{
+ /* Get a pointer to the structure we are filling */
+ dimm_odt_config_t *c;
+ switch (ranks)
+ {
+ case 1:
+ c = &cfg->config[lmc].odt_1rank_config[num_dimms - 1];
+ break;
+ case 2:
+ c = &cfg->config[lmc].odt_2rank_config[num_dimms - 1];
+ break;
+ case 4:
+ c = &cfg->config[lmc].odt_4rank_config[num_dimms - 1];
+ break;
+ default:
+ bdk_fatal("Unexpected number of ranks\n");
+ break;
+ }
+
+ /* Fill the global items */
+ c->odt_ena = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_DQX_CTL, ranks, num_dimms, lmc, node);
+ c->odt_mask = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_WODT_MASK, ranks, num_dimms, lmc, node);
+
+ /* Fill the per rank items */
+ int rank = 0;
+ c->odt_mask1.s.pasr_00 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE1_PASR, ranks, num_dimms, rank, lmc, node);
+ c->odt_mask1.s.asr_00 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE1_ASR, ranks, num_dimms, rank, lmc, node);
+ c->odt_mask1.s.srt_00 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE1_SRT, ranks, num_dimms, rank, lmc, node);
+ c->odt_mask1.s.rtt_wr_00 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE1_RTT_WR, ranks, num_dimms, rank, lmc, node);
+ c->odt_mask1.s.rtt_wr_00_ext = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE1_RTT_WR, ranks, num_dimms, rank, lmc, node) >> 2;
+ c->odt_mask1.s.dic_00 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE1_DIC, ranks, num_dimms, rank, lmc, node);
+ c->odt_mask1.s.rtt_nom_00 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE1_RTT_NOM, ranks, num_dimms, rank, lmc, node);
+ c->odt_mask1.s.db_output_impedance = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE1_DB_OUTPUT_IMPEDANCE, ranks, num_dimms, lmc, node);
+ rank = 1;
+ c->odt_mask1.s.pasr_01 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE1_PASR, ranks, num_dimms, rank, lmc, node);
+ c->odt_mask1.s.asr_01 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE1_ASR, ranks, num_dimms, rank, lmc, node);
+ c->odt_mask1.s.srt_01 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE1_SRT, ranks, num_dimms, rank, lmc, node);
+ c->odt_mask1.s.rtt_wr_01 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE1_RTT_WR, ranks, num_dimms, rank, lmc, node);
+ c->odt_mask1.s.rtt_wr_01_ext = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE1_RTT_WR, ranks, num_dimms, rank, lmc, node) >> 2;
+ c->odt_mask1.s.dic_01 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE1_DIC, ranks, num_dimms, rank, lmc, node);
+ c->odt_mask1.s.rtt_nom_01 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE1_RTT_NOM, ranks, num_dimms, rank, lmc, node);
+ rank = 2;
+ c->odt_mask1.s.pasr_10 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE1_PASR, ranks, num_dimms, rank, lmc, node);
+ c->odt_mask1.s.asr_10 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE1_ASR, ranks, num_dimms, rank, lmc, node);
+ c->odt_mask1.s.srt_10 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE1_SRT, ranks, num_dimms, rank, lmc, node);
+ c->odt_mask1.s.rtt_wr_10 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE1_RTT_WR, ranks, num_dimms, rank, lmc, node);
+ c->odt_mask1.s.rtt_wr_10_ext = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE1_RTT_WR, ranks, num_dimms, rank, lmc, node) >> 2;
+ c->odt_mask1.s.dic_10 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE1_DIC, ranks, num_dimms, rank, lmc, node);
+ c->odt_mask1.s.rtt_nom_10 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE1_RTT_NOM, ranks, num_dimms, rank, lmc, node);
+ rank = 3;
+ c->odt_mask1.s.pasr_11 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE1_PASR, ranks, num_dimms, rank, lmc, node);
+ c->odt_mask1.s.asr_11 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE1_ASR, ranks, num_dimms, rank, lmc, node);
+ c->odt_mask1.s.srt_11 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE1_SRT, ranks, num_dimms, rank, lmc, node);
+ c->odt_mask1.s.rtt_wr_11 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE1_RTT_WR, ranks, num_dimms, rank, lmc, node);
+ c->odt_mask1.s.rtt_wr_11_ext = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE1_RTT_WR, ranks, num_dimms, rank, lmc, node) >> 2;
+ c->odt_mask1.s.dic_11 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE1_DIC, ranks, num_dimms, rank, lmc, node);
+ c->odt_mask1.s.rtt_nom_11 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE1_RTT_NOM, ranks, num_dimms, rank, lmc, node);
+ rank = 0;
+ c->odt_mask2.s.rtt_park_00 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE2_RTT_PARK, ranks, num_dimms, rank, lmc, node);
+ c->odt_mask2.s.vref_value_00 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE2_VREF_VALUE, ranks, num_dimms, rank, lmc, node);
+ c->odt_mask2.s.vref_range_00 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE2_VREF_RANGE, ranks, num_dimms, rank, lmc, node);
+ c->odt_mask2.s.vrefdq_train_en = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE2_VREFDQ_TRAIN_EN, ranks, num_dimms, lmc, node);
+ rank = 1;
+ c->odt_mask2.s.rtt_park_01 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE2_RTT_PARK, ranks, num_dimms, rank, lmc, node);
+ c->odt_mask2.s.vref_value_01 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE2_VREF_VALUE, ranks, num_dimms, rank, lmc, node);
+ c->odt_mask2.s.vref_range_01 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE2_VREF_RANGE, ranks, num_dimms, rank, lmc, node);
+ rank = 2;
+ c->odt_mask2.s.rtt_park_10 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE2_RTT_PARK, ranks, num_dimms, rank, lmc, node);
+ c->odt_mask2.s.vref_value_10 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE2_VREF_VALUE, ranks, num_dimms, rank, lmc, node);
+ c->odt_mask2.s.vref_range_10 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE2_VREF_RANGE, ranks, num_dimms, rank, lmc, node);
+ rank = 3;
+ c->odt_mask2.s.rtt_park_11 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE2_RTT_PARK, ranks, num_dimms, rank, lmc, node);
+ c->odt_mask2.s.vref_value_11 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE2_VREF_VALUE, ranks, num_dimms, rank, lmc, node);
+ c->odt_mask2.s.vref_range_11 = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_MODE2_VREF_RANGE, ranks, num_dimms, rank, lmc, node);
+
+ /* Fill more global items */
+ c->qs_dic = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_RODT_CTL, ranks, num_dimms, lmc, node);
+ c->rodt_ctl = bdk_config_get_int(BDK_CONFIG_DDR_RANKS_RODT_MASK, ranks, num_dimms, lmc, node);
+}
+
+/**
+ * Load a DRAM configuration based on the current bdk-config settings
+ *
+ * @param node Node the DRAM config is for
+ *
+ * @return Pointer to __libdram_global_cfg, a global structure. Returns NULL if bdk-config
+ * lacks information about DRAM.
+ */
+const dram_config_t *libdram_config_load(bdk_node_t node)
+{
+ dram_config_t *cfg = &__libdram_global_cfg;
+ const int MAX_LMCS = sizeof(cfg->config) / sizeof(cfg->config[0]);
+
+ /* Make all fields for the node default to zero */
+ memset(cfg, 0, sizeof(*cfg));
+
+ /* Fill the SPD data first as some parameters need to know the DRAM type
+ to lookup the correct values */
+ for (int lmc = 0; lmc < MAX_LMCS; lmc++)
+ {
+ for (int dimm = 0; dimm < DDR_CFG_T_MAX_DIMMS; dimm++)
+ {
+ int spd_addr = bdk_config_get_int(BDK_CONFIG_DDR_SPD_ADDR, dimm, lmc, node);
+ if (spd_addr)
+ {
+ cfg->config[lmc].dimm_config_table[dimm].spd_addr = spd_addr;
+ }
+ else
+ {
+ int spd_size;
+ const void *spd_data = bdk_config_get_blob(&spd_size, BDK_CONFIG_DDR_SPD_DATA, dimm, lmc, node);
+ if (spd_data && spd_size)
+ cfg->config[lmc].dimm_config_table[dimm].spd_ptr = spd_data;
+ }
+ }
+ }
+
+    /* Check that we know how to get DIMM information. If not, return failure */
+ if (!cfg->config[0].dimm_config_table[0].spd_addr && !cfg->config[0].dimm_config_table[0].spd_ptr)
+ return NULL;
+
+ cfg->name = "Loaded from bdk-config";
+ for (int lmc = 0; lmc < MAX_LMCS; lmc++)
+ {
+ for (int num_dimms = 1; num_dimms <= DDR_CFG_T_MAX_DIMMS; num_dimms++)
+ {
+ load_rank_data(cfg, 1, num_dimms, lmc, node);
+ load_rank_data(cfg, 2, num_dimms, lmc, node);
+ load_rank_data(cfg, 4, num_dimms, lmc, node);
+ }
+
+ ddr_configuration_t *c = &cfg->config[lmc];
+ ddr3_custom_config_t *custom = &c->custom_lmc_config;
+ custom->min_rtt_nom_idx = bdk_config_get_int(BDK_CONFIG_DDR_CUSTOM_MIN_RTT_NOM_IDX, lmc, node);
+ custom->max_rtt_nom_idx = bdk_config_get_int(BDK_CONFIG_DDR_CUSTOM_MAX_RTT_NOM_IDX, lmc, node);
+ custom->min_rodt_ctl = bdk_config_get_int(BDK_CONFIG_DDR_CUSTOM_MIN_RODT_CTL, lmc, node);
+ custom->max_rodt_ctl = bdk_config_get_int(BDK_CONFIG_DDR_CUSTOM_MAX_RODT_CTL, lmc, node);
+ custom->ck_ctl = bdk_config_get_int(BDK_CONFIG_DDR_CUSTOM_CK_CTL, lmc, node);
+ custom->cmd_ctl = bdk_config_get_int(BDK_CONFIG_DDR_CUSTOM_CMD_CTL, lmc, node);
+ custom->ctl_ctl = bdk_config_get_int(BDK_CONFIG_DDR_CUSTOM_CTL_CTL, lmc, node);
+ custom->min_cas_latency = bdk_config_get_int(BDK_CONFIG_DDR_CUSTOM_MIN_CAS_LATENCY, lmc, node);
+ custom->offset_en = bdk_config_get_int(BDK_CONFIG_DDR_CUSTOM_OFFSET_EN, lmc, node);
+ custom->offset_udimm = bdk_config_get_int(BDK_CONFIG_DDR_CUSTOM_OFFSET, "UDIMM", lmc, node);
+ custom->offset_rdimm = bdk_config_get_int(BDK_CONFIG_DDR_CUSTOM_OFFSET, "RDIMM", lmc, node);
+ custom->rlevel_compute = bdk_config_get_int(BDK_CONFIG_DDR_CUSTOM_RLEVEL_COMPUTE, lmc, node);
+ custom->rlevel_comp_offset_udimm = bdk_config_get_int(BDK_CONFIG_DDR_CUSTOM_RLEVEL_COMP_OFFSET, "UDIMM", lmc, node);
+ custom->rlevel_comp_offset_rdimm = bdk_config_get_int(BDK_CONFIG_DDR_CUSTOM_RLEVEL_COMP_OFFSET, "RDIMM", lmc, node);
+ custom->ddr2t_udimm = bdk_config_get_int(BDK_CONFIG_DDR_CUSTOM_DDR2T, "UDIMM", lmc, node);
+ custom->ddr2t_rdimm = bdk_config_get_int(BDK_CONFIG_DDR_CUSTOM_DDR2T, "RDIMM", lmc, node);
+ custom->disable_sequential_delay_check = bdk_config_get_int(BDK_CONFIG_DDR_CUSTOM_DISABLE_SEQUENTIAL_DELAY_CHECK, lmc, node);
+ custom->maximum_adjacent_rlevel_delay_increment
+ = bdk_config_get_int(BDK_CONFIG_DDR_CUSTOM_MAXIMUM_ADJACENT_RLEVEL_DELAY_INCREMENT, lmc, node);
+ custom->parity = bdk_config_get_int(BDK_CONFIG_DDR_CUSTOM_PARITY, lmc, node);
+ custom->fprch2 = bdk_config_get_int(BDK_CONFIG_DDR_CUSTOM_FPRCH2, lmc, node);
+ custom->mode32b = bdk_config_get_int(BDK_CONFIG_DDR_CUSTOM_MODE32B, lmc, node);
+ custom->measured_vref = bdk_config_get_int(BDK_CONFIG_DDR_CUSTOM_MEASURED_VREF, lmc, node);
+
+ /* CN80XX only supports 32bit mode */
+ if (cavium_is_altpkg(CAVIUM_CN81XX))
+ custom->mode32b = 1;
+
+ /* Loop through 8 bytes, plus ecc byte */
+ #define NUM_BYTES 9 /* Max bytes on LMC (8 plus ECC) */
+ static int8_t dll_write_offset[NUM_BYTES];
+ static int8_t dll_read_offset[NUM_BYTES];
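+        // NOTE: these static arrays are shared by every pass of the per-LMC loop,
+        // so all cfg->config[lmc] entries point at the same storage and the values
+        // read for the last LMC win; harmless when all LMCs use identical offsets.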
+ for (int b = 0; b < NUM_BYTES; b++)
+ {
+ dll_write_offset[b] = bdk_config_get_int(BDK_CONFIG_DDR_CUSTOM_DLL_WRITE_OFFSET, b, lmc, node);
+ dll_read_offset[b] = bdk_config_get_int(BDK_CONFIG_DDR_CUSTOM_DLL_READ_OFFSET, b, lmc, node);
+ }
+ custom->dll_write_offset = dll_write_offset;
+ custom->dll_read_offset = dll_read_offset;
+ }
+
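+    /* odt_mask2 carries DDR4-only mode-register fields (RTT_PARK, VREF_VALUE);
+       a DDR3 config leaves it zero, so non-zero is used as a DDR4 indicator here. */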
+ int is_ddr4 = (cfg->config[0].odt_1rank_config[0].odt_mask2.u != 0);
+ int speed = bdk_config_get_int(BDK_CONFIG_DDR_SPEED, node);
+ switch (speed)
+ {
+ case 0: // AUTO
+ cfg->ddr_clock_hertz = 0;
+ break;
+ case 800:
+ case 1600:
+ case 2400:
+ cfg->ddr_clock_hertz = (uint64_t)speed * 1000000 / 2;
+ break;
+ case 666:
+ cfg->ddr_clock_hertz = 333333333;
+ break;
+ case 1066:
+ cfg->ddr_clock_hertz = 533333333;
+ break;
+ case 1333:
+ cfg->ddr_clock_hertz = 666666666;
+ break;
+ case 1866:
+ if (is_ddr4)
+ cfg->ddr_clock_hertz = 940000000;
+ else
+ cfg->ddr_clock_hertz = 933333333;
+ break;
+ case 2133:
+ cfg->ddr_clock_hertz = 1050000000;
+ break;
+ default:
+ bdk_warn("Unsupported DRAM speed of %d MT/s\n", speed);
+ cfg->ddr_clock_hertz = speed * 1000000 / 2;
+ break;
+ }
+
+ return cfg;
+}
diff --git a/src/vendorcode/cavium/bdk/libdram/libdram.c b/src/vendorcode/cavium/bdk/libdram/libdram.c
new file mode 100644
index 0000000000..b19486694c
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libdram/libdram.c
@@ -0,0 +1,718 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include "libbdk-arch/bdk-csrs-mio_fus.h"
+#include "dram-internal.h"
+
+/* This global variable is accessed through dram_is_verbose() to determine
+ the verbosity level. Use that function instead of setting it directly */
+dram_verbosity_t dram_verbosity = VBL_OFF; /* init this here so we can set a non-zero default */
+
+static uint32_t measured_ddr_hertz[BDK_NUMA_MAX_NODES];
+
+/* The various DRAM configs in the libdram/configs directory need space
+ to store the DRAM config. Since only one config is ever in active use
+ at a time, store the configs in __libdram_global_cfg. In a multi-node
+ setup, independent calls to get the DRAM config will load first node 0's
+ config, then node 1's */
+dram_config_t __libdram_global_cfg;
+
+static void bdk_dram_clear_mem(bdk_node_t node)
+{
+ if (!bdk_is_platform(BDK_PLATFORM_ASIM)) {
+ uint64_t mbytes = bdk_dram_get_size_mbytes(node);
+ uint64_t skip = (node == bdk_numa_master()) ? bdk_dram_get_top_of_bdk() : 0;
+ uint64_t len = (mbytes << 20) - skip;
+
+ BDK_TRACE(DRAM, "N%d: Clearing DRAM\n", node);
+ if (skip)
+ {
+ /* All memory below skip may contain valid data, so we can't clear
+ it. We still need to make sure all cache lines in this area are
+ fully dirty so that ECC bits will be updated on store. A single
+ write to the cache line isn't good enough because partial LMC
+ writes may be enabled */
+ ddr_print("N%d: Rewriting DRAM: start 0 length 0x%lx\n", node, skip);
+ volatile uint64_t *ptr = bdk_phys_to_ptr(bdk_numa_get_address(node, 8));
+ /* The above pointer got address 8 to avoid NULL pointer checking
+ in bdk_phys_to_ptr(). Correct it here */
+ ptr--;
+ uint64_t *end = bdk_phys_to_ptr(bdk_numa_get_address(node, skip));
+ while (ptr < end)
+ {
+ *ptr = *ptr;
+ ptr++;
+ }
+ }
+ ddr_print("N%d: Clearing DRAM: start 0x%lx length 0x%lx\n", node, skip, len);
+ bdk_zero_memory(bdk_phys_to_ptr(bdk_numa_get_address(node, skip)), len);
+ BDK_TRACE(DRAM, "N%d: DRAM clear complete\n", node);
+ }
+}
+
+static void bdk_dram_clear_ecc(bdk_node_t node)
+{
+ /* Clear any DRAM errors set during init */
+ BDK_TRACE(DRAM, "N%d: Clearing LMC ECC errors\n", node);
+ int num_lmc = __bdk_dram_get_num_lmc(node);
+ for (int lmc = 0; lmc < num_lmc; lmc++) {
+ DRAM_CSR_WRITE(node, BDK_LMCX_INT(lmc), BDK_CSR_READ(node, BDK_LMCX_INT(lmc)));
+ }
+}
+
+static void bdk_dram_enable_ecc_reporting(bdk_node_t node)
+{
+ /* Enable LMC ECC error HW reporting */
+ int num_lmc = __bdk_dram_get_num_lmc(node);
+
+ BDK_TRACE(DRAM, "N%d: Enable LMC ECC error reporting\n", node);
+
+ for (int lmc = 0; lmc < num_lmc; lmc++) {
+
+ // NOTE: this must be done for pass 2.x
+ // enable ECC interrupts to allow ECC error info in LMCX_INT
+ if (! CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X)) { // added 81xx and 83xx
+ DRAM_CSR_WRITE(node, BDK_LMCX_INT_ENA_W1S(lmc), -1ULL);
+ BDK_CSR_INIT(lmc_int_ena_w1s, node, BDK_LMCX_INT_ENA_W1S(lmc));
+ ddr_print("N%d.LMC%d: %-36s : 0x%08lx\n",
+ node, lmc, "LMC_INT_ENA_W1S", lmc_int_ena_w1s.u);
+ }
+ }
+}
+
+static void bdk_dram_disable_ecc_reporting(bdk_node_t node)
+{
+ /* Disable LMC ECC error HW reporting */
+ int num_lmc = __bdk_dram_get_num_lmc(node);
+
+ BDK_TRACE(DRAM, "N%d: Disable LMC ECC error reporting\n", node);
+
+ for (int lmc = 0; lmc < num_lmc; lmc++) {
+
+ // NOTE: this must be done for pass 2.x
+ // disable ECC interrupts to prevent ECC error info in LMCX_INT
+ if (! CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X)) { // added 81xx and 83xx
+ DRAM_CSR_WRITE(node, BDK_LMCX_INT_ENA_W1C(lmc), -1ULL);
+ BDK_CSR_INIT(lmc_int_ena_w1c, node, BDK_LMCX_INT_ENA_W1C(lmc));
+ ddr_print("N%d.LMC%d: %-36s : 0x%08lx\n",
+ node, lmc, "LMC_INT_ENA_W1C", lmc_int_ena_w1c.u);
+ }
+ }
+}
+
+// this routine simply makes the calls to the tuning routines and returns any errors
+static int bdk_libdram_tune_node(int node)
+{
+ int errs, tot_errs;
+ int do_dllro_hw = 0; // default to NO
+ int do_dllwo = 0; // default to NO
+ int do_eccdll = 0; // default to NO
+ const char *str;
+ BDK_CSR_INIT(lmc_config, node, BDK_LMCX_CONFIG(0)); // FIXME: probe LMC0
+ do_eccdll = (lmc_config.s.ecc_ena != 0); // change to ON if ECC enabled
+
+ // FIXME!!! make 81xx always use HW-assist tuning
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ do_dllro_hw = 1;
+
+ // Automatically tune the data byte DLL read offsets
+ // always done by default, but allow use of HW-assist
+ // NOTE: HW-assist will also tune the ECC byte
+ str = getenv("ddr_tune_hw_offsets");
+ if (str)
+ do_dllro_hw = !!strtoul(str, NULL, 0);
+ BDK_TRACE(DRAM, "N%d: Starting DLL Read Offset Tuning for LMCs\n", node);
+ if (!do_dllro_hw || (lmc_config.s.mode32b != 0)) {
+ errs = perform_dll_offset_tuning(node, 2, /* tune */1);
+ } else {
+ errs = perform_HW_dll_offset_tuning(node, /* read */2, 0x0A/* all bytelanes */);
+ }
+    BDK_TRACE(DRAM, "N%d: Finished DLL Read Offset Tuning for LMCs, %d errors\n",
+              node, errs);
+ tot_errs = errs;
+
+ // disabled by default for now, does not seem to be needed?
+ // Automatically tune the data byte DLL write offsets
+ // allow override of default setting
+ str = getenv("ddr_tune_write_offsets");
+ if (str)
+ do_dllwo = !!strtoul(str, NULL, 0);
+ if (do_dllwo) {
+ BDK_TRACE(DRAM, "N%d: Starting DLL Write Offset Tuning for LMCs\n", node);
+ errs = perform_dll_offset_tuning(node, /* write */1, /* tune */1);
+        BDK_TRACE(DRAM, "N%d: Finished DLL Write Offset Tuning for LMCs, %d errors\n",
+                  node, errs);
+ tot_errs += errs;
+ }
+
+ // disabled by default for now, does not seem to be needed much?
+ // Automatically tune the ECC byte DLL read offsets
+ // FIXME? allow override of the filtering
+ // FIXME? allow programmatic override, not via envvar?
+ str = getenv("ddr_tune_ecc_enable");
+ if (str)
+ do_eccdll = !!strtoul(str, NULL, 10);
+ if (do_eccdll && !do_dllro_hw && (lmc_config.s.mode32b == 0)) { // do not do HW-assist twice for ECC
+ BDK_TRACE(DRAM, "N%d: Starting ECC DLL Read Offset Tuning for LMCs\n", node);
+ errs = perform_HW_dll_offset_tuning(node, 2, 8/* ECC bytelane */);
+ BDK_TRACE(DRAM, "N%d: Finished ECC DLL Read Offset Tuning for LMCs, %d errors\n",
+ node, errs);
+ tot_errs += errs;
+ }
+
+ return tot_errs;
+}
+
+// this routine makes the calls to the tuning routines when criteria are met
+// intended to be called for automated tuning, to apply filtering...
+
+#define IS_DDR4 1
+#define IS_DDR3 0
+#define IS_RDIMM 1
+#define IS_UDIMM 0
+#define IS_1SLOT 1
+#define IS_2SLOT 0
+
+// FIXME: DDR3 is not tuned
+static const uint32_t ddr_speed_filter[2][2][2] = {
+ [IS_DDR4] = {
+ [IS_RDIMM] = {
+ [IS_1SLOT] = 940,
+ [IS_2SLOT] = 800
+ },
+ [IS_UDIMM] = {
+ [IS_1SLOT] = 1050,
+ [IS_2SLOT] = 940
+ },
+ },
+ [IS_DDR3] = {
+ [IS_RDIMM] = {
+ [IS_1SLOT] = 0, // disabled
+ [IS_2SLOT] = 0 // disabled
+ },
+ [IS_UDIMM] = {
+ [IS_1SLOT] = 0, // disabled
+ [IS_2SLOT] = 0 // disabled
+ }
+ }
+};
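+
+/* Illustration (not from the original BDK sources): the table above is indexed
+ as ddr_speed_filter[is_ddr4][is_rdimm][is_1slot]; a nonzero entry is the
+ minimum DDR speed in MHz above which auto-tuning is considered worthwhile,
+ and zero disables tuning for that configuration. For example:
+
+ uint32_t min_mhz = ddr_speed_filter[IS_DDR4][IS_RDIMM][IS_1SLOT]; // 940
+ int tune = (min_mhz != 0) && (ddr_speed > min_mhz); // tune only above 940 MHz
+*/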
+
+static int bdk_libdram_maybe_tune_node(int node)
+{
+ const char *str;
+
+ // FIXME: allow an override here so that all configs can be tuned or none
+ // If the envvar is defined, always either force it or avoid it accordingly
+ if ((str = getenv("ddr_tune_all_configs")) != NULL) {
+ int tune_it = !!strtoul(str, NULL, 0);
+ printf("N%d: DRAM auto-tuning %s.\n", node, (tune_it) ? "forced" : "avoided");
+ return (tune_it) ? bdk_libdram_tune_node(node) : 0;
+ }
+
+ // filter the tuning calls here...
+ // determine if we should/can run automatically for this configuration
+ //
+ // FIXME: tune only when the configuration indicates it will help:
+ // DDR type, RDIMM or UDIMM, 1-slot or 2-slot, and speed
+ //
+ uint32_t ddr_speed = divide_nint(libdram_get_freq_from_pll(node, 0), 1000000); // sample LMC0
+ BDK_CSR_INIT(lmc_config, node, BDK_LMCX_CONFIG(0)); // sample LMC0
+
+ int is_ddr4 = !!__bdk_dram_is_ddr4(node, 0);
+ int is_rdimm = !!__bdk_dram_is_rdimm(node, 0);
+ int is_1slot = !!(lmc_config.s.init_status < 4); // HACK, should do better
+ int do_tune = 0;
+
+ uint32_t ddr_min_speed = ddr_speed_filter[is_ddr4][is_rdimm][is_1slot];
+ do_tune = (ddr_min_speed && (ddr_speed > ddr_min_speed));
+
+ ddr_print("N%d: DDR%d %cDIMM %d-slot at %d MHz %s eligible for auto-tuning.\n",
+ node, (is_ddr4)?4:3, (is_rdimm)?'R':'U', (is_1slot)?1:2,
+ ddr_speed, (do_tune)?"is":"is not");
+
+ // call the tuning routines, done filtering...
+ return ((do_tune) ? bdk_libdram_tune_node(node) : 0);
+}
+
+/**
+ * This is the main DRAM init function. Users of libdram should call this function,
+ * avoiding the other internal functions. As a rule, functions starting with
+ * "libdram_*" are part of the external API and should be used.
+ *
+ * @param node Node to initialize. This may not be the same node as the one running the code
+ * @param dram_config
+ * DRAM configuration to use
+ * @param ddr_clock_override
+ * If non-zero, this overrides the DRAM clock speed in the config structure. This
+ * allows quick testing of different DRAM speeds without modifying the basic
+ * config. If zero, the DRAM speed in the config is used.
+ *
+ * @return Amount of memory in MB. Zero or negative is a failure.
+ */
+int libdram_config(int node, const dram_config_t *dram_config, int ddr_clock_override)
+{
+ if (bdk_is_platform(BDK_PLATFORM_ASIM))
+ return bdk_dram_get_size_mbytes(node);
+
+ /* Boards may need to mux the TWSI connection between THUNDERX and the BMC.
+ This allows the BMC to monitor DIMM temperatures and health */
+ int gpio_select = bdk_config_get_int(BDK_CONFIG_DRAM_CONFIG_GPIO);
+ if (gpio_select != -1)
+ bdk_gpio_initialize(bdk_numa_master(), gpio_select, 1, 1);
+
+ /* Read all the SPDs and store them in the device tree. They are needed by
+ later software to populate SMBIOS information */
+ for (int lmc = 0; lmc < 4; lmc++)
+ for (int dimm = 0; dimm < DDR_CFG_T_MAX_DIMMS; dimm++)
+ read_entire_spd(node, (dram_config_t *)dram_config, lmc, dimm);
+
+ const ddr_configuration_t *ddr_config = dram_config->config;
+ int ddr_clock_hertz = (ddr_clock_override) ? ddr_clock_override : dram_config->ddr_clock_hertz;
+ if (ddr_clock_hertz == 0) // 0 == AUTO
+ {
+ ddr_clock_hertz = dram_get_default_spd_speed(node, ddr_config);
+ if (ddr_clock_hertz < 0) {
+ printf("N%d: DRAM init: AUTO clock ILLEGAL configuration\n", node);
+ return -1;
+ }
+ }
+ int errs;
+
+ // At this point, we only know the desired clock rate (ddr_clock_hertz).
+ // We do not know whether we are configuring RDIMMs.
+ // We also do not yet know if the 100MHz alternate refclk is actually available,
+ // so if we are being asked for 2133 MT/s or better, we still need to do:
+ // 1. probe for RDIMMs (if not, 50MHz refclk is good enough)
+ // 2. determine if 100MHz refclk is there, and switch to it before starting any configuration
+ //
+ // NOTES:
+ // 1. dclk_alt_refclk_sel need only be set on LMC0 (see above disabled code)
+ // 2. I think we need to first probe to see if we need it, and then configure it if its use is dictated
+ // 3. then go on to configure at the selected refclk
+ int ddr_refclk_hertz = bdk_clock_get_rate(node, BDK_CLOCK_MAIN_REF);
+ int alt_refclk = bdk_config_get_int(BDK_CONFIG_DDR_ALT_REFCLK, node);
+
+ char *str = getenv("ddr_100mhz_refclk");
+ if (str) { // if the envvar was found, force it to that setting
+ int do_100mhz = !!strtoul(str, NULL, 0);
+ alt_refclk = (do_100mhz) ? 100 : 50;
+ }
+
+ dram_verbosity = bdk_config_get_int(BDK_CONFIG_DRAM_VERBOSE);
+
+ // Here we check for fuses that limit the number of LMCs we can configure,
+ // but only on 83XX and 88XX...
+ int lmc_limit = 4;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) || CAVIUM_IS_MODEL(CAVIUM_CN83XX)) {
+ BDK_CSR_INIT(mio_fus_dat2, node, BDK_MIO_FUS_DAT2);
+ if (mio_fus_dat2.s.lmc_half) {
+ lmc_limit = (CAVIUM_IS_MODEL(CAVIUM_CN88XX)) ? 2 : 1; // limit LMCs to half present
+ error_print("Only %d LMC(s)s supported for this Thunder model\n", lmc_limit);
+ }
+ }
+
+ /* We need to calculate the interface mask based on the provided SPD
+ addresses/contents */
+ uint32_t interface_mask = 0;
+ for (int i = 0; i < lmc_limit; i++)
+ {
+ // We need to check only DIMM 0 of each LMC for possible presence of the LMC.
+ // This trusts that the board database is correctly configured.
+ // Empty DIMM slots in present LMCs will be detected later.
+ if (ddr_config[i].dimm_config_table[0].spd_addr ||
+ ddr_config[i].dimm_config_table[0].spd_ptr)
+ interface_mask |= 1 << i;
+
+ // we know whether alternate refclk is always wanted
+ // we also know already if we want 2133 MT/s
+ // if alt refclk not always wanted, then probe DDR and DIMM type
+ // if DDR4 and RDIMMs, then set desired refclk to 100MHz, otherwise to default (50MHz)
+ // depend on ddr_initialize() to do the refclk selection and validation
+ if (i == 0) { // only check for LMC0
+ if (alt_refclk) { // if alternate refclk was specified, let it override everything
+ ddr_refclk_hertz = alt_refclk * 1000000;
+ ddr_print("N%d: DRAM init: %d MHz refclk is REQUESTED ALWAYS\n", node, alt_refclk);
+ } else if (ddr_clock_hertz > 1000000000) { // if more than 2000 MT/s
+ int ddr_type = get_ddr_type(node, &ddr_config[0].dimm_config_table[0]);
+ int spd_dimm_type = get_dimm_module_type(node, &ddr_config[0].dimm_config_table[0], ddr_type);
+ // is DDR4 and RDIMM just to be sure
+ if ((ddr_type == DDR4_DRAM) &&
+ ((spd_dimm_type == 1) || (spd_dimm_type == 5) || (spd_dimm_type == 8))) {
+ ddr_refclk_hertz = 100000000; // yes, we require 100MHz refclk, so set it
+ ddr_print("N%d: DRAM init: 100 MHz refclk is REQUIRED\n", node);
+ }
+ } // if (ddr_clock_hertz > 1000000000)
+ } // if (i == 0)
+ }
+
+ BDK_TRACE(DRAM, "N%d: DRAM init started (hertz=%d, refclk=%d, config=%p)\n",
+ node, ddr_clock_hertz, ddr_refclk_hertz, dram_config);
+ debug_print("N%d: DRAM init started (hertz=%d, refclk=%d, config=%p)\n",
+ node, ddr_clock_hertz, ddr_refclk_hertz, dram_config);
+
+ BDK_TRACE(DRAM, "N%d: Calling DRAM init\n", node);
+ measured_ddr_hertz[node] = 0;
+ int mbytes = octeon_ddr_initialize(node,
+ bdk_clock_get_rate(node, BDK_CLOCK_RCLK),
+ ddr_clock_hertz,
+ ddr_refclk_hertz,
+ interface_mask,
+ ddr_config,
+ &measured_ddr_hertz[node],
+ 0,
+ 0,
+ 0);
+ BDK_TRACE(DRAM, "N%d: DRAM init returned %d, measured %u Hz\n",
+ node, mbytes, measured_ddr_hertz[node]);
+
+ // do not tune or mess with memory if there was an init problem...
+ if (mbytes > 0) {
+
+ bdk_dram_disable_ecc_reporting(node);
+
+ // call the tuning routines, with filtering...
+ BDK_TRACE(DRAM, "N%d: Calling DRAM tuning\n", node);
+ errs = bdk_libdram_maybe_tune_node(node);
+ BDK_TRACE(DRAM, "N%d: DRAM tuning returned %d errors\n",
+ node, errs);
+
+ // finally, clear memory and any left-over ECC errors
+ bdk_dram_clear_mem(node);
+ bdk_dram_clear_ecc(node);
+
+ bdk_dram_enable_ecc_reporting(node);
+ }
+
+ /* Boards may need to mux the TWSI connection between THUNDERX and the BMC.
+ This allows the BMC to monitor DIMM temperatures and health */
+ if (gpio_select != -1)
+ bdk_gpio_initialize(bdk_numa_master(), gpio_select, 1, 0);
+
+ return mbytes;
+}
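+
+/* Illustration (hypothetical caller, not part of the BDK): a boot stage would
+ typically look up the board's dram_config_t, call libdram_config() with no
+ clock override, and treat a non-positive return as fatal. The
+ board_get_dram_config() helper here is an assumption for the sketch:
+
+ const dram_config_t *cfg = board_get_dram_config(node);
+ int mbytes = libdram_config(node, cfg, 0); // 0 = use clock from config
+ if (mbytes <= 0)
+ printf("N%d: DRAM init failed (%d)\n", node, mbytes);
+*/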
+
+/**
+ * This is the main DRAM tuning function. Users of libdram should call this function,
+ * avoiding the other internal functions. As a rule, functions starting with
+ * "libdram_*" are part of the external API and should be used.
+ *
+ * @param node Node to tune. This may not be the same node as the one running the code
+ *
+ * @return Success or Fail
+ */
+int libdram_tune(int node)
+{
+ int tot_errs;
+ int l2c_is_locked = bdk_l2c_is_locked(node);
+
+ dram_verbosity = bdk_config_get_int(BDK_CONFIG_DRAM_VERBOSE);
+
+ // the only way this entry point should be called is from a MENU item,
+ // so, enable any non-running cores on this node, and leave them
+ // running at the end...
+ ddr_print("N%d: %s: Starting cores (mask was 0x%lx)\n",
+ node, __FUNCTION__, bdk_get_running_coremask(node));
+ bdk_init_cores(node, ~0ULL);
+
+ // must test for L2C locked here, cannot go on with it unlocked
+ // FIXME: but we only need to worry about Node 0???
+ if (node == 0) {
+ if (!l2c_is_locked) { // is unlocked, must lock it now
+ ddr_print("N%d: %s: L2C was unlocked - locking it now\n", node, __FUNCTION__);
+ // FIXME: this should be common-ized; it currently matches bdk_init()...
+ bdk_l2c_lock_mem_region(node, 0, bdk_l2c_get_cache_size_bytes(node) * 3 / 4);
+ } else {
+ ddr_print("N%d: %s: L2C was already locked - continuing\n", node, __FUNCTION__);
+ }
+ } else {
+ ddr_print("N%d: %s: non-zero node, not worrying about L2C lock status\n", node, __FUNCTION__);
+ }
+
+ // call the tuning routines, no filtering...
+ tot_errs = bdk_libdram_tune_node(node);
+
+ // FIXME: only for node 0, unlock L2C if it was unlocked before...
+ if (node == 0) {
+ if (!l2c_is_locked) { // it was Node 0 and unlocked, must re-unlock it now
+ ddr_print("N%d: Node 0 L2C was unlocked before - unlocking it now\n", node);
+ // FIXME: this should be common-ized; it currently matches bdk_init()...
+ bdk_l2c_unlock_mem_region(node, 0, bdk_l2c_get_cache_size_bytes(node) * 3 / 4);
+ } else {
+ ddr_print("N%d: %s: L2C was already locked - leaving it locked\n", node, __FUNCTION__);
+ }
+ } else {
+ ddr_print("N%d: %s: non-zero node, not worrying about L2C lock status\n", node, __FUNCTION__);
+ }
+
+ // make sure to clear memory and any ECC errs when done...
+ bdk_dram_clear_mem(node);
+ bdk_dram_clear_ecc(node);
+
+ return tot_errs;
+}
+
+/**
+ * This is the main function for DRAM margining of Write Voltage.
+ * Users of libdram should call this function,
+ * avoiding the other internal functions. As a rule, functions starting with
+ * "libdram_*" are part of the external API and should be used.
+ *
+ * @param node Node to test. This may not be the same node as the one running the code
+ *
+ * @return Success or Fail
+ */
+static
+int libdram_margin_write_voltage(int node)
+{
+ int tot_errs;
+
+ // call the margining routine
+ tot_errs = perform_margin_write_voltage(node);
+
+ // make sure to clear memory and any ECC errs when done...
+ bdk_dram_clear_mem(node);
+ bdk_dram_clear_ecc(node);
+
+ return tot_errs;
+}
+
+/**
+ * This is the main function for DRAM margining of Read Voltage.
+ * Users of libdram should call this function,
+ * avoiding the other internal functions. As a rule, functions starting with
+ * "libdram_*" are part of the external API and should be used.
+ *
+ * @param node Node to test. This may not be the same node as the one running the code
+ *
+ * @return Success or Fail
+ */
+static
+int libdram_margin_read_voltage(int node)
+{
+ int tot_errs;
+
+ // call the margining routine
+ tot_errs = perform_margin_read_voltage(node);
+
+ // make sure to clear memory and any ECC errs when done...
+ bdk_dram_clear_mem(node);
+ bdk_dram_clear_ecc(node);
+
+ return tot_errs;
+}
+
+/**
+ * This is the main function for DRAM margining of Write Timing.
+ * Users of libdram should call this function,
+ * avoiding the other internal functions. As a rule, functions starting with
+ * "libdram_*" are part of the external API and should be used.
+ *
+ * @param node Node to test. This may not be the same node as the one running the code
+ *
+ * @return Success or Fail
+ */
+static
+int libdram_margin_write_timing(int node)
+{
+ int tot_errs;
+
+ // call the tuning routine, tell it we are margining not tuning...
+ tot_errs = perform_dll_offset_tuning(node, /* write offsets */1, /* margin */0);
+
+ // make sure to clear memory and any ECC errs when done...
+ bdk_dram_clear_mem(node);
+ bdk_dram_clear_ecc(node);
+
+ return tot_errs;
+}
+
+/**
+ * This is the main function for DRAM margining of Read Timing.
+ * Users of libdram should call this function,
+ * avoiding the other internal functions. As a rule, functions starting with
+ * "libdram_*" are part of the external API and should be used.
+ *
+ * @param node Node to test. This may not be the same node as the one running the code
+ *
+ * @return Success or Fail
+ */
+static
+int libdram_margin_read_timing(int node)
+{
+ int tot_errs;
+
+ // call the tuning routine, tell it we are margining not tuning...
+ tot_errs = perform_dll_offset_tuning(node, /* read offsets */2, /* margin */0);
+
+ // make sure to clear memory and any ECC errs when done...
+ bdk_dram_clear_mem(node);
+ bdk_dram_clear_ecc(node);
+
+ return tot_errs;
+}
+
+/**
+ * This is the main function for all DRAM margining.
+ * Users of libdram should call this function,
+ * avoiding the other internal functions. As a rule, functions starting with
+ * "libdram_*" are part of the external API and should be used.
+ *
+ * @param node Node to test. This may not be the same node as the one running the code
+ *
+ * @return Success or Fail
+ */
+int libdram_margin(int node)
+{
+ int ret_rt, ret_wt, ret_rv, ret_wv;
+ const char *risk[2] = { "Low Risk", "Needs Review" };
+ int l2c_is_locked = bdk_l2c_is_locked(node);
+
+ // for now, no margining on 81xx, until we can reduce the dynamic runtime size...
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX)) {
+ printf("Sorry, margining is not available on 81xx yet...\n");
+ return 0;
+ }
+
+ dram_verbosity = bdk_config_get_int(BDK_CONFIG_DRAM_VERBOSE);
+
+ // the only way this entry point should be called is from a MENU item,
+ // so, enable any non-running cores on this node, and leave them
+ // running at the end...
+ ddr_print("N%d: %s: Starting cores (mask was 0x%lx)\n",
+ node, __FUNCTION__, bdk_get_running_coremask(node));
+ bdk_init_cores(node, ~0ULL);
+
+ // must test for L2C locked here, cannot go on with it unlocked
+ // FIXME: but we only need to worry about Node 0???
+ if (node == 0) {
+ if (!l2c_is_locked) { // is unlocked, must lock it now
+ ddr_print("N%d: %s: L2C was unlocked - locking it now\n", node, __FUNCTION__);
+ // FIXME: this should be common-ized; it currently matches bdk_init()...
+ bdk_l2c_lock_mem_region(node, 0, bdk_l2c_get_cache_size_bytes(node) * 3 / 4);
+ } else {
+ ddr_print("N%d: %s: L2C was already locked - continuing\n", node, __FUNCTION__);
+ }
+ } else {
+ ddr_print("N%d: %s: non-zero node, not worrying about L2C lock status\n", node, __FUNCTION__);
+ }
+
+ debug_print("N%d: Starting DRAM Margin ALL\n", node);
+ ret_rt = libdram_margin_read_timing(node);
+ ret_wt = libdram_margin_write_timing(node);
+ ret_rv = libdram_margin_read_voltage(node);
+ ret_wv = libdram_margin_write_voltage(node);
+ debug_print("N%d: DRAM Margin ALL finished\n", node);
+
+ /*
+ >>> Summary from DDR Margining tool:
+ >>> N0: Read Timing Margin : Low Risk
+ >>> N0: Write Timing Margin : Low Risk
+ >>> N0: Read Voltage Margin : Low Risk
+ >>> N0: Write Voltage Margin : Low Risk
+ */
+ printf(" \n");
+ printf("-------------------------------------\n");
+ printf(" \n");
+ printf("Summary from DDR Margining tool\n");
+ printf("N%d: Read Timing Margin : %s\n", node, risk[!!ret_rt]);
+ printf("N%d: Write Timing Margin : %s\n", node, risk[!!ret_wt]);
+
+ // these may not have been done due to DDR3 and/or THUNDER pass 1.x
+ // FIXME? would it be better to print an appropriate message here?
+ if (ret_rv != -1) printf("N%d: Read Voltage Margin : %s\n", node, risk[!!ret_rv]);
+ if (ret_wv != -1) printf("N%d: Write Voltage Margin : %s\n", node, risk[!!ret_wv]);
+
+ printf(" \n");
+ printf("-------------------------------------\n");
+ printf(" \n");
+
+ // FIXME: only for node 0, unlock L2C if it was unlocked before...
+ if (node == 0) {
+ if (!l2c_is_locked) { // it was Node 0 and unlocked, must re-unlock it now
+ ddr_print("N%d: Node 0 L2C was unlocked before - unlocking it now\n", node);
+ // FIXME: this should be common-ized; it currently matches bdk_init()...
+ bdk_l2c_unlock_mem_region(node, 0, bdk_l2c_get_cache_size_bytes(node) * 3 / 4);
+ } else {
+ ddr_print("N%d: %s: L2C was already locked - leaving it locked\n", node, __FUNCTION__);
+ }
+ } else {
+ ddr_print("N%d: %s: non-zero node, not worrying about L2C lock status\n", node, __FUNCTION__);
+ }
+
+ return 0;
+}
+
+/**
+ * Get the measured DRAM frequency after a call to libdram_config
+ *
+ * @param node Node to get frequency for
+ *
+ * @return Frequency in Hz
+ */
+uint32_t libdram_get_freq(int node)
+{
+ return measured_ddr_hertz[node];
+}
+
+/**
+ * Get the DRAM frequency as computed from the DDR_PLL_CTL CSR
+ *
+ * @param node Node to get frequency for
+ * @param lmc LMC whose PLL to sample (the current implementation always reads LMC0)
+ *
+ * @return Frequency in Hz
+ */
+uint32_t libdram_get_freq_from_pll(int node, int lmc)
+{
+ static const uint8_t _en[] = {1, 2, 3, 4, 5, 6, 7, 8, 10, 12};
+ BDK_CSR_INIT(c, node, BDK_LMCX_DDR_PLL_CTL(0));
+ // we check the alternate refclk select bit in LMC0 to indicate 100MHz use
+ // assumption: the alternate refclk is setup for 100MHz
+ uint64_t ddr_ref_hertz = (c.s.dclk_alt_refclk_sel) ? 100000000 : bdk_clock_get_rate(node, BDK_CLOCK_MAIN_REF);
+ uint64_t en = _en[c.cn83xx.ddr_ps_en];
+ uint64_t calculated_ddr_hertz = ddr_ref_hertz * (c.cn83xx.clkf + 1) / ((c.cn83xx.clkr + 1) * en);
+ return calculated_ddr_hertz;
+}
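+
+/* Worked example (illustrative): the PLL output computed above is
+ ddr_hertz = ref * (clkf + 1) / ((clkr + 1) * en)
+ with en decoded through the _en[] table. With a 50 MHz refclk, clkf = 47,
+ clkr = 0 and ddr_ps_en = 1 (so en = 2):
+ 50000000 * 48 / (1 * 2) = 1200000000, i.e. a 1200 MHz DCLK (2400 MT/s). */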
+
+#ifndef DRAM_CSR_WRITE_INLINE
+void dram_csr_write(bdk_node_t node, const char *csr_name, bdk_csr_type_t type, int busnum, int size, uint64_t address, uint64_t value)
+{
+ VB_PRT(VBL_CSRS, "N%d: DDR Config %s[%016lx] => %016lx\n", node, csr_name, address, value);
+ bdk_csr_write(node, type, busnum, size, address, value);
+}
+#endif
diff --git a/src/vendorcode/cavium/include/bdk/bdk.h b/src/vendorcode/cavium/include/bdk/bdk.h
new file mode 100644
index 0000000000..c3e0a518db
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/bdk.h
@@ -0,0 +1,80 @@
+#ifndef __BDK_H__
+#define __BDK_H__
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Master include file for all BDK functions.
+ *
+ * <hr>$Revision: 49448 $<hr>
+ */
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdio.h>
+#include "../libbdk-arch/bdk-arch.h"
+#include "../libbdk-os/bdk-os.h"
+#include "../libfatfs/ff.h"
+#include "../libfatfs/diskio.h"
+#ifndef BDK_BUILD_HOST
+#include "../libbdk-hal/bdk-hal.h"
+#include "../libbdk-boot/bdk-boot.h"
+#include "../libbdk-dram/bdk-dram.h"
+#include "../libbdk-driver/bdk-driver.h"
+#include "../libbdk-trust/bdk-trust.h"
+#include "../libdram/libdram.h"
+#include "bdk-functions.h"
+#endif
+#include "../libbdk-lua/bdk-lua.h"
+#include "../libbdk-bist/bist.h"
+
+/**
+ * @mainpage
+ *
+ * This document goes through the internal details of the BDK. Its purpose is
+ * to serve as an API reference for people writing applications. Users of the
+ * BDK's binary applications do not need these details.
+ */
+
+#endif
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-arch.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-arch.h
new file mode 100644
index 0000000000..e2434a72d8
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-arch.h
@@ -0,0 +1,85 @@
+#ifndef __BDK_ARCH_H__
+#define __BDK_ARCH_H__
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Master include file for architecture support. Use bdk.h
+ * instead of including this file directly.
+ *
+ * <hr>$Revision: 49448 $<hr>
+ */
+
+#ifndef __BYTE_ORDER
+ #if !defined(__ORDER_BIG_ENDIAN__) || !defined(__ORDER_LITTLE_ENDIAN__) || !defined(__BYTE_ORDER__)
+ #error Unable to determine Endian mode
+ #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ #define __BYTE_ORDER __ORDER_BIG_ENDIAN__
+ #define BDK_LITTLE_ENDIAN_STRUCT __attribute__ ((scalar_storage_order("little-endian")))
+ #elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ #define __BYTE_ORDER __ORDER_LITTLE_ENDIAN__
+ #define BDK_LITTLE_ENDIAN_STRUCT
+ #else
+ #error Unable to determine Endian mode
+ #endif
+ #define __BIG_ENDIAN __ORDER_BIG_ENDIAN__
+ #define __LITTLE_ENDIAN __ORDER_LITTLE_ENDIAN__
+#endif
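+
+/* Illustration (not from the original header): on big-endian builds,
+ BDK_LITTLE_ENDIAN_STRUCT expands to GCC's scalar_storage_order attribute so
+ hardware-defined little-endian layouts keep their wire format regardless of
+ CPU byte order; on little-endian builds it expands to nothing. A
+ hypothetical descriptor using it:
+
+ typedef struct BDK_LITTLE_ENDIAN_STRUCT {
+ uint32_t length; // stored little-endian even on BE cores
+ uint32_t flags;
+ } example_desc_t;
+*/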
+
+#include "bdk-require.h"
+#include "bdk-swap.h"
+#ifndef BDK_BUILD_HOST
+#include "bdk-asm.h"
+#endif
+#include "bdk-model.h"
+#include "bdk-numa.h"
+#include "bdk-csr.h"
+#ifndef BDK_BUILD_HOST
+#include "bdk-lmt.h"
+#endif
+#include "bdk-warn.h"
+#include "bdk-version.h"
+#ifndef BDK_BUILD_HOST
+#include "bdk-fuse.h"
+#include "bdk-platform.h"
+#endif
+
+#endif
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-asm.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-asm.h
new file mode 100644
index 0000000000..4206247c2a
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-asm.h
@@ -0,0 +1,94 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * This file defines ASM primitives for the executive.
+ *
+ * <hr>$Revision: 53373 $<hr>
+ *
+ * @defgroup asm Assembly support
+ * @{
+ */
+
+/* This header file can be included from a .S file. Keep non-preprocessor
+ things under !__ASSEMBLER__. */
+#ifndef __ASSEMBLER__
+
+/* turn the variable name into a string */
+#define __BDK_TMP_STR(x) __BDK_TMP_STR2(x)
+#define __BDK_TMP_STR2(x) #x
+#define __BDK_VASTR(...) #__VA_ARGS__
+
+#define BDK_MRS_NV(reg, val) asm ("mrs %x[rd]," #reg : [rd] "=r" (val))
+#define BDK_MRS(reg, val) asm volatile ("mrs %x[rd]," #reg : [rd] "=r" (val))
+#define BDK_MSR(reg, val) asm volatile ("msr " #reg ",%x[rd]" : : [rd] "r" (val))
+
+/* other useful stuff */
+#define BDK_MB asm volatile ("dmb sy" : : :"memory") /* Full memory barrier, like MIPS SYNC */
+#define BDK_WMB asm volatile ("dmb st" : : :"memory") /* Write memory barrier, like MIPS SYNCW */
+#define BDK_WFE asm volatile ("wfe" : : :"memory") /* Wait for event */
+#define BDK_SEV asm volatile ("sev" : : :"memory") /* Send global event */
+#define BDK_DSB asm volatile ("dsb sy" : : :"memory") /* DSB */
+
+// normal prefetches that use the PRFUM prefetch instruction
+#define BDK_PREFETCH_PREFX(type, address, offset) asm volatile ("PRFUM " type ", [%[rbase],%[off]]" : : [rbase] "r" (address), [off] "I" (offset))
+// a normal prefetch
+#define BDK_PREFETCH(address, offset) BDK_PREFETCH_PREFX("PLDL1KEEP", address, offset)
+#define BDK_ICACHE_INVALIDATE { asm volatile ("ic iallu" : : ); } // invalidate entire icache
+
+#define BDK_SYS_CVMCACHE_WBI_L2 "#0,c11,c1,#2" // L2 Cache Cache Hit Writeback Invalidate
+#define BDK_SYS_CVMCACHE_WB_L2 "#0,c11,c1,#3" // L2 Cache Hit Writeback
+#define BDK_SYS_CVMCACHE_LCK_L2 "#0,c11,c1,#4" // L2 Cache Fetch and Lock
+#define BDK_SYS_CVMCACHE_WBI_L2_INDEXED "#0,c11,c0,#5" // L2 Cache Index Writeback Invalidate
+#define BDK_SYS_CVMCACHE_LTG_L2_INDEXED "#0,c11,c0,#7" // L2 Cache Index Load Tag
+#define BDK_SYS_CVMCACHE_INVALL_DC "#0,c11,c0,#2" // L1 Dcache Invalidate
+#define BDK_CACHE_WBI_L2(address) { asm volatile ("sys " BDK_SYS_CVMCACHE_WBI_L2 ", %0" : : "r" (address)); } // Push to memory, invalidate, and unlock
+#define BDK_CACHE_WBI_L2_INDEXED(encoded) { asm volatile ("sys " BDK_SYS_CVMCACHE_WBI_L2_INDEXED ", %0" : : "r" (encoded)); } // Push to memory, invalidate, and unlock, index by set/way
+#define BDK_CACHE_WB_L2(address) { asm volatile ("sys " BDK_SYS_CVMCACHE_WB_L2 ", %0" : : "r" (address)); } // Push to memory, don't invalidate, don't unlock
+#define BDK_CACHE_LCK_L2(address) { asm volatile ("sys " BDK_SYS_CVMCACHE_LCK_L2 ", %0" : : "r" (address)); } // Lock into L2
+#define BDK_DCACHE_INVALIDATE { asm volatile ("sys " BDK_SYS_CVMCACHE_INVALL_DC ", xzr"); } // Invalidate the entire Dcache on local core
+#define BDK_CACHE_LTG_L2_INDEXED(encoded) { asm volatile ("sys " BDK_SYS_CVMCACHE_LTG_L2_INDEXED ", %0" : : "r" (encoded)); } // Load L2 TAG, index by set/way
+
+#define BDK_STORE_PAIR(ptr, data1, data2) { asm volatile ("stp %x[d1], %x[d2], [%[b]]" : [mem] "+m" (*(__uint128_t*)ptr) : [b] "r" (ptr), [d1] "r" (data1), [d2] "r" (data2)); }
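+
+/* Illustration (not from the original header): typical use of the MRS/MSR
+ wrappers above -- read a system register into a C variable and write one
+ back. MIDR_EL1 and CNTFRQ_EL0 are standard ARMv8 registers:
+
+ uint64_t midr, freq = 100000000;
+ BDK_MRS(MIDR_EL1, midr); // midr <- implementer/part number info
+ BDK_MSR(CNTFRQ_EL0, freq); // program the generic timer frequency
+*/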
+
+#endif /* __ASSEMBLER__ */
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csr.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csr.h
new file mode 100644
index 0000000000..95805ec671
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csr.h
@@ -0,0 +1,324 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Functions and macros for accessing Cavium CSRs.
+ *
+ * <hr>$Revision: 49448 $<hr>
+ *
+ * @defgroup csr CSR support
+ * @{
+ */
+
+/**
+ * Possible CSR bus types
+ */
+typedef enum {
+ BDK_CSR_TYPE_DAB, /**< External debug 64bit CSR */
+ BDK_CSR_TYPE_DAB32b, /**< External debug 32bit CSR */
+ BDK_CSR_TYPE_MDSB, /**< CN93XX: Memory Diagnostic Serial Bus?, not memory mapped */
+ BDK_CSR_TYPE_NCB, /**< Fast 64bit CSR */
+ BDK_CSR_TYPE_NCB32b, /**< Fast 32bit CSR */
+ BDK_CSR_TYPE_PCCBR,
+ BDK_CSR_TYPE_PCCPF,
+ BDK_CSR_TYPE_PCCVF,
+ BDK_CSR_TYPE_PCICONFIGRC, /**< PCIe config address (RC mode) */
+ BDK_CSR_TYPE_PCICONFIGEP, /**< PCIe config address (EP mode) */
+ BDK_CSR_TYPE_PCICONFIGEP_SHADOW, /**< CN93XX: PCIEP register invisible to host, not memory mapped */
+ BDK_CSR_TYPE_PCICONFIGEPVF, /**< CN93XX: PCIEP registers only on certain PEMs, not memory mapped */
+ BDK_CSR_TYPE_PEXP, /**< PCIe BAR 0 address only */
+ BDK_CSR_TYPE_PEXP_NCB, /**< NCB-direct and PCIe BAR0 address */
+ BDK_CSR_TYPE_RSL, /**< Slow 64bit CSR */
+ BDK_CSR_TYPE_RSL32b, /**< Slow 32bit CSR */
+ BDK_CSR_TYPE_RVU_PF_BAR0, /**< Index into RVU PF BAR0 */
+ BDK_CSR_TYPE_RVU_PF_BAR2, /**< Index into RVU PF BAR2 */
+ BDK_CSR_TYPE_RVU_PFVF_BAR2, /**< Index into RVU PF or VF BAR2 */
+ BDK_CSR_TYPE_RVU_VF_BAR2, /**< Index into RVU VF BAR2 */
+ BDK_CSR_TYPE_SYSREG, /**< Core system register */
+} bdk_csr_type_t;
+
+#define BDK_CSR_DB_MAX_PARAM 4
+typedef struct __attribute__ ((packed)) {
+ uint32_t name_index : 20;/**< Index into __bdk_csr_db_string where the name is */
+ uint32_t base_index : 14;/**< Index into __bdk_csr_db_number where the base address is */
+ uint8_t unused : 5;
+ bdk_csr_type_t type : 5; /**< Enum type from above */
+ uint8_t width : 4; /**< CSR width in bytes */
+ uint16_t field_index; /**< Index into __bdk_csr_db_fieldList where the fields start */
+ uint16_t range[BDK_CSR_DB_MAX_PARAM]; /**< Index into __bdk_csr_db_range where the range is */
+ uint16_t param_inc[BDK_CSR_DB_MAX_PARAM]; /**< Index into __bdk_csr_db_number where the param multiplier is */
+} __bdk_csr_db_type_t;
+
+typedef struct __attribute__ ((packed)) {
+ uint32_t name_index : 20;/**< Index into __bdk_csr_db_string where the name is */
+ uint32_t start_bit : 6; /**< LSB of the field */
+ uint32_t stop_bit : 6; /**< MSB of the field */
+} __bdk_csr_db_field_t;
+
+typedef struct {
+ uint32_t model;
+ const int16_t *data; /**< Array of integers indexing __bdk_csr_db_csr */
+} __bdk_csr_db_map_t;
+
+extern void __bdk_csr_fatal(const char *name, int num_args, unsigned long arg1, unsigned long arg2, unsigned long arg3, unsigned long arg4) __attribute__ ((noreturn));
+extern int bdk_csr_decode(const char *name, uint64_t value);
+extern int bdk_csr_field(const char *csr_name, int field_start_bit, const char **field_name);
+extern uint64_t bdk_csr_read_by_name(bdk_node_t node, const char *name);
+extern int bdk_csr_write_by_name(bdk_node_t node, const char *name, uint64_t value);
+extern int __bdk_csr_lookup_index(const char *name, int params[]);
+extern int bdk_csr_get_name(const char *last_name, char *buffer);
+struct bdk_readline_tab;
+extern struct bdk_readline_tab *__bdk_csr_get_tab_complete(void) BDK_WEAK;
+extern uint64_t bdk_sysreg_read(int node, int core, uint64_t regnum);
+extern void bdk_sysreg_write(int node, int core, uint64_t regnum, uint64_t value);
+
+#ifndef BDK_BUILD_HOST
+
+/**
+ * Read a value from a CSR. Normally this function should not be
+ * used directly. Instead use the macro BDK_CSR_READ that fills
+ * in the parameters to this function for you.
+ *
+ * @param node Node to use in a Numa setup. Can be an exact ID or a special value.
+ * @param type Bus type the CSR is on
+ * @param busnum Bus number the CSR is on
+ * @param size Width of the CSR in bytes
+ * @param address The address of the CSR
+ *
+ * @return The value of the CSR
+ */
+static inline uint64_t bdk_csr_read(bdk_node_t node, bdk_csr_type_t type, int busnum, int size, uint64_t address) __attribute__ ((always_inline));
+static inline uint64_t bdk_csr_read(bdk_node_t node, bdk_csr_type_t type, int busnum, int size, uint64_t address)
+{
+ extern uint64_t __bdk_csr_read_slow(bdk_node_t node, bdk_csr_type_t type, int busnum, int size, uint64_t address);
+ switch (type)
+ {
+ case BDK_CSR_TYPE_DAB:
+ case BDK_CSR_TYPE_DAB32b:
+ case BDK_CSR_TYPE_NCB:
+ case BDK_CSR_TYPE_NCB32b:
+ case BDK_CSR_TYPE_PEXP_NCB:
+ case BDK_CSR_TYPE_RSL:
+ case BDK_CSR_TYPE_RSL32b:
+ case BDK_CSR_TYPE_RVU_PF_BAR0:
+ case BDK_CSR_TYPE_RVU_PF_BAR2:
+ case BDK_CSR_TYPE_RVU_PFVF_BAR2:
+ case BDK_CSR_TYPE_RVU_VF_BAR2:
+ address |= (uint64_t)(node&3) << 44;
+ /* Note: This code assumes a 1:1 mapping of the entire address space.
+ It is designed to run with the MMU disabled */
+ switch (size)
+ {
+ case 1:
+ return *(volatile uint8_t *)address;
+ case 2:
+ return bdk_le16_to_cpu(*(volatile uint16_t *)address);
+ case 4:
+ return bdk_le32_to_cpu(*(volatile uint32_t *)address);
+ default:
+ return bdk_le64_to_cpu(*(volatile uint64_t *)address);
+ }
+ default:
+ return __bdk_csr_read_slow(node, type, busnum, size, address);
+ }
+}
+
+
+/**
+ * Write a value to a CSR. Normally this function should not be
+ * used directly. Instead use the macro BDK_CSR_WRITE that fills
+ * in the parameters to this function for you.
+ *
+ * @param node Node to use in a Numa setup. Can be an exact ID or a special value.
+ * @param type Bus type the CSR is on
+ * @param busnum Bus number the CSR is on
+ * @param size Width of the CSR in bytes
+ * @param address The address of the CSR
+ * @param value Value to write to the CSR
+ */
+static inline void bdk_csr_write(bdk_node_t node, bdk_csr_type_t type, int busnum, int size, uint64_t address, uint64_t value) __attribute__ ((always_inline));
+static inline void bdk_csr_write(bdk_node_t node, bdk_csr_type_t type, int busnum, int size, uint64_t address, uint64_t value)
+{
+ extern void __bdk_csr_write_slow(bdk_node_t node, bdk_csr_type_t type, int busnum, int size, uint64_t address, uint64_t value);
+ switch (type)
+ {
+ case BDK_CSR_TYPE_DAB:
+ case BDK_CSR_TYPE_DAB32b:
+ case BDK_CSR_TYPE_NCB:
+ case BDK_CSR_TYPE_NCB32b:
+ case BDK_CSR_TYPE_PEXP_NCB:
+ case BDK_CSR_TYPE_RSL:
+ case BDK_CSR_TYPE_RSL32b:
+ case BDK_CSR_TYPE_RVU_PF_BAR0:
+ case BDK_CSR_TYPE_RVU_PF_BAR2:
+ case BDK_CSR_TYPE_RVU_PFVF_BAR2:
+ case BDK_CSR_TYPE_RVU_VF_BAR2:
+ address |= (uint64_t)(node&3) << 44;
+ /* Note: This code assumes a 1:1 mapping of the entire address space.
+ It is designed to run with the MMU disabled */
+ switch (size)
+ {
+ case 1:
+ *(volatile uint8_t *)address = value;
+ break;
+ case 2:
+ *(volatile uint16_t *)address = bdk_cpu_to_le16(value);
+ break;
+ case 4:
+ *(volatile uint32_t *)address = bdk_cpu_to_le32(value);
+ break;
+ default:
+ *(volatile uint64_t *)address = bdk_cpu_to_le64(value);
+ break;
+ }
+ break;
+
+ default:
+ __bdk_csr_write_slow(node, type, busnum, size, address, value);
+ }
+}
+
+#else
+#define bdk_csr_read thunder_remote_read_csr
+#define bdk_csr_write thunder_remote_write_csr
+#endif
+
+/**
+ * This macro makes it easy to define a variable of the correct
+ * type for a CSR.
+ */
+#define BDK_CSR_DEFINE(name, csr) typedef_##csr name
+
+/**
+ * This macro makes it easy to define a variable and initialize it
+ * with a CSR.
+ */
+#define BDK_CSR_INIT(name, node, csr) typedef_##csr name = {.u = bdk_csr_read(node, bustype_##csr, busnum_##csr, sizeof(typedef_##csr), csr)}
+
+/**
+ * Macro to read a CSR
+ */
+#define BDK_CSR_READ(node, csr) bdk_csr_read(node, bustype_##csr, busnum_##csr, sizeof(typedef_##csr), csr)
+
+/**
+ * Macro to write a CSR
+ */
+#define BDK_CSR_WRITE(node, csr, value) bdk_csr_write(node, bustype_##csr, busnum_##csr, sizeof(typedef_##csr), csr, value)
+
+/**
+ * Macro to make a read, modify, and write sequence easy. The "code_block"
+ * should be replaced with a C code block or a comma separated list of
+ * "name.s.field = value", without the quotes.
+ */
+#define BDK_CSR_MODIFY(name, node, csr, code_block) do { \
+ uint64_t _tmp_address = csr; \
+ typedef_##csr name = {.u = bdk_csr_read(node, bustype_##csr, busnum_##csr, sizeof(typedef_##csr), _tmp_address)}; \
+ code_block; \
+ bdk_csr_write(node, bustype_##csr, busnum_##csr, sizeof(typedef_##csr), _tmp_address, name.u); \
+ } while (0)
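+
+/* Illustration (not from the original header): typical use of the accessor
+ macros above, with BDK_LMCX_CONFIG(0) standing in for any generated CSR
+ name that provides the typedef_/bustype_/busnum_ companions:
+
+ BDK_CSR_INIT(cfg, node, BDK_LMCX_CONFIG(0)); // read into a typed union
+ int ecc = cfg.s.ecc_ena; // pick out a field
+ BDK_CSR_MODIFY(c, node, BDK_LMCX_CONFIG(0), // read-modify-write
+ c.s.ecc_ena = 1);
+*/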
+
+/**
+ * This macro spins on a field waiting for it to reach a value. It
+ * is common in code to need to wait for a specific field in a CSR
+ * to match a specific value. Conceptually this macro expands to:
+ *
+ * 1) read csr at "address" with a csr typedef of "type"
+ * 2) Check if ("type".s."field" "op" "value")
+ * 3) If #2 isn't true loop to #1 unless too much time has passed.
+ */
+#define BDK_CSR_WAIT_FOR_FIELD(node, csr, field, op, value, timeout_usec) \
+ ({int result; \
+ do { \
+ uint64_t done = bdk_clock_get_count(BDK_CLOCK_TIME) + (uint64_t)timeout_usec * \
+ bdk_clock_get_rate(bdk_numa_local(), BDK_CLOCK_TIME) / 1000000; \
+ typedef_##csr c; \
+ uint64_t _tmp_address = csr; \
+ while (1) \
+ { \
+ c.u = bdk_csr_read(node, bustype_##csr, busnum_##csr, sizeof(typedef_##csr), _tmp_address); \
+ if ((c.s.field) op (value)) { \
+ result = 0; \
+ break; \
+ } else if (bdk_clock_get_count(BDK_CLOCK_TIME) > done) { \
+ result = -1; \
+ break; \
+ } else \
+ bdk_thread_yield(); \
+ } \
+ } while (0); \
+ result;})
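+
+/* Illustration (not from the original header): polling a status field with a
+ 10 ms timeout; the register and field names here are placeholders. The
+ macro returns 0 once the comparison holds and -1 on timeout:
+
+ if (BDK_CSR_WAIT_FOR_FIELD(node, BDK_LMCX_DLL_CTL2(0), quad_dll_ena, ==, 1, 10000))
+ printf("N%d: timeout waiting for DLL enable\n", node);
+*/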
+
+/**
+ * This macro spins on a field waiting for it to reach a value. It
+ * is common in code to need to wait for a specific field in a CSR
+ * to match a specific value. Conceptually this macro expands to:
+ *
+ * 1) read csr at "address" with a csr typedef of "type"
+ * 2) Check if ("type"."chip"."field" "op" "value")
+ * 3) If #2 isn't true loop to #1 unless too much time has passed.
+ *
+ * Note that usage of this macro should be avoided. When future chips
+ * change bit locations, the compiler will not catch those changes
+ * with this macro. Changes silently do the wrong thing at runtime.
+ */
+#define BDK_CSR_WAIT_FOR_CHIP_FIELD(node, csr, chip, field, op, value, timeout_usec) \
+ ({int result; \
+ do { \
+ uint64_t done = bdk_clock_get_count(BDK_CLOCK_TIME) + (uint64_t)timeout_usec * \
+ bdk_clock_get_rate(bdk_numa_local(), BDK_CLOCK_TIME) / 1000000; \
+ typedef_##csr c; \
+ uint64_t _tmp_address = csr; \
+ while (1) \
+ { \
+ c.u = bdk_csr_read(node, bustype_##csr, busnum_##csr, sizeof(typedef_##csr), _tmp_address); \
+ if ((c.chip.field) op (value)) { \
+ result = 0; \
+ break; \
+ } else if (bdk_clock_get_count(BDK_CLOCK_TIME) > done) { \
+ result = -1; \
+ break; \
+ } else \
+ bdk_thread_yield(); \
+ } \
+ } while (0); \
+ result;})
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-ap.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-ap.h
new file mode 100644
index 0000000000..20ed3af737
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-ap.h
@@ -0,0 +1,34851 @@
+#ifndef __BDK_CSRS_AP_H__
+#define __BDK_CSRS_AP_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium AP.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Enumeration ap_clidr_el1_ctype_e
+ *
+ * Cache Type Field Enumeration
+ * Enumerates the CTYPEn fields of AP_CLIDR_EL1. All other values are reserved.
+ */
+#define BDK_AP_CLIDR_EL1_CTYPE_E_DATA (2)
+#define BDK_AP_CLIDR_EL1_CTYPE_E_INSTRUCTION (1)
+#define BDK_AP_CLIDR_EL1_CTYPE_E_NONE (0)
+#define BDK_AP_CLIDR_EL1_CTYPE_E_SEPARATE (3)
+#define BDK_AP_CLIDR_EL1_CTYPE_E_UNIFIED (4)
+
+/**
+ * Enumeration ap_psb_accum_sel_e
+ *
+ * AP Power Serial Bus Accumulator Selection Enumeration
+ * Enumerates the AP accumulator events used by PSBS_AP()_ACCUM()_SEL.
+ */
+#define BDK_AP_PSB_ACCUM_SEL_E_TBD0 (0)
+
+/**
+ * Enumeration ap_psb_event_e
+ *
+ * AP Power Serial Bus Event Enumeration
+ * Enumerates the event numbers for AP slaves, which correspond to index {b} of PSBS_AP()_EVENT()_CFG.
+ */
+#define BDK_AP_PSB_EVENT_E_TBD0 (0)
+
+/**
+ * Register (SYSREG) ap_actlr_el#
+ *
+ * AP Auxiliary Control Register
+ * These registers are implementation defined for implementation specific control functionality
+ * while executing at the associated execution level, or, in the case of ACTLR_EL1, while
+ * executing at EL0.
+ */
+union bdk_ap_actlr_elx
+{
+ uint64_t u;
+ struct bdk_ap_actlr_elx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_actlr_elx_s cn; */
+};
+typedef union bdk_ap_actlr_elx bdk_ap_actlr_elx_t;
+
+static inline uint64_t BDK_AP_ACTLR_ELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ACTLR_ELX(unsigned long a)
+{
+ if ((a>=1)&&(a<=3))
+ return 0x30001000100ll + 0ll * ((a) & 0x3);
+ __bdk_csr_fatal("AP_ACTLR_ELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_ACTLR_ELX(a) bdk_ap_actlr_elx_t
+#define bustype_BDK_AP_ACTLR_ELX(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ACTLR_ELX(a) "AP_ACTLR_ELX"
+#define busnum_BDK_AP_ACTLR_ELX(a) (a)
+#define arguments_BDK_AP_ACTLR_ELX(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_afsr#_el#
+ *
+ * AP Auxiliary Fault Status 0 and 1 Registers
+ * Provides additional implementation defined fault status
+ * information for exceptions taken to EL*.
+ */
+union bdk_ap_afsrx_elx
+{
+ uint32_t u;
+ struct bdk_ap_afsrx_elx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_afsrx_elx_s cn; */
+};
+typedef union bdk_ap_afsrx_elx bdk_ap_afsrx_elx_t;
+
+static inline uint64_t BDK_AP_AFSRX_ELX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_AFSRX_ELX(unsigned long a, unsigned long b)
+{
+ if ((a<=1) && ((b>=1)&&(b<=3)))
+ return 0x30005010000ll + 0x100ll * ((a) & 0x1) + 0ll * ((b) & 0x3);
+ __bdk_csr_fatal("AP_AFSRX_ELX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_AP_AFSRX_ELX(a,b) bdk_ap_afsrx_elx_t
+#define bustype_BDK_AP_AFSRX_ELX(a,b) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_AFSRX_ELX(a,b) "AP_AFSRX_ELX"
+#define busnum_BDK_AP_AFSRX_ELX(a,b) (a)
+#define arguments_BDK_AP_AFSRX_ELX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (SYSREG) ap_afsr#_el12
+ *
+ * AP Auxiliary Fault Status 0 and 1 Alias Registers
+ * Alias of AFSR(n)_EL1 when accessed from EL2 and AP_HCR_EL2[E2H] is set.
+ */
+union bdk_ap_afsrx_el12
+{
+ uint32_t u;
+ struct bdk_ap_afsrx_el12_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_afsrx_el12_s cn; */
+};
+typedef union bdk_ap_afsrx_el12 bdk_ap_afsrx_el12_t;
+
+static inline uint64_t BDK_AP_AFSRX_EL12(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_AFSRX_EL12(unsigned long a)
+{
+ if (a<=1)
+ return 0x30505010000ll + 0x100ll * ((a) & 0x1);
+ __bdk_csr_fatal("AP_AFSRX_EL12", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_AFSRX_EL12(a) bdk_ap_afsrx_el12_t
+#define bustype_BDK_AP_AFSRX_EL12(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_AFSRX_EL12(a) "AP_AFSRX_EL12"
+#define busnum_BDK_AP_AFSRX_EL12(a) (a)
+#define arguments_BDK_AP_AFSRX_EL12(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_aidr_el1
+ *
+ * AP Auxiliary ID Register
+ * Provides implementation defined identification information.
+ */
+union bdk_ap_aidr_el1
+{
+ uint64_t u;
+ struct bdk_ap_aidr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_aidr_el1_s cn; */
+};
+typedef union bdk_ap_aidr_el1 bdk_ap_aidr_el1_t;
+
+#define BDK_AP_AIDR_EL1 BDK_AP_AIDR_EL1_FUNC()
+static inline uint64_t BDK_AP_AIDR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_AIDR_EL1_FUNC(void)
+{
+ return 0x30100000700ll;
+}
+
+#define typedef_BDK_AP_AIDR_EL1 bdk_ap_aidr_el1_t
+#define bustype_BDK_AP_AIDR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_AIDR_EL1 "AP_AIDR_EL1"
+#define busnum_BDK_AP_AIDR_EL1 0
+#define arguments_BDK_AP_AIDR_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_amair_el#
+ *
+ * AP Auxiliary Memory Attribute Indirection Register
+ * Provides implementation defined memory attributes for the
+ * memory regions specified by MAIR_EL*.
+ */
+union bdk_ap_amair_elx
+{
+ uint64_t u;
+ struct bdk_ap_amair_elx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_amair_elx_s cn; */
+};
+typedef union bdk_ap_amair_elx bdk_ap_amair_elx_t;
+
+static inline uint64_t BDK_AP_AMAIR_ELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_AMAIR_ELX(unsigned long a)
+{
+ if ((a>=1)&&(a<=3))
+ return 0x3000a030000ll + 0ll * ((a) & 0x3);
+ __bdk_csr_fatal("AP_AMAIR_ELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_AMAIR_ELX(a) bdk_ap_amair_elx_t
+#define bustype_BDK_AP_AMAIR_ELX(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_AMAIR_ELX(a) "AP_AMAIR_ELX"
+#define busnum_BDK_AP_AMAIR_ELX(a) (a)
+#define arguments_BDK_AP_AMAIR_ELX(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_amair_el12
+ *
+ * AP Auxiliary Memory Attribute Indirection Register
+ * Alias of AMAIR_EL1 when accessed at EL2/3 and AP_HCR_EL2[E2H] is set.
+ */
+union bdk_ap_amair_el12
+{
+ uint64_t u;
+ struct bdk_ap_amair_el12_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_amair_el12_s cn; */
+};
+typedef union bdk_ap_amair_el12 bdk_ap_amair_el12_t;
+
+#define BDK_AP_AMAIR_EL12 BDK_AP_AMAIR_EL12_FUNC()
+static inline uint64_t BDK_AP_AMAIR_EL12_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_AMAIR_EL12_FUNC(void)
+{
+ return 0x3050a030000ll;
+}
+
+#define typedef_BDK_AP_AMAIR_EL12 bdk_ap_amair_el12_t
+#define bustype_BDK_AP_AMAIR_EL12 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_AMAIR_EL12 "AP_AMAIR_EL12"
+#define busnum_BDK_AP_AMAIR_EL12 0
+#define arguments_BDK_AP_AMAIR_EL12 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_ccsidr_el1
+ *
+ * AP Current Cache Size ID Register
+ * This register provides information about the architecture of the currently selected
+ * cache. AP_CSSELR_EL1 selects which Cache Size ID Register is accessible.
+ */
+union bdk_ap_ccsidr_el1
+{
+ uint32_t u;
+ struct bdk_ap_ccsidr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t wt : 1; /**< [ 31: 31](RO) Indicates whether the selected cache level supports write-through.
+ 0 = Write-through not supported.
+ 1 = Write-through supported.
+
+ For CNXXXX does not apply as hardware managed coherence. */
+ uint32_t wb : 1; /**< [ 30: 30](RO) Indicates whether the selected cache level supports write-back.
+ 0 = Write-back not supported.
+ 1 = Write-back supported.
+
+ For CNXXXX does not apply as hardware managed coherence. */
+ uint32_t ra : 1; /**< [ 29: 29](RO) Indicates whether the selected cache level supports read-allocation.
+ 0 = Read-allocation not supported.
+ 1 = Read-allocation supported.
+
+ For CNXXXX does not apply as hardware managed coherence. */
+ uint32_t wa : 1; /**< [ 28: 28](RO) Indicates whether the selected cache level supports write-allocation.
+ 0 = Write-allocation not supported.
+ 1 = Write-allocation supported.
+
+ For CNXXXX does not apply as hardware managed coherence. */
+ uint32_t numsets : 15; /**< [ 27: 13](RO) Number of sets in cache minus 1, therefore a value of 0
+ indicates 1 set in the cache. The number of sets does not have
+ to be a power of 2.
+
+ For CNXXXX L1D (AP_CSSELR_EL1[LEVEL] = 0x0, AP_CSSELR_EL1[IND] = 0), is 7.
+
+ For CNXXXX L1I (AP_CSSELR_EL1[LEVEL] = 0x0, AP_CSSELR_EL1[IND] = 1), is 15.
+
+ For CN88XX L2 (AP_CSSELR_EL1[LEVEL] = 0x1, AP_CSSELR_EL1[IND] = 0), is 8191. */
+ uint32_t associativity : 10; /**< [ 12: 3](RO) Associativity of cache minus 1, therefore a value of 0 indicates
+ an associativity of 1. The associativity does not have to be a
+ power of 2.
+
+ For CNXXXX L1D (AP_CSSELR_EL1[LEVEL] = 0x0, AP_CSSELR_EL1[IND] = 0), is 31.
+
+ For CNXXXX L1I (AP_CSSELR_EL1[LEVEL] = 0x0, AP_CSSELR_EL1[IND] = 1), is 38.
+
+ For CN88XX L2 (AP_CSSELR_EL1[LEVEL] = 0x1, AP_CSSELR_EL1[IND] = 0), is 15. */
+ uint32_t linesize : 3; /**< [ 2: 0](RO) Cache-line size, in (Log2(Number of bytes in cache line)) - 4.
+
+ For CNXXXX, 128 bytes. */
+#else /* Word 0 - Little Endian */
+ uint32_t linesize : 3; /**< [ 2: 0](RO) Cache-line size, in (Log2(Number of bytes in cache line)) - 4.
+
+ For CNXXXX, 128 bytes. */
+ uint32_t associativity : 10; /**< [ 12: 3](RO) Associativity of cache minus 1, therefore a value of 0 indicates
+ an associativity of 1. The associativity does not have to be a
+ power of 2.
+
+ For CNXXXX L1D (AP_CSSELR_EL1[LEVEL] = 0x0, AP_CSSELR_EL1[IND] = 0), is 31.
+
+ For CNXXXX L1I (AP_CSSELR_EL1[LEVEL] = 0x0, AP_CSSELR_EL1[IND] = 1), is 38.
+
+ For CN88XX L2 (AP_CSSELR_EL1[LEVEL] = 0x1, AP_CSSELR_EL1[IND] = 0), is 15. */
+ uint32_t numsets : 15; /**< [ 27: 13](RO) Number of sets in cache minus 1, therefore a value of 0
+ indicates 1 set in the cache. The number of sets does not have
+ to be a power of 2.
+
+ For CNXXXX L1D (AP_CSSELR_EL1[LEVEL] = 0x0, AP_CSSELR_EL1[IND] = 0), is 7.
+
+ For CNXXXX L1I (AP_CSSELR_EL1[LEVEL] = 0x0, AP_CSSELR_EL1[IND] = 1), is 15.
+
+ For CN88XX L2 (AP_CSSELR_EL1[LEVEL] = 0x1, AP_CSSELR_EL1[IND] = 0), is 8191. */
+ uint32_t wa : 1; /**< [ 28: 28](RO) Indicates whether the selected cache level supports write-allocation.
+ 0 = Write-allocation not supported.
+ 1 = Write-allocation supported.
+
+ For CNXXXX does not apply as hardware managed coherence. */
+ uint32_t ra : 1; /**< [ 29: 29](RO) Indicates whether the selected cache level supports read-allocation.
+ 0 = Read-allocation not supported.
+ 1 = Read-allocation supported.
+
+ For CNXXXX does not apply as hardware managed coherence. */
+ uint32_t wb : 1; /**< [ 30: 30](RO) Indicates whether the selected cache level supports write-back.
+ 0 = Write-back not supported.
+ 1 = Write-back supported.
+
+ For CNXXXX does not apply as hardware managed coherence. */
+ uint32_t wt : 1; /**< [ 31: 31](RO) Indicates whether the selected cache level supports write-through.
+ 0 = Write-through not supported.
+ 1 = Write-through supported.
+
+ For CNXXXX does not apply as hardware managed coherence. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_ccsidr_el1_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t wt : 1; /**< [ 31: 31](RO) Indicates whether the selected cache level supports write-through.
+ 0 = Write-through not supported.
+ 1 = Write-through supported.
+
+ For CNXXXX does not apply as hardware managed coherence. */
+ uint32_t wb : 1; /**< [ 30: 30](RO) Indicates whether the selected cache level supports write-back.
+ 0 = Write-back not supported.
+ 1 = Write-back supported.
+
+ For CNXXXX does not apply as hardware managed coherence. */
+ uint32_t ra : 1; /**< [ 29: 29](RO) Indicates whether the selected cache level supports read-allocation.
+ 0 = Read-allocation not supported.
+ 1 = Read-allocation supported.
+
+ For CNXXXX does not apply as hardware managed coherence. */
+ uint32_t wa : 1; /**< [ 28: 28](RO) Indicates whether the selected cache level supports write-allocation.
+ 0 = Write-allocation not supported.
+ 1 = Write-allocation supported.
+
+ For CNXXXX does not apply as hardware managed coherence. */
+ uint32_t numsets : 15; /**< [ 27: 13](RO) Number of sets in cache minus 1, therefore a value of 0
+ indicates 1 set in the cache. The number of sets does not have
+ to be a power of 2.
+
+ For CNXXXX L1D (AP_CSSELR_EL1[LEVEL] = 0x0, AP_CSSELR_EL1[IND] = 0), is TBD.
+
+ For CNXXXX L1I (AP_CSSELR_EL1[LEVEL] = 0x0, AP_CSSELR_EL1[IND] = 1), is TBD.
+
+ For CN98XX L2 (AP_CSSELR_EL1[LEVEL] = 0x1, AP_CSSELR_EL1[IND] = 0), is TBD.
+
+ For CN98XX L3 (AP_CSSELR_EL1[LEVEL] = 0x2, AP_CSSELR_EL1[IND] = 0), is TBD. */
+ uint32_t associativity : 10; /**< [ 12: 3](RO) Associativity of cache minus 1, therefore a value of 0 indicates
+ an associativity of 1. The associativity does not have to be a
+ power of 2.
+
+ For CNXXXX L1D (AP_CSSELR_EL1[LEVEL] = 0x0, AP_CSSELR_EL1[IND] = 0), is TBD.
+
+ For CNXXXX L1I (AP_CSSELR_EL1[LEVEL] = 0x0, AP_CSSELR_EL1[IND] = 1), is TBD.
+
+ For CN98XX L2 (AP_CSSELR_EL1[LEVEL] = 0x1, AP_CSSELR_EL1[IND] = 0), is TBD.
+
+ For CN98XX L3 (AP_CSSELR_EL1[LEVEL] = 0x2, AP_CSSELR_EL1[IND] = 0), is TBD. */
+ uint32_t linesize : 3; /**< [ 2: 0](RO) Cache-line size, in (Log2(Number of bytes in cache line)) - 4.
+
+ For CNXXXX, 128 bytes. */
+#else /* Word 0 - Little Endian */
+ uint32_t linesize : 3; /**< [ 2: 0](RO) Cache-line size, in (Log2(Number of bytes in cache line)) - 4.
+
+ For CNXXXX, 128 bytes. */
+ uint32_t associativity : 10; /**< [ 12: 3](RO) Associativity of cache minus 1, therefore a value of 0 indicates
+ an associativity of 1. The associativity does not have to be a
+ power of 2.
+
+ For CNXXXX L1D (AP_CSSELR_EL1[LEVEL] = 0x0, AP_CSSELR_EL1[IND] = 0), is TBD.
+
+ For CNXXXX L1I (AP_CSSELR_EL1[LEVEL] = 0x0, AP_CSSELR_EL1[IND] = 1), is TBD.
+
+ For CN98XX L2 (AP_CSSELR_EL1[LEVEL] = 0x1, AP_CSSELR_EL1[IND] = 0), is TBD.
+
+ For CN98XX L3 (AP_CSSELR_EL1[LEVEL] = 0x2, AP_CSSELR_EL1[IND] = 0), is TBD. */
+ uint32_t numsets : 15; /**< [ 27: 13](RO) Number of sets in cache minus 1, therefore a value of 0
+ indicates 1 set in the cache. The number of sets does not have
+ to be a power of 2.
+
+ For CNXXXX L1D (AP_CSSELR_EL1[LEVEL] = 0x0, AP_CSSELR_EL1[IND] = 0), is TBD.
+
+ For CNXXXX L1I (AP_CSSELR_EL1[LEVEL] = 0x0, AP_CSSELR_EL1[IND] = 1), is TBD.
+
+ For CN98XX L2 (AP_CSSELR_EL1[LEVEL] = 0x1, AP_CSSELR_EL1[IND] = 0), is TBD.
+
+ For CN98XX L3 (AP_CSSELR_EL1[LEVEL] = 0x2, AP_CSSELR_EL1[IND] = 0), is TBD. */
+ uint32_t wa : 1; /**< [ 28: 28](RO) Indicates whether the selected cache level supports write-allocation.
+ 0 = Write-allocation not supported.
+ 1 = Write-allocation supported.
+
+ For CNXXXX does not apply as hardware managed coherence. */
+ uint32_t ra : 1; /**< [ 29: 29](RO) Indicates whether the selected cache level supports read-allocation.
+ 0 = Read-allocation not supported.
+ 1 = Read-allocation supported.
+
+ For CNXXXX does not apply as hardware managed coherence. */
+ uint32_t wb : 1; /**< [ 30: 30](RO) Indicates whether the selected cache level supports write-back.
+ 0 = Write-back not supported.
+ 1 = Write-back supported.
+
+ For CNXXXX does not apply as hardware managed coherence. */
+ uint32_t wt : 1; /**< [ 31: 31](RO) Indicates whether the selected cache level supports write-through.
+ 0 = Write-through not supported.
+ 1 = Write-through supported.
+
+ For CNXXXX does not apply as hardware managed coherence. */
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_ap_ccsidr_el1_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t wt : 1; /**< [ 31: 31](RO) Indicates whether the selected cache level supports write-through.
+ 0 = Write-through not supported.
+ 1 = Write-through supported.
+
+ For CNXXXX does not apply as hardware managed coherence. */
+ uint32_t wb : 1; /**< [ 30: 30](RO) Indicates whether the selected cache level supports write-back.
+ 0 = Write-back not supported.
+ 1 = Write-back supported.
+
+ For CNXXXX does not apply as hardware managed coherence. */
+ uint32_t ra : 1; /**< [ 29: 29](RO) Indicates whether the selected cache level supports read-allocation.
+ 0 = Read-allocation not supported.
+ 1 = Read-allocation supported.
+
+ For CNXXXX does not apply as hardware managed coherence. */
+ uint32_t wa : 1; /**< [ 28: 28](RO) Indicates whether the selected cache level supports write-allocation.
+ 0 = Write-allocation not supported.
+ 1 = Write-allocation supported.
+
+ For CNXXXX does not apply as hardware managed coherence. */
+ uint32_t numsets : 15; /**< [ 27: 13](RO) Number of sets in cache minus 1, therefore a value of 0
+ indicates 1 set in the cache. The number of sets does not have
+ to be a power of 2.
+
+ For CNXXXX L1D (AP_CSSELR_EL1[LEVEL] = 0x0, AP_CSSELR_EL1[IND] = 0), is 7.
+
+ For CNXXXX L1I (AP_CSSELR_EL1[LEVEL] = 0x0, AP_CSSELR_EL1[IND] = 1), is 15.
+
+ For CN81XX L2 (AP_CSSELR_EL1[LEVEL] = 0x1, AP_CSSELR_EL1[IND] = 0), is 1023.
+
+ For CN80XX L2 (AP_CSSELR_EL1[LEVEL] = 0x1, AP_CSSELR_EL1[IND] = 0), is 1023. */
+ uint32_t associativity : 10; /**< [ 12: 3](RO) Associativity of cache minus 1, therefore a value of 0 indicates
+ an associativity of 1. The associativity does not have to be a
+ power of 2.
+
+ For CNXXXX L1D (AP_CSSELR_EL1[LEVEL] = 0x0, AP_CSSELR_EL1[IND] = 0), is 31.
+
+ For CNXXXX L1I (AP_CSSELR_EL1[LEVEL] = 0x0, AP_CSSELR_EL1[IND] = 1), is 38.
+
+ For CN81XX L2 (AP_CSSELR_EL1[LEVEL] = 0x1, AP_CSSELR_EL1[IND] = 0), is 15.
+
+ For CN80XX L2 (AP_CSSELR_EL1[LEVEL] = 0x1, AP_CSSELR_EL1[IND] = 0), is 7. */
+ uint32_t linesize : 3; /**< [ 2: 0](RO) Cache-line size, in (Log2(Number of bytes in cache line)) - 4.
+
+ For CNXXXX, 128 bytes. */
+#else /* Word 0 - Little Endian */
+ uint32_t linesize : 3; /**< [ 2: 0](RO) Cache-line size, in (Log2(Number of bytes in cache line)) - 4.
+
+ For CNXXXX, 128 bytes. */
+ uint32_t associativity : 10; /**< [ 12: 3](RO) Associativity of cache minus 1, therefore a value of 0 indicates
+ an associativity of 1. The associativity does not have to be a
+ power of 2.
+
+ For CNXXXX L1D (AP_CSSELR_EL1[LEVEL] = 0x0, AP_CSSELR_EL1[IND] = 0), is 31.
+
+ For CNXXXX L1I (AP_CSSELR_EL1[LEVEL] = 0x0, AP_CSSELR_EL1[IND] = 1), is 38.
+
+ For CN81XX L2 (AP_CSSELR_EL1[LEVEL] = 0x1, AP_CSSELR_EL1[IND] = 0), is 15.
+
+ For CN80XX L2 (AP_CSSELR_EL1[LEVEL] = 0x1, AP_CSSELR_EL1[IND] = 0), is 7. */
+ uint32_t numsets : 15; /**< [ 27: 13](RO) Number of sets in cache minus 1, therefore a value of 0
+ indicates 1 set in the cache. The number of sets does not have
+ to be a power of 2.
+
+ For CNXXXX L1D (AP_CSSELR_EL1[LEVEL] = 0x0, AP_CSSELR_EL1[IND] = 0), is 7.
+
+ For CNXXXX L1I (AP_CSSELR_EL1[LEVEL] = 0x0, AP_CSSELR_EL1[IND] = 1), is 15.
+
+ For CN81XX L2 (AP_CSSELR_EL1[LEVEL] = 0x1, AP_CSSELR_EL1[IND] = 0), is 1023.
+
+ For CN80XX L2 (AP_CSSELR_EL1[LEVEL] = 0x1, AP_CSSELR_EL1[IND] = 0), is 1023. */
+ uint32_t wa : 1; /**< [ 28: 28](RO) Indicates whether the selected cache level supports write-allocation.
+ 0 = Write-allocation not supported.
+ 1 = Write-allocation supported.
+
+ For CNXXXX does not apply as hardware managed coherence. */
+ uint32_t ra : 1; /**< [ 29: 29](RO) Indicates whether the selected cache level supports read-allocation.
+ 0 = Read-allocation not supported.
+ 1 = Read-allocation supported.
+
+ For CNXXXX does not apply as hardware managed coherence. */
+ uint32_t wb : 1; /**< [ 30: 30](RO) Indicates whether the selected cache level supports write-back.
+ 0 = Write-back not supported.
+ 1 = Write-back supported.
+
+ For CNXXXX does not apply as hardware managed coherence. */
+ uint32_t wt : 1; /**< [ 31: 31](RO) Indicates whether the selected cache level supports write-through.
+ 0 = Write-through not supported.
+ 1 = Write-through supported.
+
+ For CNXXXX does not apply as hardware managed coherence. */
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_ap_ccsidr_el1_s cn88xx; */
+ struct bdk_ap_ccsidr_el1_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t wt : 1; /**< [ 31: 31](RO) Indicates whether the selected cache level supports write-through.
+ 0 = Write-through not supported.
+ 1 = Write-through supported.
+
+ For CNXXXX does not apply as hardware managed coherence. */
+ uint32_t wb : 1; /**< [ 30: 30](RO) Indicates whether the selected cache level supports write-back.
+ 0 = Write-back not supported.
+ 1 = Write-back supported.
+
+ For CNXXXX does not apply as hardware managed coherence. */
+ uint32_t ra : 1; /**< [ 29: 29](RO) Indicates whether the selected cache level supports read-allocation.
+ 0 = Read-allocation not supported.
+ 1 = Read-allocation supported.
+
+ For CNXXXX does not apply as hardware managed coherence. */
+ uint32_t wa : 1; /**< [ 28: 28](RO) Indicates whether the selected cache level supports write-allocation.
+ 0 = Write-allocation not supported.
+ 1 = Write-allocation supported.
+
+ For CNXXXX does not apply as hardware managed coherence. */
+ uint32_t numsets : 15; /**< [ 27: 13](RO) Number of sets in cache minus 1, therefore a value of 0
+ indicates 1 set in the cache. The number of sets does not have
+ to be a power of 2.
+
+ For CNXXXX L1D (AP_CSSELR_EL1[LEVEL] = 0x0, AP_CSSELR_EL1[IND] = 0), is 7.
+
+ For CNXXXX L1I (AP_CSSELR_EL1[LEVEL] = 0x0, AP_CSSELR_EL1[IND] = 1), is 15.
+
+ For CN83XX L2 (AP_CSSELR_EL1[LEVEL] = 0x1, AP_CSSELR_EL1[IND] = 0), is 4095. */
+ uint32_t associativity : 10; /**< [ 12: 3](RO) Associativity of cache minus 1, therefore a value of 0 indicates
+ an associativity of 1. The associativity does not have to be a
+ power of 2.
+
+ For CNXXXX L1D (AP_CSSELR_EL1[LEVEL] = 0x0, AP_CSSELR_EL1[IND] = 0), is 31.
+
+ For CNXXXX L1I (AP_CSSELR_EL1[LEVEL] = 0x0, AP_CSSELR_EL1[IND] = 1), is 38.
+
+ For CN83XX L2 (AP_CSSELR_EL1[LEVEL] = 0x1, AP_CSSELR_EL1[IND] = 0), is 15. */
+ uint32_t linesize : 3; /**< [ 2: 0](RO) Cache-line size, in (Log2(Number of bytes in cache line)) - 4.
+
+ For CNXXXX, 128 bytes. */
+#else /* Word 0 - Little Endian */
+ uint32_t linesize : 3; /**< [ 2: 0](RO) Cache-line size, in (Log2(Number of bytes in cache line)) - 4.
+
+ For CNXXXX, 128 bytes. */
+ uint32_t associativity : 10; /**< [ 12: 3](RO) Associativity of cache minus 1, therefore a value of 0 indicates
+ an associativity of 1. The associativity does not have to be a
+ power of 2.
+
+ For CNXXXX L1D (AP_CSSELR_EL1[LEVEL] = 0x0, AP_CSSELR_EL1[IND] = 0), is 31.
+
+ For CNXXXX L1I (AP_CSSELR_EL1[LEVEL] = 0x0, AP_CSSELR_EL1[IND] = 1), is 38.
+
+ For CN83XX L2 (AP_CSSELR_EL1[LEVEL] = 0x1, AP_CSSELR_EL1[IND] = 0), is 15. */
+ uint32_t numsets : 15; /**< [ 27: 13](RO) Number of sets in cache minus 1, therefore a value of 0
+ indicates 1 set in the cache. The number of sets does not have
+ to be a power of 2.
+
+ For CNXXXX L1D (AP_CSSELR_EL1[LEVEL] = 0x0, AP_CSSELR_EL1[IND] = 0), is 7.
+
+ For CNXXXX L1I (AP_CSSELR_EL1[LEVEL] = 0x0, AP_CSSELR_EL1[IND] = 1), is 15.
+
+ For CN83XX L2 (AP_CSSELR_EL1[LEVEL] = 0x1, AP_CSSELR_EL1[IND] = 0), is 4095. */
+ uint32_t wa : 1; /**< [ 28: 28](RO) Indicates whether the selected cache level supports write-allocation.
+ 0 = Write-allocation not supported.
+ 1 = Write-allocation supported.
+
+ For CNXXXX does not apply as hardware managed coherence. */
+ uint32_t ra : 1; /**< [ 29: 29](RO) Indicates whether the selected cache level supports read-allocation.
+ 0 = Read-allocation not supported.
+ 1 = Read-allocation supported.
+
+ For CNXXXX does not apply as hardware managed coherence. */
+ uint32_t wb : 1; /**< [ 30: 30](RO) Indicates whether the selected cache level supports write-back.
+ 0 = Write-back not supported.
+ 1 = Write-back supported.
+
+ For CNXXXX does not apply as hardware managed coherence. */
+ uint32_t wt : 1; /**< [ 31: 31](RO) Indicates whether the selected cache level supports write-through.
+ 0 = Write-through not supported.
+ 1 = Write-through supported.
+
+ For CNXXXX does not apply as hardware managed coherence. */
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_ap_ccsidr_el1 bdk_ap_ccsidr_el1_t;
+
+#define BDK_AP_CCSIDR_EL1 BDK_AP_CCSIDR_EL1_FUNC()
+static inline uint64_t BDK_AP_CCSIDR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CCSIDR_EL1_FUNC(void)
+{
+ return 0x30100000000ll;
+}
+
+#define typedef_BDK_AP_CCSIDR_EL1 bdk_ap_ccsidr_el1_t
+#define bustype_BDK_AP_CCSIDR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CCSIDR_EL1 "AP_CCSIDR_EL1"
+#define busnum_BDK_AP_CCSIDR_EL1 0
+#define arguments_BDK_AP_CCSIDR_EL1 -1,-1,-1,-1
+
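+/* Illustrative sketch, not part of the imported BDK sources: decoding the
+ * AP_CCSIDR_EL1 fields above into a total cache size. Uses only the
+ * bdk_ap_ccsidr_el1_t union defined in this file; the function name is
+ * hypothetical. */
+static inline uint64_t example_ccsidr_cache_bytes(uint32_t raw)
+{
+    bdk_ap_ccsidr_el1_t ccsidr;
+    ccsidr.u = raw;
+    uint64_t line_bytes = 1ull << (ccsidr.s.linesize + 4); /* field is log2(bytes) - 4 */
+    uint64_t ways = ccsidr.s.associativity + 1;            /* field is ways - 1 */
+    uint64_t sets = ccsidr.s.numsets + 1;                  /* field is sets - 1 */
+    return line_bytes * ways * sets;
+}
+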
+/**
+ * Register (SYSREG) ap_clidr_el1
+ *
+ * AP Cache Level ID Register
+ * This register identifies the type of cache, or caches, implemented at each level, up
+ * to a maximum of seven levels. Also identifies the Level of Coherence (LoC) and Level
+ * of Unification (LoU) for the cache hierarchy.
+ */
+union bdk_ap_clidr_el1
+{
+ uint32_t u;
+ struct bdk_ap_clidr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_30_31 : 2;
+ uint32_t louu : 3; /**< [ 29: 27](RO) Level of unification uniprocessor for the cache hierarchy. */
+ uint32_t loc : 3; /**< [ 26: 24](RO) Level of coherence for the cache hierarchy.
+
+ For CN88XX, 0x1 for pass 1, 0x0 for pass 2 and subsequent chips. */
+ uint32_t louis : 3; /**< [ 23: 21](RO) Level of unification inner shareable for the cache hierarchy. */
+ uint32_t ctype7 : 3; /**< [ 20: 18](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+ 0x0 = No cache.
+ 0x1 = Instruction cache only.
+ 0x2 = Data cache only.
+ 0x3 = Separate instruction and data caches.
+ 0x4 = Unified cache.
+
+ All other values are reserved.
+
+ For CNXXXX, no L7 cache. */
+ uint32_t ctype6 : 3; /**< [ 17: 15](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+ 0x0 = No cache.
+ 0x1 = Instruction cache only.
+ 0x2 = Data cache only.
+ 0x3 = Separate instruction and data caches.
+ 0x4 = Unified cache.
+
+ For CNXXXX, no L6 cache. */
+ uint32_t ctype5 : 3; /**< [ 14: 12](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+ 0x0 = No cache.
+ 0x1 = Instruction cache only.
+ 0x2 = Data cache only.
+ 0x3 = Separate instruction and data caches.
+ 0x4 = Unified cache.
+
+ For CNXXXX, no L5 cache. */
+ uint32_t ctype4 : 3; /**< [ 11: 9](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+ 0x0 = No cache.
+ 0x1 = Instruction cache only.
+ 0x2 = Data cache only.
+ 0x3 = Separate instruction and data caches.
+ 0x4 = Unified cache.
+
+ For CNXXXX, no L4 cache. */
+ uint32_t ctype3 : 3; /**< [ 8: 6](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+ 0x0 = No cache.
+ 0x1 = Instruction cache only.
+ 0x2 = Data cache only.
+ 0x3 = Separate instruction and data caches.
+ 0x4 = Unified cache.
+
+ For CNXXXX, no L3 cache. */
+ uint32_t ctype2 : 3; /**< [ 5: 3](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+ 0x0 = No cache.
+ 0x1 = Instruction cache only.
+ 0x2 = Data cache only.
+ 0x3 = Separate instruction and data caches.
+ 0x4 = Unified cache.
+
+ For CNXXXX, L2 cache is unified. */
+ uint32_t ctype1 : 3; /**< [ 2: 0](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+ 0x0 = No cache.
+ 0x1 = Instruction cache only.
+ 0x2 = Data cache only.
+ 0x3 = Separate instruction and data caches.
+ 0x4 = Unified cache.
+
+ For CNXXXX, L1 Dcache and Icache are independent. */
+#else /* Word 0 - Little Endian */
+ uint32_t ctype1 : 3; /**< [ 2: 0](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+ 0x0 = No cache.
+ 0x1 = Instruction cache only.
+ 0x2 = Data cache only.
+ 0x3 = Separate instruction and data caches.
+ 0x4 = Unified cache.
+
+ For CNXXXX, L1 Dcache and Icache are independent. */
+ uint32_t ctype2 : 3; /**< [ 5: 3](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+ 0x0 = No cache.
+ 0x1 = Instruction cache only.
+ 0x2 = Data cache only.
+ 0x3 = Separate instruction and data caches.
+ 0x4 = Unified cache.
+
+ For CNXXXX, L2 cache is unified. */
+ uint32_t ctype3 : 3; /**< [ 8: 6](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+ 0x0 = No cache.
+ 0x1 = Instruction cache only.
+ 0x2 = Data cache only.
+ 0x3 = Separate instruction and data caches.
+ 0x4 = Unified cache.
+
+ For CNXXXX, no L3 cache. */
+ uint32_t ctype4 : 3; /**< [ 11: 9](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+ 0x0 = No cache.
+ 0x1 = Instruction cache only.
+ 0x2 = Data cache only.
+ 0x3 = Separate instruction and data caches.
+ 0x4 = Unified cache.
+
+ For CNXXXX, no L4 cache. */
+ uint32_t ctype5 : 3; /**< [ 14: 12](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+ 0x0 = No cache.
+ 0x1 = Instruction cache only.
+ 0x2 = Data cache only.
+ 0x3 = Separate instruction and data caches.
+ 0x4 = Unified cache.
+
+ For CNXXXX, no L5 cache. */
+ uint32_t ctype6 : 3; /**< [ 17: 15](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+ 0x0 = No cache.
+ 0x1 = Instruction cache only.
+ 0x2 = Data cache only.
+ 0x3 = Separate instruction and data caches.
+ 0x4 = Unified cache.
+
+ For CNXXXX, no L6 cache. */
+ uint32_t ctype7 : 3; /**< [ 20: 18](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+ 0x0 = No cache.
+ 0x1 = Instruction cache only.
+ 0x2 = Data cache only.
+ 0x3 = Separate instruction and data caches.
+ 0x4 = Unified cache.
+
+ All other values are reserved.
+
+ For CNXXXX, no L7 cache. */
+ uint32_t louis : 3; /**< [ 23: 21](RO) Level of unification inner shareable for the cache hierarchy. */
+ uint32_t loc : 3; /**< [ 26: 24](RO) Level of coherence for the cache hierarchy.
+
+ For CN88XX, 0x1 for pass 1, 0x0 for pass 2 and subsequent chips. */
+ uint32_t louu : 3; /**< [ 29: 27](RO) Level of unification uniprocessor for the cache hierarchy. */
+ uint32_t reserved_30_31 : 2;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_clidr_el1_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_30_31 : 2;
+ uint32_t louu : 3; /**< [ 29: 27](RO) Level of unification uniprocessor for the cache hierarchy. */
+ uint32_t loc : 3; /**< [ 26: 24](RO) Level of coherence for the cache hierarchy.
+
+ For CNXXXX, 0x0. */
+ uint32_t louis : 3; /**< [ 23: 21](RO) Level of unification inner shareable for the cache hierarchy. */
+ uint32_t ctype7 : 3; /**< [ 20: 18](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+
+ Enumerated by AP_CLIDR_EL1_CTYPE_E.
+
+ For CNXXXX, no L7 cache. */
+ uint32_t ctype6 : 3; /**< [ 17: 15](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+
+ Enumerated by AP_CLIDR_EL1_CTYPE_E.
+
+ For CNXXXX, no L6 cache. */
+ uint32_t ctype5 : 3; /**< [ 14: 12](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+
+ Enumerated by AP_CLIDR_EL1_CTYPE_E.
+
+ For CNXXXX, no L5 cache. */
+ uint32_t ctype4 : 3; /**< [ 11: 9](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+
+ Enumerated by AP_CLIDR_EL1_CTYPE_E.
+
+ For CNXXXX, no L4 cache. */
+ uint32_t ctype3 : 3; /**< [ 8: 6](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+
+ Enumerated by AP_CLIDR_EL1_CTYPE_E.
+
+ For CNXXXX, L3 cache is unified. */
+ uint32_t ctype2 : 3; /**< [ 5: 3](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+
+ Enumerated by AP_CLIDR_EL1_CTYPE_E.
+
+ For CNXXXX, L2 cache is unified.
+
+ Internal:
+ L2 can be NONE if fused-off. */
+ uint32_t ctype1 : 3; /**< [ 2: 0](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+
+ Enumerated by AP_CLIDR_EL1_CTYPE_E.
+
+ For CNXXXX, L1 Dcache and Icache are independent. */
+#else /* Word 0 - Little Endian */
+ uint32_t ctype1 : 3; /**< [ 2: 0](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+
+ Enumerated by AP_CLIDR_EL1_CTYPE_E.
+
+ For CNXXXX, L1 Dcache and Icache are independent. */
+ uint32_t ctype2 : 3; /**< [ 5: 3](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+
+ Enumerated by AP_CLIDR_EL1_CTYPE_E.
+
+ For CNXXXX, L2 cache is unified.
+
+ Internal:
+ L2 can be NONE if fused-off. */
+ uint32_t ctype3 : 3; /**< [ 8: 6](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+
+ Enumerated by AP_CLIDR_EL1_CTYPE_E.
+
+ For CNXXXX, L3 cache is unified. */
+ uint32_t ctype4 : 3; /**< [ 11: 9](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+
+ Enumerated by AP_CLIDR_EL1_CTYPE_E.
+
+ For CNXXXX, no L4 cache. */
+ uint32_t ctype5 : 3; /**< [ 14: 12](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+
+ Enumerated by AP_CLIDR_EL1_CTYPE_E.
+
+ For CNXXXX, no L5 cache. */
+ uint32_t ctype6 : 3; /**< [ 17: 15](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+
+ Enumerated by AP_CLIDR_EL1_CTYPE_E.
+
+ For CNXXXX, no L6 cache. */
+ uint32_t ctype7 : 3; /**< [ 20: 18](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+
+ Enumerated by AP_CLIDR_EL1_CTYPE_E.
+
+ For CNXXXX, no L7 cache. */
+ uint32_t louis : 3; /**< [ 23: 21](RO) Level of unification inner shareable for the cache hierarchy. */
+ uint32_t loc : 3; /**< [ 26: 24](RO) Level of coherence for the cache hierarchy.
+
+ For CNXXXX, 0x0. */
+ uint32_t louu : 3; /**< [ 29: 27](RO) Level of unification uniprocessor for the cache hierarchy. */
+ uint32_t reserved_30_31 : 2;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_ap_clidr_el1_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_30_31 : 2;
+ uint32_t louu : 3; /**< [ 29: 27](RO) Level of unification uniprocessor for the cache hierarchy. */
+ uint32_t loc : 3; /**< [ 26: 24](RO) Level of coherence for the cache hierarchy.
+
+ For CNXXXX, 0x0. */
+ uint32_t louis : 3; /**< [ 23: 21](RO) Level of unification inner shareable for the cache hierarchy. */
+ uint32_t ctype7 : 3; /**< [ 20: 18](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+ 0x0 = No cache.
+ 0x1 = Instruction cache only.
+ 0x2 = Data cache only.
+ 0x3 = Separate instruction and data caches.
+ 0x4 = Unified cache.
+
+ All other values are reserved.
+
+ For CNXXXX, no L7 cache. */
+ uint32_t ctype6 : 3; /**< [ 17: 15](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+ 0x0 = No cache.
+ 0x1 = Instruction cache only.
+ 0x2 = Data cache only.
+ 0x3 = Separate instruction and data caches.
+ 0x4 = Unified cache.
+
+ For CNXXXX, no L6 cache. */
+ uint32_t ctype5 : 3; /**< [ 14: 12](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+ 0x0 = No cache.
+ 0x1 = Instruction cache only.
+ 0x2 = Data cache only.
+ 0x3 = Separate instruction and data caches.
+ 0x4 = Unified cache.
+
+ For CNXXXX, no L5 cache. */
+ uint32_t ctype4 : 3; /**< [ 11: 9](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+ 0x0 = No cache.
+ 0x1 = Instruction cache only.
+ 0x2 = Data cache only.
+ 0x3 = Separate instruction and data caches.
+ 0x4 = Unified cache.
+
+ For CNXXXX, no L4 cache. */
+ uint32_t ctype3 : 3; /**< [ 8: 6](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+ 0x0 = No cache.
+ 0x1 = Instruction cache only.
+ 0x2 = Data cache only.
+ 0x3 = Separate instruction and data caches.
+ 0x4 = Unified cache.
+
+ For CNXXXX, no L3 cache. */
+ uint32_t ctype2 : 3; /**< [ 5: 3](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+ 0x0 = No cache.
+ 0x1 = Instruction cache only.
+ 0x2 = Data cache only.
+ 0x3 = Separate instruction and data caches.
+ 0x4 = Unified cache.
+
+ For CNXXXX, L2 cache is unified. */
+ uint32_t ctype1 : 3; /**< [ 2: 0](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+ 0x0 = No cache.
+ 0x1 = Instruction cache only.
+ 0x2 = Data cache only.
+ 0x3 = Separate instruction and data caches.
+ 0x4 = Unified cache.
+
+ For CNXXXX, L1 Dcache and Icache are independent. */
+#else /* Word 0 - Little Endian */
+ uint32_t ctype1 : 3; /**< [ 2: 0](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+ 0x0 = No cache.
+ 0x1 = Instruction cache only.
+ 0x2 = Data cache only.
+ 0x3 = Separate instruction and data caches.
+ 0x4 = Unified cache.
+
+ For CNXXXX, L1 Dcache and Icache are independent. */
+ uint32_t ctype2 : 3; /**< [ 5: 3](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+ 0x0 = No cache.
+ 0x1 = Instruction cache only.
+ 0x2 = Data cache only.
+ 0x3 = Separate instruction and data caches.
+ 0x4 = Unified cache.
+
+ For CNXXXX, L2 cache is unified. */
+ uint32_t ctype3 : 3; /**< [ 8: 6](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+ 0x0 = No cache.
+ 0x1 = Instruction cache only.
+ 0x2 = Data cache only.
+ 0x3 = Separate instruction and data caches.
+ 0x4 = Unified cache.
+
+ For CNXXXX, no L3 cache. */
+ uint32_t ctype4 : 3; /**< [ 11: 9](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+ 0x0 = No cache.
+ 0x1 = Instruction cache only.
+ 0x2 = Data cache only.
+ 0x3 = Separate instruction and data caches.
+ 0x4 = Unified cache.
+
+ For CNXXXX, no L4 cache. */
+ uint32_t ctype5 : 3; /**< [ 14: 12](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+ 0x0 = No cache.
+ 0x1 = Instruction cache only.
+ 0x2 = Data cache only.
+ 0x3 = Separate instruction and data caches.
+ 0x4 = Unified cache.
+
+ For CNXXXX, no L5 cache. */
+ uint32_t ctype6 : 3; /**< [ 17: 15](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+ 0x0 = No cache.
+ 0x1 = Instruction cache only.
+ 0x2 = Data cache only.
+ 0x3 = Separate instruction and data caches.
+ 0x4 = Unified cache.
+
+ For CNXXXX, no L6 cache. */
+ uint32_t ctype7 : 3; /**< [ 20: 18](RO) Cache type fields. Indicate the type of cache implemented at
+ each level, from Level 1 up to a maximum of seven levels of
+ cache hierarchy.
+ 0x0 = No cache.
+ 0x1 = Instruction cache only.
+ 0x2 = Data cache only.
+ 0x3 = Separate instruction and data caches.
+ 0x4 = Unified cache.
+
+ All other values are reserved.
+
+ For CNXXXX, no L7 cache. */
+ uint32_t louis : 3; /**< [ 23: 21](RO) Level of unification inner shareable for the cache hierarchy. */
+ uint32_t loc : 3; /**< [ 26: 24](RO) Level of coherence for the cache hierarchy.
+
+ For CNXXXX, 0x0. */
+ uint32_t louu : 3; /**< [ 29: 27](RO) Level of unification uniprocessor for the cache hierarchy. */
+ uint32_t reserved_30_31 : 2;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_ap_clidr_el1_s cn88xx; */
+ /* struct bdk_ap_clidr_el1_cn81xx cn83xx; */
+};
+typedef union bdk_ap_clidr_el1 bdk_ap_clidr_el1_t;
+
+#define BDK_AP_CLIDR_EL1 BDK_AP_CLIDR_EL1_FUNC()
+static inline uint64_t BDK_AP_CLIDR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CLIDR_EL1_FUNC(void)
+{
+ return 0x30100000100ll;
+}
+
+#define typedef_BDK_AP_CLIDR_EL1 bdk_ap_clidr_el1_t
+#define bustype_BDK_AP_CLIDR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CLIDR_EL1 "AP_CLIDR_EL1"
+#define busnum_BDK_AP_CLIDR_EL1 0
+#define arguments_BDK_AP_CLIDR_EL1 -1,-1,-1,-1
+
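+/* Illustrative sketch, not part of the imported BDK sources: walking the
+ * AP_CLIDR_EL1 CTYPE fields above to count implemented cache levels.
+ * CTYPE1..CTYPE7 occupy 3 bits each starting at bit 0; 0x0 means no cache
+ * at that level. The function name is hypothetical. */
+static inline int example_clidr_cache_levels(uint32_t raw)
+{
+    for (int level = 0; level < 7; level++)
+    {
+        uint32_t ctype = (raw >> (3 * level)) & 0x7;
+        if (ctype == 0x0)
+            return level; /* levels below this one are implemented */
+    }
+    return 7;
+}
+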
+/**
+ * Register (SYSREG) ap_cntfrq_el0
+ *
+ * AP Counter-timer Frequency Register
+ * Holds the clock frequency of the system counter.
+ */
+union bdk_ap_cntfrq_el0
+{
+ uint32_t u;
+ struct bdk_ap_cntfrq_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t data : 32; /**< [ 31: 0](R/W) Clock frequency. Indicates the system counter clock frequency,
+ in Hz. */
+#else /* Word 0 - Little Endian */
+ uint32_t data : 32; /**< [ 31: 0](R/W) Clock frequency. Indicates the system counter clock frequency,
+ in Hz. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cntfrq_el0_s cn; */
+};
+typedef union bdk_ap_cntfrq_el0 bdk_ap_cntfrq_el0_t;
+
+#define BDK_AP_CNTFRQ_EL0 BDK_AP_CNTFRQ_EL0_FUNC()
+static inline uint64_t BDK_AP_CNTFRQ_EL0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CNTFRQ_EL0_FUNC(void)
+{
+ return 0x3030e000000ll;
+}
+
+#define typedef_BDK_AP_CNTFRQ_EL0 bdk_ap_cntfrq_el0_t
+#define bustype_BDK_AP_CNTFRQ_EL0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CNTFRQ_EL0 "AP_CNTFRQ_EL0"
+#define busnum_BDK_AP_CNTFRQ_EL0 0
+#define arguments_BDK_AP_CNTFRQ_EL0 -1,-1,-1,-1
+
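+/* Illustrative sketch, not part of the imported BDK sources: converting a
+ * generic-timer tick count to microseconds using the frequency that
+ * AP_CNTFRQ_EL0 reports. The raw mrs below is standard AArch64 but is an
+ * assumption here; real BDK code would go through its own CSR accessors. */
+static inline uint64_t example_ticks_to_usec(uint64_t ticks)
+{
+    uint64_t freq;
+    asm volatile("mrs %0, cntfrq_el0" : "=r"(freq)); /* counter frequency in Hz */
+    return ticks * 1000000ull / freq;
+}
+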
+/**
+ * Register (SYSREG) ap_cnthctl_el2
+ *
+ * AP Counter-timer Hypervisor Control Non-E2H Register
+ * Controls the generation of an event stream from the physical
+ * counter, and access from nonsecure EL1 to the physical
+ * counter and the nonsecure EL1 physical timer.
+ *
+ * This register is at the same select as AP_CNTHCTL_EL2_E2H and is used when E2H=0.
+ */
+union bdk_ap_cnthctl_el2
+{
+ uint32_t u;
+ struct bdk_ap_cnthctl_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t evnti : 4; /**< [ 7: 4](R/W) Selects which bit (0 to 15) of the corresponding counter
+ register ( AP_CNTPCT_EL0 or AP_CNTVCT_EL0) is the trigger for the
+ event stream generated from that counter, when that stream is
+ enabled. */
+ uint32_t evntdir : 1; /**< [ 3: 3](R/W) Controls which transition of the counter register ( AP_CNTPCT_EL0
+ or AP_CNTVCT_EL0) trigger bit, defined by EVNTI, generates an
+ event when the event stream is enabled:
+ 0 = A 0 to 1 transition of the trigger bit triggers an event.
+ 1 = A 1 to 0 transition of the trigger bit triggers an event. */
+ uint32_t evnten : 1; /**< [ 2: 2](R/W) Enables the generation of an event stream from the
+ corresponding counter:
+ 0 = Disables the event stream.
+ 1 = Enables the event stream. */
+ uint32_t el1pcen : 1; /**< [ 1: 1](R/W) Controls whether the physical timer registers are accessible
+ from nonsecure EL1 and EL0 modes:
+ If EL3 is implemented and EL2 is not implemented, this bit is
+ treated as if it is 1 for all purposes other than reading the
+ register.
+ 0 = The AP_CNTP_CVAL_EL0, AP_CNTP_TVAL_EL0, and AP_CNTP_CTL_EL0 registers
+ are not accessible from nonsecure EL1 and EL0 modes.
+ 1 = The AP_CNTP_CVAL_EL0, AP_CNTP_TVAL_EL0, and AP_CNTP_CTL_EL0 registers
+ are accessible from nonsecure EL1 and EL0 modes. */
+ uint32_t el1pcten : 1; /**< [ 0: 0](R/W) Controls whether the physical counter, AP_CNTPCT_EL0, is
+ accessible from nonsecure EL1 and EL0 modes:
+ If EL3 is implemented and EL2 is not implemented, this bit is
+ treated as if it is 1 for all purposes other than reading the
+ register.
+ 0 = The AP_CNTPCT_EL0 register is not accessible from nonsecure EL1
+ and EL0 modes.
+ 1 = The AP_CNTPCT_EL0 register is accessible from nonsecure EL1 and
+ EL0 modes. */
+#else /* Word 0 - Little Endian */
+ uint32_t el1pcten : 1; /**< [ 0: 0](R/W) Controls whether the physical counter, AP_CNTPCT_EL0, is
+ accessible from nonsecure EL1 and EL0 modes:
+ If EL3 is implemented and EL2 is not implemented, this bit is
+ treated as if it is 1 for all purposes other than reading the
+ register.
+ 0 = The AP_CNTPCT_EL0 register is not accessible from nonsecure EL1
+ and EL0 modes.
+ 1 = The AP_CNTPCT_EL0 register is accessible from nonsecure EL1 and
+ EL0 modes. */
+ uint32_t el1pcen : 1; /**< [ 1: 1](R/W) Controls whether the physical timer registers are accessible
+ from nonsecure EL1 and EL0 modes:
+ If EL3 is implemented and EL2 is not implemented, this bit is
+ treated as if it is 1 for all purposes other than reading the
+ register.
+ 0 = The AP_CNTP_CVAL_EL0, AP_CNTP_TVAL_EL0, and AP_CNTP_CTL_EL0 registers
+ are not accessible from nonsecure EL1 and EL0 modes.
+ 1 = The AP_CNTP_CVAL_EL0, AP_CNTP_TVAL_EL0, and AP_CNTP_CTL_EL0 registers
+ are accessible from nonsecure EL1 and EL0 modes. */
+ uint32_t evnten : 1; /**< [ 2: 2](R/W) Enables the generation of an event stream from the
+ corresponding counter:
+ 0 = Disables the event stream.
+ 1 = Enables the event stream. */
+ uint32_t evntdir : 1; /**< [ 3: 3](R/W) Controls which transition of the counter register ( AP_CNTPCT_EL0
+ or AP_CNTVCT_EL0) trigger bit, defined by EVNTI, generates an
+ event when the event stream is enabled:
+ 0 = A 0 to 1 transition of the trigger bit triggers an event.
+ 1 = A 1 to 0 transition of the trigger bit triggers an event. */
+ uint32_t evnti : 4; /**< [ 7: 4](R/W) Selects which bit (0 to 15) of the corresponding counter
+ register ( AP_CNTPCT_EL0 or AP_CNTVCT_EL0) is the trigger for the
+ event stream generated from that counter, when that stream is
+ enabled. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cnthctl_el2_s cn; */
+};
+typedef union bdk_ap_cnthctl_el2 bdk_ap_cnthctl_el2_t;
+
+#define BDK_AP_CNTHCTL_EL2 BDK_AP_CNTHCTL_EL2_FUNC()
+static inline uint64_t BDK_AP_CNTHCTL_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CNTHCTL_EL2_FUNC(void)
+{
+ return 0x3040e010000ll;
+}
+
+#define typedef_BDK_AP_CNTHCTL_EL2 bdk_ap_cnthctl_el2_t
+#define bustype_BDK_AP_CNTHCTL_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CNTHCTL_EL2 "AP_CNTHCTL_EL2"
+#define busnum_BDK_AP_CNTHCTL_EL2 0
+#define arguments_BDK_AP_CNTHCTL_EL2 -1,-1,-1,-1
+
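+/* Illustrative sketch, not part of the imported BDK sources: programming the
+ * event stream described by the EVNTI/EVNTDIR/EVNTEN fields above while
+ * leaving EL1/EL0 counter and timer access enabled. Uses the
+ * bdk_ap_cnthctl_el2_t union from this file plus a raw msr (an assumption;
+ * real BDK code would use its CSR accessors). Must run at EL2 with E2H=0. */
+static inline void example_enable_event_stream(unsigned int trigger_bit)
+{
+    bdk_ap_cnthctl_el2_t ctl = { .u = 0 };
+    ctl.s.evnti = trigger_bit & 0xf; /* counter bit that triggers an event */
+    ctl.s.evntdir = 0;               /* event on a 0 -> 1 transition */
+    ctl.s.evnten = 1;                /* enable the event stream */
+    ctl.s.el1pcen = 1;               /* EL1/EL0 may access the physical timer */
+    ctl.s.el1pcten = 1;              /* EL1/EL0 may access the physical counter */
+    asm volatile("msr cnthctl_el2, %0" : : "r"((uint64_t)ctl.u));
+}
+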
+/**
+ * Register (SYSREG) ap_cnthctl_el2_e2h
+ *
+ * AP Counter-timer Hypervisor Control E2H Register
+ * This register is at the same select as AP_CNTHCTL_EL2 and is used when E2H=1.
+ */
+union bdk_ap_cnthctl_el2_e2h
+{
+ uint32_t u;
+ struct bdk_ap_cnthctl_el2_e2h_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_12_31 : 20;
+ uint32_t el1pcen : 1; /**< [ 11: 11](R/W) Controls whether physical timer register accessing instructions
+ are accessible from nonsecure EL1 and EL0:
+ 0 = The following system register accessing instructions in AARCH64,
+ and their equivalent instructions in AARCH32, are trapped to EL2
+ when AP_HCR_EL2[TGE] == 0 and the instructions are executed at
+ nonsecure EL1, or nonsecure EL0 unless trapped to EL1 as a
+ result of controls in the AP_CNTKCTL_EL1
+ Op0=3, op1=3, CRn=14, CRm=2, Op2=0 AP_CNTP_TVAL_EL0
+ Op0=3, op1=3, CRn=14, CRm=2, Op2=1 AP_CNTP_CTL_EL0
+ Op0=3, op1=3, CRn=14, CRm=2, Op2=2 AP_CNTP_CVAL_EL0
+ This bit does not cause any instructions to be trapped to EL2
+ when AP_HCR_EL2[TGE]==1.
+
+ 1 = This bit does not cause any instructions to be trapped to EL2. */
+ uint32_t el1pten : 1; /**< [ 10: 10](R/W) Controls whether the physical counter is accessible from nonsecure
+ EL1 and EL0.
+ 0 = The following system register accessing instructions in AARCH64,
+ and their equivalent instructions in AARCH32, are trapped to EL2
+ when AP_HCR_EL2[TGE] == 0 and the instructions are executed at nonsecure
+ EL1, or nonsecure EL0 unless trapped to EL1 as a result of controls
+ in AP_CNTKCTL_EL1.
+ Op0=3, op1=3, CRn=14, CRm=0, Op2=1 AP_CNTPCT_EL0
+ This bit does not cause any instructions to be trapped to EL2
+ when AP_HCR_EL2[TGE] == 1.
+
+ 1 = This bit does not cause any instructions to be trapped to EL2. */
+ uint32_t el0pten : 1; /**< [ 9: 9](R/W) Controls whether the physical timer register accessing instructions are
+ accessible from nonsecure EL0 when AP_HCR_EL2[TGE]==1.
+ 0 = The following system register accessing instructions in AARCH64,
+ and their equivalent instructions in AARCH32, are trapped to EL2
+ when AP_HCR_EL2[TGE] == 1 and the instructions are executed at nonsecure
+ EL0.
+ Op0=3, op1=3, CRn=14, CRm=2, Op2=0 AP_CNTP_TVAL_EL0
+ Op0=3, op1=3, CRn=14, CRm=2, Op2=1 AP_CNTP_CTL_EL0
+ Op0=3, op1=3, CRn=14, CRm=2, Op2=2 AP_CNTP_CVAL_EL0
+ This bit does not cause any instructions to be trapped to EL2
+ when AP_HCR_EL2[TGE]==0.
+
+ 1 = This bit does not cause any instructions to be trapped to EL2. */
+ uint32_t el0vten : 1; /**< [ 8: 8](R/W) Controls whether the virtual timer register accessing instructions are
+ accessible from nonsecure EL0 when AP_HCR_EL2[TGE]==1.
+ 0 = The following system register accessing instructions in AARCH64,
+ and their equivalent instructions in AARCH32, are trapped to EL2
+ when AP_HCR_EL2[TGE] == 1 and the instructions are executed at nonsecure
+ EL0.
+ Op0=3, op1=3, CRn=14, CRm=3, Op2=0 AP_CNTV_TVAL_EL0
+ Op0=3, op1=3, CRn=14, CRm=3, Op2=1 AP_CNTV_CTL_EL0
+ Op0=3, op1=3, CRn=14, CRm=3, Op2=2 AP_CNTV_CVAL_EL0
+ This bit does not cause any instructions to be trapped to EL2
+ when AP_HCR_EL2[TGE]==0.
+
+ 1 = This bit does not cause any instructions to be trapped to EL2. */
+ uint32_t evnti : 4; /**< [ 7: 4](R/W) Selects which bit (0 to 15) of the corresponding counter
+ register ( AP_CNTPCT_EL0 or AP_CNTVCT_EL0) is the trigger for the
+ event stream generated from that counter, when that stream is
+ enabled. */
+ uint32_t evntdir : 1; /**< [ 3: 3](R/W) Controls which transition of the counter register ( AP_CNTPCT_EL0
+ or AP_CNTVCT_EL0) trigger bit, defined by EVNTI, generates an
+ event when the event stream is enabled:
+ 0 = A 0 to 1 transition of the trigger bit triggers an event.
+ 1 = A 1 to 0 transition of the trigger bit triggers an event. */
+ uint32_t evnten : 1; /**< [ 2: 2](R/W) Enables the generation of an event stream from the
+ corresponding counter:
+ 0 = Disables the event stream.
+ 1 = Enables the event stream. */
+ uint32_t el0vcten : 1; /**< [ 1: 1](R/W) Controls whether the virtual counter registers are accessible
+ from nonsecure EL1 and EL0 when AP_HCR_EL2[TGE]==1:
+ 0 = The following system register accessing instructions in AARCH64,
+ and their equivalent instructions in AARCH32, are trapped to EL2
+ when AP_HCR_EL2[TGE] == 1 and the instructions are executed at nonsecure
+ EL0.
+ Op0=3, op1=3, CRn=14, CRm=0, Op2=2 AP_CNTVCT_EL0
+ In addition, if EL0PCTEN == 0, then the following System Register
+ accessing instructions in AARCH64, and their equivalent instructions
+ in AARCH32, are trapped to EL2 when executed at nonsecure EL0 when
+ AP_HCR_EL2[TGE]==1.
+ Op0=3, op1=3, CRn=14, CRm=0, Op2=0 AP_CNTFRQ_EL0
+ This bit does not cause any instructions to be trapped to EL2
+ when AP_HCR_EL2[TGE]==0.
+
+ 1 = This bit does not cause any instructions to be trapped to EL2. */
+ uint32_t el0pcten : 1; /**< [ 0: 0](R/W) Controls whether physical counter register accessing instructions
+ are accessible from nonsecure EL0 when AP_HCR_EL2[TGE]==1:
+ 0 = The following system register accessing instructions in AARCH64,
+ and their equivalent instructions in AARCH32, are trapped to EL2
+ when AP_HCR_EL2[TGE] == 1 and the instructions are executed at nonsecure
+ EL0.
+ Op0=3, op1=3, CRn=14, CRm=0, Op2=1 AP_CNTPCT_EL0
+ In addition, if EL0VCTEN == 0, then the following System Register
+ accessing instructions in AARCH64, and their equivalent instructions
+ in AARCH32, are trapped to EL2 when executed at nonsecure EL0 when
+ AP_HCR_EL2[TGE]==1.
+ Op0=3, op1=3, CRn=14, CRm=0, Op2=0 AP_CNTFRQ_EL0
+ This bit does not cause any instructions to be trapped to EL2
+ when AP_HCR_EL2[TGE]==0.
+
+ 1 = This bit does not cause any instructions to be trapped to EL2. */
+#else /* Word 0 - Little Endian */
+ uint32_t el0pcten : 1; /**< [ 0: 0](R/W) Controls whether physical counter register accessing instructions
+ are accessible from nonsecure EL0 when AP_HCR_EL2[TGE]==1:
+ 0 = The following system register accessing instructions in AARCH64,
+ and their equivalent instructions in AARCH32, are trapped to EL2
+ when AP_HCR_EL2[TGE] == 1 and the instructions are executed at nonsecure
+ EL0.
+ Op0=3, op1=3, CRn=14, CRm=0, Op2=1 AP_CNTPCT_EL0
+ In addition, if EL0VCTEN == 0, then the following System Register
+ accessing instructions in AARCH64, and their equivalent instructions
+ in AARCH32, are trapped to EL2 when executed at nonsecure EL0 when
+ AP_HCR_EL2[TGE]==1.
+ Op0=3, op1=3, CRn=14, CRm=0, Op2=0 AP_CNTFRQ_EL0
+ This bit does not cause any instructions to be trapped to EL2
+ when AP_HCR_EL2[TGE]==0.
+
+ 1 = This bit does not cause any instructions to be trapped to EL2. */
+ uint32_t el0vcten : 1; /**< [ 1: 1](R/W) Controls whether the virtual counter registers are accessible
+ from nonsecure EL1 and EL0 when AP_HCR_EL2[TGE]==1:
+ 0 = The following system register accessing instructions in AARCH64,
+ and their equivalent instructions in AARCH32, are trapped to EL2
+ when AP_HCR_EL2[TGE] == 1 and the instructions are executed at nonsecure
+ EL0.
+ Op0=3, op1=3, CRn=14, CRm=0, Op2=2 AP_CNTVCT_EL0
+ In addition, if EL0PCTEN == 0, then the following System Register
+ accessing instructions in AARCH64, and their equivalent instructions
+ in AARCH32, are trapped to EL2 when executed at nonsecure EL0 when
+ AP_HCR_EL2[TGE]==1.
+ Op0=3, op1=3, CRn=14, CRm=0, Op2=0 AP_CNTFRQ_EL0
+ This bit does not cause any instructions to be trapped to EL2
+ when AP_HCR_EL2[TGE]==0.
+
+ 1 = This bit does not cause any instructions to be trapped to EL2. */
+ uint32_t evnten : 1; /**< [ 2: 2](R/W) Enables the generation of an event stream from the
+ corresponding counter:
+ 0 = Disables the event stream.
+ 1 = Enables the event stream. */
+ uint32_t evntdir : 1; /**< [ 3: 3](R/W) Controls which transition of the counter register ( AP_CNTPCT_EL0
+ or AP_CNTVCT_EL0) trigger bit, defined by EVNTI, generates an
+ event when the event stream is enabled:
+ 0 = A 0 to 1 transition of the trigger bit triggers an event.
+ 1 = A 1 to 0 transition of the trigger bit triggers an event. */
+ uint32_t evnti : 4; /**< [ 7: 4](R/W) Selects which bit (0 to 15) of the corresponding counter
+ register ( AP_CNTPCT_EL0 or AP_CNTVCT_EL0) is the trigger for the
+ event stream generated from that counter, when that stream is
+ enabled. */
+ uint32_t el0vten : 1; /**< [ 8: 8](R/W) Controls whether the virtual timer register accessing instructions are
+ accessible from nonsecure EL0 when AP_HCR_EL2[TGE]==1.
+ 0 = The following system register accessing instructions in AARCH64,
+ and their equivalent instructions in AARCH32, are trapped to EL2
+ when AP_HCR_EL2[TGE] == 1 and the instructions are executed at nonsecure
+ EL0.
+ Op0=3, op1=3, CRn=14, CRm=3, Op2=0 AP_CNTV_TVAL_EL0
+ Op0=3, op1=3, CRn=14, CRm=3, Op2=1 AP_CNTV_CTL_EL0
+ Op0=3, op1=3, CRn=14, CRm=3, Op2=2 AP_CNTV_CVAL_EL0
+ This bit does not cause any instructions to be trapped to EL2
+ when AP_HCR_EL2[TGE]==0.
+
+ 1 = This bit does not cause any instructions to be trapped to EL2. */
+ uint32_t el0pten : 1; /**< [ 9: 9](R/W) Controls whether the physical timer register accessing instructions are
+ accessible from nonsecure EL0 when AP_HCR_EL2[TGE]==1.
+ 0 = The following system register accessing instructions in AARCH64,
+ and their equivalent instructions in AARCH32, are trapped to EL2
+ when AP_HCR_EL2[TGE] == 1 and the instructions are executed at nonsecure
+ EL0.
+ Op0=3, op1=3, CRn=14, CRm=2, Op2=0 AP_CNTP_TVAL_EL0
+ Op0=3, op1=3, CRn=14, CRm=2, Op2=1 AP_CNTP_CTL_EL0
+ Op0=3, op1=3, CRn=14, CRm=2, Op2=2 AP_CNTP_CVAL_EL0
+ This bit does not cause any instructions to be trapped to EL2
+ when AP_HCR_EL2[TGE]==0.
+
+ 1 = This bit does not cause any instructions to be trapped to EL2. */
+ uint32_t el1pten : 1; /**< [ 10: 10](R/W) Controls whether the physical counter is accessible from nonsecure
+ EL1 and EL0.
+ 0 = The following system register accessing instructions in AARCH64,
+ and their equivalent instructions in AARCH32, are trapped to EL2
+ when AP_HCR_EL2[TGE] == 0 and the instructions are executed at nonsecure
+ EL1, or nonsecure EL0 unless trapped to EL1 as a result of controls
+ in AP_CNTKCTL_EL1.
+ Op0=3, op1=3, CRn=14, CRm=0, Op2=1 AP_CNTPCT_EL0
+ This bit does not cause any instructions to be trapped to EL2
+ when AP_HCR_EL2[TGE] == 1.
+
+ 1 = This bit does not cause any instructions to be trapped to EL2. */
+ uint32_t el1pcen : 1; /**< [ 11: 11](R/W) Controls whether physical timer register accessing instructions
+ are accessible from nonsecure EL1 and EL0:
+ 0 = The following system register accessing instructions in AARCH64,
+ and their equivalent instructions in AARCH32, are trapped to EL2
+ when AP_HCR_EL2[TGE] == 0 and the instructions are executed at
+ nonsecure EL1, or nonsecure EL0 unless trapped to EL1 as a
+ result of controls in the AP_CNTKCTL_EL1
+ Op0=3, op1=3, CRn=14, CRm=2, Op2=0 AP_CNTP_TVAL_EL0
+ Op0=3, op1=3, CRn=14, CRm=2, Op2=1 AP_CNTP_CTL_EL0
+ Op0=3, op1=3, CRn=14, CRm=2, Op2=2 AP_CNTP_CVAL_EL0
+ This bit does not cause any instructions to be trapped to EL2
+ when AP_HCR_EL2[TGE]==1.
+
+ 1 = This bit does not cause any instructions to be trapped to EL2. */
+ uint32_t reserved_12_31 : 20;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cnthctl_el2_e2h_s cn; */
+};
+typedef union bdk_ap_cnthctl_el2_e2h bdk_ap_cnthctl_el2_e2h_t;
+
+#define BDK_AP_CNTHCTL_EL2_E2H BDK_AP_CNTHCTL_EL2_E2H_FUNC()
+static inline uint64_t BDK_AP_CNTHCTL_EL2_E2H_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CNTHCTL_EL2_E2H_FUNC(void)
+{
+ return 0x3040e010010ll;
+}
+
+#define typedef_BDK_AP_CNTHCTL_EL2_E2H bdk_ap_cnthctl_el2_e2h_t
+#define bustype_BDK_AP_CNTHCTL_EL2_E2H BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CNTHCTL_EL2_E2H "AP_CNTHCTL_EL2_E2H"
+#define busnum_BDK_AP_CNTHCTL_EL2_E2H 0
+#define arguments_BDK_AP_CNTHCTL_EL2_E2H -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cnthp_ctl_el2
+ *
+ * AP Counter-timer Hypervisor Physical Timer Control Register
+ * Control register for the EL2 physical timer.
+ */
+union bdk_ap_cnthp_ctl_el2
+{
+ uint32_t u;
+ struct bdk_ap_cnthp_ctl_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_3_31 : 29;
+ uint32_t istatus : 1; /**< [ 2: 2](RO) The status of the timer interrupt. This bit is read-only.
+ A register write that sets IMASK to 1 latches this bit to
+ reflect the status of the interrupt immediately before that
+ write.
+ 0 = Interrupt not asserted.
+ 1 = Interrupt asserted. */
+ uint32_t imask : 1; /**< [ 1: 1](R/W) Timer interrupt mask bit.
+ 0 = Timer interrupt is not masked.
+ 1 = Timer interrupt is masked. */
+ uint32_t enable : 1; /**< [ 0: 0](R/W) Enables the timer.
+ Disabling the timer masks the timer interrupt, but the timer
+ value continues to count down.
+ 0 = Timer disabled.
+ 1 = Timer enabled. */
+#else /* Word 0 - Little Endian */
+ uint32_t enable : 1; /**< [ 0: 0](R/W) Enables the timer.
+ Disabling the timer masks the timer interrupt, but the timer
+ value continues to count down.
+ 0 = Timer disabled.
+ 1 = Timer enabled. */
+ uint32_t imask : 1; /**< [ 1: 1](R/W) Timer interrupt mask bit.
+ 0 = Timer interrupt is not masked.
+ 1 = Timer interrupt is masked. */
+ uint32_t istatus : 1; /**< [ 2: 2](RO) The status of the timer interrupt. This bit is read-only.
+ A register write that sets IMASK to 1 latches this bit to
+ reflect the status of the interrupt immediately before that
+ write.
+ 0 = Interrupt not asserted.
+ 1 = Interrupt asserted. */
+ uint32_t reserved_3_31 : 29;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cnthp_ctl_el2_s cn; */
+};
+typedef union bdk_ap_cnthp_ctl_el2 bdk_ap_cnthp_ctl_el2_t;
+
+#define BDK_AP_CNTHP_CTL_EL2 BDK_AP_CNTHP_CTL_EL2_FUNC()
+static inline uint64_t BDK_AP_CNTHP_CTL_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CNTHP_CTL_EL2_FUNC(void)
+{
+ return 0x3040e020100ll;
+}
+
+#define typedef_BDK_AP_CNTHP_CTL_EL2 bdk_ap_cnthp_ctl_el2_t
+#define bustype_BDK_AP_CNTHP_CTL_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CNTHP_CTL_EL2 "AP_CNTHP_CTL_EL2"
+#define busnum_BDK_AP_CNTHP_CTL_EL2 0
+#define arguments_BDK_AP_CNTHP_CTL_EL2 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cnthp_cval_el2
+ *
+ * AP Counter-timer Hypervisor Physical Timer Compare Value Register
+ * Holds the compare value for the EL2 physical timer.
+ */
+union bdk_ap_cnthp_cval_el2
+{
+ uint64_t u;
+ struct bdk_ap_cnthp_cval_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) EL2 physical timer compare value. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) EL2 physical timer compare value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cnthp_cval_el2_s cn; */
+};
+typedef union bdk_ap_cnthp_cval_el2 bdk_ap_cnthp_cval_el2_t;
+
+#define BDK_AP_CNTHP_CVAL_EL2 BDK_AP_CNTHP_CVAL_EL2_FUNC()
+static inline uint64_t BDK_AP_CNTHP_CVAL_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CNTHP_CVAL_EL2_FUNC(void)
+{
+ return 0x3040e020200ll;
+}
+
+#define typedef_BDK_AP_CNTHP_CVAL_EL2 bdk_ap_cnthp_cval_el2_t
+#define bustype_BDK_AP_CNTHP_CVAL_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CNTHP_CVAL_EL2 "AP_CNTHP_CVAL_EL2"
+#define busnum_BDK_AP_CNTHP_CVAL_EL2 0
+#define arguments_BDK_AP_CNTHP_CVAL_EL2 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cnthp_tval_el2
+ *
+ * AP Counter-timer Hypervisor Physical Timer Value Register
+ * Holds the timer value for the EL2 physical timer.
+ */
+union bdk_ap_cnthp_tval_el2
+{
+ uint32_t u;
+ struct bdk_ap_cnthp_tval_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t data : 32; /**< [ 31: 0](R/W) EL2 physical timer value. */
+#else /* Word 0 - Little Endian */
+ uint32_t data : 32; /**< [ 31: 0](R/W) EL2 physical timer value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cnthp_tval_el2_s cn; */
+};
+typedef union bdk_ap_cnthp_tval_el2 bdk_ap_cnthp_tval_el2_t;
+
+#define BDK_AP_CNTHP_TVAL_EL2 BDK_AP_CNTHP_TVAL_EL2_FUNC()
+static inline uint64_t BDK_AP_CNTHP_TVAL_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CNTHP_TVAL_EL2_FUNC(void)
+{
+ return 0x3040e020000ll;
+}
+
+#define typedef_BDK_AP_CNTHP_TVAL_EL2 bdk_ap_cnthp_tval_el2_t
+#define bustype_BDK_AP_CNTHP_TVAL_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CNTHP_TVAL_EL2 "AP_CNTHP_TVAL_EL2"
+#define busnum_BDK_AP_CNTHP_TVAL_EL2 0
+#define arguments_BDK_AP_CNTHP_TVAL_EL2 -1,-1,-1,-1
+
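+/* Illustrative sketch, not part of the imported BDK sources: arming the EL2
+ * physical timer for a one-shot delay with the CTL/TVAL registers above.
+ * TVAL counts down from the written value; with ENABLE=1 and IMASK=0 the
+ * interrupt asserts when it reaches zero. The raw msr instructions are an
+ * assumption, and the function name is hypothetical; must run at EL2. */
+static inline void example_arm_el2_phys_timer(uint32_t ticks)
+{
+    bdk_ap_cnthp_ctl_el2_t ctl = { .u = 0 };
+    asm volatile("msr cnthp_tval_el2, %0" : : "r"((uint64_t)ticks));
+    ctl.s.enable = 1; /* start the countdown */
+    ctl.s.imask = 0;  /* leave the timer interrupt unmasked */
+    asm volatile("msr cnthp_ctl_el2, %0" : : "r"((uint64_t)ctl.u));
+}
+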
+/**
+ * Register (SYSREG) ap_cnthv_ctl_el2
+ *
+ * AP v8.1 Counter-timer Hypervisor Virtual Timer Control Register
+ * v8.1 Control register for the EL2 virtual timer.
+ */
+union bdk_ap_cnthv_ctl_el2
+{
+ uint32_t u;
+ struct bdk_ap_cnthv_ctl_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_3_31 : 29;
+ uint32_t istatus : 1; /**< [ 2: 2](RO) The status of the timer interrupt. This bit is read-only.
+ A register write that sets IMASK to 1 latches this bit to
+ reflect the status of the interrupt immediately before that
+ write.
+ 0 = Interrupt not asserted.
+ 1 = Interrupt asserted. */
+ uint32_t imask : 1; /**< [ 1: 1](R/W) Timer interrupt mask bit.
+ 0 = Timer interrupt is not masked.
+ 1 = Timer interrupt is masked. */
+ uint32_t enable : 1; /**< [ 0: 0](R/W) Enables the timer.
+ Disabling the timer masks the timer interrupt, but the timer
+ value continues to count down.
+ 0 = Timer disabled.
+ 1 = Timer enabled. */
+#else /* Word 0 - Little Endian */
+ uint32_t enable : 1; /**< [ 0: 0](R/W) Enables the timer.
+ Disabling the timer masks the timer interrupt, but the timer
+ value continues to count down.
+ 0 = Timer disabled.
+ 1 = Timer enabled. */
+ uint32_t imask : 1; /**< [ 1: 1](R/W) Timer interrupt mask bit.
+ 0 = Timer interrupt is not masked.
+ 1 = Timer interrupt is masked. */
+ uint32_t istatus : 1; /**< [ 2: 2](RO) The status of the timer interrupt. This bit is read-only.
+ A register write that sets IMASK to 1 latches this bit to
+ reflect the status of the interrupt immediately before that
+ write.
+ 0 = Interrupt not asserted.
+ 1 = Interrupt asserted. */
+ uint32_t reserved_3_31 : 29;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cnthv_ctl_el2_s cn; */
+};
+typedef union bdk_ap_cnthv_ctl_el2 bdk_ap_cnthv_ctl_el2_t;
+
+#define BDK_AP_CNTHV_CTL_EL2 BDK_AP_CNTHV_CTL_EL2_FUNC()
+static inline uint64_t BDK_AP_CNTHV_CTL_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CNTHV_CTL_EL2_FUNC(void)
+{
+ return 0x3040e030100ll;
+}
+
+#define typedef_BDK_AP_CNTHV_CTL_EL2 bdk_ap_cnthv_ctl_el2_t
+#define bustype_BDK_AP_CNTHV_CTL_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CNTHV_CTL_EL2 "AP_CNTHV_CTL_EL2"
+#define busnum_BDK_AP_CNTHV_CTL_EL2 0
+#define arguments_BDK_AP_CNTHV_CTL_EL2 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cnthv_cval_el2
+ *
+ * AP v8.1 Counter-timer Hypervisor Virtual Timer Compare Value Register
+ * v8.1 Holds the compare value for the EL2 virtual timer.
+ */
+union bdk_ap_cnthv_cval_el2
+{
+ uint64_t u;
+ struct bdk_ap_cnthv_cval_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) EL2 virtual timer compare value. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) EL2 virtual timer compare value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cnthv_cval_el2_s cn; */
+};
+typedef union bdk_ap_cnthv_cval_el2 bdk_ap_cnthv_cval_el2_t;
+
+#define BDK_AP_CNTHV_CVAL_EL2 BDK_AP_CNTHV_CVAL_EL2_FUNC()
+static inline uint64_t BDK_AP_CNTHV_CVAL_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CNTHV_CVAL_EL2_FUNC(void)
+{
+ return 0x3040e030200ll;
+}
+
+#define typedef_BDK_AP_CNTHV_CVAL_EL2 bdk_ap_cnthv_cval_el2_t
+#define bustype_BDK_AP_CNTHV_CVAL_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CNTHV_CVAL_EL2 "AP_CNTHV_CVAL_EL2"
+#define busnum_BDK_AP_CNTHV_CVAL_EL2 0
+#define arguments_BDK_AP_CNTHV_CVAL_EL2 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cnthv_tval_el2
+ *
+ * AP v8.1 Counter-timer Hypervisor Virtual Timer Value Register
+ * v8.1 Holds the timer value for the EL2 virtual timer.
+ */
+union bdk_ap_cnthv_tval_el2
+{
+ uint32_t u;
+ struct bdk_ap_cnthv_tval_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t data : 32; /**< [ 31: 0](R/W) EL2 virtual timer value. */
+#else /* Word 0 - Little Endian */
+ uint32_t data : 32; /**< [ 31: 0](R/W) EL2 virtual timer value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cnthv_tval_el2_s cn; */
+};
+typedef union bdk_ap_cnthv_tval_el2 bdk_ap_cnthv_tval_el2_t;
+
+#define BDK_AP_CNTHV_TVAL_EL2 BDK_AP_CNTHV_TVAL_EL2_FUNC()
+static inline uint64_t BDK_AP_CNTHV_TVAL_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CNTHV_TVAL_EL2_FUNC(void)
+{
+ return 0x3040e030000ll;
+}
+
+#define typedef_BDK_AP_CNTHV_TVAL_EL2 bdk_ap_cnthv_tval_el2_t
+#define bustype_BDK_AP_CNTHV_TVAL_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CNTHV_TVAL_EL2 "AP_CNTHV_TVAL_EL2"
+#define busnum_BDK_AP_CNTHV_TVAL_EL2 0
+#define arguments_BDK_AP_CNTHV_TVAL_EL2 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cntkctl_el1
+ *
+ * AP Counter-timer Kernel Control Register
+ * Controls the generation of an event stream from the virtual
+ * counter, and access from EL0 to the physical counter, virtual
+ * counter, EL1 physical timers, and the virtual timer.
+ */
+union bdk_ap_cntkctl_el1
+{
+ uint32_t u;
+ struct bdk_ap_cntkctl_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_10_31 : 22;
+ uint32_t el0pten : 1; /**< [ 9: 9](R/W) Controls whether the physical timer registers are accessible
+ from EL0 modes:
+ 0 = The AP_CNTP_CVAL_EL0, AP_CNTP_CTL_EL0, and AP_CNTP_TVAL_EL0 registers
+ are not accessible from EL0.
+ 1 = The AP_CNTP_CVAL_EL0, AP_CNTP_CTL_EL0, and AP_CNTP_TVAL_EL0 registers
+ are accessible from EL0. */
+ uint32_t el0vten : 1; /**< [ 8: 8](R/W) Controls whether the virtual timer registers are accessible
+ from EL0 modes:
+ 0 = The AP_CNTV_CVAL_EL0, AP_CNTV_CTL_EL0, and AP_CNTV_TVAL_EL0 registers
+ are not accessible from EL0.
+ 1 = The AP_CNTV_CVAL_EL0, AP_CNTV_CTL_EL0, and AP_CNTV_TVAL_EL0 registers
+ are accessible from EL0. */
+ uint32_t evnti : 4; /**< [ 7: 4](R/W) Selects which bit (0 to 15) of the corresponding counter
+ register ( AP_CNTPCT_EL0 or AP_CNTVCT_EL0) is the trigger for the
+ event stream generated from that counter, when that stream is
+ enabled. */
+ uint32_t evntdir : 1; /**< [ 3: 3](R/W) Controls which transition of the counter register ( AP_CNTPCT_EL0
+ or AP_CNTVCT_EL0) trigger bit, defined by EVNTI, generates an
+ event when the event stream is enabled:
+ 0 = A 0 to 1 transition of the trigger bit triggers an event.
+ 1 = A 1 to 0 transition of the trigger bit triggers an event. */
+ uint32_t evnten : 1; /**< [ 2: 2](R/W) Enables the generation of an event stream from the
+ corresponding counter:
+ 0 = Disables the event stream.
+ 1 = Enables the event stream. */
+ uint32_t el0vcten : 1; /**< [ 1: 1](R/W) Controls whether the virtual counter, AP_CNTVCT_EL0, and the
+ frequency register AP_CNTFRQ_EL0, are accessible from EL0 modes:
+ 0 = AP_CNTVCT_EL0 is not accessible from EL0. If EL0PCTEN is set to
+ 0, AP_CNTFRQ_EL0 is not accessible from EL0.
+ 1 = AP_CNTVCT_EL0 and AP_CNTFRQ_EL0 are accessible from EL0. */
+ uint32_t el0pcten : 1; /**< [ 0: 0](R/W) Controls whether the physical counter, AP_CNTPCT_EL0, and the
+ frequency register AP_CNTFRQ_EL0, are accessible from EL0 modes:
+ 0 = AP_CNTPCT_EL0 is not accessible from EL0 modes. If EL0VCTEN is
+ set to 0, AP_CNTFRQ_EL0 is not accessible from EL0.
+ 1 = AP_CNTPCT_EL0 and AP_CNTFRQ_EL0 are accessible from EL0. */
+#else /* Word 0 - Little Endian */
+ uint32_t el0pcten : 1; /**< [ 0: 0](R/W) Controls whether the physical counter, AP_CNTPCT_EL0, and the
+ frequency register AP_CNTFRQ_EL0, are accessible from EL0 modes:
+ 0 = AP_CNTPCT_EL0 is not accessible from EL0 modes. If EL0VCTEN is
+ set to 0, AP_CNTFRQ_EL0 is not accessible from EL0.
+ 1 = AP_CNTPCT_EL0 and AP_CNTFRQ_EL0 are accessible from EL0. */
+ uint32_t el0vcten : 1; /**< [ 1: 1](R/W) Controls whether the virtual counter, AP_CNTVCT_EL0, and the
+ frequency register AP_CNTFRQ_EL0, are accessible from EL0 modes:
+ 0 = AP_CNTVCT_EL0 is not accessible from EL0. If EL0PCTEN is set to
+ 0, AP_CNTFRQ_EL0 is not accessible from EL0.
+ 1 = AP_CNTVCT_EL0 and AP_CNTFRQ_EL0 are accessible from EL0. */
+ uint32_t evnten : 1; /**< [ 2: 2](R/W) Enables the generation of an event stream from the
+ corresponding counter:
+ 0 = Disables the event stream.
+ 1 = Enables the event stream. */
+ uint32_t evntdir : 1; /**< [ 3: 3](R/W) Controls which transition of the counter register ( AP_CNTPCT_EL0
+ or AP_CNTVCT_EL0) trigger bit, defined by EVNTI, generates an
+ event when the event stream is enabled:
+ 0 = A 0 to 1 transition of the trigger bit triggers an event.
+ 1 = A 1 to 0 transition of the trigger bit triggers an event. */
+ uint32_t evnti : 4; /**< [ 7: 4](R/W) Selects which bit (0 to 15) of the corresponding counter
+ register ( AP_CNTPCT_EL0 or AP_CNTVCT_EL0) is the trigger for the
+ event stream generated from that counter, when that stream is
+ enabled. */
+ uint32_t el0vten : 1; /**< [ 8: 8](R/W) Controls whether the virtual timer registers are accessible
+ from EL0 modes:
+ 0 = The AP_CNTV_CVAL_EL0, AP_CNTV_CTL_EL0, and AP_CNTV_TVAL_EL0 registers
+ are not accessible from EL0.
+ 1 = The AP_CNTV_CVAL_EL0, AP_CNTV_CTL_EL0, and AP_CNTV_TVAL_EL0 registers
+ are accessible from EL0. */
+ uint32_t el0pten : 1; /**< [ 9: 9](R/W) Controls whether the physical timer registers are accessible
+ from EL0 modes:
+ 0 = The AP_CNTP_CVAL_EL0, AP_CNTP_CTL_EL0, and AP_CNTP_TVAL_EL0 registers
+ are not accessible from EL0.
+ 1 = The AP_CNTP_CVAL_EL0, AP_CNTP_CTL_EL0, and AP_CNTP_TVAL_EL0 registers
+ are accessible from EL0. */
+ uint32_t reserved_10_31 : 22;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cntkctl_el1_s cn; */
+};
+typedef union bdk_ap_cntkctl_el1 bdk_ap_cntkctl_el1_t;
+
+#define BDK_AP_CNTKCTL_EL1 BDK_AP_CNTKCTL_EL1_FUNC()
+static inline uint64_t BDK_AP_CNTKCTL_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CNTKCTL_EL1_FUNC(void)
+{
+ return 0x3000e010000ll;
+}
+
+#define typedef_BDK_AP_CNTKCTL_EL1 bdk_ap_cntkctl_el1_t
+#define bustype_BDK_AP_CNTKCTL_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CNTKCTL_EL1 "AP_CNTKCTL_EL1"
+#define busnum_BDK_AP_CNTKCTL_EL1 0
+#define arguments_BDK_AP_CNTKCTL_EL1 -1,-1,-1,-1
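+
+/* Illustrative sketch (helper name is hypothetical): EVNTEN, EVNTI and
+ * EVNTDIR combine to generate a periodic WFE wake-up event from the virtual
+ * counter. Triggering on 0-to-1 transitions of counter bit n yields an event
+ * roughly every 2^(n+1) ticks.
+ */
+static inline void example_enable_event_stream(unsigned int bit)
+{
+    bdk_ap_cntkctl_el1_t kctl;
+    uint64_t u;
+    asm volatile("mrs %0, cntkctl_el1" : "=r"(u));
+    kctl.u = (uint32_t)u;
+    kctl.s.evnti = bit & 0xf; /* counter bit 0-15 used as the trigger */
+    kctl.s.evntdir = 0;       /* event on a 0-to-1 transition */
+    kctl.s.evnten = 1;        /* enable the event stream */
+    asm volatile("msr cntkctl_el1, %0" : : "r"((uint64_t)kctl.u));
+}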
+
+/**
+ * Register (SYSREG) ap_cntkctl_el12
+ *
+ * AP Counter-timer Kernel Control Register
+ * Alias of AP_CNTKCTL_EL1 when accessed at EL2/3 and AP_HCR_EL2[E2H] is set.
+ */
+union bdk_ap_cntkctl_el12
+{
+ uint32_t u;
+ struct bdk_ap_cntkctl_el12_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cntkctl_el12_s cn; */
+};
+typedef union bdk_ap_cntkctl_el12 bdk_ap_cntkctl_el12_t;
+
+#define BDK_AP_CNTKCTL_EL12 BDK_AP_CNTKCTL_EL12_FUNC()
+static inline uint64_t BDK_AP_CNTKCTL_EL12_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CNTKCTL_EL12_FUNC(void)
+{
+ return 0x3050e010000ll;
+}
+
+#define typedef_BDK_AP_CNTKCTL_EL12 bdk_ap_cntkctl_el12_t
+#define bustype_BDK_AP_CNTKCTL_EL12 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CNTKCTL_EL12 "AP_CNTKCTL_EL12"
+#define busnum_BDK_AP_CNTKCTL_EL12 0
+#define arguments_BDK_AP_CNTKCTL_EL12 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cntp_ctl_el0
+ *
+ * AP Counter-timer Physical Timer Control Register
+ * Control register for the EL1 physical timer.
+ */
+union bdk_ap_cntp_ctl_el0
+{
+ uint32_t u;
+ struct bdk_ap_cntp_ctl_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_3_31 : 29;
+ uint32_t istatus : 1; /**< [ 2: 2](RO) The status of the timer interrupt. This bit is read-only.
+ A register write that sets IMASK to 1 latches this bit to
+ reflect the status of the interrupt immediately before that
+ write.
+ 0 = Interrupt not asserted.
+ 1 = Interrupt asserted. */
+ uint32_t imask : 1; /**< [ 1: 1](R/W) Timer interrupt mask bit.
+ 0 = Timer interrupt is not masked.
+ 1 = Timer interrupt is masked. */
+ uint32_t enable : 1; /**< [ 0: 0](R/W) Enables the timer.
+ Disabling the timer masks the timer interrupt, but the timer
+ value continues to count down.
+ 0 = Timer disabled.
+ 1 = Timer enabled. */
+#else /* Word 0 - Little Endian */
+ uint32_t enable : 1; /**< [ 0: 0](R/W) Enables the timer.
+ Disabling the timer masks the timer interrupt, but the timer
+ value continues to count down.
+ 0 = Timer disabled.
+ 1 = Timer enabled. */
+ uint32_t imask : 1; /**< [ 1: 1](R/W) Timer interrupt mask bit.
+ 0 = Timer interrupt is not masked.
+ 1 = Timer interrupt is masked. */
+ uint32_t istatus : 1; /**< [ 2: 2](RO) The status of the timer interrupt. This bit is read-only.
+ A register write that sets IMASK to 1 latches this bit to
+ reflect the status of the interrupt immediately before that
+ write.
+ 0 = Interrupt not asserted.
+ 1 = Interrupt asserted. */
+ uint32_t reserved_3_31 : 29;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cntp_ctl_el0_s cn; */
+};
+typedef union bdk_ap_cntp_ctl_el0 bdk_ap_cntp_ctl_el0_t;
+
+#define BDK_AP_CNTP_CTL_EL0 BDK_AP_CNTP_CTL_EL0_FUNC()
+static inline uint64_t BDK_AP_CNTP_CTL_EL0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CNTP_CTL_EL0_FUNC(void)
+{
+ return 0x3030e020100ll;
+}
+
+#define typedef_BDK_AP_CNTP_CTL_EL0 bdk_ap_cntp_ctl_el0_t
+#define bustype_BDK_AP_CNTP_CTL_EL0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CNTP_CTL_EL0 "AP_CNTP_CTL_EL0"
+#define busnum_BDK_AP_CNTP_CTL_EL0 0
+#define arguments_BDK_AP_CNTP_CTL_EL0 -1,-1,-1,-1
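+
+/* Illustrative sketch (helper name is hypothetical): ENABLE gates the timer,
+ * IMASK gates only the interrupt output, and ISTATUS reports the compare
+ * condition once the timer is enabled. Arming with the interrupt unmasked is
+ * the common case.
+ */
+static inline void example_cntp_enable_unmasked(void)
+{
+    bdk_ap_cntp_ctl_el0_t ctl = { .u = 0 };
+    ctl.s.enable = 1; /* start comparing the count against CVAL */
+    ctl.s.imask = 0;  /* deliver the interrupt when ISTATUS sets */
+    asm volatile("msr cntp_ctl_el0, %0" : : "r"((uint64_t)ctl.u));
+}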
+
+/**
+ * Register (SYSREG) ap_cntp_ctl_el02
+ *
+ * AP Counter-timer Physical Timer Control Register
+ * Alias of AP_CNTP_CTL_EL0 when accessed at EL2/3 and AP_HCR_EL2[E2H] is set.
+ */
+union bdk_ap_cntp_ctl_el02
+{
+ uint32_t u;
+ struct bdk_ap_cntp_ctl_el02_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cntp_ctl_el02_s cn; */
+};
+typedef union bdk_ap_cntp_ctl_el02 bdk_ap_cntp_ctl_el02_t;
+
+#define BDK_AP_CNTP_CTL_EL02 BDK_AP_CNTP_CTL_EL02_FUNC()
+static inline uint64_t BDK_AP_CNTP_CTL_EL02_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CNTP_CTL_EL02_FUNC(void)
+{
+ return 0x3050e020100ll;
+}
+
+#define typedef_BDK_AP_CNTP_CTL_EL02 bdk_ap_cntp_ctl_el02_t
+#define bustype_BDK_AP_CNTP_CTL_EL02 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CNTP_CTL_EL02 "AP_CNTP_CTL_EL02"
+#define busnum_BDK_AP_CNTP_CTL_EL02 0
+#define arguments_BDK_AP_CNTP_CTL_EL02 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cntp_cval_el0
+ *
+ * AP Counter-timer Physical Timer Compare Value Register
+ * Holds the compare value for the EL1 physical timer.
+ */
+union bdk_ap_cntp_cval_el0
+{
+ uint64_t u;
+ struct bdk_ap_cntp_cval_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) EL1 physical timer compare value. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) EL1 physical timer compare value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cntp_cval_el0_s cn; */
+};
+typedef union bdk_ap_cntp_cval_el0 bdk_ap_cntp_cval_el0_t;
+
+#define BDK_AP_CNTP_CVAL_EL0 BDK_AP_CNTP_CVAL_EL0_FUNC()
+static inline uint64_t BDK_AP_CNTP_CVAL_EL0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CNTP_CVAL_EL0_FUNC(void)
+{
+ return 0x3030e020200ll;
+}
+
+#define typedef_BDK_AP_CNTP_CVAL_EL0 bdk_ap_cntp_cval_el0_t
+#define bustype_BDK_AP_CNTP_CVAL_EL0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CNTP_CVAL_EL0 "AP_CNTP_CVAL_EL0"
+#define busnum_BDK_AP_CNTP_CVAL_EL0 0
+#define arguments_BDK_AP_CNTP_CVAL_EL0 -1,-1,-1,-1
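+
+/* Illustrative sketch (helper name is hypothetical): CVAL is an absolute
+ * deadline; the timer condition is met once CNTPCT_EL0 - CVAL >= 0. "Fire N
+ * ticks from now" is therefore a count read plus a CVAL write, equivalent to
+ * writing N to AP_CNTP_TVAL_EL0.
+ */
+static inline void example_cntp_set_deadline(uint64_t ticks_from_now)
+{
+    uint64_t now;
+    asm volatile("isb; mrs %0, cntpct_el0" : "=r"(now));
+    asm volatile("msr cntp_cval_el0, %0" : : "r"(now + ticks_from_now));
+}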
+
+/**
+ * Register (SYSREG) ap_cntp_cval_el02
+ *
+ * AP Counter-timer Physical Timer Compare Value Register
+ * Alias of AP_CNTP_CVAL_EL0 when accessed at EL2/3 and AP_HCR_EL2[E2H] is set.
+ */
+union bdk_ap_cntp_cval_el02
+{
+ uint64_t u;
+ struct bdk_ap_cntp_cval_el02_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cntp_cval_el02_s cn; */
+};
+typedef union bdk_ap_cntp_cval_el02 bdk_ap_cntp_cval_el02_t;
+
+#define BDK_AP_CNTP_CVAL_EL02 BDK_AP_CNTP_CVAL_EL02_FUNC()
+static inline uint64_t BDK_AP_CNTP_CVAL_EL02_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CNTP_CVAL_EL02_FUNC(void)
+{
+ return 0x3050e020200ll;
+}
+
+#define typedef_BDK_AP_CNTP_CVAL_EL02 bdk_ap_cntp_cval_el02_t
+#define bustype_BDK_AP_CNTP_CVAL_EL02 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CNTP_CVAL_EL02 "AP_CNTP_CVAL_EL02"
+#define busnum_BDK_AP_CNTP_CVAL_EL02 0
+#define arguments_BDK_AP_CNTP_CVAL_EL02 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cntp_tval_el0
+ *
+ * AP Counter-timer Physical Timer Value Register
+ * Holds the timer value for the EL1 physical timer.
+ */
+union bdk_ap_cntp_tval_el0
+{
+ uint32_t u;
+ struct bdk_ap_cntp_tval_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t data : 32; /**< [ 31: 0](R/W) EL1 physical timer value. */
+#else /* Word 0 - Little Endian */
+ uint32_t data : 32; /**< [ 31: 0](R/W) EL1 physical timer value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cntp_tval_el0_s cn; */
+};
+typedef union bdk_ap_cntp_tval_el0 bdk_ap_cntp_tval_el0_t;
+
+#define BDK_AP_CNTP_TVAL_EL0 BDK_AP_CNTP_TVAL_EL0_FUNC()
+static inline uint64_t BDK_AP_CNTP_TVAL_EL0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CNTP_TVAL_EL0_FUNC(void)
+{
+ return 0x3030e020000ll;
+}
+
+#define typedef_BDK_AP_CNTP_TVAL_EL0 bdk_ap_cntp_tval_el0_t
+#define bustype_BDK_AP_CNTP_TVAL_EL0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CNTP_TVAL_EL0 "AP_CNTP_TVAL_EL0"
+#define busnum_BDK_AP_CNTP_TVAL_EL0 0
+#define arguments_BDK_AP_CNTP_TVAL_EL0 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cntp_tval_el02
+ *
+ * AP Counter-timer Physical Timer Value Register
+ * Alias of AP_CNTP_TVAL_EL0 when accessed at EL2/3 and AP_HCR_EL2[E2H] is set.
+ */
+union bdk_ap_cntp_tval_el02
+{
+ uint32_t u;
+ struct bdk_ap_cntp_tval_el02_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cntp_tval_el02_s cn; */
+};
+typedef union bdk_ap_cntp_tval_el02 bdk_ap_cntp_tval_el02_t;
+
+#define BDK_AP_CNTP_TVAL_EL02 BDK_AP_CNTP_TVAL_EL02_FUNC()
+static inline uint64_t BDK_AP_CNTP_TVAL_EL02_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CNTP_TVAL_EL02_FUNC(void)
+{
+ return 0x3050e020000ll;
+}
+
+#define typedef_BDK_AP_CNTP_TVAL_EL02 bdk_ap_cntp_tval_el02_t
+#define bustype_BDK_AP_CNTP_TVAL_EL02 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CNTP_TVAL_EL02 "AP_CNTP_TVAL_EL02"
+#define busnum_BDK_AP_CNTP_TVAL_EL02 0
+#define arguments_BDK_AP_CNTP_TVAL_EL02 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cntpct_el0
+ *
+ * AP Counter-timer Physical Count Register
+ * Holds the 64-bit physical count value.
+ */
+union bdk_ap_cntpct_el0
+{
+ uint64_t u;
+ struct bdk_ap_cntpct_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](RO) Physical count value. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](RO) Physical count value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cntpct_el0_s cn; */
+};
+typedef union bdk_ap_cntpct_el0 bdk_ap_cntpct_el0_t;
+
+#define BDK_AP_CNTPCT_EL0 BDK_AP_CNTPCT_EL0_FUNC()
+static inline uint64_t BDK_AP_CNTPCT_EL0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CNTPCT_EL0_FUNC(void)
+{
+ return 0x3030e000100ll;
+}
+
+#define typedef_BDK_AP_CNTPCT_EL0 bdk_ap_cntpct_el0_t
+#define bustype_BDK_AP_CNTPCT_EL0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CNTPCT_EL0 "AP_CNTPCT_EL0"
+#define busnum_BDK_AP_CNTPCT_EL0 0
+#define arguments_BDK_AP_CNTPCT_EL0 -1,-1,-1,-1
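+
+/* Illustrative sketch (helper name is hypothetical): counter reads can be
+ * satisfied speculatively, so timing code conventionally places an ISB before
+ * the MRS to order the read against preceding instructions.
+ */
+static inline uint64_t example_read_cntpct(void)
+{
+    uint64_t count;
+    asm volatile("isb; mrs %0, cntpct_el0" : "=r"(count));
+    return count;
+}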
+
+/**
+ * Register (SYSREG) ap_cntps_ctl_el1
+ *
+ * AP Counter-timer Physical Secure Timer Control Register
+ * Control register for the secure physical timer, usually
+ * accessible at EL3 but configurably accessible at EL1 in Secure
+ * state.
+ */
+union bdk_ap_cntps_ctl_el1
+{
+ uint32_t u;
+ struct bdk_ap_cntps_ctl_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_3_31 : 29;
+ uint32_t istatus : 1; /**< [ 2: 2](RO) The status of the timer interrupt. This bit is read-only.
+ A register write that sets IMASK to 1 latches this bit to
+ reflect the status of the interrupt immediately before that
+ write.
+ 0 = Interrupt not asserted.
+ 1 = Interrupt asserted. */
+ uint32_t imask : 1; /**< [ 1: 1](R/W) Timer interrupt mask bit.
+ 0 = Timer interrupt is not masked.
+ 1 = Timer interrupt is masked. */
+ uint32_t enable : 1; /**< [ 0: 0](R/W) Enables the timer.
+ Disabling the timer masks the timer interrupt, but the timer
+ value continues to count down.
+ 0 = Timer disabled.
+ 1 = Timer enabled. */
+#else /* Word 0 - Little Endian */
+ uint32_t enable : 1; /**< [ 0: 0](R/W) Enables the timer.
+ Disabling the timer masks the timer interrupt, but the timer
+ value continues to count down.
+ 0 = Timer disabled.
+ 1 = Timer enabled. */
+ uint32_t imask : 1; /**< [ 1: 1](R/W) Timer interrupt mask bit.
+ 0 = Timer interrupt is not masked.
+ 1 = Timer interrupt is masked. */
+ uint32_t istatus : 1; /**< [ 2: 2](RO) The status of the timer interrupt. This bit is read-only.
+ A register write that sets IMASK to 1 latches this bit to
+ reflect the status of the interrupt immediately before that
+ write.
+ 0 = Interrupt not asserted.
+ 1 = Interrupt asserted. */
+ uint32_t reserved_3_31 : 29;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cntps_ctl_el1_s cn; */
+};
+typedef union bdk_ap_cntps_ctl_el1 bdk_ap_cntps_ctl_el1_t;
+
+#define BDK_AP_CNTPS_CTL_EL1 BDK_AP_CNTPS_CTL_EL1_FUNC()
+static inline uint64_t BDK_AP_CNTPS_CTL_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CNTPS_CTL_EL1_FUNC(void)
+{
+ return 0x3070e020100ll;
+}
+
+#define typedef_BDK_AP_CNTPS_CTL_EL1 bdk_ap_cntps_ctl_el1_t
+#define bustype_BDK_AP_CNTPS_CTL_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CNTPS_CTL_EL1 "AP_CNTPS_CTL_EL1"
+#define busnum_BDK_AP_CNTPS_CTL_EL1 0
+#define arguments_BDK_AP_CNTPS_CTL_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cntps_cval_el1
+ *
+ * AP Counter-timer Physical Secure Timer Compare Value Register
+ * Holds the compare value for the secure physical timer, usually
+ * accessible at EL3 but configurably accessible at EL1 in Secure
+ * state.
+ */
+union bdk_ap_cntps_cval_el1
+{
+ uint64_t u;
+ struct bdk_ap_cntps_cval_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Secure physical timer compare value. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Secure physical timer compare value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cntps_cval_el1_s cn; */
+};
+typedef union bdk_ap_cntps_cval_el1 bdk_ap_cntps_cval_el1_t;
+
+#define BDK_AP_CNTPS_CVAL_EL1 BDK_AP_CNTPS_CVAL_EL1_FUNC()
+static inline uint64_t BDK_AP_CNTPS_CVAL_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CNTPS_CVAL_EL1_FUNC(void)
+{
+ return 0x3070e020200ll;
+}
+
+#define typedef_BDK_AP_CNTPS_CVAL_EL1 bdk_ap_cntps_cval_el1_t
+#define bustype_BDK_AP_CNTPS_CVAL_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CNTPS_CVAL_EL1 "AP_CNTPS_CVAL_EL1"
+#define busnum_BDK_AP_CNTPS_CVAL_EL1 0
+#define arguments_BDK_AP_CNTPS_CVAL_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cntps_tval_el1
+ *
+ * AP Counter-timer Physical Secure Timer Value Register
+ * This register holds the timer value for the secure physical timer, usually
+ * accessible at EL3 but configurably accessible at EL1 in the secure state.
+ */
+union bdk_ap_cntps_tval_el1
+{
+ uint32_t u;
+ struct bdk_ap_cntps_tval_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t data : 32; /**< [ 31: 0](R/W) Secure physical timer value. */
+#else /* Word 0 - Little Endian */
+ uint32_t data : 32; /**< [ 31: 0](R/W) Secure physical timer value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cntps_tval_el1_s cn; */
+};
+typedef union bdk_ap_cntps_tval_el1 bdk_ap_cntps_tval_el1_t;
+
+#define BDK_AP_CNTPS_TVAL_EL1 BDK_AP_CNTPS_TVAL_EL1_FUNC()
+static inline uint64_t BDK_AP_CNTPS_TVAL_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CNTPS_TVAL_EL1_FUNC(void)
+{
+ return 0x3070e020000ll;
+}
+
+#define typedef_BDK_AP_CNTPS_TVAL_EL1 bdk_ap_cntps_tval_el1_t
+#define bustype_BDK_AP_CNTPS_TVAL_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CNTPS_TVAL_EL1 "AP_CNTPS_TVAL_EL1"
+#define busnum_BDK_AP_CNTPS_TVAL_EL1 0
+#define arguments_BDK_AP_CNTPS_TVAL_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cntv_ctl_el0
+ *
+ * AP Counter-timer Virtual Timer Control Register
+ * Control register for the virtual timer.
+ */
+union bdk_ap_cntv_ctl_el0
+{
+ uint32_t u;
+ struct bdk_ap_cntv_ctl_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_3_31 : 29;
+ uint32_t istatus : 1; /**< [ 2: 2](RO) The status of the timer interrupt. This bit is read-only.
+ A register write that sets IMASK to 1 latches this bit to
+ reflect the status of the interrupt immediately before that
+ write.
+ 0 = Interrupt not asserted.
+ 1 = Interrupt asserted. */
+ uint32_t imask : 1; /**< [ 1: 1](R/W) Timer interrupt mask bit.
+ 0 = Timer interrupt is not masked.
+ 1 = Timer interrupt is masked. */
+ uint32_t enable : 1; /**< [ 0: 0](R/W) Enables the timer.
+ Disabling the timer masks the timer interrupt, but the timer
+ value continues to count down.
+ 0 = Timer disabled.
+ 1 = Timer enabled. */
+#else /* Word 0 - Little Endian */
+ uint32_t enable : 1; /**< [ 0: 0](R/W) Enables the timer.
+ Disabling the timer masks the timer interrupt, but the timer
+ value continues to count down.
+ 0 = Timer disabled.
+ 1 = Timer enabled. */
+ uint32_t imask : 1; /**< [ 1: 1](R/W) Timer interrupt mask bit.
+ 0 = Timer interrupt is not masked.
+ 1 = Timer interrupt is masked. */
+ uint32_t istatus : 1; /**< [ 2: 2](RO) The status of the timer interrupt. This bit is read-only.
+ A register write that sets IMASK to 1 latches this bit to
+ reflect the status of the interrupt immediately before that
+ write.
+ 0 = Interrupt not asserted.
+ 1 = Interrupt asserted. */
+ uint32_t reserved_3_31 : 29;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cntv_ctl_el0_s cn; */
+};
+typedef union bdk_ap_cntv_ctl_el0 bdk_ap_cntv_ctl_el0_t;
+
+#define BDK_AP_CNTV_CTL_EL0 BDK_AP_CNTV_CTL_EL0_FUNC()
+static inline uint64_t BDK_AP_CNTV_CTL_EL0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CNTV_CTL_EL0_FUNC(void)
+{
+ return 0x3030e030100ll;
+}
+
+#define typedef_BDK_AP_CNTV_CTL_EL0 bdk_ap_cntv_ctl_el0_t
+#define bustype_BDK_AP_CNTV_CTL_EL0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CNTV_CTL_EL0 "AP_CNTV_CTL_EL0"
+#define busnum_BDK_AP_CNTV_CTL_EL0 0
+#define arguments_BDK_AP_CNTV_CTL_EL0 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cntv_ctl_el02
+ *
+ * AP Counter-timer Virtual Timer Control Register
+ * Alias of AP_CNTV_CTL_EL0 when accessed at EL2/3 and AP_HCR_EL2[E2H] is set.
+ */
+union bdk_ap_cntv_ctl_el02
+{
+ uint32_t u;
+ struct bdk_ap_cntv_ctl_el02_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cntv_ctl_el02_s cn; */
+};
+typedef union bdk_ap_cntv_ctl_el02 bdk_ap_cntv_ctl_el02_t;
+
+#define BDK_AP_CNTV_CTL_EL02 BDK_AP_CNTV_CTL_EL02_FUNC()
+static inline uint64_t BDK_AP_CNTV_CTL_EL02_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CNTV_CTL_EL02_FUNC(void)
+{
+ return 0x3050e030100ll;
+}
+
+#define typedef_BDK_AP_CNTV_CTL_EL02 bdk_ap_cntv_ctl_el02_t
+#define bustype_BDK_AP_CNTV_CTL_EL02 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CNTV_CTL_EL02 "AP_CNTV_CTL_EL02"
+#define busnum_BDK_AP_CNTV_CTL_EL02 0
+#define arguments_BDK_AP_CNTV_CTL_EL02 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cntv_cval_el0
+ *
+ * AP Counter-timer Virtual Timer Compare Value Register
+ * Holds the compare value for the virtual timer.
+ */
+union bdk_ap_cntv_cval_el0
+{
+ uint64_t u;
+ struct bdk_ap_cntv_cval_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Virtual timer compare value. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Virtual timer compare value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cntv_cval_el0_s cn; */
+};
+typedef union bdk_ap_cntv_cval_el0 bdk_ap_cntv_cval_el0_t;
+
+#define BDK_AP_CNTV_CVAL_EL0 BDK_AP_CNTV_CVAL_EL0_FUNC()
+static inline uint64_t BDK_AP_CNTV_CVAL_EL0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CNTV_CVAL_EL0_FUNC(void)
+{
+ return 0x3030e030200ll;
+}
+
+#define typedef_BDK_AP_CNTV_CVAL_EL0 bdk_ap_cntv_cval_el0_t
+#define bustype_BDK_AP_CNTV_CVAL_EL0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CNTV_CVAL_EL0 "AP_CNTV_CVAL_EL0"
+#define busnum_BDK_AP_CNTV_CVAL_EL0 0
+#define arguments_BDK_AP_CNTV_CVAL_EL0 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cntv_cval_el02
+ *
+ * AP Counter-timer Virtual Timer Compare Value Register
+ * Alias of AP_CNTV_CVAL_EL0 when accessed at EL2/3 and AP_HCR_EL2[E2H] is set.
+ */
+union bdk_ap_cntv_cval_el02
+{
+ uint64_t u;
+ struct bdk_ap_cntv_cval_el02_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cntv_cval_el02_s cn; */
+};
+typedef union bdk_ap_cntv_cval_el02 bdk_ap_cntv_cval_el02_t;
+
+#define BDK_AP_CNTV_CVAL_EL02 BDK_AP_CNTV_CVAL_EL02_FUNC()
+static inline uint64_t BDK_AP_CNTV_CVAL_EL02_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CNTV_CVAL_EL02_FUNC(void)
+{
+ return 0x3050e030200ll;
+}
+
+#define typedef_BDK_AP_CNTV_CVAL_EL02 bdk_ap_cntv_cval_el02_t
+#define bustype_BDK_AP_CNTV_CVAL_EL02 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CNTV_CVAL_EL02 "AP_CNTV_CVAL_EL02"
+#define busnum_BDK_AP_CNTV_CVAL_EL02 0
+#define arguments_BDK_AP_CNTV_CVAL_EL02 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cntv_tval_el0
+ *
+ * AP Counter-timer Virtual Timer Value Register
+ * Holds the timer value for the virtual timer.
+ */
+union bdk_ap_cntv_tval_el0
+{
+ uint32_t u;
+ struct bdk_ap_cntv_tval_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t data : 32; /**< [ 31: 0](R/W) Virtual timer value. */
+#else /* Word 0 - Little Endian */
+ uint32_t data : 32; /**< [ 31: 0](R/W) Virtual timer value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cntv_tval_el0_s cn; */
+};
+typedef union bdk_ap_cntv_tval_el0 bdk_ap_cntv_tval_el0_t;
+
+#define BDK_AP_CNTV_TVAL_EL0 BDK_AP_CNTV_TVAL_EL0_FUNC()
+static inline uint64_t BDK_AP_CNTV_TVAL_EL0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CNTV_TVAL_EL0_FUNC(void)
+{
+ return 0x3030e030000ll;
+}
+
+#define typedef_BDK_AP_CNTV_TVAL_EL0 bdk_ap_cntv_tval_el0_t
+#define bustype_BDK_AP_CNTV_TVAL_EL0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CNTV_TVAL_EL0 "AP_CNTV_TVAL_EL0"
+#define busnum_BDK_AP_CNTV_TVAL_EL0 0
+#define arguments_BDK_AP_CNTV_TVAL_EL0 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cntv_tval_el02
+ *
+ * AP Counter-timer Virtual Timer Value Register
+ * Alias of AP_CNTV_TVAL_EL0 when accessed at EL2/3 and AP_HCR_EL2[E2H] is set.
+ */
+union bdk_ap_cntv_tval_el02
+{
+ uint32_t u;
+ struct bdk_ap_cntv_tval_el02_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cntv_tval_el02_s cn; */
+};
+typedef union bdk_ap_cntv_tval_el02 bdk_ap_cntv_tval_el02_t;
+
+#define BDK_AP_CNTV_TVAL_EL02 BDK_AP_CNTV_TVAL_EL02_FUNC()
+static inline uint64_t BDK_AP_CNTV_TVAL_EL02_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CNTV_TVAL_EL02_FUNC(void)
+{
+ return 0x3050e030000ll;
+}
+
+#define typedef_BDK_AP_CNTV_TVAL_EL02 bdk_ap_cntv_tval_el02_t
+#define bustype_BDK_AP_CNTV_TVAL_EL02 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CNTV_TVAL_EL02 "AP_CNTV_TVAL_EL02"
+#define busnum_BDK_AP_CNTV_TVAL_EL02 0
+#define arguments_BDK_AP_CNTV_TVAL_EL02 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cntvct_el0
+ *
+ * AP Counter-timer Virtual Count Register
+ * Holds the 64-bit virtual count value.
+ */
+union bdk_ap_cntvct_el0
+{
+ uint64_t u;
+ struct bdk_ap_cntvct_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](RO) Virtual count value. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](RO) Virtual count value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cntvct_el0_s cn; */
+};
+typedef union bdk_ap_cntvct_el0 bdk_ap_cntvct_el0_t;
+
+#define BDK_AP_CNTVCT_EL0 BDK_AP_CNTVCT_EL0_FUNC()
+static inline uint64_t BDK_AP_CNTVCT_EL0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CNTVCT_EL0_FUNC(void)
+{
+ return 0x3030e000200ll;
+}
+
+#define typedef_BDK_AP_CNTVCT_EL0 bdk_ap_cntvct_el0_t
+#define bustype_BDK_AP_CNTVCT_EL0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CNTVCT_EL0 "AP_CNTVCT_EL0"
+#define busnum_BDK_AP_CNTVCT_EL0 0
+#define arguments_BDK_AP_CNTVCT_EL0 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cntvoff_el2
+ *
+ * AP Counter-timer Virtual Offset Register
+ * Holds the 64-bit virtual offset.
+ */
+union bdk_ap_cntvoff_el2
+{
+ uint64_t u;
+ struct bdk_ap_cntvoff_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Virtual offset. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Virtual offset. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cntvoff_el2_s cn; */
+};
+typedef union bdk_ap_cntvoff_el2 bdk_ap_cntvoff_el2_t;
+
+#define BDK_AP_CNTVOFF_EL2 BDK_AP_CNTVOFF_EL2_FUNC()
+static inline uint64_t BDK_AP_CNTVOFF_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CNTVOFF_EL2_FUNC(void)
+{
+ return 0x3040e000300ll;
+}
+
+#define typedef_BDK_AP_CNTVOFF_EL2 bdk_ap_cntvoff_el2_t
+#define bustype_BDK_AP_CNTVOFF_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CNTVOFF_EL2 "AP_CNTVOFF_EL2"
+#define busnum_BDK_AP_CNTVOFF_EL2 0
+#define arguments_BDK_AP_CNTVOFF_EL2 -1,-1,-1,-1
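+
+/* Illustrative sketch (helper name is hypothetical): the offset defines guest
+ * virtual time as CNTVCT_EL0 = CNTPCT_EL0 - CNTVOFF_EL2, so a hypervisor can
+ * latch the physical count and write it here to start a guest's virtual
+ * counter near zero.
+ */
+static inline void example_zero_guest_virtual_time(void)
+{
+    uint64_t now;
+    asm volatile("isb; mrs %0, cntpct_el0" : "=r"(now));
+    asm volatile("msr cntvoff_el2, %0" : : "r"(now));
+}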
+
+/**
+ * Register (SYSREG) ap_contextidr_el1
+ *
+ * AP Context ID Register
+ * Identifies the current Process Identifier.
+ */
+union bdk_ap_contextidr_el1
+{
+ uint32_t u;
+ struct bdk_ap_contextidr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t procid : 32; /**< [ 31: 0](R/W) Process Identifier. This field must be programmed with a
+ unique value that identifies the current process. The bottom 8
+ bits of this register are not used to hold the ASID. */
+#else /* Word 0 - Little Endian */
+ uint32_t procid : 32; /**< [ 31: 0](R/W) Process Identifier. This field must be programmed with a
+ unique value that identifies the current process. The bottom 8
+ bits of this register are not used to hold the ASID. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_contextidr_el1_s cn; */
+};
+typedef union bdk_ap_contextidr_el1 bdk_ap_contextidr_el1_t;
+
+#define BDK_AP_CONTEXTIDR_EL1 BDK_AP_CONTEXTIDR_EL1_FUNC()
+static inline uint64_t BDK_AP_CONTEXTIDR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CONTEXTIDR_EL1_FUNC(void)
+{
+ return 0x3000d000100ll;
+}
+
+#define typedef_BDK_AP_CONTEXTIDR_EL1 bdk_ap_contextidr_el1_t
+#define bustype_BDK_AP_CONTEXTIDR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CONTEXTIDR_EL1 "AP_CONTEXTIDR_EL1"
+#define busnum_BDK_AP_CONTEXTIDR_EL1 0
+#define arguments_BDK_AP_CONTEXTIDR_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_contextidr_el12
+ *
+ * AP Context ID Register
+ * Alias of AP_CONTEXTIDR_EL1 when accessed at EL2/3 and AP_HCR_EL2[E2H] is set.
+ */
+union bdk_ap_contextidr_el12
+{
+ uint32_t u;
+ struct bdk_ap_contextidr_el12_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_contextidr_el12_s cn; */
+};
+typedef union bdk_ap_contextidr_el12 bdk_ap_contextidr_el12_t;
+
+#define BDK_AP_CONTEXTIDR_EL12 BDK_AP_CONTEXTIDR_EL12_FUNC()
+static inline uint64_t BDK_AP_CONTEXTIDR_EL12_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CONTEXTIDR_EL12_FUNC(void)
+{
+ return 0x3050d000100ll;
+}
+
+#define typedef_BDK_AP_CONTEXTIDR_EL12 bdk_ap_contextidr_el12_t
+#define bustype_BDK_AP_CONTEXTIDR_EL12 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CONTEXTIDR_EL12 "AP_CONTEXTIDR_EL12"
+#define busnum_BDK_AP_CONTEXTIDR_EL12 0
+#define arguments_BDK_AP_CONTEXTIDR_EL12 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_contextidr_el2
+ *
+ * AP Context ID EL2 Register
+ * v8.1: Identifies the current Process Identifier.
+ */
+union bdk_ap_contextidr_el2
+{
+ uint32_t u;
+ struct bdk_ap_contextidr_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t procid : 32; /**< [ 31: 0](R/W) v8.1: Process Identifier. This field must be programmed with a
+ unique value that identifies the current process. The bottom 8
+ bits of this register are not used to hold the ASID. */
+#else /* Word 0 - Little Endian */
+ uint32_t procid : 32; /**< [ 31: 0](R/W) v8.1: Process Identifier. This field must be programmed with a
+ unique value that identifies the current process. The bottom 8
+ bits of this register are not used to hold the ASID. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_contextidr_el2_s cn; */
+};
+typedef union bdk_ap_contextidr_el2 bdk_ap_contextidr_el2_t;
+
+#define BDK_AP_CONTEXTIDR_EL2 BDK_AP_CONTEXTIDR_EL2_FUNC()
+static inline uint64_t BDK_AP_CONTEXTIDR_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CONTEXTIDR_EL2_FUNC(void)
+{
+ return 0x3040d000100ll;
+}
+
+#define typedef_BDK_AP_CONTEXTIDR_EL2 bdk_ap_contextidr_el2_t
+#define bustype_BDK_AP_CONTEXTIDR_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CONTEXTIDR_EL2 "AP_CONTEXTIDR_EL2"
+#define busnum_BDK_AP_CONTEXTIDR_EL2 0
+#define arguments_BDK_AP_CONTEXTIDR_EL2 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cpacr_el1
+ *
+ * AP Architectural Feature Access Control Register
+ * Controls access to Trace, Floating-point, and Advanced SIMD
+ * functionality.
+ */
+union bdk_ap_cpacr_el1
+{
+ uint32_t u;
+ struct bdk_ap_cpacr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_22_31 : 10;
+ uint32_t fpen : 2; /**< [ 21: 20](R/W) Causes instructions that access the registers associated with
+ Floating Point and Advanced SIMD execution to trap to EL1 when
+ executed from EL0 or EL1.
+ 0x0 = Causes any instructions in EL0 or EL1 that use the registers
+ associated with Floating Point and Advanced SIMD execution to
+ be trapped.
+ 0x1 = Causes any instructions in EL0 that use the registers
+ associated with Floating Point and Advanced SIMD execution to
+ be trapped, but does not cause any instruction in EL1 to be
+ trapped.
+ 0x2 = Causes any instructions in EL0 or EL1 that use the registers
+ associated with Floating Point and Advanced SIMD execution to
+ be trapped.
+ 0x3 = Does not cause any instruction to be trapped. */
+ uint32_t reserved_0_19 : 20;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_19 : 20;
+ uint32_t fpen : 2; /**< [ 21: 20](R/W) Causes instructions that access the registers associated with
+ Floating Point and Advanced SIMD execution to trap to EL1 when
+ executed from EL0 or EL1.
+ 0x0 = Causes any instructions in EL0 or EL1 that use the registers
+ associated with Floating Point and Advanced SIMD execution to
+ be trapped.
+ 0x1 = Causes any instructions in EL0 that use the registers
+ associated with Floating Point and Advanced SIMD execution to
+ be trapped, but does not cause any instruction in EL1 to be
+ trapped.
+ 0x2 = Causes any instructions in EL0 or EL1 that use the registers
+ associated with Floating Point and Advanced SIMD execution to
+ be trapped.
+ 0x3 = Does not cause any instruction to be trapped. */
+ uint32_t reserved_22_31 : 10;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_cpacr_el1_cn
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_29_31 : 3;
+ uint32_t reserved_28 : 1;
+ uint32_t reserved_22_27 : 6;
+ uint32_t fpen : 2; /**< [ 21: 20](R/W) Causes instructions that access the registers associated with
+ Floating Point and Advanced SIMD execution to trap to EL1 when
+ executed from EL0 or EL1.
+ 0x0 = Causes any instructions in EL0 or EL1 that use the registers
+ associated with Floating Point and Advanced SIMD execution to
+ be trapped.
+ 0x1 = Causes any instructions in EL0 that use the registers
+ associated with Floating Point and Advanced SIMD execution to
+ be trapped, but does not cause any instruction in EL1 to be
+ trapped.
+ 0x2 = Causes any instructions in EL0 or EL1 that use the registers
+ associated with Floating Point and Advanced SIMD execution to
+ be trapped.
+ 0x3 = Does not cause any instruction to be trapped. */
+ uint32_t reserved_0_19 : 20;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_19 : 20;
+ uint32_t fpen : 2; /**< [ 21: 20](R/W) Causes instructions that access the registers associated with
+ Floating Point and Advanced SIMD execution to trap to EL1 when
+ executed from EL0 or EL1.
+ 0x0 = Causes any instructions in EL0 or EL1 that use the registers
+ associated with Floating Point and Advanced SIMD execution to
+ be trapped.
+ 0x1 = Causes any instructions in EL0 that use the registers
+ associated with Floating Point and Advanced SIMD execution to
+ be trapped, but does not cause any instruction in EL1 to be
+ trapped.
+ 0x2 = Causes any instructions in EL0 or EL1 that use the registers
+ associated with Floating Point and Advanced SIMD execution to
+ be trapped.
+ 0x3 = Does not cause any instruction to be trapped. */
+ uint32_t reserved_22_27 : 6;
+ uint32_t reserved_28 : 1;
+ uint32_t reserved_29_31 : 3;
+#endif /* Word 0 - End */
+ } cn;
+};
+typedef union bdk_ap_cpacr_el1 bdk_ap_cpacr_el1_t;
+
+#define BDK_AP_CPACR_EL1 BDK_AP_CPACR_EL1_FUNC()
+static inline uint64_t BDK_AP_CPACR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CPACR_EL1_FUNC(void)
+{
+ return 0x30001000200ll;
+}
+
+#define typedef_BDK_AP_CPACR_EL1 bdk_ap_cpacr_el1_t
+#define bustype_BDK_AP_CPACR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CPACR_EL1 "AP_CPACR_EL1"
+#define busnum_BDK_AP_CPACR_EL1 0
+#define arguments_BDK_AP_CPACR_EL1 -1,-1,-1,-1
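+
+/* Illustrative sketch (helper name is hypothetical): firmware that wants
+ * FP/Advanced SIMD usable at EL0 and EL1 sets FPEN to 0x3 (no trapping); the
+ * ISB makes the new permission visible before any FP/SIMD code runs.
+ */
+static inline void example_enable_fp_el0_el1(void)
+{
+    bdk_ap_cpacr_el1_t cpacr = { .u = 0 };
+    cpacr.s.fpen = 3; /* do not trap FP/SIMD at EL0 or EL1 */
+    asm volatile("msr cpacr_el1, %0; isb" : : "r"((uint64_t)cpacr.u));
+}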
+
+/**
+ * Register (SYSREG) ap_cpacr_el12
+ *
+ * AP Architectural Feature Access Control Register
+ * Alias of AP_CPACR_EL1 when accessed at EL2/3 and AP_HCR_EL2[E2H] is set.
+ */
+union bdk_ap_cpacr_el12
+{
+ uint32_t u;
+ struct bdk_ap_cpacr_el12_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cpacr_el12_s cn; */
+};
+typedef union bdk_ap_cpacr_el12 bdk_ap_cpacr_el12_t;
+
+#define BDK_AP_CPACR_EL12 BDK_AP_CPACR_EL12_FUNC()
+static inline uint64_t BDK_AP_CPACR_EL12_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CPACR_EL12_FUNC(void)
+{
+ return 0x30501000200ll;
+}
+
+#define typedef_BDK_AP_CPACR_EL12 bdk_ap_cpacr_el12_t
+#define bustype_BDK_AP_CPACR_EL12 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CPACR_EL12 "AP_CPACR_EL12"
+#define busnum_BDK_AP_CPACR_EL12 0
+#define arguments_BDK_AP_CPACR_EL12 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cptr_el2
+ *
+ * AP Architectural Feature Trap EL2 Non-E2H Register
+ * Controls trapping to EL2 of access to CPACR, AP_CPACR_EL1, Trace
+ * functionality and registers associated with Floating Point and
+ * Advanced SIMD execution. Also controls EL2 access to this
+ * functionality.
+ *
+ * This register is at the same select as AP_CPTR_EL2_E2H and is used when E2H=0.
+ */
+union bdk_ap_cptr_el2
+{
+ uint32_t u;
+ struct bdk_ap_cptr_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t tcpac : 1; /**< [ 31: 31](R/W) This causes a direct access to CPACR or AP_CPACR_EL1 from EL1 to
+ trap to EL2.
+ 0 = Does not cause access to CPACR or AP_CPACR_EL1 to be trapped.
+ 1 = Causes access to CPACR or AP_CPACR_EL1 to be trapped. */
+ uint32_t reserved_14_30 : 17;
+ uint32_t rsvd_12_13 : 2; /**< [ 13: 12](RO) Reserved 1. */
+ uint32_t reserved_11 : 1;
+ uint32_t tfp : 1; /**< [ 10: 10](R/W) This causes instructions that access the registers associated
+ with Floating Point and Advanced SIMD execution to trap to EL2
+ when executed from EL0, EL1, or EL2, unless trapped to EL1.
+
+ 0 = Does not cause any instruction to be trapped.
+ 1 = Causes any instructions that use the registers associated with
+ Floating Point and Advanced SIMD execution to be trapped. */
+ uint32_t rsvd_0_9 : 10; /**< [ 9: 0](RO) Reserved 1. */
+#else /* Word 0 - Little Endian */
+ uint32_t rsvd_0_9 : 10; /**< [ 9: 0](RO) Reserved 1. */
+ uint32_t tfp : 1; /**< [ 10: 10](R/W) This causes instructions that access the registers associated
+ with Floating Point and Advanced SIMD execution to trap to EL2
+ when executed from EL0, EL1, or EL2, unless trapped to EL1.
+
+ 0 = Does not cause any instruction to be trapped.
+ 1 = Causes any instructions that use the registers associated with
+ Floating Point and Advanced SIMD execution to be trapped. */
+ uint32_t reserved_11 : 1;
+ uint32_t rsvd_12_13 : 2; /**< [ 13: 12](RO) Reserved 1. */
+ uint32_t reserved_14_30 : 17;
+ uint32_t tcpac : 1; /**< [ 31: 31](R/W) This causes a direct access to CPACR or AP_CPACR_EL1 from EL1 to
+ trap to EL2.
+ 0 = Does not cause access to CPACR or AP_CPACR_EL1 to be trapped.
+ 1 = Causes access to CPACR or AP_CPACR_EL1 to be trapped. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_cptr_el2_cn
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t tcpac : 1; /**< [ 31: 31](R/W) This causes a direct access to CPACR or AP_CPACR_EL1 from EL1 to
+ trap to EL2.
+ 0 = Does not cause access to CPACR or AP_CPACR_EL1 to be trapped.
+ 1 = Causes access to CPACR or AP_CPACR_EL1 to be trapped. */
+ uint32_t reserved_29_30 : 2;
+ uint32_t reserved_28 : 1;
+ uint32_t reserved_20_27 : 8;
+ uint32_t reserved_14_19 : 6;
+ uint32_t rsvd_12_13 : 2; /**< [ 13: 12](RO) Reserved 1. */
+ uint32_t reserved_11 : 1;
+ uint32_t tfp : 1; /**< [ 10: 10](R/W) This causes instructions that access the registers associated
+ with Floating Point and Advanced SIMD execution to trap to EL2
+ when executed from EL0, EL1, or EL2, unless trapped to EL1.
+
+ 0 = Does not cause any instruction to be trapped.
+ 1 = Causes any instructions that use the registers associated with
+ Floating Point and Advanced SIMD execution to be trapped. */
+ uint32_t rsvd_0_9 : 10; /**< [ 9: 0](RO) Reserved 1. */
+#else /* Word 0 - Little Endian */
+ uint32_t rsvd_0_9 : 10; /**< [ 9: 0](RO) Reserved 1. */
+ uint32_t tfp : 1; /**< [ 10: 10](R/W) This causes instructions that access the registers associated
+ with Floating Point and Advanced SIMD execution to trap to EL2
+ when executed from EL0, EL1, or EL2, unless trapped to EL1.
+
+ 0 = Does not cause any instruction to be trapped.
+ 1 = Causes any instructions that use the registers associated with
+ Floating Point and Advanced SIMD execution to be trapped. */
+ uint32_t reserved_11 : 1;
+ uint32_t rsvd_12_13 : 2; /**< [ 13: 12](RO) Reserved 1. */
+ uint32_t reserved_14_19 : 6;
+ uint32_t reserved_20_27 : 8;
+ uint32_t reserved_28 : 1;
+ uint32_t reserved_29_30 : 2;
+ uint32_t tcpac : 1; /**< [ 31: 31](R/W) This causes a direct access to CPACR or AP_CPACR_EL1 from EL1 to
+ trap to EL2.
+ 0 = Does not cause access to CPACR or AP_CPACR_EL1 to be trapped.
+ 1 = Causes access to CPACR or AP_CPACR_EL1 to be trapped. */
+#endif /* Word 0 - End */
+ } cn;
+};
+typedef union bdk_ap_cptr_el2 bdk_ap_cptr_el2_t;
+
+#define BDK_AP_CPTR_EL2 BDK_AP_CPTR_EL2_FUNC()
+static inline uint64_t BDK_AP_CPTR_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CPTR_EL2_FUNC(void)
+{
+ return 0x30401010200ll;
+}
+
+#define typedef_BDK_AP_CPTR_EL2 bdk_ap_cptr_el2_t
+#define bustype_BDK_AP_CPTR_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CPTR_EL2 "AP_CPTR_EL2"
+#define busnum_BDK_AP_CPTR_EL2 0
+#define arguments_BDK_AP_CPTR_EL2 -1,-1,-1,-1
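+
+/* Illustrative sketch (helper name is hypothetical): the rsvd fields above
+ * are reserved-as-one, so writes must keep them set. Clearing TFP with the
+ * RES1 bits preserved leaves FP/SIMD accesses untrapped by EL2.
+ */
+static inline void example_cptr_el2_untrap_fp(void)
+{
+    bdk_ap_cptr_el2_t cptr = { .u = 0 };
+    cptr.s.rsvd_0_9 = 0x3ff; /* reserved-as-one */
+    cptr.s.rsvd_12_13 = 0x3; /* reserved-as-one */
+    cptr.s.tfp = 0;          /* do not trap FP/SIMD register accesses */
+    asm volatile("msr cptr_el2, %0" : : "r"((uint64_t)cptr.u));
+}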
+
+/**
+ * Register (SYSREG) ap_cptr_el2_e2h
+ *
+ * AP Architectural Feature Trap EL2 E2H Register
+ * Controls trapping to EL2 of access to CPACR, AP_CPACR_EL1, Trace
+ * functionality and registers associated with Floating Point and
+ * Advanced SIMD execution. Also controls EL2 access to this
+ * functionality.
+ *
+ * This register is at the same select as AP_CPTR_EL2 and is used when E2H=1.
+ */
+union bdk_ap_cptr_el2_e2h
+{
+ uint32_t u;
+ struct bdk_ap_cptr_el2_e2h_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t tcpac : 1; /**< [ 31: 31](R/W) This causes a direct access to CPACR or AP_CPACR_EL1 from EL1 to
+ trap to EL2. When AP_HCR_EL2[TGE] == 0:
+ 0 = Does not cause access to CPACR or AP_CPACR_EL1 to be trapped.
+ 1 = Causes access to CPACR or AP_CPACR_EL1 to be trapped.
+
+ When AP_HCR_EL2[TGE] == 1, this bit is ignored by hardware and
+ does not cause access to the AP_CPACR_EL1 to be trapped. */
+ uint32_t reserved_22_30 : 9;
+ uint32_t fpen : 2; /**< [ 21: 20](R/W) This causes instructions that access the registers associated with
+ Floating Point and Advanced SIMD execution to trap to EL2 when executed
+ from EL0 or EL2.
+ 0x0 = This field value causes any instructions that use the registers
+ associated with Floating Point and Advanced SIMD execution to be
+ trapped in the following cases:
+ * When AP_HCR_EL2[TGE] == 0, when the instruction is executed at
+ EL0, EL1 or EL2 unless the instruction is trapped to EL1 as
+ a result of the AP_CPACR_EL1[FPEN] bit.
+ * When AP_HCR_EL2[TGE] == 1, when the instruction is executed at
+ EL0 or EL2.
+
+ 0x1 = This field value causes any instructions executed at EL0 that use
+ the registers associated with Floating Point or Advanced SIMD
+ execution to be trapped when AP_HCR_EL2[TGE]==1 only. It does not
+ cause any instruction executed at EL1 or EL2 to be trapped and
+ it does not cause any instruction to be trapped when AP_HCR_EL2[TGE]==0.
+
+ 0x2 = This field value causes any instructions that use the registers
+ associated with Floating Point and Advanced SIMD execution to be
+ trapped in the following cases:
+ * When AP_HCR_EL2[TGE] == 0, when the instruction is executed at
+ EL0, EL1 or EL2 unless the instruction is trapped to EL2 as
+ a result of the AP_CPACR_EL1[FPEN] bit.
+ * When AP_HCR_EL2[TGE] == 1, when the instruction is executed at
+ EL0 or EL2.
+
+ 0x3 = This field value does not cause any instruction to be trapped. */
+ uint32_t reserved_0_19 : 20;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_19 : 20;
+ uint32_t fpen : 2; /**< [ 21: 20](R/W) This causes instructions that access the registers associated with
+ Floating Point and Advanced SIMD execution to trap to EL2 when executed
+ from EL0 or EL2.
+ 0x0 = This field value causes any instructions that use the registers
+ associated with Floating Point and Advanced SIMD execution to be
+ trapped in the following cases:
+ * When AP_HCR_EL2[TGE] == 0, when the instruction is executed at
+ EL0, EL1 or EL2 unless the instruction is trapped to EL1 as
+ a result of the AP_CPACR_EL1[FPEN] bit.
+ * When AP_HCR_EL2[TGE] == 1, when the instruction is executed at
+ EL0 or EL2.
+
+ 0x1 = This field value causes any instructions executed at EL0 that use
+ the registers associated with Floating Point or Advanced SIMD
+ execution to be trapped when AP_HCR_EL2[TGE]==1 only. It does not
+ cause any instruction executed at EL1 or EL2 to be trapped and
+ it does not cause any instruction to be trapped when AP_HCR_EL2[TGE]==0.
+
+ 0x2 = This field value causes any instructions that use the registers
+ associated with Floating Point and Advanced SIMD execution to be
+ trapped in the following cases:
+ * When AP_HCR_EL2[TGE] == 0, when the instruction is executed at
+ EL0, EL1 or EL2 unless the instruction is trapped to EL2 as
+ a result of the AP_CPACR_EL1[FPEN] bit.
+ * When AP_HCR_EL2[TGE] == 1, when the instruction is executed at
+ EL0 or EL2.
+
+ 0x3 = This field value does not cause any instruction to be trapped. */
+ uint32_t reserved_22_30 : 9;
+ uint32_t tcpac : 1; /**< [ 31: 31](R/W) This causes a direct access to CPACR or AP_CPACR_EL1 from EL1 to
+ trap to EL2. When AP_HCR_EL2[TGE] == 0:
+ 0 = Does not cause access to CPACR or AP_CPACR_EL1 to be trapped.
+ 1 = Causes access to CPACR or AP_CPACR_EL1 to be trapped.
+
+ When AP_HCR_EL2[TGE] == 1, this bit is ignored by hardware and
+ does not cause access to the AP_CPACR_EL1 to be trapped. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_cptr_el2_e2h_cn
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t tcpac : 1; /**< [ 31: 31](R/W) This causes a direct access to CPACR or AP_CPACR_EL1 from EL1 to
+ trap to EL2. When AP_HCR_EL2[TGE] == 0:
+ 0 = Does not cause access to CPACR or AP_CPACR_EL1 to be trapped.
+ 1 = Causes access to CPACR or AP_CPACR_EL1 to be trapped.
+
+ When AP_HCR_EL2[TGE] == 1, this bit is ignored by hardware and
+ does not cause access to the AP_CPACR_EL1 to be trapped. */
+ uint32_t reserved_29_30 : 2;
+ uint32_t reserved_28 : 1;
+ uint32_t reserved_22_27 : 6;
+ uint32_t fpen : 2; /**< [ 21: 20](R/W) This causes instructions that access the registers associated with
+ Floating Point and Advanced SIMD execution to trap to EL2 when executed
+ from EL0 or EL2.
+ 0x0 = This field value causes any instructions that use the registers
+ associated with Floating Point and Advanced SIMD execution to be
+ trapped in the following cases:
+ * When AP_HCR_EL2[TGE] == 0, when the instruction is executed at
+ EL0, EL1 or EL2 unless the instruction is trapped to EL1 as
+ a result of the AP_CPACR_EL1[FPEN] bit.
+ * When AP_HCR_EL2[TGE] == 1, when the instruction is executed at
+ EL0 or EL2.
+
+ 0x1 = This field value causes any instructions executed at EL0 that use
+ the registers associated with Floating Point or Advanced SIMD
+ execution to be trapped when AP_HCR_EL2[TGE]==1 only. It does not
+ cause any instruction executed at EL1 or EL2 to be trapped and
+ it does not cause any instruction to be trapped when AP_HCR_EL2[TGE]==0.
+
+ 0x2 = This field value causes any instructions that use the registers
+ associated with Floating Point and Advanced SIMD execution to be
+ trapped in the following cases:
+ * When AP_HCR_EL2[TGE] == 0, when the instruction is executed at
+ EL0, EL1 or EL2 unless the instruction is trapped to EL2 as
+ a result of the AP_CPACR_EL1[FPEN] bit.
+ * When AP_HCR_EL2[TGE] == 1, when the instruction is executed at
+ EL0 or EL2.
+
+ 0x3 = This field value does not cause any instruction to be trapped. */
+ uint32_t reserved_0_19 : 20;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_19 : 20;
+ uint32_t fpen : 2; /**< [ 21: 20](R/W) This causes instructions that access the registers associated with
+ Floating Point and Advanced SIMD execution to trap to EL2 when executed
+ from EL0 or EL2.
+ 0x0 = This field value causes any instructions that use the registers
+ associated with Floating Point and Advanced SIMD execution to be
+ trapped in the following cases:
+ * When AP_HCR_EL2[TGE] == 0, when the instruction is executed at
+ EL0, EL1 or EL2 unless the instruction is trapped to EL1 as
+ a result of the AP_CPACR_EL1[FPEN] bit.
+ * When AP_HCR_EL2[TGE] == 1, when the instruction is executed at
+ EL0 or EL2.
+
+ 0x1 = This field value causes any instructions executed at EL0 that use
+ the registers associated with Floating Point or Advanced SIMD
+ execution to be trapped when AP_HCR_EL2[TGE]==1 only. It does not
+ cause any instruction executed at EL1 or EL2 to be trapped and
+ it does not cause any instruction to be trapped when AP_HCR_EL2[TGE]==0.
+
+ 0x2 = This field value causes any instructions that use the registers
+ associated with Floating Point and Advanced SIMD execution to be
+ trapped in the following cases:
+ * When AP_HCR_EL2[TGE] == 0, when the instruction is executed at
+ EL0, EL1 or EL2 unless the instruction is trapped to EL2 as
+ a result of the AP_CPACR_EL1[FPEN] bit.
+ * When AP_HCR_EL2[TGE] == 1, when the instruction is executed at
+ EL0 or EL2.
+
+ 0x3 = This field value does not cause any instruction to be trapped. */
+ uint32_t reserved_22_27 : 6;
+ uint32_t reserved_28 : 1;
+ uint32_t reserved_29_30 : 2;
+ uint32_t tcpac : 1; /**< [ 31: 31](R/W) This causes a direct access to CPACR or AP_CPACR_EL1 from EL1 to
+ trap to EL2. When AP_HCR_EL2[TGE] == 0:
+ 0 = Does not cause access to CPACR or AP_CPACR_EL1 to be trapped.
+ 1 = Causes access to CPACR or AP_CPACR_EL1 to be trapped.
+
+ When AP_HCR_EL2[TGE] == 1, this bit is ignored by hardware and
+ does not cause access to the AP_CPACR_EL1 to be trapped. */
+#endif /* Word 0 - End */
+ } cn;
+};
+typedef union bdk_ap_cptr_el2_e2h bdk_ap_cptr_el2_e2h_t;
+
+#define BDK_AP_CPTR_EL2_E2H BDK_AP_CPTR_EL2_E2H_FUNC()
+static inline uint64_t BDK_AP_CPTR_EL2_E2H_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CPTR_EL2_E2H_FUNC(void)
+{
+ return 0x30401010210ll;
+}
+
+#define typedef_BDK_AP_CPTR_EL2_E2H bdk_ap_cptr_el2_e2h_t
+#define bustype_BDK_AP_CPTR_EL2_E2H BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CPTR_EL2_E2H "AP_CPTR_EL2_E2H"
+#define busnum_BDK_AP_CPTR_EL2_E2H 0
+#define arguments_BDK_AP_CPTR_EL2_E2H -1,-1,-1,-1
+
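+/* Editor's sketch (hedged): the constants returned by the *_FUNC()
+ * helpers in this file appear to pack the MRS/MSR encoding one field per
+ * byte -- op0, op1, CRn, CRm, op2 -- plus a low variant byte (0x10 above
+ * seems to mark the E2H form of AP_CPTR_EL2). The decoder below is
+ * illustrative only; the layout is inferred from the values in this
+ * file, not from a documented BDK API.
+ */
+static inline void bdk_ap_sysreg_decode(uint64_t key, unsigned *op0,
+                                        unsigned *op1, unsigned *crn,
+                                        unsigned *crm, unsigned *op2)
+{
+    *op0 = (key >> 40) & 0xff; /* 0x30401010210: op0 = 3 */
+    *op1 = (key >> 32) & 0xff; /* op1 = 4 (EL2 encoding space) */
+    *crn = (key >> 24) & 0xff; /* CRn = 1 */
+    *crm = (key >> 16) & 0xff; /* CRm = 1 */
+    *op2 = (key >>  8) & 0xff; /* op2 = 2 */
+}
+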
+/**
+ * Register (SYSREG) ap_cptr_el3
+ *
+ * AP Architectural Feature Trap EL3 Register
+ * Controls trapping to EL3 of access to AP_CPACR_EL1, Trace
+ * functionality and registers associated with Floating Point and
+ * Advanced SIMD execution. Also controls EL3 access to this
+ * functionality.
+ */
+union bdk_ap_cptr_el3
+{
+ uint32_t u;
+ struct bdk_ap_cptr_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t tcpac : 1; /**< [ 31: 31](R/W) This causes a direct access to the AP_CPACR_EL1 from EL1 or the
+ AP_CPTR_EL2 from EL2 to trap to EL3 unless it is trapped at EL2.
+
+ 0 = Does not cause access to the AP_CPACR_EL1 or AP_CPTR_EL2 to be
+ trapped.
+ 1 = Causes access to the AP_CPACR_EL1 or AP_CPTR_EL2 to be trapped. */
+ uint32_t reserved_11_30 : 20;
+ uint32_t tfp : 1; /**< [ 10: 10](R/W) This causes instructions that access the registers associated
+ with Floating Point and Advanced SIMD execution to trap to EL3
+ when executed from any Exception level, unless trapped to EL1
+ or EL2.
+ 0 = Does not cause any instruction to be trapped.
+ 1 = Causes any instructions that use the registers associated with
+ Floating Point and Advanced SIMD execution to be trapped. */
+ uint32_t reserved_0_9 : 10;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_9 : 10;
+ uint32_t tfp : 1; /**< [ 10: 10](R/W) This causes instructions that access the registers associated
+ with Floating Point and Advanced SIMD execution to trap to EL3
+ when executed from any Exception level, unless trapped to EL1
+ or EL2.
+ 0 = Does not cause any instruction to be trapped.
+ 1 = Causes any instructions that use the registers associated with
+ Floating Point and Advanced SIMD execution to be trapped. */
+ uint32_t reserved_11_30 : 20;
+ uint32_t tcpac : 1; /**< [ 31: 31](R/W) This causes a direct access to the AP_CPACR_EL1 from EL1 or the
+ AP_CPTR_EL2 from EL2 to trap to EL3 unless it is trapped at EL2.
+
+ 0 = Does not cause access to the AP_CPACR_EL1 or AP_CPTR_EL2 to be
+ trapped.
+ 1 = Causes access to the AP_CPACR_EL1 or AP_CPTR_EL2 to be trapped. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_cptr_el3_cn
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t tcpac : 1; /**< [ 31: 31](R/W) This causes a direct access to the AP_CPACR_EL1 from EL1 or the
+ AP_CPTR_EL2 from EL2 to trap to EL3 unless it is trapped at EL2.
+
+ 0 = Does not cause access to the AP_CPACR_EL1 or AP_CPTR_EL2 to be
+ trapped.
+ 1 = Causes access to the AP_CPACR_EL1 or AP_CPTR_EL2 to be trapped. */
+ uint32_t reserved_21_30 : 10;
+ uint32_t reserved_20 : 1;
+ uint32_t reserved_11_19 : 9;
+ uint32_t tfp : 1; /**< [ 10: 10](R/W) This causes instructions that access the registers associated
+ with Floating Point and Advanced SIMD execution to trap to EL3
+ when executed from any Exception level, unless trapped to EL1
+ or EL2.
+ 0 = Does not cause any instruction to be trapped.
+ 1 = Causes any instructions that use the registers associated with
+ Floating Point and Advanced SIMD execution to be trapped. */
+ uint32_t reserved_0_9 : 10;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_9 : 10;
+ uint32_t tfp : 1; /**< [ 10: 10](R/W) This causes instructions that access the registers associated
+ with Floating Point and Advanced SIMD execution to trap to EL3
+ when executed from any Exception level, unless trapped to EL1
+ or EL2.
+ 0 = Does not cause any instruction to be trapped.
+ 1 = Causes any instructions that use the registers associated with
+ Floating Point and Advanced SIMD execution to be trapped. */
+ uint32_t reserved_11_19 : 9;
+ uint32_t reserved_20 : 1;
+ uint32_t reserved_21_30 : 10;
+ uint32_t tcpac : 1; /**< [ 31: 31](R/W) This causes a direct access to the AP_CPACR_EL1 from EL1 or the
+ AP_CPTR_EL2 from EL2 to trap to EL3 unless it is trapped at EL2.
+
+ 0 = Does not cause access to the AP_CPACR_EL1 or AP_CPTR_EL2 to be
+ trapped.
+ 1 = Causes access to the AP_CPACR_EL1 or AP_CPTR_EL2 to be trapped. */
+#endif /* Word 0 - End */
+ } cn;
+};
+typedef union bdk_ap_cptr_el3 bdk_ap_cptr_el3_t;
+
+#define BDK_AP_CPTR_EL3 BDK_AP_CPTR_EL3_FUNC()
+static inline uint64_t BDK_AP_CPTR_EL3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CPTR_EL3_FUNC(void)
+{
+ return 0x30601010200ll;
+}
+
+#define typedef_BDK_AP_CPTR_EL3 bdk_ap_cptr_el3_t
+#define bustype_BDK_AP_CPTR_EL3 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CPTR_EL3 "AP_CPTR_EL3"
+#define busnum_BDK_AP_CPTR_EL3 0
+#define arguments_BDK_AP_CPTR_EL3 -1,-1,-1,-1
+
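+/* Hedged example: a minimal sketch of enabling FP/Advanced SIMD at all
+ * exception levels by clearing AP_CPTR_EL3[TFP] (and [TCPAC]) from EL3.
+ * It uses the union type above with a plain ARMv8 "mrs/msr cptr_el3"
+ * sequence rather than any BDK accessor, whose exact form is not
+ * restated here.
+ */
+static inline void bdk_ap_cptr_el3_enable_fp(void)
+{
+    uint64_t v;
+    bdk_ap_cptr_el3_t cptr;
+    asm volatile("mrs %0, cptr_el3" : "=r" (v));
+    cptr.u = (uint32_t)v;
+    cptr.s.tfp = 0;   /* stop trapping FP/SIMD register accesses to EL3 */
+    cptr.s.tcpac = 0; /* stop trapping AP_CPACR_EL1/AP_CPTR_EL2 accesses */
+    v = cptr.u;
+    asm volatile("msr cptr_el3, %0" : : "r" (v));
+    asm volatile("isb");
+}
+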
+/**
+ * Register (SYSREG) ap_csselr_el1
+ *
+ * AP Cache Size Selection Register
+ * Selects the current Cache Size ID Register, AP_CCSIDR_EL1, by
+ * specifying the required cache level and the cache type (either
+ * instruction or data cache).
+ */
+union bdk_ap_csselr_el1
+{
+ uint32_t u;
+ struct bdk_ap_csselr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_4_31 : 28;
+ uint32_t level : 3; /**< [ 3: 1](R/W) Cache level of required cache. */
+ uint32_t ind : 1; /**< [ 0: 0](R/W) Instruction not Data bit.
+ 0 = Data or unified cache.
+ 1 = Instruction cache. */
+#else /* Word 0 - Little Endian */
+ uint32_t ind : 1; /**< [ 0: 0](R/W) Instruction not Data bit.
+ 0 = Data or unified cache.
+ 1 = Instruction cache. */
+ uint32_t level : 3; /**< [ 3: 1](R/W) Cache level of required cache. */
+ uint32_t reserved_4_31 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_csselr_el1_s cn; */
+};
+typedef union bdk_ap_csselr_el1 bdk_ap_csselr_el1_t;
+
+#define BDK_AP_CSSELR_EL1 BDK_AP_CSSELR_EL1_FUNC()
+static inline uint64_t BDK_AP_CSSELR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CSSELR_EL1_FUNC(void)
+{
+ return 0x30200000000ll;
+}
+
+#define typedef_BDK_AP_CSSELR_EL1 bdk_ap_csselr_el1_t
+#define bustype_BDK_AP_CSSELR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CSSELR_EL1 "AP_CSSELR_EL1"
+#define busnum_BDK_AP_CSSELR_EL1 0
+#define arguments_BDK_AP_CSSELR_EL1 -1,-1,-1,-1
+
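+/* Hedged example: the standard ARMv8 sequence for using AP_CSSELR_EL1 --
+ * write the level/type selection, synchronize with an ISB, then read
+ * AP_CCSIDR_EL1 for that cache's geometry. A sketch of generic
+ * architectural behavior, not a restatement of a BDK helper.
+ */
+static inline uint64_t bdk_ap_read_ccsidr(unsigned level, unsigned icache)
+{
+    /* [3:1] = cache level (0 selects L1), [0] = 1 for instruction cache */
+    uint64_t csselr = ((uint64_t)(level & 7) << 1) | (icache & 1);
+    uint64_t ccsidr;
+    asm volatile("msr csselr_el1, %0" : : "r" (csselr));
+    asm volatile("isb"); /* make the selection visible to the MRS below */
+    asm volatile("mrs %0, ccsidr_el1" : "=r" (ccsidr));
+    return ccsidr;
+}
+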
+/**
+ * Register (SYSREG) ap_ctr_el0
+ *
+ * AP Cache Type Register
+ * This register provides information about the architecture of the caches.
+ */
+union bdk_ap_ctr_el0
+{
+ uint32_t u;
+ struct bdk_ap_ctr_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t rsvd_31 : 1; /**< [ 31: 31](RO) Reserved 1. */
+ uint32_t reserved_28_30 : 3;
+ uint32_t cwg : 4; /**< [ 27: 24](RO) Cache writeback granule. Log2 of the number of
+ words of the maximum size of memory that can be overwritten as
+ a result of the eviction of a cache entry that has had a
+ memory location in it modified.
+
+ The architectural maximum of 512 words (2KB) must be assumed.
+
+ The cache writeback granule can be determined from maximum
+ cache line size encoded in the Cache Size ID Registers. */
+ uint32_t erg : 4; /**< [ 23: 20](RO) Exclusives reservation granule. Log2 of the number
+ of words of the maximum size of the reservation granule that
+ has been implemented for the Load-Exclusive and
+ Store-Exclusive instructions. */
+ uint32_t dminline : 4; /**< [ 19: 16](RO) Log2 of the number of words in the smallest cache
+ line of all the data caches and unified caches that are
+ controlled by the PE.
+
+ For CNXXXX, 128 bytes. */
+ uint32_t l1ip : 2; /**< [ 15: 14](RO) Level 1 instruction cache policy. Indicates the indexing and
+ tagging policy for the L1 instruction cache.
+
+ 0x1 = ASID-tagged virtual index, virtual tag (AIVIVT).
+ 0x2 = Virtual index, physical tag (VIPT).
+ 0x3 = Physical index, physical tag (PIPT). */
+ uint32_t reserved_4_13 : 10;
+ uint32_t iminline : 4; /**< [ 3: 0](RO) Log2 of the number of words in the smallest cache line of all the instruction
+ caches that are controlled by the PE.
+
+ For CNXXXX, 128 bytes. */
+#else /* Word 0 - Little Endian */
+ uint32_t iminline : 4; /**< [ 3: 0](RO) Log2 of the number of words in the smallest cache line of all the instruction
+ caches that are controlled by the PE.
+
+ For CNXXXX, 128 bytes. */
+ uint32_t reserved_4_13 : 10;
+ uint32_t l1ip : 2; /**< [ 15: 14](RO) Level 1 instruction cache policy. Indicates the indexing and
+ tagging policy for the L1 instruction cache.
+
+ 0x1 = ASID-tagged virtual index, virtual tag (AIVIVT).
+ 0x2 = Virtual index, physical tag (VIPT).
+ 0x3 = Physical index, physical tag (PIPT). */
+ uint32_t dminline : 4; /**< [ 19: 16](RO) Log2 of the number of words in the smallest cache
+ line of all the data caches and unified caches that are
+ controlled by the PE.
+
+ For CNXXXX, 128 bytes. */
+ uint32_t erg : 4; /**< [ 23: 20](RO) Exclusives reservation granule. Log2 of the number
+ of words of the maximum size of the reservation granule that
+ has been implemented for the Load-Exclusive and
+ Store-Exclusive instructions. */
+ uint32_t cwg : 4; /**< [ 27: 24](RO) Cache writeback granule. Log2 of the number of
+ words of the maximum size of memory that can be overwritten as
+ a result of the eviction of a cache entry that has had a
+ memory location in it modified.
+
+ The architectural maximum of 512 words (2KB) must be assumed.
+
+ The cache writeback granule can be determined from maximum
+ cache line size encoded in the Cache Size ID Registers. */
+ uint32_t reserved_28_30 : 3;
+ uint32_t rsvd_31 : 1; /**< [ 31: 31](RO) Reserved 1. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_ctr_el0_s cn; */
+};
+typedef union bdk_ap_ctr_el0 bdk_ap_ctr_el0_t;
+
+#define BDK_AP_CTR_EL0 BDK_AP_CTR_EL0_FUNC()
+static inline uint64_t BDK_AP_CTR_EL0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CTR_EL0_FUNC(void)
+{
+ return 0x30300000100ll;
+}
+
+#define typedef_BDK_AP_CTR_EL0 bdk_ap_ctr_el0_t
+#define bustype_BDK_AP_CTR_EL0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CTR_EL0 "AP_CTR_EL0"
+#define busnum_BDK_AP_CTR_EL0 0
+#define arguments_BDK_AP_CTR_EL0 -1,-1,-1,-1
+
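+/* Hedged example: deriving cache-maintenance strides from AP_CTR_EL0.
+ * IMINLINE and DMINLINE are log2 of a word (4-byte) count, so the line
+ * size in bytes is 4 << field; the 128-byte CNXXXX lines noted above
+ * correspond to a field value of 5. Minimal sketch, no BDK accessor
+ * assumed.
+ */
+static inline unsigned bdk_ap_dcache_line_bytes(void)
+{
+    uint64_t ctr;
+    asm volatile("mrs %0, ctr_el0" : "=r" (ctr));
+    return 4u << ((ctr >> 16) & 0xf); /* DMINLINE, bits [19:16] */
+}
+static inline unsigned bdk_ap_icache_line_bytes(void)
+{
+    uint64_t ctr;
+    asm volatile("mrs %0, ctr_el0" : "=r" (ctr));
+    return 4u << (ctr & 0xf); /* IMINLINE, bits [3:0] */
+}
+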
+/**
+ * Register (SYSREG) ap_currentel
+ *
+ * AP Current Exception Level Register
+ * Holds the current Exception level.
+ */
+union bdk_ap_currentel
+{
+ uint32_t u;
+ struct bdk_ap_currentel_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_4_31 : 28;
+ uint32_t el : 2; /**< [ 3: 2](RO) Current Exception level.
+ 0x0 = EL0.
+ 0x1 = EL1.
+ 0x2 = EL2.
+ 0x3 = EL3. */
+ uint32_t reserved_0_1 : 2;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_1 : 2;
+ uint32_t el : 2; /**< [ 3: 2](RO) Current Exception level.
+ 0x0 = EL0.
+ 0x1 = EL1.
+ 0x2 = EL2.
+ 0x3 = EL3. */
+ uint32_t reserved_4_31 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_currentel_s cn; */
+};
+typedef union bdk_ap_currentel bdk_ap_currentel_t;
+
+#define BDK_AP_CURRENTEL BDK_AP_CURRENTEL_FUNC()
+static inline uint64_t BDK_AP_CURRENTEL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CURRENTEL_FUNC(void)
+{
+ return 0x30004020200ll;
+}
+
+#define typedef_BDK_AP_CURRENTEL bdk_ap_currentel_t
+#define bustype_BDK_AP_CURRENTEL BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CURRENTEL "AP_CURRENTEL"
+#define busnum_BDK_AP_CURRENTEL 0
+#define arguments_BDK_AP_CURRENTEL -1,-1,-1,-1
+
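+/* Hedged example: reading the current exception level. Per the layout
+ * above, the EL sits in bits [3:2] of AP_CURRENTEL, so the MRS result is
+ * shifted and masked; readable at EL1 and above.
+ */
+static inline unsigned bdk_ap_current_el(void)
+{
+    uint64_t v;
+    asm volatile("mrs %0, currentel" : "=r" (v));
+    return (v >> 2) & 3; /* 0x0 = EL0 ... 0x3 = EL3 */
+}
+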
+/**
+ * Register (SYSREG) ap_cvm_access_el1
+ *
+ * AP Cavium Access EL1 Register
+ * This register controls trapping on register accesses.
+ *
+ * Read/write access at EL1, EL2 and EL3.
+ *
+ * Note that AP_HCR_EL2[TIDCP] can also prevent all CVM* access at EL1
+ * and below and takes priority over AP_CVM_ACCESS_EL1.
+ *
+ * Note that AP_CVM_ACCESS_EL1 can grant access to EL0 for AP_CVM_*_EL1
+ * registers. This is nonstandard. At some point AP_CVM_ACCESS_EL1 may be
+ * deprecated.
+ *
+ * A 1 in the appropriate bit in the AP_CVM_ACCESS_ELn register prevents
+ * any access at lower exception levels.
+ *
+ * Internal:
+ * If access is denied at multiple exception levels then the
+ * trap occurs at the lowest. This is similar to ARM's
+ * AP_CPACR_EL1/AP_CPTR_EL2/AP_CPTR_EL3.
+ */
+union bdk_ap_cvm_access_el1
+{
+ uint64_t u;
+ struct bdk_ap_cvm_access_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t cvm_cache : 1; /**< [ 8: 8](R/W) A Cvm_Cache instruction with the exception level lower than 1 traps to EL1 unless
+ AP_HCR_EL2[TGE] && !AP_SCR_EL3[NS], then EL2. */
+ uint64_t cvm_pn : 1; /**< [ 7: 7](R/W) A read or write operation to AP_CVM_PN_EL1 with the exception level lower than 1
+ traps to EL1 unless AP_HCR_EL2[TGE] && AP_SCR_EL3[NS], then EL2. */
+ uint64_t cvm_statprof : 1; /**< [ 6: 6](R/W) A read or write operation to AP_CVM_STATPROFCTL_EL1 or AP_CVM_STATPROFCMP_EL1
+ with the exception level lower than 1 traps to EL1 unless
+ AP_HCR_EL2[TGE] && AP_SCR_EL3[NS], then EL2. */
+ uint64_t cvm_evattid : 1; /**< [ 5: 5](R/W) A read operation to AP_CVM_EVATTID with the exception level lower than 1 traps
+ to EL1 unless AP_HCR_EL2[TGE] && !AP_SCR_EL3[NS], then EL2. */
+ uint64_t cvm_i_d_cache : 1; /**< [ 4: 4](R/W) A read or write operation to AP_CVM_Icache* or AP_CVM_Dcache* with the exception
+ level lower than 1 traps to EL1 unless AP_HCR_EL2[TGE] && !AP_SCR_EL3[NS], then
+ EL2. */
+ uint64_t cvm_err : 1; /**< [ 3: 3](R/W) A read or write operation to AP_CVM_ERR* with the exception level lower than 1
+ traps to EL1 unless AP_HCR_EL2[TGE] && !AP_SCR_EL3[NS], then EL2. */
+ uint64_t cvm_bist : 1; /**< [ 2: 2](R/W) A read or write operation to AP_CVM_BIST* with the exception level lower than 1
+ traps to EL1 unless AP_HCR_EL2[TGE] && !AP_SCR_EL3[NS], then EL2. */
+ uint64_t powthrottle : 1; /**< [ 1: 1](R/W) A read or write operation to PowThrottle with the exception level lower than 1
+ traps to EL1 unless AP_HCR_EL2[TGE] && !AP_SCR_EL3[NS], then EL2. */
+ uint64_t cvm_ctl : 1; /**< [ 0: 0](R/W) A read or write operation to AP_CVM_CTL or AP_CVM_MEMCTL with the exception
+ level lower than 1 traps to EL1 unless AP_HCR_EL2[TGE] && !AP_SCR_EL3[NS], then
+ EL2. */
+#else /* Word 0 - Little Endian */
+ uint64_t cvm_ctl : 1; /**< [ 0: 0](R/W) A read or write operation to AP_CVM_CTL or AP_CVM_MEMCTL with the exception
+ level lower than 1 traps to EL1 unless AP_HCR_EL2[TGE] && !AP_SCR_EL3[NS], then
+ EL2. */
+ uint64_t powthrottle : 1; /**< [ 1: 1](R/W) A read or write operation to PowThrottle with the exception level lower than 1
+ traps to EL1 unless AP_HCR_EL2[TGE] && !AP_SCR_EL3[NS], then EL2. */
+ uint64_t cvm_bist : 1; /**< [ 2: 2](R/W) A read or write operation to AP_CVM_BIST* with the exception level lower than 1
+ traps to EL1 unless AP_HCR_EL2[TGE] && !AP_SCR_EL3[NS], then EL2. */
+ uint64_t cvm_err : 1; /**< [ 3: 3](R/W) A read or write operation to AP_CVM_ERR* with the exception level lower than 1
+ traps to EL1 unless AP_HCR_EL2[TGE] && !AP_SCR_EL3[NS], then EL2. */
+ uint64_t cvm_i_d_cache : 1; /**< [ 4: 4](R/W) A read or write operation to AP_CVM_Icache* or AP_CVM_Dcache* with the exception
+ level lower than 1 traps to EL1 unless AP_HCR_EL2[TGE] && !AP_SCR_EL3[NS], then
+ EL2. */
+ uint64_t cvm_evattid : 1; /**< [ 5: 5](R/W) A read operation to AP_CVM_EVATTID with the exception level lower than 1 traps
+ to EL1 unless AP_HCR_EL2[TGE] && !AP_SCR_EL3[NS], then EL2. */
+ uint64_t cvm_statprof : 1; /**< [ 6: 6](R/W) A read or write operation to AP_CVM_STATPROFCTL_EL1 or AP_CVM_STATPROFCMP_EL1
+ with the exception level lower than 1 traps to EL1 unless
+ AP_HCR_EL2[TGE] && AP_SCR_EL3[NS], then EL2. */
+ uint64_t cvm_pn : 1; /**< [ 7: 7](R/W) A read or write operation to AP_CVM_PN_EL1 with the exception level lower than 1
+ traps to EL1 unless AP_HCR_EL2[TGE] && AP_SCR_EL3[NS], then EL2. */
+ uint64_t cvm_cache : 1; /**< [ 8: 8](R/W) A Cvm_Cache instruction with the exception level lower than 1 traps to EL1 unless
+ AP_HCR_EL2[TGE] && !AP_SCR_EL3[NS], then EL2. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_cvm_access_el1_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t cvm_cache : 1; /**< [ 8: 8](R/W) A Cvm_Cache instruction with the exception level lower than 1 traps to EL1 unless
+ AP_HCR_EL2[TGE] && !AP_SCR_EL3[NS], then EL2. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t cvm_evattid : 1; /**< [ 5: 5](R/W) A read operation to AP_CVM_EVATTID with the exception level lower than 1 traps
+ to EL1 unless AP_HCR_EL2[TGE] && !AP_SCR_EL3[NS], then EL2. */
+ uint64_t cvm_i_d_cache : 1; /**< [ 4: 4](R/W) A read or write operation to AP_CVM_Icache* or AP_CVM_Dcache* with the exception
+ level lower than 1 traps to EL1 unless AP_HCR_EL2[TGE] && !AP_SCR_EL3[NS], then
+ EL2. */
+ uint64_t cvm_err : 1; /**< [ 3: 3](R/W) A read or write operation to AP_CVM_ERR* with the exception level lower than 1
+ traps to EL1 unless AP_HCR_EL2[TGE] && !AP_SCR_EL3[NS], then EL2. */
+ uint64_t cvm_bist : 1; /**< [ 2: 2](R/W) A read or write operation to AP_CVM_BIST* with the exception level lower than 1
+ traps to EL1 unless AP_HCR_EL2[TGE] && !AP_SCR_EL3[NS], then EL2. */
+ uint64_t powthrottle : 1; /**< [ 1: 1](R/W) A read or write operation to PowThrottle with the exception level lower than 1
+ traps to EL1 unless AP_HCR_EL2[TGE] && !AP_SCR_EL3[NS], then EL2. */
+ uint64_t cvm_ctl : 1; /**< [ 0: 0](R/W) A read or write operation to AP_CVM_CTL or AP_CVM_MEMCTL with the exception
+ level lower than 1 traps to EL1 unless AP_HCR_EL2[TGE] && !AP_SCR_EL3[NS], then
+ EL2. */
+#else /* Word 0 - Little Endian */
+ uint64_t cvm_ctl : 1; /**< [ 0: 0](R/W) A read or write operation to AP_CVM_CTL or AP_CVM_MEMCTL with the exception
+ level lower than 1 traps to EL1 unless AP_HCR_EL2[TGE] && !AP_SCR_EL3[NS], then
+ EL2. */
+ uint64_t powthrottle : 1; /**< [ 1: 1](R/W) A read or write operation to PowThrottle with the exception level lower than 1
+ traps to EL1 unless AP_HCR_EL2[TGE] && !AP_SCR_EL3[NS], then EL2. */
+ uint64_t cvm_bist : 1; /**< [ 2: 2](R/W) A read or write operation to AP_CVM_BIST* with the exception level lower than 1
+ traps to EL1 unless AP_HCR_EL2[TGE] && !AP_SCR_EL3[NS], then EL2. */
+ uint64_t cvm_err : 1; /**< [ 3: 3](R/W) A read or write operation to AP_CVM_ERR* with the exception level lower than 1
+ traps to EL1 unless AP_HCR_EL2[TGE] && !AP_SCR_EL3[NS], then EL2. */
+ uint64_t cvm_i_d_cache : 1; /**< [ 4: 4](R/W) A read or write operation to AP_CVM_Icache* or AP_CVM_Dcache* with the exception
+ level lower than 1 traps to EL1 unless AP_HCR_EL2[TGE] && !AP_SCR_EL3[NS], then
+ EL2. */
+ uint64_t cvm_evattid : 1; /**< [ 5: 5](R/W) A read operation to AP_CVM_EVATTID with the exception level lower than 1 traps
+ to EL1 unless AP_HCR_EL2[TGE] && !AP_SCR_EL3[NS], then EL2. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t cvm_cache : 1; /**< [ 8: 8](R/W) A Cvm_Cache instruction with the exception level lower than 1 traps to EL1 unless
+ AP_HCR_EL2[TGE] && !AP_SCR_EL3[NS], then EL2. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_ap_cvm_access_el1_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t cvm_cache : 1; /**< [ 8: 8](R/W) A Cvm_Cache instruction with the exception level lower than 1 traps to EL1 unless
+ AP_HCR_EL2[TGE] && AP_SCR_EL3[NS], then EL2. */
+ uint64_t cvm_pn : 1; /**< [ 7: 7](R/W) A read or write operation to AP_CVM_PN_EL1 with the exception level lower than 1
+ traps to EL1 unless AP_HCR_EL2[TGE] && AP_SCR_EL3[NS], then EL2. */
+ uint64_t cvm_statprof : 1; /**< [ 6: 6](R/W) A read or write operation to AP_CVM_STATPROFCTL_EL1 or AP_CVM_STATPROFCMP_EL1
+ with the exception level lower than 1 traps to EL1 unless
+ AP_HCR_EL2[TGE] && AP_SCR_EL3[NS], then EL2. */
+ uint64_t cvm_evattid : 1; /**< [ 5: 5](R/W) A read operation to AP_CVM_EVATTID with the exception level lower than 1 traps
+ to EL1 unless AP_HCR_EL2[TGE] && AP_SCR_EL3[NS], then EL2. */
+ uint64_t cvm_i_d_cache : 1; /**< [ 4: 4](R/W) A read or write operation to AP_CVM_Icache* or AP_CVM_Dcache* with the exception
+ level lower than 1 traps to EL1 unless AP_HCR_EL2[TGE] && AP_SCR_EL3[NS], then
+ EL2. */
+ uint64_t cvm_err : 1; /**< [ 3: 3](R/W) A read or write operation to AP_CVM_ERR* with the exception level lower than 1
+ traps to EL1 unless AP_HCR_EL2[TGE] && AP_SCR_EL3[NS], then EL2. */
+ uint64_t cvm_bist : 1; /**< [ 2: 2](R/W) A read or write operation to AP_CVM_BIST* with the exception level lower than 1
+ traps to EL1 unless AP_HCR_EL2[TGE] && AP_SCR_EL3[NS], then EL2. */
+ uint64_t powthrottle : 1; /**< [ 1: 1](R/W) A read or write operation to PowThrottle with the exception level lower than 1
+ traps to EL1 unless AP_HCR_EL2[TGE] && AP_SCR_EL3[NS], then EL2. */
+ uint64_t cvm_ctl : 1; /**< [ 0: 0](R/W) A read or write operation to AP_CVM_CTL or AP_CVM_MEMCTL with the exception
+ level lower than 1 traps to EL1 unless AP_HCR_EL2[TGE] && AP_SCR_EL3[NS], then
+ EL2. */
+#else /* Word 0 - Little Endian */
+ uint64_t cvm_ctl : 1; /**< [ 0: 0](R/W) A read or write operation to AP_CVM_CTL or AP_CVM_MEMCTL with the exception
+ level lower than 1 traps to EL1 unless AP_HCR_EL2[TGE] && AP_SCR_EL3[NS], then
+ EL2. */
+ uint64_t powthrottle : 1; /**< [ 1: 1](R/W) A read or write operation to PowThrottle with the exception level lower than 1
+ traps to EL1 unless AP_HCR_EL2[TGE] && AP_SCR_EL3[NS], then EL2. */
+ uint64_t cvm_bist : 1; /**< [ 2: 2](R/W) A read or write operation to AP_CVM_BIST* with the exception level lower than 1
+ traps to EL1 unless AP_HCR_EL2[TGE] && AP_SCR_EL3[NS], then EL2. */
+ uint64_t cvm_err : 1; /**< [ 3: 3](R/W) A read or write operation to AP_CVM_ERR* with the exception level lower than 1
+ traps to EL1 unless AP_HCR_EL2[TGE] && AP_SCR_EL3[NS], then EL2. */
+ uint64_t cvm_i_d_cache : 1; /**< [ 4: 4](R/W) A read or write operation to AP_CVM_Icache* or AP_CVM_Dcache* with the exception
+ level lower than 1 traps to EL1 unless AP_HCR_EL2[TGE] && AP_SCR_EL3[NS], then
+ EL2. */
+ uint64_t cvm_evattid : 1; /**< [ 5: 5](R/W) A read operation to AP_CVM_EVATTID with the exception level lower than 1 traps
+ to EL1 unless AP_HCR_EL2[TGE] && AP_SCR_EL3[NS], then EL2. */
+ uint64_t cvm_statprof : 1; /**< [ 6: 6](R/W) A read or write operation to AP_CVM_STATPROFCTL_EL1 or AP_CVM_STATPROFCMP_EL1
+ with the exception level lower than 1 traps to EL1 unless
+ AP_HCR_EL2[TGE] && AP_SCR_EL3[NS], then EL2. */
+ uint64_t cvm_pn : 1; /**< [ 7: 7](R/W) A read or write operation to AP_CVM_PN_EL1 with the exception level lower than 1
+ traps to EL1 unless AP_HCR_EL2[TGE] && AP_SCR_EL3[NS], then EL2. */
+ uint64_t cvm_cache : 1; /**< [ 8: 8](R/W) A Cvm_Cache instruction with the exception level lower than 1 traps to EL1 unless
+ AP_HCR_EL2[TGE] && AP_SCR_EL3[NS], then EL2. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_ap_cvm_access_el1 bdk_ap_cvm_access_el1_t;
+
+#define BDK_AP_CVM_ACCESS_EL1 BDK_AP_CVM_ACCESS_EL1_FUNC()
+static inline uint64_t BDK_AP_CVM_ACCESS_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_ACCESS_EL1_FUNC(void)
+{
+ return 0x3000b000300ll;
+}
+
+#define typedef_BDK_AP_CVM_ACCESS_EL1 bdk_ap_cvm_access_el1_t
+#define bustype_BDK_AP_CVM_ACCESS_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_ACCESS_EL1 "AP_CVM_ACCESS_EL1"
+#define busnum_BDK_AP_CVM_ACCESS_EL1 0
+#define arguments_BDK_AP_CVM_ACCESS_EL1 -1,-1,-1,-1
+
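+/* Hedged sketch of the trap-priority rule in the Internal note above:
+ * when the same CVM_* access is denied by more than one
+ * AP_CVM_ACCESS_ELn register, the trap is taken to the lowest such EL.
+ * The denyN arguments stand for the relevant bit read from
+ * AP_CVM_ACCESS_EL1/EL2/EL3; the helper itself is illustrative only.
+ */
+static inline int bdk_ap_cvm_access_trap_el(int deny1, int deny2, int deny3)
+{
+    if (deny1)
+        return 1; /* AP_CVM_ACCESS_EL1 bit set: trap to EL1 */
+    if (deny2)
+        return 2; /* else AP_CVM_ACCESS_EL2 bit set: trap to EL2 */
+    if (deny3)
+        return 3; /* else AP_CVM_ACCESS_EL3 bit set: trap to EL3 */
+    return 0;     /* no bit set: the access is not trapped */
+}
+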
+/**
+ * Register (SYSREG) ap_cvm_access_el2
+ *
+ * AP Cavium Access EL2 Register
+ * This register controls trapping of accesses to Cavium registers. Read/write access at EL2 and EL3.
+ */
+union bdk_ap_cvm_access_el2
+{
+ uint64_t u;
+ struct bdk_ap_cvm_access_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t cvm_cache : 1; /**< [ 8: 8](R/W) A Cvm_Cache instruction with the exception level lower than 2 traps to EL2 unless
+ prohibited by AP_CVM_ACCESS_EL1 or AP_SCR_EL3[NS] = 0. */
+ uint64_t cvm_pn : 1; /**< [ 7: 7](R/W) A read or write operation to AP_CVM_PN_EL1 with the exception level lower than 2
+ traps to EL2 unless prohibited by AP_CVM_ACCESS_EL1 or AP_SCR_EL3[NS] = 0. */
+ uint64_t cvm_statprof : 1; /**< [ 6: 6](R/W) A read or write operation to AP_CVM_STATPROFCTL_EL1 or AP_CVM_STATPROFCMP_EL1
+ with the exception level lower than 2 traps to EL2 unless prohibited by
+ AP_CVM_ACCESS_EL1 or AP_SCR_EL3[NS] = 0. */
+ uint64_t cvm_evattid : 1; /**< [ 5: 5](R/W) A read operation to AP_CVM_EVATTID with the exception level lower than 2 traps
+ to EL2 unless prohibited by AP_CVM_ACCESS_EL1 or AP_SCR_EL3[NS] = 0. */
+ uint64_t cvm_i_d_cache : 1; /**< [ 4: 4](R/W) A read or write operation to AP_CVM_ICACHE* or AP_CVM_DCACHE* with the exception
+ level lower than 2 traps to EL2 unless prohibited by AP_CVM_ACCESS_EL1 or
+ AP_SCR_EL3[NS] = 0. */
+ uint64_t cvm_err : 1; /**< [ 3: 3](R/W) A read or write operation to AP_CVM_ERR* with the exception level lower than 2
+ traps to EL2 unless prohibited by AP_CVM_ACCESS_EL1 or AP_SCR_EL3[NS] = 0. */
+ uint64_t cvm_bist : 1; /**< [ 2: 2](R/W) A read or write operation to AP_CVM_BIST* with the exception level lower than 2
+ traps to EL2 unless prohibited by AP_CVM_ACCESS_EL1 or AP_SCR_EL3[NS] = 0. */
+ uint64_t powthrottle : 1; /**< [ 1: 1](R/W) A read or write operation to PowThrottle with the exception level lower than 2 traps to
+ EL2 unless prohibited by AP_CVM_ACCESS_EL1 or AP_SCR_EL3[NS] = 0. */
+ uint64_t cvm_ctl : 1; /**< [ 0: 0](R/W) A read or write operation to AP_CVM_CTL or AP_CVM_MEMCTL with the exception
+ level lower than 2 traps to EL2 unless prohibited by AP_CVM_ACCESS_EL1 or
+ AP_SCR_EL3[NS] = 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t cvm_ctl : 1; /**< [ 0: 0](R/W) A read or write operation to AP_CVM_CTL or AP_CVM_MEMCTL with the exception
+ level lower than 2 traps to EL2 unless prohibited by AP_CVM_ACCESS_EL1 or
+ AP_SCR_EL3[NS] = 0. */
+ uint64_t powthrottle : 1; /**< [ 1: 1](R/W) A read or write operation to PowThrottle with the exception level lower than 2 traps to
+ EL2 unless prohibited by AP_CVM_ACCESS_EL1 or AP_SCR_EL3[NS] = 0. */
+ uint64_t cvm_bist : 1; /**< [ 2: 2](R/W) A read or write operation to AP_CVM_BIST* with the exception level lower than 2
+ traps to EL2 unless prohibited by AP_CVM_ACCESS_EL1 or AP_SCR_EL3[NS] = 0. */
+ uint64_t cvm_err : 1; /**< [ 3: 3](R/W) A read or write operation to AP_CVM_ERR* with the exception level lower than 2
+ traps to EL2 unless prohibited by AP_CVM_ACCESS_EL1 or AP_SCR_EL3[NS] = 0. */
+ uint64_t cvm_i_d_cache : 1; /**< [ 4: 4](R/W) A read or write operation to AP_CVM_ICACHE* or AP_CVM_DCACHE* with the exception
+ level lower than 2 traps to EL2 unless prohibited by AP_CVM_ACCESS_EL1 or
+ AP_SCR_EL3[NS] = 0. */
+ uint64_t cvm_evattid : 1; /**< [ 5: 5](R/W) A read operation to AP_CVM_EVATTID with the exception level lower than 2 traps
+ to EL2 unless prohibited by AP_CVM_ACCESS_EL1 or AP_SCR_EL3[NS] = 0. */
+ uint64_t cvm_statprof : 1; /**< [ 6: 6](R/W) A read or write operation to AP_CVM_STATPROFCTL_EL1 or AP_CVM_STATPROFCMP_EL1
+ with the exception level lower than 2 traps to EL2 unless prohibited by
+ AP_CVM_ACCESS_EL1 or AP_SCR_EL3[NS] = 0. */
+ uint64_t cvm_pn : 1; /**< [ 7: 7](R/W) A read or write operation to AP_CVM_PN_EL1 with the exception level lower than 2
+ traps to EL2 unless prohibited by AP_CVM_ACCESS_EL1 or AP_SCR_EL3[NS] = 0. */
+ uint64_t cvm_cache : 1; /**< [ 8: 8](R/W) A Cvm_Cache instruction with the exception level lower than 2 traps to EL2 unless
+ prohibited by AP_CVM_ACCESS_EL1 or AP_SCR_EL3[NS] = 0. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_cvm_access_el2_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t cvm_cache : 1; /**< [ 8: 8](R/W) A Cvm_Cache instruction with the exception level lower than 2 traps to EL2 unless
+ prohibited by AP_CVM_ACCESS_EL1 or AP_SCR_EL3[NS] = 0. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t cvm_evattid : 1; /**< [ 5: 5](R/W) A read operation to AP_CVM_EVATTID with the exception level lower than 2 traps
+ to EL2 unless prohibited by AP_CVM_ACCESS_EL1 or AP_SCR_EL3[NS] = 0. */
+ uint64_t cvm_i_d_cache : 1; /**< [ 4: 4](R/W) A read or write operation to AP_CVM_ICACHE* or AP_CVM_DCACHE* with the exception
+ level lower than 2 traps to EL2 unless prohibited by AP_CVM_ACCESS_EL1 or
+ AP_SCR_EL3[NS] = 0. */
+ uint64_t cvm_err : 1; /**< [ 3: 3](R/W) A read or write operation to AP_CVM_ERR* with the exception level lower than 2
+ traps to EL2 unless prohibited by AP_CVM_ACCESS_EL1 or AP_SCR_EL3[NS] = 0. */
+ uint64_t cvm_bist : 1; /**< [ 2: 2](R/W) A read or write operation to AP_CVM_BIST* with the exception level lower than 2
+ traps to EL2 unless prohibited by AP_CVM_ACCESS_EL1 or AP_SCR_EL3[NS] = 0. */
+ uint64_t powthrottle : 1; /**< [ 1: 1](R/W) A read or write operation to PowThrottle with the exception level lower than 2 traps to
+ EL2 unless prohibited by AP_CVM_ACCESS_EL1 or AP_SCR_EL3[NS] = 0. */
+ uint64_t cvm_ctl : 1; /**< [ 0: 0](R/W) A read or write operation to AP_CVM_CTL or AP_CVM_MEMCTL with the exception
+ level lower than 2 traps to EL2 unless prohibited by AP_CVM_ACCESS_EL1 or
+ AP_SCR_EL3[NS] = 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t cvm_ctl : 1; /**< [ 0: 0](R/W) A read or write operation to AP_CVM_CTL or AP_CVM_MEMCTL with the exception
+ level lower than 2 traps to EL2 unless prohibited by AP_CVM_ACCESS_EL1 or
+ AP_SCR_EL3[NS] = 0. */
+ uint64_t powthrottle : 1; /**< [ 1: 1](R/W) A read or write operation to PowThrottle with the exception level lower than 2 traps to
+ EL2 unless prohibited by AP_CVM_ACCESS_EL1 or AP_SCR_EL3[NS] = 0. */
+ uint64_t cvm_bist : 1; /**< [ 2: 2](R/W) A read or write operation to AP_CVM_BIST* with the exception level lower than 2
+ traps to EL2 unless prohibited by AP_CVM_ACCESS_EL1 or AP_SCR_EL3[NS] = 0. */
+ uint64_t cvm_err : 1; /**< [ 3: 3](R/W) A read or write operation to AP_CVM_ERR* with the exception level lower than 2
+ traps to EL2 unless prohibited by AP_CVM_ACCESS_EL1 or AP_SCR_EL3[NS] = 0. */
+ uint64_t cvm_i_d_cache : 1; /**< [ 4: 4](R/W) A read or write operation to AP_CVM_ICACHE* or AP_CVM_DCACHE* with the exception
+ level lower than 2 traps to EL2 unless prohibited by AP_CVM_ACCESS_EL1 or
+ AP_SCR_EL3[NS] = 0. */
+ uint64_t cvm_evattid : 1; /**< [ 5: 5](R/W) A read operation to AP_CVM_EVATTID with the exception level lower than 2 traps
+ to EL2 unless prohibited by AP_CVM_ACCESS_EL1 or AP_SCR_EL3[NS] = 0. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t cvm_cache : 1; /**< [ 8: 8](R/W) A Cvm_Cache instruction with the exception level lower than 2 traps to EL2 unless
+ prohibited by AP_CVM_ACCESS_EL1 or AP_SCR_EL3[NS] = 0. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_ap_cvm_access_el2_s cn9; */
+};
+typedef union bdk_ap_cvm_access_el2 bdk_ap_cvm_access_el2_t;
+
+#define BDK_AP_CVM_ACCESS_EL2 BDK_AP_CVM_ACCESS_EL2_FUNC()
+static inline uint64_t BDK_AP_CVM_ACCESS_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_ACCESS_EL2_FUNC(void)
+{
+ return 0x3040b000300ll;
+}
+
+#define typedef_BDK_AP_CVM_ACCESS_EL2 bdk_ap_cvm_access_el2_t
+#define bustype_BDK_AP_CVM_ACCESS_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_ACCESS_EL2 "AP_CVM_ACCESS_EL2"
+#define busnum_BDK_AP_CVM_ACCESS_EL2 0
+#define arguments_BDK_AP_CVM_ACCESS_EL2 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_access_el3
+ *
+ * AP Cavium Access EL3 Register
+ * Internal:
+ * Software should expose the CvmCACHE instruction to EL2 or
+ * EL1 with extreme caution. Exposing this instruction to lower
+ * exception levels may allow nonsecure state to interfere with secure
+ * state, which would open a security hole.
+ */
+union bdk_ap_cvm_access_el3
+{
+ uint64_t u;
+ struct bdk_ap_cvm_access_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t cvm_cache : 1; /**< [ 8: 8](R/W) A Cvm_Cache instruction with the exception level lower than 3 traps to EL3. */
+ uint64_t cvm_pn : 1; /**< [ 7: 7](R/W) A read or write operation to AP_CVM_PN_EL1 with the exception level lower than 3
+ traps to EL3 unless prohibited by AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or
+ AP_HCR_EL2[TIDCP]. */
+ uint64_t cvm_statprof : 1; /**< [ 6: 6](R/W) A read or write operation to AP_CVM_STATPROFCTL_EL1 or AP_CVM_STATPROFCMP_EL1
+ with the exception level lower than 3 traps to EL3 unless prohibited by
+ AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or AP_HCR_EL2[TIDCP]. */
+ uint64_t cvm_evattid : 1; /**< [ 5: 5](R/W) A read or write operation to AP_CVM_EVATTID with the exception level lower than 3 traps to
+ EL3 unless prohibited by AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or AP_HCR_EL2[TIDCP]. */
+ uint64_t cvm_i_d_cache : 1; /**< [ 4: 4](R/W) A read or write operation to AP_CVM_ICACHE* or AP_CVM_DCACHE* with the exception level
+ lower
+ than 3 traps to EL3 unless prohibited by AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or
+ AP_HCR_EL2[TIDCP]. */
+ uint64_t cvm_err : 1; /**< [ 3: 3](R/W) A read or write operation to AP_CVM_ERR* with the exception level lower than 3 traps to
+ EL3
+ unless prohibited by AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or AP_HCR_EL2[TIDCP]. */
+ uint64_t cvm_bist : 1; /**< [ 2: 2](R/W) A read or write operation to AP_CVM_BIST* with the exception level lower than 3 traps to
+ EL3
+ unless prohibited by AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or AP_HCR_EL2[TIDCP]. */
+ uint64_t powthrottle : 1; /**< [ 1: 1](R/W) A read or write operation to PowThrottle with the exception level lower than 3 traps to
+ EL3 unless prohibited by AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or AP_HCR_EL2[TIDCP]. */
+ uint64_t cvm_ctl : 1; /**< [ 0: 0](R/W) A read or write operation to AP_CVM_CTL or AP_CVM_MEMCTL with the exception
+ level lower than 3 traps to EL3 unless prohibited by AP_CVM_ACCESS_EL2,
+ AP_CVM_ACCESS_EL1, or AP_HCR_EL2[TIDCP]. */
+#else /* Word 0 - Little Endian */
+ uint64_t cvm_ctl : 1; /**< [ 0: 0](R/W) A read or write operation to AP_CVM_CTL or AP_CVM_MEMCTL with the exception
+ level lower than 3 traps to EL3 unless prohibited by AP_CVM_ACCESS_EL2,
+ AP_CVM_ACCESS_EL1, or AP_HCR_EL2[TIDCP]. */
+ uint64_t powthrottle : 1; /**< [ 1: 1](R/W) A read or write operation to PowThrottle with the exception level lower than 3 traps to
+ EL3 unless prohibited by AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or AP_HCR_EL2[TIDCP]. */
+ uint64_t cvm_bist : 1; /**< [ 2: 2](R/W) A read or write operation to AP_CVM_BIST* with the exception level lower than 3 traps to
+ EL3
+ unless prohibited by AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or AP_HCR_EL2[TIDCP]. */
+ uint64_t cvm_err : 1; /**< [ 3: 3](R/W) A read or write operation to AP_CVM_ERR* with the exception level lower than 3 traps to
+ EL3
+ unless prohibited by AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or AP_HCR_EL2[TIDCP]. */
+ uint64_t cvm_i_d_cache : 1; /**< [ 4: 4](R/W) A read or write operation to AP_CVM_ICACHE* or AP_CVM_DCACHE* with the exception level
+ lower
+ than 3 traps to EL3 unless prohibited by AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or
+ AP_HCR_EL2[TIDCP]. */
+ uint64_t cvm_evattid : 1; /**< [ 5: 5](R/W) A read or write operation to AP_CVM_EVATTID with the exception level lower than 3 traps to
+ EL3 unless prohibited by AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or AP_HCR_EL2[TIDCP]. */
+ uint64_t cvm_statprof : 1; /**< [ 6: 6](R/W) A read or write operation to AP_CVM_STATPROFCTL_EL1 or AP_CVM_STATPROFCMP_EL1
+ with the exception level lower than 3 traps to EL3 unless prohibited by
+ AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or AP_HCR_EL2[TIDCP]. */
+ uint64_t cvm_pn : 1; /**< [ 7: 7](R/W) A read or write operation to AP_CVM_PN_EL1 with the exception level lower than 3
+ traps to EL3 unless prohibited by AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or
+ AP_HCR_EL2[TIDCP]. */
+ uint64_t cvm_cache : 1; /**< [ 8: 8](R/W) A Cvm_Cache instruction with the exception level lower than 3 traps to EL3. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_cvm_access_el3_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t cvm_cache : 1; /**< [ 8: 8](R/W) A Cvm_Cache instruction with the exception level lower than 3 traps to EL3. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t cvm_evattid : 1; /**< [ 5: 5](R/W) A read or write operation to AP_CVM_EVATTID with the exception level lower than 3 traps to
+ EL3 unless prohibited by AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or AP_HCR_EL2[TIDCP]. */
+ uint64_t cvm_i_d_cache : 1; /**< [ 4: 4](R/W) A read or write operation to AP_CVM_ICACHE* or AP_CVM_DCACHE* with the exception level
+ lower
+ than 3 traps to EL3 unless prohibited by AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or
+ AP_HCR_EL2[TIDCP]. */
+ uint64_t cvm_err : 1; /**< [ 3: 3](R/W) A read or write operation to AP_CVM_ERR* with the exception level lower than 3 traps to
+ EL3
+ unless prohibited by AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or AP_HCR_EL2[TIDCP]. */
+ uint64_t cvm_bist : 1; /**< [ 2: 2](R/W) A read or write operation to AP_CVM_BIST* with the exception level lower than 3 traps to
+ EL3
+ unless prohibited by AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or AP_HCR_EL2[TIDCP]. */
+ uint64_t powthrottle : 1; /**< [ 1: 1](R/W) A read or write operation to PowThrottle with the exception level lower than 3 traps to
+ EL3 unless prohibited by AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or AP_HCR_EL2[TIDCP]. */
+ uint64_t cvm_ctl : 1; /**< [ 0: 0](R/W) A read or write operation to AP_CVM_CTL or AP_CVM_MEMCTL with the exception
+ level lower than 3 traps to EL3 unless prohibited by AP_CVM_ACCESS_EL2,
+ AP_CVM_ACCESS_EL1, or AP_HCR_EL2[TIDCP]. */
+#else /* Word 0 - Little Endian */
+ uint64_t cvm_ctl : 1; /**< [ 0: 0](R/W) A read or write operation to AP_CVM_CTL or AP_CVM_MEMCTL with the exception
+ level lower than 3 traps to EL3 unless prohibited by AP_CVM_ACCESS_EL2,
+ AP_CVM_ACCESS_EL1, or AP_HCR_EL2[TIDCP]. */
+ uint64_t powthrottle : 1; /**< [ 1: 1](R/W) A read or write operation to PowThrottle with the exception level lower than 3 traps to
+ EL3 unless prohibited by AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or AP_HCR_EL2[TIDCP]. */
+ uint64_t cvm_bist : 1; /**< [ 2: 2](R/W) A read or write operation to AP_CVM_BIST* with the exception level lower than 3 traps to
+ EL3
+ unless prohibited by AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or AP_HCR_EL2[TIDCP]. */
+ uint64_t cvm_err : 1; /**< [ 3: 3](R/W) A read or write operation to AP_CVM_ERR* with the exception level lower than 3 traps to
+ EL3
+ unless prohibited by AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or AP_HCR_EL2[TIDCP]. */
+ uint64_t cvm_i_d_cache : 1; /**< [ 4: 4](R/W) A read or write operation to AP_CVM_ICACHE* or AP_CVM_DCACHE* with the exception level
+ lower
+ than 3 traps to EL3 unless prohibited by AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or
+ AP_HCR_EL2[TIDCP]. */
+ uint64_t cvm_evattid : 1; /**< [ 5: 5](R/W) A read or write operation to AP_CVM_EVATTID with the exception level lower than 3 traps to
+ EL3 unless prohibited by AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or AP_HCR_EL2[TIDCP]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t cvm_cache : 1; /**< [ 8: 8](R/W) A Cvm_Cache instruction with the exception level lower than 3 traps to EL3. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_ap_cvm_access_el3_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t cvm_cache : 1; /**< [ 8: 8](R/W) A Cvm_Cache instruction with the exception level lower than 3 traps to EL3. */
+ uint64_t cvm_pn : 1; /**< [ 7: 7](R/W) A read or write operation to AP_CVM_PN_EL1 with the exception level lower than 3
+ traps to EL3 unless prohibited by AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or
+ AP_HCR_EL2[TIDCP]. */
+ uint64_t cvm_statprof : 1; /**< [ 6: 6](R/W) A read or write operation to AP_CVM_STATPROFCTL_EL1 or AP_CVM_STATPROFCMP_EL1
+ with the exception level lower than 3 traps to EL3 unless prohibited by
+ AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or AP_HCR_EL2[TIDCP]. */
+ uint64_t cvm_evattid : 1; /**< [ 5: 5](R/W) A read or write operation to AP_CVM_EVATTID with the exception level lower than
+ 3 traps to EL3 unless prohibited by AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or
+ AP_HCR_EL2[TIDCP]. */
+ uint64_t cvm_i_d_cache : 1; /**< [ 4: 4](R/W) A read or write operation to AP_CVM_ICACHE* or AP_CVM_DCACHE* with the exception
+ level lower than 3 traps to EL3 unless prohibited by AP_CVM_ACCESS_EL2,
+ AP_CVM_ACCESS_EL1, or AP_HCR_EL2[TIDCP]. */
+ uint64_t cvm_err : 1; /**< [ 3: 3](R/W) A read or write operation to AP_CVM_ERR* with the exception level lower than 3
+ traps to EL3 unless prohibited by AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or
+ AP_HCR_EL2[TIDCP]. */
+ uint64_t cvm_bist : 1; /**< [ 2: 2](R/W) A read or write operation to AP_CVM_BIST* with the exception level lower than 3
+ traps to EL3 unless prohibited by AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or
+ AP_HCR_EL2[TIDCP]. */
+ uint64_t powthrottle : 1; /**< [ 1: 1](R/W) A read or write operation to PowThrottle with the exception level lower than 3
+ traps to EL3 unless prohibited by AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or
+ AP_HCR_EL2[TIDCP]. */
+ uint64_t cvm_ctl : 1; /**< [ 0: 0](R/W) A read or write operation to AP_CVM_CTL or AP_CVM_MEMCTL with the exception
+ level lower than 3 traps to EL3 unless prohibited by AP_CVM_ACCESS_EL2,
+ AP_CVM_ACCESS_EL1, or AP_HCR_EL2[TIDCP]. */
+#else /* Word 0 - Little Endian */
+ uint64_t cvm_ctl : 1; /**< [ 0: 0](R/W) A read or write operation to AP_CVM_CTL or AP_CVM_MEMCTL with the exception
+ level lower than 3 traps to EL3 unless prohibited by AP_CVM_ACCESS_EL2,
+ AP_CVM_ACCESS_EL1, or AP_HCR_EL2[TIDCP]. */
+ uint64_t powthrottle : 1; /**< [ 1: 1](R/W) A read or write operation to PowThrottle with the exception level lower than 3
+ traps to EL3 unless prohibited by AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or
+ AP_HCR_EL2[TIDCP]. */
+ uint64_t cvm_bist : 1; /**< [ 2: 2](R/W) A read or write operation to AP_CVM_BIST* with the exception level lower than 3
+ traps to EL3 unless prohibited by AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or
+ AP_HCR_EL2[TIDCP]. */
+ uint64_t cvm_err : 1; /**< [ 3: 3](R/W) A read or write operation to AP_CVM_ERR* with the exception level lower than 3
+ traps to EL3 unless prohibited by AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or
+ AP_HCR_EL2[TIDCP]. */
+ uint64_t cvm_i_d_cache : 1; /**< [ 4: 4](R/W) A read or write operation to AP_CVM_ICACHE* or AP_CVM_DCACHE* with the exception
+ level lower than 3 traps to EL3 unless prohibited by AP_CVM_ACCESS_EL2,
+ AP_CVM_ACCESS_EL1, or AP_HCR_EL2[TIDCP]. */
+ uint64_t cvm_evattid : 1; /**< [ 5: 5](R/W) A read or write operation to AP_CVM_EVATTID with the exception level lower than
+ 3 traps to EL3 unless prohibited by AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or
+ AP_HCR_EL2[TIDCP]. */
+ uint64_t cvm_statprof : 1; /**< [ 6: 6](R/W) A read or write operation to AP_CVM_STATPROFCTL_EL1 or AP_CVM_STATPROFCMP_EL1
+ with the exception level lower than 3 traps to EL3 unless prohibited by
+ AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or AP_HCR_EL2[TIDCP]. */
+ uint64_t cvm_pn : 1; /**< [ 7: 7](R/W) A read or write operation to AP_CVM_PN_EL1 with the exception level lower than 3
+ traps to EL3 unless prohibited by AP_CVM_ACCESS_EL2, AP_CVM_ACCESS_EL1, or
+ AP_HCR_EL2[TIDCP]. */
+ uint64_t cvm_cache : 1; /**< [ 8: 8](R/W) A Cvm_Cache instruction with the exception level lower than 3 traps to EL3. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_ap_cvm_access_el3 bdk_ap_cvm_access_el3_t;
+
+#define BDK_AP_CVM_ACCESS_EL3 BDK_AP_CVM_ACCESS_EL3_FUNC()
+static inline uint64_t BDK_AP_CVM_ACCESS_EL3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_ACCESS_EL3_FUNC(void)
+{
+ return 0x3060b000300ll;
+}
+
+#define typedef_BDK_AP_CVM_ACCESS_EL3 bdk_ap_cvm_access_el3_t
+#define bustype_BDK_AP_CVM_ACCESS_EL3 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_ACCESS_EL3 "AP_CVM_ACCESS_EL3"
+#define busnum_BDK_AP_CVM_ACCESS_EL3 0
+#define arguments_BDK_AP_CVM_ACCESS_EL3 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_bist0_el1
+ *
+ * AP Cavium BIST0 Register
+ * This register indicates BIST status, where a 1 in a bit position indicates a defect.
+ */
+union bdk_ap_cvm_bist0_el1
+{
+ uint64_t u;
+ struct bdk_ap_cvm_bist0_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_35_63 : 29;
+ uint64_t fuse_controller_reports_hard_repair : 1;/**< [ 34: 34](RO) Fuse controller reports hard repair. */
+ uint64_t clear_bist_was_run : 1; /**< [ 33: 33](RO) Clear BIST was run. */
+ uint64_t manufacturing_mode : 1; /**< [ 32: 32](RO) Manufacturing mode. */
+ uint64_t icache_data_banks_with_unrepairable_defects : 8;/**< [ 31: 24](RO) Icache data banks with unrepairable defects. */
+ uint64_t icache_data_banks_with_defects : 8;/**< [ 23: 16](RO) Icache data banks with defects. */
+ uint64_t reserved_8_15 : 8;
+ uint64_t aes_roms_defective : 1; /**< [ 7: 7](RO) AES ROMs defective. */
+ uint64_t fpu_roms_defective : 1; /**< [ 6: 6](RO) FPU ROMs defective. */
+ uint64_t fpu_rf_defective : 1; /**< [ 5: 5](RO) FPU/SIMD RF defective. */
+ uint64_t integer_rf_defective : 1; /**< [ 4: 4](RO) Integer RF defective. */
+ uint64_t jrt_defective : 1; /**< [ 3: 3](RO) JRT defective. */
+ uint64_t bht_defective : 1; /**< [ 2: 2](RO) BHT defective. */
+ uint64_t icache_tag_defective : 1; /**< [ 1: 1](RO) Icache tag defective. */
+ uint64_t icache_data_defective : 1; /**< [ 0: 0](RO) Icache data defective/unrepairable. */
+#else /* Word 0 - Little Endian */
+ uint64_t icache_data_defective : 1; /**< [ 0: 0](RO) Icache data defective/unrepairable. */
+ uint64_t icache_tag_defective : 1; /**< [ 1: 1](RO) Icache tag defective. */
+ uint64_t bht_defective : 1; /**< [ 2: 2](RO) BHT defective. */
+ uint64_t jrt_defective : 1; /**< [ 3: 3](RO) JRT defective. */
+ uint64_t integer_rf_defective : 1; /**< [ 4: 4](RO) Integer RF defective. */
+ uint64_t fpu_rf_defective : 1; /**< [ 5: 5](RO) FPU/SIMD RF defective. */
+ uint64_t fpu_roms_defective : 1; /**< [ 6: 6](RO) FPU ROMs defective. */
+ uint64_t aes_roms_defective : 1; /**< [ 7: 7](RO) AES ROMs defective. */
+ uint64_t reserved_8_15 : 8;
+ uint64_t icache_data_banks_with_defects : 8;/**< [ 23: 16](RO) Icache data banks with defects. */
+ uint64_t icache_data_banks_with_unrepairable_defects : 8;/**< [ 31: 24](RO) Icache data banks with unrepairable defects. */
+ uint64_t manufacturing_mode : 1; /**< [ 32: 32](RO) Manufacturing mode. */
+ uint64_t clear_bist_was_run : 1; /**< [ 33: 33](RO) Clear BIST was run. */
+ uint64_t fuse_controller_reports_hard_repair : 1;/**< [ 34: 34](RO) Fuse controller reports hard repair. */
+ uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_bist0_el1_s cn8; */
+ struct bdk_ap_cvm_bist0_el1_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_35_63 : 29;
+ uint64_t fuse_controller_reports_hard_repair : 1;/**< [ 34: 34](RO) Fuse controller reports hard repair. */
+ uint64_t reserved_33 : 1;
+ uint64_t manufacturing_mode : 1; /**< [ 32: 32](RO) Manufacturing mode. */
+ uint64_t icache_data_banks_with_unrepairable_defects : 8;/**< [ 31: 24](RO) Icache data banks with unrepairable defects. */
+ uint64_t icache_data_banks_with_defects : 8;/**< [ 23: 16](RO) Icache data banks with defects. */
+ uint64_t reserved_8_15 : 8;
+ uint64_t aes_roms_defective : 1; /**< [ 7: 7](RO) AES ROMs defective. */
+ uint64_t fpu_roms_defective : 1; /**< [ 6: 6](RO) FPU ROMs defective. */
+ uint64_t fpu_rf_defective : 1; /**< [ 5: 5](RO) FPU/SIMD RF defective. */
+ uint64_t integer_rf_defective : 1; /**< [ 4: 4](RO) Integer RF defective. */
+ uint64_t jrt_defective : 1; /**< [ 3: 3](RO) JRT defective. */
+ uint64_t bht_defective : 1; /**< [ 2: 2](RO) BHT defective. */
+ uint64_t icache_tag_defective : 1; /**< [ 1: 1](RO) Icache tag defective. */
+ uint64_t icache_data_defective : 1; /**< [ 0: 0](RO) Icache data defective/unrepairable. */
+#else /* Word 0 - Little Endian */
+ uint64_t icache_data_defective : 1; /**< [ 0: 0](RO) Icache data defective/unrepairable. */
+ uint64_t icache_tag_defective : 1; /**< [ 1: 1](RO) Icache tag defective. */
+ uint64_t bht_defective : 1; /**< [ 2: 2](RO) BHT defective. */
+ uint64_t jrt_defective : 1; /**< [ 3: 3](RO) JRT defective. */
+ uint64_t integer_rf_defective : 1; /**< [ 4: 4](RO) Integer RF defective. */
+ uint64_t fpu_rf_defective : 1; /**< [ 5: 5](RO) FPU/SIMD RF defective. */
+ uint64_t fpu_roms_defective : 1; /**< [ 6: 6](RO) FPU ROMs defective. */
+ uint64_t aes_roms_defective : 1; /**< [ 7: 7](RO) AES ROMs defective. */
+ uint64_t reserved_8_15 : 8;
+ uint64_t icache_data_banks_with_defects : 8;/**< [ 23: 16](RO) Icache data banks with defects. */
+ uint64_t icache_data_banks_with_unrepairable_defects : 8;/**< [ 31: 24](RO) Icache data banks with unrepairable defects. */
+ uint64_t manufacturing_mode : 1; /**< [ 32: 32](RO) Manufacturing mode. */
+ uint64_t reserved_33 : 1;
+ uint64_t fuse_controller_reports_hard_repair : 1;/**< [ 34: 34](RO) Fuse controller reports hard repair. */
+ uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_ap_cvm_bist0_el1 bdk_ap_cvm_bist0_el1_t;
+
+#define BDK_AP_CVM_BIST0_EL1 BDK_AP_CVM_BIST0_EL1_FUNC()
+static inline uint64_t BDK_AP_CVM_BIST0_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_BIST0_EL1_FUNC(void)
+{
+ return 0x3000b010000ll;
+}
+
+#define typedef_BDK_AP_CVM_BIST0_EL1 bdk_ap_cvm_bist0_el1_t
+#define bustype_BDK_AP_CVM_BIST0_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_BIST0_EL1 "AP_CVM_BIST0_EL1"
+#define busnum_BDK_AP_CVM_BIST0_EL1 0
+#define arguments_BDK_AP_CVM_BIST0_EL1 -1,-1,-1,-1
+
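+/* Hedged example: consuming AP_CVM_BIST0_EL1 through the union above.
+ * The "s3_0_c11_c1_0" MRS spelling is inferred from this file's
+ * 0x3000b010000 numbering (op0=3, op1=0, CRn=11, CRm=1, op2=0) and is an
+ * assumption, not a documented BDK name.
+ */
+static inline int bdk_ap_cvm_bist0_icache_unrepairable(void)
+{
+    bdk_ap_cvm_bist0_el1_t bist;
+    asm volatile("mrs %0, s3_0_c11_c1_0" : "=r" (bist.u));
+    return bist.s.icache_data_banks_with_unrepairable_defects != 0;
+}
+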
+/**
+ * Register (SYSREG) ap_cvm_bist1_el1
+ *
+ * AP Cavium BIST1 Register
+ * This register indicates BIST status, where a 1 in a bit position indicates a defect.
+ */
+union bdk_ap_cvm_bist1_el1
+{
+ uint64_t u;
+ struct bdk_ap_cvm_bist1_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t icache_bank7 : 6; /**< [ 47: 42](RO) Icache bank7 bad set, 0x3F means no defect. */
+ uint64_t icache_bank6 : 6; /**< [ 41: 36](RO) Icache bank6 bad set, 0x3F means no defect. */
+ uint64_t icache_bank5 : 6; /**< [ 35: 30](RO) Icache bank5 bad set, 0x3F means no defect. */
+ uint64_t icache_bank4 : 6; /**< [ 29: 24](RO) Icache bank4 bad set, 0x3F means no defect. */
+ uint64_t icache_bank3 : 6; /**< [ 23: 18](RO) Icache bank3 bad set, 0x3F means no defect. */
+ uint64_t icache_bank2 : 6; /**< [ 17: 12](RO) Icache bank2 bad set, 0x3F means no defect. */
+ uint64_t icache_bank1 : 6; /**< [ 11: 6](RO) Icache bank1 bad set, 0x3F means no defect. */
+ uint64_t icache_bank0 : 6; /**< [ 5: 0](RO) Icache bank0 bad set, 0x3F means no defect. */
+#else /* Word 0 - Little Endian */
+ uint64_t icache_bank0 : 6; /**< [ 5: 0](RO) Icache bank0 bad set, 0x3F means no defect. */
+ uint64_t icache_bank1 : 6; /**< [ 11: 6](RO) Icache bank1 bad set, 0x3F means no defect. */
+ uint64_t icache_bank2 : 6; /**< [ 17: 12](RO) Icache bank2 bad set, 0x3F means no defect. */
+ uint64_t icache_bank3 : 6; /**< [ 23: 18](RO) Icache bank3 bad set, 0x3F means no defect. */
+ uint64_t icache_bank4 : 6; /**< [ 29: 24](RO) Icache bank4 bad set, 0x3F means no defect. */
+ uint64_t icache_bank5 : 6; /**< [ 35: 30](RO) Icache bank5 bad set, 0x3F means no defect. */
+ uint64_t icache_bank6 : 6; /**< [ 41: 36](RO) Icache bank6 bad set, 0x3F means no defect. */
+ uint64_t icache_bank7 : 6; /**< [ 47: 42](RO) Icache bank7 bad set, 0x3F means no defect. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_bist1_el1_s cn8; */
+ struct bdk_ap_cvm_bist1_el1_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_ap_cvm_bist1_el1 bdk_ap_cvm_bist1_el1_t;
+
+#define BDK_AP_CVM_BIST1_EL1 BDK_AP_CVM_BIST1_EL1_FUNC()
+static inline uint64_t BDK_AP_CVM_BIST1_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_BIST1_EL1_FUNC(void)
+{
+ return 0x3000b010100ll;
+}
+
+#define typedef_BDK_AP_CVM_BIST1_EL1 bdk_ap_cvm_bist1_el1_t
+#define bustype_BDK_AP_CVM_BIST1_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_BIST1_EL1 "AP_CVM_BIST1_EL1"
+#define busnum_BDK_AP_CVM_BIST1_EL1 0
+#define arguments_BDK_AP_CVM_BIST1_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_bist2_el1
+ *
+ * AP Cavium BIST2 Register
+ * This register indicates BIST status, where a 1 in a bit position indicates a defect.
+ */
+union bdk_ap_cvm_bist2_el1
+{
+ uint64_t u;
+ struct bdk_ap_cvm_bist2_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t wcu : 1; /**< [ 8: 8](RO) WCU defective. */
+ uint64_t dut : 1; /**< [ 7: 7](RO) DUT defective. */
+ uint64_t wbf : 1; /**< [ 6: 6](RO) WBF defective. */
+ uint64_t maf : 1; /**< [ 5: 5](RO) MAF defective. */
+ uint64_t utlb : 1; /**< [ 4: 4](RO) UTLB defective. */
+ uint64_t mtlb : 1; /**< [ 3: 3](RO) MTLB defective. */
+ uint64_t l1dp : 1; /**< [ 2: 2](RO) Dcache PTAG defective. */
+ uint64_t l1dv : 1; /**< [ 1: 1](RO) Dcache VTAG defective. */
+ uint64_t l1dd : 1; /**< [ 0: 0](RO) Dcache Data defective. */
+#else /* Word 0 - Little Endian */
+ uint64_t l1dd : 1; /**< [ 0: 0](RO) Dcache Data defective. */
+ uint64_t l1dv : 1; /**< [ 1: 1](RO) Dcache VTAG defective. */
+ uint64_t l1dp : 1; /**< [ 2: 2](RO) Dcache PTAG defective. */
+ uint64_t mtlb : 1; /**< [ 3: 3](RO) MTLB defective. */
+ uint64_t utlb : 1; /**< [ 4: 4](RO) UTLB defective. */
+ uint64_t maf : 1; /**< [ 5: 5](RO) MAF defective. */
+ uint64_t wbf : 1; /**< [ 6: 6](RO) WBF defective. */
+ uint64_t dut : 1; /**< [ 7: 7](RO) DUT defective. */
+ uint64_t wcu : 1; /**< [ 8: 8](RO) WCU defective. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_bist2_el1_s cn8; */
+ struct bdk_ap_cvm_bist2_el1_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_ap_cvm_bist2_el1 bdk_ap_cvm_bist2_el1_t;
+
+#define BDK_AP_CVM_BIST2_EL1 BDK_AP_CVM_BIST2_EL1_FUNC()
+static inline uint64_t BDK_AP_CVM_BIST2_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_BIST2_EL1_FUNC(void)
+{
+ return 0x3000b010400ll;
+}
+
+#define typedef_BDK_AP_CVM_BIST2_EL1 bdk_ap_cvm_bist2_el1_t
+#define bustype_BDK_AP_CVM_BIST2_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_BIST2_EL1 "AP_CVM_BIST2_EL1"
+#define busnum_BDK_AP_CVM_BIST2_EL1 0
+#define arguments_BDK_AP_CVM_BIST2_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_bist3_el1
+ *
+ * AP Cavium BIST3 Register
+ * This register indicates BIST status, where a 1 in a bit position indicates a defect.
+ */
+union bdk_ap_cvm_bist3_el1
+{
+ uint64_t u;
+ struct bdk_ap_cvm_bist3_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t set7 : 6; /**< [ 47: 42](RO) Dcache set7 bad way, 0x3F = no defect, 0x3E = multiple defects. */
+ uint64_t set6 : 6; /**< [ 41: 36](RO) Dcache set6 bad way, 0x3F = no defect, 0x3E = multiple defects. */
+ uint64_t set5 : 6; /**< [ 35: 30](RO) Dcache set5 bad way, 0x3F = no defect, 0x3E = multiple defects. */
+ uint64_t set4 : 6; /**< [ 29: 24](RO) Dcache set4 bad way, 0x3F = no defect, 0x3E = multiple defects. */
+ uint64_t set3 : 6; /**< [ 23: 18](RO) Dcache set3 bad way, 0x3F = no defect, 0x3E = multiple defects. */
+ uint64_t set2 : 6; /**< [ 17: 12](RO) Dcache set2 bad way, 0x3F = no defect, 0x3E = multiple defects. */
+ uint64_t set1 : 6; /**< [ 11: 6](RO) Dcache set1 bad way, 0x3F = no defect, 0x3E = multiple defects. */
+ uint64_t set0 : 6; /**< [ 5: 0](RO) Dcache set0 bad way, 0x3F = no defect, 0x3E = multiple defects. */
+#else /* Word 0 - Little Endian */
+ uint64_t set0 : 6; /**< [ 5: 0](RO) Dcache set0 bad way, 0x3F = no defect, 0x3E = multiple defects. */
+ uint64_t set1 : 6; /**< [ 11: 6](RO) Dcache set1 bad way, 0x3F = no defect, 0x3E = multiple defects. */
+ uint64_t set2 : 6; /**< [ 17: 12](RO) Dcache set2 bad way, 0x3F = no defect, 0x3E = multiple defects. */
+ uint64_t set3 : 6; /**< [ 23: 18](RO) Dcache set3 bad way, 0x3F = no defect, 0x3E = multiple defects. */
+ uint64_t set4 : 6; /**< [ 29: 24](RO) Dcache set4 bad way, 0x3F = no defect, 0x3E = multiple defects. */
+ uint64_t set5 : 6; /**< [ 35: 30](RO) Dcache set5 bad way, 0x3F = no defect, 0x3E = multiple defects. */
+ uint64_t set6 : 6; /**< [ 41: 36](RO) Dcache set6 bad way, 0x3F = no defect, 0x3E = multiple defects. */
+ uint64_t set7 : 6; /**< [ 47: 42](RO) Dcache set7 bad way, 0x3F = no defect, 0x3E = multiple defects. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_bist3_el1_s cn8; */
+ struct bdk_ap_cvm_bist3_el1_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t reserved_0_47 : 48;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_47 : 48;
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_ap_cvm_bist3_el1 bdk_ap_cvm_bist3_el1_t;
+
+#define BDK_AP_CVM_BIST3_EL1 BDK_AP_CVM_BIST3_EL1_FUNC()
+static inline uint64_t BDK_AP_CVM_BIST3_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_BIST3_EL1_FUNC(void)
+{
+ return 0x3000b010500ll;
+}
+
+#define typedef_BDK_AP_CVM_BIST3_EL1 bdk_ap_cvm_bist3_el1_t
+#define bustype_BDK_AP_CVM_BIST3_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_BIST3_EL1 "AP_CVM_BIST3_EL1"
+#define busnum_BDK_AP_CVM_BIST3_EL1 0
+#define arguments_BDK_AP_CVM_BIST3_EL1 -1,-1,-1,-1
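+
+/* Usage sketch (editorial note, not part of the BDK sources): scan the eight
+ * SET fields for failing ways, using the encodings documented above (0x3F =
+ * no defect, 0x3E = multiple defects, anything else is the single bad way).
+ * Sysreg name s3_0_c11_c1_5 is assumed from the 0x3000b010500 key.
+ *
+ *   bdk_ap_cvm_bist3_el1_t bist3;
+ *   asm volatile("mrs %0, s3_0_c11_c1_5" : "=r" (bist3.u));
+ *   for (int set = 0; set < 8; set++)
+ *   {
+ *       unsigned way = (bist3.u >> (set * 6)) & 0x3F;  // SET fields are 6 bits each
+ *       if (way == 0x3E)
+ *           printf("Dcache set%d: multiple bad ways\n", set);
+ *       else if (way != 0x3F)
+ *           printf("Dcache set%d: bad way %u\n", set, way);
+ *   }
+ */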
+
+/**
+ * Register (SYSREG) ap_cvm_cpid_el2
+ *
+ * AP Cavium Cache Partitioning EL2 Register
+ * This register provides Cavium-specific control information.
+ */
+union bdk_ap_cvm_cpid_el2
+{
+ uint64_t u;
+ struct bdk_ap_cvm_cpid_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_7_63 : 57;
+ uint64_t cpid : 7; /**< [ 6: 0](R/W) Cache partition ID. */
+#else /* Word 0 - Little Endian */
+ uint64_t cpid : 7; /**< [ 6: 0](R/W) Cache partition ID. */
+ uint64_t reserved_7_63 : 57;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_cpid_el2_s cn; */
+};
+typedef union bdk_ap_cvm_cpid_el2 bdk_ap_cvm_cpid_el2_t;
+
+#define BDK_AP_CVM_CPID_EL2 BDK_AP_CVM_CPID_EL2_FUNC()
+static inline uint64_t BDK_AP_CVM_CPID_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_CPID_EL2_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x3040b060400ll;
+ __bdk_csr_fatal("AP_CVM_CPID_EL2", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_CVM_CPID_EL2 bdk_ap_cvm_cpid_el2_t
+#define bustype_BDK_AP_CVM_CPID_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_CPID_EL2 "AP_CVM_CPID_EL2"
+#define busnum_BDK_AP_CVM_CPID_EL2 0
+#define arguments_BDK_AP_CVM_CPID_EL2 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_cpid_el3
+ *
+ * AP Cavium Cache Partitioning EL3 Register
+ * This register provides Cavium-specific control information.
+ */
+union bdk_ap_cvm_cpid_el3
+{
+ uint64_t u;
+ struct bdk_ap_cvm_cpid_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_7_63 : 57;
+ uint64_t cpid : 7; /**< [ 6: 0](R/W) Cache partition ID. */
+#else /* Word 0 - Little Endian */
+ uint64_t cpid : 7; /**< [ 6: 0](R/W) Cache partition ID. */
+ uint64_t reserved_7_63 : 57;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_cpid_el3_s cn; */
+};
+typedef union bdk_ap_cvm_cpid_el3 bdk_ap_cvm_cpid_el3_t;
+
+#define BDK_AP_CVM_CPID_EL3 BDK_AP_CVM_CPID_EL3_FUNC()
+static inline uint64_t BDK_AP_CVM_CPID_EL3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_CPID_EL3_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x3060b060400ll;
+ __bdk_csr_fatal("AP_CVM_CPID_EL3", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_CVM_CPID_EL3 bdk_ap_cvm_cpid_el3_t
+#define bustype_BDK_AP_CVM_CPID_EL3 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_CPID_EL3 "AP_CVM_CPID_EL3"
+#define busnum_BDK_AP_CVM_CPID_EL3 0
+#define arguments_BDK_AP_CVM_CPID_EL3 -1,-1,-1,-1
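+
+/* Usage sketch (editorial note, not part of the BDK sources): the CPID
+ * registers exist only on CN9XXX, which is why the address functions above
+ * call __bdk_csr_fatal() on other models; guard any access the same way.
+ * The partition ID value here is purely illustrative, and the sysreg name
+ * s3_6_c11_c6_4 is assumed from the 0x3060b060400 key.
+ *
+ *   if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ *   {
+ *       bdk_ap_cvm_cpid_el3_t cpid;
+ *       cpid.u = 0;
+ *       cpid.s.cpid = 1;  // hypothetical cache partition ID
+ *       asm volatile("msr s3_6_c11_c6_4, %0" : : "r" (cpid.u));
+ *   }
+ */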
+
+/**
+ * Register (SYSREG) ap_cvm_dcachedata0_el1
+ *
+ * AP Cavium Dcache Data 0 Register
+ */
+union bdk_ap_cvm_dcachedata0_el1
+{
+ uint64_t u;
+ struct bdk_ap_cvm_dcachedata0_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](RO) Dcache data. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](RO) Dcache data. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_dcachedata0_el1_s cn; */
+};
+typedef union bdk_ap_cvm_dcachedata0_el1 bdk_ap_cvm_dcachedata0_el1_t;
+
+#define BDK_AP_CVM_DCACHEDATA0_EL1 BDK_AP_CVM_DCACHEDATA0_EL1_FUNC()
+static inline uint64_t BDK_AP_CVM_DCACHEDATA0_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_DCACHEDATA0_EL1_FUNC(void)
+{
+ return 0x3000b030400ll;
+}
+
+#define typedef_BDK_AP_CVM_DCACHEDATA0_EL1 bdk_ap_cvm_dcachedata0_el1_t
+#define bustype_BDK_AP_CVM_DCACHEDATA0_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_DCACHEDATA0_EL1 "AP_CVM_DCACHEDATA0_EL1"
+#define busnum_BDK_AP_CVM_DCACHEDATA0_EL1 0
+#define arguments_BDK_AP_CVM_DCACHEDATA0_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_dcachedata1_el1
+ *
+ * AP Cavium Dcache Data 1 Register
+ */
+union bdk_ap_cvm_dcachedata1_el1
+{
+ uint64_t u;
+ struct bdk_ap_cvm_dcachedata1_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t parity : 8; /**< [ 7: 0](RO) Parity bits. */
+#else /* Word 0 - Little Endian */
+ uint64_t parity : 8; /**< [ 7: 0](RO) Parity bits. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_dcachedata1_el1_s cn; */
+};
+typedef union bdk_ap_cvm_dcachedata1_el1 bdk_ap_cvm_dcachedata1_el1_t;
+
+#define BDK_AP_CVM_DCACHEDATA1_EL1 BDK_AP_CVM_DCACHEDATA1_EL1_FUNC()
+static inline uint64_t BDK_AP_CVM_DCACHEDATA1_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_DCACHEDATA1_EL1_FUNC(void)
+{
+ return 0x3000b030500ll;
+}
+
+#define typedef_BDK_AP_CVM_DCACHEDATA1_EL1 bdk_ap_cvm_dcachedata1_el1_t
+#define bustype_BDK_AP_CVM_DCACHEDATA1_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_DCACHEDATA1_EL1 "AP_CVM_DCACHEDATA1_EL1"
+#define busnum_BDK_AP_CVM_DCACHEDATA1_EL1 0
+#define arguments_BDK_AP_CVM_DCACHEDATA1_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_dcacheptag0_el1
+ *
+ * AP Cavium Dcache Ptag 0 Register
+ */
+union bdk_ap_cvm_dcacheptag0_el1
+{
+ uint64_t u;
+ struct bdk_ap_cvm_dcacheptag0_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_44_63 : 20;
+ uint64_t nsec : 1; /**< [ 43: 43](RO) Not-secure. */
+ uint64_t ptag : 33; /**< [ 42: 10](RO) Physical tag \<42:10\>. */
+ uint64_t reserved_1_9 : 9;
+ uint64_t valid : 1; /**< [ 0: 0](RO) Valid. */
+#else /* Word 0 - Little Endian */
+ uint64_t valid : 1; /**< [ 0: 0](RO) Valid. */
+ uint64_t reserved_1_9 : 9;
+ uint64_t ptag : 33; /**< [ 42: 10](RO) Physical tag \<42:10\>. */
+ uint64_t nsec : 1; /**< [ 43: 43](RO) Not-secure. */
+ uint64_t reserved_44_63 : 20;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_dcacheptag0_el1_s cn; */
+};
+typedef union bdk_ap_cvm_dcacheptag0_el1 bdk_ap_cvm_dcacheptag0_el1_t;
+
+#define BDK_AP_CVM_DCACHEPTAG0_EL1 BDK_AP_CVM_DCACHEPTAG0_EL1_FUNC()
+static inline uint64_t BDK_AP_CVM_DCACHEPTAG0_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_DCACHEPTAG0_EL1_FUNC(void)
+{
+ return 0x3000b040400ll;
+}
+
+#define typedef_BDK_AP_CVM_DCACHEPTAG0_EL1 bdk_ap_cvm_dcacheptag0_el1_t
+#define bustype_BDK_AP_CVM_DCACHEPTAG0_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_DCACHEPTAG0_EL1 "AP_CVM_DCACHEPTAG0_EL1"
+#define busnum_BDK_AP_CVM_DCACHEPTAG0_EL1 0
+#define arguments_BDK_AP_CVM_DCACHEPTAG0_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_dcachevtag0_el1
+ *
+ * AP Cavium Dcache Vtag 0 Register
+ */
+union bdk_ap_cvm_dcachevtag0_el1
+{
+ uint64_t u;
+ struct bdk_ap_cvm_dcachevtag0_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_60_63 : 4;
+ uint64_t eva_vmid : 4; /**< [ 59: 56](RO) Entry EVA_VMID. */
+ uint64_t reserved_49_55 : 7;
+ uint64_t r : 1; /**< [ 48: 48](RO) Entry R. */
+ uint64_t vtag : 38; /**< [ 47: 10](RO) Entry VTAG\<47:10\>. */
+ uint64_t eva_asid : 6; /**< [ 9: 4](RO) Entry EVA_ASID. */
+ uint64_t ng : 1; /**< [ 3: 3](RO) Entry NG. */
+ uint64_t el_1or0 : 1; /**< [ 2: 2](RO) Entry is EL0 or EL1. */
+ uint64_t nsec : 1; /**< [ 1: 1](RO) Entry is NS. */
+ uint64_t valid : 1; /**< [ 0: 0](RO) Entry valid. */
+#else /* Word 0 - Little Endian */
+ uint64_t valid : 1; /**< [ 0: 0](RO) Entry valid. */
+ uint64_t nsec : 1; /**< [ 1: 1](RO) Entry is NS. */
+ uint64_t el_1or0 : 1; /**< [ 2: 2](RO) Entry is EL0 or EL1. */
+ uint64_t ng : 1; /**< [ 3: 3](RO) Entry NG. */
+ uint64_t eva_asid : 6; /**< [ 9: 4](RO) Entry EVA_ASID. */
+ uint64_t vtag : 38; /**< [ 47: 10](RO) Entry VTAG\<47:10\>. */
+ uint64_t r : 1; /**< [ 48: 48](RO) Entry R. */
+ uint64_t reserved_49_55 : 7;
+ uint64_t eva_vmid : 4; /**< [ 59: 56](RO) Entry EVA_VMID. */
+ uint64_t reserved_60_63 : 4;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_dcachevtag0_el1_s cn; */
+};
+typedef union bdk_ap_cvm_dcachevtag0_el1 bdk_ap_cvm_dcachevtag0_el1_t;
+
+#define BDK_AP_CVM_DCACHEVTAG0_EL1 BDK_AP_CVM_DCACHEVTAG0_EL1_FUNC()
+static inline uint64_t BDK_AP_CVM_DCACHEVTAG0_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_DCACHEVTAG0_EL1_FUNC(void)
+{
+ return 0x3000b030600ll;
+}
+
+#define typedef_BDK_AP_CVM_DCACHEVTAG0_EL1 bdk_ap_cvm_dcachevtag0_el1_t
+#define bustype_BDK_AP_CVM_DCACHEVTAG0_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_DCACHEVTAG0_EL1 "AP_CVM_DCACHEVTAG0_EL1"
+#define busnum_BDK_AP_CVM_DCACHEVTAG0_EL1 0
+#define arguments_BDK_AP_CVM_DCACHEVTAG0_EL1 -1,-1,-1,-1
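+
+/* Usage sketch (editorial note, not part of the BDK sources): decode a Dcache
+ * virtual tag. [VTAG] holds VA<47:10>, so shift it back into place. Sysreg
+ * name s3_0_c11_c3_6 is assumed from the 0x3000b030600 key.
+ *
+ *   bdk_ap_cvm_dcachevtag0_el1_t vtag;
+ *   asm volatile("mrs %0, s3_0_c11_c3_6" : "=r" (vtag.u));
+ *   if (vtag.s.valid)
+ *       printf("VA<47:10> = 0x%llx, ASID %u, NS %u\n",
+ *              (unsigned long long)vtag.s.vtag << 10,
+ *              (unsigned)vtag.s.eva_asid, (unsigned)vtag.s.nsec);
+ */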
+
+/**
+ * Register (SYSREG) ap_cvm_dcachevtag1_el1
+ *
+ * AP Cavium Dcache Vtag 1 Register
+ */
+union bdk_ap_cvm_dcachevtag1_el1
+{
+ uint64_t u;
+ struct bdk_ap_cvm_dcachevtag1_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_25_63 : 39;
+ uint64_t ent1 : 9; /**< [ 24: 16](RO) ENT1. */
+ uint64_t reserved_9_15 : 7;
+ uint64_t ent2 : 9; /**< [ 8: 0](RO) ENT2. */
+#else /* Word 0 - Little Endian */
+ uint64_t ent2 : 9; /**< [ 8: 0](RO) ENT2. */
+ uint64_t reserved_9_15 : 7;
+ uint64_t ent1 : 9; /**< [ 24: 16](RO) ENT1. */
+ uint64_t reserved_25_63 : 39;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_dcachevtag1_el1_s cn; */
+};
+typedef union bdk_ap_cvm_dcachevtag1_el1 bdk_ap_cvm_dcachevtag1_el1_t;
+
+#define BDK_AP_CVM_DCACHEVTAG1_EL1 BDK_AP_CVM_DCACHEVTAG1_EL1_FUNC()
+static inline uint64_t BDK_AP_CVM_DCACHEVTAG1_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_DCACHEVTAG1_EL1_FUNC(void)
+{
+ return 0x3000b030700ll;
+}
+
+#define typedef_BDK_AP_CVM_DCACHEVTAG1_EL1 bdk_ap_cvm_dcachevtag1_el1_t
+#define bustype_BDK_AP_CVM_DCACHEVTAG1_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_DCACHEVTAG1_EL1 "AP_CVM_DCACHEVTAG1_EL1"
+#define busnum_BDK_AP_CVM_DCACHEVTAG1_EL1 0
+#define arguments_BDK_AP_CVM_DCACHEVTAG1_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_debug0_el3
+ *
+ * INTERNAL: AP Cavium Debug 0 Register
+ *
+ * This register is for diagnostic use only.
+ */
+union bdk_ap_cvm_debug0_el3
+{
+ uint64_t u;
+ struct bdk_ap_cvm_debug0_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t current_pc : 64; /**< [ 63: 0](RO) Current PC. */
+#else /* Word 0 - Little Endian */
+ uint64_t current_pc : 64; /**< [ 63: 0](RO) Current PC. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_debug0_el3_s cn; */
+};
+typedef union bdk_ap_cvm_debug0_el3 bdk_ap_cvm_debug0_el3_t;
+
+#define BDK_AP_CVM_DEBUG0_EL3 BDK_AP_CVM_DEBUG0_EL3_FUNC()
+static inline uint64_t BDK_AP_CVM_DEBUG0_EL3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_DEBUG0_EL3_FUNC(void)
+{
+ return 0x3060b040000ll;
+}
+
+#define typedef_BDK_AP_CVM_DEBUG0_EL3 bdk_ap_cvm_debug0_el3_t
+#define bustype_BDK_AP_CVM_DEBUG0_EL3 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_DEBUG0_EL3 "AP_CVM_DEBUG0_EL3"
+#define busnum_BDK_AP_CVM_DEBUG0_EL3 0
+#define arguments_BDK_AP_CVM_DEBUG0_EL3 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_debug1_el3
+ *
+ * INTERNAL: AP Cavium Debug 1 Register
+ *
+ * This register is for diagnostic use only.
+ */
+union bdk_ap_cvm_debug1_el3
+{
+ uint64_t u;
+ struct bdk_ap_cvm_debug1_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t current_fetch : 64; /**< [ 63: 0](RO) Current fetcher address. */
+#else /* Word 0 - Little Endian */
+ uint64_t current_fetch : 64; /**< [ 63: 0](RO) Current fetcher address. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_debug1_el3_s cn; */
+};
+typedef union bdk_ap_cvm_debug1_el3 bdk_ap_cvm_debug1_el3_t;
+
+#define BDK_AP_CVM_DEBUG1_EL3 BDK_AP_CVM_DEBUG1_EL3_FUNC()
+static inline uint64_t BDK_AP_CVM_DEBUG1_EL3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_DEBUG1_EL3_FUNC(void)
+{
+ return 0x3060b040100ll;
+}
+
+#define typedef_BDK_AP_CVM_DEBUG1_EL3 bdk_ap_cvm_debug1_el3_t
+#define bustype_BDK_AP_CVM_DEBUG1_EL3 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_DEBUG1_EL3 "AP_CVM_DEBUG1_EL3"
+#define busnum_BDK_AP_CVM_DEBUG1_EL3 0
+#define arguments_BDK_AP_CVM_DEBUG1_EL3 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_debug2_el3
+ *
+ * INTERNAL: AP Cavium Debug 2 Register
+ *
+ * This register is for diagnostic use only.
+ */
+union bdk_ap_cvm_debug2_el3
+{
+ uint64_t u;
+ struct bdk_ap_cvm_debug2_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t last_ifill : 64; /**< [ 63: 0](RO) Last ifill address. */
+#else /* Word 0 - Little Endian */
+ uint64_t last_ifill : 64; /**< [ 63: 0](RO) Last ifill address. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_debug2_el3_s cn; */
+};
+typedef union bdk_ap_cvm_debug2_el3 bdk_ap_cvm_debug2_el3_t;
+
+#define BDK_AP_CVM_DEBUG2_EL3 BDK_AP_CVM_DEBUG2_EL3_FUNC()
+static inline uint64_t BDK_AP_CVM_DEBUG2_EL3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_DEBUG2_EL3_FUNC(void)
+{
+ return 0x3060b040200ll;
+}
+
+#define typedef_BDK_AP_CVM_DEBUG2_EL3 bdk_ap_cvm_debug2_el3_t
+#define bustype_BDK_AP_CVM_DEBUG2_EL3 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_DEBUG2_EL3 "AP_CVM_DEBUG2_EL3"
+#define busnum_BDK_AP_CVM_DEBUG2_EL3 0
+#define arguments_BDK_AP_CVM_DEBUG2_EL3 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_debug3_el3
+ *
+ * INTERNAL: AP Cavium Debug 3 Register
+ *
+ * This register is for diagnostic use only.
+ */
+union bdk_ap_cvm_debug3_el3
+{
+ uint64_t u;
+ struct bdk_ap_cvm_debug3_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t last_committed : 64; /**< [ 63: 0](RO) Last committed instruction PC. */
+#else /* Word 0 - Little Endian */
+ uint64_t last_committed : 64; /**< [ 63: 0](RO) Last committed instruction PC. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_debug3_el3_s cn; */
+};
+typedef union bdk_ap_cvm_debug3_el3 bdk_ap_cvm_debug3_el3_t;
+
+#define BDK_AP_CVM_DEBUG3_EL3 BDK_AP_CVM_DEBUG3_EL3_FUNC()
+static inline uint64_t BDK_AP_CVM_DEBUG3_EL3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_DEBUG3_EL3_FUNC(void)
+{
+ return 0x3060b040300ll;
+}
+
+#define typedef_BDK_AP_CVM_DEBUG3_EL3 bdk_ap_cvm_debug3_el3_t
+#define bustype_BDK_AP_CVM_DEBUG3_EL3 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_DEBUG3_EL3 "AP_CVM_DEBUG3_EL3"
+#define busnum_BDK_AP_CVM_DEBUG3_EL3 0
+#define arguments_BDK_AP_CVM_DEBUG3_EL3 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_debug4_el3
+ *
+ * INTERNAL: AP Cavium Debug 4 Register
+ *
+ * This register is for diagnostic use only.
+ */
+union bdk_ap_cvm_debug4_el3
+{
+ uint64_t u;
+ struct bdk_ap_cvm_debug4_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t dual_issue_reason : 8; /**< [ 23: 16](RO) Reason dual issue didn't occur. */
+ uint64_t issue_reason : 8; /**< [ 15: 8](RO) Reason issue didn't occur. */
+ uint64_t reserved_5_7 : 3;
+ uint64_t mem_stall_4a : 1; /**< [ 4: 4](RO) Memory Stall stage 4a. */
+ uint64_t waiting_for_pfill_4a : 1; /**< [ 3: 3](RO) Waiting for PFILL stage 4a. */
+ uint64_t waiting_for_ifill_4a : 1; /**< [ 2: 2](RO) Waiting for IFILL stage 4a. */
+ uint64_t exception_level : 2; /**< [ 1: 0](RO) Current exception level. */
+#else /* Word 0 - Little Endian */
+ uint64_t exception_level : 2; /**< [ 1: 0](RO) Current exception level. */
+ uint64_t waiting_for_ifill_4a : 1; /**< [ 2: 2](RO) Waiting for IFILL stage 4a. */
+ uint64_t waiting_for_pfill_4a : 1; /**< [ 3: 3](RO) Waiting for PFILL stage 4a. */
+ uint64_t mem_stall_4a : 1; /**< [ 4: 4](RO) Memory Stall stage 4a. */
+ uint64_t reserved_5_7 : 3;
+ uint64_t issue_reason : 8; /**< [ 15: 8](RO) Reason issue didn't occur. */
+ uint64_t dual_issue_reason : 8; /**< [ 23: 16](RO) Reason dual issue didn't occur. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_debug4_el3_s cn; */
+};
+typedef union bdk_ap_cvm_debug4_el3 bdk_ap_cvm_debug4_el3_t;
+
+#define BDK_AP_CVM_DEBUG4_EL3 BDK_AP_CVM_DEBUG4_EL3_FUNC()
+static inline uint64_t BDK_AP_CVM_DEBUG4_EL3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_DEBUG4_EL3_FUNC(void)
+{
+ return 0x3060b050000ll;
+}
+
+#define typedef_BDK_AP_CVM_DEBUG4_EL3 bdk_ap_cvm_debug4_el3_t
+#define bustype_BDK_AP_CVM_DEBUG4_EL3 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_DEBUG4_EL3 "AP_CVM_DEBUG4_EL3"
+#define busnum_BDK_AP_CVM_DEBUG4_EL3 0
+#define arguments_BDK_AP_CVM_DEBUG4_EL3 -1,-1,-1,-1
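+
+/* Usage sketch (editorial note, not part of the BDK sources): DEBUG4 is a
+ * point-in-time pipeline snapshot, so a stall picture needs repeated samples.
+ * Sysreg name s3_6_c11_c5_0 is assumed from the 0x3060b050000 key.
+ *
+ *   uint64_t ifill_waits = 0;
+ *   for (int i = 0; i < 1000; i++)
+ *   {
+ *       bdk_ap_cvm_debug4_el3_t dbg;
+ *       asm volatile("mrs %0, s3_6_c11_c5_0" : "=r" (dbg.u));
+ *       ifill_waits += dbg.s.waiting_for_ifill_4a;  // crude stall estimate
+ *   }
+ */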
+
+/**
+ * Register (SYSREG) ap_cvm_debug6_el3
+ *
+ * INTERNAL: AP Cavium Debug 6 Register
+ *
+ * This register is for diagnostic use only.
+ */
+union bdk_ap_cvm_debug6_el3
+{
+ uint64_t u;
+ struct bdk_ap_cvm_debug6_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_41_63 : 23;
+ uint64_t power_off : 1; /**< [ 40: 40](RO) */
+ uint64_t power_longterm : 8; /**< [ 39: 32](RO) */
+ uint64_t power_setting : 8; /**< [ 31: 24](RO) */
+ uint64_t reserved_22_23 : 2;
+ uint64_t interval_power : 22; /**< [ 21: 0](RO) */
+#else /* Word 0 - Little Endian */
+ uint64_t interval_power : 22; /**< [ 21: 0](RO) */
+ uint64_t reserved_22_23 : 2;
+ uint64_t power_setting : 8; /**< [ 31: 24](RO) */
+ uint64_t power_longterm : 8; /**< [ 39: 32](RO) */
+ uint64_t power_off : 1; /**< [ 40: 40](RO) */
+ uint64_t reserved_41_63 : 23;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_debug6_el3_s cn; */
+};
+typedef union bdk_ap_cvm_debug6_el3 bdk_ap_cvm_debug6_el3_t;
+
+#define BDK_AP_CVM_DEBUG6_EL3 BDK_AP_CVM_DEBUG6_EL3_FUNC()
+static inline uint64_t BDK_AP_CVM_DEBUG6_EL3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_DEBUG6_EL3_FUNC(void)
+{
+ return 0x3060b050200ll;
+}
+
+#define typedef_BDK_AP_CVM_DEBUG6_EL3 bdk_ap_cvm_debug6_el3_t
+#define bustype_BDK_AP_CVM_DEBUG6_EL3 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_DEBUG6_EL3 "AP_CVM_DEBUG6_EL3"
+#define busnum_BDK_AP_CVM_DEBUG6_EL3 0
+#define arguments_BDK_AP_CVM_DEBUG6_EL3 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_debug7_el3
+ *
+ * INTERNAL: AP Cavium Debug 7 Register
+ *
+ * This register is for diagnostic use only.
+ */
+union bdk_ap_cvm_debug7_el3
+{
+ uint64_t u;
+ struct bdk_ap_cvm_debug7_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_debug7_el3_s cn; */
+};
+typedef union bdk_ap_cvm_debug7_el3 bdk_ap_cvm_debug7_el3_t;
+
+#define BDK_AP_CVM_DEBUG7_EL3 BDK_AP_CVM_DEBUG7_EL3_FUNC()
+static inline uint64_t BDK_AP_CVM_DEBUG7_EL3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_DEBUG7_EL3_FUNC(void)
+{
+ return 0x3060b050300ll;
+}
+
+#define typedef_BDK_AP_CVM_DEBUG7_EL3 bdk_ap_cvm_debug7_el3_t
+#define bustype_BDK_AP_CVM_DEBUG7_EL3 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_DEBUG7_EL3 "AP_CVM_DEBUG7_EL3"
+#define busnum_BDK_AP_CVM_DEBUG7_EL3 0
+#define arguments_BDK_AP_CVM_DEBUG7_EL3 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_debug8_el3
+ *
+ * INTERNAL: AP Cavium Debug 8 Register
+ *
+ * This register is for diagnostic use only.
+ */
+union bdk_ap_cvm_debug8_el3
+{
+ uint64_t u;
+ struct bdk_ap_cvm_debug8_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t rdb_dsc_set_arry_1 : 18; /**< [ 63: 46](RO) rdb_dsc_set_arry[1]\<17:0\>. */
+ uint64_t rdb_dsc_set_arry_0 : 36; /**< [ 45: 10](RO) rdb_dsc_set_arry[0]. */
+ uint64_t uwr_ack_def_cnt : 2; /**< [ 9: 8](RO) Upstream write message ack count. */
+ uint64_t sgi_ack_def_cnt : 2; /**< [ 7: 6](RO) SGI generate message ack count. */
+ uint64_t dct_ack_def_cnt : 2; /**< [ 5: 4](RO) Deactivate message ack count. */
+ uint64_t act_ack_def_cnt : 2; /**< [ 3: 2](RO) Activate message ack count. */
+ uint64_t clr_ack_def_cnt : 2; /**< [ 1: 0](RO) Clear message ack count. */
+#else /* Word 0 - Little Endian */
+ uint64_t clr_ack_def_cnt : 2; /**< [ 1: 0](RO) Clear message ack count. */
+ uint64_t act_ack_def_cnt : 2; /**< [ 3: 2](RO) Activate message ack count. */
+ uint64_t dct_ack_def_cnt : 2; /**< [ 5: 4](RO) Deactivate message ack count. */
+ uint64_t sgi_ack_def_cnt : 2; /**< [ 7: 6](RO) SGI generate message ack count. */
+ uint64_t uwr_ack_def_cnt : 2; /**< [ 9: 8](RO) Upstream write message ack count. */
+ uint64_t rdb_dsc_set_arry_0 : 36; /**< [ 45: 10](RO) rdb_dsc_set_arry[0]. */
+ uint64_t rdb_dsc_set_arry_1 : 18; /**< [ 63: 46](RO) rdb_dsc_set_arry[1]\<17:0\>. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_debug8_el3_s cn; */
+};
+typedef union bdk_ap_cvm_debug8_el3 bdk_ap_cvm_debug8_el3_t;
+
+#define BDK_AP_CVM_DEBUG8_EL3 BDK_AP_CVM_DEBUG8_EL3_FUNC()
+static inline uint64_t BDK_AP_CVM_DEBUG8_EL3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_DEBUG8_EL3_FUNC(void)
+{
+ return 0x3060b070000ll;
+}
+
+#define typedef_BDK_AP_CVM_DEBUG8_EL3 bdk_ap_cvm_debug8_el3_t
+#define bustype_BDK_AP_CVM_DEBUG8_EL3 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_DEBUG8_EL3 "AP_CVM_DEBUG8_EL3"
+#define busnum_BDK_AP_CVM_DEBUG8_EL3 0
+#define arguments_BDK_AP_CVM_DEBUG8_EL3 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_debug9_el3
+ *
+ * INTERNAL: AP Cavium Debug 9 Register
+ *
+ * This register is for diagnostic use only.
+ */
+union bdk_ap_cvm_debug9_el3
+{
+ uint64_t u;
+ struct bdk_ap_cvm_debug9_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t dsc_fsm_enc_state_1 : 4; /**< [ 47: 44](RO) DSC FSM ENC state\<1\>. */
+ uint64_t dsc_fsm_enc_state_0 : 4; /**< [ 43: 40](RO) DSC FSM ENC state\<0\>. */
+ uint64_t clr_fsm_enc_state : 3; /**< [ 39: 37](RO) CLR FSM ENC state. */
+ uint64_t qsc_fsm_enc_state : 3; /**< [ 36: 34](RO) QSC FSM ENC state. */
+ uint64_t dsc_fifo : 4; /**< [ 33: 30](RO) DSC FIFO. */
+ uint64_t ppi_fifo : 4; /**< [ 29: 26](RO) PPI FIFO. */
+ uint64_t cdc_fifo : 4; /**< [ 25: 22](RO) CDC FIFO. */
+ uint64_t eac_fifo : 4; /**< [ 21: 18](RO) EAC FIFO. */
+ uint64_t rdb_dsc_set_arry_1 : 18; /**< [ 17: 0](RO) rdb_dsc_set_arry[1]\<35:18\>. */
+#else /* Word 0 - Little Endian */
+ uint64_t rdb_dsc_set_arry_1 : 18; /**< [ 17: 0](RO) rdb_dsc_set_arry[1]\<35:18\>. */
+ uint64_t eac_fifo : 4; /**< [ 21: 18](RO) EAC FIFO. */
+ uint64_t cdc_fifo : 4; /**< [ 25: 22](RO) CDC FIFO. */
+ uint64_t ppi_fifo : 4; /**< [ 29: 26](RO) PPI FIFO. */
+ uint64_t dsc_fifo : 4; /**< [ 33: 30](RO) DSC FIFO. */
+ uint64_t qsc_fsm_enc_state : 3; /**< [ 36: 34](RO) QSC FSM ENC state. */
+ uint64_t clr_fsm_enc_state : 3; /**< [ 39: 37](RO) CLR FSM ENC state. */
+ uint64_t dsc_fsm_enc_state_0 : 4; /**< [ 43: 40](RO) DSC FSM ENC state\<0\>. */
+ uint64_t dsc_fsm_enc_state_1 : 4; /**< [ 47: 44](RO) DSC FSM ENC state\<1\>. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_debug9_el3_s cn; */
+};
+typedef union bdk_ap_cvm_debug9_el3 bdk_ap_cvm_debug9_el3_t;
+
+#define BDK_AP_CVM_DEBUG9_EL3 BDK_AP_CVM_DEBUG9_EL3_FUNC()
+static inline uint64_t BDK_AP_CVM_DEBUG9_EL3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_DEBUG9_EL3_FUNC(void)
+{
+ return 0x3060b070100ll;
+}
+
+#define typedef_BDK_AP_CVM_DEBUG9_EL3 bdk_ap_cvm_debug9_el3_t
+#define bustype_BDK_AP_CVM_DEBUG9_EL3 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_DEBUG9_EL3 "AP_CVM_DEBUG9_EL3"
+#define busnum_BDK_AP_CVM_DEBUG9_EL3 0
+#define arguments_BDK_AP_CVM_DEBUG9_EL3 -1,-1,-1,-1
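+
+/* Usage sketch (editorial note, not part of the BDK sources): rdb_dsc_set_arry[1]
+ * is split across the two registers -- DEBUG8<63:46> holds bits <17:0> and
+ * DEBUG9<17:0> holds bits <35:18> -- so the 36-bit value must be stitched
+ * together. Sysreg names s3_6_c11_c7_0/_1 are assumed from the address keys.
+ *
+ *   bdk_ap_cvm_debug8_el3_t dbg8;
+ *   bdk_ap_cvm_debug9_el3_t dbg9;
+ *   asm volatile("mrs %0, s3_6_c11_c7_0" : "=r" (dbg8.u));
+ *   asm volatile("mrs %0, s3_6_c11_c7_1" : "=r" (dbg9.u));
+ *   uint64_t arry1 = ((uint64_t)dbg9.s.rdb_dsc_set_arry_1 << 18)
+ *                    | dbg8.s.rdb_dsc_set_arry_1;
+ */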
+
+/**
+ * Register (SYSREG) ap_cvm_dll_observabilty_el3
+ *
+ * INTERNAL: AP Cavium DLL Observability Register
+ */
+union bdk_ap_cvm_dll_observabilty_el3
+{
+ uint64_t u;
+ struct bdk_ap_cvm_dll_observabilty_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_60_63 : 4;
+ uint64_t max_seen : 12; /**< [ 59: 48](RO) Maximum setting seen. */
+ uint64_t min_seen : 12; /**< [ 47: 36](RO) Minimum setting seen. */
+ uint64_t rclk_dll_lock : 1; /**< [ 35: 35](RO) rclk_dll_lock. */
+ uint64_t dll_state : 3; /**< [ 34: 32](RO) dll_state\<2:0\>. */
+ uint64_t dll_setting : 12; /**< [ 31: 20](RO) dll_setting\<11:0\>. */
+ uint64_t raw_dly_elem_enable : 16; /**< [ 19: 4](RO) raw_dly_elem_enable\<15:0\>. */
+ uint64_t clk_invert : 1; /**< [ 3: 3](RO) clk_invert. */
+ uint64_t pd_pos_rclk_refclk : 1; /**< [ 2: 2](RO) pd_pos_rclk_refclk. */
+ uint64_t pdl_rclk_refclk : 1; /**< [ 1: 1](RO) pdl_rclk_refclk. */
+ uint64_t pdr_rclk_refclk : 1; /**< [ 0: 0](RO) pdr_rclk_refclk. */
+#else /* Word 0 - Little Endian */
+ uint64_t pdr_rclk_refclk : 1; /**< [ 0: 0](RO) pdr_rclk_refclk. */
+ uint64_t pdl_rclk_refclk : 1; /**< [ 1: 1](RO) pdl_rclk_refclk. */
+ uint64_t pd_pos_rclk_refclk : 1; /**< [ 2: 2](RO) pd_pos_rclk_refclk. */
+ uint64_t clk_invert : 1; /**< [ 3: 3](RO) clk_invert. */
+ uint64_t raw_dly_elem_enable : 16; /**< [ 19: 4](RO) raw_dly_elem_enable\<15:0\>. */
+ uint64_t dll_setting : 12; /**< [ 31: 20](RO) dll_setting\<11:0\>. */
+ uint64_t dll_state : 3; /**< [ 34: 32](RO) dll_state\<2:0\>. */
+ uint64_t rclk_dll_lock : 1; /**< [ 35: 35](RO) rclk_dll_lock. */
+ uint64_t min_seen : 12; /**< [ 47: 36](RO) Minimum setting seen. */
+ uint64_t max_seen : 12; /**< [ 59: 48](RO) Maximum setting seen. */
+ uint64_t reserved_60_63 : 4;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_dll_observabilty_el3_s cn; */
+};
+typedef union bdk_ap_cvm_dll_observabilty_el3 bdk_ap_cvm_dll_observabilty_el3_t;
+
+#define BDK_AP_CVM_DLL_OBSERVABILTY_EL3 BDK_AP_CVM_DLL_OBSERVABILTY_EL3_FUNC()
+static inline uint64_t BDK_AP_CVM_DLL_OBSERVABILTY_EL3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_DLL_OBSERVABILTY_EL3_FUNC(void)
+{
+ return 0x3060b050100ll;
+}
+
+#define typedef_BDK_AP_CVM_DLL_OBSERVABILTY_EL3 bdk_ap_cvm_dll_observabilty_el3_t
+#define bustype_BDK_AP_CVM_DLL_OBSERVABILTY_EL3 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_DLL_OBSERVABILTY_EL3 "AP_CVM_DLL_OBSERVABILTY_EL3"
+#define busnum_BDK_AP_CVM_DLL_OBSERVABILTY_EL3 0
+#define arguments_BDK_AP_CVM_DLL_OBSERVABILTY_EL3 -1,-1,-1,-1
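+
+/* Usage sketch (editorial note, not part of the BDK sources): check DLL lock
+ * and the observed settings window. Sysreg name s3_6_c11_c5_1 is assumed from
+ * the 0x3060b050100 key.
+ *
+ *   bdk_ap_cvm_dll_observabilty_el3_t dll;
+ *   asm volatile("mrs %0, s3_6_c11_c5_1" : "=r" (dll.u));
+ *   if (!dll.s.rclk_dll_lock)
+ *       printf("RCLK DLL not locked\n");
+ *   else
+ *       printf("DLL setting %u, seen %u..%u\n", (unsigned)dll.s.dll_setting,
+ *              (unsigned)dll.s.min_seen, (unsigned)dll.s.max_seen);
+ */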
+
+/**
+ * Register (SYSREG) ap_cvm_erricache_el1
+ *
+ * AP Cavium Error Icache Register
+ */
+union bdk_ap_cvm_erricache_el1
+{
+ uint64_t u;
+ struct bdk_ap_cvm_erricache_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_17_63 : 47;
+ uint64_t set : 6; /**< [ 16: 11](R/W) Set which had the parity error. */
+ uint64_t va : 8; /**< [ 10: 3](R/W) VA\<10:3\> of address which had the parity error. */
+ uint64_t reserved_1_2 : 2;
+ uint64_t icache_data_error : 1; /**< [ 0: 0](R/W) Icache corrected a data error. */
+#else /* Word 0 - Little Endian */
+ uint64_t icache_data_error : 1; /**< [ 0: 0](R/W) Icache corrected a data error. */
+ uint64_t reserved_1_2 : 2;
+ uint64_t va : 8; /**< [ 10: 3](R/W) VA\<10:3\> of address which had the parity error. */
+ uint64_t set : 6; /**< [ 16: 11](R/W) Set which had the parity error. */
+ uint64_t reserved_17_63 : 47;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_erricache_el1_s cn; */
+};
+typedef union bdk_ap_cvm_erricache_el1 bdk_ap_cvm_erricache_el1_t;
+
+#define BDK_AP_CVM_ERRICACHE_EL1 BDK_AP_CVM_ERRICACHE_EL1_FUNC()
+static inline uint64_t BDK_AP_CVM_ERRICACHE_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_ERRICACHE_EL1_FUNC(void)
+{
+ return 0x3000b020000ll;
+}
+
+#define typedef_BDK_AP_CVM_ERRICACHE_EL1 bdk_ap_cvm_erricache_el1_t
+#define bustype_BDK_AP_CVM_ERRICACHE_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_ERRICACHE_EL1 "AP_CVM_ERRICACHE_EL1"
+#define busnum_BDK_AP_CVM_ERRICACHE_EL1 0
+#define arguments_BDK_AP_CVM_ERRICACHE_EL1 -1,-1,-1,-1
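+
+/* Usage sketch (editorial note, not part of the BDK sources): report and clear
+ * a corrected Icache parity error; the fields are R/W, so writing zero clears
+ * the record. Sysreg name s3_0_c11_c2_0 is assumed from the 0x3000b020000 key.
+ *
+ *   bdk_ap_cvm_erricache_el1_t err;
+ *   asm volatile("mrs %0, s3_0_c11_c2_0" : "=r" (err.u));
+ *   if (err.s.icache_data_error)
+ *   {
+ *       printf("Icache parity error: set %u, VA<10:3> = 0x%x\n",
+ *              (unsigned)err.s.set, (unsigned)err.s.va);
+ *       asm volatile("msr s3_0_c11_c2_0, %0" : : "r" (0ull));
+ *   }
+ */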
+
+/**
+ * Register (SYSREG) ap_cvm_errmem_el1
+ *
+ * AP Cavium Error Memory Register
+ */
+union bdk_ap_cvm_errmem_el1
+{
+ uint64_t u;
+ struct bdk_ap_cvm_errmem_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t wbfsbeerr : 1; /**< [ 48: 48](RO) Reserved. */
+ uint64_t gsynctonosw : 1; /**< [ 47: 47](R/W) Global sync timeout, no report to software. */
+ uint64_t gsynctodis : 1; /**< [ 46: 46](R/W) Global sync timeout disable. */
+ uint64_t gsyncto : 1; /**< [ 45: 45](R/W/H) Global sync timeout. */
+ uint64_t wcumultdis : 1; /**< [ 44: 44](R/W) WCU multiple match error disable. */
+ uint64_t wcumult : 1; /**< [ 43: 43](R/W/H) WCU multiple match error. */
+ uint64_t mtlbmultdis : 1; /**< [ 42: 42](R/W) MTLB multiple match error disable. */
+ uint64_t mtlbmult : 1; /**< [ 41: 41](R/W/H) MTLB multiple match error. */
+ uint64_t reserved_40 : 1;
+ uint64_t barriertonosw : 1; /**< [ 39: 39](R/W) Barrier timeout, no report to software. */
+ uint64_t barriertodis : 1; /**< [ 38: 38](R/W) Barrier timeout disable. */
+ uint64_t barrierto : 1; /**< [ 37: 37](R/W/H) Barrier timeout. */
+ uint64_t reserved_36 : 1;
+ uint64_t rbftonosw : 1; /**< [ 35: 35](R/W) Read buffer timeout, no report to software. */
+ uint64_t rbftodis : 1; /**< [ 34: 34](R/W) Read buffer timeout disable. */
+ uint64_t rbfto : 1; /**< [ 33: 33](R/W/H) Read buffer timeout. */
+ uint64_t wcuperrforce : 1; /**< [ 32: 32](R/W/H) WCU parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t wcuperrnosw : 1; /**< [ 31: 31](R/W) WCU parity error, no report to software. */
+ uint64_t wcuperrdis : 1; /**< [ 30: 30](R/W) WCU parity error disable. */
+ uint64_t wcuperr : 1; /**< [ 29: 29](R/W/H) WCU corrected parity error. */
+ uint64_t wbfdbeforce : 1; /**< [ 28: 28](R/W/H) Write-buffer DBE force. Bit is cleared when error is forced on next write operation. */
+ uint64_t wbfsbeforce : 1; /**< [ 27: 27](R/W/H) Write-buffer SBE force. Bit is cleared when error is forced on next write operation. */
+ uint64_t wbfperrnosw : 1; /**< [ 26: 26](R/W) Write-buffer single-bit error, no report to software. */
+ uint64_t wbfperrdis : 1; /**< [ 25: 25](R/W) Write-buffer double-bit error disable. */
+ uint64_t wbfperr : 1; /**< [ 24: 24](R/W/H) Write-buffer double-bit error. */
+ uint64_t mafperrforce : 1; /**< [ 23: 23](R/W/H) MAF parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t mafperrnosw : 1; /**< [ 22: 22](R/W) MAF parity error, no report to software. */
+ uint64_t mafperrdis : 1; /**< [ 21: 21](R/W) MAF parity error disable. */
+ uint64_t mafperr : 1; /**< [ 20: 20](R/W/H) MAF parity error. */
+ uint64_t utlbperrforce : 1; /**< [ 19: 19](R/W/H) uTLB correctable parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t utlbperrnosw : 1; /**< [ 18: 18](R/W) uTLB correctable parity error, no report to software. */
+ uint64_t utlbperrdis : 1; /**< [ 17: 17](R/W) uTLB correctable parity error disable. */
+ uint64_t utlbperr : 1; /**< [ 16: 16](R/W/H) uTLB corrected a parity error. */
+ uint64_t mtlbperrforce : 1; /**< [ 15: 15](R/W/H) MTLB correctable parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t mtlbperrnosw : 1; /**< [ 14: 14](R/W) MTLB correctable parity error, no report to software. */
+ uint64_t mtlbperrdis : 1; /**< [ 13: 13](R/W) MTLB correctable parity error disable. */
+ uint64_t mtlbperr : 1; /**< [ 12: 12](R/W/H) MTLB corrected a parity error. */
+ uint64_t l1dperrforce : 1; /**< [ 11: 11](R/W/H) Dcache correctable parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t l1dperrnosw : 1; /**< [ 10: 10](R/W) Dcache correctable parity error, no report to software. */
+ uint64_t l1dperrdis : 1; /**< [ 9: 9](R/W) Dcache correctable parity error disable. */
+ uint64_t l1dperr : 1; /**< [ 8: 8](R/W/H) Dcache corrected a parity error. */
+ uint64_t l1dway : 5; /**< [ 7: 3](R/W/H) Indicates Dcache way. */
+ uint64_t l1dset : 3; /**< [ 2: 0](R/W/H) Indicates Dcache set. */
+#else /* Word 0 - Little Endian */
+ uint64_t l1dset : 3; /**< [ 2: 0](R/W/H) Indicates Dcache set. */
+ uint64_t l1dway : 5; /**< [ 7: 3](R/W/H) Indicates Dcache way. */
+ uint64_t l1dperr : 1; /**< [ 8: 8](R/W/H) Dcache corrected a parity error. */
+ uint64_t l1dperrdis : 1; /**< [ 9: 9](R/W) Dcache correctable parity error disable. */
+ uint64_t l1dperrnosw : 1; /**< [ 10: 10](R/W) Dcache correctable parity error, no report to software. */
+ uint64_t l1dperrforce : 1; /**< [ 11: 11](R/W/H) Dcache correctable parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t mtlbperr : 1; /**< [ 12: 12](R/W/H) MTLB corrected a parity error. */
+ uint64_t mtlbperrdis : 1; /**< [ 13: 13](R/W) MTLB correctable parity error disable. */
+ uint64_t mtlbperrnosw : 1; /**< [ 14: 14](R/W) MTLB correctable parity error, no report to software. */
+ uint64_t mtlbperrforce : 1; /**< [ 15: 15](R/W/H) MTLB correctable parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t utlbperr : 1; /**< [ 16: 16](R/W/H) uTLB corrected a parity error. */
+ uint64_t utlbperrdis : 1; /**< [ 17: 17](R/W) uTLB correctable parity error disable. */
+ uint64_t utlbperrnosw : 1; /**< [ 18: 18](R/W) uTLB correctable parity error, no report to software. */
+ uint64_t utlbperrforce : 1; /**< [ 19: 19](R/W/H) uTLB correctable parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t mafperr : 1; /**< [ 20: 20](R/W/H) MAF parity error. */
+ uint64_t mafperrdis : 1; /**< [ 21: 21](R/W) MAF parity error disable. */
+ uint64_t mafperrnosw : 1; /**< [ 22: 22](R/W) MAF parity error, no report to software. */
+ uint64_t mafperrforce : 1; /**< [ 23: 23](R/W/H) MAF parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t wbfperr : 1; /**< [ 24: 24](R/W/H) Write-buffer double-bit error. */
+ uint64_t wbfperrdis : 1; /**< [ 25: 25](R/W) Write-buffer double-bit error disable. */
+ uint64_t wbfperrnosw : 1; /**< [ 26: 26](R/W) Write-buffer single-bit error, no report to software. */
+ uint64_t wbfsbeforce : 1; /**< [ 27: 27](R/W/H) Write-buffer SBE force. Bit is cleared when error is forced on next write operation. */
+ uint64_t wbfdbeforce : 1; /**< [ 28: 28](R/W/H) Write-buffer DBE force. Bit is cleared when error is forced on next write operation. */
+ uint64_t wcuperr : 1; /**< [ 29: 29](R/W/H) WCU corrected parity error. */
+ uint64_t wcuperrdis : 1; /**< [ 30: 30](R/W) WCU parity error disable. */
+ uint64_t wcuperrnosw : 1; /**< [ 31: 31](R/W) WCU parity error, no report to software. */
+ uint64_t wcuperrforce : 1; /**< [ 32: 32](R/W/H) WCU parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t rbfto : 1; /**< [ 33: 33](R/W/H) Read buffer timeout. */
+ uint64_t rbftodis : 1; /**< [ 34: 34](R/W) Read buffer timeout disable. */
+ uint64_t rbftonosw : 1; /**< [ 35: 35](R/W) Read buffer timeout, no report to software. */
+ uint64_t reserved_36 : 1;
+ uint64_t barrierto : 1; /**< [ 37: 37](R/W/H) Barrier timeout. */
+ uint64_t barriertodis : 1; /**< [ 38: 38](R/W) Barrier timeout disable. */
+ uint64_t barriertonosw : 1; /**< [ 39: 39](R/W) Barrier timeout, no report to software. */
+ uint64_t reserved_40 : 1;
+ uint64_t mtlbmult : 1; /**< [ 41: 41](R/W/H) MTLB multiple match error. */
+ uint64_t mtlbmultdis : 1; /**< [ 42: 42](R/W) MTLB multiple match error disable. */
+ uint64_t wcumult : 1; /**< [ 43: 43](R/W/H) WCU multiple match error. */
+ uint64_t wcumultdis : 1; /**< [ 44: 44](R/W) WCU multiple match error disable. */
+ uint64_t gsyncto : 1; /**< [ 45: 45](R/W/H) Global sync timeout. */
+ uint64_t gsynctodis : 1; /**< [ 46: 46](R/W) Global sync timeout disable. */
+ uint64_t gsynctonosw : 1; /**< [ 47: 47](R/W) Global sync timeout, no report to software. */
+ uint64_t wbfsbeerr : 1; /**< [ 48: 48](RO) Reserved. */
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_cvm_errmem_el1_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t wbfsbeerr : 1; /**< [ 48: 48](RO) Reserved. */
+ uint64_t gsynctonosw : 1; /**< [ 47: 47](R/W) Global sync timeout, no report to software. */
+ uint64_t gsynctodis : 1; /**< [ 46: 46](R/W) Global sync timeout disable. */
+ uint64_t gsyncto : 1; /**< [ 45: 45](R/W/H) Global sync timeout. */
+ uint64_t wcumultdis : 1; /**< [ 44: 44](R/W) WCU multiple match error disable. */
+ uint64_t wcumult : 1; /**< [ 43: 43](R/W/H) WCU multiple match error. */
+ uint64_t mtlbmultdis : 1; /**< [ 42: 42](R/W) MTLB multiple match error disable. */
+ uint64_t mtlbmult : 1; /**< [ 41: 41](R/W/H) MTLB multiple match error. */
+ uint64_t barriertoforce : 1; /**< [ 40: 40](R/W/H) Barrier timeout force. Bit is cleared when error is forced. */
+ uint64_t barriertonosw : 1; /**< [ 39: 39](R/W) Barrier timeout, no report to software. */
+ uint64_t barriertodis : 1; /**< [ 38: 38](R/W) Barrier timeout disable. */
+ uint64_t barrierto : 1; /**< [ 37: 37](R/W/H) Barrier timeout. */
+ uint64_t rbftoforce : 1; /**< [ 36: 36](R/W/H) Read buffer timeout force. Bit is cleared when error is forced. */
+ uint64_t rbftonosw : 1; /**< [ 35: 35](R/W) Read buffer timeout, no report to software. */
+ uint64_t rbftodis : 1; /**< [ 34: 34](R/W) Read buffer timeout disable. */
+ uint64_t rbfto : 1; /**< [ 33: 33](R/W/H) Read buffer timeout. */
+ uint64_t wcuperrforce : 1; /**< [ 32: 32](R/W/H) WCU parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t wcuperrnosw : 1; /**< [ 31: 31](R/W) WCU parity error, no report to software. */
+ uint64_t wcuperrdis : 1; /**< [ 30: 30](R/W) WCU parity error disable. */
+ uint64_t wcuperr : 1; /**< [ 29: 29](R/W/H) WCU corrected parity error. */
+ uint64_t wbfdbeforce : 1; /**< [ 28: 28](R/W/H) Write-buffer DBE force. Bit is cleared when error is forced on next write operation. */
+ uint64_t wbfsbeforce : 1; /**< [ 27: 27](R/W/H) Write-buffer SBE force. Bit is cleared when error is forced on next write operation. */
+ uint64_t wbfperrnosw : 1; /**< [ 26: 26](R/W) Write-buffer single-bit error, no report to software. */
+ uint64_t wbfperrdis : 1; /**< [ 25: 25](R/W) Write-buffer double-bit error disable. */
+ uint64_t wbfperr : 1; /**< [ 24: 24](R/W/H) Write-buffer double-bit error. */
+ uint64_t mafperrforce : 1; /**< [ 23: 23](R/W/H) MAF parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t mafperrnosw : 1; /**< [ 22: 22](R/W) MAF parity error, no report to software. */
+ uint64_t mafperrdis : 1; /**< [ 21: 21](R/W) MAF parity error disable. */
+ uint64_t mafperr : 1; /**< [ 20: 20](R/W/H) MAF parity error. */
+ uint64_t utlbperrforce : 1; /**< [ 19: 19](R/W/H) uTLB correctable parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t utlbperrnosw : 1; /**< [ 18: 18](R/W) uTLB correctable parity error, no report to software. */
+ uint64_t utlbperrdis : 1; /**< [ 17: 17](R/W) uTLB correctable parity error disable. */
+ uint64_t utlbperr : 1; /**< [ 16: 16](R/W/H) uTLB corrected a parity error. */
+ uint64_t mtlbperrforce : 1; /**< [ 15: 15](R/W/H) MTLB correctable parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t mtlbperrnosw : 1; /**< [ 14: 14](R/W) MTLB correctable parity error, no report to software. */
+ uint64_t mtlbperrdis : 1; /**< [ 13: 13](R/W) MTLB correctable parity error disable. */
+ uint64_t mtlbperr : 1; /**< [ 12: 12](R/W/H) MTLB corrected a parity error. */
+ uint64_t l1dperrforce : 1; /**< [ 11: 11](R/W/H) Dcache correctable parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t l1dperrnosw : 1; /**< [ 10: 10](R/W) Dcache correctable parity error, no report to software. */
+ uint64_t l1dperrdis : 1; /**< [ 9: 9](R/W) Dcache correctable parity error disable. */
+ uint64_t l1dperr : 1; /**< [ 8: 8](R/W/H) Dcache corrected a parity error. */
+ uint64_t l1dway : 5; /**< [ 7: 3](R/W/H) Indicates Dcache way. */
+ uint64_t l1dset : 3; /**< [ 2: 0](R/W/H) Indicates Dcache set. */
+#else /* Word 0 - Little Endian */
+ uint64_t l1dset : 3; /**< [ 2: 0](R/W/H) Indicates Dcache set. */
+ uint64_t l1dway : 5; /**< [ 7: 3](R/W/H) Indicates Dcache way. */
+ uint64_t l1dperr : 1; /**< [ 8: 8](R/W/H) Dcache corrected a parity error. */
+ uint64_t l1dperrdis : 1; /**< [ 9: 9](R/W) Dcache correctable parity error disable. */
+ uint64_t l1dperrnosw : 1; /**< [ 10: 10](R/W) Dcache correctable parity error, no report to software. */
+ uint64_t l1dperrforce : 1; /**< [ 11: 11](R/W/H) Dcache correctable parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t mtlbperr : 1; /**< [ 12: 12](R/W/H) MTLB corrected a parity error. */
+ uint64_t mtlbperrdis : 1; /**< [ 13: 13](R/W) MTLB correctable parity error disable. */
+ uint64_t mtlbperrnosw : 1; /**< [ 14: 14](R/W) MTLB correctable parity error, no report to software. */
+ uint64_t mtlbperrforce : 1; /**< [ 15: 15](R/W/H) MTLB correctable parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t utlbperr : 1; /**< [ 16: 16](R/W/H) uTLB corrected a parity error. */
+ uint64_t utlbperrdis : 1; /**< [ 17: 17](R/W) uTLB correctable parity error disable. */
+ uint64_t utlbperrnosw : 1; /**< [ 18: 18](R/W) uTLB correctable parity error, no report to software. */
+ uint64_t utlbperrforce : 1; /**< [ 19: 19](R/W/H) uTLB correctable parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t mafperr : 1; /**< [ 20: 20](R/W/H) MAF parity error. */
+ uint64_t mafperrdis : 1; /**< [ 21: 21](R/W) MAF parity error disable. */
+ uint64_t mafperrnosw : 1; /**< [ 22: 22](R/W) MAF parity error, no report to software. */
+ uint64_t mafperrforce : 1; /**< [ 23: 23](R/W/H) MAF parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t wbfperr : 1; /**< [ 24: 24](R/W/H) Write-buffer double-bit error. */
+ uint64_t wbfperrdis : 1; /**< [ 25: 25](R/W) Write-buffer double-bit error disable. */
+ uint64_t wbfperrnosw : 1; /**< [ 26: 26](R/W) Write-buffer single-bit error, no report to software. */
+ uint64_t wbfsbeforce : 1; /**< [ 27: 27](R/W/H) Write-buffer SBE force. Bit is cleared when error is forced on next write operation. */
+ uint64_t wbfdbeforce : 1; /**< [ 28: 28](R/W/H) Write-buffer DBE force. Bit is cleared when error is forced on next write operation. */
+ uint64_t wcuperr : 1; /**< [ 29: 29](R/W/H) WCU corrected parity error. */
+ uint64_t wcuperrdis : 1; /**< [ 30: 30](R/W) WCU parity error disable. */
+ uint64_t wcuperrnosw : 1; /**< [ 31: 31](R/W) WCU parity error, no report to software. */
+ uint64_t wcuperrforce : 1; /**< [ 32: 32](R/W/H) WCU parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t rbfto : 1; /**< [ 33: 33](R/W/H) Read buffer timeout. */
+ uint64_t rbftodis : 1; /**< [ 34: 34](R/W) Read buffer timeout disable. */
+ uint64_t rbftonosw : 1; /**< [ 35: 35](R/W) Read buffer timeout, no report to software. */
+ uint64_t rbftoforce : 1; /**< [ 36: 36](R/W/H) Read buffer timeout force. Bit is cleared when error is forced. */
+ uint64_t barrierto : 1; /**< [ 37: 37](R/W/H) Barrier timeout. */
+ uint64_t barriertodis : 1; /**< [ 38: 38](R/W) Barrier timeout disable. */
+ uint64_t barriertonosw : 1; /**< [ 39: 39](R/W) Barrier timeout, no report to software. */
+ uint64_t barriertoforce : 1; /**< [ 40: 40](R/W/H) Barrier timeout force. Bit is cleared when error is forced. */
+ uint64_t mtlbmult : 1; /**< [ 41: 41](R/W/H) MTLB multiple match error. */
+ uint64_t mtlbmultdis : 1; /**< [ 42: 42](R/W) MTLB multiple match error disable. */
+ uint64_t wcumult : 1; /**< [ 43: 43](R/W/H) WCU multiple match error. */
+ uint64_t wcumultdis : 1; /**< [ 44: 44](R/W) WCU multiple match error disable. */
+ uint64_t gsyncto : 1; /**< [ 45: 45](R/W/H) Global sync timeout. */
+ uint64_t gsynctodis : 1; /**< [ 46: 46](R/W) Global sync timeout disable. */
+ uint64_t gsynctonosw : 1; /**< [ 47: 47](R/W) Global sync timeout, no report to software. */
+ uint64_t wbfsbeerr : 1; /**< [ 48: 48](RO) Reserved. */
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_ap_cvm_errmem_el1_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t reserved_0_48 : 49;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_48 : 49;
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_ap_cvm_errmem_el1_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t wbfsbeerr : 1; /**< [ 48: 48](RO) Reserved. */
+ uint64_t gsynctonosw : 1; /**< [ 47: 47](R/W) Global sync timeout, no report to software. */
+ uint64_t gsynctodis : 1; /**< [ 46: 46](R/W) Global sync timeout disable. */
+ uint64_t gsyncto : 1; /**< [ 45: 45](R/W/H) Global sync timeout. */
+ uint64_t wcumultdis : 1; /**< [ 44: 44](R/W) WCU multiple match error disable. */
+ uint64_t wcumult : 1; /**< [ 43: 43](R/W/H) WCU multiple match error. */
+ uint64_t mtlbmultdis : 1; /**< [ 42: 42](R/W) MTLB multiple match error disable. */
+ uint64_t mtlbmult : 1; /**< [ 41: 41](R/W/H) MTLB multiple match error. */
+ uint64_t spare40 : 1; /**< [ 40: 40](R/W/H) Reserved. */
+ uint64_t barriertonosw : 1; /**< [ 39: 39](R/W) Barrier timeout, no report to software. */
+ uint64_t barriertodis : 1; /**< [ 38: 38](R/W) Barrier timeout disable. */
+ uint64_t barrierto : 1; /**< [ 37: 37](R/W/H) Barrier timeout. */
+ uint64_t spare36 : 1; /**< [ 36: 36](R/W/H) Reserved. */
+ uint64_t rbftonosw : 1; /**< [ 35: 35](R/W) Read buffer timeout, no report to software. */
+ uint64_t rbftodis : 1; /**< [ 34: 34](R/W) Read buffer timeout disable. */
+ uint64_t rbfto : 1; /**< [ 33: 33](R/W/H) Read buffer timeout. */
+ uint64_t wcuperrforce : 1; /**< [ 32: 32](R/W/H) WCU parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t wcuperrnosw : 1; /**< [ 31: 31](R/W) WCU parity error, no report to software. */
+ uint64_t wcuperrdis : 1; /**< [ 30: 30](R/W) WCU parity error disable. */
+ uint64_t wcuperr : 1; /**< [ 29: 29](R/W/H) WCU corrected parity error. */
+ uint64_t wbfdbeforce : 1; /**< [ 28: 28](R/W/H) Write-buffer DBE force. Bit is cleared when error is forced on next write operation. */
+ uint64_t wbfsbeforce : 1; /**< [ 27: 27](R/W/H) Write-buffer SBE force. Bit is cleared when error is forced on next write operation. */
+ uint64_t wbfperrnosw : 1; /**< [ 26: 26](R/W) Write-buffer single-bit error, no report to software. */
+ uint64_t wbfperrdis : 1; /**< [ 25: 25](R/W) Write-buffer double-bit error disable. */
+ uint64_t wbfperr : 1; /**< [ 24: 24](R/W/H) Write-buffer double-bit error. */
+ uint64_t mafperrforce : 1; /**< [ 23: 23](R/W/H) MAF parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t mafperrnosw : 1; /**< [ 22: 22](R/W) MAF parity error, no report to software. */
+ uint64_t mafperrdis : 1; /**< [ 21: 21](R/W) MAF parity error disable. */
+ uint64_t mafperr : 1; /**< [ 20: 20](R/W/H) MAF parity error. */
+ uint64_t utlbperrforce : 1; /**< [ 19: 19](R/W/H) uTLB correctable parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t utlbperrnosw : 1; /**< [ 18: 18](R/W) uTLB correctable parity error, no report to software. */
+ uint64_t utlbperrdis : 1; /**< [ 17: 17](R/W) uTLB correctable parity error disable. */
+ uint64_t utlbperr : 1; /**< [ 16: 16](R/W/H) uTLB corrected a parity error. */
+ uint64_t mtlbperrforce : 1; /**< [ 15: 15](R/W/H) MTLB correctable parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t mtlbperrnosw : 1; /**< [ 14: 14](R/W) MTLB correctable parity error, no report to software. */
+ uint64_t mtlbperrdis : 1; /**< [ 13: 13](R/W) MTLB correctable parity error disable. */
+ uint64_t mtlbperr : 1; /**< [ 12: 12](R/W/H) MTLB corrected a parity error. */
+ uint64_t l1dperrforce : 1; /**< [ 11: 11](R/W/H) Dcache correctable parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t l1dperrnosw : 1; /**< [ 10: 10](R/W) Dcache correctable parity error, no report to software. */
+ uint64_t l1dperrdis : 1; /**< [ 9: 9](R/W) Dcache correctable parity error disable. */
+ uint64_t l1dperr : 1; /**< [ 8: 8](R/W/H) Dcache corrected a parity error. */
+ uint64_t l1dway : 5; /**< [ 7: 3](R/W/H) Indicates Dcache way. */
+ uint64_t l1dset : 3; /**< [ 2: 0](R/W/H) Indicates Dcache set. */
+#else /* Word 0 - Little Endian */
+ uint64_t l1dset : 3; /**< [ 2: 0](R/W/H) Indicates Dcache set. */
+ uint64_t l1dway : 5; /**< [ 7: 3](R/W/H) Indicates Dcache way. */
+ uint64_t l1dperr : 1; /**< [ 8: 8](R/W/H) Dcache corrected a parity error. */
+ uint64_t l1dperrdis : 1; /**< [ 9: 9](R/W) Dcache correctable parity error disable. */
+ uint64_t l1dperrnosw : 1; /**< [ 10: 10](R/W) Dcache correctable parity error, no report to software. */
+ uint64_t l1dperrforce : 1; /**< [ 11: 11](R/W/H) Dcache correctable parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t mtlbperr : 1; /**< [ 12: 12](R/W/H) MTLB corrected a parity error. */
+ uint64_t mtlbperrdis : 1; /**< [ 13: 13](R/W) MTLB correctable parity error disable. */
+ uint64_t mtlbperrnosw : 1; /**< [ 14: 14](R/W) MTLB correctable parity error, no report to software. */
+ uint64_t mtlbperrforce : 1; /**< [ 15: 15](R/W/H) MTLB correctable parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t utlbperr : 1; /**< [ 16: 16](R/W/H) uTLB corrected a parity error. */
+ uint64_t utlbperrdis : 1; /**< [ 17: 17](R/W) uTLB correctable parity error disable. */
+ uint64_t utlbperrnosw : 1; /**< [ 18: 18](R/W) uTLB correctable parity error, no report to software. */
+ uint64_t utlbperrforce : 1; /**< [ 19: 19](R/W/H) uTLB correctable parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t mafperr : 1; /**< [ 20: 20](R/W/H) MAF parity error. */
+ uint64_t mafperrdis : 1; /**< [ 21: 21](R/W) MAF parity error disable. */
+ uint64_t mafperrnosw : 1; /**< [ 22: 22](R/W) MAF parity error, no report to software. */
+ uint64_t mafperrforce : 1; /**< [ 23: 23](R/W/H) MAF parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t wbfperr : 1; /**< [ 24: 24](R/W/H) Write-buffer double-bit error. */
+ uint64_t wbfperrdis : 1; /**< [ 25: 25](R/W) Write-buffer double-bit error disable. */
+ uint64_t wbfperrnosw : 1; /**< [ 26: 26](R/W) Write-buffer single-bit error, no report to software. */
+ uint64_t wbfsbeforce : 1; /**< [ 27: 27](R/W/H) Write-buffer SBE force. Bit is cleared when error is forced on next write operation. */
+ uint64_t wbfdbeforce : 1; /**< [ 28: 28](R/W/H) Write-buffer DBE force. Bit is cleared when error is forced on next write operation. */
+ uint64_t wcuperr : 1; /**< [ 29: 29](R/W/H) WCU corrected parity error. */
+ uint64_t wcuperrdis : 1; /**< [ 30: 30](R/W) WCU parity error disable. */
+ uint64_t wcuperrnosw : 1; /**< [ 31: 31](R/W) WCU parity error, no report to software. */
+ uint64_t wcuperrforce : 1; /**< [ 32: 32](R/W/H) WCU parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t rbfto : 1; /**< [ 33: 33](R/W/H) Read buffer timeout. */
+ uint64_t rbftodis : 1; /**< [ 34: 34](R/W) Read buffer timeout disable. */
+ uint64_t rbftonosw : 1; /**< [ 35: 35](R/W) Read buffer timeout, no report to software. */
+ uint64_t spare36 : 1; /**< [ 36: 36](R/W/H) Reserved. */
+ uint64_t barrierto : 1; /**< [ 37: 37](R/W/H) Barrier timeout. */
+ uint64_t barriertodis : 1; /**< [ 38: 38](R/W) Barrier timeout disable. */
+ uint64_t barriertonosw : 1; /**< [ 39: 39](R/W) Barrier timeout, no report to software. */
+ uint64_t spare40 : 1; /**< [ 40: 40](R/W/H) Reserved. */
+ uint64_t mtlbmult : 1; /**< [ 41: 41](R/W/H) MTLB multiple match error. */
+ uint64_t mtlbmultdis : 1; /**< [ 42: 42](R/W) MTLB multiple match error disable. */
+ uint64_t wcumult : 1; /**< [ 43: 43](R/W/H) WCU multiple match error. */
+ uint64_t wcumultdis : 1; /**< [ 44: 44](R/W) WCU multiple match error disable. */
+ uint64_t gsyncto : 1; /**< [ 45: 45](R/W/H) Global sync timeout. */
+ uint64_t gsynctodis : 1; /**< [ 46: 46](R/W) Global sync timeout disable. */
+ uint64_t gsynctonosw : 1; /**< [ 47: 47](R/W) Global sync timeout, no report to software. */
+ uint64_t wbfsbeerr : 1; /**< [ 48: 48](RO) Reserved. */
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_ap_cvm_errmem_el1_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t wbfsbeerr : 1; /**< [ 48: 48](R/W/H) Write-buffer single-bit error. */
+ uint64_t gsynctonosw : 1; /**< [ 47: 47](R/W) Global sync timeout, no report to software. */
+ uint64_t gsynctodis : 1; /**< [ 46: 46](R/W) Global sync timeout disable. */
+ uint64_t gsyncto : 1; /**< [ 45: 45](R/W/H) Global sync timeout. */
+ uint64_t wcumultdis : 1; /**< [ 44: 44](R/W) WCU multiple match error disable. */
+ uint64_t wcumult : 1; /**< [ 43: 43](R/W/H) WCU multiple match error. */
+ uint64_t mtlbmultdis : 1; /**< [ 42: 42](R/W) MTLB multiple match error disable. */
+ uint64_t mtlbmult : 1; /**< [ 41: 41](R/W/H) MTLB multiple match error. */
+ uint64_t spare40 : 1; /**< [ 40: 40](R/W/H) Reserved. */
+ uint64_t barriertonosw : 1; /**< [ 39: 39](R/W) Barrier timeout, no report to software. */
+ uint64_t barriertodis : 1; /**< [ 38: 38](R/W) Barrier timeout disable. */
+ uint64_t barrierto : 1; /**< [ 37: 37](R/W/H) Barrier timeout. */
+ uint64_t spare36 : 1; /**< [ 36: 36](R/W/H) Reserved. */
+ uint64_t rbftonosw : 1; /**< [ 35: 35](R/W) Read buffer timeout, no report to software. */
+ uint64_t rbftodis : 1; /**< [ 34: 34](R/W) Read buffer timeout disable. */
+ uint64_t rbfto : 1; /**< [ 33: 33](R/W/H) Read buffer timeout. */
+ uint64_t wcuperrforce : 1; /**< [ 32: 32](R/W/H) WCU parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t wcuperrnosw : 1; /**< [ 31: 31](R/W) WCU parity error, no report to software. */
+ uint64_t wcuperrdis : 1; /**< [ 30: 30](R/W) WCU parity error disable. */
+ uint64_t wcuperr : 1; /**< [ 29: 29](R/W/H) WCU corrected parity error. */
+ uint64_t wbfdbeforce : 1; /**< [ 28: 28](R/W/H) Write-buffer DBE force. Bit is cleared when error is forced on next write operation. */
+ uint64_t wbfsbeforce : 1; /**< [ 27: 27](R/W/H) Write-buffer SBE force. Bit is cleared when error is forced on next write operation. */
+ uint64_t wbfperrnosw : 1; /**< [ 26: 26](R/W) Write-buffer single-bit error, no report to software. */
+ uint64_t wbfperrdis : 1; /**< [ 25: 25](R/W) Write-buffer double-bit error disable. */
+ uint64_t wbfperr : 1; /**< [ 24: 24](R/W/H) Write-buffer double-bit error. */
+ uint64_t mafperrforce : 1; /**< [ 23: 23](R/W/H) MAF parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t mafperrnosw : 1; /**< [ 22: 22](R/W) MAF parity error, no report to software. */
+ uint64_t mafperrdis : 1; /**< [ 21: 21](R/W) MAF parity error disable. */
+ uint64_t mafperr : 1; /**< [ 20: 20](R/W/H) MAF parity error. */
+ uint64_t utlbperrforce : 1; /**< [ 19: 19](R/W/H) uTLB correctable parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t utlbperrnosw : 1; /**< [ 18: 18](R/W) uTLB correctable parity error, no report to software. */
+ uint64_t utlbperrdis : 1; /**< [ 17: 17](R/W) uTLB correctable parity error disable. */
+ uint64_t utlbperr : 1; /**< [ 16: 16](R/W/H) uTLB corrected a parity error. */
+ uint64_t mtlbperrforce : 1; /**< [ 15: 15](R/W/H) MTLB correctable parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t mtlbperrnosw : 1; /**< [ 14: 14](R/W) MTLB correctable parity error, no report to software. */
+ uint64_t mtlbperrdis : 1; /**< [ 13: 13](R/W) MTLB correctable parity error disable. */
+ uint64_t mtlbperr : 1; /**< [ 12: 12](R/W/H) MTLB corrected a parity error. */
+ uint64_t l1dperrforce : 1; /**< [ 11: 11](R/W/H) Dcache correctable parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t l1dperrnosw : 1; /**< [ 10: 10](R/W) Dcache correctable parity error, no report to software. */
+ uint64_t l1dperrdis : 1; /**< [ 9: 9](R/W) Dcache correctable parity error disable. */
+ uint64_t l1dperr : 1; /**< [ 8: 8](R/W/H) Dcache corrected a parity error. */
+ uint64_t l1dway : 5; /**< [ 7: 3](R/W/H) Indicates Dcache way. */
+ uint64_t l1dset : 3; /**< [ 2: 0](R/W/H) Indicates Dcache set. */
+#else /* Word 0 - Little Endian */
+ uint64_t l1dset : 3; /**< [ 2: 0](R/W/H) Indicates Dcache set. */
+ uint64_t l1dway : 5; /**< [ 7: 3](R/W/H) Indicates Dcache way. */
+ uint64_t l1dperr : 1; /**< [ 8: 8](R/W/H) Dcache corrected a parity error. */
+ uint64_t l1dperrdis : 1; /**< [ 9: 9](R/W) Dcache correctable parity error disable. */
+ uint64_t l1dperrnosw : 1; /**< [ 10: 10](R/W) Dcache correctable parity error, no report to software. */
+ uint64_t l1dperrforce : 1; /**< [ 11: 11](R/W/H) Dcache correctable parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t mtlbperr : 1; /**< [ 12: 12](R/W/H) MTLB corrected a parity error. */
+ uint64_t mtlbperrdis : 1; /**< [ 13: 13](R/W) MTLB correctable parity error disable. */
+ uint64_t mtlbperrnosw : 1; /**< [ 14: 14](R/W) MTLB correctable parity error, no report to software. */
+ uint64_t mtlbperrforce : 1; /**< [ 15: 15](R/W/H) MTLB correctable parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t utlbperr : 1; /**< [ 16: 16](R/W/H) uTLB corrected a parity error. */
+ uint64_t utlbperrdis : 1; /**< [ 17: 17](R/W) uTLB correctable parity error disable. */
+ uint64_t utlbperrnosw : 1; /**< [ 18: 18](R/W) uTLB correctable parity error, no report to software. */
+ uint64_t utlbperrforce : 1; /**< [ 19: 19](R/W/H) uTLB correctable parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t mafperr : 1; /**< [ 20: 20](R/W/H) MAF parity error. */
+ uint64_t mafperrdis : 1; /**< [ 21: 21](R/W) MAF parity error disable. */
+ uint64_t mafperrnosw : 1; /**< [ 22: 22](R/W) MAF parity error, no report to software. */
+ uint64_t mafperrforce : 1; /**< [ 23: 23](R/W/H) MAF parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t wbfperr : 1; /**< [ 24: 24](R/W/H) Write-buffer double-bit error. */
+ uint64_t wbfperrdis : 1; /**< [ 25: 25](R/W) Write-buffer double-bit error disable. */
+        uint64_t wbfperrnosw          : 1;  /**< [ 26: 26](R/W) Write-buffer double-bit error, no report to software. */
+ uint64_t wbfsbeforce : 1; /**< [ 27: 27](R/W/H) Write-buffer SBE force. Bit is cleared when error is forced on next write operation. */
+ uint64_t wbfdbeforce : 1; /**< [ 28: 28](R/W/H) Write-buffer DBE force. Bit is cleared when error is forced on next write operation. */
+ uint64_t wcuperr : 1; /**< [ 29: 29](R/W/H) WCU corrected parity error. */
+ uint64_t wcuperrdis : 1; /**< [ 30: 30](R/W) WCU parity error disable. */
+ uint64_t wcuperrnosw : 1; /**< [ 31: 31](R/W) WCU parity error, no report to software. */
+ uint64_t wcuperrforce : 1; /**< [ 32: 32](R/W/H) WCU parity error force. Bit is cleared when error is forced on next write operation. */
+ uint64_t rbfto : 1; /**< [ 33: 33](R/W/H) Read buffer timeout. */
+ uint64_t rbftodis : 1; /**< [ 34: 34](R/W) Read buffer timeout disable. */
+ uint64_t rbftonosw : 1; /**< [ 35: 35](R/W) Read buffer timeout, no report to software. */
+ uint64_t spare36 : 1; /**< [ 36: 36](R/W/H) Reserved. */
+ uint64_t barrierto : 1; /**< [ 37: 37](R/W/H) Barrier timeout. */
+ uint64_t barriertodis : 1; /**< [ 38: 38](R/W) Barrier timeout disable. */
+ uint64_t barriertonosw : 1; /**< [ 39: 39](R/W) Barrier timeout, no report to software. */
+ uint64_t spare40 : 1; /**< [ 40: 40](R/W/H) Reserved. */
+ uint64_t mtlbmult : 1; /**< [ 41: 41](R/W/H) MTLB multiple match error. */
+ uint64_t mtlbmultdis : 1; /**< [ 42: 42](R/W) MTLB multiple match error disable. */
+ uint64_t wcumult : 1; /**< [ 43: 43](R/W/H) WCU multiple match error. */
+ uint64_t wcumultdis : 1; /**< [ 44: 44](R/W) WCU multiple match error disable. */
+ uint64_t gsyncto : 1; /**< [ 45: 45](R/W/H) Global sync timeout. */
+ uint64_t gsynctodis : 1; /**< [ 46: 46](R/W) Global sync timeout disable. */
+ uint64_t gsynctonosw : 1; /**< [ 47: 47](R/W) Global sync timeout, no report to software. */
+ uint64_t wbfsbeerr : 1; /**< [ 48: 48](R/W/H) Write-buffer single-bit error. */
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } cn83xx;
+ /* struct bdk_ap_cvm_errmem_el1_cn81xx cn88xxp2; */
+};
+typedef union bdk_ap_cvm_errmem_el1 bdk_ap_cvm_errmem_el1_t;
+
+#define BDK_AP_CVM_ERRMEM_EL1 BDK_AP_CVM_ERRMEM_EL1_FUNC()
+static inline uint64_t BDK_AP_CVM_ERRMEM_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_ERRMEM_EL1_FUNC(void)
+{
+ return 0x3000b020400ll;
+}
+
+#define typedef_BDK_AP_CVM_ERRMEM_EL1 bdk_ap_cvm_errmem_el1_t
+#define bustype_BDK_AP_CVM_ERRMEM_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_ERRMEM_EL1 "AP_CVM_ERRMEM_EL1"
+#define busnum_BDK_AP_CVM_ERRMEM_EL1 0
+#define arguments_BDK_AP_CVM_ERRMEM_EL1 -1,-1,-1,-1
+
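+/*
+ * Editor's sketch, not part of the imported BDK sources: the per-model
+ * structs above act as decoders for raw ERRMEM register images.  Assigning
+ * a raw 64-bit value to .u exposes the named fields; this hypothetical
+ * helper uses the CN83XX layout shown above to report which Dcache
+ * location latched a corrected parity error.
+ */
+static inline int bdk_ap_cvm_errmem_decode_example(uint64_t raw)
+{
+    bdk_ap_cvm_errmem_el1_t errmem;
+    errmem.u = raw;                            /* Raw register image. */
+    if (errmem.cn83xx.l1dperr)                 /* Dcache corrected a parity error. */
+        return (int)((errmem.cn83xx.l1dway << 3) | errmem.cn83xx.l1dset); /* <7:0> = way,set. */
+    return -1;                                 /* Nothing latched. */
+}
+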
+/**
+ * Register (SYSREG) ap_cvm_evattid_el1
+ *
+ * AP Cavium EVATTID Register
+ * This register is for diagnostic use only.
+ */
+union bdk_ap_cvm_evattid_el1
+{
+ uint64_t u;
+ struct bdk_ap_cvm_evattid_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_60_63 : 4;
+ uint64_t el3_vmid : 4; /**< [ 59: 56](R/W) Current EL3 EVA VMID. */
+ uint64_t reserved_55 : 1;
+ uint64_t el3_asid : 7; /**< [ 54: 48](R/W) Current EL3 EVA ASID. */
+ uint64_t el2_vmid : 4; /**< [ 47: 44](R/W) Current EL2 EVA VMID. */
+ uint64_t reserved_31_43 : 13;
+ uint64_t el2_asid_e2h : 7; /**< [ 30: 24](R/W) Current EL2 E2H EVA ASID. */
+ uint64_t el1_vmid_s : 4; /**< [ 23: 20](R/W) Current EL1 secure EVA VMID. */
+ uint64_t reserved_12_19 : 8;
+ uint64_t el1_vmid_ns : 4; /**< [ 11: 8](R/W) Current EL1 nonsecure EVA VMID. */
+ uint64_t reserved_0_7 : 8;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_7 : 8;
+ uint64_t el1_vmid_ns : 4; /**< [ 11: 8](R/W) Current EL1 nonsecure EVA VMID. */
+ uint64_t reserved_12_19 : 8;
+ uint64_t el1_vmid_s : 4; /**< [ 23: 20](R/W) Current EL1 secure EVA VMID. */
+ uint64_t el2_asid_e2h : 7; /**< [ 30: 24](R/W) Current EL2 E2H EVA ASID. */
+ uint64_t reserved_31_43 : 13;
+ uint64_t el2_vmid : 4; /**< [ 47: 44](R/W) Current EL2 EVA VMID. */
+ uint64_t el3_asid : 7; /**< [ 54: 48](R/W) Current EL3 EVA ASID. */
+ uint64_t reserved_55 : 1;
+ uint64_t el3_vmid : 4; /**< [ 59: 56](R/W) Current EL3 EVA VMID. */
+ uint64_t reserved_60_63 : 4;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_cvm_evattid_el1_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_38_63 : 26;
+ uint64_t el2_asid : 6; /**< [ 37: 32](R/W) Current EL2 EVA ASID. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t el1_vmid : 4; /**< [ 19: 16](R/W) Current EL1 EVA VMID. */
+ uint64_t reserved_6_15 : 10;
+ uint64_t el1_asid : 6; /**< [ 5: 0](R/W) Current EL1 EVA ASID. */
+#else /* Word 0 - Little Endian */
+ uint64_t el1_asid : 6; /**< [ 5: 0](R/W) Current EL1 EVA ASID. */
+ uint64_t reserved_6_15 : 10;
+ uint64_t el1_vmid : 4; /**< [ 19: 16](R/W) Current EL1 EVA VMID. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t el2_asid : 6; /**< [ 37: 32](R/W) Current EL2 EVA ASID. */
+ uint64_t reserved_38_63 : 26;
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_ap_cvm_evattid_el1_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_60_63 : 4;
+ uint64_t el3_vmid : 4; /**< [ 59: 56](R/W) Current EL3 EVA VMID. */
+ uint64_t reserved_55 : 1;
+ uint64_t el3_asid : 7; /**< [ 54: 48](R/W) Current EL3 EVA ASID. */
+ uint64_t el2_vmid : 4; /**< [ 47: 44](R/W) Current EL2 EVA VMID. */
+ uint64_t reserved_43 : 1;
+ uint64_t el2_asid : 7; /**< [ 42: 36](R/W) Current EL2 EVA ASID. */
+ uint64_t el2_vmid_e2h : 4; /**< [ 35: 32](R/W) Current EL2 E2H EVA VMID. */
+ uint64_t reserved_31 : 1;
+ uint64_t el2_asid_e2h : 7; /**< [ 30: 24](R/W) Current EL2 E2H EVA ASID. */
+ uint64_t el1_vmid_s : 4; /**< [ 23: 20](R/W) Current EL1 secure EVA VMID. */
+ uint64_t reserved_19 : 1;
+ uint64_t el1_asid_s : 7; /**< [ 18: 12](R/W) Current EL1 secure EVA ASID. */
+ uint64_t el1_vmid_ns : 4; /**< [ 11: 8](R/W) Current EL1 nonsecure EVA VMID. */
+ uint64_t reserved_7 : 1;
+ uint64_t el1_asid_ns : 7; /**< [ 6: 0](R/W) Current EL1 nonsecure EVA ASID. */
+#else /* Word 0 - Little Endian */
+ uint64_t el1_asid_ns : 7; /**< [ 6: 0](R/W) Current EL1 nonsecure EVA ASID. */
+ uint64_t reserved_7 : 1;
+ uint64_t el1_vmid_ns : 4; /**< [ 11: 8](R/W) Current EL1 nonsecure EVA VMID. */
+ uint64_t el1_asid_s : 7; /**< [ 18: 12](R/W) Current EL1 secure EVA ASID. */
+ uint64_t reserved_19 : 1;
+ uint64_t el1_vmid_s : 4; /**< [ 23: 20](R/W) Current EL1 secure EVA VMID. */
+ uint64_t el2_asid_e2h : 7; /**< [ 30: 24](R/W) Current EL2 E2H EVA ASID. */
+ uint64_t reserved_31 : 1;
+ uint64_t el2_vmid_e2h : 4; /**< [ 35: 32](R/W) Current EL2 E2H EVA VMID. */
+ uint64_t el2_asid : 7; /**< [ 42: 36](R/W) Current EL2 EVA ASID. */
+ uint64_t reserved_43 : 1;
+ uint64_t el2_vmid : 4; /**< [ 47: 44](R/W) Current EL2 EVA VMID. */
+ uint64_t el3_asid : 7; /**< [ 54: 48](R/W) Current EL3 EVA ASID. */
+ uint64_t reserved_55 : 1;
+ uint64_t el3_vmid : 4; /**< [ 59: 56](R/W) Current EL3 EVA VMID. */
+ uint64_t reserved_60_63 : 4;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_ap_cvm_evattid_el1 bdk_ap_cvm_evattid_el1_t;
+
+#define BDK_AP_CVM_EVATTID_EL1 BDK_AP_CVM_EVATTID_EL1_FUNC()
+static inline uint64_t BDK_AP_CVM_EVATTID_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_EVATTID_EL1_FUNC(void)
+{
+ return 0x3000b020500ll;
+}
+
+#define typedef_BDK_AP_CVM_EVATTID_EL1 bdk_ap_cvm_evattid_el1_t
+#define bustype_BDK_AP_CVM_EVATTID_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_EVATTID_EL1 "AP_CVM_EVATTID_EL1"
+#define busnum_BDK_AP_CVM_EVATTID_EL1 0
+#define arguments_BDK_AP_CVM_EVATTID_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_icachedata0_el1
+ *
+ * INTERNAL: AP Cavium Icache Data 0 Register
+ */
+union bdk_ap_cvm_icachedata0_el1
+{
+ uint64_t u;
+ struct bdk_ap_cvm_icachedata0_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](RO) Icache data\<63:0\> from an Icache read operation. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](RO) Icache data\<63:0\> from an Icache read operation. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_icachedata0_el1_s cn; */
+};
+typedef union bdk_ap_cvm_icachedata0_el1 bdk_ap_cvm_icachedata0_el1_t;
+
+#define BDK_AP_CVM_ICACHEDATA0_EL1 BDK_AP_CVM_ICACHEDATA0_EL1_FUNC()
+static inline uint64_t BDK_AP_CVM_ICACHEDATA0_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_ICACHEDATA0_EL1_FUNC(void)
+{
+ return 0x3000b030000ll;
+}
+
+#define typedef_BDK_AP_CVM_ICACHEDATA0_EL1 bdk_ap_cvm_icachedata0_el1_t
+#define bustype_BDK_AP_CVM_ICACHEDATA0_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_ICACHEDATA0_EL1 "AP_CVM_ICACHEDATA0_EL1"
+#define busnum_BDK_AP_CVM_ICACHEDATA0_EL1 0
+#define arguments_BDK_AP_CVM_ICACHEDATA0_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_icachedata1_el1
+ *
+ * INTERNAL: AP Cavium Icache Data 1 Register
+ */
+union bdk_ap_cvm_icachedata1_el1
+{
+ uint64_t u;
+ struct bdk_ap_cvm_icachedata1_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t data : 2; /**< [ 1: 0](RO) Icache data\<65:64\> from an Icache read operation. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 2; /**< [ 1: 0](RO) Icache data\<65:64\> from an Icache read operation. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_icachedata1_el1_s cn; */
+};
+typedef union bdk_ap_cvm_icachedata1_el1 bdk_ap_cvm_icachedata1_el1_t;
+
+#define BDK_AP_CVM_ICACHEDATA1_EL1 BDK_AP_CVM_ICACHEDATA1_EL1_FUNC()
+static inline uint64_t BDK_AP_CVM_ICACHEDATA1_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_ICACHEDATA1_EL1_FUNC(void)
+{
+ return 0x3000b030100ll;
+}
+
+#define typedef_BDK_AP_CVM_ICACHEDATA1_EL1 bdk_ap_cvm_icachedata1_el1_t
+#define bustype_BDK_AP_CVM_ICACHEDATA1_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_ICACHEDATA1_EL1 "AP_CVM_ICACHEDATA1_EL1"
+#define busnum_BDK_AP_CVM_ICACHEDATA1_EL1 0
+#define arguments_BDK_AP_CVM_ICACHEDATA1_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_icachetag0_el1
+ *
+ * INTERNAL: AP Cavium Icache Tag 0 Register
+ */
+union bdk_ap_cvm_icachetag0_el1
+{
+ uint64_t u;
+ struct bdk_ap_cvm_icachetag0_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_52_63 : 12;
+ uint64_t valid : 1; /**< [ 51: 51](RO) Valid. */
+ uint64_t va : 40; /**< [ 50: 11](RO) VA in tags. */
+ uint64_t asid_valid_ignored : 1; /**< [ 10: 10](RO) ASID valid is ignored. */
+ uint64_t asid_index : 6; /**< [ 9: 4](RO) ASID index. */
+ uint64_t vmid_index : 4; /**< [ 3: 0](RO) VMID index. */
+#else /* Word 0 - Little Endian */
+ uint64_t vmid_index : 4; /**< [ 3: 0](RO) VMID index. */
+ uint64_t asid_index : 6; /**< [ 9: 4](RO) ASID index. */
+ uint64_t asid_valid_ignored : 1; /**< [ 10: 10](RO) ASID valid is ignored. */
+ uint64_t va : 40; /**< [ 50: 11](RO) VA in tags. */
+ uint64_t valid : 1; /**< [ 51: 51](RO) Valid. */
+ uint64_t reserved_52_63 : 12;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_icachetag0_el1_s cn; */
+};
+typedef union bdk_ap_cvm_icachetag0_el1 bdk_ap_cvm_icachetag0_el1_t;
+
+#define BDK_AP_CVM_ICACHETAG0_EL1 BDK_AP_CVM_ICACHETAG0_EL1_FUNC()
+static inline uint64_t BDK_AP_CVM_ICACHETAG0_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_ICACHETAG0_EL1_FUNC(void)
+{
+ return 0x3000b030200ll;
+}
+
+#define typedef_BDK_AP_CVM_ICACHETAG0_EL1 bdk_ap_cvm_icachetag0_el1_t
+#define bustype_BDK_AP_CVM_ICACHETAG0_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_ICACHETAG0_EL1 "AP_CVM_ICACHETAG0_EL1"
+#define busnum_BDK_AP_CVM_ICACHETAG0_EL1 0
+#define arguments_BDK_AP_CVM_ICACHETAG0_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_memdebug0_el3
+ *
+ * INTERNAL: AP Cavium Memory Debug 0 Register
+ */
+union bdk_ap_cvm_memdebug0_el3
+{
+ uint64_t u;
+ struct bdk_ap_cvm_memdebug0_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t debug : 64; /**< [ 63: 0](RO) Undocumented debug. */
+#else /* Word 0 - Little Endian */
+ uint64_t debug : 64; /**< [ 63: 0](RO) Undocumented debug. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_memdebug0_el3_s cn; */
+};
+typedef union bdk_ap_cvm_memdebug0_el3 bdk_ap_cvm_memdebug0_el3_t;
+
+#define BDK_AP_CVM_MEMDEBUG0_EL3 BDK_AP_CVM_MEMDEBUG0_EL3_FUNC()
+static inline uint64_t BDK_AP_CVM_MEMDEBUG0_EL3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_MEMDEBUG0_EL3_FUNC(void)
+{
+ return 0x3060b040400ll;
+}
+
+#define typedef_BDK_AP_CVM_MEMDEBUG0_EL3 bdk_ap_cvm_memdebug0_el3_t
+#define bustype_BDK_AP_CVM_MEMDEBUG0_EL3 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_MEMDEBUG0_EL3 "AP_CVM_MEMDEBUG0_EL3"
+#define busnum_BDK_AP_CVM_MEMDEBUG0_EL3 0
+#define arguments_BDK_AP_CVM_MEMDEBUG0_EL3 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_memdebug1_el3
+ *
+ * INTERNAL: AP Cavium Memory Debug 1 Register
+ */
+union bdk_ap_cvm_memdebug1_el3
+{
+ uint64_t u;
+ struct bdk_ap_cvm_memdebug1_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t debug : 64; /**< [ 63: 0](RO) Undocumented debug. */
+#else /* Word 0 - Little Endian */
+ uint64_t debug : 64; /**< [ 63: 0](RO) Undocumented debug. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_memdebug1_el3_s cn; */
+};
+typedef union bdk_ap_cvm_memdebug1_el3 bdk_ap_cvm_memdebug1_el3_t;
+
+#define BDK_AP_CVM_MEMDEBUG1_EL3 BDK_AP_CVM_MEMDEBUG1_EL3_FUNC()
+static inline uint64_t BDK_AP_CVM_MEMDEBUG1_EL3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_MEMDEBUG1_EL3_FUNC(void)
+{
+ return 0x3060b040500ll;
+}
+
+#define typedef_BDK_AP_CVM_MEMDEBUG1_EL3 bdk_ap_cvm_memdebug1_el3_t
+#define bustype_BDK_AP_CVM_MEMDEBUG1_EL3 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_MEMDEBUG1_EL3 "AP_CVM_MEMDEBUG1_EL3"
+#define busnum_BDK_AP_CVM_MEMDEBUG1_EL3 0
+#define arguments_BDK_AP_CVM_MEMDEBUG1_EL3 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_memdebug2_el3
+ *
+ * INTERNAL: AP Cavium Memory Debug 2 Register
+ */
+union bdk_ap_cvm_memdebug2_el3
+{
+ uint64_t u;
+ struct bdk_ap_cvm_memdebug2_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t debug : 64; /**< [ 63: 0](RO) Undocumented debug. */
+#else /* Word 0 - Little Endian */
+ uint64_t debug : 64; /**< [ 63: 0](RO) Undocumented debug. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_memdebug2_el3_s cn; */
+};
+typedef union bdk_ap_cvm_memdebug2_el3 bdk_ap_cvm_memdebug2_el3_t;
+
+#define BDK_AP_CVM_MEMDEBUG2_EL3 BDK_AP_CVM_MEMDEBUG2_EL3_FUNC()
+static inline uint64_t BDK_AP_CVM_MEMDEBUG2_EL3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_MEMDEBUG2_EL3_FUNC(void)
+{
+ return 0x3060b040600ll;
+}
+
+#define typedef_BDK_AP_CVM_MEMDEBUG2_EL3 bdk_ap_cvm_memdebug2_el3_t
+#define bustype_BDK_AP_CVM_MEMDEBUG2_EL3 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_MEMDEBUG2_EL3 "AP_CVM_MEMDEBUG2_EL3"
+#define busnum_BDK_AP_CVM_MEMDEBUG2_EL3 0
+#define arguments_BDK_AP_CVM_MEMDEBUG2_EL3 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_memdebug3_el3
+ *
+ * INTERNAL: AP Cavium Memory Debug 3 Register
+ */
+union bdk_ap_cvm_memdebug3_el3
+{
+ uint64_t u;
+ struct bdk_ap_cvm_memdebug3_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t debug : 64; /**< [ 63: 0](RO) Undocumented debug. */
+#else /* Word 0 - Little Endian */
+ uint64_t debug : 64; /**< [ 63: 0](RO) Undocumented debug. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_memdebug3_el3_s cn; */
+};
+typedef union bdk_ap_cvm_memdebug3_el3 bdk_ap_cvm_memdebug3_el3_t;
+
+#define BDK_AP_CVM_MEMDEBUG3_EL3 BDK_AP_CVM_MEMDEBUG3_EL3_FUNC()
+static inline uint64_t BDK_AP_CVM_MEMDEBUG3_EL3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_MEMDEBUG3_EL3_FUNC(void)
+{
+ return 0x3060b040700ll;
+}
+
+#define typedef_BDK_AP_CVM_MEMDEBUG3_EL3 bdk_ap_cvm_memdebug3_el3_t
+#define bustype_BDK_AP_CVM_MEMDEBUG3_EL3 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_MEMDEBUG3_EL3 "AP_CVM_MEMDEBUG3_EL3"
+#define busnum_BDK_AP_CVM_MEMDEBUG3_EL3 0
+#define arguments_BDK_AP_CVM_MEMDEBUG3_EL3 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_memdebug4_el3
+ *
+ * INTERNAL: AP Cavium Memory Debug 4 Register
+ */
+union bdk_ap_cvm_memdebug4_el3
+{
+ uint64_t u;
+ struct bdk_ap_cvm_memdebug4_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t debug : 64; /**< [ 63: 0](RO) Undocumented debug. */
+#else /* Word 0 - Little Endian */
+ uint64_t debug : 64; /**< [ 63: 0](RO) Undocumented debug. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_memdebug4_el3_s cn; */
+};
+typedef union bdk_ap_cvm_memdebug4_el3 bdk_ap_cvm_memdebug4_el3_t;
+
+#define BDK_AP_CVM_MEMDEBUG4_EL3 BDK_AP_CVM_MEMDEBUG4_EL3_FUNC()
+static inline uint64_t BDK_AP_CVM_MEMDEBUG4_EL3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_MEMDEBUG4_EL3_FUNC(void)
+{
+ return 0x3060b050400ll;
+}
+
+#define typedef_BDK_AP_CVM_MEMDEBUG4_EL3 bdk_ap_cvm_memdebug4_el3_t
+#define bustype_BDK_AP_CVM_MEMDEBUG4_EL3 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_MEMDEBUG4_EL3 "AP_CVM_MEMDEBUG4_EL3"
+#define busnum_BDK_AP_CVM_MEMDEBUG4_EL3 0
+#define arguments_BDK_AP_CVM_MEMDEBUG4_EL3 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_memdebug5_el3
+ *
+ * INTERNAL: AP Cavium Memory Debug 5 Register
+ */
+union bdk_ap_cvm_memdebug5_el3
+{
+ uint64_t u;
+ struct bdk_ap_cvm_memdebug5_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t debug : 64; /**< [ 63: 0](RO) Undocumented debug. */
+#else /* Word 0 - Little Endian */
+ uint64_t debug : 64; /**< [ 63: 0](RO) Undocumented debug. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_memdebug5_el3_s cn; */
+};
+typedef union bdk_ap_cvm_memdebug5_el3 bdk_ap_cvm_memdebug5_el3_t;
+
+#define BDK_AP_CVM_MEMDEBUG5_EL3 BDK_AP_CVM_MEMDEBUG5_EL3_FUNC()
+static inline uint64_t BDK_AP_CVM_MEMDEBUG5_EL3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_MEMDEBUG5_EL3_FUNC(void)
+{
+ return 0x3060b050500ll;
+}
+
+#define typedef_BDK_AP_CVM_MEMDEBUG5_EL3 bdk_ap_cvm_memdebug5_el3_t
+#define bustype_BDK_AP_CVM_MEMDEBUG5_EL3 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_MEMDEBUG5_EL3 "AP_CVM_MEMDEBUG5_EL3"
+#define busnum_BDK_AP_CVM_MEMDEBUG5_EL3 0
+#define arguments_BDK_AP_CVM_MEMDEBUG5_EL3 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_memdebug6_el3
+ *
+ * INTERNAL: AP Cavium Memory Debug 6 Register
+ */
+union bdk_ap_cvm_memdebug6_el3
+{
+ uint64_t u;
+ struct bdk_ap_cvm_memdebug6_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t debug : 64; /**< [ 63: 0](RO) Undocumented debug. */
+#else /* Word 0 - Little Endian */
+ uint64_t debug : 64; /**< [ 63: 0](RO) Undocumented debug. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_memdebug6_el3_s cn; */
+};
+typedef union bdk_ap_cvm_memdebug6_el3 bdk_ap_cvm_memdebug6_el3_t;
+
+#define BDK_AP_CVM_MEMDEBUG6_EL3 BDK_AP_CVM_MEMDEBUG6_EL3_FUNC()
+static inline uint64_t BDK_AP_CVM_MEMDEBUG6_EL3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_MEMDEBUG6_EL3_FUNC(void)
+{
+ return 0x3060b050600ll;
+}
+
+#define typedef_BDK_AP_CVM_MEMDEBUG6_EL3 bdk_ap_cvm_memdebug6_el3_t
+#define bustype_BDK_AP_CVM_MEMDEBUG6_EL3 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_MEMDEBUG6_EL3 "AP_CVM_MEMDEBUG6_EL3"
+#define busnum_BDK_AP_CVM_MEMDEBUG6_EL3 0
+#define arguments_BDK_AP_CVM_MEMDEBUG6_EL3 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_memdebug7_el3
+ *
+ * INTERNAL: AP Cavium Memory Debug 7 Register
+ */
+union bdk_ap_cvm_memdebug7_el3
+{
+ uint64_t u;
+ struct bdk_ap_cvm_memdebug7_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t debug : 64; /**< [ 63: 0](RO) Undocumented debug. */
+#else /* Word 0 - Little Endian */
+ uint64_t debug : 64; /**< [ 63: 0](RO) Undocumented debug. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_memdebug7_el3_s cn; */
+};
+typedef union bdk_ap_cvm_memdebug7_el3 bdk_ap_cvm_memdebug7_el3_t;
+
+#define BDK_AP_CVM_MEMDEBUG7_EL3 BDK_AP_CVM_MEMDEBUG7_EL3_FUNC()
+static inline uint64_t BDK_AP_CVM_MEMDEBUG7_EL3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_MEMDEBUG7_EL3_FUNC(void)
+{
+ return 0x3060b050700ll;
+}
+
+#define typedef_BDK_AP_CVM_MEMDEBUG7_EL3 bdk_ap_cvm_memdebug7_el3_t
+#define bustype_BDK_AP_CVM_MEMDEBUG7_EL3 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_MEMDEBUG7_EL3 "AP_CVM_MEMDEBUG7_EL3"
+#define busnum_BDK_AP_CVM_MEMDEBUG7_EL3 0
+#define arguments_BDK_AP_CVM_MEMDEBUG7_EL3 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_nvbar_el3
+ *
+ * AP Cavium DEL3T Address Register
+ */
+union bdk_ap_cvm_nvbar_el3
+{
+ uint64_t u;
+ struct bdk_ap_cvm_nvbar_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t vector_address : 53; /**< [ 63: 11](R/W) Cavium-specific exception vector address. */
+ uint64_t reserved_0_10 : 11;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_10 : 11;
+ uint64_t vector_address : 53; /**< [ 63: 11](R/W) Cavium-specific exception vector address. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_nvbar_el3_s cn; */
+};
+typedef union bdk_ap_cvm_nvbar_el3 bdk_ap_cvm_nvbar_el3_t;
+
+#define BDK_AP_CVM_NVBAR_EL3 BDK_AP_CVM_NVBAR_EL3_FUNC()
+static inline uint64_t BDK_AP_CVM_NVBAR_EL3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_NVBAR_EL3_FUNC(void)
+{
+ return 0x3060b060000ll;
+}
+
+#define typedef_BDK_AP_CVM_NVBAR_EL3 bdk_ap_cvm_nvbar_el3_t
+#define bustype_BDK_AP_CVM_NVBAR_EL3 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_NVBAR_EL3 "AP_CVM_NVBAR_EL3"
+#define busnum_BDK_AP_CVM_NVBAR_EL3 0
+#define arguments_BDK_AP_CVM_NVBAR_EL3 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_pn_el1
+ *
+ * AP Cavium Processor Number Register
+ * This register is accessible at EL1, but subject to the access controls in AP_CVM_ACCESS_EL1/EL2/EL3.
+ */
+union bdk_ap_cvm_pn_el1
+{
+ uint64_t u;
+ struct bdk_ap_cvm_pn_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_40_63 : 24;
+ uint64_t clu : 8; /**< [ 39: 32](RO) The cluster this core resides in. */
+ uint64_t reserved_16_31 : 16;
+ uint64_t pn : 16; /**< [ 15: 0](RO) The flat processor number value for the core. */
+#else /* Word 0 - Little Endian */
+ uint64_t pn : 16; /**< [ 15: 0](RO) The flat processor number value for the core. */
+ uint64_t reserved_16_31 : 16;
+ uint64_t clu : 8; /**< [ 39: 32](RO) The cluster this core resides in. */
+ uint64_t reserved_40_63 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_pn_el1_s cn; */
+};
+typedef union bdk_ap_cvm_pn_el1 bdk_ap_cvm_pn_el1_t;
+
+#define BDK_AP_CVM_PN_EL1 BDK_AP_CVM_PN_EL1_FUNC()
+static inline uint64_t BDK_AP_CVM_PN_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_PN_EL1_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x3000b040200ll;
+ __bdk_csr_fatal("AP_CVM_PN_EL1", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_CVM_PN_EL1 bdk_ap_cvm_pn_el1_t
+#define bustype_BDK_AP_CVM_PN_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_PN_EL1 "AP_CVM_PN_EL1"
+#define busnum_BDK_AP_CVM_PN_EL1 0
+#define arguments_BDK_AP_CVM_PN_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_power_el1
+ *
+ * AP Cavium Power Control Register
+ * This register controls power management.
+ */
+union bdk_ap_cvm_power_el1
+{
+ uint64_t u;
+ struct bdk_ap_cvm_power_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t maxpow : 8; /**< [ 63: 56](R/W) Reserved.
+ Internal:
+ Maximum power. */
+ uint64_t average_power : 8; /**< [ 55: 48](R/W) Average power.
+ Time-averaged dynamic-power estimate for this core, in mA/GHz.
+ An approximation of this core's power is calculated with:
+
+ _ core_power = core_const * core_powered_on + [AVERAGE_POWER] * voltage * freq.
+
+ Where:
+
+ _ core_power is in mW.
+
+ _ core_const is a per-core constant leakage from the HRM power application note, and is in
+ mA.
+
+ _ core_powered_on is a boolean indicating power applied, from RST_PP_POWER\<core_number\>.
+
+ _ voltage is determined by the platform, perhaps by reading a VRM setting.
+
+ _ freq is in GHz and is from RST_BOOT[C_MUL] * 0.050, assuming standard 50 MHz ref-clock. */
+ uint64_t current_setting : 8; /**< [ 47: 40](R/W) Reserved.
+ Internal:
+ Current setting. */
+ uint64_t hrm_adjustment : 8; /**< [ 39: 32](R/W) Reserved.
+ Internal:
+ HRM adjustment. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t override : 1; /**< [ 28: 28](R/W) Reserved.
+ Internal:
+ Override. */
+ uint64_t disable_stagger : 1; /**< [ 27: 27](R/W) Reserved.
+ Internal:
+ Disable stagger. */
+ uint64_t period : 3; /**< [ 26: 24](R/W) Reserved.
+ Internal:
+ Period. */
+ uint64_t powlim : 8; /**< [ 23: 16](R/W) Reserved.
+ Internal:
+ Power limit. */
+ uint64_t max_setting : 8; /**< [ 15: 8](R/W) Reserved.
+ Internal:
+ Maximum setting. */
+ uint64_t min_setting : 8; /**< [ 7: 0](R/W) Reserved.
+ Internal:
+ Minimum setting. */
+#else /* Word 0 - Little Endian */
+ uint64_t min_setting : 8; /**< [ 7: 0](R/W) Reserved.
+ Internal:
+ Minimum setting. */
+ uint64_t max_setting : 8; /**< [ 15: 8](R/W) Reserved.
+ Internal:
+ Maximum setting. */
+ uint64_t powlim : 8; /**< [ 23: 16](R/W) Reserved.
+ Internal:
+ Power limit. */
+ uint64_t period : 3; /**< [ 26: 24](R/W) Reserved.
+ Internal:
+ Period. */
+ uint64_t disable_stagger : 1; /**< [ 27: 27](R/W) Reserved.
+ Internal:
+ Disable stagger. */
+ uint64_t override : 1; /**< [ 28: 28](R/W) Reserved.
+ Internal:
+ Override. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t hrm_adjustment : 8; /**< [ 39: 32](R/W) Reserved.
+ Internal:
+ HRM adjustment. */
+ uint64_t current_setting : 8; /**< [ 47: 40](R/W) Reserved.
+ Internal:
+ Current setting. */
+ uint64_t average_power : 8; /**< [ 55: 48](R/W) Average power.
+ Time-averaged dynamic-power estimate for this core, in mA/GHz.
+ An approximation of this core's power is calculated with:
+
+ _ core_power = core_const * core_powered_on + [AVERAGE_POWER] * voltage * freq.
+
+ Where:
+
+ _ core_power is in mW.
+
+ _ core_const is a per-core constant leakage from the HRM power application note, and is in
+ mA.
+
+ _ core_powered_on is a boolean indicating power applied, from RST_PP_POWER\<core_number\>.
+
+ _ voltage is determined by the platform, perhaps by reading a VRM setting.
+
+ _ freq is in GHz and is from RST_BOOT[C_MUL] * 0.050, assuming standard 50 MHz ref-clock. */
+ uint64_t maxpow : 8; /**< [ 63: 56](R/W) Reserved.
+ Internal:
+ Maximum power. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_power_el1_s cn8; */
+ struct bdk_ap_cvm_power_el1_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t maxpow : 8; /**< [ 63: 56](R/W) Reserved.
+ Internal:
+ Maximum power. */
+ uint64_t average_power : 8; /**< [ 55: 48](R/W) Average power.
+ Time-averaged dynamic-power estimate for this core, in mA/GHz.
+ An approximation of this core's power is calculated with:
+
+ _ core_power = core_const * core_powered_on + [AVERAGE_POWER] * voltage * freq.
+
+ Where:
+
+ _ core_power is in mW.
+
+ _ core_const is a per-core constant leakage from the HRM power application note, and is in
+ mA.
+
+ _ core_powered_on is a boolean indicating power applied, from RST_PP_POWER\<core_number\>.
+
+                                                                 _ voltage is determined by the platform, perhaps by reading an AVS setting.
+
+ _ freq is in GHz and is from RST_BOOT[C_MUL] * 0.050, assuming standard 50 MHz ref-clock. */
+ uint64_t current_setting : 8; /**< [ 47: 40](R/W) Reserved.
+ Internal:
+ Current setting. */
+ uint64_t hrm_adjustment : 8; /**< [ 39: 32](R/W) Reserved.
+ Internal:
+ HRM adjustment. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t override : 1; /**< [ 28: 28](R/W) Reserved.
+ Internal:
+ Override. */
+ uint64_t disable_stagger : 1; /**< [ 27: 27](R/W) Reserved.
+ Internal:
+ Disable stagger. */
+ uint64_t period : 3; /**< [ 26: 24](R/W) Reserved.
+ Internal:
+ Period. */
+ uint64_t powlim : 8; /**< [ 23: 16](R/W) Reserved.
+ Internal:
+ Power limit. */
+ uint64_t max_setting : 8; /**< [ 15: 8](R/W) Reserved.
+ Internal:
+ Maximum setting. */
+ uint64_t min_setting : 8; /**< [ 7: 0](R/W) Reserved.
+ Internal:
+ Minimum setting. */
+#else /* Word 0 - Little Endian */
+ uint64_t min_setting : 8; /**< [ 7: 0](R/W) Reserved.
+ Internal:
+ Minimum setting. */
+ uint64_t max_setting : 8; /**< [ 15: 8](R/W) Reserved.
+ Internal:
+ Maximum setting. */
+ uint64_t powlim : 8; /**< [ 23: 16](R/W) Reserved.
+ Internal:
+ Power limit. */
+ uint64_t period : 3; /**< [ 26: 24](R/W) Reserved.
+ Internal:
+ Period. */
+ uint64_t disable_stagger : 1; /**< [ 27: 27](R/W) Reserved.
+ Internal:
+ Disable stagger. */
+ uint64_t override : 1; /**< [ 28: 28](R/W) Reserved.
+ Internal:
+ Override. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t hrm_adjustment : 8; /**< [ 39: 32](R/W) Reserved.
+ Internal:
+ HRM adjustment. */
+ uint64_t current_setting : 8; /**< [ 47: 40](R/W) Reserved.
+ Internal:
+ Current setting. */
+ uint64_t average_power : 8; /**< [ 55: 48](R/W) Average power.
+ Time-averaged dynamic-power estimate for this core, in mA/GHz.
+ An approximation of this core's power is calculated with:
+
+ _ core_power = core_const * core_powered_on + [AVERAGE_POWER] * voltage * freq.
+
+ Where:
+
+ _ core_power is in mW.
+
+ _ core_const is a per-core constant leakage from the HRM power application note, and is in
+ mA.
+
+ _ core_powered_on is a boolean indicating power applied, from RST_PP_POWER\<core_number\>.
+
+                                                                 _ voltage is determined by the platform, perhaps by reading an AVS setting.
+
+ _ freq is in GHz and is from RST_BOOT[C_MUL] * 0.050, assuming standard 50 MHz ref-clock. */
+ uint64_t maxpow : 8; /**< [ 63: 56](R/W) Reserved.
+ Internal:
+ Maximum power. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_ap_cvm_power_el1 bdk_ap_cvm_power_el1_t;
+
+#define BDK_AP_CVM_POWER_EL1 BDK_AP_CVM_POWER_EL1_FUNC()
+static inline uint64_t BDK_AP_CVM_POWER_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_POWER_EL1_FUNC(void)
+{
+ return 0x3000b000200ll;
+}
+
+#define typedef_BDK_AP_CVM_POWER_EL1 bdk_ap_cvm_power_el1_t
+#define bustype_BDK_AP_CVM_POWER_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_POWER_EL1 "AP_CVM_POWER_EL1"
+#define busnum_BDK_AP_CVM_POWER_EL1 0
+#define arguments_BDK_AP_CVM_POWER_EL1 -1,-1,-1,-1
+
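+/*
+ * Editor's sketch, not part of the imported BDK sources: a direct
+ * transcription of the AVERAGE_POWER estimate documented above.  All
+ * inputs are hypothetical parameters: core_const_ma comes from the HRM
+ * power application note, core_powered_on from RST_PP_POWER<core_number>,
+ * average_power is this register's [AVERAGE_POWER] field in mA/GHz,
+ * voltage is platform-determined, and freq_ghz is RST_BOOT[C_MUL] * 0.050.
+ */
+static inline double bdk_core_power_mw_example(double core_const_ma,
+                                               int core_powered_on,
+                                               uint64_t average_power,
+                                               double voltage,
+                                               double freq_ghz)
+{
+    /* core_power = core_const * core_powered_on + [AVERAGE_POWER] * voltage * freq */
+    return core_const_ma * (double)core_powered_on
+           + (double)average_power * voltage * freq_ghz;
+}
+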
+/**
+ * Register (SYSREG) ap_cvm_scratch#_el1
+ *
+ * AP Cavium Scratchpad Register
+ * This register aids post-silicon debug by providing a scratchpad for software.
+ */
+union bdk_ap_cvm_scratchx_el1
+{
+ uint64_t u;
+ struct bdk_ap_cvm_scratchx_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Scratch. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Scratch. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_scratchx_el1_s cn; */
+};
+typedef union bdk_ap_cvm_scratchx_el1 bdk_ap_cvm_scratchx_el1_t;
+
+static inline uint64_t BDK_AP_CVM_SCRATCHX_EL1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_SCRATCHX_EL1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x3000b050000ll + 0x100ll * ((a) & 0x3);
+ __bdk_csr_fatal("AP_CVM_SCRATCHX_EL1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_CVM_SCRATCHX_EL1(a) bdk_ap_cvm_scratchx_el1_t
+#define bustype_BDK_AP_CVM_SCRATCHX_EL1(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_SCRATCHX_EL1(a) "AP_CVM_SCRATCHX_EL1"
+#define busnum_BDK_AP_CVM_SCRATCHX_EL1(a) (a)
+#define arguments_BDK_AP_CVM_SCRATCHX_EL1(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_statprofcmp_el1
+ *
+ * AP Cavium Statistical Profiling Comparator Value Register
+ */
+union bdk_ap_cvm_statprofcmp_el1
+{
+ uint64_t u;
+ struct bdk_ap_cvm_statprofcmp_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t cmp_val               : 64; /**< [ 63: 0](R/W) When directed sample mode is enabled (AP_CVM_STATPROFCTL_EL1[DIR_SAMPLE]=1),
+                                                                 this register provides the address, or the opcode and mask, to be compared.
+                                                                 The compare mode is selected by AP_CVM_STATPROFCTL_EL1[OC_PC]. */
+#else /* Word 0 - Little Endian */
+        uint64_t cmp_val               : 64; /**< [ 63: 0](R/W) When directed sample mode is enabled (AP_CVM_STATPROFCTL_EL1[DIR_SAMPLE]=1),
+                                                                 this register provides the address, or the opcode and mask, to be compared.
+                                                                 The compare mode is selected by AP_CVM_STATPROFCTL_EL1[OC_PC]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_statprofcmp_el1_s cn; */
+};
+typedef union bdk_ap_cvm_statprofcmp_el1 bdk_ap_cvm_statprofcmp_el1_t;
+
+#define BDK_AP_CVM_STATPROFCMP_EL1 BDK_AP_CVM_STATPROFCMP_EL1_FUNC()
+static inline uint64_t BDK_AP_CVM_STATPROFCMP_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_STATPROFCMP_EL1_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x3000b040100ll;
+ __bdk_csr_fatal("AP_CVM_STATPROFCMP_EL1", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_CVM_STATPROFCMP_EL1 bdk_ap_cvm_statprofcmp_el1_t
+#define bustype_BDK_AP_CVM_STATPROFCMP_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_STATPROFCMP_EL1 "AP_CVM_STATPROFCMP_EL1"
+#define busnum_BDK_AP_CVM_STATPROFCMP_EL1 0
+#define arguments_BDK_AP_CVM_STATPROFCMP_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_statprofctl_el1
+ *
+ * AP Cavium Statistical Profiling Configuration Register
+ */
+union bdk_ap_cvm_statprofctl_el1
+{
+ uint64_t u;
+ struct bdk_ap_cvm_statprofctl_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_6_63 : 58;
+ uint64_t el : 3; /**< [ 5: 3](R/W) When in directed sample mode, indicates whether the instruction matching
+ logic includes a comparison of the EL of the target instruction.
+ 0x0 = Do not include an EL compare.
+ 0x4 = Instruction match if value match and instruction EL=0.
+ 0x5 = Instruction match if value match and instruction EL=1.
+ 0x6 = Instruction match if value match and instruction EL=2.
+ 0x7 = Instruction match if value match and instruction EL=3. */
+        uint64_t oc_pc                 : 1;  /**< [  2:  2](R/W) When in directed sample mode, indicates whether the instruction to be
+                                                                 sampled is found by matching the PC or the opcode.
+ 0 = Comparator matches AP_CVM_STATPROFCMP_EL1[52:2] against instruction PC.
+ 1 = Comparator matches AP_CVM_STATPROFCMP_EL1[31:0] against instruction opcode
+ with bits enabled for comparison with a corresponding 1 in AP_CVM_STATPROFCMP_EL1[63:32]. */
+ uint64_t dir_sample : 1; /**< [ 1: 1](R/W) When set, replaces statistical profile's random sample selection logic
+ with the output of the instruction and/or address comparators from the
+ trace logic. This provides the ability to profile a specific instruction.
+ Note that this feature will not function if trace is enabled. */
+ uint64_t ernd : 1; /**< [ 0: 0](R/W) Provides the value for AP_PMSIDR_EL1[ERND]. This field describes how
+ randomization is used in selecting the sample. See AP_PMSIDR_EL1[ERND]. */
+#else /* Word 0 - Little Endian */
+ uint64_t ernd : 1; /**< [ 0: 0](R/W) Provides the value for AP_PMSIDR_EL1[ERND]. This field describes how
+ randomization is used in selecting the sample. See AP_PMSIDR_EL1[ERND]. */
+ uint64_t dir_sample : 1; /**< [ 1: 1](R/W) When set, replaces statistical profile's random sample selection logic
+ with the output of the instruction and/or address comparators from the
+ trace logic. This provides the ability to profile a specific instruction.
+ Note that this feature will not function if trace is enabled. */
+        uint64_t oc_pc                 : 1;  /**< [  2:  2](R/W) When in directed sample mode, indicates whether the instruction to be
+                                                                 sampled is found by matching the PC or the opcode.
+ 0 = Comparator matches AP_CVM_STATPROFCMP_EL1[52:2] against instruction PC.
+ 1 = Comparator matches AP_CVM_STATPROFCMP_EL1[31:0] against instruction opcode
+ with bits enabled for comparison with a corresponding 1 in AP_CVM_STATPROFCMP_EL1[63:32]. */
+ uint64_t el : 3; /**< [ 5: 3](R/W) When in directed sample mode, indicates whether the instruction matching
+ logic includes a comparison of the EL of the target instruction.
+ 0x0 = Do not include an EL compare.
+ 0x4 = Instruction match if value match and instruction EL=0.
+ 0x5 = Instruction match if value match and instruction EL=1.
+ 0x6 = Instruction match if value match and instruction EL=2.
+ 0x7 = Instruction match if value match and instruction EL=3. */
+ uint64_t reserved_6_63 : 58;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_statprofctl_el1_s cn; */
+};
+typedef union bdk_ap_cvm_statprofctl_el1 bdk_ap_cvm_statprofctl_el1_t;
+
+#define BDK_AP_CVM_STATPROFCTL_EL1 BDK_AP_CVM_STATPROFCTL_EL1_FUNC()
+static inline uint64_t BDK_AP_CVM_STATPROFCTL_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_STATPROFCTL_EL1_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x3000b040000ll;
+ __bdk_csr_fatal("AP_CVM_STATPROFCTL_EL1", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_CVM_STATPROFCTL_EL1 bdk_ap_cvm_statprofctl_el1_t
+#define bustype_BDK_AP_CVM_STATPROFCTL_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_STATPROFCTL_EL1 "AP_CVM_STATPROFCTL_EL1"
+#define busnum_BDK_AP_CVM_STATPROFCTL_EL1 0
+#define arguments_BDK_AP_CVM_STATPROFCTL_EL1 -1,-1,-1,-1
+
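+/*
+ * Editor's sketch, not part of the imported BDK sources: the opcode-match
+ * rule described for [OC_PC]=1 above.  An opcode matches when every bit
+ * selected by the mask in AP_CVM_STATPROFCMP_EL1[63:32] agrees with the
+ * pattern in AP_CVM_STATPROFCMP_EL1[31:0].
+ */
+static inline int bdk_statprof_opcode_match_example(uint64_t cmp_val, uint32_t opcode)
+{
+    uint32_t pattern = (uint32_t)(cmp_val & 0xffffffffull);  /* CMP_VAL<31:0>  */
+    uint32_t mask    = (uint32_t)(cmp_val >> 32);            /* CMP_VAL<63:32> */
+    return ((opcode ^ pattern) & mask) == 0;                 /* Masked compare. */
+}
+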
+/**
+ * Register (SYSREG) ap_cvm_trapaddr#_el3
+ *
+ * AP Cavium Trap Address Register
+ * This register provides ternary match bits for physical address traps.
+ *
+ * Usage Constraints:
+ * This register is R/W at EL3.
+ *
+ * Traps and Enables:
+ *     There are no traps or enables affecting this register.
+ *
+ * Configurations:
+ * R/W fields in this register reset to IMPLEMENTATION DEFINED values that might be UNKNOWN.
+ * Cavium implementations will reset to 0x0.
+ */
+union bdk_ap_cvm_trapaddrx_el3
+{
+ uint64_t u;
+ struct bdk_ap_cvm_trapaddrx_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t stld : 2; /**< [ 63: 62](R/W) 0x0 = Don't match (though redundant with enable bits in AP_CVM_TRAPCTL()_EL3).
+ 0x1 = Match load.
+ 0x2 = Match store.
+ 0x3 = Match load and store. */
+ uint64_t reserved_52_61 : 10;
+ uint64_t pa : 45; /**< [ 51: 7](R/W) Physical address match bits \<51:7\>. */
+ uint64_t reserved_0_6 : 7;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_6 : 7;
+ uint64_t pa : 45; /**< [ 51: 7](R/W) Physical address match bits \<51:7\>. */
+ uint64_t reserved_52_61 : 10;
+ uint64_t stld : 2; /**< [ 63: 62](R/W) 0x0 = Don't match (though redundant with enable bits in AP_CVM_TRAPCTL()_EL3).
+ 0x1 = Match load.
+ 0x2 = Match store.
+ 0x3 = Match load and store. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_trapaddrx_el3_s cn; */
+};
+typedef union bdk_ap_cvm_trapaddrx_el3 bdk_ap_cvm_trapaddrx_el3_t;
+
+static inline uint64_t BDK_AP_CVM_TRAPADDRX_EL3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_TRAPADDRX_EL3(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a>=4)&&(a<=7)))
+ return 0x3060b070000ll + 0x100ll * ((a) & 0x7);
+ __bdk_csr_fatal("AP_CVM_TRAPADDRX_EL3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_CVM_TRAPADDRX_EL3(a) bdk_ap_cvm_trapaddrx_el3_t
+#define bustype_BDK_AP_CVM_TRAPADDRX_EL3(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_TRAPADDRX_EL3(a) "AP_CVM_TRAPADDRX_EL3"
+#define busnum_BDK_AP_CVM_TRAPADDRX_EL3(a) (a)
+#define arguments_BDK_AP_CVM_TRAPADDRX_EL3(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_trapaddrena#_el3
+ *
+ * AP Cavium Trap Address Enable Register
+ * This register provides ternary enable bits for physical address traps.
+ *
+ * Usage Constraints:
+ * This register is R/W at EL3.
+ *
+ * Traps and Enables:
+ *     There are no traps or enables affecting this register.
+ *
+ * Configurations:
+ * R/W fields in this register reset to IMPLEMENTATION DEFINED values that might be UNKNOWN.
+ * Cavium implementations will reset to 0x0.
+ */
+union bdk_ap_cvm_trapaddrenax_el3
+{
+ uint64_t u;
+ struct bdk_ap_cvm_trapaddrenax_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_52_63 : 12;
+ uint64_t pa_ena : 45; /**< [ 51: 7](R/W) Physical address match enable bits \<51:7\>. */
+ uint64_t reserved_0_6 : 7;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_6 : 7;
+ uint64_t pa_ena : 45; /**< [ 51: 7](R/W) Physical address match enable bits \<51:7\>. */
+ uint64_t reserved_52_63 : 12;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_trapaddrenax_el3_s cn; */
+};
+typedef union bdk_ap_cvm_trapaddrenax_el3 bdk_ap_cvm_trapaddrenax_el3_t;
+
+static inline uint64_t BDK_AP_CVM_TRAPADDRENAX_EL3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_TRAPADDRENAX_EL3(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a>=4)&&(a<=7)))
+ return 0x3060b090000ll + 0x100ll * ((a) & 0x7);
+ __bdk_csr_fatal("AP_CVM_TRAPADDRENAX_EL3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_CVM_TRAPADDRENAX_EL3(a) bdk_ap_cvm_trapaddrenax_el3_t
+#define bustype_BDK_AP_CVM_TRAPADDRENAX_EL3(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_TRAPADDRENAX_EL3(a) "AP_CVM_TRAPADDRENAX_EL3"
+#define busnum_BDK_AP_CVM_TRAPADDRENAX_EL3(a) (a)
+#define arguments_BDK_AP_CVM_TRAPADDRENAX_EL3(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_trapctl#_el3
+ *
+ * AP Cavium Trap Control Register
+ * This register provides control and identification of the Cavium physical address and
+ * instruction trap functionality. There are eight of these registers. Registers zero
+ * through three apply to the instruction matchers and registers four through seven apply
+ * to the address matchers.
+ *
+ * Usage Constraints:
+ * This register is R/W at EL3.
+ *
+ * Traps and Enables:
+ *     There are no traps or enables affecting this register.
+ *
+ * Configurations:
+ * R/W fields in this register reset to IMPLEMENTATION DEFINED values that might be UNKNOWN.
+ * Cavium implementations will reset to 0x0.
+ */
+union bdk_ap_cvm_trapctlx_el3
+{
+ uint64_t u;
+ struct bdk_ap_cvm_trapctlx_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_37_63 : 27;
+ uint64_t el2 : 1; /**< [ 36: 36](R/W) Trap accesses from EL2. */
+ uint64_t el1ns : 1; /**< [ 35: 35](R/W) Trap accesses from EL1NS. */
+ uint64_t el1s : 1; /**< [ 34: 34](R/W) Trap accesses from EL1S. */
+ uint64_t el0ns : 1; /**< [ 33: 33](R/W) Trap accesses from EL0NS. */
+ uint64_t el0s : 1; /**< [ 32: 32](R/W) Trap accesses from EL0S. */
+ uint64_t reserved_8_31 : 24;
+ uint64_t action : 4; /**< [ 7: 4](R/W) Trap action:
+ 0x0 = Trap to EL3 on a match.
+                                                                 0x1 = Flush the instruction pipeline and reissue the instruction. Valid for
+                                                                 instruction matches only; otherwise UNPREDICTABLE.
+ 0x2-0xF = Reserved. */
+        uint64_t mtype                 : 3;  /**< [  3:  1](R/W) Read-only. Type of matcher, for software capability discovery:
+ 0x0 = Not present.
+ 0x1 = Physical address matcher. This value is advertised in indices 4..7.
+ 0x2 = Instruction matcher. This value is advertised in indices 0..3. */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable. */
+        uint64_t mtype                 : 3;  /**< [  3:  1](R/W) Read-only. Type of matcher, for software capability discovery:
+ 0x0 = Not present.
+ 0x1 = Physical address matcher. This value is advertised in indices 4..7.
+ 0x2 = Instruction matcher. This value is advertised in indices 0..3. */
+ uint64_t action : 4; /**< [ 7: 4](R/W) Trap action:
+ 0x0 = Trap to EL3 on a match.
+                                                                 0x1 = Flush the instruction pipeline and reissue the instruction. Valid for
+                                                                 instruction matches only; otherwise UNPREDICTABLE.
+ 0x2-0xF = Reserved. */
+ uint64_t reserved_8_31 : 24;
+ uint64_t el0s : 1; /**< [ 32: 32](R/W) Trap accesses from EL0S. */
+ uint64_t el0ns : 1; /**< [ 33: 33](R/W) Trap accesses from EL0NS. */
+ uint64_t el1s : 1; /**< [ 34: 34](R/W) Trap accesses from EL1S. */
+ uint64_t el1ns : 1; /**< [ 35: 35](R/W) Trap accesses from EL1NS. */
+ uint64_t el2 : 1; /**< [ 36: 36](R/W) Trap accesses from EL2. */
+ uint64_t reserved_37_63 : 27;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_trapctlx_el3_s cn; */
+};
+typedef union bdk_ap_cvm_trapctlx_el3 bdk_ap_cvm_trapctlx_el3_t;
+
+static inline uint64_t BDK_AP_CVM_TRAPCTLX_EL3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_TRAPCTLX_EL3(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x3060b080000ll + 0x100ll * ((a) & 0x7);
+ __bdk_csr_fatal("AP_CVM_TRAPCTLX_EL3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_CVM_TRAPCTLX_EL3(a) bdk_ap_cvm_trapctlx_el3_t
+#define bustype_BDK_AP_CVM_TRAPCTLX_EL3(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_TRAPCTLX_EL3(a) "AP_CVM_TRAPCTLX_EL3"
+#define busnum_BDK_AP_CVM_TRAPCTLX_EL3(a) (a)
+#define arguments_BDK_AP_CVM_TRAPCTLX_EL3(a) (a),-1,-1,-1
+
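+/*
+ * Editor's sketch, not part of the imported BDK sources: how the ternary
+ * physical-address matchers combine.  An access to physical address `pa`
+ * hits matcher `a` when every PA bit <51:7> enabled in TRAPADDRENA(a)
+ * agrees with TRAPADDR(a)[PA] and the direction is selected by
+ * TRAPADDR(a)[STLD] (bit 0 of STLD = load, bit 1 = store).  TRAPCTL(a)[ENA]
+ * must additionally be set, and [ACTION] chooses the response.  The raw
+ * register images are taken as plain parameters here.
+ */
+static inline int bdk_cvm_patrap_match_example(uint64_t trapaddr_raw,
+                                               uint64_t trapaddrena_raw,
+                                               uint64_t pa, int is_store)
+{
+    bdk_ap_cvm_trapaddrx_el3_t    addr;
+    bdk_ap_cvm_trapaddrenax_el3_t ena;
+    addr.u = trapaddr_raw;
+    ena.u  = trapaddrena_raw;
+    uint64_t pa_51_7 = (pa >> 7) & ((1ull << 45) - 1);       /* PA bits <51:7>. */
+    int dir_ok = (int)(addr.s.stld >> (is_store ? 1 : 0)) & 1; /* Load/store select. */
+    return dir_ok && (((pa_51_7 ^ addr.s.pa) & ena.s.pa_ena) == 0);
+}
+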
+/**
+ * Register (SYSREG) ap_cvm_trapinsn#_el3
+ *
+ * AP Cavium Trap Instructions Register
+ * This register provides ternary match and enable bits for instruction word traps.
+ *
+ * Usage Constraints:
+ * This register is R/W at EL3.
+ *
+ * Traps and Enables:
+ *     There are no traps or enables affecting this register.
+ *
+ * Configurations:
+ * R/W fields in this register reset to IMPLEMENTATION DEFINED values that might be UNKNOWN.
+ * Cavium implementations will reset to 0x0.
+ */
+union bdk_ap_cvm_trapinsnx_el3
+{
+ uint64_t u;
+ struct bdk_ap_cvm_trapinsnx_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t insn_ena              : 32; /**< [ 63: 32](R/W) Instruction match enable bits. */
+        uint64_t insn                  : 32; /**< [ 31:  0](R/W) Instruction match bits. */
+#else /* Word 0 - Little Endian */
+        uint64_t insn                  : 32; /**< [ 31:  0](R/W) Instruction match bits. */
+        uint64_t insn_ena              : 32; /**< [ 63: 32](R/W) Instruction match enable bits. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_trapinsnx_el3_s cn; */
+};
+typedef union bdk_ap_cvm_trapinsnx_el3 bdk_ap_cvm_trapinsnx_el3_t;
+
+static inline uint64_t BDK_AP_CVM_TRAPINSNX_EL3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_TRAPINSNX_EL3(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x3060b090000ll + 0x100ll * ((a) & 0x3);
+ __bdk_csr_fatal("AP_CVM_TRAPINSNX_EL3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_CVM_TRAPINSNX_EL3(a) bdk_ap_cvm_trapinsnx_el3_t
+#define bustype_BDK_AP_CVM_TRAPINSNX_EL3(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_TRAPINSNX_EL3(a) "AP_CVM_TRAPINSNX_EL3"
+#define busnum_BDK_AP_CVM_TRAPINSNX_EL3(a) (a)
+#define arguments_BDK_AP_CVM_TRAPINSNX_EL3(a) (a),-1,-1,-1
+
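+/*
+ * Editor's sketch, not part of the imported BDK sources: the instruction
+ * matchers (indices 0..3) apply the same ternary rule as the address
+ * matchers.  An instruction word matches when every bit enabled in
+ * TRAPINSN(a)[INSN_ENA] agrees with the pattern in TRAPINSN(a)[INSN].
+ */
+static inline int bdk_cvm_insntrap_match_example(uint64_t trapinsn_raw, uint32_t insn_word)
+{
+    bdk_ap_cvm_trapinsnx_el3_t t;
+    t.u = trapinsn_raw;
+    return ((insn_word ^ (uint32_t)t.s.insn) & (uint32_t)t.s.insn_ena) == 0;
+}
+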
+/**
+ * Register (SYSREG) ap_cvm_trapopc_el3
+ *
+ * AP Cavium Trap Exception Opcode Register
+ * This register stores syndrome information on a trap fault.
+ *
+ * Usage Constraints:
+ *     This register is R/W at EL3.
+ *
+ * Traps and Enables:
+ *     There are no traps or enables affecting this register.
+ *
+ * Configurations:
+ *     R/W fields in this register reset to IMPLEMENTATION DEFINED values that might be UNKNOWN.
+ * Cavium implementations will reset to 0x0.
+ */
+union bdk_ap_cvm_trapopc_el3
+{
+ uint64_t u;
+ struct bdk_ap_cvm_trapopc_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_37_63 : 27;
+ uint64_t regset : 5; /**< [ 36: 32](R/W) Matching register set.
+                                                                 Values 0-15 refer to physical address match register sets and values
+                                                                 16-31 refer to instruction match register sets. */
+ uint64_t insn : 32; /**< [ 31: 0](R/W) Instruction word that caused the fault. */
+#else /* Word 0 - Little Endian */
+ uint64_t insn : 32; /**< [ 31: 0](R/W) Instruction word that caused the fault. */
+ uint64_t regset : 5; /**< [ 36: 32](R/W) Matching register set.
+                                                                 Values 0-15 refer to physical address match register sets and values
+                                                                 16-31 refer to instruction match register sets. */
+ uint64_t reserved_37_63 : 27;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_trapopc_el3_s cn; */
+};
+typedef union bdk_ap_cvm_trapopc_el3 bdk_ap_cvm_trapopc_el3_t;
+
+#define BDK_AP_CVM_TRAPOPC_EL3 BDK_AP_CVM_TRAPOPC_EL3_FUNC()
+static inline uint64_t BDK_AP_CVM_TRAPOPC_EL3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_TRAPOPC_EL3_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x3060b060100ll;
+ __bdk_csr_fatal("AP_CVM_TRAPOPC_EL3", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_CVM_TRAPOPC_EL3 bdk_ap_cvm_trapopc_el3_t
+#define bustype_BDK_AP_CVM_TRAPOPC_EL3 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_TRAPOPC_EL3 "AP_CVM_TRAPOPC_EL3"
+#define busnum_BDK_AP_CVM_TRAPOPC_EL3 0
+#define arguments_BDK_AP_CVM_TRAPOPC_EL3 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_xlatdata0_el1
+ *
+ * AP Cavium Translation Data 0 EL1 Register
+ */
+union bdk_ap_cvm_xlatdata0_el1
+{
+ uint64_t u;
+ struct bdk_ap_cvm_xlatdata0_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t par : 2; /**< [ 63: 62](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t reserved_54_61 : 8;
+ uint64_t walk : 2; /**< [ 53: 52](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t ng : 1; /**< [ 51: 51](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t reserved_50 : 1;
+ uint64_t nsec : 1; /**< [ 49: 49](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t reserved_48 : 1;
+ uint64_t ppn : 36; /**< [ 47: 12](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t reserved_10_11 : 2;
+ uint64_t sh1 : 2; /**< [ 9: 8](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t ap1 : 2; /**< [ 7: 6](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t xn1 : 1; /**< [ 5: 5](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t pxn1 : 1; /**< [ 4: 4](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t attr1 : 4; /**< [ 3: 0](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+#else /* Word 0 - Little Endian */
+ uint64_t attr1 : 4; /**< [ 3: 0](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t pxn1 : 1; /**< [ 4: 4](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t xn1 : 1; /**< [ 5: 5](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t ap1 : 2; /**< [ 7: 6](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t sh1 : 2; /**< [ 9: 8](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t reserved_10_11 : 2;
+ uint64_t ppn : 36; /**< [ 47: 12](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t reserved_48 : 1;
+ uint64_t nsec : 1; /**< [ 49: 49](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t reserved_50 : 1;
+ uint64_t ng : 1; /**< [ 51: 51](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t walk : 2; /**< [ 53: 52](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t reserved_54_61 : 8;
+ uint64_t par : 2; /**< [ 63: 62](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_xlatdata0_el1_s cn; */
+};
+typedef union bdk_ap_cvm_xlatdata0_el1 bdk_ap_cvm_xlatdata0_el1_t;
+
+#define BDK_AP_CVM_XLATDATA0_EL1 BDK_AP_CVM_XLATDATA0_EL1_FUNC()
+static inline uint64_t BDK_AP_CVM_XLATDATA0_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_XLATDATA0_EL1_FUNC(void)
+{
+ return 0x3000b050400ll;
+}
+
+#define typedef_BDK_AP_CVM_XLATDATA0_EL1 bdk_ap_cvm_xlatdata0_el1_t
+#define bustype_BDK_AP_CVM_XLATDATA0_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_XLATDATA0_EL1 "AP_CVM_XLATDATA0_EL1"
+#define busnum_BDK_AP_CVM_XLATDATA0_EL1 0
+#define arguments_BDK_AP_CVM_XLATDATA0_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_cvm_xlatdata1_el1
+ *
+ * AP Cavium Translation Data 1 EL1 Register
+ */
+union bdk_ap_cvm_xlatdata1_el1
+{
+ uint64_t u;
+ struct bdk_ap_cvm_xlatdata1_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_63 : 1;
+ uint64_t ent2 : 9; /**< [ 62: 54](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t reserved_45_53 : 9;
+ uint64_t ent1 : 9; /**< [ 44: 36](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t reserved_34_35 : 2;
+ uint64_t mask : 22; /**< [ 33: 12](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t reserved_10_11 : 2;
+ uint64_t sh2 : 2; /**< [ 9: 8](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t ap2 : 2; /**< [ 7: 6](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t xn2 : 1; /**< [ 5: 5](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t pxn2 : 1; /**< [ 4: 4](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t attr2 : 4; /**< [ 3: 0](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+#else /* Word 0 - Little Endian */
+ uint64_t attr2 : 4; /**< [ 3: 0](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t pxn2 : 1; /**< [ 4: 4](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t xn2 : 1; /**< [ 5: 5](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t ap2 : 2; /**< [ 7: 6](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t sh2 : 2; /**< [ 9: 8](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t reserved_10_11 : 2;
+ uint64_t mask : 22; /**< [ 33: 12](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t reserved_34_35 : 2;
+ uint64_t ent1 : 9; /**< [ 44: 36](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t reserved_45_53 : 9;
+ uint64_t ent2 : 9; /**< [ 62: 54](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t reserved_63 : 1;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_xlatdata1_el1_s cn; */
+};
+typedef union bdk_ap_cvm_xlatdata1_el1 bdk_ap_cvm_xlatdata1_el1_t;
+
+#define BDK_AP_CVM_XLATDATA1_EL1 BDK_AP_CVM_XLATDATA1_EL1_FUNC()
+static inline uint64_t BDK_AP_CVM_XLATDATA1_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_XLATDATA1_EL1_FUNC(void)
+{
+ return 0x3000b050500ll;
+}
+
+#define typedef_BDK_AP_CVM_XLATDATA1_EL1 bdk_ap_cvm_xlatdata1_el1_t
+#define bustype_BDK_AP_CVM_XLATDATA1_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_XLATDATA1_EL1 "AP_CVM_XLATDATA1_EL1"
+#define busnum_BDK_AP_CVM_XLATDATA1_EL1 0
+#define arguments_BDK_AP_CVM_XLATDATA1_EL1 -1,-1,-1,-1
+
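+/* A companion sketch (not from the imported BDK sources): pull the raw
+ * MASK/ENT1/ENT2 fields out of AP_CVM_XLATDATA1_EL1. The sources document
+ * only the bit layout, so nothing is assumed beyond field extraction; the
+ * helper name is hypothetical. */
+static inline void example_xlatdata1_fields(uint64_t *mask, uint64_t *ent1, uint64_t *ent2)
+{
+    bdk_ap_cvm_xlatdata1_el1_t xd;
+
+    xd.u = BDK_CSR_READ(bdk_numa_local(), BDK_AP_CVM_XLATDATA1_EL1);
+    *mask = xd.s.mask; /* bits <33:12> */
+    *ent1 = xd.s.ent1; /* bits <44:36> */
+    *ent2 = xd.s.ent2; /* bits <62:54> */
+}
+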
+/**
+ * Register (SYSREG) ap_cvm_xlatvtag0_el1
+ *
+ * AP Cavium Translation Tag 0 EL1 Register
+ */
+union bdk_ap_cvm_xlatvtag0_el1
+{
+ uint64_t u;
+ struct bdk_ap_cvm_xlatvtag0_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t level : 2; /**< [ 63: 62](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t eva_asid : 6; /**< [ 61: 56](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t eva_vmid : 4; /**< [ 55: 52](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t ng : 1; /**< [ 51: 51](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t el_1or0 : 1; /**< [ 50: 50](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t nsec : 1; /**< [ 49: 49](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t r : 1; /**< [ 48: 48](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t vpn : 36; /**< [ 47: 12](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t reserved_1_11 : 11;
+ uint64_t val : 1; /**< [ 0: 0](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+#else /* Word 0 - Little Endian */
+ uint64_t val : 1; /**< [ 0: 0](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t reserved_1_11 : 11;
+ uint64_t vpn : 36; /**< [ 47: 12](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t r : 1; /**< [ 48: 48](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t nsec : 1; /**< [ 49: 49](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t el_1or0 : 1; /**< [ 50: 50](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t ng : 1; /**< [ 51: 51](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t eva_vmid : 4; /**< [ 55: 52](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t eva_asid : 6; /**< [ 61: 56](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t level : 2; /**< [ 63: 62](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_xlatvtag0_el1_s cn; */
+};
+typedef union bdk_ap_cvm_xlatvtag0_el1 bdk_ap_cvm_xlatvtag0_el1_t;
+
+#define BDK_AP_CVM_XLATVTAG0_EL1 BDK_AP_CVM_XLATVTAG0_EL1_FUNC()
+static inline uint64_t BDK_AP_CVM_XLATVTAG0_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_XLATVTAG0_EL1_FUNC(void)
+{
+ return 0x3000b050600ll;
+}
+
+#define typedef_BDK_AP_CVM_XLATVTAG0_EL1 bdk_ap_cvm_xlatvtag0_el1_t
+#define bustype_BDK_AP_CVM_XLATVTAG0_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_XLATVTAG0_EL1 "AP_CVM_XLATVTAG0_EL1"
+#define busnum_BDK_AP_CVM_XLATVTAG0_EL1 0
+#define arguments_BDK_AP_CVM_XLATVTAG0_EL1 -1,-1,-1,-1
+
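+/* A minimal sketch (not from the imported BDK sources): check VAL before
+ * trusting a tag, then rebuild a virtual address from VPN<47:12>. The VA
+ * reconstruction (VPN shifted by a 4 KB page offset) is an assumption drawn
+ * from the bit layout above, not from Cavium documentation. */
+static inline int example_xlatvtag0_va(uint64_t *va)
+{
+    bdk_ap_cvm_xlatvtag0_el1_t tag;
+
+    tag.u = BDK_CSR_READ(bdk_numa_local(), BDK_AP_CVM_XLATVTAG0_EL1);
+    if (!tag.s.val)
+        return -1;            /* entry not valid; nothing to decode */
+    *va = tag.s.vpn << 12;    /* assumed: VPN occupies bits <47:12> */
+    return 0;
+}
+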
+/**
+ * Register (SYSREG) ap_cvm_xlatvtag1_el1
+ *
+ * AP Cavium Translation Tag 1 EL1 Register
+ */
+union bdk_ap_cvm_xlatvtag1_el1
+{
+ uint64_t u;
+ struct bdk_ap_cvm_xlatvtag1_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_57_63 : 7;
+ uint64_t ent2 : 9; /**< [ 56: 48](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t reserved_45_47 : 3;
+ uint64_t ent1 : 9; /**< [ 44: 36](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t reserved_34_35 : 2;
+ uint64_t mask : 22; /**< [ 33: 12](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t reserved_2_11 : 10;
+ uint64_t stage2 : 1; /**< [ 1: 1](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t stage1 : 1; /**< [ 0: 0](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+#else /* Word 0 - Little Endian */
+ uint64_t stage1 : 1; /**< [ 0: 0](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t stage2 : 1; /**< [ 1: 1](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t reserved_2_11 : 10;
+ uint64_t mask : 22; /**< [ 33: 12](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t reserved_34_35 : 2;
+ uint64_t ent1 : 9; /**< [ 44: 36](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t reserved_45_47 : 3;
+ uint64_t ent2 : 9; /**< [ 56: 48](RO) SYS[CvmCACHE] debug read data from uTLB/MTLB/WCU. */
+ uint64_t reserved_57_63 : 7;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvm_xlatvtag1_el1_s cn; */
+};
+typedef union bdk_ap_cvm_xlatvtag1_el1 bdk_ap_cvm_xlatvtag1_el1_t;
+
+#define BDK_AP_CVM_XLATVTAG1_EL1 BDK_AP_CVM_XLATVTAG1_EL1_FUNC()
+static inline uint64_t BDK_AP_CVM_XLATVTAG1_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVM_XLATVTAG1_EL1_FUNC(void)
+{
+ return 0x3000b050700ll;
+}
+
+#define typedef_BDK_AP_CVM_XLATVTAG1_EL1 bdk_ap_cvm_xlatvtag1_el1_t
+#define bustype_BDK_AP_CVM_XLATVTAG1_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVM_XLATVTAG1_EL1 "AP_CVM_XLATVTAG1_EL1"
+#define busnum_BDK_AP_CVM_XLATVTAG1_EL1 0
+#define arguments_BDK_AP_CVM_XLATVTAG1_EL1 -1,-1,-1,-1
+
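+/* A minimal sketch (not from the imported BDK sources): report which
+ * translation stages a captured entry used, from the STAGE1/STAGE2 flag
+ * bits above. Purely illustrative; the helper name is hypothetical. */
+static inline void example_xlatvtag1_stages(int *stage1, int *stage2)
+{
+    bdk_ap_cvm_xlatvtag1_el1_t tag;
+
+    tag.u = BDK_CSR_READ(bdk_numa_local(), BDK_AP_CVM_XLATVTAG1_EL1);
+    *stage1 = tag.s.stage1;
+    *stage2 = tag.s.stage2;
+}
+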
+/**
+ * Register (SYSREG) ap_cvmctl2_el1
+ *
+ * AP Cavium Control Register
+ * This register provides Cavium-specific control information.
+ */
+union bdk_ap_cvmctl2_el1
+{
+ uint64_t u;
+ struct bdk_ap_cvmctl2_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+        uint64_t livelock_stall_detect : 4; /**< [ 11: 8](R/W) Livelock stall detector. 0 = disable; otherwise the stall threshold is 16*1\<\<[11:8] cycles. */
+ uint64_t reserved_4_7 : 4;
+        uint64_t reduce_map_bandwidth : 2; /**< [  3:  2](R/W) Reduce map bandwidth to 1-3 instr/cycle (also reduces max inflight instructions to 32, 64, or 96). */
+ uint64_t allow_one_ifi : 1; /**< [ 1: 1](R/W) Allow only one inflight instruction. */
+ uint64_t allow_one_ifmr : 1; /**< [ 0: 0](R/W) Allow only one inflight memory reference. */
+#else /* Word 0 - Little Endian */
+ uint64_t allow_one_ifmr : 1; /**< [ 0: 0](R/W) Allow only one inflight memory reference. */
+ uint64_t allow_one_ifi : 1; /**< [ 1: 1](R/W) Allow only one inflight instruction. */
+        uint64_t reduce_map_bandwidth : 2; /**< [  3:  2](R/W) Reduce map bandwidth to 1-3 instr/cycle (also reduces max inflight instructions to 32, 64, or 96). */
+ uint64_t reserved_4_7 : 4;
+        uint64_t livelock_stall_detect : 4; /**< [ 11: 8](R/W) Livelock stall detector. 0 = disable; otherwise the stall threshold is 16*1\<\<[11:8] cycles. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvmctl2_el1_s cn; */
+};
+typedef union bdk_ap_cvmctl2_el1 bdk_ap_cvmctl2_el1_t;
+
+#define BDK_AP_CVMCTL2_EL1 BDK_AP_CVMCTL2_EL1_FUNC()
+static inline uint64_t BDK_AP_CVMCTL2_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVMCTL2_EL1_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x3000b000100ll;
+ __bdk_csr_fatal("AP_CVMCTL2_EL1", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_CVMCTL2_EL1 bdk_ap_cvmctl2_el1_t
+#define bustype_BDK_AP_CVMCTL2_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVMCTL2_EL1 "AP_CVMCTL2_EL1"
+#define busnum_BDK_AP_CVMCTL2_EL1 0
+#define arguments_BDK_AP_CVMCTL2_EL1 -1,-1,-1,-1
+
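+/* A minimal sketch (not from the imported BDK sources): program the
+ * livelock stall detector via the BDK_CSR_MODIFY() helper assumed from
+ * bdk-csr.h. Per the field description the threshold is 16*1<<[11:8]
+ * cycles, and the register exists only on CN9XXX (the address function
+ * above fatals elsewhere), hence the model guard. */
+static inline void example_set_livelock_detect(uint64_t setting)
+{
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+        return; /* AP_CVMCTL2_EL1 is CN9XXX-only */
+    BDK_CSR_MODIFY(c, bdk_numa_local(), BDK_AP_CVMCTL2_EL1,
+        c.s.livelock_stall_detect = setting & 0xf);
+}
+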
+/**
+ * Register (SYSREG) ap_cvmctl_el1
+ *
+ * AP Cavium Control Register
+ * This register provides Cavium-specific control information.
+ */
+union bdk_ap_cvmctl_el1
+{
+ uint64_t u;
+ struct bdk_ap_cvmctl_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_57_63 : 7;
+ uint64_t disable_const : 1; /**< [ 56: 56](R/W) Disable constant optimization. */
+ uint64_t disable_alias : 1; /**< [ 55: 55](R/W) Disable alias optimization. */
+ uint64_t disable_ldp_stp_fiss : 1; /**< [ 54: 54](R/W) Disable ldp/stp fissioning with unaligned prediction. */
+ uint64_t disable_io_pred : 1; /**< [ 53: 53](R/W) Disable IO space prediction. */
+ uint64_t disable_unaligned_pred : 1; /**< [ 52: 52](R/W) Disable unaligned prediction. */
+ uint64_t disable_ldst_ordering_pred : 1;/**< [ 51: 51](R/W) Disable load/store ordering prediction. */
+ uint64_t reserved_50 : 1;
+ uint64_t disable_branch_elimination : 1;/**< [ 49: 49](R/W) Disable branch elimination. */
+ uint64_t disable_optimum_occupancy : 1;/**< [ 48: 48](R/W) Increase ibuf occupancy time. */
+ uint64_t disable_load2 : 1; /**< [ 47: 47](R/W) Disable second load port. */
+ uint64_t force_strong_ordering : 1; /**< [ 46: 46](R/W) Force strong load ordering.
+ 0 = Weak ordering.
+ 1 = Strong ordering.
+
+ Internal:
+ CN8XXX is always strong ordering. */
+ uint64_t disable_mem_ooo : 1; /**< [ 45: 45](R/W) Disable all memory out-of-order. */
+ uint64_t disable_ooo : 1; /**< [ 44: 44](R/W) Disable all out-of-order. */
+ uint64_t dpref_bp_dis : 1; /**< [ 43: 43](R/W) When set, hardware data prefetcher ignores memory system backpressure for next line prefetcher. */
+ uint64_t dpref_lookahead : 1; /**< [ 42: 42](R/W) When set, hardware data prefetcher uses a lookahead of 2. When clear, lookahead of 1. */
+ uint64_t dpref_next_line : 1; /**< [ 41: 41](R/W) Enable next line hardware data prefetcher. */
+ uint64_t dpref_delta : 1; /**< [ 40: 40](R/W) Enable delta stream hardware data prefetcher. */
+ uint64_t mrs_msr_hazard : 1; /**< [ 39: 39](R/W) Disable MRS/MSR pipelining, assume hazards. */
+ uint64_t disable_eret_pred : 1; /**< [ 38: 38](R/W) Disable ERET prediction. */
+ uint64_t disable_casp : 1; /**< [ 37: 37](R/W) Disable the CASP instruction. */
+ uint64_t disable_cas : 1; /**< [ 36: 36](R/W) Disable the CAS instruction. */
+ uint64_t force_cim_ich_vtr_to1 : 1; /**< [ 35: 35](R/W) Set CIM AP_ICH_VTR_EL2[LISTREGS] to 0x1 (i.e. two LRs) on Pass 1. */
+ uint64_t disable_wfe : 1; /**< [ 34: 34](R/W) Disable WFE. */
+ uint64_t enable_v81 : 1; /**< [ 33: 33](R/W) Enable v8.1 features, modifying the ID registers to show v8.1. */
+ uint64_t isb_flush : 1; /**< [ 32: 32](R/W) Enable pipeline flush after an ISB. */
+ uint64_t wfe_defer : 8; /**< [ 31: 24](R/W) WFE defer timer setting. Time in core-clocks = {| WFE_DEFER, WFE_DEFER\<3:0\>} \<\<
+ WFE_DEFER\<7:4\>. */
+ uint64_t reserved_23 : 1;
+ uint64_t force_icache_parity : 1; /**< [ 22: 22](R/W) Force icache correctable parity error on next Icache fill. This bit clears itself after
+ the fill operation. */
+ uint64_t suppress_parity_checking : 1;/**< [ 21: 21](R/W) Suppress Icache correctable parity checking. */
+ uint64_t no_exc_icache_parity : 1; /**< [ 20: 20](R/W) Suppress exception on Icache correctable parity error. */
+ uint64_t step_rate : 4; /**< [ 19: 16](R/W) Step rate. */
+ uint64_t reserved_10_15 : 6;
+ uint64_t disable_flex_execution : 1; /**< [ 9: 9](R/W) Disable flex execution; also prevents overlapped execution of DIV/SQRT and other
+ instructions (to prevent a DIV load collision). */
+ uint64_t disable_branch_folding : 1; /**< [ 8: 8](R/W) Disable branch folding. */
+ uint64_t disable_wfi : 1; /**< [ 7: 7](R/W) Disable WFI/WFE. */
+ uint64_t reserved_6 : 1;
+ uint64_t force_issue_clock : 1; /**< [ 5: 5](R/W) Force issue-unit clock. */
+ uint64_t force_exe_clock : 1; /**< [ 4: 4](R/W) Force execution-unit clock. */
+ uint64_t force_csr_clock : 1; /**< [ 3: 3](R/W) Force CSR clock. */
+ uint64_t reserved_2 : 1;
+ uint64_t random_icache : 1; /**< [ 1: 1](R/W) Random Icache replacement. */
+ uint64_t disable_icache : 1; /**< [ 0: 0](R/W) Disable Icache. */
+#else /* Word 0 - Little Endian */
+ uint64_t disable_icache : 1; /**< [ 0: 0](R/W) Disable Icache. */
+ uint64_t random_icache : 1; /**< [ 1: 1](R/W) Random Icache replacement. */
+ uint64_t reserved_2 : 1;
+ uint64_t force_csr_clock : 1; /**< [ 3: 3](R/W) Force CSR clock. */
+ uint64_t force_exe_clock : 1; /**< [ 4: 4](R/W) Force execution-unit clock. */
+ uint64_t force_issue_clock : 1; /**< [ 5: 5](R/W) Force issue-unit clock. */
+ uint64_t reserved_6 : 1;
+ uint64_t disable_wfi : 1; /**< [ 7: 7](R/W) Disable WFI/WFE. */
+ uint64_t disable_branch_folding : 1; /**< [ 8: 8](R/W) Disable branch folding. */
+ uint64_t disable_flex_execution : 1; /**< [ 9: 9](R/W) Disable flex execution; also prevents overlapped execution of DIV/SQRT and other
+ instructions (to prevent a DIV load collision). */
+ uint64_t reserved_10_15 : 6;
+ uint64_t step_rate : 4; /**< [ 19: 16](R/W) Step rate. */
+ uint64_t no_exc_icache_parity : 1; /**< [ 20: 20](R/W) Suppress exception on Icache correctable parity error. */
+ uint64_t suppress_parity_checking : 1;/**< [ 21: 21](R/W) Suppress Icache correctable parity checking. */
+ uint64_t force_icache_parity : 1; /**< [ 22: 22](R/W) Force icache correctable parity error on next Icache fill. This bit clears itself after
+ the fill operation. */
+ uint64_t reserved_23 : 1;
+ uint64_t wfe_defer : 8; /**< [ 31: 24](R/W) WFE defer timer setting. Time in core-clocks = {| WFE_DEFER, WFE_DEFER\<3:0\>} \<\<
+ WFE_DEFER\<7:4\>. */
+ uint64_t isb_flush : 1; /**< [ 32: 32](R/W) Enable pipeline flush after an ISB. */
+ uint64_t enable_v81 : 1; /**< [ 33: 33](R/W) Enable v8.1 features, modifying the ID registers to show v8.1. */
+ uint64_t disable_wfe : 1; /**< [ 34: 34](R/W) Disable WFE. */
+ uint64_t force_cim_ich_vtr_to1 : 1; /**< [ 35: 35](R/W) Set CIM AP_ICH_VTR_EL2[LISTREGS] to 0x1 (i.e. two LRs) on Pass 1. */
+ uint64_t disable_cas : 1; /**< [ 36: 36](R/W) Disable the CAS instruction. */
+ uint64_t disable_casp : 1; /**< [ 37: 37](R/W) Disable the CASP instruction. */
+ uint64_t disable_eret_pred : 1; /**< [ 38: 38](R/W) Disable ERET prediction. */
+ uint64_t mrs_msr_hazard : 1; /**< [ 39: 39](R/W) Disable MRS/MSR pipelining, assume hazards. */
+ uint64_t dpref_delta : 1; /**< [ 40: 40](R/W) Enable delta stream hardware data prefetcher. */
+ uint64_t dpref_next_line : 1; /**< [ 41: 41](R/W) Enable next line hardware data prefetcher. */
+ uint64_t dpref_lookahead : 1; /**< [ 42: 42](R/W) When set, hardware data prefetcher uses a lookahead of 2. When clear, lookahead of 1. */
+ uint64_t dpref_bp_dis : 1; /**< [ 43: 43](R/W) When set, hardware data prefetcher ignores memory system backpressure for next line prefetcher. */
+ uint64_t disable_ooo : 1; /**< [ 44: 44](R/W) Disable all out-of-order. */
+ uint64_t disable_mem_ooo : 1; /**< [ 45: 45](R/W) Disable all memory out-of-order. */
+ uint64_t force_strong_ordering : 1; /**< [ 46: 46](R/W) Force strong load ordering.
+ 0 = Weak ordering.
+ 1 = Strong ordering.
+
+ Internal:
+ CN8XXX is always strong ordering. */
+ uint64_t disable_load2 : 1; /**< [ 47: 47](R/W) Disable second load port. */
+ uint64_t disable_optimum_occupancy : 1;/**< [ 48: 48](R/W) Increase ibuf occupancy time. */
+ uint64_t disable_branch_elimination : 1;/**< [ 49: 49](R/W) Disable branch elimination. */
+ uint64_t reserved_50 : 1;
+ uint64_t disable_ldst_ordering_pred : 1;/**< [ 51: 51](R/W) Disable load/store ordering prediction. */
+ uint64_t disable_unaligned_pred : 1; /**< [ 52: 52](R/W) Disable unaligned prediction. */
+ uint64_t disable_io_pred : 1; /**< [ 53: 53](R/W) Disable IO space prediction. */
+ uint64_t disable_ldp_stp_fiss : 1; /**< [ 54: 54](R/W) Disable ldp/stp fissioning with unaligned prediction. */
+ uint64_t disable_alias : 1; /**< [ 55: 55](R/W) Disable alias optimization. */
+ uint64_t disable_const : 1; /**< [ 56: 56](R/W) Disable constant optimization. */
+ uint64_t reserved_57_63 : 7;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_cvmctl_el1_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_40_63 : 24;
+ uint64_t mrs_msr_hazard : 1; /**< [ 39: 39](R/W) Disable MRS/MSR pipelining, assume hazards. */
+ uint64_t disable_eret_pred : 1; /**< [ 38: 38](R/W) Disable ERET prediction. */
+ uint64_t disable_casp : 1; /**< [ 37: 37](R/W) Disable the CASP instruction. */
+ uint64_t disable_cas : 1; /**< [ 36: 36](R/W) Disable the CAS instruction. */
+ uint64_t force_cim_ich_vtr_to1 : 1; /**< [ 35: 35](R/W) Set CIM AP_ICH_VTR_EL2[LISTREGS] to 0x1 (i.e. two LRs) on Pass 1. */
+ uint64_t disable_wfe : 1; /**< [ 34: 34](R/W) Disable WFE. */
+ uint64_t enable_v81 : 1; /**< [ 33: 33](R/W) Enable v8.1 features, modifying the ID registers to show v8.1. */
+ uint64_t isb_flush : 1; /**< [ 32: 32](R/W) Enable pipeline flush after an ISB. */
+ uint64_t wfe_defer : 8; /**< [ 31: 24](R/W) WFE defer timer setting. Time in core-clocks = {| WFE_DEFER, WFE_DEFER\<3:0\>} \<\<
+ WFE_DEFER\<7:4\>. */
+ uint64_t disable_icache_probes : 1; /**< [ 23: 23](R/W) Disable Icache probes. */
+ uint64_t force_icache_parity : 1; /**< [ 22: 22](R/W) Force icache correctable parity error on next Icache fill. This bit clears itself after
+ the fill operation. */
+ uint64_t suppress_parity_checking : 1;/**< [ 21: 21](R/W) Suppress Icache correctable parity checking. */
+ uint64_t no_exc_icache_parity : 1; /**< [ 20: 20](R/W) Suppress exception on Icache correctable parity error. */
+ uint64_t step_rate : 4; /**< [ 19: 16](R/W) Step rate. */
+ uint64_t reserved_10_15 : 6;
+ uint64_t disable_flex_execution : 1; /**< [ 9: 9](R/W) Disable flex execution; also prevents overlapped execution of DIV/SQRT and other
+ instructions (to prevent a DIV load collision). */
+ uint64_t disable_branch_folding : 1; /**< [ 8: 8](R/W) Disable branch folding. */
+ uint64_t disable_wfi : 1; /**< [ 7: 7](R/W) Disable WFI/WFE. */
+ uint64_t disable_fetch_under_fill : 1;/**< [ 6: 6](R/W) Disable fetch-under-fill. */
+ uint64_t force_issue_clock : 1; /**< [ 5: 5](R/W) Force issue-unit clock. */
+ uint64_t force_exe_clock : 1; /**< [ 4: 4](R/W) Force execution-unit clock. */
+ uint64_t force_csr_clock : 1; /**< [ 3: 3](R/W) Force CSR clock. */
+ uint64_t disable_icache_prefetching : 1;/**< [ 2: 2](R/W) Disable Icache prefetching. */
+ uint64_t random_icache : 1; /**< [ 1: 1](R/W) Random Icache replacement. */
+ uint64_t disable_icache : 1; /**< [ 0: 0](R/W) Disable Icache. */
+#else /* Word 0 - Little Endian */
+ uint64_t disable_icache : 1; /**< [ 0: 0](R/W) Disable Icache. */
+ uint64_t random_icache : 1; /**< [ 1: 1](R/W) Random Icache replacement. */
+ uint64_t disable_icache_prefetching : 1;/**< [ 2: 2](R/W) Disable Icache prefetching. */
+ uint64_t force_csr_clock : 1; /**< [ 3: 3](R/W) Force CSR clock. */
+ uint64_t force_exe_clock : 1; /**< [ 4: 4](R/W) Force execution-unit clock. */
+ uint64_t force_issue_clock : 1; /**< [ 5: 5](R/W) Force issue-unit clock. */
+ uint64_t disable_fetch_under_fill : 1;/**< [ 6: 6](R/W) Disable fetch-under-fill. */
+ uint64_t disable_wfi : 1; /**< [ 7: 7](R/W) Disable WFI/WFE. */
+ uint64_t disable_branch_folding : 1; /**< [ 8: 8](R/W) Disable branch folding. */
+ uint64_t disable_flex_execution : 1; /**< [ 9: 9](R/W) Disable flex execution; also prevents overlapped execution of DIV/SQRT and other
+ instructions (to prevent a DIV load collision). */
+ uint64_t reserved_10_15 : 6;
+ uint64_t step_rate : 4; /**< [ 19: 16](R/W) Step rate. */
+ uint64_t no_exc_icache_parity : 1; /**< [ 20: 20](R/W) Suppress exception on Icache correctable parity error. */
+ uint64_t suppress_parity_checking : 1;/**< [ 21: 21](R/W) Suppress Icache correctable parity checking. */
+ uint64_t force_icache_parity : 1; /**< [ 22: 22](R/W) Force icache correctable parity error on next Icache fill. This bit clears itself after
+ the fill operation. */
+ uint64_t disable_icache_probes : 1; /**< [ 23: 23](R/W) Disable Icache probes. */
+ uint64_t wfe_defer : 8; /**< [ 31: 24](R/W) WFE defer timer setting. Time in core-clocks = {| WFE_DEFER, WFE_DEFER\<3:0\>} \<\<
+ WFE_DEFER\<7:4\>. */
+ uint64_t isb_flush : 1; /**< [ 32: 32](R/W) Enable pipeline flush after an ISB. */
+ uint64_t enable_v81 : 1; /**< [ 33: 33](R/W) Enable v8.1 features, modifying the ID registers to show v8.1. */
+ uint64_t disable_wfe : 1; /**< [ 34: 34](R/W) Disable WFE. */
+ uint64_t force_cim_ich_vtr_to1 : 1; /**< [ 35: 35](R/W) Set CIM AP_ICH_VTR_EL2[LISTREGS] to 0x1 (i.e. two LRs) on Pass 1. */
+ uint64_t disable_cas : 1; /**< [ 36: 36](R/W) Disable the CAS instruction. */
+ uint64_t disable_casp : 1; /**< [ 37: 37](R/W) Disable the CASP instruction. */
+ uint64_t disable_eret_pred : 1; /**< [ 38: 38](R/W) Disable ERET prediction. */
+ uint64_t mrs_msr_hazard : 1; /**< [ 39: 39](R/W) Disable MRS/MSR pipelining, assume hazards. */
+ uint64_t reserved_40_63 : 24;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_ap_cvmctl_el1_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_57_63 : 7;
+ uint64_t disable_const : 1; /**< [ 56: 56](R/W) Disable constant optimization. */
+ uint64_t disable_alias : 1; /**< [ 55: 55](R/W) Disable alias optimization. */
+ uint64_t disable_ldp_stp_fiss : 1; /**< [ 54: 54](R/W) Disable ldp/stp fissioning with unaligned prediction. */
+ uint64_t disable_io_pred : 1; /**< [ 53: 53](R/W) Disable IO space prediction. */
+ uint64_t disable_unaligned_pred : 1; /**< [ 52: 52](R/W) Disable unaligned prediction. */
+ uint64_t disable_ldst_ordering_pred : 1;/**< [ 51: 51](R/W) Disable load/store ordering prediction. */
+ uint64_t reserved_50 : 1;
+ uint64_t disable_branch_elimination : 1;/**< [ 49: 49](R/W) Disable branch elimination. */
+ uint64_t disable_optimum_occupancy : 1;/**< [ 48: 48](R/W) Increase ibuf occupancy time. */
+ uint64_t disable_load2 : 1; /**< [ 47: 47](R/W) Disable second load port. */
+ uint64_t force_strong_ordering : 1; /**< [ 46: 46](R/W) Force strong load ordering.
+ 0 = Weak ordering.
+ 1 = Strong ordering.
+
+ Internal:
+ CN8XXX is always strong ordering. */
+ uint64_t disable_mem_ooo : 1; /**< [ 45: 45](R/W) Disable all memory out-of-order. */
+ uint64_t disable_ooo : 1; /**< [ 44: 44](R/W) Disable all out-of-order. */
+ uint64_t dpref_bp_dis : 1; /**< [ 43: 43](R/W) When set, hardware data prefetcher ignores memory system backpressure for next line prefetcher. */
+ uint64_t dpref_lookahead : 1; /**< [ 42: 42](R/W) When set, hardware data prefetcher uses a lookahead of 2. When clear, lookahead of 1. */
+ uint64_t dpref_next_line : 1; /**< [ 41: 41](R/W) Enable next line hardware data prefetcher. */
+ uint64_t dpref_delta : 1; /**< [ 40: 40](R/W) Enable delta stream hardware data prefetcher. */
+ uint64_t mrs_msr_hazard : 1; /**< [ 39: 39](R/W) Disable MRS/MSR pipelining, assume hazards. */
+ uint64_t disable_eret_pred : 1; /**< [ 38: 38](R/W) Disable ERET prediction. */
+ uint64_t disable_casp : 1; /**< [ 37: 37](R/W) Disable the CASP instruction. */
+ uint64_t disable_cas : 1; /**< [ 36: 36](R/W) Disable the CAS instruction. */
+ uint64_t reserved_35 : 1;
+ uint64_t disable_wfe : 1; /**< [ 34: 34](R/W) Disable WFE. */
+ uint64_t enable_v81 : 1; /**< [ 33: 33](RO) Enable v8.1 features, modifying the ID registers to show v8.1.
+ Internal:
+ FIXME does this go away with CN98XX. */
+ uint64_t isb_flush : 1; /**< [ 32: 32](R/W) Enable pipeline flush after an ISB. */
+ uint64_t wfe_defer : 8; /**< [ 31: 24](R/W) WFE defer timer setting. Time in core-clocks = {| WFE_DEFER, WFE_DEFER\<3:0\>} \<\<
+ WFE_DEFER\<7:4\>. */
+ uint64_t ldil1hwprefdis : 1; /**< [ 23: 23](R/W) Disable Istream LDI L1 hardware prefetcher. */
+ uint64_t force_icache_parity : 1; /**< [ 22: 22](R/W) Force icache correctable parity error on next Icache fill. This bit clears itself after
+ the fill operation. */
+ uint64_t suppress_parity_checking : 1;/**< [ 21: 21](R/W) Suppress Icache correctable parity checking. */
+ uint64_t no_exc_icache_parity : 1; /**< [ 20: 20](R/W) Suppress exception on Icache correctable parity error. */
+ uint64_t reserved_16_19 : 4;
+ uint64_t reserved_10_15 : 6;
+ uint64_t reserved_9 : 1;
+ uint64_t disable_branch_folding : 1; /**< [ 8: 8](R/W) Disable branch folding and other fusions. */
+ uint64_t disable_wfi : 1; /**< [ 7: 7](R/W) Disable WFI/WFE. */
+ uint64_t ldil1swprefdis : 1; /**< [ 6: 6](R/W) Disable LDI L1 software prefetch instructions (PRFM). */
+ uint64_t force_issue_clock : 1; /**< [ 5: 5](R/W) Force issue-unit clock. */
+ uint64_t force_exe_clock : 1; /**< [ 4: 4](R/W) Force execution-unit clock. */
+ uint64_t force_csr_clock : 1; /**< [ 3: 3](R/W) Force CSR clock. */
+ uint64_t ldil1specdis : 1; /**< [ 2: 2](R/W) Disable all LDI L1 speculative fill requests (only demand fills with machine drained).
+ Internal:
+ CN88XX-like mode. */
+ uint64_t random_icache : 1; /**< [ 1: 1](R/W) Random Icache replacement. */
+ uint64_t disable_icache : 1; /**< [ 0: 0](R/W) Disable Icache. */
+#else /* Word 0 - Little Endian */
+ uint64_t disable_icache : 1; /**< [ 0: 0](R/W) Disable Icache. */
+ uint64_t random_icache : 1; /**< [ 1: 1](R/W) Random Icache replacement. */
+ uint64_t ldil1specdis : 1; /**< [ 2: 2](R/W) Disable all LDI L1 speculative fill requests (only demand fills with machine drained).
+ Internal:
+ CN88XX-like mode. */
+ uint64_t force_csr_clock : 1; /**< [ 3: 3](R/W) Force CSR clock. */
+ uint64_t force_exe_clock : 1; /**< [ 4: 4](R/W) Force execution-unit clock. */
+ uint64_t force_issue_clock : 1; /**< [ 5: 5](R/W) Force issue-unit clock. */
+ uint64_t ldil1swprefdis : 1; /**< [ 6: 6](R/W) Disable LDI L1 software prefetch instructions (PRFM). */
+ uint64_t disable_wfi : 1; /**< [ 7: 7](R/W) Disable WFI/WFE. */
+ uint64_t disable_branch_folding : 1; /**< [ 8: 8](R/W) Disable branch folding and other fusions. */
+ uint64_t reserved_9 : 1;
+ uint64_t reserved_10_15 : 6;
+ uint64_t reserved_16_19 : 4;
+ uint64_t no_exc_icache_parity : 1; /**< [ 20: 20](R/W) Suppress exception on Icache correctable parity error. */
+ uint64_t suppress_parity_checking : 1;/**< [ 21: 21](R/W) Suppress Icache correctable parity checking. */
+ uint64_t force_icache_parity : 1; /**< [ 22: 22](R/W) Force icache correctable parity error on next Icache fill. This bit clears itself after
+ the fill operation. */
+ uint64_t ldil1hwprefdis : 1; /**< [ 23: 23](R/W) Disable Istream LDI L1 hardware prefetcher. */
+ uint64_t wfe_defer : 8; /**< [ 31: 24](R/W) WFE defer timer setting. Time in core-clocks = {| WFE_DEFER, WFE_DEFER\<3:0\>} \<\<
+ WFE_DEFER\<7:4\>. */
+ uint64_t isb_flush : 1; /**< [ 32: 32](R/W) Enable pipeline flush after an ISB. */
+ uint64_t enable_v81 : 1; /**< [ 33: 33](RO) Enable v8.1 features, modifying the ID registers to show v8.1.
+ Internal:
+ FIXME does this go away with CN98XX. */
+ uint64_t disable_wfe : 1; /**< [ 34: 34](R/W) Disable WFE. */
+ uint64_t reserved_35 : 1;
+ uint64_t disable_cas : 1; /**< [ 36: 36](R/W) Disable the CAS instruction. */
+ uint64_t disable_casp : 1; /**< [ 37: 37](R/W) Disable the CASP instruction. */
+ uint64_t disable_eret_pred : 1; /**< [ 38: 38](R/W) Disable ERET prediction. */
+ uint64_t mrs_msr_hazard : 1; /**< [ 39: 39](R/W) Disable MRS/MSR pipelining, assume hazards. */
+ uint64_t dpref_delta : 1; /**< [ 40: 40](R/W) Enable delta stream hardware data prefetcher. */
+ uint64_t dpref_next_line : 1; /**< [ 41: 41](R/W) Enable next line hardware data prefetcher. */
+ uint64_t dpref_lookahead : 1; /**< [ 42: 42](R/W) When set, hardware data prefetcher uses a lookahead of 2. When clear, lookahead of 1. */
+ uint64_t dpref_bp_dis : 1; /**< [ 43: 43](R/W) When set, hardware data prefetcher ignores memory system backpressure for next line prefetcher. */
+ uint64_t disable_ooo : 1; /**< [ 44: 44](R/W) Disable all out-of-order. */
+ uint64_t disable_mem_ooo : 1; /**< [ 45: 45](R/W) Disable all memory out-of-order. */
+ uint64_t force_strong_ordering : 1; /**< [ 46: 46](R/W) Force strong load ordering.
+ 0 = Weak ordering.
+ 1 = Strong ordering.
+
+ Internal:
+ CN8XXX is always strong ordering. */
+ uint64_t disable_load2 : 1; /**< [ 47: 47](R/W) Disable second load port. */
+ uint64_t disable_optimum_occupancy : 1;/**< [ 48: 48](R/W) Increase ibuf occupancy time. */
+ uint64_t disable_branch_elimination : 1;/**< [ 49: 49](R/W) Disable branch elimination. */
+ uint64_t reserved_50 : 1;
+ uint64_t disable_ldst_ordering_pred : 1;/**< [ 51: 51](R/W) Disable load/store ordering prediction. */
+ uint64_t disable_unaligned_pred : 1; /**< [ 52: 52](R/W) Disable unaligned prediction. */
+ uint64_t disable_io_pred : 1; /**< [ 53: 53](R/W) Disable IO space prediction. */
+ uint64_t disable_ldp_stp_fiss : 1; /**< [ 54: 54](R/W) Disable ldp/stp fissioning with unaligned prediction. */
+ uint64_t disable_alias : 1; /**< [ 55: 55](R/W) Disable alias optimization. */
+ uint64_t disable_const : 1; /**< [ 56: 56](R/W) Disable constant optimization. */
+ uint64_t reserved_57_63 : 7;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_ap_cvmctl_el1_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_44_63 : 20;
+ uint64_t dpref_bp_dis : 1; /**< [ 43: 43](R/W) When set, hardware data prefetcher ignores memory system backpressure for next line prefetcher. */
+ uint64_t dpref_lookahead : 1; /**< [ 42: 42](R/W) When set, hardware data prefetcher uses a lookahead of 2. When clear, lookahead of 1. */
+ uint64_t dpref_next_line : 1; /**< [ 41: 41](R/W) Enable next line hardware data prefetcher. */
+ uint64_t dpref_delta : 1; /**< [ 40: 40](R/W) Enable delta stream hardware data prefetcher. */
+ uint64_t mrs_msr_hazard : 1; /**< [ 39: 39](R/W) Disable MRS/MSR pipelining, assume hazards. */
+ uint64_t disable_eret_pred : 1; /**< [ 38: 38](R/W) Disable ERET prediction. */
+ uint64_t disable_casp : 1; /**< [ 37: 37](R/W) Disable the CASP instruction. */
+ uint64_t disable_cas : 1; /**< [ 36: 36](R/W) Disable the CAS instruction. */
+ uint64_t force_cim_ich_vtr_to1 : 1; /**< [ 35: 35](RAZ) Reserved. */
+ uint64_t disable_wfe : 1; /**< [ 34: 34](R/W) Disable WFE. */
+ uint64_t enable_v81 : 1; /**< [ 33: 33](R/W) Enable v8.1 features, modifying the ID registers to show v8.1. */
+ uint64_t isb_flush : 1; /**< [ 32: 32](R/W) Enable pipeline flush after an ISB. */
+ uint64_t wfe_defer : 8; /**< [ 31: 24](R/W) WFE defer timer setting. Time in core-clocks = {| WFE_DEFER, WFE_DEFER\<3:0\>} \<\<
+ WFE_DEFER\<7:4\>. */
+ uint64_t disable_icache_probes : 1; /**< [ 23: 23](R/W) Disable Icache probes. */
+ uint64_t force_icache_parity : 1; /**< [ 22: 22](R/W) Force icache correctable parity error on next Icache fill. This bit clears itself after
+ the fill operation. */
+ uint64_t suppress_parity_checking : 1;/**< [ 21: 21](R/W) Suppress Icache correctable parity checking. */
+ uint64_t no_exc_icache_parity : 1; /**< [ 20: 20](R/W) Suppress exception on Icache correctable parity error. */
+ uint64_t step_rate : 4; /**< [ 19: 16](R/W) Step rate. */
+ uint64_t reserved_10_15 : 6;
+ uint64_t disable_flex_execution : 1; /**< [ 9: 9](R/W) Disable flex execution; also prevents overlapped execution of DIV/SQRT and other
+ instructions (to prevent a DIV load collision). */
+ uint64_t disable_branch_folding : 1; /**< [ 8: 8](R/W) Disable branch folding. */
+ uint64_t disable_wfi : 1; /**< [ 7: 7](R/W) Disable WFI/WFE. */
+ uint64_t disable_fetch_under_fill : 1;/**< [ 6: 6](R/W) Disable fetch-under-fill. */
+ uint64_t force_issue_clock : 1; /**< [ 5: 5](R/W) Force issue-unit clock. */
+ uint64_t force_exe_clock : 1; /**< [ 4: 4](R/W) Force execution-unit clock. */
+ uint64_t force_csr_clock : 1; /**< [ 3: 3](R/W) Force CSR clock. */
+ uint64_t disable_icache_prefetching : 1;/**< [ 2: 2](R/W) Disable Icache prefetching. */
+ uint64_t random_icache : 1; /**< [ 1: 1](R/W) Random Icache replacement. */
+ uint64_t disable_icache : 1; /**< [ 0: 0](R/W) Disable Icache. */
+#else /* Word 0 - Little Endian */
+ uint64_t disable_icache : 1; /**< [ 0: 0](R/W) Disable Icache. */
+ uint64_t random_icache : 1; /**< [ 1: 1](R/W) Random Icache replacement. */
+ uint64_t disable_icache_prefetching : 1;/**< [ 2: 2](R/W) Disable Icache prefetching. */
+ uint64_t force_csr_clock : 1; /**< [ 3: 3](R/W) Force CSR clock. */
+ uint64_t force_exe_clock : 1; /**< [ 4: 4](R/W) Force execution-unit clock. */
+ uint64_t force_issue_clock : 1; /**< [ 5: 5](R/W) Force issue-unit clock. */
+ uint64_t disable_fetch_under_fill : 1;/**< [ 6: 6](R/W) Disable fetch-under-fill. */
+ uint64_t disable_wfi : 1; /**< [ 7: 7](R/W) Disable WFI/WFE. */
+ uint64_t disable_branch_folding : 1; /**< [ 8: 8](R/W) Disable branch folding. */
+ uint64_t disable_flex_execution : 1; /**< [ 9: 9](R/W) Disable flex execution; also prevents overlapped execution of DIV/SQRT and other
+ instructions (to prevent a DIV load collision). */
+ uint64_t reserved_10_15 : 6;
+ uint64_t step_rate : 4; /**< [ 19: 16](R/W) Step rate. */
+ uint64_t no_exc_icache_parity : 1; /**< [ 20: 20](R/W) Suppress exception on Icache correctable parity error. */
+ uint64_t suppress_parity_checking : 1;/**< [ 21: 21](R/W) Suppress Icache correctable parity checking. */
+ uint64_t force_icache_parity : 1; /**< [ 22: 22](R/W) Force icache correctable parity error on next Icache fill. This bit clears itself after
+ the fill operation. */
+ uint64_t disable_icache_probes : 1; /**< [ 23: 23](R/W) Disable Icache probes. */
+ uint64_t wfe_defer : 8; /**< [ 31: 24](R/W) WFE defer timer setting. Time in core-clocks = {| WFE_DEFER, WFE_DEFER\<3:0\>} \<\<
+ WFE_DEFER\<7:4\>. */
+ uint64_t isb_flush : 1; /**< [ 32: 32](R/W) Enable pipeline flush after an ISB. */
+ uint64_t enable_v81 : 1; /**< [ 33: 33](R/W) Enable v8.1 features, modifying the ID registers to show v8.1. */
+ uint64_t disable_wfe : 1; /**< [ 34: 34](R/W) Disable WFE. */
+ uint64_t force_cim_ich_vtr_to1 : 1; /**< [ 35: 35](RAZ) Reserved. */
+ uint64_t disable_cas : 1; /**< [ 36: 36](R/W) Disable the CAS instruction. */
+ uint64_t disable_casp : 1; /**< [ 37: 37](R/W) Disable the CASP instruction. */
+ uint64_t disable_eret_pred : 1; /**< [ 38: 38](R/W) Disable ERET prediction. */
+ uint64_t mrs_msr_hazard : 1; /**< [ 39: 39](R/W) Disable MRS/MSR pipelining, assume hazards. */
+ uint64_t dpref_delta : 1; /**< [ 40: 40](R/W) Enable delta stream hardware data prefetcher. */
+ uint64_t dpref_next_line : 1; /**< [ 41: 41](R/W) Enable next line hardware data prefetcher. */
+ uint64_t dpref_lookahead : 1; /**< [ 42: 42](R/W) When set, hardware data prefetcher uses a lookahead of 2. When clear, lookahead of 1. */
+ uint64_t dpref_bp_dis : 1; /**< [ 43: 43](R/W) When set, hardware data prefetcher ignores memory system backpressure for next line prefetcher. */
+ uint64_t reserved_44_63 : 20;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_ap_cvmctl_el1_cn81xx cn83xx; */
+ struct bdk_ap_cvmctl_el1_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_40_63 : 24;
+ uint64_t mrs_msr_hazard : 1; /**< [ 39: 39](R/W) Disable MRS/MSR pipelining, assume hazards. */
+ uint64_t disable_eret_pred : 1; /**< [ 38: 38](R/W) Disable ERET prediction. */
+ uint64_t disable_casp : 1; /**< [ 37: 37](R/W) Disable the CASP instruction. */
+ uint64_t disable_cas : 1; /**< [ 36: 36](R/W) Disable the CAS instruction. */
+ uint64_t force_cim_ich_vtr_to1 : 1; /**< [ 35: 35](RAZ) Reserved. */
+ uint64_t disable_wfe : 1; /**< [ 34: 34](R/W) Disable WFE. */
+ uint64_t enable_v81 : 1; /**< [ 33: 33](R/W) Enable v8.1 features, modifying the ID registers to show v8.1. */
+ uint64_t isb_flush : 1; /**< [ 32: 32](R/W) Enable pipeline flush after an ISB. */
+ uint64_t wfe_defer : 8; /**< [ 31: 24](R/W) WFE defer timer setting. Time in core-clocks = {| WFE_DEFER, WFE_DEFER\<3:0\>} \<\<
+ WFE_DEFER\<7:4\>. */
+ uint64_t disable_icache_probes : 1; /**< [ 23: 23](R/W) Disable Icache probes. */
+ uint64_t force_icache_parity : 1; /**< [ 22: 22](R/W) Force icache correctable parity error on next Icache fill. This bit clears itself after
+ the fill operation. */
+ uint64_t suppress_parity_checking : 1;/**< [ 21: 21](R/W) Suppress Icache correctable parity checking. */
+ uint64_t no_exc_icache_parity : 1; /**< [ 20: 20](R/W) Suppress exception on Icache correctable parity error. */
+ uint64_t step_rate : 4; /**< [ 19: 16](R/W) Step rate. */
+ uint64_t reserved_10_15 : 6;
+ uint64_t disable_flex_execution : 1; /**< [ 9: 9](R/W) Disable flex execution; also prevents overlapped execution of DIV/SQRT and other
+ instructions (to prevent a DIV load collision). */
+ uint64_t disable_branch_folding : 1; /**< [ 8: 8](R/W) Disable branch folding. */
+ uint64_t disable_wfi : 1; /**< [ 7: 7](R/W) Disable WFI/WFE. */
+ uint64_t disable_fetch_under_fill : 1;/**< [ 6: 6](R/W) Disable fetch-under-fill. */
+ uint64_t force_issue_clock : 1; /**< [ 5: 5](R/W) Force issue-unit clock. */
+ uint64_t force_exe_clock : 1; /**< [ 4: 4](R/W) Force execution-unit clock. */
+ uint64_t force_csr_clock : 1; /**< [ 3: 3](R/W) Force CSR clock. */
+ uint64_t disable_icache_prefetching : 1;/**< [ 2: 2](R/W) Disable Icache prefetching. */
+ uint64_t random_icache : 1; /**< [ 1: 1](R/W) Random Icache replacement. */
+ uint64_t disable_icache : 1; /**< [ 0: 0](R/W) Disable Icache. */
+#else /* Word 0 - Little Endian */
+ uint64_t disable_icache : 1; /**< [ 0: 0](R/W) Disable Icache. */
+ uint64_t random_icache : 1; /**< [ 1: 1](R/W) Random Icache replacement. */
+ uint64_t disable_icache_prefetching : 1;/**< [ 2: 2](R/W) Disable Icache prefetching. */
+ uint64_t force_csr_clock : 1; /**< [ 3: 3](R/W) Force CSR clock. */
+ uint64_t force_exe_clock : 1; /**< [ 4: 4](R/W) Force execution-unit clock. */
+ uint64_t force_issue_clock : 1; /**< [ 5: 5](R/W) Force issue-unit clock. */
+ uint64_t disable_fetch_under_fill : 1;/**< [ 6: 6](R/W) Disable fetch-under-fill. */
+ uint64_t disable_wfi : 1; /**< [ 7: 7](R/W) Disable WFI/WFE. */
+ uint64_t disable_branch_folding : 1; /**< [ 8: 8](R/W) Disable branch folding. */
+ uint64_t disable_flex_execution : 1; /**< [ 9: 9](R/W) Disable flex execution; also prevents overlapped execution of DIV/SQRT and other
+ instructions (to prevent a DIV load collision). */
+ uint64_t reserved_10_15 : 6;
+ uint64_t step_rate : 4; /**< [ 19: 16](R/W) Step rate. */
+ uint64_t no_exc_icache_parity : 1; /**< [ 20: 20](R/W) Suppress exception on Icache correctable parity error. */
+ uint64_t suppress_parity_checking : 1;/**< [ 21: 21](R/W) Suppress Icache correctable parity checking. */
+ uint64_t force_icache_parity : 1; /**< [ 22: 22](R/W) Force icache correctable parity error on next Icache fill. This bit clears itself after
+ the fill operation. */
+ uint64_t disable_icache_probes : 1; /**< [ 23: 23](R/W) Disable Icache probes. */
+ uint64_t wfe_defer : 8; /**< [ 31: 24](R/W) WFE defer timer setting. Time in core-clocks = {| WFE_DEFER, WFE_DEFER\<3:0\>} \<\<
+ WFE_DEFER\<7:4\>. */
+ uint64_t isb_flush : 1; /**< [ 32: 32](R/W) Enable pipeline flush after an ISB. */
+ uint64_t enable_v81 : 1; /**< [ 33: 33](R/W) Enable v8.1 features, modifying the ID registers to show v8.1. */
+ uint64_t disable_wfe : 1; /**< [ 34: 34](R/W) Disable WFE. */
+ uint64_t force_cim_ich_vtr_to1 : 1; /**< [ 35: 35](RAZ) Reserved. */
+ uint64_t disable_cas : 1; /**< [ 36: 36](R/W) Disable the CAS instruction. */
+ uint64_t disable_casp : 1; /**< [ 37: 37](R/W) Disable the CASP instruction. */
+ uint64_t disable_eret_pred : 1; /**< [ 38: 38](R/W) Disable ERET prediction. */
+ uint64_t mrs_msr_hazard : 1; /**< [ 39: 39](R/W) Disable MRS/MSR pipelining, assume hazards. */
+ uint64_t reserved_40_63 : 24;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_ap_cvmctl_el1 bdk_ap_cvmctl_el1_t;
+
+#define BDK_AP_CVMCTL_EL1 BDK_AP_CVMCTL_EL1_FUNC()
+static inline uint64_t BDK_AP_CVMCTL_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVMCTL_EL1_FUNC(void)
+{
+ return 0x3000b000000ll;
+}
+
+#define typedef_BDK_AP_CVMCTL_EL1 bdk_ap_cvmctl_el1_t
+#define bustype_BDK_AP_CVMCTL_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVMCTL_EL1 "AP_CVMCTL_EL1"
+#define busnum_BDK_AP_CVMCTL_EL1 0
+#define arguments_BDK_AP_CVMCTL_EL1 -1,-1,-1,-1
+
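+/* A minimal sketch (not from the imported BDK sources): evaluate the
+ * WFE_DEFER timer formula quoted in the field description above. It reads
+ * "{| WFE_DEFER, WFE_DEFER<3:0>} << WFE_DEFER<7:4>" as a Verilog-style
+ * reduction-OR concatenated above the low nibble; that interpretation is
+ * an assumption, not something the imported sources confirm. */
+static inline uint64_t example_wfe_defer_cycles(void)
+{
+    bdk_ap_cvmctl_el1_t ctl;
+    uint64_t v;
+
+    ctl.u = BDK_CSR_READ(bdk_numa_local(), BDK_AP_CVMCTL_EL1);
+    v = ctl.s.wfe_defer;
+    /* {|WFE_DEFER, WFE_DEFER<3:0>} << WFE_DEFER<7:4> */
+    return ((uint64_t)(v != 0) << 4 | (v & 0xf)) << (v >> 4);
+}
+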
+/**
+ * Register (SYSREG) ap_cvmmemctl0_el1
+ *
+ * AP Cavium Memory Control 0 Register
+ * This register controls memory features.
+ */
+union bdk_ap_cvmmemctl0_el1
+{
+ uint64_t u;
+ struct bdk_ap_cvmmemctl0_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_63 : 1;
+ uint64_t node : 2; /**< [ 62: 61](RO) Local node ID. */
+ uint64_t stexfailcnt : 3; /**< [ 60: 58](RO) ST exclusive fail count. */
+ uint64_t wfeldex1dis : 1; /**< [ 57: 57](R/W) WFE release behavior for LD-exclusive.
+ 0 = A global monitor transition from exclusive to open (lock flag transition
+ from 1 to 0) causes SEV to local core.
+ 1 = A global monitor transition from exclusive to open (lock flag transition
+ from 1 to 0) does not cause SEV to local core. */
+ uint64_t stlstallforce : 1; /**< [ 56: 56](R/W) Force ST_release to wait for flushed write-buffer entries to be ACKed (pass 2.0 only).
+ 0 = Store-release instructions mark prior relevant write-buffer entries for flush but do
+ not wait for the ACKs to return.
+ 1 = Store-release instructions mark prior relevant write-buffer entries for flush and wait
+ for all the ACKs to return. */
+ uint64_t dmbstallforce : 1; /**< [ 55: 55](R/W) Force DMB to wait for flushed write-buffer entries to be ACKed (pass 2.0 only).
+ 0 = DMB instructions mark prior relevant write-buffer entries for flush, but do not wait
+ for the ACKs to return.
+ 1 = DMB instructions mark prior relevant write-buffer entries for flush and wait for all
+ the ACKs to return. */
+ uint64_t tlbinopdis : 1; /**< [ 54: 54](R/W) Disable broadcast TLBI optimization (pass 2.0 only).
+
+ Address-based broadcast TLBI instructions that go to remote cores are converted
+ from address-based TLBI instructions to context-based TLBI instructions. The
+ actions on the local core generating the TLBI instruction are still precise.
+
+ 0 = The converted context-based TLBI instructions or original context-based
+ TLBIs to remote cores (without intervening interruptions, such as a DSB) are
+ coalesced into a single context-based TLBI. Converted and original ones do not
+ coalesce.
+
+ 1 = The above-mentioned coalescing is suppressed and converted context-based
+ remote TLBIs still go out as such. */
+ uint64_t tlbiicflush : 1; /**< [ 53: 53](R/W) Some local TLBI instructions cause ICache flush (pass 2.0 only).
+                                                                 0 = Icache flush operations do not happen for the TLBI instructions listed below.
+ 1 = Icache is flushed on the TLBI instructions listed below:
+ * TLBI ALLE2{IS}.
+ * TLBI ALLE3{IS}.
+ * TLBI VAE1{IS}.
+ * TLBI VALE1{IS}.
+ * TLBI VAAE1{IS}.
+ * TLBI VAALE1{IS}.
+ * TLBI VAE2{IS}.
+ * TLBI VALE2{IS}.
+ * TLBI VAE3{IS}.
+ * TLBI VALE3{IS}.
+ * TLBI IPAS2E1{IS}.
+ * TLBI IPAS2LE1{IS}. */
+        uint64_t gsyncto : 5;          /**< [ 52: 48](R/W) GlobalSync timeout (pass 2.0 only).
+                                                                 Timeout = 2^[GSYNCTO].
+                                                                 0x0 = disable timeout. */
+ uint64_t utlbfillbypdis : 1; /**< [ 47: 47](R/W) Disable uTLB fill bypass (pass 2.0 only).
+ 0 = On a stage1-only translation, the uTLB is written along with the MTLB.
+ 1 = On a stage1-only translation, the uTLB is not written along with the MTLB causing a
+ uTLB miss replay to complete the uTLB fill. */
+ uint64_t tlbiall : 1; /**< [ 46: 46](R/W) Treat all TLBIs like TLBI ALL for a specific exception level. */
+ uint64_t wbfdsbflushall : 1; /**< [ 45: 45](R/W) Any DSB instruction flushes the write buffer. */
+ uint64_t wbfdmbflushnext : 1; /**< [ 44: 44](R/W) DMB instruction to !NSH flushes next ST to !NSH. uTLB is flushed when this value is
+ changed. */
+ uint64_t stexl2cforce : 1; /**< [ 43: 43](R/W) Send all store-exclusive instructions to L2 cache. uTLB is flushed when this value is
+ changed. */
+ uint64_t ioglobalforce : 1; /**< [ 42: 42](R/W) Reserved. uTLB is flushed when this value is changed.
+ Internal:
+ Force global order for IO references. */
+ uint64_t wcumissforce : 1; /**< [ 41: 41](R/W) Force all walker cache lookups to miss. uTLB is flushed when this value is changed. */
+ uint64_t replayprefdis : 1; /**< [ 40: 40](R/W) Replay PREF disable. uTLB miss PREF instruction behavior (see chapter body).
+ 0 = PREF instructions do attempt a replay for MTLB to uTLB refill.
+ 1 = PREF instructions do not attempt a replay for MTLB to uTLB refill.
+ uTLB is flushed when this value is changed. */
+ uint64_t zval2cdis : 1; /**< [ 39: 39](R/W) ZVA bypass L2C.
+ 0 = DC_ZVA instructions to L2C are STFIL1 (full block store operation allocating in
+ requester L2, fill 0s, self-invalidate L1 cache).
+ 1 = DC_ZVA instructions to L2C are STTIL1 (full block store operation through to DRAM,
+ bypass home and requester L2, fill 0s, self-invalidate L1 cache). */
+ uint64_t ldil2cdis : 1; /**< [ 38: 38](R/W) LDI instruction L2C usage.
+                                                                 0 = LDI instructions to L2C are LDI (do not allocate in L1; allocate in L2 at the requester).
+                                                                 1 = LDI instructions to L2C are LDT (do not allocate in L2 or L1, at home or requester). */
+        uint64_t dcva47 : 1;           /**< [ 37: 37](R/W) If MMU translations are disabled,
+                                                                 apply memory attributes to physical addresses where bit\<47\>
+                                                                 is zero, and device attributes where bit\<47\> is one. */
+ uint64_t stprefdis : 1; /**< [ 36: 36](R/W) ST PREF instructions disable. */
+ uint64_t ldprefdis : 1; /**< [ 35: 35](R/W) LD PREF instructions disable. */
+ uint64_t wfildexdis : 1; /**< [ 34: 34](R/W) WFE release behavior for LD-exclusive.
+ 0 = L2C invalidates to global monitor cause SEV to local core.
+ 1 = L2C invalidates have no effect on global monitor (i.e. lock_register).
+
+ This field should never be set to 1; setting to 1 does not
+ conform to the ARMv8 specification. */
+ uint64_t wfito : 3; /**< [ 33: 31](R/W) Wait-for-interrupt timeout; timeout=2^(8+[WFITO]). */
+ uint64_t rbfshortto : 5; /**< [ 30: 26](R/W) Read buffer short timeout; timeout = 2^[RBFSHORTTO].
+ Must be \>=0x6. The L2C directs the core to use either RBFSHORTTO or RBFTO. The short
+                                                                 timeout is used when a CCPI link goes down, to expedite error indication. */
+ uint64_t rbfto : 5; /**< [ 25: 21](R/W) Read buffer timeout; timeout = 2^[RBFTO]. Must be \>= 0x6. */
+ uint64_t wbfallbarrier : 1; /**< [ 20: 20](R/W) Write-buffer apply barrier to all ST instructions. */
+ uint64_t wbfnomerge : 1; /**< [ 19: 19](R/W) Write-buffer merge disable. */
+ uint64_t wbftonshena : 1; /**< [ 18: 18](R/W) Write-buffer timeout for NSH entries enable.
+                                                                 0 = Write-buffer timeout for NSH entries = 2^18 cycles.
+                                                                 1 = Write-buffer timeout for NSH entries = 2^[WBFTO] (see [WBFTO]). */
+ uint64_t wbftomrgclrena : 1; /**< [ 17: 17](R/W) Write-buffer timeout clear-on-merge enable. */
+        uint64_t wbfto : 5;            /**< [ 16: 12](R/W) Write-buffer timeout for non-NSH entries; timeout = 2^[WBFTO]. */
+ uint64_t wbfthresh : 5; /**< [ 11: 7](R/W) Write-buffer threshold. The write-buffer starts flushing entries to the L2 cache once the
+ number of valid write-buffer entries reaches this threshold value. */
+ uint64_t utlbentriesm1 : 5; /**< [ 6: 2](R/W) Number of uTLB entries - 1. Future allocation is limited to this size (pass 1, pass 2) */
+ uint64_t cclkforce : 1; /**< [ 1: 1](R/W) Force CSR clock enable. When set, force CSR conditional clocking. */
+ uint64_t mclkforce : 1; /**< [ 0: 0](R/W) Force memory clock enable. When set, force memory conditional clocking. */
+#else /* Word 0 - Little Endian */
+ uint64_t mclkforce : 1; /**< [ 0: 0](R/W) Force memory clock enable. When set, force memory conditional clocking. */
+ uint64_t cclkforce : 1; /**< [ 1: 1](R/W) Force CSR clock enable. When set, force CSR conditional clocking. */
+ uint64_t utlbentriesm1 : 5; /**< [ 6: 2](R/W) Number of uTLB entries - 1. Future allocation is limited to this size (pass 1, pass 2) */
+ uint64_t wbfthresh : 5; /**< [ 11: 7](R/W) Write-buffer threshold. The write-buffer starts flushing entries to the L2 cache once the
+ number of valid write-buffer entries reaches this threshold value. */
+        uint64_t wbfto : 5;            /**< [ 16: 12](R/W) Write-buffer timeout for non-NSH entries; timeout = 2^[WBFTO]. */
+ uint64_t wbftomrgclrena : 1; /**< [ 17: 17](R/W) Write-buffer timeout clear-on-merge enable. */
+ uint64_t wbftonshena : 1; /**< [ 18: 18](R/W) Write-buffer timeout for NSH entries enable.
+                                                                 0 = Write-buffer timeout for NSH entries = 2^18 cycles.
+                                                                 1 = Write-buffer timeout for NSH entries = 2^[WBFTO] (see [WBFTO]). */
+ uint64_t wbfnomerge : 1; /**< [ 19: 19](R/W) Write-buffer merge disable. */
+ uint64_t wbfallbarrier : 1; /**< [ 20: 20](R/W) Write-buffer apply barrier to all ST instructions. */
+ uint64_t rbfto : 5; /**< [ 25: 21](R/W) Read buffer timeout; timeout = 2^[RBFTO]. Must be \>= 0x6. */
+ uint64_t rbfshortto : 5; /**< [ 30: 26](R/W) Read buffer short timeout; timeout = 2^[RBFSHORTTO].
+ Must be \>=0x6. The L2C directs the core to use either RBFSHORTTO or RBFTO. The short
+                                                                 timeout is used when a CCPI link goes down, to expedite error indication. */
+ uint64_t wfito : 3; /**< [ 33: 31](R/W) Wait-for-interrupt timeout; timeout=2^(8+[WFITO]). */
+ uint64_t wfildexdis : 1; /**< [ 34: 34](R/W) WFE release behavior for LD-exclusive.
+ 0 = L2C invalidates to global monitor cause SEV to local core.
+ 1 = L2C invalidates have no effect on global monitor (i.e. lock_register).
+
+ This field should never be set to 1; setting to 1 does not
+ conform to the ARMv8 specification. */
+ uint64_t ldprefdis : 1; /**< [ 35: 35](R/W) LD PREF instructions disable. */
+ uint64_t stprefdis : 1; /**< [ 36: 36](R/W) ST PREF instructions disable. */
+        uint64_t dcva47 : 1;           /**< [ 37: 37](R/W) If MMU translations are disabled,
+                                                                 apply memory attributes to physical addresses where bit\<47\>
+                                                                 is zero, and device attributes where bit\<47\> is one. */
+ uint64_t ldil2cdis : 1; /**< [ 38: 38](R/W) LDI instruction L2C usage.
+                                                                 0 = LDI instructions to L2C are LDI (do not allocate in L1; allocate in L2 at the requester).
+                                                                 1 = LDI instructions to L2C are LDT (do not allocate in L2 or L1, at home or requester). */
+ uint64_t zval2cdis : 1; /**< [ 39: 39](R/W) ZVA bypass L2C.
+ 0 = DC_ZVA instructions to L2C are STFIL1 (full block store operation allocating in
+ requester L2, fill 0s, self-invalidate L1 cache).
+ 1 = DC_ZVA instructions to L2C are STTIL1 (full block store operation through to DRAM,
+ bypass home and requester L2, fill 0s, self-invalidate L1 cache). */
+ uint64_t replayprefdis : 1; /**< [ 40: 40](R/W) Replay PREF disable. uTLB miss PREF instruction behavior (see chapter body).
+ 0 = PREF instructions do attempt a replay for MTLB to uTLB refill.
+ 1 = PREF instructions do not attempt a replay for MTLB to uTLB refill.
+ uTLB is flushed when this value is changed. */
+ uint64_t wcumissforce : 1; /**< [ 41: 41](R/W) Force all walker cache lookups to miss. uTLB is flushed when this value is changed. */
+ uint64_t ioglobalforce : 1; /**< [ 42: 42](R/W) Reserved. uTLB is flushed when this value is changed.
+ Internal:
+ Force global order for IO references. */
+ uint64_t stexl2cforce : 1; /**< [ 43: 43](R/W) Send all store-exclusive instructions to L2 cache. uTLB is flushed when this value is
+ changed. */
+ uint64_t wbfdmbflushnext : 1; /**< [ 44: 44](R/W) DMB instruction to !NSH flushes next ST to !NSH. uTLB is flushed when this value is
+ changed. */
+ uint64_t wbfdsbflushall : 1; /**< [ 45: 45](R/W) Any DSB instruction flushes the write buffer. */
+ uint64_t tlbiall : 1; /**< [ 46: 46](R/W) Treat all TLBIs like TLBI ALL for a specific exception level. */
+ uint64_t utlbfillbypdis : 1; /**< [ 47: 47](R/W) Disable uTLB fill bypass (pass 2.0 only).
+ 0 = On a stage1-only translation, the uTLB is written along with the MTLB.
+ 1 = On a stage1-only translation, the uTLB is not written along with the MTLB causing a
+ uTLB miss replay to complete the uTLB fill. */
+        uint64_t gsyncto : 5;          /**< [ 52: 48](R/W) GlobalSync timeout (pass 2.0 only).
+                                                                 Timeout = 2^[GSYNCTO].
+                                                                 0x0 = disable timeout. */
+ uint64_t tlbiicflush : 1; /**< [ 53: 53](R/W) Some local TLBI instructions cause ICache flush (pass 2.0 only).
+                                                                 0 = Icache flush operations do not happen for the TLBI instructions listed below.
+ 1 = Icache is flushed on the TLBI instructions listed below:
+ * TLBI ALLE2{IS}.
+ * TLBI ALLE3{IS}.
+ * TLBI VAE1{IS}.
+ * TLBI VALE1{IS}.
+ * TLBI VAAE1{IS}.
+ * TLBI VAALE1{IS}.
+ * TLBI VAE2{IS}.
+ * TLBI VALE2{IS}.
+ * TLBI VAE3{IS}.
+ * TLBI VALE3{IS}.
+ * TLBI IPAS2E1{IS}.
+ * TLBI IPAS2LE1{IS}. */
+ uint64_t tlbinopdis : 1; /**< [ 54: 54](R/W) Disable broadcast TLBI optimization (pass 2.0 only).
+
+ Address-based broadcast TLBI instructions that go to remote cores are converted
+ from address-based TLBI instructions to context-based TLBI instructions. The
+ actions on the local core generating the TLBI instruction are still precise.
+
+ 0 = The converted context-based TLBI instructions or original context-based
+ TLBIs to remote cores (without intervening interruptions, such as a DSB) are
+ coalesced into a single context-based TLBI. Converted and original ones do not
+ coalesce.
+
+ 1 = The above-mentioned coalescing is suppressed and converted context-based
+ remote TLBIs still go out as such. */
+ uint64_t dmbstallforce : 1; /**< [ 55: 55](R/W) Force DMB to wait for flushed write-buffer entries to be ACKed (pass 2.0 only).
+ 0 = DMB instructions mark prior relevant write-buffer entries for flush, but do not wait
+ for the ACKs to return.
+ 1 = DMB instructions mark prior relevant write-buffer entries for flush and wait for all
+ the ACKs to return. */
+ uint64_t stlstallforce : 1; /**< [ 56: 56](R/W) Force ST_release to wait for flushed write-buffer entries to be ACKed (pass 2.0 only).
+ 0 = Store-release instructions mark prior relevant write-buffer entries for flush but do
+ not wait for the ACKs to return.
+ 1 = Store-release instructions mark prior relevant write-buffer entries for flush and wait
+ for all the ACKs to return. */
+ uint64_t wfeldex1dis : 1; /**< [ 57: 57](R/W) WFE release behavior for LD-exclusive.
+ 0 = A global monitor transition from exclusive to open (lock flag transition
+ from 1 to 0) causes SEV to local core.
+ 1 = A global monitor transition from exclusive to open (lock flag transition
+ from 1 to 0) does not cause SEV to local core. */
+ uint64_t stexfailcnt : 3; /**< [ 60: 58](RO) ST exclusive fail count. */
+ uint64_t node : 2; /**< [ 62: 61](RO) Local node ID. */
+ uint64_t reserved_63 : 1;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_cvmmemctl0_el1_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_63 : 1;
+ uint64_t node : 2; /**< [ 62: 61](RO) Local node ID. */
+ uint64_t stexfailcnt : 3; /**< [ 60: 58](RO) ST exclusive fail count. */
+ uint64_t reserved_57 : 1;
+ uint64_t stlstallforce : 1; /**< [ 56: 56](R/W) Force ST_release to wait for flushed write-buffer entries to be ACKed (pass 2.0 only).
+ 0 = Store-release instructions mark prior relevant write-buffer entries for flush but do
+ not wait for the ACKs to return.
+ 1 = Store-release instructions mark prior relevant write-buffer entries for flush and wait
+ for all the ACKs to return. */
+ uint64_t dmbstallforce : 1; /**< [ 55: 55](R/W) Force DMB to wait for flushed write-buffer entries to be ACKed (pass 2.0 only).
+ 0 = DMB instructions mark prior relevant write-buffer entries for flush, but do not wait
+ for the ACKs to return.
+ 1 = DMB instructions mark prior relevant write-buffer entries for flush and wait for all
+ the ACKs to return. */
+ uint64_t tlbinopdis : 1; /**< [ 54: 54](R/W) Disable broadcast TLBI optimization (pass 2.0 only).
+
+ Address-based broadcast TLBI instructions that go to remote cores are converted
+ from address-based TLBI instructions to context-based TLBI instructions. The
+ actions on the local core generating the TLBI instruction are still precise.
+
+ 0 = The converted context-based TLBI instructions or original context-based
+ TLBIs to remote cores (without intervening interruptions, such as a DSB) are
+ coalesced into a single context-based TLBI. Converted and original ones do not
+ coalesce.
+
+ 1 = The above-mentioned coalescing is suppressed and converted context-based
+ remote TLBIs still go out as such. */
+ uint64_t tlbiicflush : 1; /**< [ 53: 53](R/W) Some local TLBI instructions cause ICache flush (pass 2.0 only).
+                                                                 0 = Icache flush operations do not happen for the TLBI instructions listed below.
+ 1 = Icache is flushed on the TLBI instructions listed below:
+ * TLBI ALLE2{IS}.
+ * TLBI ALLE3{IS}.
+ * TLBI VAE1{IS}.
+ * TLBI VALE1{IS}.
+ * TLBI VAAE1{IS}.
+ * TLBI VAALE1{IS}.
+ * TLBI VAE2{IS}.
+ * TLBI VALE2{IS}.
+ * TLBI VAE3{IS}.
+ * TLBI VALE3{IS}.
+ * TLBI IPAS2E1{IS}.
+ * TLBI IPAS2LE1{IS}. */
+ uint64_t gsyncto : 5; /**< [ 52: 48](R/W) GlobalSync timeout. (pass 2.0 only.)
+ timeout = 2^[GSYNCTO].
+ 0x0 = disable timeout. */
+ uint64_t utlbfillbypdis : 1; /**< [ 47: 47](R/W) Disable uTLB fill bypass (pass 2.0 only).
+ 0 = On a stage1-only translation, the uTLB is written along with the MTLB.
+ 1 = On a stage1-only translation, the uTLB is not written along with the MTLB causing a
+ uTLB miss replay to complete the uTLB fill. */
+ uint64_t tlbiall : 1; /**< [ 46: 46](R/W) Treat all TLBIs like TLBI ALL for a specific exception level. */
+ uint64_t wbfdsbflushall : 1; /**< [ 45: 45](R/W) Any DSB instruction flushes the write buffer. */
+ uint64_t wbfdmbflushnext : 1; /**< [ 44: 44](R/W) DMB instruction to !NSH flushes next ST to !NSH. uTLB is flushed when this value is
+ changed. */
+ uint64_t stexl2cforce : 1; /**< [ 43: 43](R/W) Send all store-exclusive instructions to L2 cache. uTLB is flushed when this value is
+ changed. */
+ uint64_t ioglobalforce : 1; /**< [ 42: 42](R/W) Reserved. uTLB is flushed when this value is changed.
+ Internal:
+ Force global order for IO references. */
+ uint64_t wcumissforce : 1; /**< [ 41: 41](R/W) Force all walker cache lookups to miss. uTLB is flushed when this value is changed. */
+ uint64_t replayprefdis : 1; /**< [ 40: 40](R/W) Replay PREF disable. uTLB miss PREF instruction behavior (see chapter body).
+ 0 = PREF instructions do attempt a replay for MTLB to uTLB refill.
+ 1 = PREF instructions do not attempt a replay for MTLB to uTLB refill.
+ uTLB is flushed when this value is changed. */
+ uint64_t zval2cdis : 1; /**< [ 39: 39](R/W) ZVA bypass L2C.
+ 0 = DC_ZVA instructions to L2C are STFIL1 (full block store operation allocating in
+ requester L2, fill 0s, self-invalidate L1 cache).
+ 1 = DC_ZVA instructions to L2C are STTIL1 (full block store operation through to DRAM,
+ bypass home and requester L2, fill 0s, self-invalidate L1 cache). */
+ uint64_t ldil2cdis : 1; /**< [ 38: 38](R/W) LDI instruction L2C usage.
+ 0 = LDI instructions to L2C are LDI (don't allocate in L1, allocates L2 at requester).
+ 1 = LDI instructions to L2C are LDT (don't allocate in L2 or L1 at home or requester). */
+ uint64_t dcva47 : 1; /**< [ 37: 37](R/W) If MMU translations are disabled,
+ apply memory attributes to physical addresses where bit\<47\>
+ is zero, and device attributes where bit\<47\> is one. */
+ uint64_t stprefdis : 1; /**< [ 36: 36](R/W) ST PREF instructions disable. */
+ uint64_t ldprefdis : 1; /**< [ 35: 35](R/W) LD PREF instructions disable. */
+ uint64_t wfildexdis : 1; /**< [ 34: 34](R/W) WFE release behavior for LD-exclusive.
+ 0 = L2C invalidates to global monitor cause SEV to local core.
+ 1 = L2C invalidates have no effect on global monitor (i.e. lock_register).
+
+ This field should never be set to 1; setting to 1 does not
+ conform to the ARMv8 specification. */
+ uint64_t wfito : 3; /**< [ 33: 31](R/W) Wait-for-interrupt timeout; timeout=2^(8+[WFITO]). */
+ uint64_t rbfshortto : 5; /**< [ 30: 26](R/W) Read buffer short timeout; timeout = 2^[RBFSHORTTO].
+ Must be \>=0x6. The L2C directs the core to use either RBFSHORTTO or RBFTO. The short
+ timeout is used when a CCPI link goes down to expedite error indication. */
+ uint64_t rbfto : 5; /**< [ 25: 21](R/W) Read buffer timeout; timeout = 2^[RBFTO]. Must be \>= 0x6. */
+ uint64_t wbfallbarrier : 1; /**< [ 20: 20](R/W) Write-buffer apply barrier to all ST instructions. */
+ uint64_t wbfnomerge : 1; /**< [ 19: 19](R/W) Write-buffer merge disable. */
+ uint64_t wbftonshena : 1; /**< [ 18: 18](R/W) Write-buffer timeout for NSH entries enable.
+ 0 = Write-buffer timeout for NSH entries = 2^18 cycles.
+ 1 = Write-buffer timeout for NSH entries = 2^[WBFTO] (see [WBFTO]). */
+ uint64_t wbftomrgclrena : 1; /**< [ 17: 17](R/W) Write-buffer timeout clear-on-merge enable. */
+ uint64_t wbfto : 5; /**< [ 16: 12](R/W) Write-buffer timeout for non-NSH entries; timeout = 2^WBFTO. */
+ uint64_t wbfthresh : 5; /**< [ 11: 7](R/W) Write-buffer threshold. The write-buffer starts flushing entries to the L2 cache once the
+ number of valid write-buffer entries reaches this threshold value. */
+ uint64_t utlbentriesm1 : 5; /**< [ 6: 2](R/W) Number of uTLB entries - 1. Future allocation is limited to this size (pass 1, pass 2). */
+ uint64_t cclkforce : 1; /**< [ 1: 1](R/W) Force CSR clock enable. When set, force CSR conditional clocking. */
+ uint64_t mclkforce : 1; /**< [ 0: 0](R/W) Force memory clock enable. When set, force memory conditional clocking. */
+#else /* Word 0 - Little Endian */
+ uint64_t mclkforce : 1; /**< [ 0: 0](R/W) Force memory clock enable. When set, force memory conditional clocking. */
+ uint64_t cclkforce : 1; /**< [ 1: 1](R/W) Force CSR clock enable. When set, force CSR conditional clocking. */
+ uint64_t utlbentriesm1 : 5; /**< [ 6: 2](R/W) Number of uTLB entries - 1. Future allocation is limited to this size (pass 1, pass 2). */
+ uint64_t wbfthresh : 5; /**< [ 11: 7](R/W) Write-buffer threshold. The write-buffer starts flushing entries to the L2 cache once the
+ number of valid write-buffer entries reaches this threshold value. */
+ uint64_t wbfto : 5; /**< [ 16: 12](R/W) Write-buffer timeout for non-NSH entries; timeout = 2^WBFTO. */
+ uint64_t wbftomrgclrena : 1; /**< [ 17: 17](R/W) Write-buffer timeout clear-on-merge enable. */
+ uint64_t wbftonshena : 1; /**< [ 18: 18](R/W) Write-buffer timeout for NSH entries enable.
+ 0 = Write-buffer timeout for NSH entries = 2^18 cycles.
+ 1 = Write-buffer timeout for NSH entries = 2^[WBFTO] (see [WBFTO]). */
+ uint64_t wbfnomerge : 1; /**< [ 19: 19](R/W) Write-buffer merge disable. */
+ uint64_t wbfallbarrier : 1; /**< [ 20: 20](R/W) Write-buffer apply barrier to all ST instructions. */
+ uint64_t rbfto : 5; /**< [ 25: 21](R/W) Read buffer timeout; timeout = 2^[RBFTO]. Must be \>= 0x6. */
+ uint64_t rbfshortto : 5; /**< [ 30: 26](R/W) Read buffer short timeout; timeout = 2^[RBFSHORTTO].
+ Must be \>=0x6. The L2C directs the core to use either RBFSHORTTO or RBFTO. The short
+ timeout is used when a CCPI link goes down to expedite error indication. */
+ uint64_t wfito : 3; /**< [ 33: 31](R/W) Wait-for-interrupt timeout; timeout=2^(8+[WFITO]). */
+ uint64_t wfildexdis : 1; /**< [ 34: 34](R/W) WFE release behavior for LD-exclusive.
+ 0 = L2C invalidates to global monitor cause SEV to local core.
+ 1 = L2C invalidates have no effect on global monitor (i.e. lock_register).
+
+ This field should never be set to 1; setting to 1 does not
+ conform to the ARMv8 specification. */
+ uint64_t ldprefdis : 1; /**< [ 35: 35](R/W) LD PREF instructions disable. */
+ uint64_t stprefdis : 1; /**< [ 36: 36](R/W) ST PREF instructions disable. */
+ uint64_t dcva47 : 1; /**< [ 37: 37](R/W) If MMU translations are disabled,
+ apply memory attributes to physical addresses where bit\<47\>
+ is zero, and device attributes where bit\<47\> is one. */
+ uint64_t ldil2cdis : 1; /**< [ 38: 38](R/W) LDI instruction L2C usage.
+ 0 = LDI instructions to L2C are LDI (don't allocate in L1, allocates L2 at requester).
+ 1 = LDI instructions to L2C are LDT (don't allocate in L2 or L1 at home or requester). */
+ uint64_t zval2cdis : 1; /**< [ 39: 39](R/W) ZVA bypass L2C.
+ 0 = DC_ZVA instructions to L2C are STFIL1 (full block store operation allocating in
+ requester L2, fill 0s, self-invalidate L1 cache).
+ 1 = DC_ZVA instructions to L2C are STTIL1 (full block store operation through to DRAM,
+ bypass home and requester L2, fill 0s, self-invalidate L1 cache). */
+ uint64_t replayprefdis : 1; /**< [ 40: 40](R/W) Replay PREF disable. uTLB miss PREF instruction behavior (see chapter body).
+ 0 = PREF instructions do attempt a replay for MTLB to uTLB refill.
+ 1 = PREF instructions do not attempt a replay for MTLB to uTLB refill.
+ uTLB is flushed when this value is changed. */
+ uint64_t wcumissforce : 1; /**< [ 41: 41](R/W) Force all walker cache lookups to miss. uTLB is flushed when this value is changed. */
+ uint64_t ioglobalforce : 1; /**< [ 42: 42](R/W) Reserved. uTLB is flushed when this value is changed.
+ Internal:
+ Force global order for IO references. */
+ uint64_t stexl2cforce : 1; /**< [ 43: 43](R/W) Send all store-exclusive instructions to L2 cache. uTLB is flushed when this value is
+ changed. */
+ uint64_t wbfdmbflushnext : 1; /**< [ 44: 44](R/W) DMB instruction to !NSH flushes next ST to !NSH. uTLB is flushed when this value is
+ changed. */
+ uint64_t wbfdsbflushall : 1; /**< [ 45: 45](R/W) Any DSB instruction flushes the write buffer. */
+ uint64_t tlbiall : 1; /**< [ 46: 46](R/W) Treat all TLBIs like TLBI ALL for a specific exception level. */
+ uint64_t utlbfillbypdis : 1; /**< [ 47: 47](R/W) Disable uTLB fill bypass (pass 2.0 only).
+ 0 = On a stage1-only translation, the uTLB is written along with the MTLB.
+ 1 = On a stage1-only translation, the uTLB is not written along with the MTLB causing a
+ uTLB miss replay to complete the uTLB fill. */
+ uint64_t gsyncto : 5; /**< [ 52: 48](R/W) GlobalSync timeout. (pass 2.0 only.)
+ timeout = 2^[GSYNCTO].
+ 0x0 = disable timeout. */
+ uint64_t tlbiicflush : 1; /**< [ 53: 53](R/W) Some local TLBI instructions cause ICache flush (pass 2.0 only).
+ 0 = Icache flush operations do not happen on the TLBI instructions listed below.
+ 1 = Icache is flushed on the TLBI instructions listed below:
+ * TLBI ALLE2{IS}.
+ * TLBI ALLE3{IS}.
+ * TLBI VAE1{IS}.
+ * TLBI VALE1{IS}.
+ * TLBI VAAE1{IS}.
+ * TLBI VAALE1{IS}.
+ * TLBI VAE2{IS}.
+ * TLBI VALE2{IS}.
+ * TLBI VAE3{IS}.
+ * TLBI VALE3{IS}.
+ * TLBI IPAS2E1{IS}.
+ * TLBI IPAS2LE1{IS}. */
+ uint64_t tlbinopdis : 1; /**< [ 54: 54](R/W) Disable broadcast TLBI optimization (pass 2.0 only).
+
+ Address-based broadcast TLBI instructions that go to remote cores are converted
+ from address-based TLBI instructions to context-based TLBI instructions. The
+ actions on the local core generating the TLBI instruction are still precise.
+
+ 0 = The converted context-based TLBI instructions or original context-based
+ TLBIs to remote cores (without intervening interruptions, such as a DSB) are
+ coalesced into a single context-based TLBI. Converted and original ones do not
+ coalesce.
+
+ 1 = The above-mentioned coalescing is suppressed and converted context-based
+ remote TLBIs still go out as such. */
+ uint64_t dmbstallforce : 1; /**< [ 55: 55](R/W) Force DMB to wait for flushed write-buffer entries to be ACKed (pass 2.0 only).
+ 0 = DMB instructions mark prior relevant write-buffer entries for flush, but do not wait
+ for the ACKs to return.
+ 1 = DMB instructions mark prior relevant write-buffer entries for flush and wait for all
+ the ACKs to return. */
+ uint64_t stlstallforce : 1; /**< [ 56: 56](R/W) Force ST_release to wait for flushed write-buffer entries to be ACKed (pass 2.0 only).
+ 0 = Store-release instructions mark prior relevant write-buffer entries for flush but do
+ not wait for the ACKs to return.
+ 1 = Store-release instructions mark prior relevant write-buffer entries for flush and wait
+ for all the ACKs to return. */
+ uint64_t reserved_57 : 1;
+ uint64_t stexfailcnt : 3; /**< [ 60: 58](RO) ST exclusive fail count. */
+ uint64_t node : 2; /**< [ 62: 61](RO) Local node ID. */
+ uint64_t reserved_63 : 1;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_ap_cvmmemctl0_el1_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_63 : 1;
+ uint64_t node : 2; /**< [ 62: 61](RO) Local node ID. */
+ uint64_t stexfailcnt : 3; /**< [ 60: 58](RO) ST exclusive fail count. */
+ uint64_t wfeldex1dis : 1; /**< [ 57: 57](R/W) WFE release behavior for LD-exclusive.
+ 0 = A global monitor transition from exclusive to open (lock flag transition
+ from 1 to 0) causes SEV to local core.
+ 1 = A global monitor transition from exclusive to open (lock flag transition
+ from 1 to 0) does not cause SEV to local core. */
+ uint64_t stlstallforce : 1; /**< [ 56: 56](R/W) Force ST_release to wait for flushed write-buffer entries to be ACKed.
+ 0 = Store-release instructions mark prior relevant write-buffer entries for flush but do
+ not wait for the ACKs to return.
+ 1 = Store-release instructions mark prior relevant write-buffer entries for flush and wait
+ for all the ACKs to return. */
+ uint64_t dmbstallforce : 1; /**< [ 55: 55](R/W) Force DMB to wait for flushed write-buffer entries to be ACKed.
+ 0 = DMB instructions mark prior relevant write-buffer entries for flush, but do not wait
+ for the ACKs to return.
+ 1 = DMB instructions mark prior relevant write-buffer entries for flush and wait for all
+ the ACKs to return. */
+ uint64_t tlbinopdis : 1; /**< [ 54: 54](R/W) Disable broadcast TLBI optimization.
+
+ Address-based broadcast TLBI instructions that go to remote cores are converted
+ from address-based TLBI instructions to context-based TLBI instructions. The
+ actions on the local core generating the TLBI instruction are still precise.
+
+ 0 = The converted context-based TLBI instructions or original context-based
+ TLBIs to remote cores (without intervening interruptions, such as a DSB) are
+ coalesced into a single context-based TLBI. Converted and original ones do not
+ coalesce.
+
+ 1 = The above-mentioned coalescing is suppressed and converted context-based
+ remote TLBIs still go out as such. */
+ uint64_t tlbiicflush : 1; /**< [ 53: 53](R/W) Some local TLBI instructions cause ICache flush.
+ 0 = Icache flush operations do not happen on the TLBI instructions listed below.
+ 1 = Icache is flushed on the TLBI instructions listed below:
+ * TLBI ALLE2{IS}.
+ * TLBI ALLE3{IS}.
+ * TLBI VAE1{IS}.
+ * TLBI VALE1{IS}.
+ * TLBI VAAE1{IS}.
+ * TLBI VAALE1{IS}.
+ * TLBI VAE2{IS}.
+ * TLBI VALE2{IS}.
+ * TLBI VAE3{IS}.
+ * TLBI VALE3{IS}.
+ * TLBI IPAS2E1{IS}.
+ * TLBI IPAS2LE1{IS}. */
+ uint64_t gsyncto : 5; /**< [ 52: 48](R/W) GlobalSync timeout.
+ timeout = 2^[GSYNCTO].
+ 0x0 = disable timeout. */
+ uint64_t utlbfillbypdis : 1; /**< [ 47: 47](R/W) Disable uTLB fill bypass.
+ 0 = On a stage1-only translation, the uTLB is written along with the MTLB.
+ 1 = On a stage1-only translation, the uTLB is not written along with the MTLB causing a
+ uTLB miss replay to complete the uTLB fill. */
+ uint64_t tlbiall : 1; /**< [ 46: 46](R/W) Treat all TLBIs like TLBI ALL for a specific exception level. */
+ uint64_t wbfdsbflushall : 1; /**< [ 45: 45](R/W) Any DSB instruction flushes the write buffer. */
+ uint64_t wbfdmbflushnext : 1; /**< [ 44: 44](R/W) DMB instruction to !NSH flushes next ST to !NSH. uTLB is not flushed when this value is
+ changed. */
+ uint64_t stexl2cforce : 1; /**< [ 43: 43](R/W) Send all store-exclusive instructions to L2 cache. uTLB is not flushed when this value is
+ changed. */
+ uint64_t ioglobalforce : 1; /**< [ 42: 42](R/W) Reserved.
+ Internal:
+ Force global order for IO references. */
+ uint64_t wcumissforce : 1; /**< [ 41: 41](R/W) Force all walker cache lookups to miss. uTLB is not flushed when this value is changed. */
+ uint64_t replayprefdis : 1; /**< [ 40: 40](R/W) Replay PREF disable. uTLB miss PREF instruction behavior (see chapter body).
+ 0 = PREF instructions do attempt a replay for MTLB to uTLB refill.
+ 1 = PREF instructions do not attempt a replay for MTLB to uTLB refill.
+
+ uTLB is not flushed when this value is changed. */
+ uint64_t zval2cdis : 1; /**< [ 39: 39](R/W) ZVA bypass L2C.
+ 0 = DC_ZVA instructions to L2C are STFIL1 (full block store operation allocating in
+ requester L2, fill 0s, self-invalidate L1 cache).
+ 1 = DC_ZVA instructions to L2C are STTIL1 (full block store operation through to DRAM,
+ bypass home and requester L2, fill 0s, self-invalidate L1 cache). */
+ uint64_t ldil2cdis : 1; /**< [ 38: 38](R/W) LDI instruction L2C usage.
+ 0 = LDI instructions to L2C are LDI (don't allocate in L1, allocates L2 at requester).
+ 1 = LDI instructions to L2C are LDT (don't allocate in L2 or L1 at home or requester). */
+ uint64_t dcva47 : 1; /**< [ 37: 37](R/W) If MMU translations are disabled,
+ apply memory attributes to physical addresses where bit\<47\>
+ is zero, and device attributes where bit\<47\> is one. */
+ uint64_t stprefdis : 1; /**< [ 36: 36](R/W) ST PREF instructions disable. */
+ uint64_t ldprefdis : 1; /**< [ 35: 35](R/W) LD PREF instructions disable. */
+ uint64_t wfildexdis : 1; /**< [ 34: 34](R/W) WFE release behavior for LD-exclusive.
+ 0 = L2C invalidates to global monitor cause SEV to local core.
+ 1 = L2C invalidates have no effect on global monitor (i.e. lock_register).
+
+ This field should never be set to 1; setting to 1 does not
+ conform to the ARMv8 specification. */
+ uint64_t wfito : 3; /**< [ 33: 31](R/W) Wait-for-interrupt timeout; timeout=2^(8+[WFITO]). */
+ uint64_t rbfshortto : 5; /**< [ 30: 26](R/W) Read buffer short timeout; timeout = 2^[RBFSHORTTO+6].
+ The L2C directs the core to use either RBFSHORTTO or RBFTO. The short
+ timeout is used when a CCPI link goes down to expedite error indication. */
+ uint64_t rbfto : 5; /**< [ 25: 21](R/W) Read buffer timeout; timeout = 2^[RBFTO+6]. */
+ uint64_t wbfallbarrier : 1; /**< [ 20: 20](R/W) Write-buffer apply barrier to all ST instructions. */
+ uint64_t wbfnomerge : 1; /**< [ 19: 19](R/W) Write-buffer merge disable. */
+ uint64_t wbftonshena : 1; /**< [ 18: 18](R/W) Write-buffer timeout for NSH entries enable.
+ 0 = Write-buffer timeout for NSH entries = 2^18 cycles.
+ 1 = Write-buffer timeout for NSH entries = 2^[WBFTO] (see [WBFTO]). */
+ uint64_t wbftomrgclrena : 1; /**< [ 17: 17](R/W) Write-buffer timeout clear-on-merge enable. */
+ uint64_t wbfto : 5; /**< [ 16: 12](R/W) Write-buffer timeout for non-NSH entries; timeout = 2^WBFTO. */
+ uint64_t wbfthresh : 5; /**< [ 11: 7](R/W) Write-buffer threshold. The write-buffer starts flushing entries to the L2 cache once the
+ number of valid write-buffer entries reaches this threshold value. */
+ uint64_t utlbentriesm1 : 5; /**< [ 6: 2](R/W) Number of uTLB entries minus one. uTLB is flushed when this value is changed. */
+ uint64_t cclkforce : 1; /**< [ 1: 1](R/W) Force CSR clock enable. When set, force CSR conditional clocking. */
+ uint64_t mclkforce : 1; /**< [ 0: 0](R/W) Force memory clock enable. When set, force memory conditional clocking. */
+#else /* Word 0 - Little Endian */
+ uint64_t mclkforce : 1; /**< [ 0: 0](R/W) Force memory clock enable. When set, force memory conditional clocking. */
+ uint64_t cclkforce : 1; /**< [ 1: 1](R/W) Force CSR clock enable. When set, force CSR conditional clocking. */
+ uint64_t utlbentriesm1 : 5; /**< [ 6: 2](R/W) Number of uTLB entries minus one. uTLB is flushed when this value is changed. */
+ uint64_t wbfthresh : 5; /**< [ 11: 7](R/W) Write-buffer threshold. The write-buffer starts flushing entries to the L2 cache once the
+ number of valid write-buffer entries reaches this threshold value. */
+ uint64_t wbfto : 5; /**< [ 16: 12](R/W) Write-buffer timeout for non-NSH entries; timeout = 2^WBFTO. */
+ uint64_t wbftomrgclrena : 1; /**< [ 17: 17](R/W) Write-buffer timeout clear-on-merge enable. */
+ uint64_t wbftonshena : 1; /**< [ 18: 18](R/W) Write-buffer timeout for NSH entries enable.
+ 0 = Write-buffer timeout for NSH entries = 2^18 cycles.
+ 1 = Write-buffer timeout for NSH entries = 2^[WBFTO] (see [WBFTO]). */
+ uint64_t wbfnomerge : 1; /**< [ 19: 19](R/W) Write-buffer merge disable. */
+ uint64_t wbfallbarrier : 1; /**< [ 20: 20](R/W) Write-buffer apply barrier to all ST instructions. */
+ uint64_t rbfto : 5; /**< [ 25: 21](R/W) Read buffer timeout; timeout = 2^[RBFTO+6]. */
+ uint64_t rbfshortto : 5; /**< [ 30: 26](R/W) Read buffer short timeout; timeout = 2^[RBFSHORTTO+6].
+ The L2C directs the core to use either RBFSHORTTO or RBFTO. The short
+ timeout is used when a CCPI link goes down to expedite error indication. */
+ uint64_t wfito : 3; /**< [ 33: 31](R/W) Wait-for-interrupt timeout; timeout=2^(8+[WFITO]). */
+ uint64_t wfildexdis : 1; /**< [ 34: 34](R/W) WFE release behavior for LD-exclusive.
+ 0 = L2C invalidates to global monitor cause SEV to local core.
+ 1 = L2C invalidates have no effect on global monitor (i.e. lock_register).
+
+ This field should never be set to 1; setting to 1 does not
+ conform to the ARMv8 specification. */
+ uint64_t ldprefdis : 1; /**< [ 35: 35](R/W) LD PREF instructions disable. */
+ uint64_t stprefdis : 1; /**< [ 36: 36](R/W) ST PREF instructions disable. */
+ uint64_t dcva47 : 1; /**< [ 37: 37](R/W) If MMU translations are disabled,
+ apply memory attributes to physical addresses where bit\<47\>
+ is zero, and device attributes where bit\<47\> is one. */
+ uint64_t ldil2cdis : 1; /**< [ 38: 38](R/W) LDI instruction L2C usage.
+ 0 = LDI instructions to L2C are LDI (don't allocate in L1, allocates L2 at requester).
+ 1 = LDI instructions to L2C are LDT (don't allocate in L2 or L1 at home or requester). */
+ uint64_t zval2cdis : 1; /**< [ 39: 39](R/W) ZVA bypass L2C.
+ 0 = DC_ZVA instructions to L2C are STFIL1 (full block store operation allocating in
+ requester L2, fill 0s, self-invalidate L1 cache).
+ 1 = DC_ZVA instructions to L2C are STTIL1 (full block store operation through to DRAM,
+ bypass home and requester L2, fill 0s, self-invalidate L1 cache). */
+ uint64_t replayprefdis : 1; /**< [ 40: 40](R/W) Replay PREF disable. uTLB miss PREF instruction behavior (see chapter body).
+ 0 = PREF instructions do attempt a replay for MTLB to uTLB refill.
+ 1 = PREF instructions do not attempt a replay for MTLB to uTLB refill.
+
+ uTLB is not flushed when this value is changed. */
+ uint64_t wcumissforce : 1; /**< [ 41: 41](R/W) Force all walker cache lookups to miss. uTLB is not flushed when this value is changed. */
+ uint64_t ioglobalforce : 1; /**< [ 42: 42](R/W) Reserved.
+ Internal:
+ Force global order for IO references. */
+ uint64_t stexl2cforce : 1; /**< [ 43: 43](R/W) Send all store-exclusive instructions to L2 cache. uTLB is not flushed when this value is
+ changed. */
+ uint64_t wbfdmbflushnext : 1; /**< [ 44: 44](R/W) DMB instruction to !NSH flushes next ST to !NSH. uTLB is not flushed when this value is
+ changed. */
+ uint64_t wbfdsbflushall : 1; /**< [ 45: 45](R/W) Any DSB instruction flushes the write buffer. */
+ uint64_t tlbiall : 1; /**< [ 46: 46](R/W) Treat all TLBIs like TLBI ALL for a specific exception level. */
+ uint64_t utlbfillbypdis : 1; /**< [ 47: 47](R/W) Disable uTLB fill bypass.
+ 0 = On a stage1-only translation, the uTLB is written along with the MTLB.
+ 1 = On a stage1-only translation, the uTLB is not written along with the MTLB causing a
+ uTLB miss replay to complete the uTLB fill. */
+ uint64_t gsyncto : 5; /**< [ 52: 48](R/W) GlobalSync timeout.
+ timeout = 2^[GSYNCTO].
+ 0x0 = disable timeout. */
+ uint64_t tlbiicflush : 1; /**< [ 53: 53](R/W) Some local TLBI instructions cause ICache flush.
+ 0 = Icache flush operations do not happen on the TLBI instructions listed below.
+ 1 = Icache is flushed on the TLBI instructions listed below:
+ * TLBI ALLE2{IS}.
+ * TLBI ALLE3{IS}.
+ * TLBI VAE1{IS}.
+ * TLBI VALE1{IS}.
+ * TLBI VAAE1{IS}.
+ * TLBI VAALE1{IS}.
+ * TLBI VAE2{IS}.
+ * TLBI VALE2{IS}.
+ * TLBI VAE3{IS}.
+ * TLBI VALE3{IS}.
+ * TLBI IPAS2E1{IS}.
+ * TLBI IPAS2LE1{IS}. */
+ uint64_t tlbinopdis : 1; /**< [ 54: 54](R/W) Disable broadcast TLBI optimization.
+
+ Address-based broadcast TLBI instructions that go to remote cores are converted
+ from address-based TLBI instructions to context-based TLBI instructions. The
+ actions on the local core generating the TLBI instruction are still precise.
+
+ 0 = The converted context-based TLBI instructions or original context-based
+ TLBIs to remote cores (without intervening interruptions, such as a DSB) are
+ coalesced into a single context-based TLBI. Converted and original ones do not
+ coalesce.
+
+ 1 = The above-mentioned coalescing is suppressed and converted context-based
+ remote TLBIs still go out as such. */
+ uint64_t dmbstallforce : 1; /**< [ 55: 55](R/W) Force DMB to wait for flushed write-buffer entries to be ACKed.
+ 0 = DMB instructions mark prior relevant write-buffer entries for flush, but do not wait
+ for the ACKs to return.
+ 1 = DMB instructions mark prior relevant write-buffer entries for flush and wait for all
+ the ACKs to return. */
+ uint64_t stlstallforce : 1; /**< [ 56: 56](R/W) Force ST_release to wait for flushed write-buffer entries to be ACKed.
+ 0 = Store-release instructions mark prior relevant write-buffer entries for flush but do
+ not wait for the ACKs to return.
+ 1 = Store-release instructions mark prior relevant write-buffer entries for flush and wait
+ for all the ACKs to return. */
+ uint64_t wfeldex1dis : 1; /**< [ 57: 57](R/W) WFE release behavior for LD-exclusive.
+ 0 = A global monitor transition from exclusive to open (lock flag transition
+ from 1 to 0) causes SEV to local core.
+ 1 = A global monitor transition from exclusive to open (lock flag transition
+ from 1 to 0) does not cause SEV to local core. */
+ uint64_t stexfailcnt : 3; /**< [ 60: 58](RO) ST exclusive fail count. */
+ uint64_t node : 2; /**< [ 62: 61](RO) Local node ID. */
+ uint64_t reserved_63 : 1;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_ap_cvmmemctl0_el1_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_63 : 1;
+ uint64_t node : 2; /**< [ 62: 61](RO) Local node ID. */
+ uint64_t stexfailcnt : 3; /**< [ 60: 58](RO) ST exclusive fail count. */
+ uint64_t wfeldex1dis : 1; /**< [ 57: 57](R/W) WFE release behavior for LD-exclusive.
+ 0 = A global monitor transition from exclusive to open (lock flag transition
+ from 1 to 0) causes SEV to local core.
+ 1 = A global monitor transition from exclusive to open (lock flag transition
+ from 1 to 0) does not cause SEV to local core. */
+ uint64_t stlstallforce : 1; /**< [ 56: 56](R/W) Force ST_release to wait for flushed write-buffer entries to be ACKed.
+ 0 = Store-release instructions mark prior relevant write-buffer entries for flush but do
+ not wait for the ACKs to return.
+ 1 = Store-release instructions mark prior relevant write-buffer entries for flush and wait
+ for all the ACKs to return. */
+ uint64_t dmbstallforce : 1; /**< [ 55: 55](R/W) Force DMB to wait for flushed write-buffer entries to be ACKed.
+ 0 = DMB instructions mark prior relevant write-buffer entries for flush, but do not wait
+ for the ACKs to return.
+ 1 = DMB instructions mark prior relevant write-buffer entries for flush and wait for all
+ the ACKs to return. */
+ uint64_t tlbinopdis : 1; /**< [ 54: 54](R/W) Disable broadcast TLBI optimization.
+
+ Address-based broadcast TLBI instructions that go to remote cores are converted
+ from address-based TLBI instructions to context-based TLBI instructions. The
+ actions on the local core generating the TLBI instruction are still precise.
+
+ 0 = The converted context-based TLBI instructions or original context-based
+ TLBIs to remote cores (without intervening interruptions, such as a DSB) are
+ coalesced into a single context-based TLBI. Converted and original ones do not
+ coalesce.
+
+ 1 = The above-mentioned coalescing is suppressed and converted context-based
+ remote TLBIs still go out as such. */
+ uint64_t tlbiicflush : 1; /**< [ 53: 53](R/W) Some local TLBI instructions cause ICache flush.
+ 0 = Icache flush operations do not happen on the TLBI instructions listed below.
+ 1 = Icache is flushed on the TLBI instructions listed below:
+ * TLBI ALLE2{IS}.
+ * TLBI ALLE3{IS}.
+ * TLBI VAE1{IS}.
+ * TLBI VALE1{IS}.
+ * TLBI VAAE1{IS}.
+ * TLBI VAALE1{IS}.
+ * TLBI VAE2{IS}.
+ * TLBI VALE2{IS}.
+ * TLBI VAE3{IS}.
+ * TLBI VALE3{IS}.
+ * TLBI IPAS2E1{IS}.
+ * TLBI IPAS2LE1{IS}. */
+ uint64_t gsyncto : 5; /**< [ 52: 48](R/W) GlobalSync timeout.
+ timeout = 2^[GSYNCTO].
+ 0x0 = disable timeout. */
+ uint64_t utlbfillbypdis : 1; /**< [ 47: 47](R/W) Disable uTLB fill bypass.
+ 0 = On a stage1-only translation, the uTLB is written along with the MTLB.
+ 1 = On a stage1-only translation, the uTLB is not written along with the MTLB causing a
+ uTLB miss replay to complete the uTLB fill. */
+ uint64_t tlbiall : 1; /**< [ 46: 46](R/W) Treat all TLBIs like TLBI ALL for a specific exception level. */
+ uint64_t wbfdsbflushall : 1; /**< [ 45: 45](R/W) Any DSB instruction flushes the write buffer. */
+ uint64_t wbfdmbflushnext : 1; /**< [ 44: 44](R/W) DMB instruction to !NSH flushes next ST to !NSH. uTLB is not flushed when this value is
+ changed. */
+ uint64_t stexl2cforce : 1; /**< [ 43: 43](R/W) Send all store-exclusive instructions to L2 cache. uTLB is not flushed when this value is
+ changed. */
+ uint64_t ioglobalforce : 1; /**< [ 42: 42](R/W) Reserved.
+ Internal:
+ Force global order for IO references. */
+ uint64_t wcumissforce : 1; /**< [ 41: 41](R/W) Force all walker cache lookups to miss. uTLB is not flushed when this value is changed. */
+ uint64_t replayprefdis : 1; /**< [ 40: 40](R/W) Replay PREF disable. uTLB miss PREF instruction behavior (see chapter body).
+ 0 = PREF instructions do attempt a replay for MTLB to uTLB refill.
+ 1 = PREF instructions do not attempt a replay for MTLB to uTLB refill.
+
+ uTLB is not flushed when this value is changed. */
+ uint64_t zval2cdis : 1; /**< [ 39: 39](R/W) ZVA bypass L2C.
+ 0 = DC_ZVA instructions to L2C are STFIL1 (full block store operation allocating in
+ requester L2, fill 0s, self-invalidate L1 cache).
+ 1 = DC_ZVA instructions to L2C are STTIL1 (full block store operation through to DRAM,
+ bypass home and requester L2, fill 0s, self-invalidate L1 cache). */
+ uint64_t ldil2cdis : 1; /**< [ 38: 38](R/W) LDI instruction L2C usage.
+ 0 = LDI instructions to L2C are LDI (don't allocate in L1, allocates L2 at requester).
+ 1 = LDI instructions to L2C are LDT (don't allocate in L2 or L1 at home or requester). */
+ uint64_t dcva47 : 1; /**< [ 37: 37](R/W) If MMU translations are disabled,
+ apply memory attributes to physical addresses where bit\<47\>
+ is zero, and device attributes where bit\<47\> is one. */
+ uint64_t stprefdis : 1; /**< [ 36: 36](R/W) ST PREF instructions disable. */
+ uint64_t ldprefdis : 1; /**< [ 35: 35](R/W) LD PREF instructions disable. */
+ uint64_t wfildexdis : 1; /**< [ 34: 34](R/W) WFE release behavior for LD-exclusive.
+ 0 = L2C invalidates to global monitor cause SEV to local core.
+ 1 = L2C invalidates have no effect on global monitor (i.e. lock_register).
+
+ This field should never be set to 1; setting to 1 does not
+ conform to the ARMv8 specification. */
+ uint64_t wfito : 3; /**< [ 33: 31](R/W) Wait-for-interrupt timeout; timeout=2^(8+[WFITO]). */
+ uint64_t rbfshortto : 5; /**< [ 30: 26](R/W) Read buffer short timeout; timeout = 2^[RBFSHORTTO].
+ Must be \>=0x6. The L2C directs the core to use either RBFSHORTTO or RBFTO. The short
+ timeout is used when a CCPI link goes down to expedite error indication. */
+ uint64_t rbfto : 5; /**< [ 25: 21](R/W) Read buffer timeout; timeout = 2^[RBFTO]. Must be \>= 0x6. */
+ uint64_t wbfallbarrier : 1; /**< [ 20: 20](R/W) Write-buffer apply barrier to all ST instructions. */
+ uint64_t wbfnomerge : 1; /**< [ 19: 19](R/W) Write-buffer merge disable. */
+ uint64_t wbftonshena : 1; /**< [ 18: 18](R/W) Write-buffer timeout for NSH entries enable.
+ 0 = Write-buffer timeout for NSH entries = 2^18 cycles.
+ 1 = Write-buffer timeout for NSH entries = 2^[WBFTO] (see [WBFTO]). */
+ uint64_t wbftomrgclrena : 1; /**< [ 17: 17](R/W) Write-buffer timeout clear-on-merge enable. */
+ uint64_t wbfto : 5; /**< [ 16: 12](R/W) Write-buffer timeout for non-NSH entries; timeout = 2^WBFTO. */
+ uint64_t wbfthresh : 5; /**< [ 11: 7](R/W) Write-buffer threshold. The write-buffer starts flushing entries to the L2 cache once the
+ number of valid write-buffer entries reaches this threshold value. */
+ uint64_t utlbentriesm1 : 5; /**< [ 6: 2](R/W) Number of uTLB entries - 1. uTLB is flushed when this value is changed. */
+ uint64_t cclkforce : 1; /**< [ 1: 1](R/W) Force CSR clock enable. When set, force CSR conditional clocking. */
+ uint64_t mclkforce : 1; /**< [ 0: 0](R/W) Force memory clock enable. When set, force memory conditional clocking. */
+#else /* Word 0 - Little Endian */
+ uint64_t mclkforce : 1; /**< [ 0: 0](R/W) Force memory clock enable. When set, force memory conditional clocking. */
+ uint64_t cclkforce : 1; /**< [ 1: 1](R/W) Force CSR clock enable. When set, force CSR conditional clocking. */
+ uint64_t utlbentriesm1 : 5; /**< [ 6: 2](R/W) Number of uTLB entries - 1. uTLB is flushed when this value is changed. */
+ uint64_t wbfthresh : 5; /**< [ 11: 7](R/W) Write-buffer threshold. The write-buffer starts flushing entries to the L2 cache once the
+ number of valid write-buffer entries reaches this threshold value. */
+ uint64_t wbfto : 5; /**< [ 16: 12](R/W) Write-buffer timeout for non-NSH entries; timeout = 2^WBFTO. */
+ uint64_t wbftomrgclrena : 1; /**< [ 17: 17](R/W) Write-buffer timeout clear-on-merge enable. */
+ uint64_t wbftonshena : 1; /**< [ 18: 18](R/W) Write-buffer timeout for NSH entries enable.
+ 0 = Write-buffer timeout for NSH entries = 2^18 cycles.
+ 1 = Write-buffer timeout for NSH entries = 2^[WBFTO] (see [WBFTO]). */
+ uint64_t wbfnomerge : 1; /**< [ 19: 19](R/W) Write-buffer merge disable. */
+ uint64_t wbfallbarrier : 1; /**< [ 20: 20](R/W) Write-buffer apply barrier to all ST instructions. */
+ uint64_t rbfto : 5; /**< [ 25: 21](R/W) Read buffer timeout; timeout = 2^[RBFTO]. Must be \>= 0x6. */
+ uint64_t rbfshortto : 5; /**< [ 30: 26](R/W) Read buffer short timeout; timeout = 2^[RBFSHORTTO].
+ Must be \>=0x6. The L2C directs the core to use either RBFSHORTTO or RBFTO. The short
+ timeout is used when a CCPI link goes down to expedite error indication. */
+ uint64_t wfito : 3; /**< [ 33: 31](R/W) Wait-for-interrupt timeout; timeout=2^(8+[WFITO]). */
+ uint64_t wfildexdis : 1; /**< [ 34: 34](R/W) WFE release behavior for LD-exclusive.
+ 0 = L2C invalidates to global monitor cause SEV to local core.
+ 1 = L2C invalidates have no effect on global monitor (i.e. lock_register).
+
+ This field should never be set to 1; setting to 1 does not
+ conform to the ARMv8 specification. */
+ uint64_t ldprefdis : 1; /**< [ 35: 35](R/W) LD PREF instructions disable. */
+ uint64_t stprefdis : 1; /**< [ 36: 36](R/W) ST PREF instructions disable. */
+ uint64_t dcva47 : 1; /**< [ 37: 37](R/W) If MMU translations are disabled,
+ apply memory attributes to physical addresses where bit\<47\>
+ is zero, and device attributes where bit\<47\> is one. */
+ uint64_t ldil2cdis : 1; /**< [ 38: 38](R/W) LDI instruction L2C usage.
+ 0 = LDI instructions to L2C are LDI (don't allocate in L1, allocates L2 at requester).
+ 1 = LDI instructions to L2C are LDT (don't allocate in L2 or L1 at home or requester). */
+ uint64_t zval2cdis : 1; /**< [ 39: 39](R/W) ZVA bypass L2C.
+ 0 = DC_ZVA instructions to L2C are STFIL1 (full block store operation allocating in
+ requester L2, fill 0s, self-invalidate L1 cache).
+ 1 = DC_ZVA instructions to L2C are STTIL1 (full block store operation through to DRAM,
+ bypass home and requester L2, fill 0s, self-invalidate L1 cache). */
+ uint64_t replayprefdis : 1; /**< [ 40: 40](R/W) Replay PREF disable. uTLB miss PREF instruction behavior (see chapter body).
+ 0 = PREF instructions do attempt a replay for MTLB to uTLB refill.
+ 1 = PREF instructions do not attempt a replay for MTLB to uTLB refill.
+
+ uTLB is not flushed when this value is changed. */
+ uint64_t wcumissforce : 1; /**< [ 41: 41](R/W) Force all walker cache lookups to miss. uTLB is not flushed when this value is changed. */
+ uint64_t ioglobalforce : 1; /**< [ 42: 42](R/W) Reserved.
+ Internal:
+ Force global order for IO references. */
+ uint64_t stexl2cforce : 1; /**< [ 43: 43](R/W) Send all store-exclusive instructions to L2 cache. uTLB is not flushed when this value is
+ changed. */
+ uint64_t wbfdmbflushnext : 1; /**< [ 44: 44](R/W) DMB instruction to !NSH flushes next ST to !NSH. uTLB is not flushed when this value is
+ changed. */
+ uint64_t wbfdsbflushall : 1; /**< [ 45: 45](R/W) Any DSB instruction flushes the write buffer. */
+ uint64_t tlbiall : 1; /**< [ 46: 46](R/W) Treat all TLBIs like TLBI ALL for a specific exception level. */
+ uint64_t utlbfillbypdis : 1; /**< [ 47: 47](R/W) Disable uTLB fill bypass.
+ 0 = On a stage1-only translation, the uTLB is written along with the MTLB.
+ 1 = On a stage1-only translation, the uTLB is not written along with the MTLB causing a
+ uTLB miss replay to complete the uTLB fill. */
+ uint64_t gsyncto : 5; /**< [ 52: 48](R/W) GlobalSync timeout.
+ timeout = 2^[GSYNCTO].
+ 0x0 = disable timeout. */
+ uint64_t tlbiicflush : 1; /**< [ 53: 53](R/W) Some local TLBI instructions cause ICache flush.
+ 0 = Icache flush operations do not happen on the TLBI instructions listed below.
+ 1 = Icache is flushed on the TLBI instructions listed below:
+ * TLBI ALLE2{IS}.
+ * TLBI ALLE3{IS}.
+ * TLBI VAE1{IS}.
+ * TLBI VALE1{IS}.
+ * TLBI VAAE1{IS}.
+ * TLBI VAALE1{IS}.
+ * TLBI VAE2{IS}.
+ * TLBI VALE2{IS}.
+ * TLBI VAE3{IS}.
+ * TLBI VALE3{IS}.
+ * TLBI IPAS2E1{IS}.
+ * TLBI IPAS2LE1{IS}. */
+ uint64_t tlbinopdis : 1; /**< [ 54: 54](R/W) Disable broadcast TLBI optimization.
+
+ Address-based broadcast TLBI instructions that go to remote cores are converted
+ from address-based TLBI instructions to context-based TLBI instructions. The
+ actions on the local core generating the TLBI instruction are still precise.
+
+ 0 = The converted context-based TLBI instructions or original context-based
+ TLBIs to remote cores (without intervening interruptions, such as a DSB) are
+ coalesced into a single context-based TLBI. Converted and original ones do not
+ coalesce.
+
+ 1 = The above-mentioned coalescing is suppressed and converted context-based
+ remote TLBIs still go out as such. */
+ uint64_t dmbstallforce : 1; /**< [ 55: 55](R/W) Force DMB to wait for flushed write-buffer entries to be ACKed.
+ 0 = DMB instructions mark prior relevant write-buffer entries for flush, but do not wait
+ for the ACKs to return.
+ 1 = DMB instructions mark prior relevant write-buffer entries for flush and wait for all
+ the ACKs to return. */
+ uint64_t stlstallforce : 1; /**< [ 56: 56](R/W) Force ST_release to wait for flushed write-buffer entries to be ACKed.
+ 0 = Store-release instructions mark prior relevant write-buffer entries for flush but do
+ not wait for the ACKs to return.
+ 1 = Store-release instructions mark prior relevant write-buffer entries for flush and wait
+ for all the ACKs to return. */
+ uint64_t wfeldex1dis : 1; /**< [ 57: 57](R/W) WFE release behavior for LD-exclusive.
+ 0 = A global monitor transition from exclusive to open (lock flag transition
+ from 1 to 0) causes SEV to local core.
+ 1 = A global monitor transition from exclusive to open (lock flag transition
+ from 1 to 0) does not cause SEV to local core. */
+ uint64_t stexfailcnt : 3; /**< [ 60: 58](RO) ST exclusive fail count. */
+ uint64_t node : 2; /**< [ 62: 61](RO) Local node ID. */
+ uint64_t reserved_63 : 1;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_ap_cvmmemctl0_el1_cn81xx cn83xx; */
+ /* struct bdk_ap_cvmmemctl0_el1_s cn88xxp2; */
+};
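+/* Editor's note, not part of the imported BDK sources: the read-buffer
+ * timeout encodings differ between the per-chip layouts above. On
+ * CN81XX/CN88XX the timeout is 2^[RBFTO] (or 2^[RBFSHORTTO]) cycles and the
+ * field must be >= 0x6, i.e. at least 2^6 = 64 cycles; the CN9 layout folds
+ * that minimum into the encoding as 2^[RBFTO+6], so a field value of 0
+ * already yields the same 64-cycle floor. */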
+typedef union bdk_ap_cvmmemctl0_el1 bdk_ap_cvmmemctl0_el1_t;
+
+#define BDK_AP_CVMMEMCTL0_EL1 BDK_AP_CVMMEMCTL0_EL1_FUNC()
+static inline uint64_t BDK_AP_CVMMEMCTL0_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVMMEMCTL0_EL1_FUNC(void)
+{
+ return 0x3000b000400ll;
+}
+
+#define typedef_BDK_AP_CVMMEMCTL0_EL1 bdk_ap_cvmmemctl0_el1_t
+#define bustype_BDK_AP_CVMMEMCTL0_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVMMEMCTL0_EL1 "AP_CVMMEMCTL0_EL1"
+#define busnum_BDK_AP_CVMMEMCTL0_EL1 0
+#define arguments_BDK_AP_CVMMEMCTL0_EL1 -1,-1,-1,-1
+
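+/* Editor's note, not part of the imported BDK sources: a minimal usage
+ * sketch, under the assumption that the BDK_CSR_READ() accessor from
+ * bdk-csr.h accepts this SYSREG-type register like any other CSR. The
+ * _FUNC() value above appears to be the packed system-register encoding
+ * (op0=3, op1=0, CRn=11, CRm=0, op2=4, i.e. S3_0_C11_C0_4) rather than a
+ * memory-mapped address.
+ *
+ *    bdk_ap_cvmmemctl0_el1_t ctl;
+ *    ctl.u = BDK_CSR_READ(node, BDK_AP_CVMMEMCTL0_EL1);
+ *    int utlb_entries = ctl.s.utlbentriesm1 + 1;      // field holds N-1
+ *    uint64_t wfi_cycles = 1ull << (8 + ctl.s.wfito); // timeout = 2^(8+[WFITO])
+ */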
+/**
+ * Register (SYSREG) ap_cvmmemctl1_el1
+ *
+ * AP Cavium Memory Control 1 Register
+ * This register controls additional memory-unit features.
+ * Internal:
+ * Back-end, non-debug.
+ */
+union bdk_ap_cvmmemctl1_el1
+{
+ uint64_t u;
+ struct bdk_ap_cvmmemctl1_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t rsvd_57_63 : 7; /**< [ 63: 57](R/W) Reserved. */
+ uint64_t ldictxchkena : 1; /**< [ 56: 56](R/W) LDI context consistency check enable. For diagnostic use only. */
+ uint64_t rbfevictbyp3dis : 1; /**< [ 55: 55](R/W) MAF RBUF evict datapath 3-cycle bypass disable. For diagnostic use only. */
+ uint64_t rbfevictbyp2dis : 1; /**< [ 54: 54](R/W) MAF RBUF evict datapath 2-cycle bypass disable. For diagnostic use only. */
+ uint64_t xmcpriwbfdis : 1; /**< [ 53: 53](R/W) XMC priority disable for predicted unlock WBF eviction. For diagnostic use only. */
+ uint64_t xmcpristdis : 1; /**< [ 52: 52](R/W) XMC priority disable for predicted unlock ST. For diagnostic use only. */
+ uint64_t xmcpriswpdis : 1; /**< [ 51: 51](R/W) XMC priority disable for predicted unlock SWP. For diagnostic use only. */
+ uint64_t xmcpricasdis : 1; /**< [ 50: 50](R/W) XMC priority disable for predicted unlock CAS. For diagnostic use only. */
+ uint64_t iostmergedis : 1; /**< [ 49: 49](R/W) IO ST merging disable. */
+ uint64_t ioldmergedis : 1; /**< [ 48: 48](R/W) IO LD merging disable. */
+ uint64_t gclkforce : 1; /**< [ 47: 47](R/W) Force gated clocks to be on. For diagnostic use only. */
+ uint64_t ldil3prefdis : 1; /**< [ 46: 46](R/W) LDIL3 PREF instructions disable. */
+ uint64_t ldil2prefdis : 1; /**< [ 45: 45](R/W) LDIL2 PREF instructions disable. */
+ uint64_t spare44 : 1; /**< [ 44: 44](R/W) Reserved; spare. */
+ uint64_t evatt_limited_size : 1; /**< [ 43: 43](R/W) 0 = Do not limit ASIDMAP/VMIDMAP size.
+ 1 = ASIDMAP has 7 entries, VMIDMAP has 7 entries. */
+ uint64_t evatt_periodic_flush : 1; /**< [ 42: 42](R/W) 0 = EVATT is not periodically flushed.
+ 1 = EVATT is flushed every 2^14 cycles. */
+ uint64_t cvap_dis : 1; /**< [ 41: 41](R/W) If set, convert DC_CVAP into DC_CVAC. For diagnostic use only. */
+ uint64_t tlbinoadr : 1; /**< [ 40: 40](R/W) If set, convert broadcast TLBI address-based opcodes to context-based opcodes. For
+ diagnostic use only. */
+ uint64_t utlbentriesm1_5 : 1; /**< [ 39: 39](R/W) Bit\<5\> of [UTLBENTRIESM1], the number of uTLB entries minus one. The uTLB is flushed when this
+ value is changed. */
+ uint64_t tlbiremoteicflush : 1; /**< [ 38: 38](R/W) Force ICache flush when any remote TLBI is received.
+ 0 = Do nothing.
+ 1 = Flush the ICache. */
+ uint64_t tlbilocalicflush : 1; /**< [ 37: 37](R/W) Force ICache flush when any local TLBI is issued.
+ 0 = Do nothing.
+ 1 = Flush the ICache. */
+ uint64_t dprefbpmode : 1; /**< [ 36: 36](R/W) Data-stream hardware prefetcher backpressure mode select.
+ 0 = Single counter mode (combined hit and miss latency counter).
+ 1 = Dual counter mode (separate hit and miss latency counters). */
+ uint64_t dprefbpctl : 4; /**< [ 35: 32](R/W) Data-stream hardware prefetcher backpressure control mask for dual counter mode.
+ Internal:
+ Backpressure is applied if:
+ \<pre\>
+ ( ([DPREFBPCTL]\<0\> && !hit_ctr_bp && !miss_ctr_bp)
+ || ([DPREFBPCTL]\<1\> && !hit_ctr_bp && miss_ctr_bp)
+ || ([DPREFBPCTL]\<2\> && hit_ctr_bp && !miss_ctr_bp)
+ || ([DPREFBPCTL]\<3\> && hit_ctr_bp && miss_ctr_bp))
+ \</pre\>
+
+ Where hit_ctr_bp is the MSB of the 4-bit hit counter being set, and miss_ctr_bp
+ is the MSB of the 4-bit miss counter being set. */
+ uint64_t dprefbphitthresh : 12; /**< [ 31: 20](R/W) Data-stream hardware prefetcher backpressure threshold for L2C hit latency. */
+ uint64_t dprefbpmissthresh : 12; /**< [ 19: 8](R/W) Data-stream hardware prefetcher backpressure threshold for L2C miss latency. */
+ uint64_t spare : 1; /**< [ 7: 7](R/W) Reserved; spare. */
+ uint64_t switchtagena : 1; /**< [ 6: 6](R/W) Reserved.
+ Internal:
+ 83xx: Enable SSO switch-tag. */
+ uint64_t node1trapena : 1; /**< [ 5: 5](R/W) Reserved.
+ Internal:
+ 83xx: Trap any access to nonzero node id. */
+ uint64_t ioatomicena : 1; /**< [ 4: 4](R/W) Reserved.
+ Internal:
+ Enable I/O SSO and PKO address region. */
+ uint64_t lmtstena : 1; /**< [ 3: 3](R/W) Reserved.
+ Internal:
+ 83xx: Enable/disable LMTST(a). */
+ uint64_t lodignoresh : 1; /**< [ 2: 2](R/W) LocalOrderDomain DMB/DSB_NSH{ST} ignores shareability (applies to both nsh and ish pages). */
+ uint64_t lodishena : 1; /**< [ 1: 1](R/W) LocalOrderDomain DMB/DSB_ISH{ST} enable. */
+ uint64_t lodnshena : 1; /**< [ 0: 0](R/W) LocalOrderDomain DMB/DSB_NSH{ST} enable. */
+#else /* Word 0 - Little Endian */
+ uint64_t lodnshena : 1; /**< [ 0: 0](R/W) LocalOrderDomain DMB/DSB_NSH{ST} enable. */
+ uint64_t lodishena : 1; /**< [ 1: 1](R/W) LocalOrderDomain DMB/DSB_ISH{ST} enable. */
+ uint64_t lodignoresh : 1; /**< [ 2: 2](R/W) LocalOrderDomain DMB/DSB_NSH{ST} ignores shareability (applies to both nsh and ish pages). */
+ uint64_t lmtstena : 1; /**< [ 3: 3](R/W) Reserved.
+ Internal:
+ 83xx: Enable/disable LMTST(a). */
+ uint64_t ioatomicena : 1; /**< [ 4: 4](R/W) Reserved.
+ Internal:
+ Enable I/O SSO and PKO address region. */
+ uint64_t node1trapena : 1; /**< [ 5: 5](R/W) Reserved.
+ Internal:
+ 83xx: Trap any access to nonzero node id. */
+ uint64_t switchtagena : 1; /**< [ 6: 6](R/W) Reserved.
+ Internal:
+ 83xx: Enable SSO switch-tag. */
+ uint64_t spare : 1; /**< [ 7: 7](R/W) Reserved; spare. */
+ uint64_t dprefbpmissthresh : 12; /**< [ 19: 8](R/W) Data-stream hardware prefetcher backpressure threshold for L2C miss latency. */
+ uint64_t dprefbphitthresh : 12; /**< [ 31: 20](R/W) Data-stream hardware prefetcher backpressure threshold for L2C hit latency. */
+ uint64_t dprefbpctl : 4; /**< [ 35: 32](R/W) Data-stream hardware prefetcher backpressure control mask for dual counter mode.
+ Internal:
+ Backpressure is applied if:
+ \<pre\>
+ ( ([DPREFBPCTL]\<0\> && !hit_ctr_bp && !miss_ctr_bp)
+ || ([DPREFBPCTL]\<1\> && !hit_ctr_bp && miss_ctr_bp)
+ || ([DPREFBPCTL]\<2\> && hit_ctr_bp && !miss_ctr_bp)
+ || ([DPREFBPCTL]\<3\> && hit_ctr_bp && miss_ctr_bp))
+ \</pre\>
+
+ Where hit_ctr_bp is the MSB of the 4-bit hit counter being set, and miss_ctr_bp
+ is the MSB of the 4-bit miss counter being set. */
+ uint64_t dprefbpmode : 1; /**< [ 36: 36](R/W) Data-stream hardware prefetcher backpressure mode select.
+ 0 = Single counter mode (combined hit and miss latency counter).
+ 1 = Dual counter mode (separate hit and miss latency counters). */
+ uint64_t tlbilocalicflush : 1; /**< [ 37: 37](R/W) Force ICache flush when any local TLBI is issued.
+ 0 = Do nothing.
+ 1 = Flush the ICache. */
+ uint64_t tlbiremoteicflush : 1; /**< [ 38: 38](R/W) Force ICache flush when any remote TLBI is received.
+ 0 = Do nothing.
+ 1 = Flush the ICache. */
+ uint64_t utlbentriesm1_5 : 1; /**< [ 39: 39](R/W) Bit\<5\> of [UTLBENTRIESM1], the number of uTLB entries minus one. The uTLB is flushed when this
+ value is changed. */
+ uint64_t tlbinoadr : 1; /**< [ 40: 40](R/W) If set, convert broadcast TLBI address-based opcodes to context-based opcodes. For
+ diagnostic use only. */
+ uint64_t cvap_dis : 1; /**< [ 41: 41](R/W) If set, convert DC_CVAP into DC_CVAC. For diagnostic use only. */
+ uint64_t evatt_periodic_flush : 1; /**< [ 42: 42](R/W) 0 = EVATT is not periodically flushed.
+ 1 = EVATT is flushed every 2^14 cycles. */
+ uint64_t evatt_limited_size : 1; /**< [ 43: 43](R/W) 0 = Do not limit ASIDMAP/VMIDMAP size.
+ 1 = ASIDMAP has 7 entries, VMIDMAP has 7 entries. */
+ uint64_t spare44 : 1; /**< [ 44: 44](R/W) Reserved; spare. */
+ uint64_t ldil2prefdis : 1; /**< [ 45: 45](R/W) LDIL2 PREF instructions disable. */
+ uint64_t ldil3prefdis : 1; /**< [ 46: 46](R/W) LDIL3 PREF instructions disable. */
+ uint64_t gclkforce : 1; /**< [ 47: 47](R/W) Force gated clocks to be on. For diagnostic use only. */
+ uint64_t ioldmergedis : 1; /**< [ 48: 48](R/W) IO LD merging disable. */
+ uint64_t iostmergedis : 1; /**< [ 49: 49](R/W) IO ST merging disable. */
+ uint64_t xmcpricasdis : 1; /**< [ 50: 50](R/W) XMC priority disable for predicted unlock CAS. For diagnostic use only. */
+ uint64_t xmcpriswpdis : 1; /**< [ 51: 51](R/W) XMC priority disable for predicted unlock SWP. For diagnostic use only. */
+ uint64_t xmcpristdis : 1; /**< [ 52: 52](R/W) XMC priority disable for predicted unlock ST. For diagnostic use only. */
+ uint64_t xmcpriwbfdis : 1; /**< [ 53: 53](R/W) XMC priority disable for predicted unlock WBF eviction. For diagnostic use only. */
+ uint64_t rbfevictbyp2dis : 1; /**< [ 54: 54](R/W) MAF RBUF evict datapath 2-cycle bypass disable. For diagnostic use only. */
+ uint64_t rbfevictbyp3dis : 1; /**< [ 55: 55](R/W) MAF RBUF evict datapath 3-cycle bypass disable. For diagnostic use only. */
+ uint64_t ldictxchkena : 1; /**< [ 56: 56](R/W) LDI context consistency check enable. For diagnostic use only. */
+ uint64_t rsvd_57_63 : 7; /**< [ 63: 57](R/W) Reserved. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_cvmmemctl1_el1_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t rsvd_57_63 : 7; /**< [ 63: 57](R/W) Reserved. */
+ uint64_t ldictxchkena : 1; /**< [ 56: 56](R/W) LDI context consistency check enable. For diagnostic use only. */
+ uint64_t rbfevictbyp3dis : 1; /**< [ 55: 55](R/W) MAF RBUF evict datapath 3-cycle bypass disable. For diagnostic use only. */
+ uint64_t rbfevictbyp2dis : 1; /**< [ 54: 54](R/W) MAF RBUF evict datapath 2-cycle bypass disable. For diagnostic use only. */
+ uint64_t xmcpriwbfdis : 1; /**< [ 53: 53](R/W) XMC priority disable for predicted unlock WBF eviction. For diagnostic use only. */
+ uint64_t xmcpristdis : 1; /**< [ 52: 52](R/W) XMC priority disable for predicted unlock ST. For diagnostic use only. */
+ uint64_t xmcpriswpdis : 1; /**< [ 51: 51](R/W) XMC priority disable for predicted unlock SWP. For diagnostic use only. */
+ uint64_t xmcpricasdis : 1; /**< [ 50: 50](R/W) XMC priority disable for predicted unlock CAS. For diagnostic use only. */
+ uint64_t iostmergedis : 1; /**< [ 49: 49](R/W) IO ST merging disable. */
+ uint64_t ioldmergedis : 1; /**< [ 48: 48](R/W) IO LD merging disable. */
+ uint64_t gclkforce : 1; /**< [ 47: 47](R/W) Force gated clocks to be on. For diagnostic use only. */
+ uint64_t ldil3prefdis : 1; /**< [ 46: 46](R/W) LDIL3 PREF instructions disable. */
+ uint64_t ldil2prefdis : 1; /**< [ 45: 45](R/W) LDIL2 PREF instructions disable. */
+ uint64_t spare44 : 1; /**< [ 44: 44](R/W) Reserved; spare. */
+ uint64_t evatt_limited_size : 1; /**< [ 43: 43](R/W) 0 = Do not limit ASIDMAP/VMIDMAP size.
+ 1 = ASIDMAP has 7 entries, VMIDMAP has 7 entries. */
+ uint64_t evatt_periodic_flush : 1; /**< [ 42: 42](R/W) 0 = EVATT is not periodically flushed.
+ 1 = EVATT is flushed every 2^14 cycles. */
+ uint64_t cvap_dis : 1; /**< [ 41: 41](R/W) If set, convert DC_CVAP into DC_CVAC. For diagnostic use only. */
+ uint64_t tlbinoadr : 1; /**< [ 40: 40](R/W) If set, convert broadcast TLBI address-based opcodes to context-based opcodes. For
+ diagnostic use only. */
+ uint64_t utlbentriesm1_5 : 1; /**< [ 39: 39](R/W) Bit\<5\> of [UTLBENTRIESM1], the number of uTLB entries minus one. The uTLB is flushed when this
+ value is changed. */
+ uint64_t tlbiremoteicflush : 1; /**< [ 38: 38](R/W) Force ICache flush when any remote TLBI is received.
+ 0 = Do nothing.
+ 1 = Flush the ICache. */
+ uint64_t tlbilocalicflush : 1; /**< [ 37: 37](R/W) Force ICache flush when any local TLBI is issued.
+ 0 = Do nothing.
+ 1 = Flush the ICache. */
+ uint64_t dprefbpmode : 1; /**< [ 36: 36](R/W) Data-stream hardware prefetcher backpressure mode select.
+ 0 = Single counter mode (combined hit and miss latency counter).
+ 1 = Dual counter mode (separate hit and miss latency counters). */
+ uint64_t dprefbpctl : 4; /**< [ 35: 32](R/W) Data-stream hardware prefetcher backpressure control mask for dual counter mode.
+ Internal:
+ Backpressure is applied if:
+ \<pre\>
+ ( ([DPREFBPCTL]\<0\> && !hit_ctr_bp && !miss_ctr_bp)
+ || ([DPREFBPCTL]\<1\> && !hit_ctr_bp && miss_ctr_bp)
+ || ([DPREFBPCTL]\<2\> && hit_ctr_bp && !miss_ctr_bp)
+ || ([DPREFBPCTL]\<3\> && hit_ctr_bp && miss_ctr_bp))
+ \</pre\>
+
+ Where hit_ctr_bp is the MSB of the 4-bit hit counter being set, and miss_ctr_bp
+ is the MSB of the 4-bit miss counter being set. */
+ uint64_t dprefbphitthresh : 12; /**< [ 31: 20](R/W) Data-stream hardware prefetcher backpressure threshold for L2C hit latency. */
+ uint64_t dprefbpmissthresh : 12; /**< [ 19: 8](R/W) Data-stream hardware prefetcher backpressure threshold for L2C miss latency. */
+ uint64_t spare : 1; /**< [ 7: 7](R/W) Reserved; spare. */
+ uint64_t switchtagena : 1; /**< [ 6: 6](R/W) Reserved.
+ Internal:
+ Enable SSO switch-tag. */
+ uint64_t node1trapena : 1; /**< [ 5: 5](R/W) Trap any access to nonzero node id. This should be clear on multi-socket
+ systems, and set on single-socket systems. */
+ uint64_t ioatomicena : 1; /**< [ 4: 4](R/W) Enable SSO and PKO address region.
+ 0 = Accesses described below will trap.
+ 1 = Allow \> 64-bit memory instructions, multi-register memory instructions, and
+ atomic instructions to SSO and PKO I/O address regions. This must be set if SSO
+ or PKO are to be used.
+
+ Other address regions (e.g. SLI) are not affected by this setting. */
+ uint64_t lmtstena : 1; /**< [ 3: 3](R/W) Enable LMTST. */
+ uint64_t lodignoresh : 1; /**< [ 2: 2](R/W) LocalOrderDomain DMB/DSB_NSH{ST} ignores shareability (applies to both nsh and ish pages). */
+ uint64_t lodishena : 1; /**< [ 1: 1](R/W) LocalOrderDomain DMB/DSB_ISH{ST} enable. */
+ uint64_t lodnshena : 1; /**< [ 0: 0](R/W) LocalOrderDomain DMB/DSB_NSH{ST} enable. */
+#else /* Word 0 - Little Endian */
+ uint64_t lodnshena : 1; /**< [ 0: 0](R/W) LocalOrderDomain DMB/DSB_NSH{ST} enable. */
+ uint64_t lodishena : 1; /**< [ 1: 1](R/W) LocalOrderDomain DMB/DSB_ISH{ST} enable. */
+ uint64_t lodignoresh : 1; /**< [ 2: 2](R/W) LocalOrderDomain DMB/DSB_NSH{ST} ignores shareability (applies to both nsh and ish pages). */
+ uint64_t lmtstena : 1; /**< [ 3: 3](R/W) Enable LMTST. */
+ uint64_t ioatomicena : 1; /**< [ 4: 4](R/W) Enable SSO and PKO address region.
+ 0 = Accesses described below will trap.
+ 1 = Allow \> 64-bit memory instructions, multi-register memory instructions, and
+ atomic instructions to SSO and PKO I/O address regions. This must be set if SSO
+ or PKO are to be used.
+
+ Other address regions (e.g. SLI) are not affected by this setting. */
+ uint64_t node1trapena : 1; /**< [ 5: 5](R/W) Trap any access to nonzero node id. This should be clear on multi-socket
+ systems, and set on single-socket systems. */
+ uint64_t switchtagena : 1; /**< [ 6: 6](R/W) Reserved.
+ Internal:
+ Enable SSO switch-tag. */
+ uint64_t spare : 1; /**< [ 7: 7](R/W) Reserved; spare. */
+ uint64_t dprefbpmissthresh : 12; /**< [ 19: 8](R/W) Data-stream hardware prefetcher backpressure threshold for L2C miss latency. */
+ uint64_t dprefbphitthresh : 12; /**< [ 31: 20](R/W) Data-stream hardware prefetcher backpressure threshold for L2C hit latency. */
+ uint64_t dprefbpctl : 4; /**< [ 35: 32](R/W) Data-stream hardware prefetcher backpressure control mask for dual counter mode.
+ Internal:
+ Backpressure is applied if:
+ \<pre\>
+ ( ([DPREFBPCTL]\<0\> && !hit_ctr_bp && !miss_ctr_bp)
+ || ([DPREFBPCTL]\<1\> && !hit_ctr_bp && miss_ctr_bp)
+ || ([DPREFBPCTL]\<2\> && hit_ctr_bp && !miss_ctr_bp)
+ || ([DPREFBPCTL]\<3\> && hit_ctr_bp && miss_ctr_bp))
+ \</pre\>
+
+ Where hit_ctr_bp is the MSB of the 4-bit hit counter being set, and miss_ctr_bp
+ is the MSB of the 4-bit miss counter being set. */
+ uint64_t dprefbpmode : 1; /**< [ 36: 36](R/W) Data-stream hardware prefetcher backpressure mode select.
+ 0 = Single counter mode (combined hit and miss latency counter).
+ 1 = Dual counter mode (separate hit and miss latency counters). */
+ uint64_t tlbilocalicflush : 1; /**< [ 37: 37](R/W) Force ICache flush when any local TLBI is issued.
+ 0 = Do nothing.
+ 1 = Flush the ICache. */
+ uint64_t tlbiremoteicflush : 1; /**< [ 38: 38](R/W) Force ICache flush when any remote TLBI is received.
+ 0 = Do nothing.
+ 1 = Flush the ICache. */
+ uint64_t utlbentriesm1_5 : 1; /**< [ 39: 39](R/W) Bit\<5\> of [UTLBENTRIESM1], the number of uTLB entries minus one. The uTLB is flushed when this
+ value is changed. */
+ uint64_t tlbinoadr : 1; /**< [ 40: 40](R/W) If set, convert broadcast TLBI address-based opcodes to context-based opcodes. For
+ diagnostic use only. */
+ uint64_t cvap_dis : 1; /**< [ 41: 41](R/W) If set, convert DC_CVAP into DC_CVAC. For diagnostic use only. */
+ uint64_t evatt_periodic_flush : 1; /**< [ 42: 42](R/W) 0 = EVATT is not periodically flushed.
+ 1 = EVATT is flushed every 2^14 cycles. */
+ uint64_t evatt_limited_size : 1; /**< [ 43: 43](R/W) 0 = Do not limit ASIDMAP/VMIDMAP size.
+ 1 = ASIDMAP has 7 entries, VMIDMAP has 7 entries. */
+ uint64_t spare44 : 1; /**< [ 44: 44](R/W) Reserved; spare. */
+ uint64_t ldil2prefdis : 1; /**< [ 45: 45](R/W) LDIL2 PREF instructions disable. */
+ uint64_t ldil3prefdis : 1; /**< [ 46: 46](R/W) LDIL3 PREF instructions disable. */
+ uint64_t gclkforce : 1; /**< [ 47: 47](R/W) Force gated clocks to be on. For diagnostic use only. */
+ uint64_t ioldmergedis : 1; /**< [ 48: 48](R/W) IO LD merging disable. */
+ uint64_t iostmergedis : 1; /**< [ 49: 49](R/W) IO ST merging disable. */
+ uint64_t xmcpricasdis : 1; /**< [ 50: 50](R/W) XMC priority disable for predicted unlock CAS. For diagnostic use only. */
+ uint64_t xmcpriswpdis : 1; /**< [ 51: 51](R/W) XMC priority disable for predicted unlock SWP. For diagnostic use only. */
+ uint64_t xmcpristdis : 1; /**< [ 52: 52](R/W) XMC priority disable for predicted unlock ST. For diagnostic use only. */
+ uint64_t xmcpriwbfdis : 1; /**< [ 53: 53](R/W) XMC priority disable for predicted unlock WBF eviction. For diagnostic use only. */
+ uint64_t rbfevictbyp2dis : 1; /**< [ 54: 54](R/W) MAF RBUF evict datapath 2-cycle bypass disable. For diagnostic use only. */
+ uint64_t rbfevictbyp3dis : 1; /**< [ 55: 55](R/W) MAF RBUF evict datapath 3-cycle bypass disable. For diagnostic use only. */
+ uint64_t ldictxchkena : 1; /**< [ 56: 56](R/W) LDI context consistency check enable. For diagnostic use only. */
+ uint64_t rsvd_57_63 : 7; /**< [ 63: 57](R/W) Reserved. */
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_ap_cvmmemctl1_el1_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_39_63 : 25;
+ uint64_t tlbiremoteicflush : 1; /**< [ 38: 38](R/W) Force ICache flush when any remote TLBI is received.
+ 0 = Do nothing.
+ 1 = Flush the ICache. */
+ uint64_t tlbilocalicflush : 1; /**< [ 37: 37](R/W) Force ICache flush when any local TLBI is issued.
+ 0 = Do nothing.
+ 1 = Flush the ICache. */
+ uint64_t dprefbpmode : 1; /**< [ 36: 36](R/W) Data-stream hardware prefetcher backpressure mode select.
+ 0 = Single counter mode (combined hit and miss latency counter).
+ 1 = Dual counter mode (separate hit and miss latency counters). */
+ uint64_t dprefbpctl : 4; /**< [ 35: 32](R/W) Data-stream hardware prefetcher backpressure control mask for dual counter mode.
+ Internal:
+ Backpressure is applied if:
+ \<pre\>
+ ( ([DPREFBPCTL]\<0\> && !hit_ctr_bp && !miss_ctr_bp)
+ || ([DPREFBPCTL]\<1\> && !hit_ctr_bp && miss_ctr_bp)
+ || ([DPREFBPCTL]\<2\> && hit_ctr_bp && !miss_ctr_bp)
+ || ([DPREFBPCTL]\<3\> && hit_ctr_bp && miss_ctr_bp))
+ \</pre\>
+
+ Where hit_ctr_bp is the MSB of the 4-bit hit counter being set, and miss_ctr_bp
+ is the MSB of the 4-bit miss counter being set. */
+ uint64_t dprefbphitthresh : 12; /**< [ 31: 20](R/W) Data-stream hardware prefetcher backpressure threshold for L2C hit latency. */
+ uint64_t dprefbpmissthresh : 12; /**< [ 19: 8](R/W) Data-stream hardware prefetcher backpressure threshold for L2C miss latency. */
+ uint64_t spare : 1; /**< [ 7: 7](R/W) Reserved; spare. */
+ uint64_t switchtagena : 1; /**< [ 6: 6](R/W) Reserved.
+ Internal:
+ 83xx: Enable SSO switch-tag. */
+ uint64_t node1trapena : 1; /**< [ 5: 5](R/W) Reserved.
+ Internal:
+ 83xx: Trap any access to nonzero node id. */
+ uint64_t ioatomicena : 1; /**< [ 4: 4](R/W) Reserved.
+ Internal:
+ Enable I/O SSO and PKO address region. */
+ uint64_t lmtstena : 1; /**< [ 3: 3](R/W) Reserved.
+ Internal:
+ 83xx: Enable/disable LMTST(a). */
+ uint64_t lodignoresh : 1; /**< [ 2: 2](R/W) LocalOrderDomain DMB/DSB_NSH{ST} ignores shareability (applies to both nsh and ish pages). */
+ uint64_t lodishena : 1; /**< [ 1: 1](R/W) LocalOrderDomain DMB/DSB_ISH{ST} enable. */
+ uint64_t lodnshena : 1; /**< [ 0: 0](R/W) LocalOrderDomain DMB/DSB_NSH{ST} enable. */
+#else /* Word 0 - Little Endian */
+ uint64_t lodnshena : 1; /**< [ 0: 0](R/W) LocalOrderDomain DMB/DSB_NSH{ST} enable. */
+ uint64_t lodishena : 1; /**< [ 1: 1](R/W) LocalOrderDomain DMB/DSB_ISH{ST} enable. */
+ uint64_t lodignoresh : 1; /**< [ 2: 2](R/W) LocalOrderDomain DMB/DSB_NSH{ST} ignores shareability (applies to both nsh and ish pages). */
+ uint64_t lmtstena : 1; /**< [ 3: 3](R/W) Reserved.
+ Internal:
+ 83xx: Enable/disable LMTST(a). */
+ uint64_t ioatomicena : 1; /**< [ 4: 4](R/W) Reserved.
+ Internal:
+ Enable I/O SSO and PKO address region. */
+ uint64_t node1trapena : 1; /**< [ 5: 5](R/W) Reserved.
+ Internal:
+ 83xx: Trap any access to nonzero node id. */
+ uint64_t switchtagena : 1; /**< [ 6: 6](R/W) Reserved.
+ Internal:
+ 83xx: Enable SSO switch-tag. */
+ uint64_t spare : 1; /**< [ 7: 7](R/W) Reserved; spare. */
+ uint64_t dprefbpmissthresh : 12; /**< [ 19: 8](R/W) Data-stream hardware prefetcher backpressure threshold for L2C miss latency. */
+ uint64_t dprefbphitthresh : 12; /**< [ 31: 20](R/W) Data-stream hardware prefetcher backpressure threshold for L2C hit latency. */
+ uint64_t dprefbpctl : 4; /**< [ 35: 32](R/W) Data-stream hardware prefetcher backpressure control mask for dual counter mode.
+ Internal:
+ Backpressure is applied if:
+ \<pre\>
+ ( ([DPREFBPCTL]\<0\> && !hit_ctr_bp && !miss_ctr_bp)
+ || ([DPREFBPCTL]\<1\> && !hit_ctr_bp && miss_ctr_bp)
+ || ([DPREFBPCTL]\<2\> && hit_ctr_bp && !miss_ctr_bp)
+ || ([DPREFBPCTL]\<3\> && hit_ctr_bp && miss_ctr_bp))
+ \</pre\>
+
+ Where hit_ctr_bp is the MSB of the 4-bit hit counter being set, and miss_ctr_bp
+ is the MSB of the 4-bit miss counter being set. */
+ uint64_t dprefbpmode : 1; /**< [ 36: 36](R/W) Data-stream hardware prefetcher backpressure mode select.
+ 0 = Single counter mode (combined hit and miss latency counter).
+ 1 = Dual counter mode (separate hit and miss latency counters). */
+ uint64_t tlbilocalicflush : 1; /**< [ 37: 37](R/W) Force ICache flush when any local TLBI is issued.
+ 0 = Do nothing.
+ 1 = Flush the ICache. */
+ uint64_t tlbiremoteicflush : 1; /**< [ 38: 38](R/W) Force ICache flush when any remote TLBI is received.
+ 0 = Do nothing.
+ 1 = Flush the ICache. */
+ uint64_t reserved_39_63 : 25;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_ap_cvmmemctl1_el1_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_37_63 : 27;
+ uint64_t reserved_8_36 : 29;
+ uint64_t reserved_7 : 1;
+ uint64_t switchtagena : 1; /**< [ 6: 6](R/W) Reserved.
+ Internal:
+ 83xx: Enable SSO switch-tag. */
+ uint64_t node1trapena : 1; /**< [ 5: 5](R/W) Reserved.
+ Internal:
+ 83xx: Trap any access to nonzero node id. */
+ uint64_t ioatomicena : 1; /**< [ 4: 4](R/W) Reserved.
+ Internal:
+ Enable I/O SSO and PKO address region. */
+ uint64_t lmtstena : 1; /**< [ 3: 3](R/W) Reserved.
+ Internal:
+ 83xx: Enable/disable LMTST(a). */
+ uint64_t lodignoresh : 1; /**< [ 2: 2](R/W) LocalOrderDomain DMB/DSB_NSH{ST} ignores shareability (applies to both nsh and ish pages). */
+ uint64_t lodishena : 1; /**< [ 1: 1](R/W) LocalOrderDomain DMB/DSB_ISH{ST} enable. */
+ uint64_t lodnshena : 1; /**< [ 0: 0](R/W) LocalOrderDomain DMB/DSB_NSH{ST} enable. */
+#else /* Word 0 - Little Endian */
+ uint64_t lodnshena : 1; /**< [ 0: 0](R/W) LocalOrderDomain DMB/DSB_NSH{ST} enable. */
+ uint64_t lodishena : 1; /**< [ 1: 1](R/W) LocalOrderDomain DMB/DSB_ISH{ST} enable. */
+ uint64_t lodignoresh : 1; /**< [ 2: 2](R/W) LocalOrderDomain DMB/DSB_NSH{ST} ignores shareability (applies to both nsh and ish pages). */
+ uint64_t lmtstena : 1; /**< [ 3: 3](R/W) Reserved.
+ Internal:
+ 83xx: Enable/disable LMTST(a). */
+ uint64_t ioatomicena : 1; /**< [ 4: 4](R/W) Reserved.
+ Internal:
+ Enable I/O SSO and PKO address region. */
+ uint64_t node1trapena : 1; /**< [ 5: 5](R/W) Reserved.
+ Internal:
+ 83xx: Trap any access to nonzero node id. */
+ uint64_t switchtagena : 1; /**< [ 6: 6](R/W) Reserved.
+ Internal:
+ 83xx: Enable SSO switch-tag. */
+ uint64_t reserved_7 : 1;
+ uint64_t reserved_8_36 : 29;
+ uint64_t reserved_37_63 : 27;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_ap_cvmmemctl1_el1_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_39_63 : 25;
+ uint64_t tlbiremoteicflush : 1; /**< [ 38: 38](R/W) Force ICache flush when any remote TLBI is received.
+ 0 = Do nothing.
+ 1 = Flush the ICache. */
+ uint64_t tlbilocalicflush : 1; /**< [ 37: 37](R/W) Force ICache flush when any local TLBI is issued.
+ 0 = Do nothing.
+ 1 = Flush the ICache. */
+ uint64_t dprefbpmode : 1; /**< [ 36: 36](R/W) Data-stream hardware prefetcher backpressure mode select.
+ 0 = Single counter mode (combined hit and miss latency counter).
+ 1 = Dual counter mode (separate hit and miss latency counters). */
+ uint64_t dprefbpctl : 4; /**< [ 35: 32](R/W) Data-stream hardware prefetcher backpressure control mask for dual counter mode.
+ Internal:
+ Backpressure is applied if:
+ \<pre\>
+ ( ([DPREFBPCTL]\<0\> && !hit_ctr_bp && !miss_ctr_bp)
+ || ([DPREFBPCTL]\<1\> && !hit_ctr_bp && miss_ctr_bp)
+ || ([DPREFBPCTL]\<2\> && hit_ctr_bp && !miss_ctr_bp)
+ || ([DPREFBPCTL]\<3\> && hit_ctr_bp && miss_ctr_bp))
+ \</pre\>
+
+ Where hit_ctr_bp is the MSB of the 4-bit hit counter being set, and miss_ctr_bp
+ is the MSB of the 4-bit miss counter being set. */
+ uint64_t dprefbphitthresh : 12; /**< [ 31: 20](R/W) Data-stream hardware prefetcher backpressure threshold for L2C hit latency. */
+ uint64_t dprefbpmissthresh : 12; /**< [ 19: 8](R/W) Data-stream hardware prefetcher backpressure threshold for L2C miss latency. */
+ uint64_t spare : 1; /**< [ 7: 7](R/W) Reserved; spare. */
+ uint64_t switchtagena : 1; /**< [ 6: 6](R/W) Enable SSO switch-tag caching. The cache must be invalidated through e.g. use of
+ SSO_WS_CFG[SSO_SAI_FLUSH] before clearing this bit. */
+ uint64_t node1trapena : 1; /**< [ 5: 5](R/W) Trap any access to nonzero node id. */
+ uint64_t ioatomicena : 1; /**< [ 4: 4](R/W) Enable SSO and PKO address region.
+ 0 = Accesses described below will trap.
+ 1 = Allow \> 64-bit memory instructions, multi-register memory instructions, and
+ atomic instructions to SSO and PKO I/O address regions. This must be set if SSO
+ or PKO are to be used.
+
+ Other address regions (e.g. SLI) are not affected by this setting. */
+ uint64_t lmtstena : 1; /**< [ 3: 3](R/W) Enable LMTST. */
+ uint64_t lodignoresh : 1; /**< [ 2: 2](R/W) LocalOrderDomain DMB/DSB_NSH{ST} ignores shareability (applies to both nsh and ish pages). */
+ uint64_t lodishena : 1; /**< [ 1: 1](R/W) LocalOrderDomain DMB/DSB_ISH{ST} enable. */
+ uint64_t lodnshena : 1; /**< [ 0: 0](R/W) LocalOrderDomain DMB/DSB_NSH{ST} enable. */
+#else /* Word 0 - Little Endian */
+ uint64_t lodnshena : 1; /**< [ 0: 0](R/W) LocalOrderDomain DMB/DSB_NSH{ST} enable. */
+ uint64_t lodishena : 1; /**< [ 1: 1](R/W) LocalOrderDomain DMB/DSB_ISH{ST} enable. */
+ uint64_t lodignoresh : 1; /**< [ 2: 2](R/W) LocalOrderDomain DMB/DSB_NSH{ST} ignores shareability (applies to both nsh and ish pages). */
+ uint64_t lmtstena : 1; /**< [ 3: 3](R/W) Enable LMTST. */
+ uint64_t ioatomicena : 1; /**< [ 4: 4](R/W) Enable SSO and PKO address region.
+ 0 = Accesses described below will trap.
+ 1 = Allow \> 64-bit memory instructions, multi-register memory instructions, and
+ atomic instructions to SSO and PKO I/O address regions. This must be set if SSO
+ or PKO are to be used.
+
+ Other address regions (e.g. SLI) are not affected by this setting. */
+ uint64_t node1trapena : 1; /**< [ 5: 5](R/W) Trap any access to nonzero node id. */
+ uint64_t switchtagena : 1; /**< [ 6: 6](R/W) Enable SSO switch-tag caching. The cache must be invalidated through e.g. use of
+ SSO_WS_CFG[SSO_SAI_FLUSH] before clearing this bit. */
+ uint64_t spare : 1; /**< [ 7: 7](R/W) Reserved; spare. */
+ uint64_t dprefbpmissthresh : 12; /**< [ 19: 8](R/W) Data-stream hardware prefetcher backpressure threshold for L2C miss latency. */
+ uint64_t dprefbphitthresh : 12; /**< [ 31: 20](R/W) Data-stream hardware prefetcher backpressure threshold for L2C hit latency. */
+ uint64_t dprefbpctl : 4; /**< [ 35: 32](R/W) Data-stream hardware prefetcher backpressure control mask for dual counter mode.
+ Internal:
+ Backpressure is applied if:
+ \<pre\>
+ ( ([DPREFBPCTL]\<0\> && !hit_ctr_bp && !miss_ctr_bp)
+ || ([DPREFBPCTL]\<1\> && !hit_ctr_bp && miss_ctr_bp)
+ || ([DPREFBPCTL]\<2\> && hit_ctr_bp && !miss_ctr_bp)
+ || ([DPREFBPCTL]\<3\> && hit_ctr_bp && miss_ctr_bp))
+ \</pre\>
+
+ Where hit_ctr_bp is the MSB of the 4-bit hit counter being set, and miss_ctr_bp
+ is the MSB of the 4-bit miss counter being set. */
+ uint64_t dprefbpmode : 1; /**< [ 36: 36](R/W) Data-stream hardware prefetcher backpressure mode select.
+ 0 = Single counter mode (combined hit and miss latency counter).
+ 1 = Dual counter mode (separate hit and miss latency counters). */
+ uint64_t tlbilocalicflush : 1; /**< [ 37: 37](R/W) Force ICache flush when any local TLBI is issued.
+ 0 = Do nothing.
+ 1 = Flush the ICache. */
+ uint64_t tlbiremoteicflush : 1; /**< [ 38: 38](R/W) Force ICache flush when any remote TLBI is received.
+ 0 = Do nothing.
+ 1 = Flush the ICache. */
+ uint64_t reserved_39_63 : 25;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_ap_cvmmemctl1_el1 bdk_ap_cvmmemctl1_el1_t;
+
+#define BDK_AP_CVMMEMCTL1_EL1 BDK_AP_CVMMEMCTL1_EL1_FUNC()
+static inline uint64_t BDK_AP_CVMMEMCTL1_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVMMEMCTL1_EL1_FUNC(void)
+{
+ return 0x3000b000500ll;
+}
+
+#define typedef_BDK_AP_CVMMEMCTL1_EL1 bdk_ap_cvmmemctl1_el1_t
+#define bustype_BDK_AP_CVMMEMCTL1_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVMMEMCTL1_EL1 "AP_CVMMEMCTL1_EL1"
+#define busnum_BDK_AP_CVMMEMCTL1_EL1 0
+#define arguments_BDK_AP_CVMMEMCTL1_EL1 -1,-1,-1,-1
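+
+/* Usage sketch (illustrative; the helper below is hypothetical and not part
+   of the imported BDK sources): composing a CN83XX view of this register
+   with LMTST and the SSO/PKO I/O address region enabled, using only the
+   bitfield layout defined above. Writing the value back to the system
+   register is outside the scope of this sketch. */
+static inline uint64_t example_cvmmemctl1_enable_sso_pko(uint64_t current)
+{
+    bdk_ap_cvmmemctl1_el1_t c;
+    c.u = current;            /* start from the current register value */
+    c.cn83xx.lmtstena = 1;    /* [3] enable LMTST */
+    c.cn83xx.ioatomicena = 1; /* [4] must be set if SSO or PKO are used */
+    return c.u;
+}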
+
+/**
+ * Register (SYSREG) ap_cvmmemctl2_el1
+ *
+ * AP Cavium Memory Control 2 Register
+ * This register controls additional memory-unit features.
+ * Internal:
+ * Back-end, non-debug.
+ */
+union bdk_ap_cvmmemctl2_el1
+{
+ uint64_t u;
+ struct bdk_ap_cvmmemctl2_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t rsvd_63_60 : 4; /**< [ 63: 60](R/W) Reserved. */
+ uint64_t tlbiremotegsyncall : 1; /**< [ 59: 59](R/W) Remote TLBI apply GSYNC semantics. For diagnostic use only. */
+ uint64_t tlbiremotemultidis : 1; /**< [ 58: 58](R/W) Remote TLBI multiple processing disable. For diagnostic use only. */
+ uint64_t tlbiremotebatchdis : 1; /**< [ 57: 57](R/W) Remote TLBI batch processing disable. For diagnostic use only. */
+ uint64_t l1dwaysm1 : 6; /**< [ 56: 51](R/W) Number of L1D WAYS minus one. */
+ uint64_t wbfentriesm1 : 5; /**< [ 50: 46](R/W) Number of MAF WBUF entries minus one. */
+ uint64_t rbfentriesm1 : 4; /**< [ 45: 42](R/W) Number of MAF RBUF entries minus one. */
+ uint64_t ptwspecdis : 1; /**< [ 41: 41](R/W) Disable page table walker access on speculative instructions. */
+ uint64_t ptwprefudis : 1; /**< [ 40: 40](R/W) Disable page table walker access on PREFU instructions. */
+ uint64_t ptwdhwprefdis : 1; /**< [ 39: 39](R/W) Disable page table walker access on dstream hardware prefetches. */
+ uint64_t ptwdswprefdis : 1; /**< [ 38: 38](R/W) Disable page table walker access on dstream software prefetches. */
+ uint64_t ptwihwprefdis : 1; /**< [ 37: 37](R/W) Disable page table walker access on istream hardware prefetches. */
+ uint64_t ptwiswprefdis : 1; /**< [ 36: 36](R/W) Disable page table walker access on istream software prefetches. */
+ uint64_t mtlbdhwprefdis : 1; /**< [ 35: 35](R/W) Disable MTLB access on dstream hardware prefetches. */
+ uint64_t mtlbdswprefdis : 1; /**< [ 34: 34](R/W) Disable MTLB access on dstream software prefetches. */
+ uint64_t mtlbihwprefdis : 1; /**< [ 33: 33](R/W) Disable MTLB access on istream hardware prefetches. */
+ uint64_t mtlbiswprefdis : 1; /**< [ 32: 32](R/W) Disable MTLB access on istream software prefetches. */
+ uint64_t rsvd_25_31 : 7; /**< [ 31: 25](R/W) Reserved. */
+ uint64_t tlbi_block_msk : 9; /**< [ 24: 16](R/W) Mask of block sizes that are precisely invalidated by TLBI instructions.
+ For each bit {a} in this field:
+ _ Mask\<{a}\>=0 = blocksize {a} is not precisely invalidated.
+ _ Mask\<{a}\>=1 = blocksize {a} is precisely invalidated.
+
+ _ Mask\<0\> represents block size 2^12.
+ _ Mask\<1\> represents block size 2^14.
+ _ Mask\<2\> represents block size 2^16.
+ _ Mask\<3\> represents block size 2^21.
+ _ Mask\<4\> represents block size 2^25.
+ _ Mask\<5\> represents block size 2^29.
+ _ Mask\<6\> represents block size 2^30.
+ _ Mask\<7\> represents block size 2^34.
+ _ Mask\<8\> represents block size 2^42. */
+ uint64_t rsvd_9_15 : 7; /**< [ 15: 9](R/W) Reserved. */
+ uint64_t mtlb0_block_msk : 9; /**< [ 8: 0](R/W) Mask of block sizes that are allocated in MTLB0.
+ For each bit {a} in this field:
+ _ Mask\<{a}\>=0 = blocksize {a} allocated in MTLB1.
+ _ Mask\<{a}\>=1 = blocksize {a} allocated in MTLB0.
+
+ _ Mask\<0\> represents block size 2^12.
+ _ Mask\<1\> represents block size 2^14.
+ _ Mask\<2\> represents block size 2^16.
+ _ Mask\<3\> represents block size 2^21.
+ _ Mask\<4\> represents block size 2^25.
+ _ Mask\<5\> represents block size 2^29.
+ _ Mask\<6\> represents block size 2^30.
+ _ Mask\<7\> represents block size 2^34.
+ _ Mask\<8\> represents block size 2^42. */
+#else /* Word 0 - Little Endian */
+ uint64_t mtlb0_block_msk : 9; /**< [ 8: 0](R/W) Mask of block sizes that are allocated in MTLB0.
+ For each bit {a} in this field:
+ _ Mask\<{a}\>=0 = blocksize {a} allocated in MTLB1.
+ _ Mask\<{a}\>=1 = blocksize {a} allocated in MTLB0.
+
+ _ Mask\<0\> represents block size 2^12.
+ _ Mask\<1\> represents block size 2^14.
+ _ Mask\<2\> represents block size 2^16.
+ _ Mask\<3\> represents block size 2^21.
+ _ Mask\<4\> represents block size 2^25.
+ _ Mask\<5\> represents block size 2^29.
+ _ Mask\<6\> represents block size 2^30.
+ _ Mask\<7\> represents block size 2^34.
+ _ Mask\<8\> represents block size 2^42. */
+ uint64_t rsvd_9_15 : 7; /**< [ 15: 9](R/W) Reserved. */
+ uint64_t tlbi_block_msk : 9; /**< [ 24: 16](R/W) Mask of block sizes that are precisely invalidated by TLBI instructions.
+ For each bit {a} in this field:
+ _ Mask\<{a}\>=0 = blocksize {a} is not precisely invalidated.
+ _ Mask\<{a}\>=1 = blocksize {a} is precisely invalidated.
+
+ _ Mask\<0\> represents block size 2^12.
+ _ Mask\<1\> represents block size 2^14.
+ _ Mask\<2\> represents block size 2^16.
+ _ Mask\<3\> represents block size 2^21.
+ _ Mask\<4\> represents block size 2^25.
+ _ Mask\<5\> represents block size 2^29.
+ _ Mask\<6\> represents block size 2^30.
+ _ Mask\<7\> represents block size 2^34.
+ _ Mask\<8\> represents block size 2^42. */
+ uint64_t rsvd_25_31 : 7; /**< [ 31: 25](R/W) Reserved. */
+ uint64_t mtlbiswprefdis : 1; /**< [ 32: 32](R/W) Disable MTLB access on istream software prefetches. */
+ uint64_t mtlbihwprefdis : 1; /**< [ 33: 33](R/W) Disable MTLB access on istream hardware prefetches. */
+ uint64_t mtlbdswprefdis : 1; /**< [ 34: 34](R/W) Disable MTLB access on dstream software prefetches. */
+ uint64_t mtlbdhwprefdis : 1; /**< [ 35: 35](R/W) Disable MTLB access on dstream hardware prefetches. */
+ uint64_t ptwiswprefdis : 1; /**< [ 36: 36](R/W) Disable page table walker access on istream software prefetches. */
+ uint64_t ptwihwprefdis : 1; /**< [ 37: 37](R/W) Disable page table walker access on istream hardware prefetches. */
+ uint64_t ptwdswprefdis : 1; /**< [ 38: 38](R/W) Disable page table walker access on dstream software prefetches. */
+ uint64_t ptwdhwprefdis : 1; /**< [ 39: 39](R/W) Disable page table walker access on dstream hardware prefetches. */
+ uint64_t ptwprefudis : 1; /**< [ 40: 40](R/W) Disable page table walker access on PREFU instructions. */
+ uint64_t ptwspecdis : 1; /**< [ 41: 41](R/W) Disable page table walker access on speculative instructions. */
+ uint64_t rbfentriesm1 : 4; /**< [ 45: 42](R/W) Number of MAF RBUF entries minus one. */
+ uint64_t wbfentriesm1 : 5; /**< [ 50: 46](R/W) Number of MAF WBUF entries minus one. */
+ uint64_t l1dwaysm1 : 6; /**< [ 56: 51](R/W) Number of L1D WAYS minus one. */
+ uint64_t tlbiremotebatchdis : 1; /**< [ 57: 57](R/W) Remote TLBI batch processing disable. For diagnostic use only. */
+ uint64_t tlbiremotemultidis : 1; /**< [ 58: 58](R/W) Remote TLBI multiple processing disable. For diagnostic use only. */
+ uint64_t tlbiremotegsyncall : 1; /**< [ 59: 59](R/W) Remote TLBI apply GSYNC semantics. For diagnostic use only. */
+ uint64_t rsvd_63_60 : 4; /**< [ 63: 60](R/W) Reserved. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvmmemctl2_el1_s cn; */
+};
+typedef union bdk_ap_cvmmemctl2_el1 bdk_ap_cvmmemctl2_el1_t;
+
+#define BDK_AP_CVMMEMCTL2_EL1 BDK_AP_CVMMEMCTL2_EL1_FUNC()
+static inline uint64_t BDK_AP_CVMMEMCTL2_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVMMEMCTL2_EL1_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x3000b000600ll;
+ __bdk_csr_fatal("AP_CVMMEMCTL2_EL1", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_CVMMEMCTL2_EL1 bdk_ap_cvmmemctl2_el1_t
+#define bustype_BDK_AP_CVMMEMCTL2_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVMMEMCTL2_EL1 "AP_CVMMEMCTL2_EL1"
+#define busnum_BDK_AP_CVMMEMCTL2_EL1 0
+#define arguments_BDK_AP_CVMMEMCTL2_EL1 -1,-1,-1,-1
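+
+/* Usage sketch (illustrative; hypothetical helper): mapping a block size,
+   given as log2(bytes), to its bit position in [TLBI_BLOCK_MSK] or
+   [MTLB0_BLOCK_MSK], following the encoding table above. Returns -1 for
+   sizes the mask cannot express. */
+static inline int example_blocksize_to_mask_bit(int log2_size)
+{
+    static const int log2_by_bit[9] = { 12, 14, 16, 21, 25, 29, 30, 34, 42 };
+    for (int bit = 0; bit < 9; bit++)
+        if (log2_by_bit[bit] == log2_size)
+            return bit;
+    return -1; /* not one of the nine architected block sizes */
+}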
+
+/**
+ * Register (SYSREG) ap_cvmmemctl3_el1
+ *
+ * AP Cavium Memory Control 3 Register
+ * This register controls additional memory-unit features.
+ * Internal:
+ * Back-end, non-debug.
+ */
+union bdk_ap_cvmmemctl3_el1
+{
+ uint64_t u;
+ struct bdk_ap_cvmmemctl3_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t rsvd_63_12 : 52; /**< [ 63: 12](RAZ) Reserved. */
+ uint64_t iopredclrfreq : 4; /**< [ 11: 8](R/W) IO predictor clear frequency. For diagnostic use only. */
+ uint64_t unalignpredclrfreq : 4; /**< [ 7: 4](R/W) Unaligned predictor clear frequency. For diagnostic use only. */
+ uint64_t ldstpredclrfreq : 4; /**< [ 3: 0](R/W) Load-store predictor clear frequency. For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t ldstpredclrfreq : 4; /**< [ 3: 0](R/W) Load-store predictor clear frequency. For diagnostic use only. */
+ uint64_t unalignpredclrfreq : 4; /**< [ 7: 4](R/W) Unaligned predictor clear frequency. For diagnostic use only. */
+ uint64_t iopredclrfreq : 4; /**< [ 11: 8](R/W) IO predictor clear frequency. For diagnostic use only. */
+ uint64_t rsvd_63_12 : 52; /**< [ 63: 12](RAZ) Reserved. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_cvmmemctl3_el1_s cn; */
+};
+typedef union bdk_ap_cvmmemctl3_el1 bdk_ap_cvmmemctl3_el1_t;
+
+#define BDK_AP_CVMMEMCTL3_EL1 BDK_AP_CVMMEMCTL3_EL1_FUNC()
+static inline uint64_t BDK_AP_CVMMEMCTL3_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_CVMMEMCTL3_EL1_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x3000b000700ll;
+ __bdk_csr_fatal("AP_CVMMEMCTL3_EL1", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_CVMMEMCTL3_EL1 bdk_ap_cvmmemctl3_el1_t
+#define bustype_BDK_AP_CVMMEMCTL3_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_CVMMEMCTL3_EL1 "AP_CVMMEMCTL3_EL1"
+#define busnum_BDK_AP_CVMMEMCTL3_EL1 0
+#define arguments_BDK_AP_CVMMEMCTL3_EL1 -1,-1,-1,-1
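+
+/* Usage sketch (illustrative; hypothetical helper): unpacking the three
+   4-bit predictor clear-frequency fields from a raw AP_CVMMEMCTL3_EL1
+   value via the union above. */
+static inline void example_cvmmemctl3_decode(uint64_t raw, int *ldst,
+                                             int *unalign, int *io)
+{
+    bdk_ap_cvmmemctl3_el1_t c;
+    c.u = raw;
+    *ldst    = c.s.ldstpredclrfreq;    /* [3:0]  */
+    *unalign = c.s.unalignpredclrfreq; /* [7:4]  */
+    *io      = c.s.iopredclrfreq;      /* [11:8] */
+}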
+
+/**
+ * Register (SYSREG) ap_dacr32_el2
+ *
+ * AP Domain Access Control Register
+ * Allows access to the AArch32 DACR register from AArch64 state
+ * only. Its value has no effect on execution in AArch64 state.
+ */
+union bdk_ap_dacr32_el2
+{
+ uint32_t u;
+ struct bdk_ap_dacr32_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_dacr32_el2_s cn; */
+};
+typedef union bdk_ap_dacr32_el2 bdk_ap_dacr32_el2_t;
+
+#define BDK_AP_DACR32_EL2 BDK_AP_DACR32_EL2_FUNC()
+static inline uint64_t BDK_AP_DACR32_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_DACR32_EL2_FUNC(void)
+{
+ return 0x30403000000ll;
+}
+
+#define typedef_BDK_AP_DACR32_EL2 bdk_ap_dacr32_el2_t
+#define bustype_BDK_AP_DACR32_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_DACR32_EL2 "AP_DACR32_EL2"
+#define busnum_BDK_AP_DACR32_EL2 0
+#define arguments_BDK_AP_DACR32_EL2 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_daif
+ *
+ * AP Interrupt Mask Bits Register
+ * Allows access to the interrupt mask bits.
+ */
+union bdk_ap_daif
+{
+ uint32_t u;
+ struct bdk_ap_daif_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_10_31 : 22;
+ uint32_t dd : 1; /**< [ 9: 9](R/W) Process state D mask.
+                                                                 When the target Exception level of the debug exception is higher
+                                                                 than the current Exception level, the exception is not masked
+ by this bit.
+ 0 = Debug exceptions from Watchpoint, Breakpoint, and Software
+ step debug events targeted at the current Exception level are
+ not masked.
+ 1 = Debug exceptions from Watchpoint, Breakpoint, and Software
+ step debug events targeted at the current Exception level are
+ masked. */
+ uint32_t aa : 1; /**< [ 8: 8](R/W) SError (System Error) mask bit.
+ 0 = Exception not masked.
+ 1 = Exception masked. */
+ uint32_t i : 1; /**< [ 7: 7](R/W) IRQ mask bit.
+ 0 = Exception not masked.
+ 1 = Exception masked. */
+ uint32_t f : 1; /**< [ 6: 6](R/W) FIQ mask bit.
+ 0 = Exception not masked.
+ 1 = Exception masked. */
+ uint32_t reserved_0_5 : 6;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_5 : 6;
+ uint32_t f : 1; /**< [ 6: 6](R/W) FIQ mask bit.
+ 0 = Exception not masked.
+ 1 = Exception masked. */
+ uint32_t i : 1; /**< [ 7: 7](R/W) IRQ mask bit.
+ 0 = Exception not masked.
+ 1 = Exception masked. */
+ uint32_t aa : 1; /**< [ 8: 8](R/W) SError (System Error) mask bit.
+ 0 = Exception not masked.
+ 1 = Exception masked. */
+ uint32_t dd : 1; /**< [ 9: 9](R/W) Process state D mask.
+                                                                 When the target Exception level of the debug exception is higher
+                                                                 than the current Exception level, the exception is not masked
+ by this bit.
+ 0 = Debug exceptions from Watchpoint, Breakpoint, and Software
+ step debug events targeted at the current Exception level are
+ not masked.
+ 1 = Debug exceptions from Watchpoint, Breakpoint, and Software
+ step debug events targeted at the current Exception level are
+ masked. */
+ uint32_t reserved_10_31 : 22;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_daif_s cn; */
+};
+typedef union bdk_ap_daif bdk_ap_daif_t;
+
+#define BDK_AP_DAIF BDK_AP_DAIF_FUNC()
+static inline uint64_t BDK_AP_DAIF_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_DAIF_FUNC(void)
+{
+ return 0x30304020100ll;
+}
+
+#define typedef_BDK_AP_DAIF bdk_ap_daif_t
+#define bustype_BDK_AP_DAIF BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_DAIF "AP_DAIF"
+#define busnum_BDK_AP_DAIF 0
+#define arguments_BDK_AP_DAIF -1,-1,-1,-1
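+
+/* Usage sketch (illustrative; hypothetical helper): testing the IRQ mask
+   bit in a raw DAIF value (e.g. one read with "mrs %0, daif"). */
+static inline int example_irqs_masked(uint32_t raw_daif)
+{
+    bdk_ap_daif_t d;
+    d.u = raw_daif;
+    return d.s.i; /* 1 = IRQ exceptions are masked */
+}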
+
+/**
+ * Register (SYSREG) ap_dbgauthstatus_el1
+ *
+ * AP Debug Authentication Status Register
+ * Provides information about the state of the implementation
+ * defined authentication interface for debug.
+ */
+union bdk_ap_dbgauthstatus_el1
+{
+ uint32_t u;
+ struct bdk_ap_dbgauthstatus_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t snid : 2; /**< [ 7: 6](RO) Secure non-invasive debug.
+ 0x0 = Not implemented. EL3 is not implemented and the processor is
+ nonsecure.
+ 0x2 = Implemented and disabled.
+ 0x3 = Implemented and enabled. */
+ uint32_t sid : 2; /**< [ 5: 4](RO) Secure invasive debug.
+ 0x0 = Not implemented. EL3 is not implemented and the processor is
+ nonsecure.
+ 0x2 = Implemented and disabled.
+ 0x3 = Implemented and enabled. */
+ uint32_t nsnid : 2; /**< [ 3: 2](RO) Nonsecure non-invasive debug.
+ 0x0 = Not implemented. EL3 is not implemented and the processor is
+ Secure.
+ 0x2 = Implemented and disabled.
+ 0x3 = Implemented and enabled. */
+ uint32_t nsid : 2; /**< [ 1: 0](RO) Nonsecure invasive debug.
+                                                                 0x0 = Not implemented. EL3 is not implemented and the processor is
+                                                                 Secure.
+ 0x2 = Implemented and disabled.
+ 0x3 = Implemented and enabled. */
+#else /* Word 0 - Little Endian */
+ uint32_t nsid : 2; /**< [ 1: 0](RO) Nonsecure invasive debug.
+                                                                 0x0 = Not implemented. EL3 is not implemented and the processor is
+                                                                 Secure.
+ 0x2 = Implemented and disabled.
+ 0x3 = Implemented and enabled. */
+ uint32_t nsnid : 2; /**< [ 3: 2](RO) Nonsecure non-invasive debug.
+ 0x0 = Not implemented. EL3 is not implemented and the processor is
+ Secure.
+ 0x2 = Implemented and disabled.
+ 0x3 = Implemented and enabled. */
+ uint32_t sid : 2; /**< [ 5: 4](RO) Secure invasive debug.
+ 0x0 = Not implemented. EL3 is not implemented and the processor is
+ nonsecure.
+ 0x2 = Implemented and disabled.
+ 0x3 = Implemented and enabled. */
+ uint32_t snid : 2; /**< [ 7: 6](RO) Secure non-invasive debug.
+ 0x0 = Not implemented. EL3 is not implemented and the processor is
+ nonsecure.
+ 0x2 = Implemented and disabled.
+ 0x3 = Implemented and enabled. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_dbgauthstatus_el1_s cn; */
+};
+typedef union bdk_ap_dbgauthstatus_el1 bdk_ap_dbgauthstatus_el1_t;
+
+#define BDK_AP_DBGAUTHSTATUS_EL1 BDK_AP_DBGAUTHSTATUS_EL1_FUNC()
+static inline uint64_t BDK_AP_DBGAUTHSTATUS_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_DBGAUTHSTATUS_EL1_FUNC(void)
+{
+ return 0x200070e0600ll;
+}
+
+#define typedef_BDK_AP_DBGAUTHSTATUS_EL1 bdk_ap_dbgauthstatus_el1_t
+#define bustype_BDK_AP_DBGAUTHSTATUS_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_DBGAUTHSTATUS_EL1 "AP_DBGAUTHSTATUS_EL1"
+#define busnum_BDK_AP_DBGAUTHSTATUS_EL1 0
+#define arguments_BDK_AP_DBGAUTHSTATUS_EL1 -1,-1,-1,-1
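+
+/* Usage sketch (illustrative; hypothetical helper): checking whether
+   nonsecure invasive debug is implemented and enabled, i.e. [NSID] reads
+   0x3 per the encoding above. */
+static inline int example_ns_invasive_debug_enabled(uint32_t raw)
+{
+    bdk_ap_dbgauthstatus_el1_t a;
+    a.u = raw;
+    return a.s.nsid == 0x3;
+}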
+
+/**
+ * Register (SYSREG) ap_dbgbcr#_el1
+ *
+ * AP Debug Breakpoint Control Registers
+ * Holds control information for a breakpoint. Forms breakpoint n
+ * together with value register DBGBVR\<n\>_EL1, where n is 0 to
+ * 15.
+ */
+union bdk_ap_dbgbcrx_el1
+{
+ uint32_t u;
+ struct bdk_ap_dbgbcrx_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t bt : 4; /**< [ 23: 20](R/W) Breakpoint Type.
+ The field breaks down as follows:
+                                                                 BT[3:1]: Base type.
+                                                                 - 0b000: Match address. DBGBVR\<n\>_EL1 is the address of an
+                                                                   instruction.
+                                                                 - 0b010: Mismatch address. Behaves as type 0b000, but
+                                                                   DBGBVR\<n\>_EL1 is the address of an instruction to be stepped.
+                                                                 - 0b001: Match context ID. DBGBVR\<n\>_EL1[31:0] is a context ID.
+                                                                 - 0b100: Match VMID. DBGBVR\<n\>_EL1[39:32] is a VMID.
+                                                                 - 0b101: Match VMID and context ID. DBGBVR\<n\>_EL1[31:0] is a
+                                                                   context ID, and DBGBVR\<n\>_EL1[39:32] is a VMID.
+ BT[0]: Enable linking.
+
+ If the breakpoint is not context-aware, BT[3] and BT[1] are
+ RES0. If EL2 is not implemented, BT[3] is RES0. If EL1 using
+ AArch32 is not implemented, BT[2] is RES0.
+
+ 0x0 = Unlinked address match.
+ 0x1 = Linked address match.
+ 0x2 = Unlinked context ID match.
+                                                                 0x3 = Linked context ID match.
+ 0x4 = Unlinked address mismatch.
+ 0x5 = Linked address mismatch.
+ 0x8 = Unlinked VMID match.
+ 0x9 = Linked VMID match.
+ 0xA = Unlinked VMID and context ID match.
+ 0xB = Linked VMID and context ID match. */
+ uint32_t lbn : 4; /**< [ 19: 16](R/W) Linked breakpoint number. For Linked address matching
+ breakpoints, this specifies the index of the Context-matching
+ breakpoint linked to. */
+ uint32_t ssc : 2; /**< [ 15: 14](R/W) Security state control. Determines the Security states under
+ which a breakpoint debug event for breakpoint n is generated.
+ This field must be interpreted along with the HMC and PMC
+ fields. */
+ uint32_t hmc : 1; /**< [ 13: 13](R/W) Higher mode control. Determines the debug perspective for
+ deciding when a breakpoint debug event for breakpoint n is
+ generated. This field must be interpreted along with the SSC
+ and PMC fields. */
+ uint32_t reserved_9_12 : 4;
+ uint32_t rsvd_5_8 : 4; /**< [ 8: 5](RO) Byte address select. Defines which half-words an address-
+ matching breakpoint matches, regardless of the instruction set
+ and Execution state. In an AArch64-only implementation, this
+ field is reserved, RES1. Otherwise:
+
+ BAS[2] and BAS[0] are read/write.
+
+ BAS[3] and BAS[1] are read-only copies of BAS[2] and BAS[0]
+ respectively.
+
+ The permitted values depend on the breakpoint type.
+
+ For Address match breakpoints in either AArch32 or AArch64
+ state:
+ BAS Match instruction at Constraint for debuggers
+ 0b0011 DBGBVR\<n\>_EL1 Use for T32 instructions.
+ 0b1100 DBGBVR\<n\>_EL1+2 Use for T32 instructions.
+ 0b1111 DBGBVR\<n\>_EL1 Use for A64 and A32 instructions.
+                                                                 All other values, including 0b0000, are reserved.
+
+ For Address mismatch breakpoints in an AArch32 stage 1
+ translation regime:
+ BAS Step instruction at Constraint for debuggers
+ 0b0000 - Use for a match anywhere breakpoint.
+ 0b0011 DBGBVR\<n\>_EL1 Use for stepping T32 instructions.
+ 0b1100 DBGBVR\<n\>_EL1+2 Use for stepping T32 instructions.
+ 0b1111 DBGBVR\<n\>_EL1 Use for stepping A32 instructions.
+
+ For Context matching breakpoints, this field is RES1 and
+ ignored. */
+ uint32_t reserved_3_4 : 2;
+ uint32_t pmc : 2; /**< [ 2: 1](R/W) Privilege mode control. Determines the Exception level or
+ levels at which a breakpoint debug event for breakpoint n is
+ generated. This field must be interpreted along with the SSC
+ and HMC fields. */
+ uint32_t ee : 1; /**< [ 0: 0](R/W) Enable breakpoint DBGBVR\<n\>_EL1.
+ 0 = Breakpoint disabled.
+ 1 = Breakpoint enabled. */
+#else /* Word 0 - Little Endian */
+ uint32_t ee : 1; /**< [ 0: 0](R/W) Enable breakpoint DBGBVR\<n\>_EL1.
+ 0 = Breakpoint disabled.
+ 1 = Breakpoint enabled. */
+ uint32_t pmc : 2; /**< [ 2: 1](R/W) Privilege mode control. Determines the Exception level or
+ levels at which a breakpoint debug event for breakpoint n is
+ generated. This field must be interpreted along with the SSC
+ and HMC fields. */
+ uint32_t reserved_3_4 : 2;
+ uint32_t rsvd_5_8 : 4; /**< [ 8: 5](RO) Byte address select. Defines which half-words an address-
+ matching breakpoint matches, regardless of the instruction set
+ and Execution state. In an AArch64-only implementation, this
+ field is reserved, RES1. Otherwise:
+
+ BAS[2] and BAS[0] are read/write.
+
+ BAS[3] and BAS[1] are read-only copies of BAS[2] and BAS[0]
+ respectively.
+
+ The permitted values depend on the breakpoint type.
+
+ For Address match breakpoints in either AArch32 or AArch64
+ state:
+ BAS Match instruction at Constraint for debuggers
+ 0b0011 DBGBVR\<n\>_EL1 Use for T32 instructions.
+ 0b1100 DBGBVR\<n\>_EL1+2 Use for T32 instructions.
+ 0b1111 DBGBVR\<n\>_EL1 Use for A64 and A32 instructions.
+                                                                 All other values, including 0b0000, are reserved.
+
+ For Address mismatch breakpoints in an AArch32 stage 1
+ translation regime:
+ BAS Step instruction at Constraint for debuggers
+ 0b0000 - Use for a match anywhere breakpoint.
+ 0b0011 DBGBVR\<n\>_EL1 Use for stepping T32 instructions.
+ 0b1100 DBGBVR\<n\>_EL1+2 Use for stepping T32 instructions.
+ 0b1111 DBGBVR\<n\>_EL1 Use for stepping A32 instructions.
+
+ For Context matching breakpoints, this field is RES1 and
+ ignored. */
+ uint32_t reserved_9_12 : 4;
+ uint32_t hmc : 1; /**< [ 13: 13](R/W) Higher mode control. Determines the debug perspective for
+ deciding when a breakpoint debug event for breakpoint n is
+ generated. This field must be interpreted along with the SSC
+ and PMC fields. */
+ uint32_t ssc : 2; /**< [ 15: 14](R/W) Security state control. Determines the Security states under
+ which a breakpoint debug event for breakpoint n is generated.
+ This field must be interpreted along with the HMC and PMC
+ fields. */
+ uint32_t lbn : 4; /**< [ 19: 16](R/W) Linked breakpoint number. For Linked address matching
+ breakpoints, this specifies the index of the Context-matching
+ breakpoint linked to. */
+ uint32_t bt : 4; /**< [ 23: 20](R/W) Breakpoint Type.
+ The field breaks down as follows:
+                                                                 BT[3:1]: Base type.
+                                                                 - 0b000: Match address. DBGBVR\<n\>_EL1 is the address of an
+                                                                   instruction.
+                                                                 - 0b010: Mismatch address. Behaves as type 0b000, but
+                                                                   DBGBVR\<n\>_EL1 is the address of an instruction to be stepped.
+                                                                 - 0b001: Match context ID. DBGBVR\<n\>_EL1[31:0] is a context ID.
+                                                                 - 0b100: Match VMID. DBGBVR\<n\>_EL1[39:32] is a VMID.
+                                                                 - 0b101: Match VMID and context ID. DBGBVR\<n\>_EL1[31:0] is a
+                                                                   context ID, and DBGBVR\<n\>_EL1[39:32] is a VMID.
+ BT[0]: Enable linking.
+
+ If the breakpoint is not context-aware, BT[3] and BT[1] are
+ RES0. If EL2 is not implemented, BT[3] is RES0. If EL1 using
+ AArch32 is not implemented, BT[2] is RES0.
+
+ 0x0 = Unlinked address match.
+ 0x1 = Linked address match.
+ 0x2 = Unlinked context ID match.
+                                                                 0x3 = Linked context ID match.
+ 0x4 = Unlinked address mismatch.
+ 0x5 = Linked address mismatch.
+ 0x8 = Unlinked VMID match.
+ 0x9 = Linked VMID match.
+ 0xA = Unlinked VMID and context ID match.
+ 0xB = Linked VMID and context ID match. */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_dbgbcrx_el1_s cn; */
+};
+typedef union bdk_ap_dbgbcrx_el1 bdk_ap_dbgbcrx_el1_t;
+
+static inline uint64_t BDK_AP_DBGBCRX_EL1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_DBGBCRX_EL1(unsigned long a)
+{
+ if (a<=15)
+ return 0x20000000500ll + 0x10000ll * ((a) & 0xf);
+ __bdk_csr_fatal("AP_DBGBCRX_EL1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_DBGBCRX_EL1(a) bdk_ap_dbgbcrx_el1_t
+#define bustype_BDK_AP_DBGBCRX_EL1(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_DBGBCRX_EL1(a) "AP_DBGBCRX_EL1"
+#define busnum_BDK_AP_DBGBCRX_EL1(a) (a)
+#define arguments_BDK_AP_DBGBCRX_EL1(a) (a),-1,-1,-1
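+
+/* Usage sketch (illustrative; hypothetical helper): composing a control
+   value for an enabled, unlinked address-match breakpoint ([BT] = 0x0).
+   The [SSC]/[HMC]/[PMC] encoding used here (match at EL1 and EL0 in either
+   Security state) is an assumption from the ARM debug architecture, not
+   something stated in the field notes above. */
+static inline uint32_t example_dbgbcr_addr_match(void)
+{
+    bdk_ap_dbgbcrx_el1_t b;
+    b.u = 0;
+    b.s.bt  = 0x0; /* unlinked address match */
+    b.s.ssc = 0x0;
+    b.s.hmc = 0;
+    b.s.pmc = 0x3; /* assumed encoding: EL1 and EL0 */
+    b.s.ee  = 1;   /* enable the breakpoint */
+    return b.u;
+}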
+
+/**
+ * Register (SYSREG) ap_dbgbvr#_el1
+ *
+ * AP Debug Breakpoint Value Registers
+ * Holds a virtual address, or a VMID and/or a context ID, for
+ * use in breakpoint matching. Forms breakpoint n together with
+ * control register DBGBCR\<n\>_EL1, where n is 0 to 15.
+ */
+union bdk_ap_dbgbvrx_el1
+{
+ uint64_t u;
+ struct bdk_ap_dbgbvrx_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t data                  : 64; /**< [ 63:  0](R/W) Data for breakpoint value. This doesn't match the ARM docs, as
+                                                                 they have many encodings of the same register.
+                                                                 Bits [63:49] are sign extended: hardwired to the value of the
+                                                                 sign bit, bit [48]. Hardware and software must treat those
+                                                                 bits as RES0 if bit [48] is 0, and as RES1 if bit [48] is 1. */
+#else /* Word 0 - Little Endian */
+        uint64_t data                  : 64; /**< [ 63:  0](R/W) Data for breakpoint value. This doesn't match the ARM docs, as
+                                                                 they have many encodings of the same register.
+                                                                 Bits [63:49] are sign extended: hardwired to the value of the
+                                                                 sign bit, bit [48]. Hardware and software must treat those
+                                                                 bits as RES0 if bit [48] is 0, and as RES1 if bit [48] is 1. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_dbgbvrx_el1_s cn; */
+};
+typedef union bdk_ap_dbgbvrx_el1 bdk_ap_dbgbvrx_el1_t;
+
+static inline uint64_t BDK_AP_DBGBVRX_EL1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_DBGBVRX_EL1(unsigned long a)
+{
+ if (a<=15)
+ return 0x20000000400ll + 0x10000ll * ((a) & 0xf);
+ __bdk_csr_fatal("AP_DBGBVRX_EL1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_DBGBVRX_EL1(a) bdk_ap_dbgbvrx_el1_t
+#define bustype_BDK_AP_DBGBVRX_EL1(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_DBGBVRX_EL1(a) "AP_DBGBVRX_EL1"
+#define busnum_BDK_AP_DBGBVRX_EL1(a) (a)
+#define arguments_BDK_AP_DBGBVRX_EL1(a) (a),-1,-1,-1
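+
+/* Usage sketch (illustrative; hypothetical helper): checking that a
+   breakpoint address is canonical, i.e. bits [63:49] replicate bit [48]
+   as the field description above requires. */
+static inline int example_dbgbvr_is_canonical(uint64_t va)
+{
+    int64_t sext = (int64_t)(va << 15) >> 15; /* replicate bit 48 upward */
+    return va == (uint64_t)sext;
+}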
+
+/**
+ * Register (SYSREG) ap_dbgclaimclr_el1
+ *
+ * AP Debug Claim Tag Clear Register
+ * Used by software to read the values of the CLAIM bits, and to
+ * clear these bits to 0.
+ */
+union bdk_ap_dbgclaimclr_el1
+{
+ uint32_t u;
+ struct bdk_ap_dbgclaimclr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t claim : 8; /**< [ 7: 0](R/W) Claim clear bits. Reading this field returns the current value
+ of the CLAIM bits.
+ Writing a 1 to one of these bits clears the corresponding
+ CLAIM bit to 0. This is an indirect write to the CLAIM bits.
+ A single write operation can clear multiple bits to 0. Writing
+ 0 to one of these bits has no effect. */
+#else /* Word 0 - Little Endian */
+ uint32_t claim : 8; /**< [ 7: 0](R/W) Claim clear bits. Reading this field returns the current value
+ of the CLAIM bits.
+ Writing a 1 to one of these bits clears the corresponding
+ CLAIM bit to 0. This is an indirect write to the CLAIM bits.
+ A single write operation can clear multiple bits to 0. Writing
+ 0 to one of these bits has no effect. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_dbgclaimclr_el1_s cn; */
+};
+typedef union bdk_ap_dbgclaimclr_el1 bdk_ap_dbgclaimclr_el1_t;
+
+#define BDK_AP_DBGCLAIMCLR_EL1 BDK_AP_DBGCLAIMCLR_EL1_FUNC()
+static inline uint64_t BDK_AP_DBGCLAIMCLR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_DBGCLAIMCLR_EL1_FUNC(void)
+{
+ return 0x20007090600ll;
+}
+
+#define typedef_BDK_AP_DBGCLAIMCLR_EL1 bdk_ap_dbgclaimclr_el1_t
+#define bustype_BDK_AP_DBGCLAIMCLR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_DBGCLAIMCLR_EL1 "AP_DBGCLAIMCLR_EL1"
+#define busnum_BDK_AP_DBGCLAIMCLR_EL1 0
+#define arguments_BDK_AP_DBGCLAIMCLR_EL1 -1,-1,-1,-1
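+
+/* Usage sketch (illustrative; hypothetical helper): a write value that
+   clears CLAIM bits 0 and 1. Since writing 1s clears the corresponding
+   bits and 0s are ignored, no read-modify-write is needed. */
+static inline uint32_t example_dbgclaimclr_bits01(void)
+{
+    bdk_ap_dbgclaimclr_el1_t c;
+    c.u = 0;
+    c.s.claim = (1u << 0) | (1u << 1);
+    return c.u;
+}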
+
+/**
+ * Register (SYSREG) ap_dbgclaimset_el1
+ *
+ * AP Debug Claim Tag Set Register
+ * Used by software to set CLAIM bits to 1.
+ */
+union bdk_ap_dbgclaimset_el1
+{
+ uint32_t u;
+ struct bdk_ap_dbgclaimset_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t claim : 8; /**< [ 7: 0](R/W) Claim set bits. RAO.
+ Writing a 1 to one of these bits sets the corresponding CLAIM
+ bit to 1. This is an indirect write to the CLAIM bits.
+ A single write operation can set multiple bits to 1. Writing 0
+ to one of these bits has no effect. */
+#else /* Word 0 - Little Endian */
+ uint32_t claim : 8; /**< [ 7: 0](R/W) Claim set bits. RAO.
+ Writing a 1 to one of these bits sets the corresponding CLAIM
+ bit to 1. This is an indirect write to the CLAIM bits.
+ A single write operation can set multiple bits to 1. Writing 0
+ to one of these bits has no effect. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_dbgclaimset_el1_s cn; */
+};
+typedef union bdk_ap_dbgclaimset_el1 bdk_ap_dbgclaimset_el1_t;
+
+#define BDK_AP_DBGCLAIMSET_EL1 BDK_AP_DBGCLAIMSET_EL1_FUNC()
+static inline uint64_t BDK_AP_DBGCLAIMSET_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_DBGCLAIMSET_EL1_FUNC(void)
+{
+ return 0x20007080600ll;
+}
+
+#define typedef_BDK_AP_DBGCLAIMSET_EL1 bdk_ap_dbgclaimset_el1_t
+#define bustype_BDK_AP_DBGCLAIMSET_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_DBGCLAIMSET_EL1 "AP_DBGCLAIMSET_EL1"
+#define busnum_BDK_AP_DBGCLAIMSET_EL1 0
+#define arguments_BDK_AP_DBGCLAIMSET_EL1 -1,-1,-1,-1
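+
+/* Usage sketch (illustrative; hypothetical helper): the set register
+   mirrors the clear register above -- writing 1s sets CLAIM bits and 0s
+   are ignored, while the current CLAIM value is read back through
+   AP_DBGCLAIMCLR_EL1. */
+static inline uint32_t example_dbgclaimset_bit(unsigned bit)
+{
+    bdk_ap_dbgclaimset_el1_t s;
+    s.u = 0;
+    s.s.claim = 1u << (bit & 7); /* CLAIM is 8 bits wide */
+    return s.u;
+}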
+
+/**
+ * Register (SYSREG) ap_dbgdtr_el0
+ *
+ * AP Debug Data Transfer Half-Duplex Register
+ * Transfers 64 bits of data between the processor and an
+ * external host. Can transfer both ways using only a single
+ * register.
+ */
+union bdk_ap_dbgdtr_el0
+{
+ uint64_t u;
+ struct bdk_ap_dbgdtr_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t highword : 32; /**< [ 63: 32](R/W) Writes to this register set DTRRX to the value in this field.
+ Reads from this register return the value of DTRTX. */
+ uint64_t lowword : 32; /**< [ 31: 0](R/W) Writes to this register set DTRTX to the value in this field.
+ Reads from this register return the value of DTRRX. */
+#else /* Word 0 - Little Endian */
+ uint64_t lowword : 32; /**< [ 31: 0](R/W) Writes to this register set DTRTX to the value in this field.
+ Reads from this register return the value of DTRRX. */
+ uint64_t highword : 32; /**< [ 63: 32](R/W) Writes to this register set DTRRX to the value in this field.
+ Reads from this register return the value of DTRTX. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_dbgdtr_el0_s cn; */
+};
+typedef union bdk_ap_dbgdtr_el0 bdk_ap_dbgdtr_el0_t;
+
+#define BDK_AP_DBGDTR_EL0 BDK_AP_DBGDTR_EL0_FUNC()
+static inline uint64_t BDK_AP_DBGDTR_EL0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_DBGDTR_EL0_FUNC(void)
+{
+ return 0x20300040000ll;
+}
+
+#define typedef_BDK_AP_DBGDTR_EL0 bdk_ap_dbgdtr_el0_t
+#define bustype_BDK_AP_DBGDTR_EL0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_DBGDTR_EL0 "AP_DBGDTR_EL0"
+#define busnum_BDK_AP_DBGDTR_EL0 0
+#define arguments_BDK_AP_DBGDTR_EL0 -1,-1,-1,-1
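+
+/* Usage sketch (illustrative; hypothetical helper): packing two 32-bit
+   words for a 64-bit half-duplex transfer. Per the field notes above, a
+   write sends [LOWWORD] to DTRTX and [HIGHWORD] to DTRRX. */
+static inline uint64_t example_dbgdtr_pack(uint32_t hi, uint32_t lo)
+{
+    bdk_ap_dbgdtr_el0_t d;
+    d.u = 0;
+    d.s.lowword  = lo;
+    d.s.highword = hi;
+    return d.u;
+}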
+
+/**
+ * Register (SYSREG) ap_dbgdtrrx_el0
+ *
+ * AP Debug Data Transfer Receive Register
+ * Transfers 32 bits of data from an external host to the
+ * processor.
+ *
+ * This register is at the same select as AP_DBGDTRTX_EL0.
+ */
+union bdk_ap_dbgdtrrx_el0
+{
+ uint32_t u;
+ struct bdk_ap_dbgdtrrx_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t data                  : 32; /**< [ 31:  0](RO) Host to target data. One word of data for transfer from the
+ debug host to the debug target.
+ For the full behavior of the Debug Communications Channel, see
+ section 9 (The Debug Communications Channel and Instruction
+ Transfer Register) in document PRD03-PRDC-010486. */
+#else /* Word 0 - Little Endian */
+        uint32_t data                  : 32; /**< [ 31:  0](RO) Host to target data. One word of data for transfer from the
+ debug host to the debug target.
+ For the full behavior of the Debug Communications Channel, see
+ section 9 (The Debug Communications Channel and Instruction
+ Transfer Register) in document PRD03-PRDC-010486. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_dbgdtrrx_el0_s cn; */
+};
+typedef union bdk_ap_dbgdtrrx_el0 bdk_ap_dbgdtrrx_el0_t;
+
+#define BDK_AP_DBGDTRRX_EL0 BDK_AP_DBGDTRRX_EL0_FUNC()
+static inline uint64_t BDK_AP_DBGDTRRX_EL0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_DBGDTRRX_EL0_FUNC(void)
+{
+ return 0x20300050000ll;
+}
+
+#define typedef_BDK_AP_DBGDTRRX_EL0 bdk_ap_dbgdtrrx_el0_t
+#define bustype_BDK_AP_DBGDTRRX_EL0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_DBGDTRRX_EL0 "AP_DBGDTRRX_EL0"
+#define busnum_BDK_AP_DBGDTRRX_EL0 0
+#define arguments_BDK_AP_DBGDTRRX_EL0 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_dbgdtrtx_el0
+ *
+ * AP Debug Data Transfer Transmit Register
+ * Transfers 32 bits of data from the processor to an external
+ * host.
+ *
+ * This register is at the same select as AP_DBGDTRRX_EL0.
+ */
+union bdk_ap_dbgdtrtx_el0
+{
+ uint32_t u;
+ struct bdk_ap_dbgdtrtx_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t data : 32; /**< [ 31: 0](RO) Target to host data. One word of data for transfer from the
+ debug target to the debug host.
+ For the full behavior of the Debug Communications Channel, see
+ section 9 (The Debug Communications Channel and Instruction
+ Transfer Register) in document PRD03-PRDC-010486. */
+#else /* Word 0 - Little Endian */
+ uint32_t data : 32; /**< [ 31: 0](RO) Target to host data. One word of data for transfer from the
+ debug target to the debug host.
+ For the full behavior of the Debug Communications Channel, see
+ section 9 (The Debug Communications Channel and Instruction
+ Transfer Register) in document PRD03-PRDC-010486. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_dbgdtrtx_el0_s cn8; */
+ struct bdk_ap_dbgdtrtx_el0_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t data : 32; /**< [ 31: 0](WO) Target to host data. One word of data for transfer from the
+ debug target to the debug host.
+ For the full behavior of the Debug Communications Channel, see
+ section 9 (The Debug Communications Channel and Instruction
+ Transfer Register) in document PRD03-PRDC-010486. */
+#else /* Word 0 - Little Endian */
+ uint32_t data : 32; /**< [ 31: 0](WO) Target to host data. One word of data for transfer from the
+ debug target to the debug host.
+ For the full behavior of the Debug Communications Channel, see
+ section 9 (The Debug Communications Channel and Instruction
+ Transfer Register) in document PRD03-PRDC-010486. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_ap_dbgdtrtx_el0 bdk_ap_dbgdtrtx_el0_t;
+
+#define BDK_AP_DBGDTRTX_EL0 BDK_AP_DBGDTRTX_EL0_FUNC()
+static inline uint64_t BDK_AP_DBGDTRTX_EL0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_DBGDTRTX_EL0_FUNC(void)
+{
+ return 0x20300050010ll;
+}
+
+#define typedef_BDK_AP_DBGDTRTX_EL0 bdk_ap_dbgdtrtx_el0_t
+#define bustype_BDK_AP_DBGDTRTX_EL0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_DBGDTRTX_EL0 "AP_DBGDTRTX_EL0"
+#define busnum_BDK_AP_DBGDTRTX_EL0 0
+#define arguments_BDK_AP_DBGDTRTX_EL0 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_dbgprcr_el1
+ *
+ * AP Debug Power Control Register
+ * Controls behavior of processor on power-down request.
+ */
+union bdk_ap_dbgprcr_el1
+{
+ uint32_t u;
+ struct bdk_ap_dbgprcr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_1_31 : 31;
+ uint32_t corenpdrq : 1; /**< [ 0: 0](R/W) Core no powerdown request. Requests emulation of powerdown.
+
+ 0 = On a powerdown request, the system powers down the Core power
+ domain.
+ 1 = On a powerdown request, the system emulates powerdown of the
+ Core power domain. In this emulation mode the Core power
+ domain is not actually powered down. */
+#else /* Word 0 - Little Endian */
+ uint32_t corenpdrq : 1; /**< [ 0: 0](R/W) Core no powerdown request. Requests emulation of powerdown.
+
+ 0 = On a powerdown request, the system powers down the Core power
+ domain.
+ 1 = On a powerdown request, the system emulates powerdown of the
+ Core power domain. In this emulation mode the Core power
+ domain is not actually powered down. */
+ uint32_t reserved_1_31 : 31;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_dbgprcr_el1_s cn; */
+};
+typedef union bdk_ap_dbgprcr_el1 bdk_ap_dbgprcr_el1_t;
+
+#define BDK_AP_DBGPRCR_EL1 BDK_AP_DBGPRCR_EL1_FUNC()
+static inline uint64_t BDK_AP_DBGPRCR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_DBGPRCR_EL1_FUNC(void)
+{
+ return 0x20001040400ll;
+}
+
+#define typedef_BDK_AP_DBGPRCR_EL1 bdk_ap_dbgprcr_el1_t
+#define bustype_BDK_AP_DBGPRCR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_DBGPRCR_EL1 "AP_DBGPRCR_EL1"
+#define busnum_BDK_AP_DBGPRCR_EL1 0
+#define arguments_BDK_AP_DBGPRCR_EL1 -1,-1,-1,-1
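+
+/* Usage sketch (illustrative; hypothetical helper): a value requesting
+   emulated powerdown, so the Core power domain stays up across a
+   powerdown request. */
+static inline uint32_t example_dbgprcr_emulate_powerdown(void)
+{
+    bdk_ap_dbgprcr_el1_t p;
+    p.u = 0;
+    p.s.corenpdrq = 1; /* emulate rather than perform powerdown */
+    return p.u;
+}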
+
+/**
+ * Register (SYSREG) ap_dbgvcr32_el2
+ *
+ * AP Debug Vector Catch Register
+ * Allows access to the AArch32 register DBGVCR from AArch64
+ * state only. Its value has no effect on execution in AArch64
+ * state.
+ */
+union bdk_ap_dbgvcr32_el2
+{
+ uint32_t u;
+ struct bdk_ap_dbgvcr32_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t nsf : 1; /**< [ 31: 31](R/W) FIQ vector catch enable in nonsecure state.
+ The exception vector offset is 0x1C. */
+ uint32_t nsi : 1; /**< [ 30: 30](R/W) IRQ vector catch enable in nonsecure state.
+ The exception vector offset is 0x18. */
+ uint32_t reserved_29 : 1;
+ uint32_t nsd : 1; /**< [ 28: 28](R/W) Data Abort vector catch enable in nonsecure state.
+ The exception vector offset is 0x10. */
+ uint32_t nsp : 1; /**< [ 27: 27](R/W) Prefetch Abort vector catch enable in nonsecure state.
+ The exception vector offset is 0x0C. */
+ uint32_t nss : 1; /**< [ 26: 26](R/W) Supervisor Call (SVC) vector catch enable in nonsecure state.
+ The exception vector offset is 0x08. */
+ uint32_t nsu : 1; /**< [ 25: 25](R/W) Undefined Instruction vector catch enable in nonsecure state.
+ The exception vector offset is 0x04. */
+ uint32_t reserved_8_24 : 17;
+ uint32_t sf : 1; /**< [ 7: 7](R/W) FIQ vector catch enable in Secure state.
+ The exception vector offset is 0x1C. */
+ uint32_t si : 1; /**< [ 6: 6](R/W) IRQ vector catch enable in Secure state.
+ The exception vector offset is 0x18. */
+ uint32_t reserved_5 : 1;
+ uint32_t sd : 1; /**< [ 4: 4](R/W) Data Abort vector catch enable in Secure state.
+ The exception vector offset is 0x10. */
+ uint32_t sp : 1; /**< [ 3: 3](R/W) Prefetch Abort vector catch enable in Secure state.
+ The exception vector offset is 0x0C. */
+ uint32_t ss : 1; /**< [ 2: 2](R/W) Supervisor Call (SVC) vector catch enable in Secure state.
+ The exception vector offset is 0x08. */
+ uint32_t su : 1; /**< [ 1: 1](R/W) Undefined Instruction vector catch enable in Secure state.
+ The exception vector offset is 0x04. */
+ uint32_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0 : 1;
+ uint32_t su : 1; /**< [ 1: 1](R/W) Undefined Instruction vector catch enable in Secure state.
+ The exception vector offset is 0x04. */
+ uint32_t ss : 1; /**< [ 2: 2](R/W) Supervisor Call (SVC) vector catch enable in Secure state.
+ The exception vector offset is 0x08. */
+ uint32_t sp : 1; /**< [ 3: 3](R/W) Prefetch Abort vector catch enable in Secure state.
+ The exception vector offset is 0x0C. */
+ uint32_t sd : 1; /**< [ 4: 4](R/W) Data Abort vector catch enable in Secure state.
+ The exception vector offset is 0x10. */
+ uint32_t reserved_5 : 1;
+ uint32_t si : 1; /**< [ 6: 6](R/W) IRQ vector catch enable in Secure state.
+ The exception vector offset is 0x18. */
+ uint32_t sf : 1; /**< [ 7: 7](R/W) FIQ vector catch enable in Secure state.
+ The exception vector offset is 0x1C. */
+ uint32_t reserved_8_24 : 17;
+ uint32_t nsu : 1; /**< [ 25: 25](R/W) Undefined Instruction vector catch enable in nonsecure state.
+ The exception vector offset is 0x04. */
+ uint32_t nss : 1; /**< [ 26: 26](R/W) Supervisor Call (SVC) vector catch enable in nonsecure state.
+ The exception vector offset is 0x08. */
+ uint32_t nsp : 1; /**< [ 27: 27](R/W) Prefetch Abort vector catch enable in nonsecure state.
+ The exception vector offset is 0x0C. */
+ uint32_t nsd : 1; /**< [ 28: 28](R/W) Data Abort vector catch enable in nonsecure state.
+ The exception vector offset is 0x10. */
+ uint32_t reserved_29 : 1;
+ uint32_t nsi : 1; /**< [ 30: 30](R/W) IRQ vector catch enable in nonsecure state.
+ The exception vector offset is 0x18. */
+ uint32_t nsf : 1; /**< [ 31: 31](R/W) FIQ vector catch enable in nonsecure state.
+ The exception vector offset is 0x1C. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_dbgvcr32_el2_s cn8; */
+ struct bdk_ap_dbgvcr32_el2_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t nsf : 1; /**< [ 31: 31](RAZ) FIQ vector catch enable in nonsecure state.
+ The exception vector offset is 0x1C. */
+ uint32_t nsi : 1; /**< [ 30: 30](RAZ) IRQ vector catch enable in nonsecure state.
+ The exception vector offset is 0x18. */
+ uint32_t reserved_29 : 1;
+ uint32_t nsd : 1; /**< [ 28: 28](RAZ) Data Abort vector catch enable in nonsecure state.
+ The exception vector offset is 0x10. */
+ uint32_t nsp : 1; /**< [ 27: 27](RAZ) Prefetch Abort vector catch enable in nonsecure state.
+ The exception vector offset is 0x0C. */
+ uint32_t nss : 1; /**< [ 26: 26](RAZ) Supervisor Call (SVC) vector catch enable in nonsecure state.
+ The exception vector offset is 0x08. */
+ uint32_t nsu : 1; /**< [ 25: 25](RAZ) Undefined Instruction vector catch enable in nonsecure state.
+ The exception vector offset is 0x04. */
+ uint32_t reserved_8_24 : 17;
+ uint32_t sf : 1; /**< [ 7: 7](RAZ) FIQ vector catch enable in Secure state.
+ The exception vector offset is 0x1C. */
+ uint32_t si : 1; /**< [ 6: 6](RAZ) IRQ vector catch enable in Secure state.
+ The exception vector offset is 0x18. */
+ uint32_t reserved_5 : 1;
+ uint32_t sd : 1; /**< [ 4: 4](RAZ) Data Abort vector catch enable in Secure state.
+ The exception vector offset is 0x10. */
+ uint32_t sp : 1; /**< [ 3: 3](RAZ) Prefetch Abort vector catch enable in Secure state.
+ The exception vector offset is 0x0C. */
+ uint32_t ss : 1; /**< [ 2: 2](RAZ) Supervisor Call (SVC) vector catch enable in Secure state.
+ The exception vector offset is 0x08. */
+ uint32_t su : 1; /**< [ 1: 1](RAZ) Undefined Instruction vector catch enable in Secure state.
+ The exception vector offset is 0x04. */
+ uint32_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0 : 1;
+ uint32_t su : 1; /**< [ 1: 1](RAZ) Undefined Instruction vector catch enable in Secure state.
+ The exception vector offset is 0x04. */
+ uint32_t ss : 1; /**< [ 2: 2](RAZ) Supervisor Call (SVC) vector catch enable in Secure state.
+ The exception vector offset is 0x08. */
+ uint32_t sp : 1; /**< [ 3: 3](RAZ) Prefetch Abort vector catch enable in Secure state.
+ The exception vector offset is 0x0C. */
+ uint32_t sd : 1; /**< [ 4: 4](RAZ) Data Abort vector catch enable in Secure state.
+ The exception vector offset is 0x10. */
+ uint32_t reserved_5 : 1;
+ uint32_t si : 1; /**< [ 6: 6](RAZ) IRQ vector catch enable in Secure state.
+ The exception vector offset is 0x18. */
+ uint32_t sf : 1; /**< [ 7: 7](RAZ) FIQ vector catch enable in Secure state.
+ The exception vector offset is 0x1C. */
+ uint32_t reserved_8_24 : 17;
+ uint32_t nsu : 1; /**< [ 25: 25](RAZ) Undefined Instruction vector catch enable in nonsecure state.
+ The exception vector offset is 0x04. */
+ uint32_t nss : 1; /**< [ 26: 26](RAZ) Supervisor Call (SVC) vector catch enable in nonsecure state.
+ The exception vector offset is 0x08. */
+ uint32_t nsp : 1; /**< [ 27: 27](RAZ) Prefetch Abort vector catch enable in nonsecure state.
+ The exception vector offset is 0x0C. */
+ uint32_t nsd : 1; /**< [ 28: 28](RAZ) Data Abort vector catch enable in nonsecure state.
+ The exception vector offset is 0x10. */
+ uint32_t reserved_29 : 1;
+ uint32_t nsi : 1; /**< [ 30: 30](RAZ) IRQ vector catch enable in nonsecure state.
+ The exception vector offset is 0x18. */
+ uint32_t nsf : 1; /**< [ 31: 31](RAZ) FIQ vector catch enable in nonsecure state.
+ The exception vector offset is 0x1C. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_ap_dbgvcr32_el2 bdk_ap_dbgvcr32_el2_t;
+
+#define BDK_AP_DBGVCR32_EL2 BDK_AP_DBGVCR32_EL2_FUNC()
+static inline uint64_t BDK_AP_DBGVCR32_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_DBGVCR32_EL2_FUNC(void)
+{
+ return 0x20400070000ll;
+}
+
+#define typedef_BDK_AP_DBGVCR32_EL2 bdk_ap_dbgvcr32_el2_t
+#define bustype_BDK_AP_DBGVCR32_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_DBGVCR32_EL2 "AP_DBGVCR32_EL2"
+#define busnum_BDK_AP_DBGVCR32_EL2 0
+#define arguments_BDK_AP_DBGVCR32_EL2 -1,-1,-1,-1
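+
+/*
+ * A minimal usage sketch (not part of the imported BDK): composing a
+ * DBGVCR32_EL2 value with the union above, enabling vector catch on the
+ * nonsecure IRQ vector. The MSR write that would install the value is
+ * assumed to exist elsewhere; note these fields read as zero on CN9XXX.
+ */
+static inline uint32_t sketch_vcr_catch_nonsecure_irq(void)
+{
+    bdk_ap_dbgvcr32_el2_t vcr = { .u = 0 };
+    vcr.s.nsi = 1; /* catch nonsecure IRQ; exception vector offset 0x18 */
+    return vcr.u;
+}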
+
+/**
+ * Register (SYSREG) ap_dbgwcr#_el1
+ *
+ * AP Debug Watchpoint Control Registers
+ * Holds control information for a watchpoint. Forms watchpoint n
+ * together with value register DBGWVR\<n\>_EL1, where n is 0 to
+ * 15.
+ */
+union bdk_ap_dbgwcrx_el1
+{
+ uint32_t u;
+ struct bdk_ap_dbgwcrx_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_29_31 : 3;
+        uint32_t mask                  : 5;  /**< [ 28: 24](R/W) Address mask. Only objects up to 2GB can be watched using a
+                                                 single mask.
+
+                                                 0x0 = No mask.
+                                                 0x1 = Reserved.
+                                                 0x2 = Reserved.
+                                                 All other values mask the corresponding number of address bits. */
+ uint32_t reserved_21_23 : 3;
+ uint32_t wt : 1; /**< [ 20: 20](R/W) Watchpoint type.
+ 0 = Unlinked data address match.
+ 1 = Linked data address match. */
+ uint32_t lbn : 4; /**< [ 19: 16](R/W) Linked breakpoint number. For Linked data address watchpoints,
+ this specifies the index of the Context-matching breakpoint
+ linked to. */
+ uint32_t ssc : 2; /**< [ 15: 14](R/W) Security state control. Determines the Security states under
+ which a watchpoint debug event for watchpoint n is generated.
+ This field must be interpreted along with the HMC and PAC
+ fields. */
+ uint32_t hmc : 1; /**< [ 13: 13](R/W) Higher mode control. Determines the debug perspective for
+ deciding when a watchpoint debug event for watchpoint n is
+ generated. This field must be interpreted along with the SSC
+ and PAC fields. */
+ uint32_t bas : 8; /**< [ 12: 5](R/W) Byte address select. Each bit of this field selects whether a
+ byte from within the word or double-word addressed by
+ DBGWVR\<n\>_EL1 is being watched.
+
+ xxxxxxx1 Match byte at DBGWVR\<n\>_EL1
+ xxxxxx1x Match byte at DBGWVR\<n\>_EL1+1
+ xxxxx1xx Match byte at DBGWVR\<n\>_EL1+2
+ xxxx1xxx Match byte at DBGWVR\<n\>_EL1+3
+
+ In cases where DBGWVR\<n\>_EL1 addresses a double-word:
+ BAS Description, if DBGWVR\<n\>_EL1[2] == 0
+
+ xxx1xxxx Match byte at DBGWVR\<n\>_EL1+4
+ xx1xxxxx Match byte at DBGWVR\<n\>_EL1+5
+ x1xxxxxx Match byte at DBGWVR\<n\>_EL1+6
+ 1xxxxxxx Match byte at DBGWVR\<n\>_EL1+7
+
+ If DBGWVR\<n\>_EL1[2] == 1, only BAS[3:0] is used. ARM
+                                                 deprecates setting DBGWVR\<n\>_EL1[2] == 1.
+ If BAS is zero, no bytes are watched by this watchpoint.
+ Ignored if E is 0. */
+ uint32_t lsc : 2; /**< [ 4: 3](R/W) Load/store control. This field enables watchpoint matching on
+ the type of access being made.
+ All other values are reserved, but must behave as if the
+ watchpoint is disabled. Software must not rely on this
+ property as the behavior of reserved values might change in a
+ future revision of the architecture.
+ Ignored if E is 0.
+ 0x1 = Match instructions that load from a watchpointed address.
+ 0x2 = Match instructions that store to a watchpointed address.
+ 0x3 = Match instructions that load from or store to a watchpointed
+ address. */
+ uint32_t pac : 2; /**< [ 2: 1](R/W) Privilege of access control. Determines the Exception level or
+ levels at which a watchpoint debug event for watchpoint n is
+ generated. This field must be interpreted along with the SSC
+ and HMC fields. */
+ uint32_t ee : 1; /**< [ 0: 0](R/W) Enable watchpoint n.
+ 0 = Watchpoint disabled.
+ 1 = Watchpoint enabled. */
+#else /* Word 0 - Little Endian */
+ uint32_t ee : 1; /**< [ 0: 0](R/W) Enable watchpoint n.
+ 0 = Watchpoint disabled.
+ 1 = Watchpoint enabled. */
+ uint32_t pac : 2; /**< [ 2: 1](R/W) Privilege of access control. Determines the Exception level or
+ levels at which a watchpoint debug event for watchpoint n is
+ generated. This field must be interpreted along with the SSC
+ and HMC fields. */
+ uint32_t lsc : 2; /**< [ 4: 3](R/W) Load/store control. This field enables watchpoint matching on
+ the type of access being made.
+ All other values are reserved, but must behave as if the
+ watchpoint is disabled. Software must not rely on this
+ property as the behavior of reserved values might change in a
+ future revision of the architecture.
+ Ignored if E is 0.
+ 0x1 = Match instructions that load from a watchpointed address.
+ 0x2 = Match instructions that store to a watchpointed address.
+ 0x3 = Match instructions that load from or store to a watchpointed
+ address. */
+ uint32_t bas : 8; /**< [ 12: 5](R/W) Byte address select. Each bit of this field selects whether a
+ byte from within the word or double-word addressed by
+ DBGWVR\<n\>_EL1 is being watched.
+
+ xxxxxxx1 Match byte at DBGWVR\<n\>_EL1
+ xxxxxx1x Match byte at DBGWVR\<n\>_EL1+1
+ xxxxx1xx Match byte at DBGWVR\<n\>_EL1+2
+ xxxx1xxx Match byte at DBGWVR\<n\>_EL1+3
+
+ In cases where DBGWVR\<n\>_EL1 addresses a double-word:
+ BAS Description, if DBGWVR\<n\>_EL1[2] == 0
+
+ xxx1xxxx Match byte at DBGWVR\<n\>_EL1+4
+ xx1xxxxx Match byte at DBGWVR\<n\>_EL1+5
+ x1xxxxxx Match byte at DBGWVR\<n\>_EL1+6
+ 1xxxxxxx Match byte at DBGWVR\<n\>_EL1+7
+
+ If DBGWVR\<n\>_EL1[2] == 1, only BAS[3:0] is used. ARM
+                                                 deprecates setting DBGWVR\<n\>_EL1[2] == 1.
+ If BAS is zero, no bytes are watched by this watchpoint.
+ Ignored if E is 0. */
+ uint32_t hmc : 1; /**< [ 13: 13](R/W) Higher mode control. Determines the debug perspective for
+ deciding when a watchpoint debug event for watchpoint n is
+ generated. This field must be interpreted along with the SSC
+ and PAC fields. */
+ uint32_t ssc : 2; /**< [ 15: 14](R/W) Security state control. Determines the Security states under
+ which a watchpoint debug event for watchpoint n is generated.
+ This field must be interpreted along with the HMC and PAC
+ fields. */
+ uint32_t lbn : 4; /**< [ 19: 16](R/W) Linked breakpoint number. For Linked data address watchpoints,
+ this specifies the index of the Context-matching breakpoint
+ linked to. */
+ uint32_t wt : 1; /**< [ 20: 20](R/W) Watchpoint type.
+ 0 = Unlinked data address match.
+ 1 = Linked data address match. */
+ uint32_t reserved_21_23 : 3;
+        uint32_t mask                  : 5;  /**< [ 28: 24](R/W) Address mask. Only objects up to 2GB can be watched using a
+                                                 single mask.
+
+                                                 0x0 = No mask.
+                                                 0x1 = Reserved.
+                                                 0x2 = Reserved.
+                                                 All other values mask the corresponding number of address bits. */
+ uint32_t reserved_29_31 : 3;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_dbgwcrx_el1_s cn; */
+};
+typedef union bdk_ap_dbgwcrx_el1 bdk_ap_dbgwcrx_el1_t;
+
+static inline uint64_t BDK_AP_DBGWCRX_EL1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_DBGWCRX_EL1(unsigned long a)
+{
+ if (a<=15)
+ return 0x20000000700ll + 0x10000ll * ((a) & 0xf);
+ __bdk_csr_fatal("AP_DBGWCRX_EL1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_DBGWCRX_EL1(a) bdk_ap_dbgwcrx_el1_t
+#define bustype_BDK_AP_DBGWCRX_EL1(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_DBGWCRX_EL1(a) "AP_DBGWCRX_EL1"
+#define busnum_BDK_AP_DBGWCRX_EL1(a) (a)
+#define arguments_BDK_AP_DBGWCRX_EL1(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_dbgwvr#_el1
+ *
+ * AP Debug Watchpoint Value Registers
+ * Holds a data address value for use in watchpoint matching.
+ * Forms watchpoint n together with control register
+ * DBGWCR\<n\>_EL1, where n is 0 to 15.
+ */
+union bdk_ap_dbgwvrx_el1
+{
+ uint64_t u;
+ struct bdk_ap_dbgwvrx_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_dbgwvrx_el1_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ress : 15; /**< [ 63: 49](R/W) Reserved, Sign extended. Hardwired to the value of the sign
+ bit, bit [48]. Hardware and software must treat this field as
+ RES0 if bit[48] is 0, and as RES1 if bit[48] is 1. */
+ uint64_t va : 47; /**< [ 48: 2](R/W) Bits[48:2] of the address value for comparison.
+ ARM deprecates setting DBGWVR\<n\>_EL1[2] == 1. */
+ uint64_t reserved_0_1 : 2;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_1 : 2;
+ uint64_t va : 47; /**< [ 48: 2](R/W) Bits[48:2] of the address value for comparison.
+ ARM deprecates setting DBGWVR\<n\>_EL1[2] == 1. */
+ uint64_t ress : 15; /**< [ 63: 49](R/W) Reserved, Sign extended. Hardwired to the value of the sign
+ bit, bit [48]. Hardware and software must treat this field as
+ RES0 if bit[48] is 0, and as RES1 if bit[48] is 1. */
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_ap_dbgwvrx_el1_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ress : 11; /**< [ 63: 53](R/W) Reserved, Sign extended. Hardwired to the value of the sign
+ bit, bit [52]. Hardware and software must treat this field as
+ RES0 if bit[52] is 0, and as RES1 if bit[52] is 1. */
+ uint64_t va : 51; /**< [ 52: 2](R/W) Bits[52:2] of the address value for comparison.
+ ARM deprecates setting DBGWVR\<n\>_EL1[2] == 1. */
+ uint64_t reserved_0_1 : 2;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_1 : 2;
+ uint64_t va : 51; /**< [ 52: 2](R/W) Bits[52:2] of the address value for comparison.
+ ARM deprecates setting DBGWVR\<n\>_EL1[2] == 1. */
+ uint64_t ress : 11; /**< [ 63: 53](R/W) Reserved, Sign extended. Hardwired to the value of the sign
+ bit, bit [52]. Hardware and software must treat this field as
+ RES0 if bit[52] is 0, and as RES1 if bit[52] is 1. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_ap_dbgwvrx_el1 bdk_ap_dbgwvrx_el1_t;
+
+static inline uint64_t BDK_AP_DBGWVRX_EL1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_DBGWVRX_EL1(unsigned long a)
+{
+ if (a<=15)
+ return 0x20000000600ll + 0x10000ll * ((a) & 0xf);
+ __bdk_csr_fatal("AP_DBGWVRX_EL1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_DBGWVRX_EL1(a) bdk_ap_dbgwvrx_el1_t
+#define bustype_BDK_AP_DBGWVRX_EL1(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_DBGWVRX_EL1(a) "AP_DBGWVRX_EL1"
+#define busnum_BDK_AP_DBGWVRX_EL1(a) (a)
+#define arguments_BDK_AP_DBGWVRX_EL1(a) (a),-1,-1,-1
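+
+/*
+ * A minimal sketch (not part of the imported BDK): composing the watchpoint
+ * pair formed by DBGWCR<n>_EL1 and DBGWVR<n>_EL1 with the two unions above.
+ * The MSR writes that would install the values are assumed to exist
+ * elsewhere, and the PAC encoding used here is an assumption (EL1&0
+ * matching with SSC = HMC = 0).
+ */
+static inline void sketch_watchpoint_fields(uint64_t addr,
+                                            bdk_ap_dbgwcrx_el1_t *wcr,
+                                            bdk_ap_dbgwvrx_el1_t *wvr)
+{
+    wvr->u = addr & ~0x7ull; /* VA field starts at bit 2; keep doubleword aligned */
+    wcr->u = 0;
+    wcr->s.bas = 0xff;       /* watch all eight bytes of the doubleword */
+    wcr->s.lsc = 0x3;        /* match both loads and stores */
+    wcr->s.pac = 0x3;        /* assumption: match at EL1&0 with SSC = HMC = 0 */
+    wcr->s.ee = 1;           /* enable watchpoint n */
+}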
+
+/**
+ * Register (SYSREG) ap_dczid_el0
+ *
+ * AP Data Cache Zero ID Register
+ * This register indicates the block size that is written with byte values of 0 by the
+ * DC ZVA (Data Cache Zero by Address) system instruction.
+ */
+union bdk_ap_dczid_el0
+{
+ uint32_t u;
+ struct bdk_ap_dczid_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_5_31 : 27;
+ uint32_t dzp : 1; /**< [ 4: 4](RO) Data Zero prohibited.
+ The value read from this field is governed by the access state
+ and the values of the AP_HCR_EL2[TDZ] and AP_SCTLR_EL1[DZE] bits.
+ 0 = DC ZVA instruction is permitted.
+ 1 = DC ZVA instruction is prohibited. */
+ uint32_t bs : 4; /**< [ 3: 0](RO) Log2 of the block size in words. The maximum size supported is 2KB (value == 9).
+
+ In CNXXXX, 128 bytes. */
+#else /* Word 0 - Little Endian */
+ uint32_t bs : 4; /**< [ 3: 0](RO) Log2 of the block size in words. The maximum size supported is 2KB (value == 9).
+
+ In CNXXXX, 128 bytes. */
+ uint32_t dzp : 1; /**< [ 4: 4](RO) Data Zero prohibited.
+ The value read from this field is governed by the access state
+ and the values of the AP_HCR_EL2[TDZ] and AP_SCTLR_EL1[DZE] bits.
+ 0 = DC ZVA instruction is permitted.
+ 1 = DC ZVA instruction is prohibited. */
+ uint32_t reserved_5_31 : 27;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_dczid_el0_s cn; */
+};
+typedef union bdk_ap_dczid_el0 bdk_ap_dczid_el0_t;
+
+#define BDK_AP_DCZID_EL0 BDK_AP_DCZID_EL0_FUNC()
+static inline uint64_t BDK_AP_DCZID_EL0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_DCZID_EL0_FUNC(void)
+{
+ return 0x30300000700ll;
+}
+
+#define typedef_BDK_AP_DCZID_EL0 bdk_ap_dczid_el0_t
+#define bustype_BDK_AP_DCZID_EL0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_DCZID_EL0 "AP_DCZID_EL0"
+#define busnum_BDK_AP_DCZID_EL0 0
+#define arguments_BDK_AP_DCZID_EL0 -1,-1,-1,-1
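+
+/*
+ * A small sketch (not from the BDK): deriving the DC ZVA block size in
+ * bytes from the DCZID_EL0 fields above. [BS] is log2 of the block size in
+ * 4-byte words, so bytes = 4 << BS; the "In CNXXXX, 128 bytes" note
+ * corresponds to BS = 5. The MRS read of the register itself is assumed.
+ */
+static inline unsigned int sketch_zva_block_bytes(bdk_ap_dczid_el0_t id)
+{
+    if (id.s.dzp)
+        return 0;         /* DC ZVA is prohibited in the current state */
+    return 4u << id.s.bs; /* words are 4 bytes; BS is log2(words) */
+}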
+
+/**
+ * Register (SYSREG) ap_disr_el1
+ *
+ * AP Deferred Interrupt Status Register
+ * Records that an SError interrupt has been consumed by an ESB instruction.
+ *
+ * Usage constraints:
+ * DISR_EL1 is UNDEFINED at EL0.
+ * Direct reads and writes of DISR_EL1:
+ * - If EL2 is implemented and HCR_EL2.AMO is set to 1, access VDISR_EL2 at Non-secure EL1.
+ * - If EL3 is implemented and SCR_EL3.EA == 1, are RAZ/WI at EL2, Secure EL1, and, if they
+ * do not access VDISR_EL2, Non-secure EL1.
+ * An indirect write to DISR_EL1 made by an ESB instruction does not require an explicit
+ * synchronization operation for the value written to be observed by a direct read of DISR_EL1
+ * occurring in program order after the ESB.
+ */
+union bdk_ap_disr_el1
+{
+ uint64_t u;
+ struct bdk_ap_disr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t aa : 1; /**< [ 31: 31](R/W) Set to 1 when ESB defers an asynchronous SError interrupt. If the implementation does not
+ include any synchronizable sources of SError interrupt, this bit is RES0. */
+ uint64_t reserved_25_30 : 6;
+ uint64_t ids : 1; /**< [ 24: 24](R/W) Indicates whether the deferred SError interrupt was of an implementation defined type.
+ 0 = Deferred error uses architecturally-defined format.
+ 1 = Deferred error uses implementation defined format. ISS, bits [23:0], when
+ IDS = 1 contain an implementation defined SError interrupt syndrome. See the
+ description of ESR_ELx[23:0] for an SError interrupt. */
+ uint64_t reserved_13_23 : 11;
+ uint64_t aet : 3; /**< [ 12: 10](R/W) Asynchronous error type. See the description of ESR_ELx.AET for an SError interrupt. */
+ uint64_t ea : 1; /**< [ 9: 9](R/W) External abort type. See the description of ESR_ELx.EA for an SError interrupt. */
+ uint64_t reserved_6_8 : 3;
+ uint64_t dfsc : 6; /**< [ 5: 0](R/W) Fault status code. See the description of ESR_ELx.DFSC for an SError interrupt. */
+#else /* Word 0 - Little Endian */
+ uint64_t dfsc : 6; /**< [ 5: 0](R/W) Fault status code. See the description of ESR_ELx.DFSC for an SError interrupt. */
+ uint64_t reserved_6_8 : 3;
+ uint64_t ea : 1; /**< [ 9: 9](R/W) External abort type. See the description of ESR_ELx.EA for an SError interrupt. */
+ uint64_t aet : 3; /**< [ 12: 10](R/W) Asynchronous error type. See the description of ESR_ELx.AET for an SError interrupt. */
+ uint64_t reserved_13_23 : 11;
+ uint64_t ids : 1; /**< [ 24: 24](R/W) Indicates whether the deferred SError interrupt was of an implementation defined type.
+ 0 = Deferred error uses architecturally-defined format.
+ 1 = Deferred error uses implementation defined format. ISS, bits [23:0], when
+ IDS = 1 contain an implementation defined SError interrupt syndrome. See the
+ description of ESR_ELx[23:0] for an SError interrupt. */
+ uint64_t reserved_25_30 : 6;
+ uint64_t aa : 1; /**< [ 31: 31](R/W) Set to 1 when ESB defers an asynchronous SError interrupt. If the implementation does not
+ include any synchronizable sources of SError interrupt, this bit is RES0. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_disr_el1_s cn; */
+};
+typedef union bdk_ap_disr_el1 bdk_ap_disr_el1_t;
+
+#define BDK_AP_DISR_EL1 BDK_AP_DISR_EL1_FUNC()
+static inline uint64_t BDK_AP_DISR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_DISR_EL1_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x3000c010100ll;
+ __bdk_csr_fatal("AP_DISR_EL1", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_DISR_EL1 bdk_ap_disr_el1_t
+#define bustype_BDK_AP_DISR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_DISR_EL1 "AP_DISR_EL1"
+#define busnum_BDK_AP_DISR_EL1 0
+#define arguments_BDK_AP_DISR_EL1 -1,-1,-1,-1
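+
+/*
+ * Sketch (not BDK code): interpreting a deferred SError with the
+ * bdk_ap_disr_el1_t layout above. The MRS read of DISR_EL1 is assumed and
+ * omitted; only the field split is shown.
+ */
+static inline int sketch_disr_dfsc(bdk_ap_disr_el1_t disr, unsigned int *dfsc)
+{
+    if (!disr.s.aa)
+        return 0;        /* ESB did not defer an SError */
+    if (disr.s.ids)
+        return -1;       /* implementation-defined syndrome in bits [23:0] */
+    *dfsc = disr.s.dfsc; /* architecturally-defined fault status code */
+    return 1;
+}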
+
+/**
+ * Register (SYSREG) ap_dlr_el0
+ *
+ * AP Debug Link Register
+ * In Debug state, holds the address to restart from.
+ */
+union bdk_ap_dlr_el0
+{
+ uint64_t u;
+ struct bdk_ap_dlr_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Restart address. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Restart address. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_dlr_el0_s cn; */
+};
+typedef union bdk_ap_dlr_el0 bdk_ap_dlr_el0_t;
+
+#define BDK_AP_DLR_EL0 BDK_AP_DLR_EL0_FUNC()
+static inline uint64_t BDK_AP_DLR_EL0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_DLR_EL0_FUNC(void)
+{
+ return 0x30304050100ll;
+}
+
+#define typedef_BDK_AP_DLR_EL0 bdk_ap_dlr_el0_t
+#define bustype_BDK_AP_DLR_EL0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_DLR_EL0 "AP_DLR_EL0"
+#define busnum_BDK_AP_DLR_EL0 0
+#define arguments_BDK_AP_DLR_EL0 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_dspsr_el0
+ *
+ * AP Debug Saved Program Status Register
+ * Holds the saved processor state on entry to debug state.
+ */
+union bdk_ap_dspsr_el0
+{
+ uint32_t u;
+ struct bdk_ap_dspsr_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t n : 1; /**< [ 31: 31](R/W) Set to the value of CPSR[N] on entering Debug state, and copied
+ to CPSR[N] on exiting Debug state. */
+ uint32_t z : 1; /**< [ 30: 30](R/W) Set to the value of CPSR[Z] on entering Debug state, and copied
+ to CPSR[Z] on exiting Debug state. */
+ uint32_t cc : 1; /**< [ 29: 29](R/W) Set to the value of CPSR[C] on entering Debug state, and copied
+ to CPSR[C] on exiting Debug state. */
+ uint32_t v : 1; /**< [ 28: 28](R/W) Set to the value of CPSR[V] on entering Debug state, and copied
+ to CPSR[V] on exiting Debug state. */
+ uint32_t reserved_24_27 : 4;
+ uint32_t uao : 1; /**< [ 23: 23](R/W) User access override. */
+ uint32_t reserved_22 : 1;
+ uint32_t ss : 1; /**< [ 21: 21](R/W) Software step. Indicates whether software step was enabled when
+ Debug state was entered. */
+ uint32_t il : 1; /**< [ 20: 20](R/W) Illegal Execution State bit. Shows the value of PSTATE[IL]
+ immediately before Debug state was entered. */
+ uint32_t reserved_10_19 : 10;
+        uint32_t dd                    : 1;  /**< [ 9: 9](R/W) Debug exception mask bit. Together with [AA], [I], and [F], can also be accessed as PSTATE[D,A,I,F]. */
+ uint32_t aa : 1; /**< [ 8: 8](R/W) Asynchronous data abort mask bit.
+ 0 = Exception not masked.
+ 1 = Exception masked. */
+ uint32_t i : 1; /**< [ 7: 7](R/W) IRQ mask bit.
+ 0 = Exception not masked.
+ 1 = Exception masked. */
+ uint32_t f : 1; /**< [ 6: 6](R/W) FIQ mask bit.
+ 0 = Exception not masked.
+ 1 = Exception masked. */
+ uint32_t reserved_5 : 1;
+ uint32_t nrw : 1; /**< [ 4: 4](R/W) Current register width: 0 = AArch64, 1 = AArch32. */
+ uint32_t el : 2; /**< [ 3: 2](R/W) Current exception level: 0x0 = EL0, 0x1 = EL1, 0x2 = EL2, 0x3 = EL3. */
+ uint32_t reserved_1 : 1;
+        uint32_t sp                    : 1;  /**< [ 0: 0](R/W) AArch64 only. Stack pointer selection: 0 = SP0, 1 = SPx. */
+#else /* Word 0 - Little Endian */
+        uint32_t sp                    : 1;  /**< [ 0: 0](R/W) AArch64 only. Stack pointer selection: 0 = SP0, 1 = SPx. */
+ uint32_t reserved_1 : 1;
+ uint32_t el : 2; /**< [ 3: 2](R/W) Current exception level: 0x0 = EL0, 0x1 = EL1, 0x2 = EL2, 0x3 = EL3. */
+ uint32_t nrw : 1; /**< [ 4: 4](R/W) Current register width: 0 = AArch64, 1 = AArch32. */
+ uint32_t reserved_5 : 1;
+ uint32_t f : 1; /**< [ 6: 6](R/W) FIQ mask bit.
+ 0 = Exception not masked.
+ 1 = Exception masked. */
+ uint32_t i : 1; /**< [ 7: 7](R/W) IRQ mask bit.
+ 0 = Exception not masked.
+ 1 = Exception masked. */
+ uint32_t aa : 1; /**< [ 8: 8](R/W) Asynchronous data abort mask bit.
+ 0 = Exception not masked.
+ 1 = Exception masked. */
+        uint32_t dd                    : 1;  /**< [ 9: 9](R/W) Debug exception mask bit. Together with [AA], [I], and [F], can also be accessed as PSTATE[D,A,I,F]. */
+ uint32_t reserved_10_19 : 10;
+ uint32_t il : 1; /**< [ 20: 20](R/W) Illegal Execution State bit. Shows the value of PSTATE[IL]
+ immediately before Debug state was entered. */
+ uint32_t ss : 1; /**< [ 21: 21](R/W) Software step. Indicates whether software step was enabled when
+ Debug state was entered. */
+ uint32_t reserved_22 : 1;
+ uint32_t uao : 1; /**< [ 23: 23](R/W) User access override. */
+ uint32_t reserved_24_27 : 4;
+ uint32_t v : 1; /**< [ 28: 28](R/W) Set to the value of CPSR[V] on entering Debug state, and copied
+ to CPSR[V] on exiting Debug state. */
+ uint32_t cc : 1; /**< [ 29: 29](R/W) Set to the value of CPSR[C] on entering Debug state, and copied
+ to CPSR[C] on exiting Debug state. */
+ uint32_t z : 1; /**< [ 30: 30](R/W) Set to the value of CPSR[Z] on entering Debug state, and copied
+ to CPSR[Z] on exiting Debug state. */
+ uint32_t n : 1; /**< [ 31: 31](R/W) Set to the value of CPSR[N] on entering Debug state, and copied
+ to CPSR[N] on exiting Debug state. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_dspsr_el0_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t n : 1; /**< [ 31: 31](R/W) Set to the value of CPSR[N] on entering Debug state, and copied
+ to CPSR[N] on exiting Debug state. */
+ uint32_t z : 1; /**< [ 30: 30](R/W) Set to the value of CPSR[Z] on entering Debug state, and copied
+ to CPSR[Z] on exiting Debug state. */
+ uint32_t cc : 1; /**< [ 29: 29](R/W) Set to the value of CPSR[C] on entering Debug state, and copied
+ to CPSR[C] on exiting Debug state. */
+ uint32_t v : 1; /**< [ 28: 28](R/W) Set to the value of CPSR[V] on entering Debug state, and copied
+ to CPSR[V] on exiting Debug state. */
+ uint32_t reserved_22_27 : 6;
+ uint32_t ss : 1; /**< [ 21: 21](R/W) Software step. Indicates whether software step was enabled when
+ Debug state was entered. */
+ uint32_t il : 1; /**< [ 20: 20](R/W) Illegal Execution State bit. Shows the value of PSTATE[IL]
+ immediately before Debug state was entered. */
+ uint32_t reserved_10_19 : 10;
+        uint32_t dd                    : 1;  /**< [ 9: 9](R/W) Debug exception mask bit. Together with [AA], [I], and [F], can also be accessed as PSTATE[D,A,I,F]. */
+ uint32_t aa : 1; /**< [ 8: 8](R/W) Asynchronous data abort mask bit.
+ 0 = Exception not masked.
+ 1 = Exception masked. */
+ uint32_t i : 1; /**< [ 7: 7](R/W) IRQ mask bit.
+ 0 = Exception not masked.
+ 1 = Exception masked. */
+ uint32_t f : 1; /**< [ 6: 6](R/W) FIQ mask bit.
+ 0 = Exception not masked.
+ 1 = Exception masked. */
+ uint32_t reserved_5 : 1;
+ uint32_t nrw : 1; /**< [ 4: 4](R/W) Current register width: 0 = AArch64, 1 = AArch32. */
+ uint32_t el : 2; /**< [ 3: 2](R/W) Current exception level: 0x0 = EL0, 0x1 = EL1, 0x2 = EL2, 0x3 = EL3. */
+ uint32_t reserved_1 : 1;
+        uint32_t sp                    : 1;  /**< [ 0: 0](R/W) AArch64 only. Stack pointer selection: 0 = SP0, 1 = SPx. */
+#else /* Word 0 - Little Endian */
+        uint32_t sp                    : 1;  /**< [ 0: 0](R/W) AArch64 only. Stack pointer selection: 0 = SP0, 1 = SPx. */
+ uint32_t reserved_1 : 1;
+ uint32_t el : 2; /**< [ 3: 2](R/W) Current exception level: 0x0 = EL0, 0x1 = EL1, 0x2 = EL2, 0x3 = EL3. */
+ uint32_t nrw : 1; /**< [ 4: 4](R/W) Current register width: 0 = AArch64, 1 = AArch32. */
+ uint32_t reserved_5 : 1;
+ uint32_t f : 1; /**< [ 6: 6](R/W) FIQ mask bit.
+ 0 = Exception not masked.
+ 1 = Exception masked. */
+ uint32_t i : 1; /**< [ 7: 7](R/W) IRQ mask bit.
+ 0 = Exception not masked.
+ 1 = Exception masked. */
+ uint32_t aa : 1; /**< [ 8: 8](R/W) Asynchronous data abort mask bit.
+ 0 = Exception not masked.
+ 1 = Exception masked. */
+        uint32_t dd                    : 1;  /**< [ 9: 9](R/W) Debug exception mask bit. Together with [AA], [I], and [F], can also be accessed as PSTATE[D,A,I,F]. */
+ uint32_t reserved_10_19 : 10;
+ uint32_t il : 1; /**< [ 20: 20](R/W) Illegal Execution State bit. Shows the value of PSTATE[IL]
+ immediately before Debug state was entered. */
+ uint32_t ss : 1; /**< [ 21: 21](R/W) Software step. Indicates whether software step was enabled when
+ Debug state was entered. */
+ uint32_t reserved_22_27 : 6;
+ uint32_t v : 1; /**< [ 28: 28](R/W) Set to the value of CPSR[V] on entering Debug state, and copied
+ to CPSR[V] on exiting Debug state. */
+ uint32_t cc : 1; /**< [ 29: 29](R/W) Set to the value of CPSR[C] on entering Debug state, and copied
+ to CPSR[C] on exiting Debug state. */
+ uint32_t z : 1; /**< [ 30: 30](R/W) Set to the value of CPSR[Z] on entering Debug state, and copied
+ to CPSR[Z] on exiting Debug state. */
+ uint32_t n : 1; /**< [ 31: 31](R/W) Set to the value of CPSR[N] on entering Debug state, and copied
+ to CPSR[N] on exiting Debug state. */
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_ap_dspsr_el0_s cn9; */
+};
+typedef union bdk_ap_dspsr_el0 bdk_ap_dspsr_el0_t;
+
+#define BDK_AP_DSPSR_EL0 BDK_AP_DSPSR_EL0_FUNC()
+static inline uint64_t BDK_AP_DSPSR_EL0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_DSPSR_EL0_FUNC(void)
+{
+ return 0x30304050000ll;
+}
+
+#define typedef_BDK_AP_DSPSR_EL0 bdk_ap_dspsr_el0_t
+#define bustype_BDK_AP_DSPSR_EL0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_DSPSR_EL0 "AP_DSPSR_EL0"
+#define busnum_BDK_AP_DSPSR_EL0 0
+#define arguments_BDK_AP_DSPSR_EL0 -1,-1,-1,-1
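+
+/*
+ * Sketch (not BDK code): recovering the pre-Debug-state context from a
+ * saved DSPSR_EL0 value through the common 's' view above. How the value
+ * is read from the register is assumed and not shown.
+ */
+static inline void sketch_decode_dspsr(bdk_ap_dspsr_el0_t dspsr,
+                                       unsigned int *el, int *aarch32)
+{
+    *el = dspsr.s.el;       /* exception level in effect before Debug state */
+    *aarch32 = dspsr.s.nrw; /* 0 = AArch64, 1 = AArch32 */
+}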
+
+/**
+ * Register (SYSREG) ap_elr_el#
+ *
+ * AP Exception Link Register
+ * Return address for exception
+ */
+union bdk_ap_elr_elx
+{
+ uint64_t u;
+ struct bdk_ap_elr_elx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t address : 64; /**< [ 63: 0](R/W) Return address for exception. */
+#else /* Word 0 - Little Endian */
+ uint64_t address : 64; /**< [ 63: 0](R/W) Return address for exception. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_elr_elx_s cn; */
+};
+typedef union bdk_ap_elr_elx bdk_ap_elr_elx_t;
+
+static inline uint64_t BDK_AP_ELR_ELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ELR_ELX(unsigned long a)
+{
+ if ((a>=1)&&(a<=3))
+ return 0x30004000100ll + 0ll * ((a) & 0x3);
+ __bdk_csr_fatal("AP_ELR_ELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_ELR_ELX(a) bdk_ap_elr_elx_t
+#define bustype_BDK_AP_ELR_ELX(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ELR_ELX(a) "AP_ELR_ELX"
+#define busnum_BDK_AP_ELR_ELX(a) (a)
+#define arguments_BDK_AP_ELR_ELX(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_elr_el12
+ *
+ * AP Exception Link EL2/3 Alias Register
+ * Alias to allow EL2/3 access to ELR_EL1 when AP_HCR_EL2[E2H]==1.
+ */
+union bdk_ap_elr_el12
+{
+ uint64_t u;
+ struct bdk_ap_elr_el12_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t address : 64; /**< [ 63: 0](R/W) Return address for exception. */
+#else /* Word 0 - Little Endian */
+ uint64_t address : 64; /**< [ 63: 0](R/W) Return address for exception. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_elr_el12_s cn; */
+};
+typedef union bdk_ap_elr_el12 bdk_ap_elr_el12_t;
+
+#define BDK_AP_ELR_EL12 BDK_AP_ELR_EL12_FUNC()
+static inline uint64_t BDK_AP_ELR_EL12_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ELR_EL12_FUNC(void)
+{
+ return 0x30504000100ll;
+}
+
+#define typedef_BDK_AP_ELR_EL12 bdk_ap_elr_el12_t
+#define bustype_BDK_AP_ELR_EL12 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ELR_EL12 "AP_ELR_EL12"
+#define busnum_BDK_AP_ELR_EL12 0
+#define arguments_BDK_AP_ELR_EL12 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_erridr_el1
+ *
+ * AP Error Record ID Register
+ * Defines the number of error records that can be accessed through the Error Record system
+ * registers.
+ *
+ * Usage constraints:
+ * AP_ERRIDR_EL1 is UNDEFINED at EL0.
+ * If EL2 is implemented and AP_HCR_EL2[TERR] == 1, then direct reads of AP_ERRIDR_EL1 at
+ * nonsecure EL1 generate a Trap exception to EL2.
+ * If EL3 is implemented and AP_SCR_EL3[TERR] == 1, then direct reads of AP_ERRIDR_EL1 at
+ * EL1 and EL2 generate a Trap exception to EL3.
+ */
+union bdk_ap_erridr_el1
+{
+ uint64_t u;
+ struct bdk_ap_erridr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t num : 16; /**< [ 15: 0](RO) Number of records that can be accessed through the error record system registers. Each
+ record is notionally owned by a node. A node might own multiple records. */
+#else /* Word 0 - Little Endian */
+ uint64_t num : 16; /**< [ 15: 0](RO) Number of records that can be accessed through the error record system registers. Each
+ record is notionally owned by a node. A node might own multiple records. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_erridr_el1_s cn; */
+};
+typedef union bdk_ap_erridr_el1 bdk_ap_erridr_el1_t;
+
+#define BDK_AP_ERRIDR_EL1 BDK_AP_ERRIDR_EL1_FUNC()
+static inline uint64_t BDK_AP_ERRIDR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ERRIDR_EL1_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x30005030000ll;
+ __bdk_csr_fatal("AP_ERRIDR_EL1", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_ERRIDR_EL1 bdk_ap_erridr_el1_t
+#define bustype_BDK_AP_ERRIDR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ERRIDR_EL1 "AP_ERRIDR_EL1"
+#define busnum_BDK_AP_ERRIDR_EL1 0
+#define arguments_BDK_AP_ERRIDR_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_errselr_el1
+ *
+ * AP Error Record Select Register
+ * ERRSELR_EL1 is UNDEFINED at EL0.
+ * If EL2 is implemented and HCR_EL2.TERR == 1, then direct reads and writes of ERRSELR_EL1 at
+ * Non-secure EL1 generate a Trap exception to EL2.
+ * If EL3 is implemented and SCR_EL3.TERR == 1, then direct reads and writes of ERRSELR_EL1 at
+ * EL1 and EL2 generate a Trap exception to EL3.
+ * If ERRIDR_EL1 indicates that zero records are implemented, ERRSELR_EL1 might be UNDEFINED or
+ * RES0.
+ */
+union bdk_ap_errselr_el1
+{
+ uint64_t u;
+ struct bdk_ap_errselr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t sel : 16; /**< [ 15: 0](R/W) Selects the record accessed through the ERX registers.
+                                                 For example, if AP_ERRSELR_EL1[SEL] is set to 4, then reads and writes of
+                                                 AP_ERXSTATUS_EL1 access ERR4STATUS.
+                                                 If AP_ERRSELR_EL1[SEL] is set to a value greater than or equal to AP_ERRIDR_EL1[NUM] then:
+                                                 - The value read back from AP_ERRSELR_EL1[SEL] is UNKNOWN.
+                                                 - One of:
+                                                   - An UNKNOWN record is selected.
+                                                   - The ERX* registers are RAZ/WI.
+                                                   - ERX* register reads and writes are NOPs.
+                                                   - ERX* register reads and writes are UNDEFINED.
+                                                 Note: The ARM preferred behavior if one or more records are implemented is:
+                                                 - SEL is implemented as an N-bit field, where N is the smallest value such that
+                                                 ERRIDR_EL1.NUM \<= 2^N.
+                                                 - If the value written to SEL modulo 2^N is greater than or equal to ERRIDR_EL1.NUM,
+                                                 a dummy RAZ/WI record is selected.
+ If zero records are implemented, the ARM preferred behavior is for ERRSELR_EL1 and
+ ERX* to be undefined. */
+#else /* Word 0 - Little Endian */
+ uint64_t sel : 16; /**< [ 15: 0](R/W) Selects the record accessed through the ERX registers.
+                                                 For example, if AP_ERRSELR_EL1[SEL] is set to 4, then reads and writes of
+                                                 AP_ERXSTATUS_EL1 access ERR4STATUS.
+                                                 If AP_ERRSELR_EL1[SEL] is set to a value greater than or equal to AP_ERRIDR_EL1[NUM] then:
+                                                 - The value read back from AP_ERRSELR_EL1[SEL] is UNKNOWN.
+                                                 - One of:
+                                                   - An UNKNOWN record is selected.
+                                                   - The ERX* registers are RAZ/WI.
+                                                   - ERX* register reads and writes are NOPs.
+                                                   - ERX* register reads and writes are UNDEFINED.
+                                                 Note: The ARM preferred behavior if one or more records are implemented is:
+                                                 - SEL is implemented as an N-bit field, where N is the smallest value such that
+                                                 ERRIDR_EL1.NUM \<= 2^N.
+                                                 - If the value written to SEL modulo 2^N is greater than or equal to ERRIDR_EL1.NUM,
+                                                 a dummy RAZ/WI record is selected.
+ If zero records are implemented, the ARM preferred behavior is for ERRSELR_EL1 and
+ ERX* to be undefined. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_errselr_el1_s cn; */
+};
+typedef union bdk_ap_errselr_el1 bdk_ap_errselr_el1_t;
+
+#define BDK_AP_ERRSELR_EL1 BDK_AP_ERRSELR_EL1_FUNC()
+static inline uint64_t BDK_AP_ERRSELR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ERRSELR_EL1_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x30005030100ll;
+ __bdk_csr_fatal("AP_ERRSELR_EL1", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_ERRSELR_EL1 bdk_ap_errselr_el1_t
+#define bustype_BDK_AP_ERRSELR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ERRSELR_EL1 "AP_ERRSELR_EL1"
+#define busnum_BDK_AP_ERRSELR_EL1 0
+#define arguments_BDK_AP_ERRSELR_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_erxaddr_el1
+ *
+ * AP Selected Error Record Address Register
+ * Accesses the ERR\<n\>ADDR address register for the error record selected by ERRSELR_EL1.SEL.
+ *
+ * Usage constraints as described in AP_ERXFR_EL1.
+ */
+union bdk_ap_erxaddr_el1
+{
+ uint64_t u;
+ struct bdk_ap_erxaddr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Data. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Data. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_erxaddr_el1_s cn; */
+};
+typedef union bdk_ap_erxaddr_el1 bdk_ap_erxaddr_el1_t;
+
+#define BDK_AP_ERXADDR_EL1 BDK_AP_ERXADDR_EL1_FUNC()
+static inline uint64_t BDK_AP_ERXADDR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ERXADDR_EL1_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x30005040300ll;
+ __bdk_csr_fatal("AP_ERXADDR_EL1", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_ERXADDR_EL1 bdk_ap_erxaddr_el1_t
+#define bustype_BDK_AP_ERXADDR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ERXADDR_EL1 "AP_ERXADDR_EL1"
+#define busnum_BDK_AP_ERXADDR_EL1 0
+#define arguments_BDK_AP_ERXADDR_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_erxctlr_el1
+ *
+ * AP Selected Error Record Control Register
+ * Accesses the ERR\<n\>CTLR control register for the error record selected by AP_ERRSELR_EL1[SEL].
+ *
+ * Usage constraints as described in AP_ERXFR_EL1.
+ */
+union bdk_ap_erxctlr_el1
+{
+ uint64_t u;
+ struct bdk_ap_erxctlr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Data. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Data. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_erxctlr_el1_s cn; */
+};
+typedef union bdk_ap_erxctlr_el1 bdk_ap_erxctlr_el1_t;
+
+#define BDK_AP_ERXCTLR_EL1 BDK_AP_ERXCTLR_EL1_FUNC()
+static inline uint64_t BDK_AP_ERXCTLR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ERXCTLR_EL1_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x30005040100ll;
+ __bdk_csr_fatal("AP_ERXCTLR_EL1", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_ERXCTLR_EL1 bdk_ap_erxctlr_el1_t
+#define bustype_BDK_AP_ERXCTLR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ERXCTLR_EL1 "AP_ERXCTLR_EL1"
+#define busnum_BDK_AP_ERXCTLR_EL1 0
+#define arguments_BDK_AP_ERXCTLR_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_erxfr_el1
+ *
+ * AP Selected Error Record Feature Register
+ * Accesses the ERR\<n\>FR feature register for the error record selected by AP_ERRSELR_EL1[SEL].
+ *
+ * Usage constraints:
+ * AP_ERXFR_EL1 is UNDEFINED at EL0.
+ * If EL2 is implemented and AP_HCR_EL2[TERR] == 1, then direct reads of AP_ERXFR_EL1 at
+ * Non-secure EL1 generate a Trap exception to EL2.
+ * If EL3 is implemented and AP_SCR_EL3[TERR] == 1, then direct reads of AP_ERXFR_EL1 at
+ * EL1 and EL2 generate a Trap exception to EL3.
+ * If AP_ERRIDR_EL1[NUM] == 0 or AP_ERRSELR_EL1[SEL] is set to a value greater than or equal to
+ * AP_ERRIDR_EL1[NUM] then one of:
+ * - An UNKNOWN record is selected.
+ * - AP_ERXFR_EL1 is RAZ/WI.
+ * - Direct reads of AP_ERXFR_EL1 are NOPs.
+ * - Direct reads of AP_ERXFR_EL1 are undefined.
+ */
+union bdk_ap_erxfr_el1
+{
+ uint64_t u;
+ struct bdk_ap_erxfr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Data. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Data. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_erxfr_el1_s cn; */
+};
+typedef union bdk_ap_erxfr_el1 bdk_ap_erxfr_el1_t;
+
+#define BDK_AP_ERXFR_EL1 BDK_AP_ERXFR_EL1_FUNC()
+static inline uint64_t BDK_AP_ERXFR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ERXFR_EL1_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x30005040000ll;
+ __bdk_csr_fatal("AP_ERXFR_EL1", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_ERXFR_EL1 bdk_ap_erxfr_el1_t
+#define bustype_BDK_AP_ERXFR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ERXFR_EL1 "AP_ERXFR_EL1"
+#define busnum_BDK_AP_ERXFR_EL1 0
+#define arguments_BDK_AP_ERXFR_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_erxmisc0_el1
+ *
+ * AP Selected Error Record Miscellaneous Register 0
+ * Accesses the ERR\<n\>MISC0 miscellaneous register 0 for the error record selected by
+ * ERRSELR_EL1.SEL.
+ *
+ * Usage constraints as described in AP_ERXFR_EL1.
+ */
+union bdk_ap_erxmisc0_el1
+{
+ uint64_t u;
+ struct bdk_ap_erxmisc0_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Data. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Data. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_erxmisc0_el1_s cn; */
+};
+typedef union bdk_ap_erxmisc0_el1 bdk_ap_erxmisc0_el1_t;
+
+#define BDK_AP_ERXMISC0_EL1 BDK_AP_ERXMISC0_EL1_FUNC()
+static inline uint64_t BDK_AP_ERXMISC0_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ERXMISC0_EL1_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x30005050000ll;
+ __bdk_csr_fatal("AP_ERXMISC0_EL1", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_ERXMISC0_EL1 bdk_ap_erxmisc0_el1_t
+#define bustype_BDK_AP_ERXMISC0_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ERXMISC0_EL1 "AP_ERXMISC0_EL1"
+#define busnum_BDK_AP_ERXMISC0_EL1 0
+#define arguments_BDK_AP_ERXMISC0_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_erxmisc1_el1
+ *
+ * AP Selected Error Record Miscellaneous Register 1
+ * Accesses the ERR\<n\>MISC1 miscellaneous register 1 for the error record selected by
+ * ERRSELR_EL1.SEL.
+ *
+ * Usage constraints as described in AP_ERXFR_EL1.
+ */
+union bdk_ap_erxmisc1_el1
+{
+ uint64_t u;
+ struct bdk_ap_erxmisc1_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Data. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Data. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_erxmisc1_el1_s cn; */
+};
+typedef union bdk_ap_erxmisc1_el1 bdk_ap_erxmisc1_el1_t;
+
+#define BDK_AP_ERXMISC1_EL1 BDK_AP_ERXMISC1_EL1_FUNC()
+static inline uint64_t BDK_AP_ERXMISC1_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ERXMISC1_EL1_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x30005050100ll;
+ __bdk_csr_fatal("AP_ERXMISC1_EL1", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_ERXMISC1_EL1 bdk_ap_erxmisc1_el1_t
+#define bustype_BDK_AP_ERXMISC1_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ERXMISC1_EL1 "AP_ERXMISC1_EL1"
+#define busnum_BDK_AP_ERXMISC1_EL1 0
+#define arguments_BDK_AP_ERXMISC1_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_erxstatus_el1
+ *
+ * AP Selected Error Record Primary Status Register
+ * Accesses the ERR\<n\>STATUS primary status register for the error record selected by
+ * ERRSELR_EL1.SEL.
+ *
+ * Usage constraints as described in AP_ERXFR_EL1.
+ */
+union bdk_ap_erxstatus_el1
+{
+ uint64_t u;
+ struct bdk_ap_erxstatus_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Data. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Data. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_erxstatus_el1_s cn; */
+};
+typedef union bdk_ap_erxstatus_el1 bdk_ap_erxstatus_el1_t;
+
+#define BDK_AP_ERXSTATUS_EL1 BDK_AP_ERXSTATUS_EL1_FUNC()
+static inline uint64_t BDK_AP_ERXSTATUS_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ERXSTATUS_EL1_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x30005040200ll;
+ __bdk_csr_fatal("AP_ERXSTATUS_EL1", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_ERXSTATUS_EL1 bdk_ap_erxstatus_el1_t
+#define bustype_BDK_AP_ERXSTATUS_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ERXSTATUS_EL1 "AP_ERXSTATUS_EL1"
+#define busnum_BDK_AP_ERXSTATUS_EL1 0
+#define arguments_BDK_AP_ERXSTATUS_EL1 -1,-1,-1,-1
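+
+/*
+ * Sketch of the select-then-read flow implied by AP_ERRIDR_EL1,
+ * AP_ERRSELR_EL1, and the ERX* windows above. The two sysreg accessors
+ * below are hypothetical placeholders for whatever MRS/MSR wrappers the
+ * surrounding code provides; only the flow itself is illustrated.
+ */
+extern uint64_t sketch_sysreg_read(uint64_t reg);            /* hypothetical */
+extern void sketch_sysreg_write(uint64_t reg, uint64_t val); /* hypothetical */
+
+static inline void sketch_scan_error_records(void)
+{
+    bdk_ap_erridr_el1_t idr;
+    idr.u = sketch_sysreg_read(BDK_AP_ERRIDR_EL1);
+    for (uint64_t i = 0; i < idr.s.num; i++)
+    {
+        bdk_ap_errselr_el1_t sel = { .u = 0 };
+        sel.s.sel = i & 0xffff;         /* window record i through ERX* */
+        sketch_sysreg_write(BDK_AP_ERRSELR_EL1, sel.u);
+        uint64_t status = sketch_sysreg_read(BDK_AP_ERXSTATUS_EL1);
+        (void)status;                   /* ERR<i>STATUS for the selected record */
+    }
+}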
+
+/**
+ * Register (SYSREG) ap_esr_el#
+ *
+ * AP Exception Syndrome Register
+ * Holds syndrome information for an exception taken to EL*.
+ */
+union bdk_ap_esr_elx
+{
+ uint32_t u;
+ struct bdk_ap_esr_elx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ec : 6; /**< [ 31: 26](R/W) Exception Class. Indicates the reason for the exception that
+ this register holds information about.
+
+ 0x0 = Unknown or Uncategorized Reason - generally used for
+ exceptions as a result of erroneous execution.
+ 0x1 = Exceptions from WFE/WFI from either AArch32 or AArch64 as a
+ result of configurable traps, enables, or disables.
+
+ Conditional WFE and WFI instructions that fail their condition
+ code check do not cause an exception.
+
+ 0x3 = Exceptions from MCR/MRC to CP15 from AArch32 as a result of
+ configurable traps, enables, or disables that do not use
+ code 0x0.
+ 0x4 = Exceptions from MCRR/MRRC to CP15 from AArch32 as a result of
+ configurable traps, enables, or disables that do not use
+ code 0x0.
+ 0x5 = Exceptions from MCR/MRC to CP14 from AArch32 as a result of
+ configurable traps, enables, or disables that do not use
+ code 0x0.
+ 0x6 = Exceptions from LDC/STC to CP14 from AArch32 as a result of
+ configurable traps, enables, or disables. The only architected
+ uses of these instructions to access CP14 are:
+ An STC to write to AP_DBGDTRRX_EL0 or DBGDTRRXint.
+ An LDC to read from AP_DBGDTRTX_EL0 or DBGDTRTXint.
+
+ 0x7 = Exceptions from access to Advanced SIMD or Floating Point as a
+ result of configurable traps, enables, or disables.
+ 0xC = Exceptions from MCRR/MRRC to CP14 from AArch32 as a result of
+ configurable traps, enables, or disables.
+ 0xE = Exceptions that occur because the value of PSTATE[IL] is 1.
+                                                 0x13 = SMC that is not disabled executed in AArch32.
+ 0x15 = SVC executed in AArch64.
+ 0x16 = HVC that is not disabled executed in AArch64.
+ 0x17 = SMC that is not disabled executed in AArch64.
+ 0x18 = Exceptions as a result of MSR, MRS, or System AArch64
+ instructions as a result of configurable traps, enables, or
+                                                 disables, except those using codes 0b000000, 0b000001, or 0b000111.
+ 0x20 = Instruction Abort that caused entry from a lower Exception
+ level (AArch32 or AArch64). Used for instruction access
+ generated MMU faults and synchronous external aborts,
+ including synchronous parity errors. Not used for debug
+ related exceptions.
+ 0x21 = Instruction Abort that caused entry from a current Exception
+ level (AArch64). Used for instruction access generated MMU
+ faults and synchronous external aborts, including synchronous
+ parity errors. Not used for debug related exceptions.
+ 0x22 = PC Alignment Exception.
+ 0x24 = Data Abort that caused entry from a lower Exception level
+ (AArch32 or AArch64). Used for data access generated MMU
+ faults, alignment faults other than those caused by the Stack
+ Pointer misalignment, and synchronous external aborts,
+ including synchronous parity errors. Not used for debug
+ related exceptions.
+ 0x25 = Data Abort that caused entry from a current Exception level
+ (AArch64). Used for data access generated MMU faults,
+ alignment faults other than those caused by the Stack Pointer
+ misalignment, and synchronous external aborts, including
+ synchronous parity errors. Not used for debug related
+ exceptions.
+ 0x26 = Stack Pointer Alignment Exception.
+ 0x2C = Exceptions as a result of Floating-point exception (optional
+ feature) from AArch64.
+ 0x2F = SError Interrupt.
+ 0x3C = BRK instruction executed in AArch64 state. */
+ uint32_t il : 1; /**< [ 25: 25](R/W) Instruction Length for synchronous exceptions.
+ 0 = 16-bit instruction trapped.
+ 1 = 32-bit instruction trapped. This value is also used when the
+ exception is one of the following:
+ * An SError interrupt.
+ * An Instruction Abort exception.
+ * A Misaligned PC exception.
+ * A Misaligned Stack Pointer exception.
+ * A Data Abort exception for which the value of the ISV bit is 0.
+ * An Illegal Execution State exception.
+                                                 * Any debug exception except for Software Breakpoint
+                                                 Instruction exceptions. For Software Breakpoint Instruction
+                                                 exceptions, this bit has its standard meaning:
+                                                 0 = 16-bit T32 BKPT instruction.
+                                                 1 = 32-bit A32 BKPT instruction or A64 BRK instruction.
+ * An exception reported using EC value 0b000000. */
+ uint32_t iss : 25; /**< [ 24: 0](R/W) Instruction Specific Syndrome. Architecturally, this field can
+ be defined independently for each defined Exception class.
+ However, in practice, some ISS encodings are used for more
+ than one Exception class.
+
+ Typically, an ISS encoding has a number of subfields. When an
+ ISS subfield holds a register number, the value returned in
+ that field is the AArch64 view of the register number, even if
+ the reported exception was taken from AArch32 state. If the
+ register number is AArch32 register R15, then:
+
+ If the instruction that generated the exception was not
+ UNPREDICTABLE, the field takes the value 0b11111.
+
+                                                 If the instruction that generated the exception was
+                                                 UNPREDICTABLE, the field takes an UNKNOWN value that must be
+                                                 either the AArch64 view of the register number of a register
+                                                 that might have been used at the Exception level from
+                                                 which the exception was taken, or the value 0b11111.
+
+                                                 When the EC field is 0b000000, this field is RES0. */
+#else /* Word 0 - Little Endian */
+ uint32_t iss : 25; /**< [ 24: 0](R/W) Instruction Specific Syndrome. Architecturally, this field can
+ be defined independently for each defined Exception class.
+ However, in practice, some ISS encodings are used for more
+ than one Exception class.
+
+ Typically, an ISS encoding has a number of subfields. When an
+ ISS subfield holds a register number, the value returned in
+ that field is the AArch64 view of the register number, even if
+ the reported exception was taken from AArch32 state. If the
+ register number is AArch32 register R15, then:
+
+                                                 If the instruction that generated the exception was not
+                                                 UNPREDICTABLE, the field takes the value 0b11111.
+
+                                                 If the instruction that generated the exception was
+                                                 UNPREDICTABLE, the field takes an UNKNOWN value that must be
+                                                 either the AArch64 view of the register number of a register
+                                                 that might have been used at the Exception level from
+                                                 which the exception was taken, or the value 0b11111.
+
+                                                 When the EC field is 0b000000, this field is RES0. */
+ uint32_t il : 1; /**< [ 25: 25](R/W) Instruction Length for synchronous exceptions.
+ 0 = 16-bit instruction trapped.
+ 1 = 32-bit instruction trapped. This value is also used when the
+ exception is one of the following:
+ * An SError interrupt.
+ * An Instruction Abort exception.
+ * A Misaligned PC exception.
+ * A Misaligned Stack Pointer exception.
+ * A Data Abort exception for which the value of the ISV bit is 0.
+ * An Illegal Execution State exception.
+                                                 * Any debug exception except for Software Breakpoint
+                                                 Instruction exceptions. For Software Breakpoint Instruction
+                                                 exceptions, this bit has its standard meaning:
+                                                 0 = 16-bit T32 BKPT instruction.
+                                                 1 = 32-bit A32 BKPT instruction or A64 BRK instruction.
+ * An exception reported using EC value 0b000000. */
+ uint32_t ec : 6; /**< [ 31: 26](R/W) Exception Class. Indicates the reason for the exception that
+ this register holds information about.
+
+ 0x0 = Unknown or Uncategorized Reason - generally used for
+ exceptions as a result of erroneous execution.
+ 0x1 = Exceptions from WFE/WFI from either AArch32 or AArch64 as a
+ result of configurable traps, enables, or disables.
+
+ Conditional WFE and WFI instructions that fail their condition
+ code check do not cause an exception.
+
+ 0x3 = Exceptions from MCR/MRC to CP15 from AArch32 as a result of
+ configurable traps, enables, or disables that do not use
+ code 0x0.
+ 0x4 = Exceptions from MCRR/MRRC to CP15 from AArch32 as a result of
+ configurable traps, enables, or disables that do not use
+ code 0x0.
+ 0x5 = Exceptions from MCR/MRC to CP14 from AArch32 as a result of
+ configurable traps, enables, or disables that do not use
+ code 0x0.
+ 0x6 = Exceptions from LDC/STC to CP14 from AArch32 as a result of
+ configurable traps, enables, or disables. The only architected
+ uses of these instructions to access CP14 are:
+ An STC to write to AP_DBGDTRRX_EL0 or DBGDTRRXint.
+ An LDC to read from AP_DBGDTRTX_EL0 or DBGDTRTXint.
+
+ 0x7 = Exceptions from access to Advanced SIMD or Floating Point as a
+ result of configurable traps, enables, or disables.
+ 0xC = Exceptions from MCRR/MRRC to CP14 from AArch32 as a result of
+ configurable traps, enables, or disables.
+ 0xE = Exceptions that occur because the value of PSTATE[IL] is 1.
+                                                 0x13 = SMC that is not disabled executed in AArch32.
+ 0x15 = SVC executed in AArch64.
+ 0x16 = HVC that is not disabled executed in AArch64.
+ 0x17 = SMC that is not disabled executed in AArch64.
+ 0x18 = Exceptions as a result of MSR, MRS, or System AArch64
+ instructions as a result of configurable traps, enables, or
+                                                 disables, except those using codes 0b000000, 0b000001, or 0b000111.
+ 0x20 = Instruction Abort that caused entry from a lower Exception
+ level (AArch32 or AArch64). Used for instruction access
+ generated MMU faults and synchronous external aborts,
+ including synchronous parity errors. Not used for debug
+ related exceptions.
+ 0x21 = Instruction Abort that caused entry from a current Exception
+ level (AArch64). Used for instruction access generated MMU
+ faults and synchronous external aborts, including synchronous
+ parity errors. Not used for debug related exceptions.
+ 0x22 = PC Alignment Exception.
+ 0x24 = Data Abort that caused entry from a lower Exception level
+ (AArch32 or AArch64). Used for data access generated MMU
+ faults, alignment faults other than those caused by the Stack
+ Pointer misalignment, and synchronous external aborts,
+ including synchronous parity errors. Not used for debug
+ related exceptions.
+ 0x25 = Data Abort that caused entry from a current Exception level
+ (AArch64). Used for data access generated MMU faults,
+ alignment faults other than those caused by the Stack Pointer
+ misalignment, and synchronous external aborts, including
+ synchronous parity errors. Not used for debug related
+ exceptions.
+ 0x26 = Stack Pointer Alignment Exception.
+ 0x2C = Exceptions as a result of Floating-point exception (optional
+ feature) from AArch64.
+ 0x2F = SError Interrupt.
+ 0x3C = BRK instruction executed in AArch64 state. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_esr_elx_s cn; */
+};
+typedef union bdk_ap_esr_elx bdk_ap_esr_elx_t;
+
+static inline uint64_t BDK_AP_ESR_ELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ESR_ELX(unsigned long a)
+{
+ if ((a>=1)&&(a<=3))
+ return 0x30005020000ll + 0ll * ((a) & 0x3);
+ __bdk_csr_fatal("AP_ESR_ELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_ESR_ELX(a) bdk_ap_esr_elx_t
+#define bustype_BDK_AP_ESR_ELX(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ESR_ELX(a) "AP_ESR_ELX"
+#define busnum_BDK_AP_ESR_ELX(a) (a)
+#define arguments_BDK_AP_ESR_ELX(a) (a),-1,-1,-1
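+
+/*
+ * Sketch (not BDK code): splitting an ESR_ELx syndrome into its EC/IL/ISS
+ * fields with the union above, e.g. to recognize an SError interrupt
+ * (EC = 0x2F). Reading the register itself is assumed and not shown.
+ */
+static inline int sketch_esr_is_serror(bdk_ap_esr_elx_t esr)
+{
+    return esr.s.ec == 0x2f; /* EC 0x2F = SError interrupt */
+}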
+
+/**
+ * Register (SYSREG) ap_esr_el12
+ *
+ * AP Exception Syndrome Register
+ * Alias of ESR_EL1 when accessed at EL2/3 and AP_HCR_EL2[E2H] is set.
+ */
+union bdk_ap_esr_el12
+{
+ uint32_t u;
+ struct bdk_ap_esr_el12_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_esr_el12_s cn; */
+};
+typedef union bdk_ap_esr_el12 bdk_ap_esr_el12_t;
+
+#define BDK_AP_ESR_EL12 BDK_AP_ESR_EL12_FUNC()
+static inline uint64_t BDK_AP_ESR_EL12_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ESR_EL12_FUNC(void)
+{
+ return 0x30505020000ll;
+}
+
+#define typedef_BDK_AP_ESR_EL12 bdk_ap_esr_el12_t
+#define bustype_BDK_AP_ESR_EL12 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ESR_EL12 "AP_ESR_EL12"
+#define busnum_BDK_AP_ESR_EL12 0
+#define arguments_BDK_AP_ESR_EL12 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_far_el#
+ *
+ * AP Fault Address Register
+ * Holds the faulting Virtual Address for all synchronous
+ * instruction or data aborts, or exceptions from a misaligned
+ * PC, taken to EL*.
+ */
+union bdk_ap_far_elx
+{
+ uint64_t u;
+ struct bdk_ap_far_elx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Faulting Virtual Address for exceptions taken to EL*.
+ Exceptions that set the FAR_EL* are all synchronous
+ instruction aborts or data aborts, or an exception from a
+ misaligned PC.
+ If a memory fault that sets FAR_EL* is generated from one of
+ the data cache instructions, this field holds the address
+ specified in the register argument of the instruction. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Faulting Virtual Address for exceptions taken to EL*.
+ Exceptions that set the FAR_EL* are all synchronous
+ instruction aborts or data aborts, or an exception from a
+ misaligned PC.
+ If a memory fault that sets FAR_EL* is generated from one of
+ the data cache instructions, this field holds the address
+ specified in the register argument of the instruction. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_far_elx_s cn; */
+};
+typedef union bdk_ap_far_elx bdk_ap_far_elx_t;
+
+static inline uint64_t BDK_AP_FAR_ELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_FAR_ELX(unsigned long a)
+{
+ if ((a>=1)&&(a<=3))
+ return 0x30006000000ll + 0ll * ((a) & 0x3);
+ __bdk_csr_fatal("AP_FAR_ELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_FAR_ELX(a) bdk_ap_far_elx_t
+#define bustype_BDK_AP_FAR_ELX(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_FAR_ELX(a) "AP_FAR_ELX"
+#define busnum_BDK_AP_FAR_ELX(a) (a)
+#define arguments_BDK_AP_FAR_ELX(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_far_el12
+ *
+ * AP Fault Address Register
+ * Alias of FAR_EL1 when accessed at EL2/3 and AP_HCR_EL2[E2H] is set.
+ */
+union bdk_ap_far_el12
+{
+ uint64_t u;
+ struct bdk_ap_far_el12_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_far_el12_s cn; */
+};
+typedef union bdk_ap_far_el12 bdk_ap_far_el12_t;
+
+#define BDK_AP_FAR_EL12 BDK_AP_FAR_EL12_FUNC()
+static inline uint64_t BDK_AP_FAR_EL12_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_FAR_EL12_FUNC(void)
+{
+ return 0x30506000000ll;
+}
+
+#define typedef_BDK_AP_FAR_EL12 bdk_ap_far_el12_t
+#define bustype_BDK_AP_FAR_EL12 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_FAR_EL12 "AP_FAR_EL12"
+#define busnum_BDK_AP_FAR_EL12 0
+#define arguments_BDK_AP_FAR_EL12 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_fpcr
+ *
+ * AP Floating-point Control Register
+ * Controls floating-point extension behavior.
+ */
+union bdk_ap_fpcr
+{
+ uint32_t u;
+ struct bdk_ap_fpcr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_27_31 : 5;
+ uint32_t ahp : 1; /**< [ 26: 26](R/W) Alternative half-precision control bit:
+ 0 = IEEE half-precision format selected.
+ 1 = Alternative half-precision format selected. */
+ uint32_t dn : 1; /**< [ 25: 25](R/W) Default NaN mode control bit:
+ The value of this bit controls both scalar and Advanced SIMD
+ floating-point arithmetic.
+ 0 = NaN operands propagate through to the output of a floating-
+ point operation.
+ 1 = Any operation involving one or more NaNs returns the Default
+ NaN. */
+ uint32_t fz : 1; /**< [ 24: 24](R/W) Flush-to-zero mode control bit:
+ The value of this bit controls both scalar and Advanced SIMD
+ floating-point arithmetic.
+ 0 = Flush-to-zero mode disabled. Behavior of the floating-point
+ system is fully compliant with the IEEE 754 standard.
+ 1 = Flush-to-zero mode enabled. */
+ uint32_t rmode : 2; /**< [ 23: 22](R/W) Rounding Mode control field. The specified
+ rounding mode is used by both scalar and
+ Advanced SIMD floating-point instructions.
+ The encoding of this field is:
+ 0x0 = Round to Nearest (RN) mode.
+ 0x1 = Round towards Plus Infinity (RP) mode.
+ 0x2 = Round towards Minus Infinity (RM) mode.
+ 0x3 = Round towards Zero (RZ) mode. */
+ uint32_t reserved_0_21 : 22;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_21 : 22;
+ uint32_t rmode : 2; /**< [ 23: 22](R/W) Rounding Mode control field. The specified
+ rounding mode is used by both scalar and
+ Advanced SIMD floating-point instructions.
+ The encoding of this field is:
+ 0x0 = Round to Nearest (RN) mode.
+ 0x1 = Round towards Plus Infinity (RP) mode.
+ 0x2 = Round towards Minus Infinity (RM) mode.
+ 0x3 = Round towards Zero (RZ) mode. */
+ uint32_t fz : 1; /**< [ 24: 24](R/W) Flush-to-zero mode control bit:
+ The value of this bit controls both scalar and Advanced SIMD
+ floating-point arithmetic.
+ 0 = Flush-to-zero mode disabled. Behavior of the floating-point
+ system is fully compliant with the IEEE 754 standard.
+ 1 = Flush-to-zero mode enabled. */
+ uint32_t dn : 1; /**< [ 25: 25](R/W) Default NaN mode control bit:
+ The value of this bit controls both scalar and Advanced SIMD
+ floating-point arithmetic.
+ 0 = NaN operands propagate through to the output of a floating-
+ point operation.
+ 1 = Any operation involving one or more NaNs returns the Default
+ NaN. */
+ uint32_t ahp : 1; /**< [ 26: 26](R/W) Alternative half-precision control bit:
+ 0 = IEEE half-precision format selected.
+ 1 = Alternative half-precision format selected. */
+ uint32_t reserved_27_31 : 5;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_fpcr_cn
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_27_31 : 5;
+ uint32_t ahp : 1; /**< [ 26: 26](R/W) Alternative half-precision control bit:
+ 0 = IEEE half-precision format selected.
+ 1 = Alternative half-precision format selected. */
+ uint32_t dn : 1; /**< [ 25: 25](R/W) Default NaN mode control bit:
+ The value of this bit controls both scalar and Advanced SIMD
+ floating-point arithmetic.
+ 0 = NaN operands propagate through to the output of a floating-
+ point operation.
+ 1 = Any operation involving one or more NaNs returns the Default
+ NaN. */
+ uint32_t fz : 1; /**< [ 24: 24](R/W) Flush-to-zero mode control bit:
+ The value of this bit controls both scalar and Advanced SIMD
+ floating-point arithmetic.
+ 0 = Flush-to-zero mode disabled. Behavior of the floating-point
+ system is fully compliant with the IEEE 754 standard.
+ 1 = Flush-to-zero mode enabled. */
+ uint32_t rmode : 2; /**< [ 23: 22](R/W) Rounding Mode control field. The specified
+ rounding mode is used by both scalar and
+ Advanced SIMD floating-point instructions.
+ The encoding of this field is:
+ 0x0 = Round to Nearest (RN) mode.
+ 0x1 = Round towards Plus Infinity (RP) mode.
+ 0x2 = Round towards Minus Infinity (RM) mode.
+ 0x3 = Round towards Zero (RZ) mode. */
+ uint32_t reserved_20_21 : 2;
+ uint32_t reserved_19 : 1;
+ uint32_t reserved_16_18 : 3;
+ uint32_t reserved_15 : 1;
+ uint32_t reserved_13_14 : 2;
+ uint32_t reserved_12 : 1;
+ uint32_t reserved_11 : 1;
+ uint32_t reserved_10 : 1;
+ uint32_t reserved_9 : 1;
+ uint32_t reserved_8 : 1;
+ uint32_t reserved_0_7 : 8;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_7 : 8;
+ uint32_t reserved_8 : 1;
+ uint32_t reserved_9 : 1;
+ uint32_t reserved_10 : 1;
+ uint32_t reserved_11 : 1;
+ uint32_t reserved_12 : 1;
+ uint32_t reserved_13_14 : 2;
+ uint32_t reserved_15 : 1;
+ uint32_t reserved_16_18 : 3;
+ uint32_t reserved_19 : 1;
+ uint32_t reserved_20_21 : 2;
+ uint32_t rmode : 2; /**< [ 23: 22](R/W) Rounding Mode control field. The specified
+ rounding mode is used by both scalar and
+ Advanced SIMD floating-point instructions.
+ The encoding of this field is:
+ 0x0 = Round to Nearest (RN) mode.
+ 0x1 = Round towards Plus Infinity (RP) mode.
+ 0x2 = Round towards Minus Infinity (RM) mode.
+ 0x3 = Round towards Zero (RZ) mode. */
+ uint32_t fz : 1; /**< [ 24: 24](R/W) Flush-to-zero mode control bit:
+ The value of this bit controls both scalar and Advanced SIMD
+ floating-point arithmetic.
+ 0 = Flush-to-zero mode disabled. Behavior of the floating-point
+ system is fully compliant with the IEEE 754 standard.
+ 1 = Flush-to-zero mode enabled. */
+ uint32_t dn : 1; /**< [ 25: 25](R/W) Default NaN mode control bit:
+ The value of this bit controls both scalar and Advanced SIMD
+ floating-point arithmetic.
+ 0 = NaN operands propagate through to the output of a floating-
+ point operation.
+ 1 = Any operation involving one or more NaNs returns the Default
+ NaN. */
+ uint32_t ahp : 1; /**< [ 26: 26](R/W) Alternative half-precision control bit:
+ 0 = IEEE half-precision format selected.
+ 1 = Alternative half-precision format selected. */
+ uint32_t reserved_27_31 : 5;
+#endif /* Word 0 - End */
+ } cn;
+};
+typedef union bdk_ap_fpcr bdk_ap_fpcr_t;
+
+#define BDK_AP_FPCR BDK_AP_FPCR_FUNC()
+static inline uint64_t BDK_AP_FPCR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_FPCR_FUNC(void)
+{
+ return 0x30304040000ll;
+}
+
+#define typedef_BDK_AP_FPCR bdk_ap_fpcr_t
+#define bustype_BDK_AP_FPCR BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_FPCR "AP_FPCR"
+#define busnum_BDK_AP_FPCR 0
+#define arguments_BDK_AP_FPCR -1,-1,-1,-1
+
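+/* Illustrative sketch (not part of the imported BDK sources): programming
+ * round-towards-zero and flush-to-zero through the bdk_ap_fpcr_t layout
+ * defined above. The helper name is hypothetical; FPCR access via MRS/MSR
+ * is architectural AArch64, and GNU inline assembly is assumed.
+ */
+static inline void example_set_fpcr_rz_fz(void)
+{
+    bdk_ap_fpcr_t fpcr;
+    uint64_t tmp;
+    __asm__ volatile("mrs %0, fpcr" : "=r" (tmp));
+    fpcr.u = (uint32_t)tmp;
+    fpcr.s.rmode = 3;      /* 0x3 = Round towards Zero (RZ) mode */
+    fpcr.s.fz = 1;         /* enable flush-to-zero */
+    tmp = fpcr.u;
+    __asm__ volatile("msr fpcr, %0" : : "r" (tmp));
+}
+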
+/**
+ * Register (SYSREG) ap_fpexc32_el2
+ *
+ * AP Floating-point Exception Control Register
+ * Allows access to the AArch32 register FPEXC from AArch64 state
+ * only. Its value has no effect on execution in AArch64 state.
+ */
+union bdk_ap_fpexc32_el2
+{
+ uint32_t u;
+ struct bdk_ap_fpexc32_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_fpexc32_el2_s cn; */
+};
+typedef union bdk_ap_fpexc32_el2 bdk_ap_fpexc32_el2_t;
+
+#define BDK_AP_FPEXC32_EL2 BDK_AP_FPEXC32_EL2_FUNC()
+static inline uint64_t BDK_AP_FPEXC32_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_FPEXC32_EL2_FUNC(void)
+{
+ return 0x30405030000ll;
+}
+
+#define typedef_BDK_AP_FPEXC32_EL2 bdk_ap_fpexc32_el2_t
+#define bustype_BDK_AP_FPEXC32_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_FPEXC32_EL2 "AP_FPEXC32_EL2"
+#define busnum_BDK_AP_FPEXC32_EL2 0
+#define arguments_BDK_AP_FPEXC32_EL2 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_fpsr
+ *
+ * AP Floating-point Status Register
+ * Provides floating-point system status information.
+ */
+union bdk_ap_fpsr
+{
+ uint32_t u;
+ struct bdk_ap_fpsr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_28_31 : 4;
+ uint32_t qc : 1; /**< [ 27: 27](R/W) Cumulative saturation bit, Advanced SIMD only. This bit is set
+ to 1 to indicate that an Advanced SIMD integer operation has
+ saturated since 0 was last written to this bit. */
+ uint32_t reserved_8_26 : 19;
+ uint32_t idc : 1; /**< [ 7: 7](R/W) Input Denormal cumulative exception bit. This bit is set to 1
+ to indicate that the Input Denormal exception has occurred
+ since 0 was last written to this bit.
+
+ How scalar and Advanced SIMD floating-point instructions
+ update this bit depends on the value of the AP_FPCR[IDE] bit. This
+ bit is only set to 1 to indicate an exception if AP_FPCR[IDE] is
+ 0, or if trapping software sets it. */
+ uint32_t reserved_5_6 : 2;
+ uint32_t ixc : 1; /**< [ 4: 4](R/W) Inexact cumulative exception bit. This bit is set to 1 to
+ indicate that the Inexact exception has occurred since 0 was
+ last written to this bit.
+
+ How scalar and Advanced SIMD floating-point instructions
+ update this bit depends on the value of the AP_FPCR[IXE] bit. This
+ bit is only set to 1 to indicate an exception if AP_FPCR[IXE] is
+ 0, or if trapping software sets it. */
+ uint32_t ufc : 1; /**< [ 3: 3](R/W) Underflow cumulative exception bit. This bit is set to 1 to
+ indicate that the Underflow exception has occurred since 0 was
+ last written to this bit.
+
+ How scalar and Advanced SIMD floating-point instructions
+ update this bit depends on the value of the AP_FPCR[UFE] bit. This
+ bit is only set to 1 to indicate an exception if AP_FPCR[UFE] is
+ 0, or if trapping software sets it. */
+ uint32_t ofc : 1; /**< [ 2: 2](R/W) Overflow cumulative exception bit. This bit is set to 1 to
+ indicate that the Overflow exception has occurred since 0 was
+ last written to this bit.
+
+ How scalar and Advanced SIMD floating-point instructions
+ update this bit depends on the value of the AP_FPCR[OFE] bit. This
+ bit is only set to 1 to indicate an exception if AP_FPCR[OFE] is
+ 0, or if trapping software sets it. */
+ uint32_t dzc : 1; /**< [ 1: 1](R/W) Division by Zero cumulative exception bit. This bit is set to
+ 1 to indicate that the Division by Zero exception has occurred
+ since 0 was last written to this bit.
+
+ How scalar and Advanced SIMD floating-point instructions
+ update this bit depends on the value of the AP_FPCR[DZE] bit. This
+ bit is only set to 1 to indicate an exception if AP_FPCR[DZE] is
+ 0, or if trapping software sets it. */
+ uint32_t ioc : 1; /**< [ 0: 0](R/W) Invalid Operation cumulative exception bit. This bit is set to
+ 1 to indicate that the Invalid Operation exception has
+ occurred since 0 was last written to this bit.
+
+ How scalar and Advanced SIMD floating-point instructions
+ update this bit depends on the value of the AP_FPCR[IOE] bit. This
+ bit is only set to 1 to indicate an exception if AP_FPCR[IOE] is
+ 0, or if trapping software sets it. */
+#else /* Word 0 - Little Endian */
+ uint32_t ioc : 1; /**< [ 0: 0](R/W) Invalid Operation cumulative exception bit. This bit is set to
+ 1 to indicate that the Invalid Operation exception has
+ occurred since 0 was last written to this bit.
+
+ How scalar and Advanced SIMD floating-point instructions
+ update this bit depends on the value of the AP_FPCR[IOE] bit. This
+ bit is only set to 1 to indicate an exception if AP_FPCR[IOE] is
+ 0, or if trapping software sets it. */
+ uint32_t dzc : 1; /**< [ 1: 1](R/W) Division by Zero cumulative exception bit. This bit is set to
+ 1 to indicate that the Division by Zero exception has occurred
+ since 0 was last written to this bit.
+
+ How scalar and Advanced SIMD floating-point instructions
+ update this bit depends on the value of the AP_FPCR[DZE] bit. This
+ bit is only set to 1 to indicate an exception if AP_FPCR[DZE] is
+ 0, or if trapping software sets it. */
+ uint32_t ofc : 1; /**< [ 2: 2](R/W) Overflow cumulative exception bit. This bit is set to 1 to
+ indicate that the Overflow exception has occurred since 0 was
+ last written to this bit.
+
+ How scalar and Advanced SIMD floating-point instructions
+ update this bit depends on the value of the AP_FPCR[OFE] bit. This
+ bit is only set to 1 to indicate an exception if AP_FPCR[OFE] is
+ 0, or if trapping software sets it. */
+ uint32_t ufc : 1; /**< [ 3: 3](R/W) Underflow cumulative exception bit. This bit is set to 1 to
+ indicate that the Underflow exception has occurred since 0 was
+ last written to this bit.
+
+ How scalar and Advanced SIMD floating-point instructions
+ update this bit depends on the value of the AP_FPCR[UFE] bit. This
+ bit is only set to 1 to indicate an exception if AP_FPCR[UFE] is
+ 0, or if trapping software sets it. */
+ uint32_t ixc : 1; /**< [ 4: 4](R/W) Inexact cumulative exception bit. This bit is set to 1 to
+ indicate that the Inexact exception has occurred since 0 was
+ last written to this bit.
+
+ How scalar and Advanced SIMD floating-point instructions
+ update this bit depends on the value of the AP_FPCR[IXE] bit. This
+ bit is only set to 1 to indicate an exception if AP_FPCR[IXE] is
+ 0, or if trapping software sets it. */
+ uint32_t reserved_5_6 : 2;
+ uint32_t idc : 1; /**< [ 7: 7](R/W) Input Denormal cumulative exception bit. This bit is set to 1
+ to indicate that the Input Denormal exception has occurred
+ since 0 was last written to this bit.
+
+ How scalar and Advanced SIMD floating-point instructions
+ update this bit depends on the value of the AP_FPCR[IDE] bit. This
+ bit is only set to 1 to indicate an exception if AP_FPCR[IDE] is
+ 0, or if trapping software sets it. */
+ uint32_t reserved_8_26 : 19;
+ uint32_t qc : 1; /**< [ 27: 27](R/W) Cumulative saturation bit, Advanced SIMD only. This bit is set
+ to 1 to indicate that an Advanced SIMD integer operation has
+ saturated since 0 was last written to this bit. */
+ uint32_t reserved_28_31 : 4;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_fpsr_cn
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_31 : 1;
+ uint32_t reserved_30 : 1;
+ uint32_t reserved_29 : 1;
+ uint32_t reserved_28 : 1;
+ uint32_t qc : 1; /**< [ 27: 27](R/W) Cumulative saturation bit, Advanced SIMD only. This bit is set
+ to 1 to indicate that an Advanced SIMD integer operation has
+ saturated since 0 was last written to this bit. */
+ uint32_t reserved_8_26 : 19;
+ uint32_t idc : 1; /**< [ 7: 7](R/W) Input Denormal cumulative exception bit. This bit is set to 1
+ to indicate that the Input Denormal exception has occurred
+ since 0 was last written to this bit.
+
+ How scalar and Advanced SIMD floating-point instructions
+ update this bit depends on the value of the AP_FPCR[IDE] bit. This
+ bit is only set to 1 to indicate an exception if AP_FPCR[IDE] is
+ 0, or if trapping software sets it. */
+ uint32_t reserved_5_6 : 2;
+ uint32_t ixc : 1; /**< [ 4: 4](R/W) Inexact cumulative exception bit. This bit is set to 1 to
+ indicate that the Inexact exception has occurred since 0 was
+ last written to this bit.
+
+ How scalar and Advanced SIMD floating-point instructions
+ update this bit depends on the value of the AP_FPCR[IXE] bit. This
+ bit is only set to 1 to indicate an exception if AP_FPCR[IXE] is
+ 0, or if trapping software sets it. */
+ uint32_t ufc : 1; /**< [ 3: 3](R/W) Underflow cumulative exception bit. This bit is set to 1 to
+ indicate that the Underflow exception has occurred since 0 was
+ last written to this bit.
+
+ How scalar and Advanced SIMD floating-point instructions
+ update this bit depends on the value of the AP_FPCR[UFE] bit. This
+ bit is only set to 1 to indicate an exception if AP_FPCR[UFE] is
+ 0, or if trapping software sets it. */
+ uint32_t ofc : 1; /**< [ 2: 2](R/W) Overflow cumulative exception bit. This bit is set to 1 to
+ indicate that the Overflow exception has occurred since 0 was
+ last written to this bit.
+
+ How scalar and Advanced SIMD floating-point instructions
+ update this bit depends on the value of the AP_FPCR[OFE] bit. This
+ bit is only set to 1 to indicate an exception if AP_FPCR[OFE] is
+ 0, or if trapping software sets it. */
+ uint32_t dzc : 1; /**< [ 1: 1](R/W) Division by Zero cumulative exception bit. This bit is set to
+ 1 to indicate that the Division by Zero exception has occurred
+ since 0 was last written to this bit.
+
+ How scalar and Advanced SIMD floating-point instructions
+ update this bit depends on the value of the AP_FPCR[DZE] bit. This
+ bit is only set to 1 to indicate an exception if AP_FPCR[DZE] is
+ 0, or if trapping software sets it. */
+ uint32_t ioc : 1; /**< [ 0: 0](R/W) Invalid Operation cumulative exception bit. This bit is set to
+ 1 to indicate that the Invalid Operation exception has
+ occurred since 0 was last written to this bit.
+
+ How scalar and Advanced SIMD floating-point instructions
+ update this bit depends on the value of the AP_FPCR[IOE] bit. This
+ bit is only set to 1 to indicate an exception if AP_FPCR[IOE] is
+ 0, or if trapping software sets it. */
+#else /* Word 0 - Little Endian */
+ uint32_t ioc : 1; /**< [ 0: 0](R/W) Invalid Operation cumulative exception bit. This bit is set to
+ 1 to indicate that the Invalid Operation exception has
+ occurred since 0 was last written to this bit.
+
+ How scalar and Advanced SIMD floating-point instructions
+ update this bit depends on the value of the AP_FPCR[IOE] bit. This
+ bit is only set to 1 to indicate an exception if AP_FPCR[IOE] is
+ 0, or if trapping software sets it. */
+ uint32_t dzc : 1; /**< [ 1: 1](R/W) Division by Zero cumulative exception bit. This bit is set to
+ 1 to indicate that the Division by Zero exception has occurred
+ since 0 was last written to this bit.
+
+ How scalar and Advanced SIMD floating-point instructions
+ update this bit depends on the value of the AP_FPCR[DZE] bit. This
+ bit is only set to 1 to indicate an exception if AP_FPCR[DZE] is
+ 0, or if trapping software sets it. */
+ uint32_t ofc : 1; /**< [ 2: 2](R/W) Overflow cumulative exception bit. This bit is set to 1 to
+ indicate that the Overflow exception has occurred since 0 was
+ last written to this bit.
+
+ How scalar and Advanced SIMD floating-point instructions
+ update this bit depends on the value of the AP_FPCR[OFE] bit. This
+ bit is only set to 1 to indicate an exception if AP_FPCR[OFE] is
+ 0, or if trapping software sets it. */
+ uint32_t ufc : 1; /**< [ 3: 3](R/W) Underflow cumulative exception bit. This bit is set to 1 to
+ indicate that the Underflow exception has occurred since 0 was
+ last written to this bit.
+
+ How scalar and Advanced SIMD floating-point instructions
+ update this bit depends on the value of the AP_FPCR[UFE] bit. This
+ bit is only set to 1 to indicate an exception if AP_FPCR[UFE] is
+ 0, or if trapping software sets it. */
+ uint32_t ixc : 1; /**< [ 4: 4](R/W) Inexact cumulative exception bit. This bit is set to 1 to
+ indicate that the Inexact exception has occurred since 0 was
+ last written to this bit.
+
+ How scalar and Advanced SIMD floating-point instructions
+ update this bit depends on the value of the AP_FPCR[IXE] bit. This
+ bit is only set to 1 to indicate an exception if AP_FPCR[IXE] is
+ 0, or if trapping software sets it. */
+ uint32_t reserved_5_6 : 2;
+ uint32_t idc : 1; /**< [ 7: 7](R/W) Input Denormal cumulative exception bit. This bit is set to 1
+ to indicate that the Input Denormal exception has occurred
+ since 0 was last written to this bit.
+
+ How scalar and Advanced SIMD floating-point instructions
+ update this bit depends on the value of the AP_FPCR[IDE] bit. This
+ bit is only set to 1 to indicate an exception if AP_FPCR[IDE] is
+ 0, or if trapping software sets it. */
+ uint32_t reserved_8_26 : 19;
+ uint32_t qc : 1; /**< [ 27: 27](R/W) Cumulative saturation bit, Advanced SIMD only. This bit is set
+ to 1 to indicate that an Advanced SIMD integer operation has
+ saturated since 0 was last written to this bit. */
+ uint32_t reserved_28 : 1;
+ uint32_t reserved_29 : 1;
+ uint32_t reserved_30 : 1;
+ uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+ } cn;
+};
+typedef union bdk_ap_fpsr bdk_ap_fpsr_t;
+
+#define BDK_AP_FPSR BDK_AP_FPSR_FUNC()
+static inline uint64_t BDK_AP_FPSR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_FPSR_FUNC(void)
+{
+ return 0x30304040100ll;
+}
+
+#define typedef_BDK_AP_FPSR bdk_ap_fpsr_t
+#define bustype_BDK_AP_FPSR BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_FPSR "AP_FPSR"
+#define busnum_BDK_AP_FPSR 0
+#define arguments_BDK_AP_FPSR -1,-1,-1,-1
+
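+/* Illustrative sketch (not part of the imported BDK sources): testing and
+ * clearing the sticky Invalid Operation flag using the bdk_ap_fpsr_t layout
+ * above. The helper name is hypothetical; FPSR access via MRS/MSR is
+ * architectural AArch64, and GNU inline assembly is assumed.
+ */
+static inline int example_fp_invalid_seen(void)
+{
+    bdk_ap_fpsr_t fpsr;
+    uint64_t tmp;
+    __asm__ volatile("mrs %0, fpsr" : "=r" (tmp));
+    fpsr.u = (uint32_t)tmp;
+    if (!fpsr.s.ioc)
+        return 0;          /* no Invalid Operation since the bit was last cleared */
+    fpsr.s.ioc = 0;        /* writing 0 clears the cumulative bit */
+    tmp = fpsr.u;
+    __asm__ volatile("msr fpsr, %0" : : "r" (tmp));
+    return 1;
+}
+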
+/**
+ * Register (SYSREG) ap_hacr_el2
+ *
+ * AP Hypervisor Auxiliary Control Register
+ * Controls trapping to EL2 of implementation defined aspects of
+ * nonsecure EL1 or EL0 operation.
+ */
+union bdk_ap_hacr_el2
+{
+ uint32_t u;
+ struct bdk_ap_hacr_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_hacr_el2_s cn; */
+};
+typedef union bdk_ap_hacr_el2 bdk_ap_hacr_el2_t;
+
+#define BDK_AP_HACR_EL2 BDK_AP_HACR_EL2_FUNC()
+static inline uint64_t BDK_AP_HACR_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_HACR_EL2_FUNC(void)
+{
+ return 0x30401010700ll;
+}
+
+#define typedef_BDK_AP_HACR_EL2 bdk_ap_hacr_el2_t
+#define bustype_BDK_AP_HACR_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_HACR_EL2 "AP_HACR_EL2"
+#define busnum_BDK_AP_HACR_EL2 0
+#define arguments_BDK_AP_HACR_EL2 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_hcr_el2
+ *
+ * AP Hypervisor Configuration Register
+ * Provides configuration controls for virtualization, including
+ * defining whether various nonsecure operations are trapped to
+ * EL2.
+ */
+union bdk_ap_hcr_el2
+{
+ uint64_t u;
+ struct bdk_ap_hcr_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_38_63 : 26;
+ uint64_t tea : 1; /**< [ 37: 37](R/W) RAS: Route synchronous external aborts to EL2.
+ 0 = Do not route synchronous external aborts from Non-secure EL0 and EL1 to EL2.
+ 1 = Route synchronous external aborts from Non-secure EL0 and EL1 to EL2, if not routed
+ to EL3. */
+ uint64_t terr : 1; /**< [ 36: 36](R/W) RAS: Trap Error record accesses.
+ 0 = Do not trap accesses to error record registers from Non-secure EL1 to EL2.
+ 1 = Accesses to the ER* registers from Non-secure EL1 generate a Trap exception to EL2. */
+ uint64_t tlor : 1; /**< [ 35: 35](R/W) v8.1: Trap access to the LOR Registers from nonsecure EL1 to EL2.
+ 0 = Nonsecure EL1 accesses to the LOR Registers are not trapped to EL2.
+ 1 = Nonsecure EL1 accesses to the LOR Registers are trapped to EL2. */
+ uint64_t e2h : 1; /**< [ 34: 34](R/W) v8.1: Enable EL2 host. */
+ uint64_t id : 1; /**< [ 33: 33](R/W) Stage 2 Instruction cache disable. When AP_HCR_EL2[VM]==1, this
+ forces all stage 2 translations for instruction accesses to
+ Normal memory to be Non-cacheable for the EL1&0 translation
+ regime.
+ This bit has no effect on the EL2 or EL3 translation regimes.
+ 0 = No effect on the stage 2 of the EL1&0 translation regime for
+ instruction accesses.
+ 1 = Forces all stage 2 translations for instruction accesses to
+ Normal memory to be Non-cacheable for the EL1&0 translation
+ regime. */
+ uint64_t cd : 1; /**< [ 32: 32](R/W) Stage 2 Data cache disable. When AP_HCR_EL2[VM]==1, this forces
+ all stage 2 translations for data accesses and translation
+ table walks to Normal memory to be Non-cacheable for the EL1&0
+ translation regime.
+ This bit has no effect on the EL2 or EL3 translation regimes.
+ 0 = No effect on the stage 2 of the EL1&0 translation regime for
+ data accesses and translation table walks.
+ 1 = Forces all stage 2 translations for data accesses and
+ translation table walks to Normal memory to be Non-cacheable
+ for the EL1&0 translation regime. */
+ uint64_t rsvd_31 : 1; /**< [ 31: 31](RO) RW: Register Width control for lower exception levels.
+ When AP_SCR_EL3[NS]==0, this bit behaves as if it has the same
+ value as the AP_SCR_EL3[RW] bit except for the value read back.
+ The RW bit is permitted to be cached in a TLB.
+ 0 = Lower levels are all AArch32.
+ 1 = EL1 is AArch64. EL0 is determined by the Execution state
+ described in the current process state when executing at EL0. */
+ uint64_t trvm : 1; /**< [ 30: 30](R/W) Trap Read of Virtual Memory controls. When this bit is set to
+ 1, this causes Reads to the EL1 virtual memory control
+ registers from EL1 to be trapped to EL2. This covers the
+ following registers:
+
+ AArch32: SCTLR, TTBR0, TTBR1, TTBCR, DACR, DFSR, IFSR, DFAR,
+ IFAR, ADFSR, AIFSR, PRRR/ MAIR0, NMRR/ MAIR1, AMAIR0, AMAIR1,
+ CONTEXTIDR.
+
+ AArch64: AP_SCTLR_EL1, AP_TTBR0_EL1, AP_TTBR1_EL1, AP_TCR_EL1, ESR_EL1,
+ FAR_EL1, AFSR0_EL1, AFSR1_EL1, MAIR_EL1, AMAIR_EL1,
+ AP_CONTEXTIDR_EL1. */
+ uint64_t reserved_29 : 1;
+ uint64_t tdz : 1; /**< [ 28: 28](R/W) Trap DC ZVA instruction:
+ This bit also has an effect on the value read from the
+ AP_DCZID_EL0 register. If this bit is 1, then reading
+ AP_DCZID_EL0[DZP] from nonsecure EL1 or EL0 will return 1 to
+ indicate that DC ZVA is prohibited.
+ 0 = The instruction is not trapped.
+ 1 = The instruction is trapped to EL2 when executed in nonsecure
+ EL1 or EL0. */
+ uint64_t tge : 1; /**< [ 27: 27](R/W) Trap General Exceptions. If this bit is set to 1, and
+ AP_SCR_EL3[NS] is set to 1, then:
+
+ All exceptions that would be routed to EL1 are routed to EL2.
+
+ The AP_SCTLR_EL1[M] bit is treated as being 0 regardless of its
+ actual state (for EL1 using AArch32 or AArch64) other than for
+ the purpose of reading the bit.
+
+ The AP_HCR_EL2[FMO], IMO and AMO bits are treated as being 1
+ regardless of their actual state other than for the purpose of
+ reading the bits.
+
+ All virtual interrupts are disabled.
+
+ Any implementation defined mechanisms for signalling virtual
+ interrupts are disabled.
+
+ An exception return to EL1 is treated as an illegal exception
+ return.
+
+ Additionally, if AP_HCR_EL2[TGE] == 1, the
+ AP_MDCR_EL2.{TDRA,TDOSA,TDA} bits are ignored and the processor
+ behaves as if they are set to 1, other than for the value read
+ back from AP_MDCR_EL2. */
+ uint64_t tvm : 1; /**< [ 26: 26](R/W) Trap Virtual Memory controls. When this bit is set to 1, this
+ causes Writes to the EL1 virtual memory control registers from
+ EL1 to be trapped to EL2. This covers the following registers:
+
+ AArch32: SCTLR, TTBR0, TTBR1, TTBCR, DACR, DFSR, IFSR, DFAR,
+ IFAR, ADFSR, AIFSR, PRRR/ MAIR0, NMRR/ MAIR1, AMAIR0, AMAIR1,
+ CONTEXTIDR.
+
+ AArch64: AP_SCTLR_EL1, AP_TTBR0_EL1, AP_TTBR1_EL1, AP_TCR_EL1, ESR_EL1,
+ FAR_EL1, AFSR0_EL1, AFSR1_EL1, MAIR_EL1, AMAIR_EL1,
+ AP_CONTEXTIDR_EL1 */
+ uint64_t ttlb : 1; /**< [ 25: 25](R/W) Trap TLB maintenance instructions. When this bit is set to 1,
+ this causes TLB maintenance instructions executed from EL1
+ which are not UNdefined to be trapped to EL2. This covers the
+ following instructions:
+
+ AArch32: TLBIALLIS, TLBIMVAIS, TLBIASIDIS, TLBIMVAAIS,
+ TLBIALL, TLBIMVA, TLBIASID, DTLBIALL, DTLBIMVA, DTLBIASID,
+ ITLBIALL, ITLBIMVA, ITLBIASID, TLBIMVAA, TLBIMVALIS,
+ TLBIMVAALIS, TLBIMVAL, TLBIMVAAL
+
+ AArch64: TLBI VMALLE1, TLBI VAE1, TLBI ASIDE1, TLBI VAAE1,
+ TLBI VALE1, TLBI VAALE1, TLBI VMALLE1IS, TLBI VAE1IS, TLBI
+ ASIDE1IS, TLBI VAAE1IS, TLBI VALE1IS, TLBI VAALE1IS */
+ uint64_t tpu : 1; /**< [ 24: 24](R/W) Trap Cache maintenance instructions to Point of Unification.
+ When this bit is set to 1, this causes Cache maintenance
+ instructions to the point of unification executed from EL1 or
+ EL0 which are not UNdefined to be trapped to EL2. This covers
+ the following instructions:
+
+ AArch32: ICIMVAU, ICIALLU, ICIALLUIS, DCCMVAU.
+
+ AArch64: IC IVAU, IC IALLU, IC IALLUIS, DC CVAU. */
+ uint64_t tpc : 1; /**< [ 23: 23](R/W) Trap Data/Unified Cache maintenance operations to Point of
+ Coherency. When this bit is set to 1, this causes Data or
+ Unified Cache maintenance instructions by address to the point
+ of coherency executed from EL1 or EL0 which are not UNdefined
+ to be trapped to EL2. This covers the following instructions:
+
+ AArch32: DCIMVAC, DCCIMVAC, DCCMVAC.
+
+ AArch64: DC IVAC, DC CIVAC, DC CVAC. */
+ uint64_t tsw : 1; /**< [ 22: 22](R/W) Trap Data/Unified Cache maintenance operations by Set/Way.
+ When this bit is set to 1, this causes Data or Unified Cache
+ maintenance instructions by set/way executed from EL1 which
+ are not UNdefined to be trapped to EL2. This covers the
+ following instructions:
+
+ AArch32: DCISW, DCCSW, DCCISW.
+
+ AArch64: DC ISW, DC CSW, DC CISW. */
+ uint64_t tacr : 1; /**< [ 21: 21](R/W) Trap Auxiliary Control Register. When this bit is set to 1,
+ this causes accesses to the following registers executed from
+ EL1 to be trapped to EL2:
+
+ AArch32: ACTLR.
+
+ AArch64: ACTLR_EL1. */
+ uint64_t tidcp : 1; /**< [ 20: 20](R/W) Trap Implementation Dependent functionality. When this bit is
+ set to 1, this causes accesses to the following instruction
+ set space executed from EL1 to be trapped to EL2.
+
+ AArch32: MCR and MRC instructions as follows:
+
+ All CP15, CRn==9, Opcode1 == {0-7}, CRm == {c0-c2, c5-c8},
+ opcode2 == {0-7}.
+
+ All CP15, CRn==10, Opcode1 == {0-7}, CRm == {c0, c1, c4, c8},
+ opcode2 == {0-7}.
+
+ All CP15, CRn==11, Opcode1 == {0-7}, CRm == {c0-c8, c15},
+ opcode2 == {0-7}.
+
+ AArch64: All encoding space reserved for implementation
+ defined system operations ( S1_\<op1\>_\<Cn\>_\<Cm\>_\<op2\>) and
+ system registers ( S3_\<op1\>_\<Cn\>_\<Cm\>_\<op2\>).
+
+ It is implementation defined whether any of this functionality
+ accessed from EL0 is trapped to EL2 when the AP_HCR_EL2[TIDCP] bit
+ is set. If it is not trapped to EL2, it results in an
+ Undefined exception taken to EL1. */
+ uint64_t tsc : 1; /**< [ 19: 19](R/W) Trap SMC. When this bit is set to 1, this causes the following
+ instructions executed from EL1 to be trapped to EL2:
+
+ AArch32: SMC.
+
+ AArch64: SMC.
+
+ If EL3 is not implemented, this bit is RES0. */
+ uint64_t tid3 : 1; /**< [ 18: 18](R/W) Trap ID Group 3. When this bit is set to 1, this causes reads
+ to the following registers executed from EL1 to be trapped to
+ EL2:
+
+ AArch32: ID_PFR0, ID_PFR1, ID_DFR0, ID_AFR0, ID_MMFR0,
+ ID_MMFR1, ID_MMFR2, ID_MMFR3, ID_ISAR0, ID_ISAR1, ID_ISAR2,
+ ID_ISAR3, ID_ISAR4, ID_ISAR5, MVFR0, MVFR1, MVFR2. Also MRC to
+ any of the following encodings:
+
+ CP15, CRn == 0, Opc1 == 0, CRm == {3-7}, Opc2 == {0,1}.
+
+ CP15, CRn == 0, Opc1 == 0, CRm == 3, Opc2 == 2.
+
+ CP15, CRn == 0, Opc1 == 0, CRm == 5, Opc2 == {4,5}.
+
+ AArch64: AP_ID_PFR0_EL1, AP_ID_PFR1_EL1, AP_ID_DFR0_EL1, AP_ID_AFR0_EL1,
+ ID_MMFR0_EL1, ID_MMFR1_EL1, ID_MMFR2_EL1, ID_MMFR3_EL1,
+ ID_ISAR0_EL1, ID_ISAR1_EL1, ID_ISAR2_EL1, ID_ISAR3_EL1,
+ ID_ISAR4_EL1, ID_ISAR5_EL1, MVFR0_EL1, MVFR1_EL1, MVFR2_EL1,
+ AP_ID_AA64PFR0_EL1, AP_ID_AA64PFR1_EL1, AP_ID_AA64DFR0_EL1,
+ AP_ID_AA64DFR1_EL1, AP_ID_AA64ISAR0_EL1, AP_ID_AA64ISAR1_EL1,
+ AP_ID_AA64MMFR0_EL1, AP_ID_AA64MMFR1_EL1, AP_ID_AA64AFR0_EL1,
+ AP_ID_AA64AFR1_EL1. */
+ uint64_t tid2 : 1; /**< [ 17: 17](R/W) Trap ID Group 2. When this bit is set to 1, this causes reads
+ (or writes to CSSELR/ AP_CSSELR_EL1) to the following registers
+ executed from EL1 or EL0 if not UNdefined to be trapped to
+ EL2:
+
+ AArch32: CTR, CCSIDR, CLIDR, CSSELR.
+
+ AArch64: AP_CTR_EL0, AP_CCSIDR_EL1, AP_CLIDR_EL1, AP_CSSELR_EL1. */
+ uint64_t tid1 : 1; /**< [ 16: 16](R/W) Trap ID Group 1. When this bit is set to 1, this causes reads
+ to the following registers executed from EL1 to be trapped to
+ EL2:
+
+ AArch32: TCMTR, TLBTR, AIDR, REVIDR.
+
+ AArch64: AP_AIDR_EL1, AP_REVIDR_EL1. */
+ uint64_t tid0 : 1; /**< [ 15: 15](R/W) Trap ID Group 0. When this bit is set to 1, this causes reads
+ to the following registers executed from EL1 or EL0 if not
+ UNdefined to be trapped to EL2:
+
+ AArch32: FPSID, JIDR.
+
+ AArch64: None. */
+ uint64_t twe : 1; /**< [ 14: 14](R/W) Trap WFE. When this bit is set to 1, this causes the following
+ instructions executed from EL1 or EL0 to be trapped to EL2 if
+ the instruction would otherwise cause suspension of execution
+ (i.e. if the event register is not set):
+
+ AArch32: WFE.
+
+ AArch64: WFE.
+
+ Conditional WFE instructions that fail their condition are not
+ trapped if this bit is set to 1. */
+ uint64_t twi : 1; /**< [ 13: 13](R/W) Trap WFI. When this bit is set to 1, this causes the following
+ instructions executed from EL1 or EL0 to be trapped to EL2 if
+ the instruction would otherwise cause suspension of execution
+ (i.e. if there is not a pending WFI wakeup event):
+
+ AArch32: WFI.
+
+ AArch64: WFI.
+
+ Conditional WFI instructions that fail their condition are not
+ trapped if this bit is set to 1. */
+ uint64_t dc : 1; /**< [ 12: 12](R/W) Default Cacheable. When this bit is set to 1, this causes:
+
+ * The AP_SCTLR_EL1[M] bit to behave as 0 when in the nonsecure
+ state for all purposes other than reading the value of the
+ bit.
+
+ * The AP_HCR_EL2[VM] bit to behave as 1 when in the nonsecure
+ state for all purposes other than reading the value of the
+ bit.
+
+ The memory type produced by the first stage of translation
+ used by EL1 and EL0 is Normal Non-Shareable, Inner WriteBack
+ Read-WriteAllocate, Outer WriteBack Read-WriteAllocate.
+
+ When this bit is 0 and the stage 1 MMU is disabled, the
+ default memory attribute for Data accesses is Device-nGnRnE.
+
+ This bit is permitted to be cached in a TLB. */
+ uint64_t bsu : 2; /**< [ 11: 10](R/W) Barrier Shareability upgrade. The value in this field
+ determines the minimum shareability domain that is applied to
+ any barrier executed from EL1 or EL0.
+
+ This value is combined with the specified level of the barrier
+ held in its instruction, using the same principles as
+ combining the shareability attributes from two stages of
+ address translation.
+
+ 0x0 = No effect.
+ 0x1 = Inner Shareable.
+ 0x2 = Outer Shareable.
+ 0x3 = Full system. */
+ uint64_t fb : 1; /**< [ 9: 9](R/W) Force broadcast. When this bit is set to 1, this causes the
+ following instructions to be broadcast within the Inner
+ Shareable domain when executed from nonsecure EL1:
+
+ AArch32: BPIALL, TLBIALL, TLBIMVA, TLBIASID, DTLBIALL,
+ DTLBIMVA, DTLBIASID, ITLBIALL, ITLBIMVA, ITLBIASID, TLBIMVAA,
+ ICIALLU, TLBIMVAL, TLBIMVAAL.
+
+ AArch64: TLBI VMALLE1, TLBI VAE1, TLBI ASIDE1, TLBI VAAE1,
+ TLBI VALE1, TLBI VAALE1, IC IALLU. */
+ uint64_t vse : 1; /**< [ 8: 8](R/W) Virtual System Error/Asynchronous Abort.
+ The virtual System Error/Asynchronous Abort is only enabled
+ when the AP_HCR_EL2[AMO] bit is set.
+ 0 = Virtual System Error/Asynchronous Abort is not pending by this
+ mechanism.
+ 1 = Virtual System Error/Asynchronous Abort is pending by this
+ mechanism. */
+ uint64_t vi : 1; /**< [ 7: 7](R/W) Virtual IRQ Interrupt.
+ The virtual IRQ is only enabled when the AP_HCR_EL2[IMO] bit is
+ set.
+ 0 = Virtual IRQ is not pending by this mechanism.
+ 1 = Virtual IRQ is pending by this mechanism. */
+ uint64_t vf : 1; /**< [ 6: 6](R/W) Virtual FIQ Interrupt.
+ The virtual FIQ is only enabled when the AP_HCR_EL2[FMO] bit is
+ set.
+ 0 = Virtual FIQ is not pending by this mechanism.
+ 1 = Virtual FIQ is pending by this mechanism. */
+ uint64_t amo : 1; /**< [ 5: 5](R/W) Asynchronous abort and error interrupt routing.
+ 0 = Asynchronous External Aborts and SError Interrupts while
+ executing at exception levels lower than EL2 are not taken in
+ EL2. Virtual System Error/Asynchronous Abort is disabled.
+ 1 = Asynchronous External Aborts and SError Interrupts while
+ executing at EL2 or lower are taken in EL2 unless routed by
+ the AP_SCR_EL3[EA] bit to EL3. Virtual System Error/Asynchronous
+ Abort is enabled. */
+ uint64_t imo : 1; /**< [ 4: 4](R/W) Physical IRQ Routing.
+ 0 = Physical IRQ while executing at exception levels lower than
+ EL2 are not taken in EL2. Virtual IRQ Interrupt is disabled.
+ 1 = Physical IRQ while executing at EL2 or lower are taken in EL2
+ unless routed by the AP_SCR_EL3[IRQ] bit to EL3. Virtual IRQ
+ Interrupt is enabled. */
+ uint64_t fmo : 1; /**< [ 3: 3](R/W) Physical FIQ Routing.
+ 0 = Physical FIQ while executing at exception levels lower than
+ EL2 are not taken in EL2. Virtual FIQ Interrupt is disabled.
+ 1 = Physical FIQ while executing at EL2 or lower are taken in EL2
+ unless routed by the AP_SCR_EL3[FIQ] bit to EL3. Virtual FIQ
+ Interrupt is enabled. */
+ uint64_t ptw : 1; /**< [ 2: 2](R/W) Protected Table Walk. When this bit is set to 1, if the stage
+ 2 translation of a translation table access made as part of a
+ stage 1 translation table walk at EL0 or EL1 maps that
+ translation table access to Strongly-ordered or Device memory,
+ the access is faulted as a stage 2 Permission fault.
+ This bit is permitted to be cached in a TLB. */
+ uint64_t swio : 1; /**< [ 1: 1](R/W) Set/Way Invalidation Override. When this bit is set to 1, this
+ causes EL1 execution of the data cache invalidate by set/way
+ instruction to be treated as data cache clean and invalidate
+ by set/way. That is:
+
+ AArch32: DCISW is executed as DCCISW.
+
+ AArch64: DC ISW is executed as DC CISW.
+
+ As a result of changes to the behavior of DCISW, this bit is
+ redundant in ARMv8. It is permissible that an implementation
+ makes this bit RES1. */
+ uint64_t vm : 1; /**< [ 0: 0](R/W) Virtualization MMU enable for EL1 and EL0 stage 2 address
+ translation.
+ This bit is permitted to be cached in a TLB.
+ 0 = EL1 and EL0 stage 2 address translation disabled.
+ 1 = EL1 and EL0 stage 2 address translation enabled. */
+#else /* Word 0 - Little Endian */
+ uint64_t vm : 1; /**< [ 0: 0](R/W) Virtualization MMU enable for EL1 and EL0 stage 2 address
+ translation.
+ This bit is permitted to be cached in a TLB.
+ 0 = EL1 and EL0 stage 2 address translation disabled.
+ 1 = EL1 and EL0 stage 2 address translation enabled. */
+ uint64_t swio : 1; /**< [ 1: 1](R/W) Set/Way Invalidation Override. When this bit is set to 1, this
+ causes EL1 execution of the data cache invalidate by set/way
+ instruction to be treated as data cache clean and invalidate
+ by set/way. That is:
+
+ AArch32: DCISW is executed as DCCISW.
+
+ AArch64: DC ISW is executed as DC CISW.
+
+ As a result of changes to the behavior of DCISW, this bit is
+ redundant in ARMv8. It is permissible that an implementation
+ makes this bit RES1. */
+ uint64_t ptw : 1; /**< [ 2: 2](R/W) Protected Table Walk. When this bit is set to 1, if the stage
+ 2 translation of a translation table access made as part of a
+ stage 1 translation table walk at EL0 or EL1 maps that
+ translation table access to Strongly-ordered or Device memory,
+ the access is faulted as a stage 2 Permission fault.
+ This bit is permitted to be cached in a TLB. */
+ uint64_t fmo : 1; /**< [ 3: 3](R/W) Physical FIQ Routing.
+ 0 = Physical FIQ while executing at exception levels lower than
+ EL2 are not taken in EL2. Virtual FIQ Interrupt is disabled.
+ 1 = Physical FIQ while executing at EL2 or lower are taken in EL2
+ unless routed by the AP_SCR_EL3[FIQ] bit to EL3. Virtual FIQ
+ Interrupt is enabled. */
+ uint64_t imo : 1; /**< [ 4: 4](R/W) Physical IRQ Routing.
+ 0 = Physical IRQ while executing at exception levels lower than
+ EL2 are not taken in EL2. Virtual IRQ Interrupt is disabled.
+ 1 = Physical IRQ while executing at EL2 or lower are taken in EL2
+ unless routed by the AP_SCR_EL3[IRQ] bit to EL3. Virtual IRQ
+ Interrupt is enabled. */
+ uint64_t amo : 1; /**< [ 5: 5](R/W) Asynchronous abort and error interrupt routing.
+ 0 = Asynchronous External Aborts and SError Interrupts while
+ executing at exception levels lower than EL2 are not taken in
+ EL2. Virtual System Error/Asynchronous Abort is disabled.
+ 1 = Asynchronous External Aborts and SError Interrupts while
+ executing at EL2 or lower are taken in EL2 unless routed by
+ the AP_SCR_EL3[EA] bit to EL3. Virtual System Error/Asynchronous
+ Abort is enabled. */
+ uint64_t vf : 1; /**< [ 6: 6](R/W) Virtual FIQ Interrupt.
+ The virtual FIQ is only enabled when the AP_HCR_EL2[FMO] bit is
+ set.
+ 0 = Virtual FIQ is not pending by this mechanism.
+ 1 = Virtual FIQ is pending by this mechanism. */
+ uint64_t vi : 1; /**< [ 7: 7](R/W) Virtual IRQ Interrupt.
+ The virtual IRQ is only enabled when the AP_HCR_EL2[IMO] bit is
+ set.
+ 0 = Virtual IRQ is not pending by this mechanism.
+ 1 = Virtual IRQ is pending by this mechanism. */
+ uint64_t vse : 1; /**< [ 8: 8](R/W) Virtual System Error/Asynchronous Abort.
+ The virtual System Error/Asynchronous Abort is only enabled
+ when the AP_HCR_EL2[AMO] bit is set.
+ 0 = Virtual System Error/Asynchronous Abort is not pending by this
+ mechanism.
+ 1 = Virtual System Error/Asynchronous Abort is pending by this
+ mechanism. */
+ uint64_t fb : 1; /**< [ 9: 9](R/W) Force broadcast. When this bit is set to 1, this causes the
+ following instructions to be broadcast within the Inner
+ Shareable domain when executed from nonsecure EL1:
+
+ AArch32: BPIALL, TLBIALL, TLBIMVA, TLBIASID, DTLBIALL,
+ DTLBIMVA, DTLBIASID, ITLBIALL, ITLBIMVA, ITLBIASID, TLBIMVAA,
+ ICIALLU, TLBIMVAL, TLBIMVAAL.
+
+ AArch64: TLBI VMALLE1, TLBI VAE1, TLBI ASIDE1, TLBI VAAE1,
+ TLBI VALE1, TLBI VAALE1, IC IALLU. */
+ uint64_t bsu : 2; /**< [ 11: 10](R/W) Barrier Shareability upgrade. The value in this field
+ determines the minimum shareability domain that is applied to
+ any barrier executed from EL1 or EL0.
+
+ This value is combined with the specified level of the barrier
+ held in its instruction, using the same principles as
+ combining the shareability attributes from two stages of
+ address translation.
+
+ 0x0 = No effect.
+ 0x1 = Inner Shareable.
+ 0x2 = Outer Shareable.
+ 0x3 = Full system. */
+ uint64_t dc : 1; /**< [ 12: 12](R/W) Default Cacheable. When this bit is set to 1, this causes:
+
+ * The AP_SCTLR_EL1[M] bit to behave as 0 when in the nonsecure
+ state for all purposes other than reading the value of the
+ bit.
+
+ * The AP_HCR_EL2[VM] bit to behave as 1 when in the nonsecure
+ state for all purposes other than reading the value of the
+ bit.
+
+ The memory type produced by the first stage of translation
+ used by EL1 and EL0 is Normal Non-Shareable, Inner WriteBack
+ Read-WriteAllocate, Outer WriteBack Read-WriteAllocate.
+
+ When this bit is 0 and the stage 1 MMU is disabled, the
+ default memory attribute for Data accesses is Device-nGnRnE.
+
+ This bit is permitted to be cached in a TLB. */
+ uint64_t twi : 1; /**< [ 13: 13](R/W) Trap WFI. When this bit is set to 1, this causes the following
+ instructions executed from EL1 or EL0 to be trapped to EL2 if
+ the instruction would otherwise cause suspension of execution
+ (i.e. if there is not a pending WFI wakeup event):
+
+ AArch32: WFI.
+
+ AArch64: WFI.
+
+ Conditional WFI instructions that fail their condition are not
+ trapped if this bit is set to 1. */
+ uint64_t twe : 1; /**< [ 14: 14](R/W) Trap WFE. When this bit is set to 1, this causes the following
+ instructions executed from EL1 or EL0 to be trapped to EL2 if
+ the instruction would otherwise cause suspension of execution
+ (i.e. if the event register is not set):
+
+ AArch32: WFE.
+
+ AArch64: WFE.
+
+ Conditional WFE instructions that fail their condition are not
+ trapped if this bit is set to 1. */
+ uint64_t tid0 : 1; /**< [ 15: 15](R/W) Trap ID Group 0. When this bit is set to 1, this causes reads
+ to the following registers executed from EL1 or EL0 if not
+ UNdefined to be trapped to EL2:
+
+ AArch32: FPSID, JIDR.
+
+ AArch64: None. */
+ uint64_t tid1 : 1; /**< [ 16: 16](R/W) Trap ID Group 1. When this bit is set to 1, this causes reads
+ to the following registers executed from EL1 to be trapped to
+ EL2:
+
+ AArch32: TCMTR, TLBTR, AIDR, REVIDR.
+
+ AArch64: AP_AIDR_EL1, AP_REVIDR_EL1. */
+ uint64_t tid2 : 1; /**< [ 17: 17](R/W) Trap ID Group 2. When this bit is set to 1, this causes reads
+ (or writes to CSSELR/ AP_CSSELR_EL1) to the following registers
+ executed from EL1 or EL0 if not UNdefined to be trapped to
+ EL2:
+
+ AArch32: CTR, CCSIDR, CLIDR, CSSELR.
+
+ AArch64: AP_CTR_EL0, AP_CCSIDR_EL1, AP_CLIDR_EL1, AP_CSSELR_EL1. */
+ uint64_t tid3 : 1; /**< [ 18: 18](R/W) Trap ID Group 3. When this bit is set to 1, this causes reads
+ to the following registers executed from EL1 to be trapped to
+ EL2:
+
+ AArch32: ID_PFR0, ID_PFR1, ID_DFR0, ID_AFR0, ID_MMFR0,
+ ID_MMFR1, ID_MMFR2, ID_MMFR3, ID_ISAR0, ID_ISAR1, ID_ISAR2,
+ ID_ISAR3, ID_ISAR4, ID_ISAR5, MVFR0, MVFR1, MVFR2. Also MRC to
+ any of the following encodings:
+
+ CP15, CRn == 0, Opc1 == 0, CRm == {3-7}, Opc2 == {0,1}.
+
+ CP15, CRn == 0, Opc1 == 0, CRm == 3, Opc2 == 2.
+
+ CP15, CRn == 0, Opc1 == 0, CRm == 5, Opc2 == {4,5}.
+
+ AArch64: AP_ID_PFR0_EL1, AP_ID_PFR1_EL1, AP_ID_DFR0_EL1, AP_ID_AFR0_EL1,
+ ID_MMFR0_EL1, ID_MMFR1_EL1, ID_MMFR2_EL1, ID_MMFR3_EL1,
+ ID_ISAR0_EL1, ID_ISAR1_EL1, ID_ISAR2_EL1, ID_ISAR3_EL1,
+ ID_ISAR4_EL1, ID_ISAR5_EL1, MVFR0_EL1, MVFR1_EL1, MVFR2_EL1,
+ AP_ID_AA64PFR0_EL1, AP_ID_AA64PFR1_EL1, AP_ID_AA64DFR0_EL1,
+ AP_ID_AA64DFR1_EL1, AP_ID_AA64ISAR0_EL1, AP_ID_AA64ISAR1_EL1,
+ AP_ID_AA64MMFR0_EL1, AP_ID_AA64MMFR1_EL1, AP_ID_AA64AFR0_EL1,
+ AP_ID_AA64AFR1_EL1. */
+ uint64_t tsc : 1; /**< [ 19: 19](R/W) Trap SMC. When this bit is set to 1, this causes the following
+ instructions executed from EL1 to be trapped to EL2:
+
+ AArch32: SMC.
+
+ AArch64: SMC.
+
+ If EL3 is not implemented, this bit is RES0. */
+ uint64_t tidcp : 1; /**< [ 20: 20](R/W) Trap Implementation Dependent functionality. When this bit is
+ set to 1, this causes accesses to the following instruction
+ set space executed from EL1 to be trapped to EL2.
+
+ AArch32: MCR and MRC instructions as follows:
+
+ All CP15, CRn==9, Opcode1 == {0-7}, CRm == {c0-c2, c5-c8},
+ opcode2 == {0-7}.
+
+ All CP15, CRn==10, Opcode1 == {0-7}, CRm == {c0, c1, c4, c8},
+ opcode2 == {0-7}.
+
+ All CP15, CRn==11, Opcode1 == {0-7}, CRm == {c0-c8, c15},
+ opcode2 == {0-7}.
+
+ AArch64: All encoding space reserved for implementation
+ defined system operations ( S1_\<op1\>_\<Cn\>_\<Cm\>_\<op2\>) and
+ system registers ( S3_\<op1\>_\<Cn\>_\<Cm\>_\<op2\>).
+
+ It is implementation defined whether any of this functionality
+ accessed from EL0 is trapped to EL2 when the AP_HCR_EL2[TIDCP] bit
+ is set. If it is not trapped to EL2, it results in an
+ Undefined exception taken to EL1. */
+ uint64_t tacr : 1; /**< [ 21: 21](R/W) Trap Auxiliary Control Register. When this bit is set to 1,
+ this causes accesses to the following registers executed from
+ EL1 to be trapped to EL2:
+
+ AArch32: ACTLR.
+
+ AArch64: ACTLR_EL1. */
+ uint64_t tsw : 1; /**< [ 22: 22](R/W) Trap Data/Unified Cache maintenance operations by Set/Way.
+ When this bit is set to 1, this causes Data or Unified Cache
+ maintenance instructions by set/way executed from EL1 which
+ are not UNdefined to be trapped to EL2. This covers the
+ following instructions:
+
+ AArch32: DCISW, DCCSW, DCCISW.
+
+ AArch64: DC ISW, DC CSW, DC CISW. */
+ uint64_t tpc : 1; /**< [ 23: 23](R/W) Trap Data/Unified Cache maintenance operations to Point of
+ Coherency. When this bit is set to 1, this causes Data or
+ Unified Cache maintenance instructions by address to the point
+ of coherency executed from EL1 or EL0 which are not UNdefined
+ to be trapped to EL2. This covers the following instructions:
+
+ AArch32: DCIMVAC, DCCIMVAC, DCCMVAC.
+
+ AArch64: DC IVAC, DC CIVAC, DC CVAC. */
+ uint64_t tpu : 1; /**< [ 24: 24](R/W) Trap Cache maintenance instructions to Point of Unification.
+ When this bit is set to 1, this causes Cache maintenance
+ instructions to the point of unification executed from EL1 or
+ EL0 which are not UNdefined to be trapped to EL2. This covers
+ the following instructions:
+
+ AArch32: ICIMVAU, ICIALLU, ICIALLUIS, DCCMVAU.
+
+ AArch64: IC IVAU, IC IALLU, IC IALLUIS, DC CVAU. */
+ uint64_t ttlb : 1; /**< [ 25: 25](R/W) Trap TLB maintenance instructions. When this bit is set to 1,
+ this causes TLB maintenance instructions executed from EL1
+ which are not UNdefined to be trapped to EL2. This covers the
+ following instructions:
+
+ AArch32: TLBIALLIS, TLBIMVAIS, TLBIASIDIS, TLBIMVAAIS,
+ TLBIALL, TLBIMVA, TLBIASID, DTLBIALL, DTLBIMVA, DTLBIASID,
+ ITLBIALL, ITLBIMVA, ITLBIASID, TLBIMVAA, TLBIMVALIS,
+ TLBIMVAALIS, TLBIMVAL, TLBIMVAAL
+
+ AArch64: TLBI VMALLE1, TLBI VAE1, TLBI ASIDE1, TLBI VAAE1,
+ TLBI VALE1, TLBI VAALE1, TLBI VMALLE1IS, TLBI VAE1IS, TLBI
+ ASIDE1IS, TLBI VAAE1IS, TLBI VALE1IS, TLBI VAALE1IS */
+ uint64_t tvm : 1; /**< [ 26: 26](R/W) Trap Virtual Memory controls. When this bit is set to 1, this
+ causes Writes to the EL1 virtual memory control registers from
+ EL1 to be trapped to EL2. This covers the following registers:
+
+ AArch32: SCTLR, TTBR0, TTBR1, TTBCR, DACR, DFSR, IFSR, DFAR,
+ IFAR, ADFSR, AIFSR, PRRR/ MAIR0, NMRR/ MAIR1, AMAIR0, AMAIR1,
+ CONTEXTIDR.
+
+ AArch64: AP_SCTLR_EL1, AP_TTBR0_EL1, AP_TTBR1_EL1, AP_TCR_EL1, ESR_EL1,
+ FAR_EL1, AFSR0_EL1, AFSR1_EL1, MAIR_EL1, AMAIR_EL1,
+ AP_CONTEXTIDR_EL1 */
+ uint64_t tge : 1; /**< [ 27: 27](R/W) Trap General Exceptions. If this bit is set to 1, and
+ AP_SCR_EL3[NS] is set to 1, then:
+
+ All exceptions that would be routed to EL1 are routed to EL2.
+
+ The AP_SCTLR_EL1[M] bit is treated as being 0 regardless of its
+ actual state (for EL1 using AArch32 or AArch64) other than for
+ the purpose of reading the bit.
+
+ The AP_HCR_EL2[FMO], IMO and AMO bits are treated as being 1
+ regardless of their actual state other than for the purpose of
+ reading the bits.
+
+ All virtual interrupts are disabled.
+
+ Any implementation defined mechanisms for signalling virtual
+ interrupts are disabled.
+
+ An exception return to EL1 is treated as an illegal exception
+ return.
+
+ Additionally, if AP_HCR_EL2[TGE] == 1, the
+ AP_MDCR_EL2.{TDRA,TDOSA,TDA} bits are ignored and the processor
+ behaves as if they are set to 1, other than for the value read
+ back from AP_MDCR_EL2. */
+ uint64_t tdz : 1; /**< [ 28: 28](R/W) Trap DC ZVA instruction:
+ This bit also has an effect on the value read from the
+ AP_DCZID_EL0 register. If this bit is 1, then reading
+ AP_DCZID_EL0[DZP] from nonsecure EL1 or EL0 will return 1 to
+ indicate that DC ZVA is prohibited.
+ 0 = The instruction is not trapped.
+ 1 = The instruction is trapped to EL2 when executed in nonsecure
+ EL1 or EL0. */
+ uint64_t reserved_29 : 1;
+ uint64_t trvm : 1; /**< [ 30: 30](R/W) Trap Read of Virtual Memory controls. When this bit is set to
+ 1, this causes Reads to the EL1 virtual memory control
+ registers from EL1 to be trapped to EL2. This covers the
+ following registers:
+
+ AArch32: SCTLR, TTBR0, TTBR1, TTBCR, DACR, DFSR, IFSR, DFAR,
+ IFAR, ADFSR, AIFSR, PRRR/ MAIR0, NMRR/ MAIR1, AMAIR0, AMAIR1,
+ CONTEXTIDR.
+
+ AArch64: AP_SCTLR_EL1, AP_TTBR0_EL1, AP_TTBR1_EL1, AP_TCR_EL1, ESR_EL1,
+ FAR_EL1, AFSR0_EL1, AFSR1_EL1, MAIR_EL1, AMAIR_EL1,
+ AP_CONTEXTIDR_EL1. */
+ uint64_t rsvd_31 : 1; /**< [ 31: 31](RO) RW: Register Width control for lower exception levels.
+ When AP_SCR_EL3[NS]==0, this bit behaves as if it has the same
+ value as the AP_SCR_EL3[RW] bit except for the value read back.
+ The RW bit is permitted to be cached in a TLB.
+ 0 = Lower levels are all AArch32.
+ 1 = EL1 is AArch64. EL0 is determined by the Execution state
+ described in the current process state when executing at EL0. */
+ uint64_t cd : 1; /**< [ 32: 32](R/W) Stage 2 Data cache disable. When AP_HCR_EL2[VM]==1, this forces
+ all stage 2 translations for data accesses and translation
+ table walks to Normal memory to be Non-cacheable for the EL1&0
+ translation regime.
+ This bit has no effect on the EL2 or EL3 translation regimes.
+ 0 = No effect on the stage 2 of the EL1&0 translation regime for
+ data accesses and translation table walks.
+ 1 = Forces all stage 2 translations for data accesses and
+ translation table walks to Normal memory to be Non-cacheable
+ for the EL1&0 translation regime. */
+ uint64_t id : 1; /**< [ 33: 33](R/W) Stage 2 Instruction cache disable. When AP_HCR_EL2[VM]==1, this
+ forces all stage 2 translations for instruction accesses to
+ Normal memory to be Non-cacheable for the EL1&0 translation
+ regime.
+ This bit has no effect on the EL2 or EL3 translation regimes.
+ 0 = No effect on the stage 2 of the EL1&0 translation regime for
+ instruction accesses.
+ 1 = Forces all stage 2 translations for instruction accesses to
+ Normal memory to be Non-cacheable for the EL1&0 translation
+ regime. */
+ uint64_t e2h : 1; /**< [ 34: 34](R/W) v8.1: Enable EL2 host. */
+ uint64_t tlor : 1; /**< [ 35: 35](R/W) v8.1: Trap access to the LOR Registers from nonsecure EL1 to EL2.
+ 0 = Nonsecure EL1 accesses to the LOR Registers are not trapped to EL2.
+ 1 = Nonsecure EL1 accesses to the LOR Registers are trapped to EL2. */
+ uint64_t terr : 1; /**< [ 36: 36](R/W) RAS: Trap Error record accesses.
+ 0 = Do not trap accesses to error record registers from Non-secure EL1 to EL2.
+ 1 = Accesses to the ER* registers from Non-secure EL1 generate a Trap exception to EL2. */
+ uint64_t tea : 1; /**< [ 37: 37](R/W) RAS: Route synchronous external aborts to EL2.
+ 0 = Do not route synchronous external aborts from Non-secure EL0 and EL1 to EL2.
+ 1 = Route synchronous external aborts from Non-secure EL0 and EL1 to EL2, if not routed
+ to EL3. */
+ uint64_t reserved_38_63 : 26;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_hcr_el2_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t tlor : 1; /**< [ 35: 35](R/W) v8.1: Trap access to the LOR Registers from nonsecure EL1 to EL2.
+ 0 = Nonsecure EL1 accesses to the LOR Registers are not trapped to EL2.
+ 1 = Nonsecure EL1 accesses to the LOR Registers are trapped to EL2. */
+ uint64_t e2h : 1; /**< [ 34: 34](R/W) v8.1: Enable EL2 host. */
+ uint64_t id : 1; /**< [ 33: 33](R/W) Stage 2 Instruction cache disable. When AP_HCR_EL2[VM]==1, this
+ forces all stage 2 translations for instruction accesses to
+ Normal memory to be Non-cacheable for the EL1&0 translation
+ regime.
+ This bit has no effect on the EL2 or EL3 translation regimes.
+ 0 = No effect on the stage 2 of the EL1&0 translation regime for
+ instruction accesses.
+ 1 = Forces all stage 2 translations for instruction accesses to
+ Normal memory to be Non-cacheable for the EL1&0 translation
+ regime. */
+ uint64_t cd : 1; /**< [ 32: 32](R/W) Stage 2 Data cache disable. When AP_HCR_EL2[VM]==1, this forces
+ all stage 2 translations for data accesses and translation
+ table walks to Normal memory to be Non-cacheable for the EL1&0
+ translation regime.
+ This bit has no effect on the EL2 or EL3 translation regimes.
+ 0 = No effect on the stage 2 of the EL1&0 translation regime for
+ data accesses and translation table walks.
+ 1 = Forces all stage 2 translations for data accesses and
+ translation table walks to Normal memory to be Non-cacheable
+ for the EL1&0 translation regime. */
+ uint64_t rsvd_31 : 1; /**< [ 31: 31](RO) RW: Register Width control for lower exception levels.
+ When AP_SCR_EL3[NS]==0, this bit behaves as if it has the same
+ value as the AP_SCR_EL3[RW] bit except for the value read back.
+ The RW bit is permitted to be cached in a TLB.
+ 0 = Lower levels are all AArch32.
+ 1 = EL1 is AArch64. EL0 is determined by the Execution state
+ described in the current process state when executing at EL0. */
+ uint64_t trvm : 1; /**< [ 30: 30](R/W) Trap Read of Virtual Memory controls. When this bit is set to
+ 1, this causes Reads to the EL1 virtual memory control
+ registers from EL1 to be trapped to EL2. This covers the
+ following registers:
+
+ AArch32: SCTLR, TTBR0, TTBR1, TTBCR, DACR, DFSR, IFSR, DFAR,
+ IFAR, ADFSR, AIFSR, PRRR/ MAIR0, NMRR/ MAIR1, AMAIR0, AMAIR1,
+ CONTEXTIDR.
+
+ AArch64: AP_SCTLR_EL1, AP_TTBR0_EL1, AP_TTBR1_EL1, AP_TCR_EL1, ESR_EL1,
+ FAR_EL1, AFSR0_EL1, AFSR1_EL1, MAIR_EL1, AMAIR_EL1,
+ AP_CONTEXTIDR_EL1. */
+ uint64_t reserved_29 : 1;
+ uint64_t tdz : 1; /**< [ 28: 28](R/W) Trap DC ZVA instruction:
+ This bit also has an effect on the value read from the
+ AP_DCZID_EL0 register. If this bit is 1, then reading
+ AP_DCZID_EL0[DZP] from nonsecure EL1 or EL0 will return 1 to
+ indicate that DC ZVA is prohibited.
+ 0 = The instruction is not trapped.
+ 1 = The instruction is trapped to EL2 when executed in nonsecure
+ EL1 or EL0. */
+ uint64_t tge : 1; /**< [ 27: 27](R/W) Trap General Exceptions. If this bit is set to 1, and
+ AP_SCR_EL3[NS] is set to 1, then:
+
+ All exceptions that would be routed to EL1 are routed to EL2.
+
+ The AP_SCTLR_EL1[M] bit is treated as being 0 regardless of its
+ actual state (for EL1 using AArch32 or AArch64) other than for
+ the purpose of reading the bit.
+
+ The AP_HCR_EL2[FMO], IMO and AMO bits are treated as being 1
+ regardless of their actual state other than for the purpose of
+ reading the bits.
+
+ All virtual interrupts are disabled.
+
+ Any implementation defined mechanisms for signalling virtual
+ interrupts are disabled.
+
+ An exception return to EL1 is treated as an illegal exception
+ return.
+
+ Additionally, if AP_HCR_EL2[TGE] == 1, the
+ AP_MDCR_EL2.{TDRA,TDOSA,TDA} bits are ignored and the processor
+ behaves as if they are set to 1, other than for the value read
+ back from AP_MDCR_EL2. */
+ uint64_t tvm : 1; /**< [ 26: 26](R/W) Trap Virtual Memory controls. When this bit is set to 1, this
+ causes Writes to the EL1 virtual memory control registers from
+ EL1 to be trapped to EL2. This covers the following registers:
+
+ AArch32: SCTLR, TTBR0, TTBR1, TTBCR, DACR, DFSR, IFSR, DFAR,
+ IFAR, ADFSR, AIFSR, PRRR/ MAIR0, NMRR/ MAIR1, AMAIR0, AMAIR1,
+ CONTEXTIDR.
+
+ AArch64: AP_SCTLR_EL1, AP_TTBR0_EL1, AP_TTBR1_EL1, AP_TCR_EL1, ESR_EL1,
+ FAR_EL1, AFSR0_EL1, AFSR1_EL1, MAIR_EL1, AMAIR_EL1,
+ AP_CONTEXTIDR_EL1. */
+ uint64_t ttlb : 1; /**< [ 25: 25](R/W) Trap TLB maintenance instructions. When this bit is set to 1,
+ this causes TLB maintenance instructions executed from EL1
+ which are not UNdefined to be trapped to EL2. This covers the
+ following instructions:
+
+ AArch32: TLBIALLIS, TLBIMVAIS, TLBIASIDIS, TLBIMVAAIS,
+ TLBIALL, TLBIMVA, TLBIASID, DTLBIALL, DTLBIMVA, DTLBIASID,
+ ITLBIALL, ITLBIMVA, ITLBIASID, TLBIMVAA, TLBIMVALIS,
+ TLBIMVAALIS, TLBIMVAL, TLBIMVAAL.
+
+ AArch64: TLBI VMALLE1, TLBI VAE1, TLBI ASIDE1, TLBI VAAE1,
+ TLBI VALE1, TLBI VAALE1, TLBI VMALLE1IS, TLBI VAE1IS, TLBI
+ ASIDE1IS, TLBI VAAE1IS, TLBI VALE1IS, TLBI VAALE1IS. */
+ uint64_t tpu : 1; /**< [ 24: 24](R/W) Trap Cache maintenance instructions to Point of Unification.
+ When this bit is set to 1, this causes Cache maintenance
+ instructions to the point of unification executed from EL1 or
+ EL0 which are not UNdefined to be trapped to EL2. This covers
+ the following instructions:
+
+ AArch32: ICIMVAU, ICIALLU, ICIALLUIS, DCCMVAU.
+
+ AArch64: IC IVAU, IC IALLU, IC IALLUIS, DC CVAU. */
+ uint64_t tpc : 1; /**< [ 23: 23](R/W) Trap Data/Unified Cache maintenance operations to Point of
+ Coherency. When this bit is set to 1, this causes Data or
+ Unified Cache maintenance instructions by address to the point
+ of coherency executed from EL1 or EL0 which are not UNdefined
+ to be trapped to EL2. This covers the following instructions:
+
+ AArch32: DCIMVAC, DCCIMVAC, DCCMVAC.
+
+ AArch64: DC IVAC, DC CIVAC, DC CVAC. */
+ uint64_t tsw : 1; /**< [ 22: 22](R/W) Trap Data/Unified Cache maintenance operations by Set/Way.
+ When this bit is set to 1, this causes Data or Unified Cache
+ maintenance instructions by set/way executed from EL1 which
+ are not UNdefined to be trapped to EL2. This covers the
+ following instructions:
+
+ AArch32: DCISW, DCCSW, DCCISW.
+
+ AArch64: DC ISW, DC CSW, DC CISW. */
+ uint64_t tacr : 1; /**< [ 21: 21](R/W) Trap Auxiliary Control Register. When this bit is set to 1,
+ this causes accesses to the following registers executed from
+ EL1 to be trapped to EL2:
+
+ AArch32: ACTLR.
+
+ AArch64: ACTLR_EL1. */
+ uint64_t tidcp : 1; /**< [ 20: 20](R/W) Trap Implementation Dependent functionality. When this bit is
+ set to 1, this causes accesses to the following instruction
+ set space executed from EL1 to be trapped to EL2.
+
+ AArch32: MCR and MRC instructions as follows:
+
+ All CP15, CRn==9, Opcode1 = {0-7}, CRm == {c0-c2, c5-c8},
+ opcode2 == {0-7}.
+
+ All CP15, CRn==10, Opcode1 =={0-7}, CRm == {c0, c1, c4, c8},
+ opcode2 == {0-7}.
+
+ All CP15, CRn==11, Opcode1=={0-7}, CRm == {c0-c8, c15},
+ opcode2 == {0-7}.
+
+ AArch64: All encoding space reserved for implementation
+ defined system operations (S1_\<op1\>_\<Cn\>_\<Cm\>_\<op2\>) and
+ system registers (S3_\<op1\>_\<Cn\>_\<Cm\>_\<op2\>).
+
+ It is implementation defined whether any of this functionality
+ accessed from EL0 is trapped to EL2 when the AP_HCR_EL2[TIDCP] bit
+ is set. If it is not trapped to EL2, it results in an
+ Undefined exception taken to EL1. */
+ uint64_t tsc : 1; /**< [ 19: 19](R/W) Trap SMC. When this bit is set to 1, this causes the following
+ instructions executed from EL1 to be trapped to EL2:
+
+ AArch32: SMC.
+
+ AArch64: SMC.
+
+ If EL3 is not implemented, this bit is RES0. */
+ uint64_t tid3 : 1; /**< [ 18: 18](R/W) Trap ID Group 3. When this bit is set to 1, this causes reads
+ to the following registers executed from EL1 to be trapped to
+ EL2:
+
+ AArch32: ID_PFR0, ID_PFR1, ID_DFR0, ID_AFR0, ID_MMFR0,
+ ID_MMFR1, ID_MMFR2, ID_MMFR3, ID_ISAR0, ID_ISAR1, ID_ISAR2,
+ ID_ISAR3, ID_ISAR4, ID_ISAR5, MVFR0, MVFR1, MVFR2. Also MRC to
+ any of the following encodings:
+
+ CP15, CRn == 0, Opc1 == 0, CRm == {3-7}, Opc2 == {0,1}.
+
+ CP15, CRn == 0, Opc1 == 0, CRm == 3, Opc2 == 2.
+
+ CP15, CRn == 0, Opc1 == 0, CRm == 5, Opc2 == {4,5}.
+
+ AArch64: AP_ID_PFR0_EL1, AP_ID_PFR1_EL1, AP_ID_DFR0_EL1, AP_ID_AFR0_EL1,
+ ID_MMFR0_EL1, ID_MMFR1_EL1, ID_MMFR2_EL1, ID_MMFR3_EL1,
+ ID_ISAR0_EL1, ID_ISAR1_EL1, ID_ISAR2_EL1, ID_ISAR3_EL1,
+ ID_ISAR4_EL1, ID_ISAR5_EL1, MVFR0_EL1, MVFR1_EL1, MVFR2_EL1,
+ AP_ID_AA64PFR0_EL1, AP_ID_AA64PFR1_EL1, AP_ID_AA64DFR0_EL1,
+ AP_ID_AA64DFR1_EL1, AP_ID_AA64ISAR0_EL1, AP_ID_AA64ISAR1_EL1,
+ AP_ID_AA64MMFR0_EL1, AP_ID_AA64MMFR1_EL1, AP_ID_AA64AFR0_EL1,
+ AP_ID_AA64AFR1_EL1. */
+ uint64_t tid2 : 1; /**< [ 17: 17](R/W) Trap ID Group 2. When this bit is set to 1, this causes reads
+ (or writes to CSSELR/ AP_CSSELR_EL1) to the following registers
+ executed from EL1 or EL0 if not UNdefined to be trapped to
+ EL2:
+
+ AArch32: CTR, CCSIDR, CLIDR, CSSELR.
+
+ AArch64: AP_CTR_EL0, AP_CCSIDR_EL1, AP_CLIDR_EL1, AP_CSSELR_EL1. */
+ uint64_t tid1 : 1; /**< [ 16: 16](R/W) Trap ID Group 1. When this bit is set to 1, this causes reads
+ to the following registers executed from EL1 to be trapped to
+ EL2:
+
+ AArch32: TCMTR, TLBTR, AIDR, REVIDR.
+
+ AArch64: AP_AIDR_EL1, AP_REVIDR_EL1. */
+ uint64_t tid0 : 1; /**< [ 15: 15](R/W) Trap ID Group 0. When this bit is set to 1, this causes reads
+ to the following registers executed from EL1 or EL0 if not
+ UNdefined to be trapped to EL2:
+
+ AArch32: FPSID, JIDR.
+
+ AArch64: None. */
+ uint64_t twe : 1; /**< [ 14: 14](R/W) Trap WFE. When this bit is set to 1, this causes the following
+ instructions executed from EL1 or EL0 to be trapped to EL2 if
+ the instruction would otherwise cause suspension of execution
+ (i.e. if the event register is not set):
+
+ AArch32: WFE.
+
+ AArch64: WFE.
+
+ Conditional WFE instructions that fail their condition are not
+ trapped if this bit is set to 1. */
+ uint64_t twi : 1; /**< [ 13: 13](R/W) Trap WFI. When this bit is set to 1, this causes the following
+ instructions executed from EL1 or EL0 to be trapped to EL2 if
+ the instruction would otherwise cause suspension of execution
+ (i.e. if there is not a pending WFI wakeup event):
+
+ AArch32: WFI.
+
+ AArch64: WFI.
+
+ Conditional WFI instructions that fail their condition are not
+ trapped if this bit is set to 1. */
+ uint64_t dc : 1; /**< [ 12: 12](R/W) Default Cacheable. When this bit is set to 1, this causes:
+
+ * The AP_SCTLR_EL1[M] bit to behave as 0 when in the nonsecure
+ state for all purposes other than reading the value of the
+ bit.
+
+ * The AP_HCR_EL2[VM] bit to behave as 1 when in the nonsecure
+ state for all purposes other than reading the value of the
+ bit.
+
+ The memory type produced by the first stage of translation
+ used by EL1 and EL0 is Normal Non-Shareable, Inner WriteBack
+ Read-WriteAllocate, Outer WriteBack Read-WriteAllocate.
+
+ When this bit is 0 and the stage 1 MMU is disabled, the
+ default memory attribute for Data accesses is Device-nGnRnE.
+
+ This bit is permitted to be cached in a TLB. */
+ uint64_t bsu : 2; /**< [ 11: 10](R/W) Barrier Shareability upgrade. The value in this field
+ determines the minimum shareability domain that is applied to
+ any barrier executed from EL1 or EL0.
+
+ This value is combined with the specified level of the barrier
+ held in its instruction, using the same principles as
+ combining the shareability attributes from two stages of
+ address translation.
+
+ 0x0 = No effect.
+ 0x1 = Inner Shareable.
+ 0x2 = Outer Shareable.
+ 0x3 = Full system. */
+ uint64_t fb : 1; /**< [ 9: 9](R/W) Force broadcast. When this bit is set to 1, this causes the
+ following instructions to be broadcast within the Inner
+ Shareable domain when executed from nonsecure EL1:
+
+ AArch32: BPIALL, TLBIALL, TLBIMVA, TLBIASID, DTLBIALL,
+ DTLBIMVA, DTLBIASID, ITLBIALL, ITLBIMVA, ITLBIASID, TLBIMVAA,
+ ICIALLU, TLBIMVAL, TLBIMVAAL.
+
+ AArch64: TLBI VMALLE1, TLBI VAE1, TLBI ASIDE1, TLBI VAAE1,
+ TLBI VALE1, TLBI VAALE1, IC IALLU. */
+ uint64_t vse : 1; /**< [ 8: 8](R/W) Virtual System Error/Asynchronous Abort.
+ The virtual System Error/Asynchronous Abort is only enabled
+ when the AP_HCR_EL2[AMO] bit is set.
+ 0 = Virtual System Error/Asynchronous Abort is not pending by this
+ mechanism.
+ 1 = Virtual System Error/Asynchronous Abort is pending by this
+ mechanism. */
+ uint64_t vi : 1; /**< [ 7: 7](R/W) Virtual IRQ Interrupt.
+ The virtual IRQ is only enabled when the AP_HCR_EL2[IMO] bit is
+ set.
+ 0 = Virtual IRQ is not pending by this mechanism.
+ 1 = Virtual IRQ is pending by this mechanism. */
+ uint64_t vf : 1; /**< [ 6: 6](R/W) Virtual FIQ Interrupt.
+ The virtual FIQ is only enabled when the AP_HCR_EL2[FMO] bit is
+ set.
+ 0 = Virtual FIQ is not pending by this mechanism.
+ 1 = Virtual FIQ is pending by this mechanism. */
+ uint64_t amo : 1; /**< [ 5: 5](R/W) Asynchronous abort and error interrupt routing.
+ 0 = Asynchronous External Aborts and SError Interrupts while
+ executing at exception levels lower than EL2 are not taken in
+ EL2. Virtual System Error/Asynchronous Abort is disabled.
+ 1 = Asynchronous External Aborts and SError Interrupts while
+ executing at EL2 or lower are taken in EL2 unless routed by
+ the AP_SCR_EL3[EA] bit to EL3. Virtual System Error/Asynchronous
+ Abort is enabled. */
+ uint64_t imo : 1; /**< [ 4: 4](R/W) Physical IRQ Routing.
+ 0 = Physical IRQ while executing at exception levels lower than
+ EL2 are not taken in EL2. Virtual IRQ Interrupt is disabled.
+ 1 = Physical IRQ while executing at EL2 or lower are taken in EL2
+ unless routed by the AP_SCR_EL3[IRQ] bit to EL3. Virtual IRQ
+ Interrupt is enabled. */
+ uint64_t fmo : 1; /**< [ 3: 3](R/W) Physical FIQ Routing.
+ 0 = Physical FIQ while executing at exception levels lower than
+ EL2 are not taken in EL2. Virtual FIQ Interrupt is disabled.
+ 1 = Physical FIQ while executing at EL2 or lower are taken in EL2
+ unless routed by the AP_SCR_EL3[FIQ] bit to EL3. Virtual FIQ
+ Interrupt is enabled. */
+ uint64_t ptw : 1; /**< [ 2: 2](R/W) Protected Table Walk. When this bit is set to 1, if the stage
+ 2 translation of a translation table access made as part of a
+ stage 1 translation table walk at EL0 or EL1 maps that
+ translation table access to Strongly-ordered or Device memory,
+ the access is faulted as a stage 2 Permission fault.
+ This bit is permitted to be cached in a TLB. */
+ uint64_t swio : 1; /**< [ 1: 1](R/W) Set/Way Invalidation Override. When this bit is set to 1, this
+ causes EL1 execution of the data cache invalidate by set/way
+ instruction to be treated as data cache clean and invalidate
+ by set/way. That is:
+
+ AArch32: DCISW is executed as DCCISW.
+
+ AArch64: DC ISW is executed as DC CISW.
+
+ As a result of changes to the behavior of DCISW, this bit is
+ redundant in ARMv8. It is permissible that an implementation
+ makes this bit RES1. */
+ uint64_t vm : 1; /**< [ 0: 0](R/W) Virtualization MMU enable for EL1 and EL0 stage 2 address
+ translation.
+ This bit is permitted to be cached in a TLB.
+ 0 = EL1 and EL0 stage 2 address translation disabled.
+ 1 = EL1 and EL0 stage 2 address translation enabled. */
+#else /* Word 0 - Little Endian */
+ uint64_t vm : 1; /**< [ 0: 0](R/W) Virtualization MMU enable for EL1 and EL0 stage 2 address
+ translation.
+ This bit is permitted to be cached in a TLB.
+ 0 = EL1 and EL0 stage 2 address translation disabled.
+ 1 = EL1 and EL0 stage 2 address translation enabled. */
+ uint64_t swio : 1; /**< [ 1: 1](R/W) Set/Way Invalidation Override. When this bit is set to 1, this
+ causes EL1 execution of the data cache invalidate by set/way
+ instruction to be treated as data cache clean and invalidate
+ by set/way. That is:
+
+ AArch32: DCISW is executed as DCCISW.
+
+ AArch64: DC ISW is executed as DC CISW.
+
+ As a result of changes to the behavior of DCISW, this bit is
+ redundant in ARMv8. It is permissible that an implementation
+ makes this bit RES1. */
+ uint64_t ptw : 1; /**< [ 2: 2](R/W) Protected Table Walk. When this bit is set to 1, if the stage
+ 2 translation of a translation table access made as part of a
+ stage 1 translation table walk at EL0 or EL1 maps that
+ translation table access to Strongly-ordered or Device memory,
+ the access is faulted as a stage 2 Permission fault.
+ This bit is permitted to be cached in a TLB. */
+ uint64_t fmo : 1; /**< [ 3: 3](R/W) Physical FIQ Routing.
+ 0 = Physical FIQ while executing at exception levels lower than
+ EL2 are not taken in EL2. Virtual FIQ Interrupt is disabled.
+ 1 = Physical FIQ while executing at EL2 or lower are taken in EL2
+ unless routed by the AP_SCR_EL3[FIQ] bit to EL3. Virtual FIQ
+ Interrupt is enabled. */
+ uint64_t imo : 1; /**< [ 4: 4](R/W) Physical IRQ Routing.
+ 0 = Physical IRQ while executing at exception levels lower than
+ EL2 are not taken in EL2. Virtual IRQ Interrupt is disabled.
+ 1 = Physical IRQ while executing at EL2 or lower are taken in EL2
+ unless routed by the AP_SCR_EL3[IRQ] bit to EL3. Virtual IRQ
+ Interrupt is enabled. */
+ uint64_t amo : 1; /**< [ 5: 5](R/W) Asynchronous abort and error interrupt routing.
+ 0 = Asynchronous External Aborts and SError Interrupts while
+ executing at exception levels lower than EL2 are not taken in
+ EL2. Virtual System Error/Asynchronous Abort is disabled.
+ 1 = Asynchronous External Aborts and SError Interrupts while
+ executing at EL2 or lower are taken in EL2 unless routed by
+ the AP_SCR_EL3[EA] bit to EL3. Virtual System Error/Asynchronous
+ Abort is enabled. */
+ uint64_t vf : 1; /**< [ 6: 6](R/W) Virtual FIQ Interrupt.
+ The virtual FIQ is only enabled when the AP_HCR_EL2[FMO] bit is
+ set.
+ 0 = Virtual FIQ is not pending by this mechanism.
+ 1 = Virtual FIQ is pending by this mechanism. */
+ uint64_t vi : 1; /**< [ 7: 7](R/W) Virtual IRQ Interrupt.
+ The virtual IRQ is only enabled when the AP_HCR_EL2[IMO] bit is
+ set.
+ 0 = Virtual IRQ is not pending by this mechanism.
+ 1 = Virtual IRQ is pending by this mechanism. */
+ uint64_t vse : 1; /**< [ 8: 8](R/W) Virtual System Error/Asynchronous Abort.
+ The virtual System Error/Asynchronous Abort is only enabled
+ when the AP_HCR_EL2[AMO] bit is set.
+ 0 = Virtual System Error/Asynchronous Abort is not pending by this
+ mechanism.
+ 1 = Virtual System Error/Asynchronous Abort is pending by this
+ mechanism. */
+ uint64_t fb : 1; /**< [ 9: 9](R/W) Force broadcast. When this bit is set to 1, this causes the
+ following instructions to be broadcast within the Inner
+ Shareable domain when executed from nonsecure EL1:
+
+ AArch32: BPIALL, TLBIALL, TLBIMVA, TLBIASID, DTLBIALL,
+ DTLBIMVA, DTLBIASID, ITLBIALL, ITLBIMVA, ITLBIASID, TLBIMVAA,
+ ICIALLU, TLBIMVAL, TLBIMVAAL.
+
+ AArch64: TLBI VMALLE1, TLBI VAE1, TLBI ASIDE1, TLBI VAAE1,
+ TLBI VALE1, TLBI VAALE1, IC IALLU. */
+ uint64_t bsu : 2; /**< [ 11: 10](R/W) Barrier Shareability upgrade. The value in this field
+ determines the minimum shareability domain that is applied to
+ any barrier executed from EL1 or EL0.
+
+ This value is combined with the specified level of the barrier
+ held in its instruction, using the same principles as
+ combining the shareability attributes from two stages of
+ address translation.
+
+ 0x0 = No effect.
+ 0x1 = Inner Shareable.
+ 0x2 = Outer Shareable.
+ 0x3 = Full system. */
+ uint64_t dc : 1; /**< [ 12: 12](R/W) Default Cacheable. When this bit is set to 1, this causes:
+
+ * The AP_SCTLR_EL1[M] bit to behave as 0 when in the nonsecure
+ state for all purposes other than reading the value of the
+ bit.
+
+ * The AP_HCR_EL2[VM] bit to behave as 1 when in the nonsecure
+ state for all purposes other than reading the value of the
+ bit.
+
+ The memory type produced by the first stage of translation
+ used by EL1 and EL0 is Normal Non-Shareable, Inner WriteBack
+ Read-WriteAllocate, Outer WriteBack Read-WriteAllocate.
+
+ When this bit is 0 and the stage 1 MMU is disabled, the
+ default memory attribute for Data accesses is Device-nGnRnE.
+
+ This bit is permitted to be cached in a TLB. */
+ uint64_t twi : 1; /**< [ 13: 13](R/W) Trap WFI. When this bit is set to 1, this causes the following
+ instructions executed from EL1 or EL0 to be trapped to EL2 if
+ the instruction would otherwise cause suspension of execution
+ (i.e. if there is not a pending WFI wakeup event):
+
+ AArch32: WFI.
+
+ AArch64: WFI.
+
+ Conditional WFI instructions that fail their condition are not
+ trapped if this bit is set to 1. */
+ uint64_t twe : 1; /**< [ 14: 14](R/W) Trap WFE. When this bit is set to 1, this causes the following
+ instructions executed from EL1 or EL0 to be trapped to EL2 if
+ the instruction would otherwise cause suspension of execution
+ (i.e. if the event register is not set):
+
+ AArch32: WFE.
+
+ AArch64: WFE.
+
+ Conditional WFE instructions that fail their condition are not
+ trapped if this bit is set to 1. */
+ uint64_t tid0 : 1; /**< [ 15: 15](R/W) Trap ID Group 0. When this bit is set to 1, this causes reads
+ to the following registers executed from EL1 or EL0 if not
+ UNdefined to be trapped to EL2:
+
+ AArch32: FPSID, JIDR.
+
+ AArch64: None. */
+ uint64_t tid1 : 1; /**< [ 16: 16](R/W) Trap ID Group 1. When this bit is set to 1, this causes reads
+ to the following registers executed from EL1 to be trapped to
+ EL2:
+
+ AArch32: TCMTR, TLBTR, AIDR, REVIDR.
+
+ AArch64: AP_AIDR_EL1, AP_REVIDR_EL1. */
+ uint64_t tid2 : 1; /**< [ 17: 17](R/W) Trap ID Group 2. When this bit is set to 1, this causes reads
+ (or writes to CSSELR/ AP_CSSELR_EL1) to the following registers
+ executed from EL1 or EL0 if not UNdefined to be trapped to
+ EL2:
+
+ AArch32: CTR, CCSIDR, CLIDR, CSSELR.
+
+ AArch64: AP_CTR_EL0, AP_CCSIDR_EL1, AP_CLIDR_EL1, AP_CSSELR_EL1. */
+ uint64_t tid3 : 1; /**< [ 18: 18](R/W) Trap ID Group 3. When this bit is set to 1, this causes reads
+ to the following registers executed from EL1 to be trapped to
+ EL2:
+
+ AArch32: ID_PFR0, ID_PFR1, ID_DFR0, ID_AFR0, ID_MMFR0,
+ ID_MMFR1, ID_MMFR2, ID_MMFR3, ID_ISAR0, ID_ISAR1, ID_ISAR2,
+ ID_ISAR3, ID_ISAR4, ID_ISAR5, MVFR0, MVFR1, MVFR2. Also MRC to
+ any of the following encodings:
+
+ CP15, CRn == 0, Opc1 == 0, CRm == {3-7}, Opc2 == {0,1}.
+
+ CP15, CRn == 0, Opc1 == 0, CRm == 3, Opc2 == 2.
+
+ CP15, CRn == 0, Opc1 == 0, CRm == 5, Opc2 == {4,5}.
+
+ AArch64: AP_ID_PFR0_EL1, AP_ID_PFR1_EL1, AP_ID_DFR0_EL1, AP_ID_AFR0_EL1,
+ ID_MMFR0_EL1, ID_MMFR1_EL1, ID_MMFR2_EL1, ID_MMFR3_EL1,
+ ID_ISAR0_EL1, ID_ISAR1_EL1, ID_ISAR2_EL1, ID_ISAR3_EL1,
+ ID_ISAR4_EL1, ID_ISAR5_EL1, MVFR0_EL1, MVFR1_EL1, MVFR2_EL1,
+ AP_ID_AA64PFR0_EL1, AP_ID_AA64PFR1_EL1, AP_ID_AA64DFR0_EL1,
+ AP_ID_AA64DFR1_EL1, AP_ID_AA64ISAR0_EL1, AP_ID_AA64ISAR1_EL1,
+ AP_ID_AA64MMFR0_EL1, AP_ID_AA64MMFR1_EL1, AP_ID_AA64AFR0_EL1,
+ AP_ID_AA64AFR1_EL1. */
+ uint64_t tsc : 1; /**< [ 19: 19](R/W) Trap SMC. When this bit is set to 1, this causes the following
+ instructions executed from EL1 to be trapped to EL2:
+
+ AArch32: SMC.
+
+ AArch64: SMC.
+
+ If EL3 is not implemented, this bit is RES0. */
+ uint64_t tidcp : 1; /**< [ 20: 20](R/W) Trap Implementation Dependent functionality. When this bit is
+ set to 1, this causes accesses to the following instruction
+ set space executed from EL1 to be trapped to EL2.
+
+ AArch32: MCR and MRC instructions as follows:
+
+ All CP15, CRn==9, Opcode1 = {0-7}, CRm == {c0-c2, c5-c8},
+ opcode2 == {0-7}.
+
+ All CP15, CRn==10, Opcode1 =={0-7}, CRm == {c0, c1, c4, c8},
+ opcode2 == {0-7}.
+
+ All CP15, CRn==11, Opcode1=={0-7}, CRm == {c0-c8, c15},
+ opcode2 == {0-7}.
+
+ AArch64: All encoding space reserved for implementation
+ defined system operations (S1_\<op1\>_\<Cn\>_\<Cm\>_\<op2\>) and
+ system registers (S3_\<op1\>_\<Cn\>_\<Cm\>_\<op2\>).
+
+ It is implementation defined whether any of this functionality
+ accessed from EL0 is trapped to EL2 when the AP_HCR_EL2[TIDCP] bit
+ is set. If it is not trapped to EL2, it results in an
+ Undefined exception taken to EL1. */
+ uint64_t tacr : 1; /**< [ 21: 21](R/W) Trap Auxiliary Control Register. When this bit is set to 1,
+ this causes accesses to the following registers executed from
+ EL1 to be trapped to EL2:
+
+ AArch32: ACTLR.
+
+ AArch64: ACTLR_EL1. */
+ uint64_t tsw : 1; /**< [ 22: 22](R/W) Trap Data/Unified Cache maintenance operations by Set/Way.
+ When this bit is set to 1, this causes Data or Unified Cache
+ maintenance instructions by set/way executed from EL1 which
+ are not UNdefined to be trapped to EL2. This covers the
+ following instructions:
+
+ AArch32: DCISW, DCCSW, DCCISW.
+
+ AArch64: DC ISW, DC CSW, DC CISW. */
+ uint64_t tpc : 1; /**< [ 23: 23](R/W) Trap Data/Unified Cache maintenance operations to Point of
+ Coherency. When this bit is set to 1, this causes Data or
+ Unified Cache maintenance instructions by address to the point
+ of coherency executed from EL1 or EL0 which are not UNdefined
+ to be trapped to EL2. This covers the following instructions:
+
+ AArch32: DCIMVAC, DCCIMVAC, DCCMVAC.
+
+ AArch64: DC IVAC, DC CIVAC, DC CVAC. */
+ uint64_t tpu : 1; /**< [ 24: 24](R/W) Trap Cache maintenance instructions to Point of Unification.
+ When this bit is set to 1, this causes Cache maintenance
+ instructions to the point of unification executed from EL1 or
+ EL0 which are not UNdefined to be trapped to EL2. This covers
+ the following instructions:
+
+ AArch32: ICIMVAU, ICIALLU, ICIALLUIS, DCCMVAU.
+
+ AArch64: IC IVAU, IC IALLU, IC IALLUIS, DC CVAU. */
+ uint64_t ttlb : 1; /**< [ 25: 25](R/W) Trap TLB maintenance instructions. When this bit is set to 1,
+ this causes TLB maintenance instructions executed from EL1
+ which are not UNdefined to be trapped to EL2. This covers the
+ following instructions:
+
+ AArch32: TLBIALLIS, TLBIMVAIS, TLBIASIDIS, TLBIMVAAIS,
+ TLBIALL, TLBIMVA, TLBIASID, DTLBIALL, DTLBIMVA, DTLBIASID,
+ ITLBIALL, ITLBIMVA, ITLBIASID, TLBIMVAA, TLBIMVALIS,
+ TLBIMVAALIS, TLBIMVAL, TLBIMVAAL.
+
+ AArch64: TLBI VMALLE1, TLBI VAE1, TLBI ASIDE1, TLBI VAAE1,
+ TLBI VALE1, TLBI VAALE1, TLBI VMALLE1IS, TLBI VAE1IS, TLBI
+ ASIDE1IS, TLBI VAAE1IS, TLBI VALE1IS, TLBI VAALE1IS. */
+ uint64_t tvm : 1; /**< [ 26: 26](R/W) Trap Virtual Memory controls. When this bit is set to 1, this
+ causes Writes to the EL1 virtual memory control registers from
+ EL1 to be trapped to EL2. This covers the following registers:
+
+ AArch32: SCTLR, TTBR0, TTBR1, TTBCR, DACR, DFSR, IFSR, DFAR,
+ IFAR, ADFSR, AIFSR, PRRR/ MAIR0, NMRR/ MAIR1, AMAIR0, AMAIR1,
+ CONTEXTIDR.
+
+ AArch64: AP_SCTLR_EL1, AP_TTBR0_EL1, AP_TTBR1_EL1, AP_TCR_EL1, ESR_EL1,
+ FAR_EL1, AFSR0_EL1, AFSR1_EL1, MAIR_EL1, AMAIR_EL1,
+ AP_CONTEXTIDR_EL1. */
+ uint64_t tge : 1; /**< [ 27: 27](R/W) Trap General Exceptions. If this bit is set to 1, and
+ AP_SCR_EL3[NS] is set to 1, then:
+
+ All exceptions that would be routed to EL1 are routed to EL2.
+
+ The AP_SCTLR_EL1[M] bit is treated as being 0 regardless of its
+ actual state (for EL1 using AArch32 or AArch64) other than for
+ the purpose of reading the bit.
+
+ The AP_HCR_EL2[FMO], IMO and AMO bits are treated as being 1
+ regardless of their actual state other than for the purpose of
+ reading the bits.
+
+ All virtual interrupts are disabled.
+
+ Any implementation defined mechanisms for signalling virtual
+ interrupts are disabled.
+
+ An exception return to EL1 is treated as an illegal exception
+ return.
+
+ Additionally, if AP_HCR_EL2[TGE] == 1, the
+ AP_MDCR_EL2.{TDRA,TDOSA,TDA} bits are ignored and the processor
+ behaves as if they are set to 1, other than for the value read
+ back from AP_MDCR_EL2. */
+ uint64_t tdz : 1; /**< [ 28: 28](R/W) Trap DC ZVA instruction:
+ This bit also has an effect on the value read from the
+ AP_DCZID_EL0 register. If this bit is 1, then reading
+ AP_DCZID_EL0[DZP] from nonsecure EL1 or EL0 will return 1 to
+ indicate that DC ZVA is prohibited.
+ 0 = The instruction is not trapped.
+ 1 = The instruction is trapped to EL2 when executed in nonsecure
+ EL1 or EL0. */
+ uint64_t reserved_29 : 1;
+ uint64_t trvm : 1; /**< [ 30: 30](R/W) Trap Read of Virtual Memory controls. When this bit is set to
+ 1, this causes Reads to the EL1 virtual memory control
+ registers from EL1 to be trapped to EL2. This covers the
+ following registers:
+
+ AArch32: SCTLR, TTBR0, TTBR1, TTBCR, DACR, DFSR, IFSR, DFAR,
+ IFAR, ADFSR, AIFSR, PRRR/ MAIR0, NMRR/ MAIR1, AMAIR0, AMAIR1,
+ CONTEXTIDR.
+
+ AArch64: AP_SCTLR_EL1, AP_TTBR0_EL1, AP_TTBR1_EL1, AP_TCR_EL1, ESR_EL1,
+ FAR_EL1, AFSR0_EL1, AFSR1_EL1, MAIR_EL1, AMAIR_EL1,
+ AP_CONTEXTIDR_EL1. */
+ uint64_t rsvd_31 : 1; /**< [ 31: 31](RO) RW - Register Width control for lower exception levels:
+ When AP_SCR_EL3[NS]==0, this bit behaves as if it has the same
+ value as the AP_SCR_EL3[RW] bit except for the value read back.
+ The RW bit is permitted to be cached in a TLB.
+ 0 = Lower levels are all AArch32.
+ 1 = EL1 is AArch64. EL0 is determined by the Execution state
+ described in the current process state when executing at EL0. */
+ uint64_t cd : 1; /**< [ 32: 32](R/W) Stage 2 Data cache disable. When AP_HCR_EL2[VM]==1, this forces
+ all stage 2 translations for data accesses and translation
+ table walks to Normal memory to be Non-cacheable for the EL1&0
+ translation regime.
+ This bit has no effect on the EL2 or EL3 translation regimes.
+ 0 = No effect on the stage 2 of the EL1&0 translation regime for
+ data accesses and translation table walks.
+ 1 = Forces all stage 2 translations for data accesses and
+ translation table walks to Normal memory to be Non-cacheable
+ for the EL1&0 translation regime. */
+ uint64_t id : 1; /**< [ 33: 33](R/W) Stage 2 Instruction cache disable. When AP_HCR_EL2[VM]==1, this
+ forces all stage 2 translations for instruction accesses to
+ Normal memory to be Non-cacheable for the EL1&0 translation
+ regime.
+ This bit has no effect on the EL2 or EL3 translation regimes.
+ 0 = No effect on the stage 2 of the EL1&0 translation regime for
+ instruction accesses.
+ 1 = Forces all stage 2 translations for instruction accesses to
+ Normal memory to be Non-cacheable for the EL1&0 translation
+ regime. */
+ uint64_t e2h : 1; /**< [ 34: 34](R/W) V8.1: Enable EL2 host. */
+ uint64_t tlor : 1; /**< [ 35: 35](R/W) V8.1: Trap access to the LOR Registers from nonsecure EL1 to EL2.
+ 0 = Nonsecure EL1 accesses to the LOR Registers are not trapped to EL2.
+ 1 = Nonsecure EL1 accesses to the LOR Registers are trapped to EL2. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_ap_hcr_el2_s cn9; */
+};
+typedef union bdk_ap_hcr_el2 bdk_ap_hcr_el2_t;
+
+#define BDK_AP_HCR_EL2 BDK_AP_HCR_EL2_FUNC()
+static inline uint64_t BDK_AP_HCR_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_HCR_EL2_FUNC(void)
+{
+ return 0x30401010000ll;
+}
+
+#define typedef_BDK_AP_HCR_EL2 bdk_ap_hcr_el2_t
+#define bustype_BDK_AP_HCR_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_HCR_EL2 "AP_HCR_EL2"
+#define busnum_BDK_AP_HCR_EL2 0
+#define arguments_BDK_AP_HCR_EL2 -1,-1,-1,-1
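+
+/* Usage sketch (illustrative only, not part of the generated API): the
+ * typedef/bustype/basename defines above plug AP_HCR_EL2 into the generic
+ * BDK_CSR_* accessors declared in bdk-csr.h. Assuming those macros, the
+ * bdk_node_t type, and the common "s" view of the union, enabling stage 2
+ * translation for the EL1&0 regime might look like this: */
+static inline void bdk_ap_hcr_el2_sketch_enable_vm(bdk_node_t node)
+{
+    bdk_ap_hcr_el2_t hcr;
+    hcr.u = BDK_CSR_READ(node, BDK_AP_HCR_EL2); /* MRS from HCR_EL2 */
+    hcr.s.vm = 1;                               /* [VM]: stage 2 MMU enable, bit [0] */
+    BDK_CSR_WRITE(node, BDK_AP_HCR_EL2, hcr.u); /* MSR back to HCR_EL2 */
+}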
+
+/**
+ * Register (SYSREG) ap_hpfar_el2
+ *
+ * AP Hypervisor IPA Fault Address Register
+ * Holds the faulting IPA for some aborts on a stage 2
+ * translation taken to EL2.
+ */
+union bdk_ap_hpfar_el2
+{
+ uint64_t u;
+ struct bdk_ap_hpfar_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_45_63 : 19;
+ uint64_t fipa : 41; /**< [ 44: 4](R/W) Bits \<51:12\> of the faulting intermediate physical address
+ (\<47:12\> on CN8XXX). For implementations with fewer
+ physical address bits, the equivalent upper bits in this
+ field are RES0. */
+ uint64_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_3 : 4;
+ uint64_t fipa : 41; /**< [ 44: 4](R/W) Bits \<51:12\> of the faulting intermediate physical address
+ (\<47:12\> on CN8XXX). For implementations with fewer
+ physical address bits, the equivalent upper bits in this
+ field are RES0. */
+ uint64_t reserved_45_63 : 19;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_hpfar_el2_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_40_63 : 24;
+ uint64_t fipa : 36; /**< [ 39: 4](R/W) Bits \<47:12\> of the faulting intermediate physical address.
+ For implementations with fewer than 48 physical address bits,
+ the equivalent upper bits in this field are RES0. */
+ uint64_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_3 : 4;
+ uint64_t fipa : 36; /**< [ 39: 4](R/W) Bits \<47:12\> of the faulting intermediate physical address.
+ For implementations with fewer than 48 physical address bits,
+ the equivalent upper bits in this field are RES0. */
+ uint64_t reserved_40_63 : 24;
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_ap_hpfar_el2_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_45_63 : 19;
+ uint64_t fipa : 41; /**< [ 44: 4](R/W) Bits \<51:12\> of the faulting intermediate physical address.
+ For implementations that don't support as large an IPA, or when
+ using a translation granule, the upper bits in this field are RES0. */
+ uint64_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_3 : 4;
+ uint64_t fipa : 41; /**< [ 44: 4](R/W) Bits \<51:12\> of the faulting intermediate physical address.
+ For implementations that don't support as large an IPA, or when
+ using a translation granule, the upper bits in this field are RES0. */
+ uint64_t reserved_45_63 : 19;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_ap_hpfar_el2 bdk_ap_hpfar_el2_t;
+
+#define BDK_AP_HPFAR_EL2 BDK_AP_HPFAR_EL2_FUNC()
+static inline uint64_t BDK_AP_HPFAR_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_HPFAR_EL2_FUNC(void)
+{
+ return 0x30406000400ll;
+}
+
+#define typedef_BDK_AP_HPFAR_EL2 bdk_ap_hpfar_el2_t
+#define bustype_BDK_AP_HPFAR_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_HPFAR_EL2 "AP_HPFAR_EL2"
+#define busnum_BDK_AP_HPFAR_EL2 0
+#define arguments_BDK_AP_HPFAR_EL2 -1,-1,-1,-1
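+
+/* Sketch (illustrative): [FIPA] holds the page number of the faulting IPA,
+ * so the byte address of the faulting page is the field value shifted left
+ * by 12. Assuming the BDK_CSR_READ macro from bdk-csr.h; the helper name is
+ * invented for this example. */
+static inline uint64_t bdk_ap_hpfar_el2_sketch_fault_ipa(bdk_node_t node)
+{
+    bdk_ap_hpfar_el2_t hpfar;
+    hpfar.u = BDK_CSR_READ(node, BDK_AP_HPFAR_EL2);
+    return hpfar.s.fipa << 12; /* IPA\<51:12\> back to a byte address */
+}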
+
+/**
+ * Register (SYSREG) ap_hstr_el2
+ *
+ * AP Hypervisor System Trap Register
+ * Controls access to coprocessor registers at lower Exception
+ * levels in AArch32.
+ */
+union bdk_ap_hstr_el2
+{
+ uint32_t u;
+ struct bdk_ap_hstr_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_hstr_el2_s cn; */
+};
+typedef union bdk_ap_hstr_el2 bdk_ap_hstr_el2_t;
+
+#define BDK_AP_HSTR_EL2 BDK_AP_HSTR_EL2_FUNC()
+static inline uint64_t BDK_AP_HSTR_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_HSTR_EL2_FUNC(void)
+{
+ return 0x30401010300ll;
+}
+
+#define typedef_BDK_AP_HSTR_EL2 bdk_ap_hstr_el2_t
+#define bustype_BDK_AP_HSTR_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_HSTR_EL2 "AP_HSTR_EL2"
+#define busnum_BDK_AP_HSTR_EL2 0
+#define arguments_BDK_AP_HSTR_EL2 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_icc_ap0r#_el1
+ *
+ * AP Interrupt Controller Active Priorities Group 0 (1,3) Register
+ * Provides information about the active priorities for the
+ * current interrupt regime.
+ */
+union bdk_ap_icc_ap0rx_el1
+{
+ uint32_t u;
+ struct bdk_ap_icc_ap0rx_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t prioritybits : 32; /**< [ 31: 0](R/W) Provides information about priority M, according to the
+ following relationship:
+ Bit P\<n\> corresponds to priority (M divided by 2^(U))
+ minus 1, where U is the number of unimplemented bits of
+ priority and is equal to (7 - AP_ICC_CTLR_EL1[PRI]).
+ For example, in a system with AP_ICC_CTLR_EL1[PRI] == 0b100:
+
+ There are 5 bits of implemented priority.
+
+ This means there are 3 bits of unimplemented priority, which
+ are always at the least significant end (bits [2:0] are RES0).
+
+ Valid priorities are 8, 16, 24, 32, and so on. Dividing these
+ by 2^(3) = 8 gives 1, 2, 3, 4, and so on.
+
+ Subtracting 1 from each gives bits 0, 1, 2, 3, and so on that
+ provide information about those priorities.
+
+ Accesses to these registers from an interrupt regime give a
+ view of the active priorities that is appropriate for that
+ interrupt regime, to allow save and restore of the appropriate
+ state.
+
+ Interrupt regime and the number of Security states supported
+ by the Distributor affect the view as follows. Unless
+ otherwise stated, when a bit is successfully set to one, this
+ clears any other active priorities corresponding to that bit.
+
+ Current Exception level and Security state, and the
+ resulting AP0Rn access:
+
+ (Secure) EL3: Permitted. Accesses Group 0 Secure active
+ priorities.
+
+ Secure EL1: Permitted. Accesses Group 0 Secure active
+ priorities.
+
+ Nonsecure EL1 access for a Virtual interrupt: accesses
+ ICH_AP0Rn_EL2.
+
+ Nonsecure EL1 or EL2 when the GIC Distributor supports two
+ Security states (GICD_CTLR[DS] is 0): Permitted. Accesses
+ Group 0 Secure active priorities.
+
+ Nonsecure EL1 or EL2 when the GIC Distributor supports one
+ Security state (GICD_CTLR[DS] is 1): Permitted. Accesses
+ Group 0 active priorities.
+
+ A Virtual interrupt in this case means that the interrupt
+ group associated with the register has been virtualized. */
+#else /* Word 0 - Little Endian */
+ uint32_t prioritybits : 32; /**< [ 31: 0](R/W) Provides information about priority M, according to the
+ following relationship:
+ Bit P\<n\> corresponds to priority (M divided by 2^(U))
+ minus 1, where U is the number of unimplemented bits of
+ priority and is equal to (7 - AP_ICC_CTLR_EL1[PRI]).
+ For example, in a system with AP_ICC_CTLR_EL1[PRI] == 0b100:
+
+ There are 5 bits of implemented priority.
+
+ This means there are 3 bits of unimplemented priority, which
+ are always at the least significant end (bits [2:0] are RES0).
+
+ Valid priorities are 8, 16, 24, 32, and so on. Dividing these
+ by 2^(3) = 8 gives 1, 2, 3, 4, and so on.
+
+ Subtracting 1 from each gives bits 0, 1, 2, 3, and so on that
+ provide information about those priorities.
+
+ Accesses to these registers from an interrupt regime give a
+ view of the active priorities that is appropriate for that
+ interrupt regime, to allow save and restore of the appropriate
+ state.
+
+ Interrupt regime and the number of Security states supported
+ by the Distributor affect the view as follows. Unless
+ otherwise stated, when a bit is successfully set to one, this
+ clears any other active priorities corresponding to that bit.
+
+ Current Exception level and Security state, and the
+ resulting AP0Rn access:
+
+ (Secure) EL3: Permitted. Accesses Group 0 Secure active
+ priorities.
+
+ Secure EL1: Permitted. Accesses Group 0 Secure active
+ priorities.
+
+ Nonsecure EL1 access for a Virtual interrupt: accesses
+ ICH_AP0Rn_EL2.
+
+ Nonsecure EL1 or EL2 when the GIC Distributor supports two
+ Security states (GICD_CTLR[DS] is 0): Permitted. Accesses
+ Group 0 Secure active priorities.
+
+ Nonsecure EL1 or EL2 when the GIC Distributor supports one
+ Security state (GICD_CTLR[DS] is 1): Permitted. Accesses
+ Group 0 active priorities.
+
+ A Virtual interrupt in this case means that the interrupt
+ group associated with the register has been virtualized. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_icc_ap0rx_el1_s cn; */
+};
+typedef union bdk_ap_icc_ap0rx_el1 bdk_ap_icc_ap0rx_el1_t;
+
+static inline uint64_t BDK_AP_ICC_AP0RX_EL1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICC_AP0RX_EL1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX) && (a<=3))
+ return 0x3000c080400ll + 0x100ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a>=1)&&(a<=3)))
+ return 0x3000c080400ll + 0x100ll * ((a) & 0x3);
+ __bdk_csr_fatal("AP_ICC_AP0RX_EL1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_ICC_AP0RX_EL1(a) bdk_ap_icc_ap0rx_el1_t
+#define bustype_BDK_AP_ICC_AP0RX_EL1(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICC_AP0RX_EL1(a) "AP_ICC_AP0RX_EL1"
+#define busnum_BDK_AP_ICC_AP0RX_EL1(a) (a)
+#define arguments_BDK_AP_ICC_AP0RX_EL1(a) (a),-1,-1,-1
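+
+/* Worked example (illustrative) of the priority-to-bit mapping described
+ * above for AP0Rn (the AP1Rn registers below use the same formula): with U
+ * unimplemented low-order priority bits, priority M is tracked by
+ * active-priority bit (M / 2^(U)) - 1. The helper name and parameters are
+ * invented for this sketch; pri_field stands for the value of
+ * AP_ICC_CTLR_EL1[PRI]. */
+static inline int bdk_ap_icc_ap0r_sketch_bit(unsigned pri_field, unsigned priority)
+{
+    unsigned u = 7 - pri_field;      /* number of unimplemented priority bits */
+    return (int)(priority >> u) - 1; /* e.g. PRI = 0b100: 8->0, 16->1, 24->2 */
+}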
+
+/**
+ * Register (SYSREG) ap_icc_ap0r0_el1
+ *
+ * AP Interrupt Controller Active Priorities Group 0 (0,0) Register
+ * Provides information about the active priorities for the
+ * current interrupt regime.
+ */
+union bdk_ap_icc_ap0r0_el1
+{
+ uint32_t u;
+ struct bdk_ap_icc_ap0r0_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t prioritybits : 32; /**< [ 31: 0](R/W) Provides information about priority M, according to the
+ following relationship:
+ Bit P\<n\> corresponds to priority (M divided by 2^(U))
+ minus 1, where U is the number of unimplemented bits of
+ priority and is equal to (7 - AP_ICC_CTLR_EL1[PRI]).
+ For example, in a system with AP_ICC_CTLR_EL1[PRI] == 0b100:
+
+ There are 5 bits of implemented priority.
+
+ This means there are 3 bits of unimplemented priority, which
+ are always at the least significant end (bits [2:0] are RES0).
+
+ Valid priorities are 8, 16, 24, 32, and so on. Dividing these
+ by 2^(3) = 8 gives 1, 2, 3, 4, and so on.
+
+ Subtracting 1 from each gives bits 0, 1, 2, 3, and so on that
+ provide information about those priorities.
+
+ Accesses to these registers from an interrupt regime give a
+ view of the active priorities that is appropriate for that
+ interrupt regime, to allow save and restore of the appropriate
+ state.
+
+ Interrupt regime and the number of Security states supported
+ by the Distributor affect the view as follows. Unless
+ otherwise stated, when a bit is successfully set to one, this
+ clears any other active priorities corresponding to that bit.
+
+ Current Exception level and Security state, and the
+ resulting AP0Rn access:
+
+ (Secure) EL3: Permitted. Accesses Group 0 Secure active
+ priorities.
+
+ Secure EL1: Permitted. Accesses Group 0 Secure active
+ priorities.
+
+ Nonsecure EL1 access for a Virtual interrupt: accesses
+ ICH_AP0Rn_EL2.
+
+ Nonsecure EL1 or EL2 when the GIC Distributor supports two
+ Security states (GICD_CTLR[DS] is 0): Permitted. Accesses
+ Group 0 Secure active priorities.
+
+ Nonsecure EL1 or EL2 when the GIC Distributor supports one
+ Security state (GICD_CTLR[DS] is 1): Permitted. Accesses
+ Group 0 active priorities.
+
+ A Virtual interrupt in this case means that the interrupt
+ group associated with the register has been virtualized. */
+#else /* Word 0 - Little Endian */
+ uint32_t prioritybits : 32; /**< [ 31: 0](R/W) Provides information about priority M, according to the
+ following relationship:
+ Bit P\<n\> corresponds to priority (M divided by 2^(U))
+ minus 1, where U is the number of unimplemented bits of
+ priority and is equal to (7 - AP_ICC_CTLR_EL1[PRI]).
+ For example, in a system with AP_ICC_CTLR_EL1[PRI] == 0b100:
+
+ There are 5 bits of implemented priority.
+
+ This means there are 3 bits of unimplemented priority, which
+ are always at the least significant end (bits [2:0] are RES0).
+
+ Valid priorities are 8, 16, 24, 32, and so on. Dividing these
+ by 2^(3) = 8 gives 1, 2, 3, 4, and so on.
+
+ Subtracting 1 from each gives bits 0, 1, 2, 3, and so on that
+ provide information about those priorities.
+
+ Accesses to these registers from an interrupt regime give a
+ view of the active priorities that is appropriate for that
+ interrupt regime, to allow save and restore of the appropriate
+ state.
+
+ Interrupt regime and the number of Security states supported
+ by the Distributor affect the view as follows. Unless
+ otherwise stated, when a bit is successfully set to one, this
+ clears any other active priorities corresponding to that bit.
+
+ Current Exception level and Security state, and the
+ resulting AP0Rn access:
+
+ (Secure) EL3: Permitted. Accesses Group 0 Secure active
+ priorities.
+
+ Secure EL1: Permitted. Accesses Group 0 Secure active
+ priorities.
+
+ Nonsecure EL1 access for a Virtual interrupt: accesses
+ ICH_AP0Rn_EL2.
+
+ Nonsecure EL1 or EL2 when the GIC Distributor supports two
+ Security states (GICD_CTLR[DS] is 0): Permitted. Accesses
+ Group 0 Secure active priorities.
+
+ Nonsecure EL1 or EL2 when the GIC Distributor supports one
+ Security state (GICD_CTLR[DS] is 1): Permitted. Accesses
+ Group 0 active priorities.
+
+ A Virtual interrupt in this case means that the interrupt
+ group associated with the register has been virtualized. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_icc_ap0r0_el1_s cn; */
+};
+typedef union bdk_ap_icc_ap0r0_el1 bdk_ap_icc_ap0r0_el1_t;
+
+#define BDK_AP_ICC_AP0R0_EL1 BDK_AP_ICC_AP0R0_EL1_FUNC()
+static inline uint64_t BDK_AP_ICC_AP0R0_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICC_AP0R0_EL1_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x3000c080400ll;
+ __bdk_csr_fatal("AP_ICC_AP0R0_EL1", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_ICC_AP0R0_EL1 bdk_ap_icc_ap0r0_el1_t
+#define bustype_BDK_AP_ICC_AP0R0_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICC_AP0R0_EL1 "AP_ICC_AP0R0_EL1"
+#define busnum_BDK_AP_ICC_AP0R0_EL1 0
+#define arguments_BDK_AP_ICC_AP0R0_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_icc_ap1r#_el1
+ *
+ * AP Interrupt Controller Active Priorities Group 1 (1,3) Register
+ * Provides information about the active priorities for the
+ * current interrupt regime.
+ */
+union bdk_ap_icc_ap1rx_el1
+{
+ uint32_t u;
+ struct bdk_ap_icc_ap1rx_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t prioritybits : 32; /**< [ 31: 0](R/W) Provides information about priority M, according to the
+ following relationship:
+
+ Bit P\<n\> corresponds to priority (M divided by 2^(U))
+ minus 1, where U is the number of unimplemented bits of
+ priority and is equal to (7 - AP_ICC_CTLR_EL1[PRI]).
+
+ For example, in a system with AP_ICC_CTLR_EL1[PRI] == 0b100:
+
+ There are 5 bits of implemented priority.
+
+ This means there are 3 bits of unimplemented priority, which
+ are always at the least significant end (bits [2:0] are RES0).
+
+ Valid priorities are 8, 16, 24, 32, and so on. Dividing these
+ by 2^(3) = 8 gives 1, 2, 3, 4, and so on.
+
+ Subtracting 1 from each gives bits 0, 1, 2, 3, and so on that
+ provide information about those priorities.
+
+ Accesses to these registers from an interrupt regime give a
+ view of the active priorities that is appropriate for that
+ interrupt regime, to allow save and restore of the appropriate
+ state.
+
+ Interrupt regime and the number of Security states supported
+ by the Distributor affect the view as follows. Unless
+ otherwise stated, when a bit is successfully set to one, this
+ clears any other active priorities corresponding to that bit.
+
+ Current Exception level and Security state, and the
+ resulting AP1Rn access:
+
+ (Secure) EL3: Permitted. When AP_SCR_EL3[NS] is 0, accesses
+ Group 1 Secure active priorities. When AP_SCR_EL3[NS] is 1,
+ accesses Group 1 Nonsecure active priorities (unshifted).
+ When a bit is written, the bit is only updated if the
+ corresponding Group 0 and Group 1 Secure active priority is
+ zero.
+
+ Secure EL1: Permitted. Accesses Group 1 Secure active
+ priorities (unshifted). When a bit is written, the bit is
+ only updated if the corresponding Group 0 Secure active
+ priority is zero.
+
+ Nonsecure EL1 access for a Virtual interrupt: accesses
+ ICH_AP1Rn_EL2.
+
+ Nonsecure EL1 or EL2 when the GIC Distributor supports two
+ Security states (GICD_CTLR[DS] is 0): Permitted. Accesses
+ Group 1 Nonsecure active priorities (shifted). When a bit
+ is written, the bit is only updated if the corresponding
+ Group 0 and Group 1 Secure active priority is zero.
+
+ Nonsecure EL1 or EL2 when the GIC Distributor supports one
+ Security state (GICD_CTLR[DS] is 1): Permitted. Accesses
+ Group 1 Nonsecure active priorities (unshifted). When a bit
+ is written, the bit is only updated if the Group 0 active
+ priority is zero.
+
+ A Virtual interrupt in this case means that the interrupt
+ group associated with the register has been virtualized. */
+#else /* Word 0 - Little Endian */
+ uint32_t prioritybits : 32; /**< [ 31: 0](R/W) Provides information about priority M, according to the
+ following relationship:
+
+ Bit P\<n\> corresponds to priority (M divided by 2^(U))
+ minus 1, where U is the number of unimplemented bits of
+ priority and is equal to (7 - AP_ICC_CTLR_EL1[PRI]).
+
+ For example, in a system with AP_ICC_CTLR_EL1[PRI] == 0b100:
+
+ There are 5 bits of implemented priority.
+
+ This means there are 3 bits of unimplemented priority, which
+ are always at the least significant end (bits [2:0] are RES0).
+
+ Valid priorities are 8, 16, 24, 32, and so on. Dividing these
+ by 2^(3) = 8 gives 1, 2, 3, 4, and so on.
+
+ Subtracting 1 from each gives bits 0, 1, 2, 3, and so on that
+ provide information about those priorities.
+
+ Accesses to these registers from an interrupt regime give a
+ view of the active priorities that is appropriate for that
+ interrupt regime, to allow save and restore of the appropriate
+ state.
+
+ Interrupt regime and the number of Security states supported
+ by the Distributor affect the view as follows. Unless
+ otherwise stated, when a bit is successfully set to one, this
+ clears any other active priorities corresponding to that bit.
+
+ Current Exception level and Security state, and the
+ resulting AP1Rn access:
+
+ (Secure) EL3: Permitted. When AP_SCR_EL3[NS] is 0, accesses
+ Group 1 Secure active priorities. When AP_SCR_EL3[NS] is 1,
+ accesses Group 1 Nonsecure active priorities (unshifted).
+ When a bit is written, the bit is only updated if the
+ corresponding Group 0 and Group 1 Secure active priority is
+ zero.
+
+ Secure EL1: Permitted. Accesses Group 1 Secure active
+ priorities (unshifted). When a bit is written, the bit is
+ only updated if the corresponding Group 0 Secure active
+ priority is zero.
+
+ Nonsecure EL1 access for a Virtual interrupt: accesses
+ ICH_AP1Rn_EL2.
+
+ Nonsecure EL1 or EL2 when the GIC Distributor supports two
+ Security states (GICD_CTLR[DS] is 0): Permitted. Accesses
+ Group 1 Nonsecure active priorities (shifted). When a bit
+ is written, the bit is only updated if the corresponding
+ Group 0 and Group 1 Secure active priority is zero.
+
+ Nonsecure EL1 or EL2 when the GIC Distributor supports one
+ Security state (GICD_CTLR[DS] is 1): Permitted. Accesses
+ Group 1 Nonsecure active priorities (unshifted). When a bit
+ is written, the bit is only updated if the Group 0 active
+ priority is zero.
+
+ A Virtual interrupt in this case means that the interrupt
+ group associated with the register has been virtualized. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_icc_ap1rx_el1_s cn; */
+};
+typedef union bdk_ap_icc_ap1rx_el1 bdk_ap_icc_ap1rx_el1_t;
+
+static inline uint64_t BDK_AP_ICC_AP1RX_EL1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICC_AP1RX_EL1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX) && (a<=3))
+ return 0x3000c090000ll + 0x100ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a>=1)&&(a<=3)))
+ return 0x3000c090000ll + 0x100ll * ((a) & 0x3);
+ __bdk_csr_fatal("AP_ICC_AP1RX_EL1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_ICC_AP1RX_EL1(a) bdk_ap_icc_ap1rx_el1_t
+#define bustype_BDK_AP_ICC_AP1RX_EL1(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICC_AP1RX_EL1(a) "AP_ICC_AP1RX_EL1"
+#define busnum_BDK_AP_ICC_AP1RX_EL1(a) (a)
+#define arguments_BDK_AP_ICC_AP1RX_EL1(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_icc_ap1r0_el1
+ *
+ * AP Interrupt Controller Active Priorities Group 1 (0,0) Register
+ * Provides information about the active priorities for the
+ * current interrupt regime.
+ */
+union bdk_ap_icc_ap1r0_el1
+{
+ uint32_t u;
+ struct bdk_ap_icc_ap1r0_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t prioritybits : 32; /**< [ 31: 0](R/W) Provides information about priority M, according to the
+ following relationship:
+
+ Bit P\<n\> corresponds to priority (M divided by 2^(U))
+ minus 1, where U is the number of unimplemented bits of
+ priority and is equal to (7 - AP_ICC_CTLR_EL1[PRI]).
+
+ For example, in a system with AP_ICC_CTLR_EL1[PRI] == 0b100:
+
+ There are 5 bits of implemented priority.
+
+ This means there are 3 bits of unimplemented priority, which
+ are always at the least significant end (bits [2:0] are RES0).
+
+ Valid priorities are 8, 16, 24, 32, and so on. Dividing these
+ by 2^(3) = 8 gives 1, 2, 3, 4, and so on.
+
+ Subtracting 1 from each gives bits 0, 1, 2, 3, and so on that
+ provide information about those priorities.
+
+ Accesses to these registers from an interrupt regime give a
+ view of the active priorities that is appropriate for that
+ interrupt regime, to allow save and restore of the appropriate
+ state.
+
+ Interrupt regime and the number of Security states supported
+ by the Distributor affect the view as follows. Unless
+ otherwise stated, when a bit is successfully set to one, this
+ clears any other active priorities corresponding to that bit.
+
+ Current Exception level and Security state, and the
+ resulting AP1Rn access:
+
+ (Secure) EL3: Permitted. When AP_SCR_EL3[NS] is 0, accesses
+ Group 1 Secure active priorities. When AP_SCR_EL3[NS] is 1,
+ accesses Group 1 Nonsecure active priorities (unshifted).
+ When a bit is written, the bit is only updated if the
+ corresponding Group 0 and Group 1 Secure active priority is
+ zero.
+
+ Secure EL1: Permitted. Accesses Group 1 Secure active
+ priorities (unshifted). When a bit is written, the bit is
+ only updated if the corresponding Group 0 Secure active
+ priority is zero.
+
+ Nonsecure EL1 access for a Virtual interrupt: accesses
+ ICH_AP1Rn_EL2.
+
+ Nonsecure EL1 or EL2 when the GIC Distributor supports two
+ Security states (GICD_CTLR[DS] is 0): Permitted. Accesses
+ Group 1 Nonsecure active priorities (shifted). When a bit
+ is written, the bit is only updated if the corresponding
+ Group 0 and Group 1 Secure active priority is zero.
+
+ Nonsecure EL1 or EL2 when the GIC Distributor supports one
+ Security state (GICD_CTLR[DS] is 1): Permitted. Accesses
+ Group 1 Nonsecure active priorities (unshifted). When a bit
+ is written, the bit is only updated if the Group 0 active
+ priority is zero.
+
+ A Virtual interrupt in this case means that the interrupt
+ group associated with the register has been virtualized. */
+#else /* Word 0 - Little Endian */
+ uint32_t prioritybits : 32; /**< [ 31: 0](R/W) Provides information about priority M, according to the
+ following relationship:
+
+ Bit P\<n\> corresponds to priority (M divided by 2^(U))
+ minus 1, where U is the number of unimplemented bits of
+ priority and is equal to (7 - AP_ICC_CTLR_EL1[PRI]).
+
+ For example, in a system with AP_ICC_CTLR_EL1[PRI] == 0b100:
+
+ There are 5 bits of implemented priority.
+
+ This means there are 3 bits of unimplemented priority, which
+ are always at the least significant end (bits [2:0] are RES0).
+
+ Valid priorities are 8, 16, 24, 32, and so on. Dividing these
+ by 2^(3) = 8 gives 1, 2, 3, 4, and so on.
+
+ Subtracting 1 from each gives bits 0, 1, 2, 3, and so on that
+ provide information about those priorities.
+
+ Accesses to these registers from an interrupt regime give a
+ view of the active priorities that is appropriate for that
+ interrupt regime, to allow save and restore of the appropriate
+ state.
+
+ Interrupt regime and the number of Security states supported
+ by the Distributor affect the view as follows. Unless
+ otherwise stated, when a bit is successfully set to one, this
+ clears any other active priorities corresponding to that bit.
+
+ Current Exception level and Security state, and the
+ resulting AP1Rn access:
+
+ (Secure) EL3: Permitted. When AP_SCR_EL3[NS] is 0, accesses
+ Group 1 Secure active priorities. When AP_SCR_EL3[NS] is 1,
+ accesses Group 1 Nonsecure active priorities (unshifted).
+ When a bit is written, the bit is only updated if the
+ corresponding Group 0 and Group 1 Secure active priority is
+ zero.
+
+ Secure EL1: Permitted. Accesses Group 1 Secure active
+ priorities (unshifted). When a bit is written, the bit is
+ only updated if the corresponding Group 0 Secure active
+ priority is zero.
+
+ Nonsecure EL1 access for a Virtual interrupt: accesses
+ ICH_AP1Rn_EL2.
+
+ Nonsecure EL1 or EL2 when the GIC Distributor supports two
+ Security states (GICD_CTLR[DS] is 0): Permitted. Accesses
+ Group 1 Nonsecure active priorities (shifted). When a bit
+ is written, the bit is only updated if the corresponding
+ Group 0 and Group 1 Secure active priority is zero.
+
+ Nonsecure EL1 or EL2 when the GIC Distributor supports one
+ Security state (GICD_CTLR[DS] is 1): Permitted. Accesses
+ Group 1 Nonsecure active priorities (unshifted). When a bit
+ is written, the bit is only updated if the Group 0 active
+ priority is zero.
+
+ A Virtual interrupt in this case means that the interrupt
+ group associated with the register has been virtualized. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_icc_ap1r0_el1_s cn; */
+};
+typedef union bdk_ap_icc_ap1r0_el1 bdk_ap_icc_ap1r0_el1_t;
+
+#define BDK_AP_ICC_AP1R0_EL1 BDK_AP_ICC_AP1R0_EL1_FUNC()
+static inline uint64_t BDK_AP_ICC_AP1R0_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICC_AP1R0_EL1_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x3000c090000ll;
+ __bdk_csr_fatal("AP_ICC_AP1R0_EL1", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_ICC_AP1R0_EL1 bdk_ap_icc_ap1r0_el1_t
+#define bustype_BDK_AP_ICC_AP1R0_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICC_AP1R0_EL1 "AP_ICC_AP1R0_EL1"
+#define busnum_BDK_AP_ICC_AP1R0_EL1 0
+#define arguments_BDK_AP_ICC_AP1R0_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_icc_asgi1r_el1
+ *
+ * AP Interrupt Controller Alias Software Generated Interrupt Group 1 Register
+ * Provides software the ability to generate group 1 SGIs for the
+ * other Security state.
+ */
+union bdk_ap_icc_asgi1r_el1
+{
+ uint64_t u;
+ struct bdk_ap_icc_asgi1r_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_56_63 : 8;
+ uint64_t aff3 : 8; /**< [ 55: 48](R/W) The affinity 3 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t reserved_41_47 : 7;
+ uint64_t irm : 1; /**< [ 40: 40](R/W) Interrupt Routing Mode. Determines how the generated
+ interrupts should be distributed to processors. Possible
+ values are:
+ 0 = Interrupts routed to the processors specified by a.b.c.{target
+ list}. In this routing, a, b, and c are the values of fields
+ Aff3, Aff2, and Aff1 respectively.
+ 1 = Interrupts routed to all processors in the system, excluding
+ "self". */
+ uint64_t aff2 : 8; /**< [ 39: 32](R/W) The affinity 2 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t reserved_28_31 : 4;
+ uint64_t sgiid : 4; /**< [ 27: 24](R/W) SGI Interrupt ID. */
+ uint64_t aff1 : 8; /**< [ 23: 16](R/W) The affinity 1 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t targetlist : 16; /**< [ 15: 0](R/W) Target List. The set of processors for which SGI interrupts
+ will be generated. Each bit corresponds to the processor
+ within a cluster with an Affinity 0 value equal to the bit
+ number.
+
+ If a bit is 1 and the bit does not correspond to a valid
+ target processor, the bit must be ignored by the Distributor.
+ In such cases, a Distributor may optionally generate an SError
+ interrupt.
+
+ This restricts distribution of SGIs to the first 16 processors
+ of an affinity 1 cluster. */
+#else /* Word 0 - Little Endian */
+ uint64_t targetlist : 16; /**< [ 15: 0](R/W) Target List. The set of processors for which SGI interrupts
+ will be generated. Each bit corresponds to the processor
+ within a cluster with an Affinity 0 value equal to the bit
+ number.
+
+ If a bit is 1 and the bit does not correspond to a valid
+ target processor, the bit must be ignored by the Distributor.
+ In such cases, a Distributor may optionally generate an SError
+ interrupt.
+
+ This restricts distribution of SGIs to the first 16 processors
+ of an affinity 1 cluster. */
+ uint64_t aff1 : 8; /**< [ 23: 16](R/W) The affinity 1 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t sgiid : 4; /**< [ 27: 24](R/W) SGI Interrupt ID. */
+ uint64_t reserved_28_31 : 4;
+ uint64_t aff2 : 8; /**< [ 39: 32](R/W) The affinity 2 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t irm : 1; /**< [ 40: 40](R/W) Interrupt Routing Mode. Determines how the generated
+ interrupts should be distributed to processors. Possible
+ values are:
+ 0 = Interrupts routed to the processors specified by a.b.c.{target
+ list}. In this routing, a, b, and c are the values of fields
+ Aff3, Aff2, and Aff1 respectively.
+ 1 = Interrupts routed to all processors in the system, excluding
+ "self". */
+ uint64_t reserved_41_47 : 7;
+ uint64_t aff3 : 8; /**< [ 55: 48](R/W) The affinity 3 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t reserved_56_63 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_icc_asgi1r_el1_s cn8; */
+ struct bdk_ap_icc_asgi1r_el1_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_56_63 : 8;
+ uint64_t aff3 : 8; /**< [ 55: 48](WO) The affinity 3 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t reserved_41_47 : 7;
+ uint64_t irm : 1; /**< [ 40: 40](WO) Interrupt Routing Mode. Determines how the generated
+ interrupts should be distributed to processors.
+ 0 = Interrupts routed to the processors specified by a.b.c.{target
+ list}. In this routing, a, b, and c are the values of fields
+ Aff3, Aff2, and Aff1 respectively.
+ 1 = Interrupts routed to all processors in the system, excluding
+ self. */
+ uint64_t aff2 : 8; /**< [ 39: 32](WO) The affinity 2 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t reserved_28_31 : 4;
+ uint64_t sgiid : 4; /**< [ 27: 24](WO) SGI Interrupt ID. */
+ uint64_t aff1 : 8; /**< [ 23: 16](WO) The affinity 1 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t targetlist : 16; /**< [ 15: 0](WO) Target List. The set of processors for which SGI interrupts
+ will be generated. Each bit corresponds to the processor
+ within a cluster with an Affinity 0 value equal to the bit
+ number.
+
+ If a bit is 1 and the bit does not correspond to a valid
+ target processor, the bit must be ignored by the Distributor.
+ In such cases, a Distributor may optionally generate an SError
+ interrupt.
+
+ This restricts distribution of SGIs to the first 16 processors
+ of an affinity 1 cluster. */
+#else /* Word 0 - Little Endian */
+ uint64_t targetlist : 16; /**< [ 15: 0](WO) Target List. The set of processors for which SGI interrupts
+ will be generated. Each bit corresponds to the processor
+ within a cluster with an Affinity 0 value equal to the bit
+ number.
+
+ If a bit is 1 and the bit does not correspond to a valid
+ target processor, the bit must be ignored by the Distributor.
+ In such cases, a Distributor may optionally generate an SError
+ interrupt.
+
+ This restricts distribution of SGIs to the first 16 processors
+ of an affinity 1 cluster. */
+ uint64_t aff1 : 8; /**< [ 23: 16](WO) The affinity 1 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t sgiid : 4; /**< [ 27: 24](WO) SGI Interrupt ID. */
+ uint64_t reserved_28_31 : 4;
+ uint64_t aff2 : 8; /**< [ 39: 32](WO) The affinity 2 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t irm : 1; /**< [ 40: 40](WO) Interrupt Routing Mode. Determines how the generated
+ interrupts should be distributed to processors.
+ 0 = Interrupts routed to the processors specified by a.b.c.{target
+ list}. In this routing, a, b, and c are the values of fields
+ Aff3, Aff2, and Aff1 respectively.
+ 1 = Interrupts routed to all processors in the system, excluding
+ self. */
+ uint64_t reserved_41_47 : 7;
+ uint64_t aff3 : 8; /**< [ 55: 48](WO) The affinity 3 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t reserved_56_63 : 8;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_ap_icc_asgi1r_el1 bdk_ap_icc_asgi1r_el1_t;
+
+#define BDK_AP_ICC_ASGI1R_EL1 BDK_AP_ICC_ASGI1R_EL1_FUNC()
+static inline uint64_t BDK_AP_ICC_ASGI1R_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICC_ASGI1R_EL1_FUNC(void)
+{
+ return 0x3000c0b0600ll;
+}
+
+#define typedef_BDK_AP_ICC_ASGI1R_EL1 bdk_ap_icc_asgi1r_el1_t
+#define bustype_BDK_AP_ICC_ASGI1R_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICC_ASGI1R_EL1 "AP_ICC_ASGI1R_EL1"
+#define busnum_BDK_AP_ICC_ASGI1R_EL1 0
+#define arguments_BDK_AP_ICC_ASGI1R_EL1 -1,-1,-1,-1
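+
+/* A minimal sketch of composing an ASGI1R value with the union above:
+ * target SGI 2 at the CPUs with Affinity 0 values 0 and 1 in the cluster
+ * at affinity path 0.0.1. (Illustrative only; the resulting 64-bit value
+ * would then be written to the ICC_ASGI1R_EL1 system register.)
+ *
+ * bdk_ap_icc_asgi1r_el1_t asgi;
+ * asgi.u = 0;
+ * asgi.s.irm = 0;            // route by affinity path, not broadcast
+ * asgi.s.aff3 = 0;
+ * asgi.s.aff2 = 0;
+ * asgi.s.aff1 = 1;           // cluster 1
+ * asgi.s.sgiid = 2;          // SGI interrupt ID 2
+ * asgi.s.targetlist = 0x3;   // Affinity 0 targets 0 and 1
+ */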
+
+/**
+ * Register (SYSREG) ap_icc_bpr0_el1
+ *
+ * AP Interrupt Controller Binary Point Register 0
+ * Defines the point at which the priority value fields split
+ * into two parts, the group priority field and the subpriority
+ * field. The group priority field is used to determine interrupt
+ * preemption.
+ */
+union bdk_ap_icc_bpr0_el1
+{
+ uint32_t u;
+ struct bdk_ap_icc_bpr0_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_3_31 : 29;
+ uint32_t binarypoint : 3; /**< [ 2: 0](R/W) The value of this field controls how the 8-bit interrupt
+ priority field is split into a group priority field, used to
+ determine interrupt preemption, and a subpriority field. This
+ is done as follows:
+
+ \<pre\>
+ Binary point value  Group priority field  Subpriority field  Field with binary point
+ 0 [7:1] [0] ggggggg.s
+ 1 [7:2] [1:0] gggggg.ss
+ 2 [7:3] [2:0] ggggg.sss
+ 3 [7:4] [3:0] gggg.ssss
+ 4 [7:5] [4:0] ggg.sssss
+ 5 [7:6] [5:0] gg.ssssss
+ 6 [7] [6:0] g.sssssss
+ 7 No preemption [7:0] .ssssssss
+ \</pre\> */
+#else /* Word 0 - Little Endian */
+ uint32_t binarypoint : 3; /**< [ 2: 0](R/W) The value of this field controls how the 8-bit interrupt
+ priority field is split into a group priority field, used to
+ determine interrupt preemption, and a subpriority field. This
+ is done as follows:
+
+ \<pre\>
+ Binary point value  Group priority field  Subpriority field  Field with binary point
+ 0 [7:1] [0] ggggggg.s
+ 1 [7:2] [1:0] gggggg.ss
+ 2 [7:3] [2:0] ggggg.sss
+ 3 [7:4] [3:0] gggg.ssss
+ 4 [7:5] [4:0] ggg.sssss
+ 5 [7:6] [5:0] gg.ssssss
+ 6 [7] [6:0] g.sssssss
+ 7 No preemption [7:0] .ssssssss
+ \</pre\> */
+ uint32_t reserved_3_31 : 29;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_icc_bpr0_el1_s cn; */
+};
+typedef union bdk_ap_icc_bpr0_el1 bdk_ap_icc_bpr0_el1_t;
+
+#define BDK_AP_ICC_BPR0_EL1 BDK_AP_ICC_BPR0_EL1_FUNC()
+static inline uint64_t BDK_AP_ICC_BPR0_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICC_BPR0_EL1_FUNC(void)
+{
+ return 0x3000c080300ll;
+}
+
+#define typedef_BDK_AP_ICC_BPR0_EL1 bdk_ap_icc_bpr0_el1_t
+#define bustype_BDK_AP_ICC_BPR0_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICC_BPR0_EL1 "AP_ICC_BPR0_EL1"
+#define busnum_BDK_AP_ICC_BPR0_EL1 0
+#define arguments_BDK_AP_ICC_BPR0_EL1 -1,-1,-1,-1
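+
+/* Worked example for the table above: with BINARYPOINT = 3 the group
+ * priority field is bits [7:4], so a priority value of 0xA4 has group
+ * priority 0xA0 and subpriority 0x4. Equivalently, as a sketch:
+ *
+ * uint8_t group_priority(uint8_t pri, unsigned bp)
+ * {
+ *     // bp == 7 means no preemption: the whole value is subpriority
+ *     return (bp >= 7) ? 0 : (uint8_t)(pri & (0xFF << (bp + 1)));
+ * }
+ */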
+
+/**
+ * Register (SYSREG) ap_icc_bpr1_el1
+ *
+ * AP Interrupt Controller Binary Point Register 1
+ * Defines the point at which the priority value fields split
+ * into two parts, the group priority field and the subpriority
+ * field. The group priority field is used to determine Group 1
+ * interrupt preemption.
+ */
+union bdk_ap_icc_bpr1_el1
+{
+ uint32_t u;
+ struct bdk_ap_icc_bpr1_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_3_31 : 29;
+ uint32_t binarypoint : 3; /**< [ 2: 0](R/W) The value of this field controls how the 8-bit interrupt
+ priority field is split into a group priority field, used to
+ determine interrupt preemption, and a subpriority field. This
+ is done as follows:
+ \<pre\>
+ Binary point value  Group priority field  Subpriority field  Field with binary point
+ 0 [7:1] [0] ggggggg.s
+ 1 [7:2] [1:0] gggggg.ss
+ 2 [7:3] [2:0] ggggg.sss
+ 3 [7:4] [3:0] gggg.ssss
+ 4 [7:5] [4:0] ggg.sssss
+ 5 [7:6] [5:0] gg.ssssss
+ 6 [7] [6:0] g.sssssss
+ 7 No preemption [7:0] .ssssssss
+ \</pre\> */
+#else /* Word 0 - Little Endian */
+ uint32_t binarypoint : 3; /**< [ 2: 0](R/W) The value of this field controls how the 8-bit interrupt
+ priority field is split into a group priority field, used to
+ determine interrupt preemption, and a subpriority field. This
+ is done as follows:
+ \<pre\>
+ Binary point value  Group priority field  Subpriority field  Field with binary point
+ 0 [7:1] [0] ggggggg.s
+ 1 [7:2] [1:0] gggggg.ss
+ 2 [7:3] [2:0] ggggg.sss
+ 3 [7:4] [3:0] gggg.ssss
+ 4 [7:5] [4:0] ggg.sssss
+ 5 [7:6] [5:0] gg.ssssss
+ 6 [7] [6:0] g.sssssss
+ 7 No preemption [7:0] .ssssssss
+ \</pre\> */
+ uint32_t reserved_3_31 : 29;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_icc_bpr1_el1_s cn; */
+};
+typedef union bdk_ap_icc_bpr1_el1 bdk_ap_icc_bpr1_el1_t;
+
+#define BDK_AP_ICC_BPR1_EL1 BDK_AP_ICC_BPR1_EL1_FUNC()
+static inline uint64_t BDK_AP_ICC_BPR1_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICC_BPR1_EL1_FUNC(void)
+{
+ return 0x3000c0c0300ll;
+}
+
+#define typedef_BDK_AP_ICC_BPR1_EL1 bdk_ap_icc_bpr1_el1_t
+#define bustype_BDK_AP_ICC_BPR1_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICC_BPR1_EL1 "AP_ICC_BPR1_EL1"
+#define busnum_BDK_AP_ICC_BPR1_EL1 0
+#define arguments_BDK_AP_ICC_BPR1_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_icc_ctlr_el1
+ *
+ * AP Interrupt Controller Control EL1 Register
+ * Controls aspects of the behaviour of the GIC CPU interface and
+ * provides information about the features implemented.
+ */
+union bdk_ap_icc_ctlr_el1
+{
+ uint32_t u;
+ struct bdk_ap_icc_ctlr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_16_31 : 16;
+ uint32_t a3v : 1; /**< [ 15: 15](RO) Affinity 3 Valid. Read-only and writes are ignored.
+ Virtual accesses return the value from AP_ICH_VTR_EL2[A3V].
+ Possible values are:
+ 0 = The CPU interface logic only supports zero values of Affinity
+ 3 in SGI generation system registers.
+ 1 = The CPU interface logic supports nonzero values of Affinity 3
+ in SGI generation system registers. */
+ uint32_t seis : 1; /**< [ 14: 14](RO) SEI Support. Read-only and writes are ignored. Indicates
+ whether the CPU interface supports local generation of SEIs.
+ Virtual accesses return the value from AP_ICH_VTR_EL2[SEIS].
+ 0 = The CPU interface logic does not support local generation of
+ SEIs by the CPU interface.
+ 1 = The CPU interface logic supports local generation of SEIs by
+ the CPU interface. */
+ uint32_t idbits : 3; /**< [ 13: 11](RO) Identifier bits. Read-only and writes are ignored. The number
+ of physical interrupt identifier bits supported.
+ Virtual accesses return the value from AP_ICH_VTR_EL2[IDBITS].
+ 0x0 = 16 bits.
+ 0x1 = 24 bits.
+ All other values are reserved. */
+ uint32_t pribits : 3; /**< [ 10: 8](RO) Priority bits. Read-only and writes are ignored. The number of
+ priority bits implemented, minus one.
+ Virtual accesses return the value from AP_ICH_VTR_EL2[PRIBITS]. */
+ uint32_t reserved_7 : 1;
+ uint32_t pmhe : 1; /**< [ 6: 6](R/W) Priority Mask Hint Enable.
+ If EL3 is present and GICD_CTLR[DS] == 0, this bit is a read-
+ only alias of AP_ICC_CTLR_EL3[PMHE].
+ If EL3 is present and GICD_CTLR[DS] == 1, this bit is writable
+ at EL1 and EL2. */
+ uint32_t reserved_2_5 : 4;
+ uint32_t eoimode : 1; /**< [ 1: 1](R/W) Alias of AP_ICC_CTLR_EL3[EOIMODE_EL1{S,NS}] as appropriate to
+ the current Security state.
+ Virtual accesses modify AP_ICH_VMCR_EL2[VEOIM]. */
+ uint32_t cbpr : 1; /**< [ 0: 0](R/W) Common Binary Point Register.
+ If EL3 is present and GICD_CTLR[DS] == 0, this bit is a read-
+ only alias of AP_ICC_CTLR_EL3[CBPR]_EL1{S,NS} as appropriate.
+ If EL3 is not present, this field resets to zero.
+ If EL3 is present and GICD_CTLR[DS] == 1, this bit is writable
+ at EL1 and EL2.
+ Virtual accesses modify AP_ICH_VMCR_EL2[VCBPR]. An access is
+ virtual when accessed at nonsecure EL1 and either of FIQ or
+ IRQ has been virtualized. That is, when (AP_SCR_EL3[NS] == 1 &&
+ (AP_HCR_EL2[FMO] == 1 || AP_HCR_EL2[IMO] == 1)). */
+#else /* Word 0 - Little Endian */
+ uint32_t cbpr : 1; /**< [ 0: 0](R/W) Common Binary Point Register.
+ If EL3 is present and GICD_CTLR[DS] == 0, this bit is a read-
+ only alias of AP_ICC_CTLR_EL3[CBPR]_EL1{S,NS} as appropriate.
+ If EL3 is not present, this field resets to zero.
+ If EL3 is present and GICD_CTLR[DS] == 1, this bit is writable
+ at EL1 and EL2.
+ Virtual accesses modify AP_ICH_VMCR_EL2[VCBPR]. An access is
+ virtual when accessed at nonsecure EL1 and either of FIQ or
+ IRQ has been virtualized. That is, when (AP_SCR_EL3[NS] == 1 &&
+ (AP_HCR_EL2[FMO] == 1 || AP_HCR_EL2[IMO] == 1)). */
+ uint32_t eoimode : 1; /**< [ 1: 1](R/W) Alias of AP_ICC_CTLR_EL3[EOIMODE_EL1{S,NS}] as appropriate to
+ the current Security state.
+ Virtual accesses modify AP_ICH_VMCR_EL2[VEOIM]. */
+ uint32_t reserved_2_5 : 4;
+ uint32_t pmhe : 1; /**< [ 6: 6](R/W) Priority Mask Hint Enable.
+ If EL3 is present and GICD_CTLR[DS] == 0, this bit is a read-
+ only alias of AP_ICC_CTLR_EL3[PMHE].
+ If EL3 is present and GICD_CTLR[DS] == 1, this bit is writable
+ at EL1 and EL2. */
+ uint32_t reserved_7 : 1;
+ uint32_t pribits : 3; /**< [ 10: 8](RO) Priority bits. Read-only and writes are ignored. The number of
+ priority bits implemented, minus one.
+ Virtual accesses return the value from AP_ICH_VTR_EL2[PRIBITS]. */
+ uint32_t idbits : 3; /**< [ 13: 11](RO) Identifier bits. Read-only and writes are ignored. The number
+ of physical interrupt identifier bits supported.
+ Virtual accesses return the value from AP_ICH_VTR_EL2[IDBITS].
+ 0x0 = 16 bits.
+ 0x1 = 24 bits.
+ All other values are reserved. */
+ uint32_t seis : 1; /**< [ 14: 14](RO) SEI Support. Read-only and writes are ignored. Indicates
+ whether the CPU interface supports local generation of SEIs.
+ Virtual accesses return the value from AP_ICH_VTR_EL2[SEIS].
+ 0 = The CPU interface logic does not support local generation of
+ SEIs by the CPU interface.
+ 1 = The CPU interface logic supports local generation of SEIs by
+ the CPU interface. */
+ uint32_t a3v : 1; /**< [ 15: 15](RO) Affinity 3 Valid. Read-only and writes are ignored.
+ Virtual accesses return the value from AP_ICH_VTR_EL2[A3V].
+ Possible values are:
+ 0 = The CPU interface logic only supports zero values of Affinity
+ 3 in SGI generation system registers.
+ 1 = The CPU interface logic supports nonzero values of Affinity 3
+ in SGI generation system registers. */
+ uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_icc_ctlr_el1_s cn; */
+};
+typedef union bdk_ap_icc_ctlr_el1 bdk_ap_icc_ctlr_el1_t;
+
+#define BDK_AP_ICC_CTLR_EL1 BDK_AP_ICC_CTLR_EL1_FUNC()
+static inline uint64_t BDK_AP_ICC_CTLR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICC_CTLR_EL1_FUNC(void)
+{
+ return 0x3000c0c0400ll;
+}
+
+#define typedef_BDK_AP_ICC_CTLR_EL1 bdk_ap_icc_ctlr_el1_t
+#define bustype_BDK_AP_ICC_CTLR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICC_CTLR_EL1 "AP_ICC_CTLR_EL1"
+#define busnum_BDK_AP_ICC_CTLR_EL1 0
+#define arguments_BDK_AP_ICC_CTLR_EL1 -1,-1,-1,-1
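+
+/* A sketch of decoding the read-only feature fields above, assuming the
+ * register value has already been fetched into `ctlr` (for example via
+ * an MRS of ICC_CTLR_EL1 through whatever accessor the caller uses):
+ *
+ * bdk_ap_icc_ctlr_el1_t ctlr;
+ * ctlr.u = ...;                                      // MRS of ICC_CTLR_EL1
+ * unsigned pri_levels = 1u << (ctlr.s.pribits + 1);  // PRIBITS is N minus one
+ * unsigned id_bits = (ctlr.s.idbits == 0x1) ? 24 : 16;
+ */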
+
+/**
+ * Register (SYSREG) ap_icc_ctlr_el3
+ *
+ * AP Interrupt Controller Control EL3 Register
+ * Controls aspects of the behaviour of the GIC CPU interface and
+ * provides information about the features implemented.
+ */
+union bdk_ap_icc_ctlr_el3
+{
+ uint32_t u;
+ struct bdk_ap_icc_ctlr_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_16_31 : 16;
+ uint32_t a3v : 1; /**< [ 15: 15](RO) Affinity 3 Valid. Read-only and writes are ignored.
+ Virtual accesses return the value from AP_ICH_VTR_EL2[A3V].
+ Possible values are:
+ 0 = The CPU interface logic only supports zero values of Affinity
+ 3 in SGI generation system registers.
+ 1 = The CPU interface logic supports nonzero values of Affinity 3
+ in SGI generation system registers. */
+ uint32_t seis : 1; /**< [ 14: 14](RO) SEI Support. Read-only and writes are ignored. Indicates
+ whether the CPU interface supports generation of SEIs.
+ Virtual accesses return the value from AP_ICH_VTR_EL2[SEIS].
+ 0 = The CPU interface logic does not support generation of SEIs.
+ 1 = The CPU interface logic supports generation of SEIs. */
+ uint32_t idbits : 3; /**< [ 13: 11](RO) Identifier bits. Read-only and writes are ignored. The number
+ of physical interrupt identifier bits supported.
+ 0x0 = 16 bits.
+ 0x1 = 24 bits.
+ All other values are reserved. */
+ uint32_t pribits : 3; /**< [ 10: 8](RO) Priority bits. Read-only and writes are ignored. The number of
+ priority bits implemented, minus one. */
+ uint32_t reserved_7 : 1;
+ uint32_t pmhe : 1; /**< [ 6: 6](R/W) Priority Mask Hint Enable.
+ When set, enables use of the PMR as a hint for interrupt
+ distribution. */
+ uint32_t rm : 1; /**< [ 5: 5](RO) Routing Modifier. Note: In systems without EL3 or where the secure
+ copy of AP_ICC_SRE_EL1 is RES1, this bit is RES0.
+ This bit is used to modify the behaviour of
+ AP_ICC_IAR0_EL1 and AP_ICC_IAR1_EL1 such that systems with legacy
+ secure software may be supported correctly.
+ 0 = Secure Group 0 and nonsecure group 1 interrupts can be
+ acknowledged and observed as the highest priority interrupt
+ at EL3 in AArch64 or Monitor mode in AArch32.
+ 1 = Secure Group 0 and nonsecure group 1 interrupts cannot be
+ acknowledged and observed as the highest priority interrupt
+ at EL3 in AArch64 or Monitor mode in AArch32 but return
+ special values. */
+ uint32_t eoimode_el1ns : 1; /**< [ 4: 4](R/W) EOI mode for interrupts handled at nonsecure EL1 and EL2. */
+ uint32_t eoimode_el1s : 1; /**< [ 3: 3](R/W) EOI mode for interrupts handled at secure EL1. */
+ uint32_t eoimode_el3 : 1; /**< [ 2: 2](R/W) EOI mode for interrupts handled at EL3. */
+ uint32_t cbpr_el1ns : 1; /**< [ 1: 1](R/W) When set, nonsecure accesses to GICC_BPR and AP_ICC_BPR1_EL1
+ access the state of AP_ICC_BPR0_EL1. AP_ICC_BPR0_EL1 is used to
+ determine the preemption group for nonsecure group 1
+ interrupts. */
+ uint32_t cbpr_el1s : 1; /**< [ 0: 0](R/W) When set, secure EL1 accesses to AP_ICC_BPR1_EL1 access the state
+ of AP_ICC_BPR0_EL1. AP_ICC_BPR0_EL1 is used to determine the
+ preemption group for Secure Group 1 interrupts. */
+#else /* Word 0 - Little Endian */
+ uint32_t cbpr_el1s : 1; /**< [ 0: 0](R/W) When set, secure EL1 accesses to AP_ICC_BPR1_EL1 access the state
+ of AP_ICC_BPR0_EL1. AP_ICC_BPR0_EL1 is used to determine the
+ preemption group for Secure Group 1 interrupts. */
+ uint32_t cbpr_el1ns : 1; /**< [ 1: 1](R/W) When set, nonsecure accesses to GICC_BPR and AP_ICC_BPR1_EL1
+ access the state of AP_ICC_BPR0_EL1. AP_ICC_BPR0_EL1 is used to
+ determine the preemption group for nonsecure group 1
+ interrupts. */
+ uint32_t eoimode_el3 : 1; /**< [ 2: 2](R/W) EOI mode for interrupts handled at EL3. */
+ uint32_t eoimode_el1s : 1; /**< [ 3: 3](R/W) EOI mode for interrupts handled at secure EL1. */
+ uint32_t eoimode_el1ns : 1; /**< [ 4: 4](R/W) EOI mode for interrupts handled at nonsecure EL1 and EL2. */
+ uint32_t rm : 1; /**< [ 5: 5](RO) Routing Modifier. Note: In systems without EL3 or where the secure
+ copy of AP_ICC_SRE_EL1 is RES1, this bit is RES0.
+ This bit is used to modify the behaviour of
+ AP_ICC_IAR0_EL1 and AP_ICC_IAR1_EL1 such that systems with legacy
+ secure software may be supported correctly.
+ 0 = Secure Group 0 and nonsecure group 1 interrupts can be
+ acknowledged and observed as the highest priority interrupt
+ at EL3 in AArch64 or Monitor mode in AArch32.
+ 1 = Secure Group 0 and nonsecure group 1 interrupts cannot be
+ acknowledged and observed as the highest priority interrupt
+ at EL3 in AArch64 or Monitor mode in AArch32 but return
+ special values. */
+ uint32_t pmhe : 1; /**< [ 6: 6](R/W) Priority Mask Hint Enable.
+ When set, enables use of the PMR as a hint for interrupt
+ distribution. */
+ uint32_t reserved_7 : 1;
+ uint32_t pribits : 3; /**< [ 10: 8](RO) Priority bits. Read-only and writes are ignored. The number of
+ priority bits implemented, minus one. */
+ uint32_t idbits : 3; /**< [ 13: 11](RO) Identifier bits. Read-only and writes are ignored. The number
+ of physical interrupt identifier bits supported.
+ 0x0 = 16 bits.
+ 0x1 = 24 bits.
+ All other values are reserved. */
+ uint32_t seis : 1; /**< [ 14: 14](RO) SEI Support. Read-only and writes are ignored. Indicates
+ whether the CPU interface supports generation of SEIs.
+ Virtual accesses return the value from AP_ICH_VTR_EL2[SEIS].
+ 0 = The CPU interface logic does not support generation of SEIs.
+ 1 = The CPU interface logic supports generation of SEIs. */
+ uint32_t a3v : 1; /**< [ 15: 15](RO) Affinity 3 Valid. Read-only and writes are ignored.
+ Virtual accesses return the value from AP_ICH_VTR_EL2[A3V].
+ Possible values are:
+ 0 = The CPU interface logic only supports zero values of Affinity
+ 3 in SGI generation system registers.
+ 1 = The CPU interface logic supports nonzero values of Affinity 3
+ in SGI generation system registers. */
+ uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_icc_ctlr_el3_s cn; */
+};
+typedef union bdk_ap_icc_ctlr_el3 bdk_ap_icc_ctlr_el3_t;
+
+#define BDK_AP_ICC_CTLR_EL3 BDK_AP_ICC_CTLR_EL3_FUNC()
+static inline uint64_t BDK_AP_ICC_CTLR_EL3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICC_CTLR_EL3_FUNC(void)
+{
+ return 0x3060c0c0400ll;
+}
+
+#define typedef_BDK_AP_ICC_CTLR_EL3 bdk_ap_icc_ctlr_el3_t
+#define bustype_BDK_AP_ICC_CTLR_EL3 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICC_CTLR_EL3 "AP_ICC_CTLR_EL3"
+#define busnum_BDK_AP_ICC_CTLR_EL3 0
+#define arguments_BDK_AP_ICC_CTLR_EL3 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_icc_dir_el1
+ *
+ * AP Interrupt Controller Deactivate Interrupt Register
+ * When interrupt priority drop is separated from interrupt
+ * deactivation, a write to this register deactivates the
+ * specified interrupt.
+ */
+union bdk_ap_icc_dir_el1
+{
+ uint32_t u;
+ struct bdk_ap_icc_dir_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t interruptid : 24; /**< [ 23: 0](WO) The interrupt ID.
+ This field has either 16 or 24 bits implemented. The number of
+ implemented bits can be found in AP_ICC_CTLR_EL1[IDBITS] and
+ AP_ICC_CTLR_EL3[IDBITS]. If only 16 bits are implemented, bits
+ [23:16] of this register are RES0. */
+#else /* Word 0 - Little Endian */
+ uint32_t interruptid : 24; /**< [ 23: 0](WO) The interrupt ID.
+ This field has either 16 or 24 bits implemented. The number of
+ implemented bits can be found in AP_ICC_CTLR_EL1[IDBITS] and
+ AP_ICC_CTLR_EL3[IDBITS]. If only 16 bits are implemented, bits
+ [23:16] of this register are RES0. */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_icc_dir_el1_s cn; */
+};
+typedef union bdk_ap_icc_dir_el1 bdk_ap_icc_dir_el1_t;
+
+#define BDK_AP_ICC_DIR_EL1 BDK_AP_ICC_DIR_EL1_FUNC()
+static inline uint64_t BDK_AP_ICC_DIR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICC_DIR_EL1_FUNC(void)
+{
+ return 0x3000c0b0100ll;
+}
+
+#define typedef_BDK_AP_ICC_DIR_EL1 bdk_ap_icc_dir_el1_t
+#define bustype_BDK_AP_ICC_DIR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICC_DIR_EL1 "AP_ICC_DIR_EL1"
+#define busnum_BDK_AP_ICC_DIR_EL1 0
+#define arguments_BDK_AP_ICC_DIR_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_icc_eoir0_el1
+ *
+ * AP Interrupt Controller End Of Interrupt Register 0
+ * A processor writes to this register to inform the CPU
+ * interface that it has completed the processing of the
+ * specified interrupt.
+ */
+union bdk_ap_icc_eoir0_el1
+{
+ uint32_t u;
+ struct bdk_ap_icc_eoir0_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t intvector : 24; /**< [ 23: 0](WO) The InterruptID value from the corresponding GICC_IAR access.
+ This field has either 16 or 24 bits implemented. The number of
+ implemented bits can be found in AP_ICC_CTLR_EL1[IDBITS] and
+ AP_ICC_CTLR_EL3[IDBITS]. If only 16 bits are implemented, bits
+ [23:16] of this register are RES0. */
+#else /* Word 0 - Little Endian */
+ uint32_t intvector : 24; /**< [ 23: 0](WO) The InterruptID value from the corresponding GICC_IAR access.
+ This field has either 16 or 24 bits implemented. The number of
+ implemented bits can be found in AP_ICC_CTLR_EL1[IDBITS] and
+ AP_ICC_CTLR_EL3[IDBITS]. If only 16 bits are implemented, bits
+ [23:16] of this register are RES0. */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_icc_eoir0_el1_s cn; */
+};
+typedef union bdk_ap_icc_eoir0_el1 bdk_ap_icc_eoir0_el1_t;
+
+#define BDK_AP_ICC_EOIR0_EL1 BDK_AP_ICC_EOIR0_EL1_FUNC()
+static inline uint64_t BDK_AP_ICC_EOIR0_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICC_EOIR0_EL1_FUNC(void)
+{
+ return 0x3000c080100ll;
+}
+
+#define typedef_BDK_AP_ICC_EOIR0_EL1 bdk_ap_icc_eoir0_el1_t
+#define bustype_BDK_AP_ICC_EOIR0_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICC_EOIR0_EL1 "AP_ICC_EOIR0_EL1"
+#define busnum_BDK_AP_ICC_EOIR0_EL1 0
+#define arguments_BDK_AP_ICC_EOIR0_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_icc_eoir1_el1
+ *
+ * AP Interrupt Controller End Of Interrupt Register 1
+ * A processor writes to this register to inform the CPU
+ * interface that it has completed the processing of the
+ * specified Group 1 interrupt.
+ */
+union bdk_ap_icc_eoir1_el1
+{
+ uint32_t u;
+ struct bdk_ap_icc_eoir1_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t intvector : 24; /**< [ 23: 0](WO) The InterruptID value from the corresponding GICC_IAR access.
+ This field has either 16 or 24 bits implemented. The number of
+ implemented bits can be found in AP_ICC_CTLR_EL1[IDBITS] and
+ AP_ICC_CTLR_EL3[IDBITS]. If only 16 bits are implemented, bits
+ [23:16] of this register are RES0. */
+#else /* Word 0 - Little Endian */
+ uint32_t intvector : 24; /**< [ 23: 0](WO) The InterruptID value from the corresponding GICC_IAR access.
+ This field has either 16 or 24 bits implemented. The number of
+ implemented bits can be found in AP_ICC_CTLR_EL1[IDBITS] and
+ AP_ICC_CTLR_EL3[IDBITS]. If only 16 bits are implemented, bits
+ [23:16] of this register are RES0. */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_icc_eoir1_el1_s cn; */
+};
+typedef union bdk_ap_icc_eoir1_el1 bdk_ap_icc_eoir1_el1_t;
+
+#define BDK_AP_ICC_EOIR1_EL1 BDK_AP_ICC_EOIR1_EL1_FUNC()
+static inline uint64_t BDK_AP_ICC_EOIR1_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICC_EOIR1_EL1_FUNC(void)
+{
+ return 0x3000c0c0100ll;
+}
+
+#define typedef_BDK_AP_ICC_EOIR1_EL1 bdk_ap_icc_eoir1_el1_t
+#define bustype_BDK_AP_ICC_EOIR1_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICC_EOIR1_EL1 "AP_ICC_EOIR1_EL1"
+#define busnum_BDK_AP_ICC_EOIR1_EL1 0
+#define arguments_BDK_AP_ICC_EOIR1_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_icc_hppir0_el1
+ *
+ * AP Interrupt Controller Highest Priority Pending Interrupt Register 0
+ * Indicates the Interrupt ID, and processor ID if appropriate,
+ * of the highest priority pending interrupt on the CPU
+ * interface.
+ */
+union bdk_ap_icc_hppir0_el1
+{
+ uint32_t u;
+ struct bdk_ap_icc_hppir0_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t intvector : 24; /**< [ 23: 0](RO) The interrupt ID of the highest priority pending interrupt.
+ This field has either 16 or 24 bits implemented. The number of
+ implemented bits can be found in AP_ICC_CTLR_EL1[IDBITS] and
+ AP_ICC_CTLR_EL3[IDBITS]. If only 16 bits are implemented, bits
+ [23:16] of this register are RES0. */
+#else /* Word 0 - Little Endian */
+ uint32_t intvector : 24; /**< [ 23: 0](RO) The interrupt ID of the highest priority pending interrupt.
+ This field has either 16 or 24 bits implemented. The number of
+ implemented bits can be found in AP_ICC_CTLR_EL1[IDBITS] and
+ AP_ICC_CTLR_EL3[IDBITS]. If only 16 bits are implemented, bits
+ [23:16] of this register are RES0. */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_icc_hppir0_el1_s cn; */
+};
+typedef union bdk_ap_icc_hppir0_el1 bdk_ap_icc_hppir0_el1_t;
+
+#define BDK_AP_ICC_HPPIR0_EL1 BDK_AP_ICC_HPPIR0_EL1_FUNC()
+static inline uint64_t BDK_AP_ICC_HPPIR0_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICC_HPPIR0_EL1_FUNC(void)
+{
+ return 0x3000c080200ll;
+}
+
+#define typedef_BDK_AP_ICC_HPPIR0_EL1 bdk_ap_icc_hppir0_el1_t
+#define bustype_BDK_AP_ICC_HPPIR0_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICC_HPPIR0_EL1 "AP_ICC_HPPIR0_EL1"
+#define busnum_BDK_AP_ICC_HPPIR0_EL1 0
+#define arguments_BDK_AP_ICC_HPPIR0_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_icc_hppir1_el1
+ *
+ * AP Interrupt Controller Highest Priority Pending Interrupt Register 1
+ * If the highest priority pending interrupt on the CPU interface
+ * is a Group 1 interrupt, returns the interrupt ID of that
+ * interrupt. Otherwise, returns a spurious interrupt ID of 1023.
+ */
+union bdk_ap_icc_hppir1_el1
+{
+ uint32_t u;
+ struct bdk_ap_icc_hppir1_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t intvector : 24; /**< [ 23: 0](RO) The interrupt ID of the highest priority pending interrupt.
+
+ This field has either 16 or 24 bits implemented. The number of
+ implemented bits can be found in AP_ICC_CTLR_EL1[IDBITS] and
+ AP_ICC_CTLR_EL3[IDBITS]. If only 16 bits are implemented, bits
+ [23:16] of this register are RES0. */
+#else /* Word 0 - Little Endian */
+ uint32_t intvector : 24; /**< [ 23: 0](RO) The interrupt ID of the highest priority pending interrupt.
+
+ This field has either 16 or 24 bits implemented. The number of
+ implemented bits can be found in AP_ICC_CTLR_EL1[IDBITS] and
+ AP_ICC_CTLR_EL3[IDBITS]. If only 16 bits are implemented, bits
+ [23:16] of this register are RES0. */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_icc_hppir1_el1_s cn; */
+};
+typedef union bdk_ap_icc_hppir1_el1 bdk_ap_icc_hppir1_el1_t;
+
+#define BDK_AP_ICC_HPPIR1_EL1 BDK_AP_ICC_HPPIR1_EL1_FUNC()
+static inline uint64_t BDK_AP_ICC_HPPIR1_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICC_HPPIR1_EL1_FUNC(void)
+{
+ return 0x3000c0c0200ll;
+}
+
+#define typedef_BDK_AP_ICC_HPPIR1_EL1 bdk_ap_icc_hppir1_el1_t
+#define bustype_BDK_AP_ICC_HPPIR1_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICC_HPPIR1_EL1 "AP_ICC_HPPIR1_EL1"
+#define busnum_BDK_AP_ICC_HPPIR1_EL1 0
+#define arguments_BDK_AP_ICC_HPPIR1_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_icc_iar0_el1
+ *
+ * AP Interrupt Controller Interrupt Acknowledge Register 0
+ * The processor reads this register to obtain the interrupt ID
+ * of the signaled interrupt. This read acts as an acknowledge
+ * for the interrupt.
+ */
+union bdk_ap_icc_iar0_el1
+{
+ uint32_t u;
+ struct bdk_ap_icc_iar0_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t intvector : 24; /**< [ 23: 0](RO) The ID of the signaled interrupt. IDs 1020 to 1023 are
+ reserved and convey additional information such as spurious
+ interrupts.
+
+ This field has either 16 or 24 bits implemented. The number of
+ implemented bits can be found in AP_ICC_CTLR_EL1[IDBITS] and
+ AP_ICC_CTLR_EL3[IDBITS]. If only 16 bits are implemented, bits
+ [23:16] of this register are RES0. */
+#else /* Word 0 - Little Endian */
+ uint32_t intvector : 24; /**< [ 23: 0](RO) The ID of the signaled interrupt. IDs 1020 to 1023 are
+ reserved and convey additional information such as spurious
+ interrupts.
+
+ This field has either 16 or 24 bits implemented. The number of
+ implemented bits can be found in AP_ICC_CTLR_EL1[IDBITS] and
+ AP_ICC_CTLR_EL3[IDBITS]. If only 16 bits are implemented, bits
+ [23:16] of this register are RES0. */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_icc_iar0_el1_s cn; */
+};
+typedef union bdk_ap_icc_iar0_el1 bdk_ap_icc_iar0_el1_t;
+
+#define BDK_AP_ICC_IAR0_EL1 BDK_AP_ICC_IAR0_EL1_FUNC()
+static inline uint64_t BDK_AP_ICC_IAR0_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICC_IAR0_EL1_FUNC(void)
+{
+ return 0x3000c080000ll;
+}
+
+#define typedef_BDK_AP_ICC_IAR0_EL1 bdk_ap_icc_iar0_el1_t
+#define bustype_BDK_AP_ICC_IAR0_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICC_IAR0_EL1 "AP_ICC_IAR0_EL1"
+#define busnum_BDK_AP_ICC_IAR0_EL1 0
+#define arguments_BDK_AP_ICC_IAR0_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_icc_iar1_el1
+ *
+ * AP Interrupt Controller Interrupt Acknowledge Register 1
+ * The processor reads this register to obtain the interrupt ID
+ * of the signaled Group 1 interrupt. This read acts as an
+ * acknowledge for the interrupt.
+ */
+union bdk_ap_icc_iar1_el1
+{
+ uint32_t u;
+ struct bdk_ap_icc_iar1_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t intvector : 24; /**< [ 23: 0](RO) The ID of the signaled interrupt. IDs 1020 to 1023 are
+ reserved and convey additional information such as spurious
+ interrupts.
+
+ This field has either 16 or 24 bits implemented. The number of
+ implemented bits can be found in AP_ICC_CTLR_EL1[IDBITS] and
+ AP_ICC_CTLR_EL3[IDBITS]. If only 16 bits are implemented, bits
+ [23:16] of this register are RES0. */
+#else /* Word 0 - Little Endian */
+ uint32_t intvector : 24; /**< [ 23: 0](RO) The ID of the signaled interrupt. IDs 1020 to 1023 are
+ reserved and convey additional information such as spurious
+ interrupts.
+
+ This field has either 16 or 24 bits implemented. The number of
+ implemented bits can be found in AP_ICC_CTLR_EL1[IDBITS] and
+ AP_ICC_CTLR_EL3[IDBITS]. If only 16 bits are implemented, bits
+ [23:16] of this register are RES0. */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_icc_iar1_el1_s cn; */
+};
+typedef union bdk_ap_icc_iar1_el1 bdk_ap_icc_iar1_el1_t;
+
+#define BDK_AP_ICC_IAR1_EL1 BDK_AP_ICC_IAR1_EL1_FUNC()
+static inline uint64_t BDK_AP_ICC_IAR1_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICC_IAR1_EL1_FUNC(void)
+{
+ return 0x3000c0c0000ll;
+}
+
+#define typedef_BDK_AP_ICC_IAR1_EL1 bdk_ap_icc_iar1_el1_t
+#define bustype_BDK_AP_ICC_IAR1_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICC_IAR1_EL1 "AP_ICC_IAR1_EL1"
+#define busnum_BDK_AP_ICC_IAR1_EL1 0
+#define arguments_BDK_AP_ICC_IAR1_EL1 -1,-1,-1,-1
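+
+/* A sketch of the acknowledge/complete flow implied by the registers
+ * above. The s3_* encodings are assumed here, derived from the address
+ * constants returned by the corresponding _FUNC() helpers (op0/op1/CRn/
+ * CRm/op2); verify against the GICv3 specification before relying on them.
+ *
+ * uint64_t id;
+ * __asm__ volatile("mrs %0, s3_0_c12_c12_0" : "=r" (id));  // ICC_IAR1_EL1: acknowledge
+ * // ... service interrupt `id` ...
+ * __asm__ volatile("msr s3_0_c12_c12_1, %0" : : "r" (id)); // ICC_EOIR1_EL1: priority drop
+ * // If AP_ICC_CTLR_EL1[EOIMODE] separates priority drop from
+ * // deactivation, also deactivate via AP_ICC_DIR_EL1:
+ * __asm__ volatile("msr s3_0_c12_c11_1, %0" : : "r" (id)); // ICC_DIR_EL1
+ */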
+
+/**
+ * Register (SYSREG) ap_icc_igrpen0_el1
+ *
+ * AP Interrupt Controller Interrupt Group 0 Enable Register
+ * Controls whether Group 0 interrupts are enabled or not.
+ */
+union bdk_ap_icc_igrpen0_el1
+{
+ uint32_t u;
+ struct bdk_ap_icc_igrpen0_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_1_31 : 31;
+ uint32_t enable : 1; /**< [ 0: 0](R/W) Enables Group 0 interrupts.
+ Virtual accesses to this register update AP_ICH_VMCR_EL2[VENG0].
+ 0 = Group 0 interrupts are disabled.
+ 1 = Group 0 interrupts are enabled. */
+#else /* Word 0 - Little Endian */
+ uint32_t enable : 1; /**< [ 0: 0](R/W) Enables Group 0 interrupts.
+ Virtual accesses to this register update AP_ICH_VMCR_EL2[VENG0].
+ 0 = Group 0 interrupts are disabled.
+ 1 = Group 0 interrupts are enabled. */
+ uint32_t reserved_1_31 : 31;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_icc_igrpen0_el1_s cn; */
+};
+typedef union bdk_ap_icc_igrpen0_el1 bdk_ap_icc_igrpen0_el1_t;
+
+#define BDK_AP_ICC_IGRPEN0_EL1 BDK_AP_ICC_IGRPEN0_EL1_FUNC()
+static inline uint64_t BDK_AP_ICC_IGRPEN0_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICC_IGRPEN0_EL1_FUNC(void)
+{
+ return 0x3000c0c0600ll;
+}
+
+#define typedef_BDK_AP_ICC_IGRPEN0_EL1 bdk_ap_icc_igrpen0_el1_t
+#define bustype_BDK_AP_ICC_IGRPEN0_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICC_IGRPEN0_EL1 "AP_ICC_IGRPEN0_EL1"
+#define busnum_BDK_AP_ICC_IGRPEN0_EL1 0
+#define arguments_BDK_AP_ICC_IGRPEN0_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_icc_igrpen1_el1
+ *
+ * AP Interrupt Controller Interrupt Group 1 Enable Register
+ * Controls whether Group 1 interrupts are enabled for the
+ * current Security state.
+ */
+union bdk_ap_icc_igrpen1_el1
+{
+ uint32_t u;
+ struct bdk_ap_icc_igrpen1_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_1_31 : 31;
+ uint32_t enable : 1; /**< [ 0: 0](R/W) Enables Group 1 interrupts for the current Security state.
+ Virtual accesses to this register update AP_ICH_VMCR_EL2[VENG1].
+ When this register is accessed at EL3, the copy of this
+ register appropriate to the current setting of AP_SCR_EL3[NS] is
+ accessed.
+ 0 = Group 1 interrupts are disabled for the current Security
+ state.
+ 1 = Group 1 interrupts are enabled for the current Security state. */
+#else /* Word 0 - Little Endian */
+ uint32_t enable : 1; /**< [ 0: 0](R/W) Enables Group 1 interrupts for the current Security state.
+ Virtual accesses to this register update AP_ICH_VMCR_EL2[VENG1].
+ When this register is accessed at EL3, the copy of this
+ register appropriate to the current setting of AP_SCR_EL3[NS] is
+ accessed.
+ 0 = Group 1 interrupts are disabled for the current Security
+ state.
+ 1 = Group 1 interrupts are enabled for the current Security state. */
+ uint32_t reserved_1_31 : 31;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_icc_igrpen1_el1_s cn; */
+};
+typedef union bdk_ap_icc_igrpen1_el1 bdk_ap_icc_igrpen1_el1_t;
+
+#define BDK_AP_ICC_IGRPEN1_EL1 BDK_AP_ICC_IGRPEN1_EL1_FUNC()
+static inline uint64_t BDK_AP_ICC_IGRPEN1_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICC_IGRPEN1_EL1_FUNC(void)
+{
+ return 0x3000c0c0700ll;
+}
+
+#define typedef_BDK_AP_ICC_IGRPEN1_EL1 bdk_ap_icc_igrpen1_el1_t
+#define bustype_BDK_AP_ICC_IGRPEN1_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICC_IGRPEN1_EL1 "AP_ICC_IGRPEN1_EL1"
+#define busnum_BDK_AP_ICC_IGRPEN1_EL1 0
+#define arguments_BDK_AP_ICC_IGRPEN1_EL1 -1,-1,-1,-1
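+
+/* A minimal enable sketch using the union above (value composition only;
+ * the MSR to ICC_IGRPEN1_EL1 is left to the caller's accessor of choice):
+ *
+ * bdk_ap_icc_igrpen1_el1_t en;
+ * en.u = 0;
+ * en.s.enable = 1;   // Group 1 interrupts on for the current Security state
+ * // write en.u to ICC_IGRPEN1_EL1
+ */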
+
+/**
+ * Register (SYSREG) ap_icc_igrpen1_el3
+ *
+ * AP Interrupt Controller Interrupt Group 1 Enable EL3 Register
+ * Controls whether Group 1 interrupts are enabled or not.
+ */
+union bdk_ap_icc_igrpen1_el3
+{
+ uint32_t u;
+ struct bdk_ap_icc_igrpen1_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_2_31 : 30;
+ uint32_t enablegrp1s : 1; /**< [ 1: 1](R/W) Enables Group 1 interrupts for the Secure state.
+ 0 = Group 1 interrupts are disabled for the Secure state.
+ 1 = Group 1 interrupts are enabled for the Secure state. */
+ uint32_t enablegrp1ns : 1; /**< [ 0: 0](R/W) Enables Group 1 interrupts for the nonsecure state.
+ 0 = Group 1 interrupts are disabled for the nonsecure state.
+ 1 = Group 1 interrupts are enabled for the nonsecure state. */
+#else /* Word 0 - Little Endian */
+ uint32_t enablegrp1ns : 1; /**< [ 0: 0](R/W) Enables Group 1 interrupts for the nonsecure state.
+ 0 = Group 1 interrupts are disabled for the nonsecure state.
+ 1 = Group 1 interrupts are enabled for the nonsecure state. */
+ uint32_t enablegrp1s : 1; /**< [ 1: 1](R/W) Enables Group 1 interrupts for the Secure state.
+ 0 = Group 1 interrupts are disabled for the Secure state.
+ 1 = Group 1 interrupts are enabled for the Secure state. */
+ uint32_t reserved_2_31 : 30;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_icc_igrpen1_el3_s cn; */
+};
+typedef union bdk_ap_icc_igrpen1_el3 bdk_ap_icc_igrpen1_el3_t;
+
+#define BDK_AP_ICC_IGRPEN1_EL3 BDK_AP_ICC_IGRPEN1_EL3_FUNC()
+static inline uint64_t BDK_AP_ICC_IGRPEN1_EL3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICC_IGRPEN1_EL3_FUNC(void)
+{
+ return 0x3060c0c0700ll;
+}
+
+#define typedef_BDK_AP_ICC_IGRPEN1_EL3 bdk_ap_icc_igrpen1_el3_t
+#define bustype_BDK_AP_ICC_IGRPEN1_EL3 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICC_IGRPEN1_EL3 "AP_ICC_IGRPEN1_EL3"
+#define busnum_BDK_AP_ICC_IGRPEN1_EL3 0
+#define arguments_BDK_AP_ICC_IGRPEN1_EL3 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_icc_pmr_el1
+ *
+ * AP Interrupt Controller Interrupt Priority Mask Register
+ * Provides an interrupt priority filter. Only interrupts with
+ * higher priority than the value in this register are signaled
+ * to the processor.
+ */
+union bdk_ap_icc_pmr_el1
+{
+ uint32_t u;
+ struct bdk_ap_icc_pmr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t pri : 8; /**< [ 7: 0](R/W) The priority mask level for the CPU interface. If the priority
+ of an interrupt is higher than the value indicated by this
+ field, the interface signals the interrupt to the processor.
+
+ If the GIC supports fewer than 256 priority levels then some
+ bits are RAZ/WI, as follows:
+ - 128 supported levels: Bit [0] = 0.
+ - 64 supported levels: Bits [1:0] = 0b00.
+ - 32 supported levels: Bits [2:0] = 0b000.
+ - 16 supported levels: Bits [3:0] = 0b0000.
+
+ The possible priority field values are as follows:
+
+ \<pre\>
+ Implemented priority bits  Possible priority field values  Number of priority levels
+ [7:0]                      0x00-0xFF                       256
+ [7:1]                      0x00-0xFE                       128
+ [7:2]                      0x00-0xFC                       64
+ [7:3]                      0x00-0xF8                       32
+ [7:4]                      0x00-0xF0                       16
+ \</pre\> */
+#else /* Word 0 - Little Endian */
+ uint32_t pri : 8; /**< [ 7: 0](R/W) The priority mask level for the CPU interface. If the priority
+ of an interrupt is higher than the value indicated by this
+ field, the interface signals the interrupt to the processor.
+
+ If the GIC supports fewer than 256 priority levels then some
+ bits are RAZ/WI, as follows:
+ - 128 supported levels: Bit [0] = 0.
+ - 64 supported levels: Bits [1:0] = 0b00.
+ - 32 supported levels: Bits [2:0] = 0b000.
+ - 16 supported levels: Bits [3:0] = 0b0000.
+
+ The possible priority field values are as follows:
+
+ \<pre\>
+ Implemented priority bits  Possible priority field values  Number of priority levels
+ [7:0]                      0x00-0xFF                       256
+ [7:1]                      0x00-0xFE                       128
+ [7:2]                      0x00-0xFC                       64
+ [7:3]                      0x00-0xF8                       32
+ [7:4]                      0x00-0xF0                       16
+ \</pre\> */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_icc_pmr_el1_s cn; */
+};
+typedef union bdk_ap_icc_pmr_el1 bdk_ap_icc_pmr_el1_t;
+
+#define BDK_AP_ICC_PMR_EL1 BDK_AP_ICC_PMR_EL1_FUNC()
+static inline uint64_t BDK_AP_ICC_PMR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICC_PMR_EL1_FUNC(void)
+{
+ return 0x30004060000ll;
+}
+
+#define typedef_BDK_AP_ICC_PMR_EL1 bdk_ap_icc_pmr_el1_t
+#define bustype_BDK_AP_ICC_PMR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICC_PMR_EL1 "AP_ICC_PMR_EL1"
+#define busnum_BDK_AP_ICC_PMR_EL1 0
+#define arguments_BDK_AP_ICC_PMR_EL1 -1,-1,-1,-1
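+
+/* Note the inverted sense: lower PRI values are higher priority, and only
+ * interrupts of higher priority than the mask are signaled. A sketch of
+ * unmasking everything:
+ *
+ * bdk_ap_icc_pmr_el1_t pmr;
+ * pmr.u = 0;
+ * pmr.s.pri = 0xFF;   // least restrictive mask: all priorities pass
+ * // write pmr.u to ICC_PMR_EL1
+ */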
+
+/**
+ * Register (SYSREG) ap_icc_rpr_el1
+ *
+ * AP Interrupt Controller Running Priority Register
+ * Indicates the Running priority of the CPU interface.
+ */
+union bdk_ap_icc_rpr_el1
+{
+ uint32_t u;
+ struct bdk_ap_icc_rpr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t pri : 8; /**< [ 7: 0](R/W) The current running priority on the CPU interface. This is the
+ priority of the current active interrupt. */
+#else /* Word 0 - Little Endian */
+ uint32_t pri : 8; /**< [ 7: 0](R/W) The current running priority on the CPU interface. This is the
+ priority of the current active interrupt. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_icc_rpr_el1_s cn; */
+};
+typedef union bdk_ap_icc_rpr_el1 bdk_ap_icc_rpr_el1_t;
+
+#define BDK_AP_ICC_RPR_EL1 BDK_AP_ICC_RPR_EL1_FUNC()
+static inline uint64_t BDK_AP_ICC_RPR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICC_RPR_EL1_FUNC(void)
+{
+ return 0x3000c0b0300ll;
+}
+
+#define typedef_BDK_AP_ICC_RPR_EL1 bdk_ap_icc_rpr_el1_t
+#define bustype_BDK_AP_ICC_RPR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICC_RPR_EL1 "AP_ICC_RPR_EL1"
+#define busnum_BDK_AP_ICC_RPR_EL1 0
+#define arguments_BDK_AP_ICC_RPR_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_icc_seien_el1
+ *
+ * AP Interrupt Controller System Error Interrupt Enable Register
+ * Controls whether System Error Interrupts generated by bus
+ * message are enabled.
+ */
+union bdk_ap_icc_seien_el1
+{
+ uint32_t u;
+ struct bdk_ap_icc_seien_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_1_31 : 31;
+ uint32_t enable : 1; /**< [ 0: 0](R/W) Enables System Error Interrupts generated by bus message.
+ Virtual accesses to this register update AP_ICH_VMCR_EL2[VENSEI].
+ 0 = System Error Interrupts generated by bus message are disabled.
+ 1 = System Error Interrupts generated by bus message are enabled. */
+#else /* Word 0 - Little Endian */
+ uint32_t enable : 1; /**< [ 0: 0](R/W) Enables System Error Interrupts generated by bus message.
+ Virtual accesses to this register update AP_ICH_VMCR_EL2[VENSEI].
+ 0 = System Error Interrupts generated by bus message are disabled.
+ 1 = System Error Interrupts generated by bus message are enabled. */
+ uint32_t reserved_1_31 : 31;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_icc_seien_el1_s cn; */
+};
+typedef union bdk_ap_icc_seien_el1 bdk_ap_icc_seien_el1_t;
+
+#define BDK_AP_ICC_SEIEN_EL1 BDK_AP_ICC_SEIEN_EL1_FUNC()
+static inline uint64_t BDK_AP_ICC_SEIEN_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICC_SEIEN_EL1_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x3000c0d0000ll;
+ __bdk_csr_fatal("AP_ICC_SEIEN_EL1", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_ICC_SEIEN_EL1 bdk_ap_icc_seien_el1_t
+#define bustype_BDK_AP_ICC_SEIEN_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICC_SEIEN_EL1 "AP_ICC_SEIEN_EL1"
+#define busnum_BDK_AP_ICC_SEIEN_EL1 0
+#define arguments_BDK_AP_ICC_SEIEN_EL1 -1,-1,-1,-1
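+
+/* AP_ICC_SEIEN_EL1 only exists on CN8XXX parts: the address function
+ * above calls __bdk_csr_fatal() on any other model, so callers should
+ * gate on the model first. A sketch:
+ *
+ * if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ * {
+ *     // safe to use BDK_AP_ICC_SEIEN_EL1 here
+ * }
+ */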
+
+/**
+ * Register (SYSREG) ap_icc_sgi0r_el1
+ *
+ * AP Interrupt Controller Software Generated Interrupt group 0 Register
+ * Provides software the ability to generate secure group 0 SGIs,
+ * including from the nonsecure state when permitted by
+ * GICR_NSACR.
+ */
+union bdk_ap_icc_sgi0r_el1
+{
+ uint64_t u;
+ struct bdk_ap_icc_sgi0r_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_56_63 : 8;
+ uint64_t aff3 : 8; /**< [ 55: 48](R/W) The affinity 3 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t reserved_41_47 : 7;
+ uint64_t irm : 1; /**< [ 40: 40](R/W) Interrupt Routing Mode. Determines how the generated
+ interrupts should be distributed to processors. Possible
+ values are:
+ 0 = Interrupts routed to the processors specified by a.b.c.{target
+ list}. In this routing, a, b, and c are the values of fields
+ Aff3, Aff2, and Aff1 respectively.
+ 1 = Interrupts routed to all processors in the system, excluding
+ "self". */
+ uint64_t aff2 : 8; /**< [ 39: 32](R/W) The affinity 2 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t reserved_28_31 : 4;
+ uint64_t sgiid : 4; /**< [ 27: 24](R/W) SGI Interrupt ID. */
+ uint64_t aff1 : 8; /**< [ 23: 16](R/W) The affinity 1 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t targetlist : 16; /**< [ 15: 0](R/W) Target List. The set of processors for which SGI interrupts
+ will be generated. Each bit corresponds to the processor
+ within a cluster with an Affinity 0 value equal to the bit
+ number.
+
+ If a bit is 1 and the bit does not correspond to a valid
+ target processor, the bit must be ignored by the Distributor.
+ In such cases, a Distributor may optionally generate an SError
+ interrupt.
+
+ This restricts distribution of SGIs to the first 16 processors
+ of an affinity 1 cluster. */
+#else /* Word 0 - Little Endian */
+ uint64_t targetlist : 16; /**< [ 15: 0](R/W) Target List. The set of processors for which SGI interrupts
+ will be generated. Each bit corresponds to the processor
+ within a cluster with an Affinity 0 value equal to the bit
+ number.
+
+ If a bit is 1 and the bit does not correspond to a valid
+ target processor, the bit must be ignored by the Distributor.
+ In such cases, a Distributor may optionally generate an SError
+ interrupt.
+
+ This restricts distribution of SGIs to the first 16 processors
+ of an affinity 1 cluster. */
+ uint64_t aff1 : 8; /**< [ 23: 16](R/W) The affinity 1 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t sgiid : 4; /**< [ 27: 24](R/W) SGI Interrupt ID. */
+ uint64_t reserved_28_31 : 4;
+ uint64_t aff2 : 8; /**< [ 39: 32](R/W) The affinity 2 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t irm : 1; /**< [ 40: 40](R/W) Interrupt Routing Mode. Determines how the generated
+ interrupts should be distributed to processors. Possible
+ values are:
+ 0 = Interrupts routed to the processors specified by a.b.c.{target
+ list}. In this routing, a, b, and c are the values of fields
+ Aff3, Aff2, and Aff1 respectively.
+ 1 = Interrupts routed to all processors in the system, excluding
+ "self". */
+ uint64_t reserved_41_47 : 7;
+ uint64_t aff3 : 8; /**< [ 55: 48](R/W) The affinity 3 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t reserved_56_63 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_icc_sgi0r_el1_s cn8; */
+ struct bdk_ap_icc_sgi0r_el1_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_56_63 : 8;
+ uint64_t aff3 : 8; /**< [ 55: 48](WO) The affinity 3 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t reserved_41_47 : 7;
+ uint64_t irm : 1; /**< [ 40: 40](WO) Interrupt Routing Mode. Determines how the generated
+ interrupts should be distributed to processors. Possible
+ values are:
+ 0 = Interrupts routed to the processors specified by a.b.c.{target
+ list}. In this routing, a, b, and c are the values of fields
+ Aff3, Aff2, and Aff1 respectively.
+ 1 = Interrupts routed to all processors in the system, excluding
+ self. */
+ uint64_t aff2 : 8; /**< [ 39: 32](WO) The affinity 2 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t reserved_28_31 : 4;
+ uint64_t sgiid : 4; /**< [ 27: 24](WO) SGI Interrupt ID. */
+ uint64_t aff1 : 8; /**< [ 23: 16](WO) The affinity 1 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t targetlist : 16; /**< [ 15: 0](WO) Target List. The set of processors for which SGI interrupts
+ will be generated. Each bit corresponds to the processor
+ within a cluster with an Affinity 0 value equal to the bit
+ number.
+
+ If a bit is 1 and the bit does not correspond to a valid
+ target processor, the bit must be ignored by the Distributor.
+ In such cases, a Distributor may optionally generate an SError
+ interrupt.
+
+ This restricts distribution of SGIs to the first 16 processors
+ of an affinity 1 cluster. */
+#else /* Word 0 - Little Endian */
+ uint64_t targetlist : 16; /**< [ 15: 0](WO) Target List. The set of processors for which SGI interrupts
+ will be generated. Each bit corresponds to the processor
+ within a cluster with an Affinity 0 value equal to the bit
+ number.
+
+ If a bit is 1 and the bit does not correspond to a valid
+ target processor, the bit must be ignored by the Distributor.
+ In such cases, a Distributor may optionally generate an SError
+ interrupt.
+
+ This restricts distribution of SGIs to the first 16 processors
+ of an affinity 1 cluster. */
+ uint64_t aff1 : 8; /**< [ 23: 16](WO) The affinity 1 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t sgiid : 4; /**< [ 27: 24](WO) SGI Interrupt ID. */
+ uint64_t reserved_28_31 : 4;
+ uint64_t aff2 : 8; /**< [ 39: 32](WO) The affinity 2 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t irm : 1; /**< [ 40: 40](WO) Interrupt Routing Mode. Determines how the generated
+ interrupts should be distributed to processors. Possible
+ values are:
+ 0 = Interrupts routed to the processors specified by a.b.c.{target
+ list}. In this routing, a, b, and c are the values of fields
+ Aff3, Aff2, and Aff1 respectively.
+ 1 = Interrupts routed to all processors in the system, excluding
+ self. */
+ uint64_t reserved_41_47 : 7;
+ uint64_t aff3 : 8; /**< [ 55: 48](WO) The affinity 3 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t reserved_56_63 : 8;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_ap_icc_sgi0r_el1 bdk_ap_icc_sgi0r_el1_t;
+
+#define BDK_AP_ICC_SGI0R_EL1 BDK_AP_ICC_SGI0R_EL1_FUNC()
+static inline uint64_t BDK_AP_ICC_SGI0R_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICC_SGI0R_EL1_FUNC(void)
+{
+ return 0x3000c0b0700ll;
+}
+
+#define typedef_BDK_AP_ICC_SGI0R_EL1 bdk_ap_icc_sgi0r_el1_t
+#define bustype_BDK_AP_ICC_SGI0R_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICC_SGI0R_EL1 "AP_ICC_SGI0R_EL1"
+#define busnum_BDK_AP_ICC_SGI0R_EL1 0
+#define arguments_BDK_AP_ICC_SGI0R_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_icc_sgi1r_el1
+ *
+ * AP Interrupt Controller Software Generated Interrupt group 1 Register
+ * Provides software the ability to generate group 1 SGIs for the
+ * current security state.
+ */
+union bdk_ap_icc_sgi1r_el1
+{
+ uint64_t u;
+ struct bdk_ap_icc_sgi1r_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_56_63 : 8;
+ uint64_t aff3 : 8; /**< [ 55: 48](R/W) The affinity 3 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t reserved_41_47 : 7;
+ uint64_t irm : 1; /**< [ 40: 40](R/W) Interrupt Routing Mode. Determines how the generated
+ interrupts should be distributed to processors. Possible
+ values are:
+ 0 = Interrupts routed to the processors specified by a.b.c.{target
+ list}. In this routing, a, b, and c are the values of fields
+ Aff3, Aff2, and Aff1 respectively.
+ 1 = Interrupts routed to all processors in the system, excluding
+ "self". */
+ uint64_t aff2 : 8; /**< [ 39: 32](R/W) The affinity 2 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t reserved_28_31 : 4;
+ uint64_t sgiid : 4; /**< [ 27: 24](R/W) SGI Interrupt ID. */
+ uint64_t aff1 : 8; /**< [ 23: 16](R/W) The affinity 1 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t targetlist : 16; /**< [ 15: 0](R/W) Target List. The set of processors for which SGI interrupts
+ will be generated. Each bit corresponds to the processor
+ within a cluster with an Affinity 0 value equal to the bit
+ number.
+ If a bit is 1 and the bit does not correspond to a valid
+ target processor, the bit must be ignored by the Distributor.
+ In such cases, a Distributor may optionally generate an SError
+ interrupt.
+ This restricts distribution of SGIs to the first 16 processors
+ of an affinity 1 cluster. */
+#else /* Word 0 - Little Endian */
+ uint64_t targetlist : 16; /**< [ 15: 0](R/W) Target List. The set of processors for which SGI interrupts
+ will be generated. Each bit corresponds to the processor
+ within a cluster with an Affinity 0 value equal to the bit
+ number.
+ If a bit is 1 and the bit does not correspond to a valid
+ target processor, the bit must be ignored by the Distributor.
+ In such cases, a Distributor may optionally generate an SError
+ interrupt.
+ This restricts distribution of SGIs to the first 16 processors
+ of an affinity 1 cluster. */
+ uint64_t aff1 : 8; /**< [ 23: 16](R/W) The affinity 1 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t sgiid : 4; /**< [ 27: 24](R/W) SGI Interrupt ID. */
+ uint64_t reserved_28_31 : 4;
+ uint64_t aff2 : 8; /**< [ 39: 32](R/W) The affinity 2 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t irm : 1; /**< [ 40: 40](R/W) Interrupt Routing Mode. Determines how the generated
+ interrupts should be distributed to processors. Possible
+ values are:
+ 0 = Interrupts routed to the processors specified by a.b.c.{target
+ list}. In this routing, a, b, and c are the values of fields
+ Aff3, Aff2, and Aff1 respectively.
+ 1 = Interrupts routed to all processors in the system, excluding
+ "self". */
+ uint64_t reserved_41_47 : 7;
+ uint64_t aff3 : 8; /**< [ 55: 48](R/W) The affinity 3 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t reserved_56_63 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_icc_sgi1r_el1_s cn8; */
+ struct bdk_ap_icc_sgi1r_el1_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_56_63 : 8;
+ uint64_t aff3 : 8; /**< [ 55: 48](WO) The affinity 3 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t reserved_41_47 : 7;
+ uint64_t irm : 1; /**< [ 40: 40](WO) Interrupt Routing Mode. Determines how the generated
+ interrupts should be distributed to processors.
+ 0 = Interrupts routed to the processors specified by a.b.c.{target
+ list}. In this routing, a, b, and c are the values of fields
+ Aff3, Aff2, and Aff1 respectively.
+ 1 = Interrupts routed to all processors in the system, excluding
+ self. */
+ uint64_t aff2 : 8; /**< [ 39: 32](WO) The affinity 2 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t reserved_28_31 : 4;
+ uint64_t sgiid : 4; /**< [ 27: 24](WO) SGI Interrupt ID. */
+ uint64_t aff1 : 8; /**< [ 23: 16](WO) The affinity 1 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t targetlist : 16; /**< [ 15: 0](WO) Target List. The set of processors for which SGI interrupts
+ will be generated. Each bit corresponds to the processor
+ within a cluster with an Affinity 0 value equal to the bit
+ number.
+ If a bit is 1 and the bit does not correspond to a valid
+ target processor, the bit must be ignored by the Distributor.
+ In such cases, a Distributor may optionally generate an SError
+ interrupt.
+ This restricts distribution of SGIs to the first 16 processors
+ of an affinity 1 cluster. */
+#else /* Word 0 - Little Endian */
+ uint64_t targetlist : 16; /**< [ 15: 0](WO) Target List. The set of processors for which SGI interrupts
+ will be generated. Each bit corresponds to the processor
+ within a cluster with an Affinity 0 value equal to the bit
+ number.
+ If a bit is 1 and the bit does not correspond to a valid
+ target processor, the bit must be ignored by the Distributor.
+ In such cases, a Distributor may optionally generate an SError
+ interrupt.
+ This restricts distribution of SGIs to the first 16 processors
+ of an affinity 1 cluster. */
+ uint64_t aff1 : 8; /**< [ 23: 16](WO) The affinity 1 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t sgiid : 4; /**< [ 27: 24](WO) SGI Interrupt ID. */
+ uint64_t reserved_28_31 : 4;
+ uint64_t aff2 : 8; /**< [ 39: 32](WO) The affinity 2 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t irm : 1; /**< [ 40: 40](WO) Interrupt Routing Mode. Determines how the generated
+ interrupts should be distributed to processors.
+ 0 = Interrupts routed to the processors specified by a.b.c.{target
+ list}. In this routing, a, b, and c are the values of fields
+ Aff3, Aff2, and Aff1 respectively.
+ 1 = Interrupts routed to all processors in the system, excluding
+ self. */
+ uint64_t reserved_41_47 : 7;
+ uint64_t aff3 : 8; /**< [ 55: 48](WO) The affinity 3 value of the affinity path of the cluster for
+ which SGI interrupts will be generated. */
+ uint64_t reserved_56_63 : 8;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_ap_icc_sgi1r_el1 bdk_ap_icc_sgi1r_el1_t;
+
+#define BDK_AP_ICC_SGI1R_EL1 BDK_AP_ICC_SGI1R_EL1_FUNC()
+static inline uint64_t BDK_AP_ICC_SGI1R_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICC_SGI1R_EL1_FUNC(void)
+{
+ return 0x3000c0b0500ll;
+}
+
+#define typedef_BDK_AP_ICC_SGI1R_EL1 bdk_ap_icc_sgi1r_el1_t
+#define bustype_BDK_AP_ICC_SGI1R_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICC_SGI1R_EL1 "AP_ICC_SGI1R_EL1"
+#define busnum_BDK_AP_ICC_SGI1R_EL1 0
+#define arguments_BDK_AP_ICC_SGI1R_EL1 -1,-1,-1,-1
+
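+/* Illustrative sketch (not part of the imported BDK sources): composing an
+ * SGI1R value with the union above and issuing it with a raw MSR. The name
+ * s3_0_c12_c11_5 is the generic AArch64 encoding of ICC_SGI1R_EL1 (op0=3,
+ * op1=0, CRn=12, CRm=11, op2=5), matching the 0x3000c0b0500 key returned by
+ * BDK_AP_ICC_SGI1R_EL1_FUNC() above. The example_* helper name is
+ * hypothetical. */
+static inline void example_send_sgi1(uint8_t sgi, uint16_t targetlist,
+                                     uint8_t aff1, uint8_t aff2, uint8_t aff3)
+{
+    bdk_ap_icc_sgi1r_el1_t sgi1r;
+
+    sgi1r.u = 0;                      /* irm = 0: route by affinity path */
+    sgi1r.s.sgiid = sgi & 0xf;        /* SGI interrupt ID, 0-15 */
+    sgi1r.s.targetlist = targetlist;  /* one bit per Aff0 value, 0-15 */
+    sgi1r.s.aff1 = aff1;
+    sgi1r.s.aff2 = aff2;
+    sgi1r.s.aff3 = aff3;
+    asm volatile("msr s3_0_c12_c11_5, %0" : : "r" (sgi1r.u));
+    asm volatile("isb");
+}
+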
+/**
+ * Register (SYSREG) ap_icc_sre_el1
+ *
+ * AP Interrupt Controller System Register Enable EL1 Register
+ * Controls whether the system register interface or the memory
+ * mapped interface to the GIC CPU interface is used for EL0 and
+ * EL1.
+ */
+union bdk_ap_icc_sre_el1
+{
+ uint32_t u;
+ struct bdk_ap_icc_sre_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_3_31 : 29;
+ uint32_t dib : 1; /**< [ 2: 2](RO) Disable IRQ bypass.
+ If EL3 is present, this field is a read-only alias of
+ AP_ICC_SRE_EL3[DIB].
+ If EL3 is not present and EL2 is present, this field is a
+ read-only alias of AP_ICC_SRE_EL2[DIB]. */
+ uint32_t dfb : 1; /**< [ 1: 1](RO) Disable FIQ bypass.
+ If EL3 is present, this field is a read-only alias of
+ AP_ICC_SRE_EL3[DFB].
+ If EL3 is not present and EL2 is present, this field is a
+ read-only alias of AP_ICC_SRE_EL2[DFB]. */
+ uint32_t sre : 1; /**< [ 0: 0](RO) System Register Enable.
+ Virtual accesses modify AP_ICH_VMCR_EL2[VSRE].
+ 0 = The memory mapped interface must be used. Access at EL1 to any
+ ICC_* system register other than AP_ICC_SRE_EL1 results in an
+ Undefined exception.
+ 1 = The system register interface for the current Security state
+ is enabled. */
+#else /* Word 0 - Little Endian */
+ uint32_t sre : 1; /**< [ 0: 0](RO) System Register Enable.
+ Virtual accesses modify AP_ICH_VMCR_EL2[VSRE].
+ 0 = The memory mapped interface must be used. Access at EL1 to any
+ ICC_* system register other than AP_ICC_SRE_EL1 results in an
+ Undefined exception.
+ 1 = The system register interface for the current Security state
+ is enabled. */
+ uint32_t dfb : 1; /**< [ 1: 1](RO) Disable FIQ bypass.
+ If EL3 is present, this field is a read-only alias of
+ AP_ICC_SRE_EL3[DFB].
+ If EL3 is not present and EL2 is present, this field is a
+ read-only alias of AP_ICC_SRE_EL2[DFB]. */
+ uint32_t dib : 1; /**< [ 2: 2](RO) Disable IRQ bypass.
+ If EL3 is present, this field is a read-only alias of
+ AP_ICC_SRE_EL3[DIB].
+ If EL3 is not present and EL2 is present, this field is a
+ read-only alias of AP_ICC_SRE_EL2[DIB]. */
+ uint32_t reserved_3_31 : 29;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_icc_sre_el1_s cn; */
+};
+typedef union bdk_ap_icc_sre_el1 bdk_ap_icc_sre_el1_t;
+
+#define BDK_AP_ICC_SRE_EL1 BDK_AP_ICC_SRE_EL1_FUNC()
+static inline uint64_t BDK_AP_ICC_SRE_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICC_SRE_EL1_FUNC(void)
+{
+ return 0x3000c0c0500ll;
+}
+
+#define typedef_BDK_AP_ICC_SRE_EL1 bdk_ap_icc_sre_el1_t
+#define bustype_BDK_AP_ICC_SRE_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICC_SRE_EL1 "AP_ICC_SRE_EL1"
+#define busnum_BDK_AP_ICC_SRE_EL1 0
+#define arguments_BDK_AP_ICC_SRE_EL1 -1,-1,-1,-1
+
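+/* Illustrative sketch (not part of the imported BDK sources): before touching
+ * any other ICC_* system register at EL1, software should confirm that the
+ * system register interface is enabled, since such accesses are otherwise
+ * Undefined. The name s3_0_c12_c12_5 is the generic AArch64 encoding of
+ * ICC_SRE_EL1, matching the 0x3000c0c0500 key returned above. The example_*
+ * helper name is hypothetical. */
+static inline int example_icc_sysreg_enabled(void)
+{
+    bdk_ap_icc_sre_el1_t sre;
+    uint64_t v;
+
+    asm volatile("mrs %0, s3_0_c12_c12_5" : "=r" (v));
+    sre.u = (uint32_t)v;
+    return sre.s.sre != 0;            /* 1 = system register interface usable */
+}
+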
+/**
+ * Register (SYSREG) ap_icc_sre_el2
+ *
+ * AP Interrupt Controller System Register Enable EL2 Register
+ * Controls whether the system register interface or the memory
+ * mapped interface to the GIC CPU interface is used for EL2.
+ */
+union bdk_ap_icc_sre_el2
+{
+ uint32_t u;
+ struct bdk_ap_icc_sre_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_4_31 : 28;
+ uint32_t enable : 1; /**< [ 3: 3](R/W) Enable. Enables lower Exception level access to AP_ICC_SRE_EL1.
+ 0 = Nonsecure EL1 accesses to AP_ICC_SRE_EL1 trap to EL2.
+ 1 = Nonsecure EL1 accesses to AP_ICC_SRE_EL1 are permitted if EL3 is
+ not present or AP_ICC_SRE_EL3[ENABLE] is 1, otherwise nonsecure
+ EL1 accesses to AP_ICC_SRE_EL1 trap to EL3. */
+ uint32_t dib : 1; /**< [ 2: 2](RO) Disable IRQ bypass.
+ If EL3 is present and GICD_CTLR[DS] is 0, this field is a read-
+ only alias of AP_ICC_SRE_EL3[DIB]. */
+ uint32_t dfb : 1; /**< [ 1: 1](RO) Disable FIQ bypass.
+ If EL3 is present and GICD_CTLR[DS] is 0, this field is a read-
+ only alias of AP_ICC_SRE_EL3[DFB]. */
+ uint32_t sre : 1; /**< [ 0: 0](RO) System Register Enable.
+ 0 = The memory mapped interface must be used. Access at EL2 to any
+ ICH_* system register, or any EL1 or EL2 ICC_* register other
+ than AP_ICC_SRE_EL1 or AP_ICC_SRE_EL2, results in an Undefined
+ exception.
+ 1 = The system register interface to the ICH_* registers and the
+ EL1 and EL2 ICC_* registers is enabled for EL2. */
+#else /* Word 0 - Little Endian */
+ uint32_t sre : 1; /**< [ 0: 0](RO) System Register Enable.
+ 0 = The memory mapped interface must be used. Access at EL2 to any
+ ICH_* system register, or any EL1 or EL2 ICC_* register other
+ than AP_ICC_SRE_EL1 or AP_ICC_SRE_EL2, results in an Undefined
+ exception.
+ 1 = The system register interface to the ICH_* registers and the
+ EL1 and EL2 ICC_* registers is enabled for EL2. */
+ uint32_t dfb : 1; /**< [ 1: 1](RO) Disable FIQ bypass.
+ If EL3 is present and GICD_CTLR[DS] is 0, this field is a read-
+ only alias of AP_ICC_SRE_EL3[DFB]. */
+ uint32_t dib : 1; /**< [ 2: 2](RO) Disable IRQ bypass.
+ If EL3 is present and GICD_CTLR[DS] is 0, this field is a read-
+ only alias of AP_ICC_SRE_EL3[DIB]. */
+ uint32_t enable : 1; /**< [ 3: 3](R/W) Enable. Enables lower Exception level access to AP_ICC_SRE_EL1.
+ 0 = Nonsecure EL1 accesses to AP_ICC_SRE_EL1 trap to EL2.
+ 1 = Nonsecure EL1 accesses to AP_ICC_SRE_EL1 are permitted if EL3 is
+ not present or AP_ICC_SRE_EL3[ENABLE] is 1, otherwise nonsecure
+ EL1 accesses to AP_ICC_SRE_EL1 trap to EL3. */
+ uint32_t reserved_4_31 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_icc_sre_el2_s cn; */
+};
+typedef union bdk_ap_icc_sre_el2 bdk_ap_icc_sre_el2_t;
+
+#define BDK_AP_ICC_SRE_EL2 BDK_AP_ICC_SRE_EL2_FUNC()
+static inline uint64_t BDK_AP_ICC_SRE_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICC_SRE_EL2_FUNC(void)
+{
+ return 0x3040c090500ll;
+}
+
+#define typedef_BDK_AP_ICC_SRE_EL2 bdk_ap_icc_sre_el2_t
+#define bustype_BDK_AP_ICC_SRE_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICC_SRE_EL2 "AP_ICC_SRE_EL2"
+#define busnum_BDK_AP_ICC_SRE_EL2 0
+#define arguments_BDK_AP_ICC_SRE_EL2 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_icc_sre_el3
+ *
+ * AP Interrupt Controller System Register Enable EL3 Register
+ * Controls whether the system register interface or the memory
+ * mapped interface to the GIC CPU interface is used for EL3.
+ */
+union bdk_ap_icc_sre_el3
+{
+ uint32_t u;
+ struct bdk_ap_icc_sre_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_4_31 : 28;
+ uint32_t enable : 1; /**< [ 3: 3](R/W) Enable. Enables lower Exception level access to AP_ICC_SRE_EL1
+ and AP_ICC_SRE_EL2.
+ 0 = EL1 and EL2 accesses to AP_ICC_SRE_EL1 or AP_ICC_SRE_EL2 trap to
+ EL3.
+ 1 = EL2 accesses to AP_ICC_SRE_EL2 are permitted. If the Enable bit
+ of AP_ICC_SRE_EL2 is 1, then EL1 accesses to AP_ICC_SRE_EL1 are also
+ permitted. */
+ uint32_t dib : 1; /**< [ 2: 2](RO) Disable IRQ bypass. */
+ uint32_t dfb : 1; /**< [ 1: 1](RO) Disable FIQ bypass. */
+ uint32_t sre : 1; /**< [ 0: 0](RO) System Register Enable.
+ 0 = The memory mapped interface must be used. Access at EL3 to any
+ ICH_* system register, or any EL1, EL2, or EL3 ICC_* register
+ other than AP_ICC_SRE_EL1, AP_ICC_SRE_EL2, or AP_ICC_SRE_EL3, results
+ in an Undefined exception.
+ 1 = The system register interface to the ICH_* registers and the
+ EL1, EL2, and EL3 ICC_* registers is enabled for EL3. */
+#else /* Word 0 - Little Endian */
+ uint32_t sre : 1; /**< [ 0: 0](RO) System Register Enable.
+ 0 = The memory mapped interface must be used. Access at EL3 to any
+ ICH_* system register, or any EL1, EL2, or EL3 ICC_* register
+ other than AP_ICC_SRE_EL1, AP_ICC_SRE_EL2, or AP_ICC_SRE_EL3, results
+ in an Undefined exception.
+ 1 = The system register interface to the ICH_* registers and the
+ EL1, EL2, and EL3 ICC_* registers is enabled for EL3. */
+ uint32_t dfb : 1; /**< [ 1: 1](RO) Disable FIQ bypass. */
+ uint32_t dib : 1; /**< [ 2: 2](RO) Disable IRQ bypass. */
+ uint32_t enable : 1; /**< [ 3: 3](R/W) Enable. Enables lower Exception level access to AP_ICC_SRE_EL1
+ and AP_ICC_SRE_EL2.
+ 0 = EL1 and EL2 accesses to AP_ICC_SRE_EL1 or AP_ICC_SRE_EL2 trap to
+ EL3.
+ 1 = EL2 accesses to AP_ICC_SRE_EL2 are permitted. If the Enable bit
+ of AP_ICC_SRE_EL2 is 1, then EL1 accesses to AP_ICC_SRE_EL1 are also
+ permitted. */
+ uint32_t reserved_4_31 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_icc_sre_el3_s cn; */
+};
+typedef union bdk_ap_icc_sre_el3 bdk_ap_icc_sre_el3_t;
+
+#define BDK_AP_ICC_SRE_EL3 BDK_AP_ICC_SRE_EL3_FUNC()
+static inline uint64_t BDK_AP_ICC_SRE_EL3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICC_SRE_EL3_FUNC(void)
+{
+ return 0x3060c0c0500ll;
+}
+
+#define typedef_BDK_AP_ICC_SRE_EL3 bdk_ap_icc_sre_el3_t
+#define bustype_BDK_AP_ICC_SRE_EL3 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICC_SRE_EL3 "AP_ICC_SRE_EL3"
+#define busnum_BDK_AP_ICC_SRE_EL3 0
+#define arguments_BDK_AP_ICC_SRE_EL3 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_ich_ap0r0_el2
+ *
+ * AP Interrupt Controller Hyp Active Priorities (0,0) Register
+ * Provides information about the active priorities for the
+ * current EL2 interrupt regime.
+ */
+union bdk_ap_ich_ap0r0_el2
+{
+ uint32_t u;
+ struct bdk_ap_ich_ap0r0_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t prioritybits : 32; /**< [ 31: 0](R/W) Provides information about priority M, according to the
+ following relationship:
+
+                                                 Bit P\<n\> corresponds to priority (M divided by 2^(U))
+ minus 1, where U is the number of unimplemented bits of
+ priority and is equal to (7 - AP_ICC_CTLR_EL1[PRIBITS]).
+
+ For example, in a system with AP_ICC_CTLR_EL1[PRIBITS] == 0x4:
+
+ There are 5 bits of implemented priority.
+
+ This means there are 3 bits of unimplemented priority, which
+ are always at the least significant end (bits [2:0] are RES0).
+
+ Valid priorities are 8, 16, 24, 32, and so on. Dividing these
+                                                 by 2^(3) gives 1, 2, 3, 4, and so on.
+
+ Subtracting 1 from each gives bits 0, 1, 2, 3, and so on that
+ provide information about those priorities.
+
+ Accesses to these registers from an interrupt regime give a
+ view of the active priorities that is appropriate for that
+ interrupt regime, to allow save and restore of the appropriate
+ state.
+
+ Interrupt regime and the number of Security states supported
+ by the Distributor affect the view as follows. Unless
+ otherwise stated, when a bit is successfully set to one, this
+ clears any other active priorities corresponding to that bit.
+
+                                                 The AP0Rn access allowed at each Exception level is as follows:
+
+                                                 (Secure) EL3: Permitted. Accesses Group 0 Secure active priorities.
+
+                                                 Secure EL1: Permitted. Accesses Group 0 Secure active priorities.
+
+                                                 Nonsecure EL1 access for a Virtual interrupt: ICH_AP0Rn_EL2.
+
+                                                 Nonsecure EL1 or EL2 when the GIC Distributor supports two Security
+                                                 states (GICD_CTLR[DS] is 0): Permitted. Accesses Group 0 Secure
+                                                 active priorities.
+
+                                                 Nonsecure EL1 or EL2 when the GIC Distributor supports one Security
+                                                 state (GICD_CTLR[DS] is 1): Permitted. Accesses Group 0 active
+                                                 priorities.
+
+ A Virtual interrupt in this case means that the interrupt
+ group associated with the register has been virtualized. */
+#else /* Word 0 - Little Endian */
+ uint32_t prioritybits : 32; /**< [ 31: 0](R/W) Provides information about priority M, according to the
+ following relationship:
+
+                                                 Bit P\<n\> corresponds to priority (M divided by 2^(U))
+ minus 1, where U is the number of unimplemented bits of
+ priority and is equal to (7 - AP_ICC_CTLR_EL1[PRIBITS]).
+
+ For example, in a system with AP_ICC_CTLR_EL1[PRIBITS] == 0x4:
+
+ There are 5 bits of implemented priority.
+
+ This means there are 3 bits of unimplemented priority, which
+ are always at the least significant end (bits [2:0] are RES0).
+
+ Valid priorities are 8, 16, 24, 32, and so on. Dividing these
+                                                 by 2^(3) gives 1, 2, 3, 4, and so on.
+
+ Subtracting 1 from each gives bits 0, 1, 2, 3, and so on that
+ provide information about those priorities.
+
+ Accesses to these registers from an interrupt regime give a
+ view of the active priorities that is appropriate for that
+ interrupt regime, to allow save and restore of the appropriate
+ state.
+
+ Interrupt regime and the number of Security states supported
+ by the Distributor affect the view as follows. Unless
+ otherwise stated, when a bit is successfully set to one, this
+ clears any other active priorities corresponding to that bit.
+
+                                                 The AP0Rn access allowed at each Exception level is as follows:
+
+                                                 (Secure) EL3: Permitted. Accesses Group 0 Secure active priorities.
+
+                                                 Secure EL1: Permitted. Accesses Group 0 Secure active priorities.
+
+                                                 Nonsecure EL1 access for a Virtual interrupt: ICH_AP0Rn_EL2.
+
+                                                 Nonsecure EL1 or EL2 when the GIC Distributor supports two Security
+                                                 states (GICD_CTLR[DS] is 0): Permitted. Accesses Group 0 Secure
+                                                 active priorities.
+
+                                                 Nonsecure EL1 or EL2 when the GIC Distributor supports one Security
+                                                 state (GICD_CTLR[DS] is 1): Permitted. Accesses Group 0 active
+                                                 priorities.
+
+ A Virtual interrupt in this case means that the interrupt
+ group associated with the register has been virtualized. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_ich_ap0r0_el2_s cn; */
+};
+typedef union bdk_ap_ich_ap0r0_el2 bdk_ap_ich_ap0r0_el2_t;
+
+#define BDK_AP_ICH_AP0R0_EL2 BDK_AP_ICH_AP0R0_EL2_FUNC()
+static inline uint64_t BDK_AP_ICH_AP0R0_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICH_AP0R0_EL2_FUNC(void)
+{
+ return 0x3040c080000ll;
+}
+
+#define typedef_BDK_AP_ICH_AP0R0_EL2 bdk_ap_ich_ap0r0_el2_t
+#define bustype_BDK_AP_ICH_AP0R0_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICH_AP0R0_EL2 "AP_ICH_AP0R0_EL2"
+#define busnum_BDK_AP_ICH_AP0R0_EL2 0
+#define arguments_BDK_AP_ICH_AP0R0_EL2 -1,-1,-1,-1
+
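+/* Illustrative sketch (not part of the imported BDK sources): the mapping
+ * from an active priority M to its AP0Rn bit index described above, with
+ * U = 7 - AP_ICC_CTLR_EL1[PRIBITS] unimplemented low priority bits. With
+ * pribits = 4 (5 implemented bits), priority 8 maps to bit 0, 16 to bit 1,
+ * and so on. The example_* helper name is hypothetical. */
+static inline unsigned example_ap_priority_to_bit(unsigned priority, unsigned pribits)
+{
+    unsigned unimplemented = 7 - pribits;     /* RES0 low bits of priority */
+    return (priority >> unimplemented) - 1;   /* (M / 2^(U)) - 1 */
+}
+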
+/**
+ * Register (SYSREG) ap_ich_ap0r1_el2
+ *
+ * AP Interrupt Controller Hyp Active Priorities (0,1) Register
+ * Provides information about the active priorities for the
+ * current EL2 interrupt regime.
+ */
+union bdk_ap_ich_ap0r1_el2
+{
+ uint32_t u;
+ struct bdk_ap_ich_ap0r1_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t prioritybits : 32; /**< [ 31: 0](R/W) See description of AP_ICH_AP0R0_EL2[PRIORITYBITS]. */
+#else /* Word 0 - Little Endian */
+ uint32_t prioritybits : 32; /**< [ 31: 0](R/W) See description of AP_ICH_AP0R0_EL2[PRIORITYBITS]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_ich_ap0r1_el2_s cn; */
+};
+typedef union bdk_ap_ich_ap0r1_el2 bdk_ap_ich_ap0r1_el2_t;
+
+#define BDK_AP_ICH_AP0R1_EL2 BDK_AP_ICH_AP0R1_EL2_FUNC()
+static inline uint64_t BDK_AP_ICH_AP0R1_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICH_AP0R1_EL2_FUNC(void)
+{
+ return 0x3040c080100ll;
+}
+
+#define typedef_BDK_AP_ICH_AP0R1_EL2 bdk_ap_ich_ap0r1_el2_t
+#define bustype_BDK_AP_ICH_AP0R1_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICH_AP0R1_EL2 "AP_ICH_AP0R1_EL2"
+#define busnum_BDK_AP_ICH_AP0R1_EL2 0
+#define arguments_BDK_AP_ICH_AP0R1_EL2 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_ich_ap0r2_el2
+ *
+ * AP Interrupt Controller Hyp Active Priorities (0,2) Register
+ * Provides information about the active priorities for the
+ * current EL2 interrupt regime.
+ */
+union bdk_ap_ich_ap0r2_el2
+{
+ uint32_t u;
+ struct bdk_ap_ich_ap0r2_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t prioritybits : 32; /**< [ 31: 0](R/W) See description of AP_ICH_AP0R0_EL2[PRIORITYBITS]. */
+#else /* Word 0 - Little Endian */
+ uint32_t prioritybits : 32; /**< [ 31: 0](R/W) See description of AP_ICH_AP0R0_EL2[PRIORITYBITS]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_ich_ap0r2_el2_s cn; */
+};
+typedef union bdk_ap_ich_ap0r2_el2 bdk_ap_ich_ap0r2_el2_t;
+
+#define BDK_AP_ICH_AP0R2_EL2 BDK_AP_ICH_AP0R2_EL2_FUNC()
+static inline uint64_t BDK_AP_ICH_AP0R2_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICH_AP0R2_EL2_FUNC(void)
+{
+ return 0x3040c080200ll;
+}
+
+#define typedef_BDK_AP_ICH_AP0R2_EL2 bdk_ap_ich_ap0r2_el2_t
+#define bustype_BDK_AP_ICH_AP0R2_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICH_AP0R2_EL2 "AP_ICH_AP0R2_EL2"
+#define busnum_BDK_AP_ICH_AP0R2_EL2 0
+#define arguments_BDK_AP_ICH_AP0R2_EL2 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_ich_ap0r3_el2
+ *
+ * AP Interrupt Controller Hyp Active Priorities (0,3) Register
+ * Provides information about the active priorities for the
+ * current EL2 interrupt regime.
+ */
+union bdk_ap_ich_ap0r3_el2
+{
+ uint32_t u;
+ struct bdk_ap_ich_ap0r3_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t prioritybits : 32; /**< [ 31: 0](R/W) See description of AP_ICH_AP0R0_EL2[PRIORITYBITS]. */
+#else /* Word 0 - Little Endian */
+ uint32_t prioritybits : 32; /**< [ 31: 0](R/W) See description of AP_ICH_AP0R0_EL2[PRIORITYBITS]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_ich_ap0r3_el2_s cn; */
+};
+typedef union bdk_ap_ich_ap0r3_el2 bdk_ap_ich_ap0r3_el2_t;
+
+#define BDK_AP_ICH_AP0R3_EL2 BDK_AP_ICH_AP0R3_EL2_FUNC()
+static inline uint64_t BDK_AP_ICH_AP0R3_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICH_AP0R3_EL2_FUNC(void)
+{
+ return 0x3040c080300ll;
+}
+
+#define typedef_BDK_AP_ICH_AP0R3_EL2 bdk_ap_ich_ap0r3_el2_t
+#define bustype_BDK_AP_ICH_AP0R3_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICH_AP0R3_EL2 "AP_ICH_AP0R3_EL2"
+#define busnum_BDK_AP_ICH_AP0R3_EL2 0
+#define arguments_BDK_AP_ICH_AP0R3_EL2 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_ich_ap1r0_el2
+ *
+ * AP Interrupt Controller Hyp Active Priorities (1,0) Register
+ * Provides information about the active priorities for the
+ * current EL2 interrupt regime.
+ */
+union bdk_ap_ich_ap1r0_el2
+{
+ uint32_t u;
+ struct bdk_ap_ich_ap1r0_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t prioritybits : 32; /**< [ 31: 0](R/W) Provides information about priority M, according to the
+ following relationship:
+
+                                                 Bit P\<n\> corresponds to priority (M divided by 2^(U))
+ minus 1, where U is the number of unimplemented bits of
+ priority and is equal to (7 - AP_ICC_CTLR_EL1[PRIBITS]).
+
+ For example, in a system with AP_ICC_CTLR_EL1[PRIBITS] = 0x4:
+
+ There are 5 bits of implemented priority.
+
+ This means there are 3 bits of unimplemented priority, which
+ are always at the least significant end (bits [2:0] are RES0).
+
+ Valid priorities are 8, 16, 24, 32, and so on. Dividing these
+                                                 by 2^(3) gives 1, 2, 3, 4, and so on.
+
+ Subtracting 1 from each gives bits 0, 1, 2, 3, and so on that
+ provide information about those priorities.
+
+ Accesses to these registers from an interrupt regime give a
+ view of the active priorities that is appropriate for that
+ interrupt regime, to allow save and restore of the appropriate
+ state.
+
+ Interrupt regime and the number of Security states supported
+ by the Distributor affect the view as follows. Unless
+ otherwise stated, when a bit is successfully set to one, this
+ clears any other active priorities corresponding to that bit.
+
+                                                 The AP1Rn access allowed at each Exception level and Security
+                                                 state is as follows:
+
+                                                 (Secure) EL3: Permitted. When AP_SCR_EL3[NS] is 0, accesses Group 1
+                                                 Secure active priorities. When AP_SCR_EL3[NS] is 1, accesses Group 1
+                                                 nonsecure active priorities (unshifted). When a bit is written, the
+                                                 bit is only updated if the corresponding Group 0 and Group 1 Secure
+                                                 active priority is zero.
+
+                                                 Secure EL1: Permitted. Accesses Group 1 Secure active priorities
+                                                 (unshifted). When a bit is written, the bit is only updated if the
+                                                 corresponding Group 0 Secure active priority is zero.
+
+                                                 Nonsecure EL1 access for a Virtual interrupt: ICH_AP1Rn_EL2.
+
+                                                 Nonsecure EL1 or EL2 when the GIC Distributor supports two Security
+                                                 states (GICD_CTLR[DS] is 0): Permitted. Accesses Group 1 nonsecure
+                                                 active priorities (shifted). When a bit is written, the bit is only
+                                                 updated if the corresponding Group 0 and Group 1 Secure active
+                                                 priority is zero.
+
+                                                 Nonsecure EL1 or EL2 when the GIC Distributor supports one Security
+                                                 state (GICD_CTLR[DS] is 1): Permitted. Accesses Group 1 nonsecure
+                                                 active priorities (unshifted). When a bit is written, the bit is
+                                                 only updated if the Group 0 active priority is zero.
+
+ A Virtual interrupt in this case means that the interrupt
+ group associated with the register has been virtualized. */
+#else /* Word 0 - Little Endian */
+ uint32_t prioritybits : 32; /**< [ 31: 0](R/W) Provides information about priority M, according to the
+ following relationship:
+
+                                                 Bit P\<n\> corresponds to priority (M divided by 2^(U))
+ minus 1, where U is the number of unimplemented bits of
+ priority and is equal to (7 - AP_ICC_CTLR_EL1[PRIBITS]).
+
+ For example, in a system with AP_ICC_CTLR_EL1[PRIBITS] = 0x4:
+
+ There are 5 bits of implemented priority.
+
+ This means there are 3 bits of unimplemented priority, which
+ are always at the least significant end (bits [2:0] are RES0).
+
+ Valid priorities are 8, 16, 24, 32, and so on. Dividing these
+                                                 by 2^(3) gives 1, 2, 3, 4, and so on.
+
+ Subtracting 1 from each gives bits 0, 1, 2, 3, and so on that
+ provide information about those priorities.
+
+ Accesses to these registers from an interrupt regime give a
+ view of the active priorities that is appropriate for that
+ interrupt regime, to allow save and restore of the appropriate
+ state.
+
+ Interrupt regime and the number of Security states supported
+ by the Distributor affect the view as follows. Unless
+ otherwise stated, when a bit is successfully set to one, this
+ clears any other active priorities corresponding to that bit.
+
+                                                 The AP1Rn access allowed at each Exception level and Security
+                                                 state is as follows:
+
+                                                 (Secure) EL3: Permitted. When AP_SCR_EL3[NS] is 0, accesses Group 1
+                                                 Secure active priorities. When AP_SCR_EL3[NS] is 1, accesses Group 1
+                                                 nonsecure active priorities (unshifted). When a bit is written, the
+                                                 bit is only updated if the corresponding Group 0 and Group 1 Secure
+                                                 active priority is zero.
+
+                                                 Secure EL1: Permitted. Accesses Group 1 Secure active priorities
+                                                 (unshifted). When a bit is written, the bit is only updated if the
+                                                 corresponding Group 0 Secure active priority is zero.
+
+                                                 Nonsecure EL1 access for a Virtual interrupt: ICH_AP1Rn_EL2.
+
+                                                 Nonsecure EL1 or EL2 when the GIC Distributor supports two Security
+                                                 states (GICD_CTLR[DS] is 0): Permitted. Accesses Group 1 nonsecure
+                                                 active priorities (shifted). When a bit is written, the bit is only
+                                                 updated if the corresponding Group 0 and Group 1 Secure active
+                                                 priority is zero.
+
+                                                 Nonsecure EL1 or EL2 when the GIC Distributor supports one Security
+                                                 state (GICD_CTLR[DS] is 1): Permitted. Accesses Group 1 nonsecure
+                                                 active priorities (unshifted). When a bit is written, the bit is
+                                                 only updated if the Group 0 active priority is zero.
+
+ A Virtual interrupt in this case means that the interrupt
+ group associated with the register has been virtualized. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_ich_ap1r0_el2_s cn; */
+};
+typedef union bdk_ap_ich_ap1r0_el2 bdk_ap_ich_ap1r0_el2_t;
+
+#define BDK_AP_ICH_AP1R0_EL2 BDK_AP_ICH_AP1R0_EL2_FUNC()
+static inline uint64_t BDK_AP_ICH_AP1R0_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICH_AP1R0_EL2_FUNC(void)
+{
+ return 0x3040c090000ll;
+}
+
+#define typedef_BDK_AP_ICH_AP1R0_EL2 bdk_ap_ich_ap1r0_el2_t
+#define bustype_BDK_AP_ICH_AP1R0_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICH_AP1R0_EL2 "AP_ICH_AP1R0_EL2"
+#define busnum_BDK_AP_ICH_AP1R0_EL2 0
+#define arguments_BDK_AP_ICH_AP1R0_EL2 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_ich_ap1r1_el2
+ *
+ * AP Interrupt Controller Hyp Active Priorities (1,1) Register
+ * Provides information about the active priorities for the
+ * current EL2 interrupt regime.
+ */
+union bdk_ap_ich_ap1r1_el2
+{
+ uint32_t u;
+ struct bdk_ap_ich_ap1r1_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t prioritybits : 32; /**< [ 31: 0](R/W) See description of AP_ICH_AP1R0_EL2[PRIORITYBITS]. */
+#else /* Word 0 - Little Endian */
+ uint32_t prioritybits : 32; /**< [ 31: 0](R/W) See description of AP_ICH_AP1R0_EL2[PRIORITYBITS]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_ich_ap1r1_el2_s cn; */
+};
+typedef union bdk_ap_ich_ap1r1_el2 bdk_ap_ich_ap1r1_el2_t;
+
+#define BDK_AP_ICH_AP1R1_EL2 BDK_AP_ICH_AP1R1_EL2_FUNC()
+static inline uint64_t BDK_AP_ICH_AP1R1_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICH_AP1R1_EL2_FUNC(void)
+{
+ return 0x3040c090100ll;
+}
+
+#define typedef_BDK_AP_ICH_AP1R1_EL2 bdk_ap_ich_ap1r1_el2_t
+#define bustype_BDK_AP_ICH_AP1R1_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICH_AP1R1_EL2 "AP_ICH_AP1R1_EL2"
+#define busnum_BDK_AP_ICH_AP1R1_EL2 0
+#define arguments_BDK_AP_ICH_AP1R1_EL2 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_ich_ap1r2_el2
+ *
+ * AP Interrupt Controller Hyp Active Priorities (1,2) Register
+ * Provides information about the active priorities for the
+ * current EL2 interrupt regime.
+ */
+union bdk_ap_ich_ap1r2_el2
+{
+ uint32_t u;
+ struct bdk_ap_ich_ap1r2_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t prioritybits : 32; /**< [ 31: 0](R/W) See description of AP_ICH_AP1R0_EL2[PRIORITYBITS]. */
+#else /* Word 0 - Little Endian */
+ uint32_t prioritybits : 32; /**< [ 31: 0](R/W) See description of AP_ICH_AP1R0_EL2[PRIORITYBITS]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_ich_ap1r2_el2_s cn; */
+};
+typedef union bdk_ap_ich_ap1r2_el2 bdk_ap_ich_ap1r2_el2_t;
+
+#define BDK_AP_ICH_AP1R2_EL2 BDK_AP_ICH_AP1R2_EL2_FUNC()
+static inline uint64_t BDK_AP_ICH_AP1R2_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICH_AP1R2_EL2_FUNC(void)
+{
+ return 0x3040c090200ll;
+}
+
+#define typedef_BDK_AP_ICH_AP1R2_EL2 bdk_ap_ich_ap1r2_el2_t
+#define bustype_BDK_AP_ICH_AP1R2_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICH_AP1R2_EL2 "AP_ICH_AP1R2_EL2"
+#define busnum_BDK_AP_ICH_AP1R2_EL2 0
+#define arguments_BDK_AP_ICH_AP1R2_EL2 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_ich_ap1r3_el2
+ *
+ * AP Interrupt Controller Hyp Active Priorities (1,3) Register
+ * Provides information about the active priorities for the
+ * current EL2 interrupt regime.
+ */
+union bdk_ap_ich_ap1r3_el2
+{
+ uint32_t u;
+ struct bdk_ap_ich_ap1r3_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t prioritybits : 32; /**< [ 31: 0](R/W) See description of AP_ICH_AP1R0_EL2[PRIORITYBITS]. */
+#else /* Word 0 - Little Endian */
+ uint32_t prioritybits : 32; /**< [ 31: 0](R/W) See description of AP_ICH_AP1R0_EL2[PRIORITYBITS]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_ich_ap1r3_el2_s cn; */
+};
+typedef union bdk_ap_ich_ap1r3_el2 bdk_ap_ich_ap1r3_el2_t;
+
+#define BDK_AP_ICH_AP1R3_EL2 BDK_AP_ICH_AP1R3_EL2_FUNC()
+static inline uint64_t BDK_AP_ICH_AP1R3_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICH_AP1R3_EL2_FUNC(void)
+{
+ return 0x3040c090300ll;
+}
+
+#define typedef_BDK_AP_ICH_AP1R3_EL2 bdk_ap_ich_ap1r3_el2_t
+#define bustype_BDK_AP_ICH_AP1R3_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICH_AP1R3_EL2 "AP_ICH_AP1R3_EL2"
+#define busnum_BDK_AP_ICH_AP1R3_EL2 0
+#define arguments_BDK_AP_ICH_AP1R3_EL2 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_ich_eisr_el2
+ *
+ * AP Interrupt Controller End of Interrupt Status Register
+ * When a maintenance interrupt is received, this register helps
+ * determine which List registers have outstanding EOI interrupts
+ * that require servicing.
+ */
+union bdk_ap_ich_eisr_el2
+{
+ uint32_t u;
+ struct bdk_ap_ich_eisr_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t status_bits : 32; /**< [ 31: 0](RO) EOI status bit for List register \<n\>:
+                                                 For any ICH_LR\<n\>_EL2, the corresponding status bit is set to
+                                                 1 if ICH_LR\<n\>_EL2[State] is 0x0 and the entry requests an EOI
+                                                 maintenance interrupt ([HW] is 0 and the EOI bit, bit 41, is 1).
+ 0 = List register \<n\>, ICH_LR\<n\>_EL2, does not have an EOI.
+ 1 = List register \<n\>, ICH_LR\<n\>_EL2, has an EOI. */
+#else /* Word 0 - Little Endian */
+ uint32_t status_bits : 32; /**< [ 31: 0](RO) EOI status bit for List register \<n\>:
+                                                 For any ICH_LR\<n\>_EL2, the corresponding status bit is set to
+                                                 1 if ICH_LR\<n\>_EL2[State] is 0x0 and the entry requests an EOI
+                                                 maintenance interrupt ([HW] is 0 and the EOI bit, bit 41, is 1).
+ 0 = List register \<n\>, ICH_LR\<n\>_EL2, does not have an EOI.
+ 1 = List register \<n\>, ICH_LR\<n\>_EL2, has an EOI. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_ich_eisr_el2_s cn; */
+};
+typedef union bdk_ap_ich_eisr_el2 bdk_ap_ich_eisr_el2_t;
+
+#define BDK_AP_ICH_EISR_EL2 BDK_AP_ICH_EISR_EL2_FUNC()
+static inline uint64_t BDK_AP_ICH_EISR_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICH_EISR_EL2_FUNC(void)
+{
+ return 0x3040c0b0300ll;
+}
+
+#define typedef_BDK_AP_ICH_EISR_EL2 bdk_ap_ich_eisr_el2_t
+#define bustype_BDK_AP_ICH_EISR_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICH_EISR_EL2 "AP_ICH_EISR_EL2"
+#define busnum_BDK_AP_ICH_EISR_EL2 0
+#define arguments_BDK_AP_ICH_EISR_EL2 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_ich_elrsr_el2
+ *
+ * AP Interrupt Controller Empty List Register Status Register
+ * This register can be used to locate a usable List register
+ * when the hypervisor is delivering an interrupt to a Guest OS.
+ *
+ * Internal:
+ * This register was renamed ICH_ELRSR_EL2 in OBAN of 2014-06-13 after release v20 of GIC v3.
+ */
+union bdk_ap_ich_elrsr_el2
+{
+ uint32_t u;
+ struct bdk_ap_ich_elrsr_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t status_bits : 32; /**< [ 31: 0](RO) Status bit for List register \<n\>, ICH_LR\<n\>_EL2:
+                                                 For any ICH_LR\<n\>_EL2, the corresponding status bit is set to
+                                                 1 if ICH_LR\<n\>_EL2[State] is 0x0 and no EOI maintenance
+                                                 interrupt is deferred ([HW] is 1 or the EOI bit, bit 41, is 0).
+ 0 = List register ICH_LR\<n\>_EL2, if implemented, contains a valid
+ interrupt. Using this List register can result in overwriting
+ a valid interrupt.
+ 1 = List register ICH_LR\<n\>_EL2 does not contain a valid
+ interrupt. The List register is empty and can be used without
+ overwriting a valid interrupt or losing an EOI maintenance
+ interrupt. */
+#else /* Word 0 - Little Endian */
+ uint32_t status_bits : 32; /**< [ 31: 0](RO) Status bit for List register \<n\>, ICH_LR\<n\>_EL2:
+                                                 For any ICH_LR\<n\>_EL2, the corresponding status bit is set to
+                                                 1 if ICH_LR\<n\>_EL2[State] is 0x0 and no EOI maintenance
+                                                 interrupt is deferred ([HW] is 1 or the EOI bit, bit 41, is 0).
+ 0 = List register ICH_LR\<n\>_EL2, if implemented, contains a valid
+ interrupt. Using this List register can result in overwriting
+ a valid interrupt.
+ 1 = List register ICH_LR\<n\>_EL2 does not contain a valid
+ interrupt. The List register is empty and can be used without
+ overwriting a valid interrupt or losing an EOI maintenance
+ interrupt. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_ich_elrsr_el2_s cn; */
+};
+typedef union bdk_ap_ich_elrsr_el2 bdk_ap_ich_elrsr_el2_t;
+
+#define BDK_AP_ICH_ELRSR_EL2 BDK_AP_ICH_ELRSR_EL2_FUNC()
+static inline uint64_t BDK_AP_ICH_ELRSR_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICH_ELRSR_EL2_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x3040c0b0500ll;
+ __bdk_csr_fatal("AP_ICH_ELRSR_EL2", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_ICH_ELRSR_EL2 bdk_ap_ich_elrsr_el2_t
+#define bustype_BDK_AP_ICH_ELRSR_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICH_ELRSR_EL2 "AP_ICH_ELRSR_EL2"
+#define busnum_BDK_AP_ICH_ELRSR_EL2 0
+#define arguments_BDK_AP_ICH_ELRSR_EL2 -1,-1,-1,-1
+
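+/* Illustrative sketch (not part of the imported BDK sources): scanning the
+ * empty-LR status bits for a usable List register before injecting a virtual
+ * interrupt. The name s3_4_c12_c11_5 is the generic AArch64 encoding of
+ * ICH_ELRSR_EL2, matching the 0x3040c0b0500 key returned above for CN9XXX.
+ * The example_* helper name is hypothetical. */
+static inline int example_find_empty_lr(void)
+{
+    uint64_t v;
+
+    asm volatile("mrs %0, s3_4_c12_c11_5" : "=r" (v));
+    return v ? __builtin_ctzll(v) : -1;       /* lowest set bit = empty LR */
+}
+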
+/**
+ * Register (SYSREG) ap_ich_elsr_el2
+ *
+ * AP Interrupt Controller Empty List Register Status Register
+ * This register can be used to locate a usable List register
+ * when the hypervisor is delivering an interrupt to a Guest OS.
+ */
+union bdk_ap_ich_elsr_el2
+{
+ uint32_t u;
+ struct bdk_ap_ich_elsr_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t status_bits : 32; /**< [ 31: 0](RO) Status bit for List register \<n\>, ICH_LR\<n\>_EL2:
+                                                 For any ICH_LR\<n\>_EL2, the corresponding status bit is set to
+                                                 1 if ICH_LR\<n\>_EL2[State] is 0x0 and no EOI maintenance
+                                                 interrupt is deferred ([HW] is 1 or the EOI bit, bit 41, is 0).
+ 0 = List register ICH_LR\<n\>_EL2, if implemented, contains a valid
+ interrupt. Using this List register can result in overwriting
+ a valid interrupt.
+ 1 = List register ICH_LR\<n\>_EL2 does not contain a valid
+ interrupt. The List register is empty and can be used without
+ overwriting a valid interrupt or losing an EOI maintenance
+ interrupt. */
+#else /* Word 0 - Little Endian */
+ uint32_t status_bits : 32; /**< [ 31: 0](RO) Status bit for List register \<n\>, ICH_LR\<n\>_EL2:
+                                                 For any ICH_LR\<n\>_EL2, the corresponding status bit is set to
+                                                 1 if ICH_LR\<n\>_EL2[State] is 0x0 and no EOI maintenance
+                                                 interrupt is deferred ([HW] is 1 or the EOI bit, bit 41, is 0).
+ 0 = List register ICH_LR\<n\>_EL2, if implemented, contains a valid
+ interrupt. Using this List register can result in overwriting
+ a valid interrupt.
+ 1 = List register ICH_LR\<n\>_EL2 does not contain a valid
+ interrupt. The List register is empty and can be used without
+ overwriting a valid interrupt or losing an EOI maintenance
+ interrupt. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_ich_elsr_el2_s cn; */
+};
+typedef union bdk_ap_ich_elsr_el2 bdk_ap_ich_elsr_el2_t;
+
+#define BDK_AP_ICH_ELSR_EL2 BDK_AP_ICH_ELSR_EL2_FUNC()
+static inline uint64_t BDK_AP_ICH_ELSR_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICH_ELSR_EL2_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x3040c0b0500ll;
+ __bdk_csr_fatal("AP_ICH_ELSR_EL2", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_ICH_ELSR_EL2 bdk_ap_ich_elsr_el2_t
+#define bustype_BDK_AP_ICH_ELSR_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICH_ELSR_EL2 "AP_ICH_ELSR_EL2"
+#define busnum_BDK_AP_ICH_ELSR_EL2 0
+#define arguments_BDK_AP_ICH_ELSR_EL2 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_ich_hcr_el2
+ *
+ * AP Interrupt Controller Hyp Control Register
+ * Controls the environment for guest operating systems.
+ */
+union bdk_ap_ich_hcr_el2
+{
+ uint32_t u;
+ struct bdk_ap_ich_hcr_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t eoicount : 5; /**< [ 31: 27](R/W) Counts the number of EOIs received that do not have a
+ corresponding entry in the List registers. The virtual CPU
+ interface increments this field automatically when a matching
+ EOI is received.
+
+ EOIs that do not clear a bit in one of the Active Priorities
+ registers ICH_APmRn_EL2 do not cause an increment.
+
+ Although not possible under correct operation, if an EOI
+ occurs when the value of this field is 31, this field wraps to
+ 0.
+
+ The maintenance interrupt is asserted whenever this field is
+ nonzero and the LRENPIE bit is set to 1. */
+ uint32_t reserved_15_26 : 12;
+ uint32_t tdir : 1; /**< [ 14: 14](R/W) Trap nonsecure EL1 writes to ICC_DIR_EL1:
+ 0 = Nonsecure EL1 writes of ICC_DIR_EL1 do not cause a trap to EL2, unless trapped by
+ other mechanisms.
+ 1 = Nonsecure EL1 writes of ICC_DIR_EL1 are trapped to EL2. */
+ uint32_t tsei : 1; /**< [ 13: 13](RO) A locally generated SEI will trap to EL2 if this bit is set. This bit is RES0 when
+ AP_ICH_VTR_EL2[SEIS] is not set. */
+ uint32_t tall1 : 1; /**< [ 12: 12](R/W) Trap all nonsecure EL1 accesses to ICC_* system registers for
+ group 1 interrupts to EL2.
+ 0 = Non-Secure EL1 accesses to ICC_* registers for group 1
+ interrupts proceed as normal.
+ 1 = Any nonsecure EL1 accesses to ICC_* registers for group 1
+ interrupts trap to EL2. */
+ uint32_t tall0 : 1; /**< [ 11: 11](R/W) Trap all nonsecure EL1 accesses to ICC_* system registers for
+ group 0 interrupts to EL2.
+ 0 = Non-Secure EL1 accesses to ICC_* registers for group 0
+ interrupts proceed as normal.
+ 1 = Any nonsecure EL1 accesses to ICC_* registers for group 0
+ interrupts trap to EL2. */
+    uint32_t tc                    : 1;  /**< [ 10: 10](R/W) Trap all nonsecure EL1 accesses to system registers that are
+ common to group 0 and group 1 to EL2.
+ This affects AP_ICC_DIR_EL1, AP_ICC_PMR_EL1, and AP_ICC_RPR_EL1.
+ 0 = Nonsecure EL1 accesses to common registers proceed as normal.
+ 1 = Any nonsecure EL1 accesses to common registers trap to EL2. */
+ uint32_t reserved_8_9 : 2;
+ uint32_t vgrp1die : 1; /**< [ 7: 7](R/W) VM Disable Group 1 Interrupt Enable. Enables the signaling of
+ a maintenance interrupt while signaling of Group 1 interrupts
+ from the virtual CPU interface to the connected virtual
+ machine is disabled:
+ 0 = Maintenance interrupt disabled.
+ 1 = Maintenance interrupt signaled while GICV_CTLR[EnableGrp1] is
+ set to 0. */
+ uint32_t vgrp1eie : 1; /**< [ 6: 6](R/W) VM Enable Group 1 Interrupt Enable. Enables the signaling of a
+ maintenance interrupt while signaling of Group 1 interrupts
+ from the virtual CPU interface to the connected virtual
+ machine is enabled:
+ 0 = Maintenance interrupt disabled.
+ 1 = Maintenance interrupt signaled while GICV_CTLR[EnableGrp1] is
+ set to 1. */
+ uint32_t vgrp0die : 1; /**< [ 5: 5](R/W) VM Disable Group 0 Interrupt Enable. Enables the signaling of
+ a maintenance interrupt while signaling of Group 0 interrupts
+ from the virtual CPU interface to the connected virtual
+ machine is disabled:
+ 0 = Maintenance interrupt disabled.
+ 1 = Maintenance interrupt signaled while GICV_CTLR[EnableGrp0] is
+ set to 0. */
+ uint32_t vgrp0eie : 1; /**< [ 4: 4](R/W) VM Enable Group 0 Interrupt Enable. Enables the signaling of a
+ maintenance interrupt while signaling of Group 0 interrupts
+ from the virtual CPU interface to the connected virtual
+ machine is enabled:
+ 0 = Maintenance interrupt disabled.
+ 1 = Maintenance interrupt signaled while GICV_CTLR[EnableGrp0] is
+ set to 1. */
+ uint32_t npie : 1; /**< [ 3: 3](R/W) No Pending Interrupt Enable. Enables the signaling of a
+ maintenance interrupt while no pending interrupts are present
+ in the List registers:
+ 0 = Maintenance interrupt disabled.
+                                                                 1 = Maintenance interrupt signaled while the List registers contain
+ no interrupts in the pending state. */
+ uint32_t lrenpie : 1; /**< [ 2: 2](R/W) List Register Entry Not Present Interrupt Enable. Enables the
+ signaling of a maintenance interrupt while the virtual CPU
+ interface does not have a corresponding valid List register
+ entry for an EOI request:
+ 0 = Maintenance interrupt disabled.
+ 1 = A maintenance interrupt is asserted while the EOIcount field
+ is not 0. */
+ uint32_t uie : 1; /**< [ 1: 1](R/W) Underflow Interrupt Enable. Enables the signaling of a
+ maintenance interrupt when the List registers are empty, or
+ hold only one valid entry:
+ 0 = Maintenance interrupt disabled.
+ 1 = A maintenance interrupt is asserted if none, or only one, of
+ the List register entries is marked as a valid interrupt. */
+ uint32_t en : 1; /**< [ 0: 0](R/W) Enable. Global enable bit for the virtual CPU interface:
+ When this field is set to 0:
+ The virtual CPU interface does not signal any maintenance
+ interrupts.
+ The virtual CPU interface does not signal any virtual
+ interrupts.
+ A read of GICV_IAR or GICV_AIAR returns a spurious interrupt
+ ID.
+ 0 = Virtual CPU interface operation disabled.
+ 1 = Virtual CPU interface operation enabled. */
+#else /* Word 0 - Little Endian */
+ uint32_t en : 1; /**< [ 0: 0](R/W) Enable. Global enable bit for the virtual CPU interface:
+ When this field is set to 0:
+ The virtual CPU interface does not signal any maintenance
+ interrupts.
+ The virtual CPU interface does not signal any virtual
+ interrupts.
+ A read of GICV_IAR or GICV_AIAR returns a spurious interrupt
+ ID.
+ 0 = Virtual CPU interface operation disabled.
+ 1 = Virtual CPU interface operation enabled. */
+ uint32_t uie : 1; /**< [ 1: 1](R/W) Underflow Interrupt Enable. Enables the signaling of a
+ maintenance interrupt when the List registers are empty, or
+ hold only one valid entry:
+ 0 = Maintenance interrupt disabled.
+ 1 = A maintenance interrupt is asserted if none, or only one, of
+ the List register entries is marked as a valid interrupt. */
+ uint32_t lrenpie : 1; /**< [ 2: 2](R/W) List Register Entry Not Present Interrupt Enable. Enables the
+ signaling of a maintenance interrupt while the virtual CPU
+ interface does not have a corresponding valid List register
+ entry for an EOI request:
+ 0 = Maintenance interrupt disabled.
+ 1 = A maintenance interrupt is asserted while the EOIcount field
+ is not 0. */
+ uint32_t npie : 1; /**< [ 3: 3](R/W) No Pending Interrupt Enable. Enables the signaling of a
+ maintenance interrupt while no pending interrupts are present
+ in the List registers:
+ 0 = Maintenance interrupt disabled.
+                                                                 1 = Maintenance interrupt signaled while the List registers contain
+ no interrupts in the pending state. */
+ uint32_t vgrp0eie : 1; /**< [ 4: 4](R/W) VM Enable Group 0 Interrupt Enable. Enables the signaling of a
+ maintenance interrupt while signaling of Group 0 interrupts
+ from the virtual CPU interface to the connected virtual
+ machine is enabled:
+ 0 = Maintenance interrupt disabled.
+ 1 = Maintenance interrupt signaled while GICV_CTLR[EnableGrp0] is
+ set to 1. */
+ uint32_t vgrp0die : 1; /**< [ 5: 5](R/W) VM Disable Group 0 Interrupt Enable. Enables the signaling of
+ a maintenance interrupt while signaling of Group 0 interrupts
+ from the virtual CPU interface to the connected virtual
+ machine is disabled:
+ 0 = Maintenance interrupt disabled.
+ 1 = Maintenance interrupt signaled while GICV_CTLR[EnableGrp0] is
+ set to 0. */
+ uint32_t vgrp1eie : 1; /**< [ 6: 6](R/W) VM Enable Group 1 Interrupt Enable. Enables the signaling of a
+ maintenance interrupt while signaling of Group 1 interrupts
+ from the virtual CPU interface to the connected virtual
+ machine is enabled:
+ 0 = Maintenance interrupt disabled.
+ 1 = Maintenance interrupt signaled while GICV_CTLR[EnableGrp1] is
+ set to 1. */
+ uint32_t vgrp1die : 1; /**< [ 7: 7](R/W) VM Disable Group 1 Interrupt Enable. Enables the signaling of
+ a maintenance interrupt while signaling of Group 1 interrupts
+ from the virtual CPU interface to the connected virtual
+ machine is disabled:
+ 0 = Maintenance interrupt disabled.
+ 1 = Maintenance interrupt signaled while GICV_CTLR[EnableGrp1] is
+ set to 0. */
+ uint32_t reserved_8_9 : 2;
+    uint32_t tc                    : 1;  /**< [ 10: 10](R/W) Trap all nonsecure EL1 accesses to system registers that are
+ common to group 0 and group 1 to EL2.
+ This affects AP_ICC_DIR_EL1, AP_ICC_PMR_EL1, and AP_ICC_RPR_EL1.
+ 0 = Nonsecure EL1 accesses to common registers proceed as normal.
+ 1 = Any nonsecure EL1 accesses to common registers trap to EL2. */
+ uint32_t tall0 : 1; /**< [ 11: 11](R/W) Trap all nonsecure EL1 accesses to ICC_* system registers for
+ group 0 interrupts to EL2.
+ 0 = Non-Secure EL1 accesses to ICC_* registers for group 0
+ interrupts proceed as normal.
+ 1 = Any nonsecure EL1 accesses to ICC_* registers for group 0
+ interrupts trap to EL2. */
+ uint32_t tall1 : 1; /**< [ 12: 12](R/W) Trap all nonsecure EL1 accesses to ICC_* system registers for
+ group 1 interrupts to EL2.
+ 0 = Non-Secure EL1 accesses to ICC_* registers for group 1
+ interrupts proceed as normal.
+ 1 = Any nonsecure EL1 accesses to ICC_* registers for group 1
+ interrupts trap to EL2. */
+ uint32_t tsei : 1; /**< [ 13: 13](RO) A locally generated SEI will trap to EL2 if this bit is set. This bit is RES0 when
+ AP_ICH_VTR_EL2[SEIS] is not set. */
+ uint32_t tdir : 1; /**< [ 14: 14](R/W) Trap nonsecure EL1 writes to ICC_DIR_EL1:
+ 0 = Nonsecure EL1 writes of ICC_DIR_EL1 do not cause a trap to EL2, unless trapped by
+ other mechanisms.
+ 1 = Nonsecure EL1 writes of ICC_DIR_EL1 are trapped to EL2. */
+ uint32_t reserved_15_26 : 12;
+ uint32_t eoicount : 5; /**< [ 31: 27](R/W) Counts the number of EOIs received that do not have a
+ corresponding entry in the List registers. The virtual CPU
+ interface increments this field automatically when a matching
+ EOI is received.
+
+ EOIs that do not clear a bit in one of the Active Priorities
+ registers ICH_APmRn_EL2 do not cause an increment.
+
+ Although not possible under correct operation, if an EOI
+ occurs when the value of this field is 31, this field wraps to
+ 0.
+
+ The maintenance interrupt is asserted whenever this field is
+ nonzero and the LRENPIE bit is set to 1. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_ich_hcr_el2_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t eoicount : 5; /**< [ 31: 27](R/W) Counts the number of EOIs received that do not have a
+ corresponding entry in the List registers. The virtual CPU
+ interface increments this field automatically when a matching
+ EOI is received.
+
+ EOIs that do not clear a bit in one of the Active Priorities
+ registers ICH_APmRn_EL2 do not cause an increment.
+
+ Although not possible under correct operation, if an EOI
+ occurs when the value of this field is 31, this field wraps to
+ 0.
+
+ The maintenance interrupt is asserted whenever this field is
+ nonzero and the LRENPIE bit is set to 1. */
+ uint32_t reserved_14_26 : 13;
+ uint32_t tsei : 1; /**< [ 13: 13](RO) A locally generated SEI will trap to EL2 if this bit is set. This bit is RES0 when
+ AP_ICH_VTR_EL2[SEIS] is not set. */
+ uint32_t tall1 : 1; /**< [ 12: 12](R/W) Trap all nonsecure EL1 accesses to ICC_* system registers for
+ group 1 interrupts to EL2.
+ 0 = Non-Secure EL1 accesses to ICC_* registers for group 1
+ interrupts proceed as normal.
+ 1 = Any nonsecure EL1 accesses to ICC_* registers for group 1
+ interrupts trap to EL2. */
+ uint32_t tall0 : 1; /**< [ 11: 11](R/W) Trap all nonsecure EL1 accesses to ICC_* system registers for
+ group 0 interrupts to EL2.
+ 0 = Non-Secure EL1 accesses to ICC_* registers for group 0
+ interrupts proceed as normal.
+ 1 = Any nonsecure EL1 accesses to ICC_* registers for group 0
+ interrupts trap to EL2. */
+    uint32_t tc                    : 1;  /**< [ 10: 10](R/W) Trap all nonsecure EL1 accesses to system registers that are
+ common to group 0 and group 1 to EL2.
+ This affects AP_ICC_DIR_EL1, AP_ICC_PMR_EL1, and AP_ICC_RPR_EL1.
+ 0 = Nonsecure EL1 accesses to common registers proceed as normal.
+ 1 = Any nonsecure EL1 accesses to common registers trap to EL2. */
+ uint32_t reserved_8_9 : 2;
+ uint32_t vgrp1die : 1; /**< [ 7: 7](R/W) VM Disable Group 1 Interrupt Enable. Enables the signaling of
+ a maintenance interrupt while signaling of Group 1 interrupts
+ from the virtual CPU interface to the connected virtual
+ machine is disabled:
+ 0 = Maintenance interrupt disabled.
+ 1 = Maintenance interrupt signaled while GICV_CTLR[EnableGrp1] is
+ set to 0. */
+ uint32_t vgrp1eie : 1; /**< [ 6: 6](R/W) VM Enable Group 1 Interrupt Enable. Enables the signaling of a
+ maintenance interrupt while signaling of Group 1 interrupts
+ from the virtual CPU interface to the connected virtual
+ machine is enabled:
+ 0 = Maintenance interrupt disabled.
+ 1 = Maintenance interrupt signaled while GICV_CTLR[EnableGrp1] is
+ set to 1. */
+ uint32_t vgrp0die : 1; /**< [ 5: 5](R/W) VM Disable Group 0 Interrupt Enable. Enables the signaling of
+ a maintenance interrupt while signaling of Group 0 interrupts
+ from the virtual CPU interface to the connected virtual
+ machine is disabled:
+ 0 = Maintenance interrupt disabled.
+ 1 = Maintenance interrupt signaled while GICV_CTLR[EnableGrp0] is
+ set to 0. */
+ uint32_t vgrp0eie : 1; /**< [ 4: 4](R/W) VM Enable Group 0 Interrupt Enable. Enables the signaling of a
+ maintenance interrupt while signaling of Group 0 interrupts
+ from the virtual CPU interface to the connected virtual
+ machine is enabled:
+ 0 = Maintenance interrupt disabled.
+ 1 = Maintenance interrupt signaled while GICV_CTLR[EnableGrp0] is
+ set to 1. */
+ uint32_t npie : 1; /**< [ 3: 3](R/W) No Pending Interrupt Enable. Enables the signaling of a
+ maintenance interrupt while no pending interrupts are present
+ in the List registers:
+ 0 = Maintenance interrupt disabled.
+                                                                 1 = Maintenance interrupt signaled while the List registers contain
+ no interrupts in the pending state. */
+ uint32_t lrenpie : 1; /**< [ 2: 2](R/W) List Register Entry Not Present Interrupt Enable. Enables the
+ signaling of a maintenance interrupt while the virtual CPU
+ interface does not have a corresponding valid List register
+ entry for an EOI request:
+ 0 = Maintenance interrupt disabled.
+ 1 = A maintenance interrupt is asserted while the EOIcount field
+ is not 0. */
+ uint32_t uie : 1; /**< [ 1: 1](R/W) Underflow Interrupt Enable. Enables the signaling of a
+ maintenance interrupt when the List registers are empty, or
+ hold only one valid entry:
+ 0 = Maintenance interrupt disabled.
+ 1 = A maintenance interrupt is asserted if none, or only one, of
+ the List register entries is marked as a valid interrupt. */
+ uint32_t en : 1; /**< [ 0: 0](R/W) Enable. Global enable bit for the virtual CPU interface:
+ When this field is set to 0:
+ The virtual CPU interface does not signal any maintenance
+ interrupts.
+ The virtual CPU interface does not signal any virtual
+ interrupts.
+ A read of GICV_IAR or GICV_AIAR returns a spurious interrupt
+ ID.
+ 0 = Virtual CPU interface operation disabled.
+ 1 = Virtual CPU interface operation enabled. */
+#else /* Word 0 - Little Endian */
+ uint32_t en : 1; /**< [ 0: 0](R/W) Enable. Global enable bit for the virtual CPU interface:
+ When this field is set to 0:
+ The virtual CPU interface does not signal any maintenance
+ interrupts.
+ The virtual CPU interface does not signal any virtual
+ interrupts.
+ A read of GICV_IAR or GICV_AIAR returns a spurious interrupt
+ ID.
+ 0 = Virtual CPU interface operation disabled.
+ 1 = Virtual CPU interface operation enabled. */
+ uint32_t uie : 1; /**< [ 1: 1](R/W) Underflow Interrupt Enable. Enables the signaling of a
+ maintenance interrupt when the List registers are empty, or
+ hold only one valid entry:
+ 0 = Maintenance interrupt disabled.
+ 1 = A maintenance interrupt is asserted if none, or only one, of
+ the List register entries is marked as a valid interrupt. */
+ uint32_t lrenpie : 1; /**< [ 2: 2](R/W) List Register Entry Not Present Interrupt Enable. Enables the
+ signaling of a maintenance interrupt while the virtual CPU
+ interface does not have a corresponding valid List register
+ entry for an EOI request:
+ 0 = Maintenance interrupt disabled.
+ 1 = A maintenance interrupt is asserted while the EOIcount field
+ is not 0. */
+ uint32_t npie : 1; /**< [ 3: 3](R/W) No Pending Interrupt Enable. Enables the signaling of a
+ maintenance interrupt while no pending interrupts are present
+ in the List registers:
+ 0 = Maintenance interrupt disabled.
+                                                                 1 = Maintenance interrupt signaled while the List registers contain
+ no interrupts in the pending state. */
+ uint32_t vgrp0eie : 1; /**< [ 4: 4](R/W) VM Enable Group 0 Interrupt Enable. Enables the signaling of a
+ maintenance interrupt while signaling of Group 0 interrupts
+ from the virtual CPU interface to the connected virtual
+ machine is enabled:
+ 0 = Maintenance interrupt disabled.
+ 1 = Maintenance interrupt signaled while GICV_CTLR[EnableGrp0] is
+ set to 1. */
+ uint32_t vgrp0die : 1; /**< [ 5: 5](R/W) VM Disable Group 0 Interrupt Enable. Enables the signaling of
+ a maintenance interrupt while signaling of Group 0 interrupts
+ from the virtual CPU interface to the connected virtual
+ machine is disabled:
+ 0 = Maintenance interrupt disabled.
+ 1 = Maintenance interrupt signaled while GICV_CTLR[EnableGrp0] is
+ set to 0. */
+ uint32_t vgrp1eie : 1; /**< [ 6: 6](R/W) VM Enable Group 1 Interrupt Enable. Enables the signaling of a
+ maintenance interrupt while signaling of Group 1 interrupts
+ from the virtual CPU interface to the connected virtual
+ machine is enabled:
+ 0 = Maintenance interrupt disabled.
+ 1 = Maintenance interrupt signaled while GICV_CTLR[EnableGrp1] is
+ set to 1. */
+ uint32_t vgrp1die : 1; /**< [ 7: 7](R/W) VM Disable Group 1 Interrupt Enable. Enables the signaling of
+ a maintenance interrupt while signaling of Group 1 interrupts
+ from the virtual CPU interface to the connected virtual
+ machine is disabled:
+ 0 = Maintenance interrupt disabled.
+ 1 = Maintenance interrupt signaled while GICV_CTLR[EnableGrp1] is
+ set to 0. */
+ uint32_t reserved_8_9 : 2;
+    uint32_t tc                    : 1;  /**< [ 10: 10](R/W) Trap all nonsecure EL1 accesses to system registers that are
+ common to group 0 and group 1 to EL2.
+ This affects AP_ICC_DIR_EL1, AP_ICC_PMR_EL1, and AP_ICC_RPR_EL1.
+ 0 = Nonsecure EL1 accesses to common registers proceed as normal.
+ 1 = Any nonsecure EL1 accesses to common registers trap to EL2. */
+ uint32_t tall0 : 1; /**< [ 11: 11](R/W) Trap all nonsecure EL1 accesses to ICC_* system registers for
+ group 0 interrupts to EL2.
+ 0 = Non-Secure EL1 accesses to ICC_* registers for group 0
+ interrupts proceed as normal.
+ 1 = Any nonsecure EL1 accesses to ICC_* registers for group 0
+ interrupts trap to EL2. */
+ uint32_t tall1 : 1; /**< [ 12: 12](R/W) Trap all nonsecure EL1 accesses to ICC_* system registers for
+ group 1 interrupts to EL2.
+ 0 = Non-Secure EL1 accesses to ICC_* registers for group 1
+ interrupts proceed as normal.
+ 1 = Any nonsecure EL1 accesses to ICC_* registers for group 1
+ interrupts trap to EL2. */
+ uint32_t tsei : 1; /**< [ 13: 13](RO) A locally generated SEI will trap to EL2 if this bit is set. This bit is RES0 when
+ AP_ICH_VTR_EL2[SEIS] is not set. */
+ uint32_t reserved_14_26 : 13;
+ uint32_t eoicount : 5; /**< [ 31: 27](R/W) Counts the number of EOIs received that do not have a
+ corresponding entry in the List registers. The virtual CPU
+ interface increments this field automatically when a matching
+ EOI is received.
+
+ EOIs that do not clear a bit in one of the Active Priorities
+ registers ICH_APmRn_EL2 do not cause an increment.
+
+ Although not possible under correct operation, if an EOI
+ occurs when the value of this field is 31, this field wraps to
+ 0.
+
+ The maintenance interrupt is asserted whenever this field is
+ nonzero and the LRENPIE bit is set to 1. */
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_ap_ich_hcr_el2_s cn9; */
+};
+typedef union bdk_ap_ich_hcr_el2 bdk_ap_ich_hcr_el2_t;
+
+#define BDK_AP_ICH_HCR_EL2 BDK_AP_ICH_HCR_EL2_FUNC()
+static inline uint64_t BDK_AP_ICH_HCR_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICH_HCR_EL2_FUNC(void)
+{
+ return 0x3040c0b0000ll;
+}
+
+#define typedef_BDK_AP_ICH_HCR_EL2 bdk_ap_ich_hcr_el2_t
+#define bustype_BDK_AP_ICH_HCR_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICH_HCR_EL2 "AP_ICH_HCR_EL2"
+#define busnum_BDK_AP_ICH_HCR_EL2 0
+#define arguments_BDK_AP_ICH_HCR_EL2 -1,-1,-1,-1
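+
+/*
+ * Illustrative sketch, not part of the original BDK import: toggling the
+ * "no pending" maintenance interrupt enable using the CN8XXX field layout
+ * above. Assumes the union's raw member [u] is the 32-bit register image,
+ * as in the sibling ICH unions below; the caller performs the actual
+ * system-register read/write with the firmware's accessor.
+ */
+static inline uint32_t example_ich_hcr_set_npie(uint32_t raw, int enable)
+{
+    bdk_ap_ich_hcr_el2_t hcr;
+    hcr.u = raw;
+    hcr.cn8.npie = enable ? 1 : 0; /* interrupt when no LRs are pending */
+    return hcr.u;
+}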
+
+/**
+ * Register (SYSREG) ap_ich_lr#_el2
+ *
+ * AP List Registers
+ * Provides interrupt context information for the virtual CPU interface.
+ */
+union bdk_ap_ich_lrx_el2
+{
+ uint64_t u;
+ struct bdk_ap_ich_lrx_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t state : 2; /**< [ 63: 62](R/W) 0x0 = Invalid.
+ 0x1 = Pending.
+ 0x2 = Active.
+ 0x3 = Pending and Active. */
+ uint64_t hw : 1; /**< [ 61: 61](R/W) Virtual interrupt corresponds to physical interrupt. */
+ uint64_t group : 1; /**< [ 60: 60](R/W) Indicates interrupt is group 1. */
+ uint64_t reserved_56_59 : 4;
+ uint64_t pri : 8; /**< [ 55: 48](R/W) Interrupt priority. */
+ uint64_t reserved_42_47 : 6;
+ uint64_t physical_id : 10; /**< [ 41: 32](R/W) When [HW] is zero, bit 41 indicates whether this interrupt triggers an EOI
+ maintenance interrupt and bits 40..32 are RES0. */
+ uint64_t virtual_id : 32; /**< [ 31: 0](R/W) Virtual interrupt ID. */
+#else /* Word 0 - Little Endian */
+ uint64_t virtual_id : 32; /**< [ 31: 0](R/W) Virtual interrupt ID. */
+ uint64_t physical_id : 10; /**< [ 41: 32](R/W) When [HW] is zero, bit 41 indicates whether this interrupt triggers an EOI
+ maintenance interrupt and bits 40..32 are RES0. */
+ uint64_t reserved_42_47 : 6;
+ uint64_t pri : 8; /**< [ 55: 48](R/W) Interrupt priority. */
+ uint64_t reserved_56_59 : 4;
+ uint64_t group : 1; /**< [ 60: 60](R/W) Indicates interrupt is group 1. */
+ uint64_t hw : 1; /**< [ 61: 61](R/W) Virtual interrupt corresponds to physical interrupt. */
+ uint64_t state : 2; /**< [ 63: 62](R/W) 0x0 = Invalid.
+ 0x1 = Pending.
+ 0x2 = Active.
+ 0x3 = Pending and Active. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_ich_lrx_el2_s cn; */
+};
+typedef union bdk_ap_ich_lrx_el2 bdk_ap_ich_lrx_el2_t;
+
+static inline uint64_t BDK_AP_ICH_LRX_EL2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICH_LRX_EL2(unsigned long a)
+{
+ if (a<=15)
+ return 0x3040c0c0000ll + 0x100ll * ((a) & 0xf);
+ __bdk_csr_fatal("AP_ICH_LRX_EL2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_ICH_LRX_EL2(a) bdk_ap_ich_lrx_el2_t
+#define bustype_BDK_AP_ICH_LRX_EL2(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICH_LRX_EL2(a) "AP_ICH_LRX_EL2"
+#define busnum_BDK_AP_ICH_LRX_EL2(a) (a)
+#define arguments_BDK_AP_ICH_LRX_EL2(a) (a),-1,-1,-1
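+
+/*
+ * Illustrative sketch, not part of the original BDK import: composing a
+ * list-register image that presents virtual INTID [vintid] as a pending
+ * group 1 interrupt at priority [pri]. The result would be written to
+ * AP_ICH_LR<n>_EL2 (address from BDK_AP_ICH_LRX_EL2(n)) by whatever
+ * system-register accessor the firmware provides.
+ */
+static inline uint64_t example_ich_lr_pending(uint32_t vintid, uint8_t pri)
+{
+    bdk_ap_ich_lrx_el2_t lr = { .u = 0 };
+    lr.s.virtual_id = vintid; /* virtual interrupt ID */
+    lr.s.pri        = pri;    /* interrupt priority */
+    lr.s.group      = 1;      /* group 1 interrupt */
+    lr.s.hw         = 0;      /* purely virtual; no linked physical IRQ */
+    lr.s.state      = 0x1;    /* 0x1 = pending */
+    return lr.u;
+}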
+
+/**
+ * Register (SYSREG) ap_ich_lrc#
+ *
+ * AP List 32-bit Registers
+ * Provides interrupt context information for the virtual CPU interface. Only used for AArch32.
+ * Here for disassembly only.
+ */
+union bdk_ap_ich_lrcx
+{
+ uint32_t u;
+ struct bdk_ap_ich_lrcx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_ich_lrcx_s cn; */
+};
+typedef union bdk_ap_ich_lrcx bdk_ap_ich_lrcx_t;
+
+static inline uint64_t BDK_AP_ICH_LRCX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICH_LRCX(unsigned long a)
+{
+ if (a<=15)
+ return 0x3040c0e0000ll + 0x100ll * ((a) & 0xf);
+ __bdk_csr_fatal("AP_ICH_LRCX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_ICH_LRCX(a) bdk_ap_ich_lrcx_t
+#define bustype_BDK_AP_ICH_LRCX(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICH_LRCX(a) "AP_ICH_LRCX"
+#define busnum_BDK_AP_ICH_LRCX(a) (a)
+#define arguments_BDK_AP_ICH_LRCX(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_ich_misr_el2
+ *
+ * AP Interrupt Controller Maintenance Interrupt State Register
+ * Indicates which maintenance interrupts are asserted.
+ */
+union bdk_ap_ich_misr_el2
+{
+ uint32_t u;
+ struct bdk_ap_ich_misr_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_9_31 : 23;
+ uint32_t vsei : 1; /**< [ 8: 8](RO) Virtual SEI. Set to 1 when a condition that would result in
+ generation of an SEI is detected during a virtual access to an
+ ICC_* system register. */
+ uint32_t vgrp1d : 1; /**< [ 7: 7](RO) Disabled Group 1 maintenance interrupt.
+ Asserted whenever AP_ICH_HCR_EL2[VGrp1DIE] is 1 and
+ AP_ICH_VMCR_EL2[VMGrp1En] is 0. */
+ uint32_t vgrp1e : 1; /**< [ 6: 6](RO) Enabled Group 1 maintenance interrupt.
+ Asserted whenever AP_ICH_HCR_EL2[VGrp1EIE] is 1 and
+ AP_ICH_VMCR_EL2[VMGrp1En] is 1. */
+ uint32_t vgrp0d : 1; /**< [ 5: 5](RO) Disabled Group 0 maintenance interrupt.
+ Asserted whenever AP_ICH_HCR_EL2[VGrp0DIE] is 1 and
+ AP_ICH_VMCR_EL2[VMGrp0En] is 0. */
+ uint32_t vgrp0e : 1; /**< [ 4: 4](RO) Enabled Group 0 maintenance interrupt.
+ Asserted whenever AP_ICH_HCR_EL2[VGrp0EIE] is 1 and
+ AP_ICH_VMCR_EL2[VMGrp0En] is 1. */
+ uint32_t np : 1; /**< [ 3: 3](RO) No Pending maintenance interrupt.
+ Asserted whenever AP_ICH_HCR_EL2[NPIE] is 1 and no List register
+ is in pending state. */
+ uint32_t lrenp : 1; /**< [ 2: 2](RO) List Register Entry Not Present maintenance interrupt.
+ Asserted whenever AP_ICH_HCR_EL2[LRENPIE] is 1 and
+ AP_ICH_HCR_EL2[EOIcount] is nonzero. */
+ uint32_t u : 1; /**< [ 1: 1](RO) Underflow maintenance interrupt.
+ Asserted whenever AP_ICH_HCR_EL2[UIE] is 1 and none, or only
+ one, of the List register entries is marked as a valid
+ interrupt, that is, the corresponding ICH_LR\<n\>_EL2[State]
+ bits do not equal 0x0. */
+ uint32_t eoi : 1; /**< [ 0: 0](RO) EOI maintenance interrupt.
+ Asserted whenever at least one List register is asserting an
+ EOI interrupt. That is, when at least one bit in ICH_EISR0_EL1
+ or ICH_EISR1_EL1 is 1. */
+#else /* Word 0 - Little Endian */
+ uint32_t eoi : 1; /**< [ 0: 0](RO) EOI maintenance interrupt.
+ Asserted whenever at least one List register is asserting an
+ EOI interrupt. That is, when at least one bit in ICH_EISR0_EL1
+ or ICH_EISR1_EL1 is 1. */
+ uint32_t u : 1; /**< [ 1: 1](RO) Underflow maintenance interrupt.
+ Asserted whenever AP_ICH_HCR_EL2[UIE] is 1 and none, or only
+ one, of the List register entries is marked as a valid
+ interrupt, that is, the corresponding ICH_LR\<n\>_EL2[State]
+ bits do not equal 0x0. */
+ uint32_t lrenp : 1; /**< [ 2: 2](RO) List Register Entry Not Present maintenance interrupt.
+ Asserted whenever AP_ICH_HCR_EL2[LRENPIE] is 1 and
+ AP_ICH_HCR_EL2[EOIcount] is nonzero. */
+ uint32_t np : 1; /**< [ 3: 3](RO) No Pending maintenance interrupt.
+ Asserted whenever AP_ICH_HCR_EL2[NPIE] is 1 and no List register
+ is in pending state. */
+ uint32_t vgrp0e : 1; /**< [ 4: 4](RO) Enabled Group 0 maintenance interrupt.
+ Asserted whenever AP_ICH_HCR_EL2[VGrp0EIE] is 1 and
+ AP_ICH_VMCR_EL2[VMGrp0En] is 1. */
+ uint32_t vgrp0d : 1; /**< [ 5: 5](RO) Disabled Group 0 maintenance interrupt.
+ Asserted whenever AP_ICH_HCR_EL2[VGrp0DIE] is 1 and
+ AP_ICH_VMCR_EL2[VMGrp0En] is 0. */
+ uint32_t vgrp1e : 1; /**< [ 6: 6](RO) Enabled Group 1 maintenance interrupt.
+ Asserted whenever AP_ICH_HCR_EL2[VGrp1EIE] is 1 and
+ AP_ICH_VMCR_EL2[VMGrp1En] is 1. */
+ uint32_t vgrp1d : 1; /**< [ 7: 7](RO) Disabled Group 1 maintenance interrupt.
+ Asserted whenever AP_ICH_HCR_EL2[VGrp1DIE] is 1 and
+ AP_ICH_VMCR_EL2[VMGrp1En] is 0. */
+ uint32_t vsei : 1; /**< [ 8: 8](RO) Virtual SEI. Set to 1 when a condition that would result in
+ generation of an SEI is detected during a virtual access to an
+ ICC_* system register. */
+ uint32_t reserved_9_31 : 23;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_ich_misr_el2_s cn; */
+};
+typedef union bdk_ap_ich_misr_el2 bdk_ap_ich_misr_el2_t;
+
+#define BDK_AP_ICH_MISR_EL2 BDK_AP_ICH_MISR_EL2_FUNC()
+static inline uint64_t BDK_AP_ICH_MISR_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICH_MISR_EL2_FUNC(void)
+{
+ return 0x3040c0b0200ll;
+}
+
+#define typedef_BDK_AP_ICH_MISR_EL2 bdk_ap_ich_misr_el2_t
+#define bustype_BDK_AP_ICH_MISR_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICH_MISR_EL2 "AP_ICH_MISR_EL2"
+#define busnum_BDK_AP_ICH_MISR_EL2 0
+#define arguments_BDK_AP_ICH_MISR_EL2 -1,-1,-1,-1
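+
+/*
+ * Illustrative sketch, not part of the original BDK import: checking
+ * whether the underflow source is the only asserted maintenance
+ * interrupt cause in an AP_ICH_MISR_EL2 image.
+ */
+static inline int example_ich_misr_underflow_only(uint32_t raw)
+{
+    bdk_ap_ich_misr_el2_t misr;
+    misr.u = raw;
+    return misr.s.u && !(misr.s.eoi || misr.s.lrenp || misr.s.np ||
+                         misr.s.vgrp0e || misr.s.vgrp0d ||
+                         misr.s.vgrp1e || misr.s.vgrp1d || misr.s.vsei);
+}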
+
+/**
+ * Register (SYSREG) ap_ich_vmcr_el2
+ *
+ * AP Interrupt Controller Virtual Machine Control Register
+ * Enables the hypervisor to save and restore the virtual machine
+ * view of the GIC state.
+ */
+union bdk_ap_ich_vmcr_el2
+{
+ uint32_t u;
+ struct bdk_ap_ich_vmcr_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t vpmr : 8; /**< [ 31: 24](R/W) Virtual Priority Mask.
+ Visible to the guest OS as AP_ICC_PMR_EL1 / GICV_PMR. */
+ uint32_t vbpr0 : 3; /**< [ 23: 21](R/W) Virtual BPR0.
+ Visible to the guest OS as AP_ICC_BPR0_EL1 / GICV_BPR. */
+ uint32_t vbpr1 : 3; /**< [ 20: 18](R/W) Virtual BPR1.
+ Visible to the guest OS as AP_ICC_BPR1_EL1 / GICV_ABPR. */
+ uint32_t reserved_10_17 : 8;
+ uint32_t veoim : 1; /**< [ 9: 9](R/W) Virtual EOImode.
+ Visible to the guest OS as AP_ICC_CTLR_EL1[EOImode] /
+ GICV_CTLR[EOImode]. */
+ uint32_t reserved_5_8 : 4;
+ uint32_t vcbpr : 1; /**< [ 4: 4](R/W) Virtual CBPR.
+ Visible to the guest OS as AP_ICC_CTLR_EL1[CBPR] / GICV_CTLR[CBPR]. */
+ uint32_t vfiqen : 1; /**< [ 3: 3](R/W) Virtual FIQ enable.
+ Visible to the guest OS as GICV_CTLR[FIQEn]. */
+ uint32_t vackctl : 1; /**< [ 2: 2](R/W) Virtual AckCtl.
+ Visible to the guest OS as GICV_CTLR[AckCtl]. */
+ uint32_t veng1 : 1; /**< [ 1: 1](R/W) Virtual group 1 interrupt enable.
+ Visible to the guest OS as AP_ICC_IGRPEN1_EL1[Enable] /
+ GICV_CTLR[EnableGrp1]. */
+ uint32_t veng0 : 1; /**< [ 0: 0](R/W) Virtual group 0 interrupt enable.
+ Visible to the guest OS as AP_ICC_IGRPEN0_EL1[Enable] /
+ GICV_CTLR[EnableGrp0]. */
+#else /* Word 0 - Little Endian */
+ uint32_t veng0 : 1; /**< [ 0: 0](R/W) Virtual group 0 interrupt enable.
+ Visible to the guest OS as AP_ICC_IGRPEN0_EL1[Enable] /
+ GICV_CTLR[EnableGrp0]. */
+ uint32_t veng1 : 1; /**< [ 1: 1](R/W) Virtual group 1 interrupt enable.
+ Visible to the guest OS as AP_ICC_IGRPEN1_EL1[Enable] /
+ GICV_CTLR[EnableGrp1]. */
+ uint32_t vackctl : 1; /**< [ 2: 2](R/W) Virtual AckCtl.
+ Visible to the guest OS as GICV_CTLR[AckCtl]. */
+ uint32_t vfiqen : 1; /**< [ 3: 3](R/W) Virtual FIQ enable.
+ Visible to the guest OS as GICV_CTLR[FIQEn]. */
+ uint32_t vcbpr : 1; /**< [ 4: 4](R/W) Virtual CBPR.
+ Visible to the guest OS as AP_ICC_CTLR_EL1[CBPR] / GICV_CTLR[CBPR]. */
+ uint32_t reserved_5_8 : 4;
+ uint32_t veoim : 1; /**< [ 9: 9](R/W) Virtual EOImode.
+ Visible to the guest OS as AP_ICC_CTLR_EL1[EOImode] /
+ GICV_CTLR[EOImode]. */
+ uint32_t reserved_10_17 : 8;
+ uint32_t vbpr1 : 3; /**< [ 20: 18](R/W) Virtual BPR1.
+ Visible to the guest OS as AP_ICC_BPR1_EL1 / GICV_ABPR. */
+ uint32_t vbpr0 : 3; /**< [ 23: 21](R/W) Virtual BPR0.
+ Visible to the guest OS as AP_ICC_BPR0_EL1 / GICV_BPR. */
+ uint32_t vpmr : 8; /**< [ 31: 24](R/W) Virtual Priority Mask.
+ Visible to the guest OS as AP_ICC_PMR_EL1 / GICV_PMR. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_ich_vmcr_el2_s cn; */
+};
+typedef union bdk_ap_ich_vmcr_el2 bdk_ap_ich_vmcr_el2_t;
+
+#define BDK_AP_ICH_VMCR_EL2 BDK_AP_ICH_VMCR_EL2_FUNC()
+static inline uint64_t BDK_AP_ICH_VMCR_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICH_VMCR_EL2_FUNC(void)
+{
+ return 0x3040c0b0700ll;
+}
+
+#define typedef_BDK_AP_ICH_VMCR_EL2 bdk_ap_ich_vmcr_el2_t
+#define bustype_BDK_AP_ICH_VMCR_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICH_VMCR_EL2 "AP_ICH_VMCR_EL2"
+#define busnum_BDK_AP_ICH_VMCR_EL2 0
+#define arguments_BDK_AP_ICH_VMCR_EL2 -1,-1,-1,-1
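+
+/*
+ * Illustrative sketch, not part of the original BDK import: updating the
+ * virtual priority mask in a saved AP_ICH_VMCR_EL2 image. The guest
+ * observes the new value as AP_ICC_PMR_EL1 / GICV_PMR.
+ */
+static inline uint32_t example_ich_vmcr_set_vpmr(uint32_t raw, uint8_t mask)
+{
+    bdk_ap_ich_vmcr_el2_t vmcr;
+    vmcr.u = raw;
+    vmcr.s.vpmr = mask; /* virtual priority mask, bits [31:24] */
+    return vmcr.u;
+}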
+
+/**
+ * Register (SYSREG) ap_ich_vseir_el2
+ *
+ * AP Interrupt Controller Virtual System Error Interrupt Register
+ * Allows the hypervisor to inject a virtual SEI.
+ */
+union bdk_ap_ich_vseir_el2
+{
+ uint32_t u;
+ struct bdk_ap_ich_vseir_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t valid : 1; /**< [ 31: 31](R/W) System error interrupt valid.
+ 0 = No virtual system error exception is pending.
+ 1 = A virtual system error exception is pending for nonsecure
+ EL1. */
+ uint32_t reserved_25_30 : 6;
+ uint32_t syndrome : 25; /**< [ 24: 0](R/W) The value that will be presented in bits [24:0] of ESR_EL1 on
+ entry into the SError exception handler. */
+#else /* Word 0 - Little Endian */
+ uint32_t syndrome : 25; /**< [ 24: 0](R/W) The value that will be presented in bits [24:0] of ESR_EL1 on
+ entry into the SError exception handler. */
+ uint32_t reserved_25_30 : 6;
+ uint32_t valid : 1; /**< [ 31: 31](R/W) System error interrupt valid.
+ 0 = No virtual system error exception is pending.
+ 1 = A virtual system error exception is pending for nonsecure
+ EL1. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_ich_vseir_el2_s cn; */
+};
+typedef union bdk_ap_ich_vseir_el2 bdk_ap_ich_vseir_el2_t;
+
+#define BDK_AP_ICH_VSEIR_EL2 BDK_AP_ICH_VSEIR_EL2_FUNC()
+static inline uint64_t BDK_AP_ICH_VSEIR_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICH_VSEIR_EL2_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x3040c090400ll;
+ __bdk_csr_fatal("AP_ICH_VSEIR_EL2", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_ICH_VSEIR_EL2 bdk_ap_ich_vseir_el2_t
+#define bustype_BDK_AP_ICH_VSEIR_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICH_VSEIR_EL2 "AP_ICH_VSEIR_EL2"
+#define busnum_BDK_AP_ICH_VSEIR_EL2 0
+#define arguments_BDK_AP_ICH_VSEIR_EL2 -1,-1,-1,-1
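+
+/*
+ * Illustrative sketch, not part of the original BDK import: building an
+ * AP_ICH_VSEIR_EL2 image that marks a virtual SError pending for
+ * nonsecure EL1 with the given 25-bit syndrome.
+ */
+static inline uint32_t example_ich_vseir_inject(uint32_t syndrome)
+{
+    bdk_ap_ich_vseir_el2_t vseir = { .u = 0 };
+    vseir.s.syndrome = syndrome & 0x1ffffff; /* lands in ESR_EL1[24:0] */
+    vseir.s.valid    = 1;                    /* virtual SEI now pending */
+    return vseir.u;
+}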
+
+/**
+ * Register (SYSREG) ap_ich_vtr_el2
+ *
+ * AP Interrupt Controller VGIC Type Register
+ * Describes the number of implemented virtual priority bits and
+ * List registers.
+ */
+union bdk_ap_ich_vtr_el2
+{
+ uint32_t u;
+ struct bdk_ap_ich_vtr_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t pribits : 3; /**< [ 31: 29](RO) The number of virtual priority bits implemented, minus one. */
+ uint32_t prebits : 3; /**< [ 28: 26](RO) The number of virtual preemption bits implemented, minus one. */
+ uint32_t idbits : 3; /**< [ 25: 23](RO) The number of virtual interrupt identifier bits supported:
+ All other values are reserved.
+ 0x0 = 16 bits.
+ 0x1 = 24 bits. */
+ uint32_t seis : 1; /**< [ 22: 22](RO) SEI Support. Indicates whether the virtual CPU interface
+ supports generation of SEIs:
+ Virtual system errors may still be generated by writing to
+ AP_ICH_VSEIR_EL2 regardless of the value of this field.
+ 0 = The virtual CPU interface logic does not support generation of
+ SEIs.
+ 1 = The virtual CPU interface logic supports generation of SEIs. */
+ uint32_t a3v : 1; /**< [ 21: 21](RO) Affinity 3 Valid.
+ 0 = The virtual CPU interface logic only supports zero values of
+ Affinity 3 in SGI generation system registers.
+ 1 = The virtual CPU interface logic supports nonzero values of
+ Affinity 3 in SGI generation system registers. */
+ uint32_t reserved_20 : 1;
+ uint32_t tds : 1; /**< [ 19: 19](RO) Separate trapping of nonsecure EL1 writes supported.
+ 0 = Implementation does not support CIM()_ICH_HCR_EL2[TDIR].
+ 1 = Implementation supports CIM()_ICH_HCR_EL2[TDIR]. */
+ uint32_t reserved_5_18 : 14;
+ uint32_t listregs : 5; /**< [ 4: 0](RO) The number of implemented List registers, minus one. */
+#else /* Word 0 - Little Endian */
+ uint32_t listregs : 5; /**< [ 4: 0](RO) The number of implemented List registers, minus one. */
+ uint32_t reserved_5_18 : 14;
+ uint32_t tds : 1; /**< [ 19: 19](RO) Separate trapping of nonsecure EL1 writes supported.
+ 0 = Implementation does not support CIM()_ICH_HCR_EL2[TDIR].
+ 1 = Implementation supports CIM()_ICH_HCR_EL2[TDIR]. */
+ uint32_t reserved_20 : 1;
+ uint32_t a3v : 1; /**< [ 21: 21](RO) Affinity 3 Valid.
+ 0 = The virtual CPU interface logic only supports zero values of
+ Affinity 3 in SGI generation system registers.
+ 1 = The virtual CPU interface logic supports nonzero values of
+ Affinity 3 in SGI generation system registers. */
+ uint32_t seis : 1; /**< [ 22: 22](RO) SEI Support. Indicates whether the virtual CPU interface
+ supports generation of SEIs:
+ Virtual system errors may still be generated by writing to
+ AP_ICH_VSEIR_EL2 regardless of the value of this field.
+ 0 = The virtual CPU interface logic does not support generation of
+ SEIs.
+ 1 = The virtual CPU interface logic supports generation of SEIs. */
+ uint32_t idbits : 3; /**< [ 25: 23](RO) The number of virtual interrupt identifier bits supported:
+ All other values are reserved.
+ 0x0 = 16 bits.
+ 0x1 = 24 bits. */
+ uint32_t prebits : 3; /**< [ 28: 26](RO) The number of virtual preemption bits implemented, minus one. */
+ uint32_t pribits : 3; /**< [ 31: 29](RO) The number of virtual priority bits implemented, minus one. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_ich_vtr_el2_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t pribits : 3; /**< [ 31: 29](RO) The number of virtual priority bits implemented, minus one. */
+ uint32_t prebits : 3; /**< [ 28: 26](RO) The number of virtual preemption bits implemented, minus one. */
+ uint32_t idbits : 3; /**< [ 25: 23](RO) The number of virtual interrupt identifier bits supported:
+ All other values are reserved.
+ 0x0 = 16 bits.
+ 0x1 = 24 bits. */
+ uint32_t seis : 1; /**< [ 22: 22](RO) SEI Support. Indicates whether the virtual CPU interface
+ supports generation of SEIs:
+ Virtual system errors may still be generated by writing to
+ AP_ICH_VSEIR_EL2 regardless of the value of this field.
+ 0 = The virtual CPU interface logic does not support generation of
+ SEIs.
+ 1 = The virtual CPU interface logic supports generation of SEIs. */
+ uint32_t a3v : 1; /**< [ 21: 21](RO) Affinity 3 Valid.
+ 0 = The virtual CPU interface logic only supports zero values of
+ Affinity 3 in SGI generation system registers.
+ 1 = The virtual CPU interface logic supports nonzero values of
+ Affinity 3 in SGI generation system registers. */
+ uint32_t reserved_5_20 : 16;
+ uint32_t listregs : 5; /**< [ 4: 0](RO) The number of implemented List registers, minus one. */
+#else /* Word 0 - Little Endian */
+ uint32_t listregs : 5; /**< [ 4: 0](RO) The number of implemented List registers, minus one. */
+ uint32_t reserved_5_20 : 16;
+ uint32_t a3v : 1; /**< [ 21: 21](RO) Affinity 3 Valid.
+ 0 = The virtual CPU interface logic only supports zero values of
+ Affinity 3 in SGI generation system registers.
+ 1 = The virtual CPU interface logic supports nonzero values of
+ Affinity 3 in SGI generation system registers. */
+ uint32_t seis : 1; /**< [ 22: 22](RO) SEI Support. Indicates whether the virtual CPU interface
+ supports generation of SEIs:
+ Virtual system errors may still be generated by writing to
+ AP_ICH_VSEIR_EL2 regardless of the value of this field.
+ 0 = The virtual CPU interface logic does not support generation of
+ SEIs.
+ 1 = The virtual CPU interface logic supports generation of SEIs. */
+ uint32_t idbits : 3; /**< [ 25: 23](RO) The number of virtual interrupt identifier bits supported:
+ All other values are reserved.
+ 0x0 = 16 bits.
+ 0x1 = 24 bits. */
+ uint32_t prebits : 3; /**< [ 28: 26](RO) The number of virtual preemption bits implemented, minus one. */
+ uint32_t pribits : 3; /**< [ 31: 29](RO) The number of virtual priority bits implemented, minus one. */
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_ap_ich_vtr_el2_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t pribits : 3; /**< [ 31: 29](RO) The number of virtual priority bits implemented, minus one. */
+ uint32_t prebits : 3; /**< [ 28: 26](RO) The number of virtual preemption bits implemented, minus one. */
+ uint32_t idbits : 3; /**< [ 25: 23](RO) The number of virtual interrupt identifier bits supported:
+ All other values are reserved.
+ 0x0 = 16 bits.
+ 0x1 = 24 bits. */
+ uint32_t seis : 1; /**< [ 22: 22](RO) SEI Support. Indicates whether the virtual CPU interface
+ supports generation of SEIs:
+ 0 = The virtual CPU interface logic does not support generation of
+ SEIs.
+ 1 = The virtual CPU interface logic supports generation of SEIs. */
+ uint32_t a3v : 1; /**< [ 21: 21](RO) Affinity 3 Valid.
+ 0 = The virtual CPU interface logic only supports zero values of
+ Affinity 3 in SGI generation system registers.
+ 1 = The virtual CPU interface logic supports nonzero values of
+ Affinity 3 in SGI generation system registers. */
+ uint32_t reserved_20 : 1;
+ uint32_t tds : 1; /**< [ 19: 19](RO) Separate trapping of nonsecure EL1 writes supported.
+ 0 = Implementation does not support CIM()_ICH_HCR_EL2[TDIR].
+ 1 = Implementation supports CIM()_ICH_HCR_EL2[TDIR]. */
+ uint32_t reserved_5_18 : 14;
+ uint32_t listregs : 5; /**< [ 4: 0](RO) The number of implemented List registers, minus one. */
+#else /* Word 0 - Little Endian */
+ uint32_t listregs : 5; /**< [ 4: 0](RO) The number of implemented List registers, minus one. */
+ uint32_t reserved_5_18 : 14;
+ uint32_t tds : 1; /**< [ 19: 19](RO) Separate trapping of nonsecure EL1 writes supported.
+ 0 = Implementation does not support CIM()_ICH_HCR_EL2[TDIR].
+ 1 = Implementation supports CIM()_ICH_HCR_EL2[TDIR]. */
+ uint32_t reserved_20 : 1;
+ uint32_t a3v : 1; /**< [ 21: 21](RO) Affinity 3 Valid.
+ 0 = The virtual CPU interface logic only supports zero values of
+ Affinity 3 in SGI generation system registers.
+ 1 = The virtual CPU interface logic supports nonzero values of
+ Affinity 3 in SGI generation system registers. */
+ uint32_t seis : 1; /**< [ 22: 22](RO) SEI Support. Indicates whether the virtual CPU interface
+ supports generation of SEIs:
+ 0 = The virtual CPU interface logic does not support generation of
+ SEIs.
+ 1 = The virtual CPU interface logic supports generation of SEIs. */
+ uint32_t idbits : 3; /**< [ 25: 23](RO) The number of virtual interrupt identifier bits supported:
+ All other values are reserved.
+ 0x0 = 16 bits.
+ 0x1 = 24 bits. */
+ uint32_t prebits : 3; /**< [ 28: 26](RO) The number of virtual preemption bits implemented, minus one. */
+ uint32_t pribits : 3; /**< [ 31: 29](RO) The number of virtual priority bits implemented, minus one. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_ap_ich_vtr_el2 bdk_ap_ich_vtr_el2_t;
+
+#define BDK_AP_ICH_VTR_EL2 BDK_AP_ICH_VTR_EL2_FUNC()
+static inline uint64_t BDK_AP_ICH_VTR_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ICH_VTR_EL2_FUNC(void)
+{
+ return 0x3040c0b0100ll;
+}
+
+#define typedef_BDK_AP_ICH_VTR_EL2 bdk_ap_ich_vtr_el2_t
+#define bustype_BDK_AP_ICH_VTR_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ICH_VTR_EL2 "AP_ICH_VTR_EL2"
+#define busnum_BDK_AP_ICH_VTR_EL2 0
+#define arguments_BDK_AP_ICH_VTR_EL2 -1,-1,-1,-1
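+
+/*
+ * Illustrative sketch, not part of the original BDK import: the count
+ * fields in AP_ICH_VTR_EL2 are encoded minus one, so the number of
+ * implemented List registers is [LISTREGS]+1 (and likewise for the
+ * priority and preemption bit counts).
+ */
+static inline unsigned example_ich_vtr_num_lrs(uint32_t raw)
+{
+    bdk_ap_ich_vtr_el2_t vtr;
+    vtr.u = raw;
+    return (unsigned)vtr.s.listregs + 1; /* field holds count minus one */
+}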
+
+/**
+ * Register (SYSREG) ap_id_aa64afr#_el1_res0
+ *
+ * INTERNAL: AP AArch64 Reserved Register
+ *
+ * Reserved for future expansion of information about the
+ * implementation defined features of the processor in AArch64.
+ * ARM doesn't actually assign a name to these registers, so
+ * CNXXXX made up one.
+ */
+union bdk_ap_id_aa64afrx_el1_res0
+{
+ uint64_t u;
+ struct bdk_ap_id_aa64afrx_el1_res0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_id_aa64afrx_el1_res0_s cn; */
+};
+typedef union bdk_ap_id_aa64afrx_el1_res0 bdk_ap_id_aa64afrx_el1_res0_t;
+
+static inline uint64_t BDK_AP_ID_AA64AFRX_EL1_RES0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ID_AA64AFRX_EL1_RES0(unsigned long a)
+{
+ if ((a>=2)&&(a<=3))
+ return 0x30000050400ll + 0x100ll * ((a) & 0x3);
+ __bdk_csr_fatal("AP_ID_AA64AFRX_EL1_RES0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_ID_AA64AFRX_EL1_RES0(a) bdk_ap_id_aa64afrx_el1_res0_t
+#define bustype_BDK_AP_ID_AA64AFRX_EL1_RES0(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ID_AA64AFRX_EL1_RES0(a) "AP_ID_AA64AFRX_EL1_RES0"
+#define busnum_BDK_AP_ID_AA64AFRX_EL1_RES0(a) (a)
+#define arguments_BDK_AP_ID_AA64AFRX_EL1_RES0(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_id_aa64afr0_el1
+ *
+ * AP AArch64 Auxiliary Feature Register 0
+ * Provides information about the implementation defined features
+ * of the processor in AArch64.
+ */
+union bdk_ap_id_aa64afr0_el1
+{
+ uint64_t u;
+ struct bdk_ap_id_aa64afr0_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_id_aa64afr0_el1_s cn; */
+};
+typedef union bdk_ap_id_aa64afr0_el1 bdk_ap_id_aa64afr0_el1_t;
+
+#define BDK_AP_ID_AA64AFR0_EL1 BDK_AP_ID_AA64AFR0_EL1_FUNC()
+static inline uint64_t BDK_AP_ID_AA64AFR0_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ID_AA64AFR0_EL1_FUNC(void)
+{
+ return 0x30000050400ll;
+}
+
+#define typedef_BDK_AP_ID_AA64AFR0_EL1 bdk_ap_id_aa64afr0_el1_t
+#define bustype_BDK_AP_ID_AA64AFR0_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ID_AA64AFR0_EL1 "AP_ID_AA64AFR0_EL1"
+#define busnum_BDK_AP_ID_AA64AFR0_EL1 0
+#define arguments_BDK_AP_ID_AA64AFR0_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_id_aa64afr1_el1
+ *
+ * AP AArch64 Auxiliary Feature Register 1
+ * Reserved for future expansion of information about the
+ * implementation defined features of the processor in AArch64.
+ */
+union bdk_ap_id_aa64afr1_el1
+{
+ uint64_t u;
+ struct bdk_ap_id_aa64afr1_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_id_aa64afr1_el1_s cn; */
+};
+typedef union bdk_ap_id_aa64afr1_el1 bdk_ap_id_aa64afr1_el1_t;
+
+#define BDK_AP_ID_AA64AFR1_EL1 BDK_AP_ID_AA64AFR1_EL1_FUNC()
+static inline uint64_t BDK_AP_ID_AA64AFR1_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ID_AA64AFR1_EL1_FUNC(void)
+{
+ return 0x30000050500ll;
+}
+
+#define typedef_BDK_AP_ID_AA64AFR1_EL1 bdk_ap_id_aa64afr1_el1_t
+#define bustype_BDK_AP_ID_AA64AFR1_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ID_AA64AFR1_EL1 "AP_ID_AA64AFR1_EL1"
+#define busnum_BDK_AP_ID_AA64AFR1_EL1 0
+#define arguments_BDK_AP_ID_AA64AFR1_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_id_aa64dfr#_el1_res0
+ *
+ * INTERNAL: AP AArch64 Reserved Register
+ *
+ * Reserved for future expansion of top level information about
+ * the debug system in AArch64. ARM doesn't actually assign
+ * a name to these registers, so CNXXXX made up one.
+ */
+union bdk_ap_id_aa64dfrx_el1_res0
+{
+ uint64_t u;
+ struct bdk_ap_id_aa64dfrx_el1_res0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_id_aa64dfrx_el1_res0_s cn; */
+};
+typedef union bdk_ap_id_aa64dfrx_el1_res0 bdk_ap_id_aa64dfrx_el1_res0_t;
+
+static inline uint64_t BDK_AP_ID_AA64DFRX_EL1_RES0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ID_AA64DFRX_EL1_RES0(unsigned long a)
+{
+ if ((a>=2)&&(a<=3))
+ return 0x30000050000ll + 0x100ll * ((a) & 0x3);
+ __bdk_csr_fatal("AP_ID_AA64DFRX_EL1_RES0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_ID_AA64DFRX_EL1_RES0(a) bdk_ap_id_aa64dfrx_el1_res0_t
+#define bustype_BDK_AP_ID_AA64DFRX_EL1_RES0(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ID_AA64DFRX_EL1_RES0(a) "AP_ID_AA64DFRX_EL1_RES0"
+#define busnum_BDK_AP_ID_AA64DFRX_EL1_RES0(a) (a)
+#define arguments_BDK_AP_ID_AA64DFRX_EL1_RES0(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_id_aa64dfr0_el1
+ *
+ * AP AArch64 Debug Feature Register 0
+ * This register provides top level information about the debug system in AArch64.
+ */
+union bdk_ap_id_aa64dfr0_el1
+{
+ uint64_t u;
+ struct bdk_ap_id_aa64dfr0_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t pmsver : 4; /**< [ 35: 32](RO) Statistical profiling extension version.
+ 0x0 = No statistical profiling extension.
+ 0x1 = Version 1 of the statistical profiling extension present.
+
+ All other values are reserved. Reserved values might be defined in a future
+ version of the architecture. */
+ uint64_t ctx_cmps : 4; /**< [ 31: 28](RO) Number of breakpoints that are context-aware, minus 1. These
+ are the highest numbered breakpoints.
+
+ In CNXXXX all breakpoints are context-aware. */
+ uint64_t reserved_24_27 : 4;
+ uint64_t wrps : 4; /**< [ 23: 20](RO) Number of watchpoints, minus 1. The value of 0b0000 is reserved.
+
+ In CNXXXX 4 watchpoints. */
+ uint64_t reserved_16_19 : 4;
+ uint64_t brps : 4; /**< [ 15: 12](RO) Number of breakpoints, minus 1. The value of 0b0000 is reserved.
+
+ In CNXXXX 6 breakpoints. */
+ uint64_t pmuver : 4; /**< [ 11: 8](RO) Performance Monitors extension version. Indicates whether
+ system register interface to Performance Monitors extension is
+ implemented.
+ All other values are reserved.
+ 0x0 = Performance Monitors extension system registers not
+ implemented.
+ 0x1 = Performance Monitors extension system registers implemented,
+ PMUv3.
+ 0x4 = v8.1: Performance Monitors extension system registers
+ implemented, PMUv3 with 16-bit evtCount.
+ 0xF = implementation defined form of performance monitors
+ supported, PMUv3 not supported.
+
+ For CNXXXX, if AP_CVMCTL_EL1[ENABLE_V81] is set 0x4, else 0x1. */
+ uint64_t tracever : 4; /**< [ 7: 4](RO) Trace extension. Indicates whether system register interface to the trace
+ extension is implemented.
+ All other values are reserved.
+ 0x0 = Trace extension system registers not implemented.
+ 0x1 = Trace extension system registers implemented. */
+ uint64_t debugver : 4; /**< [ 3: 0](RO) Debug architecture version. Indicates presence of ARMv8 debug
+ architecture.
+ All other values are reserved.
+ 0x6 = ARMv8 debug architecture.
+ 0x7 = ARMv8.1 debug architecture.
+ 0x8 = ARMv8.2 debug architecture.
+
+ For CNXXXX, if AP_CVMCTL_EL1[ENABLE_V81] is set 0x7, else 0x6. */
+#else /* Word 0 - Little Endian */
+ uint64_t debugver : 4; /**< [ 3: 0](RO) Debug architecture version. Indicates presence of ARMv8 debug
+ architecture.
+ All other values are reserved.
+ 0x6 = ARMv8 debug architecture.
+ 0x7 = ARMv8.1 debug architecture.
+ 0x8 = ARMv8.2 debug architecture.
+
+ For CNXXXX, if AP_CVMCTL_EL1[ENABLE_V81] is set 0x7, else 0x6. */
+ uint64_t tracever : 4; /**< [ 7: 4](RO) Trace extension. Indicates whether system register interface to the trace
+ extension is implemented.
+ All other values are reserved.
+ 0x0 = Trace extension system registers not implemented.
+ 0x1 = Trace extension system registers implemented. */
+ uint64_t pmuver : 4; /**< [ 11: 8](RO) Performance Monitors extension version. Indicates whether
+ system register interface to Performance Monitors extension is
+ implemented.
+ All other values are reserved.
+ 0x0 = Performance Monitors extension system registers not
+ implemented.
+ 0x1 = Performance Monitors extension system registers implemented,
+ PMUv3.
+ 0x4 = v8.1: Performance Monitors extension system registers
+ implemented, PMUv3 with 16-bit evtCount.
+ 0xF = implementation defined form of performance monitors
+ supported, PMUv3 not supported.
+
+ For CNXXXX, if AP_CVMCTL_EL1[ENABLE_V81] is set 0x4, else 0x1. */
+ uint64_t brps : 4; /**< [ 15: 12](RO) Number of breakpoints, minus 1. The value of 0b0000 is reserved.
+
+ In CNXXXX 6 breakpoints. */
+ uint64_t reserved_16_19 : 4;
+ uint64_t wrps : 4; /**< [ 23: 20](RO) Number of watchpoints, minus 1. The value of 0b0000 is reserved.
+
+ In CNXXXX 4 watchpoints. */
+ uint64_t reserved_24_27 : 4;
+ uint64_t ctx_cmps : 4; /**< [ 31: 28](RO) Number of breakpoints that are context-aware, minus 1. These
+ are the highest numbered breakpoints.
+
+ In CNXXXX all breakpoints are context-aware. */
+ uint64_t pmsver : 4; /**< [ 35: 32](RO) Statistical profiling extension version.
+ 0x0 = No statistical profiling extension.
+ 0x1 = Version 1 of the statistical profiling extension present.
+
+ All other values are reserved. Reserved values might be defined in a future
+ version of the architecture. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_id_aa64dfr0_el1_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t ctx_cmps : 4; /**< [ 31: 28](RO) Number of breakpoints that are context-aware, minus 1. These
+ are the highest numbered breakpoints.
+
+ In CNXXXX all breakpoints are context-aware. */
+ uint64_t reserved_24_27 : 4;
+ uint64_t wrps : 4; /**< [ 23: 20](RO) Number of watchpoints, minus 1. The value of 0b0000 is reserved.
+
+ In CNXXXX 4 watchpoints. */
+ uint64_t reserved_16_19 : 4;
+ uint64_t brps : 4; /**< [ 15: 12](RO) Number of breakpoints, minus 1. The value of 0b0000 is reserved.
+
+ In CNXXXX 6 breakpoints. */
+ uint64_t pmuver : 4; /**< [ 11: 8](RO) Performance Monitors extension version. Indicates whether
+ system register interface to Performance Monitors extension is
+ implemented.
+ All other values are reserved.
+ 0x0 = Performance Monitors extension system registers not
+ implemented.
+ 0x1 = Performance Monitors extension system registers implemented,
+ PMUv3.
+ 0x4 = v8.1: Performance Monitors extension system registers
+ implemented, PMUv3 with 16-bit evtCount.
+ 0xF = implementation defined form of performance monitors
+ supported, PMUv3 not supported.
+
+ For CNXXXX, if AP_CVMCTL_EL1[ENABLE_V81] is set 0x4, else 0x1. */
+ uint64_t tracever : 4; /**< [ 7: 4](RO) Trace extension. Indicates whether system register interface to the trace
+ extension is implemented.
+ All other values are reserved.
+ 0x0 = Trace extension system registers not implemented.
+ 0x1 = Trace extension system registers implemented. */
+ uint64_t debugver : 4; /**< [ 3: 0](RO) Debug architecture version. Indicates presence of ARMv8 debug
+ architecture.
+ All other values are reserved.
+ 0x6 = ARMv8 debug architecture.
+ 0x7 = ARMv8.1 debug architecture.
+ 0x8 = ARMv8.2 debug architecture.
+
+ For CNXXXX, if AP_CVMCTL_EL1[ENABLE_V81] is set 0x7, else 0x6. */
+#else /* Word 0 - Little Endian */
+ uint64_t debugver : 4; /**< [ 3: 0](RO) Debug architecture version. Indicates presence of ARMv8 debug
+ architecture.
+ All other values are reserved.
+ 0x6 = ARMv8 debug architecture.
+ 0x7 = ARMv8.1 debug architecture.
+ 0x8 = ARMv8.2 debug architecture.
+
+ For CNXXXX, if AP_CVMCTL_EL1[ENABLE_V81] is set 0x7, else 0x6. */
+ uint64_t tracever : 4; /**< [ 7: 4](RO) Trace extension. Indicates whether system register interface to the trace
+ extension is implemented.
+ All other values are reserved.
+ 0x0 = Trace extension system registers not implemented.
+ 0x1 = Trace extension system registers implemented. */
+ uint64_t pmuver : 4; /**< [ 11: 8](RO) Performance Monitors extension version. Indicates whether
+ system register interface to Performance Monitors extension is
+ implemented.
+ All other values are reserved.
+ 0x0 = Performance Monitors extension system registers not
+ implemented.
+ 0x1 = Performance Monitors extension system registers implemented,
+ PMUv3.
+ 0x4 = v8.1: Performance Monitors extension system registers
+ implemented, PMUv3 with 16-bit evtCount.
+ 0xF = implementation defined form of performance monitors
+ supported, PMUv3 not supported.
+
+ For CNXXXX, if AP_CVMCTL_EL1[ENABLE_V81] is set 0x4, else 0x1. */
+ uint64_t brps : 4; /**< [ 15: 12](RO) Number of breakpoints, minus 1. The value of 0b0000 is reserved.
+
+ In CNXXXX 6 breakpoints. */
+ uint64_t reserved_16_19 : 4;
+ uint64_t wrps : 4; /**< [ 23: 20](RO) Number of watchpoints, minus 1. The value of 0b0000 is reserved.
+
+ In CNXXXX 4 watchpoints. */
+ uint64_t reserved_24_27 : 4;
+ uint64_t ctx_cmps : 4; /**< [ 31: 28](RO) Number of breakpoints that are context-aware, minus 1. These
+ are the highest numbered breakpoints.
+
+ In CNXXXX all breakpoints are context-aware. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_ap_id_aa64dfr0_el1_s cn9; */
+};
+typedef union bdk_ap_id_aa64dfr0_el1 bdk_ap_id_aa64dfr0_el1_t;
+
+#define BDK_AP_ID_AA64DFR0_EL1 BDK_AP_ID_AA64DFR0_EL1_FUNC()
+static inline uint64_t BDK_AP_ID_AA64DFR0_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ID_AA64DFR0_EL1_FUNC(void)
+{
+ return 0x30000050000ll;
+}
+
+#define typedef_BDK_AP_ID_AA64DFR0_EL1 bdk_ap_id_aa64dfr0_el1_t
+#define bustype_BDK_AP_ID_AA64DFR0_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ID_AA64DFR0_EL1 "AP_ID_AA64DFR0_EL1"
+#define busnum_BDK_AP_ID_AA64DFR0_EL1 0
+#define arguments_BDK_AP_ID_AA64DFR0_EL1 -1,-1,-1,-1
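+
+/*
+ * Illustrative sketch, not part of the original BDK import: like the VTR
+ * counts above, [BRPS]/[WRPS] are encoded minus one, so a CNXXXX part
+ * reporting [BRPS]=5 implements 6 breakpoints.
+ */
+static inline unsigned example_dfr0_num_breakpoints(uint64_t raw)
+{
+    bdk_ap_id_aa64dfr0_el1_t dfr0;
+    dfr0.u = raw;
+    return (unsigned)dfr0.s.brps + 1; /* field holds count minus one */
+}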
+
+/**
+ * Register (SYSREG) ap_id_aa64dfr1_el1
+ *
+ * AP AArch64 Debug Feature Register 1
+ * Reserved for future expansion of top level information about
+ * the debug system in AArch64.
+ */
+union bdk_ap_id_aa64dfr1_el1
+{
+ uint64_t u;
+ struct bdk_ap_id_aa64dfr1_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_id_aa64dfr1_el1_s cn; */
+};
+typedef union bdk_ap_id_aa64dfr1_el1 bdk_ap_id_aa64dfr1_el1_t;
+
+#define BDK_AP_ID_AA64DFR1_EL1 BDK_AP_ID_AA64DFR1_EL1_FUNC()
+static inline uint64_t BDK_AP_ID_AA64DFR1_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ID_AA64DFR1_EL1_FUNC(void)
+{
+ return 0x30000050100ll;
+}
+
+#define typedef_BDK_AP_ID_AA64DFR1_EL1 bdk_ap_id_aa64dfr1_el1_t
+#define bustype_BDK_AP_ID_AA64DFR1_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ID_AA64DFR1_EL1 "AP_ID_AA64DFR1_EL1"
+#define busnum_BDK_AP_ID_AA64DFR1_EL1 0
+#define arguments_BDK_AP_ID_AA64DFR1_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_id_aa64isar#_el1_res0
+ *
+ * INTERNAL: AP AArch64 Reserved Register
+ *
+ * Reserved for future expansion of the information about the
+ * instruction sets implemented by the processor in AArch64.
+ * ARM doesn't actually assign a name to these registers, so
+ * CNXXXX made up one.
+ */
+union bdk_ap_id_aa64isarx_el1_res0
+{
+ uint64_t u;
+ struct bdk_ap_id_aa64isarx_el1_res0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_id_aa64isarx_el1_res0_s cn; */
+};
+typedef union bdk_ap_id_aa64isarx_el1_res0 bdk_ap_id_aa64isarx_el1_res0_t;
+
+static inline uint64_t BDK_AP_ID_AA64ISARX_EL1_RES0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ID_AA64ISARX_EL1_RES0(unsigned long a)
+{
+ if ((a>=2)&&(a<=7))
+ return 0x30000060000ll + 0x100ll * ((a) & 0x7);
+ __bdk_csr_fatal("AP_ID_AA64ISARX_EL1_RES0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_ID_AA64ISARX_EL1_RES0(a) bdk_ap_id_aa64isarx_el1_res0_t
+#define bustype_BDK_AP_ID_AA64ISARX_EL1_RES0(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ID_AA64ISARX_EL1_RES0(a) "AP_ID_AA64ISARX_EL1_RES0"
+#define busnum_BDK_AP_ID_AA64ISARX_EL1_RES0(a) (a)
+#define arguments_BDK_AP_ID_AA64ISARX_EL1_RES0(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_id_aa64isar0_el1
+ *
+ * AP AArch64 Instruction Set Attribute Register 0
+ * This register provides information about the instructions implemented by the
+ * processor in AArch64.
+ */
+union bdk_ap_id_aa64isar0_el1
+{
+ uint64_t u;
+ struct bdk_ap_id_aa64isar0_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t sqrdml : 4; /**< [ 31: 28](RO) 0x0 = SQRDMLAH and SQRDMLSH not supported in AArch64.
+ 0x1 = SQRDMLAH and SQRDMLSH supported in AArch64.
+ All other values reserved.
+
+ In CNXXXX, 0x1 if AP_CVMCTL_EL1[ENABLE_V81] is set, else 0x0. */
+ uint64_t reserved_24_27 : 4;
+ uint64_t atomic : 4; /**< [ 23: 20](RO) Atomic instructions in AArch64.
+ 0x0 = No Atomic instructions implemented.
+ 0x1 = Reserved.
+ 0x2 = LDADD, LDCLR, LDEOR, LDSET, LDSMAX, LDSMIN, LDUMAX, LDUMIN, CAS, CASP,
+ SWP instructions implemented.
+
+ For CNXXXX, 0x2. */
+ uint64_t crc32 : 4; /**< [ 19: 16](RO) CRC32 instructions in AArch64.
+ All other values are reserved.
+ This field must have the same value as ID_ISAR5[CRC32]. The
+ architecture requires that if CRC32 is supported in one
+ Execution state, it must be supported in both Execution
+ states.
+ 0x0 = No CRC32 instructions implemented.
+ 0x1 = CRC32B, CRC32H, CRC32W, CRC32X, CRC32CB, CRC32CH, CRC32CW, and
+ CRC32CX instructions implemented.
+
+ In CNXXXX, supported unless crypto disabled by MIO_FUS_DAT2[NOCRYPTO]. */
+ uint64_t sha2 : 4; /**< [ 15: 12](RO) SHA2 instructions in AArch64.
+ All other values are reserved.
+ 0x0 = No SHA2 instructions implemented.
+ 0x1 = SHA256H, SHA256H2, SHA256SU0, and SHA256SU1 instructions
+ implemented.
+
+ In CNXXXX, supported unless crypto disabled by MIO_FUS_DAT2[NOCRYPTO]. */
+ uint64_t sha1 : 4; /**< [ 11: 8](RO) SHA1 instructions in AArch64.
+ All other values are reserved.
+ 0x0 = No SHA1 instructions implemented.
+ 0x1 = SHA1C, SHA1P, SHA1M, SHA1H, SHA1SU0, and SHA1SU1 instructions
+ implemented.
+
+ In CNXXXX, supported unless crypto disabled by MIO_FUS_DAT2[NOCRYPTO]. */
+ uint64_t aes : 4; /**< [ 7: 4](RO) AES instructions in AArch64.
+ 0x0 = No AES instructions implemented.
+ 0x1 = AESE, AESD, AESMC, and AESIMC instructions implemented.
+ 0x2 = As for 0x1, plus PMULL/PMULL2 instructions operate on
+ 64-bit data quantities.
+
+ In CNXXXX, supported with PMULL/PMULL2. */
+ uint64_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_3 : 4;
+ uint64_t aes : 4; /**< [ 7: 4](RO) AES instructions in AArch64.
+ 0x0 = No AES instructions implemented.
+ 0x1 = AESE, AESD, AESMC, and AESIMC instructions implemented.
+ 0x2 = As for 0x1, plus PMULL/PMULL2 instructions operate on
+ 64-bit data quantities.
+
+ In CNXXXX, supported with PMULL/PMULL2. */
+ uint64_t sha1 : 4; /**< [ 11: 8](RO) SHA1 instructions in AArch64.
+ All other values are reserved.
+ 0x0 = No SHA1 instructions implemented.
+ 0x1 = SHA1C, SHA1P, SHA1M, SHA1H, SHA1SU0, and SHA1SU1 instructions
+ implemented.
+
+ In CNXXXX, supported unless crypto disabled by MIO_FUS_DAT2[NOCRYPTO]. */
+ uint64_t sha2 : 4; /**< [ 15: 12](RO) SHA2 instructions in AArch64.
+ All other values are reserved.
+ 0x0 = No SHA2 instructions implemented.
+ 0x1 = SHA256H, SHA256H2, SHA256SU0, and SHA256SU1 instructions
+ implemented.
+
+ In CNXXXX, supported unless crypto disabled by MIO_FUS_DAT2[NOCRYPTO]. */
+ uint64_t crc32 : 4; /**< [ 19: 16](RO) CRC32 instructions in AArch64.
+ All other values are reserved.
+ This field must have the same value as ID_ISAR5[CRC32]. The
+ architecture requires that if CRC32 is supported in one
+ Execution state, it must be supported in both Execution
+ states.
+ 0x0 = No CRC32 instructions implemented.
+ 0x1 = CRC32B, CRC32H, CRC32W, CRC32X, CRC32CB, CRC32CH, CRC32CW, and
+ CRC32CX instructions implemented.
+
+ In CNXXXX, supported unless crypto disabled by MIO_FUS_DAT2[NOCRYPTO]. */
+ uint64_t atomic : 4; /**< [ 23: 20](RO) Atomic instructions in AArch64.
+ 0x0 = No Atomic instructions implemented.
+ 0x1 = Reserved.
+ 0x2 = LDADD, LDCLR, LDEOR, LDSET, LDSMAX, LDSMIN, LDUMAX, LDUMIN, CAS, CASP,
+ SWP instructions implemented.
+
+ For CNXXXX, 0x2. */
+ uint64_t reserved_24_27 : 4;
+ uint64_t sqrdml : 4; /**< [ 31: 28](RO) 0x0 = SQRDMLAH and SQRDMLSH not supported in AArch64.
+ 0x1 = SQRDMLAH and SQRDMLSH supported in AArch64.
+ All other values reserved.
+
+ In CNXXXX, 0x1 if AP_CVMCTL_EL1[ENABLE_V81] is set, else 0x0. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_id_aa64isar0_el1_s cn; */
+};
+typedef union bdk_ap_id_aa64isar0_el1 bdk_ap_id_aa64isar0_el1_t;
+
+#define BDK_AP_ID_AA64ISAR0_EL1 BDK_AP_ID_AA64ISAR0_EL1_FUNC()
+static inline uint64_t BDK_AP_ID_AA64ISAR0_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ID_AA64ISAR0_EL1_FUNC(void)
+{
+ return 0x30000060000ll;
+}
+
+#define typedef_BDK_AP_ID_AA64ISAR0_EL1 bdk_ap_id_aa64isar0_el1_t
+#define bustype_BDK_AP_ID_AA64ISAR0_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ID_AA64ISAR0_EL1 "AP_ID_AA64ISAR0_EL1"
+#define busnum_BDK_AP_ID_AA64ISAR0_EL1 0
+#define arguments_BDK_AP_ID_AA64ISAR0_EL1 -1,-1,-1,-1
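+
+/*
+ * Illustrative sketch, not part of the original BDK import: testing for
+ * the v8.1 atomic (LSE) instructions from an AP_ID_AA64ISAR0_EL1 image.
+ */
+static inline int example_isar0_has_atomics(uint64_t raw)
+{
+    bdk_ap_id_aa64isar0_el1_t isar0;
+    isar0.u = raw;
+    return isar0.s.atomic == 0x2; /* LDADD/LDCLR/.../CAS/CASP/SWP present */
+}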
+
+/**
+ * Register (SYSREG) ap_id_aa64isar1_el1
+ *
+ * AP AArch64 Instruction Set Attribute Register 1
+ * Reserved for future expansion of the information about the
+ * instruction sets implemented by the processor in AArch64.
+ */
+union bdk_ap_id_aa64isar1_el1
+{
+ uint64_t u;
+ struct bdk_ap_id_aa64isar1_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t dpb : 4; /**< [ 3: 0](RO) 0x0 = DC CVAP not supported in AArch64.
+ 0x1 = DC CVAP supported in AArch64.
+
+ All other values reserved. */
+#else /* Word 0 - Little Endian */
+ uint64_t dpb : 4; /**< [ 3: 0](RO) 0x0 = DC CVAP not supported in AArch64.
+ 0x1 = DC CVAP supported in AArch64.
+
+ All other values reserved. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_id_aa64isar1_el1_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_ap_id_aa64isar1_el1_s cn9; */
+};
+typedef union bdk_ap_id_aa64isar1_el1 bdk_ap_id_aa64isar1_el1_t;
+
+#define BDK_AP_ID_AA64ISAR1_EL1 BDK_AP_ID_AA64ISAR1_EL1_FUNC()
+static inline uint64_t BDK_AP_ID_AA64ISAR1_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ID_AA64ISAR1_EL1_FUNC(void)
+{
+ return 0x30000060100ll;
+}
+
+#define typedef_BDK_AP_ID_AA64ISAR1_EL1 bdk_ap_id_aa64isar1_el1_t
+#define bustype_BDK_AP_ID_AA64ISAR1_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ID_AA64ISAR1_EL1 "AP_ID_AA64ISAR1_EL1"
+#define busnum_BDK_AP_ID_AA64ISAR1_EL1 0
+#define arguments_BDK_AP_ID_AA64ISAR1_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_id_aa64mmfr#_el1_res0
+ *
+ * INTERNAL: AP AArch64 Reserved Register
+ *
+ * Reserved for future expansion of the information about the
+ * implemented memory model and memory management support in
+ * AArch64. ARM doesn't actually assign a name to these
+ * registers, so CNXXXX made up one.
+ */
+union bdk_ap_id_aa64mmfrx_el1_res0
+{
+ uint64_t u;
+ struct bdk_ap_id_aa64mmfrx_el1_res0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_id_aa64mmfrx_el1_res0_s cn; */
+};
+typedef union bdk_ap_id_aa64mmfrx_el1_res0 bdk_ap_id_aa64mmfrx_el1_res0_t;
+
+static inline uint64_t BDK_AP_ID_AA64MMFRX_EL1_RES0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ID_AA64MMFRX_EL1_RES0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX) && ((a>=2)&&(a<=7)))
+ return 0x30000070000ll + 0x100ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a>=3)&&(a<=7)))
+ return 0x30000070000ll + 0x100ll * ((a) & 0x7);
+ __bdk_csr_fatal("AP_ID_AA64MMFRX_EL1_RES0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_ID_AA64MMFRX_EL1_RES0(a) bdk_ap_id_aa64mmfrx_el1_res0_t
+#define bustype_BDK_AP_ID_AA64MMFRX_EL1_RES0(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ID_AA64MMFRX_EL1_RES0(a) "AP_ID_AA64MMFRX_EL1_RES0"
+#define busnum_BDK_AP_ID_AA64MMFRX_EL1_RES0(a) (a)
+#define arguments_BDK_AP_ID_AA64MMFRX_EL1_RES0(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_id_aa64mmfr0_el1
+ *
+ * AP AArch64 Memory Model Feature Register 0
+ * This register provides information about the implemented memory model and memory
+ * management support in AArch64.
+ */
+union bdk_ap_id_aa64mmfr0_el1
+{
+ uint64_t u;
+ struct bdk_ap_id_aa64mmfr0_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t tgran4 : 4; /**< [ 31: 28](RO) Support for 4KB memory translation granule size.
+ All other values are reserved.
+ 0x0 = 4KB granule supported.
+ 0xF = 4KB granule not supported.
+
+ In CNXXXX, supported. */
+ uint64_t tgran64 : 4; /**< [ 27: 24](RO) Support for 64KB memory translation granule size.
+ All other values are reserved.
+ 0x0 = 64KB granule supported.
+ 0xF = 64KB granule not supported.
+
+ In CNXXXX, supported. */
+ uint64_t tgran16 : 4; /**< [ 23: 20](RO) Support for 16KB memory translation granule size.
+ All other values are reserved.
+ 0x0 = 16KB granule not supported.
+ 0x1 = 16KB granule supported.
+
+ In CNXXXX, supported. */
+ uint64_t bigendel0 : 4; /**< [ 19: 16](RO) Mixed-endian support at EL0 only.
+ All other values are reserved.
+ This field is invalid and is RES0 if the BigEnd field, bits
+ [11:8], is not 0x0.
+ 0x0 = No mixed-endian support at EL0. The AP_SCTLR_EL1[E0E] bit has a
+ fixed value.
+ 0x1 = Mixed-endian support at EL0. The AP_SCTLR_EL1[E0E] bit can be
+ configured.
+
+ In CNXXXX, supported. */
+ uint64_t snsmem : 4; /**< [ 15: 12](RO) Secure versus nonsecure memory distinction.
+ All other values are reserved.
+ 0x0 = Does not support a distinction between secure and nonsecure
+ memory.
+ 0x1 = Does support a distinction between secure and nonsecure
+ memory.
+
+ In CNXXXX, supported. */
+ uint64_t bigend : 4; /**< [ 11: 8](RO) Mixed-endian configuration support.
+ All other values are reserved.
+ 0x0 = No mixed-endian support. The AP_SCTLR_ELx[EE] bits have a fixed
+ value. See the BigEndEL0 field, bits[19:16], for whether EL0
+ supports mixed-endian.
+ 0x1 = Mixed-endian support. The AP_SCTLR_ELx[EE] and AP_SCTLR_EL1[E0E] bits
+ can be configured.
+
+ In CNXXXX, supported. */
+ uint64_t asidbits : 4; /**< [ 7: 4](RO) Number of ASID bits.
+ All other values are reserved.
+ 0x0 = 8 bits.
+ 0x2 = 16 bits.
+
+ In CNXXXX, 16 bits. */
+ uint64_t parange : 4; /**< [ 3: 0](RO) Physical address range supported.
+ All other values are reserved.
+ 0x0 = 32 bits, 4GB.
+ 0x1 = 36 bits, 64GB.
+ 0x2 = 40 bits, 1TB.
+ 0x3 = 42 bits, 4TB.
+ 0x4 = 44 bits, 16TB.
+ 0x5 = 48 bits, 256TB.
+
+ In CNXXXX, 48 bits. */
+#else /* Word 0 - Little Endian */
+ uint64_t parange : 4; /**< [ 3: 0](RO) Physical address range supported.
+ All other values are reserved.
+ 0x0 = 32 bits, 4GB.
+ 0x1 = 36 bits, 64GB.
+ 0x2 = 40 bits, 1TB.
+ 0x3 = 42 bits, 4TB.
+ 0x4 = 44 bits, 16TB.
+ 0x5 = 48 bits, 256TB.
+
+ In CNXXXX, 48 bits. */
+ uint64_t asidbits : 4; /**< [ 7: 4](RO) Number of ASID bits.
+ All other values are reserved.
+ 0x0 = 8 bits.
+ 0x2 = 16 bits.
+
+ In CNXXXX, 16 bits. */
+ uint64_t bigend : 4; /**< [ 11: 8](RO) Mixed-endian configuration support.
+ All other values are reserved.
+ 0x0 = No mixed-endian support. The AP_SCTLR_ELx[EE] bits have a fixed
+ value. See the BigEndEL0 field, bits[19:16], for whether EL0
+ supports mixed-endian.
+ 0x1 = Mixed-endian support. The AP_SCTLR_ELx[EE] and AP_SCTLR_EL1[E0E] bits
+ can be configured.
+
+ In CNXXXX, supported. */
+ uint64_t snsmem : 4; /**< [ 15: 12](RO) Secure versus nonsecure memory distinction.
+ All other values are reserved.
+ 0x0 = Does not support a distinction between secure and nonsecure
+ memory.
+ 0x1 = Does support a distinction between secure and nonsecure
+ memory.
+
+ In CNXXXX, supported. */
+ uint64_t bigendel0 : 4; /**< [ 19: 16](RO) Mixed-endian support at EL0 only.
+ All other values are reserved.
+ This field is invalid and is RES0 if the BigEnd field, bits
+ [11:8], is not 0x0.
+ 0x0 = No mixed-endian support at EL0. The AP_SCTLR_EL1[E0E] bit has a
+ fixed value.
+ 0x1 = Mixed-endian support at EL0. The AP_SCTLR_EL1[E0E] bit can be
+ configured.
+
+ In CNXXXX, supported. */
+ uint64_t tgran16 : 4; /**< [ 23: 20](RO) Support for 16KB memory translation granule size.
+ All other values are reserved.
+ 0x0 = 16KB granule not supported.
+ 0x1 = 16KB granule supported.
+
+ In CNXXXX, supported. */
+ uint64_t tgran64 : 4; /**< [ 27: 24](RO) Support for 64KB memory translation granule size.
+ All other values are reserved.
+ 0x0 = 64KB granule supported.
+ 0xF = 64KB granule not supported.
+
+ In CNXXXX, supported. */
+ uint64_t tgran4 : 4; /**< [ 31: 28](RO) Support for 4KB memory translation granule size.
+ All other values are reserved.
+ 0x0 = 4KB granule supported.
+ 0xF = 4KB granule not supported.
+
+ In CNXXXX, supported. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_id_aa64mmfr0_el1_s cn8; */
+ struct bdk_ap_id_aa64mmfr0_el1_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t tgran4 : 4; /**< [ 31: 28](RO) Support for 4KB memory translation granule size.
+ All other values are reserved.
+ 0x0 = 4KB granule supported.
+ 0xF = 4KB granule not supported.
+
+ In CNXXXX, supported. */
+ uint64_t tgran64 : 4; /**< [ 27: 24](RO) Support for 64KB memory translation granule size.
+ All other values are reserved.
+ 0x0 = 64KB granule supported.
+ 0xF = 64KB granule not supported.
+
+ In CNXXXX, supported. */
+ uint64_t tgran16 : 4; /**< [ 23: 20](RO) Support for 16KB memory translation granule size.
+ All other values are reserved.
+ 0x0 = 16KB granule not supported.
+ 0x1 = 16KB granule supported.
+
+ In CNXXXX, supported. */
+ uint64_t bigendel0 : 4; /**< [ 19: 16](RO) Mixed-endian support at EL0 only.
+ All other values are reserved.
+ This field is invalid and is RES0 if the BigEnd field, bits
+ [11:8], is not 0x0.
+ 0x0 = No mixed-endian support at EL0. The AP_SCTLR_EL1[E0E] bit has a
+ fixed value.
+ 0x1 = Mixed-endian support at EL0. The AP_SCTLR_EL1[E0E] bit can be
+ configured.
+
+ In CNXXXX, supported. */
+ uint64_t snsmem : 4; /**< [ 15: 12](RO) Secure versus nonsecure memory distinction.
+ All other values are reserved.
+ 0x0 = Does not support a distinction between secure and nonsecure
+ memory.
+ 0x1 = Does support a distinction between secure and nonsecure
+ memory.
+
+ In CNXXXX, supported. */
+ uint64_t bigend : 4; /**< [ 11: 8](RO) Mixed-endian configuration support.
+ All other values are reserved.
+ 0x0 = No mixed-endian support. The AP_SCTLR_ELx[EE] bits have a fixed
+ value. See the BigEndEL0 field, bits[19:16], for whether EL0
+ supports mixed-endian.
+ 0x1 = Mixed-endian support. The AP_SCTLR_ELx[EE] and AP_SCTLR_EL1[E0E] bits
+ can be configured.
+
+ In CNXXXX, supported. */
+ uint64_t asidbits : 4; /**< [ 7: 4](RO) Number of ASID bits.
+ All other values are reserved.
+ 0x0 = 8 bits.
+ 0x2 = 16 bits.
+
+ In CNXXXX, 16 bits. */
+ uint64_t parange : 4; /**< [ 3: 0](RO) Physical address range supported.
+ All other values are reserved.
+ 0x0 = 32 bits, 4GB.
+ 0x1 = 36 bits, 64GB.
+ 0x2 = 40 bits, 1TB.
+ 0x3 = 42 bits, 4TB.
+ 0x4 = 44 bits, 16TB.
+ 0x5 = 48 bits, 256TB.
+ 0x6 = 52 bits, 4PB.
+
+ In CN8XXX, 48 bits.
+ In CN9XXX, 52 bits. */
+#else /* Word 0 - Little Endian */
+ uint64_t parange : 4; /**< [ 3: 0](RO) Physical address range supported.
+ All other values are reserved.
+ 0x0 = 32 bits, 4GB.
+ 0x1 = 36 bits, 64GB.
+ 0x2 = 40 bits, 1TB.
+ 0x3 = 42 bits, 4TB.
+ 0x4 = 44 bits, 16TB.
+ 0x5 = 48 bits, 256TB.
+ 0x6 = 52 bits, 4PB.
+
+ In CN8XXX, 48 bits.
+ In CN9XXX, 52 bits. */
+ uint64_t asidbits : 4; /**< [ 7: 4](RO) Number of ASID bits.
+ All other values are reserved.
+ 0x0 = 8 bits.
+ 0x2 = 16 bits.
+
+ In CNXXXX, 16 bits. */
+ uint64_t bigend : 4; /**< [ 11: 8](RO) Mixed-endian configuration support.
+ All other values are reserved.
+ 0x0 = No mixed-endian support. The AP_SCTLR_ELx[EE] bits have a fixed
+ value. See the BigEndEL0 field, bits[19:16], for whether EL0
+ supports mixed-endian.
+ 0x1 = Mixed-endian support. The AP_SCTLR_ELx[EE] and AP_SCTLR_EL1[E0E] bits
+ can be configured.
+
+ In CNXXXX, supported. */
+ uint64_t snsmem : 4; /**< [ 15: 12](RO) Secure versus nonsecure memory distinction.
+ All other values are reserved.
+ 0x0 = Does not support a distinction between secure and nonsecure
+ memory.
+ 0x1 = Does support a distinction between secure and nonsecure
+ memory.
+
+ In CNXXXX, supported. */
+ uint64_t bigendel0 : 4; /**< [ 19: 16](RO) Mixed-endian support at EL0 only.
+ All other values are reserved.
+ This field is invalid and is RES0 if the BigEnd field, bits
+ [11:8], is not 0x0.
+ 0x0 = No mixed-endian support at EL0. The AP_SCTLR_EL1[E0E] bit has a
+ fixed value.
+ 0x1 = Mixed-endian support at EL0. The AP_SCTLR_EL1[E0E] bit can be
+ configured.
+
+ In CNXXXX, supported. */
+ uint64_t tgran16 : 4; /**< [ 23: 20](RO) Support for 16KB memory translation granule size.
+ All other values are reserved.
+ 0x0 = 16KB granule not supported.
+ 0x1 = 16KB granule supported.
+
+ In CNXXXX, supported. */
+ uint64_t tgran64 : 4; /**< [ 27: 24](RO) Support for 64KB memory translation granule size.
+ All other values are reserved.
+ 0x0 = 64KB granule supported.
+ 0xF = 64KB granule not supported.
+
+ In CNXXXX, supported. */
+ uint64_t tgran4 : 4; /**< [ 31: 28](RO) Support for 4KB memory translation granule size.
+ All other values are reserved.
+ 0x0 = 4KB granule supported.
+                                                                 0xF = 4KB granule not supported.
+
+ In CNXXXX, supported. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_ap_id_aa64mmfr0_el1 bdk_ap_id_aa64mmfr0_el1_t;
+
+#define BDK_AP_ID_AA64MMFR0_EL1 BDK_AP_ID_AA64MMFR0_EL1_FUNC()
+static inline uint64_t BDK_AP_ID_AA64MMFR0_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ID_AA64MMFR0_EL1_FUNC(void)
+{
+ return 0x30000070000ll;
+}
+
+#define typedef_BDK_AP_ID_AA64MMFR0_EL1 bdk_ap_id_aa64mmfr0_el1_t
+#define bustype_BDK_AP_ID_AA64MMFR0_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ID_AA64MMFR0_EL1 "AP_ID_AA64MMFR0_EL1"
+#define busnum_BDK_AP_ID_AA64MMFR0_EL1 0
+#define arguments_BDK_AP_ID_AA64MMFR0_EL1 -1,-1,-1,-1
+
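+/* Illustrative sketch, not from the original BDK sources: reading the
+ * register described above with a plain MRS and decoding [PARANGE]
+ * through the union. Assumes an AArch64 target with GCC/Clang inline
+ * asm; the pa_bits table mirrors the encodings in the field comment. */
+static inline int bdk_example_parange_bits(void)
+{
+    bdk_ap_id_aa64mmfr0_el1_t mmfr0;
+    static const int pa_bits[7] = { 32, 36, 40, 42, 44, 48, 52 };
+    asm volatile("mrs %0, id_aa64mmfr0_el1" : "=r" (mmfr0.u));
+    /* Values above 6 are reserved per the comment, so report failure */
+    return (mmfr0.s.parange <= 6) ? pa_bits[mmfr0.s.parange] : -1;
+}
+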
+/**
+ * Register (SYSREG) ap_id_aa64mmfr1_el1
+ *
+ * AP AArch64 Memory Model Feature Register 1
+ * This register contains additional information about the implemented memory model and
+ * memory management support in AArch64.
+ */
+union bdk_ap_id_aa64mmfr1_el1
+{
+ uint64_t u;
+ struct bdk_ap_id_aa64mmfr1_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t xnx : 4; /**< [ 31: 28](RO) 0x0 = EL0/EL1 execute control distinction at stage2 bit not supported.
+ 0x1 = EL0/EL1 execute control distinction at stage2 bit supported.
+
+ All other values reserved. */
+ uint64_t specsei : 4; /**< [ 27: 24](RO) Describes whether the PE can generate SError interrupt exceptions from speculative reads
+ of memory, including speculative instruction fetches.
+ 0x0 = The PE never generates an SError interrupt due to an external abort on a
+ speculative read.
+ 0x1 = The PE might generate an SError interrupt due to an external abort on a
+                                                                 speculative read.
+
+ All other values are reserved. Reserved values might be defined in a future version of the
+ architecture.
+
+ Valid only if AP_ID_AA64PFR0_EL1[RAS] is nonzero. RAZ otherwise.
+
+ Note: Speculative reads include speculative instruction prefetches. The architecture
+ places restrictions on the memory types a processor is permitted to speculatively read
+ from. Software might use this to control how it initializes memory, and how it responds to
+ errors reported by ESB operations. */
+ uint64_t pan : 4; /**< [ 23: 20](RO) V8.1: Privileged Access Never.
+ 0x0 = AP_PAN not supported.
+ 0x1 = AP_PAN supported.
+ All other values reserved.
+
+ For CNXXXX, if AP_CVMCTL_EL1[ENABLE_V81] is set 0x1, else 0x0. */
+ uint64_t lo : 4; /**< [ 19: 16](RO) V8.1: Limited order regions
+ All other values reserved.
+ 0x0 = LORRegions not supported.
+ 0x1 = LORRegions supported.
+
+ For CNXXXX, if AP_CVMCTL_EL1[ENABLE_V81] is set 0x1, else 0x0. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t vh : 4; /**< [ 11: 8](RO) V8.1: Virtualization Host Extensions.
+ All other values reserved.
+ 0x0 = Virtualization Host Extensions are not supported.
+ 0x1 = Virtualization Host Extensions supported.
+
+ For CNXXXX, if AP_CVMCTL_EL1[ENABLE_V81] is set 0x1, else 0x0. */
+ uint64_t vmidbits : 4; /**< [ 7: 4](RO) V8.1: Number of VMID bits.
+ Other values are reserved.
+ 0x0 = 8 bits.
+ 0x1 = Reserved.
+ 0x2 = 16 bits.
+
+ In CNXXXX, 16 bits. */
+ uint64_t hardware_access_dirty : 4; /**< [ 3: 0](RO) V8.1: Hardware updates of the Access and Dirty bits
+ All other fields reserved.
+ 0x0 = no hardware update of the access and dirty bits supported in hardware.
+ 0x1 = hardware update of the access bit supported in hardware.
+ 0x2 = hardware update of both the access and dirty bits supported in hardware.
+
+ In CNXXXX not supported. */
+#else /* Word 0 - Little Endian */
+ uint64_t hardware_access_dirty : 4; /**< [ 3: 0](RO) V8.1: Hardware updates of the Access and Dirty bits
+ All other fields reserved.
+ 0x0 = no hardware update of the access and dirty bits supported in hardware.
+ 0x1 = hardware update of the access bit supported in hardware.
+ 0x2 = hardware update of both the access and dirty bits supported in hardware.
+
+ In CNXXXX not supported. */
+ uint64_t vmidbits : 4; /**< [ 7: 4](RO) V8.1: Number of VMID bits.
+ Other values are reserved.
+ 0x0 = 8 bits.
+ 0x1 = Reserved.
+ 0x2 = 16 bits.
+
+ In CNXXXX, 16 bits. */
+ uint64_t vh : 4; /**< [ 11: 8](RO) V8.1: Virtualization Host Extensions.
+ All other values reserved.
+ 0x0 = Virtualization Host Extensions are not supported.
+ 0x1 = Virtualization Host Extensions supported.
+
+ For CNXXXX, if AP_CVMCTL_EL1[ENABLE_V81] is set 0x1, else 0x0. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t lo : 4; /**< [ 19: 16](RO) V8.1: Limited order regions
+ All other values reserved.
+ 0x0 = LORRegions not supported.
+ 0x1 = LORRegions supported.
+
+ For CNXXXX, if AP_CVMCTL_EL1[ENABLE_V81] is set 0x1, else 0x0. */
+ uint64_t pan : 4; /**< [ 23: 20](RO) V8.1: Privileged Access Never.
+ 0x0 = AP_PAN not supported.
+ 0x1 = AP_PAN supported.
+ All other values reserved.
+
+ For CNXXXX, if AP_CVMCTL_EL1[ENABLE_V81] is set 0x1, else 0x0. */
+ uint64_t specsei : 4; /**< [ 27: 24](RO) Describes whether the PE can generate SError interrupt exceptions from speculative reads
+ of memory, including speculative instruction fetches.
+ 0x0 = The PE never generates an SError interrupt due to an external abort on a
+ speculative read.
+ 0x1 = The PE might generate an SError interrupt due to an external abort on a
+                                                                 speculative read.
+
+ All other values are reserved. Reserved values might be defined in a future version of the
+ architecture.
+
+ Valid only if AP_ID_AA64PFR0_EL1[RAS] is nonzero. RAZ otherwise.
+
+ Note: Speculative reads include speculative instruction prefetches. The architecture
+ places restrictions on the memory types a processor is permitted to speculatively read
+ from. Software might use this to control how it initializes memory, and how it responds to
+ errors reported by ESB operations. */
+ uint64_t xnx : 4; /**< [ 31: 28](RO) 0x0 = EL0/EL1 execute control distinction at stage2 bit not supported.
+ 0x1 = EL0/EL1 execute control distinction at stage2 bit supported.
+
+ All other values reserved. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_id_aa64mmfr1_el1_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t pan : 4; /**< [ 23: 20](RO) V8.1: Privileged Access Never.
+ 0x0 = AP_PAN not supported.
+ 0x1 = AP_PAN supported.
+ All other values reserved.
+
+ For CNXXXX, if AP_CVMCTL_EL1[ENABLE_V81] is set 0x1, else 0x0. */
+ uint64_t lo : 4; /**< [ 19: 16](RO) V8.1: Limited order regions
+ All other values reserved.
+ 0x0 = LORRegions not supported.
+ 0x1 = LORRegions supported.
+
+ For CNXXXX, if AP_CVMCTL_EL1[ENABLE_V81] is set 0x1, else 0x0. */
+ uint64_t hd : 4; /**< [ 15: 12](RO) V8.1: Hierarchical Attribute Disables.
+ All other values reserved.
+ 0x0 = Hierarchical Attribute Disables not supported.
+ 0x1 = Hierarchical Attribute Disables supported.
+
+ For CNXXXX, if AP_CVMCTL_EL1[ENABLE_V81] is set 0x1, else 0x0. */
+ uint64_t vh : 4; /**< [ 11: 8](RO) V8.1: Virtualization Host Extensions.
+ All other values reserved.
+ 0x0 = Virtualization Host Extensions are not supported.
+ 0x1 = Virtualization Host Extensions supported.
+
+ For CNXXXX, if AP_CVMCTL_EL1[ENABLE_V81] is set 0x1, else 0x0. */
+ uint64_t vmidbits : 4; /**< [ 7: 4](RO) V8.1: Number of VMID bits.
+ Other values are reserved.
+ 0x0 = 8 bits.
+ 0x1 = Reserved.
+ 0x2 = 16 bits.
+
+ In CNXXXX, 16 bits. */
+ uint64_t hardware_access_dirty : 4; /**< [ 3: 0](RO) V8.1: Hardware updates of the Access and Dirty bits
+ All other fields reserved.
+ 0x0 = no hardware update of the access and dirty bits supported in hardware.
+ 0x1 = hardware update of the access bit supported in hardware.
+ 0x2 = hardware update of both the access and dirty bits supported in hardware.
+
+ In CNXXXX not supported. */
+#else /* Word 0 - Little Endian */
+ uint64_t hardware_access_dirty : 4; /**< [ 3: 0](RO) V8.1: Hardware updates of the Access and Dirty bits
+ All other fields reserved.
+ 0x0 = no hardware update of the access and dirty bits supported in hardware.
+ 0x1 = hardware update of the access bit supported in hardware.
+ 0x2 = hardware update of both the access and dirty bits supported in hardware.
+
+ In CNXXXX not supported. */
+ uint64_t vmidbits : 4; /**< [ 7: 4](RO) V8.1: Number of VMID bits.
+ Other values are reserved.
+ 0x0 = 8 bits.
+ 0x1 = Reserved.
+ 0x2 = 16 bits.
+
+ In CNXXXX, 16 bits. */
+ uint64_t vh : 4; /**< [ 11: 8](RO) V8.1: Virtualization Host Extensions.
+ All other values reserved.
+ 0x0 = Virtualization Host Extensions are not supported.
+ 0x1 = Virtualization Host Extensions supported.
+
+ For CNXXXX, if AP_CVMCTL_EL1[ENABLE_V81] is set 0x1, else 0x0. */
+ uint64_t hd : 4; /**< [ 15: 12](RO) V8.1: Hierarchical Attribute Disables.
+ All other values reserved.
+ 0x0 = Hierarchical Attribute Disables not supported.
+ 0x1 = Hierarchical Attribute Disables supported.
+
+ For CNXXXX, if AP_CVMCTL_EL1[ENABLE_V81] is set 0x1, else 0x0. */
+ uint64_t lo : 4; /**< [ 19: 16](RO) V8.1: Limited order regions
+ All other values reserved.
+ 0x0 = LORRegions not supported.
+ 0x1 = LORRegions supported.
+
+ For CNXXXX, if AP_CVMCTL_EL1[ENABLE_V81] is set 0x1, else 0x0. */
+ uint64_t pan : 4; /**< [ 23: 20](RO) V8.1: Privileged Access Never.
+ 0x0 = AP_PAN not supported.
+ 0x1 = AP_PAN supported.
+ All other values reserved.
+
+ For CNXXXX, if AP_CVMCTL_EL1[ENABLE_V81] is set 0x1, else 0x0. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_ap_id_aa64mmfr1_el1_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t xnx : 4; /**< [ 31: 28](RO) 0x0 = EL0/EL1 execute control distinction at stage2 bit not supported.
+ 0x1 = EL0/EL1 execute control distinction at stage2 bit supported.
+
+ All other values reserved. */
+ uint64_t specsei : 4; /**< [ 27: 24](RO) Describes whether the PE can generate SError interrupt exceptions from speculative reads
+ of memory, including speculative instruction fetches.
+ 0x0 = The PE never generates an SError interrupt due to an external abort on a
+ speculative read.
+ 0x1 = The PE might generate an SError interrupt due to an external abort on a
+                                                                 speculative read.
+
+ All other values are reserved. Reserved values might be defined in a future version of the
+ architecture.
+
+ Valid only if AP_ID_AA64PFR0_EL1[RAS] is nonzero. RAZ otherwise.
+
+ Note: Speculative reads include speculative instruction prefetches. The architecture
+ places restrictions on the memory types a processor is permitted to speculatively read
+ from. Software might use this to control how it initializes memory, and how it responds to
+ errors reported by ESB operations. */
+ uint64_t pan : 4; /**< [ 23: 20](RO) V8.1: Privileged Access Never.
+ 0x0 = AP_PAN not supported.
+ 0x1 = AP_PAN supported.
+                                                                 0x2 = PAN supported and new AT S1E1RP and AT S1E1WP instructions supported.
+
+ All other values reserved.
+
+ For CNXXXX, if AP_CVMCTL_EL1[ENABLE_V81] is set 0x1, else 0x0. */
+ uint64_t lo : 4; /**< [ 19: 16](RO) V8.1: Limited order regions
+ All other values reserved.
+ 0x0 = LORRegions not supported.
+ 0x1 = LORRegions supported.
+
+ For CNXXXX, if AP_CVMCTL_EL1[ENABLE_V81] is set 0x1, else 0x0. */
+ uint64_t hpds : 4; /**< [ 15: 12](RO) V8.1: Hierarchical Permission Disables.
+ All other values reserved.
+ 0x0 = Hierarchical Permission Disables not supported.
+ 0x1 = Hierarchical Permission Disables supported.
+ 0x2 = Hierarchical Permission Disables and hardware allocation of bits[62:59] supported.
+
+ For CNXXXX, if AP_CVMCTL_EL1[ENABLE_V81] is set 0x1, else 0x0. */
+ uint64_t vh : 4; /**< [ 11: 8](RO) V8.1: Virtualization Host Extensions.
+ All other values reserved.
+ 0x0 = Virtualization Host Extensions are not supported.
+ 0x1 = Virtualization Host Extensions supported.
+
+ For CNXXXX, if AP_CVMCTL_EL1[ENABLE_V81] is set 0x1, else 0x0. */
+ uint64_t vmidbits : 4; /**< [ 7: 4](RO) V8.1: Number of VMID bits.
+ Other values are reserved.
+ 0x0 = 8 bits.
+ 0x1 = Reserved.
+ 0x2 = 16 bits.
+
+ In CNXXXX, 16 bits. */
+ uint64_t hardware_access_dirty : 4; /**< [ 3: 0](RO) V8.1: Hardware updates of the Access and Dirty bits
+ All other fields reserved.
+ 0x0 = no hardware update of the access and dirty bits supported in hardware.
+ 0x1 = hardware update of the access bit supported in hardware.
+ 0x2 = hardware update of both the access and dirty bits supported in hardware.
+
+ In CNXXXX not supported. */
+#else /* Word 0 - Little Endian */
+ uint64_t hardware_access_dirty : 4; /**< [ 3: 0](RO) V8.1: Hardware updates of the Access and Dirty bits
+ All other fields reserved.
+ 0x0 = no hardware update of the access and dirty bits supported in hardware.
+ 0x1 = hardware update of the access bit supported in hardware.
+ 0x2 = hardware update of both the access and dirty bits supported in hardware.
+
+ In CNXXXX not supported. */
+ uint64_t vmidbits : 4; /**< [ 7: 4](RO) V8.1: Number of VMID bits.
+ Other values are reserved.
+ 0x0 = 8 bits.
+ 0x1 = Reserved.
+ 0x2 = 16 bits.
+
+ In CNXXXX, 16 bits. */
+ uint64_t vh : 4; /**< [ 11: 8](RO) V8.1: Virtualization Host Extensions.
+ All other values reserved.
+ 0x0 = Virtualization Host Extensions are not supported.
+ 0x1 = Virtualization Host Extensions supported.
+
+ For CNXXXX, if AP_CVMCTL_EL1[ENABLE_V81] is set 0x1, else 0x0. */
+ uint64_t hpds : 4; /**< [ 15: 12](RO) V8.1: Hierarchical Permission Disables.
+ All other values reserved.
+ 0x0 = Hierarchical Permission Disables not supported.
+ 0x1 = Hierarchical Permission Disables supported.
+ 0x2 = Hierarchical Permission Disables and hardware allocation of bits[62:59] supported.
+
+ For CNXXXX, if AP_CVMCTL_EL1[ENABLE_V81] is set 0x1, else 0x0. */
+ uint64_t lo : 4; /**< [ 19: 16](RO) V8.1: Limited order regions
+ All other values reserved.
+ 0x0 = LORRegions not supported.
+ 0x1 = LORRegions supported.
+
+ For CNXXXX, if AP_CVMCTL_EL1[ENABLE_V81] is set 0x1, else 0x0. */
+ uint64_t pan : 4; /**< [ 23: 20](RO) V8.1: Privileged Access Never.
+ 0x0 = AP_PAN not supported.
+ 0x1 = AP_PAN supported.
+                                                                 0x2 = PAN supported and new AT S1E1RP and AT S1E1WP instructions supported.
+
+ All other values reserved.
+
+ For CNXXXX, if AP_CVMCTL_EL1[ENABLE_V81] is set 0x1, else 0x0. */
+ uint64_t specsei : 4; /**< [ 27: 24](RO) Describes whether the PE can generate SError interrupt exceptions from speculative reads
+ of memory, including speculative instruction fetches.
+ 0x0 = The PE never generates an SError interrupt due to an external abort on a
+ speculative read.
+ 0x1 = The PE might generate an SError interrupt due to an external abort on a
+                                                                 speculative read.
+
+ All other values are reserved. Reserved values might be defined in a future version of the
+ architecture.
+
+ Valid only if AP_ID_AA64PFR0_EL1[RAS] is nonzero. RAZ otherwise.
+
+ Note: Speculative reads include speculative instruction prefetches. The architecture
+ places restrictions on the memory types a processor is permitted to speculatively read
+ from. Software might use this to control how it initializes memory, and how it responds to
+ errors reported by ESB operations. */
+ uint64_t xnx : 4; /**< [ 31: 28](RO) 0x0 = EL0/EL1 execute control distinction at stage2 bit not supported.
+ 0x1 = EL0/EL1 execute control distinction at stage2 bit supported.
+
+ All other values reserved. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_ap_id_aa64mmfr1_el1 bdk_ap_id_aa64mmfr1_el1_t;
+
+#define BDK_AP_ID_AA64MMFR1_EL1 BDK_AP_ID_AA64MMFR1_EL1_FUNC()
+static inline uint64_t BDK_AP_ID_AA64MMFR1_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ID_AA64MMFR1_EL1_FUNC(void)
+{
+ return 0x30000070100ll;
+}
+
+#define typedef_BDK_AP_ID_AA64MMFR1_EL1 bdk_ap_id_aa64mmfr1_el1_t
+#define bustype_BDK_AP_ID_AA64MMFR1_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ID_AA64MMFR1_EL1 "AP_ID_AA64MMFR1_EL1"
+#define busnum_BDK_AP_ID_AA64MMFR1_EL1 0
+#define arguments_BDK_AP_ID_AA64MMFR1_EL1 -1,-1,-1,-1
+
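+/* Illustrative sketch, not from the original BDK sources: testing
+ * whether the v8.1 Virtualization Host Extensions are advertised via
+ * [VH]. Per the field comments above, CNXXXX reports 0x1 here only when
+ * AP_CVMCTL_EL1[ENABLE_V81] is set. Assumes an AArch64 target. */
+static inline int bdk_example_have_vhe(void)
+{
+    bdk_ap_id_aa64mmfr1_el1_t mmfr1;
+    asm volatile("mrs %0, id_aa64mmfr1_el1" : "=r" (mmfr1.u));
+    return mmfr1.s.vh == 0x1;
+}
+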
+/**
+ * Register (SYSREG) ap_id_aa64mmfr2_el1
+ *
+ * AP AArch64 Memory Model Feature Register 2
+ * This register contains additional information about the implemented memory model and
+ * memory management support in AArch64.
+ */
+union bdk_ap_id_aa64mmfr2_el1
+{
+ uint64_t u;
+ struct bdk_ap_id_aa64mmfr2_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_20_63 : 44;
+    uint64_t varange : 4; /**< [ 19: 16](RO) 0x0 = 48 bits of VA for each translation table base register (for 64KB
+                                                                 stage-1 pages) are supported.
+                                                                 0x1 = 52 bits of VA for each translation table base register (for 64KB
+                                                                 stage-1 pages) are supported. */
+ uint64_t iesb : 4; /**< [ 15: 12](RO) 0x0 = Not implemented.
+ 0x1 = SCTLR_ELx.IESB implicit error synchronization barrier control implemented. */
+ uint64_t lsm : 4; /**< [ 11: 8](RO) 0x0 = LSMAOE and nTLSMD bit not supported.
+ 0x1 = LSMAOE and nTLSMD bit supported.
+
+ All other values reserved. */
+ uint64_t uao : 4; /**< [ 7: 4](RO) 0x0 = UAO not supported.
+ 0x1 = UAO supported.
+
+ All other values reserved. */
+ uint64_t cnp : 4; /**< [ 3: 0](RO) 0x0 = CnP bit not supported.
+ 0x1 = CnP bit supported.
+
+ All other values reserved. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnp : 4; /**< [ 3: 0](RO) 0x0 = CnP bit not supported.
+ 0x1 = CnP bit supported.
+
+ All other values reserved. */
+ uint64_t uao : 4; /**< [ 7: 4](RO) 0x0 = UAO not supported.
+ 0x1 = UAO supported.
+
+ All other values reserved. */
+ uint64_t lsm : 4; /**< [ 11: 8](RO) 0x0 = LSMAOE and nTLSMD bit not supported.
+ 0x1 = LSMAOE and nTLSMD bit supported.
+
+ All other values reserved. */
+ uint64_t iesb : 4; /**< [ 15: 12](RO) 0x0 = Not implemented.
+ 0x1 = SCTLR_ELx.IESB implicit error synchronization barrier control implemented. */
+    uint64_t varange : 4; /**< [ 19: 16](RO) 0x0 = 48 bits of VA for each translation table base register (for 64KB
+                                                                 stage-1 pages) are supported.
+                                                                 0x1 = 52 bits of VA for each translation table base register (for 64KB
+                                                                 stage-1 pages) are supported. */
+ uint64_t reserved_20_63 : 44;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_id_aa64mmfr2_el1_s cn; */
+};
+typedef union bdk_ap_id_aa64mmfr2_el1 bdk_ap_id_aa64mmfr2_el1_t;
+
+#define BDK_AP_ID_AA64MMFR2_EL1 BDK_AP_ID_AA64MMFR2_EL1_FUNC()
+static inline uint64_t BDK_AP_ID_AA64MMFR2_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ID_AA64MMFR2_EL1_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x30000070200ll;
+ __bdk_csr_fatal("AP_ID_AA64MMFR2_EL1", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_ID_AA64MMFR2_EL1 bdk_ap_id_aa64mmfr2_el1_t
+#define bustype_BDK_AP_ID_AA64MMFR2_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ID_AA64MMFR2_EL1 "AP_ID_AA64MMFR2_EL1"
+#define busnum_BDK_AP_ID_AA64MMFR2_EL1 0
+#define arguments_BDK_AP_ID_AA64MMFR2_EL1 -1,-1,-1,-1
+
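+/* Illustrative sketch, not from the original BDK sources: the address
+ * function above calls __bdk_csr_fatal() on anything but CN9XXX, so
+ * reads of this register are expected to be guarded by the same model
+ * check. The s3_0_c0_c7_2 operand is the generic encoding of
+ * ID_AA64MMFR2_EL1, for assemblers that predate the v8.2 name. */
+static inline uint64_t bdk_example_read_mmfr2(void)
+{
+    bdk_ap_id_aa64mmfr2_el1_t mmfr2 = { .u = 0 };
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+        asm volatile("mrs %0, s3_0_c0_c7_2" : "=r" (mmfr2.u));
+    return mmfr2.u;
+}
+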
+/**
+ * Register (SYSREG) ap_id_aa64pfr#_el1_res0
+ *
+ * INTERNAL: AP AArch64 Reserved Register
+ *
+ * Reserved for future expansion of information about implemented
+ * processor features in AArch64. ARM doesn't actually assign
+ * a name to these registers, so CNXXXX made up one.
+ */
+union bdk_ap_id_aa64pfrx_el1_res0
+{
+ uint64_t u;
+ struct bdk_ap_id_aa64pfrx_el1_res0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_id_aa64pfrx_el1_res0_s cn; */
+};
+typedef union bdk_ap_id_aa64pfrx_el1_res0 bdk_ap_id_aa64pfrx_el1_res0_t;
+
+static inline uint64_t BDK_AP_ID_AA64PFRX_EL1_RES0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ID_AA64PFRX_EL1_RES0(unsigned long a)
+{
+ if ((a>=2)&&(a<=7))
+ return 0x30000040000ll + 0x100ll * ((a) & 0x7);
+ __bdk_csr_fatal("AP_ID_AA64PFRX_EL1_RES0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_ID_AA64PFRX_EL1_RES0(a) bdk_ap_id_aa64pfrx_el1_res0_t
+#define bustype_BDK_AP_ID_AA64PFRX_EL1_RES0(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ID_AA64PFRX_EL1_RES0(a) "AP_ID_AA64PFRX_EL1_RES0"
+#define busnum_BDK_AP_ID_AA64PFRX_EL1_RES0(a) (a)
+#define arguments_BDK_AP_ID_AA64PFRX_EL1_RES0(a) (a),-1,-1,-1
+
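+/* Illustrative sketch, not from the original BDK sources, inferred from
+ * the constants returned by these accessors (e.g. 0x30000040000 above
+ * corresponds to op0=3, op1=0, CRn=0, CRm=4, op2=0): the sysreg
+ * "addresses" appear to pack the MRS/MSR operands one byte apart. */
+static inline void bdk_example_unpack_sysreg(uint64_t addr, unsigned *op0,
+    unsigned *op1, unsigned *crn, unsigned *crm, unsigned *op2)
+{
+    *op0 = (addr >> 40) & 0xff;
+    *op1 = (addr >> 32) & 0xff;
+    *crn = (addr >> 24) & 0xff;
+    *crm = (addr >> 16) & 0xff;
+    *op2 = (addr >> 8) & 0xff;
+}
+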
+/**
+ * Register (SYSREG) ap_id_aa64pfr0_el1
+ *
+ * AP AArch64 Processor Feature Register 0
+ * This register provides additional information about implemented processor features
+ * in AArch64.
+ */
+union bdk_ap_id_aa64pfr0_el1
+{
+ uint64_t u;
+ struct bdk_ap_id_aa64pfr0_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t ras : 4; /**< [ 31: 28](RO) RAS extension version.
+ 0x0 = No RAS extension.
+ 0x1 = Version 1 of the RAS extension present.
+
+ All other values are reserved. Reserved values might be defined in a future version of the
+ architecture. */
+ uint64_t gic : 4; /**< [ 27: 24](RO) GIC system register interface.
+ All other values are reserved.
+ 0x0 = No GIC system registers are supported.
+ 0x1 = GICv3 system registers are supported.
+
+ In CNXXXX, supported. */
+ uint64_t advsimd : 4; /**< [ 23: 20](RO) Advanced SIMD.
+ All other values are reserved.
+ 0x0 = Advanced SIMD is implemented.
+ 0xF = Advanced SIMD is not implemented.
+
+ In CNXXXX, supported. */
+ uint64_t fp : 4; /**< [ 19: 16](RO) Floating-point.
+ All other values are reserved.
+ 0x0 = Floating-point is implemented.
+ 0xF = Floating-point is not implemented.
+
+ In CNXXXX, supported. */
+ uint64_t el3 : 4; /**< [ 15: 12](RO) EL3 Exception level handling.
+ All other values are reserved.
+ 0x0 = EL3 is not implemented.
+ 0x1 = EL3 can be executed in AArch64 state only.
+ 0x2 = EL3 can be executed in either AArch64 or AArch32 state.
+
+ In CNXXXX, supported in AArch64. */
+ uint64_t el2 : 4; /**< [ 11: 8](RO) EL2 Exception level handling.
+ All other values are reserved.
+ 0x0 = EL2 is not implemented.
+ 0x1 = EL2 can be executed in AArch64 state only.
+ 0x2 = EL2 can be executed in either AArch64 or AArch32 state.
+
+ In CNXXXX, supported in AArch64. */
+ uint64_t el1 : 4; /**< [ 7: 4](RO) EL1 Exception level handling.
+ All other values are reserved.
+ 0x0 = EL1 is not implemented.
+ 0x1 = EL1 can be executed in AArch64 state only.
+ 0x2 = EL1 can be executed in either AArch64 or AArch32 state.
+
+ In CNXXXX, supported in AArch64. */
+ uint64_t el0 : 4; /**< [ 3: 0](RO) EL0 Exception level handling.
+ All other values are reserved.
+ 0x0 = EL0 is not implemented.
+ 0x1 = EL0 can be executed in AArch64 state only.
+ 0x2 = EL0 can be executed in either AArch64 or AArch32 state.
+
+ In CNXXXX, supported in AArch64. */
+#else /* Word 0 - Little Endian */
+ uint64_t el0 : 4; /**< [ 3: 0](RO) EL0 Exception level handling.
+ All other values are reserved.
+ 0x0 = EL0 is not implemented.
+ 0x1 = EL0 can be executed in AArch64 state only.
+ 0x2 = EL0 can be executed in either AArch64 or AArch32 state.
+
+ In CNXXXX, supported in AArch64. */
+ uint64_t el1 : 4; /**< [ 7: 4](RO) EL1 Exception level handling.
+ All other values are reserved.
+ 0x0 = EL1 is not implemented.
+ 0x1 = EL1 can be executed in AArch64 state only.
+ 0x2 = EL1 can be executed in either AArch64 or AArch32 state.
+
+ In CNXXXX, supported in AArch64. */
+ uint64_t el2 : 4; /**< [ 11: 8](RO) EL2 Exception level handling.
+ All other values are reserved.
+ 0x0 = EL2 is not implemented.
+ 0x1 = EL2 can be executed in AArch64 state only.
+ 0x2 = EL2 can be executed in either AArch64 or AArch32 state.
+
+ In CNXXXX, supported in AArch64. */
+ uint64_t el3 : 4; /**< [ 15: 12](RO) EL3 Exception level handling.
+ All other values are reserved.
+ 0x0 = EL3 is not implemented.
+ 0x1 = EL3 can be executed in AArch64 state only.
+ 0x2 = EL3 can be executed in either AArch64 or AArch32 state.
+
+ In CNXXXX, supported in AArch64. */
+ uint64_t fp : 4; /**< [ 19: 16](RO) Floating-point.
+ All other values are reserved.
+ 0x0 = Floating-point is implemented.
+ 0xF = Floating-point is not implemented.
+
+ In CNXXXX, supported. */
+ uint64_t advsimd : 4; /**< [ 23: 20](RO) Advanced SIMD.
+ All other values are reserved.
+ 0x0 = Advanced SIMD is implemented.
+ 0xF = Advanced SIMD is not implemented.
+
+ In CNXXXX, supported. */
+ uint64_t gic : 4; /**< [ 27: 24](RO) GIC system register interface.
+ All other values are reserved.
+ 0x0 = No GIC system registers are supported.
+ 0x1 = GICv3 system registers are supported.
+
+ In CNXXXX, supported. */
+ uint64_t ras : 4; /**< [ 31: 28](RO) RAS extension version.
+ 0x0 = No RAS extension.
+ 0x1 = Version 1 of the RAS extension present.
+
+ All other values are reserved. Reserved values might be defined in a future version of the
+ architecture. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_id_aa64pfr0_el1_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_28_63 : 36;
+ uint64_t gic : 4; /**< [ 27: 24](RO) GIC system register interface.
+ All other values are reserved.
+ 0x0 = No GIC system registers are supported.
+ 0x1 = GICv3 system registers are supported.
+
+ In CNXXXX, supported. */
+ uint64_t advsimd : 4; /**< [ 23: 20](RO) Advanced SIMD.
+ All other values are reserved.
+ 0x0 = Advanced SIMD is implemented.
+ 0xF = Advanced SIMD is not implemented.
+
+ In CNXXXX, supported. */
+ uint64_t fp : 4; /**< [ 19: 16](RO) Floating-point.
+ All other values are reserved.
+ 0x0 = Floating-point is implemented.
+ 0xF = Floating-point is not implemented.
+
+ In CNXXXX, supported. */
+ uint64_t el3 : 4; /**< [ 15: 12](RO) EL3 Exception level handling.
+ All other values are reserved.
+ 0x0 = EL3 is not implemented.
+ 0x1 = EL3 can be executed in AArch64 state only.
+ 0x2 = EL3 can be executed in either AArch64 or AArch32 state.
+
+ In CNXXXX, supported in AArch64. */
+ uint64_t el2 : 4; /**< [ 11: 8](RO) EL2 Exception level handling.
+ All other values are reserved.
+ 0x0 = EL2 is not implemented.
+ 0x1 = EL2 can be executed in AArch64 state only.
+ 0x2 = EL2 can be executed in either AArch64 or AArch32 state.
+
+ In CNXXXX, supported in AArch64. */
+ uint64_t el1 : 4; /**< [ 7: 4](RO) EL1 Exception level handling.
+ All other values are reserved.
+ 0x0 = EL1 is not implemented.
+ 0x1 = EL1 can be executed in AArch64 state only.
+ 0x2 = EL1 can be executed in either AArch64 or AArch32 state.
+
+ In CNXXXX, supported in AArch64. */
+ uint64_t el0 : 4; /**< [ 3: 0](RO) EL0 Exception level handling.
+ All other values are reserved.
+ 0x0 = EL0 is not implemented.
+ 0x1 = EL0 can be executed in AArch64 state only.
+ 0x2 = EL0 can be executed in either AArch64 or AArch32 state.
+
+ In CNXXXX, supported in AArch64. */
+#else /* Word 0 - Little Endian */
+ uint64_t el0 : 4; /**< [ 3: 0](RO) EL0 Exception level handling.
+ All other values are reserved.
+ 0x0 = EL0 is not implemented.
+ 0x1 = EL0 can be executed in AArch64 state only.
+ 0x2 = EL0 can be executed in either AArch64 or AArch32 state.
+
+ In CNXXXX, supported in AArch64. */
+ uint64_t el1 : 4; /**< [ 7: 4](RO) EL1 Exception level handling.
+ All other values are reserved.
+ 0x0 = EL1 is not implemented.
+ 0x1 = EL1 can be executed in AArch64 state only.
+ 0x2 = EL1 can be executed in either AArch64 or AArch32 state.
+
+ In CNXXXX, supported in AArch64. */
+ uint64_t el2 : 4; /**< [ 11: 8](RO) EL2 Exception level handling.
+ All other values are reserved.
+ 0x0 = EL2 is not implemented.
+ 0x1 = EL2 can be executed in AArch64 state only.
+ 0x2 = EL2 can be executed in either AArch64 or AArch32 state.
+
+ In CNXXXX, supported in AArch64. */
+ uint64_t el3 : 4; /**< [ 15: 12](RO) EL3 Exception level handling.
+ All other values are reserved.
+ 0x0 = EL3 is not implemented.
+ 0x1 = EL3 can be executed in AArch64 state only.
+ 0x2 = EL3 can be executed in either AArch64 or AArch32 state.
+
+ In CNXXXX, supported in AArch64. */
+ uint64_t fp : 4; /**< [ 19: 16](RO) Floating-point.
+ All other values are reserved.
+ 0x0 = Floating-point is implemented.
+ 0xF = Floating-point is not implemented.
+
+ In CNXXXX, supported. */
+ uint64_t advsimd : 4; /**< [ 23: 20](RO) Advanced SIMD.
+ All other values are reserved.
+ 0x0 = Advanced SIMD is implemented.
+ 0xF = Advanced SIMD is not implemented.
+
+ In CNXXXX, supported. */
+ uint64_t gic : 4; /**< [ 27: 24](RO) GIC system register interface.
+ All other values are reserved.
+ 0x0 = No GIC system registers are supported.
+ 0x1 = GICv3 system registers are supported.
+
+ In CNXXXX, supported. */
+ uint64_t reserved_28_63 : 36;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_ap_id_aa64pfr0_el1_s cn9; */
+};
+typedef union bdk_ap_id_aa64pfr0_el1 bdk_ap_id_aa64pfr0_el1_t;
+
+#define BDK_AP_ID_AA64PFR0_EL1 BDK_AP_ID_AA64PFR0_EL1_FUNC()
+static inline uint64_t BDK_AP_ID_AA64PFR0_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ID_AA64PFR0_EL1_FUNC(void)
+{
+ return 0x30000040000ll;
+}
+
+#define typedef_BDK_AP_ID_AA64PFR0_EL1 bdk_ap_id_aa64pfr0_el1_t
+#define bustype_BDK_AP_ID_AA64PFR0_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ID_AA64PFR0_EL1 "AP_ID_AA64PFR0_EL1"
+#define busnum_BDK_AP_ID_AA64PFR0_EL1 0
+#define arguments_BDK_AP_ID_AA64PFR0_EL1 -1,-1,-1,-1
+
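+/* Illustrative sketch, not from the original BDK sources: checking
+ * whether the core implements EL3, per the [EL0]..[EL3] encodings
+ * documented above (nonzero means the level exists). Assumes an
+ * AArch64 target. */
+static inline int bdk_example_has_el3(void)
+{
+    bdk_ap_id_aa64pfr0_el1_t pfr0;
+    asm volatile("mrs %0, id_aa64pfr0_el1" : "=r" (pfr0.u));
+    return pfr0.s.el3 != 0;
+}
+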
+/**
+ * Register (SYSREG) ap_id_aa64pfr1_el1
+ *
+ * AP AArch64 Processor Feature Register 1
+ * Reserved for future expansion of information about implemented
+ * processor features in AArch64.
+ */
+union bdk_ap_id_aa64pfr1_el1
+{
+ uint64_t u;
+ struct bdk_ap_id_aa64pfr1_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_id_aa64pfr1_el1_s cn; */
+};
+typedef union bdk_ap_id_aa64pfr1_el1 bdk_ap_id_aa64pfr1_el1_t;
+
+#define BDK_AP_ID_AA64PFR1_EL1 BDK_AP_ID_AA64PFR1_EL1_FUNC()
+static inline uint64_t BDK_AP_ID_AA64PFR1_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ID_AA64PFR1_EL1_FUNC(void)
+{
+ return 0x30000040100ll;
+}
+
+#define typedef_BDK_AP_ID_AA64PFR1_EL1 bdk_ap_id_aa64pfr1_el1_t
+#define bustype_BDK_AP_ID_AA64PFR1_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ID_AA64PFR1_EL1 "AP_ID_AA64PFR1_EL1"
+#define busnum_BDK_AP_ID_AA64PFR1_EL1 0
+#define arguments_BDK_AP_ID_AA64PFR1_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_id_afr0_el1
+ *
+ * AP AArch32 Auxiliary Feature Register 0
+ * Provides information about the implementation defined features
+ * of the PE in AArch32.
+ */
+union bdk_ap_id_afr0_el1
+{
+ uint32_t u;
+ struct bdk_ap_id_afr0_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_id_afr0_el1_s cn; */
+};
+typedef union bdk_ap_id_afr0_el1 bdk_ap_id_afr0_el1_t;
+
+#define BDK_AP_ID_AFR0_EL1 BDK_AP_ID_AFR0_EL1_FUNC()
+static inline uint64_t BDK_AP_ID_AFR0_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ID_AFR0_EL1_FUNC(void)
+{
+ return 0x30000010300ll;
+}
+
+#define typedef_BDK_AP_ID_AFR0_EL1 bdk_ap_id_afr0_el1_t
+#define bustype_BDK_AP_ID_AFR0_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ID_AFR0_EL1 "AP_ID_AFR0_EL1"
+#define busnum_BDK_AP_ID_AFR0_EL1 0
+#define arguments_BDK_AP_ID_AFR0_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_id_dfr0_el1
+ *
+ * AP AArch32 Debug Feature Register 0
+ * Provides top level information about the debug system in
+ * AArch32.
+ * This register is RES0 on CNXXXX since we don't support 32-bit,
+ * but it still needs to exist per the architecture spec.
+ */
+union bdk_ap_id_dfr0_el1
+{
+ uint32_t u;
+ struct bdk_ap_id_dfr0_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_id_dfr0_el1_s cn; */
+};
+typedef union bdk_ap_id_dfr0_el1 bdk_ap_id_dfr0_el1_t;
+
+#define BDK_AP_ID_DFR0_EL1 BDK_AP_ID_DFR0_EL1_FUNC()
+static inline uint64_t BDK_AP_ID_DFR0_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ID_DFR0_EL1_FUNC(void)
+{
+ return 0x30000010200ll;
+}
+
+#define typedef_BDK_AP_ID_DFR0_EL1 bdk_ap_id_dfr0_el1_t
+#define bustype_BDK_AP_ID_DFR0_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ID_DFR0_EL1 "AP_ID_DFR0_EL1"
+#define busnum_BDK_AP_ID_DFR0_EL1 0
+#define arguments_BDK_AP_ID_DFR0_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_id_isar#_el1
+ *
+ * AP ARM32 Instruction Set Attribute Register
+ * Instruction set attribute register
+ */
+union bdk_ap_id_isarx_el1
+{
+ uint32_t u;
+ struct bdk_ap_id_isarx_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_id_isarx_el1_s cn; */
+};
+typedef union bdk_ap_id_isarx_el1 bdk_ap_id_isarx_el1_t;
+
+static inline uint64_t BDK_AP_ID_ISARX_EL1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ID_ISARX_EL1(unsigned long a)
+{
+ if (a<=5)
+ return 0x30000020000ll + 0x100ll * ((a) & 0x7);
+ __bdk_csr_fatal("AP_ID_ISARX_EL1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_ID_ISARX_EL1(a) bdk_ap_id_isarx_el1_t
+#define bustype_BDK_AP_ID_ISARX_EL1(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ID_ISARX_EL1(a) "AP_ID_ISARX_EL1"
+#define busnum_BDK_AP_ID_ISARX_EL1(a) (a)
+#define arguments_BDK_AP_ID_ISARX_EL1(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_id_isar#_el1_res0
+ *
+ * INTERNAL: AP ARM32 Instruction Set Attribute Register
+ *
+ * Instruction set attribute register. ARM doesn't actually assign a name to these registers, so
+ * CNXXXX made up one.
+ */
+union bdk_ap_id_isarx_el1_res0
+{
+ uint32_t u;
+ struct bdk_ap_id_isarx_el1_res0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_id_isarx_el1_res0_s cn; */
+};
+typedef union bdk_ap_id_isarx_el1_res0 bdk_ap_id_isarx_el1_res0_t;
+
+static inline uint64_t BDK_AP_ID_ISARX_EL1_RES0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ID_ISARX_EL1_RES0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX) && ((a>=6)&&(a<=7)))
+ return 0x30000020000ll + 0x100ll * ((a) & 0x7);
+ __bdk_csr_fatal("AP_ID_ISARX_EL1_RES0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_ID_ISARX_EL1_RES0(a) bdk_ap_id_isarx_el1_res0_t
+#define bustype_BDK_AP_ID_ISARX_EL1_RES0(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ID_ISARX_EL1_RES0(a) "AP_ID_ISARX_EL1_RES0"
+#define busnum_BDK_AP_ID_ISARX_EL1_RES0(a) (a)
+#define arguments_BDK_AP_ID_ISARX_EL1_RES0(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_id_isar7_el1_res0
+ *
+ * INTERNAL: AP ARM32 Instruction Set Attribute Register
+ *
+ * Instruction set attribute register. ARM doesn't actually assign a name to these registers, so
+ * CNXXXX made up one.
+ */
+union bdk_ap_id_isar7_el1_res0
+{
+ uint32_t u;
+ struct bdk_ap_id_isar7_el1_res0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_id_isar7_el1_res0_s cn; */
+};
+typedef union bdk_ap_id_isar7_el1_res0 bdk_ap_id_isar7_el1_res0_t;
+
+#define BDK_AP_ID_ISAR7_EL1_RES0 BDK_AP_ID_ISAR7_EL1_RES0_FUNC()
+static inline uint64_t BDK_AP_ID_ISAR7_EL1_RES0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ID_ISAR7_EL1_RES0_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x30000020700ll;
+ __bdk_csr_fatal("AP_ID_ISAR7_EL1_RES0", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_ID_ISAR7_EL1_RES0 bdk_ap_id_isar7_el1_res0_t
+#define bustype_BDK_AP_ID_ISAR7_EL1_RES0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ID_ISAR7_EL1_RES0 "AP_ID_ISAR7_EL1_RES0"
+#define busnum_BDK_AP_ID_ISAR7_EL1_RES0 0
+#define arguments_BDK_AP_ID_ISAR7_EL1_RES0 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_id_mmfr#_el1
+ *
+ * AP ARM32 Memory Model Feature Register
+ * ARM32 Memory model feature register
+ */
+union bdk_ap_id_mmfrx_el1
+{
+ uint32_t u;
+ struct bdk_ap_id_mmfrx_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_id_mmfrx_el1_s cn; */
+};
+typedef union bdk_ap_id_mmfrx_el1 bdk_ap_id_mmfrx_el1_t;
+
+static inline uint64_t BDK_AP_ID_MMFRX_EL1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ID_MMFRX_EL1(unsigned long a)
+{
+ if (a<=3)
+ return 0x30000010400ll + 0x100ll * ((a) & 0x3);
+ __bdk_csr_fatal("AP_ID_MMFRX_EL1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_ID_MMFRX_EL1(a) bdk_ap_id_mmfrx_el1_t
+#define bustype_BDK_AP_ID_MMFRX_EL1(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ID_MMFRX_EL1(a) "AP_ID_MMFRX_EL1"
+#define busnum_BDK_AP_ID_MMFRX_EL1(a) (a)
+#define arguments_BDK_AP_ID_MMFRX_EL1(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_id_mmfr4_el1
+ *
+ * AP AArch32 Memory Model Feature Register 4
+ * Provides additional information about implemented memory model and memory management support
+ * in AArch32.
+ *
+ * Usage constraints:
+ * ID_MMFR4_EL1 is UNDEFINED at EL0.
+ * If EL2 is implemented and HCR_EL2.TID3 == 1, then direct reads of ID_MMFR4_EL1 at Non-secure
+ * EL1 generate a Trap exception to EL2.
+ *
+ * Configurations:
+ * AArch64 System register ID_MMFR4_EL1 is architecturally mapped to AArch32 System register
+ * ID_MMFR4.
+ * In an implementation that does not include ACTLR2 and HACTLR2 this register is RAZ/WI.
+ * In an AArch64-only implementation, this register is UNKNOWN.
+ */
+union bdk_ap_id_mmfr4_el1
+{
+ uint32_t u;
+ struct bdk_ap_id_mmfr4_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t lsm : 4; /**< [ 23: 20](RO) 0x0 = LSMAOE and nTLSMD bit not supported.
+ 0x1 = LSMAOE and nTLSMD bit supported.
+
+ All other values reserved. */
+ uint32_t hpds : 4; /**< [ 19: 16](RO) V8.1: Hierarchical Permission Disables.
+ 0x0 = Hierarchical Permission Disables not supported.
+ 0x1 = Hierarchical Permission Disables supported.
+ 0x2 = Hierarchical Permission Disables and hardware allocation of bits[62:59] supported.
+
+ All other values reserved.
+
+ For CNXXXX, if AP_CVMCTL_EL1[ENABLE_V81] is set 0x1, else 0x0. */
+ uint32_t cnp : 4; /**< [ 15: 12](RO) 0x0 = CnP bit not supported.
+ 0x1 = CnP bit supported.
+
+ All other values reserved. */
+ uint32_t xnx : 4; /**< [ 11: 8](RO) 0x0 = EL0/EL1 execute control distinction at stage2 bit not supported.
+ 0x1 = EL0/EL1 execute control distinction at stage2 bit supported.
+
+ All other values reserved. */
+ uint32_t ac2 : 4; /**< [ 7: 4](RO) Indicates the extension of the ACTLR and HACTLR registers using ACTLR2 and HACTLR2.
+ 0x0 = ACTLR2 and HACTLR2 are not implemented.
+ 0x1 = ACTLR2 and HACTLR2 are implemented.
+
+ All other values are reserved. */
+ uint32_t specsei : 4; /**< [ 3: 0](RO) Describes whether the PE can generate SError interrupt exceptions from speculative reads
+ of memory, including speculative instruction fetches.
+ 0x0 = The PE never generates an SError interrupt due to an external abort on a
+ speculative read.
+ 0x1 = The PE might generate an SError interrupt due to an external abort on a
+ speculative read. */
+#else /* Word 0 - Little Endian */
+ uint32_t specsei : 4; /**< [ 3: 0](RO) Describes whether the PE can generate SError interrupt exceptions from speculative reads
+ of memory, including speculative instruction fetches.
+ 0x0 = The PE never generates an SError interrupt due to an external abort on a
+ speculative read.
+ 0x1 = The PE might generate an SError interrupt due to an external abort on a
+ speculative read. */
+ uint32_t ac2 : 4; /**< [ 7: 4](RO) Indicates the extension of the ACTLR and HACTLR registers using ACTLR2 and HACTLR2.
+ 0x0 = ACTLR2 and HACTLR2 are not implemented.
+ 0x1 = ACTLR2 and HACTLR2 are implemented.
+
+ All other values are reserved. */
+ uint32_t xnx : 4; /**< [ 11: 8](RO) 0x0 = EL0/EL1 execute control distinction at stage2 bit not supported.
+ 0x1 = EL0/EL1 execute control distinction at stage2 bit supported.
+
+ All other values reserved. */
+ uint32_t cnp : 4; /**< [ 15: 12](RO) 0x0 = CnP bit not supported.
+ 0x1 = CnP bit supported.
+
+ All other values reserved. */
+ uint32_t hpds : 4; /**< [ 19: 16](RO) V8.1: Hierarchical Permission Disables.
+ 0x0 = Hierarchical Permission Disables not supported.
+ 0x1 = Hierarchical Permission Disables supported.
+ 0x2 = Hierarchical Permission Disables and hardware allocation of bits[62:59] supported.
+
+ All other values reserved.
+
+ For CNXXXX, if AP_CVMCTL_EL1[ENABLE_V81] is set 0x1, else 0x0. */
+ uint32_t lsm : 4; /**< [ 23: 20](RO) 0x0 = LSMAOE and nTLSMD bit not supported.
+ 0x1 = LSMAOE and nTLSMD bit supported.
+
+ All other values reserved. */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_id_mmfr4_el1_s cn; */
+};
+typedef union bdk_ap_id_mmfr4_el1 bdk_ap_id_mmfr4_el1_t;
+
+#define BDK_AP_ID_MMFR4_EL1 BDK_AP_ID_MMFR4_EL1_FUNC()
+static inline uint64_t BDK_AP_ID_MMFR4_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ID_MMFR4_EL1_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x30000020600ll;
+ __bdk_csr_fatal("AP_ID_MMFR4_EL1", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_ID_MMFR4_EL1 bdk_ap_id_mmfr4_el1_t
+#define bustype_BDK_AP_ID_MMFR4_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ID_MMFR4_EL1 "AP_ID_MMFR4_EL1"
+#define busnum_BDK_AP_ID_MMFR4_EL1 0
+#define arguments_BDK_AP_ID_MMFR4_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_id_pfr0_el1
+ *
+ * AP AArch32 Processor Feature Register 0
+ * Gives top-level information about the instruction sets
+ * supported by the processor in AArch32.
+ */
+union bdk_ap_id_pfr0_el1
+{
+ uint32_t u;
+ struct bdk_ap_id_pfr0_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ras : 4; /**< [ 31: 28](RO) RAS extension version. The possible values of this field are:
+ 0x0 = No RAS extension.
+ 0x1 = Version 1 of the RAS extension present.
+
+ All other values are reserved. Reserved values might be defined in a future version of the
+ architecture. */
+ uint32_t reserved_16_27 : 12;
+ uint32_t state3 : 4; /**< [ 15: 12](RO) T32EE instruction set support.
+ All other values are reserved.
+ 0x0 = Not implemented.
+ 0x1 = T32EE instruction set implemented. */
+ uint32_t state2 : 4; /**< [ 11: 8](RO) Jazelle extension support.
+ All other values are reserved.
+ 0x0 = Not implemented.
+ 0x1 = Jazelle extension implemented, without clearing of JOSCR[CV] on
+ exception entry.
+ 0x2 = Jazelle extension implemented, with clearing of JOSCR[CV] on
+ exception entry. */
+ uint32_t state1 : 4; /**< [ 7: 4](RO) T32 instruction set support.
+ All other values are reserved.
+ 0x0 = T32 instruction set not implemented.
+
+ 0x1 = T32 encodings before the introduction of Thumb-2 technology
+ implemented:
+ All instructions are 16-bit.
+ A BL or BLX is a pair of 16-bit instructions.
+ 32-bit instructions other than BL and BLX cannot be encoded.
+
+ 0x3 = T32 encodings after the introduction of Thumb-2 technology
+ implemented, for all 16-bit and 32-bit T32 basic instructions. */
+ uint32_t state0 : 4; /**< [ 3: 0](RO) A32 instruction set support.
+ All other values are reserved.
+ 0x0 = A32 instruction set not implemented.
+ 0x1 = A32 instruction set implemented. */
+#else /* Word 0 - Little Endian */
+ uint32_t state0 : 4; /**< [ 3: 0](RO) A32 instruction set support.
+ All other values are reserved.
+ 0x0 = A32 instruction set not implemented.
+ 0x1 = A32 instruction set implemented. */
+ uint32_t state1 : 4; /**< [ 7: 4](RO) T32 instruction set support.
+ All other values are reserved.
+ 0x0 = T32 instruction set not implemented.
+
+ 0x1 = T32 encodings before the introduction of Thumb-2 technology
+ implemented:
+ All instructions are 16-bit.
+ A BL or BLX is a pair of 16-bit instructions.
+ 32-bit instructions other than BL and BLX cannot be encoded.
+
+ 0x3 = T32 encodings after the introduction of Thumb-2 technology
+ implemented, for all 16-bit and 32-bit T32 basic instructions. */
+ uint32_t state2 : 4; /**< [ 11: 8](RO) Jazelle extension support.
+ All other values are reserved.
+ 0x0 = Not implemented.
+ 0x1 = Jazelle extension implemented, without clearing of JOSCR[CV] on
+ exception entry.
+ 0x2 = Jazelle extension implemented, with clearing of JOSCR[CV] on
+ exception entry. */
+ uint32_t state3 : 4; /**< [ 15: 12](RO) T32EE instruction set support.
+ All other values are reserved.
+ 0x0 = Not implemented.
+ 0x1 = T32EE instruction set implemented. */
+ uint32_t reserved_16_27 : 12;
+ uint32_t ras : 4; /**< [ 31: 28](RO) RAS extension version. The possible values of this field are:
+ 0x0 = No RAS extension.
+ 0x1 = Version 1 of the RAS extension present.
+
+ All other values are reserved. Reserved values might be defined in a future version of the
+ architecture. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_id_pfr0_el1_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_16_31 : 16;
+ uint32_t state3 : 4; /**< [ 15: 12](RO) T32EE instruction set support.
+ All other values are reserved.
+ 0x0 = Not implemented.
+ 0x1 = T32EE instruction set implemented. */
+ uint32_t state2 : 4; /**< [ 11: 8](RO) Jazelle extension support.
+ All other values are reserved.
+ 0x0 = Not implemented.
+ 0x1 = Jazelle extension implemented, without clearing of JOSCR[CV] on
+ exception entry.
+ 0x2 = Jazelle extension implemented, with clearing of JOSCR[CV] on
+ exception entry. */
+ uint32_t state1 : 4; /**< [ 7: 4](RO) T32 instruction set support.
+ All other values are reserved.
+ 0x0 = T32 instruction set not implemented.
+
+ 0x1 = T32 encodings before the introduction of Thumb-2 technology
+ implemented:
+ All instructions are 16-bit.
+ A BL or BLX is a pair of 16-bit instructions.
+ 32-bit instructions other than BL and BLX cannot be encoded.
+
+ 0x3 = T32 encodings after the introduction of Thumb-2 technology
+ implemented, for all 16-bit and 32-bit T32 basic instructions. */
+ uint32_t state0 : 4; /**< [ 3: 0](RO) A32 instruction set support.
+ All other values are reserved.
+ 0x0 = A32 instruction set not implemented.
+ 0x1 = A32 instruction set implemented. */
+#else /* Word 0 - Little Endian */
+ uint32_t state0 : 4; /**< [ 3: 0](RO) A32 instruction set support.
+ All other values are reserved.
+ 0x0 = A32 instruction set not implemented.
+ 0x1 = A32 instruction set implemented. */
+ uint32_t state1 : 4; /**< [ 7: 4](RO) T32 instruction set support.
+ All other values are reserved.
+ 0x0 = T32 instruction set not implemented.
+
+ 0x1 = T32 encodings before the introduction of Thumb-2 technology
+ implemented:
+ All instructions are 16-bit.
+ A BL or BLX is a pair of 16-bit instructions.
+ 32-bit instructions other than BL and BLX cannot be encoded.
+
+ 0x3 = T32 encodings after the introduction of Thumb-2 technology
+ implemented, for all 16-bit and 32-bit T32 basic instructions. */
+ uint32_t state2 : 4; /**< [ 11: 8](RO) Jazelle extension support.
+ All other values are reserved.
+ 0x0 = Not implemented.
+ 0x1 = Jazelle extension implemented, without clearing of JOSCR[CV] on
+ exception entry.
+ 0x2 = Jazelle extension implemented, with clearing of JOSCR[CV] on
+ exception entry. */
+ uint32_t state3 : 4; /**< [ 15: 12](RO) T32EE instruction set support.
+ All other values are reserved.
+ 0x0 = Not implemented.
+ 0x1 = T32EE instruction set implemented. */
+ uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_ap_id_pfr0_el1_s cn9; */
+};
+typedef union bdk_ap_id_pfr0_el1 bdk_ap_id_pfr0_el1_t;
+
+#define BDK_AP_ID_PFR0_EL1 BDK_AP_ID_PFR0_EL1_FUNC()
+static inline uint64_t BDK_AP_ID_PFR0_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ID_PFR0_EL1_FUNC(void)
+{
+ return 0x30000010000ll;
+}
+
+#define typedef_BDK_AP_ID_PFR0_EL1 bdk_ap_id_pfr0_el1_t
+#define bustype_BDK_AP_ID_PFR0_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ID_PFR0_EL1 "AP_ID_PFR0_EL1"
+#define busnum_BDK_AP_ID_PFR0_EL1 0
+#define arguments_BDK_AP_ID_PFR0_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_id_pfr1_el1
+ *
+ * AP AArch32 Processor Feature Register 1
+ * Gives information about the programmers' model and extensions
+ * support in AArch32.
+ */
+union bdk_ap_id_pfr1_el1
+{
+ uint32_t u;
+ struct bdk_ap_id_pfr1_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t gic : 4; /**< [ 31: 28](RO) GIC CP15 interface.
+ All other values are reserved.
+ 0x0 = No GIC CP15 registers are supported.
+ 0x1 = GICv3 CP15 registers are supported. */
+    uint32_t virt_frac : 4; /**< [ 27: 24](RO) Virtualization fractional field. Indicates support for features
+                                                                 from the ARMv7 Virtualization Extensions when the Virtualization
+                                                                 field is 0b0000.
+                                                                 All other values are reserved.
+                                                                 This field is only valid when AP_ID_PFR1_EL1[15:12] == 0,
+                                                                 otherwise it holds the value 0b0000.
+                                                                 0x0 = No features from the ARMv7 Virtualization Extensions are
+                                                                 implemented.
+                                                                 0x1 = The SCR[SIF] bit is implemented. The modifications to the
+                                                                 SCR[AW] and SCR[FW] bits are part of the control of whether the
+                                                                 CPSR[A] and CPSR[F] bits mask the corresponding aborts. The MSR
+                                                                 (Banked register) and MRS (Banked register) instructions are
+                                                                 implemented.
+                                                                 This value is permitted only when AP_ID_PFR1_EL1[Security] is
+                                                                 not 0b0000. */
+    uint32_t sec_frac : 4; /**< [ 23: 20](RO) Security fractional field. Indicates support for features from
+                                                                 the ARMv7 Security Extensions when the Security field is 0b0000.
+                                                                 All other values are reserved.
+                                                                 This field is only valid when AP_ID_PFR1_EL1[7:4] == 0, otherwise
+                                                                 it holds the value 0b0000.
+                                                                 0x0 = No features from the ARMv7 Security Extensions are
+                                                                 implemented.
+                                                                 0x1 = The implementation includes the VBAR, and the TCR[PD0] and
+                                                                 TCR[PD1] bits.
+                                                                 0x2 = As for 0x1. */
+ uint32_t gentimer : 4; /**< [ 19: 16](RO) Generic Timer Extension support.
+ All other values are reserved.
+ 0x0 = Not implemented.
+ 0x1 = Generic Timer Extension implemented. */
+    uint32_t virtualization : 4; /**< [ 15: 12](RO) Virtualization support.
+                                                                 All other values are reserved.
+                                                                 0x0 = EL2 not implemented.
+                                                                 0x1 = EL2 implemented. */
+ uint32_t mprogmod : 4; /**< [ 11: 8](RO) M profile programmers' model support.
+ All other values are reserved.
+ 0x0 = Not supported.
+ 0x2 = Support for two-stack programmers' model. */
+    uint32_t security : 4; /**< [ 7: 4](RO) Security support.
+                                                                 All other values are reserved.
+                                                                 0x0 = EL3 not implemented.
+                                                                 0x1 = EL3 implemented.
+                                                                 This includes support for Monitor mode and the SMC
+                                                                 instruction.
+                                                                 0x2 = As for 0x1, plus the ability to set the NSACR[RFR] bit.
+                                                                 Not permitted in ARMv8 as the NSACR[RFR] bit is RES0. */
+ uint32_t progmod : 4; /**< [ 3: 0](RO) Support for the standard programmers' model for ARMv4 and
+ later. Model must support User, FIQ, IRQ, Supervisor, Abort,
+ Undefined, and System modes.
+ All other values are reserved.
+ 0x0 = Not supported.
+ 0x1 = Supported. */
+#else /* Word 0 - Little Endian */
+ uint32_t progmod : 4; /**< [ 3: 0](RO) Support for the standard programmers' model for ARMv4 and
+ later. Model must support User, FIQ, IRQ, Supervisor, Abort,
+ Undefined, and System modes.
+ All other values are reserved.
+ 0x0 = Not supported.
+ 0x1 = Supported. */
+    uint32_t security : 4; /**< [ 7: 4](RO) Security support.
+                                                                 All other values are reserved.
+                                                                 0x0 = EL3 not implemented.
+                                                                 0x1 = EL3 implemented.
+                                                                 This includes support for Monitor mode and the SMC
+                                                                 instruction.
+                                                                 0x2 = As for 0x1, plus the ability to set the NSACR[RFR] bit.
+                                                                 Not permitted in ARMv8 as the NSACR[RFR] bit is RES0. */
+ uint32_t mprogmod : 4; /**< [ 11: 8](RO) M profile programmers' model support.
+ All other values are reserved.
+ 0x0 = Not supported.
+ 0x2 = Support for two-stack programmers' model. */
+    uint32_t virtualization : 4; /**< [ 15: 12](RO) Virtualization support.
+                                                                 All other values are reserved.
+                                                                 0x0 = EL2 not implemented.
+                                                                 0x1 = EL2 implemented. */
+ uint32_t gentimer : 4; /**< [ 19: 16](RO) Generic Timer Extension support.
+ All other values are reserved.
+ 0x0 = Not implemented.
+ 0x1 = Generic Timer Extension implemented. */
+    uint32_t sec_frac : 4; /**< [ 23: 20](RO) Security fractional field. Indicates support for features from
+                                                                 the ARMv7 Security Extensions when the Security field is 0b0000.
+                                                                 All other values are reserved.
+                                                                 This field is only valid when AP_ID_PFR1_EL1[7:4] == 0, otherwise
+                                                                 it holds the value 0b0000.
+                                                                 0x0 = No features from the ARMv7 Security Extensions are
+                                                                 implemented.
+                                                                 0x1 = The implementation includes the VBAR, and the TCR[PD0] and
+                                                                 TCR[PD1] bits.
+                                                                 0x2 = As for 0x1. */
+    uint32_t virt_frac : 4; /**< [ 27: 24](RO) Virtualization fractional field. Indicates support for features
+                                                                 from the ARMv7 Virtualization Extensions when the Virtualization
+                                                                 field is 0b0000.
+                                                                 All other values are reserved.
+                                                                 This field is only valid when AP_ID_PFR1_EL1[15:12] == 0,
+                                                                 otherwise it holds the value 0b0000.
+                                                                 0x0 = No features from the ARMv7 Virtualization Extensions are
+                                                                 implemented.
+                                                                 0x1 = The SCR[SIF] bit is implemented. The modifications to the
+                                                                 SCR[AW] and SCR[FW] bits are part of the control of whether the
+                                                                 CPSR[A] and CPSR[F] bits mask the corresponding aborts. The MSR
+                                                                 (Banked register) and MRS (Banked register) instructions are
+                                                                 implemented.
+                                                                 This value is permitted only when AP_ID_PFR1_EL1[Security] is
+                                                                 not 0b0000. */
+ uint32_t gic : 4; /**< [ 31: 28](RO) GIC CP15 interface.
+ All other values are reserved.
+ 0x0 = No GIC CP15 registers are supported.
+ 0x1 = GICv3 CP15 registers are supported. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_id_pfr1_el1_s cn; */
+};
+typedef union bdk_ap_id_pfr1_el1 bdk_ap_id_pfr1_el1_t;
+
+#define BDK_AP_ID_PFR1_EL1 BDK_AP_ID_PFR1_EL1_FUNC()
+static inline uint64_t BDK_AP_ID_PFR1_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ID_PFR1_EL1_FUNC(void)
+{
+ return 0x30000010100ll;
+}
+
+#define typedef_BDK_AP_ID_PFR1_EL1 bdk_ap_id_pfr1_el1_t
+#define bustype_BDK_AP_ID_PFR1_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ID_PFR1_EL1 "AP_ID_PFR1_EL1"
+#define busnum_BDK_AP_ID_PFR1_EL1 0
+#define arguments_BDK_AP_ID_PFR1_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_ifsr32_el2
+ *
+ * AP Instruction Fault Status EL2 Register
+ * Allows access to the AArch32 IFSR register from AArch64 state
+ * only. Its value has no effect on execution in AArch64 state.
+ */
+union bdk_ap_ifsr32_el2
+{
+ uint32_t u;
+ struct bdk_ap_ifsr32_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_ifsr32_el2_cn
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_13_31 : 19;
+ uint32_t reserved_12 : 1;
+ uint32_t reserved_11 : 1;
+ uint32_t reserved_10 : 1;
+ uint32_t reserved_9 : 1;
+ uint32_t reserved_4_8 : 5;
+ uint32_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_3 : 4;
+ uint32_t reserved_4_8 : 5;
+ uint32_t reserved_9 : 1;
+ uint32_t reserved_10 : 1;
+ uint32_t reserved_11 : 1;
+ uint32_t reserved_12 : 1;
+ uint32_t reserved_13_31 : 19;
+#endif /* Word 0 - End */
+ } cn;
+};
+typedef union bdk_ap_ifsr32_el2 bdk_ap_ifsr32_el2_t;
+
+#define BDK_AP_IFSR32_EL2 BDK_AP_IFSR32_EL2_FUNC()
+static inline uint64_t BDK_AP_IFSR32_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_IFSR32_EL2_FUNC(void)
+{
+ return 0x30405000100ll;
+}
+
+#define typedef_BDK_AP_IFSR32_EL2 bdk_ap_ifsr32_el2_t
+#define bustype_BDK_AP_IFSR32_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_IFSR32_EL2 "AP_IFSR32_EL2"
+#define busnum_BDK_AP_IFSR32_EL2 0
+#define arguments_BDK_AP_IFSR32_EL2 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_isr_el1
+ *
+ * AP Interrupt Status Register
+ * Shows whether an IRQ, FIQ, or SError interrupt is pending. If
+ * EL2 is implemented, an indicated pending interrupt might be a
+ * physical interrupt or a virtual interrupt.
+ */
+union bdk_ap_isr_el1
+{
+ uint32_t u;
+ struct bdk_ap_isr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_9_31 : 23;
+ uint32_t aa : 1; /**< [ 8: 8](RO) SError pending bit:
+ 0 = No pending SError.
+ 1 = An SError interrupt is pending. */
+ uint32_t i : 1; /**< [ 7: 7](RO) IRQ pending bit. Indicates whether an IRQ interrupt is
+ pending:
+ 0 = No pending IRQ.
+ 1 = An IRQ interrupt is pending. */
+ uint32_t f : 1; /**< [ 6: 6](RO) FIQ pending bit. Indicates whether an FIQ interrupt is
+ pending.
+ 0 = No pending FIQ.
+ 1 = An FIQ interrupt is pending. */
+ uint32_t reserved_0_5 : 6;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_5 : 6;
+ uint32_t f : 1; /**< [ 6: 6](RO) FIQ pending bit. Indicates whether an FIQ interrupt is
+ pending.
+ 0 = No pending FIQ.
+ 1 = An FIQ interrupt is pending. */
+ uint32_t i : 1; /**< [ 7: 7](RO) IRQ pending bit. Indicates whether an IRQ interrupt is
+ pending:
+ 0 = No pending IRQ.
+ 1 = An IRQ interrupt is pending. */
+ uint32_t aa : 1; /**< [ 8: 8](RO) SError pending bit:
+ 0 = No pending SError.
+ 1 = An SError interrupt is pending. */
+ uint32_t reserved_9_31 : 23;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_isr_el1_s cn; */
+};
+typedef union bdk_ap_isr_el1 bdk_ap_isr_el1_t;
+
+#define BDK_AP_ISR_EL1 BDK_AP_ISR_EL1_FUNC()
+static inline uint64_t BDK_AP_ISR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_ISR_EL1_FUNC(void)
+{
+ return 0x3000c010000ll;
+}
+
+#define typedef_BDK_AP_ISR_EL1 bdk_ap_isr_el1_t
+#define bustype_BDK_AP_ISR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_ISR_EL1 "AP_ISR_EL1"
+#define busnum_BDK_AP_ISR_EL1 0
+#define arguments_BDK_AP_ISR_EL1 -1,-1,-1,-1
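+
+/* Illustrative sketch (editor's note, not part of the imported BDK code):
+ * polling AP_ISR_EL1 for any pending interrupt using the bitfield view
+ * above. BDK_CSR_READ() from bdk-csr.h and bdk_numa_local() from
+ * bdk-numa.h are assumed; the function name is hypothetical. */
+static inline int example_ap_isr_pending(void)
+{
+    bdk_ap_isr_el1_t isr;
+    isr.u = BDK_CSR_READ(bdk_numa_local(), BDK_AP_ISR_EL1);
+    return isr.s.aa || isr.s.i || isr.s.f; /* SError, IRQ, or FIQ pending */
+}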
+
+/**
+ * Register (SYSREG) ap_lorc_el1
+ *
+ * AP LORegion Control (v8.1) Register
+ * v8.1: LORegion Control, being a 64-bit read/write register that is
+ * accessible from EL1 or above.
+ *
+ * When the AP_LORC_EL1[EN] bit is 0, no accesses match an LORegion.
+ * Note: a consequence is that if AP_LORID_EL1 indicates that no
+ * LORegions are implemented, LoadLOAcquire and StoreLORelease
+ * behave as LoadAcquire and StoreRelease.
+ *
+ * The AP_LORC_EL1[EN] bit is permitted to be cached within a TLB.
+ * Note: In keeping with the other system registers in the ARMv8
+ * architecture, the LORC register must be explicitly synchronised for
+ * changes in the AP_LORC_EL1[DS] field to take effect.
+ */
+union bdk_ap_lorc_el1
+{
+ uint64_t u;
+ struct bdk_ap_lorc_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t ds : 3; /**< [ 4: 2](R/W) Descriptor Select, being a number that selects the current LOR
+ Descriptor accessed by the AP_LORSA_EL1, AP_LOREA_EL1, AP_LORN_EL1
+ registers. If AP_LORC_EL1[DS] points to a LOR Descriptor that is
+ not supported by an implementation, then AP_LOREA_EL1, AP_LORSA_EL1
+ and AP_LORN_EL1 are RES0. */
+ uint64_t reserved_1 : 1;
+ uint64_t en : 1; /**< [ 0: 0](R/W) Enable.
+ 0 = Disabled (reset value).
+ 1 = Enabled. */
+#else /* Word 0 - Little Endian */
+ uint64_t en : 1; /**< [ 0: 0](R/W) Enable.
+ 0 = Disabled (reset value).
+ 1 = Enabled. */
+ uint64_t reserved_1 : 1;
+ uint64_t ds : 3; /**< [ 4: 2](R/W) Descriptor Select, being a number that selects the current LOR
+ Descriptor accessed by the AP_LORSA_EL1, AP_LOREA_EL1, AP_LORN_EL1
+ registers. If AP_LORC_EL1[DS] points to a LOR Descriptor that is
+ not supported by an implementation, then AP_LOREA_EL1, AP_LORSA_EL1
+ and AP_LORN_EL1 are RES0. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_lorc_el1_s cn; */
+};
+typedef union bdk_ap_lorc_el1 bdk_ap_lorc_el1_t;
+
+#define BDK_AP_LORC_EL1 BDK_AP_LORC_EL1_FUNC()
+static inline uint64_t BDK_AP_LORC_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_LORC_EL1_FUNC(void)
+{
+ return 0x3000a040300ll;
+}
+
+#define typedef_BDK_AP_LORC_EL1 bdk_ap_lorc_el1_t
+#define bustype_BDK_AP_LORC_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_LORC_EL1 "AP_LORC_EL1"
+#define busnum_BDK_AP_LORC_EL1 0
+#define arguments_BDK_AP_LORC_EL1 -1,-1,-1,-1
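+
+/* Illustrative sketch (editor's note, not part of the imported BDK code):
+ * selecting LOR descriptor 0 and enabling LORegion matching, with the
+ * explicit synchronization the note above calls for. BDK_CSR_WRITE()
+ * from bdk-csr.h and bdk_numa_local() from bdk-numa.h are assumed; the
+ * function name is hypothetical. */
+static inline void example_ap_lorc_enable(void)
+{
+    bdk_ap_lorc_el1_t lorc = { .u = 0 };
+    lorc.s.ds = 0; /* operate on LOR descriptor 0 */
+    lorc.s.en = 1; /* enable LORegion matching */
+    BDK_CSR_WRITE(bdk_numa_local(), BDK_AP_LORC_EL1, lorc.u);
+    asm volatile ("isb" ::: "memory"); /* make the change take effect */
+}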
+
+/**
+ * Register (SYSREG) ap_lorea_el1
+ *
+ * AP LORegion End Address (v8.1) Register
+ * v8.1: LORegion End Address, being a 64-bit read/write register that is
+ * accessible from EL1 or above.
+ * The AP_LOREA_EL1 is permitted to be cached in a TLB.
+ * If AP_LORSA_EL1[SA] \> AP_LOREA_EL1[EA] for a LOR
+ * Descriptor, then that LOR Descriptor does not match any LORegion.
+ */
+union bdk_ap_lorea_el1
+{
+ uint64_t u;
+ struct bdk_ap_lorea_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t ea : 32; /**< [ 47: 16](R/W) End physical address bits \<47:16\>. Bits\<15:0\> of the end address
+ are defined to be 0xFFFF. */
+ uint64_t reserved_0_15 : 16;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_15 : 16;
+ uint64_t ea : 32; /**< [ 47: 16](R/W) End physical address bits \<47:16\>. Bits\<15:0\> of the end address
+ are defined to be 0xFFFF. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_lorea_el1_s cn; */
+};
+typedef union bdk_ap_lorea_el1 bdk_ap_lorea_el1_t;
+
+#define BDK_AP_LOREA_EL1 BDK_AP_LOREA_EL1_FUNC()
+static inline uint64_t BDK_AP_LOREA_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_LOREA_EL1_FUNC(void)
+{
+ return 0x3000a040100ll;
+}
+
+#define typedef_BDK_AP_LOREA_EL1 bdk_ap_lorea_el1_t
+#define bustype_BDK_AP_LOREA_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_LOREA_EL1 "AP_LOREA_EL1"
+#define busnum_BDK_AP_LOREA_EL1 0
+#define arguments_BDK_AP_LOREA_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_lorid_el1
+ *
+ * AP LORegionID (v8.1) Register
+ * v8.1: The LORegionID register reports how many LORegions
+ * and LOR Descriptors are supported by the system.
+ * The AP_LORID_EL1 register is a 64-bit Read-only register accessible from
+ * EL1 and above.
+ * If no LOR Descriptors are implemented then the AP_LORC_EL1, AP_LORN_EL1,
+ * AP_LORSA_EL1 and AP_LOREA_EL1 registers are RES0.
+ */
+union bdk_ap_lorid_el1
+{
+ uint64_t u;
+ struct bdk_ap_lorid_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t ld : 8; /**< [ 23: 16](RO) Number of LOR Descriptors supported by the implementation,
+ expressed as binary 8 bit number. */
+ uint64_t reserved_8_15 : 8;
+ uint64_t lr : 8; /**< [ 7: 0](RO) Number of LORegions supported by the implementation, expressed as
+ binary 8 bit number. */
+#else /* Word 0 - Little Endian */
+ uint64_t lr : 8; /**< [ 7: 0](RO) Number of LORegions supported by the implementation, expressed as
+ binary 8 bit number. */
+ uint64_t reserved_8_15 : 8;
+ uint64_t ld : 8; /**< [ 23: 16](RO) Number of LOR Descriptors supported by the implementation,
+ expressed as binary 8 bit number. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_lorid_el1_s cn; */
+};
+typedef union bdk_ap_lorid_el1 bdk_ap_lorid_el1_t;
+
+#define BDK_AP_LORID_EL1 BDK_AP_LORID_EL1_FUNC()
+static inline uint64_t BDK_AP_LORID_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_LORID_EL1_FUNC(void)
+{
+ return 0x3000a040700ll;
+}
+
+#define typedef_BDK_AP_LORID_EL1 bdk_ap_lorid_el1_t
+#define bustype_BDK_AP_LORID_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_LORID_EL1 "AP_LORID_EL1"
+#define busnum_BDK_AP_LORID_EL1 0
+#define arguments_BDK_AP_LORID_EL1 -1,-1,-1,-1
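+
+/* Illustrative sketch (editor's note, not part of the imported BDK code):
+ * discovering LOR support; both counts read as zero when no LOR
+ * Descriptors are implemented, in which case the note above says the
+ * other LOR registers are RES0. BDK_CSR_READ() and bdk_numa_local()
+ * are assumed; the function name is hypothetical. */
+static inline unsigned example_ap_lorid_num_regions(void)
+{
+    bdk_ap_lorid_el1_t lorid;
+    lorid.u = BDK_CSR_READ(bdk_numa_local(), BDK_AP_LORID_EL1);
+    return lorid.s.lr; /* lorid.s.ld gives the LOR Descriptor count */
+}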
+
+/**
+ * Register (SYSREG) ap_lorn_el1
+ *
+ * AP LORegion Number (v8.1) Register
+ * v8.1: LORegion Number, being a 64-bit read/write register that is
+ * accessible from EL1 or above.
+ * The AP_LORN_EL1 is permitted to be cached in a TLB.
+ * If the AP_LORN_EL1[NUM] field points to a LORegion that is not supported
+ * by the implementation, then that LOR Descriptor does not match any LORegion.
+ */
+union bdk_ap_lorn_el1
+{
+ uint64_t u;
+ struct bdk_ap_lorn_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t num : 2; /**< [ 1: 0](R/W) LORegion Number.
+
+ For CNXXXX region number 0 is special and matches all physical
+ addresses. */
+#else /* Word 0 - Little Endian */
+ uint64_t num : 2; /**< [ 1: 0](R/W) LORegion Number.
+
+ For CNXXXX region number 0 is special and matches all physical
+ addresses. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_lorn_el1_s cn; */
+};
+typedef union bdk_ap_lorn_el1 bdk_ap_lorn_el1_t;
+
+#define BDK_AP_LORN_EL1 BDK_AP_LORN_EL1_FUNC()
+static inline uint64_t BDK_AP_LORN_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_LORN_EL1_FUNC(void)
+{
+ return 0x3000a040200ll;
+}
+
+#define typedef_BDK_AP_LORN_EL1 bdk_ap_lorn_el1_t
+#define bustype_BDK_AP_LORN_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_LORN_EL1 "AP_LORN_EL1"
+#define busnum_BDK_AP_LORN_EL1 0
+#define arguments_BDK_AP_LORN_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_lorsa_el1
+ *
+ * AP LORegion Start Address (v8.1) Register
+ * v8.1: LORegion Start Address, being a 64-bit read/write register that is
+ * accessible from EL1 or above.
+ * The AP_LORSA_EL1 is permitted to be cached in a TLB.
+ */
+union bdk_ap_lorsa_el1
+{
+ uint64_t u;
+ struct bdk_ap_lorsa_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t sa : 32; /**< [ 47: 16](R/W) Start physical address bits \<47:16\>. Bits\<15:0\> of the start
+ physical address are defined to be 0x0. */
+ uint64_t reserved_1_15 : 15;
+ uint64_t valid : 1; /**< [ 0: 0](R/W) Indicates whether the LORegion Descriptor is enabled.
+ 0 = Not valid (reset value).
+ 1 = Valid. */
+#else /* Word 0 - Little Endian */
+ uint64_t valid : 1; /**< [ 0: 0](R/W) Indicates whether the LORegion Descriptor is enabled.
+ 0 = Not valid (reset value).
+ 1 = Valid. */
+ uint64_t reserved_1_15 : 15;
+ uint64_t sa : 32; /**< [ 47: 16](R/W) Start physical address bits \<47:16\>. Bits\<15:0\> of the start
+ physical address are defined to be 0x0. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_lorsa_el1_s cn; */
+};
+typedef union bdk_ap_lorsa_el1 bdk_ap_lorsa_el1_t;
+
+#define BDK_AP_LORSA_EL1 BDK_AP_LORSA_EL1_FUNC()
+static inline uint64_t BDK_AP_LORSA_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_LORSA_EL1_FUNC(void)
+{
+ return 0x3000a040000ll;
+}
+
+#define typedef_BDK_AP_LORSA_EL1 bdk_ap_lorsa_el1_t
+#define bustype_BDK_AP_LORSA_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_LORSA_EL1 "AP_LORSA_EL1"
+#define busnum_BDK_AP_LORSA_EL1 0
+#define arguments_BDK_AP_LORSA_EL1 -1,-1,-1,-1
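+
+/* Illustrative sketch (editor's note, not part of the imported BDK code):
+ * programming one complete LOR descriptor with the registers above.
+ * The descriptor is assumed to have been selected via AP_LORC_EL1[DS];
+ * start/end addresses are 64 KB granular, per the SA/EA field notes.
+ * BDK_CSR_WRITE(), bdk_node_t and bdk_numa_local() are assumed from
+ * bdk-csr.h/bdk-numa.h; the function name is hypothetical. */
+static inline void example_ap_lor_program(uint64_t start_pa, uint64_t end_pa,
+                                          unsigned region_num)
+{
+    bdk_node_t node = bdk_numa_local();
+    bdk_ap_lorn_el1_t  rn = { .u = 0 };
+    bdk_ap_lorea_el1_t ea = { .u = 0 };
+    bdk_ap_lorsa_el1_t sa = { .u = 0 };
+    rn.s.num = region_num;
+    ea.s.ea = end_pa >> 16;   /* bits <15:0> of the end address are 0xFFFF */
+    sa.s.sa = start_pa >> 16; /* bits <15:0> of the start address are 0x0 */
+    sa.s.valid = 1;           /* mark the descriptor valid */
+    BDK_CSR_WRITE(node, BDK_AP_LORN_EL1, rn.u);
+    BDK_CSR_WRITE(node, BDK_AP_LOREA_EL1, ea.u);
+    BDK_CSR_WRITE(node, BDK_AP_LORSA_EL1, sa.u);
+}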
+
+/**
+ * Register (SYSREG) ap_mair_el#
+ *
+ * AP Memory Attribute Indirection Register
+ * Provides the memory attribute encodings corresponding to the
+ * possible AttrIndx values in a Long-descriptor format
+ * translation table entry for stage 1 translations at the given EL.
+ */
+union bdk_ap_mair_elx
+{
+ uint64_t u;
+ struct bdk_ap_mair_elx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_mair_elx_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t attr_n : 64; /**< [ 63: 0](R/W) The memory attribute encoding for an AttrIndx[2:0] entry in a
+ Long descriptor format translation table entry, where
+ AttrIndx[2:0] gives the value of \<n\> in Attr\<n\>.
+
+ Bits [7:4] are encoded as follows:
+
+ Attr\<n\>[7:4] Meaning
+ 0b0000 Device memory. See encoding of Attr\<n\>[3:0] for the type of Device memory.
+ 0b00RW, RW not 0b00 Normal Memory, Outer Write-through transient
+ 0b0100 Normal Memory, Outer Non-Cacheable
+ 0b01RW, RW not 0b00 Normal Memory, Outer Write-back transient
+ 0b10RW Normal Memory, Outer Write-through non-transient
+ 0b11RW Normal Memory, Outer Write-back non-transient
+
+ R = Outer Read Allocate Policy, W = Outer Write Allocate
+ Policy.
+
+ The meaning of bits [3:0] depends on the value of bits [7:4]:
+
+ Attr\<n\>[3:0] Meaning when Attr\<n\>[7:4] is 0000 Meaning when Attr\<n\>[7:4] is
+ not 0000
+ 0b0000 Device-nGnRnE memory UNPREDICTABLE
+ 0b00RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through transient
+ 0b0100 Device-nGnRE memory Normal Memory, Inner Non-Cacheable
+ 0b01RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back transient
+ 0b1000 Device-nGRE memory Normal Memory, Inner Write-through non-transient (RW=00)
+ 0b10RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through non-transient
+ 0b1100 Device-GRE memory Normal Memory, Inner Write-back non-transient (RW=00)
+ 0b11RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back non-transient
+
+ R = Inner Read Allocate Policy, W = Inner Write Allocate
+ Policy.
+
+ ARMv7's Strongly-ordered and Device memory types have been
+ renamed to Device-nGnRnE and Device-nGnRE in ARMv8.
+
+ The R and W bits in some Attr\<n\> fields have the following
+ meanings:
+
+ R or W Meaning
+ 0 = Do not allocate.
+ 1 = Allocate. */
+#else /* Word 0 - Little Endian */
+ uint64_t attr_n : 64; /**< [ 63: 0](R/W) The memory attribute encoding for an AttrIndx[2:0] entry in a
+ Long descriptor format translation table entry, where
+ AttrIndx[2:0] gives the value of \<n\> in Attr\<n\>.
+
+ Bits [7:4] are encoded as follows:
+
+ Attr\<n\>[7:4] Meaning
+ 0b0000 Device memory. See encoding of Attr\<n\>[3:0] for the type of Device memory.
+ 0b00RW, RW not 0b00 Normal Memory, Outer Write-through transient
+ 0b0100 Normal Memory, Outer Non-Cacheable
+ 0b01RW, RW not 0b00 Normal Memory, Outer Write-back transient
+ 0b10RW Normal Memory, Outer Write-through non-transient
+ 0b11RW Normal Memory, Outer Write-back non-transient
+
+ R = Outer Read Allocate Policy, W = Outer Write Allocate
+ Policy.
+
+ The meaning of bits [3:0] depends on the value of bits [7:4]:
+
+ Attr\<n\>[3:0] Meaning when Attr\<n\>[7:4] is 0000 Meaning when Attr\<n\>[7:4] is
+ not 0000
+ 0b0000 Device-nGnRnE memory UNPREDICTABLE
+ 0b00RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through transient
+ 0b0100 Device-nGnRE memory Normal Memory, Inner Non-Cacheable
+ 0b01RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back transient
+ 0b1000 Device-nGRE memory Normal Memory, Inner Write-through non-transient (RW=00)
+ 0b10RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through non-transient
+ 0b1100 Device-GRE memory Normal Memory, Inner Write-back non-transient (RW=00)
+ 0b11RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back non-transient
+
+ R = Inner Read Allocate Policy, W = Inner Write Allocate
+ Policy.
+
+ ARMv7's Strongly-ordered and Device memory types have been
+ renamed to Device-nGnRnE and Device-nGnRE in ARMv8.
+
+ The R and W bits in some Attr\<n\> fields have the following
+ meanings:
+
+ R or W Meaning
+ 0 = Do not allocate.
+ 1 = Allocate. */
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_ap_mair_elx_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t attr7 : 8; /**< [ 63: 56](R/W) The memory attribute encoding for an AttrIndx[2:0] entry in a
+ Long descriptor format translation table entry, where
+ AttrIndx[2:0] gives the value of \<n\> in Attr\<n\>.
+
+ Bits [7:4] are encoded as follows:
+
+ Attr\<n\>[7:4] Meaning
+ 0b0000 Device memory. See encoding of Attr\<n\>[3:0] for the type of Device memory.
+ 0b00RW, RW not 0b00 Normal Memory, Outer Write-through transient
+ 0b0100 Normal Memory, Outer Non-Cacheable
+ 0b01RW, RW not 0b00 Normal Memory, Outer Write-back transient
+ 0b10RW Normal Memory, Outer Write-through non-transient
+ 0b11RW Normal Memory, Outer Write-back non-transient
+
+ R = Outer Read Allocate Policy, W = Outer Write Allocate
+ Policy.
+
+ The meaning of bits [3:0] depends on the value of bits [7:4]:
+
+ Attr\<n\>[3:0] Meaning when Attr\<n\>[7:4] is 0000 Meaning when Attr\<n\>[7:4] is
+ not 0000
+ 0b0000 Device-nGnRnE memory UNPREDICTABLE
+ 0b00RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through transient
+ 0b0100 Device-nGnRE memory Normal Memory, Inner Non-Cacheable
+ 0b01RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back transient
+ 0b1000 Device-nGRE memory Normal Memory, Inner Write-through non-transient (RW=00)
+ 0b10RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through non-transient
+ 0b1100 Device-GRE memory Normal Memory, Inner Write-back non-transient (RW=00)
+ 0b11RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back non-transient
+
+ R = Inner Read Allocate Policy, W = Inner Write Allocate
+ Policy.
+
+ ARMv7's Strongly-ordered and Device memory types have been
+ renamed to Device-nGnRnE and Device-nGnRE in ARMv8.
+
+ The R and W bits in some Attr\<n\> fields have the following
+ meanings:
+
+ R or W Meaning
+ 0 = Do not allocate.
+ 1 = Allocate. */
+ uint64_t attr6 : 8; /**< [ 55: 48](R/W) The memory attribute encoding for an AttrIndx[2:0] entry in a
+ Long descriptor format translation table entry, where
+ AttrIndx[2:0] gives the value of \<n\> in Attr\<n\>.
+
+ Bits [7:4] are encoded as follows:
+
+ Attr\<n\>[7:4] Meaning
+ 0b0000 Device memory. See encoding of Attr\<n\>[3:0] for the type of Device memory.
+ 0b00RW, RW not 0b00 Normal Memory, Outer Write-through transient
+ 0b0100 Normal Memory, Outer Non-Cacheable
+ 0b01RW, RW not 0b00 Normal Memory, Outer Write-back transient
+ 0b10RW Normal Memory, Outer Write-through non-transient
+ 0b11RW Normal Memory, Outer Write-back non-transient
+
+ R = Outer Read Allocate Policy, W = Outer Write Allocate
+ Policy.
+
+ The meaning of bits [3:0] depends on the value of bits [7:4]:
+
+ Attr\<n\>[3:0] Meaning when Attr\<n\>[7:4] is 0000 Meaning when Attr\<n\>[7:4] is
+ not 0000
+ 0b0000 Device-nGnRnE memory UNPREDICTABLE
+ 0b00RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through transient
+ 0b0100 Device-nGnRE memory Normal Memory, Inner Non-Cacheable
+ 0b01RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back transient
+ 0b1000 Device-nGRE memory Normal Memory, Inner Write-through non-transient (RW=00)
+ 0b10RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through non-transient
+ 0b1100 Device-GRE memory Normal Memory, Inner Write-back non-transient (RW=00)
+ 0b11RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back non-transient
+
+ R = Inner Read Allocate Policy, W = Inner Write Allocate
+ Policy.
+
+ ARMv7's Strongly-ordered and Device memory types have been
+ renamed to Device-nGnRnE and Device-nGnRE in ARMv8.
+
+ The R and W bits in some Attr\<n\> fields have the following
+ meanings:
+
+ R or W Meaning
+ 0 = Do not allocate.
+ 1 = Allocate. */
+ uint64_t attr5 : 8; /**< [ 47: 40](R/W) The memory attribute encoding for an AttrIndx[2:0] entry in a
+ Long descriptor format translation table entry, where
+ AttrIndx[2:0] gives the value of \<n\> in Attr\<n\>.
+
+ Bits [7:4] are encoded as follows:
+
+ Attr\<n\>[7:4] Meaning
+ 0b0000 Device memory. See encoding of Attr\<n\>[3:0] for the type of Device memory.
+ 0b00RW, RW not 0b00 Normal Memory, Outer Write-through transient
+ 0b0100 Normal Memory, Outer Non-Cacheable
+ 0b01RW, RW not 0b00 Normal Memory, Outer Write-back transient
+ 0b10RW Normal Memory, Outer Write-through non-transient
+ 0b11RW Normal Memory, Outer Write-back non-transient
+
+ R = Outer Read Allocate Policy, W = Outer Write Allocate
+ Policy.
+
+ The meaning of bits [3:0] depends on the value of bits [7:4]:
+
+ Attr\<n\>[3:0] Meaning when Attr\<n\>[7:4] is 0000 Meaning when Attr\<n\>[7:4] is
+ not 0000
+ 0b0000 Device-nGnRnE memory UNPREDICTABLE
+ 0b00RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through transient
+ 0b0100 Device-nGnRE memory Normal Memory, Inner Non-Cacheable
+ 0b01RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back transient
+ 0b1000 Device-nGRE memory Normal Memory, Inner Write-through non-transient (RW=00)
+ 0b10RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through non-transient
+ 0b1100 Device-GRE memory Normal Memory, Inner Write-back non-transient (RW=00)
+ 0b11RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back non-transient
+
+ R = Inner Read Allocate Policy, W = Inner Write Allocate
+ Policy.
+
+ ARMv7's Strongly-ordered and Device memory types have been
+ renamed to Device-nGnRnE and Device-nGnRE in ARMv8.
+
+ The R and W bits in some Attr\<n\> fields have the following
+ meanings:
+
+ R or W Meaning
+ 0 = Do not allocate.
+ 1 = Allocate. */
+ uint64_t attr4 : 8; /**< [ 39: 32](R/W) The memory attribute encoding for an AttrIndx[2:0] entry in a
+ Long descriptor format translation table entry, where
+ AttrIndx[2:0] gives the value of \<n\> in Attr\<n\>.
+
+ Bits [7:4] are encoded as follows:
+
+ Attr\<n\>[7:4] Meaning
+ 0b0000 Device memory. See encoding of Attr\<n\>[3:0] for the type of Device memory.
+ 0b00RW, RW not 0b00 Normal Memory, Outer Write-through transient
+ 0b0100 Normal Memory, Outer Non-Cacheable
+ 0b01RW, RW not 0b00 Normal Memory, Outer Write-back transient
+ 0b10RW Normal Memory, Outer Write-through non-transient
+ 0b11RW Normal Memory, Outer Write-back non-transient
+
+ R = Outer Read Allocate Policy, W = Outer Write Allocate
+ Policy.
+
+ The meaning of bits [3:0] depends on the value of bits [7:4]:
+
+ Attr\<n\>[3:0] Meaning when Attr\<n\>[7:4] is 0000 Meaning when Attr\<n\>[7:4] is
+ not 0000
+ 0b0000 Device-nGnRnE memory UNPREDICTABLE
+ 0b00RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through transient
+ 0b0100 Device-nGnRE memory Normal Memory, Inner Non-Cacheable
+ 0b01RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back transient
+ 0b1000 Device-nGRE memory Normal Memory, Inner Write-through non-transient (RW=00)
+ 0b10RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through non-transient
+ 0b1100 Device-GRE memory Normal Memory, Inner Write-back non-transient (RW=00)
+ 0b11RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back non-transient
+
+ R = Inner Read Allocate Policy, W = Inner Write Allocate
+ Policy.
+
+ ARMv7's Strongly-ordered and Device memory types have been
+ renamed to Device-nGnRnE and Device-nGnRE in ARMv8.
+
+ The R and W bits in some Attr\<n\> fields have the following
+ meanings:
+
+ R or W Meaning
+ 0 = Do not allocate.
+ 1 = Allocate. */
+ uint64_t attr3 : 8; /**< [ 31: 24](R/W) The memory attribute encoding for an AttrIndx[2:0] entry in a
+ Long descriptor format translation table entry, where
+ AttrIndx[2:0] gives the value of \<n\> in Attr\<n\>.
+
+ Bits [7:4] are encoded as follows:
+
+ Attr\<n\>[7:4] Meaning
+ 0b0000 Device memory. See encoding of Attr\<n\>[3:0] for the type of Device memory.
+ 0b00RW, RW not 0b00 Normal Memory, Outer Write-through transient
+ 0b0100 Normal Memory, Outer Non-Cacheable
+ 0b01RW, RW not 0b00 Normal Memory, Outer Write-back transient
+ 0b10RW Normal Memory, Outer Write-through non-transient
+ 0b11RW Normal Memory, Outer Write-back non-transient
+
+ R = Outer Read Allocate Policy, W = Outer Write Allocate
+ Policy.
+
+ The meaning of bits [3:0] depends on the value of bits [7:4]:
+
+ Attr\<n\>[3:0] Meaning when Attr\<n\>[7:4] is 0000 Meaning when Attr\<n\>[7:4] is
+ not 0000
+ 0b0000 Device-nGnRnE memory UNPREDICTABLE
+ 0b00RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through transient
+ 0b0100 Device-nGnRE memory Normal Memory, Inner Non-Cacheable
+ 0b01RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back transient
+ 0b1000 Device-nGRE memory Normal Memory, Inner Write-through non-transient (RW=00)
+ 0b10RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through non-transient
+ 0b1100 Device-GRE memory Normal Memory, Inner Write-back non-transient (RW=00)
+ 0b11RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back non-transient
+
+ R = Inner Read Allocate Policy, W = Inner Write Allocate
+ Policy.
+
+ ARMv7's Strongly-ordered and Device memory types have been
+ renamed to Device-nGnRnE and Device-nGnRE in ARMv8.
+
+ The R and W bits in some Attr\<n\> fields have the following
+ meanings:
+
+ R or W Meaning
+ 0 = Do not allocate.
+ 1 = Allocate. */
+ uint64_t attr2 : 8; /**< [ 23: 16](R/W) The memory attribute encoding for an AttrIndx[2:0] entry in a
+ Long descriptor format translation table entry, where
+ AttrIndx[2:0] gives the value of \<n\> in Attr\<n\>.
+
+ Bits [7:4] are encoded as follows:
+
+ Attr\<n\>[7:4] Meaning
+ 0b0000 Device memory. See encoding of Attr\<n\>[3:0] for the type of Device memory.
+ 0b00RW, RW not 0b00 Normal Memory, Outer Write-through transient
+ 0b0100 Normal Memory, Outer Non-Cacheable
+ 0b01RW, RW not 0b00 Normal Memory, Outer Write-back transient
+ 0b10RW Normal Memory, Outer Write-through non-transient
+ 0b11RW Normal Memory, Outer Write-back non-transient
+
+ R = Outer Read Allocate Policy, W = Outer Write Allocate
+ Policy.
+
+ The meaning of bits [3:0] depends on the value of bits [7:4]:
+
+ Attr\<n\>[3:0] Meaning when Attr\<n\>[7:4] is 0000 Meaning when Attr\<n\>[7:4] is
+ not 0000
+ 0b0000 Device-nGnRnE memory UNPREDICTABLE
+ 0b00RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through transient
+ 0b0100 Device-nGnRE memory Normal Memory, Inner Non-Cacheable
+ 0b01RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back transient
+ 0b1000 Device-nGRE memory Normal Memory, Inner Write-through non-transient (RW=00)
+ 0b10RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through non-transient
+ 0b1100 Device-GRE memory Normal Memory, Inner Write-back non-transient (RW=00)
+ 0b11RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back non-transient
+
+ R = Inner Read Allocate Policy, W = Inner Write Allocate
+ Policy.
+
+ ARMv7's Strongly-ordered and Device memory types have been
+ renamed to Device-nGnRnE and Device-nGnRE in ARMv8.
+
+ The R and W bits in some Attr\<n\> fields have the following
+ meanings:
+
+ R or W Meaning
+ 0 = Do not allocate.
+ 1 = Allocate. */
+ uint64_t attr1 : 8; /**< [ 15: 8](R/W) The memory attribute encoding for an AttrIndx[2:0] entry in a
+ Long descriptor format translation table entry, where
+ AttrIndx[2:0] gives the value of \<n\> in Attr\<n\>.
+
+ Bits [7:4] are encoded as follows:
+
+ Attr\<n\>[7:4] Meaning
+ 0b0000 Device memory. See encoding of Attr\<n\>[3:0] for the type of Device memory.
+ 0b00RW, RW not 0b00 Normal Memory, Outer Write-through transient
+ 0b0100 Normal Memory, Outer Non-Cacheable
+ 0b01RW, RW not 0b00 Normal Memory, Outer Write-back transient
+ 0b10RW Normal Memory, Outer Write-through non-transient
+ 0b11RW Normal Memory, Outer Write-back non-transient
+
+ R = Outer Read Allocate Policy, W = Outer Write Allocate
+ Policy.
+
+ The meaning of bits [3:0] depends on the value of bits [7:4]:
+
+ Attr\<n\>[3:0] Meaning when Attr\<n\>[7:4] is 0000 Meaning when Attr\<n\>[7:4] is
+ not 0000
+ 0b0000 Device-nGnRnE memory UNPREDICTABLE
+ 0b00RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through transient
+ 0b0100 Device-nGnRE memory Normal Memory, Inner Non-Cacheable
+ 0b01RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back transient
+ 0b1000 Device-nGRE memory Normal Memory, Inner Write-through non-transient (RW=00)
+ 0b10RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through non-transient
+ 0b1100 Device-GRE memory Normal Memory, Inner Write-back non-transient (RW=00)
+ 0b11RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back non-transient
+
+ R = Inner Read Allocate Policy, W = Inner Write Allocate
+ Policy.
+
+ ARMv7's Strongly-ordered and Device memory types have been
+ renamed to Device-nGnRnE and Device-nGnRE in ARMv8.
+
+ The R and W bits in some Attr\<n\> fields have the following
+ meanings:
+
+ R or W Meaning
+ 0 = Do not allocate.
+ 1 = Allocate. */
+ uint64_t attr0 : 8; /**< [ 7: 0](R/W) The memory attribute encoding for an AttrIndx[2:0] entry in a
+ Long descriptor format translation table entry, where
+ AttrIndx[2:0] gives the value of \<n\> in Attr\<n\>.
+
+ Bits [7:4] are encoded as follows:
+
+ Attr\<n\>[7:4] Meaning
+ 0b0000 Device memory. See encoding of Attr\<n\>[3:0] for the type of Device memory.
+ 0b00RW, RW not 0b00 Normal Memory, Outer Write-through transient
+ 0b0100 Normal Memory, Outer Non-Cacheable
+ 0b01RW, RW not 0b00 Normal Memory, Outer Write-back transient
+ 0b10RW Normal Memory, Outer Write-through non-transient
+ 0b11RW Normal Memory, Outer Write-back non-transient
+
+ R = Outer Read Allocate Policy, W = Outer Write Allocate
+ Policy.
+
+ The meaning of bits [3:0] depends on the value of bits [7:4]:
+
+ Attr\<n\>[3:0] Meaning when Attr\<n\>[7:4] is 0000 Meaning when Attr\<n\>[7:4] is
+ not 0000
+ 0b0000 Device-nGnRnE memory UNPREDICTABLE
+ 0b00RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through transient
+ 0b0100 Device-nGnRE memory Normal Memory, Inner Non-Cacheable
+ 0b01RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back transient
+ 0b1000 Device-nGRE memory Normal Memory, Inner Write-through non-transient (RW=00)
+ 0b10RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through non-transient
+ 0b1100 Device-GRE memory Normal Memory, Inner Write-back non-transient (RW=00)
+ 0b11RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back non-transient
+
+ R = Inner Read Allocate Policy, W = Inner Write Allocate
+ Policy.
+
+ ARMv7's Strongly-ordered and Device memory types have been
+ renamed to Device-nGnRnE and Device-nGnRE in ARMv8.
+
+ The R and W bits in some Attr\<n\> fields have the following
+ meanings:
+
+ R or W Meaning
+ 0 = Do not allocate.
+ 1 = Allocate. */
+#else /* Word 0 - Little Endian */
+ uint64_t attr0 : 8; /**< [ 7: 0](R/W) The memory attribute encoding for an AttrIndx[2:0] entry in a
+ Long descriptor format translation table entry, where
+ AttrIndx[2:0] gives the value of \<n\> in Attr\<n\>.
+
+ Bits [7:4] are encoded as follows:
+
+ Attr\<n\>[7:4] Meaning
+ 0b0000 Device memory. See encoding of Attr\<n\>[3:0] for the type of Device memory.
+ 0b00RW, RW not 0b00 Normal Memory, Outer Write-through transient
+ 0b0100 Normal Memory, Outer Non-Cacheable
+ 0b01RW, RW not 0b00 Normal Memory, Outer Write-back transient
+ 0b10RW Normal Memory, Outer Write-through non-transient
+ 0b11RW Normal Memory, Outer Write-back non-transient
+
+ R = Outer Read Allocate Policy, W = Outer Write Allocate
+ Policy.
+
+ The meaning of bits [3:0] depends on the value of bits [7:4]:
+
+ Attr\<n\>[3:0] Meaning when Attr\<n\>[7:4] is 0000 Meaning when Attr\<n\>[7:4] is
+ not 0000
+ 0b0000 Device-nGnRnE memory UNPREDICTABLE
+ 0b00RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through transient
+ 0b0100 Device-nGnRE memory Normal Memory, Inner Non-Cacheable
+ 0b01RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back transient
+ 0b1000 Device-nGRE memory Normal Memory, Inner Write-through non-transient (RW=00)
+ 0b10RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through non-transient
+ 0b1100 Device-GRE memory Normal Memory, Inner Write-back non-transient (RW=00)
+ 0b11RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back non-transient
+
+ R = Inner Read Allocate Policy, W = Inner Write Allocate
+ Policy.
+
+ ARMv7's Strongly-ordered and Device memory types have been
+ renamed to Device-nGnRnE and Device-nGnRE in ARMv8.
+
+ The R and W bits in some Attr\<n\> fields have the following
+ meanings:
+
+ R or W Meaning
+ 0 = Do not allocate.
+ 1 = Allocate. */
+ uint64_t attr1 : 8; /**< [ 15: 8](R/W) The memory attribute encoding for an AttrIndx[2:0] entry in a
+ Long descriptor format translation table entry, where
+ AttrIndx[2:0] gives the value of \<n\> in Attr\<n\>.
+
+ Bits [7:4] are encoded as follows:
+
+ Attr\<n\>[7:4] Meaning
+ 0b0000 Device memory. See encoding of Attr\<n\>[3:0] for the type of Device memory.
+ 0b00RW, RW not 0b00 Normal Memory, Outer Write-through transient
+ 0b0100 Normal Memory, Outer Non-Cacheable
+ 0b01RW, RW not 0b00 Normal Memory, Outer Write-back transient
+ 0b10RW Normal Memory, Outer Write-through non-transient
+ 0b11RW Normal Memory, Outer Write-back non-transient
+
+ R = Outer Read Allocate Policy, W = Outer Write Allocate
+ Policy.
+
+ The meaning of bits [3:0] depends on the value of bits [7:4]:
+
+ Attr\<n\>[3:0] Meaning when Attr\<n\>[7:4] is 0000 Meaning when Attr\<n\>[7:4] is
+ not 0000
+ 0b0000 Device-nGnRnE memory UNPREDICTABLE
+ 0b00RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through transient
+ 0b0100 Device-nGnRE memory Normal Memory, Inner Non-Cacheable
+ 0b01RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back transient
+ 0b1000 Device-nGRE memory Normal Memory, Inner Write-through non-transient (RW=00)
+ 0b10RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through non-transient
+ 0b1100 Device-GRE memory Normal Memory, Inner Write-back non-transient (RW=00)
+ 0b11RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back non-transient
+
+ R = Inner Read Allocate Policy, W = Inner Write Allocate
+ Policy.
+
+ ARMv7's Strongly-ordered and Device memory types have been
+ renamed to Device-nGnRnE and Device-nGnRE in ARMv8.
+
+ The R and W bits in some Attr\<n\> fields have the following
+ meanings:
+
+ R or W Meaning
+ 0 = Do not allocate.
+ 1 = Allocate. */
+ uint64_t attr2 : 8; /**< [ 23: 16](R/W) The memory attribute encoding for an AttrIndx[2:0] entry in a
+ Long descriptor format translation table entry, where
+ AttrIndx[2:0] gives the value of \<n\> in Attr\<n\>.
+
+ Bits [7:4] are encoded as follows:
+
+ Attr\<n\>[7:4] Meaning
+ 0b0000 Device memory. See encoding of Attr\<n\>[3:0] for the type of Device memory.
+ 0b00RW, RW not 0b00 Normal Memory, Outer Write-through transient
+ 0b0100 Normal Memory, Outer Non-Cacheable
+ 0b01RW, RW not 0b00 Normal Memory, Outer Write-back transient
+ 0b10RW Normal Memory, Outer Write-through non-transient
+ 0b11RW Normal Memory, Outer Write-back non-transient
+
+ R = Outer Read Allocate Policy, W = Outer Write Allocate
+ Policy.
+
+ The meaning of bits [3:0] depends on the value of bits [7:4]:
+
+ Attr\<n\>[3:0] Meaning when Attr\<n\>[7:4] is 0000 Meaning when Attr\<n\>[7:4] is
+ not 0000
+ 0b0000 Device-nGnRnE memory UNPREDICTABLE
+ 0b00RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through transient
+ 0b0100 Device-nGnRE memory Normal Memory, Inner Non-Cacheable
+ 0b01RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back transient
+ 0b1000 Device-nGRE memory Normal Memory, Inner Write-through non-transient (RW=00)
+ 0b10RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through non-transient
+ 0b1100 Device-GRE memory Normal Memory, Inner Write-back non-transient (RW=00)
+ 0b11RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back non-transient
+
+ R = Inner Read Allocate Policy, W = Inner Write Allocate
+ Policy.
+
+ ARMv7's Strongly-ordered and Device memory types have been
+ renamed to Device-nGnRnE and Device-nGnRE in ARMv8.
+
+ The R and W bits in some Attr\<n\> fields have the following
+ meanings:
+
+ R or W Meaning
+ 0 = Do not allocate.
+ 1 = Allocate. */
+ uint64_t attr3 : 8; /**< [ 31: 24](R/W) The memory attribute encoding for an AttrIndx[2:0] entry in a
+ Long descriptor format translation table entry, where
+ AttrIndx[2:0] gives the value of \<n\> in Attr\<n\>.
+
+ Bits [7:4] are encoded as follows:
+
+ Attr\<n\>[7:4] Meaning
+ 0b0000 Device memory. See encoding of Attr\<n\>[3:0] for the type of Device memory.
+ 0b00RW, RW not 0b00 Normal Memory, Outer Write-through transient
+ 0b0100 Normal Memory, Outer Non-Cacheable
+ 0b01RW, RW not 0b00 Normal Memory, Outer Write-back transient
+ 0b10RW Normal Memory, Outer Write-through non-transient
+ 0b11RW Normal Memory, Outer Write-back non-transient
+
+ R = Outer Read Allocate Policy, W = Outer Write Allocate
+ Policy.
+
+ The meaning of bits [3:0] depends on the value of bits [7:4]:
+
+ Attr\<n\>[3:0] Meaning when Attr\<n\>[7:4] is 0000 Meaning when Attr\<n\>[7:4] is
+ not 0000
+ 0b0000 Device-nGnRnE memory UNPREDICTABLE
+ 0b00RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through transient
+ 0b0100 Device-nGnRE memory Normal Memory, Inner Non-Cacheable
+ 0b01RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back transient
+ 0b1000 Device-nGRE memory Normal Memory, Inner Write-through non-transient (RW=00)
+ 0b10RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through non-transient
+ 0b1100 Device-GRE memory Normal Memory, Inner Write-back non-transient (RW=00)
+ 0b11RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back non-transient
+
+ R = Inner Read Allocate Policy, W = Inner Write Allocate
+ Policy.
+
+ ARMv7's Strongly-ordered and Device memory types have been
+ renamed to Device-nGnRnE and Device-nGnRE in ARMv8.
+
+ The R and W bits in some Attr\<n\> fields have the following
+ meanings:
+
+ R or W Meaning
+ 0 = Do not allocate.
+ 1 = Allocate. */
+ uint64_t attr4 : 8; /**< [ 39: 32](R/W) The memory attribute encoding for an AttrIndx[2:0] entry in a
+ Long descriptor format translation table entry, where
+ AttrIndx[2:0] gives the value of \<n\> in Attr\<n\>.
+
+ Bits [7:4] are encoded as follows:
+
+ Attr\<n\>[7:4] Meaning
+ 0b0000 Device memory. See encoding of Attr\<n\>[3:0] for the type of Device memory.
+ 0b00RW, RW not 0b00 Normal Memory, Outer Write-through transient
+ 0b0100 Normal Memory, Outer Non-Cacheable
+ 0b01RW, RW not 0b00 Normal Memory, Outer Write-back transient
+ 0b10RW Normal Memory, Outer Write-through non-transient
+ 0b11RW Normal Memory, Outer Write-back non-transient
+
+ R = Outer Read Allocate Policy, W = Outer Write Allocate
+ Policy.
+
+ The meaning of bits [3:0] depends on the value of bits [7:4]:
+
+ Attr\<n\>[3:0] Meaning when Attr\<n\>[7:4] is 0000 Meaning when Attr\<n\>[7:4] is
+ not 0000
+ 0b0000 Device-nGnRnE memory UNPREDICTABLE
+ 0b00RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through transient
+ 0b0100 Device-nGnRE memory Normal Memory, Inner Non-Cacheable
+ 0b01RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back transient
+ 0b1000 Device-nGRE memory Normal Memory, Inner Write-through non-transient (RW=00)
+ 0b10RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through non-transient
+ 0b1100 Device-GRE memory Normal Memory, Inner Write-back non-transient (RW=00)
+ 0b11RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back non-transient
+
+ R = Inner Read Allocate Policy, W = Inner Write Allocate
+ Policy.
+
+ ARMv7's Strongly-ordered and Device memory types have been
+ renamed to Device-nGnRnE and Device-nGnRE in ARMv8.
+
+ The R and W bits in some Attr\<n\> fields have the following
+ meanings:
+
+ R or W Meaning
+ 0 = Do not allocate.
+ 1 = Allocate. */
+ uint64_t attr5 : 8; /**< [ 47: 40](R/W) The memory attribute encoding for an AttrIndx[2:0] entry in a
+ Long descriptor format translation table entry, where
+ AttrIndx[2:0] gives the value of \<n\> in Attr\<n\>.
+
+ Bits [7:4] are encoded as follows:
+
+ Attr\<n\>[7:4] Meaning
+ 0b0000 Device memory. See encoding of Attr\<n\>[3:0] for the type of Device memory.
+ 0b00RW, RW not 0b00 Normal Memory, Outer Write-through transient
+ 0b0100 Normal Memory, Outer Non-Cacheable
+ 0b01RW, RW not 0b00 Normal Memory, Outer Write-back transient
+ 0b10RW Normal Memory, Outer Write-through non-transient
+ 0b11RW Normal Memory, Outer Write-back non-transient
+
+ R = Outer Read Allocate Policy, W = Outer Write Allocate
+ Policy.
+
+ The meaning of bits [3:0] depends on the value of bits [7:4]:
+
+ Attr\<n\>[3:0] Meaning when Attr\<n\>[7:4] is 0000 Meaning when Attr\<n\>[7:4] is
+ not 0000
+ 0b0000 Device-nGnRnE memory UNPREDICTABLE
+ 0b00RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through transient
+ 0b0100 Device-nGnRE memory Normal Memory, Inner Non-Cacheable
+ 0b01RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back transient
+ 0b1000 Device-nGRE memory Normal Memory, Inner Write-through non-transient (RW=00)
+ 0b10RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through non-transient
+ 0b1100 Device-GRE memory Normal Memory, Inner Write-back non-transient (RW=00)
+ 0b11RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back non-transient
+
+ R = Inner Read Allocate Policy, W = Inner Write Allocate
+ Policy.
+
+ ARMv7's Strongly-ordered and Device memory types have been
+ renamed to Device-nGnRnE and Device-nGnRE in ARMv8.
+
+ The R and W bits in some Attr\<n\> fields have the following
+ meanings:
+
+ R or W Meaning
+ 0 = Do not allocate.
+ 1 = Allocate. */
+ uint64_t attr6 : 8; /**< [ 55: 48](R/W) The memory attribute encoding for an AttrIndx[2:0] entry in a
+ Long descriptor format translation table entry, where
+ AttrIndx[2:0] gives the value of \<n\> in Attr\<n\>.
+
+ Bits [7:4] are encoded as follows:
+
+ Attr\<n\>[7:4] Meaning
+ 0b0000 Device memory. See encoding of Attr\<n\>[3:0] for the type of Device memory.
+ 0b00RW, RW not 0b00 Normal Memory, Outer Write-through transient
+ 0b0100 Normal Memory, Outer Non-Cacheable
+ 0b01RW, RW not 0b00 Normal Memory, Outer Write-back transient
+ 0b10RW Normal Memory, Outer Write-through non-transient
+ 0b11RW Normal Memory, Outer Write-back non-transient
+
+ R = Outer Read Allocate Policy, W = Outer Write Allocate
+ Policy.
+
+ The meaning of bits [3:0] depends on the value of bits [7:4]:
+
+ Attr\<n\>[3:0] Meaning when Attr\<n\>[7:4] is 0000 Meaning when Attr\<n\>[7:4] is
+ not 0000
+ 0b0000 Device-nGnRnE memory UNPREDICTABLE
+ 0b00RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through transient
+ 0b0100 Device-nGnRE memory Normal Memory, Inner Non-Cacheable
+ 0b01RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back transient
+ 0b1000 Device-nGRE memory Normal Memory, Inner Write-through non-transient (RW=00)
+ 0b10RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through non-transient
+ 0b1100 Device-GRE memory Normal Memory, Inner Write-back non-transient (RW=00)
+ 0b11RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back non-transient
+
+ R = Inner Read Allocate Policy, W = Inner Write Allocate
+ Policy.
+
+ ARMv7's Strongly-ordered and Device memory types have been
+ renamed to Device-nGnRnE and Device-nGnRE in ARMv8.
+
+ The R and W bits in some Attr\<n\> fields have the following
+ meanings:
+
+ R or W Meaning
+ 0 = Do not allocate.
+ 1 = Allocate. */
+ uint64_t attr7 : 8; /**< [ 63: 56](R/W) The memory attribute encoding for an AttrIndx[2:0] entry in a
+ Long descriptor format translation table entry, where
+ AttrIndx[2:0] gives the value of \<n\> in Attr\<n\>.
+
+ Bits [7:4] are encoded as follows:
+
+ Attr\<n\>[7:4] Meaning
+ 0b0000 Device memory. See encoding of Attr\<n\>[3:0] for the type of Device memory.
+ 0b00RW, RW not 0b00 Normal Memory, Outer Write-through transient
+ 0b0100 Normal Memory, Outer Non-Cacheable
+ 0b01RW, RW not 0b00 Normal Memory, Outer Write-back transient
+ 0b10RW Normal Memory, Outer Write-through non-transient
+ 0b11RW Normal Memory, Outer Write-back non-transient
+
+ R = Outer Read Allocate Policy, W = Outer Write Allocate
+ Policy.
+
+ The meaning of bits [3:0] depends on the value of bits [7:4]:
+
+ Attr\<n\>[3:0] Meaning when Attr\<n\>[7:4] is 0000 Meaning when Attr\<n\>[7:4] is
+ not 0000
+ 0b0000 Device-nGnRnE memory UNPREDICTABLE
+ 0b00RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through transient
+ 0b0100 Device-nGnRE memory Normal Memory, Inner Non-Cacheable
+ 0b01RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back transient
+ 0b1000 Device-nGRE memory Normal Memory, Inner Write-through non-transient (RW=00)
+ 0b10RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-through non-transient
+ 0b1100 Device-GRE memory Normal Memory, Inner Write-back non-transient (RW=00)
+ 0b11RW, RW not 0b00 UNPREDICTABLE Normal Memory, Inner Write-back non-transient
+
+ R = Inner Read Allocate Policy, W = Inner Write Allocate
+ Policy.
+
+ ARMv7's Strongly-ordered and Device memory types have been
+ renamed to Device-nGnRnE and Device-nGnRE in ARMv8.
+
+ The R and W bits in some Attr\<n\> fields have the following
+ meanings:
+
+ R or W Meaning
+ 0 = Do not allocate.
+ 1 = Allocate. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_ap_mair_elx bdk_ap_mair_elx_t;
+
+static inline uint64_t BDK_AP_MAIR_ELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_MAIR_ELX(unsigned long a)
+{
+ if ((a>=1)&&(a<=3))
+ return 0x3000a020000ll + 0ll * ((a) & 0x3);
+ __bdk_csr_fatal("AP_MAIR_ELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_MAIR_ELX(a) bdk_ap_mair_elx_t
+#define bustype_BDK_AP_MAIR_ELX(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_MAIR_ELX(a) "AP_MAIR_ELX"
+#define busnum_BDK_AP_MAIR_ELX(a) (a)
+#define arguments_BDK_AP_MAIR_ELX(a) (a),-1,-1,-1
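+
+/* Illustrative sketch (editor's note, not part of the imported BDK code):
+ * composing a MAIR value with the CN9XXX per-attribute view: AttrIndx 0
+ * as Device-nGnRnE (0x00) and AttrIndx 1 as Normal, Inner/Outer
+ * Write-back non-transient, read/write-allocate (0xFF), per the tables
+ * above. BDK_CSR_WRITE() and bdk_numa_local() are assumed; the function
+ * name is hypothetical and 'el' selects EL1..EL3. */
+static inline void example_ap_mair_setup(unsigned long el)
+{
+    bdk_ap_mair_elx_t mair = { .u = 0 };
+    mair.cn9.attr0 = 0x00; /* Device-nGnRnE */
+    mair.cn9.attr1 = 0xFF; /* Normal, Write-back non-transient, R/W allocate */
+    BDK_CSR_WRITE(bdk_numa_local(), BDK_AP_MAIR_ELX(el), mair.u);
+}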
+
+/**
+ * Register (SYSREG) ap_mair_el12
+ *
+ * AP Memory Attribute Indirection Register
+ * Alias of AP_MAIR_EL1 when accessed at EL2/3 and AP_HCR_EL2[E2H] is set.
+ */
+union bdk_ap_mair_el12
+{
+ uint64_t u;
+ struct bdk_ap_mair_el12_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_mair_el12_s cn; */
+};
+typedef union bdk_ap_mair_el12 bdk_ap_mair_el12_t;
+
+#define BDK_AP_MAIR_EL12 BDK_AP_MAIR_EL12_FUNC()
+static inline uint64_t BDK_AP_MAIR_EL12_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_MAIR_EL12_FUNC(void)
+{
+ return 0x3050a020000ll;
+}
+
+#define typedef_BDK_AP_MAIR_EL12 bdk_ap_mair_el12_t
+#define bustype_BDK_AP_MAIR_EL12 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_MAIR_EL12 "AP_MAIR_EL12"
+#define busnum_BDK_AP_MAIR_EL12 0
+#define arguments_BDK_AP_MAIR_EL12 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_mdccint_el1
+ *
+ * AP Monitor Debug Comms Channel Interrupt Enable Register
+ * Enables interrupt requests to be signaled based on the DCC
+ * status flags.
+ */
+union bdk_ap_mdccint_el1
+{
+ uint32_t u;
+ struct bdk_ap_mdccint_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_31 : 1;
+ uint32_t rx : 1; /**< [ 30: 30](R/W) DCC interrupt request enable control for DTRRX. Enables a
+ common COMMIRQ interrupt request to be signaled based on the
+ DCC status flags.
+
+ If legacy COMMRX and COMMTX signals are implemented, then
+ these are not affected by the value of this bit.
+ 0 = No interrupt request generated by DTRRX.
+ 1 = Interrupt request will be generated on RXfull == 1. */
+ uint32_t tx : 1; /**< [ 29: 29](R/W) DCC interrupt request enable control for DTRTX. Enables a
+ common COMMIRQ interrupt request to be signaled based on the
+ DCC status flags.
+
+ If legacy COMMRX and COMMTX signals are implemented, then
+ these are not affected by the value of this bit.
+ 0 = No interrupt request generated by DTRTX.
+ 1 = Interrupt request will be generated on TXfull == 0. */
+ uint32_t reserved_0_28 : 29;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_28 : 29;
+ uint32_t tx : 1; /**< [ 29: 29](R/W) DCC interrupt request enable control for DTRTX. Enables a
+ common COMMIRQ interrupt request to be signaled based on the
+ DCC status flags.
+
+ If legacy COMMRX and COMMTX signals are implemented, then
+ these are not affected by the value of this bit.
+ 0 = No interrupt request generated by DTRTX.
+ 1 = Interrupt request will be generated on TXfull == 0. */
+ uint32_t rx : 1; /**< [ 30: 30](R/W) DCC interrupt request enable control for DTRRX. Enables a
+ common COMMIRQ interrupt request to be signaled based on the
+ DCC status flags.
+
+ If legacy COMMRX and COMMTX signals are implemented, then
+ these are not affected by the value of this bit.
+ 0 = No interrupt request generated by DTRRX.
+ 1 = Interrupt request will be generated on RXfull == 1. */
+ uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_mdccint_el1_s cn; */
+};
+typedef union bdk_ap_mdccint_el1 bdk_ap_mdccint_el1_t;
+
+#define BDK_AP_MDCCINT_EL1 BDK_AP_MDCCINT_EL1_FUNC()
+static inline uint64_t BDK_AP_MDCCINT_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_MDCCINT_EL1_FUNC(void)
+{
+ return 0x20000020000ll;
+}
+
+#define typedef_BDK_AP_MDCCINT_EL1 bdk_ap_mdccint_el1_t
+#define bustype_BDK_AP_MDCCINT_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_MDCCINT_EL1 "AP_MDCCINT_EL1"
+#define busnum_BDK_AP_MDCCINT_EL1 0
+#define arguments_BDK_AP_MDCCINT_EL1 -1,-1,-1,-1
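+
+/* Illustrative sketch (editor's note, not part of the imported BDK code):
+ * requesting a COMMIRQ when incoming DCC data arrives (RXfull == 1).
+ * BDK_CSR_WRITE() and bdk_numa_local() are assumed; the function name
+ * is hypothetical. */
+static inline void example_ap_mdccint_enable_rx(void)
+{
+    bdk_ap_mdccint_el1_t ccint = { .u = 0 };
+    ccint.s.rx = 1; /* interrupt while DTRRX holds data */
+    BDK_CSR_WRITE(bdk_numa_local(), BDK_AP_MDCCINT_EL1, ccint.u);
+}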
+
+/**
+ * Register (SYSREG) ap_mdccsr_el0
+ *
+ * AP Monitor Debug Comms Channel Status Register
+ * Main control register for the debug implementation, containing
+ * flow-control flags for the DCC. This is an internal, read-only
+ * view.
+ */
+union bdk_ap_mdccsr_el0
+{
+ uint32_t u;
+ struct bdk_ap_mdccsr_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_31 : 1;
+ uint32_t rxfull : 1; /**< [ 30: 30](RO) DTRRX full. Read-only view of the equivalent bit in the EDSCR. */
+ uint32_t txfull : 1; /**< [ 29: 29](RO) DTRTX full. Read-only view of the equivalent bit in the EDSCR. */
+ uint32_t reserved_0_28 : 29;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_28 : 29;
+ uint32_t txfull : 1; /**< [ 29: 29](RO) DTRTX full. Read-only view of the equivalent bit in the EDSCR. */
+ uint32_t rxfull : 1; /**< [ 30: 30](RO) DTRRX full. Read-only view of the equivalent bit in the EDSCR. */
+ uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_mdccsr_el0_cn
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_31 : 1;
+ uint32_t rxfull : 1; /**< [ 30: 30](RO) DTRRX full. Read-only view of the equivalent bit in the EDSCR. */
+ uint32_t txfull : 1; /**< [ 29: 29](RO) DTRTX full. Read-only view of the equivalent bit in the EDSCR. */
+ uint32_t reserved_19_28 : 10;
+ uint32_t reserved_15_18 : 4;
+ uint32_t reserved_13_14 : 2;
+ uint32_t reserved_12 : 1;
+ uint32_t reserved_6_11 : 6;
+ uint32_t reserved_2_5 : 4;
+ uint32_t reserved_0_1 : 2;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_1 : 2;
+ uint32_t reserved_2_5 : 4;
+ uint32_t reserved_6_11 : 6;
+ uint32_t reserved_12 : 1;
+ uint32_t reserved_13_14 : 2;
+ uint32_t reserved_15_18 : 4;
+ uint32_t reserved_19_28 : 10;
+ uint32_t txfull : 1; /**< [ 29: 29](RO) DTRTX full. Read-only view of the equivalent bit in the EDSCR. */
+ uint32_t rxfull : 1; /**< [ 30: 30](RO) DTRRX full. Read-only view of the equivalent bit in the EDSCR. */
+ uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+ } cn;
+};
+typedef union bdk_ap_mdccsr_el0 bdk_ap_mdccsr_el0_t;
+
+#define BDK_AP_MDCCSR_EL0 BDK_AP_MDCCSR_EL0_FUNC()
+static inline uint64_t BDK_AP_MDCCSR_EL0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_MDCCSR_EL0_FUNC(void)
+{
+ return 0x20300010000ll;
+}
+
+#define typedef_BDK_AP_MDCCSR_EL0 bdk_ap_mdccsr_el0_t
+#define bustype_BDK_AP_MDCCSR_EL0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_MDCCSR_EL0 "AP_MDCCSR_EL0"
+#define busnum_BDK_AP_MDCCSR_EL0 0
+#define arguments_BDK_AP_MDCCSR_EL0 -1,-1,-1,-1
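+
+/* Illustrative sketch (editor's note, not part of the imported BDK code):
+ * spinning until the DCC transmit register drains (TXfull == 0) before
+ * queueing another word. BDK_CSR_READ() and bdk_numa_local() are
+ * assumed; the function name is hypothetical. */
+static inline void example_ap_mdccsr_wait_tx(void)
+{
+    bdk_ap_mdccsr_el0_t ccsr;
+    do {
+        ccsr.u = BDK_CSR_READ(bdk_numa_local(), BDK_AP_MDCCSR_EL0);
+    } while (ccsr.s.txfull);
+}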
+
+/**
+ * Register (SYSREG) ap_mdcr_el2
+ *
+ * AP Monitor Debug Configuration EL2 Register
+ * Provides configuration options for the Virtualization
+ * extensions to self-hosted debug and the Performance Monitors
+ * extension.
+ */
+union bdk_ap_mdcr_el2
+{
+ uint32_t u;
+ struct bdk_ap_mdcr_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_18_31 : 14;
+ uint32_t hpmd : 1; /**< [ 17: 17](R/W) v8.1: Hyp performance monitors disable. This prohibits event counting
+ at EL2.
+ 0 = Event counting allowed at EL2.
+ 1 = Event counting prohibited at EL2, unless overridden by
+ the authentication interface.
+
+ Note: This behavior is independent of the value of the AP_HCR_EL2[E2H]
+ bit.
+ This control applies only to:
+ The counters in the range [0 .. [HPMN]-1].
+ If AP_PMCR_EL0[DP] is set to 1, AP_PMCCNTR_EL0.
+ The other event counters and, if AP_PMCR_EL0[DP] is set to 0,
+ AP_PMCCNTR_EL0 are unaffected.
+
+ On Warm reset, the field resets to 0. */
+ uint32_t reserved_15_16 : 2;
+ uint32_t tpms : 1; /**< [ 14: 14](R/W) Trap Performance Monitor Sampling. Controls access to Statistical Profiling control
+ registers from Non-secure EL1 and EL0.
+ 0 = Do not trap statistical profiling controls to EL2.
+ 1 = Accesses to statistical profiling controls at nonsecure EL1 generate a Trap
+ exception to EL2. */
+ uint32_t e2pb : 2; /**< [ 13: 12](R/W) EL2 Profiling Buffer. Controls the owning translation regime and access to Profiling
+ Buffer control registers from nonsecure EL1.
+ 00 = Profiling Buffer uses the EL2 stage 1 translation regime. Accesses to Profiling
+ Buffer controls at nonsecure EL1 generate a Trap exception to EL2.
+ 10 = Profiling Buffer uses the EL1&0 stage 1 translation regime. Accesses to Profiling
+ Buffer controls at nonsecure EL1 generate a Trap exception to EL2.
+ 11 = Profiling Buffer uses the EL1&0 stage 1 translation regime. Accesses to Profiling
+ Buffer controls at nonsecure EL1 are not trapped to EL2. */
+ uint32_t tdra : 1; /**< [ 11: 11](R/W) Trap debug ROM address register access.
+
+ When this bit is set to 1, any access to the following
+ registers from EL1 or EL0 is trapped to EL2:
+
+ AArch32: DBGDRAR, DBGDSAR.
+
+ AArch64: AP_MDRAR_EL1.
+
+ If AP_HCR_EL2[TGE] == 1 or AP_MDCR_EL2[TDE] == 1, then this bit is
+ ignored and treated as though it is 1 other than for the value
+ read back from AP_MDCR_EL2.
+
+ 0 = Has no effect on accesses to debug ROM address registers from
+ EL1 and EL0.
+ 1 = Trap valid EL1 and EL0 access to debug ROM address registers
+ to EL2. */
+ uint32_t tdosa : 1; /**< [ 10: 10](R/W) Trap debug OS-related register access.
+ When this bit is set to 1, any access to the following
+ registers from EL1 or EL0 is trapped to EL2:
+
+ AArch32: DBGOSLAR, DBGOSLSR, DBGOSDLR, DBGPRCR.
+
+ AArch64: AP_OSLAR_EL1, AP_OSLSR_EL1, AP_OSDLR_EL1, AP_DBGPRCR_EL1.
+
+ If AP_HCR_EL2[TGE] == 1 or AP_MDCR_EL2[TDE] == 1, then this bit is
+ ignored and treated as though it is 1 other than for the value
+ read back from AP_MDCR_EL2.
+ 0 = Has no effect on accesses to OS-related debug registers.
+ 1 = Trap valid accesses to OS-related debug registers to EL2. */
+ uint32_t tda : 1; /**< [ 9: 9](R/W) Trap debug access.
+
+ When this bit is set to 1, any valid nonsecure access to the
+ debug registers from EL1 or EL0, other than the registers
+ trapped by the TDRA and TDOSA bits, is trapped to EL2.
+
+ If AP_HCR_EL2[TGE] == 1 or AP_MDCR_EL2[TDE] == 1, then this bit is
+ ignored and treated as though it is 1 other than for the value
+ read back from AP_MDCR_EL2.
+ 0 = Has no effect on accesses to debug registers.
+ 1 = Trap valid nonsecure accesses to debug registers to EL2. */
+ uint32_t tde : 1; /**< [ 8: 8](R/W) Route Software debug exceptions from nonsecure EL1 and EL0 to
+ EL2. Also enables traps on all debug register accesses to EL2.
+ If AP_HCR_EL2[TGE] == 1, then this bit is ignored and treated as
+ though it is 1 other than for the value read back from
+ AP_MDCR_EL2. */
+ uint32_t hpme : 1; /**< [ 7: 7](R/W) Hypervisor Performance Monitors Enable.
+ When this bit is set to 1, the Performance Monitors counters
+ that are reserved for use from EL2 or Secure state are
+ enabled. For more information see the description of the HPMN
+ field.
+ If the Performance Monitors extension is not implemented, this
+ field is RES0.
+ 0 = EL2 Performance Monitors disabled.
+ 1 = EL2 Performance Monitors enabled. */
+ uint32_t tpm : 1; /**< [ 6: 6](R/W) Trap Performance Monitors accesses.
+ If the Performance Monitors extension is not implemented, this
+ field is RES0.
+ 0 = Has no effect on Performance Monitors accesses.
+ 1 = Trap nonsecure EL0 and EL1 accesses to Performance Monitors
+ registers that are not unallocated to EL2. */
+ uint32_t tpmcr : 1; /**< [ 5: 5](R/W) Trap AP_PMCR_EL0 accesses.
+ If the Performance Monitors extension is not implemented, this
+ field is RES0.
+ 0 = Has no effect on AP_PMCR_EL0 accesses.
+ 1 = Trap nonsecure EL0 and EL1 accesses to AP_PMCR_EL0 to EL2. */
+ uint32_t hpmn : 5; /**< [ 4: 0](R/W) Defines the number of Performance Monitors counters that are
+ accessible from nonsecure EL0 and EL1 modes.
+
+ If the Performance Monitors extension is not implemented, this
+ field is RES0.
+
+ In nonsecure state, HPMN divides the Performance Monitors
+ counters as follows. For counter n in nonsecure state:
+
+ If n is in the range 0\<=n\<HPMN, the counter is accessible
+ from EL1 and EL2, and from EL0 if permitted by AP_PMUSERENR_EL0.
+ AP_PMCR_EL0[E] enables the operation of counters in this range.
+
+ If n is in the range HPMN\<=n\< AP_PMCR_EL0[N], the counter is
+ accessible only from EL2. AP_MDCR_EL2[HPME] enables the operation
+ of counters in this range.
+
+ If this field is set to 0, or to a value larger than
+ AP_PMCR_EL0[N], then the behavior in nonsecure EL0 and EL1 is
+ CONSTRAINED UNPREDICTABLE, and one of the following must
+ happen:
+
+ The number of counters accessible is an UNKNOWN nonzero
+ value less than AP_PMCR_EL0[N].
+
+ There is no access to any counters.
+
+ For reads of AP_MDCR_EL2[HPMN] by EL2 or higher, if this field is
+ set to 0 or to a value larger than AP_PMCR_EL0[N], the processor
+ must return a CONSTRAINED UNPREDICTABLE value being one of:
+ AP_PMCR_EL0[N].
+
+ The value that was written to AP_MDCR_EL2[HPMN].
+ (The value that was written to AP_MDCR_EL2[HPMN]) modulo
+ 2^(h), where h is the smallest number of bits required
+ for a value in the range 0 to AP_PMCR_EL0[N]. */
+#else /* Word 0 - Little Endian */
+ uint32_t hpmn : 5; /**< [ 4: 0](R/W) Defines the number of Performance Monitors counters that are
+ accessible from nonsecure EL0 and EL1 modes.
+
+ If the Performance Monitors extension is not implemented, this
+ field is RES0.
+
+ In nonsecure state, HPMN divides the Performance Monitors
+ counters as follows. For counter n in nonsecure state:
+
+ If n is in the range 0\<=n\<HPMN, the counter is accessible
+ from EL1 and EL2, and from EL0 if permitted by AP_PMUSERENR_EL0.
+ AP_PMCR_EL0[E] enables the operation of counters in this range.
+
+ If n is in the range HPMN\<=n\< AP_PMCR_EL0[N], the counter is
+ accessible only from EL2. AP_MDCR_EL2[HPME] enables the operation
+ of counters in this range.
+
+ If this field is set to 0, or to a value larger than
+ AP_PMCR_EL0[N], then the behavior in nonsecure EL0 and EL1 is
+ CONSTRAINED UNPREDICTABLE, and one of the following must
+ happen:
+
+ The number of counters accessible is an UNKNOWN nonzero
+ value less than AP_PMCR_EL0[N].
+
+ There is no access to any counters.
+
+ For reads of AP_MDCR_EL2[HPMN] by EL2 or higher, if this field is
+ set to 0 or to a value larger than AP_PMCR_EL0[N], the processor
+ must return a CONSTRAINED UNPREDICTABLE value being one of:
+ AP_PMCR_EL0[N].
+
+ The value that was written to AP_MDCR_EL2[HPMN].
+ (The value that was written to AP_MDCR_EL2[HPMN]) modulo
+ 2^(h), where h is the smallest number of bits required
+ for a value in the range 0 to AP_PMCR_EL0[N]. */
+ uint32_t tpmcr : 1; /**< [ 5: 5](R/W) Trap AP_PMCR_EL0 accesses.
+ If the Performance Monitors extension is not implemented, this
+ field is RES0.
+ 0 = Has no effect on AP_PMCR_EL0 accesses.
+ 1 = Trap nonsecure EL0 and EL1 accesses to AP_PMCR_EL0 to EL2. */
+ uint32_t tpm : 1; /**< [ 6: 6](R/W) Trap Performance Monitors accesses.
+ If the Performance Monitors extension is not implemented, this
+ field is RES0.
+ 0 = Has no effect on Performance Monitors accesses.
+ 1 = Trap nonsecure EL0 and EL1 accesses to Performance Monitors
+ registers that are not unallocated to EL2. */
+ uint32_t hpme : 1; /**< [ 7: 7](R/W) Hypervisor Performance Monitors Enable.
+ When this bit is set to 1, the Performance Monitors counters
+ that are reserved for use from EL2 or Secure state are
+ enabled. For more information see the description of the HPMN
+ field.
+ If the Performance Monitors extension is not implemented, this
+ field is RES0.
+ 0 = EL2 Performance Monitors disabled.
+ 1 = EL2 Performance Monitors enabled. */
+ uint32_t tde : 1; /**< [ 8: 8](R/W) Route Software debug exceptions from nonsecure EL1 and EL0 to
+ EL2. Also enables traps on all debug register accesses to EL2.
+ If AP_HCR_EL2[TGE] == 1, then this bit is ignored and treated as
+ though it is 1 other than for the value read back from
+ AP_MDCR_EL2. */
+ uint32_t tda : 1; /**< [ 9: 9](R/W) Trap debug access.
+
+ When this bit is set to 1, any valid nonsecure access to the
+ debug registers from EL1 or EL0, other than the registers
+ trapped by the TDRA and TDOSA bits, is trapped to EL2.
+
+ If AP_HCR_EL2[TGE] == 1 or AP_MDCR_EL2[TDE] == 1, then this bit is
+ ignored and treated as though it is 1 other than for the value
+ read back from AP_MDCR_EL2.
+ 0 = Has no effect on accesses to debug registers.
+ 1 = Trap valid nonsecure accesses to debug registers to EL2. */
+ uint32_t tdosa : 1; /**< [ 10: 10](R/W) Trap debug OS-related register access.
+ When this bit is set to 1, any access to the following
+ registers from EL1 or EL0 is trapped to EL2:
+
+ AArch32: DBGOSLAR, DBGOSLSR, DBGOSDLR, DBGPRCR.
+
+ AArch64: AP_OSLAR_EL1, AP_OSLSR_EL1, AP_OSDLR_EL1, AP_DBGPRCR_EL1.
+
+ If AP_HCR_EL2[TGE] == 1 or AP_MDCR_EL2[TDE] == 1, then this bit is
+ ignored and treated as though it is 1 other than for the value
+ read back from AP_MDCR_EL2.
+ 0 = Has no effect on accesses to OS-related debug registers.
+ 1 = Trap valid accesses to OS-related debug registers to EL2. */
+ uint32_t tdra : 1; /**< [ 11: 11](R/W) Trap debug ROM address register access.
+
+ When this bit is set to 1, any access to the following
+ registers from EL1 or EL0 is trapped to EL2:
+
+ AArch32: DBGDRAR, DBGDSAR.
+
+ AArch64: AP_MDRAR_EL1.
+
+ If AP_HCR_EL2[TGE] == 1 or AP_MDCR_EL2[TDE] == 1, then this bit is
+ ignored and treated as though it is 1 other than for the value
+ read back from AP_MDCR_EL2.
+
+ 0 = Has no effect on accesses to debug ROM address registers from
+ EL1 and EL0.
+ 1 = Trap valid EL1 and EL0 access to debug ROM address registers
+ to EL2. */
+ uint32_t e2pb : 2; /**< [ 13: 12](R/W) EL2 Profiling Buffer. Controls the owning translation regime and access to Profiling
+ Buffer control registers from nonsecure EL1.
+ 00 = Profiling Buffer uses the EL2 stage 1 translation regime. Accesses to Profiling
+ Buffer controls at nonsecure EL1 generate a Trap exception to EL2.
+ 10 = Profiling Buffer uses the EL1&0 stage 1 translation regime. Accesses to Profiling
+ Buffer controls at nonsecure EL1 generate a Trap exception to EL2.
+ 11 = Profiling Buffer uses the EL1&0 stage 1 translation regime. Accesses to Profiling
+ Buffer controls at nonsecure EL1 are not trapped to EL2. */
+ uint32_t tpms : 1; /**< [ 14: 14](R/W) Trap Performance Monitor Sampling. Controls access to Statistical Profiling control
+ registers from Non-secure EL1 and EL0.
+ 0 = Do not trap statistical profiling controls to EL2.
+ 1 = Accesses to statistical profiling controls at nonsecure EL1 generate a Trap
+ exception to EL2. */
+ uint32_t reserved_15_16 : 2;
+ uint32_t hpmd : 1; /**< [ 17: 17](R/W) v8.1: Hyp performance monitors disable. This prohibits event counting
+ at EL2.
+ 0 = Event counting allowed at EL2.
+ 1 = Event counting prohibited at EL2, unless overridden by
+ the authentication interface.
+
+ Note: This behavior is independent of the value of the AP_HCR_EL2[E2H]
+ bit.
+ This control applies only to:
+ The counters in the range [0..[HPMN]).
+ If AP_PMCR_EL0[DP] is set to 1, AP_PMCCNTR_EL0.
+ The other event counters and, if AP_PMCR_EL0[DP] is set to 0,
+ AP_PMCCNTR_EL0 are unaffected.
+
+ On Warm reset, the field resets to 0. */
+ uint32_t reserved_18_31 : 14;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_mdcr_el2_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_18_31 : 14;
+ uint32_t hpmd : 1; /**< [ 17: 17](R/W) v8.1: Hyp performance monitors disable. This prohibits event counting
+ at EL2.
+ 0 = Event counting allowed at EL2.
+ 1 = Event counting prohibited at EL2, unless overridden by
+ the authentication interface.
+
+ Note: This behavior is independent of the value of the AP_HCR_EL2[E2H]
+ bit.
+ This control applies only to:
+ The counters in the range [0..[HPMN]).
+ If AP_PMCR_EL0[DP] is set to 1, AP_PMCCNTR_EL0.
+ The other event counters and, if AP_PMCR_EL0[DP] is set to 0,
+ AP_PMCCNTR_EL0 are unaffected.
+
+ On Warm reset, the field resets to 0. */
+ uint32_t reserved_12_16 : 5;
+ uint32_t tdra : 1; /**< [ 11: 11](R/W) Trap debug ROM address register access.
+
+ When this bit is set to 1, any access to the following
+ registers from EL1 or EL0 is trapped to EL2:
+
+ AArch32: DBGDRAR, DBGDSAR.
+
+ AArch64: AP_MDRAR_EL1.
+
+ If AP_HCR_EL2[TGE] == 1 or AP_MDCR_EL2[TDE] == 1, then this bit is
+ ignored and treated as though it is 1 other than for the value
+ read back from AP_MDCR_EL2.
+
+ 0 = Has no effect on accesses to debug ROM address registers from
+ EL1 and EL0.
+ 1 = Trap valid EL1 and EL0 access to debug ROM address registers
+ to EL2. */
+ uint32_t tdosa : 1; /**< [ 10: 10](R/W) Trap debug OS-related register access.
+ When this bit is set to 1, any access to the following
+ registers from EL1 or EL0 is trapped to EL2:
+
+ AArch32: DBGOSLAR, DBGOSLSR, DBGOSDLR, DBGPRCR.
+
+ AArch64: AP_OSLAR_EL1, AP_OSLSR_EL1, AP_OSDLR_EL1, AP_DBGPRCR_EL1.
+
+ If AP_HCR_EL2[TGE] == 1 or AP_MDCR_EL2[TDE] == 1, then this bit is
+ ignored and treated as though it is 1 other than for the value
+ read back from AP_MDCR_EL2.
+ 0 = Has no effect on accesses to OS-related debug registers.
+ 1 = Trap valid accesses to OS-related debug registers to EL2. */
+ uint32_t tda : 1; /**< [ 9: 9](R/W) Trap debug access.
+
+ When this bit is set to 1, any valid nonsecure access to the
+ debug registers from EL1 or EL0, other than the registers
+ trapped by the TDRA and TDOSA bits, is trapped to EL2.
+
+ If AP_HCR_EL2[TGE] == 1 or AP_MDCR_EL2[TDE] == 1, then this bit is
+ ignored and treated as though it is 1 other than for the value
+ read back from AP_MDCR_EL2.
+ 0 = Has no effect on accesses to debug registers.
+ 1 = Trap valid nonsecure accesses to debug registers to EL2. */
+ uint32_t tde : 1; /**< [ 8: 8](R/W) Route Software debug exceptions from nonsecure EL1 and EL0 to
+ EL2. Also enables traps on all debug register accesses to EL2.
+ If AP_HCR_EL2[TGE] == 1, then this bit is ignored and treated as
+ though it is 1 other than for the value read back from
+ AP_MDCR_EL2. */
+ uint32_t hpme : 1; /**< [ 7: 7](R/W) Hypervisor Performance Monitors Enable.
+ When this bit is set to 1, the Performance Monitors counters
+ that are reserved for use from EL2 or Secure state are
+ enabled. For more information see the description of the HPMN
+ field.
+ If the Performance Monitors extension is not implemented, this
+ field is RES0.
+ 0 = EL2 Performance Monitors disabled.
+ 1 = EL2 Performance Monitors enabled. */
+ uint32_t tpm : 1; /**< [ 6: 6](R/W) Trap Performance Monitors accesses.
+ If the Performance Monitors extension is not implemented, this
+ field is RES0.
+ 0 = Has no effect on Performance Monitors accesses.
+ 1 = Trap nonsecure EL0 and EL1 accesses to Performance Monitors
+ registers that are not unallocated to EL2. */
+ uint32_t tpmcr : 1; /**< [ 5: 5](R/W) Trap AP_PMCR_EL0 accesses.
+ If the Performance Monitors extension is not implemented, this
+ field is RES0.
+ 0 = Has no effect on AP_PMCR_EL0 accesses.
+ 1 = Trap nonsecure EL0 and EL1 accesses to AP_PMCR_EL0 to EL2. */
+ uint32_t hpmn : 5; /**< [ 4: 0](R/W) Defines the number of Performance Monitors counters that are
+ accessible from nonsecure EL0 and EL1 modes.
+
+ If the Performance Monitors extension is not implemented, this
+ field is RES0.
+
+ In nonsecure state, HPMN divides the Performance Monitors
+ counters as follows. For counter n in nonsecure state:
+
+ If n is in the range 0\<=n\<HPMN, the counter is accessible
+ from EL1 and EL2, and from EL0 if permitted by AP_PMUSERENR_EL0.
+ AP_PMCR_EL0[E] enables the operation of counters in this range.
+
+ If n is in the range HPMN\<=n\< AP_PMCR_EL0[N], the counter is
+ accessible only from EL2. AP_MDCR_EL2[HPME] enables the operation
+ of counters in this range.
+
+ If this field is set to 0, or to a value larger than
+ AP_PMCR_EL0[N], then the behavior in nonsecure EL0 and EL1 is
+ CONSTRAINED UNPREDICTABLE, and one of the following must
+ happen:
+
+ The number of counters accessible is an UNKNOWN nonzero
+ value less than AP_PMCR_EL0[N].
+
+ There is no access to any counters.
+
+ For reads of AP_MDCR_EL2[HPMN] by EL2 or higher, if this field is
+ set to 0 or to a value larger than AP_PMCR_EL0[N], the processor
+ must return a CONSTRAINED UNPREDICTABLE value being one of:
+ AP_PMCR_EL0[N].
+
+ The value that was written to AP_MDCR_EL2[HPMN].
+ (The value that was written to AP_MDCR_EL2[HPMN]) modulo
+ 2^(h), where h is the smallest number of bits required
+ for a value in the range 0 to AP_PMCR_EL0[N]. */
+#else /* Word 0 - Little Endian */
+ uint32_t hpmn : 5; /**< [ 4: 0](R/W) Defines the number of Performance Monitors counters that are
+ accessible from nonsecure EL0 and EL1 modes.
+
+ If the Performance Monitors extension is not implemented, this
+ field is RES0.
+
+ In nonsecure state, HPMN divides the Performance Monitors
+ counters as follows. For counter n in nonsecure state:
+
+ If n is in the range 0\<=n\<HPMN, the counter is accessible
+ from EL1 and EL2, and from EL0 if permitted by AP_PMUSERENR_EL0.
+ AP_PMCR_EL0[E] enables the operation of counters in this range.
+
+ If n is in the range HPMN\<=n\< AP_PMCR_EL0[N], the counter is
+ accessible only from EL2. AP_MDCR_EL2[HPME] enables the operation
+ of counters in this range.
+
+ If this field is set to 0, or to a value larger than
+ AP_PMCR_EL0[N], then the behavior in nonsecure EL0 and EL1 is
+ CONSTRAINED UNPREDICTABLE, and one of the following must
+ happen:
+
+ The number of counters accessible is an UNKNOWN nonzero
+ value less than AP_PMCR_EL0[N].
+
+ There is no access to any counters.
+
+ For reads of AP_MDCR_EL2[HPMN] by EL2 or higher, if this field is
+ set to 0 or to a value larger than AP_PMCR_EL0[N], the processor
+ must return a CONSTRAINED UNPREDICTABLE value being one of:
+ AP_PMCR_EL0[N].
+
+ The value that was written to AP_MDCR_EL2[HPMN].
+ (The value that was written to AP_MDCR_EL2[HPMN]) modulo
+ 2^(h), where h is the smallest number of bits required
+ for a value in the range 0 to AP_PMCR_EL0[N]. */
+ uint32_t tpmcr : 1; /**< [ 5: 5](R/W) Trap AP_PMCR_EL0 accesses.
+ If the Performance Monitors extension is not implemented, this
+ field is RES0.
+ 0 = Has no effect on AP_PMCR_EL0 accesses.
+ 1 = Trap nonsecure EL0 and EL1 accesses to AP_PMCR_EL0 to EL2. */
+ uint32_t tpm : 1; /**< [ 6: 6](R/W) Trap Performance Monitors accesses.
+ If the Performance Monitors extension is not implemented, this
+ field is RES0.
+ 0 = Has no effect on Performance Monitors accesses.
+ 1 = Trap nonsecure EL0 and EL1 accesses to Performance Monitors
+ registers that are not unallocated to EL2. */
+ uint32_t hpme : 1; /**< [ 7: 7](R/W) Hypervisor Performance Monitors Enable.
+ When this bit is set to 1, the Performance Monitors counters
+ that are reserved for use from EL2 or Secure state are
+ enabled. For more information see the description of the HPMN
+ field.
+ If the Performance Monitors extension is not implemented, this
+ field is RES0.
+ 0 = EL2 Performance Monitors disabled.
+ 1 = EL2 Performance Monitors enabled. */
+ uint32_t tde : 1; /**< [ 8: 8](R/W) Route Software debug exceptions from nonsecure EL1 and EL0 to
+ EL2. Also enables traps on all debug register accesses to EL2.
+ If AP_HCR_EL2[TGE] == 1, then this bit is ignored and treated as
+ though it is 1 other than for the value read back from
+ AP_MDCR_EL2. */
+ uint32_t tda : 1; /**< [ 9: 9](R/W) Trap debug access.
+
+ When this bit is set to 1, any valid nonsecure access to the
+ debug registers from EL1 or EL0, other than the registers
+ trapped by the TDRA and TDOSA bits, is trapped to EL2.
+
+ If AP_HCR_EL2[TGE] == 1 or AP_MDCR_EL2[TDE] == 1, then this bit is
+ ignored and treated as though it is 1 other than for the value
+ read back from AP_MDCR_EL2.
+ 0 = Has no effect on accesses to debug registers.
+ 1 = Trap valid nonsecure accesses to debug registers to EL2. */
+ uint32_t tdosa : 1; /**< [ 10: 10](R/W) Trap debug OS-related register access.
+ When this bit is set to 1, any access to the following
+ registers from EL1 or EL0 is trapped to EL2:
+
+ AArch32: DBGOSLAR, DBGOSLSR, DBGOSDLR, DBGPRCR.
+
+ AArch64: AP_OSLAR_EL1, AP_OSLSR_EL1, AP_OSDLR_EL1, AP_DBGPRCR_EL1.
+
+ If AP_HCR_EL2[TGE] == 1 or AP_MDCR_EL2[TDE] == 1, then this bit is
+ ignored and treated as though it is 1 other than for the value
+ read back from AP_MDCR_EL2.
+ 0 = Has no effect on accesses to OS-related debug registers.
+ 1 = Trap valid accesses to OS-related debug registers to EL2. */
+ uint32_t tdra : 1; /**< [ 11: 11](R/W) Trap debug ROM address register access.
+
+ When this bit is set to 1, any access to the following
+ registers from EL1 or EL0 is trapped to EL2:
+
+ AArch32: DBGDRAR, DBGDSAR.
+
+ AArch64: AP_MDRAR_EL1.
+
+ If AP_HCR_EL2[TGE] == 1 or AP_MDCR_EL2[TDE] == 1, then this bit is
+ ignored and treated as though it is 1 other than for the value
+ read back from AP_MDCR_EL2.
+
+ 0 = Has no effect on accesses to debug ROM address registers from
+ EL1 and EL0.
+ 1 = Trap valid EL1 and EL0 access to debug ROM address registers
+ to EL2. */
+ uint32_t reserved_12_16 : 5;
+ uint32_t hpmd : 1; /**< [ 17: 17](R/W) v8.1: Hyp performance monitors disable. This prohibits event counting
+ at EL2.
+ 0 = Event counting allowed at EL2.
+ 1 = Event counting prohibited at EL2, unless overridden by
+ the authentication interface.
+
+ Note: This behavior is independent of the value of the AP_HCR_EL2[E2H]
+ bit.
+ This control applies only to:
+ The counters in the range [0..[HPMN]).
+ If AP_PMCR_EL0[DP] is set to 1, AP_PMCCNTR_EL0.
+ The other event counters and, if AP_PMCR_EL0[DP] is set to 0,
+ AP_PMCCNTR_EL0 are unaffected.
+
+ On Warm reset, the field resets to 0. */
+ uint32_t reserved_18_31 : 14;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_ap_mdcr_el2_s cn9; */
+};
+typedef union bdk_ap_mdcr_el2 bdk_ap_mdcr_el2_t;
+
+#define BDK_AP_MDCR_EL2 BDK_AP_MDCR_EL2_FUNC()
+static inline uint64_t BDK_AP_MDCR_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_MDCR_EL2_FUNC(void)
+{
+ return 0x30401010100ll;
+}
+
+#define typedef_BDK_AP_MDCR_EL2 bdk_ap_mdcr_el2_t
+#define bustype_BDK_AP_MDCR_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_MDCR_EL2 "AP_MDCR_EL2"
+#define busnum_BDK_AP_MDCR_EL2 0
+#define arguments_BDK_AP_MDCR_EL2 -1,-1,-1,-1
+
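+/* Example (illustrative sketch, not part of the imported BDK sources):
+ * the bdk_ap_mdcr_el2_t union above can be used to partition the PMU
+ * counters so that counters [0..[HPMN]) stay visible to EL1/EL0 while
+ * the remainder are reserved for EL2, as described in the HPMN field.
+ * The mrs/msr accesses are standard AArch64 and must execute at EL2 or
+ * higher; the function name is hypothetical. */
+static inline void example_ap_mdcr_el2_partition_pmu(uint32_t el1_counters)
+{
+ bdk_ap_mdcr_el2_t mdcr;
+ uint64_t v;
+ asm volatile("mrs %0, mdcr_el2" : "=r"(v));
+ mdcr.u = (uint32_t)v;
+ mdcr.s.hpmn = el1_counters & 0x1f; /* counters [0..HPMN) for EL1/EL0 */
+ mdcr.s.hpme = 1; /* enable the EL2-reserved counters */
+ asm volatile("msr mdcr_el2, %0" : : "r"((uint64_t)mdcr.u));
+}
+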
+/**
+ * Register (SYSREG) ap_mdcr_el3
+ *
+ * AP Monitor Debug Configuration EL3 Register
+ * Provides configuration options for the Security extensions to
+ * self-hosted debug.
+ */
+union bdk_ap_mdcr_el3
+{
+ uint32_t u;
+ struct bdk_ap_mdcr_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_22_31 : 10;
+ uint32_t epmad : 1; /**< [ 21: 21](R/W) External debugger access to Performance Monitors registers
+ disabled. This disables access to these registers by an
+ external debugger:
+ 0 = Access to Performance Monitors registers from external
+ debugger is permitted.
+ 1 = Access to Performance Monitors registers from external
+ debugger is disabled, unless overridden by authentication
+ interface. */
+ uint32_t edad : 1; /**< [ 20: 20](R/W) External debugger access to breakpoint and watchpoint
+ registers disabled. This disables access to these registers by
+ an external debugger:
+ 0 = Access to breakpoint and watchpoint registers from external
+ debugger is permitted.
+ 1 = Access to breakpoint and watchpoint registers from external
+ debugger is disabled, unless overridden by authentication
+ interface. */
+ uint32_t reserved_18_19 : 2;
+ uint32_t spme : 1; /**< [ 17: 17](R/W) Secure performance monitors enable. This allows event counting
+ in Secure state:
+ 0 = Event counting prohibited in Secure state, unless overridden
+ by the authentication interface.
+ 1 = Event counting allowed in Secure state. */
+ uint32_t sdd : 1; /**< [ 16: 16](R/W) AArch64 secure self-hosted invasive debug disable. Disables
+ Software debug exceptions in Secure state, other than Software
+ breakpoint instruction.
+ SDD only applies when both of the following are true:
+ The processor is executing in Secure state.
+ Secure EL1 is using AArch64.
+ 0 = Taking Software debug events as debug exceptions is permitted
+ from Secure EL0 and EL1, if enabled by the relevant AP_MDSCR_EL1
+ and PSTATE[D] flags.
+ 1 = Software debug events, other than software breakpoint
+ instruction debug events, are disabled from all Exception
+ levels in Secure state. */
+ uint32_t reserved_14_15 : 2;
+ uint32_t nspb : 2; /**< [ 13: 12](R/W) Non-secure profiling buffer. Controls the owning translation regime and accesses to
+ Statistical
+ Profiling and profiling buffer control registers.
+ 0x0 = Profiling buffer uses secure virtual addresses. Statistical profiling enabled in
+ Secure state and disabled in Non-secure state. Accesses to Statistical profiling and
+ Profiling Buffer controls at EL2 and EL1 in both security states generate Trap exceptions
+ to EL3.
+ 0x1 = Profiling buffer uses secure virtual addresses. Statistical profiling enabled in
+ Secure state and disabled in Non-secure state. Accesses to Statistical profiling and
+ Profiling Buffer controls in Nonsecure state generate Trap exceptions to EL3.
+ 0x2 = Profiling buffer uses nonsecure virtual addresses. Statistical profiling enabled
+ in Non-secure state and disabled in Secure state. Accesses to Statistical Profiling and
+ Profiling Buffer controls at EL2 and EL1 in both security states generate Trap exceptions
+ to EL3.
+ 0x3 = Profiling buffer uses nonsecure virtual addresses. Statistical profiling enabled
+ in Non-secure state and disabled in secure state. Accesses to Statistical Profiling and
+ Profiling Buffer controls at Secure EL1 generate Trap exceptions to EL3.
+
+ If EL3 is not implemented and the PE executes in Non-secure state, the PE behaves as if
+ NSPB = 0x3.
+ If EL3 is not implemented and the PE executes in Secure state, the PE behaves as if NSPB
+ = 0x1. */
+ uint32_t reserved_11 : 1;
+ uint32_t tdosa : 1; /**< [ 10: 10](R/W) Trap debug OS-related register access.
+ When this bit is set to 1, any access to the following
+ registers from EL2 or below is trapped to EL3:
+
+ AArch32: DBGOSLAR, DBGOSLSR, DBGOSDLR, DBGPRCR.
+
+ AArch64: AP_OSLAR_EL1, AP_OSLSR_EL1, AP_OSDLR_EL1, AP_DBGPRCR_EL1.
+
+ 0 = Has no effect on accesses to OS-related debug registers.
+ 1 = Trap valid accesses to OS-related debug registers to EL3. */
+ uint32_t tda : 1; /**< [ 9: 9](R/W) Trap debug access.
+ When this bit is set to 1, any valid nonsecure access to the
+ debug registers from EL2 or below, other than the registers
+ trapped by the TDRA and TDOSA bits, is trapped to EL3.
+ 0 = Has no effect on accesses to debug registers.
+ 1 = Trap valid nonsecure accesses to debug registers to EL3. */
+ uint32_t reserved_7_8 : 2;
+ uint32_t tpm : 1; /**< [ 6: 6](R/W) Trap Performance Monitors accesses.
+ If the Performance Monitors extension is not implemented, this
+ field is RES0.
+ 0 = Has no effect on Performance Monitors accesses.
+ 1 = Trap nonsecure EL0, EL1 and EL2 accesses to Performance
+ Monitors registers that are not unallocated, or trapped to a
+ lower Exception level, to EL3. */
+ uint32_t reserved_0_5 : 6;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_5 : 6;
+ uint32_t tpm : 1; /**< [ 6: 6](R/W) Trap Performance Monitors accesses.
+ If the Performance Monitors extension is not implemented, this
+ field is RES0.
+ 0 = Has no effect on Performance Monitors accesses.
+ 1 = Trap nonsecure EL0, EL1 and EL2 accesses to Performance
+ Monitors registers that are not unallocated, or trapped to a
+ lower Exception level, to EL3. */
+ uint32_t reserved_7_8 : 2;
+ uint32_t tda : 1; /**< [ 9: 9](R/W) Trap debug access.
+ When this bit is set to 1, any valid nonsecure access to the
+ debug registers from EL2 or below, other than the registers
+ trapped by the TDRA and TDOSA bits, is trapped to EL3.
+ 0 = Has no effect on accesses to debug registers.
+ 1 = Trap valid nonsecure accesses to debug registers to EL3. */
+ uint32_t tdosa : 1; /**< [ 10: 10](R/W) Trap debug OS-related register access.
+ When this bit is set to 1, any access to the following
+ registers from EL2 or below is trapped to EL3:
+
+ AArch32: DBGOSLAR, DBGOSLSR, DBGOSDLR, DBGPRCR.
+
+ AArch64: AP_OSLAR_EL1, AP_OSLSR_EL1, AP_OSDLR_EL1, AP_DBGPRCR_EL1.
+
+ 0 = Has no effect on accesses to OS-related debug registers.
+ 1 = Trap valid accesses to OS-related debug registers to EL3. */
+ uint32_t reserved_11 : 1;
+ uint32_t nspb : 2; /**< [ 13: 12](R/W) Non-secure profiling buffer. Controls the owning translation regime and accesses to
+ Statistical
+ Profiling and profiling buffer control registers.
+ 0x0 = Profiling buffer uses secure virtual addresses. Statistical profiling enabled in
+ Secure state and disabled in Non-secure state. Accesses to Statistical profiling and
+ Profiling Buffer controls at EL2 and EL1 in both security states generate Trap exceptions
+ to EL3.
+ 0x1 = Profiling buffer uses secure virtual addresses. Statistical profiling enabled in
+ Secure state and disabled in Non-secure state. Accesses to Statistical profiling and
+ Profiling Buffer controls in Nonsecure state generate Trap exceptions to EL3.
+ 0x2 = Profiling buffer uses nonsecure virtual addresses. Statistical profiling enabled
+ in Non-secure state and disabled in Secure state. Accesses to Statistical Profiling and
+ Profiling Buffer controls at EL2 and EL1 in both security states generate Trap exceptions
+ to EL3.
+ 0x3 = Profiling buffer uses nonsecure virtual addresses. Statistical profiling enabled
+ in Non-secure state and disabled in secure state. Accesses to Statistical Profiling and
+ Profiling Buffer controls at Secure EL1 generate Trap exceptions to EL3.
+
+ If EL3 is not implemented and the PE executes in Non-secure state, the PE behaves as if
+ NSPB = 0x3.
+ If EL3 is not implemented and the PE executes in Secure state, the PE behaves as if NSPB
+ = 0x1. */
+ uint32_t reserved_14_15 : 2;
+ uint32_t sdd : 1; /**< [ 16: 16](R/W) AArch64 secure self-hosted invasive debug disable. Disables
+ Software debug exceptions in Secure state, other than Software
+ breakpoint instruction.
+ SDD only applies when both of the following are true:
+ The processor is executing in Secure state.
+ Secure EL1 is using AArch64.
+ 0 = Taking Software debug events as debug exceptions is permitted
+ from Secure EL0 and EL1, if enabled by the relevant AP_MDSCR_EL1
+ and PSTATE[D] flags.
+ 1 = Software debug events, other than software breakpoint
+ instruction debug events, are disabled from all Exception
+ levels in Secure state. */
+ uint32_t spme : 1; /**< [ 17: 17](R/W) Secure performance monitors enable. This allows event counting
+ in Secure state:
+ 0 = Event counting prohibited in Secure state, unless overridden
+ by the authentication interface.
+ 1 = Event counting allowed in Secure state. */
+ uint32_t reserved_18_19 : 2;
+ uint32_t edad : 1; /**< [ 20: 20](R/W) External debugger access to breakpoint and watchpoint
+ registers disabled. This disables access to these registers by
+ an external debugger:
+ 0 = Access to breakpoint and watchpoint registers from external
+ debugger is permitted.
+ 1 = Access to breakpoint and watchpoint registers from external
+ debugger is disabled, unless overridden by authentication
+ interface. */
+ uint32_t epmad : 1; /**< [ 21: 21](R/W) External debugger access to Performance Monitors registers
+ disabled. This disables access to these registers by an
+ external debugger:
+ 0 = Access to Performance Monitors registers from external
+ debugger is permitted.
+ 1 = Access to Performance Monitors registers from external
+ debugger is disabled, unless overridden by authentication
+ interface. */
+ uint32_t reserved_22_31 : 10;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_mdcr_el3_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_22_31 : 10;
+ uint32_t epmad : 1; /**< [ 21: 21](R/W) External debugger access to Performance Monitors registers
+ disabled. This disables access to these registers by an
+ external debugger:
+ 0 = Access to Performance Monitors registers from external
+ debugger is permitted.
+ 1 = Access to Performance Monitors registers from external
+ debugger is disabled, unless overridden by authentication
+ interface. */
+ uint32_t edad : 1; /**< [ 20: 20](R/W) External debugger access to breakpoint and watchpoint
+ registers disabled. This disables access to these registers by
+ an external debugger:
+ 0 = Access to breakpoint and watchpoint registers from external
+ debugger is permitted.
+ 1 = Access to breakpoint and watchpoint registers from external
+ debugger is disabled, unless overridden by authentication
+ interface. */
+ uint32_t reserved_18_19 : 2;
+ uint32_t spme : 1; /**< [ 17: 17](R/W) Secure performance monitors enable. This allows event counting
+ in Secure state:
+ 0 = Event counting prohibited in Secure state, unless overridden
+ by the authentication interface.
+ 1 = Event counting allowed in Secure state. */
+ uint32_t sdd : 1; /**< [ 16: 16](R/W) AArch64 secure self-hosted invasive debug disable. Disables
+ Software debug exceptions in Secure state, other than Software
+ breakpoint instruction.
+ SDD only applies when both of the following are true:
+ The processor is executing in Secure state.
+ Secure EL1 is using AArch64.
+ 0 = Taking Software debug events as debug exceptions is permitted
+ from Secure EL0 and EL1, if enabled by the relevant AP_MDSCR_EL1
+ and PSTATE[D] flags.
+ 1 = Software debug events, other than software breakpoint
+ instruction debug events, are disabled from all Exception
+ levels in Secure state. */
+ uint32_t reserved_14_15 : 2;
+ uint32_t reserved_11_13 : 3;
+ uint32_t tdosa : 1; /**< [ 10: 10](R/W) Trap debug OS-related register access.
+ When this bit is set to 1, any access to the following
+ registers from EL2 or below is trapped to EL3:
+
+ AArch32: DBGOSLAR, DBGOSLSR, DBGOSDLR, DBGPRCR.
+
+ AArch64: AP_OSLAR_EL1, AP_OSLSR_EL1, AP_OSDLR_EL1, AP_DBGPRCR_EL1.
+
+ 0 = Has no effect on accesses to OS-related debug registers.
+ 1 = Trap valid accesses to OS-related debug registers to EL3. */
+ uint32_t tda : 1; /**< [ 9: 9](R/W) Trap debug access.
+ When this bit is set to 1, any valid nonsecure access to the
+ debug registers from EL2 or below, other than the registers
+ trapped by the TDRA and TDOSA bits, is trapped to EL3.
+ 0 = Has no effect on accesses to debug registers.
+ 1 = Trap valid nonsecure accesses to debug registers to EL3. */
+ uint32_t reserved_7_8 : 2;
+ uint32_t tpm : 1; /**< [ 6: 6](R/W) Trap Performance Monitors accesses.
+ If the Performance Monitors extension is not implemented, this
+ field is RES0.
+ 0 = Has no effect on Performance Monitors accesses.
+ 1 = Trap nonsecure EL0, EL1 and EL2 accesses to Performance
+ Monitors registers that are not unallocated, or trapped to a
+ lower Exception level, to EL3. */
+ uint32_t reserved_0_5 : 6;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_5 : 6;
+ uint32_t tpm : 1; /**< [ 6: 6](R/W) Trap Performance Monitors accesses.
+ If the Performance Monitors extension is not implemented, this
+ field is RES0.
+ 0 = Has no effect on Performance Monitors accesses.
+ 1 = Trap nonsecure EL0, EL1 and EL2 accesses to Performance
+ Monitors registers that are not unallocated, or trapped to a
+ lower Exception level, to EL3. */
+ uint32_t reserved_7_8 : 2;
+ uint32_t tda : 1; /**< [ 9: 9](R/W) Trap debug access.
+ When this bit is set to 1, any valid nonsecure access to the
+ debug registers from EL2 or below, other than the registers
+ trapped by the TDRA and TDOSA bits, is trapped to EL3.
+ 0 = Has no effect on accesses to debug registers.
+ 1 = Trap valid nonsecure accesses to debug registers to EL3. */
+ uint32_t tdosa : 1; /**< [ 10: 10](R/W) Trap debug OS-related register access.
+ When this bit is set to 1, any access to the following
+ registers from EL2 or below is trapped to EL3:
+
+ AArch32: DBGOSLAR, DBGOSLSR, DBGOSDLR, DBGPRCR.
+
+ AArch64: AP_OSLAR_EL1, AP_OSLSR_EL1, AP_OSDLR_EL1, AP_DBGPRCR_EL1.
+
+ 0 = Has no effect on accesses to OS-related debug registers.
+ 1 = Trap valid accesses to OS-related debug registers to EL3. */
+ uint32_t reserved_11_13 : 3;
+ uint32_t reserved_14_15 : 2;
+ uint32_t sdd : 1; /**< [ 16: 16](R/W) AArch64 secure self-hosted invasive debug disable. Disables
+ Software debug exceptions in Secure state, other than Software
+ breakpoint instruction.
+ SDD only applies when both of the following are true:
+ The processor is executing in Secure state.
+ Secure EL1 is using AArch64.
+ 0 = Taking Software debug events as debug exceptions is permitted
+ from Secure EL0 and EL1, if enabled by the relevant AP_MDSCR_EL1
+ and PSTATE[D] flags.
+ 1 = Software debug events, other than software breakpoint
+ instruction debug events, are disabled from all Exception
+ levels in Secure state. */
+ uint32_t spme : 1; /**< [ 17: 17](R/W) Secure performance monitors enable. This allows event counting
+ in Secure state:
+ 0 = Event counting prohibited in Secure state, unless overridden
+ by the authentication interface.
+ 1 = Event counting allowed in Secure state. */
+ uint32_t reserved_18_19 : 2;
+ uint32_t edad : 1; /**< [ 20: 20](R/W) External debugger access to breakpoint and watchpoint
+ registers disabled. This disables access to these registers by
+ an external debugger:
+ 0 = Access to breakpoint and watchpoint registers from external
+ debugger is permitted.
+ 1 = Access to breakpoint and watchpoint registers from external
+ debugger is disabled, unless overridden by authentication
+ interface. */
+ uint32_t epmad : 1; /**< [ 21: 21](R/W) External debugger access to Performance Monitors registers
+ disabled. This disables access to these registers by an
+ external debugger:
+ 0 = Access to Performance Monitors registers from external
+ debugger is permitted.
+ 1 = Access to Performance Monitors registers from external
+ debugger is disabled, unless overridden by authentication
+ interface. */
+ uint32_t reserved_22_31 : 10;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_ap_mdcr_el3_s cn9; */
+};
+typedef union bdk_ap_mdcr_el3 bdk_ap_mdcr_el3_t;
+
+#define BDK_AP_MDCR_EL3 BDK_AP_MDCR_EL3_FUNC()
+static inline uint64_t BDK_AP_MDCR_EL3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_MDCR_EL3_FUNC(void)
+{
+ return 0x30601030100ll;
+}
+
+#define typedef_BDK_AP_MDCR_EL3 bdk_ap_mdcr_el3_t
+#define bustype_BDK_AP_MDCR_EL3 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_MDCR_EL3 "AP_MDCR_EL3"
+#define busnum_BDK_AP_MDCR_EL3 0
+#define arguments_BDK_AP_MDCR_EL3 -1,-1,-1,-1
+
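+/* Example (illustrative sketch, not part of the imported BDK sources):
+ * firmware running at EL3 could use bdk_ap_mdcr_el3_t to trap lower-EL
+ * debug and Performance Monitors register accesses, per the TDA, TDOSA
+ * and TPM fields above. The mrs/msr accesses are standard AArch64; the
+ * function name is hypothetical. */
+static inline void example_ap_mdcr_el3_trap_debug(void)
+{
+ bdk_ap_mdcr_el3_t mdcr;
+ uint64_t v;
+ asm volatile("mrs %0, mdcr_el3" : "=r"(v));
+ mdcr.u = (uint32_t)v;
+ mdcr.s.tda = 1; /* trap debug register accesses from EL2 and below */
+ mdcr.s.tdosa = 1; /* trap OS-related debug register accesses as well */
+ mdcr.s.tpm = 1; /* trap nonsecure PMU register accesses */
+ asm volatile("msr mdcr_el3, %0" : : "r"((uint64_t)mdcr.u));
+}
+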
+/**
+ * Register (SYSREG) ap_mdrar_el1
+ *
+ * AP Monitor Debug ROM Address Register
+ * Defines the base physical address of a 4KB-aligned memory-
+ * mapped debug component, usually a ROM table that locates and
+ * describes the memory-mapped debug components in the system.
+ */
+union bdk_ap_mdrar_el1
+{
+ uint64_t u;
+ struct bdk_ap_mdrar_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t romaddr : 36; /**< [ 47: 12](RO) Bits[P-1:12] of the ROM table physical address, where P is the
+ physical address size in bits (up to 48 bits) as stored in
+ AP_ID_AA64MMFR0_EL1. If P is less than 48, bits[47:P] of this
+ register are RES0.
+
+ Bits [11:0] of the ROM table physical address are zero.
+
+ If EL3 is implemented, ROMADDR is an address in nonsecure
+ memory. Whether the ROM table is also accessible in Secure
+ memory is implementation defined. */
+ uint64_t reserved_2_11 : 10;
+ uint64_t valid : 2; /**< [ 1: 0](RO) This field indicates whether the ROM Table address is valid.
+
+ 0x0 = ROM Table address is not valid.
+ 0x3 = ROM Table address is valid. */
+#else /* Word 0 - Little Endian */
+ uint64_t valid : 2; /**< [ 1: 0](RO) This field indicates whether the ROM Table address is valid.
+
+ 0x0 = ROM Table address is not valid.
+ 0x3 = ROM Table address is valid. */
+ uint64_t reserved_2_11 : 10;
+ uint64_t romaddr : 36; /**< [ 47: 12](RO) Bits[P-1:12] of the ROM table physical address, where P is the
+ physical address size in bits (up to 48 bits) as stored in
+ AP_ID_AA64MMFR0_EL1. If P is less than 48, bits[47:P] of this
+ register are RES0.
+
+ Bits [11:0] of the ROM table physical address are zero.
+
+ If EL3 is implemented, ROMADDR is an address in nonsecure
+ memory. Whether the ROM table is also accessible in Secure
+ memory is implementation defined. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_mdrar_el1_s cn; */
+};
+typedef union bdk_ap_mdrar_el1 bdk_ap_mdrar_el1_t;
+
+#define BDK_AP_MDRAR_EL1 BDK_AP_MDRAR_EL1_FUNC()
+static inline uint64_t BDK_AP_MDRAR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_MDRAR_EL1_FUNC(void)
+{
+ return 0x20001000000ll;
+}
+
+#define typedef_BDK_AP_MDRAR_EL1 bdk_ap_mdrar_el1_t
+#define bustype_BDK_AP_MDRAR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_MDRAR_EL1 "AP_MDRAR_EL1"
+#define busnum_BDK_AP_MDRAR_EL1 0
+#define arguments_BDK_AP_MDRAR_EL1 -1,-1,-1,-1
+
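+/* Example (illustrative sketch, not part of the imported BDK sources):
+ * given the VALID/ROMADDR layout above, the debug ROM table base is
+ * recovered by shifting ROMADDR back into bits [47:12]; bits [11:0] of
+ * the physical address are zero. The mrs access is standard AArch64;
+ * the function name is hypothetical. */
+static inline uint64_t example_ap_mdrar_rom_table_base(void)
+{
+ bdk_ap_mdrar_el1_t mdrar;
+ asm volatile("mrs %0, mdrar_el1" : "=r"(mdrar.u));
+ if (mdrar.s.valid != 0x3)
+ return 0; /* ROM table address is not valid */
+ return (uint64_t)mdrar.s.romaddr << 12;
+}
+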
+/**
+ * Register (SYSREG) ap_mdscr_el1
+ *
+ * AP Monitor Debug System Control Register
+ * Main control register for the debug implementation.
+ */
+union bdk_ap_mdscr_el1
+{
+ uint32_t u;
+ struct bdk_ap_mdscr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_31 : 1;
+ uint32_t rxfull : 1; /**< [ 30: 30](R/W) Used for save/restore of EDSCR[RXfull].
+ When AP_OSLSR_EL1[OSLK] == 0 (the OS lock is unlocked), this bit
+ is RO, and software must treat it as UNK/SBZP.
+
+ When AP_OSLSR_EL1[OSLK] == 1 (the OS lock is locked), this bit is
+ RW. */
+ uint32_t txfull : 1; /**< [ 29: 29](R/W) Used for save/restore of EDSCR[TXfull].
+ When AP_OSLSR_EL1[OSLK] == 0 (the OS lock is unlocked), this bit
+ is RO, and software must treat it as UNK/SBZP.
+
+ When AP_OSLSR_EL1[OSLK] == 1 (the OS lock is locked), this bit is
+ RW. */
+ uint32_t reserved_28 : 1;
+ uint32_t rxo : 1; /**< [ 27: 27](R/W) Used for save/restore of EDSCR[RXO].
+ When AP_OSLSR_EL1[OSLK] == 0 (the OS lock is unlocked), this bit
+ is RO. Software must treat it as UNKNOWN and use an SBZP
+ policy for writes.
+
+ When AP_OSLSR_EL1[OSLK] == 1 (the OS lock is locked), this bit is
+ RW. */
+ uint32_t txu : 1; /**< [ 26: 26](R/W) Used for save/restore of EDSCR[TXU].
+ When AP_OSLSR_EL1[OSLK] == 0 (the OS lock is unlocked), this bit
+ is RO. Software must treat it as UNKNOWN and use an SBZP
+ policy for writes.
+
+ When AP_OSLSR_EL1[OSLK] == 1 (the OS lock is locked), this bit is
+ RW. */
+ uint32_t reserved_24_25 : 2;
+ uint32_t intdis : 2; /**< [ 23: 22](R/W) Used for save/restore of EDSCR[INTdis].
+ When AP_OSLSR_EL1[OSLK] == 0 (the OS lock is unlocked), this field
+ is RO. Software must treat it as UNKNOWN and use an SBZP
+ policy for writes.
+
+ When AP_OSLSR_EL1[OSLK] == 1 (the OS lock is locked), this field
+ is RW. */
+ uint32_t tda : 1; /**< [ 21: 21](R/W) Used for save/restore of EDSCR[TDA].
+ When AP_OSLSR_EL1[OSLK] == 0 (the OS lock is unlocked), this bit
+ is RO. Software must treat it as UNKNOWN and use an SBZP
+ policy for writes.
+
+ When AP_OSLSR_EL1[OSLK] == 1 (the OS lock is locked), this bit is
+ RW. */
+ uint32_t reserved_20 : 1;
+ uint32_t sc2 : 1; /**< [ 19: 19](R/W) Used for save/restore of EDSCR[SC2].
+
+ When AP_OSLSR_EL1[OSLK] = 0 (the OS lock is unlocked), this bit is
+ read-only. Software must treat it as unknown and use an SBZP policy for writes.
+
+ When AP_OSLSR_EL1[OSLK] = 1 (the OS lock is locked), this bit is R/W. */
+ uint32_t reserved_16_18 : 3;
+ uint32_t mde : 1; /**< [ 15: 15](R/W) Monitor debug events. Enable Breakpoint, Watchpoint, and
+ Vector catch debug exceptions.
+ 0 = Breakpoint, Watchpoint, and Vector catch debug exceptions
+ disabled.
+ 1 = Breakpoint, Watchpoint, and Vector catch debug exceptions
+ enabled. */
+ uint32_t hde : 1; /**< [ 14: 14](R/W) Used for save/restore of EDSCR[HDE].
+
+ When AP_OSLSR_EL1[OSLK] == 0 (the OS lock is unlocked), this bit
+ is RO. Software must treat it as UNKNOWN and use an SBZP
+ policy for writes.
+
+ When AP_OSLSR_EL1[OSLK] == 1 (the OS lock is locked), this bit is
+ RW. */
+ uint32_t kde : 1; /**< [ 13: 13](R/W) Local (kernel) debug enable. If EL{d} is using
+ AArch64, enable Software debug events within EL{d}.
+
+ RES0 if EL{d} is using AArch32.
+ 0 = Software debug events, other than Software breakpoint
+ instructions, disabled within EL{d}.
+ 1 = Software debug events enabled within EL{d}. */
+ uint32_t tdcc : 1; /**< [ 12: 12](R/W) Trap Debug Communications Channel access. When set, any EL0
+ access to the following registers is trapped to EL1:
+
+ AArch32: DBGDIDR, DBGDRAR, DBGDSAR, DBGDSCRint, DBGDTRTXint,
+ DBGDTRRXint.
+
+ AArch64: AP_MDCCSR_EL0, AP_DBGDTR_EL0, AP_DBGDTRTX_EL0, AP_DBGDTRRX_EL0. */
+ uint32_t reserved_7_11 : 5;
+ uint32_t err : 1; /**< [ 6: 6](R/W) Used for save/restore of EDSCR[ERR].
+
+ When AP_OSLSR_EL1[OSLK] == 0 (the OS lock is unlocked), this bit
+ is RO. Software must treat it as UNKNOWN and use an SBZP
+ policy for writes.
+
+ When AP_OSLSR_EL1[OSLK] == 1 (the OS lock is locked), this bit is
+ RW. */
+ uint32_t reserved_1_5 : 5;
+ uint32_t ss : 1; /**< [ 0: 0](R/W) Software step control bit. If EL{d} is using AArch64,
+ enable Software step.
+ RES0 if EL{d} is using AArch32.
+ 0 = Software step disabled
+ 1 = Software step enabled. */
+#else /* Word 0 - Little Endian */
+ uint32_t ss : 1; /**< [ 0: 0](R/W) Software step control bit. If EL{d} is using AArch64,
+ enable Software step.
+ RES0 if EL{d} is using AArch32.
+ 0 = Software step disabled
+ 1 = Software step enabled. */
+ uint32_t reserved_1_5 : 5;
+ uint32_t err : 1; /**< [ 6: 6](R/W) Used for save/restore of EDSCR[ERR].
+
+ When AP_OSLSR_EL1[OSLK] == 0 (the OS lock is unlocked), this bit
+ is RO. Software must treat it as UNKNOWN and use an SBZP
+ policy for writes.
+
+ When AP_OSLSR_EL1[OSLK] == 1 (the OS lock is locked), this bit is
+ RW. */
+ uint32_t reserved_7_11 : 5;
+ uint32_t tdcc : 1; /**< [ 12: 12](R/W) Trap Debug Communications Channel access. When set, any EL0
+ access to the following registers is trapped to EL1:
+
+ AArch32: DBGDIDR, DBGDRAR, DBGDSAR, DBGDSCRint, DBGDTRTXint,
+ DBGDTRRXint.
+
+ AArch64: AP_MDCCSR_EL0, AP_DBGDTR_EL0, AP_DBGDTRTX_EL0, AP_DBGDTRRX_EL0. */
+ uint32_t kde : 1; /**< [ 13: 13](R/W) Local (kernel) debug enable. If EL{d} is using
+ AArch64, enable Software debug events within EL{d}.
+
+ RES0 if EL{d} is using AArch32.
+ 0 = Software debug events, other than Software breakpoint
+ instructions, disabled within EL{d}.
+ 1 = Software debug events enabled within EL{d}. */
+ uint32_t hde : 1; /**< [ 14: 14](R/W) Used for save/restore of EDSCR[HDE].
+
+ When AP_OSLSR_EL1[OSLK] == 0 (the OS lock is unlocked), this bit
+ is RO. Software must treat it as UNKNOWN and use an SBZP
+ policy for writes.
+
+ When AP_OSLSR_EL1[OSLK] == 1 (the OS lock is locked), this bit is
+ RW. */
+ uint32_t mde : 1; /**< [ 15: 15](R/W) Monitor debug events. Enable Breakpoint, Watchpoint, and
+ Vector catch debug exceptions.
+ 0 = Breakpoint, Watchpoint, and Vector catch debug exceptions
+ disabled.
+ 1 = Breakpoint, Watchpoint, and Vector catch debug exceptions
+ enabled. */
+ uint32_t reserved_16_18 : 3;
+ uint32_t sc2 : 1; /**< [ 19: 19](R/W) Used for save/restore of EDSCR[SC2].
+
+ When AP_OSLSR_EL1[OSLK] = 0 (the OS lock is unlocked), this bit is
+ read-only. Software must treat it as unknown and use an SBZP policy for writes.
+
+ When AP_OSLSR_EL1[OSLK] = 1 (the OS lock is locked), this bit is R/W. */
+ uint32_t reserved_20 : 1;
+ uint32_t tda : 1; /**< [ 21: 21](R/W) Used for save/restore of EDSCR[TDA].
+ When AP_OSLSR_EL1[OSLK] == 0 (the OS lock is unlocked), this bit
+ is RO. Software must treat it as UNKNOWN and use an SBZP
+ policy for writes.
+
+ When AP_OSLSR_EL1[OSLK] == 1 (the OS lock is locked), this bit is
+ RW. */
+ uint32_t intdis : 2; /**< [ 23: 22](R/W) Used for save/restore of EDSCR[INTdis].
+ When AP_OSLSR_EL1[OSLK] == 0 (the OS lock is unlocked), this field
+ is RO. Software must treat it as UNKNOWN and use an SBZP
+ policy for writes.
+
+ When AP_OSLSR_EL1[OSLK] == 1 (the OS lock is locked), this field
+ is RW. */
+ uint32_t reserved_24_25 : 2;
+ uint32_t txu : 1; /**< [ 26: 26](R/W) Used for save/restore of EDSCR[TXU].
+ When AP_OSLSR_EL1[OSLK] == 0 (the OS lock is unlocked), this bit
+ is RO. Software must treat it as UNKNOWN and use an SBZP
+ policy for writes.
+
+ When AP_OSLSR_EL1[OSLK] == 1 (the OS lock is locked), this bit is
+ RW. */
+ uint32_t rxo : 1; /**< [ 27: 27](R/W) Used for save/restore of EDSCR[RXO].
+ When AP_OSLSR_EL1[OSLK] == 0 (the OS lock is unlocked), this bit
+ is RO. Software must treat it as UNKNOWN and use an SBZP
+ policy for writes.
+
+ When AP_OSLSR_EL1[OSLK] == 1 (the OS lock is locked), this bit is
+ RW. */
+ uint32_t reserved_28 : 1;
+ uint32_t txfull : 1; /**< [ 29: 29](R/W) Used for save/restore of EDSCR[TXfull].
+ When AP_OSLSR_EL1[OSLK] == 0 (the OS lock is unlocked), this bit
+ is RO, and software must treat it as UNK/SBZP.
+
+ When AP_OSLSR_EL1[OSLK] == 1 (the OS lock is locked), this bit is
+ RW. */
+ uint32_t rxfull : 1; /**< [ 30: 30](R/W) Used for save/restore of EDSCR[RXfull].
+ When AP_OSLSR_EL1[OSLK] == 0 (the OS lock is unlocked), this bit
+ is RO, and software must treat it as UNK/SBZP.
+
+ When AP_OSLSR_EL1[OSLK] == 1 (the OS lock is locked), this bit is
+ RW. */
+ uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_mdscr_el1_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_31 : 1;
+ uint32_t rxfull : 1; /**< [ 30: 30](R/W) Used for save/restore of EDSCR[RXfull].
+ When AP_OSLSR_EL1[OSLK] == 0 (the OS lock is unlocked), this bit
+ is RO, and software must treat it as UNK/SBZP.
+
+ When AP_OSLSR_EL1[OSLK] == 1 (the OS lock is locked), this bit is
+ RW. */
+ uint32_t txfull : 1; /**< [ 29: 29](R/W) Used for save/restore of EDSCR[TXfull].
+ When AP_OSLSR_EL1[OSLK] == 0 (the OS lock is unlocked), this bit
+ is RO, and software must treat it as UNK/SBZP.
+
+ When AP_OSLSR_EL1[OSLK] == 1 (the OS lock is locked), this bit is
+ RW. */
+ uint32_t reserved_28 : 1;
+ uint32_t rxo : 1; /**< [ 27: 27](R/W) Used for save/restore of EDSCR[RXO].
+ When AP_OSLSR_EL1[OSLK] == 0 (the OS lock is unlocked), this bit
+ is RO. Software must treat it as UNKNOWN and use an SBZP
+ policy for writes.
+
+ When AP_OSLSR_EL1[OSLK] == 1 (the OS lock is locked), this bit is
+ RW. */
+ uint32_t txu : 1; /**< [ 26: 26](R/W) Used for save/restore of EDSCR[TXU].
+ When AP_OSLSR_EL1[OSLK] == 0 (the OS lock is unlocked), this bit
+ is RO. Software must treat it as UNKNOWN and use an SBZP
+ policy for writes.
+
+ When AP_OSLSR_EL1[OSLK] == 1 (the OS lock is locked), this bit is
+ RW. */
+ uint32_t reserved_24_25 : 2;
+ uint32_t intdis : 2; /**< [ 23: 22](R/W) Used for save/restore of EDSCR[INTdis].
+ When AP_OSLSR_EL1[OSLK] == 0 (the OS lock is unlocked), this field
+ is RO. Software must treat it as UNKNOWN and use an SBZP
+ policy for writes.
+
+ When AP_OSLSR_EL1[OSLK] == 1 (the OS lock is locked), this field
+ is RW. */
+ uint32_t tda : 1; /**< [ 21: 21](R/W) Used for save/restore of EDSCR[TDA].
+ When AP_OSLSR_EL1[OSLK] == 0 (the OS lock is unlocked), this bit
+ is RO. Software must treat it as UNKNOWN and use an SBZP
+ policy for writes.
+
+ When AP_OSLSR_EL1[OSLK] == 1 (the OS lock is locked), this bit is
+ RW. */
+ uint32_t reserved_20 : 1;
+ uint32_t reserved_19 : 1;
+ uint32_t reserved_16_18 : 3;
+ uint32_t mde : 1; /**< [ 15: 15](R/W) Monitor debug events. Enable Breakpoint, Watchpoint, and
+ Vector catch debug exceptions.
+ 0 = Breakpoint, Watchpoint, and Vector catch debug exceptions
+ disabled.
+ 1 = Breakpoint, Watchpoint, and Vector catch debug exceptions
+ enabled. */
+ uint32_t hde : 1; /**< [ 14: 14](R/W) Used for save/restore of EDSCR[HDE].
+
+ When AP_OSLSR_EL1[OSLK] == 0 (the OS lock is unlocked), this bit
+ is RO. Software must treat it as UNKNOWN and use an SBZP
+ policy for writes.
+
+ When AP_OSLSR_EL1[OSLK] == 1 (the OS lock is locked), this bit is
+ RW. */
+ uint32_t kde : 1; /**< [ 13: 13](R/W) Local (kernel) debug enable. If EL{d} is using
+ AArch64, enable Software debug events within EL{d}.
+
+ RES0 if EL{d} is using AArch32.
+ 0 = Software debug events, other than Software breakpoint
+ instructions, disabled within EL{d}.
+ 1 = Software debug events enabled within EL{d}. */
+ uint32_t tdcc : 1; /**< [ 12: 12](R/W) Trap Debug Communications Channel access. When set, any EL0
+ access to the following registers is trapped to EL1:
+
+ AArch32: DBGDIDR, DBGDRAR, DBGDSAR, DBGDSCRint, DBGDTRTXint,
+ DBGDTRRXint.
+
+ AArch64: AP_MDCCSR_EL0, AP_DBGDTR_EL0, AP_DBGDTRTX_EL0, AP_DBGDTRRX_EL0. */
+ uint32_t reserved_7_11 : 5;
+ uint32_t err : 1; /**< [ 6: 6](R/W) Used for save/restore of EDSCR[ERR].
+
+ When AP_OSLSR_EL1[OSLK] == 0 (the OS lock is unlocked), this bit
+ is RO. Software must treat it as UNKNOWN and use an SBZP
+ policy for writes.
+
+ When AP_OSLSR_EL1[OSLK] == 1 (the OS lock is locked), this bit is
+ RW. */
+ uint32_t reserved_1_5 : 5;
+ uint32_t ss : 1; /**< [ 0: 0](R/W) Software step control bit. If EL{d} is using AArch64,
+ enable Software step.
+ RES0 if EL{d} is using AArch32.
+ 0 = Software step disabled
+ 1 = Software step enabled. */
+#else /* Word 0 - Little Endian */
+ uint32_t ss : 1; /**< [ 0: 0](R/W) Software step control bit. If EL{d} is using AArch64,
+ enable Software step.
+ RES0 if EL{d} is using AArch32.
+ 0 = Software step disabled
+ 1 = Software step enabled. */
+ uint32_t reserved_1_5 : 5;
+ uint32_t err : 1; /**< [ 6: 6](R/W) Used for save/restore of EDSCR[ERR].
+
+ When AP_OSLSR_EL1[OSLK] == 0 (the OS lock is unlocked), this bit
+ is RO. Software must treat it as UNKNOWN and use an SBZP
+ policy for writes.
+
+ When AP_OSLSR_EL1[OSLK] == 1 (the OS lock is locked), this bit is
+ RW. */
+ uint32_t reserved_7_11 : 5;
+ uint32_t tdcc : 1; /**< [ 12: 12](R/W) Trap Debug Communications Channel access. When set, any EL0
+ access to the following registers is trapped to EL1:
+
+ AArch32: DBGDIDR, DBGDRAR, DBGDSAR, DBGDSCRint, DBGDTRTXint,
+ DBGDTRRXint.
+
+ AArch64: AP_MDCCSR_EL0, AP_DBGDTR_EL0, AP_DBGDTRTX_EL0, AP_DBGDTRRX_EL0. */
+ uint32_t kde : 1; /**< [ 13: 13](R/W) Local (kernel) debug enable. If EL{d} is using
+ AArch64, enable Software debug events within EL{d}.
+
+ RES0 if EL{d} is using AArch32.
+ 0 = Software debug events, other than Software breakpoint
+ instructions, disabled within EL{d}.
+ 1 = Software debug events enabled within EL{d}. */
+ uint32_t hde : 1; /**< [ 14: 14](R/W) Used for save/restore of EDSCR[HDE].
+
+ When AP_OSLSR_EL1[OSLK] == 0 (the OS lock is unlocked), this bit
+ is RO. Software must treat it as UNKNOWN and use an SBZP
+ policy for writes.
+
+ When AP_OSLSR_EL1[OSLK] == 1 (the OS lock is locked), this bit is
+ RW. */
+ uint32_t mde : 1; /**< [ 15: 15](R/W) Monitor debug events. Enable Breakpoint, Watchpoint, and
+ Vector catch debug exceptions.
+ 0 = Breakpoint, Watchpoint, and Vector catch debug exceptions
+ disabled.
+ 1 = Breakpoint, Watchpoint, and Vector catch debug exceptions
+ enabled. */
+ uint32_t reserved_16_18 : 3;
+ uint32_t reserved_19 : 1;
+ uint32_t reserved_20 : 1;
+ uint32_t tda : 1; /**< [ 21: 21](R/W) Used for save/restore of EDSCR[TDA].
+ When AP_OSLSR_EL1[OSLK] == 0 (the OS lock is unlocked), this bit
+ is RO. Software must treat it as UNKNOWN and use an SBZP
+ policy for writes.
+
+ When AP_OSLSR_EL1[OSLK] == 1 (the OS lock is locked), this bit is
+ RW. */
+ uint32_t intdis : 2; /**< [ 23: 22](R/W) Used for save/restore of EDSCR[INTdis].
+ When AP_OSLSR_EL1[OSLK] == 0 (the OS lock is unlocked), this field
+ is RO. Software must treat it as UNKNOWN and use an SBZP
+ policy for writes.
+
+ When AP_OSLSR_EL1[OSLK] == 1 (the OS lock is locked), this field
+ is RW. */
+ uint32_t reserved_24_25 : 2;
+ uint32_t txu : 1; /**< [ 26: 26](R/W) Used for save/restore of EDSCR[TXU].
+ When AP_OSLSR_EL1[OSLK] == 0 (the OS lock is unlocked), this bit
+ is RO. Software must treat it as UNKNOWN and use an SBZP
+ policy for writes.
+
+ When AP_OSLSR_EL1[OSLK] == 1 (the OS lock is locked), this bit is
+ RW. */
+ uint32_t rxo : 1; /**< [ 27: 27](R/W) Used for save/restore of EDSCR[RXO].
+ When AP_OSLSR_EL1[OSLK] == 0 (the OS lock is unlocked), this bit
+ is RO. Software must treat it as UNKNOWN and use an SBZP
+ policy for writes.
+
+ When AP_OSLSR_EL1[OSLK] == 1 (the OS lock is locked), this bit is
+ RW. */
+ uint32_t reserved_28 : 1;
+ uint32_t txfull : 1; /**< [ 29: 29](R/W) Used for save/restore of EDSCR[TXfull].
+ When AP_OSLSR_EL1[OSLK] == 0 (the OS lock is unlocked), this bit
+ is RO, and software must treat it as UNK/SBZP.
+
+ When AP_OSLSR_EL1[OSLK] == 1 (the OS lock is locked), this bit is
+ RW. */
+ uint32_t rxfull : 1; /**< [ 30: 30](R/W) Used for save/restore of EDSCR[RXfull].
+ When AP_OSLSR_EL1[OSLK] == 0 (the OS lock is unlocked), this bit
+ is RO, and software must treat it as UNK/SBZP.
+
+ When AP_OSLSR_EL1[OSLK] == 1 (the OS lock is locked), this bit is
+ RW. */
+ uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ /* struct bdk_ap_mdscr_el1_s cn9; */
+ /* struct bdk_ap_mdscr_el1_s cn81xx; */
+ /* struct bdk_ap_mdscr_el1_s cn83xx; */
+ /* struct bdk_ap_mdscr_el1_s cn88xxp2; */
+};
+typedef union bdk_ap_mdscr_el1 bdk_ap_mdscr_el1_t;
+
+#define BDK_AP_MDSCR_EL1 BDK_AP_MDSCR_EL1_FUNC()
+static inline uint64_t BDK_AP_MDSCR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_MDSCR_EL1_FUNC(void)
+{
+ return 0x20000020200ll;
+}
+
+#define typedef_BDK_AP_MDSCR_EL1 bdk_ap_mdscr_el1_t
+#define bustype_BDK_AP_MDSCR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_MDSCR_EL1 "AP_MDSCR_EL1"
+#define busnum_BDK_AP_MDSCR_EL1 0
+#define arguments_BDK_AP_MDSCR_EL1 -1,-1,-1,-1
+
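+/* Example (illustrative sketch, not part of the imported BDK sources):
+ * a self-hosted debugger at EL1 could use bdk_ap_mdscr_el1_t to enable
+ * breakpoint/watchpoint exceptions via the MDE and KDE fields above.
+ * The mrs/msr accesses are standard AArch64; the function name is
+ * hypothetical. */
+static inline void example_ap_mdscr_el1_enable_debug(void)
+{
+ bdk_ap_mdscr_el1_t mdscr;
+ uint64_t v;
+ asm volatile("mrs %0, mdscr_el1" : "=r"(v));
+ mdscr.u = (uint32_t)v;
+ mdscr.s.mde = 1; /* breakpoint/watchpoint/vector-catch exceptions on */
+ mdscr.s.kde = 1; /* allow debug events within the current EL */
+ asm volatile("msr mdscr_el1, %0" : : "r"((uint64_t)mdscr.u));
+}
+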
+/**
+ * Register (SYSREG) ap_midr_el1
+ *
+ * AP Main ID Register
+ * This register provides identification information for the PE, including an
+ * implementer code for the device and a device ID number.
+ */
+union bdk_ap_midr_el1
+{
+ uint32_t u;
+ struct bdk_ap_midr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t implementer : 8; /**< [ 31: 24](RO) Implementer code that has been assigned by ARM. Assigned codes include the
+ following:
+ 0x41 = 'A' = ARM Limited.
+ 0x42 = 'B' = Broadcom Corporation.
+ 0x43 = 'C' = Cavium Inc.
+ 0x44 = 'D' = Digital Equipment Corporation.
+ 0x49 = 'I' = Infineon Technologies AG.
+ 0x4D = 'M' = Motorola or Freescale Semiconductor Inc.
+ 0x4E = 'N' = NVIDIA Corporation.
+ 0x50 = 'P' = Applied Micro Circuits Corporation.
+ 0x51 = 'Q' = Qualcomm Inc.
+ 0x56 = 'V' = Marvell International Ltd.
+ 0x69 = 'i' = Intel Corporation.
+
+ For CNXXXX, 'C'. */
+ uint32_t variant : 4; /**< [ 23: 20](RO) An implementation defined variant number. Typically, this
+ field is used to distinguish between different product
+ variants, or major revisions of a product.
+
+ For CNXXXX this is the major revision field.
+ See MIO_FUS_DAT2[CHIP_ID] bits 21..19 for more information. */
+ uint32_t architecture : 4; /**< [ 19: 16](RO) 0x1 = ARMv4.
+ 0x2 = ARMv4T.
+ 0x3 = ARMv5 (obsolete).
+ 0x4 = ARMv5T.
+ 0x5 = ARMv5TE.
+ 0x6 = ARMv5TEJ.
+ 0x7 = ARMv6.
+ 0xF = Defined by CPUID scheme.
+
+ For CNXXXX, CPUID scheme. */
+ uint32_t partnum : 12; /**< [ 15: 4](RO) An implementation defined primary part number for the device.
+ On processors implemented by ARM, if the top four bits of the
+ primary part number are 0x0 or 0x7, the variant and architecture
+ are encoded differently.
+ Processors implemented by ARM have an Implementer code of 0x41.
+
+ For CNXXXX, the chip ID. Enumerated by PCC_PROD_E. */
+ uint32_t revision : 4; /**< [ 3: 0](RO) An implementation defined revision number for the device.
+
+ For CNXXXX this is the minor revision field.
+ See MIO_FUS_DAT2[CHIP_ID] bits 18..16 for more information. */
+#else /* Word 0 - Little Endian */
+ uint32_t revision : 4; /**< [ 3: 0](RO) An implementation defined revision number for the device.
+
+ For CNXXXX this is the minor revision field.
+ See MIO_FUS_DAT2[CHIP_ID] bits 18..16 for more information. */
+ uint32_t partnum : 12; /**< [ 15: 4](RO) An implementation defined primary part number for the device.
+ On processors implemented by ARM, if the top four bits of the
+ primary part number are 0x0 or 0x7, the variant and architecture
+ are encoded differently.
+ Processors implemented by ARM have an Implementer code of 0x41.
+
+ For CNXXXX, the chip ID. Enumerated by PCC_PROD_E. */
+ uint32_t architecture : 4; /**< [ 19: 16](RO) 0x1 = ARMv4.
+ 0x2 = ARMv4T.
+ 0x3 = ARMv5 (obsolete).
+ 0x4 = ARMv5T.
+ 0x5 = ARMv5TE.
+ 0x6 = ARMv5TEJ.
+ 0x7 = ARMv6.
+ 0xF = Defined by CPUID scheme.
+
+ For CNXXXX, CPUID scheme. */
+ uint32_t variant : 4; /**< [ 23: 20](RO) An implementation defined variant number. Typically, this
+ field is used to distinguish between different product
+ variants, or major revisions of a product.
+
+ For CNXXXX this is the major revision field.
+ See MIO_FUS_DAT2[CHIP_ID] bits 21..19 for more information. */
+ uint32_t implementer : 8; /**< [ 31: 24](RO) Implementer code that has been assigned by ARM. Assigned codes include the
+ following:
+ 0x41 = 'A' = ARM Limited.
+ 0x42 = 'B' = Broadcom Corporation.
+ 0x43 = 'C' = Cavium Inc.
+ 0x44 = 'D' = Digital Equipment Corporation.
+ 0x49 = 'I' = Infineon Technologies AG.
+ 0x4D = 'M' = Motorola or Freescale Semiconductor Inc.
+ 0x4E = 'N' = NVIDIA Corporation.
+ 0x50 = 'P' = Applied Micro Circuits Corporation.
+ 0x51 = 'Q' = Qualcomm Inc.
+ 0x56 = 'V' = Marvell International Ltd.
+ 0x69 = 'i' = Intel Corporation.
+
+ For CNXXXX, 'C'. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_midr_el1_s cn; */
+};
+typedef union bdk_ap_midr_el1 bdk_ap_midr_el1_t;
+
+#define BDK_AP_MIDR_EL1 BDK_AP_MIDR_EL1_FUNC()
+static inline uint64_t BDK_AP_MIDR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_MIDR_EL1_FUNC(void)
+{
+ return 0x30000000000ll;
+}
+
+#define typedef_BDK_AP_MIDR_EL1 bdk_ap_midr_el1_t
+#define bustype_BDK_AP_MIDR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_MIDR_EL1 "AP_MIDR_EL1"
+#define busnum_BDK_AP_MIDR_EL1 0
+#define arguments_BDK_AP_MIDR_EL1 -1,-1,-1,-1
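+
+/* Illustrative sketch, not part of the imported BDK sources: decoding
+ * MIDR_EL1 through the union above. The MRS instruction and the field
+ * layout are architectural; the helper name is hypothetical. */
+static inline int bdk_example_is_cavium_part(void)
+{
+    uint64_t v;
+    bdk_ap_midr_el1_t midr;
+    asm volatile("mrs %0, midr_el1" : "=r" (v));
+    midr.u = (uint32_t)v;
+    /* 0x43 = 'C' = Cavium Inc., per the implementer encoding above. */
+    return midr.s.implementer == 0x43;
+}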
+
+/**
+ * Register (SYSREG) ap_mpidr_el1
+ *
+ * AP Multiprocessor Affinity Register
+ * This register in a multiprocessor system provides an additional PE identification
+ * mechanism for scheduling purposes, and indicates whether the implementation includes
+ * the multiprocessing extensions.
+ */
+union bdk_ap_mpidr_el1
+{
+ uint64_t u;
+ struct bdk_ap_mpidr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_40_63 : 24;
+ uint64_t aff3 : 8; /**< [ 39: 32](RO) Affinity level 3. Highest level affinity field.
+
+ Always zero on CNXXXX. */
+ uint64_t rsvd_31 : 1; /**< [ 31: 31](RO) Reserved 1. */
+ uint64_t u : 1; /**< [ 30: 30](RO) Indicates a uniprocessor system, as distinct from PE 0 in a
+ multiprocessor system.
+ 0 = Processor is part of a multiprocessor system.
+ 1 = Processor is part of a uniprocessor system. */
+ uint64_t reserved_25_29 : 5;
+ uint64_t mt : 1; /**< [ 24: 24](RO) Indicates whether the lowest level of affinity consists of
+ logical PEs that are implemented using a multi-threading type
+ approach.
+ 0 = Performance of PEs at the lowest affinity level is largely
+ independent.
+ 1 = Performance of PEs at the lowest affinity level is very
+ interdependent. */
+ uint64_t aff2 : 8; /**< [ 23: 16](RO) Affinity level 2. Second highest level affinity field.
+
+ For CNXXXX, the socket number. */
+ uint64_t aff1 : 8; /**< [ 15: 8](RO) Affinity level 1. Third highest level affinity field.
+
+ For CNXXXX the processor number upper 2 bits. */
+ uint64_t aff0 : 8; /**< [ 7: 0](RO) Affinity level 0. Lowest level affinity field.
+ WARNING: The GIC register ICC_SGI{0,1}R_EL1 limits this
+                                                                 to 0-15 as it's a 16-bit mask.
+
+ For CNXXXX the processor number lower 4 bits. */
+#else /* Word 0 - Little Endian */
+ uint64_t aff0 : 8; /**< [ 7: 0](RO) Affinity level 0. Lowest level affinity field.
+ WARNING: The GIC register ICC_SGI{0,1}R_EL1 limits this
+                                                                 to 0-15 as it's a 16-bit mask.
+
+ For CNXXXX the processor number lower 4 bits. */
+ uint64_t aff1 : 8; /**< [ 15: 8](RO) Affinity level 1. Third highest level affinity field.
+
+ For CNXXXX the processor number upper 2 bits. */
+ uint64_t aff2 : 8; /**< [ 23: 16](RO) Affinity level 2. Second highest level affinity field.
+
+ For CNXXXX, the socket number. */
+ uint64_t mt : 1; /**< [ 24: 24](RO) Indicates whether the lowest level of affinity consists of
+ logical PEs that are implemented using a multi-threading type
+ approach.
+ 0 = Performance of PEs at the lowest affinity level is largely
+ independent.
+ 1 = Performance of PEs at the lowest affinity level is very
+ interdependent. */
+ uint64_t reserved_25_29 : 5;
+ uint64_t u : 1; /**< [ 30: 30](RO) Indicates a uniprocessor system, as distinct from PE 0 in a
+ multiprocessor system.
+ 0 = Processor is part of a multiprocessor system.
+ 1 = Processor is part of a uniprocessor system. */
+ uint64_t rsvd_31 : 1; /**< [ 31: 31](RO) Reserved 1. */
+ uint64_t aff3 : 8; /**< [ 39: 32](RO) Affinity level 3. Highest level affinity field.
+
+ Always zero on CNXXXX. */
+ uint64_t reserved_40_63 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_mpidr_el1_s cn8; */
+ struct bdk_ap_mpidr_el1_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_40_63 : 24;
+ uint64_t aff3 : 8; /**< [ 39: 32](RO) Affinity level 3. Highest level affinity field.
+
+ Always zero on CNXXXX. */
+ uint64_t rsvd_31 : 1; /**< [ 31: 31](RO) Reserved 1. */
+ uint64_t u : 1; /**< [ 30: 30](RO) Indicates a uniprocessor system, as distinct from PE 0 in a
+ multiprocessor system.
+ 0 = Processor is part of a multiprocessor system.
+ 1 = Processor is part of a uniprocessor system. */
+ uint64_t reserved_25_29 : 5;
+ uint64_t mt : 1; /**< [ 24: 24](RO) Indicates whether the lowest level of affinity consists of
+ logical PEs that are implemented using a multi-threading type
+ approach.
+ 0 = Performance of PEs at the lowest affinity level is largely
+ independent.
+ 1 = Performance of PEs at the lowest affinity level is very
+ interdependent. */
+ uint64_t aff2 : 8; /**< [ 23: 16](RO) Affinity level 2. Second highest level affinity field.
+
+ For CNXXXX, the socket number. */
+ uint64_t aff1 : 8; /**< [ 15: 8](RO) Affinity level 1. Third highest level affinity field.
+
+ For CN93XX the processor number lower 2 bits. */
+ uint64_t aff0 : 8; /**< [ 7: 0](RO) Affinity level 0. Lowest level affinity field.
+ WARNING: The GIC register ICC_SGI{0,1}R_EL1 limits this
+                                                                 to 0-15 as it's a 16-bit mask.
+
+ For CN93XX the processor number upper 3 bits. */
+#else /* Word 0 - Little Endian */
+ uint64_t aff0 : 8; /**< [ 7: 0](RO) Affinity level 0. Lowest level affinity field.
+ WARNING: The GIC register ICC_SGI{0,1}R_EL1 limits this
+                                                                 to 0-15 as it's a 16-bit mask.
+
+ For CN93XX the processor number upper 3 bits. */
+ uint64_t aff1 : 8; /**< [ 15: 8](RO) Affinity level 1. Third highest level affinity field.
+
+ For CN93XX the processor number lower 2 bits. */
+ uint64_t aff2 : 8; /**< [ 23: 16](RO) Affinity level 2. Second highest level affinity field.
+
+ For CNXXXX, the socket number. */
+ uint64_t mt : 1; /**< [ 24: 24](RO) Indicates whether the lowest level of affinity consists of
+ logical PEs that are implemented using a multi-threading type
+ approach.
+ 0 = Performance of PEs at the lowest affinity level is largely
+ independent.
+ 1 = Performance of PEs at the lowest affinity level is very
+ interdependent. */
+ uint64_t reserved_25_29 : 5;
+ uint64_t u : 1; /**< [ 30: 30](RO) Indicates a uniprocessor system, as distinct from PE 0 in a
+ multiprocessor system.
+ 0 = Processor is part of a multiprocessor system.
+ 1 = Processor is part of a uniprocessor system. */
+ uint64_t rsvd_31 : 1; /**< [ 31: 31](RO) Reserved 1. */
+ uint64_t aff3 : 8; /**< [ 39: 32](RO) Affinity level 3. Highest level affinity field.
+
+ Always zero on CNXXXX. */
+ uint64_t reserved_40_63 : 24;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_ap_mpidr_el1 bdk_ap_mpidr_el1_t;
+
+#define BDK_AP_MPIDR_EL1 BDK_AP_MPIDR_EL1_FUNC()
+static inline uint64_t BDK_AP_MPIDR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_MPIDR_EL1_FUNC(void)
+{
+ return 0x30000000500ll;
+}
+
+#define typedef_BDK_AP_MPIDR_EL1 bdk_ap_mpidr_el1_t
+#define bustype_BDK_AP_MPIDR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_MPIDR_EL1 "AP_MPIDR_EL1"
+#define busnum_BDK_AP_MPIDR_EL1 0
+#define arguments_BDK_AP_MPIDR_EL1 -1,-1,-1,-1
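+
+/* Illustrative sketch, not part of the imported BDK sources: forming a
+ * linear core number from MPIDR_EL1 using the CN8XXX layout documented
+ * above (AFF0 = lower 4 bits of the number, AFF1 = the upper bits). Note
+ * the cn9 layout above distributes the bits differently. */
+static inline unsigned int bdk_example_core_number(void)
+{
+    bdk_ap_mpidr_el1_t mpidr;
+    asm volatile("mrs %0, mpidr_el1" : "=r" (mpidr.u));
+    return ((unsigned int)mpidr.s.aff1 << 4) | (unsigned int)mpidr.s.aff0;
+}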
+
+/**
+ * Register (SYSREG) ap_mvfr#_el1
+ *
+ * AP ARM32 Media and VFP Feature Register
+ * Describes the features provided by the Advanced SIMD and Floating-point Extensions.
+ */
+union bdk_ap_mvfrx_el1
+{
+ uint32_t u;
+ struct bdk_ap_mvfrx_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_mvfrx_el1_s cn; */
+};
+typedef union bdk_ap_mvfrx_el1 bdk_ap_mvfrx_el1_t;
+
+static inline uint64_t BDK_AP_MVFRX_EL1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_MVFRX_EL1(unsigned long a)
+{
+ if (a<=2)
+ return 0x30000030000ll + 0x100ll * ((a) & 0x3);
+ __bdk_csr_fatal("AP_MVFRX_EL1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_MVFRX_EL1(a) bdk_ap_mvfrx_el1_t
+#define bustype_BDK_AP_MVFRX_EL1(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_MVFRX_EL1(a) "AP_MVFRX_EL1"
+#define busnum_BDK_AP_MVFRX_EL1(a) (a)
+#define arguments_BDK_AP_MVFRX_EL1(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_nzcv
+ *
+ * AP Condition Flags Register
+ * Allows access to the condition flags.
+ */
+union bdk_ap_nzcv
+{
+ uint32_t u;
+ struct bdk_ap_nzcv_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t n : 1; /**< [ 31: 31](R/W) Negative condition flag. Set to bit[31] of the result of the
+ last flag-setting instruction. If the result is regarded as a
+ two's complement signed integer, then the processor sets N to
+ 1 if the result was negative, and sets N to 0 if it was
+ positive or zero. */
+ uint32_t z : 1; /**< [ 30: 30](R/W) Zero condition flag. Set to 1 if the result of the last flag-
+ setting instruction was zero, and to 0 otherwise. A result of
+ zero often indicates an equal result from a comparison. */
+ uint32_t cc : 1; /**< [ 29: 29](R/W) Carry condition flag. Set to 1 if the last flag-setting
+ instruction resulted in a carry condition, for example an
+ unsigned overflow on an addition. */
+ uint32_t v : 1; /**< [ 28: 28](R/W) Overflow condition flag. Set to 1 if the last flag-setting
+ instruction resulted in an overflow condition, for example a
+ signed overflow on an addition. */
+ uint32_t reserved_0_27 : 28;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_27 : 28;
+ uint32_t v : 1; /**< [ 28: 28](R/W) Overflow condition flag. Set to 1 if the last flag-setting
+ instruction resulted in an overflow condition, for example a
+ signed overflow on an addition. */
+ uint32_t cc : 1; /**< [ 29: 29](R/W) Carry condition flag. Set to 1 if the last flag-setting
+ instruction resulted in a carry condition, for example an
+ unsigned overflow on an addition. */
+ uint32_t z : 1; /**< [ 30: 30](R/W) Zero condition flag. Set to 1 if the result of the last flag-
+ setting instruction was zero, and to 0 otherwise. A result of
+ zero often indicates an equal result from a comparison. */
+ uint32_t n : 1; /**< [ 31: 31](R/W) Negative condition flag. Set to bit[31] of the result of the
+ last flag-setting instruction. If the result is regarded as a
+ two's complement signed integer, then the processor sets N to
+ 1 if the result was negative, and sets N to 0 if it was
+ positive or zero. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_nzcv_s cn; */
+};
+typedef union bdk_ap_nzcv bdk_ap_nzcv_t;
+
+#define BDK_AP_NZCV BDK_AP_NZCV_FUNC()
+static inline uint64_t BDK_AP_NZCV_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_NZCV_FUNC(void)
+{
+ return 0x30304020000ll;
+}
+
+#define typedef_BDK_AP_NZCV bdk_ap_nzcv_t
+#define bustype_BDK_AP_NZCV BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_NZCV "AP_NZCV"
+#define busnum_BDK_AP_NZCV 0
+#define arguments_BDK_AP_NZCV -1,-1,-1,-1
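+
+/* Illustrative sketch, not part of the imported BDK sources: reading the
+ * condition flags through the union above. NZCV is read as a 64-bit
+ * system register; only bits [31:28] are defined. */
+static inline int bdk_example_carry_set(void)
+{
+    uint64_t v;
+    bdk_ap_nzcv_t nzcv;
+    asm volatile("mrs %0, nzcv" : "=r" (v));
+    nzcv.u = (uint32_t)v;
+    return nzcv.s.cc; /* [CC] is the carry flag, bit 29 */
+}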
+
+/**
+ * Register (SYSREG) ap_osdlr_el1
+ *
+ * AP OS Double Lock Register
+ * Used to control the OS Double Lock.
+ */
+union bdk_ap_osdlr_el1
+{
+ uint32_t u;
+ struct bdk_ap_osdlr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_1_31 : 31;
+ uint32_t dlk : 1; /**< [ 0: 0](R/W) OS Double Lock control bit.
+ 0 = OS Double Lock unlocked.
+ 1 = OS Double Lock locked, if AP_DBGPRCR_EL1[CORENPDRQ] (Core no
+ power-down request) bit is set to 0 and the processor is in
+ Non-debug state. */
+#else /* Word 0 - Little Endian */
+ uint32_t dlk : 1; /**< [ 0: 0](R/W) OS Double Lock control bit.
+ 0 = OS Double Lock unlocked.
+ 1 = OS Double Lock locked, if AP_DBGPRCR_EL1[CORENPDRQ] (Core no
+ power-down request) bit is set to 0 and the processor is in
+ Non-debug state. */
+ uint32_t reserved_1_31 : 31;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_osdlr_el1_s cn; */
+};
+typedef union bdk_ap_osdlr_el1 bdk_ap_osdlr_el1_t;
+
+#define BDK_AP_OSDLR_EL1 BDK_AP_OSDLR_EL1_FUNC()
+static inline uint64_t BDK_AP_OSDLR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_OSDLR_EL1_FUNC(void)
+{
+ return 0x20001030400ll;
+}
+
+#define typedef_BDK_AP_OSDLR_EL1 bdk_ap_osdlr_el1_t
+#define bustype_BDK_AP_OSDLR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_OSDLR_EL1 "AP_OSDLR_EL1"
+#define busnum_BDK_AP_OSDLR_EL1 0
+#define arguments_BDK_AP_OSDLR_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_osdtrrx_el1
+ *
+ * AP OS Lock Data Transfer Receive Register
+ * Used for save/restore of AP_DBGDTRRX_EL0. It is a component of
+ * the Debug Communications Channel.
+ */
+union bdk_ap_osdtrrx_el1
+{
+ uint32_t u;
+ struct bdk_ap_osdtrrx_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t data : 32; /**< [ 31: 0](R/W) Host to target data. One word of data for transfer from the
+ debug host to the debug target.
+                                                                 For the full behavior of the Debug Communications Channel, see
+                                                                 the ARM debug architecture documentation. */
+#else /* Word 0 - Little Endian */
+ uint32_t data : 32; /**< [ 31: 0](R/W) Host to target data. One word of data for transfer from the
+ debug host to the debug target.
+                                                                 For the full behavior of the Debug Communications Channel, see
+                                                                 the ARM debug architecture documentation. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_osdtrrx_el1_s cn; */
+};
+typedef union bdk_ap_osdtrrx_el1 bdk_ap_osdtrrx_el1_t;
+
+#define BDK_AP_OSDTRRX_EL1 BDK_AP_OSDTRRX_EL1_FUNC()
+static inline uint64_t BDK_AP_OSDTRRX_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_OSDTRRX_EL1_FUNC(void)
+{
+ return 0x20000000200ll;
+}
+
+#define typedef_BDK_AP_OSDTRRX_EL1 bdk_ap_osdtrrx_el1_t
+#define bustype_BDK_AP_OSDTRRX_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_OSDTRRX_EL1 "AP_OSDTRRX_EL1"
+#define busnum_BDK_AP_OSDTRRX_EL1 0
+#define arguments_BDK_AP_OSDTRRX_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_osdtrtx_el1
+ *
+ * AP OS Lock Data Transfer Transmit Register
+ * Used for save/restore of AP_DBGDTRTX_EL0. It is a component of
+ * the Debug Communications Channel.
+ */
+union bdk_ap_osdtrtx_el1
+{
+ uint32_t u;
+ struct bdk_ap_osdtrtx_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t data : 32; /**< [ 31: 0](R/W) Target to host data. One word of data for transfer from the
+ debug target to the debug host.
+                                                                 For the full behavior of the Debug Communications Channel, see
+                                                                 the ARM debug architecture documentation. */
+#else /* Word 0 - Little Endian */
+ uint32_t data : 32; /**< [ 31: 0](R/W) Target to host data. One word of data for transfer from the
+ debug target to the debug host.
+                                                                 For the full behavior of the Debug Communications Channel, see
+                                                                 the ARM debug architecture documentation. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_osdtrtx_el1_s cn; */
+};
+typedef union bdk_ap_osdtrtx_el1 bdk_ap_osdtrtx_el1_t;
+
+#define BDK_AP_OSDTRTX_EL1 BDK_AP_OSDTRTX_EL1_FUNC()
+static inline uint64_t BDK_AP_OSDTRTX_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_OSDTRTX_EL1_FUNC(void)
+{
+ return 0x20000030200ll;
+}
+
+#define typedef_BDK_AP_OSDTRTX_EL1 bdk_ap_osdtrtx_el1_t
+#define bustype_BDK_AP_OSDTRTX_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_OSDTRTX_EL1 "AP_OSDTRTX_EL1"
+#define busnum_BDK_AP_OSDTRTX_EL1 0
+#define arguments_BDK_AP_OSDTRTX_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_oseccr_el1
+ *
+ * AP OS Lock Exception Catch Control Register
+ * Provides a mechanism for an operating system to access the
+ * contents of EDECCR that are otherwise invisible to software,
+ * so it can save/restore the contents of EDECCR over powerdown
+ * on behalf of the external debugger.
+ */
+union bdk_ap_oseccr_el1
+{
+ uint32_t u;
+ struct bdk_ap_oseccr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t edeccr : 32; /**< [ 31: 0](R/W) Used for save/restore to EDECCR over powerdown. */
+#else /* Word 0 - Little Endian */
+ uint32_t edeccr : 32; /**< [ 31: 0](R/W) Used for save/restore to EDECCR over powerdown. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_oseccr_el1_s cn; */
+};
+typedef union bdk_ap_oseccr_el1 bdk_ap_oseccr_el1_t;
+
+#define BDK_AP_OSECCR_EL1 BDK_AP_OSECCR_EL1_FUNC()
+static inline uint64_t BDK_AP_OSECCR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_OSECCR_EL1_FUNC(void)
+{
+ return 0x20000060200ll;
+}
+
+#define typedef_BDK_AP_OSECCR_EL1 bdk_ap_oseccr_el1_t
+#define bustype_BDK_AP_OSECCR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_OSECCR_EL1 "AP_OSECCR_EL1"
+#define busnum_BDK_AP_OSECCR_EL1 0
+#define arguments_BDK_AP_OSECCR_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_oslar_el1
+ *
+ * AP OS Lock Access Register
+ * Used to lock or unlock the OS lock.
+ */
+union bdk_ap_oslar_el1
+{
+ uint32_t u;
+ struct bdk_ap_oslar_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_1_31 : 31;
+ uint32_t oslk : 1; /**< [ 0: 0](RO) On writes to AP_OSLAR_EL1, bit[0] is copied to the OS lock.
+ Use AP_OSLSR_EL1[OSLK] to check the current status of the lock. */
+#else /* Word 0 - Little Endian */
+ uint32_t oslk : 1; /**< [ 0: 0](RO) On writes to AP_OSLAR_EL1, bit[0] is copied to the OS lock.
+ Use AP_OSLSR_EL1[OSLK] to check the current status of the lock. */
+ uint32_t reserved_1_31 : 31;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_oslar_el1_s cn8; */
+ struct bdk_ap_oslar_el1_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_1_31 : 31;
+ uint32_t oslk : 1; /**< [ 0: 0](WO) On writes to AP_OSLAR_EL1, bit[0] is copied to the OS lock.
+ Use AP_OSLSR_EL1[OSLK] to check the current status of the lock. */
+#else /* Word 0 - Little Endian */
+ uint32_t oslk : 1; /**< [ 0: 0](WO) On writes to AP_OSLAR_EL1, bit[0] is copied to the OS lock.
+ Use AP_OSLSR_EL1[OSLK] to check the current status of the lock. */
+ uint32_t reserved_1_31 : 31;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_ap_oslar_el1 bdk_ap_oslar_el1_t;
+
+#define BDK_AP_OSLAR_EL1 BDK_AP_OSLAR_EL1_FUNC()
+static inline uint64_t BDK_AP_OSLAR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_OSLAR_EL1_FUNC(void)
+{
+ return 0x20001000400ll;
+}
+
+#define typedef_BDK_AP_OSLAR_EL1 bdk_ap_oslar_el1_t
+#define bustype_BDK_AP_OSLAR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_OSLAR_EL1 "AP_OSLAR_EL1"
+#define busnum_BDK_AP_OSLAR_EL1 0
+#define arguments_BDK_AP_OSLAR_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_oslsr_el1
+ *
+ * AP OS Lock Status Register
+ * Provides the status of the OS lock.
+ */
+union bdk_ap_oslsr_el1
+{
+ uint32_t u;
+ struct bdk_ap_oslsr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_4_31 : 28;
+ uint32_t oslm_high : 1; /**< [ 3: 3](RO) See below for description of the OSLM field. */
+ uint32_t ntt : 1; /**< [ 2: 2](RO) Not 32-bit access. This bit is always 0. It indicates that a
+ 32-bit access is needed to write the key to the OS lock access
+ register. */
+ uint32_t oslk : 1; /**< [ 1: 1](RO) OS Lock Status.
+ The OS Lock is locked and unlocked by writing to the OS Lock
+ Access Register.
+ 0 = OS lock unlocked.
+ 1 = OS lock locked. */
+    uint32_t oslm_low : 1; /**< [  0:  0](RO) OS lock model implemented. Identifies the form of OS save and
+                                                                 restore mechanism implemented. In ARMv8 these bits are as
+                                                                 follows:
+                                                                 OSLM 0x2 = OS lock implemented. DBGOSSRR not implemented.
+                                                                 All other values are reserved. */
+#else /* Word 0 - Little Endian */
+    uint32_t oslm_low : 1; /**< [  0:  0](RO) OS lock model implemented. Identifies the form of OS save and
+                                                                 restore mechanism implemented. In ARMv8 these bits are as
+                                                                 follows:
+                                                                 OSLM 0x2 = OS lock implemented. DBGOSSRR not implemented.
+                                                                 All other values are reserved. */
+ uint32_t oslk : 1; /**< [ 1: 1](RO) OS Lock Status.
+ The OS Lock is locked and unlocked by writing to the OS Lock
+ Access Register.
+ 0 = OS lock unlocked.
+ 1 = OS lock locked. */
+ uint32_t ntt : 1; /**< [ 2: 2](RO) Not 32-bit access. This bit is always 0. It indicates that a
+ 32-bit access is needed to write the key to the OS lock access
+ register. */
+ uint32_t oslm_high : 1; /**< [ 3: 3](RO) See below for description of the OSLM field. */
+ uint32_t reserved_4_31 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_oslsr_el1_s cn; */
+};
+typedef union bdk_ap_oslsr_el1 bdk_ap_oslsr_el1_t;
+
+#define BDK_AP_OSLSR_EL1 BDK_AP_OSLSR_EL1_FUNC()
+static inline uint64_t BDK_AP_OSLSR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_OSLSR_EL1_FUNC(void)
+{
+ return 0x20001010400ll;
+}
+
+#define typedef_BDK_AP_OSLSR_EL1 bdk_ap_oslsr_el1_t
+#define bustype_BDK_AP_OSLSR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_OSLSR_EL1 "AP_OSLSR_EL1"
+#define busnum_BDK_AP_OSLSR_EL1 0
+#define arguments_BDK_AP_OSLSR_EL1 -1,-1,-1,-1
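+
+/* Illustrative sketch, not part of the imported BDK sources: unlocking the
+ * OS lock by writing 0 to AP_OSLAR_EL1 and confirming the result through
+ * AP_OSLSR_EL1[OSLK], as the descriptions above prescribe. */
+static inline int bdk_example_os_lock_unlock(void)
+{
+    uint64_t v;
+    bdk_ap_oslsr_el1_t oslsr;
+    asm volatile("msr oslar_el1, %0" : : "r" (0ull));
+    asm volatile("isb");
+    asm volatile("mrs %0, oslsr_el1" : "=r" (v));
+    oslsr.u = (uint32_t)v;
+    return oslsr.s.oslk == 0; /* 0 = OS lock unlocked */
+}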
+
+/**
+ * Register (SYSREG) ap_pan
+ *
+ * AP Privileged Access Never Register
+ * v8.1: Privileged Access Never bit.
+ *
+ * When 0, this bit has no effect on the translation system compared with
+ * the situation in ARMv8.
+ *
+ * When 1, this bit disables data read or data write access from EL1 (or
+ * EL2 when AP_HCR_EL2[E2H]==1) to a virtual address where access to the
+ * virtual address at EL0 is permitted at stage 1 by the combination of
+ * the AP[1] bit and the APTable[0] bits (if appropriate). That is, when
+ * AP[1]==1 && APTable[0]==0, for all APTable bits associated with that
+ * virtual address.
+ *
+ * The AP_PAN bit has no effect on instruction accesses.
+ *
+ * If access is disabled, then the access will give rise to a stage 1
+ * permission fault, taken in the same way as all other stage 1
+ * permission faults.
+ *
+ * PSTATE[AP_PAN] is copied to SPSR[AP_PAN] on an exception taken from AArch64.
+ * SPSR[AP_PAN] is copied to PSTATE[AP_PAN] on an exception return to AArch64.
+ */
+union bdk_ap_pan
+{
+ uint64_t u;
+ struct bdk_ap_pan_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_23_63 : 41;
+ uint64_t pan : 1; /**< [ 22: 22](R/W) Privileged Access Never bit. */
+ uint64_t reserved_0_21 : 22;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_21 : 22;
+ uint64_t pan : 1; /**< [ 22: 22](R/W) Privileged Access Never bit. */
+ uint64_t reserved_23_63 : 41;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_pan_s cn; */
+};
+typedef union bdk_ap_pan bdk_ap_pan_t;
+
+#define BDK_AP_PAN BDK_AP_PAN_FUNC()
+static inline uint64_t BDK_AP_PAN_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_PAN_FUNC(void)
+{
+ return 0x30004020300ll;
+}
+
+#define typedef_BDK_AP_PAN bdk_ap_pan_t
+#define bustype_BDK_AP_PAN BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_PAN "AP_PAN"
+#define busnum_BDK_AP_PAN 0
+#define arguments_BDK_AP_PAN -1,-1,-1,-1
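+
+/* Illustrative sketch, not part of the imported BDK sources: toggling
+ * PSTATE[PAN] with the MSR immediate form introduced in ARMv8.1. The
+ * assembler must know the PAN extension (e.g. -march=armv8.1-a). */
+static inline void bdk_example_set_pan(int enable)
+{
+    if (enable)
+        asm volatile("msr pan, #1" ::: "memory");
+    else
+        asm volatile("msr pan, #0" ::: "memory");
+}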
+
+/**
+ * Register (SYSREG) ap_par_el1
+ *
+ * AP Physical Address Register
+ * Receives the PA from any address translation operation.
+ */
+union bdk_ap_par_el1
+{
+ uint64_t u;
+ struct bdk_ap_par_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t mair : 8; /**< [ 63: 56](R/W) On success (f=0): Memory Attributes, following the encodings for the MAIR.
+ On failure (f=1): Zero. */
+ uint64_t reserved_52_55 : 4;
+ uint64_t pa : 40; /**< [ 51: 12](R/W) Physical Address. The physical address corresponding to the
+ supplied virtual address. This field returns address
+ bits \<47:12\>. */
+ uint64_t rsvd_11 : 1; /**< [ 11: 11](RO) Reserved 1. */
+ uint64_t reserved_10 : 1;
+ uint64_t nsec : 1; /**< [ 9: 9](R/W) Nonsecure. The NS attribute for a translation table entry
+ read from Secure state.
+ This bit is UNKNOWN for a translation table entry read from
+ nonsecure state. */
+ uint64_t sha : 2; /**< [ 8: 7](R/W) Shareability attribute, from the translation table entry for
+ the returned PA.
+ 0x0 = Non-shareable.
+ 0x2 = Outer Shareable.
+ 0x3 = Inner Shareable. */
+ uint64_t fs : 6; /**< [ 6: 1](R/W) On success (f=0): Zero.
+ On failure (f=1): Fault Status code shown in the Data Abort. */
+ uint64_t f : 1; /**< [ 0: 0](R/W) Indicates whether the conversion completed successfully.
+                                                                 0 = VA to PA conversion completed successfully.
+                                                                 1 = VA to PA conversion aborted. */
+#else /* Word 0 - Little Endian */
+ uint64_t f : 1; /**< [ 0: 0](R/W) Indicates whether the conversion completed successfully.
+                                                                 0 = VA to PA conversion completed successfully.
+                                                                 1 = VA to PA conversion aborted. */
+ uint64_t fs : 6; /**< [ 6: 1](R/W) On success (f=0): Zero.
+ On failure (f=1): Fault Status code shown in the Data Abort. */
+ uint64_t sha : 2; /**< [ 8: 7](R/W) Shareability attribute, from the translation table entry for
+ the returned PA.
+ 0x0 = Non-shareable.
+ 0x2 = Outer Shareable.
+ 0x3 = Inner Shareable. */
+ uint64_t nsec : 1; /**< [ 9: 9](R/W) Nonsecure. The NS attribute for a translation table entry
+ read from Secure state.
+ This bit is UNKNOWN for a translation table entry read from
+ nonsecure state. */
+ uint64_t reserved_10 : 1;
+ uint64_t rsvd_11 : 1; /**< [ 11: 11](RO) Reserved 1. */
+ uint64_t pa : 40; /**< [ 51: 12](R/W) Physical Address. The physical address corresponding to the
+ supplied virtual address. This field returns address
+ bits \<47:12\>. */
+ uint64_t reserved_52_55 : 4;
+ uint64_t mair : 8; /**< [ 63: 56](R/W) On success (f=0): Memory Attributes, following the encodings for the MAIR.
+ On failure (f=1): Zero. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_par_el1_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t mair : 8; /**< [ 63: 56](R/W) On success (f=0): Memory Attributes, following the encodings for the MAIR.
+ On failure (f=1): Zero. */
+ uint64_t reserved_48_55 : 8;
+ uint64_t pa : 36; /**< [ 47: 12](R/W) Physical Address. The physical address corresponding to the
+ supplied virtual address. This field returns address
+ bits \<47:12\>. */
+ uint64_t rsvd_11 : 1; /**< [ 11: 11](RO) Reserved 1. */
+ uint64_t reserved_10 : 1;
+ uint64_t nsec : 1; /**< [ 9: 9](R/W) Nonsecure. The NS attribute for a translation table entry
+ read from Secure state.
+ This bit is UNKNOWN for a translation table entry read from
+ nonsecure state. */
+ uint64_t sha : 2; /**< [ 8: 7](R/W) Shareability attribute, from the translation table entry for
+ the returned PA.
+ 0x0 = Non-shareable.
+ 0x2 = Outer Shareable.
+ 0x3 = Inner Shareable. */
+ uint64_t fs : 6; /**< [ 6: 1](R/W) On success (f=0): Zero.
+ On failure (f=1): Fault Status code shown in the Data Abort. */
+ uint64_t f : 1; /**< [ 0: 0](R/W) Indicates whether the conversion completed successfully.
+                                                                 0 = VA to PA conversion completed successfully.
+                                                                 1 = VA to PA conversion aborted. */
+#else /* Word 0 - Little Endian */
+ uint64_t f : 1; /**< [ 0: 0](R/W) Indicates whether the conversion completed successfully.
+                                                                 0 = VA to PA conversion completed successfully.
+                                                                 1 = VA to PA conversion aborted. */
+ uint64_t fs : 6; /**< [ 6: 1](R/W) On success (f=0): Zero.
+ On failure (f=1): Fault Status code shown in the Data Abort. */
+ uint64_t sha : 2; /**< [ 8: 7](R/W) Shareability attribute, from the translation table entry for
+ the returned PA.
+ 0x0 = Non-shareable.
+ 0x2 = Outer Shareable.
+ 0x3 = Inner Shareable. */
+ uint64_t nsec : 1; /**< [ 9: 9](R/W) Nonsecure. The NS attribute for a translation table entry
+ read from Secure state.
+ This bit is UNKNOWN for a translation table entry read from
+ nonsecure state. */
+ uint64_t reserved_10 : 1;
+ uint64_t rsvd_11 : 1; /**< [ 11: 11](RO) Reserved 1. */
+ uint64_t pa : 36; /**< [ 47: 12](R/W) Physical Address. The physical address corresponding to the
+ supplied virtual address. This field returns address
+ bits \<47:12\>. */
+ uint64_t reserved_48_55 : 8;
+ uint64_t mair : 8; /**< [ 63: 56](R/W) On success (f=0): Memory Attributes, following the encodings for the MAIR.
+ On failure (f=1): Zero. */
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_ap_par_el1_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t mair : 8; /**< [ 63: 56](R/W) On success (f=0): Memory Attributes, following the encodings for the MAIR.
+ On failure (f=1): Zero. */
+ uint64_t reserved_52_55 : 4;
+ uint64_t pa : 40; /**< [ 51: 12](R/W) Physical address. The physical address corresponding to the
+ supplied virtual address. This field returns address bits \<51:12\>. */
+ uint64_t rsvd_11 : 1; /**< [ 11: 11](RO) Reserved 1. */
+ uint64_t reserved_10 : 1;
+ uint64_t nsec : 1; /**< [ 9: 9](R/W) Nonsecure. The NS attribute for a translation table entry
+ read from Secure state.
+ This bit is UNKNOWN for a translation table entry read from
+ nonsecure state. */
+ uint64_t sha : 2; /**< [ 8: 7](R/W) Shareability attribute, from the translation table entry for
+ the returned PA.
+ 0x0 = Non-shareable.
+ 0x2 = Outer Shareable.
+ 0x3 = Inner Shareable. */
+ uint64_t fs : 6; /**< [ 6: 1](R/W) On success (f=0): Zero.
+ On failure (f=1): Fault Status code shown in the Data Abort. */
+ uint64_t f : 1; /**< [ 0: 0](R/W) Indicates whether the conversion completed successfully.
+                                                                 0 = VA to PA conversion completed successfully.
+                                                                 1 = VA to PA conversion aborted. */
+#else /* Word 0 - Little Endian */
+ uint64_t f : 1; /**< [ 0: 0](R/W) Indicates whether the conversion completed successfully.
+                                                                 0 = VA to PA conversion completed successfully.
+                                                                 1 = VA to PA conversion aborted. */
+ uint64_t fs : 6; /**< [ 6: 1](R/W) On success (f=0): Zero.
+ On failure (f=1): Fault Status code shown in the Data Abort. */
+ uint64_t sha : 2; /**< [ 8: 7](R/W) Shareability attribute, from the translation table entry for
+ the returned PA.
+ 0x0 = Non-shareable.
+ 0x2 = Outer Shareable.
+ 0x3 = Inner Shareable. */
+ uint64_t nsec : 1; /**< [ 9: 9](R/W) Nonsecure. The NS attribute for a translation table entry
+ read from Secure state.
+ This bit is UNKNOWN for a translation table entry read from
+ nonsecure state. */
+ uint64_t reserved_10 : 1;
+ uint64_t rsvd_11 : 1; /**< [ 11: 11](RO) Reserved 1. */
+ uint64_t pa : 40; /**< [ 51: 12](R/W) Physical address. The physical address corresponding to the
+ supplied virtual address. This field returns address bits \<51:12\>. */
+ uint64_t reserved_52_55 : 4;
+ uint64_t mair : 8; /**< [ 63: 56](R/W) On success (f=0): Memory Attributes, following the encodings for the MAIR.
+ On failure (f=1): Zero. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_ap_par_el1 bdk_ap_par_el1_t;
+
+#define BDK_AP_PAR_EL1 BDK_AP_PAR_EL1_FUNC()
+static inline uint64_t BDK_AP_PAR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_PAR_EL1_FUNC(void)
+{
+ return 0x30007040000ll;
+}
+
+#define typedef_BDK_AP_PAR_EL1 bdk_ap_par_el1_t
+#define bustype_BDK_AP_PAR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_PAR_EL1 "AP_PAR_EL1"
+#define busnum_BDK_AP_PAR_EL1 0
+#define arguments_BDK_AP_PAR_EL1 -1,-1,-1,-1
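+
+/* Illustrative sketch, not part of the imported BDK sources: a stage 1 EL1
+ * read translation via AT S1E1R, decoded through the union above. Returns
+ * all-ones when [F] reports an aborted translation; [FS] then holds the
+ * fault status code. */
+static inline uint64_t bdk_example_va_to_pa(uint64_t va)
+{
+    bdk_ap_par_el1_t par;
+    asm volatile("at s1e1r, %0" : : "r" (va));
+    asm volatile("isb");
+    asm volatile("mrs %0, par_el1" : "=r" (par.u));
+    if (par.s.f)
+        return (uint64_t)-1;
+    /* [PA] returns the upper address bits; merge back the page offset. */
+    return ((uint64_t)par.s.pa << 12) | (va & 0xfff);
+}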
+
+/**
+ * Register (SYSREG) ap_pmbidr_el1
+ *
+ * AP Profiling Buffer ID Register
+ * Provides information to software as to whether the buffer can be programmed at the current
+ * Exception level.
+ */
+union bdk_ap_pmbidr_el1
+{
+ uint64_t u;
+ struct bdk_ap_pmbidr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_6_63 : 58;
+ uint64_t f : 1; /**< [ 5: 5](RO) Flag Updates. Defines whether the address translation performed by the profiling buffer
+ manages the access flag and dirty bit.
+ 0 = Accesses to pages not marked with Access Flag and not with dirty bit set will
+ generate an Unsupported Access fault if hardware management of those flags is enabled.
+ 1 = Profiling Buffer address translation manages the access flag and dirty bit in the
+ same way as the MMU on this PE. */
+ uint64_t p : 1; /**< [ 4: 4](RO) Prohibited. The profiling buffer is owned by the current or a lower exception level in the
+ current security state.
+ 0 = Profiling buffer is owned by the current or a lower exception level in the current
+ security state. This does not mean an access will not be trapped to a higher exception
+ level.
+ 1 = Profiling buffer is owned by a higher exception level or the other security state. */
+ uint64_t align : 4; /**< [ 3: 0](RO) Defines the minimum alignment constraint for PMBPTR_EL1. If this field is non-zero, then
+ the PE must pad every record up to a multiple of this size.
+ 0x0 = Byte.
+ 0x1 = Halfword. PMBPTR_EL1[0] is RES0.
+ 0x2 = Word. PMBPTR_EL1[1:0] is RES0.
+ 0x3 = Doubleword. PMBPTR_EL1[2:0] is RES0.
+ ... ...
+ 0xB = 2KB. PMBPTR_EL1[10:0] is RES0.
+
+ All other values are reserved. Reserved values might be defined in a future version of
+ the architecture. */
+#else /* Word 0 - Little Endian */
+ uint64_t align : 4; /**< [ 3: 0](RO) Defines the minimum alignment constraint for PMBPTR_EL1. If this field is non-zero, then
+ the PE must pad every record up to a multiple of this size.
+ 0x0 = Byte.
+ 0x1 = Halfword. PMBPTR_EL1[0] is RES0.
+ 0x2 = Word. PMBPTR_EL1[1:0] is RES0.
+ 0x3 = Doubleword. PMBPTR_EL1[2:0] is RES0.
+ ... ...
+ 0xB = 2KB. PMBPTR_EL1[10:0] is RES0.
+
+ All other values are reserved. Reserved values might be defined in a future version of
+ the architecture. */
+ uint64_t p : 1; /**< [ 4: 4](RO) Prohibited. The profiling buffer is owned by the current or a lower exception level in the
+ current security state.
+ 0 = Profiling buffer is owned by the current or a lower exception level in the current
+ security state. This does not mean an access will not be trapped to a higher exception
+ level.
+ 1 = Profiling buffer is owned by a higher exception level or the other security state. */
+ uint64_t f : 1; /**< [ 5: 5](RO) Flag Updates. Defines whether the address translation performed by the profiling buffer
+ manages the access flag and dirty bit.
+ 0 = Accesses to pages not marked with Access Flag and not with dirty bit set will
+ generate an Unsupported Access fault if hardware management of those flags is enabled.
+ 1 = Profiling Buffer address translation manages the access flag and dirty bit in the
+ same way as the MMU on this PE. */
+ uint64_t reserved_6_63 : 58;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_pmbidr_el1_s cn; */
+};
+typedef union bdk_ap_pmbidr_el1 bdk_ap_pmbidr_el1_t;
+
+#define BDK_AP_PMBIDR_EL1 BDK_AP_PMBIDR_EL1_FUNC()
+static inline uint64_t BDK_AP_PMBIDR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_PMBIDR_EL1_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x300090a0700ll;
+ __bdk_csr_fatal("AP_PMBIDR_EL1", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_PMBIDR_EL1 bdk_ap_pmbidr_el1_t
+#define bustype_BDK_AP_PMBIDR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_PMBIDR_EL1 "AP_PMBIDR_EL1"
+#define busnum_BDK_AP_PMBIDR_EL1 0
+#define arguments_BDK_AP_PMBIDR_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_pmblimitr_el1
+ *
+ * AP Profiling Buffer Limit Address Register
+ * Defines the upper limit for the profiling buffer, and enables the profiling buffer.
+ */
+union bdk_ap_pmblimitr_el1
+{
+ uint64_t u;
+ struct bdk_ap_pmblimitr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t limit : 52; /**< [ 63: 12](R/W) Limit address, plus one. Read/write. Defines the limit of the buffer. If the smallest
+ implemented translation granule is not 4KB, then bits [N-1:12] are RES0, where N is the
+ IMPLEMENTATION DEFINED value, Log2(smallest implemented translation granule). */
+ uint64_t reserved_3_11 : 9;
+ uint64_t fm : 2; /**< [ 2: 1](R/W) Fill mode.
+ 0x0 = Stop collection and raise maintenance interrupt on buffer fill.
+
+ All other values are reserved. If this field is programmed with a reserved value, the PE
+ behaves as if this field has a defined value, other than for a direct read of the
+ register. Software must not rely on the behavior of reserved values, as they might change
+ in a future version of the architecture. */
+ uint64_t ee : 1; /**< [ 0: 0](R/W) Profiling buffer enable.
+ 0 = All output is discarded.
+ 1 = Enabled.
+
+ This bit resets to zero. */
+#else /* Word 0 - Little Endian */
+ uint64_t ee : 1; /**< [ 0: 0](R/W) Profiling buffer enable.
+ 0 = All output is discarded.
+ 1 = Enabled.
+
+ This bit resets to zero. */
+ uint64_t fm : 2; /**< [ 2: 1](R/W) Fill mode.
+ 0x0 = Stop collection and raise maintenance interrupt on buffer fill.
+
+ All other values are reserved. If this field is programmed with a reserved value, the PE
+ behaves as if this field has a defined value, other than for a direct read of the
+ register. Software must not rely on the behavior of reserved values, as they might change
+ in a future version of the architecture. */
+ uint64_t reserved_3_11 : 9;
+ uint64_t limit : 52; /**< [ 63: 12](R/W) Limit address, plus one. Read/write. Defines the limit of the buffer. If the smallest
+ implemented translation granule is not 4KB, then bits [N-1:12] are RES0, where N is the
+ IMPLEMENTATION DEFINED value, Log2(smallest implemented translation granule). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_pmblimitr_el1_s cn; */
+};
+typedef union bdk_ap_pmblimitr_el1 bdk_ap_pmblimitr_el1_t;
+
+#define BDK_AP_PMBLIMITR_EL1 BDK_AP_PMBLIMITR_EL1_FUNC()
+static inline uint64_t BDK_AP_PMBLIMITR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_PMBLIMITR_EL1_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x300090a0000ll;
+ __bdk_csr_fatal("AP_PMBLIMITR_EL1", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_PMBLIMITR_EL1 bdk_ap_pmblimitr_el1_t
+#define bustype_BDK_AP_PMBLIMITR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_PMBLIMITR_EL1 "AP_PMBLIMITR_EL1"
+#define busnum_BDK_AP_PMBLIMITR_EL1 0
+#define arguments_BDK_AP_PMBLIMITR_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_pmbptr_el1
+ *
+ * AP Profiling Buffer Write Pointer Register
+ * Defines the current write pointer for the profiling buffer.
+ */
+union bdk_ap_pmbptr_el1
+{
+ uint64_t u;
+ struct bdk_ap_pmbptr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+    uint64_t ptr : 64; /**< [ 63:  0](R/W) Current write address. Defines the virtual address of the next
+                                                                 entry to be written to the buffer.
+                                                                 Software must treat bits [M:0] of this register as RES0, where
+                                                                 M is defined by PMBIDR_EL1[ALIGN]. If synchronous reporting of
+                                                                 external aborts is not supported, then hardware also treats
+                                                                 these bits as RES0. Otherwise, bits [M:0] might contain part of
+                                                                 a fault address on a synchronous external abort.
+                                                                 On a management interrupt, PMBPTR_EL1 is frozen. */
+#else /* Word 0 - Little Endian */
+    uint64_t ptr : 64; /**< [ 63:  0](R/W) Current write address. Defines the virtual address of the next
+                                                                 entry to be written to the buffer.
+                                                                 Software must treat bits [M:0] of this register as RES0, where
+                                                                 M is defined by PMBIDR_EL1[ALIGN]. If synchronous reporting of
+                                                                 external aborts is not supported, then hardware also treats
+                                                                 these bits as RES0. Otherwise, bits [M:0] might contain part of
+                                                                 a fault address on a synchronous external abort.
+                                                                 On a management interrupt, PMBPTR_EL1 is frozen. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_pmbptr_el1_s cn; */
+};
+typedef union bdk_ap_pmbptr_el1 bdk_ap_pmbptr_el1_t;
+
+#define BDK_AP_PMBPTR_EL1 BDK_AP_PMBPTR_EL1_FUNC()
+static inline uint64_t BDK_AP_PMBPTR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_PMBPTR_EL1_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x300090a0100ll;
+ __bdk_csr_fatal("AP_PMBPTR_EL1", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_PMBPTR_EL1 bdk_ap_pmbptr_el1_t
+#define bustype_BDK_AP_PMBPTR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_PMBPTR_EL1 "AP_PMBPTR_EL1"
+#define busnum_BDK_AP_PMBPTR_EL1 0
+#define arguments_BDK_AP_PMBPTR_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_pmbsr_el1
+ *
+ * AP Profiling Buffer Status/syndrome Register
+ * Provides syndrome information to software when the buffer is disabled because the management
+ * interrupt has been raised.
+ */
+union bdk_ap_pmbsr_el1
+{
+ uint64_t u;
+ struct bdk_ap_pmbsr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t ec : 6; /**< [ 31: 26](R/W) Exception class.
+ 000000 = Buffer management event.
+ 100100 = Data abort on write to buffer.
+ 100101 = Stage 2 data abort on write to buffer.
+
+ All other values are reserved. Reserved values might be defined in a future version of the
+ architecture. */
+ uint64_t reserved_20_25 : 6;
+    uint64_t dl : 1; /**< [ 19: 19](R/W) Partial record lost.
+                                                                 0 = PMBPTR_EL1 points to the first byte after the last complete
+                                                                 record written to the buffer.
+                                                                 1 = Part of a record was lost due to a service event or
+                                                                 external abort. PMBPTR_EL1 might not point to the first byte
+                                                                 after the last complete record written to the buffer, and so
+                                                                 restarting collection might result in a data record stream that
+                                                                 software cannot parse. All records prior to the last record
+                                                                 have been written to the buffer. */
+ uint64_t ea : 1; /**< [ 18: 18](R/W) External abort.
+ 0 = An external abort has not been asserted.
+ 1 = An external abort has been asserted. */
+ uint64_t s : 1; /**< [ 17: 17](R/W) Service.
+ 0 = PMBIRQ has not been asserted.
+ 1 = PMBIRQ has been asserted. All profiling data has either been written to the buffer
+ or discarded. */
+ uint64_t coll : 1; /**< [ 16: 16](R/W) Collision detected.
+ 0 = No collision events detected.
+ 1 = At least one collision event was recorded. */
+ uint64_t reserved_6_15 : 10;
+    uint64_t bsc_fsc : 6; /**< [  5:  0](R/W) BSC, bits [5:0], when EC == 0b000000: buffer status code.
+                                                                 0x0 = Buffer not filled.
+                                                                 0x1 = Buffer filled.
+
+                                                                 FSC, bits [5:0], when EC == 0b10010x: fault status code.
+ 0000xx = Address Size fault, bits [1:0] encode the level.
+ 0001xx = Translation fault, bits [1:0] encode the level.
+ 0010xx = Access Flag fault, bits [1:0] encode the level.
+ 0011xx = Permission fault, bits [1:0] encode the level.
+ 010000 = Synchronous external abort on write.
+ 0101xx = Synchronous external abort on page table walk, bits [1:0] encode the level.
+ 010001 = Asynchronous external abort on write.
+ 100001 = Alignment fault.
+ 110000 = TLB Conflict fault.
+ 110101 = Unsupported Access fault.
+
+ All other values are reserved. Reserved values might be defined in a future version of
+ the architecture. */
+#else /* Word 0 - Little Endian */
+    uint64_t bsc_fsc : 6; /**< [  5:  0](R/W) BSC, bits [5:0], when EC == 0b000000: buffer status code.
+                                                                 0x0 = Buffer not filled.
+                                                                 0x1 = Buffer filled.
+
+                                                                 FSC, bits [5:0], when EC == 0b10010x: fault status code.
+ 0000xx = Address Size fault, bits [1:0] encode the level.
+ 0001xx = Translation fault, bits [1:0] encode the level.
+ 0010xx = Access Flag fault, bits [1:0] encode the level.
+ 0011xx = Permission fault, bits [1:0] encode the level.
+ 010000 = Synchronous external abort on write.
+ 0101xx = Synchronous external abort on page table walk, bits [1:0] encode the level.
+ 010001 = Asynchronous external abort on write.
+ 100001 = Alignment fault.
+ 110000 = TLB Conflict fault.
+ 110101 = Unsupported Access fault.
+
+ All other values are reserved. Reserved values might be defined in a future version of
+ the architecture. */
+ uint64_t reserved_6_15 : 10;
+ uint64_t coll : 1; /**< [ 16: 16](R/W) Collision detected.
+ 0 = No collision events detected.
+ 1 = At least one collision event was recorded. */
+ uint64_t s : 1; /**< [ 17: 17](R/W) Service.
+ 0 = PMBIRQ has not been asserted.
+ 1 = PMBIRQ has been asserted. All profiling data has either been written to the buffer
+ or discarded. */
+ uint64_t ea : 1; /**< [ 18: 18](R/W) External abort.
+ 0 = An external abort has not been asserted.
+ 1 = An external abort has been asserted. */
+    uint64_t dl : 1; /**< [ 19: 19](R/W) Partial record lost.
+                                                                 0 = PMBPTR_EL1 points to the first byte after the last complete
+                                                                 record written to the buffer.
+                                                                 1 = Part of a record was lost due to a service event or
+                                                                 external abort. PMBPTR_EL1 might not point to the first byte
+                                                                 after the last complete record written to the buffer, and so
+                                                                 restarting collection might result in a data record stream that
+                                                                 software cannot parse. All records prior to the last record
+                                                                 have been written to the buffer. */
+ uint64_t reserved_20_25 : 6;
+ uint64_t ec : 6; /**< [ 31: 26](R/W) Exception class.
+ 000000 = Buffer management event.
+ 100100 = Data abort on write to buffer.
+ 100101 = Stage 2 data abort on write to buffer.
+
+ All other values are reserved. Reserved values might be defined in a future version of the
+ architecture. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_pmbsr_el1_s cn; */
+};
+typedef union bdk_ap_pmbsr_el1 bdk_ap_pmbsr_el1_t;
+
+#define BDK_AP_PMBSR_EL1 BDK_AP_PMBSR_EL1_FUNC()
+static inline uint64_t BDK_AP_PMBSR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_PMBSR_EL1_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x300090a0300ll;
+ __bdk_csr_fatal("AP_PMBSR_EL1", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_PMBSR_EL1 bdk_ap_pmbsr_el1_t
+#define bustype_BDK_AP_PMBSR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_PMBSR_EL1 "AP_PMBSR_EL1"
+#define busnum_BDK_AP_PMBSR_EL1 0
+#define arguments_BDK_AP_PMBSR_EL1 -1,-1,-1,-1
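+
+/* Illustrative sketch, not part of the imported BDK sources: arming the
+ * profiling buffer with the three registers above. The s3_... spellings
+ * follow the encodings visible in the BDK_AP_PMB* access functions, for
+ * assemblers without SPE support. The caller must supply mapped, suitably
+ * aligned base/limit addresses. CN9XXX only, per the functions above. */
+static inline void bdk_example_spe_buffer_arm(uint64_t base, uint64_t limit)
+{
+    asm volatile("msr s3_0_c9_c10_1, %0" : : "r" (base));         /* AP_PMBPTR_EL1 */
+    asm volatile("msr s3_0_c9_c10_3, %0" : : "r" (0ull));         /* clear AP_PMBSR_EL1 */
+    asm volatile("msr s3_0_c9_c10_0, %0" : : "r" (limit | 1ull)); /* AP_PMBLIMITR_EL1, E=1 */
+    asm volatile("isb");
+}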
+
+/**
+ * Register (SYSREG) ap_pmccfiltr_el0
+ *
+ * AP Performance Monitors Cycle Count Filter Register
+ * Determines the modes in which the Cycle Counter, AP_PMCCNTR_EL0,
+ * increments.
+ */
+union bdk_ap_pmccfiltr_el0
+{
+ uint32_t u;
+ struct bdk_ap_pmccfiltr_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t p : 1; /**< [ 31: 31](R/W) EL1 modes filtering bit. Controls counting in EL1. If EL3 is
+ implemented, then counting in nonsecure EL1 is further
+ controlled by the NSK bit.
+ 0 = Count cycles in EL1.
+ 1 = Do not count cycles in EL1. */
+ uint32_t u : 1; /**< [ 30: 30](R/W) EL0 filtering bit. Controls counting in EL0. If EL3 is
+ implemented, then counting in nonsecure EL0 is further
+ controlled by the NSU bit.
+ 0 = Count cycles in EL0.
+ 1 = Do not count cycles in EL0. */
+ uint32_t nsk : 1; /**< [ 29: 29](R/W) Nonsecure kernel modes filtering bit. Controls counting in
+ nonsecure EL1. If EL3 is not implemented, this bit is RES0.
+ If the value of this bit is equal to the value of P, cycles in
+ nonsecure EL1 are counted.
+ Otherwise, cycles in nonsecure EL1 are not counted. */
+ uint32_t nsu : 1; /**< [ 28: 28](R/W) Nonsecure user modes filtering bit. Controls counting in Non-
+ secure EL0. If EL3 is not implemented, this bit is RES0.
+ If the value of this bit is equal to the value of U, cycles in
+ nonsecure EL0 are counted.
+ Otherwise, cycles in nonsecure EL0 are not counted. */
+ uint32_t nsh : 1; /**< [ 27: 27](R/W) Nonsecure Hyp modes filtering bit. Controls counting in Non-
+ secure EL2. If EL2 is not implemented, this bit is RES0.
+ 0 = Do not count cycles in EL2.
+ 1 = Count cycles in EL2. */
+ uint32_t m : 1; /**< [ 26: 26](R/W) Secure EL3 filtering bit. Most applications can ignore this
+ bit and set the value to zero. If EL3 is not implemented, this
+ bit is RES0.
+
+ If the value of this bit is equal to the value of P, cycles in
+ Secure EL3 are counted.
+
+ Otherwise, cycles in Secure EL3 are not counted. */
+ uint32_t reserved_0_25 : 26;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_25 : 26;
+ uint32_t m : 1; /**< [ 26: 26](R/W) Secure EL3 filtering bit. Most applications can ignore this
+ bit and set the value to zero. If EL3 is not implemented, this
+ bit is RES0.
+
+ If the value of this bit is equal to the value of P, cycles in
+ Secure EL3 are counted.
+
+ Otherwise, cycles in Secure EL3 are not counted. */
+ uint32_t nsh : 1; /**< [ 27: 27](R/W) Nonsecure Hyp modes filtering bit. Controls counting in Non-
+ secure EL2. If EL2 is not implemented, this bit is RES0.
+ 0 = Do not count cycles in EL2.
+ 1 = Count cycles in EL2. */
+ uint32_t nsu : 1; /**< [ 28: 28](R/W) Nonsecure user modes filtering bit. Controls counting in Non-
+ secure EL0. If EL3 is not implemented, this bit is RES0.
+ If the value of this bit is equal to the value of U, cycles in
+ nonsecure EL0 are counted.
+ Otherwise, cycles in nonsecure EL0 are not counted. */
+ uint32_t nsk : 1; /**< [ 29: 29](R/W) Nonsecure kernel modes filtering bit. Controls counting in
+ nonsecure EL1. If EL3 is not implemented, this bit is RES0.
+ If the value of this bit is equal to the value of P, cycles in
+ nonsecure EL1 are counted.
+ Otherwise, cycles in nonsecure EL1 are not counted. */
+ uint32_t u : 1; /**< [ 30: 30](R/W) EL0 filtering bit. Controls counting in EL0. If EL3 is
+ implemented, then counting in nonsecure EL0 is further
+ controlled by the NSU bit.
+ 0 = Count cycles in EL0.
+ 1 = Do not count cycles in EL0. */
+ uint32_t p : 1; /**< [ 31: 31](R/W) EL1 modes filtering bit. Controls counting in EL1. If EL3 is
+ implemented, then counting in nonsecure EL1 is further
+ controlled by the NSK bit.
+ 0 = Count cycles in EL1.
+ 1 = Do not count cycles in EL1. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_pmccfiltr_el0_s cn; */
+};
+typedef union bdk_ap_pmccfiltr_el0 bdk_ap_pmccfiltr_el0_t;
+
+#define BDK_AP_PMCCFILTR_EL0 BDK_AP_PMCCFILTR_EL0_FUNC()
+static inline uint64_t BDK_AP_PMCCFILTR_EL0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_PMCCFILTR_EL0_FUNC(void)
+{
+ return 0x3030e0f0700ll;
+}
+
+#define typedef_BDK_AP_PMCCFILTR_EL0 bdk_ap_pmccfiltr_el0_t
+#define bustype_BDK_AP_PMCCFILTR_EL0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_PMCCFILTR_EL0 "AP_PMCCFILTR_EL0"
+#define busnum_BDK_AP_PMCCFILTR_EL0 0
+#define arguments_BDK_AP_PMCCFILTR_EL0 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_pmccntr_el0
+ *
+ * AP Performance Monitors Cycle Count Register
+ * Holds the value of the processor Cycle Counter, CCNT, that
+ * counts processor clock cycles.
+ */
+union bdk_ap_pmccntr_el0
+{
+ uint64_t u;
+ struct bdk_ap_pmccntr_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ccnt : 64; /**< [ 63: 0](R/W) Cycle count. Depending on the values of AP_PMCR_EL0.{LC,D}, this
+ field increments in one of the following ways:
+ Every processor clock cycle.
+ Every 64th processor clock cycle.
+ This field can be reset to zero by writing 1 to AP_PMCR_EL0[C]. */
+#else /* Word 0 - Little Endian */
+ uint64_t ccnt : 64; /**< [ 63: 0](R/W) Cycle count. Depending on the values of AP_PMCR_EL0.{LC,D}, this
+ field increments in one of the following ways:
+ Every processor clock cycle.
+ Every 64th processor clock cycle.
+ This field can be reset to zero by writing 1 to AP_PMCR_EL0[C]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_pmccntr_el0_s cn; */
+};
+typedef union bdk_ap_pmccntr_el0 bdk_ap_pmccntr_el0_t;
+
+#define BDK_AP_PMCCNTR_EL0 BDK_AP_PMCCNTR_EL0_FUNC()
+static inline uint64_t BDK_AP_PMCCNTR_EL0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_PMCCNTR_EL0_FUNC(void)
+{
+ return 0x303090d0000ll;
+}
+
+#define typedef_BDK_AP_PMCCNTR_EL0 bdk_ap_pmccntr_el0_t
+#define bustype_BDK_AP_PMCCNTR_EL0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_PMCCNTR_EL0 "AP_PMCCNTR_EL0"
+#define busnum_BDK_AP_PMCCNTR_EL0 0
+#define arguments_BDK_AP_PMCCNTR_EL0 -1,-1,-1,-1
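+
+/* Illustrative sketch, not part of the imported BDK sources: reading the
+ * cycle counter. This assumes the counter was enabled elsewhere (AP_PMCR_EL0
+ * and the PMU counter-enable registers) and that AP_PMCCFILTR_EL0 above
+ * selects which exception levels count. */
+static inline uint64_t bdk_example_read_cycles(void)
+{
+    uint64_t ccnt;
+    asm volatile("mrs %0, pmccntr_el0" : "=r" (ccnt));
+    return ccnt;
+}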
+
+/**
+ * Register (SYSREG) ap_pmceid0_el0
+ *
+ * AP Performance Monitors Common Event Identification Register 0
+ * Defines which common architectural and common
+ * microarchitectural feature events are implemented. If a
+ * particular bit is set to 1, then the event for that bit is
+ * implemented.
+ */
+union bdk_ap_pmceid0_el0
+{
+ uint64_t u;
+ struct bdk_ap_pmceid0_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ce : 64; /**< [ 63: 0](RO) Common architectural and microarchitectural feature events
+ that can be counted by the PMU event counters.
+ For each bit described in the following table, the event is
+ implemented if the bit is set to 1, or not implemented if the
+ bit is set to 0.
+
+ \<pre\>
+ Bit Event number Event mnemonic
+ 31 0x01F L1D_CACHE_ALLOCATE
+ 30 0x01E CHAIN
+ 29 0x01D BUS_CYCLES
+ 28 0x01C TTBR_WRITE_RETIRED
+ 27 0x01B INST_SPEC
+ 26 0x01A MEMORY_ERROR
+ 25 0x019 BUS_ACCESS
+ 24 0x018 L2D_CACHE_WB
+ 23 0x017 L2D_CACHE_REFILL
+ 22 0x016 L2D_CACHE
+ 21 0x015 L1D_CACHE_WB
+ 20 0x014 L1I_CACHE
+ 19 0x013 MEM_ACCESS
+ 18 0x012 BR_PRED
+ 17 0x011 CPU_CYCLES
+ 16 0x010 BR_MIS_PRED
+ 15 0x00F UNALIGNED_LDST_RETIRED
+ 14 0x00E BR_RETURN_RETIRED
+ 13 0x00D BR_IMMED_RETIRED
+ 12 0x00C PC_WRITE_RETIRED
+ 11 0x00B CID_WRITE_RETIRED
+ 10 0x00A EXC_RETURN
+ 9 0x009 EXC_TAKEN
+ 8 0x008 INST_RETIRED
+ 7 0x007 ST_RETIRED
+ 6 0x006 LD_RETIRED
+ 5 0x005 L1D_TLB_REFILL
+ 4 0x004 L1D_CACHE
+ 3 0x003 L1D_CACHE_REFILL
+ 2 0x002 L1I_TLB_REFILL
+ 1 0x001 L1I_CACHE_REFILL
+ 0 0x000 SW_INCR
+ \</pre\> */
+#else /* Word 0 - Little Endian */
+ uint64_t ce : 64; /**< [ 63: 0](RO) Common architectural and microarchitectural feature events
+ that can be counted by the PMU event counters.
+ For each bit described in the following table, the event is
+ implemented if the bit is set to 1, or not implemented if the
+ bit is set to 0.
+
+ \<pre\>
+ Bit Event number Event mnemonic
+ 31 0x01F L1D_CACHE_ALLOCATE
+ 30 0x01E CHAIN
+ 29 0x01D BUS_CYCLES
+ 28 0x01C TTBR_WRITE_RETIRED
+ 27 0x01B INST_SPEC
+ 26 0x01A MEMORY_ERROR
+ 25 0x019 BUS_ACCESS
+ 24 0x018 L2D_CACHE_WB
+ 23 0x017 L2D_CACHE_REFILL
+ 22 0x016 L2D_CACHE
+ 21 0x015 L1D_CACHE_WB
+ 20 0x014 L1I_CACHE
+ 19 0x013 MEM_ACCESS
+ 18 0x012 BR_PRED
+ 17 0x011 CPU_CYCLES
+ 16 0x010 BR_MIS_PRED
+ 15 0x00F UNALIGNED_LDST_RETIRED
+ 14 0x00E BR_RETURN_RETIRED
+ 13 0x00D BR_IMMED_RETIRED
+ 12 0x00C PC_WRITE_RETIRED
+ 11 0x00B CID_WRITE_RETIRED
+ 10 0x00A EXC_RETURN
+ 9 0x009 EXC_TAKEN
+ 8 0x008 INST_RETIRED
+ 7 0x007 ST_RETIRED
+ 6 0x006 LD_RETIRED
+ 5 0x005 L1D_TLB_REFILL
+ 4 0x004 L1D_CACHE
+ 3 0x003 L1D_CACHE_REFILL
+ 2 0x002 L1I_TLB_REFILL
+ 1 0x001 L1I_CACHE_REFILL
+ 0 0x000 SW_INCR
+ \</pre\> */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_pmceid0_el0_s cn8; */
+ struct bdk_ap_pmceid0_el0_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ce : 64; /**< [ 63: 0](RO) Common architectural and microarchitectural feature events
+ that can be counted by the PMU event counters.
+ For each bit described in the following table, the event is
+ implemented if the bit is set to 1, or not implemented if the
+ bit is set to 0.
+
+ \<pre\>
+ Bit Event number Event mnemonic
+                                                                 35     0x4003       SAMPLE_COLLISION
+                                                                 34     0x4002       SAMPLE_FILTRATE
+                                                                 33     0x4001       SAMPLE_FEED
+                                                                 32     0x4000       SAMPLE_POP
+ 31 0x01F L1D_CACHE_ALLOCATE
+ 30 0x01E CHAIN
+ 29 0x01D BUS_CYCLES
+ 28 0x01C TTBR_WRITE_RETIRED
+ 27 0x01B INST_SPEC
+ 26 0x01A MEMORY_ERROR
+ 25 0x019 BUS_ACCESS
+ 24 0x018 L2D_CACHE_WB
+ 23 0x017 L2D_CACHE_REFILL
+ 22 0x016 L2D_CACHE
+ 21 0x015 L1D_CACHE_WB
+ 20 0x014 L1I_CACHE
+ 19 0x013 MEM_ACCESS
+ 18 0x012 BR_PRED
+ 17 0x011 CPU_CYCLES
+ 16 0x010 BR_MIS_PRED
+ 15 0x00F UNALIGNED_LDST_RETIRED
+ 14 0x00E BR_RETURN_RETIRED
+ 13 0x00D BR_IMMED_RETIRED
+ 12 0x00C PC_WRITE_RETIRED
+ 11 0x00B CID_WRITE_RETIRED
+ 10 0x00A EXC_RETURN
+ 9 0x009 EXC_TAKEN
+ 8 0x008 INST_RETIRED
+ 7 0x007 ST_RETIRED
+ 6 0x006 LD_RETIRED
+ 5 0x005 L1D_TLB_REFILL
+ 4 0x004 L1D_CACHE
+ 3 0x003 L1D_CACHE_REFILL
+ 2 0x002 L1I_TLB_REFILL
+ 1 0x001 L1I_CACHE_REFILL
+ 0 0x000 SW_INCR
+ \</pre\> */
+#else /* Word 0 - Little Endian */
+ uint64_t ce : 64; /**< [ 63: 0](RO) Common architectural and microarchitectural feature events
+ that can be counted by the PMU event counters.
+ For each bit described in the following table, the event is
+ implemented if the bit is set to 1, or not implemented if the
+ bit is set to 0.
+
+ \<pre\>
+ Bit Event number Event mnemonic
+                                                                 35     0x4003       SAMPLE_COLLISION
+                                                                 34     0x4002       SAMPLE_FILTRATE
+                                                                 33     0x4001       SAMPLE_FEED
+                                                                 32     0x4000       SAMPLE_POP
+ 31 0x01F L1D_CACHE_ALLOCATE
+ 30 0x01E CHAIN
+ 29 0x01D BUS_CYCLES
+ 28 0x01C TTBR_WRITE_RETIRED
+ 27 0x01B INST_SPEC
+ 26 0x01A MEMORY_ERROR
+ 25 0x019 BUS_ACCESS
+ 24 0x018 L2D_CACHE_WB
+ 23 0x017 L2D_CACHE_REFILL
+ 22 0x016 L2D_CACHE
+ 21 0x015 L1D_CACHE_WB
+ 20 0x014 L1I_CACHE
+ 19 0x013 MEM_ACCESS
+ 18 0x012 BR_PRED
+ 17 0x011 CPU_CYCLES
+ 16 0x010 BR_MIS_PRED
+ 15 0x00F UNALIGNED_LDST_RETIRED
+ 14 0x00E BR_RETURN_RETIRED
+ 13 0x00D BR_IMMED_RETIRED
+ 12 0x00C PC_WRITE_RETIRED
+ 11 0x00B CID_WRITE_RETIRED
+ 10 0x00A EXC_RETURN
+ 9 0x009 EXC_TAKEN
+ 8 0x008 INST_RETIRED
+ 7 0x007 ST_RETIRED
+ 6 0x006 LD_RETIRED
+ 5 0x005 L1D_TLB_REFILL
+ 4 0x004 L1D_CACHE
+ 3 0x003 L1D_CACHE_REFILL
+ 2 0x002 L1I_TLB_REFILL
+ 1 0x001 L1I_CACHE_REFILL
+ 0 0x000 SW_INCR
+ \</pre\> */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_ap_pmceid0_el0 bdk_ap_pmceid0_el0_t;
+
+#define BDK_AP_PMCEID0_EL0 BDK_AP_PMCEID0_EL0_FUNC()
+static inline uint64_t BDK_AP_PMCEID0_EL0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_PMCEID0_EL0_FUNC(void)
+{
+ return 0x303090c0600ll;
+}
+
+#define typedef_BDK_AP_PMCEID0_EL0 bdk_ap_pmceid0_el0_t
+#define bustype_BDK_AP_PMCEID0_EL0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_PMCEID0_EL0 "AP_PMCEID0_EL0"
+#define busnum_BDK_AP_PMCEID0_EL0 0
+#define arguments_BDK_AP_PMCEID0_EL0 -1,-1,-1,-1
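+
+/* Illustrative sketch, not part of the imported BDK sources: testing whether
+ * a common event in the 0x000-0x01F range is implemented, using the
+ * bit-per-event mapping documented above. */
+static inline int bdk_example_event_implemented(unsigned int event)
+{
+    uint64_t ceid;
+    if (event > 0x1f)
+        return 0; /* this sketch only consults AP_PMCEID0_EL0 */
+    asm volatile("mrs %0, pmceid0_el0" : "=r" (ceid));
+    return (ceid >> event) & 1;
+}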
+
+/**
+ * Register (SYSREG) ap_pmceid1_el0
+ *
+ * AP Performance Monitors Common Event Identification Register 1
+ * Reserved for future indication of which common architectural
+ * and common microarchitectural feature events are implemented.
+ */
+union bdk_ap_pmceid1_el0
+{
+ uint64_t u;
+ struct bdk_ap_pmceid1_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ce : 64; /**< [ 63: 0](RO) Common architectural and microarchitectural feature events
+ that can be counted by the PMU event counters.
+
+ For the bit described in the following table, the event is
+ implemented if the bit is set to 1, or not implemented if the
+ bit is set to 0.
+
+ \<pre\>
+ Bit Event number Event mnemonic
+ 0 0x020 L2D_CACHE_ALLOCATE
+ \</pre\> */
+#else /* Word 0 - Little Endian */
+ uint64_t ce : 64; /**< [ 63: 0](RO) Common architectural and microarchitectural feature events
+ that can be counted by the PMU event counters.
+
+ For the bit described in the following table, the event is
+ implemented if the bit is set to 1, or not implemented if the
+ bit is set to 0.
+
+ \<pre\>
+ Bit Event number Event mnemonic
+ 0 0x020 L2D_CACHE_ALLOCATE
+ \</pre\> */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_pmceid1_el0_s cn8; */
+ struct bdk_ap_pmceid1_el0_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ce : 64; /**< [ 63: 0](RO) Common architectural and microarchitectural feature events
+ that can be counted by the PMU event counters.
+
+ For each bit described in the following table, the event is
+ implemented if the bit is set to 1, or not implemented if the
+ bit is set to 0.
+
+ \<pre\>
+ Bit Event number Event mnemonic
+ 24 0x0038 REMOTE_ACCESS_RD
+ 23 0x0037 LL_CACHE_MISS_RD
+ 22 0x0036 LL_CACHE_RD
+ 21 0x0035 ITLB_WALK
+ 20 0x0034 DTLB_WALK
+ 19 0x0033 LL_CACHE_MISS
+ 18 0x0032 LL_CACHE
+ 17 0x0031 REMOTE_ACCESS
+ 16 RAZ
+ 15 0x002F L2D_TLB
+ 14 0x002E L2I_TLB_REFILL
+ 13 0x002D L2D_TLB_REFILL
+ 8 0x0028 L2I_CACHE_REFILL
+ 7 0x0027 L2I_CACHE
+ 6 0x0026 L1I_TLB
+ 5 0x0025 L1D_TLB
+ 4 0x0024 STALL_BACKEND
+ 3 0x0023 STALL_FRONTEND
+ 2 0x0022 BR_MIS_PRED_RETIRED
+ 1 0x0021 BR_RETIRED
+ 0 0x0020 L2D_CACHE_ALLOCATE
+ \</pre\> */
+#else /* Word 0 - Little Endian */
+ uint64_t ce : 64; /**< [ 63: 0](RO) Common architectural and microarchitectural feature events
+ that can be counted by the PMU event counters.
+
+ For each bit described in the following table, the event is
+ implemented if the bit is set to 1, or not implemented if the
+ bit is set to 0.
+
+ \<pre\>
+ Bit Event number Event mnemonic
+ 24 0x0038 REMOTE_ACCESS_RD
+ 23 0x0037 LL_CACHE_MISS_RD
+ 22 0x0036 LL_CACHE_RD
+ 21 0x0035 ITLB_WALK
+ 20 0x0034 DTLB_WALK
+ 19 0x0033 LL_CACHE_MISS
+ 18 0x0032 LL_CACHE
+ 17 0x0031 REMOTE_ACCESS
+ 16 RAZ
+ 15 0x002F L2D_TLB
+ 14 0x002E L2I_TLB_REFILL
+ 13 0x002D L2D_TLB_REFILL
+ 8 0x0028 L2I_CACHE_REFILL
+ 7 0x0027 L2I_CACHE
+ 6 0x0026 L1I_TLB
+ 5 0x0025 L1D_TLB
+ 4 0x0024 STALL_BACKEND
+ 3 0x0023 STALL_FRONTEND
+ 2 0x0022 BR_MIS_PRED_RETIRED
+ 1 0x0021 BR_RETIRED
+ 0 0x0020 L2D_CACHE_ALLOCATE
+ \</pre\> */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_ap_pmceid1_el0 bdk_ap_pmceid1_el0_t;
+
+#define BDK_AP_PMCEID1_EL0 BDK_AP_PMCEID1_EL0_FUNC()
+static inline uint64_t BDK_AP_PMCEID1_EL0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_PMCEID1_EL0_FUNC(void)
+{
+ return 0x303090c0700ll;
+}
+
+#define typedef_BDK_AP_PMCEID1_EL0 bdk_ap_pmceid1_el0_t
+#define bustype_BDK_AP_PMCEID1_EL0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_PMCEID1_EL0 "AP_PMCEID1_EL0"
+#define busnum_BDK_AP_PMCEID1_EL0 0
+#define arguments_BDK_AP_PMCEID1_EL0 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_pmcntenclr_el0
+ *
+ * AP Performance Monitors Count Enable Clear Register
+ * Disables the Cycle Count Register, AP_PMCCNTR_EL0, and any
+ * implemented event counters PMEVCNTR\<x\>. Reading this register
+ * shows which counters are enabled.
+ */
+union bdk_ap_pmcntenclr_el0
+{
+ uint32_t u;
+ struct bdk_ap_pmcntenclr_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t cc : 1; /**< [ 31: 31](R/W) AP_PMCCNTR_EL0 disable bit. Disables the cycle counter register.
+ 0 = When read, means the cycle counter is disabled. When written,
+ has no effect.
+ 1 = When read, means the cycle counter is enabled. When written,
+ disables the cycle counter. */
+ uint32_t p : 31; /**< [ 30: 0](R/W) Event counter disable bit for PMEVCNTR\<x\>.
+ Bits [30:N] are RAZ/WI.
+ When EL2 is implemented, in nonsecure EL1 and EL0, N is the
+ value in AP_MDCR_EL2[HPMN]. Otherwise, N is the value in
+ AP_PMCR_EL0[N].
+
+ 0 = When read, means that PMEVCNTR\<x\> is disabled. When written,
+ has no effect.
+ 1 = When read, means that PMEVCNTR\<x\> is enabled. When written,
+ disables PMEVCNTR\<x\>. */
+#else /* Word 0 - Little Endian */
+ uint32_t p : 31; /**< [ 30: 0](R/W) Event counter disable bit for PMEVCNTR\<x\>.
+ Bits [30:N] are RAZ/WI.
+ When EL2 is implemented, in nonsecure EL1 and EL0, N is the
+ value in AP_MDCR_EL2[HPMN]. Otherwise, N is the value in
+ AP_PMCR_EL0[N].
+
+ 0 = When read, means that PMEVCNTR\<x\> is disabled. When written,
+ has no effect.
+ 1 = When read, means that PMEVCNTR\<x\> is enabled. When written,
+ disables PMEVCNTR\<x\>. */
+ uint32_t cc : 1; /**< [ 31: 31](R/W) AP_PMCCNTR_EL0 disable bit. Disables the cycle counter register.
+ 0 = When read, means the cycle counter is disabled. When written,
+ has no effect.
+ 1 = When read, means the cycle counter is enabled. When written,
+ disables the cycle counter. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_pmcntenclr_el0_s cn; */
+};
+typedef union bdk_ap_pmcntenclr_el0 bdk_ap_pmcntenclr_el0_t;
+
+#define BDK_AP_PMCNTENCLR_EL0 BDK_AP_PMCNTENCLR_EL0_FUNC()
+static inline uint64_t BDK_AP_PMCNTENCLR_EL0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_PMCNTENCLR_EL0_FUNC(void)
+{
+ return 0x303090c0200ll;
+}
+
+#define typedef_BDK_AP_PMCNTENCLR_EL0 bdk_ap_pmcntenclr_el0_t
+#define bustype_BDK_AP_PMCNTENCLR_EL0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_PMCNTENCLR_EL0 "AP_PMCNTENCLR_EL0"
+#define busnum_BDK_AP_PMCNTENCLR_EL0 0
+#define arguments_BDK_AP_PMCNTENCLR_EL0 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_pmcntenset_el0
+ *
+ * AP Performance Monitors Count Enable Set Register
+ * Enables the Cycle Count Register, AP_PMCCNTR_EL0, and any
+ * implemented event counters PMEVCNTR\<x\>. Reading this register
+ * shows which counters are enabled.
+ */
+union bdk_ap_pmcntenset_el0
+{
+ uint32_t u;
+ struct bdk_ap_pmcntenset_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t cc : 1; /**< [ 31: 31](R/W) AP_PMCCNTR_EL0 enable bit. Enables the cycle counter register.
+ 0 = When read, means the cycle counter is disabled. When written,
+ has no effect.
+ 1 = When read, means the cycle counter is enabled. When written,
+ enables the cycle counter. */
+ uint32_t p : 31; /**< [ 30: 0](R/W) Event counter enable bit for PMEVCNTR\<x\>.
+ Bits [30:N] are RAZ/WI.
+ When EL2 is implemented, in nonsecure EL1 and EL0, N is the
+ value in AP_MDCR_EL2[HPMN]. Otherwise, N is the value in
+ AP_PMCR_EL0[N].
+
+ 0 = When read, means that PMEVCNTR\<x\> is disabled. When written,
+ has no effect.
+ 1 = When read, means that PMEVCNTR\<x\> event counter is enabled.
+ When written, enables PMEVCNTR\<x\>. */
+#else /* Word 0 - Little Endian */
+ uint32_t p : 31; /**< [ 30: 0](R/W) Event counter enable bit for PMEVCNTR\<x\>.
+ Bits [30:N] are RAZ/WI.
+ When EL2 is implemented, in nonsecure EL1 and EL0, N is the
+ value in AP_MDCR_EL2[HPMN]. Otherwise, N is the value in
+ AP_PMCR_EL0[N].
+
+ 0 = When read, means that PMEVCNTR\<x\> is disabled. When written,
+ has no effect.
+ 1 = When read, means that PMEVCNTR\<x\> event counter is enabled.
+ When written, enables PMEVCNTR\<x\>. */
+ uint32_t cc : 1; /**< [ 31: 31](R/W) AP_PMCCNTR_EL0 enable bit. Enables the cycle counter register.
+ 0 = When read, means the cycle counter is disabled. When written,
+ has no effect.
+ 1 = When read, means the cycle counter is enabled. When written,
+ enables the cycle counter. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_pmcntenset_el0_s cn; */
+};
+typedef union bdk_ap_pmcntenset_el0 bdk_ap_pmcntenset_el0_t;
+
+#define BDK_AP_PMCNTENSET_EL0 BDK_AP_PMCNTENSET_EL0_FUNC()
+static inline uint64_t BDK_AP_PMCNTENSET_EL0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_PMCNTENSET_EL0_FUNC(void)
+{
+ return 0x303090c0100ll;
+}
+
+#define typedef_BDK_AP_PMCNTENSET_EL0 bdk_ap_pmcntenset_el0_t
+#define bustype_BDK_AP_PMCNTENSET_EL0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_PMCNTENSET_EL0 "AP_PMCNTENSET_EL0"
+#define busnum_BDK_AP_PMCNTENSET_EL0 0
+#define arguments_BDK_AP_PMCNTENSET_EL0 -1,-1,-1,-1
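+
+/* Illustrative sketch (not from the imported sources): because
+ * AP_PMCNTENSET_EL0 and AP_PMCNTENCLR_EL0 are a write-one-to-set /
+ * write-one-to-clear pair, no read-modify-write is needed to enable or
+ * disable individual counters. Assumes BDK_CSR_WRITE() from bdk-csr.h. */
+static inline void bdk_ap_pmu_enable_cycle_and_counter0(void)
+{
+    bdk_ap_pmcntenset_el0_t set = { .u = 0 };
+    set.s.cc = 1;     /* enable the cycle counter, AP_PMCCNTR_EL0 */
+    set.s.p = 1 << 0; /* enable event counter PMEVCNTR0 */
+    BDK_CSR_WRITE(bdk_numa_local(), BDK_AP_PMCNTENSET_EL0, set.u);
+}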
+
+/**
+ * Register (SYSREG) ap_pmcr_el0
+ *
+ * AP Performance Monitors Control Register
+ * Provides details of the Performance Monitors implementation,
+ * including the number of counters implemented, and configures
+ * and controls the counters.
+ */
+union bdk_ap_pmcr_el0
+{
+ uint32_t u;
+ struct bdk_ap_pmcr_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t imp : 8; /**< [ 31: 24](RO) Implementer code. This field is RO with an implementation
+ defined value.
+ The implementer codes are allocated by ARM. Values have the
+ same interpretation as bits [31:24] of the MIDR. */
+ uint32_t idcode : 8; /**< [ 23: 16](RO) Identification code. This field is RO with an implementation
+ defined value.
+ Each implementer must maintain a list of identification codes
+ that is specific to the implementer. A specific implementation
+ is identified by the combination of the implementer code and
+ the identification code. */
+ uint32_t n : 5; /**< [ 15: 11](RO) Number of event counters. This field is RO with an
+ implementation defined value that indicates the number of
+ counters implemented.
+
+ An implementation that implements only the Cycle Count
+ Register, AP_PMCCNTR_EL0, indicates this with a value of
+ 0b00000.
+
+ CNXXXX has 6 counters. */
+ uint32_t reserved_7_10 : 4;
+ uint32_t lc : 1; /**< [ 6: 6](RO) Long cycle counter enable. Determines which AP_PMCCNTR_EL0 bit
+ generates an overflow recorded by PMOVSR[31].
+ ARM deprecates use of AP_PMCR_EL0[LC] = 0.
+ 0 = Cycle counter overflow on increment that changes
+ AP_PMCCNTR_EL0[31] from 1 to 0.
+ 1 = Cycle counter overflow on increment that changes
+ AP_PMCCNTR_EL0[63] from 1 to 0.
+
+ CNXXXX doesn't support 32-bit counters, so this bit is RAO/WI. */
+ uint32_t dp : 1; /**< [ 5: 5](R/W) Disable cycle counter when event counting is prohibited.
+ Event counting is prohibited when
+ ProfilingProhibited(IsSecure(),PSTATE[EL]) == TRUE.
+ 0 = AP_PMCCNTR_EL0, if enabled, counts when event counting is
+ prohibited.
+ 1 = AP_PMCCNTR_EL0 does not count when event counting is prohibited. */
+ uint32_t x : 1; /**< [ 4: 4](RO) Enable export of events in an implementation defined event
+ stream.
+ This bit is used to permit events to be exported to another
+ debug device, such as an OPTIONAL trace extension, over an
+ event bus. If the implementation does not include such an
+ event bus, this bit is RAZ/WI.
+
+ This bit does not affect the generation of Performance
+ Monitors overflow interrupt requests or signaling to a cross-
+ trigger interface (CTI) that can be implemented as signals
+ exported from the processor.
+
+ If the implementation does not include an exported event
+ stream, this bit is RAZ/WI. Otherwise this bit is RW.
+ 0 = Do not export events.
+ 1 = Export events where not prohibited.
+
+ CNXXXX doesn't support export of events. */
+ uint32_t dd : 1; /**< [ 3: 3](RO) Clock divider.
+ If AP_PMCR_EL0[LC] == 1, this bit is ignored and the cycle counter
+ counts every clock cycle.
+ ARM deprecates use of PMCR[D] = 1.
+ 0 = When enabled, AP_PMCCNTR_EL0 counts every clock cycle.
+ 1 = When enabled, AP_PMCCNTR_EL0 counts once every 64 clock cycles.
+
+ CNXXXX doesn't support 32-bit counters, so this bit is RAZ/WI. */
+ uint32_t cc : 1; /**< [ 2: 2](R/W) Cycle counter reset. This bit is WO and reads as zero.
+ Resetting AP_PMCCNTR_EL0 does not clear the AP_PMCCNTR_EL0
+ overflow bit to 0.
+ 0 = No action.
+ 1 = Reset AP_PMCCNTR_EL0 to zero. */
+ uint32_t p : 1; /**< [ 1: 1](R/W) Event counter reset. This bit is WO and reads as zero.
+ In nonsecure EL0 and EL1, if EL2 is implemented, a write of 1
+ to this bit does not reset event counters that AP_MDCR_EL2[HPMN]
+ reserves for EL2 use.
+ In EL2 and EL3, a write of 1 to this bit resets all the event
+ counters.
+ Resetting the event counters does not clear any overflow bits
+ to 0.
+ 0 = No action.
+ 1 = Reset all event counters accessible in the current EL, not
+ including AP_PMCCNTR_EL0, to zero. */
+ uint32_t ee : 1; /**< [ 0: 0](R/W) Enable.
+ In nonsecure EL0 and EL1, if EL2 is implemented, this bit
+ does not affect the operation of event counters that
+ AP_MDCR_EL2[HPMN] reserves for EL2 use.
+ 0 = All counters, including AP_PMCCNTR_EL0, are disabled.
+ 1 = Counters enabled by AP_PMCNTENSET_EL0 are enabled. */
+#else /* Word 0 - Little Endian */
+ uint32_t ee : 1; /**< [ 0: 0](R/W) Enable.
+ In nonsecure EL0 and EL1, if EL2 is implemented, this bit
+ does not affect the operation of event counters that
+ AP_MDCR_EL2[HPMN] reserves for EL2 use.
+ 0 = All counters, including AP_PMCCNTR_EL0, are disabled.
+ 1 = Counters enabled by AP_PMCNTENSET_EL0 are enabled. */
+ uint32_t p : 1; /**< [ 1: 1](R/W) Event counter reset. This bit is WO and reads as zero.
+ In nonsecure EL0 and EL1, if EL2 is implemented, a write of 1
+ to this bit does not reset event counters that AP_MDCR_EL2[HPMN]
+ reserves for EL2 use.
+ In EL2 and EL3, a write of 1 to this bit resets all the event
+ counters.
+ Resetting the event counters does not clear any overflow bits
+ to 0.
+ 0 = No action.
+ 1 = Reset all event counters accessible in the current EL, not
+ including AP_PMCCNTR_EL0, to zero. */
+ uint32_t cc : 1; /**< [ 2: 2](R/W) Cycle counter reset. This bit is WO and reads as zero.
+ Resetting AP_PMCCNTR_EL0 does not clear the AP_PMCCNTR_EL0
+ overflow bit to 0.
+ 0 = No action.
+ 1 = Reset AP_PMCCNTR_EL0 to zero. */
+ uint32_t dd : 1; /**< [ 3: 3](RO) Clock divider.
+ If AP_PMCR_EL0[LC] == 1, this bit is ignored and the cycle counter
+ counts every clock cycle.
+ ARM deprecates use of PMCR[D] = 1.
+ 0 = When enabled, AP_PMCCNTR_EL0 counts every clock cycle.
+ 1 = When enabled, AP_PMCCNTR_EL0 counts once every 64 clock cycles.
+
+ CNXXXX doesn't support 32-bit counters, so this bit is RAZ/WI. */
+ uint32_t x : 1; /**< [ 4: 4](RO) Enable export of events in an implementation defined event
+ stream.
+ This bit is used to permit events to be exported to another
+ debug device, such as an OPTIONAL trace extension, over an
+ event bus. If the implementation does not include such an
+ event bus, this bit is RAZ/WI.
+
+ This bit does not affect the generation of Performance
+ Monitors overflow interrupt requests or signaling to a cross-
+ trigger interface (CTI) that can be implemented as signals
+ exported from the processor.
+
+ If the implementation does not include an exported event
+ stream, this bit is RAZ/WI. Otherwise this bit is RW.
+ 0 = Do not export events.
+ 1 = Export events where not prohibited.
+
+ CNXXXX doesn't support export of events. */
+ uint32_t dp : 1; /**< [ 5: 5](R/W) Disable cycle counter when event counting is prohibited.
+ Event counting is prohibited when
+ ProfilingProhibited(IsSecure(),PSTATE[EL]) == TRUE.
+ 0 = AP_PMCCNTR_EL0, if enabled, counts when event counting is
+ prohibited.
+ 1 = AP_PMCCNTR_EL0 does not count when event counting is prohibited. */
+ uint32_t lc : 1; /**< [ 6: 6](RO) Long cycle counter enable. Determines which AP_PMCCNTR_EL0 bit
+ generates an overflow recorded by PMOVSR[31].
+ ARM deprecates use of AP_PMCR_EL0[LC] = 0.
+ 0 = Cycle counter overflow on increment that changes
+ AP_PMCCNTR_EL0[31] from 1 to 0.
+ 1 = Cycle counter overflow on increment that changes
+ AP_PMCCNTR_EL0[63] from 1 to 0.
+
+ CNXXXX doesn't support 32-bit counters, so this bit is RAO/WI. */
+ uint32_t reserved_7_10 : 4;
+ uint32_t n : 5; /**< [ 15: 11](RO) Number of event counters. This field is RO with an
+ implementation defined value that indicates the number of
+ counters implemented.
+
+ An implementation that implements only the Cycle Count
+ Register, AP_PMCCNTR_EL0, indicates this with a value of
+ 0b00000.
+
+ CNXXXX has 6 counters. */
+ uint32_t idcode : 8; /**< [ 23: 16](RO) Identification code. This field is RO with an implementation
+ defined value.
+ Each implementer must maintain a list of identification codes
+ that is specific to the implementer. A specific implementation
+ is identified by the combination of the implementer code and
+ the identification code. */
+ uint32_t imp : 8; /**< [ 31: 24](RO) Implementer code. This field is RO with an implementation
+ defined value.
+ The implementer codes are allocated by ARM. Values have the
+ same interpretation as bits [31:24] of the MIDR. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_pmcr_el0_s cn; */
+};
+typedef union bdk_ap_pmcr_el0 bdk_ap_pmcr_el0_t;
+
+#define BDK_AP_PMCR_EL0 BDK_AP_PMCR_EL0_FUNC()
+static inline uint64_t BDK_AP_PMCR_EL0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_PMCR_EL0_FUNC(void)
+{
+ return 0x303090c0000ll;
+}
+
+#define typedef_BDK_AP_PMCR_EL0 bdk_ap_pmcr_el0_t
+#define bustype_BDK_AP_PMCR_EL0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_PMCR_EL0 "AP_PMCR_EL0"
+#define busnum_BDK_AP_PMCR_EL0 0
+#define arguments_BDK_AP_PMCR_EL0 -1,-1,-1,-1
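+
+/* Illustrative sketch (not from the imported sources): reset and start the
+ * counters, returning the number of implemented event counters from the
+ * read-only N field (6 on CNXXXX). Assumes the BDK_CSR_READ()/
+ * BDK_CSR_WRITE() accessors from bdk-csr.h. */
+static inline int bdk_ap_pmu_reset_and_start(void)
+{
+    bdk_node_t node = bdk_numa_local();
+    bdk_ap_pmcr_el0_t pmcr = { .u = BDK_CSR_READ(node, BDK_AP_PMCR_EL0) };
+    int counters = pmcr.s.n;
+    pmcr.s.p = 1;  /* reset all event counters (reads back as zero) */
+    pmcr.s.cc = 1; /* reset the cycle counter (reads back as zero) */
+    pmcr.s.ee = 1; /* master enable; counters still gated by PMCNTENSET */
+    BDK_CSR_WRITE(node, BDK_AP_PMCR_EL0, pmcr.u);
+    return counters;
+}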
+
+/**
+ * Register (SYSREG) ap_pmevcntr#_el0
+ *
+ * AP Performance Monitors Event Count Registers
+ * Holds event counter n, which counts events, where n is 0 to
+ * 30.
+ */
+union bdk_ap_pmevcntrx_el0
+{
+ uint32_t u;
+ struct bdk_ap_pmevcntrx_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t data : 32; /**< [ 31: 0](R/W) Event counter n. Value of event counter n, where n is the
+ number of this register and is a number from 0 to 30. */
+#else /* Word 0 - Little Endian */
+ uint32_t data : 32; /**< [ 31: 0](R/W) Event counter n. Value of event counter n, where n is the
+ number of this register and is a number from 0 to 30. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_pmevcntrx_el0_s cn; */
+};
+typedef union bdk_ap_pmevcntrx_el0 bdk_ap_pmevcntrx_el0_t;
+
+static inline uint64_t BDK_AP_PMEVCNTRX_EL0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_PMEVCNTRX_EL0(unsigned long a)
+{
+ if (a<=30)
+ return 0x3030e080000ll + 0x100ll * ((a) & 0x1f);
+ __bdk_csr_fatal("AP_PMEVCNTRX_EL0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_PMEVCNTRX_EL0(a) bdk_ap_pmevcntrx_el0_t
+#define bustype_BDK_AP_PMEVCNTRX_EL0(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_PMEVCNTRX_EL0(a) "AP_PMEVCNTRX_EL0"
+#define busnum_BDK_AP_PMEVCNTRX_EL0(a) (a)
+#define arguments_BDK_AP_PMEVCNTRX_EL0(a) (a),-1,-1,-1
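+
+/* Illustrative sketch (not from the imported sources): read event counter
+ * n through the indexed address encoding above. Indices above 30 end in
+ * __bdk_csr_fatal(), so the caller bounds-checks first. Assumes
+ * BDK_CSR_READ() from bdk-csr.h. */
+static inline uint32_t bdk_ap_pmu_read_counter(unsigned long n)
+{
+    if (n > 30)
+        return 0;
+    return BDK_CSR_READ(bdk_numa_local(), BDK_AP_PMEVCNTRX_EL0(n));
+}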
+
+/**
+ * Register (SYSREG) ap_pmevtyper#_el0
+ *
+ * AP Performance Monitors Event Type Registers
+ * Configures event counter n, where n is 0 to 30.
+ */
+union bdk_ap_pmevtyperx_el0
+{
+ uint32_t u;
+ struct bdk_ap_pmevtyperx_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t p : 1; /**< [ 31: 31](R/W) EL1 modes filtering bit. Controls counting in EL1. If EL3 is
+ implemented, then counting in nonsecure EL1 is further
+ controlled by the NSK bit.
+ 0 = Count events in EL1.
+ 1 = Do not count events in EL1. */
+ uint32_t u : 1; /**< [ 30: 30](R/W) EL0 filtering bit. Controls counting in EL0. If EL3 is
+ implemented, then counting in nonsecure EL0 is further
+ controlled by the NSU bit.
+ 0 = Count events in EL0.
+ 1 = Do not count events in EL0. */
+ uint32_t nsk : 1; /**< [ 29: 29](R/W) Nonsecure kernel modes filtering bit. Controls counting in
+ nonsecure EL1. If EL3 is not implemented, this bit is RES0.
+ If the value of this bit is equal to the value of P, events in
+ nonsecure EL1 are counted.
+ Otherwise, events in nonsecure EL1 are not counted. */
+ uint32_t nsu : 1; /**< [ 28: 28](R/W) Nonsecure user modes filtering bit. Controls counting in Non-
+ secure EL0. If EL3 is not implemented, this bit is RES0.
+ If the value of this bit is equal to the value of U, events in
+ nonsecure EL0 are counted.
+ Otherwise, events in nonsecure EL0 are not counted. */
+ uint32_t nsh : 1; /**< [ 27: 27](R/W) Nonsecure Hyp modes filtering bit. Controls counting in Non-
+ secure EL2. If EL2 is not implemented, this bit is RES0.
+ 0 = Do not count events in EL2.
+ 1 = Count events in EL2. */
+ uint32_t m : 1; /**< [ 26: 26](R/W) Secure EL3 filtering bit. Most applications can ignore this
+ bit and set the value to zero. If EL3 is not implemented, this
+ bit is RES0.
+ If the value of this bit is equal to the value of P, events in
+ Secure EL3 are counted.
+ Otherwise, events in Secure EL3 are not counted. */
+ uint32_t reserved_16_25 : 10;
+ uint32_t evtcount : 16; /**< [ 15: 0](R/W) Event to count. The event number of the event that is counted
+ by event counter PMEVCNTR\<n\>_EL0.
+ Software must program this field with an event defined by the
+ processor or a common event defined by the architecture.
+ If evtCount is programmed to an event that is reserved or not
+ implemented, the behavior depends on the event type.
+
+ For common architectural and microarchitectural events:
+ No events are counted.
+ The value read back on evtCount is the value written.
+
+ For implementation defined events:
+
+ It is UNPREDICTABLE what event, if any, is counted.
+ UNPREDICTABLE in this case means the event must not expose
+ privileged information.
+
+ The value read back on evtCount is an UNKNOWN value with the
+ same effect.
+
+ ARM recommends that the behavior across a family of
+ implementations is defined such that if a given implementation
+ does not include an event from a set of common implementation
+ defined events, then no event is counted and the value read
+ back on evtCount is the value written.
+
+ v8.1: Width was extended to 16 bits. */
+#else /* Word 0 - Little Endian */
+ uint32_t evtcount : 16; /**< [ 15: 0](R/W) Event to count. The event number of the event that is counted
+ by event counter PMEVCNTR\<n\>_EL0.
+ Software must program this field with an event defined by the
+ processor or a common event defined by the architecture.
+ If evtCount is programmed to an event that is reserved or not
+ implemented, the behavior depends on the event type.
+
+ For common architectural and microarchitectural events:
+ No events are counted.
+ The value read back on evtCount is the value written.
+
+ For implementation defined events:
+
+ It is UNPREDICTABLE what event, if any, is counted.
+ UNPREDICTABLE in this case means the event must not expose
+ privileged information.
+
+ The value read back on evtCount is an UNKNOWN value with the
+ same effect.
+
+ ARM recommends that the behavior across a family of
+ implementations is defined such that if a given implementation
+ does not include an event from a set of common implementation
+ defined events, then no event is counted and the value read
+ back on evtCount is the value written.
+
+ v8.1: Width was extended to 16 bits. */
+ uint32_t reserved_16_25 : 10;
+ uint32_t m : 1; /**< [ 26: 26](R/W) Secure EL3 filtering bit. Most applications can ignore this
+ bit and set the value to zero. If EL3 is not implemented, this
+ bit is RES0.
+ If the value of this bit is equal to the value of P, events in
+ Secure EL3 are counted.
+ Otherwise, events in Secure EL3 are not counted. */
+ uint32_t nsh : 1; /**< [ 27: 27](R/W) Nonsecure Hyp modes filtering bit. Controls counting in Non-
+ secure EL2. If EL2 is not implemented, this bit is RES0.
+ 0 = Do not count events in EL2.
+ 1 = Count events in EL2. */
+ uint32_t nsu : 1; /**< [ 28: 28](R/W) Nonsecure user modes filtering bit. Controls counting in Non-
+ secure EL0. If EL3 is not implemented, this bit is RES0.
+ If the value of this bit is equal to the value of U, events in
+ nonsecure EL0 are counted.
+ Otherwise, events in nonsecure EL0 are not counted. */
+ uint32_t nsk : 1; /**< [ 29: 29](R/W) Nonsecure kernel modes filtering bit. Controls counting in
+ nonsecure EL1. If EL3 is not implemented, this bit is RES0.
+ If the value of this bit is equal to the value of P, events in
+ nonsecure EL1 are counted.
+ Otherwise, events in nonsecure EL1 are not counted. */
+ uint32_t u : 1; /**< [ 30: 30](R/W) EL0 filtering bit. Controls counting in EL0. If EL3 is
+ implemented, then counting in nonsecure EL0 is further
+ controlled by the NSU bit.
+ 0 = Count events in EL0.
+ 1 = Do not count events in EL0. */
+ uint32_t p : 1; /**< [ 31: 31](R/W) EL1 modes filtering bit. Controls counting in EL1. If EL3 is
+ implemented, then counting in nonsecure EL1 is further
+ controlled by the NSK bit.
+ 0 = Count events in EL1.
+ 1 = Do not count events in EL1. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_pmevtyperx_el0_s cn; */
+};
+typedef union bdk_ap_pmevtyperx_el0 bdk_ap_pmevtyperx_el0_t;
+
+static inline uint64_t BDK_AP_PMEVTYPERX_EL0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_PMEVTYPERX_EL0(unsigned long a)
+{
+ if (a<=30)
+ return 0x3030e0c0000ll + 0x100ll * ((a) & 0x1f);
+ __bdk_csr_fatal("AP_PMEVTYPERX_EL0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_PMEVTYPERX_EL0(a) bdk_ap_pmevtyperx_el0_t
+#define bustype_BDK_AP_PMEVTYPERX_EL0(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_PMEVTYPERX_EL0(a) "AP_PMEVTYPERX_EL0"
+#define busnum_BDK_AP_PMEVTYPERX_EL0(a) (a)
+#define arguments_BDK_AP_PMEVTYPERX_EL0(a) (a),-1,-1,-1
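+
+/* Illustrative sketch (not from the imported sources): program event
+ * counter n to count a common event at EL0 and EL1 only. The event number
+ * would normally be validated against AP_PMCEID0_EL0/AP_PMCEID1_EL0 first.
+ * Assumes BDK_CSR_WRITE() from bdk-csr.h. */
+static inline void bdk_ap_pmu_setup_counter(unsigned long n, uint16_t event)
+{
+    bdk_ap_pmevtyperx_el0_t ev = { .u = 0 };
+    ev.s.evtcount = event; /* e.g. 0x011 for CPU_CYCLES */
+    ev.s.p = 0;            /* count events in EL1 */
+    ev.s.u = 0;            /* count events in EL0 */
+    BDK_CSR_WRITE(bdk_numa_local(), BDK_AP_PMEVTYPERX_EL0(n), ev.u);
+}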
+
+/**
+ * Register (SYSREG) ap_pmintenclr_el1
+ *
+ * AP Performance Monitors Interrupt Enable Clear Register
+ * Disables the generation of interrupt requests on overflows
+ * from the Cycle Count Register, AP_PMCCNTR_EL0, and the event
+ * counters PMEVCNTR\<n\>_EL0. Reading the register shows which
+ * overflow interrupt requests are enabled.
+ */
+union bdk_ap_pmintenclr_el1
+{
+ uint32_t u;
+ struct bdk_ap_pmintenclr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t cc : 1; /**< [ 31: 31](R/W) AP_PMCCNTR_EL0 overflow interrupt request disable bit. Possible
+ values are:
+ 0 = When read, means the cycle counter overflow interrupt request
+ is disabled. When written, has no effect.
+ 1 = When read, means the cycle counter overflow interrupt request
+ is enabled. When written, disables the cycle count overflow
+ interrupt request. */
+ uint32_t p : 31; /**< [ 30: 0](R/W) Event counter overflow interrupt request disable bit for
+ PMEVCNTR\<x\>_EL0.
+ When EL2 is implemented, in nonsecure EL1 and EL0, N is the
+ value in AP_MDCR_EL2[HPMN]. Otherwise, N is the value in
+ AP_PMCR_EL0[N].
+ Bits [30:N] are RAZ/WI.
+ 0 = When read, means that the PMEVCNTR\<x\>_EL0 event counter
+ interrupt request is disabled. When written, has no effect.
+ 1 = When read, means that the PMEVCNTR\<x\>_EL0 event counter
+ interrupt request is enabled. When written, disables the
+ PMEVCNTR\<x\>_EL0 interrupt request. */
+#else /* Word 0 - Little Endian */
+ uint32_t p : 31; /**< [ 30: 0](R/W) Event counter overflow interrupt request disable bit for
+ PMEVCNTR\<x\>_EL0.
+ When EL2 is implemented, in nonsecure EL1 and EL0, N is the
+ value in AP_MDCR_EL2[HPMN]. Otherwise, N is the value in
+ AP_PMCR_EL0[N].
+ Bits [30:N] are RAZ/WI.
+ 0 = When read, means that the PMEVCNTR\<x\>_EL0 event counter
+ interrupt request is disabled. When written, has no effect.
+ 1 = When read, means that the PMEVCNTR\<x\>_EL0 event counter
+ interrupt request is enabled. When written, disables the
+ PMEVCNTR\<x\>_EL0 interrupt request. */
+ uint32_t cc : 1; /**< [ 31: 31](R/W) AP_PMCCNTR_EL0 overflow interrupt request disable bit. Possible
+ values are:
+ 0 = When read, means the cycle counter overflow interrupt request
+ is disabled. When written, has no effect.
+ 1 = When read, means the cycle counter overflow interrupt request
+ is enabled. When written, disables the cycle count overflow
+ interrupt request. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_pmintenclr_el1_s cn8; */
+ struct bdk_ap_pmintenclr_el1_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t cc : 1; /**< [ 31: 31](R/W) AP_PMCCNTR_EL0 overflow interrupt request disable bit.
+ 0 = When read, means the cycle counter overflow interrupt request
+ is disabled. When written, has no effect.
+ 1 = When read, means the cycle counter overflow interrupt request
+ is enabled. When written, disables the cycle count overflow
+ interrupt request. */
+ uint32_t p : 31; /**< [ 30: 0](R/W) Event counter overflow interrupt request disable bit for
+ PMEVCNTR\<x\>_EL0.
+ When EL2 is implemented, in nonsecure EL1 and EL0, N is the
+ value in AP_MDCR_EL2[HPMN]. Otherwise, N is the value in
+ AP_PMCR_EL0[N].
+ Bits [30:N] are RAZ/WI.
+ 0 = When read, means that the PMEVCNTR\<x\>_EL0 event counter
+ interrupt request is disabled. When written, has no effect.
+ 1 = When read, means that the PMEVCNTR\<x\>_EL0 event counter
+ interrupt request is enabled. When written, disables the
+ PMEVCNTR\<x\>_EL0 interrupt request. */
+#else /* Word 0 - Little Endian */
+ uint32_t p : 31; /**< [ 30: 0](R/W) Event counter overflow interrupt request disable bit for
+ PMEVCNTR\<x\>_EL0.
+ When EL2 is implemented, in nonsecure EL1 and EL0, N is the
+ value in AP_MDCR_EL2[HPMN]. Otherwise, N is the value in
+ AP_PMCR_EL0[N].
+ Bits [30:N] are RAZ/WI.
+ 0 = When read, means that the PMEVCNTR\<x\>_EL0 event counter
+ interrupt request is disabled. When written, has no effect.
+ 1 = When read, means that the PMEVCNTR\<x\>_EL0 event counter
+ interrupt request is enabled. When written, disables the
+ PMEVCNTR\<x\>_EL0 interrupt request. */
+ uint32_t cc : 1; /**< [ 31: 31](R/W) AP_PMCCNTR_EL0 overflow interrupt request disable bit.
+ 0 = When read, means the cycle counter overflow interrupt request
+ is disabled. When written, has no effect.
+ 1 = When read, means the cycle counter overflow interrupt request
+ is enabled. When written, disables the cycle count overflow
+ interrupt request. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_ap_pmintenclr_el1 bdk_ap_pmintenclr_el1_t;
+
+#define BDK_AP_PMINTENCLR_EL1 BDK_AP_PMINTENCLR_EL1_FUNC()
+static inline uint64_t BDK_AP_PMINTENCLR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_PMINTENCLR_EL1_FUNC(void)
+{
+ return 0x300090e0200ll;
+}
+
+#define typedef_BDK_AP_PMINTENCLR_EL1 bdk_ap_pmintenclr_el1_t
+#define bustype_BDK_AP_PMINTENCLR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_PMINTENCLR_EL1 "AP_PMINTENCLR_EL1"
+#define busnum_BDK_AP_PMINTENCLR_EL1 0
+#define arguments_BDK_AP_PMINTENCLR_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_pmintenset_el1
+ *
+ * AP Performance Monitors Interrupt Enable Set Register
+ * Enables the generation of interrupt requests on overflows from
+ * the Cycle Count Register, AP_PMCCNTR_EL0, and the event counters
+ * PMEVCNTR\<n\>_EL0. Reading the register shows which overflow
+ * interrupt requests are enabled.
+ */
+union bdk_ap_pmintenset_el1
+{
+ uint32_t u;
+ struct bdk_ap_pmintenset_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t cc : 1; /**< [ 31: 31](R/W) AP_PMCCNTR_EL0 overflow interrupt request enable bit. Possible
+ values are:
+ 0 = When read, means the cycle counter overflow interrupt request
+ is disabled. When written, has no effect.
+ 1 = When read, means the cycle counter overflow interrupt request
+ is enabled. When written, enables the cycle count overflow
+ interrupt request. */
+ uint32_t p : 31; /**< [ 30: 0](R/W) Event counter overflow interrupt request enable bit for
+ PMEVCNTR\<x\>_EL0.
+
+ When EL2 is implemented, in nonsecure EL1 and EL0, N is the
+ value in AP_MDCR_EL2[HPMN]. Otherwise, N is the value in
+ AP_PMCR_EL0[N].
+
+ Bits [30:N] are RAZ/WI.
+
+ 0 = When read, means that the PMEVCNTR\<x\>_EL0 event counter
+ interrupt request is disabled. When written, has no effect.
+ 1 = When read, means that the PMEVCNTR\<x\>_EL0 event counter
+ interrupt request is enabled. When written, enables the
+ PMEVCNTR\<x\>_EL0 interrupt request. */
+#else /* Word 0 - Little Endian */
+ uint32_t p : 31; /**< [ 30: 0](R/W) Event counter overflow interrupt request enable bit for
+ PMEVCNTR\<x\>_EL0.
+
+ When EL2 is implemented, in nonsecure EL1 and EL0, N is the
+ value in AP_MDCR_EL2[HPMN]. Otherwise, N is the value in
+ AP_PMCR_EL0[N].
+
+ Bits [30:N] are RAZ/WI.
+
+ 0 = When read, means that the PMEVCNTR\<x\>_EL0 event counter
+ interrupt request is disabled. When written, has no effect.
+ 1 = When read, means that the PMEVCNTR\<x\>_EL0 event counter
+ interrupt request is enabled. When written, enables the
+ PMEVCNTR\<x\>_EL0 interrupt request. */
+ uint32_t cc : 1; /**< [ 31: 31](R/W) AP_PMCCNTR_EL0 overflow interrupt request enable bit. Possible
+ values are:
+ 0 = When read, means the cycle counter overflow interrupt request
+ is disabled. When written, has no effect.
+ 1 = When read, means the cycle counter overflow interrupt request
+ is enabled. When written, enables the cycle count overflow
+ interrupt request. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_pmintenset_el1_s cn8; */
+ struct bdk_ap_pmintenset_el1_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t cc : 1; /**< [ 31: 31](R/W) AP_PMCCNTR_EL0 overflow interrupt request enable bit.
+ 0 = When read, means the cycle counter overflow interrupt request
+ is disabled. When written, has no effect.
+ 1 = When read, means the cycle counter overflow interrupt request
+ is enabled. When written, enables the cycle count overflow
+ interrupt request. */
+ uint32_t p : 31; /**< [ 30: 0](R/W) Event counter overflow interrupt request enable bit for
+ PMEVCNTR\<x\>_EL0.
+
+ When EL2 is implemented, in nonsecure EL1 and EL0, N is the
+ value in AP_MDCR_EL2[HPMN]. Otherwise, N is the value in
+ AP_PMCR_EL0[N].
+
+ Bits [30:N] are RAZ/WI.
+
+ 0 = When read, means that the PMEVCNTR\<x\>_EL0 event counter
+ interrupt request is disabled. When written, has no effect.
+ 1 = When read, means that the PMEVCNTR\<x\>_EL0 event counter
+ interrupt request is enabled. When written, enables the
+ PMEVCNTR\<x\>_EL0 interrupt request. */
+#else /* Word 0 - Little Endian */
+ uint32_t p : 31; /**< [ 30: 0](R/W) Event counter overflow interrupt request enable bit for
+ PMEVCNTR\<x\>_EL0.
+
+ When EL2 is implemented, in nonsecure EL1 and EL0, N is the
+ value in AP_MDCR_EL2[HPMN]. Otherwise, N is the value in
+ AP_PMCR_EL0[N].
+
+ Bits [30:N] are RAZ/WI.
+
+ 0 = When read, means that the PMEVCNTR\<x\>_EL0 event counter
+ interrupt request is disabled. When written, has no effect.
+ 1 = When read, means that the PMEVCNTR\<x\>_EL0 event counter
+ interrupt request is enabled. When written, enables the
+ PMEVCNTR\<x\>_EL0 interrupt request. */
+ uint32_t cc : 1; /**< [ 31: 31](R/W) AP_PMCCNTR_EL0 overflow interrupt request enable bit.
+ 0 = When read, means the cycle counter overflow interrupt request
+ is disabled. When written, has no effect.
+ 1 = When read, means the cycle counter overflow interrupt request
+ is enabled. When written, enables the cycle count overflow
+ interrupt request. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_ap_pmintenset_el1 bdk_ap_pmintenset_el1_t;
+
+#define BDK_AP_PMINTENSET_EL1 BDK_AP_PMINTENSET_EL1_FUNC()
+static inline uint64_t BDK_AP_PMINTENSET_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_PMINTENSET_EL1_FUNC(void)
+{
+ return 0x300090e0100ll;
+}
+
+#define typedef_BDK_AP_PMINTENSET_EL1 bdk_ap_pmintenset_el1_t
+#define bustype_BDK_AP_PMINTENSET_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_PMINTENSET_EL1 "AP_PMINTENSET_EL1"
+#define busnum_BDK_AP_PMINTENSET_EL1 0
+#define arguments_BDK_AP_PMINTENSET_EL1 -1,-1,-1,-1
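+
+/* Illustrative sketch (not from the imported sources): arm the overflow
+ * interrupt for the cycle counter and for event counter n. The matching
+ * write-one-to-clear register, AP_PMINTENCLR_EL1, disarms them again.
+ * Assumes BDK_CSR_WRITE() from bdk-csr.h. */
+static inline void bdk_ap_pmu_enable_overflow_irq(unsigned int n)
+{
+    bdk_ap_pmintenset_el1_t set = { .u = 0 };
+    set.s.cc = 1;              /* cycle counter overflow interrupt */
+    if (n <= 30)
+        set.s.p = 1u << n;     /* PMEVCNTR<n> overflow interrupt */
+    BDK_CSR_WRITE(bdk_numa_local(), BDK_AP_PMINTENSET_EL1, set.u);
+}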
+
+/**
+ * Register (SYSREG) ap_pmovsclr_el0
+ *
+ * AP Performance Monitors Overflow Flag Status Clear Register
+ * Contains the state of the overflow bit for the Cycle Count
+ * Register, AP_PMCCNTR_EL0, and each of the implemented event
+ * counters PMEVCNTR\<x\>. Writing to this register clears these
+ * bits.
+ */
+union bdk_ap_pmovsclr_el0
+{
+ uint32_t u;
+ struct bdk_ap_pmovsclr_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t cc : 1; /**< [ 31: 31](R/W) AP_PMCCNTR_EL0 overflow bit.
+ AP_PMCR_EL0[LC] is used to control from which bit of AP_PMCCNTR_EL0
+ (bit 31 or bit 63) an overflow is detected.
+ 0 = When read, means the cycle counter has not overflowed. When
+ written, has no effect.
+ 1 = When read, means the cycle counter has overflowed. When
+ written, clears the overflow bit to 0. */
+ uint32_t p : 31; /**< [ 30: 0](R/W) Event counter overflow clear bit for PMEVCNTR\<x\>.
+ Bits [30:N] are RAZ/WI.
+ When EL2 is implemented, in nonsecure EL1 and EL0, N is the
+ value in AP_MDCR_EL2[HPMN]. Otherwise, N is the value in
+ AP_PMCR_EL0[N].
+
+ 0 = When read, means that PMEVCNTR\<x\> has not overflowed. When
+ written, has no effect.
+ 1 = When read, means that PMEVCNTR\<x\> has overflowed. When
+ written, clears the PMEVCNTR\<x\> overflow bit to 0. */
+#else /* Word 0 - Little Endian */
+ uint32_t p : 31; /**< [ 30: 0](R/W) Event counter overflow clear bit for PMEVCNTR\<x\>.
+ Bits [30:N] are RAZ/WI.
+ When EL2 is implemented, in nonsecure EL1 and EL0, N is the
+ value in AP_MDCR_EL2[HPMN]. Otherwise, N is the value in
+ AP_PMCR_EL0[N].
+
+ 0 = When read, means that PMEVCNTR\<x\> has not overflowed. When
+ written, has no effect.
+ 1 = When read, means that PMEVCNTR\<x\> has overflowed. When
+ written, clears the PMEVCNTR\<x\> overflow bit to 0. */
+ uint32_t cc : 1; /**< [ 31: 31](R/W) AP_PMCCNTR_EL0 overflow bit.
+ AP_PMCR_EL0[LC] is used to control from which bit of AP_PMCCNTR_EL0
+ (bit 31 or bit 63) an overflow is detected.
+ 0 = When read, means the cycle counter has not overflowed. When
+ written, has no effect.
+ 1 = When read, means the cycle counter has overflowed. When
+ written, clears the overflow bit to 0. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_pmovsclr_el0_s cn; */
+};
+typedef union bdk_ap_pmovsclr_el0 bdk_ap_pmovsclr_el0_t;
+
+#define BDK_AP_PMOVSCLR_EL0 BDK_AP_PMOVSCLR_EL0_FUNC()
+static inline uint64_t BDK_AP_PMOVSCLR_EL0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_PMOVSCLR_EL0_FUNC(void)
+{
+ return 0x303090c0300ll;
+}
+
+#define typedef_BDK_AP_PMOVSCLR_EL0 bdk_ap_pmovsclr_el0_t
+#define bustype_BDK_AP_PMOVSCLR_EL0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_PMOVSCLR_EL0 "AP_PMOVSCLR_EL0"
+#define busnum_BDK_AP_PMOVSCLR_EL0 0
+#define arguments_BDK_AP_PMOVSCLR_EL0 -1,-1,-1,-1
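+
+/* Illustrative sketch (not from the imported sources): test whether the
+ * cycle counter overflowed and, if so, acknowledge it with a
+ * write-one-to-clear. Assumes BDK_CSR_READ()/BDK_CSR_WRITE() from
+ * bdk-csr.h. */
+static inline int bdk_ap_pmu_ack_cycle_overflow(void)
+{
+    bdk_node_t node = bdk_numa_local();
+    bdk_ap_pmovsclr_el0_t ovf = { .u = BDK_CSR_READ(node, BDK_AP_PMOVSCLR_EL0) };
+    if (ovf.s.cc)
+    {
+        bdk_ap_pmovsclr_el0_t clr = { .u = 0 };
+        clr.s.cc = 1; /* clears only the cycle-counter overflow flag */
+        BDK_CSR_WRITE(node, BDK_AP_PMOVSCLR_EL0, clr.u);
+    }
+    return ovf.s.cc;
+}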
+
+/**
+ * Register (SYSREG) ap_pmovsset_el0
+ *
+ * AP Performance Monitors Overflow Flag Status Set Register
+ * Sets the state of the overflow bit for the Cycle Count
+ * Register, AP_PMCCNTR_EL0, and each of the implemented event
+ * counters PMEVCNTR\<x\>.
+ */
+union bdk_ap_pmovsset_el0
+{
+ uint32_t u;
+ struct bdk_ap_pmovsset_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t cc : 1; /**< [ 31: 31](R/W) AP_PMCCNTR_EL0 overflow bit.
+ 0 = When read, means the cycle counter has not overflowed. When
+ written, has no effect.
+ 1 = When read, means the cycle counter has overflowed. When
+ written, sets the overflow bit to 1. */
+ uint32_t p : 31; /**< [ 30: 0](R/W) Event counter overflow set bit for PMEVCNTR\<x\>.
+ Bits [30:N] are RAZ/WI.
+ When EL2 is implemented, in nonsecure EL1 and EL0, N is the
+ value in AP_MDCR_EL2[HPMN]. Otherwise, N is the value in
+ AP_PMCR_EL0[N].
+ 0 = When read, means that PMEVCNTR\<x\> has not overflowed. When
+ written, has no effect.
+ 1 = When read, means that PMEVCNTR\<x\> has overflowed. When
+ written, sets the PMEVCNTR\<x\> overflow bit to 1. */
+#else /* Word 0 - Little Endian */
+ uint32_t p : 31; /**< [ 30: 0](R/W) Event counter overflow set bit for PMEVCNTR\<x\>.
+ Bits [30:N] are RAZ/WI.
+ When EL2 is implemented, in nonsecure EL1 and EL0, N is the
+ value in AP_MDCR_EL2[HPMN]. Otherwise, N is the value in
+ AP_PMCR_EL0[N].
+ 0 = When read, means that PMEVCNTR\<x\> has not overflowed. When
+ written, has no effect.
+ 1 = When read, means that PMEVCNTR\<x\> has overflowed. When
+ written, sets the PMEVCNTR\<x\> overflow bit to 1. */
+ uint32_t cc : 1; /**< [ 31: 31](R/W) AP_PMCCNTR_EL0 overflow bit.
+ 0 = When read, means the cycle counter has not overflowed. When
+ written, has no effect.
+ 1 = When read, means the cycle counter has overflowed. When
+ written, sets the overflow bit to 1. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_pmovsset_el0_s cn; */
+};
+typedef union bdk_ap_pmovsset_el0 bdk_ap_pmovsset_el0_t;
+
+#define BDK_AP_PMOVSSET_EL0 BDK_AP_PMOVSSET_EL0_FUNC()
+static inline uint64_t BDK_AP_PMOVSSET_EL0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_PMOVSSET_EL0_FUNC(void)
+{
+ return 0x303090e0300ll;
+}
+
+#define typedef_BDK_AP_PMOVSSET_EL0 bdk_ap_pmovsset_el0_t
+#define bustype_BDK_AP_PMOVSSET_EL0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_PMOVSSET_EL0 "AP_PMOVSSET_EL0"
+#define busnum_BDK_AP_PMOVSSET_EL0 0
+#define arguments_BDK_AP_PMOVSSET_EL0 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_pmscr_el1
+ *
+ * AP Statistical Profiling Control Register
+ * Provides EL1 controls for Statistical Profiling.
+ */
+union bdk_ap_pmscr_el1
+{
+ uint64_t u;
+ struct bdk_ap_pmscr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_7_63 : 57;
+ uint64_t pct : 1; /**< [ 6: 6](R/W) Physical timestamp.
+ 0 = Virtual counter, CNTVCT_EL0, is collected.
+ 1 = Physical counter, CNTPCT_EL0, is collected.
+
+ Ignored when the TS bit is 0, at EL2, and in nonsecure state when HCR_EL2[TGE] = 1.
+ RES0 in nonsecure state when MDCR_EL2[E2DB] != 0x0 and PMSCR_EL2[PCT] = 0. */
+ uint64_t ts : 1; /**< [ 5: 5](R/W) Timestamp enable.
+ 0 = Timestamp sampling disabled.
+ 1 = Timestamp sampling enabled.
+
+ Ignored at EL2 and in nonsecure state when HCR_EL2[TGE] = 1. */
+ uint64_t pa : 1; /**< [ 4: 4](R/W) Physical address sample enable.
+ 0 = Physical addresses are not collected.
+ 1 = Physical addresses are collected.
+
+ Ignored at EL2 and in nonsecure state when HCR_EL2[TGE] = 1.
+ RES0 in nonsecure state when MDCR_EL2[E2DB] = 0x0 and PMSCR_EL2[PA] = 0. */
+ uint64_t cx : 1; /**< [ 3: 3](R/W) CONTEXTIDR_EL1 sample enable.
+ 0 = CONTEXTIDR_EL1 is not collected.
+ 1 = CONTEXTIDR_EL1 is collected.
+
+ RES0 at EL2 and in nonsecure state when HCR_EL2.TGE = 1. */
+ uint64_t reserved_2 : 1;
+ uint64_t e1spe : 1; /**< [ 1: 1](R/W) EL1 statistical profiling enable.
+ 0 = Sampling disabled at EL1.
+ 1 = Sampling enabled at EL1.
+
+ Ignored in nonsecure state when HCR_EL2[TGE] = 1. */
+ uint64_t e0spe : 1; /**< [ 0: 0](R/W) EL0 statistical profiling enable.
+ 0 = Sampling disabled at EL0 when HCR_EL2[TGE] = 0.
+ 1 = Sampling enabled at EL0 when HCR_EL2[TGE] = 0.
+
+ Ignored in nonsecure state when HCR_EL2[TGE] = 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t e0spe : 1; /**< [ 0: 0](R/W) EL0 statistical profiling enable.
+ 0 = Sampling disabled at EL0 when HCR_EL2[TGE] = 0.
+ 1 = Sampling enabled at EL0 when HCR_EL2[TGE] = 0.
+
+ Ignored in nonsecure state when HCR_EL2[TGE] = 1. */
+ uint64_t e1spe : 1; /**< [ 1: 1](R/W) EL1 statistical profiling enable.
+ 0 = Sampling disabled at EL1.
+ 1 = Sampling enabled at EL1.
+
+ Ignored in nonsecure state when HCR_EL2[TGE] = 1. */
+ uint64_t reserved_2 : 1;
+ uint64_t cx : 1; /**< [ 3: 3](R/W) CONTEXTIDR_EL1 sample enable.
+ 0 = CONTEXTIDR_EL1 is not collected.
+ 1 = CONTEXTIDR_EL1 is collected.
+
+ RES0 at EL2 and in nonsecure state when HCR_EL2.TGE = 1. */
+ uint64_t pa : 1; /**< [ 4: 4](R/W) Physical address sample enable.
+ 0 = Physical addresses are not collected.
+ 1 = Physical addresses are collected.
+
+ Ignored at EL2 and in nonsecure state when HCR_EL2[TGE] = 1.
+ RES0 in nonsecure state when MDCR_EL2[E2DB] = 0x0 and PMSCR_EL2[PA] = 0. */
+ uint64_t ts : 1; /**< [ 5: 5](R/W) Timestamp enable.
+ 0 = Timestamp sampling disabled.
+ 1 = Timestamp sampling enabled.
+
+ Ignored at EL2 and in nonsecure state when HCR_EL2[TGE] = 1. */
+ uint64_t pct : 1; /**< [ 6: 6](R/W) Physical timestamp.
+ 0 = Virtual counter, CNTVCT_EL0, is collected.
+ 1 = Physical counter, CNTPCT_EL0, is collected.
+
+ Ignored when the TS bit is 0, at EL2, and in nonsecure state when HCR_EL2[TGE] = 1.
+ RES0 in nonsecure state when MDCR_EL2[E2DB] != 0x0 and PMSCR_EL2[PCT] = 0. */
+ uint64_t reserved_7_63 : 57;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_pmscr_el1_s cn; */
+};
+typedef union bdk_ap_pmscr_el1 bdk_ap_pmscr_el1_t;
+
+#define BDK_AP_PMSCR_EL1 BDK_AP_PMSCR_EL1_FUNC()
+static inline uint64_t BDK_AP_PMSCR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_PMSCR_EL1_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x30009090000ll;
+ __bdk_csr_fatal("AP_PMSCR_EL1", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_PMSCR_EL1 bdk_ap_pmscr_el1_t
+#define bustype_BDK_AP_PMSCR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_PMSCR_EL1 "AP_PMSCR_EL1"
+#define busnum_BDK_AP_PMSCR_EL1 0
+#define arguments_BDK_AP_PMSCR_EL1 -1,-1,-1,-1
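+
+/* Illustrative sketch (not from the imported sources): enable
+ * statistical-profiling sampling with timestamps at EL0 and EL1. This is
+ * CN9XXX-only -- BDK_AP_PMSCR_EL1_FUNC() above calls __bdk_csr_fatal() on
+ * other models -- so the model check is required. Assumes BDK_CSR_MODIFY()
+ * from bdk-csr.h. */
+static inline void bdk_ap_spe_enable_el0_el1(void)
+{
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+        return;
+    BDK_CSR_MODIFY(c, bdk_numa_local(), BDK_AP_PMSCR_EL1,
+        c.s.ts = 1;     /* collect timestamps with each sample */
+        c.s.e1spe = 1;  /* sample at EL1 */
+        c.s.e0spe = 1); /* sample at EL0 (when HCR_EL2[TGE] = 0) */
+}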
+
+/**
+ * Register (SYSREG) ap_pmscr_el12
+ *
+ * AP Statistical Profiling Control Register
+ * Alias of AP_PMSCR_EL1 when accessed at EL2 and AP_HCR_EL2[E2H] is set.
+ */
+union bdk_ap_pmscr_el12
+{
+ uint64_t u;
+ struct bdk_ap_pmscr_el12_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_pmscr_el12_s cn; */
+};
+typedef union bdk_ap_pmscr_el12 bdk_ap_pmscr_el12_t;
+
+#define BDK_AP_PMSCR_EL12 BDK_AP_PMSCR_EL12_FUNC()
+static inline uint64_t BDK_AP_PMSCR_EL12_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_PMSCR_EL12_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x30509090000ll;
+ __bdk_csr_fatal("AP_PMSCR_EL12", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_PMSCR_EL12 bdk_ap_pmscr_el12_t
+#define bustype_BDK_AP_PMSCR_EL12 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_PMSCR_EL12 "AP_PMSCR_EL12"
+#define busnum_BDK_AP_PMSCR_EL12 0
+#define arguments_BDK_AP_PMSCR_EL12 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_pmscr_el2
+ *
+ * AP Statistical Profiling Control Register
+ * Provides EL2 controls for Statistical Profiling.
+ */
+union bdk_ap_pmscr_el2
+{
+ uint64_t u;
+ struct bdk_ap_pmscr_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_7_63 : 57;
+ uint64_t pct : 1; /**< [ 6: 6](R/W) Physical Timestamp.
+ 0 = Virtual counter, CNTVCT_EL0, is collected.
+ 1 = Physical counter, CNTPCT_EL0, is collected.
+
+ Ignored when the TS bit is 0 and in Secure state.
+ If MDCR_EL2.E2DB != 0b00, this bit is combined with PMSCR_EL1.PCT to determine which
+ counter is collected. See CollectTimeStamp.
+ If EL2 is not implemented, the PE behaves as if PCT == 1, other than for a direct read of
+ the register. */
+ uint64_t ts : 1; /**< [ 5: 5](R/W) Timestamp Enable.
+ 0 = Timestamp sampling disabled.
+ 1 = Timestamp sampling enabled.
+
+ Ignored in Secure state, and at EL1 and EL0 when HCR_EL2.TGE == 0.
+ See CollectTimeStamp. */
+ uint64_t pa : 1; /**< [ 4: 4](R/W) Physical Address Sample Enable.
+ 0 = Physical addresses are not collected.
+ 1 = Physical addresses are collected.
+
+ Ignored when the TS bit is 0 and in Secure state.
+ If MDCR_EL2.E2DB != 0b00, this bit is combined with PMSCR_EL1.PA to determine which
+ counter is collected. See CollectPhysicalAddress.
+ If EL2 is not implemented, the PE behaves as if PA == 1, other than for a direct read of
+ the register. */
+ uint64_t cx : 1; /**< [ 3: 3](R/W) CONTEXTIDR_EL2 Sample Enable.
+ 0 = CONTEXTIDR_EL2 is not collected.
+ 1 = CONTEXTIDR_EL2 is collected.
+
+ RES0 in secure state. */
+ uint64_t reserved_2 : 1;
+ uint64_t e2spe : 1; /**< [ 1: 1](R/W) EL2 statistical profiling enable.
+ 0 = Sampling disabled at EL2.
+ 1 = Sampling enabled at EL2.
+
+ RES0 if MDCR_EL2[E2PB] != 0x0. Ignored in Secure state. */
+ uint64_t e0hspe : 1; /**< [ 0: 0](R/W) EL0 statistical profiling enable.
+ 0 = Sampling disabled at EL0 when HCR_EL2.TGE == 1.
+ 1 = Sampling enabled at EL0 when HCR_EL2.TGE == 1.
+
+ RES0 if MDCR_EL2.E2PB != 0x0. Ignored in Secure state and when HCR_EL2[TGE] = 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t e0hspe : 1; /**< [ 0: 0](R/W) EL0 statistical profiling enable.
+ 0 = Sampling disabled at EL0 when HCR_EL2.TGE == 1.
+ 1 = Sampling enabled at EL0 when HCR_EL2.TGE == 1.
+
+ RES0 if MDCR_EL2.E2PB != 0x0. Ignored in Secure state and when HCR_EL2[TGE] = 0. */
+ uint64_t e2spe : 1; /**< [ 1: 1](R/W) EL2 statistical profiling enable.
+ 0 = Sampling disabled at EL2.
+ 1 = Sampling enabled at EL2.
+
+ RES0 if MDCR_EL2[E2PB] != 0x0. Ignored in Secure state. */
+ uint64_t reserved_2 : 1;
+ uint64_t cx : 1; /**< [ 3: 3](R/W) CONTEXTIDR_EL2 Sample Enable.
+ 0 = CONTEXTIDR_EL2 is not collected.
+ 1 = CONTEXTIDR_EL2 is collected.
+
+ RES0 in secure state. */
+ uint64_t pa : 1; /**< [ 4: 4](R/W) Physical Address Sample Enable.
+ 0 = Physical addresses are not collected.
+ 1 = Physical addresses are collected.
+
+ Ignored when the TS bit is 0 and in Secure state.
+ If MDCR_EL2.E2DB != 0b00, this bit is combined with PMSCR_EL1.PA to determine which
+ counter is collected. See CollectPhysicalAddress.
+ If EL2 is not implemented, the PE behaves as if PA == 1, other than for a direct read of
+ the register. */
+ uint64_t ts : 1; /**< [ 5: 5](R/W) Timestamp Enable.
+ 0 = Timestamp sampling disabled.
+ 1 = Timestamp sampling enabled.
+
+ Ignored in Secure state, and at EL1 and EL0 when HCR_EL2.TGE == 0.
+ See CollectTimeStamp. */
+ uint64_t pct : 1; /**< [ 6: 6](R/W) Physical Timestamp.
+ 0 = Virtual counter, CNTVCT_EL0, is collected.
+ 1 = Physical counter, CNTPCT_EL0, is collected.
+
+ Ignored when the TS bit is 0 and in Secure state.
+ If MDCR_EL2.E2DB != 0b00, this bit is combined with PMSCR_EL1.PCT to determine which
+ counter is collected. See CollectTimeStamp.
+ If EL2 is not implemented, the PE behaves as if PCT == 1, other than for a direct read of
+ the register. */
+ uint64_t reserved_7_63 : 57;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_pmscr_el2_s cn; */
+};
+typedef union bdk_ap_pmscr_el2 bdk_ap_pmscr_el2_t;
+
+#define BDK_AP_PMSCR_EL2 BDK_AP_PMSCR_EL2_FUNC()
+static inline uint64_t BDK_AP_PMSCR_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_PMSCR_EL2_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x30409090000ll;
+ __bdk_csr_fatal("AP_PMSCR_EL2", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_PMSCR_EL2 bdk_ap_pmscr_el2_t
+#define bustype_BDK_AP_PMSCR_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_PMSCR_EL2 "AP_PMSCR_EL2"
+#define busnum_BDK_AP_PMSCR_EL2 0
+#define arguments_BDK_AP_PMSCR_EL2 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_pmselr_el0
+ *
+ * AP Performance Monitors Event Counter Selection Register
+ * Selects the current event counter PMEVCNTR\<x\> or the cycle
+ * counter, CCNT.
+ */
+union bdk_ap_pmselr_el0
+{
+ uint32_t u;
+ struct bdk_ap_pmselr_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_5_31 : 27;
+ uint32_t sel : 5; /**< [ 4: 0](R/W) Selects event counter, PMEVCNTR\<x\>, where x is the value held
+ in this field. This value identifies which event counter is
+ accessed when a subsequent access to AP_PMXEVTYPER_EL0 or
+ AP_PMXEVCNTR_EL0 occurs.
+ When AP_PMSELR_EL0[SEL] is 0b11111:
+
+ A read of the AP_PMXEVTYPER_EL0 returns the value of
+ AP_PMCCFILTR_EL0.
+
+ A write of the AP_PMXEVTYPER_EL0 writes to AP_PMCCFILTR_EL0.
+
+ A read or write of AP_PMXEVCNTR_EL0 has CONSTRAINED
+ UNPREDICTABLE effects, that can be one of the following:
+ Access to AP_PMXEVCNTR_EL0 is UNdefined. Access to AP_PMXEVCNTR_EL0
+ behaves as a NOP. Access to AP_PMXEVCNTR_EL0 behaves as if the
+ register is RAZ/WI. Access to AP_PMXEVCNTR_EL0 behaves as if the
+ AP_PMSELR_EL0[SEL] field contains an UNKNOWN value.
+
+ If this field is set to a value greater than or equal to the
+ number of implemented counters, but not equal to 31, the
+ results of access to AP_PMXEVTYPER_EL0 or AP_PMXEVCNTR_EL0 are
+ CONSTRAINED UNPREDICTABLE, and can be one of the following:
+
+ Access to AP_PMXEVTYPER_EL0 or AP_PMXEVCNTR_EL0 is UNdefined.
+
+ Access to AP_PMXEVTYPER_EL0 or AP_PMXEVCNTR_EL0 behaves as a NOP.
+
+ Access to AP_PMXEVTYPER_EL0 or AP_PMXEVCNTR_EL0 behaves as if the
+ register is RAZ/WI.
+
+ Access to AP_PMXEVTYPER_EL0 or AP_PMXEVCNTR_EL0 behaves as if the
+ AP_PMSELR_EL0[SEL] field contains an UNKNOWN value.
+
+ Access to AP_PMXEVTYPER_EL0 or AP_PMXEVCNTR_EL0 behaves as if the
+ AP_PMSELR_EL0[SEL] field contains 0b11111. */
+#else /* Word 0 - Little Endian */
+ uint32_t sel : 5; /**< [ 4: 0](R/W) Selects event counter, PMEVCNTR\<x\>, where x is the value held
+ in this field. This value identifies which event counter is
+ accessed when a subsequent access to AP_PMXEVTYPER_EL0 or
+ AP_PMXEVCNTR_EL0 occurs.
+ When AP_PMSELR_EL0[SEL] is 0b11111:
+
+ A read of the AP_PMXEVTYPER_EL0 returns the value of
+ AP_PMCCFILTR_EL0.
+
+ A write of the AP_PMXEVTYPER_EL0 writes to AP_PMCCFILTR_EL0.
+
+ A read or write of AP_PMXEVCNTR_EL0 has CONSTRAINED
+ UNPREDICTABLE effects, that can be one of the following:
+ Access to AP_PMXEVCNTR_EL0 is UNdefined. Access to AP_PMXEVCNTR_EL0
+ behaves as a NOP. Access to AP_PMXEVCNTR_EL0 behaves as if the
+ register is RAZ/WI. Access to AP_PMXEVCNTR_EL0 behaves as if the
+ AP_PMSELR_EL0[SEL] field contains an UNKNOWN value.
+
+ If this field is set to a value greater than or equal to the
+ number of implemented counters, but not equal to 31, the
+ results of access to AP_PMXEVTYPER_EL0 or AP_PMXEVCNTR_EL0 are
+ CONSTRAINED UNPREDICTABLE, and can be one of the following:
+
+ Access to AP_PMXEVTYPER_EL0 or AP_PMXEVCNTR_EL0 is UNdefined.
+
+ Access to AP_PMXEVTYPER_EL0 or AP_PMXEVCNTR_EL0 behaves as a NOP.
+
+ Access to AP_PMXEVTYPER_EL0 or AP_PMXEVCNTR_EL0 behaves as if the
+ register is RAZ/WI.
+
+ Access to AP_PMXEVTYPER_EL0 or AP_PMXEVCNTR_EL0 behaves as if the
+ AP_PMSELR_EL0[SEL] field contains an UNKNOWN value.
+
+ Access to AP_PMXEVTYPER_EL0 or AP_PMXEVCNTR_EL0 behaves as if the
+ AP_PMSELR_EL0[SEL] field contains 0b11111. */
+ uint32_t reserved_5_31 : 27;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_pmselr_el0_s cn; */
+};
+typedef union bdk_ap_pmselr_el0 bdk_ap_pmselr_el0_t;
+
+#define BDK_AP_PMSELR_EL0 BDK_AP_PMSELR_EL0_FUNC()
+static inline uint64_t BDK_AP_PMSELR_EL0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_PMSELR_EL0_FUNC(void)
+{
+ return 0x303090c0500ll;
+}
+
+#define typedef_BDK_AP_PMSELR_EL0 bdk_ap_pmselr_el0_t
+#define bustype_BDK_AP_PMSELR_EL0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_PMSELR_EL0 "AP_PMSELR_EL0"
+#define busnum_BDK_AP_PMSELR_EL0 0
+#define arguments_BDK_AP_PMSELR_EL0 -1,-1,-1,-1
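+
+/* Illustrative sketch (not from the imported sources): the indirect access
+ * path. After selecting counter x here, AP_PMXEVTYPER_EL0 and
+ * AP_PMXEVCNTR_EL0 target PMEVTYPER<x>/PMEVCNTR<x>; selecting 0b11111
+ * redirects AP_PMXEVTYPER_EL0 to AP_PMCCFILTR_EL0 as described above.
+ * Assumes BDK_CSR_WRITE() from bdk-csr.h. */
+static inline void bdk_ap_pmu_select(unsigned int x)
+{
+    bdk_ap_pmselr_el0_t sel = { .u = 0 };
+    sel.s.sel = x & 0x1f; /* 0-30 = event counter; 31 = cycle-counter filter */
+    BDK_CSR_WRITE(bdk_numa_local(), BDK_AP_PMSELR_EL0, sel.u);
+}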
+
+/**
+ * Register (SYSREG) ap_pmsevfr_el1
+ *
+ * AP Sampling Event Filter Register
+ * Controls sample filtering by events. The overall filter is the logical AND of these
+ * filters. For example, if E[3] and E[5] are both set to 1, only samples that have both
+ * event 3 (Level 1 data or unified cache refill) and event 5 (TLB walk) set are recorded.
+ */
+union bdk_ap_pmsevfr_el1
+{
+ uint64_t u;
+ struct bdk_ap_pmsevfr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t e_48_63 : 16; /**< [ 63: 48](R/W) E[\<n\>] is the event filter for event \<n\>. If event \<n\> is not implemented, or filtering on
+ event \<n\> is not supported, the corresponding bit is RES0.
+ 0 = Event \<n\> is ignored.
+ 1 = Record samples that have event \<n\> == 1.
+
+ Ignored if PMSCR_EL1[FE] = 0.
+
+ An implementation defined event might be recorded as a multi-bit field. In this case, if
+ the corresponding bits of PMSEVFR_EL1 define an implementation defined filter for the
+ event. */
+ uint64_t reserved_32_47 : 16;
+ uint64_t e_24_31 : 8; /**< [ 31: 24](R/W) E[\<n\>] is the event filter for event \<n\>. If event \<n\> is not implemented, or filtering on
+ event \<n\> is not supported, the corresponding bit is RES0.
+ 0 = Event \<n\> is ignored.
+ 1 = Record samples that have event \<n\> == 1.
+
+ Ignored if PMSCR_EL1[FE] = 0.
+
+ An implementation defined event might be recorded as a multi-bit field. In this case,
+ the corresponding bits of PMSEVFR_EL1 define an implementation defined filter for the
+ event. */
+ uint64_t reserved_16_23 : 8;
+ uint64_t e_12_15 : 4; /**< [ 15: 12](R/W) E[\<n\>] is the event filter for event \<n\>. If event \<n\> is not implemented, or filtering on
+ event \<n\> is not supported, the corresponding bit is RES0.
+ 0 = Event \<n\> is ignored.
+ 1 = Record samples that have event \<n\> == 1.
+
+ Ignored if PMSCR_EL1[FE] = 0.
+
+ An implementation defined event might be recorded as a multi-bit field. In this case,
+ the corresponding bits of PMSEVFR_EL1 define an implementation defined filter for the
+ event. */
+ uint64_t reserved_8_11 : 4;
+ uint64_t e_7 : 1; /**< [ 7: 7](R/W) Mispredicted.
+ 0 = Mispredicted event is ignored.
+ 1 = Record samples that have event 7 (Mispredicted) == 1.
+
+ Ignored if PMSCR_EL1[FE] = 0. */
+ uint64_t reserved_6 : 1;
+ uint64_t e_5 : 1; /**< [ 5: 5](R/W) TLB walk.
+ 0 = TLB walk event is ignored.
+ 1 = Record samples that have event 5 (TLB walk) == 1.
+
+ Ignored if PMSCR_EL1[FE] = 0. */
+ uint64_t reserved_4 : 1;
+ uint64_t e_3 : 1; /**< [ 3: 3](R/W) Level 1 data or unified cache refill.
+ 0 = Level 1 data or unified cache refill event is ignored.
+ 1 = Record samples that have event 3 (Level 1 data or unified cache refill) == 1.
+
+ Ignored if PMSCR_EL1[FE] = 0. */
+ uint64_t reserved_2 : 1;
+ uint64_t e_1 : 1; /**< [ 1: 1](R/W) Architecturally retired.
+ 0 = Architecturally retired event is ignored.
+ 1 = Record samples that have event 1 (Architecturally retired) == 1.
+
+ Ignored if PMSCR_EL1[FE] = 0.
+
+ If the PE does not support sampling of speculative instructions, E[1] is RES1. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t e_1 : 1; /**< [ 1: 1](R/W) Architecturally retired.
+ 0 = Architecturally retired event is ignored.
+ 1 = Record samples that have event 1 (Architecturally retired) == 1.
+
+ Ignored if PMSCR_EL1[FE] = 0.
+
+ If the PE does not support sampling of speculative instructions, E[1] is RES1. */
+ uint64_t reserved_2 : 1;
+ uint64_t e_3 : 1; /**< [ 3: 3](R/W) Level 1 data or unified cache refill.
+ 0 = Level 1 data or unified cache refill event is ignored.
+ 1 = Record samples that have event 3 (Level 1 data or unified cache refill) == 1.
+
+ Ignored if PMSCR_EL1[FE] = 0. */
+ uint64_t reserved_4 : 1;
+ uint64_t e_5 : 1; /**< [ 5: 5](R/W) TLB walk.
+ 0 = TLB walk event is ignored.
+ 1 = Record samples that have event 5 (TLB walk) == 1.
+
+ Ignored if PMSCR_EL1[FE] = 0. */
+ uint64_t reserved_6 : 1;
+ uint64_t e_7 : 1; /**< [ 7: 7](R/W) Mispredicted.
+ 0 = Mispredicted event is ignored.
+ 1 = Record samples that have event 7 (Mispredicted) == 1.
+
+ Ignored if PMSCR_EL1[FE] = 0. */
+ uint64_t reserved_8_11 : 4;
+ uint64_t e_12_15 : 4; /**< [ 15: 12](R/W) E[\<n\>] is the event filter for event \<n\>. If event \<n\> is not implemented, or filtering on
+ event \<n\> is not supported, the corresponding bit is RES0.
+ 0 = Event \<n\> is ignored.
+ 1 = Record samples that have event \<n\> == 1.
+
+ Ignored if PMSCR_EL1[FE] = 0.
+
+ An implementation defined event might be recorded as a multi-bit field. In this case,
+ the corresponding bits of PMSEVFR_EL1 define an implementation defined filter for the
+ event. */
+ uint64_t reserved_16_23 : 8;
+ uint64_t e_24_31 : 8; /**< [ 31: 24](R/W) E[\<n\>] is the event filter for event \<n\>. If event \<n\> is not implemented, or filtering on
+ event \<n\> is not supported, the corresponding bit is RES0.
+ 0 = Event \<n\> is ignored.
+ 1 = Record samples that have event \<n\> == 1.
+
+ Ignored if PMSCR_EL1[FE] = 0.
+
+ An implementation defined event might be recorded as a multi-bit field. In this case,
+ the corresponding bits of PMSEVFR_EL1 define an implementation defined filter for the
+ event. */
+ uint64_t reserved_32_47 : 16;
+ uint64_t e_48_63 : 16; /**< [ 63: 48](R/W) E[\<n\>] is the event filter for event \<n\>. If event \<n\> is not implemented, or filtering on
+ event \<n\> is not supported, the corresponding bit is RES0.
+ 0 = Event \<n\> is ignored.
+ 1 = Record samples that have event \<n\> == 1.
+
+ Ignored if PMSCR_EL1[FE] = 0.
+
+ An implementation defined event might be recorded as a multi-bit field. In this case,
+ the corresponding bits of PMSEVFR_EL1 define an implementation defined filter for the
+ event. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_pmsevfr_el1_s cn; */
+};
+typedef union bdk_ap_pmsevfr_el1 bdk_ap_pmsevfr_el1_t;
+
+#define BDK_AP_PMSEVFR_EL1 BDK_AP_PMSEVFR_EL1_FUNC()
+static inline uint64_t BDK_AP_PMSEVFR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_PMSEVFR_EL1_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x30009090500ll;
+ __bdk_csr_fatal("AP_PMSEVFR_EL1", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_PMSEVFR_EL1 bdk_ap_pmsevfr_el1_t
+#define bustype_BDK_AP_PMSEVFR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_PMSEVFR_EL1 "AP_PMSEVFR_EL1"
+#define busnum_BDK_AP_PMSEVFR_EL1 0
+#define arguments_BDK_AP_PMSEVFR_EL1 -1,-1,-1,-1
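+
+/* Editor's note: a hedged sketch, not from the imported sources, encoding
+ * the filter example in the register description above: record only samples
+ * that have both event 3 (Level 1 data/unified cache refill) and event 5
+ * (TLB walk). The overall filter is the logical AND of the selected events. */
+static inline uint64_t example_encode_pmsevfr_e3_and_e5(void)
+{
+ bdk_ap_pmsevfr_el1_t evf;
+ evf.u = 0;
+ evf.s.e_3 = 1; /* require Level 1 data or unified cache refill */
+ evf.s.e_5 = 1; /* require TLB walk */
+ return evf.u;
+}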
+
+/**
+ * Register (SYSREG) ap_pmsfcr_el1
+ *
+ * AP Sampling Filter Control Register
+ * Controls sample filtering. The filter is the logical AND of the FL, FT and FE bits. For
+ * example, if FE == 1 and FT == 1, only samples including both the selected instruction types
+ * and the selected events will be recorded.
+ */
+union bdk_ap_pmsfcr_el1
+{
+ uint64_t u;
+ struct bdk_ap_pmsfcr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_19_63 : 45;
+ uint64_t st : 1; /**< [ 18: 18](R/W) Store filter enable.
+ 0 = Do not record store instructions.
+ 1 = Record all store instructions, including vector stores and all atomic operations.
+
+ Ignored if [FT] = 0. */
+ uint64_t ld : 1; /**< [ 17: 17](R/W) Load filter enable.
+ 0 = Do not record load instructions.
+ 1 = Record all load instructions, including vector loads and atomic operations that
+ return data.
+
+ Ignored if [FT] = 0. */
+ uint64_t bb : 1; /**< [ 16: 16](R/W) Branch filter enable.
+ 0 = Do not record branch instructions.
+ 1 = Record all branch instructions.
+
+ Ignored if [FT] = 0. */
+ uint64_t reserved_3_15 : 13;
+ uint64_t fl : 1; /**< [ 2: 2](R/W) Filter by latency.
+ 0 = Latency filtering disabled.
+ 1 = Latency filtering enabled. Samples with a total latency less than
+ PMSLATFR_EL1.MINLAT will not be recorded.
+ If this bit is set to 1 and PMSLATFR_EL1.MINLAT is zero, it is CONSTRAINED UNPREDICTABLE
+ whether no samples are recorded or the FL bit is ignored. */
+ uint64_t ft : 1; /**< [ 1: 1](R/W) Filter by type. The filter is the logical OR of the ST, LD and B bits. For example, if LD
+ and ST are both
+ set, both load and store instructions are recorded.
+ 0 = Type filtering disabled.
+ 1 = Type filtering enabled. Samples that are not one of the selected instruction types
+ will not be recorded.
+
+ If this bit is set to 1 and the ST, LD, and B bits are all zero, it is CONSTRAINED
+ UNPREDICTABLE whether no samples are recorded or the FT bit is ignored. */
+ uint64_t fe : 1; /**< [ 0: 0](R/W) Filter by event.
+ 0 = Event filtering disabled.
+ 1 = Event filtering enabled. Samples not including the events selected by PMSEVFR_EL1
+ will not be recorded.
+
+ If this bit is set to 1 and PMSEVFR_EL1 is zero, it is CONSTRAINED UNPREDICTABLE whether
+ no samples are recorded or the FE bit is ignored. */
+#else /* Word 0 - Little Endian */
+ uint64_t fe : 1; /**< [ 0: 0](R/W) Filter by event.
+ 0 = Event filtering disabled.
+ 1 = Event filtering enabled. Samples not including the events selected by PMSEVFR_EL1
+ will not be recorded.
+
+ If this bit is set to 1 and PMSEVFR_EL1 is zero, it is CONSTRAINED UNPREDICTABLE whether
+ no samples are recorded or the FE bit is ignored. */
+ uint64_t ft : 1; /**< [ 1: 1](R/W) Filter by type. The filter is the logical OR of the ST, LD and B bits. For example, if LD
+ and ST are both
+ set, both load and store instructions are recorded.
+ 0 = Type filtering disabled.
+ 1 = Type filtering enabled. Samples that are not one of the selected instruction types
+ will not be recorded.
+
+ If this bit is set to 1 and the ST, LD, and B bits are all zero, it is CONSTRAINED
+ UNPREDICTABLE whether no samples are recorded or the FT bit is ignored. */
+ uint64_t fl : 1; /**< [ 2: 2](R/W) Filter by latency.
+ 0 = Latency filtering disabled.
+ 1 = Latency filtering enabled. Samples with a total latency less than
+ PMSLATFR_EL1.MINLAT will not be recorded.
+ If this bit is set to 1 and PMSLATFR_EL1.MINLAT is zero, it is CONSTRAINED UNPREDICTABLE
+ whether no samples are recorded or the FL bit is ignored. */
+ uint64_t reserved_3_15 : 13;
+ uint64_t bb : 1; /**< [ 16: 16](R/W) Branch filter enable.
+ 0 = Do not record branch instructions.
+ 1 = Record all branch instructions.
+
+ Ignored if [FT] = 0. */
+ uint64_t ld : 1; /**< [ 17: 17](R/W) Load filter enable.
+ 0 = Do not record load instructions.
+ 1 = Record all load instructions, including vector loads and atomic operations that
+ return data.
+
+ Ignored if [FT] = 0. */
+ uint64_t st : 1; /**< [ 18: 18](R/W) Store filter enable.
+ 0 = Do not record store instructions.
+ 1 = Record all store instructions, including vector stores and all atomic operations.
+
+ Ignored if [FT] = 0. */
+ uint64_t reserved_19_63 : 45;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_pmsfcr_el1_s cn; */
+};
+typedef union bdk_ap_pmsfcr_el1 bdk_ap_pmsfcr_el1_t;
+
+#define BDK_AP_PMSFCR_EL1 BDK_AP_PMSFCR_EL1_FUNC()
+static inline uint64_t BDK_AP_PMSFCR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_PMSFCR_EL1_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x30009090400ll;
+ __bdk_csr_fatal("AP_PMSFCR_EL1", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_PMSFCR_EL1 bdk_ap_pmsfcr_el1_t
+#define bustype_BDK_AP_PMSFCR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_PMSFCR_EL1 "AP_PMSFCR_EL1"
+#define busnum_BDK_AP_PMSFCR_EL1 0
+#define arguments_BDK_AP_PMSFCR_EL1 -1,-1,-1,-1
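+
+/* Editor's note: an illustrative sketch, not BDK code, enabling type
+ * filtering for loads and stores only. Per the [FT] description, the type
+ * filter is the logical OR of the ST, LD and B bits, so this records both
+ * load and store instructions but no branches. */
+static inline uint64_t example_encode_pmsfcr_loads_stores(void)
+{
+ bdk_ap_pmsfcr_el1_t fcr;
+ fcr.u = 0;
+ fcr.s.ft = 1; /* enable filtering by type */
+ fcr.s.ld = 1; /* record load instructions */
+ fcr.s.st = 1; /* record store instructions */
+ return fcr.u; /* [BB] left 0, so branches are not recorded */
+}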
+
+/**
+ * Register (SYSREG) ap_pmsicr_el1
+ *
+ * AP Sampling Interval Control Register
+ * Software must write zero to PMSICR_EL1 before enabling sample profiling for a sampling
+ * session.
+ * Software must then treat PMSICR_EL1 as an opaque, 64-bit, read/write register used for context
+ * switches only.
+ */
+union bdk_ap_pmsicr_el1
+{
+ uint64_t u;
+ struct bdk_ap_pmsicr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_pmsicr_el1_s cn; */
+};
+typedef union bdk_ap_pmsicr_el1 bdk_ap_pmsicr_el1_t;
+
+#define BDK_AP_PMSICR_EL1 BDK_AP_PMSICR_EL1_FUNC()
+static inline uint64_t BDK_AP_PMSICR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_PMSICR_EL1_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x30009090200ll;
+ __bdk_csr_fatal("AP_PMSICR_EL1", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_PMSICR_EL1 bdk_ap_pmsicr_el1_t
+#define bustype_BDK_AP_PMSICR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_PMSICR_EL1 "AP_PMSICR_EL1"
+#define busnum_BDK_AP_PMSICR_EL1 0
+#define arguments_BDK_AP_PMSICR_EL1 -1,-1,-1,-1
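+
+/* Editor's note: a minimal sketch of the rule stated above, not part of the
+ * imported sources. A profiling session starts from a zeroed PMSICR_EL1;
+ * afterwards software only saves and restores the opaque 64-bit value across
+ * context switches. */
+static inline uint64_t example_pmsicr_session_start_value(void)
+{
+ bdk_ap_pmsicr_el1_t icr;
+ icr.u = 0; /* software must write zero before enabling sample profiling */
+ return icr.u;
+}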
+
+/**
+ * Register (SYSREG) ap_pmsidr_el1
+ *
+ * AP Sampling Profiling ID Register
+ * Describes the Statistical Profiling implementation to software.
+ */
+union bdk_ap_pmsidr_el1
+{
+ uint64_t u;
+ struct bdk_ap_pmsidr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_20_63 : 44;
+ uint64_t countsize : 4; /**< [ 19: 16](RO) Defines the size of the counters.
+ 0x2 = 12-bit saturating counters.
+
+ All other values are reserved. Reserved values might be defined in a future version of the
+ architecture. */
+ uint64_t maxsize : 4; /**< [ 15: 12](RO) Defines the largest size for a single record, rounded up to a power-of-two. If this is the
+ same as the minimum alignment (PMBIDR_EL1.Align), then each record is exactly this size.
+ 0x4 = 16 bytes.
+ 0x5 = 32 bytes.
+ 0x6 = 64 bytes.
+ 0x7 = 128 bytes.
+ ...
+ 0xB = 2 KB.
+
+ All other values are reserved. Reserved values might be defined in a future version of the
+ architecture. */
+ uint64_t interval : 4; /**< [ 11: 8](RO) Recommended minimum sampling interval. This provides the implementer's guidance on the
+ smallest minimum sampling interval, N. It is encoded as floor((Log2(N)-8)*2).
+ 0x0 = 256.
+ 0x2 = 512.
+ 0x3 = 724.
+ 0x4 = 1024.
+ 0x5 = 1448.
+ 0x6 = 2048.
+ 0x7 = 2896.
+ 0x8 = 4096.
+
+ All other values are reserved. Reserved values might be defined in a future version of the
+ architecture. */
+ uint64_t llnw : 1; /**< [ 7: 7](RO) Last level cache write events. Defines whether Last Level Cache events in the Event packet
+ are valid on write operations.
+ 0 = Last level cache events are valid for write operations.
+ 1 = Last level cache events are not valid for write operations. */
+ uint64_t llnr : 1; /**< [ 6: 6](RO) Last level cache read events. Defines whether Last Level Cache events in the Event packet
+ are valid on read operations.
+ 0 = Last level cache events are valid for read operations.
+ 1 = Last level cache events are not valid for read operations. */
+ uint64_t ernd : 1; /**< [ 5: 5](RO) Defines how the random number generator is used in determining the interval between
+ samples, when enabled by PMSIRR_EL1[RND].
+ 0 = The random number is added at the start of the interval, and the sample is taken and
+ a new interval started when the combined interval expires.
+ 1 = The random number is added and the new interval started after the interval
+ programmed in PMSIRR_EL1[INTERVAL] expires, and the sample is taken when the random
+ interval expires. */
+ uint64_t lds : 1; /**< [ 4: 4](RO) Data source indicator for sampled load instructions.
+ 0 = Loaded data source not implemented.
+ 1 = Loaded data source implemented. */
+ uint64_t archinst : 1; /**< [ 3: 3](RO) Architectural instruction profiling.
+ 0 = Micro-op sampling implemented.
+ 1 = Architecture instruction sampling implemented. */
+ uint64_t fl : 1; /**< [ 2: 2](RO) Filtering by latency. This bit reads as one. */
+ uint64_t ft : 1; /**< [ 1: 1](RO) Filtering by operation type. This bit reads as one. */
+ uint64_t fe : 1; /**< [ 0: 0](RO) Filtering by events. This bit reads as one. */
+#else /* Word 0 - Little Endian */
+ uint64_t fe : 1; /**< [ 0: 0](RO) Filtering by events. This bit reads as one. */
+ uint64_t ft : 1; /**< [ 1: 1](RO) Filtering by operation type. This bit reads as one. */
+ uint64_t fl : 1; /**< [ 2: 2](RO) Filtering by latency. This bit reads as one. */
+ uint64_t archinst : 1; /**< [ 3: 3](RO) Architectural instruction profiling.
+ 0 = Micro-op sampling implemented.
+ 1 = Architecture instruction sampling implemented. */
+ uint64_t lds : 1; /**< [ 4: 4](RO) Data source indicator for sampled load instructions.
+ 0 = Loaded data source not implemented.
+ 1 = Loaded data source implemented. */
+ uint64_t ernd : 1; /**< [ 5: 5](RO) Defines how the random number generator is used in determining the interval between
+ samples, when enabled by PMSIRR_EL1[RND].
+ 0 = The random number is added at the start of the interval, and the sample is taken and
+ a new interval started when the combined interval expires.
+ 1 = The random number is added and the new interval started after the interval
+ programmed in PMSIRR_EL1[INTERVAL] expires, and the sample is taken when the random
+ interval expires. */
+ uint64_t llnr : 1; /**< [ 6: 6](RO) Last level cache read events. Defines whether Last Level Cache events in the Event packet
+ are valid on read operations.
+ 0 = Last level cache events are valid for read operations.
+ 1 = Last level cache events are not valid for read operations. */
+ uint64_t llnw : 1; /**< [ 7: 7](RO) Last level cache write events. Defines whether Last Level Cache events in the Event packet
+ are valid on write operations.
+ 0 = Last level cache events are valid for write operations.
+ 1 = Last level cache events are not valid for write operations. */
+ uint64_t interval : 4; /**< [ 11: 8](RO) Recommended minimum sampling interval. This provides the implementer's guidance on the
+ smallest minimum sampling interval, N. It is encoded as floor((Log2(N)-8)*2).
+ 0x0 = 256.
+ 0x2 = 512.
+ 0x3 = 724.
+ 0x4 = 1024.
+ 0x5 = 1448.
+ 0x6 = 2048.
+ 0x7 = 2896.
+ 0x8 = 4096.
+
+ All other values are reserved. Reserved values might be defined in a future version of the
+ architecture. */
+ uint64_t maxsize : 4; /**< [ 15: 12](RO) Defines the largest size for a single record, rounded up to a power-of-two. If this is the
+ same as the minimum alignment (PMBIDR_EL1.Align), then each record is exactly this size.
+ 0x4 = 16 bytes.
+ 0x5 = 32 bytes.
+ 0x6 = 64 bytes.
+ 0x7 = 128 bytes.
+ ...
+ 0xB = 2 KB.
+
+ All other values are reserved. Reserved values might be defined in a future version of the
+ architecture. */
+ uint64_t countsize : 4; /**< [ 19: 16](RO) Defines the size of the counters.
+ 0x2 = 12-bit saturating counters.
+
+ All other values are reserved. Reserved values might be defined in a future version of the
+ architecture. */
+ uint64_t reserved_20_63 : 44;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_pmsidr_el1_s cn; */
+};
+typedef union bdk_ap_pmsidr_el1 bdk_ap_pmsidr_el1_t;
+
+#define BDK_AP_PMSIDR_EL1 BDK_AP_PMSIDR_EL1_FUNC()
+static inline uint64_t BDK_AP_PMSIDR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_PMSIDR_EL1_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x30009090700ll;
+ __bdk_csr_fatal("AP_PMSIDR_EL1", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_PMSIDR_EL1 bdk_ap_pmsidr_el1_t
+#define bustype_BDK_AP_PMSIDR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_PMSIDR_EL1 "AP_PMSIDR_EL1"
+#define busnum_BDK_AP_PMSIDR_EL1 0
+#define arguments_BDK_AP_PMSIDR_EL1 -1,-1,-1,-1
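+
+/* Editor's note: an illustrative decode, not BDK code, of the [INTERVAL]
+ * encoding floor((Log2(N)-8)*2) documented above, i.e. N ~= 2^(8 + k/2).
+ * Odd encodings scale by sqrt(2), approximated here as 181/128, which
+ * reproduces the documented values 724, 1448 and 2896 exactly. */
+static inline uint64_t example_pmsidr_min_interval(uint64_t pmsidr)
+{
+ bdk_ap_pmsidr_el1_t idr;
+ idr.u = pmsidr;
+ uint64_t n = 256ull << (idr.s.interval / 2); /* 256, 512, 1024, ... */
+ if (idr.s.interval & 1)
+ n = (n * 181) / 128; /* ~sqrt(2) for the odd steps */
+ return n;
+}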
+
+/**
+ * Register (SYSREG) ap_pmsirr_el1
+ *
+ * AP Sampling Interval Reload Register
+ * Defines the interval between samples.
+ */
+union bdk_ap_pmsirr_el1
+{
+ uint64_t u;
+ struct bdk_ap_pmsirr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t interval : 24; /**< [ 31: 8](R/W) Bits [31:8] of the PMSICR_EL1 interval counter reload value. Software must set this to a
+ non-zero
+ value. If software sets this to zero, an UNKNOWN sampling interval is used. Software
+ should set this to
+ a value greater than the minimum indicated by PMSIDR_EL1.Interval. */
+ uint64_t reserved_1_7 : 7;
+ uint64_t rnd : 1; /**< [ 0: 0](R/W) Controls randomization of the sampling interval.
+ 0 = Disable randomization of sampling interval.
+ 1 = Add (pseudo-)random jitter to sampling interval.
+
+ The random number generator is not architected. */
+#else /* Word 0 - Little Endian */
+ uint64_t rnd : 1; /**< [ 0: 0](R/W) Controls randomization of the sampling interval.
+ 0 = Disable randomization of sampling interval.
+ 1 = Add (pseudo-)random jitter to sampling interval.
+
+ The random number generator is not architected. */
+ uint64_t reserved_1_7 : 7;
+ uint64_t interval : 24; /**< [ 31: 8](R/W) Bits [31:8] of the PMSICR_EL1 interval counter reload value. Software must set this to a
+ non-zero
+ value. If software sets this to zero, an UNKNOWN sampling interval is used. Software
+ should set this to
+ a value greater than the minimum indicated by PMSIDR_EL1.Interval. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_pmsirr_el1_s cn; */
+};
+typedef union bdk_ap_pmsirr_el1 bdk_ap_pmsirr_el1_t;
+
+#define BDK_AP_PMSIRR_EL1 BDK_AP_PMSIRR_EL1_FUNC()
+static inline uint64_t BDK_AP_PMSIRR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_PMSIRR_EL1_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x30009090300ll;
+ __bdk_csr_fatal("AP_PMSIRR_EL1", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_PMSIRR_EL1 bdk_ap_pmsirr_el1_t
+#define bustype_BDK_AP_PMSIRR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_PMSIRR_EL1 "AP_PMSIRR_EL1"
+#define busnum_BDK_AP_PMSIRR_EL1 0
+#define arguments_BDK_AP_PMSIRR_EL1 -1,-1,-1,-1
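+
+/* Editor's note: a hedged encoding sketch, not part of the imported sources.
+ * Per the description above, [INTERVAL] must be non-zero and should exceed
+ * the minimum advertised by PMSIDR_EL1[INTERVAL]; the caller is assumed to
+ * have validated that. */
+static inline uint64_t example_encode_pmsirr(uint32_t interval, int randomize)
+{
+ bdk_ap_pmsirr_el1_t irr;
+ irr.u = 0;
+ irr.s.interval = interval & 0xffffff; /* bits [31:8] of the reload value */
+ irr.s.rnd = randomize ? 1 : 0; /* optional pseudo-random jitter */
+ return irr.u;
+}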
+
+/**
+ * Register (SYSREG) ap_pmslatfr_el1
+ *
+ * AP Sampling Latency Filter Register
+ * Controls sample filtering by latency.
+ */
+union bdk_ap_pmslatfr_el1
+{
+ uint64_t u;
+ struct bdk_ap_pmslatfr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t minlat : 12; /**< [ 11: 0](R/W) Minimum latency. When PMSFCR_EL1.FL == 1, defines the minimum total latency for filtered
+ operations. Samples with a total latency less than MINLAT will not be recorded.
+ Ignored if PMSFCR_EL1.FL == 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t minlat : 12; /**< [ 11: 0](R/W) Minimum latency. When PMSFCR_EL1.FL == 1, defines the minimum total latency for filtered
+ operations. Samples with a total latency less than MINLAT will not be recorded.
+ Ignored if PMSFCR_EL1.FL == 0. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_pmslatfr_el1_s cn; */
+};
+typedef union bdk_ap_pmslatfr_el1 bdk_ap_pmslatfr_el1_t;
+
+#define BDK_AP_PMSLATFR_EL1 BDK_AP_PMSLATFR_EL1_FUNC()
+static inline uint64_t BDK_AP_PMSLATFR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_PMSLATFR_EL1_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x30009090600ll;
+ __bdk_csr_fatal("AP_PMSLATFR_EL1", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_PMSLATFR_EL1 bdk_ap_pmslatfr_el1_t
+#define bustype_BDK_AP_PMSLATFR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_PMSLATFR_EL1 "AP_PMSLATFR_EL1"
+#define busnum_BDK_AP_PMSLATFR_EL1 0
+#define arguments_BDK_AP_PMSLATFR_EL1 -1,-1,-1,-1
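+
+/* Editor's note: a trivial encoding sketch, not BDK code. [MINLAT] only
+ * takes effect when PMSFCR_EL1[FL] is also set, as noted above. */
+static inline uint64_t example_encode_pmslatfr(uint32_t min_latency)
+{
+ bdk_ap_pmslatfr_el1_t lat;
+ lat.u = 0;
+ lat.s.minlat = min_latency & 0xfff; /* 12-bit minimum total latency */
+ return lat.u;
+}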
+
+/**
+ * Register (SYSREG) ap_pmswinc_el0
+ *
+ * AP Performance Monitors Software Increment Register
+ * Increments a counter that is configured to count the Software
+ * increment event, event 0x0.
+ */
+union bdk_ap_pmswinc_el0
+{
+ uint32_t u;
+ struct bdk_ap_pmswinc_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_31 : 1;
+ uint32_t p : 31; /**< [ 30: 0](RO) Event counter software increment bit for PMEVCNTR\<x\>.
+ Bits [30:N] are RAZ/WI.
+ When EL2 is implemented, in nonsecure EL1 and EL0, N is the
+ value in AP_MDCR_EL2[HPMN]. Otherwise, N is the value in PMCR[N].
+ The effects of writing to this bit are:
+ 0 = No action. The write to this bit is ignored.
+ 1 = If PMEVCNTR\<x\> is enabled and configured to count the software
+ increment event, increments PMEVCNTR\<x\> by 1. If PMEVCNTR\<x\>
+ is disabled, or not configured to count the software increment
+ event, the write to this bit is ignored. */
+#else /* Word 0 - Little Endian */
+ uint32_t p : 31; /**< [ 30: 0](RO) Event counter software increment bit for PMEVCNTR\<x\>.
+ Bits [30:N] are RAZ/WI.
+ When EL2 is implemented, in nonsecure EL1 and EL0, N is the
+ value in AP_MDCR_EL2[HPMN]. Otherwise, N is the value in PMCR[N].
+ The effects of writing to this bit are:
+ 0 = No action. The write to this bit is ignored.
+ 1 = If PMEVCNTR\<x\> is enabled and configured to count the software
+ increment event, increments PMEVCNTR\<x\> by 1. If PMEVCNTR\<x\>
+ is disabled, or not configured to count the software increment
+ event, the write to this bit is ignored. */
+ uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_pmswinc_el0_s cn8; */
+ struct bdk_ap_pmswinc_el0_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_31 : 1;
+ uint32_t p : 31; /**< [ 30: 0](WO) Event counter software increment bit for PMEVCNTR\<x\>.
+ Bits [30:N] are RAZ/WI.
+ When EL2 is implemented, in nonsecure EL1 and EL0, N is the
+ value in AP_MDCR_EL2[HPMN]. Otherwise, N is the value in PMCR[N].
+ The effects of writing to this bit are:
+ 0 = No action. The write to this bit is ignored.
+ 1 = If PMEVCNTR\<x\> is enabled and configured to count the software
+ increment event, increments PMEVCNTR\<x\> by 1. If PMEVCNTR\<x\>
+ is disabled, or not configured to count the software increment
+ event, the write to this bit is ignored. */
+#else /* Word 0 - Little Endian */
+ uint32_t p : 31; /**< [ 30: 0](WO) Event counter software increment bit for PMEVCNTR\<x\>.
+ Bits [30:N] are RAZ/WI.
+ When EL2 is implemented, in nonsecure EL1 and EL0, N is the
+ value in AP_MDCR_EL2[HPMN]. Otherwise, N is the value in PMCR[N].
+ The effects of writing to this bit are:
+ 0 = No action. The write to this bit is ignored.
+ 1 = If PMEVCNTR\<x\> is enabled and configured to count the software
+ increment event, increments PMEVCNTR\<x\> by 1. If PMEVCNTR\<x\>
+ is disabled, or not configured to count the software increment
+ event, the write to this bit is ignored. */
+ uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_ap_pmswinc_el0 bdk_ap_pmswinc_el0_t;
+
+#define BDK_AP_PMSWINC_EL0 BDK_AP_PMSWINC_EL0_FUNC()
+static inline uint64_t BDK_AP_PMSWINC_EL0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_PMSWINC_EL0_FUNC(void)
+{
+ return 0x303090c0400ll;
+}
+
+#define typedef_BDK_AP_PMSWINC_EL0 bdk_ap_pmswinc_el0_t
+#define bustype_BDK_AP_PMSWINC_EL0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_PMSWINC_EL0 "AP_PMSWINC_EL0"
+#define busnum_BDK_AP_PMSWINC_EL0 0
+#define arguments_BDK_AP_PMSWINC_EL0 -1,-1,-1,-1
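+
+/* Editor's note: an illustrative sketch, not from the imported sources.
+ * Writing this value to AP_PMSWINC_EL0 increments the selected event counter
+ * by 1 if it is enabled and configured to count the software increment event
+ * (event 0x0); the counter index must be below 31. */
+static inline uint32_t example_encode_pmswinc(unsigned int counter)
+{
+ bdk_ap_pmswinc_el0_t swinc;
+ swinc.u = 0;
+ swinc.s.p = 1u << (counter % 31); /* one set bit per counter to increment */
+ return swinc.u;
+}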
+
+/**
+ * Register (SYSREG) ap_pmuserenr_el0
+ *
+ * AP Performance Monitors User Enable Register
+ * Enables or disables EL0 access to the Performance Monitors.
+ */
+union bdk_ap_pmuserenr_el0
+{
+ uint32_t u;
+ struct bdk_ap_pmuserenr_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_4_31 : 28;
+ uint32_t er : 1; /**< [ 3: 3](R/W) Event counter read enable.
+ 0 = EL0 read access to AP_PMXEVCNTR_EL0 / PMEVCNTR\<n\>_EL0 and
+ read/write access to AP_PMSELR_EL0 disabled if AP_PMUSERENR_EL0[EN]
+ is also 0.
+ 1 = EL0 read access to AP_PMXEVCNTR_EL0 / PMEVCNTR\<n\>_EL0 and
+ read/write access to AP_PMSELR_EL0 enabled. */
+ uint32_t cr : 1; /**< [ 2: 2](R/W) Cycle counter read enable.
+ 0 = EL0 read access to AP_PMCCNTR_EL0 disabled if AP_PMUSERENR_EL0[EN] is
+ also 0.
+ 1 = EL0 read access to AP_PMCCNTR_EL0 enabled. */
+ uint32_t sw : 1; /**< [ 1: 1](R/W) Software Increment write enable.
+ 0 = EL0 write access to AP_PMSWINC_EL0 disabled if AP_PMUSERENR_EL0[EN]
+ is also 0.
+ 1 = EL0 write access to AP_PMSWINC_EL0 enabled. */
+ uint32_t en : 1; /**< [ 0: 0](R/W) EL0 access enable bit.
+ 0 = EL0 access to the Performance Monitors disabled.
+ 1 = EL0 access to the Performance Monitors enabled. Can access all
+ PMU registers at EL0, except for writes to AP_PMUSERENR_EL0 and
+ reads/writes of AP_PMINTENSET_EL1 and AP_PMINTENCLR_EL1. */
+#else /* Word 0 - Little Endian */
+ uint32_t en : 1; /**< [ 0: 0](R/W) EL0 access enable bit.
+ 0 = EL0 access to the Performance Monitors disabled.
+ 1 = EL0 access to the Performance Monitors enabled. Can access all
+ PMU registers at EL0, except for writes to AP_PMUSERENR_EL0 and
+ reads/writes of AP_PMINTENSET_EL1 and AP_PMINTENCLR_EL1. */
+ uint32_t sw : 1; /**< [ 1: 1](R/W) Software Increment write enable.
+ 0 = EL0 write access to AP_PMSWINC_EL0 disabled if AP_PMUSERENR_EL0[EN]
+ is also 0.
+ 1 = EL0 write access to AP_PMSWINC_EL0 enabled. */
+ uint32_t cr : 1; /**< [ 2: 2](R/W) Cycle counter read enable.
+ 0 = EL0 read access to AP_PMCCNTR_EL0 disabled if AP_PMUSERENR_EL0[EN] is
+ also 0.
+ 1 = EL0 read access to AP_PMCCNTR_EL0 enabled. */
+ uint32_t er : 1; /**< [ 3: 3](R/W) Event counter read enable.
+ 0 = EL0 read access to AP_PMXEVCNTR_EL0 / PMEVCNTR\<n\>_EL0 and
+ read/write access to AP_PMSELR_EL0 disabled if AP_PMUSERENR_EL0[EN]
+ is also 0.
+ 1 = EL0 read access to AP_PMXEVCNTR_EL0 / PMEVCNTR\<n\>_EL0 and
+ read/write access to AP_PMSELR_EL0 enabled. */
+ uint32_t reserved_4_31 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_pmuserenr_el0_s cn; */
+};
+typedef union bdk_ap_pmuserenr_el0 bdk_ap_pmuserenr_el0_t;
+
+#define BDK_AP_PMUSERENR_EL0 BDK_AP_PMUSERENR_EL0_FUNC()
+static inline uint64_t BDK_AP_PMUSERENR_EL0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_PMUSERENR_EL0_FUNC(void)
+{
+ return 0x303090e0000ll;
+}
+
+#define typedef_BDK_AP_PMUSERENR_EL0 bdk_ap_pmuserenr_el0_t
+#define bustype_BDK_AP_PMUSERENR_EL0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_PMUSERENR_EL0 "AP_PMUSERENR_EL0"
+#define busnum_BDK_AP_PMUSERENR_EL0 0
+#define arguments_BDK_AP_PMUSERENR_EL0 -1,-1,-1,-1
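+
+/* Editor's note: a hedged sketch, not BDK code, encoding full EL0 access to
+ * the Performance Monitors as described above: [EN] plus the individual
+ * read/write enables. */
+static inline uint32_t example_encode_pmuserenr_all(void)
+{
+ bdk_ap_pmuserenr_el0_t uen;
+ uen.u = 0;
+ uen.s.en = 1; /* general EL0 access */
+ uen.s.sw = 1; /* EL0 writes to AP_PMSWINC_EL0 */
+ uen.s.cr = 1; /* EL0 reads of AP_PMCCNTR_EL0 */
+ uen.s.er = 1; /* EL0 event counter reads and AP_PMSELR_EL0 access */
+ return uen.u;
+}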
+
+/**
+ * Register (SYSREG) ap_pmxevcntr_el0
+ *
+ * AP Performance Monitors Selected Event Count Register
+ * Reads or writes the value of the selected event counter,
+ * PMEVCNTR\<x\>_EL0. AP_PMSELR_EL0[SEL] determines which event counter
+ * is selected.
+ */
+union bdk_ap_pmxevcntr_el0
+{
+ uint32_t u;
+ struct bdk_ap_pmxevcntr_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t pmevcntr : 32; /**< [ 31: 0](R/W) Value of the selected event counter, PMEVCNTR\<x\>_EL0, where x
+ is the value stored in AP_PMSELR_EL0[SEL]. */
+#else /* Word 0 - Little Endian */
+ uint32_t pmevcntr : 32; /**< [ 31: 0](R/W) Value of the selected event counter, PMEVCNTR\<x\>_EL0, where x
+ is the value stored in AP_PMSELR_EL0[SEL]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_pmxevcntr_el0_s cn; */
+};
+typedef union bdk_ap_pmxevcntr_el0 bdk_ap_pmxevcntr_el0_t;
+
+#define BDK_AP_PMXEVCNTR_EL0 BDK_AP_PMXEVCNTR_EL0_FUNC()
+static inline uint64_t BDK_AP_PMXEVCNTR_EL0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_PMXEVCNTR_EL0_FUNC(void)
+{
+ return 0x303090d0200ll;
+}
+
+#define typedef_BDK_AP_PMXEVCNTR_EL0 bdk_ap_pmxevcntr_el0_t
+#define bustype_BDK_AP_PMXEVCNTR_EL0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_PMXEVCNTR_EL0 "AP_PMXEVCNTR_EL0"
+#define busnum_BDK_AP_PMXEVCNTR_EL0 0
+#define arguments_BDK_AP_PMXEVCNTR_EL0 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_pmxevtyper_el0
+ *
+ * AP Performance Monitors Selected Event Type Register
+ * When AP_PMSELR_EL0[SEL] selects an event counter, this accesses a
+ * PMEVTYPER\<n\>_EL0 register. When AP_PMSELR_EL0[SEL] selects the
+ * cycle counter, this accesses AP_PMCCFILTR_EL0.
+ */
+union bdk_ap_pmxevtyper_el0
+{
+ uint32_t u;
+ struct bdk_ap_pmxevtyper_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t data : 32; /**< [ 31: 0](R/W) Event type register or AP_PMCCFILTR_EL0.
+ When AP_PMSELR_EL0[SEL] == 31, this register accesses
+ AP_PMCCFILTR_EL0.
+ Otherwise, this register accesses PMEVTYPER\<n\>_EL0 where n is
+ the value in AP_PMSELR_EL0[SEL]. */
+#else /* Word 0 - Little Endian */
+ uint32_t data : 32; /**< [ 31: 0](R/W) Event type register or AP_PMCCFILTR_EL0.
+ When AP_PMSELR_EL0[SEL] == 31, this register accesses
+ AP_PMCCFILTR_EL0.
+ Otherwise, this register accesses PMEVTYPER\<n\>_EL0 where n is
+ the value in AP_PMSELR_EL0[SEL]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_pmxevtyper_el0_s cn; */
+};
+typedef union bdk_ap_pmxevtyper_el0 bdk_ap_pmxevtyper_el0_t;
+
+#define BDK_AP_PMXEVTYPER_EL0 BDK_AP_PMXEVTYPER_EL0_FUNC()
+static inline uint64_t BDK_AP_PMXEVTYPER_EL0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_PMXEVTYPER_EL0_FUNC(void)
+{
+ return 0x303090d0100ll;
+}
+
+#define typedef_BDK_AP_PMXEVTYPER_EL0 bdk_ap_pmxevtyper_el0_t
+#define bustype_BDK_AP_PMXEVTYPER_EL0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_PMXEVTYPER_EL0 "AP_PMXEVTYPER_EL0"
+#define busnum_BDK_AP_PMXEVTYPER_EL0 0
+#define arguments_BDK_AP_PMXEVTYPER_EL0 -1,-1,-1,-1
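+
+/* Editor's note: a sketch of the select-then-access pattern implied by
+ * AP_PMSELR_EL0 and the two registers above; it is not part of the imported
+ * sources, and the two function pointers are hypothetical stand-ins for the
+ * platform's MSR/MRS accessors. */
+static inline uint32_t example_read_selected_counter(unsigned int counter,
+ void (*write_pmselr)(uint32_t), uint32_t (*read_pmxevcntr)(void))
+{
+ bdk_ap_pmselr_el0_t sel;
+ sel.u = 0;
+ sel.s.sel = counter & 0x1f; /* select the event counter */
+ write_pmselr(sel.u); /* subsequent AP_PMXEV* accesses target it */
+ return read_pmxevcntr(); /* value of the selected event counter */
+}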
+
+/**
+ * Register (SYSREG) ap_revidr_el1
+ *
+ * AP Revision ID Register
+ * This register provides implementation-specific minor revision information
+ * that can only be interpreted in conjunction with AP_MIDR_EL1.
+ */
+union bdk_ap_revidr_el1
+{
+ uint32_t u;
+ struct bdk_ap_revidr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_revidr_el1_s cn; */
+};
+typedef union bdk_ap_revidr_el1 bdk_ap_revidr_el1_t;
+
+#define BDK_AP_REVIDR_EL1 BDK_AP_REVIDR_EL1_FUNC()
+static inline uint64_t BDK_AP_REVIDR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_REVIDR_EL1_FUNC(void)
+{
+ return 0x30000000600ll;
+}
+
+#define typedef_BDK_AP_REVIDR_EL1 bdk_ap_revidr_el1_t
+#define bustype_BDK_AP_REVIDR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_REVIDR_EL1 "AP_REVIDR_EL1"
+#define busnum_BDK_AP_REVIDR_EL1 0
+#define arguments_BDK_AP_REVIDR_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_rmr_el#
+ *
+ * AP Reset Management non-EL3 Register
+ * Reset control for EL1 or EL2. Does not exist, since EL3 exists.
+ */
+union bdk_ap_rmr_elx
+{
+ uint32_t u;
+ struct bdk_ap_rmr_elx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_rmr_elx_s cn; */
+};
+typedef union bdk_ap_rmr_elx bdk_ap_rmr_elx_t;
+
+static inline uint64_t BDK_AP_RMR_ELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_RMR_ELX(unsigned long a)
+{
+ if ((a>=1)&&(a<=2))
+ return 0x3000c000200ll + 0ll * ((a) & 0x3);
+ __bdk_csr_fatal("AP_RMR_ELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_RMR_ELX(a) bdk_ap_rmr_elx_t
+#define bustype_BDK_AP_RMR_ELX(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_RMR_ELX(a) "AP_RMR_ELX"
+#define busnum_BDK_AP_RMR_ELX(a) (a)
+#define arguments_BDK_AP_RMR_ELX(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_rmr_el3
+ *
+ * AP Reset Management EL3 Register
+ * If EL3 is the highest Exception level implemented, and is
+ * capable of using both AArch32 and AArch64, controls the
+ * Execution state that the processor boots into and allows
+ * request of a Warm reset.
+ *
+ * Not implemented on CNXXXX - no 32 bit support.
+ */
+union bdk_ap_rmr_el3
+{
+ uint32_t u;
+ struct bdk_ap_rmr_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_2_31 : 30;
+ uint32_t rr : 1; /**< [ 1: 1](R/W) When set to 1 this bit requests a Warm reset. The bit is
+ strictly a request. */
+ uint32_t aa64 : 1; /**< [ 0: 0](R/W) Determines which Execution state the processor boots into
+ after a Warm reset:
+ The reset vector address on reset takes a choice between two
+ IMP DEF values, depending on the value in the AA64 bit.
+ 0 = AArch32.
+ 1 = AArch64. */
+#else /* Word 0 - Little Endian */
+ uint32_t aa64 : 1; /**< [ 0: 0](R/W) Determines which Execution state the processor boots into
+ after a Warm reset:
+ The reset vector address on reset takes a choice between two
+ IMP DEF values, depending on the value in the AA64 bit.
+ 0 = AArch32.
+ 1 = AArch64. */
+ uint32_t rr : 1; /**< [ 1: 1](R/W) When set to 1 this bit requests a Warm reset. The bit is
+ strictly a request. */
+ uint32_t reserved_2_31 : 30;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_rmr_el3_s cn8; */
+ struct bdk_ap_rmr_el3_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_2_31 : 30;
+ uint32_t rr : 1; /**< [ 1: 1](RAZ) When set to 1 this bit requests a Warm reset. The bit is
+ strictly a request. */
+ uint32_t aa64 : 1; /**< [ 0: 0](RAZ) Determines which Execution state the processor boots into
+ after a Warm reset:
+ The reset vector address on reset takes a choice between two
+ IMP DEF values, depending on the value in the AA64 bit.
+ 0 = AArch32.
+ 1 = AArch64. */
+#else /* Word 0 - Little Endian */
+ uint32_t aa64 : 1; /**< [ 0: 0](RAZ) Determines which Execution state the processor boots into
+ after a Warm reset:
+ The reset vector address on reset takes a choice between two
+ IMP DEF values, depending on the value in the AA64 bit.
+ 0 = AArch32.
+ 1 = AArch64. */
+ uint32_t rr : 1; /**< [ 1: 1](RAZ) When set to 1 this bit requests a Warm reset. The bit is
+ strictly a request. */
+ uint32_t reserved_2_31 : 30;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_ap_rmr_el3 bdk_ap_rmr_el3_t;
+
+#define BDK_AP_RMR_EL3 BDK_AP_RMR_EL3_FUNC()
+static inline uint64_t BDK_AP_RMR_EL3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_RMR_EL3_FUNC(void)
+{
+ return 0x3060c000200ll;
+}
+
+#define typedef_BDK_AP_RMR_EL3 bdk_ap_rmr_el3_t
+#define bustype_BDK_AP_RMR_EL3 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_RMR_EL3 "AP_RMR_EL3"
+#define busnum_BDK_AP_RMR_EL3 0
+#define arguments_BDK_AP_RMR_EL3 -1,-1,-1,-1
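+
+/* Editor's note: a hedged sketch, not BDK code, encoding a Warm reset
+ * request into AArch64 state. On CN9XXX both fields are RAZ per the cn9
+ * layout above, so the request would have no effect there. */
+static inline uint32_t example_encode_rmr_el3_warm_reset(void)
+{
+ bdk_ap_rmr_el3_t rmr;
+ rmr.u = 0;
+ rmr.s.aa64 = 1; /* boot into AArch64 after the Warm reset */
+ rmr.s.rr = 1; /* request the Warm reset (strictly a request) */
+ return rmr.u;
+}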
+
+/**
+ * Register (SYSREG) ap_rvbar_el#
+ *
+ * AP Reset Vector Base Address non-EL3 Register
+ * Reset vector for EL1 or EL2. Does not exist, since EL3 exists.
+ */
+union bdk_ap_rvbar_elx
+{
+ uint64_t u;
+ struct bdk_ap_rvbar_elx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_rvbar_elx_s cn; */
+};
+typedef union bdk_ap_rvbar_elx bdk_ap_rvbar_elx_t;
+
+static inline uint64_t BDK_AP_RVBAR_ELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_RVBAR_ELX(unsigned long a)
+{
+ if ((a>=1)&&(a<=2))
+ return 0x3000c000100ll + 0ll * ((a) & 0x3);
+ __bdk_csr_fatal("AP_RVBAR_ELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_RVBAR_ELX(a) bdk_ap_rvbar_elx_t
+#define bustype_BDK_AP_RVBAR_ELX(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_RVBAR_ELX(a) "AP_RVBAR_ELX"
+#define busnum_BDK_AP_RVBAR_ELX(a) (a)
+#define arguments_BDK_AP_RVBAR_ELX(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_rvbar_el3
+ *
+ * AP Reset Vector Base Address EL3 Register
+ * If EL3 is the highest exception level implemented, contains
+ * the implementation defined address that execution starts from
+ * after reset when executing in AArch64 state.
+ */
+union bdk_ap_rvbar_el3
+{
+ uint64_t u;
+ struct bdk_ap_rvbar_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](RO) Reset Vector Base Address. If this Exception level is the
+ highest one implemented, this field contains the
+ implementation defined address that execution starts from
+ after reset when executing in 64-bit state. Bits[1:0] of this
+ register are 00, as this address must be aligned, and the
+ address must be within the physical address size supported by
+ the processor.
+
+ If this Exception level is not the highest one implemented,
+ then this register is not implemented and its encoding is
+ UNdefined. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](RO) Reset Vector Base Address. If this Exception level is the
+ highest one implemented, this field contains the
+ implementation defined address that execution starts from
+ after reset when executing in 64-bit state. Bits[1:0] of this
+ register are 00, as this address must be aligned, and the
+ address must be within the physical address size supported by
+ the processor.
+
+ If this Exception level is not the highest one implemented,
+ then this register is not implemented and its encoding is
+ UNdefined. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_rvbar_el3_s cn; */
+};
+typedef union bdk_ap_rvbar_el3 bdk_ap_rvbar_el3_t;
+
+#define BDK_AP_RVBAR_EL3 BDK_AP_RVBAR_EL3_FUNC()
+static inline uint64_t BDK_AP_RVBAR_EL3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_RVBAR_EL3_FUNC(void)
+{
+ return 0x3060c000100ll;
+}
+
+#define typedef_BDK_AP_RVBAR_EL3 bdk_ap_rvbar_el3_t
+#define bustype_BDK_AP_RVBAR_EL3 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_RVBAR_EL3 "AP_RVBAR_EL3"
+#define busnum_BDK_AP_RVBAR_EL3 0
+#define arguments_BDK_AP_RVBAR_EL3 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_scr_el3
+ *
+ * AP Secure Configuration Register
+ * Defines the configuration of the current Security state. It
+ * specifies:
+ * The Security state of EL0 and EL1, either Secure or nonsecure.
+ * The Execution state at lower Exception levels.
+ * Whether IRQ, FIQ, and External Abort interrupts are taken to
+ * EL3.
+ */
+union bdk_ap_scr_el3
+{
+ uint32_t u;
+ struct bdk_ap_scr_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_16_31 : 16;
+ uint32_t terr : 1; /**< [ 15: 15](R/W) Trap Error record accesses.
+ 0 = Do not trap accesses to error record registers from EL1 and EL2 to EL3.
+ 1 = Accesses to the ER* registers from EL1 and EL2 generate a Trap exception to EL3. */
+ uint32_t tlor : 1; /**< [ 14: 14](R/W) v8.1: Trap access to the LOR
+ Registers from EL1 and EL2 to EL3, unless the access has been
+ trapped to EL2:
+ 0 = EL1 and EL2 accesses to the LOR Registers are not trapped to EL3.
+ 1 = EL1 and EL2 accesses to the LOR Registers are trapped to EL3
+ unless the access has been trapped to EL2 as a result of the
+ AP_HCR_EL2[TLOR]. */
+ uint32_t twe : 1; /**< [ 13: 13](R/W) Trap WFE.
+ 0 = WFE instructions not trapped.
+ 1 = WFE instructions executed in AArch32 or AArch64 at EL2, EL1,
+ or EL0 are trapped to EL3 if the instruction would otherwise
+ cause suspension of execution, i.e. if there is not a pending
+ WFE wakeup event and the instruction does not cause another
+ exception. */
+ uint32_t twi : 1; /**< [ 12: 12](R/W) Trap WFI.
+ 0 = WFI instructions not trapped.
+ 1 = WFI instructions executed in AArch32 or AArch64 at EL2, EL1,
+ or EL0 are trapped to EL3 if the instruction would otherwise
+ cause suspension of execution. */
+ uint32_t st : 1; /**< [ 11: 11](R/W) Enables Secure EL1 access to the AP_CNTPS_TVAL_EL1,
+ AP_CNTPS_CTL_EL1, and AP_CNTPS_CVAL_EL1 registers.
+
+ If this bit is 0 and there is a Secure EL1 access to one of
+ the CNTPS registers:
+ An exception is taken to EL3.
+ The exception class for this exception, as returned in
+ ESR_EL3[EC], is 0x18.
+ 0 = These registers are only accessible in EL3.
+ 1 = These registers are accessible in EL3 and also in EL1 when
+ AP_SCR_EL3[NS]==0. */
+ uint32_t rsvd_10 : 1; /**< [ 10: 10](RO) Execution state control for lower Exception levels.
+ This bit is permitted to be cached in a TLB.
+ 0 = Lower levels are all AArch32.
+ 1 = The next lower level is AArch64.
+
+ If EL2 is present:
+ EL2 is AArch64.
+ EL2 controls EL1 and EL0 behaviors.
+
+ If EL2 is not present:
+ EL1 is AArch64.
+ EL0 is determined by the Execution state described in the
+ current process state when executing at EL0. */
+ uint32_t sif : 1; /**< [ 9: 9](R/W) Secure instruction fetch. When the processor is in Secure
+ state, this bit disables instruction fetch from nonsecure
+ memory.
+ This bit is permitted to be cached in a TLB.
+ 0 = Secure state instruction fetches from nonsecure memory are
+ permitted.
+ 1 = Secure state instruction fetches from nonsecure memory are
+ not permitted. */
+ uint32_t hce : 1; /**< [ 8: 8](R/W) Hypervisor Call enable. This bit enables use of the HVC
+ instruction from nonsecure EL1 modes.
+
+ If EL3 is implemented but EL2 is not implemented, this bit is
+ RES0.
+ 0 = HVC instruction is UNdefined in nonsecure EL1 modes, and
+ either UNdefined or a NOP in Hyp mode, depending on the
+ implementation.
+ 1 = HVC instruction is enabled in nonsecure EL1 modes, and
+ performs a Hypervisor Call. */
+ uint32_t smd : 1; /**< [ 7: 7](R/W) SMC Disable.
+ 0 = SMC is enabled at EL1, EL2, or EL3.
+ 1 = SMC is UNdefined at all Exception levels. At EL1 in the
+ nonsecure state, the AP_HCR_EL2[TSC] bit has priority over this
+ control. */
+ uint32_t reserved_6 : 1;
+ uint32_t rsvd_4_5 : 2; /**< [ 5: 4](RO) Reserved 1. */
+ uint32_t ea : 1; /**< [ 3: 3](R/W) External Abort and SError Interrupt Routing.
+ 0 = External Aborts and SError Interrupts while executing at
+ exception levels other than EL3 are not taken in EL3.
+ 1 = External Aborts and SError Interrupts while executing at all
+ exception levels are taken in EL3. */
+ uint32_t fiq : 1; /**< [ 2: 2](R/W) Physical FIQ Routing.
+ 0 = Physical FIQ while executing at exception levels other than
+ EL3 are not taken in EL3.
+ 1 = Physical FIQ while executing at all exception levels are taken
+ in EL3. */
+ uint32_t irq : 1; /**< [ 1: 1](R/W) Physical IRQ Routing.
+ 0 = Physical IRQ while executing at exception levels other than
+ EL3 are not taken in EL3.
+ 1 = Physical IRQ while executing at all exception levels are taken
+ in EL3. */
+ uint32_t nsec : 1; /**< [ 0: 0](R/W) Nonsecure bit.
+ 0 = Indicates that EL0 and EL1 are in Secure state, and so memory
+ accesses from those Exception levels can access Secure memory.
+ 1 = Indicates that EL0 and EL1 are in nonsecure state, and so
+ memory accesses from those Exception levels cannot access
+ Secure memory. */
+#else /* Word 0 - Little Endian */
+ uint32_t nsec : 1; /**< [ 0: 0](R/W) Nonsecure bit.
+ 0 = Indicates that EL0 and EL1 are in Secure state, and so memory
+ accesses from those Exception levels can access Secure memory.
+ 1 = Indicates that EL0 and EL1 are in nonsecure state, and so
+ memory accesses from those Exception levels cannot access
+ Secure memory. */
+ uint32_t irq : 1; /**< [ 1: 1](R/W) Physical IRQ Routing.
+ 0 = Physical IRQ while executing at exception levels other than
+ EL3 are not taken in EL3.
+ 1 = Physical IRQ while executing at all exception levels are taken
+ in EL3. */
+ uint32_t fiq : 1; /**< [ 2: 2](R/W) Physical FIQ Routing.
+ 0 = Physical FIQ while executing at exception levels other than
+ EL3 are not taken in EL3.
+ 1 = Physical FIQ while executing at all exception levels are taken
+ in EL3. */
+ uint32_t ea : 1; /**< [ 3: 3](R/W) External Abort and SError Interrupt Routing.
+ 0 = External Aborts and SError Interrupts while executing at
+ exception levels other than EL3 are not taken in EL3.
+ 1 = External Aborts and SError Interrupts while executing at all
+ exception levels are taken in EL3. */
+ uint32_t rsvd_4_5 : 2; /**< [ 5: 4](RO) Reserved 1. */
+ uint32_t reserved_6 : 1;
+ uint32_t smd : 1; /**< [ 7: 7](R/W) SMC Disable.
+ 0 = SMC is enabled at EL1, EL2, or EL3.
+ 1 = SMC is UNdefined at all Exception levels. At EL1 in the
+ nonsecure state, the AP_HCR_EL2[TSC] bit has priority over this
+ control. */
+ uint32_t hce : 1; /**< [ 8: 8](R/W) Hypervisor Call enable. This bit enables use of the HVC
+ instruction from nonsecure EL1 modes.
+
+ If EL3 is implemented but EL2 is not implemented, this bit is
+ RES0.
+ 0 = HVC instruction is UNdefined in nonsecure EL1 modes, and
+ either UNdefined or a NOP in Hyp mode, depending on the
+ implementation.
+ 1 = HVC instruction is enabled in nonsecure EL1 modes, and
+ performs a Hypervisor Call. */
+ uint32_t sif : 1; /**< [ 9: 9](R/W) Secure instruction fetch. When the processor is in Secure
+ state, this bit disables instruction fetch from nonsecure
+ memory.
+ This bit is permitted to be cached in a TLB.
+ 0 = Secure state instruction fetches from nonsecure memory are
+ permitted.
+ 1 = Secure state instruction fetches from nonsecure memory are
+ not permitted. */
+ uint32_t rsvd_10 : 1; /**< [ 10: 10](RO) Execution state control for lower Exception levels.
+ This bit is permitted to be cached in a TLB.
+ 0 = Lower levels are all AArch32.
+ 1 = The next lower level is AArch64.
+
+ If EL2 is present:
+ EL2 is AArch64.
+ EL2 controls EL1 and EL0 behaviors.
+
+ If EL2 is not present:
+ EL1 is AArch64.
+ EL0 is determined by the Execution state described in the
+ current process state when executing at EL0. */
+ uint32_t st : 1; /**< [ 11: 11](R/W) Enables Secure EL1 access to the AP_CNTPS_TVAL_EL1,
+ AP_CNTPS_CTL_EL1, and AP_CNTPS_CVAL_EL1 registers.
+
+ If this bit is 0 and there is a Secure EL1 access to one of
+ the CNTPS registers:
+ An exception is taken to EL3.
+ The exception class for this exception, as returned in
+ ESR_EL3[EC], is 0x18.
+ 0 = These registers are only accessible in EL3.
+ 1 = These registers are accessible in EL3 and also in EL1 when
+ AP_SCR_EL3[NS]==0. */
+ uint32_t twi : 1; /**< [ 12: 12](R/W) Trap WFI.
+ 0 = WFI instructions not trapped.
+ 1 = WFI instructions executed in AArch32 or AArch64 at EL2, EL1,
+ or EL0 are trapped to EL3 if the instruction would otherwise
+ cause suspension of execution. */
+ uint32_t twe : 1; /**< [ 13: 13](R/W) Trap WFE.
+ 0 = WFE instructions not trapped.
+ 1 = WFE instructions executed in AArch32 or AArch64 at EL2, EL1,
+ or EL0 are trapped to EL3 if the instruction would otherwise
+ cause suspension of execution, i.e. if there is not a pending
+ WFE wakeup event and the instruction does not cause another
+ exception. */
+ uint32_t tlor : 1; /**< [ 14: 14](R/W) v8.1: Trap access to the LOR
+ Registers from EL1 and EL2 to EL3, unless the access has been
+ trapped to EL2:
+ 0 = EL1 and EL2 accesses to the LOR Registers are not trapped to EL3.
+ 1 = EL1 and EL2 accesses to the LOR Registers are trapped to EL3
+ unless the access has been trapped to EL2 as a result of the
+ AP_HCR_EL2[TLOR]. */
+ uint32_t terr : 1; /**< [ 15: 15](R/W) Trap Error record accesses.
+ 0 = Do not trap accesses to error record registers from EL1 and EL2 to EL3.
+ 1 = Accesses to the ER* registers from EL1 and EL2 generate a Trap exception to EL3. */
+ uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_scr_el3_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_15_31 : 17;
+ uint32_t tlor : 1; /**< [ 14: 14](R/W) v8.1: Trap access to the LOR
+ Registers from EL1 and EL2 to EL3, unless the access has been
+ trapped to EL2:
+ 0 = EL1 and EL2 accesses to the LOR Registers are not trapped to EL3.
+ 1 = EL1 and EL2 accesses to the LOR Registers are trapped to EL3
+ unless the access has been trapped to EL2 as a result of the
+ AP_HCR_EL2[TLOR]. */
+ uint32_t twe : 1; /**< [ 13: 13](R/W) Trap WFE.
+ 0 = WFE instructions not trapped.
+ 1 = WFE instructions executed in AArch32 or AArch64 at EL2, EL1,
+ or EL0 are trapped to EL3 if the instruction would otherwise
+ cause suspension of execution, i.e. if there is not a pending
+ WFE wakeup event and the instruction does not cause another
+ exception. */
+ uint32_t twi : 1; /**< [ 12: 12](R/W) Trap WFI.
+ 0 = WFI instructions not trapped.
+ 1 = WFI instructions executed in AArch32 or AArch64 at EL2, EL1,
+ or EL0 are trapped to EL3 if the instruction would otherwise
+ cause suspension of execution. */
+ uint32_t st : 1; /**< [ 11: 11](R/W) Enables Secure EL1 access to the AP_CNTPS_TVAL_EL1,
+ AP_CNTPS_CTL_EL1, and AP_CNTPS_CVAL_EL1 registers.
+
+ If this bit is 0 and there is a Secure EL1 access to one of
+ the CNTPS registers:
+ An exception is taken to EL3.
+ The exception class for this exception, as returned in
+ ESR_EL3[EC], is 0x18.
+ 0 = These registers are only accessible in EL3.
+ 1 = These registers are accessible in EL3 and also in EL1 when
+ AP_SCR_EL3[NS]==0. */
+ uint32_t rsvd_10 : 1; /**< [ 10: 10](RO) Execution state control for lower Exception levels.
+ This bit is permitted to be cached in a TLB.
+ 0 = Lower levels are all AArch32.
+ 1 = The next lower level is AArch64.
+
+ If EL2 is present:
+ EL2 is AArch64.
+ EL2 controls EL1 and EL0 behaviors.
+
+ If EL2 is not present:
+ EL1 is AArch64.
+ EL0 is determined by the Execution state described in the
+ current process state when executing at EL0. */
+ uint32_t sif : 1; /**< [ 9: 9](R/W) Secure instruction fetch. When the processor is in Secure
+ state, this bit disables instruction fetch from nonsecure
+ memory.
+ This bit is permitted to be cached in a TLB.
+ 0 = Secure state instruction fetches from nonsecure memory are
+ permitted.
+ 1 = Secure state instruction fetches from nonsecure memory are
+ not permitted. */
+ uint32_t hce : 1; /**< [ 8: 8](R/W) Hypervisor Call enable. This bit enables use of the HVC
+ instruction from nonsecure EL1 modes.
+
+ If EL3 is implemented but EL2 is not implemented, this bit is
+ RES0.
+ 0 = HVC instruction is UNdefined in nonsecure EL1 modes, and
+ either UNdefined or a NOP in Hyp mode, depending on the
+ implementation.
+ 1 = HVC instruction is enabled in nonsecure EL1 modes, and
+ performs a Hypervisor Call. */
+ uint32_t smd : 1; /**< [ 7: 7](R/W) SMC Disable.
+ 0 = SMC is enabled at EL1, EL2, or EL3.
+ 1 = SMC is UNdefined at all Exception levels. At EL1 in the
+ nonsecure state, the AP_HCR_EL2[TSC] bit has priority over this
+ control. */
+ uint32_t reserved_6 : 1;
+ uint32_t rsvd_4_5 : 2; /**< [ 5: 4](RO) Reserved 1. */
+ uint32_t ea : 1; /**< [ 3: 3](R/W) External Abort and SError Interrupt Routing.
+ 0 = External Aborts and SError Interrupts while executing at
+ exception levels other than EL3 are not taken in EL3.
+ 1 = External Aborts and SError Interrupts while executing at all
+ exception levels are taken in EL3. */
+ uint32_t fiq : 1; /**< [ 2: 2](R/W) Physical FIQ Routing.
+ 0 = Physical FIQ while executing at exception levels other than
+ EL3 are not taken in EL3.
+ 1 = Physical FIQ while executing at all exception levels are taken
+ in EL3. */
+ uint32_t irq : 1; /**< [ 1: 1](R/W) Physical IRQ Routing.
+ 0 = Physical IRQ while executing at exception levels other than
+ EL3 are not taken in EL3.
+ 1 = Physical IRQ while executing at all exception levels are taken
+ in EL3. */
+ uint32_t nsec : 1; /**< [ 0: 0](R/W) Nonsecure bit.
+ 0 = Indicates that EL0 and EL1 are in Secure state, and so memory
+ accesses from those Exception levels can access Secure memory.
+ 1 = Indicates that EL0 and EL1 are in nonsecure state, and so
+ memory accesses from those Exception levels cannot access
+ Secure memory. */
+#else /* Word 0 - Little Endian */
+ uint32_t nsec : 1; /**< [ 0: 0](R/W) Nonsecure bit.
+ 0 = Indicates that EL0 and EL1 are in Secure state, and so memory
+ accesses from those Exception levels can access Secure memory.
+ 1 = Indicates that EL0 and EL1 are in nonsecure state, and so
+ memory accesses from those Exception levels cannot access
+ Secure memory. */
+ uint32_t irq : 1; /**< [ 1: 1](R/W) Physical IRQ Routing.
+ 0 = Physical IRQ while executing at exception levels other than
+ EL3 are not taken in EL3.
+ 1 = Physical IRQ while executing at all exception levels are taken
+ in EL3. */
+ uint32_t fiq : 1; /**< [ 2: 2](R/W) Physical FIQ Routing.
+ 0 = Physical FIQ while executing at exception levels other than
+ EL3 are not taken in EL3.
+ 1 = Physical FIQ while executing at all exception levels are taken
+ in EL3. */
+ uint32_t ea : 1; /**< [ 3: 3](R/W) External Abort and SError Interrupt Routing.
+ 0 = External Aborts and SError Interrupts while executing at
+ exception levels other than EL3 are not taken in EL3.
+ 1 = External Aborts and SError Interrupts while executing at all
+ exception levels are taken in EL3. */
+ uint32_t rsvd_4_5 : 2; /**< [ 5: 4](RO) Reserved 1. */
+ uint32_t reserved_6 : 1;
+ uint32_t smd : 1; /**< [ 7: 7](R/W) SMC Disable.
+ 0 = SMC is enabled at EL1, EL2, or EL3.
+ 1 = SMC is UNdefined at all Exception levels. At EL1 in the
+ nonsecure state, the AP_HCR_EL2[TSC] bit has priority over this
+ control. */
+ uint32_t hce : 1; /**< [ 8: 8](R/W) Hypervisor Call enable. This bit enables use of the HVC
+ instruction from nonsecure EL1 modes.
+
+ If EL3 is implemented but EL2 is not implemented, this bit is
+ RES0.
+ 0 = HVC instruction is UNdefined in nonsecure EL1 modes, and
+ either UNdefined or a NOP in Hyp mode, depending on the
+ implementation.
+ 1 = HVC instruction is enabled in nonsecure EL1 modes, and
+ performs a Hypervisor Call. */
+ uint32_t sif : 1; /**< [ 9: 9](R/W) Secure instruction fetch. When the processor is in Secure
+ state, this bit disables instruction fetch from nonsecure
+ memory.
+ This bit is permitted to be cached in a TLB.
+ 0 = Secure state instruction fetches from nonsecure memory are
+ permitted.
+ 1 = Secure state instruction fetches from nonsecure memory are
+ not permitted. */
+ uint32_t rsvd_10 : 1; /**< [ 10: 10](RO) Execution state control for lower Exception levels.
+ This bit is permitted to be cached in a TLB.
+ 0 = Lower levels are all AArch32.
+ 1 = The next lower level is AArch64.
+
+ If EL2 is present:
+ EL2 is AArch64.
+ EL2 controls EL1 and EL0 behaviors.
+
+ If EL2 is not present:
+ EL1 is AArch64.
+ EL0 is determined by the Execution state described in the
+ current process state when executing at EL0. */
+ uint32_t st : 1; /**< [ 11: 11](R/W) Enables Secure EL1 access to the AP_CNTPS_TVAL_EL1,
+ AP_CNTPS_CTL_EL1, and AP_CNTPS_CVAL_EL1 registers.
+
+ If this bit is 0 and there is a Secure EL1 access to one of
+ the CNTPS registers:
+ An exception is taken to EL3.
+ The exception class for this exception, as returned in
+                                                                 ESR_EL3[EC], is 0x18.
+ 0 = These registers are only accessible in EL3.
+ 1 = These registers are accessible in EL3 and also in EL1 when
+ AP_SCR_EL3[NS]==0. */
+ uint32_t twi : 1; /**< [ 12: 12](R/W) Trap WFI.
+ 0 = WFI instructions not trapped.
+ 1 = WFI instructions executed in AArch32 or AArch64 at EL2, EL1,
+ or EL0 are trapped to EL3 if the instruction would otherwise
+ cause suspension of execution. */
+ uint32_t twe : 1; /**< [ 13: 13](R/W) Trap WFE.
+ 0 = WFE instructions not trapped.
+ 1 = WFE instructions executed in AArch32 or AArch64 at EL2, EL1,
+ or EL0 are trapped to EL3 if the instruction would otherwise
+ cause suspension of execution, i.e. if there is not a pending
+                                                                 WFE wakeup event and the instruction does not cause another
+ exception. */
+ uint32_t tlor : 1; /**< [ 14: 14](R/W) v8.1: Trap access to the LOR
+ Registers from EL1 and EL2 to EL3, unless the access has been
+ trapped to EL2:
+ 0 = EL1 and EL2 accesses to the LOR Registers are not trapped to EL3.
+ 1 = EL1 and EL2 accesses to the LOR Registers are trapped to EL3
+ unless the access has been trapped to EL2 as a result of the
+                                                                 AP_HCR_EL2[TLOR] bit. */
+ uint32_t reserved_15_31 : 17;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_ap_scr_el3_s cn9; */
+};
+typedef union bdk_ap_scr_el3 bdk_ap_scr_el3_t;
+
+#define BDK_AP_SCR_EL3 BDK_AP_SCR_EL3_FUNC()
+static inline uint64_t BDK_AP_SCR_EL3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_SCR_EL3_FUNC(void)
+{
+ return 0x30601010000ll;
+}
+
+#define typedef_BDK_AP_SCR_EL3 bdk_ap_scr_el3_t
+#define bustype_BDK_AP_SCR_EL3 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_SCR_EL3 "AP_SCR_EL3"
+#define busnum_BDK_AP_SCR_EL3 0
+#define arguments_BDK_AP_SCR_EL3 -1,-1,-1,-1
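+
+/* Usage sketch (not part of the imported BDK sources): decoding and updating
+ * AP_SCR_EL3 through the union above. BDK_CSR_READ()/BDK_CSR_WRITE() and
+ * bdk_numa_local() are assumed here from the surrounding BDK conventions;
+ * verify against bdk-csr.c and bdk-numa.c before relying on them.
+ *
+ *   bdk_ap_scr_el3_t scr;
+ *   scr.u = BDK_CSR_READ(bdk_numa_local(), BDK_AP_SCR_EL3);
+ *   if (!scr.s.nsec)           // EL0/EL1 currently in the Secure state
+ *       scr.s.smd = 1;         // make SMC UNdefined at all exception levels
+ *   BDK_CSR_WRITE(bdk_numa_local(), BDK_AP_SCR_EL3, scr.u);
+ */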
+
+/**
+ * Register (SYSREG) ap_sctlr_el1
+ *
+ * AP System Control EL1 Register
+ * Provides top level control of the system, including its memory
+ * system, at EL1.
+ */
+union bdk_ap_sctlr_el1
+{
+ uint32_t u;
+ struct bdk_ap_sctlr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_30_31 : 2;
+ uint32_t rsvd_28_29 : 2; /**< [ 29: 28](RO) Reserved 1. */
+ uint32_t reserved_27 : 1;
+ uint32_t uci : 1; /**< [ 26: 26](R/W) When set, enables EL0 access in AArch64 for DC CVAU, DC CIVAC,
+ DC CVAC, and IC IVAU instructions. */
+ uint32_t ee : 1; /**< [ 25: 25](R/W) Exception Endianness. This bit controls the endianness for:
+ Explicit data accesses at EL1.
+ Stage 1 translation table walks at EL1 and EL0.
+
+ If an implementation does not provide Big-endian support, this
+ bit is RES0. If it does not provide Little-endian support,
+ this bit is RES1.
+ The EE bit is permitted to be cached in a TLB.
+ 0 = Little-endian.
+ 1 = Big-endian. */
+ uint32_t e0e : 1; /**< [ 24: 24](R/W) Endianness of explicit data accesses at EL0.
+
+ If an implementation only supports Little-endian accesses at
+ EL0 then this bit is RES0.
+ If an implementation only supports Big-endian accesses at EL0
+ then this bit is RES1.
+ This bit has no effect on the endianness of LDTR* and STTR*
+ instructions executed at EL1.
+ 0 = Explicit data accesses at EL0 are little-endian.
+ 1 = Explicit data accesses at EL0 are big-endian. */
+        uint32_t span                  : 1;  /**< [ 23: 23](R/W) v8.1: SPAN: Set PSTATE/CPSR[AP_PAN] bit on taking an exception
+ to the EL1 exception level.
+ 0 = PSTATE/CPSR[AP_PAN] is set on taking an exception to the EL1 exception level.
+ 1 = PSTATE/CPSR[AP_PAN] is left unchanged on taking an exception to the EL1 exception
+ level.
+
+ This bit has no effect on the PSTATE/CPSR[AP_PAN] when taking exceptions to any other
+ exception level. */
+ uint32_t rsvd_22 : 1; /**< [ 22: 22](RO) Reserved 1. */
+ uint32_t reserved_21 : 1;
+ uint32_t rsvd_20 : 1; /**< [ 20: 20](RO) Reserved 1. */
+ uint32_t wxn : 1; /**< [ 19: 19](R/W) Write permission implies XN (Execute Never). This bit can be
+ used to require all memory regions with write permission to be
+ treated as XN.
+ The WXN bit is permitted to be cached in a TLB.
+ 0 = Regions with write permission are not forced to XN.
+ 1 = Regions with write permission are forced to XN. */
+ uint32_t ntwe : 1; /**< [ 18: 18](R/W) Not trap WFE.
+ Conditional WFE instructions that fail their condition do not
+ cause an exception if this bit is 0.
+ 0 = If a WFE instruction executed at EL0 would cause execution to
+ be suspended, such as if the event register is not set and
+ there is not a pending WFE wakeup event, it is taken as an
+                                                                 exception to EL1 using the 0x1 (trapped WFI/WFE) exception class.
+ 1 = WFE instructions are executed as normal. */
+ uint32_t reserved_17 : 1;
+ uint32_t ntwi : 1; /**< [ 16: 16](R/W) Not trap WFI.
+ Conditional WFI instructions that fail their condition do not
+ cause an exception if this bit is 0.
+ 0 = If a WFI instruction executed at EL0 would cause execution to
+ be suspended, such as if there is not a pending WFI wakeup
+                                                                 event, it is taken as an exception to EL1 using the 0x1 (trapped WFI/WFE) exception class.
+ 1 = WFI instructions are executed as normal. */
+ uint32_t uct : 1; /**< [ 15: 15](R/W) When set, enables EL0 access in AArch64 to the AP_CTR_EL0
+ register. */
+ uint32_t dze : 1; /**< [ 14: 14](R/W) Access to DC ZVA instruction at EL0.
+ 0 = Execution of the DC ZVA instruction is prohibited at EL0, and
+ it is treated as UNdefined at EL0.
+ 1 = Execution of the DC ZVA instruction is allowed at EL0. */
+ uint32_t reserved_13 : 1;
+ uint32_t i : 1; /**< [ 12: 12](R/W) Instruction cache enable. This is an enable bit for
+ instruction caches at EL0 and EL1:
+ When this bit is 0, all EL1 and EL0 Normal memory instruction
+ accesses are Non-cacheable.
+ If the AP_HCR_EL2[DC] bit is set to 1, then the nonsecure stage 1
+ EL1&0 translation regime is Cacheable regardless of the value
+ of this bit.
+ 0 = Instruction caches disabled at EL0 and EL1. If AP_SCTLR_EL1[M] is
+ set to 0, instruction accesses from stage 1 of the EL1&0
+ translation regime are to Normal memory, Outer Shareable,
+ Inner Non-cacheable, Outer Non-cacheable.
+ 1 = Instruction caches enabled at EL0 and EL1. If AP_SCTLR_EL1[M] is
+ set to 0, instruction accesses from stage 1 of the EL1&0
+ translation regime are to Normal memory, Outer Shareable,
+ Inner Write-Through, Outer Write-Through. */
+ uint32_t rsvd_11 : 1; /**< [ 11: 11](RO) Reserved 1. */
+ uint32_t reserved_10 : 1;
+ uint32_t uma : 1; /**< [ 9: 9](R/W) User Mask Access. Controls access to interrupt masks from EL0,
+ when EL0 is using AArch64.
+ 0 = Disable access to the interrupt masks from EL0.
+ 1 = Enable access to the interrupt masks from EL0. */
+ uint32_t rsvd_8 : 1; /**< [ 8: 8](RO) SETEND Disable.
+ If an implementation does not support mixed endian operation,
+ this bit is RES1.
+ 0 = The SETEND instruction is available.
+ 1 = The SETEND instruction is UNALLOCATED.
+
+ SED: SETEND Disable - Only supported with 32 bit. */
+ uint32_t rsvd_7 : 1; /**< [ 7: 7](RO) "IT Disable.
+ 0 = The IT instruction functionality is available.
+ 1 = It is implementation defined whether the IT instruction is
+ treated as either:
+
+ A 16-bit instruction, which can only be followed by another
+ 16-bit instruction.
+
+ The first half of a 32-bit instruction.
+
+ An implementation might vary dynamically as to whether IT is
+ treated as a 16-bit instruction or the first half of a 32-bit
+ instruction.
+ All encodings of the IT instruction with hw1[3:0]!=1000 are
+ UNdefined and treated as unallocated.
+ All encodings of the subsequent instruction with the following
+ values for hw1 are UNdefined (and treated as unallocated):
+
+ - 0b11xxxxxxxxxxxxxx: All 32-bit instructions, and the 16-bit
+ instructions B, UDF, SVC, LDM, and STM.
+                                                                 - 0b1x11xxxxxxxxxxxx: All instructions in the Miscellaneous 16-bit instructions group.
+ - 0b1x100xxxxxxxxxxx: ADD Rd, PC, #imm
+ - 0b01001xxxxxxxxxxx: LDR Rd, [PC, #imm]
+ - 0b0100x1xxx1111xxx: ADD Rdn, PC; CMP Rn, PC; MOV Rd, PC; BX
+ PC; BLX PC.
+ - 0b010001xx1xxxx111: ADD PC, Rm; CMP PC, Rm; MOV PC, Rm. This
+ pattern also covers UNPREDICTABLE cases with BLX Rn.
+
+ Contrary to the standard treatment of conditional UNdefined
+ instructions in the ARM architecture, in this case these
+ instructions are always treated as UNdefined, regardless of
+ whether the instruction would pass or fail its condition codes
+ as a result of being in an IT block.
+
+ ITD: IT Disable - Only supported with 32 bit." */
+ uint32_t reserved_5_6 : 2;
+ uint32_t sa0 : 1; /**< [ 4: 4](R/W) Stack Alignment Check Enable for EL0. When set, use of the
+ stack pointer as the base address in a load/store instruction
+ at EL0 must be aligned to a 16-byte boundary, or a Stack
+ Alignment Fault exception will be raised. */
+ uint32_t sa : 1; /**< [ 3: 3](R/W) Stack Alignment Check Enable. When set, use of the stack
+ pointer as the base address in a load/store instruction at
+ this register's Exception level must be aligned to a 16-byte
+ boundary, or a Stack Alignment Fault exception will be raised. */
+ uint32_t cc : 1; /**< [ 2: 2](R/W) Cache enable. This is an enable bit for data and unified
+ caches at EL0 and EL1:
+ When this bit is 0, all EL0 and EL1 Normal memory data
+ accesses and all accesses to the EL1&0 stage 1 translation
+ tables are Non-cacheable.
+ If the AP_HCR_EL2[DC] bit is set to 1, then the nonsecure stage 1
+ EL1&0 translation regime is Cacheable regardless of the value
+ of the AP_SCTLR_EL1[C] bit.
+ 0 = Data and unified caches disabled.
+ 1 = Data and unified caches enabled. */
+ uint32_t aa : 1; /**< [ 1: 1](R/W) Alignment check enable. This is the enable bit for Alignment
+ fault checking:
+ Load/store exclusive and load-acquire/store-release
+ instructions have an alignment check regardless of the value
+ of the A bit.
+ 0 = Alignment fault checking disabled.
+ Instructions that load or store one or more registers, other
+ than load/store exclusive and load-acquire/store-release, do
+ not check that the address being accessed is aligned to the
+ size of the data element(s) being accessed.
+ 1 = Alignment fault checking enabled.
+ All instructions that load or store one or more registers have
+ an alignment check that the address being accessed is aligned
+ to the size of the data element(s) being accessed. If this
+ check fails it causes an Alignment fault, which is taken as a
+ Data Abort exception. */
+ uint32_t m : 1; /**< [ 0: 0](R/W) MMU enable for EL1 and EL0 stage 1 address translation.
+
+ If AP_HCR_EL2[DC] is set to 1, then in nonsecure state the
+ AP_SCTLR_EL1[M] bit behaves as 0 for all purposes other than
+ reading the value of the bit.
+ 0 = EL1 and EL0 stage 1 address translation disabled.
+ 1 = EL1 and EL0 stage 1 address translation enabled. */
+#else /* Word 0 - Little Endian */
+ uint32_t m : 1; /**< [ 0: 0](R/W) MMU enable for EL1 and EL0 stage 1 address translation.
+
+ If AP_HCR_EL2[DC] is set to 1, then in nonsecure state the
+ AP_SCTLR_EL1[M] bit behaves as 0 for all purposes other than
+ reading the value of the bit.
+ 0 = EL1 and EL0 stage 1 address translation disabled.
+ 1 = EL1 and EL0 stage 1 address translation enabled. */
+ uint32_t aa : 1; /**< [ 1: 1](R/W) Alignment check enable. This is the enable bit for Alignment
+ fault checking:
+ Load/store exclusive and load-acquire/store-release
+ instructions have an alignment check regardless of the value
+ of the A bit.
+ 0 = Alignment fault checking disabled.
+ Instructions that load or store one or more registers, other
+ than load/store exclusive and load-acquire/store-release, do
+ not check that the address being accessed is aligned to the
+ size of the data element(s) being accessed.
+ 1 = Alignment fault checking enabled.
+ All instructions that load or store one or more registers have
+ an alignment check that the address being accessed is aligned
+ to the size of the data element(s) being accessed. If this
+ check fails it causes an Alignment fault, which is taken as a
+ Data Abort exception. */
+ uint32_t cc : 1; /**< [ 2: 2](R/W) Cache enable. This is an enable bit for data and unified
+ caches at EL0 and EL1:
+ When this bit is 0, all EL0 and EL1 Normal memory data
+ accesses and all accesses to the EL1&0 stage 1 translation
+ tables are Non-cacheable.
+ If the AP_HCR_EL2[DC] bit is set to 1, then the nonsecure stage 1
+ EL1&0 translation regime is Cacheable regardless of the value
+ of the AP_SCTLR_EL1[C] bit.
+ 0 = Data and unified caches disabled.
+ 1 = Data and unified caches enabled. */
+ uint32_t sa : 1; /**< [ 3: 3](R/W) Stack Alignment Check Enable. When set, use of the stack
+ pointer as the base address in a load/store instruction at
+ this register's Exception level must be aligned to a 16-byte
+ boundary, or a Stack Alignment Fault exception will be raised. */
+ uint32_t sa0 : 1; /**< [ 4: 4](R/W) Stack Alignment Check Enable for EL0. When set, use of the
+ stack pointer as the base address in a load/store instruction
+ at EL0 must be aligned to a 16-byte boundary, or a Stack
+ Alignment Fault exception will be raised. */
+ uint32_t reserved_5_6 : 2;
+ uint32_t rsvd_7 : 1; /**< [ 7: 7](RO) "IT Disable.
+ 0 = The IT instruction functionality is available.
+ 1 = It is implementation defined whether the IT instruction is
+ treated as either:
+
+ A 16-bit instruction, which can only be followed by another
+ 16-bit instruction.
+
+ The first half of a 32-bit instruction.
+
+ An implementation might vary dynamically as to whether IT is
+ treated as a 16-bit instruction or the first half of a 32-bit
+ instruction.
+ All encodings of the IT instruction with hw1[3:0]!=1000 are
+ UNdefined and treated as unallocated.
+ All encodings of the subsequent instruction with the following
+ values for hw1 are UNdefined (and treated as unallocated):
+
+ - 0b11xxxxxxxxxxxxxx: All 32-bit instructions, and the 16-bit
+ instructions B, UDF, SVC, LDM, and STM.
+                                                                 - 0b1x11xxxxxxxxxxxx: All instructions in the Miscellaneous 16-bit instructions group.
+ - 0b1x100xxxxxxxxxxx: ADD Rd, PC, #imm
+ - 0b01001xxxxxxxxxxx: LDR Rd, [PC, #imm]
+ - 0b0100x1xxx1111xxx: ADD Rdn, PC; CMP Rn, PC; MOV Rd, PC; BX
+ PC; BLX PC.
+ - 0b010001xx1xxxx111: ADD PC, Rm; CMP PC, Rm; MOV PC, Rm. This
+ pattern also covers UNPREDICTABLE cases with BLX Rn.
+
+ Contrary to the standard treatment of conditional UNdefined
+ instructions in the ARM architecture, in this case these
+ instructions are always treated as UNdefined, regardless of
+ whether the instruction would pass or fail its condition codes
+ as a result of being in an IT block.
+
+ ITD: IT Disable - Only supported with 32 bit." */
+ uint32_t rsvd_8 : 1; /**< [ 8: 8](RO) SETEND Disable.
+ If an implementation does not support mixed endian operation,
+ this bit is RES1.
+ 0 = The SETEND instruction is available.
+ 1 = The SETEND instruction is UNALLOCATED.
+
+ SED: SETEND Disable - Only supported with 32 bit. */
+ uint32_t uma : 1; /**< [ 9: 9](R/W) User Mask Access. Controls access to interrupt masks from EL0,
+ when EL0 is using AArch64.
+ 0 = Disable access to the interrupt masks from EL0.
+ 1 = Enable access to the interrupt masks from EL0. */
+ uint32_t reserved_10 : 1;
+ uint32_t rsvd_11 : 1; /**< [ 11: 11](RO) Reserved 1. */
+ uint32_t i : 1; /**< [ 12: 12](R/W) Instruction cache enable. This is an enable bit for
+ instruction caches at EL0 and EL1:
+ When this bit is 0, all EL1 and EL0 Normal memory instruction
+ accesses are Non-cacheable.
+ If the AP_HCR_EL2[DC] bit is set to 1, then the nonsecure stage 1
+ EL1&0 translation regime is Cacheable regardless of the value
+ of this bit.
+ 0 = Instruction caches disabled at EL0 and EL1. If AP_SCTLR_EL1[M] is
+ set to 0, instruction accesses from stage 1 of the EL1&0
+ translation regime are to Normal memory, Outer Shareable,
+ Inner Non-cacheable, Outer Non-cacheable.
+ 1 = Instruction caches enabled at EL0 and EL1. If AP_SCTLR_EL1[M] is
+ set to 0, instruction accesses from stage 1 of the EL1&0
+ translation regime are to Normal memory, Outer Shareable,
+ Inner Write-Through, Outer Write-Through. */
+ uint32_t reserved_13 : 1;
+ uint32_t dze : 1; /**< [ 14: 14](R/W) Access to DC ZVA instruction at EL0.
+ 0 = Execution of the DC ZVA instruction is prohibited at EL0, and
+ it is treated as UNdefined at EL0.
+ 1 = Execution of the DC ZVA instruction is allowed at EL0. */
+ uint32_t uct : 1; /**< [ 15: 15](R/W) When set, enables EL0 access in AArch64 to the AP_CTR_EL0
+ register. */
+ uint32_t ntwi : 1; /**< [ 16: 16](R/W) Not trap WFI.
+ Conditional WFI instructions that fail their condition do not
+ cause an exception if this bit is 0.
+ 0 = If a WFI instruction executed at EL0 would cause execution to
+ be suspended, such as if there is not a pending WFI wakeup
+                                                                 event, it is taken as an exception to EL1 using the 0x1 (trapped WFI/WFE) exception class.
+ 1 = WFI instructions are executed as normal. */
+ uint32_t reserved_17 : 1;
+ uint32_t ntwe : 1; /**< [ 18: 18](R/W) Not trap WFE.
+ Conditional WFE instructions that fail their condition do not
+ cause an exception if this bit is 0.
+ 0 = If a WFE instruction executed at EL0 would cause execution to
+ be suspended, such as if the event register is not set and
+ there is not a pending WFE wakeup event, it is taken as an
+                                                                 exception to EL1 using the 0x1 (trapped WFI/WFE) exception class.
+ 1 = WFE instructions are executed as normal. */
+ uint32_t wxn : 1; /**< [ 19: 19](R/W) Write permission implies XN (Execute Never). This bit can be
+ used to require all memory regions with write permission to be
+ treated as XN.
+ The WXN bit is permitted to be cached in a TLB.
+ 0 = Regions with write permission are not forced to XN.
+ 1 = Regions with write permission are forced to XN. */
+ uint32_t rsvd_20 : 1; /**< [ 20: 20](RO) Reserved 1. */
+ uint32_t reserved_21 : 1;
+ uint32_t rsvd_22 : 1; /**< [ 22: 22](RO) Reserved 1. */
+        uint32_t span                  : 1;  /**< [ 23: 23](R/W) v8.1: SPAN: Set PSTATE/CPSR[AP_PAN] bit on taking an exception
+ to the EL1 exception level.
+ 0 = PSTATE/CPSR[AP_PAN] is set on taking an exception to the EL1 exception level.
+ 1 = PSTATE/CPSR[AP_PAN] is left unchanged on taking an exception to the EL1 exception
+ level.
+
+ This bit has no effect on the PSTATE/CPSR[AP_PAN] when taking exceptions to any other
+ exception level. */
+ uint32_t e0e : 1; /**< [ 24: 24](R/W) Endianness of explicit data accesses at EL0.
+
+ If an implementation only supports Little-endian accesses at
+ EL0 then this bit is RES0.
+ If an implementation only supports Big-endian accesses at EL0
+ then this bit is RES1.
+ This bit has no effect on the endianness of LDTR* and STTR*
+ instructions executed at EL1.
+ 0 = Explicit data accesses at EL0 are little-endian.
+ 1 = Explicit data accesses at EL0 are big-endian. */
+ uint32_t ee : 1; /**< [ 25: 25](R/W) Exception Endianness. This bit controls the endianness for:
+ Explicit data accesses at EL1.
+ Stage 1 translation table walks at EL1 and EL0.
+
+ If an implementation does not provide Big-endian support, this
+ bit is RES0. If it does not provide Little-endian support,
+ this bit is RES1.
+ The EE bit is permitted to be cached in a TLB.
+ 0 = Little-endian.
+ 1 = Big-endian. */
+ uint32_t uci : 1; /**< [ 26: 26](R/W) When set, enables EL0 access in AArch64 for DC CVAU, DC CIVAC,
+ DC CVAC, and IC IVAU instructions. */
+ uint32_t reserved_27 : 1;
+ uint32_t rsvd_28_29 : 2; /**< [ 29: 28](RO) Reserved 1. */
+ uint32_t reserved_30_31 : 2;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_sctlr_el1_cn
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_30_31 : 2;
+ uint32_t rsvd_28_29 : 2; /**< [ 29: 28](RO) Reserved 1. */
+ uint32_t reserved_27 : 1;
+ uint32_t uci : 1; /**< [ 26: 26](R/W) When set, enables EL0 access in AArch64 for DC CVAU, DC CIVAC,
+ DC CVAC, and IC IVAU instructions. */
+ uint32_t ee : 1; /**< [ 25: 25](R/W) Exception Endianness. This bit controls the endianness for:
+ Explicit data accesses at EL1.
+ Stage 1 translation table walks at EL1 and EL0.
+
+ If an implementation does not provide Big-endian support, this
+ bit is RES0. If it does not provide Little-endian support,
+ this bit is RES1.
+ The EE bit is permitted to be cached in a TLB.
+ 0 = Little-endian.
+ 1 = Big-endian. */
+ uint32_t e0e : 1; /**< [ 24: 24](R/W) Endianness of explicit data accesses at EL0.
+
+ If an implementation only supports Little-endian accesses at
+ EL0 then this bit is RES0.
+ If an implementation only supports Big-endian accesses at EL0
+ then this bit is RES1.
+ This bit has no effect on the endianness of LDTR* and STTR*
+ instructions executed at EL1.
+ 0 = Explicit data accesses at EL0 are little-endian.
+ 1 = Explicit data accesses at EL0 are big-endian. */
+        uint32_t span                  : 1;  /**< [ 23: 23](R/W) v8.1: SPAN: Set PSTATE/CPSR[AP_PAN] bit on taking an exception
+ to the EL1 exception level.
+ 0 = PSTATE/CPSR[AP_PAN] is set on taking an exception to the EL1 exception level.
+ 1 = PSTATE/CPSR[AP_PAN] is left unchanged on taking an exception to the EL1 exception
+ level.
+
+ This bit has no effect on the PSTATE/CPSR[AP_PAN] when taking exceptions to any other
+ exception level. */
+ uint32_t rsvd_22 : 1; /**< [ 22: 22](RO) Reserved 1. */
+ uint32_t reserved_21 : 1;
+ uint32_t rsvd_20 : 1; /**< [ 20: 20](RO) Reserved 1. */
+ uint32_t wxn : 1; /**< [ 19: 19](R/W) Write permission implies XN (Execute Never). This bit can be
+ used to require all memory regions with write permission to be
+ treated as XN.
+ The WXN bit is permitted to be cached in a TLB.
+ 0 = Regions with write permission are not forced to XN.
+ 1 = Regions with write permission are forced to XN. */
+ uint32_t ntwe : 1; /**< [ 18: 18](R/W) Not trap WFE.
+ Conditional WFE instructions that fail their condition do not
+ cause an exception if this bit is 0.
+ 0 = If a WFE instruction executed at EL0 would cause execution to
+ be suspended, such as if the event register is not set and
+ there is not a pending WFE wakeup event, it is taken as an
+                                                                 exception to EL1 using the 0x1 (trapped WFI/WFE) exception class.
+ 1 = WFE instructions are executed as normal. */
+ uint32_t reserved_17 : 1;
+ uint32_t ntwi : 1; /**< [ 16: 16](R/W) Not trap WFI.
+ Conditional WFI instructions that fail their condition do not
+ cause an exception if this bit is 0.
+ 0 = If a WFI instruction executed at EL0 would cause execution to
+ be suspended, such as if there is not a pending WFI wakeup
+                                                                 event, it is taken as an exception to EL1 using the 0x1 (trapped WFI/WFE) exception class.
+ 1 = WFI instructions are executed as normal. */
+ uint32_t uct : 1; /**< [ 15: 15](R/W) When set, enables EL0 access in AArch64 to the AP_CTR_EL0
+ register. */
+ uint32_t dze : 1; /**< [ 14: 14](R/W) Access to DC ZVA instruction at EL0.
+ 0 = Execution of the DC ZVA instruction is prohibited at EL0, and
+ it is treated as UNdefined at EL0.
+ 1 = Execution of the DC ZVA instruction is allowed at EL0. */
+ uint32_t reserved_13 : 1;
+ uint32_t i : 1; /**< [ 12: 12](R/W) Instruction cache enable. This is an enable bit for
+ instruction caches at EL0 and EL1:
+ When this bit is 0, all EL1 and EL0 Normal memory instruction
+ accesses are Non-cacheable.
+ If the AP_HCR_EL2[DC] bit is set to 1, then the nonsecure stage 1
+ EL1&0 translation regime is Cacheable regardless of the value
+ of this bit.
+ 0 = Instruction caches disabled at EL0 and EL1. If AP_SCTLR_EL1[M] is
+ set to 0, instruction accesses from stage 1 of the EL1&0
+ translation regime are to Normal memory, Outer Shareable,
+ Inner Non-cacheable, Outer Non-cacheable.
+ 1 = Instruction caches enabled at EL0 and EL1. If AP_SCTLR_EL1[M] is
+ set to 0, instruction accesses from stage 1 of the EL1&0
+ translation regime are to Normal memory, Outer Shareable,
+ Inner Write-Through, Outer Write-Through. */
+ uint32_t rsvd_11 : 1; /**< [ 11: 11](RO) Reserved 1. */
+ uint32_t reserved_10 : 1;
+ uint32_t uma : 1; /**< [ 9: 9](R/W) User Mask Access. Controls access to interrupt masks from EL0,
+ when EL0 is using AArch64.
+ 0 = Disable access to the interrupt masks from EL0.
+ 1 = Enable access to the interrupt masks from EL0. */
+ uint32_t rsvd_8 : 1; /**< [ 8: 8](RO) SETEND Disable.
+ If an implementation does not support mixed endian operation,
+ this bit is RES1.
+ 0 = The SETEND instruction is available.
+ 1 = The SETEND instruction is UNALLOCATED.
+
+ SED: SETEND Disable - Only supported with 32 bit. */
+ uint32_t rsvd_7 : 1; /**< [ 7: 7](RO) "IT Disable.
+ 0 = The IT instruction functionality is available.
+ 1 = It is implementation defined whether the IT instruction is
+ treated as either:
+
+ A 16-bit instruction, which can only be followed by another
+ 16-bit instruction.
+
+ The first half of a 32-bit instruction.
+
+ An implementation might vary dynamically as to whether IT is
+ treated as a 16-bit instruction or the first half of a 32-bit
+ instruction.
+ All encodings of the IT instruction with hw1[3:0]!=1000 are
+ UNdefined and treated as unallocated.
+ All encodings of the subsequent instruction with the following
+ values for hw1 are UNdefined (and treated as unallocated):
+
+ - 0b11xxxxxxxxxxxxxx: All 32-bit instructions, and the 16-bit
+ instructions B, UDF, SVC, LDM, and STM.
+                                                                 - 0b1x11xxxxxxxxxxxx: All instructions in the Miscellaneous 16-bit instructions group.
+ - 0b1x100xxxxxxxxxxx: ADD Rd, PC, #imm
+ - 0b01001xxxxxxxxxxx: LDR Rd, [PC, #imm]
+ - 0b0100x1xxx1111xxx: ADD Rdn, PC; CMP Rn, PC; MOV Rd, PC; BX
+ PC; BLX PC.
+ - 0b010001xx1xxxx111: ADD PC, Rm; CMP PC, Rm; MOV PC, Rm. This
+ pattern also covers UNPREDICTABLE cases with BLX Rn.
+
+ Contrary to the standard treatment of conditional UNdefined
+ instructions in the ARM architecture, in this case these
+ instructions are always treated as UNdefined, regardless of
+ whether the instruction would pass or fail its condition codes
+ as a result of being in an IT block.
+
+ ITD: IT Disable - Only supported with 32 bit." */
+ uint32_t reserved_6 : 1;
+ uint32_t reserved_5 : 1;
+ uint32_t sa0 : 1; /**< [ 4: 4](R/W) Stack Alignment Check Enable for EL0. When set, use of the
+ stack pointer as the base address in a load/store instruction
+ at EL0 must be aligned to a 16-byte boundary, or a Stack
+ Alignment Fault exception will be raised. */
+ uint32_t sa : 1; /**< [ 3: 3](R/W) Stack Alignment Check Enable. When set, use of the stack
+ pointer as the base address in a load/store instruction at
+ this register's Exception level must be aligned to a 16-byte
+ boundary, or a Stack Alignment Fault exception will be raised. */
+ uint32_t cc : 1; /**< [ 2: 2](R/W) Cache enable. This is an enable bit for data and unified
+ caches at EL0 and EL1:
+ When this bit is 0, all EL0 and EL1 Normal memory data
+ accesses and all accesses to the EL1&0 stage 1 translation
+ tables are Non-cacheable.
+ If the AP_HCR_EL2[DC] bit is set to 1, then the nonsecure stage 1
+ EL1&0 translation regime is Cacheable regardless of the value
+ of the AP_SCTLR_EL1[C] bit.
+ 0 = Data and unified caches disabled.
+ 1 = Data and unified caches enabled. */
+ uint32_t aa : 1; /**< [ 1: 1](R/W) Alignment check enable. This is the enable bit for Alignment
+ fault checking:
+ Load/store exclusive and load-acquire/store-release
+ instructions have an alignment check regardless of the value
+ of the A bit.
+ 0 = Alignment fault checking disabled.
+ Instructions that load or store one or more registers, other
+ than load/store exclusive and load-acquire/store-release, do
+ not check that the address being accessed is aligned to the
+ size of the data element(s) being accessed.
+ 1 = Alignment fault checking enabled.
+ All instructions that load or store one or more registers have
+ an alignment check that the address being accessed is aligned
+ to the size of the data element(s) being accessed. If this
+ check fails it causes an Alignment fault, which is taken as a
+ Data Abort exception. */
+ uint32_t m : 1; /**< [ 0: 0](R/W) MMU enable for EL1 and EL0 stage 1 address translation.
+
+ If AP_HCR_EL2[DC] is set to 1, then in nonsecure state the
+ AP_SCTLR_EL1[M] bit behaves as 0 for all purposes other than
+ reading the value of the bit.
+ 0 = EL1 and EL0 stage 1 address translation disabled.
+ 1 = EL1 and EL0 stage 1 address translation enabled. */
+#else /* Word 0 - Little Endian */
+ uint32_t m : 1; /**< [ 0: 0](R/W) MMU enable for EL1 and EL0 stage 1 address translation.
+
+ If AP_HCR_EL2[DC] is set to 1, then in nonsecure state the
+ AP_SCTLR_EL1[M] bit behaves as 0 for all purposes other than
+ reading the value of the bit.
+ 0 = EL1 and EL0 stage 1 address translation disabled.
+ 1 = EL1 and EL0 stage 1 address translation enabled. */
+ uint32_t aa : 1; /**< [ 1: 1](R/W) Alignment check enable. This is the enable bit for Alignment
+ fault checking:
+ Load/store exclusive and load-acquire/store-release
+ instructions have an alignment check regardless of the value
+ of the A bit.
+ 0 = Alignment fault checking disabled.
+ Instructions that load or store one or more registers, other
+ than load/store exclusive and load-acquire/store-release, do
+ not check that the address being accessed is aligned to the
+ size of the data element(s) being accessed.
+ 1 = Alignment fault checking enabled.
+ All instructions that load or store one or more registers have
+ an alignment check that the address being accessed is aligned
+ to the size of the data element(s) being accessed. If this
+ check fails it causes an Alignment fault, which is taken as a
+ Data Abort exception. */
+ uint32_t cc : 1; /**< [ 2: 2](R/W) Cache enable. This is an enable bit for data and unified
+ caches at EL0 and EL1:
+ When this bit is 0, all EL0 and EL1 Normal memory data
+ accesses and all accesses to the EL1&0 stage 1 translation
+ tables are Non-cacheable.
+ If the AP_HCR_EL2[DC] bit is set to 1, then the nonsecure stage 1
+ EL1&0 translation regime is Cacheable regardless of the value
+ of the AP_SCTLR_EL1[C] bit.
+ 0 = Data and unified caches disabled.
+ 1 = Data and unified caches enabled. */
+ uint32_t sa : 1; /**< [ 3: 3](R/W) Stack Alignment Check Enable. When set, use of the stack
+ pointer as the base address in a load/store instruction at
+ this register's Exception level must be aligned to a 16-byte
+ boundary, or a Stack Alignment Fault exception will be raised. */
+ uint32_t sa0 : 1; /**< [ 4: 4](R/W) Stack Alignment Check Enable for EL0. When set, use of the
+ stack pointer as the base address in a load/store instruction
+ at EL0 must be aligned to a 16-byte boundary, or a Stack
+ Alignment Fault exception will be raised. */
+ uint32_t reserved_5 : 1;
+ uint32_t reserved_6 : 1;
+ uint32_t rsvd_7 : 1; /**< [ 7: 7](RO) "IT Disable.
+ 0 = The IT instruction functionality is available.
+ 1 = It is implementation defined whether the IT instruction is
+ treated as either:
+
+ A 16-bit instruction, which can only be followed by another
+ 16-bit instruction.
+
+ The first half of a 32-bit instruction.
+
+ An implementation might vary dynamically as to whether IT is
+ treated as a 16-bit instruction or the first half of a 32-bit
+ instruction.
+ All encodings of the IT instruction with hw1[3:0]!=1000 are
+ UNdefined and treated as unallocated.
+ All encodings of the subsequent instruction with the following
+ values for hw1 are UNdefined (and treated as unallocated):
+
+ - 0b11xxxxxxxxxxxxxx: All 32-bit instructions, and the 16-bit
+ instructions B, UDF, SVC, LDM, and STM.
+                                                                 - 0b1x11xxxxxxxxxxxx: All instructions in the Miscellaneous 16-bit instructions group.
+ - 0b1x100xxxxxxxxxxx: ADD Rd, PC, #imm
+ - 0b01001xxxxxxxxxxx: LDR Rd, [PC, #imm]
+ - 0b0100x1xxx1111xxx: ADD Rdn, PC; CMP Rn, PC; MOV Rd, PC; BX
+ PC; BLX PC.
+ - 0b010001xx1xxxx111: ADD PC, Rm; CMP PC, Rm; MOV PC, Rm. This
+ pattern also covers UNPREDICTABLE cases with BLX Rn.
+
+ Contrary to the standard treatment of conditional UNdefined
+ instructions in the ARM architecture, in this case these
+ instructions are always treated as UNdefined, regardless of
+ whether the instruction would pass or fail its condition codes
+ as a result of being in an IT block.
+
+ ITD: IT Disable - Only supported with 32 bit." */
+ uint32_t rsvd_8 : 1; /**< [ 8: 8](RO) SETEND Disable.
+ If an implementation does not support mixed endian operation,
+ this bit is RES1.
+ 0 = The SETEND instruction is available.
+ 1 = The SETEND instruction is UNALLOCATED.
+
+ SED: SETEND Disable - Only supported with 32 bit. */
+ uint32_t uma : 1; /**< [ 9: 9](R/W) User Mask Access. Controls access to interrupt masks from EL0,
+ when EL0 is using AArch64.
+ 0 = Disable access to the interrupt masks from EL0.
+ 1 = Enable access to the interrupt masks from EL0. */
+ uint32_t reserved_10 : 1;
+ uint32_t rsvd_11 : 1; /**< [ 11: 11](RO) Reserved 1. */
+ uint32_t i : 1; /**< [ 12: 12](R/W) Instruction cache enable. This is an enable bit for
+ instruction caches at EL0 and EL1:
+ When this bit is 0, all EL1 and EL0 Normal memory instruction
+ accesses are Non-cacheable.
+ If the AP_HCR_EL2[DC] bit is set to 1, then the nonsecure stage 1
+ EL1&0 translation regime is Cacheable regardless of the value
+ of this bit.
+ 0 = Instruction caches disabled at EL0 and EL1. If AP_SCTLR_EL1[M] is
+ set to 0, instruction accesses from stage 1 of the EL1&0
+ translation regime are to Normal memory, Outer Shareable,
+ Inner Non-cacheable, Outer Non-cacheable.
+ 1 = Instruction caches enabled at EL0 and EL1. If AP_SCTLR_EL1[M] is
+ set to 0, instruction accesses from stage 1 of the EL1&0
+ translation regime are to Normal memory, Outer Shareable,
+ Inner Write-Through, Outer Write-Through. */
+ uint32_t reserved_13 : 1;
+ uint32_t dze : 1; /**< [ 14: 14](R/W) Access to DC ZVA instruction at EL0.
+ 0 = Execution of the DC ZVA instruction is prohibited at EL0, and
+ it is treated as UNdefined at EL0.
+ 1 = Execution of the DC ZVA instruction is allowed at EL0. */
+ uint32_t uct : 1; /**< [ 15: 15](R/W) When set, enables EL0 access in AArch64 to the AP_CTR_EL0
+ register. */
+ uint32_t ntwi : 1; /**< [ 16: 16](R/W) Not trap WFI.
+ Conditional WFI instructions that fail their condition do not
+ cause an exception if this bit is 0.
+ 0 = If a WFI instruction executed at EL0 would cause execution to
+ be suspended, such as if there is not a pending WFI wakeup
+                                                                 event, it is taken as an exception to EL1 using the 0x1 (trapped WFI/WFE) exception class.
+ 1 = WFI instructions are executed as normal. */
+ uint32_t reserved_17 : 1;
+ uint32_t ntwe : 1; /**< [ 18: 18](R/W) Not trap WFE.
+ Conditional WFE instructions that fail their condition do not
+ cause an exception if this bit is 0.
+ 0 = If a WFE instruction executed at EL0 would cause execution to
+ be suspended, such as if the event register is not set and
+ there is not a pending WFE wakeup event, it is taken as an
+                                                                 exception to EL1 using the 0x1 (trapped WFI/WFE) exception class.
+ 1 = WFE instructions are executed as normal. */
+ uint32_t wxn : 1; /**< [ 19: 19](R/W) Write permission implies XN (Execute Never). This bit can be
+ used to require all memory regions with write permission to be
+ treated as XN.
+ The WXN bit is permitted to be cached in a TLB.
+ 0 = Regions with write permission are not forced to XN.
+ 1 = Regions with write permission are forced to XN. */
+ uint32_t rsvd_20 : 1; /**< [ 20: 20](RO) Reserved 1. */
+ uint32_t reserved_21 : 1;
+ uint32_t rsvd_22 : 1; /**< [ 22: 22](RO) Reserved 1. */
+        uint32_t span                  : 1;  /**< [ 23: 23](R/W) v8.1: SPAN: Set PSTATE/CPSR[AP_PAN] bit on taking an exception
+ to the EL1 exception level.
+ 0 = PSTATE/CPSR[AP_PAN] is set on taking an exception to the EL1 exception level.
+ 1 = PSTATE/CPSR[AP_PAN] is left unchanged on taking an exception to the EL1 exception
+ level.
+
+ This bit has no effect on the PSTATE/CPSR[AP_PAN] when taking exceptions to any other
+ exception level. */
+ uint32_t e0e : 1; /**< [ 24: 24](R/W) Endianness of explicit data accesses at EL0.
+
+ If an implementation only supports Little-endian accesses at
+ EL0 then this bit is RES0.
+ If an implementation only supports Big-endian accesses at EL0
+ then this bit is RES1.
+ This bit has no effect on the endianness of LDTR* and STTR*
+ instructions executed at EL1.
+ 0 = Explicit data accesses at EL0 are little-endian.
+ 1 = Explicit data accesses at EL0 are big-endian. */
+ uint32_t ee : 1; /**< [ 25: 25](R/W) Exception Endianness. This bit controls the endianness for:
+ Explicit data accesses at EL1.
+ Stage 1 translation table walks at EL1 and EL0.
+
+ If an implementation does not provide Big-endian support, this
+ bit is RES0. If it does not provide Little-endian support,
+ this bit is RES1.
+ The EE bit is permitted to be cached in a TLB.
+ 0 = Little-endian.
+ 1 = Big-endian. */
+ uint32_t uci : 1; /**< [ 26: 26](R/W) When set, enables EL0 access in AArch64 for DC CVAU, DC CIVAC,
+ DC CVAC, and IC IVAU instructions. */
+ uint32_t reserved_27 : 1;
+ uint32_t rsvd_28_29 : 2; /**< [ 29: 28](RO) Reserved 1. */
+ uint32_t reserved_30_31 : 2;
+#endif /* Word 0 - End */
+ } cn;
+};
+typedef union bdk_ap_sctlr_el1 bdk_ap_sctlr_el1_t;
+
+#define BDK_AP_SCTLR_EL1 BDK_AP_SCTLR_EL1_FUNC()
+static inline uint64_t BDK_AP_SCTLR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_SCTLR_EL1_FUNC(void)
+{
+ return 0x30001000000ll;
+}
+
+#define typedef_BDK_AP_SCTLR_EL1 bdk_ap_sctlr_el1_t
+#define bustype_BDK_AP_SCTLR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_SCTLR_EL1 "AP_SCTLR_EL1"
+#define busnum_BDK_AP_SCTLR_EL1 0
+#define arguments_BDK_AP_SCTLR_EL1 -1,-1,-1,-1
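+
+/* Usage sketch (illustrative, not imported code): turning on the MMU and
+ * caches at EL1 through the bitfield view. BDK_CSR_MODIFY() is assumed from
+ * the BDK CSR macro conventions; on real hardware the write should be
+ * followed by an ISB before the new settings are relied upon.
+ *
+ *   BDK_CSR_MODIFY(c, bdk_numa_local(), BDK_AP_SCTLR_EL1,
+ *       c.s.m = 1;             // EL1/EL0 stage 1 translation on
+ *       c.s.cc = 1;            // data/unified caches on
+ *       c.s.i = 1);            // instruction caches on
+ */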
+
+/**
+ * Register (SYSREG) ap_sctlr_el12
+ *
+ * AP System Control EL1/2 Register
+ * Alias to allow access to AP_SCTLR_EL1 from EL2 when AP_HCR_EL2[E2H] is set.
+ */
+union bdk_ap_sctlr_el12
+{
+ uint32_t u;
+ struct bdk_ap_sctlr_el12_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_sctlr_el12_s cn; */
+};
+typedef union bdk_ap_sctlr_el12 bdk_ap_sctlr_el12_t;
+
+#define BDK_AP_SCTLR_EL12 BDK_AP_SCTLR_EL12_FUNC()
+static inline uint64_t BDK_AP_SCTLR_EL12_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_SCTLR_EL12_FUNC(void)
+{
+ return 0x30501000000ll;
+}
+
+#define typedef_BDK_AP_SCTLR_EL12 bdk_ap_sctlr_el12_t
+#define bustype_BDK_AP_SCTLR_EL12 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_SCTLR_EL12 "AP_SCTLR_EL12"
+#define busnum_BDK_AP_SCTLR_EL12 0
+#define arguments_BDK_AP_SCTLR_EL12 -1,-1,-1,-1
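+
+/* Usage sketch (illustrative, not imported code): AP_SCTLR_EL12 is only an
+ * access alias, which is why its own bitfield struct is all-reserved. A
+ * hypothetical reader running at EL2 with AP_HCR_EL2[E2H]=1 would fetch
+ * through the alias and decode with the AP_SCTLR_EL1 layout:
+ *
+ *   bdk_ap_sctlr_el1_t sctlr;
+ *   sctlr.u = BDK_CSR_READ(bdk_numa_local(), BDK_AP_SCTLR_EL12);
+ *   bool mmu_on = sctlr.s.m;   // EL1 stage 1 translation state
+ */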
+
+/**
+ * Register (SYSREG) ap_sctlr_el2
+ *
+ * AP System Control Non-E2H Register
+ * Provides top level control of the system, including its memory
+ * system, at EL2.
+ *
+ * This register is at the same select as AP_SCTLR_EL2_E2H and is used when E2H=0.
+ */
+union bdk_ap_sctlr_el2
+{
+ uint32_t u;
+ struct bdk_ap_sctlr_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_30_31 : 2;
+ uint32_t rsvd_28_29 : 2; /**< [ 29: 28](RO) Reserved 1. */
+ uint32_t reserved_26_27 : 2;
+ uint32_t ee : 1; /**< [ 25: 25](R/W) Exception Endianness. This bit controls the endianness for:
+ Explicit data accesses at EL3.
+ Stage 1 translation table walks at EL3.
+
+ If an implementation does not provide Big-endian support, this
+ bit is RES0. If it does not provide Little-endian support,
+ this bit is RES1.
+ The EE bit is permitted to be cached in a TLB.
+ 0 = Little-endian.
+ 1 = Big-endian. */
+ uint32_t reserved_24 : 1;
+        uint32_t span                  : 1;  /**< [ 23: 23](R/W) v8.1: SPAN: Set PSTATE/CPSR[AP_PAN] bit on taking an exception
+ to the EL* exception level.
+ 0 = PSTATE/CPSR[AP_PAN] is set on taking an exception to the EL* exception level.
+ 1 = PSTATE/CPSR[AP_PAN] is left unchanged on taking an exception to the EL* exception level.
+
+ This bit has no effect on the PSTATE/CPSR[AP_PAN] when taking exceptions to any other
+ exception level. */
+ uint32_t rsvd_22 : 1; /**< [ 22: 22](RO) Reserved 1. */
+ uint32_t reserved_20_21 : 2;
+ uint32_t wxn : 1; /**< [ 19: 19](R/W) Write permission implies XN (Execute Never). This bit can be
+ used to require all memory regions with write permission to be
+ treated as XN.
+ The WXN bit is permitted to be cached in a TLB.
+ 0 = Regions with write permission are not forced to XN.
+ 1 = Regions with write permission are forced to XN. */
+ uint32_t rsvd_18 : 1; /**< [ 18: 18](RO) Reserved 1. */
+ uint32_t reserved_17 : 1;
+ uint32_t rsvd_16 : 1; /**< [ 16: 16](RO) Reserved 1. */
+ uint32_t reserved_13_15 : 3;
+ uint32_t i : 1; /**< [ 12: 12](R/W) Instruction cache enable. This is an enable bit for
+ instruction caches at EL3.
+ When this bit is 0, all EL3 Normal memory instruction accesses
+ are Non-cacheable. This bit has no effect on the EL1&0 or EL2
+ translation regimes.
+ 0 = Instruction caches disabled at EL3. If AP_SCTLR_EL3[M] is set to
+ 0, instruction accesses from stage 1 of the EL3 translation
+ regime are to Normal memory, Outer Shareable, Inner Non-
+ cacheable, Outer Non-cacheable.
+ 1 = Instruction caches enabled at EL3. If AP_SCTLR_EL3[M] is set to 0,
+ instruction accesses from stage 1 of the EL3 translation
+ regime are to Normal memory, Outer Shareable, Inner Write-
+ Through, Outer Write-Through. */
+ uint32_t rsvd_11 : 1; /**< [ 11: 11](RO) Reserved 1. */
+ uint32_t reserved_6_10 : 5;
+ uint32_t rsvd_4_5 : 2; /**< [ 5: 4](RO) Reserved 1. */
+ uint32_t sa : 1; /**< [ 3: 3](R/W) Stack Alignment Check Enable. When set, use of the stack
+ pointer as the base address in a load/store instruction at
+ this register's Exception level must be aligned to a 16-byte
+ boundary, or a Stack Alignment Fault exception will be raised. */
+ uint32_t cc : 1; /**< [ 2: 2](R/W) Cache enable. This is an enable bit for data and unified
+ caches at EL3:
+
+ When this bit is 0, all EL3 Normal memory data accesses and
+ all accesses to the EL3 translation tables are Non-cacheable.
+ This bit has no effect on the EL1&0 or EL2 translation
+ regimes.
+ 0 = Data and unified caches disabled at EL3.
+ 1 = Data and unified caches enabled at EL3. */
+ uint32_t aa : 1; /**< [ 1: 1](R/W) Alignment check enable. This is the enable bit for Alignment
+ fault checking:
+
+ Load/store exclusive and load-acquire/store-release
+ instructions have an alignment check regardless of the value
+ of the A bit.
+
+ 0 = Alignment fault checking disabled.
+ Instructions that load or store one or more registers, other
+ than load/store exclusive and load-acquire/store-release, do
+ not check that the address being accessed is aligned to the
+ size of the data element(s) being accessed.
+
+ 1 = Alignment fault checking enabled.
+ All instructions that load or store one or more registers have
+ an alignment check that the address being accessed is aligned
+ to the size of the data element(s) being accessed. If this
+ check fails it causes an Alignment fault, which is taken as a
+ Data Abort exception. */
+ uint32_t m : 1; /**< [ 0: 0](R/W) MMU enable for EL3 stage 1 address translation.
+ 0 = EL3 stage 1 address translation disabled.
+ 1 = EL3 stage 1 address translation enabled. */
+#else /* Word 0 - Little Endian */
+ uint32_t m : 1; /**< [ 0: 0](R/W) MMU enable for EL3 stage 1 address translation.
+ 0 = EL3 stage 1 address translation disabled.
+ 1 = EL3 stage 1 address translation enabled. */
+ uint32_t aa : 1; /**< [ 1: 1](R/W) Alignment check enable. This is the enable bit for Alignment
+ fault checking:
+
+ Load/store exclusive and load-acquire/store-release
+ instructions have an alignment check regardless of the value
+ of the A bit.
+
+ 0 = Alignment fault checking disabled.
+ Instructions that load or store one or more registers, other
+ than load/store exclusive and load-acquire/store-release, do
+ not check that the address being accessed is aligned to the
+ size of the data element(s) being accessed.
+
+ 1 = Alignment fault checking enabled.
+ All instructions that load or store one or more registers have
+ an alignment check that the address being accessed is aligned
+ to the size of the data element(s) being accessed. If this
+ check fails it causes an Alignment fault, which is taken as a
+ Data Abort exception. */
+ uint32_t cc : 1; /**< [ 2: 2](R/W) Cache enable. This is an enable bit for data and unified
+ caches at EL3:
+
+ When this bit is 0, all EL3 Normal memory data accesses and
+ all accesses to the EL3 translation tables are Non-cacheable.
+ This bit has no effect on the EL1&0 or EL2 translation
+ regimes.
+ 0 = Data and unified caches disabled at EL3.
+ 1 = Data and unified caches enabled at EL3. */
+ uint32_t sa : 1; /**< [ 3: 3](R/W) Stack Alignment Check Enable. When set, use of the stack
+ pointer as the base address in a load/store instruction at
+ this register's Exception level must be aligned to a 16-byte
+ boundary, or a Stack Alignment Fault exception will be raised. */
+ uint32_t rsvd_4_5 : 2; /**< [ 5: 4](RO) Reserved 1. */
+ uint32_t reserved_6_10 : 5;
+ uint32_t rsvd_11 : 1; /**< [ 11: 11](RO) Reserved 1. */
+ uint32_t i : 1; /**< [ 12: 12](R/W) Instruction cache enable. This is an enable bit for
+ instruction caches at EL3.
+ When this bit is 0, all EL3 Normal memory instruction accesses
+ are Non-cacheable. This bit has no effect on the EL1&0 or EL2
+ translation regimes.
+ 0 = Instruction caches disabled at EL3. If AP_SCTLR_EL3[M] is set to
+ 0, instruction accesses from stage 1 of the EL3 translation
+ regime are to Normal memory, Outer Shareable, Inner Non-
+ cacheable, Outer Non-cacheable.
+ 1 = Instruction caches enabled at EL3. If AP_SCTLR_EL3[M] is set to 0,
+ instruction accesses from stage 1 of the EL3 translation
+ regime are to Normal memory, Outer Shareable, Inner Write-
+ Through, Outer Write-Through. */
+ uint32_t reserved_13_15 : 3;
+ uint32_t rsvd_16 : 1; /**< [ 16: 16](RO) Reserved 1. */
+ uint32_t reserved_17 : 1;
+ uint32_t rsvd_18 : 1; /**< [ 18: 18](RO) Reserved 1. */
+ uint32_t wxn : 1; /**< [ 19: 19](R/W) Write permission implies XN (Execute Never). This bit can be
+ used to require all memory regions with write permission to be
+ treated as XN.
+ The WXN bit is permitted to be cached in a TLB.
+ 0 = Regions with write permission are not forced to XN.
+ 1 = Regions with write permission are forced to XN. */
+ uint32_t reserved_20_21 : 2;
+ uint32_t rsvd_22 : 1; /**< [ 22: 22](RO) Reserved 1. */
+        uint32_t span                  : 1;  /**< [ 23: 23](R/W) v8.1: SPAN: Set PSTATE/CPSR[AP_PAN] bit on taking an exception
+ to the EL* exception level.
+ 0 = PSTATE/CPSR[AP_PAN] is set on taking an exception to the EL* exception level.
+ 1 = PSTATE/CPSR[AP_PAN] is left unchanged on taking an exception to the EL* exception level.
+
+ This bit has no effect on the PSTATE/CPSR[AP_PAN] when taking exceptions to any other
+ exception level. */
+ uint32_t reserved_24 : 1;
+ uint32_t ee : 1; /**< [ 25: 25](R/W) Exception Endianness. This bit controls the endianness for:
+ Explicit data accesses at EL3.
+ Stage 1 translation table walks at EL3.
+
+ If an implementation does not provide Big-endian support, this
+ bit is RES0. If it does not provide Little-endian support,
+ this bit is RES1.
+ The EE bit is permitted to be cached in a TLB.
+ 0 = Little-endian.
+ 1 = Big-endian. */
+ uint32_t reserved_26_27 : 2;
+ uint32_t rsvd_28_29 : 2; /**< [ 29: 28](RO) Reserved 1. */
+ uint32_t reserved_30_31 : 2;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_sctlr_el2_s cn; */
+};
+typedef union bdk_ap_sctlr_el2 bdk_ap_sctlr_el2_t;
+
+#define BDK_AP_SCTLR_EL2 BDK_AP_SCTLR_EL2_FUNC()
+static inline uint64_t BDK_AP_SCTLR_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_SCTLR_EL2_FUNC(void)
+{
+ return 0x30401000000ll;
+}
+
+#define typedef_BDK_AP_SCTLR_EL2 bdk_ap_sctlr_el2_t
+#define bustype_BDK_AP_SCTLR_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_SCTLR_EL2 "AP_SCTLR_EL2"
+#define busnum_BDK_AP_SCTLR_EL2 0
+#define arguments_BDK_AP_SCTLR_EL2 -1,-1,-1,-1
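+
+/* Usage sketch (illustrative, not imported code): AP_SCTLR_EL2 and
+ * AP_SCTLR_EL2_E2H (below) share one MRS/MSR encoding; which layout applies
+ * depends on AP_HCR_EL2[E2H]. The hcr_e2h flag and the two consumers are
+ * hypothetical names for this sketch:
+ *
+ *   uint64_t raw = BDK_CSR_READ(bdk_numa_local(), BDK_AP_SCTLR_EL2);
+ *   if (hcr_e2h) {
+ *       bdk_ap_sctlr_el2_e2h_t s = { .u = (uint32_t)raw };
+ *       handle_el1_style(s);   // hypothetical: EL1-like field layout
+ *   } else {
+ *       bdk_ap_sctlr_el2_t s = { .u = (uint32_t)raw };
+ *       handle_el3_style(s);   // hypothetical: EL3-like field layout
+ *   }
+ */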
+
+/**
+ * Register (SYSREG) ap_sctlr_el2_e2h
+ *
+ * AP System Control E2H Register
+ * Provides top level control of the system, including its memory
+ * system, at EL2.
+ *
+ * This register is at the same select as AP_SCTLR_EL2 and is used when E2H=1.
+ */
+union bdk_ap_sctlr_el2_e2h
+{
+ uint32_t u;
+ struct bdk_ap_sctlr_el2_e2h_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_30_31 : 2;
+ uint32_t rsvd_28_29 : 2; /**< [ 29: 28](RO) Reserved 1. */
+ uint32_t reserved_27 : 1;
+ uint32_t uci : 1; /**< [ 26: 26](R/W) When set, enables EL0 access in AArch64 for DC CVAU, DC CIVAC,
+ DC CVAC, and IC IVAU instructions. */
+ uint32_t ee : 1; /**< [ 25: 25](R/W) Exception Endianness. This bit controls the endianness for:
+ Explicit data accesses at EL1.
+ Stage 1 translation table walks at EL1 and EL0.
+
+ If an implementation does not provide Big-endian support, this
+ bit is RES0. If it does not provide Little-endian support,
+ this bit is RES1.
+
+ The EE bit is permitted to be cached in a TLB.
+
+ 0 = Little-endian.
+ 1 = Big-endian. */
+ uint32_t e0e : 1; /**< [ 24: 24](R/W) Endianness of explicit data accesses at EL0.
+
+ If an implementation only supports Little-endian accesses at
+ EL0 then this bit is RES0.
+
+ If an implementation only supports Big-endian accesses at EL0
+ then this bit is RES1.
+
+ This bit has no effect on the endianness of LDTR* and STTR*
+ instructions executed at EL1.
+
+ 0 = Explicit data accesses at EL0 are little-endian.
+ 1 = Explicit data accesses at EL0 are big-endian. */
+        uint32_t span                  : 1;  /**< [ 23: 23](R/W) v8.1: SPAN: Set PSTATE/CPSR[AP_PAN] bit on taking an exception
+ to the EL1 exception level.
+                                                                 0 = PSTATE/CPSR[AP_PAN] is set on taking an exception to the EL1 exception level.
+                                                                 1 = PSTATE/CPSR[AP_PAN] is left unchanged on taking an exception to the EL1 exception
+                                                                 level.
+
+ This bit has no effect on the PSTATE/CPSR[AP_PAN] when taking exceptions to any other
+ exception level. */
+ uint32_t rsvd_22 : 1; /**< [ 22: 22](RO) Reserved 1. */
+ uint32_t reserved_21 : 1;
+ uint32_t rsvd_20 : 1; /**< [ 20: 20](RO) Reserved 1. */
+ uint32_t wxn : 1; /**< [ 19: 19](R/W) Write permission implies XN (Execute Never). This bit can be
+ used to require all memory regions with write permission to be
+ treated as XN.
+
+ The WXN bit is permitted to be cached in a TLB.
+
+ 0 = Regions with write permission are not forced to XN.
+ 1 = Regions with write permission are forced to XN. */
+ uint32_t ntwe : 1; /**< [ 18: 18](R/W) Not trap WFE.
+ Conditional WFE instructions that fail their condition do not
+ cause an exception if this bit is 0.
+ 0 = If a WFE instruction executed at EL0 would cause execution to
+ be suspended, such as if the event register is not set and
+ there is not a pending WFE wakeup event, it is taken as an
+                                                                 exception to EL1 using the 0x1 (trapped WFI/WFE) exception class.
+ 1 = WFE instructions are executed as normal. */
+ uint32_t reserved_17 : 1;
+ uint32_t ntwi : 1; /**< [ 16: 16](R/W) Not trap WFI.
+ Conditional WFI instructions that fail their condition do not
+ cause an exception if this bit is 0.
+ 0 = If a WFI instruction executed at EL0 would cause execution to
+ be suspended, such as if there is not a pending WFI wakeup
+                                                                 event, it is taken as an exception to EL1 using the 0x1 (trapped WFI/WFE) exception class.
+ 1 = WFI instructions are executed as normal. */
+ uint32_t uct : 1; /**< [ 15: 15](R/W) When set, enables EL0 access in AArch64 to the AP_CTR_EL0
+ register. */
+ uint32_t dze : 1; /**< [ 14: 14](R/W) Access to DC ZVA instruction at EL0.
+ 0 = Execution of the DC ZVA instruction is prohibited at EL0, and
+                                                                 it is treated as UNdefined at EL0.
+ 1 = Execution of the DC ZVA instruction is allowed at EL0. */
+ uint32_t reserved_13 : 1;
+ uint32_t i : 1; /**< [ 12: 12](R/W) Instruction cache enable. This is an enable bit for
+ instruction caches at EL0 and EL1:
+ When this bit is 0, all EL1 and EL0 Normal memory instruction
+ accesses are Non-cacheable.
+ If the AP_HCR_EL2[DC] bit is set to 1, then the nonsecure stage 1
+ EL1&0 translation regime is Cacheable regardless of the value
+ of this bit.
+ 0 = Instruction caches disabled at EL0 and EL1. If AP_SCTLR_EL1[M] is
+ set to 0, instruction accesses from stage 1 of the EL1&0
+ translation regime are to Normal memory, Outer Shareable,
+ Inner Non-cacheable, Outer Non-cacheable.
+ 1 = Instruction caches enabled at EL0 and EL1. If AP_SCTLR_EL1[M] is
+ set to 0, instruction accesses from stage 1 of the EL1&0
+ translation regime are to Normal memory, Outer Shareable,
+ Inner Write-Through, Outer Write-Through. */
+ uint32_t rsvd_11 : 1; /**< [ 11: 11](RO) Reserved 1. */
+ uint32_t reserved_10 : 1;
+ uint32_t uma : 1; /**< [ 9: 9](R/W) User Mask Access. Controls access to interrupt masks from EL0,
+ when EL0 is using AArch64.
+ 0 = Disable access to the interrupt masks from EL0.
+ 1 = Enable access to the interrupt masks from EL0. */
+ uint32_t rsvd_8 : 1; /**< [ 8: 8](RO) SETEND disable.
+ If an implementation does not support mixed endian operation,
+ this bit is RES1.
+ 0 = The SETEND instruction is available.
+ 1 = The SETEND instruction is UNALLOCATED.
+
+                                                                 SED: SETEND Disable - Only supported with 32 bit. */
+ uint32_t rsvd_7 : 1; /**< [ 7: 7](RO) "IT Disable.
+ 0 = The IT instruction functionality is available.
+ 1 = It is implementation defined whether the IT instruction is
+ treated as either:
+ * A 16-bit instruction, which can only be followed by another
+ 16-bit instruction.
+ * The first half of a 32-bit instruction.
+
+ An implementation might vary dynamically as to whether IT is
+ treated as a 16-bit instruction or the first half of a 32-bit
+ instruction.
+
+ All encodings of the IT instruction with hw1[3:0]!=1000 are
+ UNdefined and treated as unallocated.
+
+ All encodings of the subsequent instruction with the following
+ values for hw1 are UNdefined (and treated as unallocated):
+
+ - 0b11xxxxxxxxxxxxxx: All 32-bit instructions, and the 16-bit
+ instructions B, UDF, SVC, LDM, and STM.
+
+                                                                 - 0b1x11xxxxxxxxxxxx: All instructions in the Miscellaneous 16-bit instructions group.
+
+ - 0b1x100xxxxxxxxxxx: ADD Rd, PC, #imm
+
+ - 0b01001xxxxxxxxxxx: LDR Rd, [PC, #imm]
+
+ - 0b0100x1xxx1111xxx: ADD Rdn, PC; CMP Rn, PC; MOV Rd, PC; BX
+ PC; BLX PC.
+
+ - 0b010001xx1xxxx111: ADD PC, Rm; CMP PC, Rm; MOV PC, Rm. This
+ pattern also covers UNPREDICTABLE cases with BLX Rn.
+
+ Contrary to the standard treatment of conditional UNdefined
+ instructions in the ARM architecture, in this case these
+ instructions are always treated as UNdefined, regardless of
+ whether the instruction would pass or fail its condition codes
+ as a result of being in an IT block.
+
+                                                                 ITD: IT Disable - Only supported with 32 bit." */
+ uint32_t reserved_5_6 : 2;
+ uint32_t sa0 : 1; /**< [ 4: 4](R/W) Stack Alignment Check Enable for EL0. When set, use of the
+ stack pointer as the base address in a load/store instruction
+ at EL0 must be aligned to a 16-byte boundary, or a Stack
+ Alignment Fault exception will be raised. */
+ uint32_t sa : 1; /**< [ 3: 3](R/W) Stack Alignment Check Enable. When set, use of the stack
+ pointer as the base address in a load/store instruction at
+ this register's Exception level must be aligned to a 16-byte
+ boundary, or a Stack Alignment Fault exception will be raised. */
+ uint32_t cc : 1; /**< [ 2: 2](R/W) Cache enable. This is an enable bit for data and unified
+ caches at EL0 and EL1:
+ When this bit is 0, all EL0 and EL1 Normal memory data
+ accesses and all accesses to the EL1&0 stage 1 translation
+ tables are Non-cacheable.
+ If the AP_HCR_EL2[DC] bit is set to 1, then the nonsecure stage 1
+ EL1&0 translation regime is Cacheable regardless of the value
+ of the AP_SCTLR_EL1[C] bit.
+ 0 = Data and unified caches disabled.
+ 1 = Data and unified caches enabled. */
+ uint32_t aa : 1; /**< [ 1: 1](R/W) Alignment check enable. This is the enable bit for Alignment
+ fault checking.
+
+ Load/store exclusive and load-acquire/store-release
+ instructions have an alignment check regardless of the value
+ of the A bit.
+
+ 0 = Alignment fault checking disabled.
+ Instructions that load or store one or more registers, other
+ than load/store exclusive and load-acquire/store-release, do
+ not check that the address being accessed is aligned to the
+ size of the data element(s) being accessed.
+
+ 1 = Alignment fault checking enabled.
+ All instructions that load or store one or more registers have
+ an alignment check that the address being accessed is aligned
+ to the size of the data element(s) being accessed. If this
+ check fails it causes an Alignment fault, which is taken as a
+ Data Abort exception. */
+ uint32_t m : 1; /**< [ 0: 0](R/W) MMU enable for EL1 and EL0 stage 1 address translation.
+
+ If AP_HCR_EL2[DC] is set to 1, then in nonsecure state the
+ AP_SCTLR_EL1[M] bit behaves as 0 for all purposes other than
+ reading the value of the bit.
+ 0 = EL1 and EL0 stage 1 address translation disabled.
+ 1 = EL1 and EL0 stage 1 address translation enabled. */
+#else /* Word 0 - Little Endian */
+ uint32_t m : 1; /**< [ 0: 0](R/W) MMU enable for EL1 and EL0 stage 1 address translation.
+
+ If AP_HCR_EL2[DC] is set to 1, then in nonsecure state the
+ AP_SCTLR_EL1[M] bit behaves as 0 for all purposes other than
+ reading the value of the bit.
+ 0 = EL1 and EL0 stage 1 address translation disabled.
+ 1 = EL1 and EL0 stage 1 address translation enabled. */
+ uint32_t aa : 1; /**< [ 1: 1](R/W) Alignment check enable. This is the enable bit for Alignment
+ fault checking.
+
+ Load/store exclusive and load-acquire/store-release
+ instructions have an alignment check regardless of the value
+ of the A bit.
+
+ 0 = Alignment fault checking disabled.
+ Instructions that load or store one or more registers, other
+ than load/store exclusive and load-acquire/store-release, do
+ not check that the address being accessed is aligned to the
+ size of the data element(s) being accessed.
+
+ 1 = Alignment fault checking enabled.
+ All instructions that load or store one or more registers have
+ an alignment check that the address being accessed is aligned
+ to the size of the data element(s) being accessed. If this
+ check fails it causes an Alignment fault, which is taken as a
+ Data Abort exception. */
+ uint32_t cc : 1; /**< [ 2: 2](R/W) Cache enable. This is an enable bit for data and unified
+ caches at EL0 and EL1:
+ When this bit is 0, all EL0 and EL1 Normal memory data
+ accesses and all accesses to the EL1&0 stage 1 translation
+ tables are Non-cacheable.
+ If the AP_HCR_EL2[DC] bit is set to 1, then the nonsecure stage 1
+ EL1&0 translation regime is Cacheable regardless of the value
+ of the AP_SCTLR_EL1[C] bit.
+ 0 = Data and unified caches disabled.
+ 1 = Data and unified caches enabled. */
+ uint32_t sa : 1; /**< [ 3: 3](R/W) Stack Alignment Check Enable. When set, use of the stack
+ pointer as the base address in a load/store instruction at
+ this register's Exception level must be aligned to a 16-byte
+ boundary, or a Stack Alignment Fault exception will be raised. */
+ uint32_t sa0 : 1; /**< [ 4: 4](R/W) Stack Alignment Check Enable for EL0. When set, use of the
+ stack pointer as the base address in a load/store instruction
+ at EL0 must be aligned to a 16-byte boundary, or a Stack
+ Alignment Fault exception will be raised. */
+ uint32_t reserved_5_6 : 2;
+ uint32_t rsvd_7 : 1; /**< [ 7: 7](RO) "IT Disable.
+ 0 = The IT instruction functionality is available.
+ 1 = It is implementation defined whether the IT instruction is
+ treated as either:
+ * A 16-bit instruction, which can only be followed by another
+ 16-bit instruction.
+ * The first half of a 32-bit instruction.
+
+ An implementation might vary dynamically as to whether IT is
+ treated as a 16-bit instruction or the first half of a 32-bit
+ instruction.
+
+ All encodings of the IT instruction with hw1[3:0]!=1000 are
+ UNdefined and treated as unallocated.
+
+ All encodings of the subsequent instruction with the following
+ values for hw1 are UNdefined (and treated as unallocated):
+
+ - 0b11xxxxxxxxxxxxxx: All 32-bit instructions, and the 16-bit
+ instructions B, UDF, SVC, LDM, and STM.
+
+ - 0b1x11xxxxxxxxxxxx: All instructions in the miscellaneous
+ 16-bit instruction group.
+
+ - 0b1x100xxxxxxxxxxx: ADD Rd, PC, #imm
+
+ - 0b01001xxxxxxxxxxx: LDR Rd, [PC, #imm]
+
+ - 0b0100x1xxx1111xxx: ADD Rdn, PC; CMP Rn, PC; MOV Rd, PC; BX
+ PC; BLX PC.
+
+ - 0b010001xx1xxxx111: ADD PC, Rm; CMP PC, Rm; MOV PC, Rm. This
+ pattern also covers UNPREDICTABLE cases with BLX Rn.
+
+ Contrary to the standard treatment of conditional UNdefined
+ instructions in the ARM architecture, in this case these
+ instructions are always treated as UNdefined, regardless of
+ whether the instruction would pass or fail its condition codes
+ as a result of being in an IT block.
+
+ ITD: IT Disable - Only supported with 32-bit execution." */
+ uint32_t rsvd_8 : 1; /**< [ 8: 8](RO) SETEND disable.
+ If an implementation does not support mixed endian operation,
+ this bit is RES1.
+ 0 = The SETEND instruction is available.
+ 1 = The SETEND instruction is UNALLOCATED.
+
+ SED: SETEND Disable - Only supported with 32-bit execution. */
+ uint32_t uma : 1; /**< [ 9: 9](R/W) User Mask Access. Controls access to interrupt masks from EL0,
+ when EL0 is using AArch64.
+ 0 = Disable access to the interrupt masks from EL0.
+ 1 = Enable access to the interrupt masks from EL0. */
+ uint32_t reserved_10 : 1;
+ uint32_t rsvd_11 : 1; /**< [ 11: 11](RO) Reserved 1. */
+ uint32_t i : 1; /**< [ 12: 12](R/W) Instruction cache enable. This is an enable bit for
+ instruction caches at EL0 and EL1:
+ When this bit is 0, all EL1 and EL0 Normal memory instruction
+ accesses are Non-cacheable.
+ If the AP_HCR_EL2[DC] bit is set to 1, then the nonsecure stage 1
+ EL1&0 translation regime is Cacheable regardless of the value
+ of this bit.
+ 0 = Instruction caches disabled at EL0 and EL1. If AP_SCTLR_EL1[M] is
+ set to 0, instruction accesses from stage 1 of the EL1&0
+ translation regime are to Normal memory, Outer Shareable,
+ Inner Non-cacheable, Outer Non-cacheable.
+ 1 = Instruction caches enabled at EL0 and EL1. If AP_SCTLR_EL1[M] is
+ set to 0, instruction accesses from stage 1 of the EL1&0
+ translation regime are to Normal memory, Outer Shareable,
+ Inner Write-Through, Outer Write-Through. */
+ uint32_t reserved_13 : 1;
+ uint32_t dze : 1; /**< [ 14: 14](R/W) Access to DC ZVA instruction at EL0.
+ 0 = Execution of the DC ZVA instruction is prohibited at EL0, and
+ it is treated as undefined at EL0.
+ 1 = Execution of the DC ZVA instruction is allowed at EL0. */
+ uint32_t uct : 1; /**< [ 15: 15](R/W) When set, enables EL0 access in AArch64 to the AP_CTR_EL0
+ register. */
+ uint32_t ntwi : 1; /**< [ 16: 16](R/W) Not trap WFI.
+ Conditional WFI instructions that fail their condition do not
+ cause an exception if this bit is 0.
+ 0 = If a WFI instruction executed at EL0 would cause execution to
+ be suspended, such as if there is not a pending WFI wakeup
+ event, it is taken as an exception to EL1 using the ESR code 0x1.
+ 1 = WFI instructions are executed as normal. */
+ uint32_t reserved_17 : 1;
+ uint32_t ntwe : 1; /**< [ 18: 18](R/W) Not trap WFE.
+ Conditional WFE instructions that fail their condition do not
+ cause an exception if this bit is 0.
+ 0 = If a WFE instruction executed at EL0 would cause execution to
+ be suspended, such as if the event register is not set and
+ there is not a pending WFE wakeup event, it is taken as an
+ exception to EL1 using the ESR code 0x1.
+ 1 = WFE instructions are executed as normal. */
+ uint32_t wxn : 1; /**< [ 19: 19](R/W) Write permission implies XN (Execute Never). This bit can be
+ used to require all memory regions with write permission to be
+ treated as XN.
+
+ The WXN bit is permitted to be cached in a TLB.
+
+ 0 = Regions with write permission are not forced to XN.
+ 1 = Regions with write permission are forced to XN. */
+ uint32_t rsvd_20 : 1; /**< [ 20: 20](RO) Reserved 1. */
+ uint32_t reserved_21 : 1;
+ uint32_t rsvd_22 : 1; /**< [ 22: 22](RO) Reserved 1. */
+ uint32_t span : 1; /**< [ 23: 23](R/W) v8.1 (SPAN): Controls whether the PSTATE/CPSR[AP_PAN] bit is set
+ on taking an exception to EL1.
+ 0 = PSTATE/CPSR[AP_PAN] is set on taking an exception to the EL1 exception level
+ 1 = PSTATE/CPSR[AP_PAN] is left unchanged on taking an exception to the EL1 exception
+ level
+
+ This bit has no effect on the PSTATE/CPSR[AP_PAN] when taking exceptions to any other
+ exception level. */
+ uint32_t e0e : 1; /**< [ 24: 24](R/W) Endianness of explicit data accesses at EL0.
+
+ If an implementation only supports Little-endian accesses at
+ EL0 then this bit is RES0.
+
+ If an implementation only supports Big-endian accesses at EL0
+ then this bit is RES1.
+
+ This bit has no effect on the endianness of LDTR* and STTR*
+ instructions executed at EL1.
+
+ 0 = Explicit data accesses at EL0 are little-endian.
+ 1 = Explicit data accesses at EL0 are big-endian. */
+ uint32_t ee : 1; /**< [ 25: 25](R/W) Exception Endianness. This bit controls the endianness for:
+ Explicit data accesses at EL1.
+ Stage 1 translation table walks at EL1 and EL0.
+
+ If an implementation does not provide Big-endian support, this
+ bit is RES0. If it does not provide Little-endian support,
+ this bit is RES1.
+
+ The EE bit is permitted to be cached in a TLB.
+
+ 0 = Little-endian.
+ 1 = Big-endian. */
+ uint32_t uci : 1; /**< [ 26: 26](R/W) When set, enables EL0 access in AArch64 for DC CVAU, DC CIVAC,
+ DC CVAC, and IC IVAU instructions. */
+ uint32_t reserved_27 : 1;
+ uint32_t rsvd_28_29 : 2; /**< [ 29: 28](RO) Reserved 1. */
+ uint32_t reserved_30_31 : 2;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_sctlr_el2_e2h_cn
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_30_31 : 2;
+ uint32_t rsvd_28_29 : 2; /**< [ 29: 28](RO) Reserved 1. */
+ uint32_t reserved_27 : 1;
+ uint32_t uci : 1; /**< [ 26: 26](R/W) When set, enables EL0 access in AArch64 for DC CVAU, DC CIVAC,
+ DC CVAC, and IC IVAU instructions. */
+ uint32_t ee : 1; /**< [ 25: 25](R/W) Exception Endianness. This bit controls the endianness for:
+ Explicit data accesses at EL1.
+ Stage 1 translation table walks at EL1 and EL0.
+
+ If an implementation does not provide Big-endian support, this
+ bit is RES0. If it does not provide Little-endian support,
+ this bit is RES1.
+
+ The EE bit is permitted to be cached in a TLB.
+
+ 0 = Little-endian.
+ 1 = Big-endian. */
+ uint32_t e0e : 1; /**< [ 24: 24](R/W) Endianness of explicit data accesses at EL0.
+
+ If an implementation only supports Little-endian accesses at
+ EL0 then this bit is RES0.
+
+ If an implementation only supports Big-endian accesses at EL0
+ then this bit is RES1.
+
+ This bit has no effect on the endianness of LDTR* and STTR*
+ instructions executed at EL1.
+
+ 0 = Explicit data accesses at EL0 are little-endian.
+ 1 = Explicit data accesses at EL0 are big-endian. */
+ uint32_t span : 1; /**< [ 23: 23](R/W) v8.1 (SPAN): Controls whether the PSTATE/CPSR[AP_PAN] bit is set
+ on taking an exception to EL1.
+ 0 = PSTATE/CPSR[AP_PAN] is set on taking an exception to the EL1 exception level
+ 1 = PSTATE/CPSR[AP_PAN] is left unchanged on taking an exception to the EL1 exception
+ level
+
+ This bit has no effect on the PSTATE/CPSR[AP_PAN] when taking exceptions to any other
+ exception level. */
+ uint32_t rsvd_22 : 1; /**< [ 22: 22](RO) Reserved 1. */
+ uint32_t reserved_21 : 1;
+ uint32_t rsvd_20 : 1; /**< [ 20: 20](RO) Reserved 1. */
+ uint32_t wxn : 1; /**< [ 19: 19](R/W) Write permission implies XN (Execute Never). This bit can be
+ used to require all memory regions with write permission to be
+ treated as XN.
+
+ The WXN bit is permitted to be cached in a TLB.
+
+ 0 = Regions with write permission are not forced to XN.
+ 1 = Regions with write permission are forced to XN. */
+ uint32_t ntwe : 1; /**< [ 18: 18](R/W) Not trap WFE.
+ Conditional WFE instructions that fail their condition do not
+ cause an exception if this bit is 0.
+ 0 = If a WFE instruction executed at EL0 would cause execution to
+ be suspended, such as if the event register is not set and
+ there is not a pending WFE wakeup event, it is taken as an
+ exception to EL1 using the ESR code 0x1.
+ 1 = WFE instructions are executed as normal. */
+ uint32_t reserved_17 : 1;
+ uint32_t ntwi : 1; /**< [ 16: 16](R/W) Not trap WFI.
+ Conditional WFI instructions that fail their condition do not
+ cause an exception if this bit is 0.
+ 0 = If a WFI instruction executed at EL0 would cause execution to
+ be suspended, such as if there is not a pending WFI wakeup
+ event, it is taken as an exception to EL1 using the ESR code 0x1.
+ 1 = WFI instructions are executed as normal. */
+ uint32_t uct : 1; /**< [ 15: 15](R/W) When set, enables EL0 access in AArch64 to the AP_CTR_EL0
+ register. */
+ uint32_t dze : 1; /**< [ 14: 14](R/W) Access to DC ZVA instruction at EL0.
+ 0 = Execution of the DC ZVA instruction is prohibited at EL0, and
+ it is treated as undefined at EL0.
+ 1 = Execution of the DC ZVA instruction is allowed at EL0. */
+ uint32_t reserved_13 : 1;
+ uint32_t i : 1; /**< [ 12: 12](R/W) Instruction cache enable. This is an enable bit for
+ instruction caches at EL0 and EL1:
+ When this bit is 0, all EL1 and EL0 Normal memory instruction
+ accesses are Non-cacheable.
+ If the AP_HCR_EL2[DC] bit is set to 1, then the nonsecure stage 1
+ EL1&0 translation regime is Cacheable regardless of the value
+ of this bit.
+ 0 = Instruction caches disabled at EL0 and EL1. If AP_SCTLR_EL1[M] is
+ set to 0, instruction accesses from stage 1 of the EL1&0
+ translation regime are to Normal memory, Outer Shareable,
+ Inner Non-cacheable, Outer Non-cacheable.
+ 1 = Instruction caches enabled at EL0 and EL1. If AP_SCTLR_EL1[M] is
+ set to 0, instruction accesses from stage 1 of the EL1&0
+ translation regime are to Normal memory, Outer Shareable,
+ Inner Write-Through, Outer Write-Through. */
+ uint32_t rsvd_11 : 1; /**< [ 11: 11](RO) Reserved 1. */
+ uint32_t reserved_10 : 1;
+ uint32_t uma : 1; /**< [ 9: 9](R/W) User Mask Access. Controls access to interrupt masks from EL0,
+ when EL0 is using AArch64.
+ 0 = Disable access to the interrupt masks from EL0.
+ 1 = Enable access to the interrupt masks from EL0. */
+ uint32_t rsvd_8 : 1; /**< [ 8: 8](RO) SETEND disable.
+ If an implementation does not support mixed endian operation,
+ this bit is RES1.
+ 0 = The SETEND instruction is available.
+ 1 = The SETEND instruction is UNALLOCATED.
+
+ SED: SETEND Disable - Only supported with 32-bit execution. */
+ uint32_t rsvd_7 : 1; /**< [ 7: 7](RO) "IT Disable.
+ 0 = The IT instruction functionality is available.
+ 1 = It is implementation defined whether the IT instruction is
+ treated as either:
+ * A 16-bit instruction, which can only be followed by another
+ 16-bit instruction.
+ * The first half of a 32-bit instruction.
+
+ An implementation might vary dynamically as to whether IT is
+ treated as a 16-bit instruction or the first half of a 32-bit
+ instruction.
+
+ All encodings of the IT instruction with hw1[3:0]!=1000 are
+ UNdefined and treated as unallocated.
+
+ All encodings of the subsequent instruction with the following
+ values for hw1 are UNdefined (and treated as unallocated):
+
+ - 0b11xxxxxxxxxxxxxx: All 32-bit instructions, and the 16-bit
+ instructions B, UDF, SVC, LDM, and STM.
+
+ - 0b1x11xxxxxxxxxxxx: All instructions in the miscellaneous
+ 16-bit instruction group.
+
+ - 0b1x100xxxxxxxxxxx: ADD Rd, PC, #imm
+
+ - 0b01001xxxxxxxxxxx: LDR Rd, [PC, #imm]
+
+ - 0b0100x1xxx1111xxx: ADD Rdn, PC; CMP Rn, PC; MOV Rd, PC; BX
+ PC; BLX PC.
+
+ - 0b010001xx1xxxx111: ADD PC, Rm; CMP PC, Rm; MOV PC, Rm. This
+ pattern also covers UNPREDICTABLE cases with BLX Rn.
+
+ Contrary to the standard treatment of conditional UNdefined
+ instructions in the ARM architecture, in this case these
+ instructions are always treated as UNdefined, regardless of
+ whether the instruction would pass or fail its condition codes
+ as a result of being in an IT block.
+
+ ITD: IT Disable - Only supported with 32-bit execution." */
+ uint32_t reserved_6 : 1;
+ uint32_t reserved_5 : 1;
+ uint32_t sa0 : 1; /**< [ 4: 4](R/W) Stack Alignment Check Enable for EL0. When set, use of the
+ stack pointer as the base address in a load/store instruction
+ at EL0 must be aligned to a 16-byte boundary, or a Stack
+ Alignment Fault exception will be raised. */
+ uint32_t sa : 1; /**< [ 3: 3](R/W) Stack Alignment Check Enable. When set, use of the stack
+ pointer as the base address in a load/store instruction at
+ this register's Exception level must be aligned to a 16-byte
+ boundary, or a Stack Alignment Fault exception will be raised. */
+ uint32_t cc : 1; /**< [ 2: 2](R/W) Cache enable. This is an enable bit for data and unified
+ caches at EL0 and EL1:
+ When this bit is 0, all EL0 and EL1 Normal memory data
+ accesses and all accesses to the EL1&0 stage 1 translation
+ tables are Non-cacheable.
+ If the AP_HCR_EL2[DC] bit is set to 1, then the nonsecure stage 1
+ EL1&0 translation regime is Cacheable regardless of the value
+ of the AP_SCTLR_EL1[C] bit.
+ 0 = Data and unified caches disabled.
+ 1 = Data and unified caches enabled. */
+ uint32_t aa : 1; /**< [ 1: 1](R/W) Alignment check enable. This is the enable bit for Alignment
+ fault checking.
+
+ Load/store exclusive and load-acquire/store-release
+ instructions have an alignment check regardless of the value
+ of the A bit.
+
+ 0 = Alignment fault checking disabled.
+ Instructions that load or store one or more registers, other
+ than load/store exclusive and load-acquire/store-release, do
+ not check that the address being accessed is aligned to the
+ size of the data element(s) being accessed.
+
+ 1 = Alignment fault checking enabled.
+ All instructions that load or store one or more registers have
+ an alignment check that the address being accessed is aligned
+ to the size of the data element(s) being accessed. If this
+ check fails it causes an Alignment fault, which is taken as a
+ Data Abort exception. */
+ uint32_t m : 1; /**< [ 0: 0](R/W) MMU enable for EL1 and EL0 stage 1 address translation.
+
+ If AP_HCR_EL2[DC] is set to 1, then in nonsecure state the
+ AP_SCTLR_EL1[M] bit behaves as 0 for all purposes other than
+ reading the value of the bit.
+ 0 = EL1 and EL0 stage 1 address translation disabled.
+ 1 = EL1 and EL0 stage 1 address translation enabled. */
+#else /* Word 0 - Little Endian */
+ uint32_t m : 1; /**< [ 0: 0](R/W) MMU enable for EL1 and EL0 stage 1 address translation.
+
+ If AP_HCR_EL2[DC] is set to 1, then in nonsecure state the
+ AP_SCTLR_EL1[M] bit behaves as 0 for all purposes other than
+ reading the value of the bit.
+ 0 = EL1 and EL0 stage 1 address translation disabled.
+ 1 = EL1 and EL0 stage 1 address translation enabled. */
+ uint32_t aa : 1; /**< [ 1: 1](R/W) Alignment check enable. This is the enable bit for Alignment
+ fault checking.
+
+ Load/store exclusive and load-acquire/store-release
+ instructions have an alignment check regardless of the value
+ of the A bit.
+
+ 0 = Alignment fault checking disabled.
+ Instructions that load or store one or more registers, other
+ than load/store exclusive and load-acquire/store-release, do
+ not check that the address being accessed is aligned to the
+ size of the data element(s) being accessed.
+
+ 1 = Alignment fault checking enabled.
+ All instructions that load or store one or more registers have
+ an alignment check that the address being accessed is aligned
+ to the size of the data element(s) being accessed. If this
+ check fails it causes an Alignment fault, which is taken as a
+ Data Abort exception. */
+ uint32_t cc : 1; /**< [ 2: 2](R/W) Cache enable. This is an enable bit for data and unified
+ caches at EL0 and EL1:
+ When this bit is 0, all EL0 and EL1 Normal memory data
+ accesses and all accesses to the EL1&0 stage 1 translation
+ tables are Non-cacheable.
+ If the AP_HCR_EL2[DC] bit is set to 1, then the nonsecure stage 1
+ EL1&0 translation regime is Cacheable regardless of the value
+ of the AP_SCTLR_EL1[C] bit.
+ 0 = Data and unified caches disabled.
+ 1 = Data and unified caches enabled. */
+ uint32_t sa : 1; /**< [ 3: 3](R/W) Stack Alignment Check Enable. When set, use of the stack
+ pointer as the base address in a load/store instruction at
+ this register's Exception level must be aligned to a 16-byte
+ boundary, or a Stack Alignment Fault exception will be raised. */
+ uint32_t sa0 : 1; /**< [ 4: 4](R/W) Stack Alignment Check Enable for EL0. When set, use of the
+ stack pointer as the base address in a load/store instruction
+ at EL0 must be aligned to a 16-byte boundary, or a Stack
+ Alignment Fault exception will be raised. */
+ uint32_t reserved_5 : 1;
+ uint32_t reserved_6 : 1;
+ uint32_t rsvd_7 : 1; /**< [ 7: 7](RO) "IT Disable.
+ 0 = The IT instruction functionality is available.
+ 1 = It is implementation defined whether the IT instruction is
+ treated as either:
+ * A 16-bit instruction, which can only be followed by another
+ 16-bit instruction.
+ * The first half of a 32-bit instruction.
+
+ An implementation might vary dynamically as to whether IT is
+ treated as a 16-bit instruction or the first half of a 32-bit
+ instruction.
+
+ All encodings of the IT instruction with hw1[3:0]!=1000 are
+ UNdefined and treated as unallocated.
+
+ All encodings of the subsequent instruction with the following
+ values for hw1 are UNdefined (and treated as unallocated):
+
+ - 0b11xxxxxxxxxxxxxx: All 32-bit instructions, and the 16-bit
+ instructions B, UDF, SVC, LDM, and STM.
+
+ - 0b1x11xxxxxxxxxxxx: All instructions in the miscellaneous
+ 16-bit instruction group.
+
+ - 0b1x100xxxxxxxxxxx: ADD Rd, PC, #imm
+
+ - 0b01001xxxxxxxxxxx: LDR Rd, [PC, #imm]
+
+ - 0b0100x1xxx1111xxx: ADD Rdn, PC; CMP Rn, PC; MOV Rd, PC; BX
+ PC; BLX PC.
+
+ - 0b010001xx1xxxx111: ADD PC, Rm; CMP PC, Rm; MOV PC, Rm. This
+ pattern also covers UNPREDICTABLE cases with BLX Rn.
+
+ Contrary to the standard treatment of conditional UNdefined
+ instructions in the ARM architecture, in this case these
+ instructions are always treated as UNdefined, regardless of
+ whether the instruction would pass or fail its condition codes
+ as a result of being in an IT block.
+
+ ITD: IT Disable - Only supported with 32-bit execution." */
+ uint32_t rsvd_8 : 1; /**< [ 8: 8](RO) SETEND disable.
+ If an implementation does not support mixed endian operation,
+ this bit is RES1.
+ 0 = The SETEND instruction is available.
+ 1 = The SETEND instruction is UNALLOCATED.
+
+ SED: SETEND Disable - Only supported with 32-bit execution. */
+ uint32_t uma : 1; /**< [ 9: 9](R/W) User Mask Access. Controls access to interrupt masks from EL0,
+ when EL0 is using AArch64.
+ 0 = Disable access to the interrupt masks from EL0.
+ 1 = Enable access to the interrupt masks from EL0. */
+ uint32_t reserved_10 : 1;
+ uint32_t rsvd_11 : 1; /**< [ 11: 11](RO) Reserved 1. */
+ uint32_t i : 1; /**< [ 12: 12](R/W) Instruction cache enable. This is an enable bit for
+ instruction caches at EL0 and EL1:
+ When this bit is 0, all EL1 and EL0 Normal memory instruction
+ accesses are Non-cacheable.
+ If the AP_HCR_EL2[DC] bit is set to 1, then the nonsecure stage 1
+ EL1&0 translation regime is Cacheable regardless of the value
+ of this bit.
+ 0 = Instruction caches disabled at EL0 and EL1. If AP_SCTLR_EL1[M] is
+ set to 0, instruction accesses from stage 1 of the EL1&0
+ translation regime are to Normal memory, Outer Shareable,
+ Inner Non-cacheable, Outer Non-cacheable.
+ 1 = Instruction caches enabled at EL0 and EL1. If AP_SCTLR_EL1[M] is
+ set to 0, instruction accesses from stage 1 of the EL1&0
+ translation regime are to Normal memory, Outer Shareable,
+ Inner Write-Through, Outer Write-Through. */
+ uint32_t reserved_13 : 1;
+ uint32_t dze : 1; /**< [ 14: 14](R/W) Access to DC ZVA instruction at EL0.
+ 0 = Execution of the DC ZVA instruction is prohibited at EL0, and
+ it is treated as undefined at EL0.
+ 1 = Execution of the DC ZVA instruction is allowed at EL0. */
+ uint32_t uct : 1; /**< [ 15: 15](R/W) When set, enables EL0 access in AArch64 to the AP_CTR_EL0
+ register. */
+ uint32_t ntwi : 1; /**< [ 16: 16](R/W) Not trap WFI.
+ Conditional WFI instructions that fail their condition do not
+ cause an exception if this bit is 0.
+ 0 = If a WFI instruction executed at EL0 would cause execution to
+ be suspended, such as if there is not a pending WFI wakeup
+ event, it is taken as an exception to EL1 using the ESR code 0x1.
+ 1 = WFI instructions are executed as normal. */
+ uint32_t reserved_17 : 1;
+ uint32_t ntwe : 1; /**< [ 18: 18](R/W) Not trap WFE.
+ Conditional WFE instructions that fail their condition do not
+ cause an exception if this bit is 0.
+ 0 = If a WFE instruction executed at EL0 would cause execution to
+ be suspended, such as if the event register is not set and
+ there is not a pending WFE wakeup event, it is taken as an
+ exception to EL1 using the ESR code 0x1.
+ 1 = WFE instructions are executed as normal. */
+ uint32_t wxn : 1; /**< [ 19: 19](R/W) Write permission implies XN (Execute Never). This bit can be
+ used to require all memory regions with write permission to be
+ treated as XN.
+
+ The WXN bit is permitted to be cached in a TLB.
+
+ 0 = Regions with write permission are not forced to XN.
+ 1 = Regions with write permission are forced to XN. */
+ uint32_t rsvd_20 : 1; /**< [ 20: 20](RO) Reserved 1. */
+ uint32_t reserved_21 : 1;
+ uint32_t rsvd_22 : 1; /**< [ 22: 22](RO) Reserved 1. */
+ uint32_t span : 1; /**< [ 23: 23](R/W) v8.1 (SPAN): Controls whether the PSTATE/CPSR[AP_PAN] bit is set
+ on taking an exception to EL1.
+ 0 = PSTATE/CPSR[AP_PAN] is set on taking an exception to the EL1 exception level
+ 1 = PSTATE/CPSR[AP_PAN] is left unchanged on taking an exception to the EL1 exception
+ level
+
+ This bit has no effect on the PSTATE/CPSR[AP_PAN] when taking exceptions to any other
+ exception level. */
+ uint32_t e0e : 1; /**< [ 24: 24](R/W) Endianness of explicit data accesses at EL0.
+
+ If an implementation only supports Little-endian accesses at
+ EL0 then this bit is RES0.
+
+ If an implementation only supports Big-endian accesses at EL0
+ then this bit is RES1.
+
+ This bit has no effect on the endianness of LDTR* and STTR*
+ instructions executed at EL1.
+
+ 0 = Explicit data accesses at EL0 are little-endian.
+ 1 = Explicit data accesses at EL0 are big-endian. */
+ uint32_t ee : 1; /**< [ 25: 25](R/W) Exception Endianness. This bit controls the endianness for:
+ Explicit data accesses at EL1.
+ Stage 1 translation table walks at EL1 and EL0.
+
+ If an implementation does not provide Big-endian support, this
+ bit is RES0. If it does not provide Little-endian support,
+ this bit is RES1.
+
+ The EE bit is permitted to be cached in a TLB.
+
+ 0 = Little-endian.
+ 1 = Big-endian. */
+ uint32_t uci : 1; /**< [ 26: 26](R/W) When set, enables EL0 access in AArch64 for DC CVAU, DC CIVAC,
+ DC CVAC, and IC IVAU instructions. */
+ uint32_t reserved_27 : 1;
+ uint32_t rsvd_28_29 : 2; /**< [ 29: 28](RO) Reserved 1. */
+ uint32_t reserved_30_31 : 2;
+#endif /* Word 0 - End */
+ } cn;
+};
+typedef union bdk_ap_sctlr_el2_e2h bdk_ap_sctlr_el2_e2h_t;
+
+#define BDK_AP_SCTLR_EL2_E2H BDK_AP_SCTLR_EL2_E2H_FUNC()
+static inline uint64_t BDK_AP_SCTLR_EL2_E2H_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_SCTLR_EL2_E2H_FUNC(void)
+{
+ return 0x30401000010ll;
+}
+
+#define typedef_BDK_AP_SCTLR_EL2_E2H bdk_ap_sctlr_el2_e2h_t
+#define bustype_BDK_AP_SCTLR_EL2_E2H BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_SCTLR_EL2_E2H "AP_SCTLR_EL2_E2H"
+#define busnum_BDK_AP_SCTLR_EL2_E2H 0
+#define arguments_BDK_AP_SCTLR_EL2_E2H -1,-1,-1,-1
+
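+/* Editorial usage sketch (not part of the imported BDK sources): the
+ * union's bitfield view gives structured access to a raw SCTLR image.
+ * The helper name below is hypothetical and only illustrates decoding;
+ * it assumes nothing beyond the bdk_ap_sctlr_el2_e2h_t type above. */
+static inline int bdk_ap_sctlr_el2_e2h_mmu_and_dcache_on(uint32_t raw)
+{
+ bdk_ap_sctlr_el2_e2h_t sctlr;
+ sctlr.u = raw; /* load the raw 32-bit register image */
+ return sctlr.s.m && sctlr.s.cc; /* [M] (MMU) and [C] (cache) both set */
+}
+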
+/**
+ * Register (SYSREG) ap_sctlr_el3
+ *
+ * AP System Control Register
+ * Provides top level control of the system, including its memory
+ * system, at EL3.
+ */
+union bdk_ap_sctlr_el3
+{
+ uint32_t u;
+ struct bdk_ap_sctlr_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_30_31 : 2;
+ uint32_t rsvd_28_29 : 2; /**< [ 29: 28](RO) Reserved 1. */
+ uint32_t reserved_26_27 : 2;
+ uint32_t ee : 1; /**< [ 25: 25](R/W) Exception Endianness. This bit controls the endianness for:
+ Explicit data accesses at EL3.
+ Stage 1 translation table walks at EL3.
+
+ If an implementation does not provide Big-endian support, this
+ bit is RES0. If it does not provide Little-endian support,
+ this bit is RES1.
+
+ The EE bit is permitted to be cached in a TLB.
+ 0 = Little-endian.
+ 1 = Big-endian. */
+ uint32_t reserved_24 : 1;
+ uint32_t rsvd_23 : 1; /**< [ 23: 23](RO) Reserved 1. */
+ uint32_t rsvd_22 : 1; /**< [ 22: 22](RO) Reserved 1. */
+ uint32_t reserved_20_21 : 2;
+ uint32_t wxn : 1; /**< [ 19: 19](R/W) Write permission implies XN (Execute Never). This bit can be
+ used to require all memory regions with write permission to be
+ treated as XN.
+ The WXN bit is permitted to be cached in a TLB.
+ 0 = Regions with write permission are not forced to XN.
+ 1 = Regions with write permission are forced to XN. */
+ uint32_t rsvd_18 : 1; /**< [ 18: 18](RO) Reserved 1. */
+ uint32_t reserved_17 : 1;
+ uint32_t rsvd_16 : 1; /**< [ 16: 16](RO) Reserved 1. */
+ uint32_t reserved_13_15 : 3;
+ uint32_t i : 1; /**< [ 12: 12](R/W) Instruction cache enable. This is an enable bit for
+ instruction caches at EL3:
+
+ When this bit is 0, all EL3 Normal memory instruction accesses
+ are Non-cacheable. This bit has no effect on the EL1&0 or EL2
+ translation regimes.
+
+ 0 = Instruction caches disabled at EL3. If AP_SCTLR_EL3[M] is set to
+ 0, instruction accesses from stage 1 of the EL3 translation
+ regime are to Normal memory, Outer Shareable, Inner Non-
+ cacheable, Outer Non-cacheable.
+ 1 = Instruction caches enabled at EL3. If AP_SCTLR_EL3[M] is set to 0,
+ instruction accesses from stage 1 of the EL3 translation
+ regime are to Normal memory, Outer Shareable, Inner Write-
+ Through, Outer Write-Through. */
+ uint32_t rsvd_11 : 1; /**< [ 11: 11](RO) Reserved 1. */
+ uint32_t reserved_6_10 : 5;
+ uint32_t rsvd_4_5 : 2; /**< [ 5: 4](RO) Reserved 1. */
+ uint32_t sa : 1; /**< [ 3: 3](R/W) Stack alignment check enable. When set, use of the stack
+ pointer as the base address in a load/store instruction at
+ this register's exception level must be aligned to a 16-byte
+ boundary, or a stack alignment fault exception will be raised. */
+ uint32_t cc : 1; /**< [ 2: 2](R/W) Cache enable. This is an enable bit for data and unified
+ caches at EL3.
+
+ When this bit is 0, all EL3 normal memory data accesses and
+ all accesses to the EL3 translation tables are Non-cacheable.
+ This bit has no effect on the EL1&0 or EL2 translation
+ regimes.
+
+ 0 = Data and unified caches disabled at EL3.
+ 1 = Data and unified caches enabled at EL3. */
+ uint32_t aa : 1; /**< [ 1: 1](R/W) Alignment check enable. This is the enable bit for Alignment
+ fault checking:
+
+ Load/store exclusive and load-acquire/store-release
+ instructions have an alignment check regardless of the value
+ of the A bit.
+
+ 0 = Alignment fault checking disabled.
+ Instructions that load or store one or more registers, other
+ than load/store exclusive and load-acquire/store-release, do
+ not check that the address being accessed is aligned to the
+ size of the data element(s) being accessed.
+
+ 1 = Alignment fault checking enabled.
+ All instructions that load or store one or more registers have
+ an alignment check that the address being accessed is aligned
+ to the size of the data element(s) being accessed. If this
+ check fails it causes an Alignment fault, which is taken as a
+ Data Abort exception. */
+ uint32_t m : 1; /**< [ 0: 0](R/W) MMU enable for EL3 stage 1 address translation.
+ 0 = EL3 stage 1 address translation disabled.
+ 1 = EL3 stage 1 address translation enabled. */
+#else /* Word 0 - Little Endian */
+ uint32_t m : 1; /**< [ 0: 0](R/W) MMU enable for EL3 stage 1 address translation.
+ 0 = EL3 stage 1 address translation disabled.
+ 1 = EL3 stage 1 address translation enabled. */
+ uint32_t aa : 1; /**< [ 1: 1](R/W) Alignment check enable. This is the enable bit for Alignment
+ fault checking:
+
+ Load/store exclusive and load-acquire/store-release
+ instructions have an alignment check regardless of the value
+ of the A bit.
+
+ 0 = Alignment fault checking disabled.
+ Instructions that load or store one or more registers, other
+ than load/store exclusive and load-acquire/store-release, do
+ not check that the address being accessed is aligned to the
+ size of the data element(s) being accessed.
+
+ 1 = Alignment fault checking enabled.
+ All instructions that load or store one or more registers have
+ an alignment check that the address being accessed is aligned
+ to the size of the data element(s) being accessed. If this
+ check fails it causes an Alignment fault, which is taken as a
+ Data Abort exception. */
+ uint32_t cc : 1; /**< [ 2: 2](R/W) Cache enable. This is an enable bit for data and unified
+ caches at EL3.
+
+ When this bit is 0, all EL3 normal memory data accesses and
+ all accesses to the EL3 translation tables are Non-cacheable.
+ This bit has no effect on the EL1&0 or EL2 translation
+ regimes.
+
+ 0 = Data and unified caches disabled at EL3.
+ 1 = Data and unified caches enabled at EL3. */
+ uint32_t sa : 1; /**< [ 3: 3](R/W) Stack alignment check enable. When set, use of the stack
+ pointer as the base address in a load/store instruction at
+ this register's exception level must be aligned to a 16-byte
+ boundary, or a stack alignment fault exception will be raised. */
+ uint32_t rsvd_4_5 : 2; /**< [ 5: 4](RO) Reserved 1. */
+ uint32_t reserved_6_10 : 5;
+ uint32_t rsvd_11 : 1; /**< [ 11: 11](RO) Reserved 1. */
+ uint32_t i : 1; /**< [ 12: 12](R/W) Instruction cache enable. This is an enable bit for
+ instruction caches at EL3:
+
+ When this bit is 0, all EL3 Normal memory instruction accesses
+ are Non-cacheable. This bit has no effect on the EL1&0 or EL2
+ translation regimes.
+
+ 0 = Instruction caches disabled at EL3. If AP_SCTLR_EL3[M] is set to
+ 0, instruction accesses from stage 1 of the EL3 translation
+ regime are to Normal memory, Outer Shareable, Inner Non-
+ cacheable, Outer Non-cacheable.
+ 1 = Instruction caches enabled at EL3. If AP_SCTLR_EL3[M] is set to 0,
+ instruction accesses from stage 1 of the EL3 translation
+ regime are to Normal memory, Outer Shareable, Inner Write-
+ Through, Outer Write-Through. */
+ uint32_t reserved_13_15 : 3;
+ uint32_t rsvd_16 : 1; /**< [ 16: 16](RO) Reserved 1. */
+ uint32_t reserved_17 : 1;
+ uint32_t rsvd_18 : 1; /**< [ 18: 18](RO) Reserved 1. */
+ uint32_t wxn : 1; /**< [ 19: 19](R/W) Write permission implies XN (Execute Never). This bit can be
+ used to require all memory regions with write permission to be
+ treated as XN.
+ The WXN bit is permitted to be cached in a TLB.
+ 0 = Regions with write permission are not forced to XN.
+ 1 = Regions with write permission are forced to XN. */
+ uint32_t reserved_20_21 : 2;
+ uint32_t rsvd_22 : 1; /**< [ 22: 22](RO) Reserved 1. */
+ uint32_t rsvd_23 : 1; /**< [ 23: 23](RO) Reserved 1. */
+ uint32_t reserved_24 : 1;
+ uint32_t ee : 1; /**< [ 25: 25](R/W) Exception Endianness. This bit controls the endianness for:
+ Explicit data accesses at EL3.
+ Stage 1 translation table walks at EL3.
+
+ If an implementation does not provide Big-endian support, this
+ bit is RES0. If it does not provide Little-endian support,
+ this bit is RES1.
+
+ The EE bit is permitted to be cached in a TLB.
+ 0 = Little-endian.
+ 1 = Big-endian. */
+ uint32_t reserved_26_27 : 2;
+ uint32_t rsvd_28_29 : 2; /**< [ 29: 28](RO) Reserved 1. */
+ uint32_t reserved_30_31 : 2;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_sctlr_el3_s cn; */
+};
+typedef union bdk_ap_sctlr_el3 bdk_ap_sctlr_el3_t;
+
+#define BDK_AP_SCTLR_EL3 BDK_AP_SCTLR_EL3_FUNC()
+static inline uint64_t BDK_AP_SCTLR_EL3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_SCTLR_EL3_FUNC(void)
+{
+ return 0x30601000000ll;
+}
+
+#define typedef_BDK_AP_SCTLR_EL3 bdk_ap_sctlr_el3_t
+#define bustype_BDK_AP_SCTLR_EL3 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_SCTLR_EL3 "AP_SCTLR_EL3"
+#define busnum_BDK_AP_SCTLR_EL3 0
+#define arguments_BDK_AP_SCTLR_EL3 -1,-1,-1,-1
+
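+/* Editorial usage sketch (not part of the imported BDK sources): compose
+ * a typical EL3 control value through the bitfield view instead of raw
+ * bit masks. The helper name is hypothetical; note the RES1 (rsvd_*)
+ * fields are left zero here and would need separate handling. */
+static inline uint32_t bdk_ap_sctlr_el3_compose(int mmu, int dcache, int icache)
+{
+ bdk_ap_sctlr_el3_t s;
+ s.u = 0; /* start from all-zero */
+ s.s.m = !!mmu; /* [M] stage 1 translation at EL3 */
+ s.s.cc = !!dcache; /* [C] data/unified caches at EL3 */
+ s.s.i = !!icache; /* [I] instruction caches at EL3 */
+ s.s.sa = 1; /* [SA] keep stack alignment checking on */
+ return s.u;
+}
+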
+/**
+ * Register (SYSREG) ap_sder32_el3
+ *
+ * AP AArch32 Secure Debug Enable Register
+ * Allows access to the AArch32 register SDER from AArch64 state
+ * only. Its value has no effect on execution in AArch64 state.
+ */
+union bdk_ap_sder32_el3
+{
+ uint32_t u;
+ struct bdk_ap_sder32_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_2_31 : 30;
+ uint32_t suniden : 1; /**< [ 1: 1](R/W) Secure User Non-Invasive Debug Enable:
+ 0 = Non-invasive debug not permitted in Secure EL0 mode.
+ 1 = Non-invasive debug permitted in Secure EL0 mode. */
+ uint32_t suiden : 1; /**< [ 0: 0](R/W) Secure User Invasive Debug Enable:
+ 0 = Invasive debug not permitted in Secure EL0 mode.
+ 1 = Invasive debug permitted in Secure EL0 mode. */
+#else /* Word 0 - Little Endian */
+ uint32_t suiden : 1; /**< [ 0: 0](R/W) Secure User Invasive Debug Enable:
+ 0 = Invasive debug not permitted in Secure EL0 mode.
+ 1 = Invasive debug permitted in Secure EL0 mode. */
+ uint32_t suniden : 1; /**< [ 1: 1](R/W) Secure User Non-Invasive Debug Enable:
+ 0 = Non-invasive debug not permitted in Secure EL0 mode.
+ 1 = Non-invasive debug permitted in Secure EL0 mode. */
+ uint32_t reserved_2_31 : 30;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_sder32_el3_s cn8; */
+ struct bdk_ap_sder32_el3_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_2_31 : 30;
+ uint32_t suniden : 1; /**< [ 1: 1](RAZ) Secure User Non-Invasive Debug Enable:
+ 0 = Non-invasive debug not permitted in Secure EL0 mode.
+ 1 = Non-invasive debug permitted in Secure EL0 mode. */
+ uint32_t suiden : 1; /**< [ 0: 0](RAZ) Secure User Invasive Debug Enable:
+ 0 = Invasive debug not permitted in Secure EL0 mode.
+ 1 = Invasive debug permitted in Secure EL0 mode. */
+#else /* Word 0 - Little Endian */
+ uint32_t suiden : 1; /**< [ 0: 0](RAZ) Secure User Invasive Debug Enable:
+ 0 = Invasive debug not permitted in Secure EL0 mode.
+ 1 = Invasive debug permitted in Secure EL0 mode. */
+ uint32_t suniden : 1; /**< [ 1: 1](RAZ) Secure User Non-Invasive Debug Enable:
+ 0 = Non-invasive debug not permitted in Secure EL0 mode.
+ 1 = Non-invasive debug permitted in Secure EL0 mode. */
+ uint32_t reserved_2_31 : 30;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_ap_sder32_el3 bdk_ap_sder32_el3_t;
+
+#define BDK_AP_SDER32_EL3 BDK_AP_SDER32_EL3_FUNC()
+static inline uint64_t BDK_AP_SDER32_EL3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_SDER32_EL3_FUNC(void)
+{
+ return 0x30601010100ll;
+}
+
+#define typedef_BDK_AP_SDER32_EL3 bdk_ap_sder32_el3_t
+#define bustype_BDK_AP_SDER32_EL3 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_SDER32_EL3 "AP_SDER32_EL3"
+#define busnum_BDK_AP_SDER32_EL3 0
+#define arguments_BDK_AP_SDER32_EL3 -1,-1,-1,-1
+
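+/* Editorial usage sketch (not part of the imported BDK sources): test
+ * both Secure EL0 debug enables in one raw SDER32_EL3 value. The helper
+ * name is hypothetical. */
+static inline int bdk_ap_sder32_el3_debug_enabled(uint32_t raw)
+{
+ bdk_ap_sder32_el3_t sder;
+ sder.u = raw;
+ return sder.s.suiden && sder.s.suniden; /* invasive and non-invasive */
+}
+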
+/**
+ * Register (SYSREG) ap_sp_el0
+ *
+ * AP Stack Pointer EL0 Register
+ * Holds the stack pointer if AP_SPSel[SP] is 0, or the stack pointer
+ * for EL0 if AP_SPSel[SP] is 1.
+ */
+union bdk_ap_sp_el0
+{
+ uint64_t u;
+ struct bdk_ap_sp_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Stack pointer. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Stack pointer. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_sp_el0_s cn; */
+};
+typedef union bdk_ap_sp_el0 bdk_ap_sp_el0_t;
+
+#define BDK_AP_SP_EL0 BDK_AP_SP_EL0_FUNC()
+static inline uint64_t BDK_AP_SP_EL0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_SP_EL0_FUNC(void)
+{
+ return 0x30004010000ll;
+}
+
+#define typedef_BDK_AP_SP_EL0 bdk_ap_sp_el0_t
+#define bustype_BDK_AP_SP_EL0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_SP_EL0 "AP_SP_EL0"
+#define busnum_BDK_AP_SP_EL0 0
+#define arguments_BDK_AP_SP_EL0 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_sp_el1
+ *
+ * AP Stack Pointer EL1 Register
+ * Holds the stack pointer for EL1 if AP_SPSel[SP] is 1 (the stack
+ * pointer selected is SP_ELx).
+ */
+union bdk_ap_sp_el1
+{
+ uint64_t u;
+ struct bdk_ap_sp_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Stack pointer. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Stack pointer. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_sp_el1_s cn; */
+};
+typedef union bdk_ap_sp_el1 bdk_ap_sp_el1_t;
+
+#define BDK_AP_SP_EL1 BDK_AP_SP_EL1_FUNC()
+static inline uint64_t BDK_AP_SP_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_SP_EL1_FUNC(void)
+{
+ return 0x30404010000ll;
+}
+
+#define typedef_BDK_AP_SP_EL1 bdk_ap_sp_el1_t
+#define bustype_BDK_AP_SP_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_SP_EL1 "AP_SP_EL1"
+#define busnum_BDK_AP_SP_EL1 0
+#define arguments_BDK_AP_SP_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_sp_el2
+ *
+ * AP Stack Pointer EL2 Register
+ * Holds the stack pointer for EL2 if AP_SPSel[SP] is 1 (the stack
+ * pointer selected is SP_ELx).
+ */
+union bdk_ap_sp_el2
+{
+ uint64_t u;
+ struct bdk_ap_sp_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Stack pointer. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Stack pointer. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_sp_el2_s cn; */
+};
+typedef union bdk_ap_sp_el2 bdk_ap_sp_el2_t;
+
+#define BDK_AP_SP_EL2 BDK_AP_SP_EL2_FUNC()
+static inline uint64_t BDK_AP_SP_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_SP_EL2_FUNC(void)
+{
+ return 0x30604010000ll;
+}
+
+#define typedef_BDK_AP_SP_EL2 bdk_ap_sp_el2_t
+#define bustype_BDK_AP_SP_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_SP_EL2 "AP_SP_EL2"
+#define busnum_BDK_AP_SP_EL2 0
+#define arguments_BDK_AP_SP_EL2 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_spsel
+ *
+ * AP Stack Pointer Select Register
+ * Allows the Stack Pointer to be selected between AP_SP_EL0 and
+ * SP_ELx.
+ */
+union bdk_ap_spsel
+{
+ uint32_t u;
+ struct bdk_ap_spsel_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_1_31 : 31;
+ uint32_t sp : 1; /**< [ 0: 0](R/W) Stack pointer to use.
+ 0 = Use AP_SP_EL0 at all Exception levels.
+ 1 = Use SP_ELx for Exception level ELx. */
+#else /* Word 0 - Little Endian */
+ uint32_t sp : 1; /**< [ 0: 0](R/W) Stack pointer to use.
+ 0 = Use AP_SP_EL0 at all Exception levels.
+ 1 = Use SP_ELx for Exception level ELx. */
+ uint32_t reserved_1_31 : 31;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_spsel_s cn; */
+};
+typedef union bdk_ap_spsel bdk_ap_spsel_t;
+
+#define BDK_AP_SPSEL BDK_AP_SPSEL_FUNC()
+static inline uint64_t BDK_AP_SPSEL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_SPSEL_FUNC(void)
+{
+ return 0x30004020000ll;
+}
+
+#define typedef_BDK_AP_SPSEL bdk_ap_spsel_t
+#define bustype_BDK_AP_SPSEL BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_SPSEL "AP_SPSEL"
+#define busnum_BDK_AP_SPSEL 0
+#define arguments_BDK_AP_SPSEL -1,-1,-1,-1
+
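+/* Editorial usage sketch (not part of the imported BDK sources): report
+ * which stack pointer a raw AP_SPSEL value selects. The helper name is
+ * hypothetical. */
+static inline int bdk_ap_spsel_uses_sp_elx(uint32_t raw)
+{
+ bdk_ap_spsel_t spsel;
+ spsel.u = raw;
+ return spsel.s.sp; /* 1 = SP_ELx for ELx, 0 = AP_SP_EL0 everywhere */
+}
+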
+/**
+ * Register (SYSREG) ap_spsr_abt
+ *
+ * AP Saved Program Status Abort-mode Register
+ * Holds the saved processor state when an exception is taken to
+ * Abort mode.
+ * If EL1 does not support execution in AArch32, this register is RES0.
+ */
+union bdk_ap_spsr_abt
+{
+ uint32_t u;
+ struct bdk_ap_spsr_abt_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_spsr_abt_s cn; */
+};
+typedef union bdk_ap_spsr_abt bdk_ap_spsr_abt_t;
+
+#define BDK_AP_SPSR_ABT BDK_AP_SPSR_ABT_FUNC()
+static inline uint64_t BDK_AP_SPSR_ABT_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_SPSR_ABT_FUNC(void)
+{
+ return 0x30404030100ll;
+}
+
+#define typedef_BDK_AP_SPSR_ABT bdk_ap_spsr_abt_t
+#define bustype_BDK_AP_SPSR_ABT BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_SPSR_ABT "AP_SPSR_ABT"
+#define busnum_BDK_AP_SPSR_ABT 0
+#define arguments_BDK_AP_SPSR_ABT -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_spsr_el#
+ *
+ * AP Saved Processor State Register
+ * Holds the saved processor state when an exception is taken to
+ * EL*.
+ */
+union bdk_ap_spsr_elx
+{
+ uint32_t u;
+ struct bdk_ap_spsr_elx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t n : 1; /**< [ 31: 31](R/W) Set to the value of CPSR[N] on taking an exception to Monitor
+ mode, and copied to CPSR[N] on executing an exception return
+ operation in Monitor mode. */
+ uint32_t z : 1; /**< [ 30: 30](R/W) Set to the value of CPSR[Z] on taking an exception to Monitor
+ mode, and copied to CPSR[Z] on executing an exception return
+ operation in Monitor mode. */
+ uint32_t cc : 1; /**< [ 29: 29](R/W) Set to the value of CPSR[C] on taking an exception to Monitor
+ mode, and copied to CPSR[C] on executing an exception return
+ operation in Monitor mode. */
+ uint32_t v : 1; /**< [ 28: 28](R/W) Set to the value of CPSR[V] on taking an exception to Monitor
+ mode, and copied to CPSR[V] on executing an exception return
+ operation in Monitor mode. */
+ uint32_t reserved_24_27 : 4;
+ uint32_t uao : 1; /**< [ 23: 23](R/W) User access override. SPSR_EL[23] only. */
+ uint32_t pan : 1; /**< [ 22: 22](R/W) 0 = Has no effect on the translation system.
+ 1 = Disables data read or data write access from EL1 (or EL2
+ when AP_HCR_EL2[E2H] == 1 && AP_HCR_EL2[TGE] == 1) to a virtual
+ address where access to the virtual address at EL0 is
+ permitted at stage 1 by the combination of the AP[1] bit
+ and the APTable[0] bits (if appropriate). That is, when
+ AP[1] == 1 && APTable[0] == 0 for all APTable bits
+ associated with that virtual address.
+
+ The AP_PAN bit has no effect on instruction accesses.
+
+ If access is disabled, then the access will give rise to
+ a stage 1 permission fault, taken in the same way as all
+ other stage 1 permission faults. */
+ uint32_t ss : 1; /**< [ 21: 21](R/W) Software step. Indicates whether software step was
+ enabled when an exception was taken. */
+ uint32_t il : 1; /**< [ 20: 20](R/W) The IL bit is added to process state to indicate that on
+ exception return or as a result of an explicit change of the
+ CPSR mode field in AArch32, an illegal state or mode was
+ indicated, as described in section 3.5.6.3. Its value is
+ reflected in the SPSR when it is set at a time when the
+ process state IL bit was set either:
+
+ - As a result of an UNdefined exception caused by the process
+ state IL bit being set, or
+
+ - Where execution was pre-empted between setting the process
+ state IL bit and an UNdefined exception being taken.
+
+ The IL bit is added as part of the ARMv8 architecture, but
+ applies to execution in both AArch32 and AArch64. It is
+ allocated into bit[20] of the SPSR. It is impossible for
+ software to observe the value 1 in the CPSR in AArch32, or
+ to observe the current Process State value in AArch64. */
+ uint32_t reserved_10_19 : 10;
+ uint32_t dd : 1; /**< [ 9: 9](R/W) Interrupt masks - can also be accessed as PSTATE[D,A,I,F]. */
+ uint32_t aa : 1; /**< [ 8: 8](R/W) Interrupt masks - can also be accessed as PSTATE[D,A,I,F]. */
+ uint32_t i : 1; /**< [ 7: 7](R/W) Interrupt masks - can also be accessed as PSTATE[D,A,I,F]. */
+ uint32_t f : 1; /**< [ 6: 6](R/W) Interrupt masks - can also be accessed as PSTATE[D,A,I,F]. */
+ uint32_t reserved_5 : 1;
+ uint32_t from32 : 1; /**< [ 4: 4](R/W) 0 = Exception came from AArch64 (64-bit).
+ 1 = Exception came from AArch32 (32-bit).
+ If 32-bit execution is not implemented, a value of 1 causes an
+ illegal state exception. */
+ uint32_t el : 2; /**< [ 3: 2](R/W) Exception level: 0x0 = EL0, 0x1 = EL1, 0x2 = EL2, 0x3 = EL3. */
+ uint32_t reserved_1 : 1;
+ uint32_t sp : 1; /**< [ 0: 0](R/W) AArch64 only. Stack pointer selection: 0 = SP0, 1 = SPx. */
+#else /* Word 0 - Little Endian */
+ uint32_t sp : 1; /**< [ 0: 0](R/W) AArch64 only. Stack pointer selection: 0 = SP0, 1 = SPx. */
+ uint32_t reserved_1 : 1;
+ uint32_t el : 2; /**< [ 3: 2](R/W) Exception level: 0x0 = EL0, 0x1 = EL1, 0x2 = EL2, 0x3 = EL3. */
+ uint32_t from32 : 1; /**< [ 4: 4](R/W) 0 = Exception came from AArch64 (64-bit).
+ 1 = Exception came from AArch32 (32-bit).
+ If 32-bit execution is not implemented, a value of 1 causes an
+ illegal state exception. */
+ uint32_t reserved_5 : 1;
+ uint32_t f : 1; /**< [ 6: 6](R/W) Interrupt masks - can also be accessed as PSTATE[D,A,I,F]. */
+ uint32_t i : 1; /**< [ 7: 7](R/W) Interrupt masks - can also be accessed as PSTATE[D,A,I,F]. */
+ uint32_t aa : 1; /**< [ 8: 8](R/W) Interrupt masks - can also be accessed as PSTATE[D,A,I,F]. */
+ uint32_t dd : 1; /**< [ 9: 9](R/W) Interrupt masks - can also be accessed as PSTATE[D,A,I,F]. */
+ uint32_t reserved_10_19 : 10;
+ uint32_t il : 1; /**< [ 20: 20](R/W) The IL bit is added to process state to indicate that on
+ exception return or as a result of an explicit change of the
+ CPSR mode field in AArch32, an illegal state or mode was
+ indicated, as described in section 3.5.6.3. Its value is
+ reflected in the SPSR when it is set at a time when the
+ process state IL bit was set either:
+
+ - As a result of an UNdefined exception caused by the process
+ state IL bit being set, or
+
+ - Where execution was pre-empted between setting the process
+ state IL bit and an UNdefined exception being taken.
+
+ The IL bit is added as part of the ARMv8 architecture, but
+ applies to execution in both AArch32 and AArch64. It is
+ allocated into bit[20] of the SPSR. It is impossible for
+ software to observe the value 1 in the CPSR in AArch32, or
+ to observe the current Process State value in AArch64. */
+ uint32_t ss : 1; /**< [ 21: 21](R/W) Software step. Indicates whether software step was
+ enabled when an exception was taken. */
+ uint32_t pan : 1; /**< [ 22: 22](R/W) 0 = Has no effect on the translation system.
+ 1 = Disables data read or data write access from EL1 (or EL2
+ when AP_HCR_EL2[E2H] == 1 && AP_HCR_EL2[TGE] == 1) to a virtual
+ address where access to the virtual address at EL0 is
+ permitted at stage 1 by the combination of the AP[1] bit
+ and the APTable[0] bits (if appropriate). That is, when
+ AP[1] == 1 && APTable[0] == 0 for all APTable bits
+ associated with that virtual address.
+
+ The AP_PAN bit has no effect on instruction accesses.
+
+ If access is disabled, then the access will give rise to
+ a stage 1 permission fault, taken in the same way as all
+ other stage 1 permission faults. */
+ uint32_t uao : 1; /**< [ 23: 23](R/W) User access override. SPSR_EL[23] only. */
+ uint32_t reserved_24_27 : 4;
+ uint32_t v : 1; /**< [ 28: 28](R/W) Set to the value of CPSR[V] on taking an exception to Monitor
+ mode, and copied to CPSR[V] on executing an exception return
+ operation in Monitor mode. */
+ uint32_t cc : 1; /**< [ 29: 29](R/W) Set to the value of CPSR[C] on taking an exception to Monitor
+ mode, and copied to CPSR[C] on executing an exception return
+ operation in Monitor mode. */
+ uint32_t z : 1; /**< [ 30: 30](R/W) Set to the value of CPSR[Z] on taking an exception to Monitor
+ mode, and copied to CPSR[Z] on executing an exception return
+ operation in Monitor mode. */
+ uint32_t n : 1; /**< [ 31: 31](R/W) Set to the value of CPSR[N] on taking an exception to Monitor
+ mode, and copied to CPSR[N] on executing an exception return
+ operation in Monitor mode. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_spsr_elx_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t n : 1; /**< [ 31: 31](R/W) Set to the value of CPSR[N] on taking an exception to Monitor
+ mode, and copied to CPSR[N] on executing an exception return
+ operation in Monitor mode. */
+ uint32_t z : 1; /**< [ 30: 30](R/W) Set to the value of CPSR[Z] on taking an exception to Monitor
+ mode, and copied to CPSR[Z] on executing an exception return
+ operation in Monitor mode. */
+ uint32_t cc : 1; /**< [ 29: 29](R/W) Set to the value of CPSR[C] on taking an exception to Monitor
+ mode, and copied to CPSR[C] on executing an exception return
+ operation in Monitor mode. */
+ uint32_t v : 1; /**< [ 28: 28](R/W) Set to the value of CPSR[V] on taking an exception to Monitor
+ mode, and copied to CPSR[V] on executing an exception return
+ operation in Monitor mode. */
+ uint32_t reserved_23_27 : 5;
+ uint32_t pan : 1; /**< [ 22: 22](R/W) 0 = Has no effect on the translation system.
+ 1 = Disables data read or data write access from EL1 (or EL2
+ when AP_HCR_EL2[E2H] == 1 && AP_HCR_EL2[TGE] == 1) to a virtual
+ address where access to the virtual address at EL0 is
+ permitted at stage 1 by the combination of the AP[1] bit
+ and the APTable[0] bits (if appropriate). That is, when
+ AP[1] == 1 && APTable[0] == 0 for all APTable bits
+ associated with that virtual address.
+
+ The AP_PAN bit has no effect on instruction accesses.
+
+ If access is disabled, then the access will give rise to
+ a stage 1 permission fault, taken in the same way as all
+ other stage 1 permission faults. */
+ uint32_t ss : 1; /**< [ 21: 21](R/W) Software step. Indicates whether software step was
+ enabled when an exception was taken. */
+ uint32_t il : 1; /**< [ 20: 20](R/W) The IL bit is added to process state to indicate that on
+ exception return or as a result of an explicit change of the
+ CPSR mode field in AArch32, an illegal state or mode was
+ indicated, as described in section 3.5.6.3. Its value is
+ reflected in the SPSR when it is set at a time when the
+ process state IL bit was set either:
+
+ - As a result of an UNdefined exception caused by the process
+ state IL bit being set, or
+
+ - Where execution was pre-empted between setting the process
+ state IL bit and an UNdefined exception being taken.
+
+ The IL bit is added as part of the ARMv8 architecture, but
+ applies to execution in both AArch32 and AArch64. It is
+ allocated into bit[20] of the SPSR. It is impossible for
+ software to observe the value 1 in the CPSR in AArch32, or
+ to observe the current Process State value in AArch64. */
+ uint32_t reserved_10_19 : 10;
+ uint32_t dd : 1; /**< [ 9: 9](R/W) Interrupt masks - can also be accessed as PSTATE[D,A,I,F]. */
+ uint32_t aa : 1; /**< [ 8: 8](R/W) Interrupt masks - can also be accessed as PSTATE[D,A,I,F]. */
+ uint32_t i : 1; /**< [ 7: 7](R/W) Interrupt masks - can also be accessed as PSTATE[D,A,I,F]. */
+ uint32_t f : 1; /**< [ 6: 6](R/W) Interrupt masks - can also be accessed as PSTATE[D,A,I,F]. */
+ uint32_t reserved_5 : 1;
+ uint32_t from32 : 1; /**< [ 4: 4](R/W) 0 = Exception came from AArch64 (64-bit).
+ 1 = Exception came from AArch32 (32-bit).
+ If 32-bit execution is not implemented, a value of 1 causes an
+ illegal state exception. */
+ uint32_t el : 2; /**< [ 3: 2](R/W) Exception level: 0x0 = EL0, 0x1 = EL1, 0x2 = EL2, 0x3 = EL3. */
+ uint32_t reserved_1 : 1;
+ uint32_t sp : 1; /**< [ 0: 0](R/W) AArch64 only. Stack pointer selection: 0 = SP0, 1 = SPx. */
+#else /* Word 0 - Little Endian */
+ uint32_t sp : 1; /**< [ 0: 0](R/W) AArch64 only. Stack pointer selection: 0 = SP0, 1 = SPx. */
+ uint32_t reserved_1 : 1;
+ uint32_t el : 2; /**< [ 3: 2](R/W) Exception level: 0x0 = EL0, 0x1 = EL1, 0x2 = EL2, 0x3 = EL3. */
+ uint32_t from32 : 1; /**< [ 4: 4](R/W) 0 = Exception came from AArch64 (64-bit).
+ 1 = Exception came from AArch32 (32-bit).
+ If 32-bit execution is not implemented, a value of 1 causes an
+ illegal state exception. */
+ uint32_t reserved_5 : 1;
+ uint32_t f : 1; /**< [ 6: 6](R/W) Interrupt masks - can also be accessed as PSTATE[D,A,I,F]. */
+ uint32_t i : 1; /**< [ 7: 7](R/W) Interrupt masks - can also be accessed as PSTATE[D,A,I,F]. */
+ uint32_t aa : 1; /**< [ 8: 8](R/W) Interrupt masks - can also be accessed as PSTATE[D,A,I,F]. */
+ uint32_t dd : 1; /**< [ 9: 9](R/W) Interrupt masks - can also be accessed as PSTATE[D,A,I,F]. */
+ uint32_t reserved_10_19 : 10;
+ uint32_t il : 1; /**< [ 20: 20](R/W) The IL bit is added to process state to indicate that on
+ exception return or as a result of an explicit change of the
+ CPSR mode field in AArch32, an illegal state or mode was
+ indicated, as described in section 3.5.6.3. Its value is
+ reflected in the SPSR when it is set at a time when the
+ process state IL bit was set either:
+
+ - As a result of an UNdefined exception caused by the process
+ state IL bit being set, or
+
+ - Where execution was pre-empted between setting the process
+ state IL bit and an UNdefined exception being taken.
+
+ The IL bit is added as part of the ARMv8 architecture, but
+ applies to execution in both AArch32 and AArch64. It is
+ allocated into bit[20] of the SPSR. It is impossible for
+ software to observe the value 1 in the CPSR in AArch32, or
+ to observe the current Process State value in AArch64. */
+ uint32_t ss : 1; /**< [ 21: 21](R/W) Software step. Indicates whether software step was
+ enabled when an exception was taken. */
+ uint32_t pan : 1; /**< [ 22: 22](R/W) 0 = Has no effect on the translation system.
+ 1 = Disables data read or data write access from EL1 (or EL2
+ when AP_HCR_EL2[E2H] == 1 && AP_HCR_EL2[TGE] == 1) to a virtual
+ address where access to the virtual address at EL0 is
+ permitted at stage 1 by the combination of the AP[1] bit
+ and the APTable[0] bits (if appropriate). That is, when
+ AP[1] == 1 && APTable[0] == 0 for all APTable bits
+ associated with that virtual address.
+
+ The AP_PAN bit has no effect on instruction accesses.
+
+ If access is disabled, then the access will give rise to
+ a stage 1 permission fault, taken in the same way as all
+ other stage 1 permission faults. */
+ uint32_t reserved_23_27 : 5;
+ uint32_t v : 1; /**< [ 28: 28](R/W) Set to the value of CPSR[V] on taking an exception to Monitor
+ mode, and copied to CPSR[V] on executing an exception return
+ operation in Monitor mode. */
+ uint32_t cc : 1; /**< [ 29: 29](R/W) Set to the value of CPSR[C] on taking an exception to Monitor
+ mode, and copied to CPSR[C] on executing an exception return
+ operation in Monitor mode. */
+ uint32_t z : 1; /**< [ 30: 30](R/W) Set to the value of CPSR[Z] on taking an exception to Monitor
+ mode, and copied to CPSR[Z] on executing an exception return
+ operation in Monitor mode. */
+ uint32_t n : 1; /**< [ 31: 31](R/W) Set to the value of CPSR[N] on taking an exception to Monitor
+ mode, and copied to CPSR[N] on executing an exception return
+ operation in Monitor mode. */
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_ap_spsr_elx_s cn9; */
+};
+typedef union bdk_ap_spsr_elx bdk_ap_spsr_elx_t;
+
+static inline uint64_t BDK_AP_SPSR_ELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_SPSR_ELX(unsigned long a)
+{
+ if ((a>=1)&&(a<=3))
+ return 0x30004000000ll + 0ll * ((a) & 0x3);
+ __bdk_csr_fatal("AP_SPSR_ELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_SPSR_ELX(a) bdk_ap_spsr_elx_t
+#define bustype_BDK_AP_SPSR_ELX(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_SPSR_ELX(a) "AP_SPSR_ELX"
+#define busnum_BDK_AP_SPSR_ELX(a) (a)
+#define arguments_BDK_AP_SPSR_ELX(a) (a),-1,-1,-1
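+
+/* Illustrative sketch, not part of the generated BDK sources: the 64-bit
+ * values returned by the accessor functions in this file appear to pack the
+ * AArch64 system-register operands one per byte, i.e.
+ * (op0 << 40) | (op1 << 32) | (CRn << 24) | (CRm << 16) | (op2 << 8).
+ * For example, AP_SPSR_ELX above yields 0x30004000000, matching S3_0_C4_C0_0
+ * (SPSR_EL1). The hypothetical helper below decodes that assumed layout. */
+#if 0 /* example only */
+static inline void bdk_ap_sysreg_decode(uint64_t csr, unsigned *op0,
+                                        unsigned *op1, unsigned *crn,
+                                        unsigned *crm, unsigned *op2)
+{
+    *op0 = (csr >> 40) & 0xff; /* 0x3 for the AP_* system registers here */
+    *op1 = (csr >> 32) & 0xff;
+    *crn = (csr >> 24) & 0xff;
+    *crm = (csr >> 16) & 0xff;
+    *op2 = (csr >> 8) & 0xff;
+}
+#endif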
+
+/**
+ * Register (SYSREG) ap_spsr_el12
+ *
+ * AP Saved Processor State EL2/3 Alias Register
+ * Allows EL2 and EL3 access to SPSR_EL1 when AP_HCR_EL2[E2H]==1.
+ */
+union bdk_ap_spsr_el12
+{
+ uint32_t u;
+ struct bdk_ap_spsr_el12_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_spsr_el12_s cn; */
+};
+typedef union bdk_ap_spsr_el12 bdk_ap_spsr_el12_t;
+
+#define BDK_AP_SPSR_EL12 BDK_AP_SPSR_EL12_FUNC()
+static inline uint64_t BDK_AP_SPSR_EL12_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_SPSR_EL12_FUNC(void)
+{
+ return 0x30504000000ll;
+}
+
+#define typedef_BDK_AP_SPSR_EL12 bdk_ap_spsr_el12_t
+#define bustype_BDK_AP_SPSR_EL12 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_SPSR_EL12 "AP_SPSR_EL12"
+#define busnum_BDK_AP_SPSR_EL12 0
+#define arguments_BDK_AP_SPSR_EL12 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_spsr_fiq
+ *
+ * AP Saved Program Status FIQ-mode Register
+ * Holds the saved processor state when an exception is taken to
+ * FIQ mode.
+ * If EL1 does not support execution in AArch32, this register is RES0.
+ */
+union bdk_ap_spsr_fiq
+{
+ uint32_t u;
+ struct bdk_ap_spsr_fiq_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_spsr_fiq_s cn; */
+};
+typedef union bdk_ap_spsr_fiq bdk_ap_spsr_fiq_t;
+
+#define BDK_AP_SPSR_FIQ BDK_AP_SPSR_FIQ_FUNC()
+static inline uint64_t BDK_AP_SPSR_FIQ_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_SPSR_FIQ_FUNC(void)
+{
+ return 0x30404030300ll;
+}
+
+#define typedef_BDK_AP_SPSR_FIQ bdk_ap_spsr_fiq_t
+#define bustype_BDK_AP_SPSR_FIQ BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_SPSR_FIQ "AP_SPSR_FIQ"
+#define busnum_BDK_AP_SPSR_FIQ 0
+#define arguments_BDK_AP_SPSR_FIQ -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_spsr_irq
+ *
+ * AP Saved Program Status IRQ-mode Register
+ * Holds the saved processor state when an exception is taken to
+ * IRQ mode.
+ * If EL1 does not support execution in AArch32, this register is RES0.
+ */
+union bdk_ap_spsr_irq
+{
+ uint32_t u;
+ struct bdk_ap_spsr_irq_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_spsr_irq_s cn; */
+};
+typedef union bdk_ap_spsr_irq bdk_ap_spsr_irq_t;
+
+#define BDK_AP_SPSR_IRQ BDK_AP_SPSR_IRQ_FUNC()
+static inline uint64_t BDK_AP_SPSR_IRQ_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_SPSR_IRQ_FUNC(void)
+{
+ return 0x30404030000ll;
+}
+
+#define typedef_BDK_AP_SPSR_IRQ bdk_ap_spsr_irq_t
+#define bustype_BDK_AP_SPSR_IRQ BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_SPSR_IRQ "AP_SPSR_IRQ"
+#define busnum_BDK_AP_SPSR_IRQ 0
+#define arguments_BDK_AP_SPSR_IRQ -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_spsr_und
+ *
+ * AP Saved Program Status Undefined-mode Register
+ * Holds the saved processor state when an exception is taken to
+ * Undefined mode.
+ * If EL1 does not support execution in AArch32, this register is RES0.
+ */
+union bdk_ap_spsr_und
+{
+ uint32_t u;
+ struct bdk_ap_spsr_und_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_spsr_und_s cn; */
+};
+typedef union bdk_ap_spsr_und bdk_ap_spsr_und_t;
+
+#define BDK_AP_SPSR_UND BDK_AP_SPSR_UND_FUNC()
+static inline uint64_t BDK_AP_SPSR_UND_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_SPSR_UND_FUNC(void)
+{
+ return 0x30404030200ll;
+}
+
+#define typedef_BDK_AP_SPSR_UND bdk_ap_spsr_und_t
+#define bustype_BDK_AP_SPSR_UND BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_SPSR_UND "AP_SPSR_UND"
+#define busnum_BDK_AP_SPSR_UND 0
+#define arguments_BDK_AP_SPSR_UND -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_tcr_el1
+ *
+ * AP Translation Control EL1 Register
+ * Determines which of the Translation Table Base Registers
+ * defines the base address for a translation table walk required
+ * for the stage 1 translation of a memory access from EL0 or
+ * EL1. Also controls the translation table format and holds
+ * cacheability and shareability information.
+ */
+union bdk_ap_tcr_el1
+{
+ uint64_t u;
+ struct bdk_ap_tcr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_43_63 : 21;
+ uint64_t had1 : 1; /**< [ 42: 42](R/W) V8.1: Hierarchical Attribute Disable.
+ HAD1(bit[42]): Hierarchical Attribute Disable for the TTBR1 region.
+ 0 = Hierarchical Attributes are enabled.
+ 1 = Hierarchical Attributes are disabled. */
+ uint64_t had0 : 1; /**< [ 41: 41](R/W) V8.1: Hierarchical Attribute Disable.
+ HAD0(bit[41]): Hierarchical Attribute Disable for the TTBR0 region.
+ 0 = Hierarchical Attributes are enabled.
+ 1 = Hierarchical Attributes are disabled. */
+ uint64_t reserved_39_40 : 2;
+ uint64_t tbi1 : 1; /**< [ 38: 38](R/W) Top Byte ignored - indicates whether the top byte of an
+ address is used for address match for the AP_TTBR1_EL1 region, or
+ ignored and used for tagged addresses.
+
+ This affects addresses generated in EL0 and EL1 using AArch64
+ where the address would be translated by tables pointed to by
+                                                                 AP_TTBR1_EL1. It has an effect regardless of whether the
+                                                                 EL1&0 translation regime is enabled.
+
+ Additionally, this affects changes to the program counter,
+ when TBI1 is 1 and bit [55] of the target address is 1, caused
+ by:
+
+ A branch or procedure return within EL0 or EL1.
+
+ An exception taken to EL1.
+
+ An exception return to EL0 or EL1.
+
+ In these cases bits [63:56] of the address are also set to 1
+ before it is stored in the PC.
+ 0 = Top Byte used in the address calculation.
+ 1 = Top Byte ignored in the address calculation. */
+ uint64_t tbi0 : 1; /**< [ 37: 37](R/W) Top Byte ignored - indicates whether the top byte of an
+ address is used for address match for the AP_TTBR0_EL1 region, or
+ ignored and used for tagged addresses.
+
+ This affects addresses generated in EL0 and EL1 using AArch64
+ where the address would be translated by tables pointed to by
+                                                                 AP_TTBR0_EL1. It has an effect regardless of whether the
+                                                                 EL1&0 translation regime is enabled.
+
+ Additionally, this affects changes to the program counter,
+ when TBI0 is 1 and bit [55] of the target address is 0, caused
+ by:
+
+ A branch or procedure return within EL0 or EL1.
+
+ An exception taken to EL1.
+
+ An exception return to EL0 or EL1.
+
+ In these cases bits [63:56] of the address are also set to 0
+ before it is stored in the PC.
+ 0 = Top Byte used in the address calculation.
+ 1 = Top Byte ignored in the address calculation. */
+ uint64_t as : 1; /**< [ 36: 36](R/W) ASID Size.
+ If the implementation has only 8 bits of ASID, this field is
+ RES0.
+ 0 = 8 bit - the upper 8 bits of AP_TTBR0_EL1 and AP_TTBR1_EL1 are
+ ignored by hardware for every purpose except reading back the
+                                                                 register, and are treated as if they are all zeros when
+ used for allocation and matching entries in the TLB.
+ 1 = 16 bit - the upper 16 bits of AP_TTBR0_EL1 and AP_TTBR1_EL1 are used
+ for allocation and matching in the TLB. */
+ uint64_t reserved_35 : 1;
+ uint64_t ips : 3; /**< [ 34: 32](R/W) Intermediate Physical Address Size.
+ 0x0 = 32 bits, 4GB.
+ 0x1 = 36 bits, 64GB.
+ 0x2 = 40 bits, 1TB.
+ 0x3 = 42 bits, 4TB.
+ 0x4 = 44 bits, 16TB.
+ 0x5 = 48 bits, 256TB. */
+ uint64_t tg1 : 2; /**< [ 31: 30](R/W) AP_TTBR1_EL1 Granule size.
+
+ If the value is programmed to either a reserved value, or a
+ size that has not been implemented, then the hardware will
+ treat the field as if it has been programmed to an
+ implementation defined choice of the sizes that has been
+ implemented for all purposes other than the value read back
+ from this register.
+
+ It is implementation defined whether the value read back is
+ the value programmed or the value that corresponds to the size
+ chosen.
+
+ 0x1 = 16KB
+ 0x2 = 4KB
+ 0x3 = 64KB */
+ uint64_t sh1 : 2; /**< [ 29: 28](R/W) Shareability attribute for memory associated with translation
+ table walks using AP_TTBR1_EL1.
+ 0x0 = Non-shareable
+ 0x2 = Outer Shareable
+ 0x3 = Inner Shareable */
+ uint64_t orgn1 : 2; /**< [ 27: 26](R/W) Outer cacheability attribute for memory associated with
+ translation table walks using AP_TTBR1_EL1.
+ 0x0 = Normal memory, Outer Non-cacheable
+ 0x1 = Normal memory, Outer Write-Back Write-Allocate Cacheable
+ 0x2 = Normal memory, Outer Write-Through Cacheable
+ 0x3 = Normal memory, Outer Write-Back no Write-Allocate Cacheable */
+ uint64_t irgn1 : 2; /**< [ 25: 24](R/W) Inner cacheability attribute for memory associated with
+ translation table walks using AP_TTBR1_EL1.
+ 0x0 = Normal memory, Inner Non-cacheable
+ 0x1 = Normal memory, Inner Write-Back Write-Allocate Cacheable
+ 0x2 = Normal memory, Inner Write-Through Cacheable
+ 0x3 = Normal memory, Inner Write-Back no Write-Allocate Cacheable */
+ uint64_t epd1 : 1; /**< [ 23: 23](R/W) Translation table walk disable for translations using
+ AP_TTBR1_EL1. This bit controls whether a translation table walk
+ is performed on a TLB miss, for an address that is translated
+ using AP_TTBR1_EL1. The encoding of this bit is:
+ 0 = Perform translation table walks using AP_TTBR1_EL1.
+ 1 = A TLB miss on an address that is translated using AP_TTBR1_EL1
+ generates a Translation fault. No translation table walk is
+ performed. */
+ uint64_t a1 : 1; /**< [ 22: 22](R/W) Selects whether AP_TTBR0_EL1 or AP_TTBR1_EL1 defines the ASID.
+ 0 = AP_TTBR0_EL1[ASID] defines the ASID.
+ 1 = AP_TTBR1_EL1[ASID] defines the ASID. */
+ uint64_t t1sz : 6; /**< [ 21: 16](R/W) The size offset of the memory region addressed by AP_TTBR1_EL1.
+                                                                 The region size is 2^(64-T1SZ) bytes.
+ The maximum and minimum possible values for T1SZ depend on the
+ level of translation table and the memory translation granule
+ size, as described in the AArch64 Virtual Memory System
+ Architecture chapter. */
+ uint64_t tg0 : 2; /**< [ 15: 14](R/W) Granule size for the corresponding translation table base
+ address register.
+
+ If the value is programmed to either a reserved value, or a
+ size that has not been implemented, then the hardware will
+ treat the field as if it has been programmed to an
+ implementation defined choice of the sizes that has been
+ implemented for all purposes other than the value read back
+ from this register.
+
+ It is implementation defined whether the value read back is
+ the value programmed or the value that corresponds to the size
+ chosen.
+
+ 0x0 = 4KB.
+ 0x1 = 64KB.
+ 0x2 = 16KB. */
+ uint64_t sh0 : 2; /**< [ 13: 12](R/W) Shareability attribute for memory associated with translation
+ table walks using AP_TTBR0_EL1.
+ 0x0 = Non-shareable.
+ 0x2 = Outer Shareable.
+ 0x3 = Inner Shareable. */
+ uint64_t orgn0 : 2; /**< [ 11: 10](R/W) Outer cacheability attribute for memory associated with
+ translation table walks using AP_TTBR0_EL1.
+ 0x0 = Normal memory, Outer Non-cacheable.
+ 0x1 = Normal memory, Outer Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Outer Write-Through Cacheable.
+ 0x3 = Normal memory, Outer Write-Back no Write-Allocate Cacheable. */
+ uint64_t irgn0 : 2; /**< [ 9: 8](R/W) Inner cacheability attribute for memory associated with
+ translation table walks using AP_TTBR0_EL1.
+ 0x0 = Normal memory, Inner Non-cacheable.
+ 0x1 = Normal memory, Inner Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Inner Write-Through Cacheable.
+ 0x3 = Normal memory, Inner Write-Back no Write-Allocate Cacheable. */
+ uint64_t epd0 : 1; /**< [ 7: 7](R/W) Translation table walk disable for translations using TTBR0.
+ This bit controls whether a translation table walk is
+ performed on a TLB miss, for an address that is translated
+ using TTBR0.
+ 0 = Perform translation table walks using TTBR0.
+ 1 = A TLB miss on an address that is translated using TTBR0
+ generates a Translation fault. No translation table walk is
+ performed. */
+ uint64_t reserved_6 : 1;
+ uint64_t t0sz : 6; /**< [ 5: 0](R/W) The size offset of the memory region addressed by AP_TTBR0_EL1.
+ The region size is 2^(64-T0SZ) bytes.
+
+ The maximum and minimum possible values for T0SZ depend on the
+ level of translation table and the memory translation granule
+ size, as described in the AArch64 Virtual Memory System
+ Architecture chapter. */
+#else /* Word 0 - Little Endian */
+ uint64_t t0sz : 6; /**< [ 5: 0](R/W) The size offset of the memory region addressed by AP_TTBR0_EL1.
+ The region size is 2^(64-T0SZ) bytes.
+
+ The maximum and minimum possible values for T0SZ depend on the
+ level of translation table and the memory translation granule
+ size, as described in the AArch64 Virtual Memory System
+ Architecture chapter. */
+ uint64_t reserved_6 : 1;
+ uint64_t epd0 : 1; /**< [ 7: 7](R/W) Translation table walk disable for translations using TTBR0.
+ This bit controls whether a translation table walk is
+ performed on a TLB miss, for an address that is translated
+ using TTBR0.
+ 0 = Perform translation table walks using TTBR0.
+ 1 = A TLB miss on an address that is translated using TTBR0
+ generates a Translation fault. No translation table walk is
+ performed. */
+ uint64_t irgn0 : 2; /**< [ 9: 8](R/W) Inner cacheability attribute for memory associated with
+ translation table walks using AP_TTBR0_EL1.
+ 0x0 = Normal memory, Inner Non-cacheable.
+ 0x1 = Normal memory, Inner Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Inner Write-Through Cacheable.
+ 0x3 = Normal memory, Inner Write-Back no Write-Allocate Cacheable. */
+ uint64_t orgn0 : 2; /**< [ 11: 10](R/W) Outer cacheability attribute for memory associated with
+ translation table walks using AP_TTBR0_EL1.
+ 0x0 = Normal memory, Outer Non-cacheable.
+ 0x1 = Normal memory, Outer Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Outer Write-Through Cacheable.
+ 0x3 = Normal memory, Outer Write-Back no Write-Allocate Cacheable. */
+ uint64_t sh0 : 2; /**< [ 13: 12](R/W) Shareability attribute for memory associated with translation
+ table walks using AP_TTBR0_EL1.
+ 0x0 = Non-shareable.
+ 0x2 = Outer Shareable.
+ 0x3 = Inner Shareable. */
+ uint64_t tg0 : 2; /**< [ 15: 14](R/W) Granule size for the corresponding translation table base
+ address register.
+
+ If the value is programmed to either a reserved value, or a
+ size that has not been implemented, then the hardware will
+ treat the field as if it has been programmed to an
+ implementation defined choice of the sizes that has been
+ implemented for all purposes other than the value read back
+ from this register.
+
+ It is implementation defined whether the value read back is
+ the value programmed or the value that corresponds to the size
+ chosen.
+
+ 0x0 = 4KB.
+ 0x1 = 64KB.
+ 0x2 = 16KB. */
+ uint64_t t1sz : 6; /**< [ 21: 16](R/W) The size offset of the memory region addressed by AP_TTBR1_EL1.
+                                                                 The region size is 2^(64-T1SZ) bytes.
+ The maximum and minimum possible values for T1SZ depend on the
+ level of translation table and the memory translation granule
+ size, as described in the AArch64 Virtual Memory System
+ Architecture chapter. */
+ uint64_t a1 : 1; /**< [ 22: 22](R/W) Selects whether AP_TTBR0_EL1 or AP_TTBR1_EL1 defines the ASID.
+ 0 = AP_TTBR0_EL1[ASID] defines the ASID.
+ 1 = AP_TTBR1_EL1[ASID] defines the ASID. */
+ uint64_t epd1 : 1; /**< [ 23: 23](R/W) Translation table walk disable for translations using
+ AP_TTBR1_EL1. This bit controls whether a translation table walk
+ is performed on a TLB miss, for an address that is translated
+ using AP_TTBR1_EL1. The encoding of this bit is:
+ 0 = Perform translation table walks using AP_TTBR1_EL1.
+ 1 = A TLB miss on an address that is translated using AP_TTBR1_EL1
+ generates a Translation fault. No translation table walk is
+ performed. */
+ uint64_t irgn1 : 2; /**< [ 25: 24](R/W) Inner cacheability attribute for memory associated with
+ translation table walks using AP_TTBR1_EL1.
+ 0x0 = Normal memory, Inner Non-cacheable
+ 0x1 = Normal memory, Inner Write-Back Write-Allocate Cacheable
+ 0x2 = Normal memory, Inner Write-Through Cacheable
+ 0x3 = Normal memory, Inner Write-Back no Write-Allocate Cacheable */
+ uint64_t orgn1 : 2; /**< [ 27: 26](R/W) Outer cacheability attribute for memory associated with
+ translation table walks using AP_TTBR1_EL1.
+ 0x0 = Normal memory, Outer Non-cacheable
+ 0x1 = Normal memory, Outer Write-Back Write-Allocate Cacheable
+ 0x2 = Normal memory, Outer Write-Through Cacheable
+ 0x3 = Normal memory, Outer Write-Back no Write-Allocate Cacheable */
+ uint64_t sh1 : 2; /**< [ 29: 28](R/W) Shareability attribute for memory associated with translation
+ table walks using AP_TTBR1_EL1.
+ 0x0 = Non-shareable
+ 0x2 = Outer Shareable
+ 0x3 = Inner Shareable */
+ uint64_t tg1 : 2; /**< [ 31: 30](R/W) AP_TTBR1_EL1 Granule size.
+
+ If the value is programmed to either a reserved value, or a
+ size that has not been implemented, then the hardware will
+ treat the field as if it has been programmed to an
+ implementation defined choice of the sizes that has been
+ implemented for all purposes other than the value read back
+ from this register.
+
+ It is implementation defined whether the value read back is
+ the value programmed or the value that corresponds to the size
+ chosen.
+
+ 0x1 = 16KB
+ 0x2 = 4KB
+ 0x3 = 64KB */
+ uint64_t ips : 3; /**< [ 34: 32](R/W) Intermediate Physical Address Size.
+ 0x0 = 32 bits, 4GB.
+ 0x1 = 36 bits, 64GB.
+ 0x2 = 40 bits, 1TB.
+ 0x3 = 42 bits, 4TB.
+ 0x4 = 44 bits, 16TB.
+ 0x5 = 48 bits, 256TB. */
+ uint64_t reserved_35 : 1;
+ uint64_t as : 1; /**< [ 36: 36](R/W) ASID Size.
+ If the implementation has only 8 bits of ASID, this field is
+ RES0.
+ 0 = 8 bit - the upper 8 bits of AP_TTBR0_EL1 and AP_TTBR1_EL1 are
+ ignored by hardware for every purpose except reading back the
+                                                                 register, and are treated as if they are all zeros when
+ used for allocation and matching entries in the TLB.
+ 1 = 16 bit - the upper 16 bits of AP_TTBR0_EL1 and AP_TTBR1_EL1 are used
+ for allocation and matching in the TLB. */
+ uint64_t tbi0 : 1; /**< [ 37: 37](R/W) Top Byte ignored - indicates whether the top byte of an
+ address is used for address match for the AP_TTBR0_EL1 region, or
+ ignored and used for tagged addresses.
+
+ This affects addresses generated in EL0 and EL1 using AArch64
+ where the address would be translated by tables pointed to by
+                                                                 AP_TTBR0_EL1. It has an effect regardless of whether the
+                                                                 EL1&0 translation regime is enabled.
+
+ Additionally, this affects changes to the program counter,
+ when TBI0 is 1 and bit [55] of the target address is 0, caused
+ by:
+
+ A branch or procedure return within EL0 or EL1.
+
+ An exception taken to EL1.
+
+ An exception return to EL0 or EL1.
+
+ In these cases bits [63:56] of the address are also set to 0
+ before it is stored in the PC.
+ 0 = Top Byte used in the address calculation.
+ 1 = Top Byte ignored in the address calculation. */
+ uint64_t tbi1 : 1; /**< [ 38: 38](R/W) Top Byte ignored - indicates whether the top byte of an
+ address is used for address match for the AP_TTBR1_EL1 region, or
+ ignored and used for tagged addresses.
+
+ This affects addresses generated in EL0 and EL1 using AArch64
+ where the address would be translated by tables pointed to by
+                                                                 AP_TTBR1_EL1. It has an effect regardless of whether the
+                                                                 EL1&0 translation regime is enabled.
+
+ Additionally, this affects changes to the program counter,
+ when TBI1 is 1 and bit [55] of the target address is 1, caused
+ by:
+
+ A branch or procedure return within EL0 or EL1.
+
+ An exception taken to EL1.
+
+ An exception return to EL0 or EL1.
+
+ In these cases bits [63:56] of the address are also set to 1
+ before it is stored in the PC.
+ 0 = Top Byte used in the address calculation.
+ 1 = Top Byte ignored in the address calculation. */
+ uint64_t reserved_39_40 : 2;
+ uint64_t had0 : 1; /**< [ 41: 41](R/W) V8.1: Hierarchical Attribute Disable.
+ HAD0(bit[41]): Hierarchical Attribute Disable for the TTBR0 region.
+ 0 = Hierarchical Attributes are enabled.
+ 1 = Hierarchical Attributes are disabled. */
+ uint64_t had1 : 1; /**< [ 42: 42](R/W) V8.1: Hierarchical Attribute Disable.
+ HAD1(bit[42]): Hierarchical Attribute Disable for the TTBR1 region.
+ 0 = Hierarchical Attributes are enabled.
+ 1 = Hierarchical Attributes are disabled. */
+ uint64_t reserved_43_63 : 21;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_tcr_el1_cn
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_43_63 : 21;
+ uint64_t had1 : 1; /**< [ 42: 42](R/W) V8.1: Hierarchical Attribute Disable.
+ HAD1(bit[42]): Hierarchical Attribute Disable for the TTBR1 region.
+ 0 = Hierarchical Attributes are enabled.
+ 1 = Hierarchical Attributes are disabled. */
+ uint64_t had0 : 1; /**< [ 41: 41](R/W) V8.1: Hierarchical Attribute Disable.
+ HAD0(bit[41]): Hierarchical Attribute Disable for the TTBR0 region.
+ 0 = Hierarchical Attributes are enabled.
+ 1 = Hierarchical Attributes are disabled. */
+ uint64_t reserved_40 : 1;
+ uint64_t reserved_39 : 1;
+ uint64_t tbi1 : 1; /**< [ 38: 38](R/W) Top Byte ignored - indicates whether the top byte of an
+ address is used for address match for the AP_TTBR1_EL1 region, or
+ ignored and used for tagged addresses.
+
+ This affects addresses generated in EL0 and EL1 using AArch64
+ where the address would be translated by tables pointed to by
+                                                                 AP_TTBR1_EL1. It has an effect regardless of whether the
+                                                                 EL1&0 translation regime is enabled.
+
+ Additionally, this affects changes to the program counter,
+ when TBI1 is 1 and bit [55] of the target address is 1, caused
+ by:
+
+ A branch or procedure return within EL0 or EL1.
+
+ An exception taken to EL1.
+
+ An exception return to EL0 or EL1.
+
+ In these cases bits [63:56] of the address are also set to 1
+ before it is stored in the PC.
+ 0 = Top Byte used in the address calculation.
+ 1 = Top Byte ignored in the address calculation. */
+ uint64_t tbi0 : 1; /**< [ 37: 37](R/W) Top Byte ignored - indicates whether the top byte of an
+ address is used for address match for the AP_TTBR0_EL1 region, or
+ ignored and used for tagged addresses.
+
+ This affects addresses generated in EL0 and EL1 using AArch64
+ where the address would be translated by tables pointed to by
+                                                                 AP_TTBR0_EL1. It has an effect regardless of whether the
+                                                                 EL1&0 translation regime is enabled.
+
+ Additionally, this affects changes to the program counter,
+ when TBI0 is 1 and bit [55] of the target address is 0, caused
+ by:
+
+ A branch or procedure return within EL0 or EL1.
+
+ An exception taken to EL1.
+
+ An exception return to EL0 or EL1.
+
+ In these cases bits [63:56] of the address are also set to 0
+ before it is stored in the PC.
+ 0 = Top Byte used in the address calculation.
+ 1 = Top Byte ignored in the address calculation. */
+ uint64_t as : 1; /**< [ 36: 36](R/W) ASID Size.
+ If the implementation has only 8 bits of ASID, this field is
+ RES0.
+ 0 = 8 bit - the upper 8 bits of AP_TTBR0_EL1 and AP_TTBR1_EL1 are
+ ignored by hardware for every purpose except reading back the
+                                                                 register, and are treated as if they are all zeros when
+ used for allocation and matching entries in the TLB.
+ 1 = 16 bit - the upper 16 bits of AP_TTBR0_EL1 and AP_TTBR1_EL1 are used
+ for allocation and matching in the TLB. */
+ uint64_t reserved_35 : 1;
+ uint64_t ips : 3; /**< [ 34: 32](R/W) Intermediate Physical Address Size.
+ 0x0 = 32 bits, 4GB.
+ 0x1 = 36 bits, 64GB.
+ 0x2 = 40 bits, 1TB.
+ 0x3 = 42 bits, 4TB.
+ 0x4 = 44 bits, 16TB.
+ 0x5 = 48 bits, 256TB. */
+ uint64_t tg1 : 2; /**< [ 31: 30](R/W) AP_TTBR1_EL1 Granule size.
+
+ If the value is programmed to either a reserved value, or a
+ size that has not been implemented, then the hardware will
+ treat the field as if it has been programmed to an
+ implementation defined choice of the sizes that has been
+ implemented for all purposes other than the value read back
+ from this register.
+
+ It is implementation defined whether the value read back is
+ the value programmed or the value that corresponds to the size
+ chosen.
+
+ 0x1 = 16KB
+ 0x2 = 4KB
+ 0x3 = 64KB */
+ uint64_t sh1 : 2; /**< [ 29: 28](R/W) Shareability attribute for memory associated with translation
+ table walks using AP_TTBR1_EL1.
+ 0x0 = Non-shareable
+ 0x2 = Outer Shareable
+ 0x3 = Inner Shareable */
+ uint64_t orgn1 : 2; /**< [ 27: 26](R/W) Outer cacheability attribute for memory associated with
+ translation table walks using AP_TTBR1_EL1.
+ 0x0 = Normal memory, Outer Non-cacheable
+ 0x1 = Normal memory, Outer Write-Back Write-Allocate Cacheable
+ 0x2 = Normal memory, Outer Write-Through Cacheable
+ 0x3 = Normal memory, Outer Write-Back no Write-Allocate Cacheable */
+ uint64_t irgn1 : 2; /**< [ 25: 24](R/W) Inner cacheability attribute for memory associated with
+ translation table walks using AP_TTBR1_EL1.
+ 0x0 = Normal memory, Inner Non-cacheable
+ 0x1 = Normal memory, Inner Write-Back Write-Allocate Cacheable
+ 0x2 = Normal memory, Inner Write-Through Cacheable
+ 0x3 = Normal memory, Inner Write-Back no Write-Allocate Cacheable */
+ uint64_t epd1 : 1; /**< [ 23: 23](R/W) Translation table walk disable for translations using
+ AP_TTBR1_EL1. This bit controls whether a translation table walk
+ is performed on a TLB miss, for an address that is translated
+ using AP_TTBR1_EL1. The encoding of this bit is:
+ 0 = Perform translation table walks using AP_TTBR1_EL1.
+ 1 = A TLB miss on an address that is translated using AP_TTBR1_EL1
+ generates a Translation fault. No translation table walk is
+ performed. */
+ uint64_t a1 : 1; /**< [ 22: 22](R/W) Selects whether AP_TTBR0_EL1 or AP_TTBR1_EL1 defines the ASID.
+ 0 = AP_TTBR0_EL1[ASID] defines the ASID.
+ 1 = AP_TTBR1_EL1[ASID] defines the ASID. */
+ uint64_t t1sz : 6; /**< [ 21: 16](R/W) The size offset of the memory region addressed by AP_TTBR1_EL1.
+                                                                 The region size is 2^(64-T1SZ) bytes.
+ The maximum and minimum possible values for T1SZ depend on the
+ level of translation table and the memory translation granule
+ size, as described in the AArch64 Virtual Memory System
+ Architecture chapter. */
+ uint64_t tg0 : 2; /**< [ 15: 14](R/W) Granule size for the corresponding translation table base
+ address register.
+
+ If the value is programmed to either a reserved value, or a
+ size that has not been implemented, then the hardware will
+ treat the field as if it has been programmed to an
+ implementation defined choice of the sizes that has been
+ implemented for all purposes other than the value read back
+ from this register.
+
+ It is implementation defined whether the value read back is
+ the value programmed or the value that corresponds to the size
+ chosen.
+
+ 0x0 = 4KB.
+ 0x1 = 64KB.
+ 0x2 = 16KB. */
+ uint64_t sh0 : 2; /**< [ 13: 12](R/W) Shareability attribute for memory associated with translation
+ table walks using AP_TTBR0_EL1.
+ 0x0 = Non-shareable.
+ 0x2 = Outer Shareable.
+ 0x3 = Inner Shareable. */
+ uint64_t orgn0 : 2; /**< [ 11: 10](R/W) Outer cacheability attribute for memory associated with
+ translation table walks using AP_TTBR0_EL1.
+ 0x0 = Normal memory, Outer Non-cacheable.
+ 0x1 = Normal memory, Outer Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Outer Write-Through Cacheable.
+ 0x3 = Normal memory, Outer Write-Back no Write-Allocate Cacheable. */
+ uint64_t irgn0 : 2; /**< [ 9: 8](R/W) Inner cacheability attribute for memory associated with
+ translation table walks using AP_TTBR0_EL1.
+ 0x0 = Normal memory, Inner Non-cacheable.
+ 0x1 = Normal memory, Inner Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Inner Write-Through Cacheable.
+ 0x3 = Normal memory, Inner Write-Back no Write-Allocate Cacheable. */
+ uint64_t epd0 : 1; /**< [ 7: 7](R/W) Translation table walk disable for translations using TTBR0.
+ This bit controls whether a translation table walk is
+ performed on a TLB miss, for an address that is translated
+ using TTBR0.
+ 0 = Perform translation table walks using TTBR0.
+ 1 = A TLB miss on an address that is translated using TTBR0
+ generates a Translation fault. No translation table walk is
+ performed. */
+ uint64_t reserved_6 : 1;
+ uint64_t t0sz : 6; /**< [ 5: 0](R/W) The size offset of the memory region addressed by AP_TTBR0_EL1.
+ The region size is 2^(64-T0SZ) bytes.
+
+ The maximum and minimum possible values for T0SZ depend on the
+ level of translation table and the memory translation granule
+ size, as described in the AArch64 Virtual Memory System
+ Architecture chapter. */
+#else /* Word 0 - Little Endian */
+ uint64_t t0sz : 6; /**< [ 5: 0](R/W) The size offset of the memory region addressed by AP_TTBR0_EL1.
+ The region size is 2^(64-T0SZ) bytes.
+
+ The maximum and minimum possible values for T0SZ depend on the
+ level of translation table and the memory translation granule
+ size, as described in the AArch64 Virtual Memory System
+ Architecture chapter. */
+ uint64_t reserved_6 : 1;
+ uint64_t epd0 : 1; /**< [ 7: 7](R/W) Translation table walk disable for translations using TTBR0.
+ This bit controls whether a translation table walk is
+ performed on a TLB miss, for an address that is translated
+ using TTBR0.
+ 0 = Perform translation table walks using TTBR0.
+ 1 = A TLB miss on an address that is translated using TTBR0
+ generates a Translation fault. No translation table walk is
+ performed. */
+ uint64_t irgn0 : 2; /**< [ 9: 8](R/W) Inner cacheability attribute for memory associated with
+ translation table walks using AP_TTBR0_EL1.
+ 0x0 = Normal memory, Inner Non-cacheable.
+ 0x1 = Normal memory, Inner Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Inner Write-Through Cacheable.
+ 0x3 = Normal memory, Inner Write-Back no Write-Allocate Cacheable. */
+ uint64_t orgn0 : 2; /**< [ 11: 10](R/W) Outer cacheability attribute for memory associated with
+ translation table walks using AP_TTBR0_EL1.
+ 0x0 = Normal memory, Outer Non-cacheable.
+ 0x1 = Normal memory, Outer Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Outer Write-Through Cacheable.
+ 0x3 = Normal memory, Outer Write-Back no Write-Allocate Cacheable. */
+ uint64_t sh0 : 2; /**< [ 13: 12](R/W) Shareability attribute for memory associated with translation
+ table walks using AP_TTBR0_EL1.
+ 0x0 = Non-shareable.
+ 0x2 = Outer Shareable.
+ 0x3 = Inner Shareable. */
+ uint64_t tg0 : 2; /**< [ 15: 14](R/W) Granule size for the corresponding translation table base
+ address register.
+
+ If the value is programmed to either a reserved value, or a
+ size that has not been implemented, then the hardware will
+ treat the field as if it has been programmed to an
+ implementation defined choice of the sizes that has been
+ implemented for all purposes other than the value read back
+ from this register.
+
+ It is implementation defined whether the value read back is
+ the value programmed or the value that corresponds to the size
+ chosen.
+
+ 0x0 = 4KB.
+ 0x1 = 64KB.
+ 0x2 = 16KB. */
+ uint64_t t1sz : 6; /**< [ 21: 16](R/W) The size offset of the memory region addressed by AP_TTBR1_EL1.
+                                                                 The region size is 2^(64-T1SZ) bytes.
+ The maximum and minimum possible values for T1SZ depend on the
+ level of translation table and the memory translation granule
+ size, as described in the AArch64 Virtual Memory System
+ Architecture chapter. */
+ uint64_t a1 : 1; /**< [ 22: 22](R/W) Selects whether AP_TTBR0_EL1 or AP_TTBR1_EL1 defines the ASID.
+ 0 = AP_TTBR0_EL1[ASID] defines the ASID.
+ 1 = AP_TTBR1_EL1[ASID] defines the ASID. */
+ uint64_t epd1 : 1; /**< [ 23: 23](R/W) Translation table walk disable for translations using
+ AP_TTBR1_EL1. This bit controls whether a translation table walk
+ is performed on a TLB miss, for an address that is translated
+ using AP_TTBR1_EL1. The encoding of this bit is:
+ 0 = Perform translation table walks using AP_TTBR1_EL1.
+ 1 = A TLB miss on an address that is translated using AP_TTBR1_EL1
+ generates a Translation fault. No translation table walk is
+ performed. */
+ uint64_t irgn1 : 2; /**< [ 25: 24](R/W) Inner cacheability attribute for memory associated with
+ translation table walks using AP_TTBR1_EL1.
+ 0x0 = Normal memory, Inner Non-cacheable
+ 0x1 = Normal memory, Inner Write-Back Write-Allocate Cacheable
+ 0x2 = Normal memory, Inner Write-Through Cacheable
+ 0x3 = Normal memory, Inner Write-Back no Write-Allocate Cacheable */
+ uint64_t orgn1 : 2; /**< [ 27: 26](R/W) Outer cacheability attribute for memory associated with
+ translation table walks using AP_TTBR1_EL1.
+ 0x0 = Normal memory, Outer Non-cacheable
+ 0x1 = Normal memory, Outer Write-Back Write-Allocate Cacheable
+ 0x2 = Normal memory, Outer Write-Through Cacheable
+ 0x3 = Normal memory, Outer Write-Back no Write-Allocate Cacheable */
+ uint64_t sh1 : 2; /**< [ 29: 28](R/W) Shareability attribute for memory associated with translation
+ table walks using AP_TTBR1_EL1.
+ 0x0 = Non-shareable
+ 0x2 = Outer Shareable
+ 0x3 = Inner Shareable */
+ uint64_t tg1 : 2; /**< [ 31: 30](R/W) AP_TTBR1_EL1 Granule size.
+
+ If the value is programmed to either a reserved value, or a
+ size that has not been implemented, then the hardware will
+ treat the field as if it has been programmed to an
+ implementation defined choice of the sizes that has been
+ implemented for all purposes other than the value read back
+ from this register.
+
+ It is implementation defined whether the value read back is
+ the value programmed or the value that corresponds to the size
+ chosen.
+
+ 0x1 = 16KB
+ 0x2 = 4KB
+ 0x3 = 64KB */
+ uint64_t ips : 3; /**< [ 34: 32](R/W) Intermediate Physical Address Size.
+ 0x0 = 32 bits, 4GB.
+ 0x1 = 36 bits, 64GB.
+ 0x2 = 40 bits, 1TB.
+ 0x3 = 42 bits, 4TB.
+ 0x4 = 44 bits, 16TB.
+ 0x5 = 48 bits, 256TB. */
+ uint64_t reserved_35 : 1;
+ uint64_t as : 1; /**< [ 36: 36](R/W) ASID Size.
+ If the implementation has only 8 bits of ASID, this field is
+ RES0.
+ 0 = 8 bit - the upper 8 bits of AP_TTBR0_EL1 and AP_TTBR1_EL1 are
+ ignored by hardware for every purpose except reading back the
+                                                                 register, and are treated as if they are all zeros when
+ used for allocation and matching entries in the TLB.
+ 1 = 16 bit - the upper 16 bits of AP_TTBR0_EL1 and AP_TTBR1_EL1 are used
+ for allocation and matching in the TLB. */
+ uint64_t tbi0 : 1; /**< [ 37: 37](R/W) Top Byte ignored - indicates whether the top byte of an
+ address is used for address match for the AP_TTBR0_EL1 region, or
+ ignored and used for tagged addresses.
+
+ This affects addresses generated in EL0 and EL1 using AArch64
+ where the address would be translated by tables pointed to by
+                                                                 AP_TTBR0_EL1. It has an effect regardless of whether the
+                                                                 EL1&0 translation regime is enabled.
+
+ Additionally, this affects changes to the program counter,
+ when TBI0 is 1 and bit [55] of the target address is 0, caused
+ by:
+
+ A branch or procedure return within EL0 or EL1.
+
+ An exception taken to EL1.
+
+ An exception return to EL0 or EL1.
+
+ In these cases bits [63:56] of the address are also set to 0
+ before it is stored in the PC.
+ 0 = Top Byte used in the address calculation.
+ 1 = Top Byte ignored in the address calculation. */
+ uint64_t tbi1 : 1; /**< [ 38: 38](R/W) Top Byte ignored - indicates whether the top byte of an
+ address is used for address match for the AP_TTBR1_EL1 region, or
+ ignored and used for tagged addresses.
+
+ This affects addresses generated in EL0 and EL1 using AArch64
+ where the address would be translated by tables pointed to by
+                                                                 AP_TTBR1_EL1. It has an effect regardless of whether the
+                                                                 EL1&0 translation regime is enabled.
+
+ Additionally, this affects changes to the program counter,
+ when TBI1 is 1 and bit [55] of the target address is 1, caused
+ by:
+
+ A branch or procedure return within EL0 or EL1.
+
+ An exception taken to EL1.
+
+ An exception return to EL0 or EL1.
+
+ In these cases bits [63:56] of the address are also set to 1
+ before it is stored in the PC.
+ 0 = Top Byte used in the address calculation.
+ 1 = Top Byte ignored in the address calculation. */
+ uint64_t reserved_39 : 1;
+ uint64_t reserved_40 : 1;
+ uint64_t had0 : 1; /**< [ 41: 41](R/W) V8.1: Hierarchical Attribute Disable.
+ HAD0(bit[41]): Hierarchical Attribute Disable for the TTBR0 region.
+ 0 = Hierarchical Attributes are enabled.
+ 1 = Hierarchical Attributes are disabled. */
+ uint64_t had1 : 1; /**< [ 42: 42](R/W) V8.1: Hierarchical Attribute Disable.
+ HAD1(bit[42]): Hierarchical Attribute Disable for the TTBR1 region.
+ 0 = Hierarchical Attributes are enabled.
+ 1 = Hierarchical Attributes are disabled. */
+ uint64_t reserved_43_63 : 21;
+#endif /* Word 0 - End */
+ } cn;
+};
+typedef union bdk_ap_tcr_el1 bdk_ap_tcr_el1_t;
+
+#define BDK_AP_TCR_EL1 BDK_AP_TCR_EL1_FUNC()
+static inline uint64_t BDK_AP_TCR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TCR_EL1_FUNC(void)
+{
+ return 0x30002000200ll;
+}
+
+#define typedef_BDK_AP_TCR_EL1 bdk_ap_tcr_el1_t
+#define bustype_BDK_AP_TCR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TCR_EL1 "AP_TCR_EL1"
+#define busnum_BDK_AP_TCR_EL1 0
+#define arguments_BDK_AP_TCR_EL1 -1,-1,-1,-1
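+
+/* Illustrative sketch, not part of the generated sources: composing an
+ * AP_TCR_EL1 value through the bitfield union defined above. The settings
+ * are example values, not a recommended configuration. With T0SZ = 25 the
+ * TTBR0 region spans 2^(64-25) = 2^39 bytes (512 GB). Writing the result to
+ * the register would go through the BDK's generic sysreg access path, which
+ * lives outside this header. */
+#if 0 /* example only */
+static inline uint64_t bdk_ap_tcr_el1_example(void)
+{
+    bdk_ap_tcr_el1_t tcr;
+    tcr.u = 0;
+    tcr.s.t0sz  = 25;  /* 2^(64-25) = 512 GB region for AP_TTBR0_EL1 */
+    tcr.s.tg0   = 0x0; /* 4KB granule */
+    tcr.s.irgn0 = 0x1; /* inner write-back write-allocate */
+    tcr.s.orgn0 = 0x1; /* outer write-back write-allocate */
+    tcr.s.sh0   = 0x3; /* inner shareable */
+    tcr.s.epd1  = 1;   /* disable AP_TTBR1_EL1 walks; misses fault */
+    tcr.s.ips   = 0x5; /* 48-bit intermediate physical addresses */
+    return tcr.u;
+}
+#endif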
+
+/**
+ * Register (SYSREG) ap_tcr_el12
+ *
+ * AP Translation Control EL1/2 Register
+ * Alias of AP_TCR_EL1 when accessed at EL2/3 and AP_HCR_EL2[E2H] is set.
+ */
+union bdk_ap_tcr_el12
+{
+ uint64_t u;
+ struct bdk_ap_tcr_el12_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_tcr_el12_s cn; */
+};
+typedef union bdk_ap_tcr_el12 bdk_ap_tcr_el12_t;
+
+#define BDK_AP_TCR_EL12 BDK_AP_TCR_EL12_FUNC()
+static inline uint64_t BDK_AP_TCR_EL12_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TCR_EL12_FUNC(void)
+{
+ return 0x30502000200ll;
+}
+
+#define typedef_BDK_AP_TCR_EL12 bdk_ap_tcr_el12_t
+#define bustype_BDK_AP_TCR_EL12 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TCR_EL12 "AP_TCR_EL12"
+#define busnum_BDK_AP_TCR_EL12 0
+#define arguments_BDK_AP_TCR_EL12 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_tcr_el2
+ *
+ * AP Translation Control Non-E2H (EL2) Register
+ * Controls translation table walks required for the stage 1
+ * translation of memory accesses from EL2, and holds
+ * cacheability and shareability information for the accesses.
+ *
+ * This register is at the same select as AP_TCR_EL2_E2H and is used when E2H=0.
+ */
+union bdk_ap_tcr_el2
+{
+ uint32_t u;
+ struct bdk_ap_tcr_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t rsvd_31 : 1; /**< [ 31: 31](RO) Reserved 1. */
+ uint32_t reserved_25_30 : 6;
+ uint32_t had : 1; /**< [ 24: 24](R/W) V8.1: Hierarchical Attribute Disable.
+ HAD (bit[24]): Hierarchical Attribute Disable.
+ 0 = Hierarchical Attributes are enabled.
+ 1 = Hierarchical Attributes are disabled. */
+ uint32_t rsvd_23 : 1; /**< [ 23: 23](RO) Reserved 1. */
+ uint32_t reserved_21_22 : 2;
+ uint32_t tbi : 1; /**< [ 20: 20](R/W) Top Byte ignored - indicates whether the top byte of an
+ address is used for address match for the AP_TTBR0_EL3 region, or
+ ignored and used for tagged addresses.
+
+ This affects addresses generated in EL3 using AArch64 where
+ the address would be translated by tables pointed to by
+                                                                 AP_TTBR0_EL3. It has an effect regardless of whether the
+                                                                 EL3 translation regime is enabled.
+
+                                                                 Additionally, this affects changes to the program counter,
+                                                                 when TBI is 1, caused by:
+                                                                 * A branch or procedure return within EL3.
+                                                                 * An exception taken to EL3.
+ * An exception return to EL3.
+
+ In these cases bits [63:56] of the address are set to 0 before
+ it is stored in the PC.
+ 0 = Top Byte used in the address calculation.
+ 1 = Top Byte ignored in the address calculation. */
+ uint32_t reserved_19 : 1;
+ uint32_t ps : 3; /**< [ 18: 16](R/W) Physical Address Size.
+
+                                                                 The reserved values behave in the same way as the 0b101 encoding.
+ 0x0 = 32 bits, 4GB.
+ 0x1 = 36 bits, 64GB.
+ 0x2 = 40 bits, 1TB.
+ 0x3 = 42 bits, 4TB.
+ 0x4 = 44 bits, 16TB.
+ 0x5 = 48 bits, 256TB. */
+ uint32_t tg0 : 2; /**< [ 15: 14](R/W) Granule size for the corresponding translation table base
+ address register.
+
+ If the value is programmed to either a reserved value, or a
+ size that has not been implemented, then the hardware will
+ treat the field as if it has been programmed to an
+ implementation defined choice of the sizes that has been
+ implemented for all purposes other than the value read back
+ from this register.
+
+ It is implementation defined whether the value read back is
+ the value programmed or the value that corresponds to the size
+ chosen.
+
+ 0x0 = 4KB.
+ 0x1 = 64KB.
+ 0x2 = 16KB. */
+ uint32_t sh0 : 2; /**< [ 13: 12](R/W) Shareability attribute for memory associated with translation
+ table walks using AP_TTBR0_EL3.
+
+ 0x0 = Non-shareable.
+ 0x2 = Outer Shareable.
+ 0x3 = Inner Shareable. */
+ uint32_t orgn0 : 2; /**< [ 11: 10](R/W) Outer cacheability attribute for memory associated with
+ translation table walks using AP_TTBR0_EL3.
+ 0x0 = Normal memory, Outer Non-cacheable.
+ 0x1 = Normal memory, Outer Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Outer Write-Through Cacheable.
+ 0x3 = Normal memory, Outer Write-Back no Write-Allocate Cacheable. */
+ uint32_t irgn0 : 2; /**< [ 9: 8](R/W) Inner cacheability attribute for memory associated with
+ translation table walks using AP_TTBR0_EL3.
+ 0x0 = Normal memory, Inner Non-cacheable.
+ 0x1 = Normal memory, Inner Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Inner Write-Through Cacheable.
+ 0x3 = Normal memory, Inner Write-Back no Write-Allocate Cacheable. */
+ uint32_t reserved_6_7 : 2;
+ uint32_t t0sz : 6; /**< [ 5: 0](R/W) The size offset of the memory region addressed by AP_TTBR0_EL3.
+                                                                 The region size is 2^(64-T0SZ) bytes.
+
+ The maximum and minimum possible values for T0SZ depend on the
+ level of translation table and the memory translation granule
+ size, as described in the AArch64 Virtual Memory System
+ Architecture chapter. */
+#else /* Word 0 - Little Endian */
+ uint32_t t0sz : 6; /**< [ 5: 0](R/W) The size offset of the memory region addressed by AP_TTBR0_EL3.
+                                                                 The region size is 2^(64-T0SZ) bytes.
+
+ The maximum and minimum possible values for T0SZ depend on the
+ level of translation table and the memory translation granule
+ size, as described in the AArch64 Virtual Memory System
+ Architecture chapter. */
+ uint32_t reserved_6_7 : 2;
+ uint32_t irgn0 : 2; /**< [ 9: 8](R/W) Inner cacheability attribute for memory associated with
+ translation table walks using AP_TTBR0_EL3.
+ 0x0 = Normal memory, Inner Non-cacheable.
+ 0x1 = Normal memory, Inner Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Inner Write-Through Cacheable.
+ 0x3 = Normal memory, Inner Write-Back no Write-Allocate Cacheable. */
+ uint32_t orgn0 : 2; /**< [ 11: 10](R/W) Outer cacheability attribute for memory associated with
+ translation table walks using AP_TTBR0_EL3.
+ 0x0 = Normal memory, Outer Non-cacheable.
+ 0x1 = Normal memory, Outer Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Outer Write-Through Cacheable.
+ 0x3 = Normal memory, Outer Write-Back no Write-Allocate Cacheable. */
+ uint32_t sh0 : 2; /**< [ 13: 12](R/W) Shareability attribute for memory associated with translation
+ table walks using AP_TTBR0_EL3.
+
+ 0x0 = Non-shareable.
+ 0x2 = Outer Shareable.
+ 0x3 = Inner Shareable. */
+ uint32_t tg0 : 2; /**< [ 15: 14](R/W) Granule size for the corresponding translation table base
+ address register.
+
+ If the value is programmed to either a reserved value, or a
+ size that has not been implemented, then the hardware will
+ treat the field as if it has been programmed to an
+ implementation defined choice of the sizes that has been
+ implemented for all purposes other than the value read back
+ from this register.
+
+ It is implementation defined whether the value read back is
+ the value programmed or the value that corresponds to the size
+ chosen.
+
+ 0x0 = 4KB.
+ 0x1 = 64KB.
+ 0x2 = 16KB. */
+ uint32_t ps : 3; /**< [ 18: 16](R/W) Physical Address Size.
+
+                                                                 The reserved values behave in the same way as the 0b101 encoding.
+ 0x0 = 32 bits, 4GB.
+ 0x1 = 36 bits, 64GB.
+ 0x2 = 40 bits, 1TB.
+ 0x3 = 42 bits, 4TB.
+ 0x4 = 44 bits, 16TB.
+ 0x5 = 48 bits, 256TB. */
+ uint32_t reserved_19 : 1;
+ uint32_t tbi : 1; /**< [ 20: 20](R/W) Top Byte ignored - indicates whether the top byte of an
+ address is used for address match for the AP_TTBR0_EL3 region, or
+ ignored and used for tagged addresses.
+
+ This affects addresses generated in EL3 using AArch64 where
+ the address would be translated by tables pointed to by
+                                                                 AP_TTBR0_EL3. It has an effect regardless of whether the
+                                                                 EL3 translation regime is enabled.
+
+                                                                 Additionally, this affects changes to the program counter,
+                                                                 when TBI is 1, caused by:
+                                                                 * A branch or procedure return within EL3.
+                                                                 * An exception taken to EL3.
+ * An exception return to EL3.
+
+ In these cases bits [63:56] of the address are set to 0 before
+ it is stored in the PC.
+ 0 = Top Byte used in the address calculation.
+ 1 = Top Byte ignored in the address calculation. */
+ uint32_t reserved_21_22 : 2;
+ uint32_t rsvd_23 : 1; /**< [ 23: 23](RO) Reserved 1. */
+ uint32_t had : 1; /**< [ 24: 24](R/W) V8.1: Hierarchical Attribute Disable.
+ HAD (bit[24]): Hierarchical Attribute Disable.
+ 0 = Hierarchical Attributes are enabled.
+ 1 = Hierarchical Attributes are disabled. */
+ uint32_t reserved_25_30 : 6;
+ uint32_t rsvd_31 : 1; /**< [ 31: 31](RO) Reserved 1. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_tcr_el2_cn
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t rsvd_31 : 1; /**< [ 31: 31](RO) Reserved 1. */
+ uint32_t reserved_25_30 : 6;
+ uint32_t had : 1; /**< [ 24: 24](R/W) V8.1: Hierarchical Attribute Disable.
+ HAD (bit[24]): Hierarchical Attribute Disable.
+ 0 = Hierarchical Attributes are enabled.
+ 1 = Hierarchical Attributes are disabled. */
+ uint32_t rsvd_23 : 1; /**< [ 23: 23](RO) Reserved 1. */
+ uint32_t reserved_22 : 1;
+ uint32_t reserved_21 : 1;
+ uint32_t tbi : 1; /**< [ 20: 20](R/W) Top Byte ignored - indicates whether the top byte of an
+ address is used for address match for the AP_TTBR0_EL3 region, or
+ ignored and used for tagged addresses.
+
+ This affects addresses generated in EL3 using AArch64 where
+ the address would be translated by tables pointed to by
+                                                                 AP_TTBR0_EL3. It has an effect regardless of whether the
+                                                                 EL3 translation regime is enabled.
+
+                                                                 Additionally, this affects changes to the program counter,
+                                                                 when TBI is 1, caused by:
+                                                                 * A branch or procedure return within EL3.
+                                                                 * An exception taken to EL3.
+ * An exception return to EL3.
+
+ In these cases bits [63:56] of the address are set to 0 before
+ it is stored in the PC.
+ 0 = Top Byte used in the address calculation.
+ 1 = Top Byte ignored in the address calculation. */
+ uint32_t reserved_19 : 1;
+ uint32_t ps : 3; /**< [ 18: 16](R/W) Physical Address Size.
+
+                                                                 The reserved values behave in the same way as the 0b101 encoding.
+ 0x0 = 32 bits, 4GB.
+ 0x1 = 36 bits, 64GB.
+ 0x2 = 40 bits, 1TB.
+ 0x3 = 42 bits, 4TB.
+ 0x4 = 44 bits, 16TB.
+ 0x5 = 48 bits, 256TB. */
+ uint32_t tg0 : 2; /**< [ 15: 14](R/W) Granule size for the corresponding translation table base
+ address register.
+
+ If the value is programmed to either a reserved value, or a
+ size that has not been implemented, then the hardware will
+ treat the field as if it has been programmed to an
+ implementation defined choice of the sizes that has been
+ implemented for all purposes other than the value read back
+ from this register.
+
+ It is implementation defined whether the value read back is
+ the value programmed or the value that corresponds to the size
+ chosen.
+
+ 0x0 = 4KB.
+ 0x1 = 64KB.
+ 0x2 = 16KB. */
+ uint32_t sh0 : 2; /**< [ 13: 12](R/W) Shareability attribute for memory associated with translation
+ table walks using AP_TTBR0_EL3.
+
+ 0x0 = Non-shareable.
+ 0x2 = Outer Shareable.
+ 0x3 = Inner Shareable. */
+ uint32_t orgn0 : 2; /**< [ 11: 10](R/W) Outer cacheability attribute for memory associated with
+ translation table walks using AP_TTBR0_EL3.
+ 0x0 = Normal memory, Outer Non-cacheable.
+ 0x1 = Normal memory, Outer Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Outer Write-Through Cacheable.
+ 0x3 = Normal memory, Outer Write-Back no Write-Allocate Cacheable. */
+ uint32_t irgn0 : 2; /**< [ 9: 8](R/W) Inner cacheability attribute for memory associated with
+ translation table walks using AP_TTBR0_EL3.
+ 0x0 = Normal memory, Inner Non-cacheable.
+ 0x1 = Normal memory, Inner Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Inner Write-Through Cacheable.
+ 0x3 = Normal memory, Inner Write-Back no Write-Allocate Cacheable. */
+ uint32_t reserved_6_7 : 2;
+ uint32_t t0sz : 6; /**< [ 5: 0](R/W) The size offset of the memory region addressed by AP_TTBR0_EL3.
+                                                                 The region size is 2^(64-T0SZ) bytes.
+
+ The maximum and minimum possible values for T0SZ depend on the
+ level of translation table and the memory translation granule
+ size, as described in the AArch64 Virtual Memory System
+ Architecture chapter. */
+#else /* Word 0 - Little Endian */
+ uint32_t t0sz : 6; /**< [ 5: 0](R/W) The size offset of the memory region addressed by AP_TTBR0_EL3.
+                                                                 The region size is 2^(64-T0SZ) bytes.
+
+ The maximum and minimum possible values for T0SZ depend on the
+ level of translation table and the memory translation granule
+ size, as described in the AArch64 Virtual Memory System
+ Architecture chapter. */
+ uint32_t reserved_6_7 : 2;
+ uint32_t irgn0 : 2; /**< [ 9: 8](R/W) Inner cacheability attribute for memory associated with
+ translation table walks using AP_TTBR0_EL3.
+ 0x0 = Normal memory, Inner Non-cacheable.
+ 0x1 = Normal memory, Inner Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Inner Write-Through Cacheable.
+ 0x3 = Normal memory, Inner Write-Back no Write-Allocate Cacheable. */
+ uint32_t orgn0 : 2; /**< [ 11: 10](R/W) Outer cacheability attribute for memory associated with
+ translation table walks using AP_TTBR0_EL3.
+ 0x0 = Normal memory, Outer Non-cacheable.
+ 0x1 = Normal memory, Outer Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Outer Write-Through Cacheable.
+ 0x3 = Normal memory, Outer Write-Back no Write-Allocate Cacheable. */
+ uint32_t sh0 : 2; /**< [ 13: 12](R/W) Shareability attribute for memory associated with translation
+ table walks using AP_TTBR0_EL3.
+
+ 0x0 = Non-shareable.
+ 0x2 = Outer Shareable.
+ 0x3 = Inner Shareable. */
+ uint32_t tg0 : 2; /**< [ 15: 14](R/W) Granule size for the corresponding translation table base
+ address register.
+
+ If the value is programmed to either a reserved value, or a
+ size that has not been implemented, then the hardware will
+ treat the field as if it has been programmed to an
+ implementation defined choice of the sizes that has been
+ implemented for all purposes other than the value read back
+ from this register.
+
+ It is implementation defined whether the value read back is
+ the value programmed or the value that corresponds to the size
+ chosen.
+
+ 0x0 = 4KB.
+ 0x1 = 64KB.
+ 0x2 = 16KB. */
+ uint32_t ps : 3; /**< [ 18: 16](R/W) Physical Address Size.
+
+                                                                 The reserved values behave in the same way as the 0b101 encoding.
+ 0x0 = 32 bits, 4GB.
+ 0x1 = 36 bits, 64GB.
+ 0x2 = 40 bits, 1TB.
+ 0x3 = 42 bits, 4TB.
+ 0x4 = 44 bits, 16TB.
+ 0x5 = 48 bits, 256TB. */
+ uint32_t reserved_19 : 1;
+ uint32_t tbi : 1; /**< [ 20: 20](R/W) Top Byte ignored - indicates whether the top byte of an
+ address is used for address match for the AP_TTBR0_EL3 region, or
+ ignored and used for tagged addresses.
+
+ This affects addresses generated in EL3 using AArch64 where
+ the address would be translated by tables pointed to by
+                                                                 AP_TTBR0_EL3. It has an effect regardless of whether the
+                                                                 EL3 translation regime is enabled.
+
+                                                                 Additionally, this affects changes to the program counter,
+                                                                 when TBI is 1, caused by:
+                                                                 * A branch or procedure return within EL3.
+                                                                 * An exception taken to EL3.
+ * An exception return to EL3.
+
+ In these cases bits [63:56] of the address are set to 0 before
+ it is stored in the PC.
+ 0 = Top Byte used in the address calculation.
+ 1 = Top Byte ignored in the address calculation. */
+ uint32_t reserved_21 : 1;
+ uint32_t reserved_22 : 1;
+ uint32_t rsvd_23 : 1; /**< [ 23: 23](RO) Reserved 1. */
+ uint32_t had : 1; /**< [ 24: 24](R/W) V8.1: Hierarchical Attribute Disable.
+ HAD (bit[24]): Hierarchical Attribute Disable.
+ 0 = Hierarchical Attributes are enabled.
+ 1 = Hierarchical Attributes are disabled. */
+ uint32_t reserved_25_30 : 6;
+ uint32_t rsvd_31 : 1; /**< [ 31: 31](RO) Reserved 1. */
+#endif /* Word 0 - End */
+ } cn;
+};
+typedef union bdk_ap_tcr_el2 bdk_ap_tcr_el2_t;
+
+#define BDK_AP_TCR_EL2 BDK_AP_TCR_EL2_FUNC()
+static inline uint64_t BDK_AP_TCR_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TCR_EL2_FUNC(void)
+{
+ return 0x30402000200ll;
+}
+
+#define typedef_BDK_AP_TCR_EL2 bdk_ap_tcr_el2_t
+#define bustype_BDK_AP_TCR_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TCR_EL2 "AP_TCR_EL2"
+#define busnum_BDK_AP_TCR_EL2 0
+#define arguments_BDK_AP_TCR_EL2 -1,-1,-1,-1
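+
+/* Illustrative note, not part of the generated sources: AP_TCR_EL2 above and
+ * AP_TCR_EL2_E2H below share the same system-register select; the hardware
+ * interprets the bits using the non-E2H layout when AP_HCR_EL2[E2H] = 0 and
+ * the v8.1 E2H layout when E2H = 1. A hypothetical reader would therefore
+ * pick the union matching the current E2H setting: */
+#if 0 /* example only */
+static inline void bdk_ap_tcr_el2_view(uint64_t raw, int e2h)
+{
+    if (e2h) {
+        bdk_ap_tcr_el2_e2h_t t = { .u = raw };
+        (void)t.s.epd1; /* TTBR1-related fields exist only in the E2H form */
+    } else {
+        bdk_ap_tcr_el2_t t = { .u = (uint32_t)raw };
+        (void)t.s.t0sz; /* 32-bit non-E2H layout */
+    }
+}
+#endif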
+
+/**
+ * Register (SYSREG) ap_tcr_el2_e2h
+ *
+ * AP Translation Control EL2 E2H (v8.1) Register
+ * [v8.1] Determines which of the Translation Table Base Registers
+ * defines the base address for a translation table walk required
+ * for the stage 1 translation of a memory access from EL2. Also
+ * controls the translation table format and holds cacheability and
+ * shareability information.
+ *
+ * This register is at the same select as AP_TCR_EL2 and is used when E2H=1.
+ */
+union bdk_ap_tcr_el2_e2h
+{
+ uint64_t u;
+ struct bdk_ap_tcr_el2_e2h_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_43_63 : 21;
+ uint64_t had1 : 1; /**< [ 42: 42](R/W) V8.1: Hierarchical Attribute Disable.
+ HAD1(bit[42]): Hierarchical Attribute Disable for the TTBR1 region.
+ 0 = Hierarchical Attributes are enabled.
+ 1 = Hierarchical Attributes are disabled. */
+ uint64_t had0 : 1; /**< [ 41: 41](R/W) V8.1: Hierarchical Attribute Disable.
+ HAD0(bit[41]): Hierarchical Attribute Disable for the TTBR0 region.
+ 0 = Hierarchical Attributes are enabled.
+ 1 = Hierarchical Attributes are disabled. */
+ uint64_t reserved_39_40 : 2;
+ uint64_t tbi1 : 1; /**< [ 38: 38](R/W) Top Byte ignored - indicates whether the top byte of an
+ address is used for address match for the AP_TTBR1_EL2 region, or
+ ignored and used for tagged addresses.
+
+ This affects addresses generated in EL2 using AArch64
+ where the address would be translated by tables pointed to by
+                                                                 AP_TTBR1_EL2. It has an effect regardless of whether the
+                                                                 EL2 translation regime is enabled.
+
+ Additionally, this affects changes to the program counter,
+ when TBI1 is 1 and bit [55] of the target address is 1, caused
+ by:
+ * A branch or procedure return within EL2.
+ * An exception taken to EL2.
+ * An exception return to EL2.
+
+ In these cases bits [63:56] of the address are also set to 1
+ before it is stored in the PC.
+ 0 = Top Byte used in the address calculation.
+ 1 = Top Byte ignored in the address calculation. */
+ uint64_t tbi0 : 1; /**< [ 37: 37](R/W) Top Byte ignored - indicates whether the top byte of an
+ address is used for address match for the AP_TTBR0_EL2 region, or
+ ignored and used for tagged addresses.
+
+ This affects addresses generated in EL2 using AArch64
+ where the address would be translated by tables pointed to by
+ AP_TTBR0_EL2. It has an effect whether the EL2 translation
+ regime is enabled or not.
+
+ Additionally, this affects changes to the program counter,
+ when TBI0 is 1 and bit [55] of the target address is 0, caused
+ by:
+ * A branch or procedure return within EL2.
+ * An exception taken to EL2.
+ * An exception return to EL2.
+
+ In these cases bits [63:56] of the address are also set to 0
+ before it is stored in the PC.
+ 0 = Top Byte used in the address calculation.
+ 1 = Top Byte ignored in the address calculation. */
+ uint64_t as : 1; /**< [ 36: 36](R/W) ASID Size.
+ If the implementation has only 8 bits of ASID, this field is
+ RES0.
+ 0 = 8 bit - the upper 8 bits of AP_TTBR0_EL2 and AP_TTBR1_EL2 are
+ ignored by hardware for every purpose except reading back the
+ register, and are treated as if they are all zeros when
+ used for allocation and matching entries in the TLB.
+ 1 = 16 bit - the upper 16 bits of AP_TTBR0_EL2 and AP_TTBR1_EL2 are used
+ for allocation and matching in the TLB. */
+ uint64_t reserved_35 : 1;
+ uint64_t ips : 3; /**< [ 34: 32](R/W) Intermediate Physical Address Size.
+ 0x0 = 32 bits, 4GB.
+ 0x1 = 36 bits, 64GB.
+ 0x2 = 40 bits, 1TB.
+ 0x3 = 42 bits, 4TB.
+ 0x4 = 44 bits, 16TB.
+ 0x5 = 48 bits, 256TB. */
+ uint64_t tg1 : 2; /**< [ 31: 30](R/W) AP_TTBR1_EL2 Granule size.
+
+ If the value is programmed to either a reserved value, or a
+ size that has not been implemented, then the hardware will
+ treat the field as if it has been programmed to an
+ implementation defined choice of the sizes that has been
+ implemented for all purposes other than the value read back
+ from this register.
+
+ It is implementation defined whether the value read back is
+ the value programmed or the value that corresponds to the size
+ chosen.
+
+ 0x1 = 16KB.
+ 0x2 = 4KB.
+ 0x3 = 64KB. */
+ uint64_t sh1 : 2; /**< [ 29: 28](R/W) Shareability attribute for memory associated with translation
+ table walks using AP_TTBR1_EL2.
+
+ 0x0 = Non-shareable.
+ 0x2 = Outer Shareable.
+ 0x3 = Inner Shareable. */
+ uint64_t orgn1 : 2; /**< [ 27: 26](R/W) Outer cacheability attribute for memory associated with
+ translation table walks using AP_TTBR1_EL2.
+ 0x0 = Normal memory, Outer Non-cacheable.
+ 0x1 = Normal memory, Outer Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Outer Write-Through Cacheable.
+ 0x3 = Normal memory, Outer Write-Back no Write-Allocate Cacheable. */
+ uint64_t irgn1 : 2; /**< [ 25: 24](R/W) Inner cacheability attribute for memory associated with
+ translation table walks using AP_TTBR1_EL2.
+ 0x0 = Normal memory, Inner Non-cacheable.
+ 0x1 = Normal memory, Inner Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Inner Write-Through Cacheable.
+ 0x3 = Normal memory, Inner Write-Back no Write-Allocate Cacheable. */
+ uint64_t epd1 : 1; /**< [ 23: 23](R/W) Translation table walk disable for translations using
+ AP_TTBR1_EL2. This bit controls whether a translation table walk
+ is performed on a TLB miss, for an address that is translated
+ using AP_TTBR1_EL2. The encoding of this bit is:
+ 0 = Perform translation table walks using AP_TTBR1_EL2.
+ 1 = A TLB miss on an address that is translated using AP_TTBR1_EL2
+ generates a Translation fault. No translation table walk is
+ performed. */
+ uint64_t a1 : 1; /**< [ 22: 22](R/W) Selects whether AP_TTBR0_EL2 or AP_TTBR1_EL2 defines the ASID. The
+ encoding of this bit is:
+ 0 = AP_TTBR0_EL2[ASID] defines the ASID.
+ 1 = AP_TTBR1_EL2[ASID] defines the ASID. */
+ uint64_t t1sz : 6; /**< [ 21: 16](R/W) The size offset of the memory region addressed by AP_TTBR1_EL2.
+ The region size is 2^(64-T1SZ) bytes.
+ The maximum and minimum possible values for T1SZ depend on the
+ level of translation table and the memory translation granule
+ size, as described in the AArch64 Virtual Memory System
+ Architecture chapter. */
+ uint64_t tg0 : 2; /**< [ 15: 14](R/W) Granule size for the corresponding translation table base
+ address register.
+
+ If the value is programmed to either a reserved value, or a
+ size that has not been implemented, then the hardware will
+ treat the field as if it has been programmed to an
+ implementation defined choice of the sizes that has been
+ implemented for all purposes other than the value read back
+ from this register.
+
+ It is implementation defined whether the value read back is
+ the value programmed or the value that corresponds to the size
+ chosen.
+
+ 0x0 = 4KB.
+ 0x1 = 64KB.
+ 0x2 = 16KB. */
+ uint64_t sh0 : 2; /**< [ 13: 12](R/W) Shareability attribute for memory associated with translation
+ table walks using AP_TTBR0_EL2.
+
+ 0x0 = Non-shareable.
+ 0x2 = Outer Shareable.
+ 0x3 = Inner Shareable. */
+ uint64_t orgn0 : 2; /**< [ 11: 10](R/W) Outer cacheability attribute for memory associated with
+ translation table walks using AP_TTBR0_EL2.
+ 0x0 = Normal memory, Outer Non-cacheable.
+ 0x1 = Normal memory, Outer Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Outer Write-Through Cacheable.
+ 0x3 = Normal memory, Outer Write-Back no Write-Allocate Cacheable. */
+ uint64_t irgn0 : 2; /**< [ 9: 8](R/W) Inner cacheability attribute for memory associated with
+ translation table walks using AP_TTBR0_EL2.
+ 0x0 = Normal memory, Inner Non-cacheable.
+ 0x1 = Normal memory, Inner Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Inner Write-Through Cacheable.
+ 0x3 = Normal memory, Inner Write-Back no Write-Allocate Cacheable. */
+ uint64_t epd0 : 1; /**< [ 7: 7](R/W) Translation table walk disable for translations using TTBR0.
+ This bit controls whether a translation table walk is
+ performed on a TLB miss, for an address that is translated
+ using TTBR0. The encoding of this bit is:
+ 0 = Perform translation table walks using TTBR0.
+ 1 = A TLB miss on an address that is translated using TTBR0
+ generates a Translation fault. No translation table walk is
+ performed. */
+ uint64_t reserved_6 : 1;
+ uint64_t t0sz : 6; /**< [ 5: 0](R/W) The size offset of the memory region addressed by AP_TTBR0_EL2.
+ The region size is 2^(64-T0SZ) bytes.
+ The maximum and minimum possible values for T0SZ depend on the
+ level of translation table and the memory translation granule
+ size, as described in the AArch64 Virtual Memory System
+ Architecture chapter. */
+#else /* Word 0 - Little Endian */
+ uint64_t t0sz : 6; /**< [ 5: 0](R/W) The size offset of the memory region addressed by AP_TTBR0_EL2.
+ The region size is 2^(64-T0SZ) bytes.
+ The maximum and minimum possible values for T0SZ depend on the
+ level of translation table and the memory translation granule
+ size, as described in the AArch64 Virtual Memory System
+ Architecture chapter. */
+ uint64_t reserved_6 : 1;
+ uint64_t epd0 : 1; /**< [ 7: 7](R/W) Translation table walk disable for translations using TTBR0.
+ This bit controls whether a translation table walk is
+ performed on a TLB miss, for an address that is translated
+ using TTBR0. The encoding of this bit is:
+ 0 = Perform translation table walks using TTBR0.
+ 1 = A TLB miss on an address that is translated using TTBR0
+ generates a Translation fault. No translation table walk is
+ performed. */
+ uint64_t irgn0 : 2; /**< [ 9: 8](R/W) Inner cacheability attribute for memory associated with
+ translation table walks using AP_TTBR0_EL2.
+ 0x0 = Normal memory, Inner Non-cacheable.
+ 0x1 = Normal memory, Inner Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Inner Write-Through Cacheable.
+ 0x3 = Normal memory, Inner Write-Back no Write-Allocate Cacheable. */
+ uint64_t orgn0 : 2; /**< [ 11: 10](R/W) Outer cacheability attribute for memory associated with
+ translation table walks using AP_TTBR0_EL2.
+ 0x0 = Normal memory, Outer Non-cacheable.
+ 0x1 = Normal memory, Outer Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Outer Write-Through Cacheable.
+ 0x3 = Normal memory, Outer Write-Back no Write-Allocate Cacheable. */
+ uint64_t sh0 : 2; /**< [ 13: 12](R/W) Shareability attribute for memory associated with translation
+ table walks using AP_TTBR0_EL2.
+
+ 0x0 = Non-shareable.
+ 0x2 = Outer Shareable.
+ 0x3 = Inner Shareable. */
+ uint64_t tg0 : 2; /**< [ 15: 14](R/W) Granule size for the corresponding translation table base
+ address register.
+
+ If the value is programmed to either a reserved value, or a
+ size that has not been implemented, then the hardware will
+ treat the field as if it has been programmed to an
+ implementation defined choice of the sizes that has been
+ implemented for all purposes other than the value read back
+ from this register.
+
+ It is implementation defined whether the value read back is
+ the value programmed or the value that corresponds to the size
+ chosen.
+
+ 0x0 = 4KB.
+ 0x1 = 64KB.
+ 0x2 = 16KB. */
+ uint64_t t1sz : 6; /**< [ 21: 16](R/W) The size offset of the memory region addressed by AP_TTBR1_EL2.
+ The region size is 2^(64-T1SZ) bytes.
+ The maximum and minimum possible values for T1SZ depend on the
+ level of translation table and the memory translation granule
+ size, as described in the AArch64 Virtual Memory System
+ Architecture chapter. */
+ uint64_t a1 : 1; /**< [ 22: 22](R/W) Selects whether AP_TTBR0_EL2 or AP_TTBR1_EL2 defines the ASID. The
+ encoding of this bit is:
+ 0 = AP_TTBR0_EL2[ASID] defines the ASID.
+ 1 = AP_TTBR1_EL2[ASID] defines the ASID. */
+ uint64_t epd1 : 1; /**< [ 23: 23](R/W) Translation table walk disable for translations using
+ AP_TTBR1_EL2. This bit controls whether a translation table walk
+ is performed on a TLB miss, for an address that is translated
+ using AP_TTBR1_EL2. The encoding of this bit is:
+ 0 = Perform translation table walks using AP_TTBR1_EL2.
+ 1 = A TLB miss on an address that is translated using AP_TTBR1_EL2
+ generates a Translation fault. No translation table walk is
+ performed. */
+ uint64_t irgn1 : 2; /**< [ 25: 24](R/W) Inner cacheability attribute for memory associated with
+ translation table walks using AP_TTBR1_EL2.
+ 0x0 = Normal memory, Inner Non-cacheable.
+ 0x1 = Normal memory, Inner Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Inner Write-Through Cacheable.
+ 0x3 = Normal memory, Inner Write-Back no Write-Allocate Cacheable. */
+ uint64_t orgn1 : 2; /**< [ 27: 26](R/W) Outer cacheability attribute for memory associated with
+ translation table walks using AP_TTBR1_EL2.
+ 0x0 = Normal memory, Outer Non-cacheable.
+ 0x1 = Normal memory, Outer Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Outer Write-Through Cacheable.
+ 0x3 = Normal memory, Outer Write-Back no Write-Allocate Cacheable. */
+ uint64_t sh1 : 2; /**< [ 29: 28](R/W) Shareability attribute for memory associated with translation
+ table walks using AP_TTBR1_EL2.
+
+ 0x0 = Non-shareable.
+ 0x2 = Outer Shareable.
+ 0x3 = Inner Shareable. */
+ uint64_t tg1 : 2; /**< [ 31: 30](R/W) AP_TTBR1_EL2 Granule size.
+
+ If the value is programmed to either a reserved value, or a
+ size that has not been implemented, then the hardware will
+ treat the field as if it has been programmed to an
+ implementation defined choice of the sizes that has been
+ implemented for all purposes other than the value read back
+ from this register.
+
+ It is implementation defined whether the value read back is
+ the value programmed or the value that corresponds to the size
+ chosen.
+
+ 0x1 = 16KB.
+ 0x2 = 4KB.
+ 0x3 = 64KB. */
+ uint64_t ips : 3; /**< [ 34: 32](R/W) Intermediate Physical Address Size.
+ 0x0 = 32 bits, 4GB.
+ 0x1 = 36 bits, 64GB.
+ 0x2 = 40 bits, 1TB.
+ 0x3 = 42 bits, 4TB.
+ 0x4 = 44 bits, 16TB.
+ 0x5 = 48 bits, 256TB. */
+ uint64_t reserved_35 : 1;
+ uint64_t as : 1; /**< [ 36: 36](R/W) ASID Size.
+ If the implementation has only 8 bits of ASID, this field is
+ RES0.
+ 0 = 8 bit - the upper 8 bits of AP_TTBR0_EL2 and AP_TTBR1_EL2 are
+ ignored by hardware for every purpose except reading back the
+ register, and are treated as if they are all zeros when
+ used for allocation and matching entries in the TLB.
+ 1 = 16 bit - the upper 16 bits of AP_TTBR0_EL2 and AP_TTBR1_EL2 are used
+ for allocation and matching in the TLB. */
+ uint64_t tbi0 : 1; /**< [ 37: 37](R/W) Top Byte ignored - indicates whether the top byte of an
+ address is used for address match for the AP_TTBR0_EL2 region, or
+ ignored and used for tagged addresses.
+
+ This affects addresses generated in EL2 using AArch64
+ where the address would be translated by tables pointed to by
+ AP_TTBR0_EL2. It has an effect whether the EL2 translation
+ regime is enabled or not.
+
+ Additionally, this affects changes to the program counter,
+ when TBI0 is 1 and bit [55] of the target address is 0, caused
+ by:
+ * A branch or procedure return within EL2.
+ * An exception taken to EL2.
+ * An exception return to EL2.
+
+ In these cases bits [63:56] of the address are also set to 0
+ before it is stored in the PC.
+ 0 = Top Byte used in the address calculation.
+ 1 = Top Byte ignored in the address calculation. */
+ uint64_t tbi1 : 1; /**< [ 38: 38](R/W) Top Byte ignored - indicates whether the top byte of an
+ address is used for address match for the AP_TTBR1_EL2 region, or
+ ignored and used for tagged addresses.
+
+ This affects addresses generated in EL2 using AArch64
+ where the address would be translated by tables pointed to by
+ AP_TTBR1_EL2. It has an effect whether the EL2 translation
+ regime is enabled or not.
+
+ Additionally, this affects changes to the program counter,
+ when TBI1 is 1 and bit [55] of the target address is 1, caused
+ by:
+ * A branch or procedure return within EL2.
+ * An exception taken to EL2.
+ * An exception return to EL2.
+
+ In these cases bits [63:56] of the address are also set to 1
+ before it is stored in the PC.
+ 0 = Top Byte used in the address calculation.
+ 1 = Top Byte ignored in the address calculation. */
+ uint64_t reserved_39_40 : 2;
+ uint64_t had0 : 1; /**< [ 41: 41](R/W) V8.1: Hierarchical Attribute Disable.
+ HAD0(bit[41]): Hierarchical Attribute Disable for the TTBR0 region.
+ 0 = Hierarchical Attributes are enabled.
+ 1 = Hierarchical Attributes are disabled. */
+ uint64_t had1 : 1; /**< [ 42: 42](R/W) V8.1: Hierarchical Attribute Disable.
+ HAD1(bit[42]): Hierarchical Attribute Disable for the TTBR1 region.
+ 0 = Hierarchical Attributes are enabled.
+ 1 = Hierarchical Attributes are disabled. */
+ uint64_t reserved_43_63 : 21;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_tcr_el2_e2h_cn
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_43_63 : 21;
+ uint64_t had1 : 1; /**< [ 42: 42](R/W) V8.1: Hierarchical Attribute Disable.
+ HAD1(bit[42]): Hierarchical Attribute Disable for the TTBR1 region.
+ 0 = Hierarchical Attributes are enabled.
+ 1 = Hierarchical Attributes are disabled. */
+ uint64_t had0 : 1; /**< [ 41: 41](R/W) V8.1: Hierarchical Attribute Disable.
+ HAD0(bit[41]): Hierarchical Attribute Disable for the TTBR0 region.
+ 0 = Hierarchical Attributes are enabled.
+ 1 = Hierarchical Attributes are disabled. */
+ uint64_t reserved_40 : 1;
+ uint64_t reserved_39 : 1;
+ uint64_t tbi1 : 1; /**< [ 38: 38](R/W) Top Byte ignored - indicates whether the top byte of an
+ address is used for address match for the AP_TTBR1_EL2 region, or
+ ignored and used for tagged addresses.
+
+ This affects addresses generated in EL2 using AArch64
+ where the address would be translated by tables pointed to by
+ AP_TTBR1_EL2. It has an effect whether the EL2 translation
+ regime is enabled or not.
+
+ Additionally, this affects changes to the program counter,
+ when TBI1 is 1 and bit [55] of the target address is 1, caused
+ by:
+ * A branch or procedure return within EL2.
+ * An exception taken to EL2.
+ * An exception return to EL2.
+
+ In these cases bits [63:56] of the address are also set to 1
+ before it is stored in the PC.
+ 0 = Top Byte used in the address calculation.
+ 1 = Top Byte ignored in the address calculation. */
+ uint64_t tbi0 : 1; /**< [ 37: 37](R/W) Top Byte ignored - indicates whether the top byte of an
+ address is used for address match for the AP_TTBR0_EL2 region, or
+ ignored and used for tagged addresses.
+
+ This affects addresses generated in EL2 using AArch64
+ where the address would be translated by tables pointed to by
+ AP_TTBR0_EL2. It has an effect whether the EL2 translation
+ regime is enabled or not.
+
+ Additionally, this affects changes to the program counter,
+ when TBI0 is 1 and bit [55] of the target address is 0, caused
+ by:
+ * A branch or procedure return within EL2.
+ * An exception taken to EL2.
+ * An exception return to EL2.
+
+ In these cases bits [63:56] of the address are also set to 0
+ before it is stored in the PC.
+ 0 = Top Byte used in the address calculation.
+ 1 = Top Byte ignored in the address calculation. */
+ uint64_t as : 1; /**< [ 36: 36](R/W) ASID Size.
+ If the implementation has only 8 bits of ASID, this field is
+ RES0.
+ 0 = 8 bit - the upper 8 bits of AP_TTBR0_EL2 and AP_TTBR1_EL2 are
+ ignored by hardware for every purpose except reading back the
+ register, and are treated as if they are all zeros when
+ used for allocation and matching entries in the TLB.
+ 1 = 16 bit - the upper 16 bits of AP_TTBR0_EL2 and AP_TTBR1_EL2 are used
+ for allocation and matching in the TLB. */
+ uint64_t reserved_35 : 1;
+ uint64_t ips : 3; /**< [ 34: 32](R/W) Intermediate Physical Address Size.
+ 0x0 = 32 bits, 4GB.
+ 0x1 = 36 bits, 64GB.
+ 0x2 = 40 bits, 1TB.
+ 0x3 = 42 bits, 4TB.
+ 0x4 = 44 bits, 16TB.
+ 0x5 = 48 bits, 256TB. */
+ uint64_t tg1 : 2; /**< [ 31: 30](R/W) AP_TTBR1_EL2 Granule size.
+
+ If the value is programmed to either a reserved value, or a
+ size that has not been implemented, then the hardware will
+ treat the field as if it has been programmed to an
+ implementation defined choice of the sizes that has been
+ implemented for all purposes other than the value read back
+ from this register.
+
+ It is implementation defined whether the value read back is
+ the value programmed or the value that corresponds to the size
+ chosen.
+
+ 0x1 = 16KB.
+ 0x2 = 4KB.
+ 0x3 = 64KB. */
+ uint64_t sh1 : 2; /**< [ 29: 28](R/W) Shareability attribute for memory associated with translation
+ table walks using AP_TTBR1_EL2.
+
+ 0x0 = Non-shareable.
+ 0x2 = Outer Shareable.
+ 0x3 = Inner Shareable. */
+ uint64_t orgn1 : 2; /**< [ 27: 26](R/W) Outer cacheability attribute for memory associated with
+ translation table walks using AP_TTBR1_EL2.
+ 0x0 = Normal memory, Outer Non-cacheable.
+ 0x1 = Normal memory, Outer Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Outer Write-Through Cacheable.
+ 0x3 = Normal memory, Outer Write-Back no Write-Allocate Cacheable. */
+ uint64_t irgn1 : 2; /**< [ 25: 24](R/W) Inner cacheability attribute for memory associated with
+ translation table walks using AP_TTBR1_EL2.
+ 0x0 = Normal memory, Inner Non-cacheable.
+ 0x1 = Normal memory, Inner Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Inner Write-Through Cacheable.
+ 0x3 = Normal memory, Inner Write-Back no Write-Allocate Cacheable. */
+ uint64_t epd1 : 1; /**< [ 23: 23](R/W) Translation table walk disable for translations using
+ AP_TTBR1_EL2. This bit controls whether a translation table walk
+ is performed on a TLB miss, for an address that is translated
+ using AP_TTBR1_EL2. The encoding of this bit is:
+ 0 = Perform translation table walks using AP_TTBR1_EL2.
+ 1 = A TLB miss on an address that is translated using AP_TTBR1_EL2
+ generates a Translation fault. No translation table walk is
+ performed. */
+ uint64_t a1 : 1; /**< [ 22: 22](R/W) Selects whether AP_TTBR0_EL2 or AP_TTBR1_EL2 defines the ASID. The
+ encoding of this bit is:
+ 0 = AP_TTBR0_EL2[ASID] defines the ASID.
+ 1 = AP_TTBR1_EL2[ASID] defines the ASID. */
+ uint64_t t1sz : 6; /**< [ 21: 16](R/W) The size offset of the memory region addressed by AP_TTBR1_EL2.
+ The region size is 2^(64-T1SZ) bytes.
+ The maximum and minimum possible values for T1SZ depend on the
+ level of translation table and the memory translation granule
+ size, as described in the AArch64 Virtual Memory System
+ Architecture chapter. */
+ uint64_t tg0 : 2; /**< [ 15: 14](R/W) Granule size for the corresponding translation table base
+ address register.
+
+ If the value is programmed to either a reserved value, or a
+ size that has not been implemented, then the hardware will
+ treat the field as if it has been programmed to an
+ implementation defined choice of the sizes that has been
+ implemented for all purposes other than the value read back
+ from this register.
+
+ It is implementation defined whether the value read back is
+ the value programmed or the value that corresponds to the size
+ chosen.
+
+ 0x0 = 4KB.
+ 0x1 = 64KB.
+ 0x2 = 16KB. */
+ uint64_t sh0 : 2; /**< [ 13: 12](R/W) Shareability attribute for memory associated with translation
+ table walks using AP_TTBR0_EL2.
+
+ 0x0 = Non-shareable.
+ 0x2 = Outer Shareable.
+ 0x3 = Inner Shareable. */
+ uint64_t orgn0 : 2; /**< [ 11: 10](R/W) Outer cacheability attribute for memory associated with
+ translation table walks using AP_TTBR0_EL2.
+ 0x0 = Normal memory, Outer Non-cacheable.
+ 0x1 = Normal memory, Outer Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Outer Write-Through Cacheable.
+ 0x3 = Normal memory, Outer Write-Back no Write-Allocate Cacheable. */
+ uint64_t irgn0 : 2; /**< [ 9: 8](R/W) Inner cacheability attribute for memory associated with
+ translation table walks using AP_TTBR0_EL2.
+ 0x0 = Normal memory, Inner Non-cacheable.
+ 0x1 = Normal memory, Inner Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Inner Write-Through Cacheable.
+ 0x3 = Normal memory, Inner Write-Back no Write-Allocate Cacheable. */
+ uint64_t epd0 : 1; /**< [ 7: 7](R/W) Translation table walk disable for translations using TTBR0.
+ This bit controls whether a translation table walk is
+ performed on a TLB miss, for an address that is translated
+ using TTBR0. The encoding of this bit is:
+ 0 = Perform translation table walks using TTBR0.
+ 1 = A TLB miss on an address that is translated using TTBR0
+ generates a Translation fault. No translation table walk is
+ performed. */
+ uint64_t reserved_6 : 1;
+ uint64_t t0sz : 6; /**< [ 5: 0](R/W) The size offset of the memory region addressed by AP_TTBR0_EL2.
+ The region size is 2^(64-T0SZ) bytes.
+ The maximum and minimum possible values for T0SZ depend on the
+ level of translation table and the memory translation granule
+ size, as described in the AArch64 Virtual Memory System
+ Architecture chapter. */
+#else /* Word 0 - Little Endian */
+ uint64_t t0sz : 6; /**< [ 5: 0](R/W) The size offset of the memory region addressed by AP_TTBR0_EL2.
+ The region size is 2^(64-T0SZ) bytes.
+ The maximum and minimum possible values for T0SZ depend on the
+ level of translation table and the memory translation granule
+ size, as described in the AArch64 Virtual Memory System
+ Architecture chapter. */
+ uint64_t reserved_6 : 1;
+ uint64_t epd0 : 1; /**< [ 7: 7](R/W) Translation table walk disable for translations using TTBR0.
+ This bit controls whether a translation table walk is
+ performed on a TLB miss, for an address that is translated
+ using TTBR0. The encoding of this bit is:
+ 0 = Perform translation table walks using TTBR0.
+ 1 = A TLB miss on an address that is translated using TTBR0
+ generates a Translation fault. No translation table walk is
+ performed. */
+ uint64_t irgn0 : 2; /**< [ 9: 8](R/W) Inner cacheability attribute for memory associated with
+ translation table walks using AP_TTBR0_EL2.
+ 0x0 = Normal memory, Inner Non-cacheable.
+ 0x1 = Normal memory, Inner Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Inner Write-Through Cacheable.
+ 0x3 = Normal memory, Inner Write-Back no Write-Allocate Cacheable. */
+ uint64_t orgn0 : 2; /**< [ 11: 10](R/W) Outer cacheability attribute for memory associated with
+ translation table walks using AP_TTBR0_EL2.
+ 0x0 = Normal memory, Outer Non-cacheable.
+ 0x1 = Normal memory, Outer Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Outer Write-Through Cacheable.
+ 0x3 = Normal memory, Outer Write-Back no Write-Allocate Cacheable. */
+ uint64_t sh0 : 2; /**< [ 13: 12](R/W) Shareability attribute for memory associated with translation
+ table walks using AP_TTBR0_EL2.
+
+ 0x0 = Non-shareable.
+ 0x2 = Outer Shareable.
+ 0x3 = Inner Shareable. */
+ uint64_t tg0 : 2; /**< [ 15: 14](R/W) Granule size for the corresponding translation table base
+ address register.
+
+ If the value is programmed to either a reserved value, or a
+ size that has not been implemented, then the hardware will
+ treat the field as if it has been programmed to an
+ implementation defined choice of the sizes that has been
+ implemented for all purposes other than the value read back
+ from this register.
+
+ It is implementation defined whether the value read back is
+ the value programmed or the value that corresponds to the size
+ chosen.
+
+ 0x0 = 4KB.
+ 0x1 = 64KB.
+ 0x2 = 16KB. */
+ uint64_t t1sz : 6; /**< [ 21: 16](R/W) The size offset of the memory region addressed by AP_TTBR1_EL2.
+ The region size is 2^(64-T1SZ) bytes.
+ The maximum and minimum possible values for T1SZ depend on the
+ level of translation table and the memory translation granule
+ size, as described in the AArch64 Virtual Memory System
+ Architecture chapter. */
+ uint64_t a1 : 1; /**< [ 22: 22](R/W) Selects whether AP_TTBR0_EL2 or AP_TTBR1_EL2 defines the ASID. The
+ encoding of this bit is:
+ 0 = AP_TTBR0_EL2[ASID] defines the ASID.
+ 1 = AP_TTBR1_EL2[ASID] defines the ASID. */
+ uint64_t epd1 : 1; /**< [ 23: 23](R/W) Translation table walk disable for translations using
+ AP_TTBR1_EL2. This bit controls whether a translation table walk
+ is performed on a TLB miss, for an address that is translated
+ using AP_TTBR1_EL2. The encoding of this bit is:
+ 0 = Perform translation table walks using AP_TTBR1_EL2.
+ 1 = A TLB miss on an address that is translated using AP_TTBR1_EL2
+ generates a Translation fault. No translation table walk is
+ performed. */
+ uint64_t irgn1 : 2; /**< [ 25: 24](R/W) Inner cacheability attribute for memory associated with
+ translation table walks using AP_TTBR1_EL2.
+ 0x0 = Normal memory, Inner Non-cacheable.
+ 0x1 = Normal memory, Inner Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Inner Write-Through Cacheable.
+ 0x3 = Normal memory, Inner Write-Back no Write-Allocate Cacheable. */
+ uint64_t orgn1 : 2; /**< [ 27: 26](R/W) Outer cacheability attribute for memory associated with
+ translation table walks using AP_TTBR1_EL2.
+ 0x0 = Normal memory, Outer Non-cacheable.
+ 0x1 = Normal memory, Outer Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Outer Write-Through Cacheable.
+ 0x3 = Normal memory, Outer Write-Back no Write-Allocate Cacheable. */
+ uint64_t sh1 : 2; /**< [ 29: 28](R/W) Shareability attribute for memory associated with translation
+ table walks using AP_TTBR1_EL2.
+
+ 0x0 = Non-shareable.
+ 0x2 = Outer Shareable.
+ 0x3 = Inner Shareable. */
+ uint64_t tg1 : 2; /**< [ 31: 30](R/W) AP_TTBR1_EL2 Granule size.
+
+ If the value is programmed to either a reserved value, or a
+ size that has not been implemented, then the hardware will
+ treat the field as if it has been programmed to an
+ implementation defined choice of the sizes that has been
+ implemented for all purposes other than the value read back
+ from this register.
+
+ It is implementation defined whether the value read back is
+ the value programmed or the value that corresponds to the size
+ chosen.
+
+ 0x1 = 16KB.
+ 0x2 = 4KB.
+ 0x3 = 64KB. */
+ uint64_t ips : 3; /**< [ 34: 32](R/W) Intermediate Physical Address Size.
+ 0x0 = 32 bits, 4GB.
+ 0x1 = 36 bits, 64GB.
+ 0x2 = 40 bits, 1TB.
+ 0x3 = 42 bits, 4TB.
+ 0x4 = 44 bits, 16TB.
+ 0x5 = 48 bits, 256TB. */
+ uint64_t reserved_35 : 1;
+ uint64_t as : 1; /**< [ 36: 36](R/W) ASID Size.
+ If the implementation has only 8 bits of ASID, this field is
+ RES0.
+ 0 = 8 bit - the upper 8 bits of AP_TTBR0_EL2 and AP_TTBR1_EL2 are
+ ignored by hardware for every purpose except reading back the
+ register, and are treated as if they are all zeros when
+ used for allocation and matching entries in the TLB.
+ 1 = 16 bit - the upper 16 bits of AP_TTBR0_EL2 and AP_TTBR1_EL2 are used
+ for allocation and matching in the TLB. */
+ uint64_t tbi0 : 1; /**< [ 37: 37](R/W) Top Byte ignored - indicates whether the top byte of an
+ address is used for address match for the AP_TTBR0_EL2 region, or
+ ignored and used for tagged addresses.
+
+ This affects addresses generated in EL2 using AArch64
+ where the address would be translated by tables pointed to by
+ AP_TTBR0_EL2. It has an effect whether the EL2 translation
+ regime is enabled or not.
+
+ Additionally, this affects changes to the program counter,
+ when TBI0 is 1 and bit [55] of the target address is 0, caused
+ by:
+ * A branch or procedure return within EL2.
+ * An exception taken to EL2.
+ * An exception return to EL2.
+
+ In these cases bits [63:56] of the address are also set to 0
+ before it is stored in the PC.
+ 0 = Top Byte used in the address calculation.
+ 1 = Top Byte ignored in the address calculation. */
+ uint64_t tbi1 : 1; /**< [ 38: 38](R/W) Top Byte ignored - indicates whether the top byte of an
+ address is used for address match for the AP_TTBR1_EL2 region, or
+ ignored and used for tagged addresses.
+
+ This affects addresses generated in EL2 using AArch64
+ where the address would be translated by tables pointed to by
+ AP_TTBR1_EL2. It has an effect whether the EL2 translation
+ regime is enabled or not.
+
+ Additionally, this affects changes to the program counter,
+ when TBI1 is 1 and bit [55] of the target address is 1, caused
+ by:
+ * A branch or procedure return within EL2.
+ * An exception taken to EL2.
+ * An exception return to EL2.
+
+ In these cases bits [63:56] of the address are also set to 1
+ before it is stored in the PC.
+ 0 = Top Byte used in the address calculation.
+ 1 = Top Byte ignored in the address calculation. */
+ uint64_t reserved_39 : 1;
+ uint64_t reserved_40 : 1;
+ uint64_t had0 : 1; /**< [ 41: 41](R/W) V8.1: Hierarchical Attribute Disable.
+ HAD0(bit[41]): Hierarchical Attribute Disable for the TTBR0 region.
+ 0 = Hierarchical Attributes are enabled.
+ 1 = Hierarchical Attributes are disabled. */
+ uint64_t had1 : 1; /**< [ 42: 42](R/W) V8.1: Hierarchical Attribute Disable.
+ HAD1(bit[42]): Hierarchical Attribute Disable for the TTBR1 region.
+ 0 = Hierarchical Attributes are enabled.
+ 1 = Hierarchical Attributes are disabled. */
+ uint64_t reserved_43_63 : 21;
+#endif /* Word 0 - End */
+ } cn;
+};
+typedef union bdk_ap_tcr_el2_e2h bdk_ap_tcr_el2_e2h_t;
+
+#define BDK_AP_TCR_EL2_E2H BDK_AP_TCR_EL2_E2H_FUNC()
+static inline uint64_t BDK_AP_TCR_EL2_E2H_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TCR_EL2_E2H_FUNC(void)
+{
+ return 0x30402000210ll;
+}
+
+#define typedef_BDK_AP_TCR_EL2_E2H bdk_ap_tcr_el2_e2h_t
+#define bustype_BDK_AP_TCR_EL2_E2H BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TCR_EL2_E2H "AP_TCR_EL2_E2H"
+#define busnum_BDK_AP_TCR_EL2_E2H 0
+#define arguments_BDK_AP_TCR_EL2_E2H -1,-1,-1,-1
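+
+/* Illustrative usage sketch (editor's note, not part of the imported BDK
+ * sources): the union layout above lets software compose a TCR_EL2 (E2H)
+ * image field-by-field and hand the raw 64-bit value to whatever MSR
+ * write helper the build provides; that helper itself is assumed here.
+ * The field values follow the encodings documented above.
+ *
+ *   bdk_ap_tcr_el2_e2h_t tcr;
+ *   tcr.u = 0;
+ *   tcr.s.t0sz  = 16;    // 2^(64-16) = 256TB TTBR0 region
+ *   tcr.s.tg0   = 0x0;   // 4KB granule
+ *   tcr.s.sh0   = 0x3;   // Inner Shareable table walks
+ *   tcr.s.irgn0 = 0x1;   // Inner Write-Back Write-Allocate
+ *   tcr.s.orgn0 = 0x1;   // Outer Write-Back Write-Allocate
+ *   tcr.s.ips   = 0x5;   // 48-bit intermediate physical addresses
+ *   // ... write tcr.u to the register selected by BDK_AP_TCR_EL2_E2H
+ */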
+
+/**
+ * Register (SYSREG) ap_tcr_el3
+ *
+ * AP Translation Control EL3 Registers
+ * Controls translation table walks required for the stage 1
+ * translation of memory accesses from EL3, and holds
+ * cacheability and shareability information for the accesses.
+ */
+union bdk_ap_tcr_el3
+{
+ uint32_t u;
+ struct bdk_ap_tcr_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t rsvd_31 : 1; /**< [ 31: 31](RO) Reserved 1. */
+ uint32_t reserved_25_30 : 6;
+ uint32_t had : 1; /**< [ 24: 24](R/W) V8.1: Hierarchical Attribute Disable.
+ HAD (bit[24]): Hierarchical Attribute Disable.
+ 0 = Hierarchical Attributes are enabled.
+ 1 = Hierarchical Attributes are disabled. */
+ uint32_t rsvd_23 : 1; /**< [ 23: 23](RO) Reserved 1. */
+ uint32_t reserved_21_22 : 2;
+ uint32_t tbi : 1; /**< [ 20: 20](R/W) Top Byte ignored - indicates whether the top byte of an
+ address is used for address match for the AP_TTBR0_EL3 region, or
+ ignored and used for tagged addresses.
+
+ This affects addresses generated in EL3 using AArch64 where
+ the address would be translated by tables pointed to by
+ AP_TTBR0_EL3. It has an effect whether the EL3 translation regime
+ is enabled or not.
+
+ Additionally, this affects changes to the program counter,
+ when TBI is 1, caused by:
+ * A branch or procedure return within EL3.
+ * An exception taken to EL3.
+ * An exception return to EL3.
+
+ In these cases bits [63:56] of the address are set to 0 before
+ it is stored in the PC.
+ 0 = Top Byte used in the address calculation.
+ 1 = Top Byte ignored in the address calculation. */
+ uint32_t reserved_19 : 1;
+ uint32_t ps : 3; /**< [ 18: 16](R/W) Physical Address Size.
+
+ The reserved values behave in the same way as the 0b101 encoding.
+ 0x0 = 32 bits, 4GB.
+ 0x1 = 36 bits, 64GB.
+ 0x2 = 40 bits, 1TB.
+ 0x3 = 42 bits, 4TB.
+ 0x4 = 44 bits, 16TB.
+ 0x5 = 48 bits, 256TB. */
+ uint32_t tg0 : 2; /**< [ 15: 14](R/W) Granule size for the corresponding translation table base
+ address register.
+
+ If the value is programmed to either a reserved value, or a
+ size that has not been implemented, then the hardware will
+ treat the field as if it has been programmed to an
+ implementation defined choice of the sizes that has been
+ implemented for all purposes other than the value read back
+ from this register.
+
+ It is implementation defined whether the value read back is
+ the value programmed or the value that corresponds to the size
+ chosen.
+ 0x0 = 4KB.
+ 0x1 = 64KB.
+ 0x2 = 16KB. */
+ uint32_t sh0 : 2; /**< [ 13: 12](R/W) Shareability attribute for memory associated with translation
+ table walks using AP_TTBR0_EL3.
+ 0x0 = Non-shareable.
+ 0x2 = Outer Shareable.
+ 0x3 = Inner Shareable. */
+ uint32_t orgn0 : 2; /**< [ 11: 10](R/W) Outer cacheability attribute for memory associated with
+ translation table walks using AP_TTBR0_EL3.
+ 0x0 = Normal memory, Outer Non-cacheable.
+ 0x1 = Normal memory, Outer Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Outer Write-Through Cacheable.
+ 0x3 = Normal memory, Outer Write-Back no Write-Allocate Cacheable. */
+ uint32_t irgn0 : 2; /**< [ 9: 8](R/W) Inner cacheability attribute for memory associated with
+ translation table walks using AP_TTBR0_EL3.
+ 0x0 = Normal memory, Inner Non-cacheable.
+ 0x1 = Normal memory, Inner Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Inner Write-Through Cacheable.
+ 0x3 = Normal memory, Inner Write-Back no Write-Allocate Cacheable. */
+ uint32_t reserved_6_7 : 2;
+ uint32_t t0sz : 6; /**< [ 5: 0](R/W) The size offset of the memory region addressed by AP_TTBR0_EL3.
+ The region size is 2^(64-T0SZ) bytes.
+ The maximum and minimum possible values for T0SZ depend on the
+ level of translation table and the memory translation granule
+ size, as described in the AArch64 Virtual Memory System
+ Architecture chapter. */
+#else /* Word 0 - Little Endian */
+ uint32_t t0sz : 6; /**< [ 5: 0](R/W) The size offset of the memory region addressed by AP_TTBR0_EL3.
+ The region size is 2^(64-T0SZ) bytes.
+ The maximum and minimum possible values for T0SZ depend on the
+ level of translation table and the memory translation granule
+ size, as described in the AArch64 Virtual Memory System
+ Architecture chapter. */
+ uint32_t reserved_6_7 : 2;
+ uint32_t irgn0 : 2; /**< [ 9: 8](R/W) Inner cacheability attribute for memory associated with
+ translation table walks using AP_TTBR0_EL3.
+ 0x0 = Normal memory, Inner Non-cacheable.
+ 0x1 = Normal memory, Inner Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Inner Write-Through Cacheable.
+ 0x3 = Normal memory, Inner Write-Back no Write-Allocate Cacheable. */
+ uint32_t orgn0 : 2; /**< [ 11: 10](R/W) Outer cacheability attribute for memory associated with
+ translation table walks using AP_TTBR0_EL3.
+ 0x0 = Normal memory, Outer Non-cacheable.
+ 0x1 = Normal memory, Outer Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Outer Write-Through Cacheable.
+ 0x3 = Normal memory, Outer Write-Back no Write-Allocate Cacheable. */
+ uint32_t sh0 : 2; /**< [ 13: 12](R/W) Shareability attribute for memory associated with translation
+ table walks using AP_TTBR0_EL3.
+ 0x0 = Non-shareable.
+ 0x2 = Outer Shareable.
+ 0x3 = Inner Shareable. */
+ uint32_t tg0 : 2; /**< [ 15: 14](R/W) Granule size for the corresponding translation table base
+ address register.
+
+ If the value is programmed to either a reserved value, or a
+ size that has not been implemented, then the hardware will
+ treat the field as if it has been programmed to an
+ implementation defined choice of the sizes that has been
+ implemented for all purposes other than the value read back
+ from this register.
+
+ It is implementation defined whether the value read back is
+ the value programmed or the value that corresponds to the size
+ chosen.
+ 0x0 = 4KB.
+ 0x1 = 64KB.
+ 0x2 = 16KB. */
+ uint32_t ps : 3; /**< [ 18: 16](R/W) Physical Address Size.
+
+ The reserved values behave in the same way as the 0b101 encoding.
+ 0x0 = 32 bits, 4GB.
+ 0x1 = 36 bits, 64GB.
+ 0x2 = 40 bits, 1TB.
+ 0x3 = 42 bits, 4TB.
+ 0x4 = 44 bits, 16TB.
+ 0x5 = 48 bits, 256TB. */
+ uint32_t reserved_19 : 1;
+ uint32_t tbi : 1; /**< [ 20: 20](R/W) Top Byte ignored - indicates whether the top byte of an
+ address is used for address match for the AP_TTBR0_EL3 region, or
+ ignored and used for tagged addresses.
+
+ This affects addresses generated in EL3 using AArch64 where
+ the address would be translated by tables pointed to by
+ AP_TTBR0_EL3. It has an effect whether the EL3 translation regime
+ is enabled or not.
+
+ Additionally, this affects changes to the program counter,
+ when TBI is 1, caused by:
+ * A branch or procedure return within EL3.
+ * An exception taken to EL3.
+ * An exception return to EL3.
+
+ In these cases bits [63:56] of the address are set to 0 before
+ it is stored in the PC.
+ 0 = Top Byte used in the address calculation.
+ 1 = Top Byte ignored in the address calculation. */
+ uint32_t reserved_21_22 : 2;
+ uint32_t rsvd_23 : 1; /**< [ 23: 23](RO) Reserved 1. */
+ uint32_t had : 1; /**< [ 24: 24](R/W) V8.1: Hierarchical Attribute Disable.
+ HAD (bit[24]): Hierarchical Attribute Disable.
+ 0 = Hierarchical Attributes are enabled.
+ 1 = Hierarchical Attributes are disabled. */
+ uint32_t reserved_25_30 : 6;
+ uint32_t rsvd_31 : 1; /**< [ 31: 31](RO) Reserved 1. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_tcr_el3_cn
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t rsvd_31 : 1; /**< [ 31: 31](RO) Reserved 1. */
+ uint32_t reserved_25_30 : 6;
+ uint32_t had : 1; /**< [ 24: 24](R/W) V8.1: Hierarchical Attribute Disable.
+ HAD (bit[24]): Hierarchical Attribute Disable.
+ 0 = Hierarchical Attributes are enabled.
+ 1 = Hierarchical Attributes are disabled. */
+ uint32_t rsvd_23 : 1; /**< [ 23: 23](RO) Reserved 1. */
+ uint32_t reserved_22 : 1;
+ uint32_t reserved_21 : 1;
+ uint32_t tbi : 1; /**< [ 20: 20](R/W) Top Byte ignored - indicates whether the top byte of an
+ address is used for address match for the AP_TTBR0_EL3 region, or
+ ignored and used for tagged addresses.
+
+ This affects addresses generated in EL3 using AArch64 where
+ the address would be translated by tables pointed to by
+ AP_TTBR0_EL3. It has an effect whether the EL3 translation regime
+ is enabled or not.
+
+ Additionally, this affects changes to the program counter,
+ when TBI is 1, caused by:
+ * A branch or procedure return within EL3.
+ * An exception taken to EL3.
+ * An exception return to EL3.
+
+ In these cases bits [63:56] of the address are set to 0 before
+ it is stored in the PC.
+ 0 = Top Byte used in the address calculation.
+ 1 = Top Byte ignored in the address calculation. */
+ uint32_t reserved_19 : 1;
+ uint32_t ps : 3; /**< [ 18: 16](R/W) Physical Address Size.
+
+ The reserved values behave in the same way as the 0b101 encoding.
+ 0x0 = 32 bits, 4GB.
+ 0x1 = 36 bits, 64GB.
+ 0x2 = 40 bits, 1TB.
+ 0x3 = 42 bits, 4TB.
+ 0x4 = 44 bits, 16TB.
+ 0x5 = 48 bits, 256TB. */
+ uint32_t tg0 : 2; /**< [ 15: 14](R/W) Granule size for the corresponding translation table base
+ address register.
+
+ If the value is programmed to either a reserved value, or a
+ size that has not been implemented, then the hardware will
+ treat the field as if it has been programmed to an
+ implementation defined choice of the sizes that has been
+ implemented for all purposes other than the value read back
+ from this register.
+
+ It is implementation defined whether the value read back is
+ the value programmed or the value that corresponds to the size
+ chosen.
+ 0x0 = 4KB.
+ 0x1 = 64KB.
+ 0x2 = 16KB. */
+ uint32_t sh0 : 2; /**< [ 13: 12](R/W) Shareability attribute for memory associated with translation
+ table walks using AP_TTBR0_EL3.
+ 0x0 = Non-shareable.
+ 0x2 = Outer Shareable.
+ 0x3 = Inner Shareable. */
+ uint32_t orgn0 : 2; /**< [ 11: 10](R/W) Outer cacheability attribute for memory associated with
+ translation table walks using AP_TTBR0_EL3.
+ 0x0 = Normal memory, Outer Non-cacheable.
+ 0x1 = Normal memory, Outer Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Outer Write-Through Cacheable.
+ 0x3 = Normal memory, Outer Write-Back no Write-Allocate Cacheable. */
+ uint32_t irgn0 : 2; /**< [ 9: 8](R/W) Inner cacheability attribute for memory associated with
+ translation table walks using AP_TTBR0_EL3.
+ 0x0 = Normal memory, Inner Non-cacheable.
+ 0x1 = Normal memory, Inner Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Inner Write-Through Cacheable.
+ 0x3 = Normal memory, Inner Write-Back no Write-Allocate Cacheable. */
+ uint32_t reserved_6_7 : 2;
+ uint32_t t0sz : 6; /**< [ 5: 0](R/W) The size offset of the memory region addressed by AP_TTBR0_EL3.
+ The region size is 2^(64-T0SZ) bytes.
+ The maximum and minimum possible values for T0SZ depend on the
+ level of translation table and the memory translation granule
+ size, as described in the AArch64 Virtual Memory System
+ Architecture chapter. */
+#else /* Word 0 - Little Endian */
+ uint32_t t0sz : 6; /**< [ 5: 0](R/W) The size offset of the memory region addressed by AP_TTBR0_EL3.
+ The region size is 2^(64-T0SZ) bytes.
+ The maximum and minimum possible values for T0SZ depend on the
+ level of translation table and the memory translation granule
+ size, as described in the AArch64 Virtual Memory System
+ Architecture chapter. */
+ uint32_t reserved_6_7 : 2;
+ uint32_t irgn0 : 2; /**< [ 9: 8](R/W) Inner cacheability attribute for memory associated with
+ translation table walks using AP_TTBR0_EL3.
+ 0x0 = Normal memory, Inner Non-cacheable.
+ 0x1 = Normal memory, Inner Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Inner Write-Through Cacheable.
+ 0x3 = Normal memory, Inner Write-Back no Write-Allocate Cacheable. */
+ uint32_t orgn0 : 2; /**< [ 11: 10](R/W) Outer cacheability attribute for memory associated with
+ translation table walks using AP_TTBR0_EL3.
+ 0x0 = Normal memory, Outer Non-cacheable.
+ 0x1 = Normal memory, Outer Write-Back Write-Allocate Cacheable.
+ 0x2 = Normal memory, Outer Write-Through Cacheable.
+ 0x3 = Normal memory, Outer Write-Back no Write-Allocate Cacheable. */
+ uint32_t sh0 : 2; /**< [ 13: 12](R/W) Shareability attribute for memory associated with translation
+ table walks using AP_TTBR0_EL3.
+ 0x0 = Non-shareable.
+ 0x2 = Outer Shareable.
+ 0x3 = Inner Shareable. */
+ uint32_t tg0 : 2; /**< [ 15: 14](R/W) Granule size for the corresponding translation table base
+ address register.
+
+ If the value is programmed to either a reserved value, or a
+ size that has not been implemented, then the hardware will
+ treat the field as if it has been programmed to an
+ implementation defined choice of the sizes that has been
+ implemented for all purposes other than the value read back
+ from this register.
+
+ It is implementation defined whether the value read back is
+ the value programmed or the value that corresponds to the size
+ chosen.
+ 0x0 = 4KB.
+ 0x1 = 64KB.
+ 0x2 = 16KB. */
+ uint32_t ps : 3; /**< [ 18: 16](R/W) Physical Address Size.
+
+ The reserved values behave in the same way as the 0b101 encoding.
+ 0x0 = 32 bits, 4GB.
+ 0x1 = 36 bits, 64GB.
+ 0x2 = 40 bits, 1TB.
+ 0x3 = 42 bits, 4TB.
+ 0x4 = 44 bits, 16TB.
+ 0x5 = 48 bits, 256TB. */
+ uint32_t reserved_19 : 1;
+ uint32_t tbi : 1; /**< [ 20: 20](R/W) Top Byte ignored - indicates whether the top byte of an
+ address is used for address match for the AP_TTBR0_EL3 region, or
+ ignored and used for tagged addresses.
+
+ This affects addresses generated in EL3 using AArch64 where
+ the address would be translated by tables pointed to by
+ AP_TTBR0_EL3. It has an effect whether the EL3 translation regime
+ is enabled or not.
+
+ Additionally, this affects changes to the program counter,
+ when TBI is 1, caused by:
+ * A branch or procedure return within EL3.
+ * An exception taken to EL3.
+ * An exception return to EL3.
+
+ In these cases bits [63:56] of the address are set to 0 before
+ it is stored in the PC.
+ 0 = Top Byte used in the address calculation.
+ 1 = Top Byte ignored in the address calculation. */
+ uint32_t reserved_21 : 1;
+ uint32_t reserved_22 : 1;
+ uint32_t rsvd_23 : 1; /**< [ 23: 23](RO) Reserved 1. */
+ uint32_t had : 1; /**< [ 24: 24](R/W) V8.1: Hierarchical Attribute Disable.
+ HAD (bit[24]): Hierarchical Attribute Disable.
+ 0 = Hierarchical Attributes are enabled.
+ 1 = Hierarchical Attributes are disabled. */
+ uint32_t reserved_25_30 : 6;
+ uint32_t rsvd_31 : 1; /**< [ 31: 31](RO) Reserved 1. */
+#endif /* Word 0 - End */
+ } cn;
+};
+typedef union bdk_ap_tcr_el3 bdk_ap_tcr_el3_t;
+
+#define BDK_AP_TCR_EL3 BDK_AP_TCR_EL3_FUNC()
+static inline uint64_t BDK_AP_TCR_EL3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TCR_EL3_FUNC(void)
+{
+ return 0x30602000200ll;
+}
+
+#define typedef_BDK_AP_TCR_EL3 bdk_ap_tcr_el3_t
+#define bustype_BDK_AP_TCR_EL3 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TCR_EL3 "AP_TCR_EL3"
+#define busnum_BDK_AP_TCR_EL3 0
+#define arguments_BDK_AP_TCR_EL3 -1,-1,-1,-1
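+
+/* Worked example for the TnSZ encoding documented above (editor's note,
+ * not part of the imported sources): the addressed region size is
+ * 2^(64-TnSZ) bytes, so T0SZ = 25 selects a 2^39 = 512GB region and
+ * T0SZ = 16 selects a 2^48 = 256TB region. */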
+
+/**
+ * Register (SYSREG) ap_teecr32_el1
+ *
+ * AP T32EE Configuration Register
+ * Allows access to the AArch32 register TEECR from AArch64 state
+ * only. Its value has no effect on execution in AArch64 state.
+ */
+union bdk_ap_teecr32_el1
+{
+ uint32_t u;
+ struct bdk_ap_teecr32_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_1_31 : 31;
+ uint32_t xed : 1; /**< [ 0: 0](R/W) Execution Environment Disable bit. Controls unprivileged access
+ to TEEHBR.
+
+ The effects of a write to this register on T32EE configuration
+ are only guaranteed to be visible to subsequent instructions
+ after the execution of a context synchronization operation.
+ However, a read of this register always returns the value most
+ recently written to the register.
+ 0 = Unprivileged access permitted.
+ 1 = Unprivileged access disabled. */
+#else /* Word 0 - Little Endian */
+ uint32_t xed : 1; /**< [ 0: 0](R/W) Execution Environment Disable bit. Controls unprivileged access
+ to TEEHBR.
+
+ The effects of a write to this register on T32EE configuration
+ are only guaranteed to be visible to subsequent instructions
+ after the execution of a context synchronization operation.
+ However, a read of this register always returns the value most
+ recently written to the register.
+ 0 = Unprivileged access permitted.
+ 1 = Unprivileged access disabled. */
+ uint32_t reserved_1_31 : 31;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_teecr32_el1_s cn8; */
+ struct bdk_ap_teecr32_el1_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_1_31 : 31;
+ uint32_t xed : 1; /**< [ 0: 0](RAZ) Execution Environment Disable bit. Controls unprivileged access
+ to TEEHBR.
+
+ The effects of a write to this register on T32EE configuration
+ are only guaranteed to be visible to subsequent instructions
+ after the execution of a context synchronization operation.
+ However, a read of this register always returns the value most
+ recently written to the register.
+ 0 = Unprivileged access permitted.
+ 1 = Unprivileged access disabled. */
+#else /* Word 0 - Little Endian */
+ uint32_t xed : 1; /**< [ 0: 0](RAZ) Execution Environment Disable bit. Controls unprivileged access
+ to TEEHBR.
+
+ The effects of a write to this register on T32EE configuration
+ are only guaranteed to be visible to subsequent instructions
+ after the execution of a context synchronization operation.
+ However, a read of this register always returns the value most
+ recently written to the register.
+ 0 = Unprivileged access permitted.
+ 1 = Unprivileged access disabled. */
+ uint32_t reserved_1_31 : 31;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_ap_teecr32_el1 bdk_ap_teecr32_el1_t;
+
+#define BDK_AP_TEECR32_EL1 BDK_AP_TEECR32_EL1_FUNC()
+static inline uint64_t BDK_AP_TEECR32_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TEECR32_EL1_FUNC(void)
+{
+ return 0x20200000000ll;
+}
+
+#define typedef_BDK_AP_TEECR32_EL1 bdk_ap_teecr32_el1_t
+#define bustype_BDK_AP_TEECR32_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TEECR32_EL1 "AP_TEECR32_EL1"
+#define busnum_BDK_AP_TEECR32_EL1 0
+#define arguments_BDK_AP_TEECR32_EL1 -1,-1,-1,-1
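+
+/* Note on the synchronization requirement documented above (editor's
+ * note, not part of the imported sources): a TEECR write is only
+ * guaranteed visible to later instructions after a context
+ * synchronization operation, so a typical sequence would be:
+ *
+ *   msr teecr32_el1, x0   // update XED
+ *   isb                   // context synchronization operation
+ */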
+
+/**
+ * Register (SYSREG) ap_teehbr32_el1
+ *
+ * AP T32EE Handler Base Register
+ * Allows access to the AArch32 register TEEHBR from AArch64
+ * state only. Its value has no effect on execution in AArch64
+ * state.
+ */
+union bdk_ap_teehbr32_el1
+{
+ uint32_t u;
+ struct bdk_ap_teehbr32_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t handlerbase : 30; /**< [ 31: 2](R/W) The address of the T32EE Handler_00 implementation. This is
+ the address of the first of the T32EE handlers. */
+ uint32_t reserved_0_1 : 2;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_1 : 2;
+ uint32_t handlerbase : 30; /**< [ 31: 2](R/W) The address of the T32EE Handler_00 implementation. This is
+ the address of the first of the T32EE handlers. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_teehbr32_el1_s cn8; */
+ struct bdk_ap_teehbr32_el1_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t handlerbase : 30; /**< [ 31: 2](RAZ) The address of the T32EE Handler_00 implementation. This is
+ the address of the first of the T32EE handlers. */
+ uint32_t reserved_0_1 : 2;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_1 : 2;
+ uint32_t handlerbase : 30; /**< [ 31: 2](RAZ) The address of the T32EE Handler_00 implementation. This is
+ the address of the first of the T32EE handlers. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_ap_teehbr32_el1 bdk_ap_teehbr32_el1_t;
+
+#define BDK_AP_TEEHBR32_EL1 BDK_AP_TEEHBR32_EL1_FUNC()
+static inline uint64_t BDK_AP_TEEHBR32_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TEEHBR32_EL1_FUNC(void)
+{
+ return 0x20201000000ll;
+}
+
+#define typedef_BDK_AP_TEEHBR32_EL1 bdk_ap_teehbr32_el1_t
+#define bustype_BDK_AP_TEEHBR32_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TEEHBR32_EL1 "AP_TEEHBR32_EL1"
+#define busnum_BDK_AP_TEEHBR32_EL1 0
+#define arguments_BDK_AP_TEEHBR32_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_tpidr_el#
+ *
+ * AP Thread Pointer / ID Register
+ * Provides a location where software executing at EL3 can store
+ * thread identifying information, for OS management purposes.
+ */
+union bdk_ap_tpidr_elx
+{
+ uint64_t u;
+ struct bdk_ap_tpidr_elx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Thread ID. Thread identifying information stored by software
+ running at this Exception level. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Thread ID. Thread identifying information stored by software
+ running at this Exception level. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_tpidr_elx_s cn; */
+};
+typedef union bdk_ap_tpidr_elx bdk_ap_tpidr_elx_t;
+
+static inline uint64_t BDK_AP_TPIDR_ELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TPIDR_ELX(unsigned long a)
+{
+ if ((a>=2)&&(a<=3))
+ return 0x3000d000200ll + 0x200000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("AP_TPIDR_ELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_TPIDR_ELX(a) bdk_ap_tpidr_elx_t
+#define bustype_BDK_AP_TPIDR_ELX(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TPIDR_ELX(a) "AP_TPIDR_ELX"
+#define busnum_BDK_AP_TPIDR_ELX(a) (a)
+#define arguments_BDK_AP_TPIDR_ELX(a) (a),-1,-1,-1
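+
+/* Sketch of the indexed accessor contract above (editor's note, not part
+ * of the imported sources): only EL2 and EL3 are valid indices; any other
+ * value falls through to __bdk_csr_fatal().
+ *
+ *   uint64_t el2 = BDK_AP_TPIDR_ELX(2);  // 0x3000d000200 + 2*0x200000000
+ *   uint64_t el3 = BDK_AP_TPIDR_ELX(3);  // 0x3000d000200 + 3*0x200000000
+ *   // BDK_AP_TPIDR_ELX(1) would report a fatal CSR error instead
+ */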
+
+/**
+ * Register (SYSREG) ap_tpidr_el0
+ *
+ * AP EL0 Read/Write Software Thread ID Register
+ * Provides a location where software executing at EL0 can store
+ * thread identifying information, for OS management purposes.
+ */
+union bdk_ap_tpidr_el0
+{
+ uint64_t u;
+ struct bdk_ap_tpidr_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Thread ID. Thread identifying information stored by software
+ running at this Exception level. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Thread ID. Thread identifying information stored by software
+ running at this Exception level. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_tpidr_el0_s cn; */
+};
+typedef union bdk_ap_tpidr_el0 bdk_ap_tpidr_el0_t;
+
+#define BDK_AP_TPIDR_EL0 BDK_AP_TPIDR_EL0_FUNC()
+static inline uint64_t BDK_AP_TPIDR_EL0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TPIDR_EL0_FUNC(void)
+{
+ return 0x3030d000200ll;
+}
+
+#define typedef_BDK_AP_TPIDR_EL0 bdk_ap_tpidr_el0_t
+#define bustype_BDK_AP_TPIDR_EL0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TPIDR_EL0 "AP_TPIDR_EL0"
+#define busnum_BDK_AP_TPIDR_EL0 0
+#define arguments_BDK_AP_TPIDR_EL0 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_tpidr_el1
+ *
+ * AP EL1 Software Thread ID Register
+ * Provides a location where software executing at EL1 can store
+ * thread identifying information, for OS management purposes.
+ */
+union bdk_ap_tpidr_el1
+{
+ uint64_t u;
+ struct bdk_ap_tpidr_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Thread ID. Thread identifying information stored by software
+ running at this Exception level. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Thread ID. Thread identifying information stored by software
+ running at this Exception level. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_tpidr_el1_s cn; */
+};
+typedef union bdk_ap_tpidr_el1 bdk_ap_tpidr_el1_t;
+
+#define BDK_AP_TPIDR_EL1 BDK_AP_TPIDR_EL1_FUNC()
+static inline uint64_t BDK_AP_TPIDR_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TPIDR_EL1_FUNC(void)
+{
+ return 0x3000d000400ll;
+}
+
+#define typedef_BDK_AP_TPIDR_EL1 bdk_ap_tpidr_el1_t
+#define bustype_BDK_AP_TPIDR_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TPIDR_EL1 "AP_TPIDR_EL1"
+#define busnum_BDK_AP_TPIDR_EL1 0
+#define arguments_BDK_AP_TPIDR_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_tpidrro_el0
+ *
+ * AP Thread Pointer / ID Read-Only EL0 Register
+ * Provides a location where software executing at EL1 or higher
+ * can store thread identifying information that is visible to
+ * software executing at EL0, for OS management purposes.
+ */
+union bdk_ap_tpidrro_el0
+{
+ uint64_t u;
+ struct bdk_ap_tpidrro_el0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Thread ID. Thread identifying information stored by software
+ running at this Exception level. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Thread ID. Thread identifying information stored by software
+ running at this Exception level. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_tpidrro_el0_s cn; */
+};
+typedef union bdk_ap_tpidrro_el0 bdk_ap_tpidrro_el0_t;
+
+#define BDK_AP_TPIDRRO_EL0 BDK_AP_TPIDRRO_EL0_FUNC()
+static inline uint64_t BDK_AP_TPIDRRO_EL0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TPIDRRO_EL0_FUNC(void)
+{
+ return 0x3030d000300ll;
+}
+
+#define typedef_BDK_AP_TPIDRRO_EL0 bdk_ap_tpidrro_el0_t
+#define bustype_BDK_AP_TPIDRRO_EL0 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TPIDRRO_EL0 "AP_TPIDRRO_EL0"
+#define busnum_BDK_AP_TPIDRRO_EL0 0
+#define arguments_BDK_AP_TPIDRRO_EL0 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcacatr#
+ *
+ * AP Register
+ */
+union bdk_ap_trcacatrx
+{
+ uint64_t u;
+ struct bdk_ap_trcacatrx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcacatrx_s cn; */
+};
+typedef union bdk_ap_trcacatrx bdk_ap_trcacatrx_t;
+
+static inline uint64_t BDK_AP_TRCACATRX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCACATRX(unsigned long a)
+{
+ if (a<=15)
+ return 0x20102000200ll + 0x20000ll * ((a) & 0xf);
+ __bdk_csr_fatal("AP_TRCACATRX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_TRCACATRX(a) bdk_ap_trcacatrx_t
+#define bustype_BDK_AP_TRCACATRX(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCACATRX(a) "AP_TRCACATRX"
+#define busnum_BDK_AP_TRCACATRX(a) (a)
+#define arguments_BDK_AP_TRCACATRX(a) (a),-1,-1,-1
+
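+/* Editor's sketch (not part of the imported BDK sources): indexed accessors
+ * such as BDK_AP_TRCACATRX(a) range-check the index and return the encoded
+ * system-register address; out-of-range indices land in __bdk_csr_fatal().
+ * They compose with the generic accessors, e.g. iterating over all sixteen
+ * AP_TRCACATR() trace registers (BDK_CSR_READ assumed from bdk-csr.h): */
+#if 0 /* illustrative only */
+static inline void example_dump_trcacatr(bdk_node_t node)
+{
+    for (unsigned long a = 0; a <= 15; a++)
+    {
+        uint64_t v = BDK_CSR_READ(node, BDK_AP_TRCACATRX(a));
+        (void)v; /* e.g. print or sanity-check each register here */
+    }
+}
+#endif
+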
+/**
+ * Register (SYSREG) ap_trcacvr#
+ *
+ * AP Register
+ */
+union bdk_ap_trcacvrx
+{
+ uint64_t u;
+ struct bdk_ap_trcacvrx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcacvrx_s cn; */
+};
+typedef union bdk_ap_trcacvrx bdk_ap_trcacvrx_t;
+
+static inline uint64_t BDK_AP_TRCACVRX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCACVRX(unsigned long a)
+{
+ if (a<=15)
+ return 0x20102000000ll + 0x20000ll * ((a) & 0xf);
+ __bdk_csr_fatal("AP_TRCACVRX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_TRCACVRX(a) bdk_ap_trcacvrx_t
+#define bustype_BDK_AP_TRCACVRX(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCACVRX(a) "AP_TRCACVRX"
+#define busnum_BDK_AP_TRCACVRX(a) (a)
+#define arguments_BDK_AP_TRCACVRX(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcauthstatus
+ *
+ * AP Register
+ */
+union bdk_ap_trcauthstatus
+{
+ uint64_t u;
+ struct bdk_ap_trcauthstatus_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcauthstatus_s cn; */
+};
+typedef union bdk_ap_trcauthstatus bdk_ap_trcauthstatus_t;
+
+#define BDK_AP_TRCAUTHSTATUS BDK_AP_TRCAUTHSTATUS_FUNC()
+static inline uint64_t BDK_AP_TRCAUTHSTATUS_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCAUTHSTATUS_FUNC(void)
+{
+ return 0x201070e0600ll;
+}
+
+#define typedef_BDK_AP_TRCAUTHSTATUS bdk_ap_trcauthstatus_t
+#define bustype_BDK_AP_TRCAUTHSTATUS BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCAUTHSTATUS "AP_TRCAUTHSTATUS"
+#define busnum_BDK_AP_TRCAUTHSTATUS 0
+#define arguments_BDK_AP_TRCAUTHSTATUS -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcauxctlr
+ *
+ * AP Register
+ */
+union bdk_ap_trcauxctlr
+{
+ uint64_t u;
+ struct bdk_ap_trcauxctlr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcauxctlr_s cn; */
+};
+typedef union bdk_ap_trcauxctlr bdk_ap_trcauxctlr_t;
+
+#define BDK_AP_TRCAUXCTLR BDK_AP_TRCAUXCTLR_FUNC()
+static inline uint64_t BDK_AP_TRCAUXCTLR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCAUXCTLR_FUNC(void)
+{
+ return 0x20100060000ll;
+}
+
+#define typedef_BDK_AP_TRCAUXCTLR bdk_ap_trcauxctlr_t
+#define bustype_BDK_AP_TRCAUXCTLR BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCAUXCTLR "AP_TRCAUXCTLR"
+#define busnum_BDK_AP_TRCAUXCTLR 0
+#define arguments_BDK_AP_TRCAUXCTLR -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcbbctlr
+ *
+ * AP Register
+ */
+union bdk_ap_trcbbctlr
+{
+ uint64_t u;
+ struct bdk_ap_trcbbctlr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcbbctlr_s cn; */
+};
+typedef union bdk_ap_trcbbctlr bdk_ap_trcbbctlr_t;
+
+#define BDK_AP_TRCBBCTLR BDK_AP_TRCBBCTLR_FUNC()
+static inline uint64_t BDK_AP_TRCBBCTLR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCBBCTLR_FUNC(void)
+{
+ return 0x201000f0000ll;
+}
+
+#define typedef_BDK_AP_TRCBBCTLR bdk_ap_trcbbctlr_t
+#define bustype_BDK_AP_TRCBBCTLR BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCBBCTLR "AP_TRCBBCTLR"
+#define busnum_BDK_AP_TRCBBCTLR 0
+#define arguments_BDK_AP_TRCBBCTLR -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcccctlr
+ *
+ * AP Register
+ */
+union bdk_ap_trcccctlr
+{
+ uint64_t u;
+ struct bdk_ap_trcccctlr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcccctlr_s cn; */
+};
+typedef union bdk_ap_trcccctlr bdk_ap_trcccctlr_t;
+
+#define BDK_AP_TRCCCCTLR BDK_AP_TRCCCCTLR_FUNC()
+static inline uint64_t BDK_AP_TRCCCCTLR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCCCCTLR_FUNC(void)
+{
+ return 0x201000e0000ll;
+}
+
+#define typedef_BDK_AP_TRCCCCTLR bdk_ap_trcccctlr_t
+#define bustype_BDK_AP_TRCCCCTLR BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCCCCTLR "AP_TRCCCCTLR"
+#define busnum_BDK_AP_TRCCCCTLR 0
+#define arguments_BDK_AP_TRCCCCTLR -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trccidcctlr#
+ *
+ * AP Register
+ */
+union bdk_ap_trccidcctlrx
+{
+ uint64_t u;
+ struct bdk_ap_trccidcctlrx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trccidcctlrx_s cn; */
+};
+typedef union bdk_ap_trccidcctlrx bdk_ap_trccidcctlrx_t;
+
+static inline uint64_t BDK_AP_TRCCIDCCTLRX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCCIDCCTLRX(unsigned long a)
+{
+ if (a<=1)
+ return 0x20103000200ll + 0x10000ll * ((a) & 0x1);
+ __bdk_csr_fatal("AP_TRCCIDCCTLRX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_TRCCIDCCTLRX(a) bdk_ap_trccidcctlrx_t
+#define bustype_BDK_AP_TRCCIDCCTLRX(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCCIDCCTLRX(a) "AP_TRCCIDCCTLRX"
+#define busnum_BDK_AP_TRCCIDCCTLRX(a) (a)
+#define arguments_BDK_AP_TRCCIDCCTLRX(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trccidcvr#
+ *
+ * AP Register
+ */
+union bdk_ap_trccidcvrx
+{
+ uint64_t u;
+ struct bdk_ap_trccidcvrx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trccidcvrx_s cn; */
+};
+typedef union bdk_ap_trccidcvrx bdk_ap_trccidcvrx_t;
+
+static inline uint64_t BDK_AP_TRCCIDCVRX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCCIDCVRX(unsigned long a)
+{
+ if (a<=7)
+ return 0x20103000000ll + 0x20000ll * ((a) & 0x7);
+ __bdk_csr_fatal("AP_TRCCIDCVRX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_TRCCIDCVRX(a) bdk_ap_trccidcvrx_t
+#define bustype_BDK_AP_TRCCIDCVRX(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCCIDCVRX(a) "AP_TRCCIDCVRX"
+#define busnum_BDK_AP_TRCCIDCVRX(a) (a)
+#define arguments_BDK_AP_TRCCIDCVRX(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trccidr#
+ *
+ * AP Register
+ */
+union bdk_ap_trccidrx
+{
+ uint64_t u;
+ struct bdk_ap_trccidrx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trccidrx_s cn; */
+};
+typedef union bdk_ap_trccidrx bdk_ap_trccidrx_t;
+
+static inline uint64_t BDK_AP_TRCCIDRX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCCIDRX(unsigned long a)
+{
+ if (a<=3)
+ return 0x201070c0700ll + 0x10000ll * ((a) & 0x3);
+ __bdk_csr_fatal("AP_TRCCIDRX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_TRCCIDRX(a) bdk_ap_trccidrx_t
+#define bustype_BDK_AP_TRCCIDRX(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCCIDRX(a) "AP_TRCCIDRX"
+#define busnum_BDK_AP_TRCCIDRX(a) (a)
+#define arguments_BDK_AP_TRCCIDRX(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcclaimclr
+ *
+ * AP Register
+ */
+union bdk_ap_trcclaimclr
+{
+ uint64_t u;
+ struct bdk_ap_trcclaimclr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcclaimclr_s cn; */
+};
+typedef union bdk_ap_trcclaimclr bdk_ap_trcclaimclr_t;
+
+#define BDK_AP_TRCCLAIMCLR BDK_AP_TRCCLAIMCLR_FUNC()
+static inline uint64_t BDK_AP_TRCCLAIMCLR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCCLAIMCLR_FUNC(void)
+{
+ return 0x20107090600ll;
+}
+
+#define typedef_BDK_AP_TRCCLAIMCLR bdk_ap_trcclaimclr_t
+#define bustype_BDK_AP_TRCCLAIMCLR BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCCLAIMCLR "AP_TRCCLAIMCLR"
+#define busnum_BDK_AP_TRCCLAIMCLR 0
+#define arguments_BDK_AP_TRCCLAIMCLR -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcclaimset
+ *
+ * AP Register
+ */
+union bdk_ap_trcclaimset
+{
+ uint64_t u;
+ struct bdk_ap_trcclaimset_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcclaimset_s cn; */
+};
+typedef union bdk_ap_trcclaimset bdk_ap_trcclaimset_t;
+
+#define BDK_AP_TRCCLAIMSET BDK_AP_TRCCLAIMSET_FUNC()
+static inline uint64_t BDK_AP_TRCCLAIMSET_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCCLAIMSET_FUNC(void)
+{
+ return 0x20107080600ll;
+}
+
+#define typedef_BDK_AP_TRCCLAIMSET bdk_ap_trcclaimset_t
+#define bustype_BDK_AP_TRCCLAIMSET BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCCLAIMSET "AP_TRCCLAIMSET"
+#define busnum_BDK_AP_TRCCLAIMSET 0
+#define arguments_BDK_AP_TRCCLAIMSET -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trccntctlr#
+ *
+ * AP Register
+ */
+union bdk_ap_trccntctlrx
+{
+ uint64_t u;
+ struct bdk_ap_trccntctlrx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trccntctlrx_s cn; */
+};
+typedef union bdk_ap_trccntctlrx bdk_ap_trccntctlrx_t;
+
+static inline uint64_t BDK_AP_TRCCNTCTLRX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCCNTCTLRX(unsigned long a)
+{
+ if (a<=3)
+ return 0x20100040500ll + 0x10000ll * ((a) & 0x3);
+ __bdk_csr_fatal("AP_TRCCNTCTLRX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_TRCCNTCTLRX(a) bdk_ap_trccntctlrx_t
+#define bustype_BDK_AP_TRCCNTCTLRX(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCCNTCTLRX(a) "AP_TRCCNTCTLRX"
+#define busnum_BDK_AP_TRCCNTCTLRX(a) (a)
+#define arguments_BDK_AP_TRCCNTCTLRX(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trccntrldvr#
+ *
+ * AP Register
+ */
+union bdk_ap_trccntrldvrx
+{
+ uint64_t u;
+ struct bdk_ap_trccntrldvrx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trccntrldvrx_s cn; */
+};
+typedef union bdk_ap_trccntrldvrx bdk_ap_trccntrldvrx_t;
+
+static inline uint64_t BDK_AP_TRCCNTRLDVRX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCCNTRLDVRX(unsigned long a)
+{
+ if (a<=3)
+ return 0x20100000500ll + 0x10000ll * ((a) & 0x3);
+ __bdk_csr_fatal("AP_TRCCNTRLDVRX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_TRCCNTRLDVRX(a) bdk_ap_trccntrldvrx_t
+#define bustype_BDK_AP_TRCCNTRLDVRX(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCCNTRLDVRX(a) "AP_TRCCNTRLDVRX"
+#define busnum_BDK_AP_TRCCNTRLDVRX(a) (a)
+#define arguments_BDK_AP_TRCCNTRLDVRX(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trccntvr#
+ *
+ * AP Register
+ */
+union bdk_ap_trccntvrx
+{
+ uint64_t u;
+ struct bdk_ap_trccntvrx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trccntvrx_s cn; */
+};
+typedef union bdk_ap_trccntvrx bdk_ap_trccntvrx_t;
+
+static inline uint64_t BDK_AP_TRCCNTVRX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCCNTVRX(unsigned long a)
+{
+ if (a<=3)
+ return 0x20100080500ll + 0x10000ll * ((a) & 0x3);
+ __bdk_csr_fatal("AP_TRCCNTVRX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_TRCCNTVRX(a) bdk_ap_trccntvrx_t
+#define bustype_BDK_AP_TRCCNTVRX(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCCNTVRX(a) "AP_TRCCNTVRX"
+#define busnum_BDK_AP_TRCCNTVRX(a) (a)
+#define arguments_BDK_AP_TRCCNTVRX(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcconfigr
+ *
+ * AP Register
+ */
+union bdk_ap_trcconfigr
+{
+ uint64_t u;
+ struct bdk_ap_trcconfigr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcconfigr_s cn; */
+};
+typedef union bdk_ap_trcconfigr bdk_ap_trcconfigr_t;
+
+#define BDK_AP_TRCCONFIGR BDK_AP_TRCCONFIGR_FUNC()
+static inline uint64_t BDK_AP_TRCCONFIGR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCCONFIGR_FUNC(void)
+{
+ return 0x20100040000ll;
+}
+
+#define typedef_BDK_AP_TRCCONFIGR bdk_ap_trcconfigr_t
+#define bustype_BDK_AP_TRCCONFIGR BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCCONFIGR "AP_TRCCONFIGR"
+#define busnum_BDK_AP_TRCCONFIGR 0
+#define arguments_BDK_AP_TRCCONFIGR -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcdevaff#
+ *
+ * AP Register
+ */
+union bdk_ap_trcdevaffx
+{
+ uint64_t u;
+ struct bdk_ap_trcdevaffx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcdevaffx_s cn; */
+};
+typedef union bdk_ap_trcdevaffx bdk_ap_trcdevaffx_t;
+
+static inline uint64_t BDK_AP_TRCDEVAFFX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCDEVAFFX(unsigned long a)
+{
+ if (a<=1)
+ return 0x201070a0600ll + 0x10000ll * ((a) & 0x1);
+ __bdk_csr_fatal("AP_TRCDEVAFFX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_TRCDEVAFFX(a) bdk_ap_trcdevaffx_t
+#define bustype_BDK_AP_TRCDEVAFFX(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCDEVAFFX(a) "AP_TRCDEVAFFX"
+#define busnum_BDK_AP_TRCDEVAFFX(a) (a)
+#define arguments_BDK_AP_TRCDEVAFFX(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcdevarch
+ *
+ * AP Register
+ */
+union bdk_ap_trcdevarch
+{
+ uint64_t u;
+ struct bdk_ap_trcdevarch_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcdevarch_s cn; */
+};
+typedef union bdk_ap_trcdevarch bdk_ap_trcdevarch_t;
+
+#define BDK_AP_TRCDEVARCH BDK_AP_TRCDEVARCH_FUNC()
+static inline uint64_t BDK_AP_TRCDEVARCH_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCDEVARCH_FUNC(void)
+{
+ return 0x201070f0600ll;
+}
+
+#define typedef_BDK_AP_TRCDEVARCH bdk_ap_trcdevarch_t
+#define bustype_BDK_AP_TRCDEVARCH BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCDEVARCH "AP_TRCDEVARCH"
+#define busnum_BDK_AP_TRCDEVARCH 0
+#define arguments_BDK_AP_TRCDEVARCH -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcdevid
+ *
+ * AP Register
+ */
+union bdk_ap_trcdevid
+{
+ uint64_t u;
+ struct bdk_ap_trcdevid_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcdevid_s cn; */
+};
+typedef union bdk_ap_trcdevid bdk_ap_trcdevid_t;
+
+#define BDK_AP_TRCDEVID BDK_AP_TRCDEVID_FUNC()
+static inline uint64_t BDK_AP_TRCDEVID_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCDEVID_FUNC(void)
+{
+ return 0x20107020700ll;
+}
+
+#define typedef_BDK_AP_TRCDEVID bdk_ap_trcdevid_t
+#define bustype_BDK_AP_TRCDEVID BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCDEVID "AP_TRCDEVID"
+#define busnum_BDK_AP_TRCDEVID 0
+#define arguments_BDK_AP_TRCDEVID -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcdevtype
+ *
+ * AP Register
+ */
+union bdk_ap_trcdevtype
+{
+ uint64_t u;
+ struct bdk_ap_trcdevtype_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcdevtype_s cn; */
+};
+typedef union bdk_ap_trcdevtype bdk_ap_trcdevtype_t;
+
+#define BDK_AP_TRCDEVTYPE BDK_AP_TRCDEVTYPE_FUNC()
+static inline uint64_t BDK_AP_TRCDEVTYPE_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCDEVTYPE_FUNC(void)
+{
+ return 0x20107030700ll;
+}
+
+#define typedef_BDK_AP_TRCDEVTYPE bdk_ap_trcdevtype_t
+#define bustype_BDK_AP_TRCDEVTYPE BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCDEVTYPE "AP_TRCDEVTYPE"
+#define busnum_BDK_AP_TRCDEVTYPE 0
+#define arguments_BDK_AP_TRCDEVTYPE -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcdvcmr#
+ *
+ * AP Register
+ */
+union bdk_ap_trcdvcmrx
+{
+ uint64_t u;
+ struct bdk_ap_trcdvcmrx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcdvcmrx_s cn; */
+};
+typedef union bdk_ap_trcdvcmrx bdk_ap_trcdvcmrx_t;
+
+static inline uint64_t BDK_AP_TRCDVCMRX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCDVCMRX(unsigned long a)
+{
+ if (a<=7)
+ return 0x20102000600ll + 0x40000ll * ((a) & 0x7);
+ __bdk_csr_fatal("AP_TRCDVCMRX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_TRCDVCMRX(a) bdk_ap_trcdvcmrx_t
+#define bustype_BDK_AP_TRCDVCMRX(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCDVCMRX(a) "AP_TRCDVCMRX"
+#define busnum_BDK_AP_TRCDVCMRX(a) (a)
+#define arguments_BDK_AP_TRCDVCMRX(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcdvcvr#
+ *
+ * AP Register
+ */
+union bdk_ap_trcdvcvrx
+{
+ uint64_t u;
+ struct bdk_ap_trcdvcvrx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcdvcvrx_s cn; */
+};
+typedef union bdk_ap_trcdvcvrx bdk_ap_trcdvcvrx_t;
+
+static inline uint64_t BDK_AP_TRCDVCVRX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCDVCVRX(unsigned long a)
+{
+ if (a<=7)
+ return 0x20102000400ll + 0x40000ll * ((a) & 0x7);
+ __bdk_csr_fatal("AP_TRCDVCVRX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_TRCDVCVRX(a) bdk_ap_trcdvcvrx_t
+#define bustype_BDK_AP_TRCDVCVRX(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCDVCVRX(a) "AP_TRCDVCVRX"
+#define busnum_BDK_AP_TRCDVCVRX(a) (a)
+#define arguments_BDK_AP_TRCDVCVRX(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trceventctl#r
+ *
+ * AP Register
+ */
+union bdk_ap_trceventctlxr
+{
+ uint64_t u;
+ struct bdk_ap_trceventctlxr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trceventctlxr_s cn; */
+};
+typedef union bdk_ap_trceventctlxr bdk_ap_trceventctlxr_t;
+
+static inline uint64_t BDK_AP_TRCEVENTCTLXR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCEVENTCTLXR(unsigned long a)
+{
+ if (a<=1)
+ return 0x20100080000ll + 0x10000ll * ((a) & 0x1);
+ __bdk_csr_fatal("AP_TRCEVENTCTLXR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_TRCEVENTCTLXR(a) bdk_ap_trceventctlxr_t
+#define bustype_BDK_AP_TRCEVENTCTLXR(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCEVENTCTLXR(a) "AP_TRCEVENTCTLXR"
+#define busnum_BDK_AP_TRCEVENTCTLXR(a) (a)
+#define arguments_BDK_AP_TRCEVENTCTLXR(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcextinselr
+ *
+ * AP Register
+ */
+union bdk_ap_trcextinselr
+{
+ uint64_t u;
+ struct bdk_ap_trcextinselr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcextinselr_s cn; */
+};
+typedef union bdk_ap_trcextinselr bdk_ap_trcextinselr_t;
+
+#define BDK_AP_TRCEXTINSELR BDK_AP_TRCEXTINSELR_FUNC()
+static inline uint64_t BDK_AP_TRCEXTINSELR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCEXTINSELR_FUNC(void)
+{
+ return 0x20100080400ll;
+}
+
+#define typedef_BDK_AP_TRCEXTINSELR bdk_ap_trcextinselr_t
+#define bustype_BDK_AP_TRCEXTINSELR BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCEXTINSELR "AP_TRCEXTINSELR"
+#define busnum_BDK_AP_TRCEXTINSELR 0
+#define arguments_BDK_AP_TRCEXTINSELR -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcidr#
+ *
+ * AP Register
+ */
+union bdk_ap_trcidrx
+{
+ uint64_t u;
+ struct bdk_ap_trcidrx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcidrx_s cn; */
+};
+typedef union bdk_ap_trcidrx bdk_ap_trcidrx_t;
+
+static inline uint64_t BDK_AP_TRCIDRX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCIDRX(unsigned long a)
+{
+ if (a<=13)
+ return 0x20100080700ll + 0x10000ll * ((a) & 0xf);
+ __bdk_csr_fatal("AP_TRCIDRX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_TRCIDRX(a) bdk_ap_trcidrx_t
+#define bustype_BDK_AP_TRCIDRX(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCIDRX(a) "AP_TRCIDRX"
+#define busnum_BDK_AP_TRCIDRX(a) (a)
+#define arguments_BDK_AP_TRCIDRX(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcimspec#
+ *
+ * AP Register
+ */
+union bdk_ap_trcimspecx
+{
+ uint64_t u;
+ struct bdk_ap_trcimspecx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcimspecx_s cn; */
+};
+typedef union bdk_ap_trcimspecx bdk_ap_trcimspecx_t;
+
+static inline uint64_t BDK_AP_TRCIMSPECX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCIMSPECX(unsigned long a)
+{
+ if (a<=7)
+ return 0x20100000700ll + 0x10000ll * ((a) & 0x7);
+ __bdk_csr_fatal("AP_TRCIMSPECX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_TRCIMSPECX(a) bdk_ap_trcimspecx_t
+#define bustype_BDK_AP_TRCIMSPECX(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCIMSPECX(a) "AP_TRCIMSPECX"
+#define busnum_BDK_AP_TRCIMSPECX(a) (a)
+#define arguments_BDK_AP_TRCIMSPECX(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcitctrl
+ *
+ * AP Register
+ */
+union bdk_ap_trcitctrl
+{
+ uint64_t u;
+ struct bdk_ap_trcitctrl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcitctrl_s cn; */
+};
+typedef union bdk_ap_trcitctrl bdk_ap_trcitctrl_t;
+
+#define BDK_AP_TRCITCTRL BDK_AP_TRCITCTRL_FUNC()
+static inline uint64_t BDK_AP_TRCITCTRL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCITCTRL_FUNC(void)
+{
+ return 0x20107000400ll;
+}
+
+#define typedef_BDK_AP_TRCITCTRL bdk_ap_trcitctrl_t
+#define bustype_BDK_AP_TRCITCTRL BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCITCTRL "AP_TRCITCTRL"
+#define busnum_BDK_AP_TRCITCTRL 0
+#define arguments_BDK_AP_TRCITCTRL -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trclar
+ *
+ * AP Register
+ */
+union bdk_ap_trclar
+{
+ uint64_t u;
+ struct bdk_ap_trclar_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trclar_s cn; */
+};
+typedef union bdk_ap_trclar bdk_ap_trclar_t;
+
+#define BDK_AP_TRCLAR BDK_AP_TRCLAR_FUNC()
+static inline uint64_t BDK_AP_TRCLAR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCLAR_FUNC(void)
+{
+ return 0x201070c0600ll;
+}
+
+#define typedef_BDK_AP_TRCLAR bdk_ap_trclar_t
+#define bustype_BDK_AP_TRCLAR BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCLAR "AP_TRCLAR"
+#define busnum_BDK_AP_TRCLAR 0
+#define arguments_BDK_AP_TRCLAR -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trclsr
+ *
+ * AP Register
+ */
+union bdk_ap_trclsr
+{
+ uint64_t u;
+ struct bdk_ap_trclsr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trclsr_s cn; */
+};
+typedef union bdk_ap_trclsr bdk_ap_trclsr_t;
+
+#define BDK_AP_TRCLSR BDK_AP_TRCLSR_FUNC()
+static inline uint64_t BDK_AP_TRCLSR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCLSR_FUNC(void)
+{
+ return 0x201070d0600ll;
+}
+
+#define typedef_BDK_AP_TRCLSR bdk_ap_trclsr_t
+#define bustype_BDK_AP_TRCLSR BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCLSR "AP_TRCLSR"
+#define busnum_BDK_AP_TRCLSR 0
+#define arguments_BDK_AP_TRCLSR -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcoslar
+ *
+ * AP Register
+ */
+union bdk_ap_trcoslar
+{
+ uint64_t u;
+ struct bdk_ap_trcoslar_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcoslar_s cn; */
+};
+typedef union bdk_ap_trcoslar bdk_ap_trcoslar_t;
+
+#define BDK_AP_TRCOSLAR BDK_AP_TRCOSLAR_FUNC()
+static inline uint64_t BDK_AP_TRCOSLAR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCOSLAR_FUNC(void)
+{
+ return 0x20101000400ll;
+}
+
+#define typedef_BDK_AP_TRCOSLAR bdk_ap_trcoslar_t
+#define bustype_BDK_AP_TRCOSLAR BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCOSLAR "AP_TRCOSLAR"
+#define busnum_BDK_AP_TRCOSLAR 0
+#define arguments_BDK_AP_TRCOSLAR -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcoslsr
+ *
+ * AP Register
+ */
+union bdk_ap_trcoslsr
+{
+ uint64_t u;
+ struct bdk_ap_trcoslsr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcoslsr_s cn; */
+};
+typedef union bdk_ap_trcoslsr bdk_ap_trcoslsr_t;
+
+#define BDK_AP_TRCOSLSR BDK_AP_TRCOSLSR_FUNC()
+static inline uint64_t BDK_AP_TRCOSLSR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCOSLSR_FUNC(void)
+{
+ return 0x20101010400ll;
+}
+
+#define typedef_BDK_AP_TRCOSLSR bdk_ap_trcoslsr_t
+#define bustype_BDK_AP_TRCOSLSR BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCOSLSR "AP_TRCOSLSR"
+#define busnum_BDK_AP_TRCOSLSR 0
+#define arguments_BDK_AP_TRCOSLSR -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcpdcr
+ *
+ * AP Register
+ */
+union bdk_ap_trcpdcr
+{
+ uint64_t u;
+ struct bdk_ap_trcpdcr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcpdcr_s cn; */
+};
+typedef union bdk_ap_trcpdcr bdk_ap_trcpdcr_t;
+
+#define BDK_AP_TRCPDCR BDK_AP_TRCPDCR_FUNC()
+static inline uint64_t BDK_AP_TRCPDCR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCPDCR_FUNC(void)
+{
+ return 0x20101040400ll;
+}
+
+#define typedef_BDK_AP_TRCPDCR bdk_ap_trcpdcr_t
+#define bustype_BDK_AP_TRCPDCR BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCPDCR "AP_TRCPDCR"
+#define busnum_BDK_AP_TRCPDCR 0
+#define arguments_BDK_AP_TRCPDCR -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcpdsr
+ *
+ * AP Register
+ */
+union bdk_ap_trcpdsr
+{
+ uint64_t u;
+ struct bdk_ap_trcpdsr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcpdsr_s cn; */
+};
+typedef union bdk_ap_trcpdsr bdk_ap_trcpdsr_t;
+
+#define BDK_AP_TRCPDSR BDK_AP_TRCPDSR_FUNC()
+static inline uint64_t BDK_AP_TRCPDSR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCPDSR_FUNC(void)
+{
+ return 0x20101050400ll;
+}
+
+#define typedef_BDK_AP_TRCPDSR bdk_ap_trcpdsr_t
+#define bustype_BDK_AP_TRCPDSR BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCPDSR "AP_TRCPDSR"
+#define busnum_BDK_AP_TRCPDSR 0
+#define arguments_BDK_AP_TRCPDSR -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcpidr#
+ *
+ * AP Register
+ */
+union bdk_ap_trcpidrx
+{
+ uint64_t u;
+ struct bdk_ap_trcpidrx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcpidrx_s cn; */
+};
+typedef union bdk_ap_trcpidrx bdk_ap_trcpidrx_t;
+
+static inline uint64_t BDK_AP_TRCPIDRX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCPIDRX(unsigned long a)
+{
+ if (a<=7)
+ return 0x20107080700ll + 0x10000ll * ((a) & 0x7);
+ __bdk_csr_fatal("AP_TRCPIDRX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_TRCPIDRX(a) bdk_ap_trcpidrx_t
+#define bustype_BDK_AP_TRCPIDRX(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCPIDRX(a) "AP_TRCPIDRX"
+#define busnum_BDK_AP_TRCPIDRX(a) (a)
+#define arguments_BDK_AP_TRCPIDRX(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcprgctlr
+ *
+ * AP Register
+ */
+union bdk_ap_trcprgctlr
+{
+ uint64_t u;
+ struct bdk_ap_trcprgctlr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcprgctlr_s cn; */
+};
+typedef union bdk_ap_trcprgctlr bdk_ap_trcprgctlr_t;
+
+#define BDK_AP_TRCPRGCTLR BDK_AP_TRCPRGCTLR_FUNC()
+static inline uint64_t BDK_AP_TRCPRGCTLR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCPRGCTLR_FUNC(void)
+{
+ return 0x20100010000ll;
+}
+
+#define typedef_BDK_AP_TRCPRGCTLR bdk_ap_trcprgctlr_t
+#define bustype_BDK_AP_TRCPRGCTLR BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCPRGCTLR "AP_TRCPRGCTLR"
+#define busnum_BDK_AP_TRCPRGCTLR 0
+#define arguments_BDK_AP_TRCPRGCTLR -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcprocselr
+ *
+ * AP Register
+ */
+union bdk_ap_trcprocselr
+{
+ uint64_t u;
+ struct bdk_ap_trcprocselr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcprocselr_s cn; */
+};
+typedef union bdk_ap_trcprocselr bdk_ap_trcprocselr_t;
+
+#define BDK_AP_TRCPROCSELR BDK_AP_TRCPROCSELR_FUNC()
+static inline uint64_t BDK_AP_TRCPROCSELR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCPROCSELR_FUNC(void)
+{
+ return 0x20100020000ll;
+}
+
+#define typedef_BDK_AP_TRCPROCSELR bdk_ap_trcprocselr_t
+#define bustype_BDK_AP_TRCPROCSELR BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCPROCSELR "AP_TRCPROCSELR"
+#define busnum_BDK_AP_TRCPROCSELR 0
+#define arguments_BDK_AP_TRCPROCSELR -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcqctlr
+ *
+ * AP Register
+ */
+union bdk_ap_trcqctlr
+{
+ uint64_t u;
+ struct bdk_ap_trcqctlr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcqctlr_s cn; */
+};
+typedef union bdk_ap_trcqctlr bdk_ap_trcqctlr_t;
+
+#define BDK_AP_TRCQCTLR BDK_AP_TRCQCTLR_FUNC()
+static inline uint64_t BDK_AP_TRCQCTLR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCQCTLR_FUNC(void)
+{
+ return 0x20100010100ll;
+}
+
+#define typedef_BDK_AP_TRCQCTLR bdk_ap_trcqctlr_t
+#define bustype_BDK_AP_TRCQCTLR BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCQCTLR "AP_TRCQCTLR"
+#define busnum_BDK_AP_TRCQCTLR 0
+#define arguments_BDK_AP_TRCQCTLR -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcrsctlr#
+ *
+ * AP Register
+ */
+union bdk_ap_trcrsctlrx
+{
+ uint64_t u;
+ struct bdk_ap_trcrsctlrx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcrsctlrx_s cn; */
+};
+typedef union bdk_ap_trcrsctlrx bdk_ap_trcrsctlrx_t;
+
+static inline uint64_t BDK_AP_TRCRSCTLRX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCRSCTLRX(unsigned long a)
+{
+ if ((a>=2)&&(a<=31))
+ return 0x20101000000ll + 0x10000ll * ((a) & 0x1f);
+ __bdk_csr_fatal("AP_TRCRSCTLRX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_TRCRSCTLRX(a) bdk_ap_trcrsctlrx_t
+#define bustype_BDK_AP_TRCRSCTLRX(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCRSCTLRX(a) "AP_TRCRSCTLRX"
+#define busnum_BDK_AP_TRCRSCTLRX(a) (a)
+#define arguments_BDK_AP_TRCRSCTLRX(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcseqevr#
+ *
+ * AP Register
+ */
+union bdk_ap_trcseqevrx
+{
+ uint64_t u;
+ struct bdk_ap_trcseqevrx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcseqevrx_s cn; */
+};
+typedef union bdk_ap_trcseqevrx bdk_ap_trcseqevrx_t;
+
+static inline uint64_t BDK_AP_TRCSEQEVRX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCSEQEVRX(unsigned long a)
+{
+ if (a<=2)
+ return 0x20100000400ll + 0x10000ll * ((a) & 0x3);
+ __bdk_csr_fatal("AP_TRCSEQEVRX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_TRCSEQEVRX(a) bdk_ap_trcseqevrx_t
+#define bustype_BDK_AP_TRCSEQEVRX(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCSEQEVRX(a) "AP_TRCSEQEVRX"
+#define busnum_BDK_AP_TRCSEQEVRX(a) (a)
+#define arguments_BDK_AP_TRCSEQEVRX(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcseqrstevr
+ *
+ * AP Register
+ */
+union bdk_ap_trcseqrstevr
+{
+ uint64_t u;
+ struct bdk_ap_trcseqrstevr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcseqrstevr_s cn; */
+};
+typedef union bdk_ap_trcseqrstevr bdk_ap_trcseqrstevr_t;
+
+#define BDK_AP_TRCSEQRSTEVR BDK_AP_TRCSEQRSTEVR_FUNC()
+static inline uint64_t BDK_AP_TRCSEQRSTEVR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCSEQRSTEVR_FUNC(void)
+{
+ return 0x20100060400ll;
+}
+
+#define typedef_BDK_AP_TRCSEQRSTEVR bdk_ap_trcseqrstevr_t
+#define bustype_BDK_AP_TRCSEQRSTEVR BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCSEQRSTEVR "AP_TRCSEQRSTEVR"
+#define busnum_BDK_AP_TRCSEQRSTEVR 0
+#define arguments_BDK_AP_TRCSEQRSTEVR -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcseqstr
+ *
+ * AP Register
+ */
+union bdk_ap_trcseqstr
+{
+ uint64_t u;
+ struct bdk_ap_trcseqstr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcseqstr_s cn; */
+};
+typedef union bdk_ap_trcseqstr bdk_ap_trcseqstr_t;
+
+#define BDK_AP_TRCSEQSTR BDK_AP_TRCSEQSTR_FUNC()
+static inline uint64_t BDK_AP_TRCSEQSTR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCSEQSTR_FUNC(void)
+{
+ return 0x20100070400ll;
+}
+
+#define typedef_BDK_AP_TRCSEQSTR bdk_ap_trcseqstr_t
+#define bustype_BDK_AP_TRCSEQSTR BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCSEQSTR "AP_TRCSEQSTR"
+#define busnum_BDK_AP_TRCSEQSTR 0
+#define arguments_BDK_AP_TRCSEQSTR -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcssccr#
+ *
+ * AP Register
+ */
+union bdk_ap_trcssccrx
+{
+ uint64_t u;
+ struct bdk_ap_trcssccrx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcssccrx_s cn; */
+};
+typedef union bdk_ap_trcssccrx bdk_ap_trcssccrx_t;
+
+static inline uint64_t BDK_AP_TRCSSCCRX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCSSCCRX(unsigned long a)
+{
+ if (a<=7)
+ return 0x20101000200ll + 0x10000ll * ((a) & 0x7);
+ __bdk_csr_fatal("AP_TRCSSCCRX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_TRCSSCCRX(a) bdk_ap_trcssccrx_t
+#define bustype_BDK_AP_TRCSSCCRX(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCSSCCRX(a) "AP_TRCSSCCRX"
+#define busnum_BDK_AP_TRCSSCCRX(a) (a)
+#define arguments_BDK_AP_TRCSSCCRX(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcsscsr#
+ *
+ * AP Register
+ */
+union bdk_ap_trcsscsrx
+{
+ uint64_t u;
+ struct bdk_ap_trcsscsrx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcsscsrx_s cn; */
+};
+typedef union bdk_ap_trcsscsrx bdk_ap_trcsscsrx_t;
+
+static inline uint64_t BDK_AP_TRCSSCSRX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCSSCSRX(unsigned long a)
+{
+ if (a<=7)
+ return 0x20101080200ll + 0x10000ll * ((a) & 0x7);
+ __bdk_csr_fatal("AP_TRCSSCSRX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_TRCSSCSRX(a) bdk_ap_trcsscsrx_t
+#define bustype_BDK_AP_TRCSSCSRX(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCSSCSRX(a) "AP_TRCSSCSRX"
+#define busnum_BDK_AP_TRCSSCSRX(a) (a)
+#define arguments_BDK_AP_TRCSSCSRX(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcsspcicr#
+ *
+ * AP Register
+ */
+union bdk_ap_trcsspcicrx
+{
+ uint64_t u;
+ struct bdk_ap_trcsspcicrx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcsspcicrx_s cn; */
+};
+typedef union bdk_ap_trcsspcicrx bdk_ap_trcsspcicrx_t;
+
+static inline uint64_t BDK_AP_TRCSSPCICRX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCSSPCICRX(unsigned long a)
+{
+ if (a<=7)
+ return 0x20101000300ll + 0x10000ll * ((a) & 0x7);
+ __bdk_csr_fatal("AP_TRCSSPCICRX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_TRCSSPCICRX(a) bdk_ap_trcsspcicrx_t
+#define bustype_BDK_AP_TRCSSPCICRX(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCSSPCICRX(a) "AP_TRCSSPCICRX"
+#define busnum_BDK_AP_TRCSSPCICRX(a) (a)
+#define arguments_BDK_AP_TRCSSPCICRX(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcstallctlr
+ *
+ * AP Register
+ */
+union bdk_ap_trcstallctlr
+{
+ uint64_t u;
+ struct bdk_ap_trcstallctlr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcstallctlr_s cn; */
+};
+typedef union bdk_ap_trcstallctlr bdk_ap_trcstallctlr_t;
+
+#define BDK_AP_TRCSTALLCTLR BDK_AP_TRCSTALLCTLR_FUNC()
+static inline uint64_t BDK_AP_TRCSTALLCTLR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCSTALLCTLR_FUNC(void)
+{
+ return 0x201000b0000ll;
+}
+
+#define typedef_BDK_AP_TRCSTALLCTLR bdk_ap_trcstallctlr_t
+#define bustype_BDK_AP_TRCSTALLCTLR BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCSTALLCTLR "AP_TRCSTALLCTLR"
+#define busnum_BDK_AP_TRCSTALLCTLR 0
+#define arguments_BDK_AP_TRCSTALLCTLR -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcstatr
+ *
+ * AP Register
+ */
+union bdk_ap_trcstatr
+{
+ uint64_t u;
+ struct bdk_ap_trcstatr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcstatr_s cn; */
+};
+typedef union bdk_ap_trcstatr bdk_ap_trcstatr_t;
+
+#define BDK_AP_TRCSTATR BDK_AP_TRCSTATR_FUNC()
+static inline uint64_t BDK_AP_TRCSTATR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCSTATR_FUNC(void)
+{
+ return 0x20100030000ll;
+}
+
+#define typedef_BDK_AP_TRCSTATR bdk_ap_trcstatr_t
+#define bustype_BDK_AP_TRCSTATR BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCSTATR "AP_TRCSTATR"
+#define busnum_BDK_AP_TRCSTATR 0
+#define arguments_BDK_AP_TRCSTATR -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcsyncpr
+ *
+ * AP Register
+ */
+union bdk_ap_trcsyncpr
+{
+ uint64_t u;
+ struct bdk_ap_trcsyncpr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcsyncpr_s cn; */
+};
+typedef union bdk_ap_trcsyncpr bdk_ap_trcsyncpr_t;
+
+#define BDK_AP_TRCSYNCPR BDK_AP_TRCSYNCPR_FUNC()
+static inline uint64_t BDK_AP_TRCSYNCPR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCSYNCPR_FUNC(void)
+{
+ return 0x201000d0000ll;
+}
+
+#define typedef_BDK_AP_TRCSYNCPR bdk_ap_trcsyncpr_t
+#define bustype_BDK_AP_TRCSYNCPR BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCSYNCPR "AP_TRCSYNCPR"
+#define busnum_BDK_AP_TRCSYNCPR 0
+#define arguments_BDK_AP_TRCSYNCPR -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trctraceidr
+ *
+ * AP Register
+ */
+union bdk_ap_trctraceidr
+{
+ uint64_t u;
+ struct bdk_ap_trctraceidr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trctraceidr_s cn; */
+};
+typedef union bdk_ap_trctraceidr bdk_ap_trctraceidr_t;
+
+#define BDK_AP_TRCTRACEIDR BDK_AP_TRCTRACEIDR_FUNC()
+static inline uint64_t BDK_AP_TRCTRACEIDR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCTRACEIDR_FUNC(void)
+{
+ return 0x20100000100ll;
+}
+
+#define typedef_BDK_AP_TRCTRACEIDR bdk_ap_trctraceidr_t
+#define bustype_BDK_AP_TRCTRACEIDR BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCTRACEIDR "AP_TRCTRACEIDR"
+#define busnum_BDK_AP_TRCTRACEIDR 0
+#define arguments_BDK_AP_TRCTRACEIDR -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trctsctlr
+ *
+ * AP Register
+ */
+union bdk_ap_trctsctlr
+{
+ uint64_t u;
+ struct bdk_ap_trctsctlr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trctsctlr_s cn; */
+};
+typedef union bdk_ap_trctsctlr bdk_ap_trctsctlr_t;
+
+#define BDK_AP_TRCTSCTLR BDK_AP_TRCTSCTLR_FUNC()
+static inline uint64_t BDK_AP_TRCTSCTLR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCTSCTLR_FUNC(void)
+{
+ return 0x201000c0000ll;
+}
+
+#define typedef_BDK_AP_TRCTSCTLR bdk_ap_trctsctlr_t
+#define bustype_BDK_AP_TRCTSCTLR BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCTSCTLR "AP_TRCTSCTLR"
+#define busnum_BDK_AP_TRCTSCTLR 0
+#define arguments_BDK_AP_TRCTSCTLR -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcvdarcctlr
+ *
+ * AP Register
+ */
+union bdk_ap_trcvdarcctlr
+{
+ uint64_t u;
+ struct bdk_ap_trcvdarcctlr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcvdarcctlr_s cn; */
+};
+typedef union bdk_ap_trcvdarcctlr bdk_ap_trcvdarcctlr_t;
+
+#define BDK_AP_TRCVDARCCTLR BDK_AP_TRCVDARCCTLR_FUNC()
+static inline uint64_t BDK_AP_TRCVDARCCTLR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCVDARCCTLR_FUNC(void)
+{
+ return 0x201000a0200ll;
+}
+
+#define typedef_BDK_AP_TRCVDARCCTLR bdk_ap_trcvdarcctlr_t
+#define bustype_BDK_AP_TRCVDARCCTLR BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCVDARCCTLR "AP_TRCVDARCCTLR"
+#define busnum_BDK_AP_TRCVDARCCTLR 0
+#define arguments_BDK_AP_TRCVDARCCTLR -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcvdctlr
+ *
+ * AP Register
+ */
+union bdk_ap_trcvdctlr
+{
+ uint64_t u;
+ struct bdk_ap_trcvdctlr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcvdctlr_s cn; */
+};
+typedef union bdk_ap_trcvdctlr bdk_ap_trcvdctlr_t;
+
+#define BDK_AP_TRCVDCTLR BDK_AP_TRCVDCTLR_FUNC()
+static inline uint64_t BDK_AP_TRCVDCTLR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCVDCTLR_FUNC(void)
+{
+ return 0x20100080200ll;
+}
+
+#define typedef_BDK_AP_TRCVDCTLR bdk_ap_trcvdctlr_t
+#define bustype_BDK_AP_TRCVDCTLR BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCVDCTLR "AP_TRCVDCTLR"
+#define busnum_BDK_AP_TRCVDCTLR 0
+#define arguments_BDK_AP_TRCVDCTLR -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcvdsacctlr
+ *
+ * AP Register
+ */
+union bdk_ap_trcvdsacctlr
+{
+ uint64_t u;
+ struct bdk_ap_trcvdsacctlr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcvdsacctlr_s cn; */
+};
+typedef union bdk_ap_trcvdsacctlr bdk_ap_trcvdsacctlr_t;
+
+#define BDK_AP_TRCVDSACCTLR BDK_AP_TRCVDSACCTLR_FUNC()
+static inline uint64_t BDK_AP_TRCVDSACCTLR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCVDSACCTLR_FUNC(void)
+{
+ return 0x20100090200ll;
+}
+
+#define typedef_BDK_AP_TRCVDSACCTLR bdk_ap_trcvdsacctlr_t
+#define bustype_BDK_AP_TRCVDSACCTLR BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCVDSACCTLR "AP_TRCVDSACCTLR"
+#define busnum_BDK_AP_TRCVDSACCTLR 0
+#define arguments_BDK_AP_TRCVDSACCTLR -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcvictlr
+ *
+ * AP Register
+ */
+union bdk_ap_trcvictlr
+{
+ uint64_t u;
+ struct bdk_ap_trcvictlr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcvictlr_s cn; */
+};
+typedef union bdk_ap_trcvictlr bdk_ap_trcvictlr_t;
+
+#define BDK_AP_TRCVICTLR BDK_AP_TRCVICTLR_FUNC()
+static inline uint64_t BDK_AP_TRCVICTLR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCVICTLR_FUNC(void)
+{
+ return 0x20100000200ll;
+}
+
+#define typedef_BDK_AP_TRCVICTLR bdk_ap_trcvictlr_t
+#define bustype_BDK_AP_TRCVICTLR BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCVICTLR "AP_TRCVICTLR"
+#define busnum_BDK_AP_TRCVICTLR 0
+#define arguments_BDK_AP_TRCVICTLR -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcviiectlr
+ *
+ * AP Register
+ */
+union bdk_ap_trcviiectlr
+{
+ uint64_t u;
+ struct bdk_ap_trcviiectlr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcviiectlr_s cn; */
+};
+typedef union bdk_ap_trcviiectlr bdk_ap_trcviiectlr_t;
+
+#define BDK_AP_TRCVIIECTLR BDK_AP_TRCVIIECTLR_FUNC()
+static inline uint64_t BDK_AP_TRCVIIECTLR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCVIIECTLR_FUNC(void)
+{
+ return 0x20100010200ll;
+}
+
+#define typedef_BDK_AP_TRCVIIECTLR bdk_ap_trcviiectlr_t
+#define bustype_BDK_AP_TRCVIIECTLR BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCVIIECTLR "AP_TRCVIIECTLR"
+#define busnum_BDK_AP_TRCVIIECTLR 0
+#define arguments_BDK_AP_TRCVIIECTLR -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcvipcssctlr
+ *
+ * AP Register
+ */
+union bdk_ap_trcvipcssctlr
+{
+ uint64_t u;
+ struct bdk_ap_trcvipcssctlr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcvipcssctlr_s cn; */
+};
+typedef union bdk_ap_trcvipcssctlr bdk_ap_trcvipcssctlr_t;
+
+#define BDK_AP_TRCVIPCSSCTLR BDK_AP_TRCVIPCSSCTLR_FUNC()
+static inline uint64_t BDK_AP_TRCVIPCSSCTLR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCVIPCSSCTLR_FUNC(void)
+{
+ return 0x20100030200ll;
+}
+
+#define typedef_BDK_AP_TRCVIPCSSCTLR bdk_ap_trcvipcssctlr_t
+#define bustype_BDK_AP_TRCVIPCSSCTLR BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCVIPCSSCTLR "AP_TRCVIPCSSCTLR"
+#define busnum_BDK_AP_TRCVIPCSSCTLR 0
+#define arguments_BDK_AP_TRCVIPCSSCTLR -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcvissctlr
+ *
+ * AP Register
+ */
+union bdk_ap_trcvissctlr
+{
+ uint64_t u;
+ struct bdk_ap_trcvissctlr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcvissctlr_s cn; */
+};
+typedef union bdk_ap_trcvissctlr bdk_ap_trcvissctlr_t;
+
+#define BDK_AP_TRCVISSCTLR BDK_AP_TRCVISSCTLR_FUNC()
+static inline uint64_t BDK_AP_TRCVISSCTLR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCVISSCTLR_FUNC(void)
+{
+ return 0x20100020200ll;
+}
+
+#define typedef_BDK_AP_TRCVISSCTLR bdk_ap_trcvissctlr_t
+#define bustype_BDK_AP_TRCVISSCTLR BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCVISSCTLR "AP_TRCVISSCTLR"
+#define busnum_BDK_AP_TRCVISSCTLR 0
+#define arguments_BDK_AP_TRCVISSCTLR -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcvmidcctlr#
+ *
+ * AP Register
+ */
+union bdk_ap_trcvmidcctlrx
+{
+ uint64_t u;
+ struct bdk_ap_trcvmidcctlrx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcvmidcctlrx_s cn; */
+};
+typedef union bdk_ap_trcvmidcctlrx bdk_ap_trcvmidcctlrx_t;
+
+static inline uint64_t BDK_AP_TRCVMIDCCTLRX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCVMIDCCTLRX(unsigned long a)
+{
+ if (a<=1)
+ return 0x20103020200ll + 0x10000ll * ((a) & 0x1);
+ __bdk_csr_fatal("AP_TRCVMIDCCTLRX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_TRCVMIDCCTLRX(a) bdk_ap_trcvmidcctlrx_t
+#define bustype_BDK_AP_TRCVMIDCCTLRX(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCVMIDCCTLRX(a) "AP_TRCVMIDCCTLRX"
+#define busnum_BDK_AP_TRCVMIDCCTLRX(a) (a)
+#define arguments_BDK_AP_TRCVMIDCCTLRX(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_trcvmidcvr#
+ *
+ * AP Register
+ */
+union bdk_ap_trcvmidcvrx
+{
+ uint64_t u;
+ struct bdk_ap_trcvmidcvrx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_trcvmidcvrx_s cn; */
+};
+typedef union bdk_ap_trcvmidcvrx bdk_ap_trcvmidcvrx_t;
+
+static inline uint64_t BDK_AP_TRCVMIDCVRX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TRCVMIDCVRX(unsigned long a)
+{
+ if (a<=7)
+ return 0x20103000100ll + 0x20000ll * ((a) & 0x7);
+ __bdk_csr_fatal("AP_TRCVMIDCVRX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_TRCVMIDCVRX(a) bdk_ap_trcvmidcvrx_t
+#define bustype_BDK_AP_TRCVMIDCVRX(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TRCVMIDCVRX(a) "AP_TRCVMIDCVRX"
+#define busnum_BDK_AP_TRCVMIDCVRX(a) (a)
+#define arguments_BDK_AP_TRCVMIDCVRX(a) (a),-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_ttbr0_el1
+ *
+ * AP Translation Table Base EL1 Register 0
+ * Holds the base address of translation table 0, and information
+ * about the memory it occupies. This is one of the translation
+ * tables for the stage 1 translation of memory accesses at EL0
+ * and EL1.
+ */
+union bdk_ap_ttbr0_el1
+{
+ uint64_t u;
+ struct bdk_ap_ttbr0_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t asid : 16; /**< [ 63: 48](R/W) An ASID for the translation table base address. The AP_TCR_EL1[A1]
+ field selects either AP_TTBR0_EL1[ASID] or AP_TTBR1_EL1[ASID].
+ If the implementation has only 8 bits of ASID, then the upper
+ 8 bits of this field are RES0. */
+ uint64_t baddr : 44; /**< [ 47: 4](R/W) Translation table base address, bits\<47:x\>. Bits \<x-1:0\> are
+ RES0.
+
+ x is based on the value of AP_TCR_EL1[T0SZ], the stage of
+ translation, and the memory translation granule size.
+ The AArch64 Virtual Memory System Architecture chapter
+ describes how x is calculated.
+ The value of x determines the required alignment of the
+ translation table, which must be aligned to 2^(x)
+ bytes.
+
+ If bits [x-1:0] are not all zero, this is a misaligned
+ Translation Table Base Address. Its effects are CONSTRAINED
+ UNPREDICTABLE, and can be one of the following:
+
+ Bits [x-1:0] are treated as if all the bits are zero. The
+ value read back from those bits might be the value written or
+ might be zero.
+
+ The calculation of an address for a translation table walk
+ using this register can be corrupted in those bits that are
+ nonzero. */
+ uint64_t reserved_1_3 : 3;
+ uint64_t cnp : 1; /**< [ 0: 0](R/W) Common not private. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnp : 1; /**< [ 0: 0](R/W) Common not private. */
+ uint64_t reserved_1_3 : 3;
+ uint64_t baddr : 44; /**< [ 47: 4](R/W) Translation table base address, bits\<47:x\>. Bits \<x-1:0\> are
+ RES0.
+
+ x is based on the value of AP_TCR_EL1[T0SZ], the stage of
+ translation, and the memory translation granule size.
+ The AArch64 Virtual Memory System Architecture chapter
+ describes how x is calculated.
+ The value of x determines the required alignment of the
+ translation table, which must be aligned to 2^(x)
+ bytes.
+
+ If bits [x-1:0] are not all zero, this is a misaligned
+ Translation Table Base Address. Its effects are CONSTRAINED
+ UNPREDICTABLE, and can be one of the following:
+
+ Bits [x-1:0] are treated as if all the bits are zero. The
+ value read back from those bits might be the value written or
+ might be zero.
+
+ The calculation of an address for a translation table walk
+ using this register can be corrupted in those bits that are
+ nonzero. */
+ uint64_t asid : 16; /**< [ 63: 48](R/W) An ASID for the translation table base address. The AP_TCR_EL1[A1]
+ field selects either AP_TTBR0_EL1[ASID] or AP_TTBR1_EL1[ASID].
+ If the implementation has only 8 bits of ASID, then the upper
+ 8 bits of this field are RES0. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_ttbr0_el1_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t asid : 16; /**< [ 63: 48](R/W) An ASID for the translation table base address. The AP_TCR_EL1[A1]
+ field selects either AP_TTBR0_EL1[ASID] or AP_TTBR1_EL1[ASID].
+ If the implementation has only 8 bits of ASID, then the upper
+ 8 bits of this field are RES0. */
+ uint64_t baddr : 44; /**< [ 47: 4](R/W) Translation table base address, bits\<47:x\>. Bits \<x-1:0\> are
+ RES0.
+
+ x is based on the value of AP_TCR_EL1[T0SZ], the stage of
+ translation, and the memory translation granule size.
+ The AArch64 Virtual Memory System Architecture chapter
+ describes how x is calculated.
+ The value of x determines the required alignment of the
+ translation table, which must be aligned to 2^(x)
+ bytes.
+
+ If bits [x-1:0] are not all zero, this is a misaligned
+ Translation Table Base Address. Its effects are CONSTRAINED
+ UNPREDICTABLE, and can be one of the following:
+
+ Bits [x-1:0] are treated as if all the bits are zero. The
+ value read back from those bits might be the value written or
+ might be zero.
+
+ The calculation of an address for a translation table walk
+ using this register can be corrupted in those bits that are
+ nonzero. */
+ uint64_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_3 : 4;
+ uint64_t baddr : 44; /**< [ 47: 4](R/W) Translation table base address, bits\<47:x\>. Bits \<x-1:0\> are
+ RES0.
+
+ x is based on the value of AP_TCR_EL1[T0SZ], the stage of
+ translation, and the memory translation granule size.
+ The AArch64 Virtual Memory System Architecture chapter
+ describes how x is calculated.
+ The value of x determines the required alignment of the
+ translation table, which must be aligned to 2^(x)
+ bytes.
+
+ If bits [x-1:0] are not all zero, this is a misaligned
+ Translation Table Base Address. Its effects are CONSTRAINED
+ UNPREDICTABLE, and can be one of the following:
+
+ Bits [x-1:0] are treated as if all the bits are zero. The
+ value read back from those bits might be the value written or
+ might be zero.
+
+ The calculation of an address for a translation table walk
+ using this register can be corrupted in those bits that are
+ nonzero. */
+ uint64_t asid : 16; /**< [ 63: 48](R/W) An ASID for the translation table base address. The AP_TCR_EL1[A1]
+ field selects either AP_TTBR0_EL1[ASID] or AP_TTBR1_EL1[ASID].
+ If the implementation has only 8 bits of ASID, then the upper
+ 8 bits of this field are RES0. */
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_ap_ttbr0_el1_s cn9; */
+};
+typedef union bdk_ap_ttbr0_el1 bdk_ap_ttbr0_el1_t;
+
+#define BDK_AP_TTBR0_EL1 BDK_AP_TTBR0_EL1_FUNC()
+static inline uint64_t BDK_AP_TTBR0_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TTBR0_EL1_FUNC(void)
+{
+ return 0x30002000000ll;
+}
+
+#define typedef_BDK_AP_TTBR0_EL1 bdk_ap_ttbr0_el1_t
+#define bustype_BDK_AP_TTBR0_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TTBR0_EL1 "AP_TTBR0_EL1"
+#define busnum_BDK_AP_TTBR0_EL1 0
+#define arguments_BDK_AP_TTBR0_EL1 -1,-1,-1,-1
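+
+/* Editor's sketch (not part of the imported BDK sources): composing an
+ * AP_TTBR0_EL1 image with the union above. The table address, ASID and
+ * alignment exponent x are hypothetical inputs; per the BADDR description,
+ * x follows from AP_TCR_EL1[T0SZ] and the translation granule, and the
+ * table must be aligned to 2^(x) bytes. */
+static inline uint64_t bdk_ap_ttbr0_el1_compose(uint64_t table_pa, uint16_t asid, unsigned x)
+{
+    bdk_ap_ttbr0_el1_t ttbr;
+    ttbr.u = 0;
+    /* A misaligned base is CONSTRAINED UNPREDICTABLE; reject it up front. */
+    if (table_pa & ((1ull << x) - 1))
+        return 0;
+    ttbr.s.baddr = table_pa >> 4; /* BADDR holds address bits <47:4> */
+    ttbr.s.asid = asid;           /* used when AP_TCR_EL1[A1] selects TTBR0 */
+    return ttbr.u;
+}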
+
+/**
+ * Register (SYSREG) ap_ttbr0_el12
+ *
+ * AP Translation Table Base EL1/2 Register 0
+ * Alias of AP_TTBR0_EL1 from EL2 when AP_HCR_EL2[E2H] is set.
+ */
+union bdk_ap_ttbr0_el12
+{
+ uint64_t u;
+ struct bdk_ap_ttbr0_el12_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_ttbr0_el12_s cn; */
+};
+typedef union bdk_ap_ttbr0_el12 bdk_ap_ttbr0_el12_t;
+
+#define BDK_AP_TTBR0_EL12 BDK_AP_TTBR0_EL12_FUNC()
+static inline uint64_t BDK_AP_TTBR0_EL12_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TTBR0_EL12_FUNC(void)
+{
+ return 0x30502000000ll;
+}
+
+#define typedef_BDK_AP_TTBR0_EL12 bdk_ap_ttbr0_el12_t
+#define bustype_BDK_AP_TTBR0_EL12 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TTBR0_EL12 "AP_TTBR0_EL12"
+#define busnum_BDK_AP_TTBR0_EL12 0
+#define arguments_BDK_AP_TTBR0_EL12 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_ttbr0_el2
+ *
+ * AP Translation Table Base EL2 Register 0
+ * Holds the base address of translation table 0, and information
+ * about the memory it occupies. This is one of the translation
+ * tables for the stage 1 translation of memory accesses at EL0
+ * and EL2 when Virtual Host Extensions are enabled.
+ */
+union bdk_ap_ttbr0_el2
+{
+ uint64_t u;
+ struct bdk_ap_ttbr0_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t asid : 16; /**< [ 63: 48](R/W) An ASID for the translation table base address. The AP_TCR_EL1[A1]
+ field selects either AP_TTBR0_EL1[ASID] or AP_TTBR1_EL1[ASID].
+ If the implementation has only 8 bits of ASID, then the upper
+ 8 bits of this field are RES0. */
+ uint64_t baddr : 44; /**< [ 47: 4](R/W) Translation table base address, bits\<47:x\>. Bits \<x-1:0\> are
+ RES0.
+
+ x is based on the value of AP_TCR_EL1[T0SZ], the stage of
+ translation, and the memory translation granule size.
+ The AArch64 Virtual Memory System Architecture chapter
+ describes how x is calculated.
+ The value of x determines the required alignment of the
+ translation table, which must be aligned to 2^(x)
+ bytes.
+
+ If bits \<x-1:0\> are not all zero, this is a misaligned
+ Translation Table Base Address. Its effects are CONSTRAINED
+ UNPREDICTABLE, and can be one of the following:
+
+ Bits \<x-1:0\> are treated as if all the bits are zero. The
+ value read back from those bits might be the value written or
+ might be zero.
+
+ The calculation of an address for a translation table walk
+ using this register can be corrupted in those bits that are
+ nonzero. */
+ uint64_t reserved_1_3 : 3;
+ uint64_t cnp : 1; /**< [ 0: 0](R/W) Common not private. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnp : 1; /**< [ 0: 0](R/W) Common not private. */
+ uint64_t reserved_1_3 : 3;
+ uint64_t baddr : 44; /**< [ 47: 4](R/W) Translation table base address, bits\<47:x\>. Bits \<x-1:0\> are
+ RES0.
+
+ x is based on the value of AP_TCR_EL1[T0SZ], the stage of
+ translation, and the memory translation granule size.
+ The AArch64 Virtual Memory System Architecture chapter
+ describes how x is calculated.
+ The value of x determines the required alignment of the
+ translation table, which must be aligned to 2^(x)
+ bytes.
+
+ If bits \<x-1:0\> are not all zero, this is a misaligned
+ Translation Table Base Address. Its effects are CONSTRAINED
+ UNPREDICTABLE, and can be one of the following:
+
+ Bits \<x-1:0\> are treated as if all the bits are zero. The
+ value read back from those bits might be the value written or
+ might be zero.
+
+ The calculation of an address for a translation table walk
+ using this register can be corrupted in those bits that are
+ nonzero. */
+ uint64_t asid : 16; /**< [ 63: 48](R/W) An ASID for the translation table base address. The AP_TCR_EL1[A1]
+ field selects either AP_TTBR0_EL1[ASID] or AP_TTBR1_EL1[ASID].
+ If the implementation has only 8 bits of ASID, then the upper
+ 8 bits of this field are RES0. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_ttbr0_el2_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t asid : 16; /**< [ 63: 48](R/W) An ASID for the translation table base address. The AP_TCR_EL1[A1]
+ field selects either AP_TTBR0_EL1[ASID] or AP_TTBR1_EL1[ASID].
+ If the implementation has only 8 bits of ASID, then the upper
+ 8 bits of this field are RES0. */
+ uint64_t baddr : 44; /**< [ 47: 4](R/W) Translation table base address, bits\<47:x\>. Bits \<x-1:0\> are
+ RES0.
+
+ x is based on the value of AP_TCR_EL1[T0SZ], the stage of
+ translation, and the memory translation granule size.
+ The AArch64 Virtual Memory System Architecture chapter
+ describes how x is calculated.
+ The value of x determines the required alignment of the
+ translation table, which must be aligned to 2^(x)
+ bytes.
+
+ If bits \<x-1:0\> are not all zero, this is a misaligned
+ Translation Table Base Address. Its effects are CONSTRAINED
+ UNPREDICTABLE, and can be one of the following:
+
+ Bits \<x-1:0\> are treated as if all the bits are zero. The
+ value read back from those bits might be the value written or
+ might be zero.
+
+ The calculation of an address for a translation table walk
+ using this register can be corrupted in those bits that are
+ nonzero. */
+ uint64_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_3 : 4;
+ uint64_t baddr : 44; /**< [ 47: 4](R/W) Translation table base address, bits\<47:x\>. Bits \<x-1:0\> are
+ RES0.
+
+ x is based on the value of AP_TCR_EL1[T0SZ], the stage of
+ translation, and the memory translation granule size.
+ The AArch64 Virtual Memory System Architecture chapter
+ describes how x is calculated.
+ The value of x determines the required alignment of the
+ translation table, which must be aligned to 2^(x)
+ bytes.
+
+ If bits \<x-1:0\> are not all zero, this is a misaligned
+ Translation Table Base Address. Its effects are CONSTRAINED
+ UNPREDICTABLE, and can be one of the following:
+
+ Bits \<x-1:0\> are treated as if all the bits are zero. The
+ value read back from those bits might be the value written or
+ might be zero.
+
+ The calculation of an address for a translation table walk
+ using this register can be corrupted in those bits that are
+ nonzero. */
+ uint64_t asid : 16; /**< [ 63: 48](R/W) An ASID for the translation table base address. The AP_TCR_EL1[A1]
+ field selects either AP_TTBR0_EL1[ASID] or AP_TTBR1_EL1[ASID].
+ If the implementation has only 8 bits of ASID, then the upper
+ 8 bits of this field are RES0. */
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_ap_ttbr0_el2_s cn9; */
+};
+typedef union bdk_ap_ttbr0_el2 bdk_ap_ttbr0_el2_t;
+
+#define BDK_AP_TTBR0_EL2 BDK_AP_TTBR0_EL2_FUNC()
+static inline uint64_t BDK_AP_TTBR0_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TTBR0_EL2_FUNC(void)
+{
+ return 0x30402000000ll;
+}
+
+#define typedef_BDK_AP_TTBR0_EL2 bdk_ap_ttbr0_el2_t
+#define bustype_BDK_AP_TTBR0_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TTBR0_EL2 "AP_TTBR0_EL2"
+#define busnum_BDK_AP_TTBR0_EL2 0
+#define arguments_BDK_AP_TTBR0_EL2 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_ttbr0_el3
+ *
+ * AP Translation Table Base EL3 Register 0
+ * Holds the base address of the translation table for the stage
+ * 1 translation of memory accesses from EL3.
+ */
+union bdk_ap_ttbr0_el3
+{
+ uint64_t u;
+ struct bdk_ap_ttbr0_el3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t baddr : 44; /**< [ 47: 4](R/W) Translation table base address, bits[47:x]. Bits [x-1:0] are
+ RES0.
+
+ x is based on the value of TCR_EL*[T0SZ], the stage of
+ translation, and the memory translation granule size.
+ The AArch64 Virtual Memory System Architecture chapter
+ describes how x is calculated.
+ The value of x determines the required alignment of the
+ translation table, which must be aligned to 2^(x)
+ bytes.
+
+ If bits [x-1:0] are not all zero, this is a misaligned
+ Translation Table Base Address. Its effects are CONSTRAINED
+ UNPREDICTABLE, and can be one of the following:
+
+ Bits [x-1:0] are treated as if all the bits are zero. The
+ value read back from those bits might be the value written or
+ might be zero.
+
+ The calculation of an address for a translation table walk
+ using this register can be corrupted in those bits that are
+ nonzero. */
+ uint64_t reserved_1_3 : 3;
+ uint64_t cnp : 1; /**< [ 0: 0](R/W) Common not private. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnp : 1; /**< [ 0: 0](R/W) Common not private. */
+ uint64_t reserved_1_3 : 3;
+ uint64_t baddr : 44; /**< [ 47: 4](R/W) Translation table base address, bits[47:x]. Bits [x-1:0] are
+ RES0.
+
+ x is based on the value of TCR_EL*[T0SZ], the stage of
+ translation, and the memory translation granule size.
+ The AArch64 Virtual Memory System Architecture chapter
+ describes how x is calculated.
+ The value of x determines the required alignment of the
+ translation table, which must be aligned to 2^(x)
+ bytes.
+
+ If bits [x-1:0] are not all zero, this is a misaligned
+ Translation Table Base Address. Its effects are CONSTRAINED
+ UNPREDICTABLE, and can be one of the following:
+
+ Bits [x-1:0] are treated as if all the bits are zero. The
+ value read back from those bits might be the value written or
+ might be zero.
+
+ The calculation of an address for a translation table walk
+ using this register can be corrupted in those bits that are
+ nonzero. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_ttbr0_el3_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t baddr : 44; /**< [ 47: 4](R/W) Translation table base address, bits[47:x]. Bits [x-1:0] are
+ RES0.
+
+ x is based on the value of TCR_EL*[T0SZ], the stage of
+ translation, and the memory translation granule size.
+ The AArch64 Virtual Memory System Architecture chapter
+ describes how x is calculated.
+ The value of x determines the required alignment of the
+ translation table, which must be aligned to 2^(x)
+ bytes.
+
+ If bits [x-1:0] are not all zero, this is a misaligned
+ Translation Table Base Address. Its effects are CONSTRAINED
+ UNPREDICTABLE, and can be one of the following:
+
+ Bits [x-1:0] are treated as if all the bits are zero. The
+ value read back from those bits might be the value written or
+ might be zero.
+
+ The calculation of an address for a translation table walk
+ using this register can be corrupted in those bits that are
+ nonzero. */
+ uint64_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_3 : 4;
+ uint64_t baddr : 44; /**< [ 47: 4](R/W) Translation table base address, bits[47:x]. Bits [x-1:0] are
+ RES0.
+
+ x is based on the value of TCR_EL*[T0SZ], the stage of
+ translation, and the memory translation granule size.
+ The AArch64 Virtual Memory System Architecture chapter
+ describes how x is calculated.
+ The value of x determines the required alignment of the
+ translation table, which must be aligned to 2^(x)
+ bytes.
+
+ If bits [x-1:0] are not all zero, this is a misaligned
+ Translation Table Base Address. Its effects are CONSTRAINED
+ UNPREDICTABLE, and can be one of the following:
+
+ Bits [x-1:0] are treated as if all the bits are zero. The
+ value read back from those bits might be the value written or
+ might be zero.
+
+ The calculation of an address for a translation table walk
+ using this register can be corrupted in those bits that are
+ nonzero. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_ap_ttbr0_el3_s cn9; */
+};
+typedef union bdk_ap_ttbr0_el3 bdk_ap_ttbr0_el3_t;
+
+#define BDK_AP_TTBR0_EL3 BDK_AP_TTBR0_EL3_FUNC()
+static inline uint64_t BDK_AP_TTBR0_EL3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TTBR0_EL3_FUNC(void)
+{
+ return 0x30602000000ll;
+}
+
+#define typedef_BDK_AP_TTBR0_EL3 bdk_ap_ttbr0_el3_t
+#define bustype_BDK_AP_TTBR0_EL3 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TTBR0_EL3 "AP_TTBR0_EL3"
+#define busnum_BDK_AP_TTBR0_EL3 0
+#define arguments_BDK_AP_TTBR0_EL3 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_ttbr1_el1
+ *
+ * AP Translation Table Base Register 1
+ * Holds the base address of translation table 1, and information
+ * about the memory it occupies. This is one of the translation
+ * tables for the stage 1 translation of memory accesses at EL0
+ * and EL1.
+ */
+union bdk_ap_ttbr1_el1
+{
+ uint64_t u;
+ struct bdk_ap_ttbr1_el1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t asid : 16; /**< [ 63: 48](R/W) An ASID for the translation table base address. The AP_TCR_EL1[A1]
+ field selects either AP_TTBR0_EL1[ASID] or AP_TTBR1_EL1[ASID].
+ If the implementation has only 8 bits of ASID, then the upper
+ 8 bits of this field are RES0. */
+ uint64_t baddr : 44; /**< [ 47: 4](R/W) Translation table base address, bits\<47:x\>. Bits \<x-1:0\> are
+ RES0.
+
+ x is based on the value of AP_TCR_EL1[T0SZ], the stage of
+ translation, and the memory translation granule size.
+ The AArch64 Virtual Memory System Architecture chapter
+ describes how x is calculated.
+ The value of x determines the required alignment of the
+ translation table, which must be aligned to 2^(x)
+ bytes.
+
+ If bits \<x-1:0\> are not all zero, this is a misaligned
+ Translation Table Base Address. Its effects are CONSTRAINED
+ UNPREDICTABLE, and can be one of the following:
+
+ Bits \<x-1:0\> are treated as if all the bits are zero. The
+ value read back from those bits might be the value written or
+ might be zero.
+
+ The calculation of an address for a translation table walk
+ using this register can be corrupted in those bits that are
+ nonzero. */
+ uint64_t reserved_1_3 : 3;
+ uint64_t cnp : 1; /**< [ 0: 0](R/W) Common not private. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnp : 1; /**< [ 0: 0](R/W) Common not private. */
+ uint64_t reserved_1_3 : 3;
+ uint64_t baddr : 44; /**< [ 47: 4](R/W) Translation table base address, bits\<47:x\>. Bits \<x-1:0\> are
+ RES0.
+
+ x is based on the value of AP_TCR_EL1[T0SZ], the stage of
+ translation, and the memory translation granule size.
+ The AArch64 Virtual Memory System Architecture chapter
+ describes how x is calculated.
+ The value of x determines the required alignment of the
+ translation table, which must be aligned to 2^(x)
+ bytes.
+
+ If bits \<x-1:0\> are not all zero, this is a misaligned
+ Translation Table Base Address. Its effects are CONSTRAINED
+ UNPREDICTABLE, and can be one of the following:
+
+ Bits \<x-1:0\> are treated as if all the bits are zero. The
+ value read back from those bits might be the value written or
+ might be zero.
+
+ The calculation of an address for a translation table walk
+ using this register can be corrupted in those bits that are
+ nonzero. */
+ uint64_t asid : 16; /**< [ 63: 48](R/W) An ASID for the translation table base address. The AP_TCR_EL1[A1]
+ field selects either AP_TTBR0_EL1[ASID] or AP_TTBR1_EL1[ASID].
+ If the implementation has only 8 bits of ASID, then the upper
+ 8 bits of this field are RES0. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_ttbr1_el1_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t asid : 16; /**< [ 63: 48](R/W) An ASID for the translation table base address. The AP_TCR_EL1[A1]
+ field selects either AP_TTBR0_EL1[ASID] or AP_TTBR1_EL1[ASID].
+ If the implementation has only 8 bits of ASID, then the upper
+ 8 bits of this field are RES0. */
+ uint64_t baddr : 44; /**< [ 47: 4](R/W) Translation table base address, bits\<47:x\>. Bits \<x-1:0\> are
+ RES0.
+
+ x is based on the value of AP_TCR_EL1[T0SZ], the stage of
+ translation, and the memory translation granule size.
+ The AArch64 Virtual Memory System Architecture chapter
+ describes how x is calculated.
+ The value of x determines the required alignment of the
+ translation table, which must be aligned to 2^(x)
+ bytes.
+
+ If bits \<x-1:0\> are not all zero, this is a misaligned
+ Translation Table Base Address. Its effects are CONSTRAINED
+ UNPREDICTABLE, and can be one of the following:
+
+ Bits \<x-1:0\> are treated as if all the bits are zero. The
+ value read back from those bits might be the value written or
+ might be zero.
+
+ The calculation of an address for a translation table walk
+ using this register can be corrupted in those bits that are
+ nonzero. */
+ uint64_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_3 : 4;
+ uint64_t baddr : 44; /**< [ 47: 4](R/W) Translation table base address, bits\<47:x\>. Bits \<x-1:0\> are
+ RES0.
+
+ x is based on the value of AP_TCR_EL1[T0SZ], the stage of
+ translation, and the memory translation granule size.
+ The AArch64 Virtual Memory System Architecture chapter
+ describes how x is calculated.
+ The value of x determines the required alignment of the
+ translation table, which must be aligned to 2^(x)
+ bytes.
+
+ If bits \<x-1:0\> are not all zero, this is a misaligned
+ Translation Table Base Address. Its effects are CONSTRAINED
+ UNPREDICTABLE, and can be one of the following:
+
+ Bits \<x-1:0\> are treated as if all the bits are zero. The
+ value read back from those bits might be the value written or
+ might be zero.
+
+ The calculation of an address for a translation table walk
+ using this register can be corrupted in those bits that are
+ nonzero. */
+ uint64_t asid : 16; /**< [ 63: 48](R/W) An ASID for the translation table base address. The AP_TCR_EL1[A1]
+ field selects either AP_TTBR0_EL1[ASID] or AP_TTBR1_EL1[ASID].
+ If the implementation has only 8 bits of ASID, then the upper
+ 8 bits of this field are RES0. */
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_ap_ttbr1_el1_s cn9; */
+};
+typedef union bdk_ap_ttbr1_el1 bdk_ap_ttbr1_el1_t;
+
+#define BDK_AP_TTBR1_EL1 BDK_AP_TTBR1_EL1_FUNC()
+static inline uint64_t BDK_AP_TTBR1_EL1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TTBR1_EL1_FUNC(void)
+{
+ return 0x30002000100ll;
+}
+
+#define typedef_BDK_AP_TTBR1_EL1 bdk_ap_ttbr1_el1_t
+#define bustype_BDK_AP_TTBR1_EL1 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TTBR1_EL1 "AP_TTBR1_EL1"
+#define busnum_BDK_AP_TTBR1_EL1 0
+#define arguments_BDK_AP_TTBR1_EL1 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_ttbr1_el12
+ *
+ * AP Translation Table Base EL1/2 Register 1
+ * Alias of AP_TTBR1_EL1 from EL2 when AP_HCR_EL2[E2H] is set.
+ */
+union bdk_ap_ttbr1_el12
+{
+ uint64_t u;
+ struct bdk_ap_ttbr1_el12_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_ttbr1_el12_s cn; */
+};
+typedef union bdk_ap_ttbr1_el12 bdk_ap_ttbr1_el12_t;
+
+#define BDK_AP_TTBR1_EL12 BDK_AP_TTBR1_EL12_FUNC()
+static inline uint64_t BDK_AP_TTBR1_EL12_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TTBR1_EL12_FUNC(void)
+{
+ return 0x30502000100ll;
+}
+
+#define typedef_BDK_AP_TTBR1_EL12 bdk_ap_ttbr1_el12_t
+#define bustype_BDK_AP_TTBR1_EL12 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TTBR1_EL12 "AP_TTBR1_EL12"
+#define busnum_BDK_AP_TTBR1_EL12 0
+#define arguments_BDK_AP_TTBR1_EL12 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_ttbr1_el2
+ *
+ * AP EL2 Translation Table Base (v8.1) Register 1
+ * Holds the base address of translation table 1, and information
+ * about the memory it occupies. This is one of the translation
+ * tables for the stage 1 translation of memory accesses at EL2.
+ */
+union bdk_ap_ttbr1_el2
+{
+ uint64_t u;
+ struct bdk_ap_ttbr1_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t asid : 16; /**< [ 63: 48](R/W) An ASID for the translation table base address. The AP_TCR_EL2[A1]
+ field selects either AP_TTBR0_EL2[ASID] or AP_TTBR1_EL2[ASID].
+ If the implementation has only 8 bits of ASID, then the upper
+ 8 bits of this field are RES0. */
+ uint64_t baddr : 44; /**< [ 47: 4](R/W) Translation table base address, bits\<47:x\>. Bits \<x-1:0\> are
+ RES0.
+
+ x is based on the value of AP_TCR_EL2[T0SZ], the stage of
+ translation, and the memory translation granule size.
+ The AArch64 Virtual Memory System Architecture chapter
+ describes how x is calculated.
+ The value of x determines the required alignment of the
+ translation table, which must be aligned to 2^(x)
+ bytes.
+
+ If bits \<x-1:0\> are not all zero, this is a misaligned
+ Translation Table Base Address. Its effects are CONSTRAINED
+ UNPREDICTABLE, and can be one of the following:
+
+ Bits \<x-1:0\> are treated as if all the bits are zero. The
+ value read back from those bits might be the value written or
+ might be zero.
+
+ The calculation of an address for a translation table walk
+ using this register can be corrupted in those bits that are
+ nonzero. */
+ uint64_t reserved_1_3 : 3;
+ uint64_t cnp : 1; /**< [ 0: 0](R/W) Common not private. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnp : 1; /**< [ 0: 0](R/W) Common not private. */
+ uint64_t reserved_1_3 : 3;
+ uint64_t baddr : 44; /**< [ 47: 4](R/W) Translation table base address, bits\<47:x\>. Bits \<x-1:0\> are
+ RES0.
+
+ x is based on the value of AP_TCR_EL2[T0SZ], the stage of
+ translation, and the memory translation granule size.
+ The AArch64 Virtual Memory System Architecture chapter
+ describes how x is calculated.
+ The value of x determines the required alignment of the
+ translation table, which must be aligned to 2^(x)
+ bytes.
+
+ If bits \<x-1:0\> are not all zero, this is a misaligned
+ Translation Table Base Address. Its effects are CONSTRAINED
+ UNPREDICTABLE, and can be one of the following:
+
+ Bits \<x-1:0\> are treated as if all the bits are zero. The
+ value read back from those bits might be the value written or
+ might be zero.
+
+ The calculation of an address for a translation table walk
+ using this register can be corrupted in those bits that are
+ nonzero. */
+ uint64_t asid : 16; /**< [ 63: 48](R/W) An ASID for the translation table base address. The AP_TCR_EL2[A1]
+ field selects either AP_TTBR0_EL2[ASID] or AP_TTBR1_EL2[ASID].
+ If the implementation has only 8 bits of ASID, then the upper
+ 8 bits of this field are RES0. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_ttbr1_el2_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t asid : 16; /**< [ 63: 48](R/W) An ASID for the translation table base address. The AP_TCR_EL2[A1]
+ field selects either AP_TTBR0_EL2[ASID] or AP_TTBR1_EL2[ASID].
+ If the implementation has only 8 bits of ASID, then the upper
+ 8 bits of this field are RES0. */
+ uint64_t baddr : 44; /**< [ 47: 4](R/W) Translation table base address, bits\<47:x\>. Bits \<x-1:0\> are
+ RES0.
+
+ x is based on the value of AP_TCR_EL2[T0SZ], the stage of
+ translation, and the memory translation granule size.
+ The AArch64 Virtual Memory System Architecture chapter
+ describes how x is calculated.
+ The value of x determines the required alignment of the
+ translation table, which must be aligned to 2^(x)
+ bytes.
+
+ If bits \<x-1:0\> are not all zero, this is a misaligned
+ Translation Table Base Address. Its effects are CONSTRAINED
+ UNPREDICTABLE, and can be one of the following:
+
+ Bits \<x-1:0\> are treated as if all the bits are zero. The
+ value read back from those bits might be the value written or
+ might be zero.
+
+ The calculation of an address for a translation table walk
+ using this register can be corrupted in those bits that are
+ nonzero. */
+ uint64_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_3 : 4;
+ uint64_t baddr : 44; /**< [ 47: 4](R/W) Translation table base address, bits\<47:x\>. Bits \<x-1:0\> are
+ RES0.
+
+ x is based on the value of AP_TCR_EL2[T0SZ], the stage of
+ translation, and the memory translation granule size.
+ The AArch64 Virtual Memory System Architecture chapter
+ describes how x is calculated.
+ The value of x determines the required alignment of the
+ translation table, which must be aligned to 2^(x)
+ bytes.
+
+ If bits \<x-1:0\> are not all zero, this is a misaligned
+ Translation Table Base Address. Its effects are CONSTRAINED
+ UNPREDICTABLE, and can be one of the following:
+
+ Bits \<x-1:0\> are treated as if all the bits are zero. The
+ value read back from those bits might be the value written or
+ might be zero.
+
+ The calculation of an address for a translation table walk
+ using this register can be corrupted in those bits that are
+ nonzero. */
+ uint64_t asid : 16; /**< [ 63: 48](R/W) An ASID for the translation table base address. The AP_TCR_EL2[A1]
+ field selects either AP_TTBR0_EL2[ASID] or AP_TTBR1_EL2[ASID].
+ If the implementation has only 8 bits of ASID, then the upper
+ 8 bits of this field are RES0. */
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_ap_ttbr1_el2_s cn9; */
+};
+typedef union bdk_ap_ttbr1_el2 bdk_ap_ttbr1_el2_t;
+
+#define BDK_AP_TTBR1_EL2 BDK_AP_TTBR1_EL2_FUNC()
+static inline uint64_t BDK_AP_TTBR1_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_TTBR1_EL2_FUNC(void)
+{
+ return 0x30402000100ll;
+}
+
+#define typedef_BDK_AP_TTBR1_EL2 bdk_ap_ttbr1_el2_t
+#define bustype_BDK_AP_TTBR1_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_TTBR1_EL2 "AP_TTBR1_EL2"
+#define busnum_BDK_AP_TTBR1_EL2 0
+#define arguments_BDK_AP_TTBR1_EL2 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_uao
+ *
+ * AP User Access Override Register
+ * v8.2: User Access Override bit.
+ *
+ * When 0, the behaviour of LDTR* /STTR* instructions is as defined in the base
+ * ARMv8 architecture.
+ * When 1, LDTR* /STTR* instructions executed at EL1, or at EL2 with
+ * AP_HCR_EL2[E2H] == 1 && AP_HCR_EL2[TGE] == 1, behave as the equivalent
+ * LDR* /STR* instructions.
+ *
+ * UAO is held in SPSR_ELx[23] and DSPSR_EL0[23].
+ * PSTATE.UAO is copied to SPSR_ELx.UAO and is then set to 0 on an exception taken from AArch64
+ * to AArch64.
+ * PSTATE.UAO is set to 0 on an exception taken from AArch32 to AArch64.
+ * SPSR_ELx.UAO is set to 0 on an exception taken from AArch32 to AArch64.
+ * SPSR_ELx.UAO is copied to PSTATE.UAO on an exception return to AArch64 from AArch64.
+ * PSTATE.UAO is set to zero by a DCPS instruction to AArch64 in Debug state.
+ * SPSR_ELx.UAO is copied to PSTATE.UAO by DRPS to AArch64 from AArch64 in Debug state.
+ * DSPSR_EL0.UAO is copied to PSTATE.UAO on exit from Debug state to AArch64.
+ * PSTATE.UAO is copied to DSPSR_EL0.UAO on entry to Debug state from AArch64.
+ *
+ * Note: PSTATE.UAO is unchanged on entry into Debug state.
+ */
+union bdk_ap_uao
+{
+ uint64_t u;
+ struct bdk_ap_uao_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t uao : 1; /**< [ 23: 23](R/W) User access override bit. */
+ uint64_t reserved_0_22 : 23;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_22 : 23;
+ uint64_t uao : 1; /**< [ 23: 23](R/W) User access override bit. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_uao_s cn; */
+};
+typedef union bdk_ap_uao bdk_ap_uao_t;
+
+#define BDK_AP_UAO BDK_AP_UAO_FUNC()
+static inline uint64_t BDK_AP_UAO_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_UAO_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x30004020400ll;
+ __bdk_csr_fatal("AP_UAO", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_UAO bdk_ap_uao_t
+#define bustype_BDK_AP_UAO BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_UAO "AP_UAO"
+#define busnum_BDK_AP_UAO 0
+#define arguments_BDK_AP_UAO -1,-1,-1,-1
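+
+/* Editor's sketch (not part of the imported BDK sources): since UAO lives in
+ * bit <23> of SPSR_ELx and DSPSR_EL0, the bdk_ap_uao_t layout above can pull
+ * the bit out of a saved SPSR image, e.g. one captured on exception entry. */
+static inline int bdk_ap_spsr_uao(uint64_t spsr)
+{
+    bdk_ap_uao_t v;
+    v.u = spsr; /* reuse the AP_UAO layout: UAO is bit <23> */
+    return v.s.uao;
+}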
+
+/**
+ * Register (SYSREG) ap_vbar_el#
+ *
+ * AP Vector Base Address EL* Register
+ * Holds the exception base address for any exception that is
+ * taken to EL*.
+ */
+union bdk_ap_vbar_elx
+{
+ uint64_t u;
+ struct bdk_ap_vbar_elx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 53; /**< [ 63: 11](R/W) Vector base address. Base address of the exception vectors for
+ exceptions taken in EL*.
+ If tagged addresses are being used, bits \<55:48\> of VBAR_EL*
+ must be 0 or else the use of the vector address will result in
+ a recursive exception.
+
+ If tagged addresses are not being used, bits \<63:48\> of
+ VBAR_EL* must be 0 or else the use of the vector address will
+ result in a recursive exception. */
+ uint64_t reserved_0_10 : 11;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_10 : 11;
+ uint64_t data : 53; /**< [ 63: 11](R/W) Vector base address. Base address of the exception vectors for
+ exceptions taken in EL*.
+ If tagged addresses are being used, bits \<55:48\> of VBAR_EL*
+ must be 0 or else the use of the vector address will result in
+ a recursive exception.
+
+ If tagged addresses are not being used, bits \<63:48\> of
+ VBAR_EL* must be 0 or else the use of the vector address will
+ result in a recursive exception. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_vbar_elx_s cn; */
+};
+typedef union bdk_ap_vbar_elx bdk_ap_vbar_elx_t;
+
+static inline uint64_t BDK_AP_VBAR_ELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_VBAR_ELX(unsigned long a)
+{
+    /* The VBAR_ELx encodings differ in the op1 byte (bits <35:32>), matching
+       the AP_TTBR0_EL1/EL2/EL3 pattern above, so each EL needs its own value. */
+    if (a == 1)
+        return 0x3000c000000ll;
+    if (a == 2)
+        return 0x3040c000000ll;
+    if (a == 3)
+        return 0x3060c000000ll;
+ __bdk_csr_fatal("AP_VBAR_ELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_VBAR_ELX(a) bdk_ap_vbar_elx_t
+#define bustype_BDK_AP_VBAR_ELX(a) BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_VBAR_ELX(a) "AP_VBAR_ELX"
+#define busnum_BDK_AP_VBAR_ELX(a) (a)
+#define arguments_BDK_AP_VBAR_ELX(a) (a),-1,-1,-1
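+
+/* Editor's sketch (not part of the imported BDK sources): checking a
+ * candidate vector base against the constraints in the DATA description.
+ * Bits <10:0> are reserved, so the base must be 2 KB aligned; with tagged
+ * addresses bits <55:48> must be zero, otherwise bits <63:48> must be zero,
+ * or taking an exception through the vectors recurses. */
+static inline int bdk_ap_vbar_base_ok(uint64_t base, int tagged)
+{
+    if (base & 0x7ffull) /* bits <10:0> must be zero */
+        return 0;
+    uint64_t top = tagged ? (base & 0x00ff000000000000ull)  /* bits <55:48> */
+                          : (base & 0xffff000000000000ull); /* bits <63:48> */
+    return top == 0;
+}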
+
+/**
+ * Register (SYSREG) ap_vbar_el12
+ *
+ * AP Vector Base Address EL1/2 Register
+ * Alias of AP_VBAR_EL1 when accessed at EL2/3 and AP_HCR_EL2[E2H] is set.
+ */
+union bdk_ap_vbar_el12
+{
+ uint64_t u;
+ struct bdk_ap_vbar_el12_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_vbar_el12_s cn; */
+};
+typedef union bdk_ap_vbar_el12 bdk_ap_vbar_el12_t;
+
+#define BDK_AP_VBAR_EL12 BDK_AP_VBAR_EL12_FUNC()
+static inline uint64_t BDK_AP_VBAR_EL12_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_VBAR_EL12_FUNC(void)
+{
+ return 0x3050c000000ll;
+}
+
+#define typedef_BDK_AP_VBAR_EL12 bdk_ap_vbar_el12_t
+#define bustype_BDK_AP_VBAR_EL12 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_VBAR_EL12 "AP_VBAR_EL12"
+#define busnum_BDK_AP_VBAR_EL12 0
+#define arguments_BDK_AP_VBAR_EL12 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_vdisr_el2
+ *
+ * AP Virtual Deferred Interrupt Status Register
+ * Records that a virtual SError interrupt has been consumed by an ESB instruction executed at
+ * nonsecure EL1.
+ *
+ * Usage constraints:
+ * VDISR_EL2 is UNDEFINED at EL1 and EL0.
+ * If EL1 is using AArch64 and HCR_EL2.AMO is set to 1, then direct reads and writes of
+ * DISR_EL1 at nonsecure EL1 access VDISR_EL2.
+ * If EL1 is using AArch32 and HCR_EL2.AMO is set to 1, then direct reads and writes of DISR at
+ * nonsecure EL1 access VDISR_EL2.
+ * An indirect write to VDISR_EL2 made by an ESB instruction does not require an explicit
+ * synchronization operation for the value written to be observed by a direct read of DISR_EL1 or
+ * DISR occurring in program order after the ESB.
+ */
+union bdk_ap_vdisr_el2
+{
+ uint64_t u;
+ struct bdk_ap_vdisr_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t aa : 1; /**< [ 31: 31](R/W) Set to 1 when ESB defers a virtual SError interrupt. */
+ uint64_t reserved_25_30 : 6;
+ uint64_t ids : 1; /**< [ 24: 24](R/W) Contains the value from AP_VSESR_EL2[IDS]. */
+ uint64_t iss : 24; /**< [ 23: 0](R/W) Contains the value from AP_VSESR_EL2[23:0]. */
+#else /* Word 0 - Little Endian */
+ uint64_t iss : 24; /**< [ 23: 0](R/W) Contains the value from AP_VSESR_EL2[23:0]. */
+ uint64_t ids : 1; /**< [ 24: 24](R/W) Contains the value from AP_VSESR_EL2[IDS]. */
+ uint64_t reserved_25_30 : 6;
+ uint64_t aa : 1; /**< [ 31: 31](R/W) Set to 1 when ESB defers a virtual SError interrupt. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_vdisr_el2_s cn; */
+};
+typedef union bdk_ap_vdisr_el2 bdk_ap_vdisr_el2_t;
+
+#define BDK_AP_VDISR_EL2 BDK_AP_VDISR_EL2_FUNC()
+static inline uint64_t BDK_AP_VDISR_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_VDISR_EL2_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x3040c010100ll;
+ __bdk_csr_fatal("AP_VDISR_EL2", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_VDISR_EL2 bdk_ap_vdisr_el2_t
+#define bustype_BDK_AP_VDISR_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_VDISR_EL2 "AP_VDISR_EL2"
+#define busnum_BDK_AP_VDISR_EL2 0
+#define arguments_BDK_AP_VDISR_EL2 -1,-1,-1,-1
+
+/**
+ * Register (SYSREG) ap_vmpidr_el2
+ *
+ * AP Virtualization Multiprocessor ID Register
+ * Holds the value of the Virtualization Multiprocessor ID. This
+ * is the value returned by nonsecure EL1 reads of AP_MPIDR_EL1.
+ */
+union bdk_ap_vmpidr_el2
+{
+ uint64_t u;
+ struct bdk_ap_vmpidr_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_40_63 : 24;
+ uint64_t aff3 : 8; /**< [ 39: 32](R/W) Affinity level 3. Highest level affinity field. */
+ uint64_t rsvd_31 : 1; /**< [ 31: 31](RO) Reserved 1. */
+ uint64_t u : 1; /**< [ 30: 30](R/W) Indicates a Uniprocessor system, as distinct from PE 0 in a
+ multiprocessor system.
+ 0 = Processor is part of a multiprocessor system.
+ 1 = Processor is part of a uniprocessor system. */
+ uint64_t reserved_25_29 : 5;
+ uint64_t mt : 1; /**< [ 24: 24](R/W) Indicates whether the lowest level of affinity consists of
+ logical PEs that are implemented using a multi-threading type
+ approach.
+ 0 = Performance of PEs at the lowest affinity level is largely
+ independent.
+ 1 = Performance of PEs at the lowest affinity level is very
+ interdependent. */
+ uint64_t aff2 : 8; /**< [ 23: 16](R/W) Affinity level 2. Second highest level affinity field. */
+ uint64_t aff1 : 8; /**< [ 15: 8](R/W) Affinity level 1. Third highest level affinity field. */
+ uint64_t aff0 : 8; /**< [ 7: 0](R/W) Affinity level 0. Lowest level affinity field. */
+#else /* Word 0 - Little Endian */
+ uint64_t aff0 : 8; /**< [ 7: 0](R/W) Affinity level 0. Lowest level affinity field. */
+ uint64_t aff1 : 8; /**< [ 15: 8](R/W) Affinity level 1. Third highest level affinity field. */
+ uint64_t aff2 : 8; /**< [ 23: 16](R/W) Affinity level 2. Second highest level affinity field. */
+ uint64_t mt : 1; /**< [ 24: 24](R/W) Indicates whether the lowest level of affinity consists of
+ logical PEs that are implemented using a multi-threading type
+ approach.
+ 0 = Performance of PEs at the lowest affinity level is largely
+ independent.
+ 1 = Performance of PEs at the lowest affinity level is very
+ interdependent. */
+ uint64_t reserved_25_29 : 5;
+ uint64_t u : 1; /**< [ 30: 30](R/W) Indicates a Uniprocessor system, as distinct from PE 0 in a
+ multiprocessor system.
+ 0 = Processor is part of a multiprocessor system.
+ 1 = Processor is part of a uniprocessor system. */
+ uint64_t rsvd_31 : 1; /**< [ 31: 31](RO) Reserved 1. */
+ uint64_t aff3 : 8; /**< [ 39: 32](R/W) Affinity level 3. Highest level affinity field. */
+ uint64_t reserved_40_63 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_vmpidr_el2_s cn; */
+};
+typedef union bdk_ap_vmpidr_el2 bdk_ap_vmpidr_el2_t;
+
+#define BDK_AP_VMPIDR_EL2 BDK_AP_VMPIDR_EL2_FUNC()
+static inline uint64_t BDK_AP_VMPIDR_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_VMPIDR_EL2_FUNC(void)
+{
+ return 0x30400000500ll;
+}
+
+#define typedef_BDK_AP_VMPIDR_EL2 bdk_ap_vmpidr_el2_t
+#define bustype_BDK_AP_VMPIDR_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_VMPIDR_EL2 "AP_VMPIDR_EL2"
+#define busnum_BDK_AP_VMPIDR_EL2 0
+#define arguments_BDK_AP_VMPIDR_EL2 -1,-1,-1,-1
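+
+/* Editor's sketch (not part of the imported BDK sources): packing the four
+ * affinity levels described above into an AP_VMPIDR_EL2 image. The affinity
+ * values are hypothetical; a hypervisor would derive them from the virtual
+ * CPU topology it presents to nonsecure EL1. */
+static inline uint64_t bdk_ap_vmpidr_compose(uint8_t aff3, uint8_t aff2, uint8_t aff1, uint8_t aff0)
+{
+    bdk_ap_vmpidr_el2_t v;
+    v.u = 0;
+    v.s.rsvd_31 = 1; /* bit <31> is reserved-one */
+    v.s.aff3 = aff3; /* highest level affinity field */
+    v.s.aff2 = aff2;
+    v.s.aff1 = aff1;
+    v.s.aff0 = aff0; /* lowest level affinity field */
+    return v.u;
+}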
+
+/**
+ * Register (SYSREG) ap_vpidr_el2
+ *
+ * AP Virtualization Processor ID Register
+ * Holds the value of the Virtualization Processor ID. This is
+ * the value returned by nonsecure EL1 reads of AP_MIDR_EL1.
+ */
+union bdk_ap_vpidr_el2
+{
+ uint32_t u;
+ struct bdk_ap_vpidr_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t implementer : 8; /**< [ 31: 24](R/W) The implementer code. This field must hold an implementer code
+ that has been assigned by ARM.
+ Hex representation = ASCII representation = implementer:
+ 0x41 = 'A' = ARM Limited.
+ 0x42 = 'B' = Broadcom Corporation.
+ 0x43 = 'C' = Cavium Inc.
+ 0x44 = 'D' = Digital Equipment Corporation.
+ 0x49 = 'I' = Infineon Technologies AG.
+ 0x4D = 'M' = Motorola or Freescale Semiconductor Inc.
+ 0x4E = 'N' = NVIDIA Corporation.
+ 0x50 = 'P' = Applied Micro Circuits Corporation.
+ 0x51 = 'Q' = Qualcomm Inc.
+ 0x56 = 'V' = Marvell International Ltd.
+ 0x69 = 'i' = Intel Corporation.
+
+ ARM can assign codes that are not published in this manual.
+ All values not assigned by ARM are reserved and must not be
+ used. */
+ uint32_t variant : 4; /**< [ 23: 20](R/W) An implementation defined variant number. Typically, this
+ field is used to distinguish between different product
+ variants, or major revisions of a product. */
+ uint32_t architecture : 4; /**< [ 19: 16](R/W) Architecture:
+ 0x1 = ARMv4.
+ 0x2 = ARMv4T.
+ 0x3 = ARMv5 (obsolete).
+ 0x4 = ARMv5T.
+ 0x5 = ARMv5TE.
+ 0x6 = ARMv5TEJ.
+ 0x7 = ARMv6.
+ 0xF = Defined by CPUID scheme.
+ _ All other values are reserved. */
+ uint32_t partnum : 12; /**< [ 15: 4](R/W) An implementation defined primary part number for the device.
+ On processors implemented by ARM, if the top four bits of the
+ primary part number are 0x0 or 0x7, the variant and
+ architecture are encoded differently. */
+ uint32_t revision : 4; /**< [ 3: 0](R/W) An implementation defined revision number for the device. */
+#else /* Word 0 - Little Endian */
+ uint32_t revision : 4; /**< [ 3: 0](R/W) An implementation defined revision number for the device. */
+ uint32_t partnum : 12; /**< [ 15: 4](R/W) An implementation defined primary part number for the device.
+ On processors implemented by ARM, if the top four bits of the
+ primary part number are 0x0 or 0x7, the variant and
+ architecture are encoded differently. */
+ uint32_t architecture : 4; /**< [ 19: 16](R/W) Architecture:
+ 0x1 = ARMv4.
+ 0x2 = ARMv4T.
+ 0x3 = ARMv5 (obsolete).
+ 0x4 = ARMv5T.
+ 0x5 = ARMv5TE.
+ 0x6 = ARMv5TEJ.
+ 0x7 = ARMv6.
+ 0xF = Defined by CPUID scheme.
+ _ All other values are reserved. */
+ uint32_t variant : 4; /**< [ 23: 20](R/W) An implementation defined variant number. Typically, this
+ field is used to distinguish between different product
+ variants, or major revisions of a product. */
+ uint32_t implementer : 8; /**< [ 31: 24](R/W) The implementer code. This field must hold an implementer code
+ that has been assigned by ARM.
+ Hex representation = ASCII representation = implementer:
+ 0x41 = 'A' = ARM Limited.
+ 0x42 = 'B' = Broadcom Corporation.
+ 0x43 = 'C' = Cavium Inc.
+ 0x44 = 'D' = Digital Equipment Corporation.
+ 0x49 = 'I' = Infineon Technologies AG.
+ 0x4D = 'M' = Motorola or Freescale Semiconductor Inc.
+ 0x4E = 'N' = NVIDIA Corporation.
+ 0x50 = 'P' = Applied Micro Circuits Corporation.
+ 0x51 = 'Q' = Qualcomm Inc.
+ 0x56 = 'V' = Marvell International Ltd.
+ 0x69 = 'i' = Intel Corporation.
+
+ ARM can assign codes that are not published in this manual.
+ All values not assigned by ARM are reserved and must not be
+ used. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_vpidr_el2_s cn; */
+};
+typedef union bdk_ap_vpidr_el2 bdk_ap_vpidr_el2_t;
+
+#define BDK_AP_VPIDR_EL2 BDK_AP_VPIDR_EL2_FUNC()
+static inline uint64_t BDK_AP_VPIDR_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_VPIDR_EL2_FUNC(void)
+{
+ return 0x30400000000ll;
+}
+
+#define typedef_BDK_AP_VPIDR_EL2 bdk_ap_vpidr_el2_t
+#define bustype_BDK_AP_VPIDR_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_VPIDR_EL2 "AP_VPIDR_EL2"
+#define busnum_BDK_AP_VPIDR_EL2 0
+#define arguments_BDK_AP_VPIDR_EL2 -1,-1,-1,-1
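+
+/* Editor's sketch (not part of the imported BDK sources): decoding the
+ * MIDR-format fields of an AP_VPIDR_EL2 image with the union above, here
+ * testing for the Cavium implementer code (0x43, ASCII 'C') listed in the
+ * IMPLEMENTER description. */
+static inline int bdk_ap_vpidr_is_cavium(uint32_t vpidr)
+{
+    bdk_ap_vpidr_el2_t v;
+    v.u = vpidr;
+    return v.s.implementer == 0x43; /* 'C' = Cavium Inc. */
+}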
+
+/**
+ * Register (SYSREG) ap_vsesr_el2
+ *
+ * AP Virtual SError Exception Syndrome Register
+ * Provides the syndrome value reported to software on taking a virtual SError interrupt
+ * exception:
+ * - If the virtual SError interrupt is taken to EL1 using AArch64, VSESR_EL2 provides the
+ * syndrome value reported in ESR_EL1.
+ * - If the virtual SError interrupt is taken to EL1 using AArch32, VSESR_EL2 provides the
+ * syndrome values reported in DFSR.{AET, ExT} and the remainder of the DFSR is set as defined by
+ * VMSAv8-32.
+ *
+ * Usage constraints:
+ * VSESR_EL2 is UNDEFINED at EL1 and EL0.
+ */
+union bdk_ap_vsesr_el2
+{
+ uint64_t u;
+ struct bdk_ap_vsesr_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_25_63 : 39;
+ uint64_t ids : 1; /**< [ 24: 24](R/W) On taking a virtual SError interrupt to EL1 using AArch64 due to AP_HCR_EL2[VSE] == 1,
+ AP_ESR_EL1[24] is set to AP_VSESR_EL2[IDS]. */
+ uint64_t iss : 24; /**< [ 23: 0](RAZ) On taking a virtual SError interrupt to EL1 using AArch64 due to AP_HCR_EL2[VSE] == 1,
+ AP_ESR_EL1[23:0] is set to AP_VSESR_EL2[ISS]. */
+#else /* Word 0 - Little Endian */
+ uint64_t iss : 24; /**< [ 23: 0](RAZ) On taking a virtual SError interrupt to EL1 using AArch64 due to AP_HCR_EL2[VSE] == 1,
+ AP_ESR_EL1[23:0] is set to AP_VSESR_EL2[ISS]. */
+ uint64_t ids : 1; /**< [ 24: 24](R/W) On taking a virtual SError interrupt to EL1 using AArch64 due to AP_HCR_EL2[VSE] == 1,
+ AP_ESR_EL1[24] is set to AP_VSESR_EL2[IDS]. */
+ uint64_t reserved_25_63 : 39;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ap_vsesr_el2_s cn; */
+};
+typedef union bdk_ap_vsesr_el2 bdk_ap_vsesr_el2_t;
+
+#define BDK_AP_VSESR_EL2 BDK_AP_VSESR_EL2_FUNC()
+static inline uint64_t BDK_AP_VSESR_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_VSESR_EL2_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x30405020300ll;
+ __bdk_csr_fatal("AP_VSESR_EL2", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_AP_VSESR_EL2 bdk_ap_vsesr_el2_t
+#define bustype_BDK_AP_VSESR_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_VSESR_EL2 "AP_VSESR_EL2"
+#define busnum_BDK_AP_VSESR_EL2 0
+#define arguments_BDK_AP_VSESR_EL2 -1,-1,-1,-1
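+
+/* Editor's sketch (not part of the imported BDK sources): the AP_VDISR_EL2
+ * field descriptions above state that deferring a virtual SError sets [AA]
+ * and copies [IDS]/[ISS] from AP_VSESR_EL2. This models that propagation on
+ * register images; it is not how the hardware or the BDK performs it. */
+static inline uint64_t bdk_ap_vdisr_from_vsesr(uint64_t vsesr)
+{
+    bdk_ap_vsesr_el2_t src;
+    bdk_ap_vdisr_el2_t dst;
+    src.u = vsesr;
+    dst.u = 0;
+    dst.s.aa = 1;          /* set when ESB defers a virtual SError */
+    dst.s.ids = src.s.ids; /* from AP_VSESR_EL2[IDS] */
+    dst.s.iss = src.s.iss; /* from AP_VSESR_EL2[23:0] */
+    return dst.u;
+}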
+
+/**
+ * Register (SYSREG) ap_vtcr_el2
+ *
+ * AP Virtualization Translation Control Register
+ * Controls the translation table walks required for the stage 2
+ * translation of memory accesses from nonsecure EL0 and EL1,
+ * and holds cacheability and shareability information for the
+ * accesses.
+ */
+union bdk_ap_vtcr_el2
+{
+ uint32_t u;
+ struct bdk_ap_vtcr_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t rsvd_31 : 1; /**< [ 31: 31](RO) Reserved 1. */
+ uint32_t reserved_20_30 : 11;
+ uint32_t vs : 1; /**< [ 19: 19](R/W) VMID size.
+ 0 = 8 bits.
+ 1 = 16 bits. */
+ uint32_t ps : 3; /**< [ 18: 16](R/W) Physical Address Size.
+
+ 0x0 = 32 bits, 4GB.
+ 0x1 = 36 bits, 64GB.
+ 0x2 = 40 bits, 1TB.
+ 0x3 = 42 bits, 4TB.
+ 0x4 = 44 bits, 16TB.
+ 0x5 = 48 bits, 256TB. */
+ uint32_t tg0 : 2; /**< [ 15: 14](R/W) Granule size for the corresponding translation table base
+ address register.
+
+ If the value is programmed to either a reserved value, or a
+ size that has not been implemented, then the hardware will
+ treat the field as if it has been programmed to an
+ implementation defined choice of the sizes that has been
+ implemented for all purposes other than the value read back
+ from this register.
+
+ It is implementation defined whether the value read back is
+ the value programmed or the value that corresponds to the size
+ chosen.
+
+ 0x0 = 4 KB.
+ 0x1 = 64 KB.
+ 0x2 = 16 KB. */
+ uint32_t sh0 : 2; /**< [ 13: 12](R/W) Shareability attribute for memory associated with translation
+ table walks using AP_VTTBR_EL2.
+ 0x0 = Non-shareable.
+ 0x2 = Outer shareable.
+ 0x3 = Inner shareable. */
+ uint32_t orgn0 : 2; /**< [ 11: 10](R/W) Outer cacheability attribute for memory associated with
+ translation table walks using AP_VTTBR_EL2.
+ 0x0 = Normal memory, outer non-cacheable.
+ 0x1 = Normal memory, outer write-back write-allocate cacheable.
+ 0x2 = Normal memory, outer write-through cacheable.
+ 0x3 = Normal memory, outer write-back no write-allocate cacheable. */
+ uint32_t irgn0 : 2; /**< [ 9: 8](R/W) Inner cacheability attribute for memory associated with
+ translation table walks using AP_VTTBR_EL2.
+ 0x0 = Normal memory, inner non-cacheable
+ 0x1 = Normal memory, inner write-back write-allocate cacheable
+ 0x2 = Normal memory, inner write-through cacheable
+ 0x3 = Normal memory, inner write-back no write-allocate cacheable */
+ uint32_t sl0 : 2; /**< [ 7: 6](R/W) Starting level of the AP_VTCR_EL2 addressed region. The meaning
+ of this field depends on the value of AP_VTCR_EL2[TG0] (the
+ granule size). */
+ uint32_t t0sz : 6; /**< [ 5: 0](R/W) The size offset of the memory region addressed by AP_VTTBR_EL2.
+ The region size is 2^(64-T0SZ) bytes.
+ The maximum and minimum possible values for T0SZ depend on the
+ level of translation table and the memory translation granule
+ size, as described in the AArch64 Virtual Memory System
+ Architecture chapter. */
+#else /* Word 0 - Little Endian */
+ uint32_t t0sz : 6; /**< [ 5: 0](R/W) The size offset of the memory region addressed by AP_VTTBR_EL2.
+ The region size is 2^(64-T0SZ) bytes.
+ The maximum and minimum possible values for T0SZ depend on the
+ level of translation table and the memory translation granule
+ size, as described in the AArch64 Virtual Memory System
+ Architecture chapter. */
+ uint32_t sl0 : 2; /**< [ 7: 6](R/W) Starting level of the AP_VTCR_EL2 addressed region. The meaning
+ of this field depends on the value of AP_VTCR_EL2[TG0] (the
+ granule size). */
+ uint32_t irgn0 : 2; /**< [ 9: 8](R/W) Inner cacheability attribute for memory associated with
+ translation table walks using AP_VTTBR_EL2.
+ 0x0 = Normal memory, inner non-cacheable
+ 0x1 = Normal memory, inner write-back write-allocate cacheable
+ 0x2 = Normal memory, inner write-through cacheable
+ 0x3 = Normal memory, inner write-back no write-allocate cacheable */
+ uint32_t orgn0 : 2; /**< [ 11: 10](R/W) Outer cacheability attribute for memory associated with
+ translation table walks using AP_VTTBR_EL2.
+ 0x0 = Normal memory, outer non-cacheable.
+ 0x1 = Normal memory, outer write-back write-allocate cacheable.
+ 0x2 = Normal memory, outer write-through cacheable.
+ 0x3 = Normal memory, outer write-back no write-allocate cacheable. */
+ uint32_t sh0 : 2; /**< [ 13: 12](R/W) Shareability attribute for memory associated with translation
+ table walks using AP_VTTBR_EL2.
+ 0x0 = Non-shareable.
+ 0x2 = Outer shareable.
+ 0x3 = Inner shareable. */
+ uint32_t tg0 : 2; /**< [ 15: 14](R/W) Granule size for the corresponding translation table base
+ address register.
+
+ If the value is programmed to either a reserved value, or a
+ size that has not been implemented, then the hardware will
+ treat the field as if it has been programmed to an
+ implementation defined choice of the sizes that has been
+ implemented for all purposes other than the value read back
+ from this register.
+
+ It is implementation defined whether the value read back is
+ the value programmed or the value that corresponds to the size
+ chosen.
+
+ 0x0 = 4 KB.
+ 0x1 = 64 KB.
+ 0x2 = 16 KB. */
+ uint32_t ps : 3; /**< [ 18: 16](R/W) Physical Address Size.
+
+ 0x0 = 32 bits, 4GB.
+ 0x1 = 36 bits, 64GB.
+ 0x2 = 40 bits, 1TB.
+ 0x3 = 42 bits, 4TB.
+ 0x4 = 44 bits, 16TB.
+ 0x5 = 48 bits, 256TB. */
+ uint32_t vs : 1; /**< [ 19: 19](R/W) VMID size.
+ 0 = 8 bits.
+ 1 = 16 bits. */
+ uint32_t reserved_20_30 : 11;
+ uint32_t rsvd_31 : 1; /**< [ 31: 31](RO) Reserved 1. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_vtcr_el2_cn
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t rsvd_31 : 1; /**< [ 31: 31](RO) Reserved 1. */
+ uint32_t reserved_23_30 : 8;
+ uint32_t reserved_22 : 1;
+ uint32_t reserved_21 : 1;
+ uint32_t reserved_20 : 1;
+ uint32_t vs : 1; /**< [ 19: 19](R/W) VMID size.
+ 0 = 8 bits.
+ 1 = 16 bits. */
+ uint32_t ps : 3; /**< [ 18: 16](R/W) Physical Address Size.
+
+ 0x0 = 32 bits, 4GB.
+ 0x1 = 36 bits, 64GB.
+ 0x2 = 40 bits, 1TB.
+ 0x3 = 42 bits, 4TB.
+ 0x4 = 44 bits, 16TB.
+ 0x5 = 48 bits, 256TB. */
+ uint32_t tg0 : 2; /**< [ 15: 14](R/W) Granule size for the corresponding translation table base
+ address register.
+
+ If the value is programmed to either a reserved value, or a
+ size that has not been implemented, then the hardware will
+ treat the field as if it has been programmed to an
+ implementation defined choice of the sizes that has been
+ implemented for all purposes other than the value read back
+ from this register.
+
+ It is implementation defined whether the value read back is
+ the value programmed or the value that corresponds to the size
+ chosen.
+
+ 0x0 = 4 KB.
+ 0x1 = 64 KB.
+ 0x2 = 16 KB. */
+ uint32_t sh0 : 2; /**< [ 13: 12](R/W) Shareability attribute for memory associated with translation
+ table walks using AP_VTTBR_EL2.
+ 0x0 = Non-shareable.
+ 0x2 = Outer shareable.
+ 0x3 = Inner shareable. */
+ uint32_t orgn0 : 2; /**< [ 11: 10](R/W) Outer cacheability attribute for memory associated with
+ translation table walks using AP_VTTBR_EL2.
+ 0x0 = Normal memory, outer non-cacheable.
+ 0x1 = Normal memory, outer write-back write-allocate cacheable.
+ 0x2 = Normal memory, outer write-through cacheable.
+ 0x3 = Normal memory, outer write-back no write-allocate cacheable. */
+ uint32_t irgn0 : 2; /**< [ 9: 8](R/W) Inner cacheability attribute for memory associated with
+ translation table walks using AP_VTTBR_EL2.
+ 0x0 = Normal memory, inner non-cacheable
+ 0x1 = Normal memory, inner write-back write-allocate cacheable
+ 0x2 = Normal memory, inner write-through cacheable
+ 0x3 = Normal memory, inner write-back no write-allocate cacheable */
+ uint32_t sl0 : 2; /**< [ 7: 6](R/W) Starting level of the AP_VTCR_EL2 addressed region. The meaning
+ of this field depends on the value of AP_VTCR_EL2[TG0] (the
+ granule size). */
+ uint32_t t0sz : 6; /**< [ 5: 0](R/W) The size offset of the memory region addressed by AP_VTTBR_EL2.
+ The region size is 2^(64-T0SZ) bytes.
+ The maximum and minimum possible values for T0SZ depend on the
+ level of translation table and the memory translation granule
+ size, as described in the AArch64 Virtual Memory System
+ Architecture chapter. */
+#else /* Word 0 - Little Endian */
+ uint32_t t0sz : 6; /**< [ 5: 0](R/W) The size offset of the memory region addressed by AP_VTTBR_EL2.
+ The region size is 2^(64-T0SZ) bytes.
+ The maximum and minimum possible values for T0SZ depend on the
+ level of translation table and the memory translation granule
+ size, as described in the AArch64 Virtual Memory System
+ Architecture chapter. */
+ uint32_t sl0 : 2; /**< [ 7: 6](R/W) Starting level of the AP_VTCR_EL2 addressed region. The meaning
+ of this field depends on the value of AP_VTCR_EL2[TG0] (the
+ granule size). */
+ uint32_t irgn0 : 2; /**< [ 9: 8](R/W) Inner cacheability attribute for memory associated with
+ translation table walks using AP_VTTBR_EL2.
+ 0x0 = Normal memory, inner non-cacheable
+ 0x1 = Normal memory, inner write-back write-allocate cacheable
+ 0x2 = Normal memory, inner write-through cacheable
+ 0x3 = Normal memory, inner write-back no write-allocate cacheable */
+ uint32_t orgn0 : 2; /**< [ 11: 10](R/W) Outer cacheability attribute for memory associated with
+ translation table walks using AP_VTTBR_EL2.
+ 0x0 = Normal memory, outer non-cacheable.
+ 0x1 = Normal memory, outer write-back write-allocate cacheable.
+ 0x2 = Normal memory, outer write-through cacheable.
+ 0x3 = Normal memory, outer write-back no write-allocate cacheable. */
+ uint32_t sh0 : 2; /**< [ 13: 12](R/W) Shareability attribute for memory associated with translation
+ table walks using AP_VTTBR_EL2.
+ 0x0 = Non-shareable.
+ 0x2 = Outer shareable.
+ 0x3 = Inner shareable. */
+ uint32_t tg0 : 2; /**< [ 15: 14](R/W) Granule size for the corresponding translation table base
+ address register.
+
+ If the value is programmed to either a reserved value, or a
+ size that has not been implemented, then the hardware will
+ treat the field as if it has been programmed to an
+ implementation defined choice of the sizes that has been
+ implemented for all purposes other than the value read back
+ from this register.
+
+ It is implementation defined whether the value read back is
+ the value programmed or the value that corresponds to the size
+ chosen.
+
+ 0x0 = 4 KB.
+ 0x1 = 64 KB.
+ 0x2 = 16 KB. */
+ uint32_t ps : 3; /**< [ 18: 16](R/W) Physical Address Size.
+
+ 0x0 = 32 bits, 4GB.
+ 0x1 = 36 bits, 64GB.
+ 0x2 = 40 bits, 1TB.
+ 0x3 = 42 bits, 4TB.
+ 0x4 = 44 bits, 16TB.
+ 0x5 = 48 bits, 256TB. */
+ uint32_t vs : 1; /**< [ 19: 19](R/W) VMID size.
+ 0 = 8 bits.
+ 1 = 16 bits. */
+ uint32_t reserved_20 : 1;
+ uint32_t reserved_21 : 1;
+ uint32_t reserved_22 : 1;
+ uint32_t reserved_23_30 : 8;
+ uint32_t rsvd_31 : 1; /**< [ 31: 31](RO) Reserved 1. */
+#endif /* Word 0 - End */
+ } cn;
+};
+typedef union bdk_ap_vtcr_el2 bdk_ap_vtcr_el2_t;
+
+#define BDK_AP_VTCR_EL2 BDK_AP_VTCR_EL2_FUNC()
+static inline uint64_t BDK_AP_VTCR_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_VTCR_EL2_FUNC(void)
+{
+ return 0x30402010200ll;
+}
+
+#define typedef_BDK_AP_VTCR_EL2 bdk_ap_vtcr_el2_t
+#define bustype_BDK_AP_VTCR_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_VTCR_EL2 "AP_VTCR_EL2"
+#define busnum_BDK_AP_VTCR_EL2 0
+#define arguments_BDK_AP_VTCR_EL2 -1,-1,-1,-1
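+
+/* Editor's sketch (not part of the imported BDK sources): the region
+ * addressed through AP_VTTBR_EL2 spans 2^(64-T0SZ) bytes, so the size
+ * follows directly from AP_VTCR_EL2[T0SZ]; e.g. T0SZ = 24 gives a 1 TB
+ * (2^40 byte) stage 2 input region. */
+static inline uint64_t bdk_ap_vtcr_region_size(uint32_t vtcr)
+{
+    bdk_ap_vtcr_el2_t v;
+    v.u = vtcr;
+    if (v.s.t0sz == 0)
+        return 0; /* 2^64 bytes does not fit in a uint64_t */
+    return 1ull << (64 - v.s.t0sz); /* 2^(64-T0SZ) bytes */
+}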
+
+/**
+ * Register (SYSREG) ap_vttbr_el2
+ *
+ * AP Virtualization Translation Table Base Register
+ * Holds the base address of the translation table for the stage
+ * 2 translation of memory accesses from nonsecure EL0 and EL1.
+ */
+union bdk_ap_vttbr_el2
+{
+ uint64_t u;
+ struct bdk_ap_vttbr_el2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t vmid : 16; /**< [ 63: 48](R/W) The VMID for the translation table. Expanded to 16 bits
+ by the ARM Large System Extensions. */
+ uint64_t baddr : 44; /**< [ 47: 4](R/W) Translation table base address, bits\<47:x\>. Bits \<x-1:0\> are
+ RES0.
+
+ x is based on the value of AP_VTCR_EL2[T0SZ], the stage of
+ translation, and the memory translation granule size.
+ The AArch64 Virtual Memory System Architecture chapter
+ describes how x is calculated.
+ The value of x determines the required alignment of the
+ translation table, which must be aligned to 2^(x)
+ bytes.
+
+ If bits \<x-1:0\> are not all zero, this is a misaligned
+ Translation Table Base Address. Its effects are CONSTRAINED
+ UNPREDICTABLE, and can be one of the following:
+
+ Bits \<x-1:0\> are treated as if all the bits are zero. The
+ value read back from those bits might be the value written or
+ might be zero.
+
+ The calculation of an address for a translation table walk
+ using this register can be corrupted in those bits that are
+ nonzero. */
+ uint64_t reserved_1_3 : 3;
+ uint64_t cnp : 1; /**< [ 0: 0](R/W) Common not private. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnp : 1; /**< [ 0: 0](R/W) Common not private. */
+ uint64_t reserved_1_3 : 3;
+ uint64_t baddr : 44; /**< [ 47: 4](R/W) Translation table base address, bits\<47:x\>. Bits \<x-1:0\> are
+ RES0.
+
+ x is based on the value of AP_VTCR_EL2[T0SZ], the stage of
+ translation, and the memory translation granule size.
+ The AArch64 Virtual Memory System Architecture chapter
+ describes how x is calculated.
+ The value of x determines the required alignment of the
+ translation table, which must be aligned to 2^(x)
+ bytes.
+
+ If bits \<x-1:0\> are not all zero, this is a misaligned
+ Translation Table Base Address. Its effects are CONSTRAINED
+ UNPREDICTABLE, and can be one of the following:
+
+ Bits \<x-1:0\> are treated as if all the bits are zero. The
+ value read back from those bits might be the value written or
+ might be zero.
+
+ The calculation of an address for a translation table walk
+ using this register can be corrupted in those bits that are
+ nonzero. */
+ uint64_t vmid : 16; /**< [ 63: 48](R/W) The VMID for the translation table. Expanded to 16 bits
+ by the ARM Large System Extensions. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ap_vttbr_el2_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t vmid : 16; /**< [ 63: 48](R/W) The VMID for the translation table. Expanded to 16 bits
+ by the ARM Large System Extensions. */
+ uint64_t baddr : 44; /**< [ 47: 4](R/W) Translation table base address, bits\<47:x\>. Bits \<x-1:0\> are
+ RES0.
+
+ x is based on the value of AP_VTCR_EL2[T0SZ], the stage of
+ translation, and the memory translation granule size.
+ The AArch64 Virtual Memory System Architecture chapter
+ describes how x is calculated.
+ The value of x determines the required alignment of the
+ translation table, which must be aligned to 2^(x)
+ bytes.
+
+ If bits \<x-1:0\> are not all zero, this is a misaligned
+ Translation Table Base Address. Its effects are CONSTRAINED
+ UNPREDICTABLE, and can be one of the following:
+
+ Bits \<x-1:0\> are treated as if all the bits are zero. The
+ value read back from those bits might be the value written or
+ might be zero.
+
+ The calculation of an address for a translation table walk
+ using this register can be corrupted in those bits that are
+ nonzero. */
+ uint64_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_3 : 4;
+ uint64_t baddr : 44; /**< [ 47: 4](R/W) Translation table base address, bits\<47:x\>. Bits \<x-1:0\> are
+ RES0.
+
+ x is based on the value of AP_VTCR_EL2[T0SZ], the stage of
+ translation, and the memory translation granule size.
+ The AArch64 Virtual Memory System Architecture chapter
+ describes how x is calculated.
+ The value of x determines the required alignment of the
+ translation table, which must be aligned to 2^(x)
+ bytes.
+
+ If bits \<x-1:0\> are not all zero, this is a misaligned
+ Translation Table Base Address. Its effects are CONSTRAINED
+ UNPREDICTABLE, and can be one of the following:
+
+ Bits \<x-1:0\> are treated as if all the bits are zero. The
+ value read back from those bits might be the value written or
+ might be zero.
+
+ The calculation of an address for a translation table walk
+ using this register can be corrupted in those bits that are
+ nonzero. */
+ uint64_t vmid : 16; /**< [ 63: 48](R/W) The VMID for the translation table. Expanded to 16 bits
+ by the ARM Large System Extensions. */
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_ap_vttbr_el2_s cn9; */
+};
+typedef union bdk_ap_vttbr_el2 bdk_ap_vttbr_el2_t;
+
+#define BDK_AP_VTTBR_EL2 BDK_AP_VTTBR_EL2_FUNC()
+static inline uint64_t BDK_AP_VTTBR_EL2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_AP_VTTBR_EL2_FUNC(void)
+{
+ return 0x30402010000ll;
+}
+
+#define typedef_BDK_AP_VTTBR_EL2 bdk_ap_vttbr_el2_t
+#define bustype_BDK_AP_VTTBR_EL2 BDK_CSR_TYPE_SYSREG
+#define basename_BDK_AP_VTTBR_EL2 "AP_VTTBR_EL2"
+#define busnum_BDK_AP_VTTBR_EL2 0
+#define arguments_BDK_AP_VTTBR_EL2 -1,-1,-1,-1
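
The [BADDR] packing above gets the same treatment: given a suitably aligned table, the register value is just the shifted physical address plus a VMID. A minimal sketch under the same assumptions:

/* Pack a stage-2 translation-table base and VMID into AP_VTTBR_EL2.
   The table must be aligned to 2^(x) bytes, with x derived from
   AP_VTCR_EL2[T0SZ] and the granule; [BADDR] carries physical-address
   bits <47:4>, so the low four bits shift away. */
static inline bdk_ap_vttbr_el2_t vttbr_el2_compose(uint64_t table_pa, unsigned vmid)
{
    bdk_ap_vttbr_el2_t vttbr;
    vttbr.u = 0;
    vttbr.s.baddr = table_pa >> 4; /* physical-address bits <47:4> */
    vttbr.s.vmid  = vmid;          /* 8 or 16 significant bits, per AP_VTCR_EL2[VS] */
    vttbr.s.cnp   = 0;             /* translations private to this PE */
    return vttbr;
}
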
+
+#endif /* __BDK_CSRS_AP_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-fus.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-fus.h
new file mode 100644
index 0000000000..455eeed384
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-fus.h
@@ -0,0 +1,643 @@
+#ifndef __BDK_CSRS_FUS_H__
+#define __BDK_CSRS_FUS_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium FUS.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Enumeration fus_bar_e
+ *
+ * Fuse Base Address Register Enumeration
+ * Enumerates the base address registers.
+ */
+#define BDK_FUS_BAR_E_FUS_PF_BAR0 (0x87e003000000ll)
+#define BDK_FUS_BAR_E_FUS_PF_BAR0_SIZE 0x10000ull
+
+/**
+ * Enumeration fus_fuse_num_e
+ *
+ * INTERNAL: Fuse Number Enumeration
+ *
+ * Enumerates the fuse numbers.
+ */
+#define BDK_FUS_FUSE_NUM_E_ALT_BANKX(a) (0xf80 + (a))
+#define BDK_FUS_FUSE_NUM_E_ALT_FUSEX(a) (0x7b + (a))
+#define BDK_FUS_FUSE_NUM_E_AP_CRIPPLEX(a) (0x180 + (a))
+#define BDK_FUS_FUSE_NUM_E_AP_NOCRYPTOX(a) (0x200 + (a))
+#define BDK_FUS_FUSE_NUM_E_AP_POWER_LIMITX(a) (0x259 + (a))
+#define BDK_FUS_FUSE_NUM_E_BCH_DISABLE (0x202)
+#define BDK_FUS_FUSE_NUM_E_BISR_FUSED_ONLY (0x53)
+#define BDK_FUS_FUSE_NUM_E_BSR_LOBE_DISABLEX(a) (0x290 + (a))
+#define BDK_FUS_FUSE_NUM_E_CGX_CRIPPLEX(a) (0x228 + (a))
+#define BDK_FUS_FUSE_NUM_E_CHIP_IDX(a) (8 + (a))
+#define BDK_FUS_FUSE_NUM_E_CHIP_TYPEX(a) (0 + (a))
+#define BDK_FUS_FUSE_NUM_E_CORE_INIT_MULX(a) (0x43 + (a))
+#define BDK_FUS_FUSE_NUM_E_CORE_INIT_PLL (0x4a)
+#define BDK_FUS_FUSE_NUM_E_CORE_MAX_MULX(a) (0x4b + (a))
+#define BDK_FUS_FUSE_NUM_E_CPT0_NOCRYPTOX(a) (0x255 + (a))
+#define BDK_FUS_FUSE_NUM_E_CPT1_NOCRYPTOX(a) (0x257 + (a))
+#define BDK_FUS_FUSE_NUM_E_CPT_EXE_DISABLEX(a) (0x580 + (a))
+#define BDK_FUS_FUSE_NUM_E_CPT_INIT_MULX(a) (0x5d + (a))
+#define BDK_FUS_FUSE_NUM_E_CPT_INIT_PLL (0x64)
+#define BDK_FUS_FUSE_NUM_E_CPT_MAX_MULX(a) (0x65 + (a))
+#define BDK_FUS_FUSE_NUM_E_DESX(a) (0x540 + (a))
+#define BDK_FUS_FUSE_NUM_E_DFA_INFO_CLMX(a) (0x220 + (a))
+#define BDK_FUS_FUSE_NUM_E_DFA_INFO_DTEX(a) (0x21d + (a))
+#define BDK_FUS_FUSE_NUM_E_DLL0_PD_DELAYX(a) (0x88 + (a))
+#define BDK_FUS_FUSE_NUM_E_DLL0_SETTINGSX(a) (0x80 + (a))
+#define BDK_FUS_FUSE_NUM_E_DLL10_PD_DELAYX(a) (0x100 + (a))
+#define BDK_FUS_FUSE_NUM_E_DLL10_SETTINGSX(a) (0xf8 + (a))
+#define BDK_FUS_FUSE_NUM_E_DLL11_PD_DELAYX(a) (0x10c + (a))
+#define BDK_FUS_FUSE_NUM_E_DLL11_SETTINGSX(a) (0x104 + (a))
+#define BDK_FUS_FUSE_NUM_E_DLL12_PD_DELAYX(a) (0x118 + (a))
+#define BDK_FUS_FUSE_NUM_E_DLL12_SETTINGSX(a) (0x110 + (a))
+#define BDK_FUS_FUSE_NUM_E_DLL13_PD_DELAYX(a) (0x124 + (a))
+#define BDK_FUS_FUSE_NUM_E_DLL13_SETTINGSX(a) (0x11c + (a))
+#define BDK_FUS_FUSE_NUM_E_DLL14_PD_DELAYX(a) (0x130 + (a))
+#define BDK_FUS_FUSE_NUM_E_DLL14_SETTINGSX(a) (0x128 + (a))
+#define BDK_FUS_FUSE_NUM_E_DLL15_PD_DELAYX(a) (0x13c + (a))
+#define BDK_FUS_FUSE_NUM_E_DLL15_SETTINGSX(a) (0x134 + (a))
+#define BDK_FUS_FUSE_NUM_E_DLL1_PD_DELAYX(a) (0x94 + (a))
+#define BDK_FUS_FUSE_NUM_E_DLL1_SETTINGSX(a) (0x8c + (a))
+#define BDK_FUS_FUSE_NUM_E_DLL2_PD_DELAYX(a) (0xa0 + (a))
+#define BDK_FUS_FUSE_NUM_E_DLL2_SETTINGSX(a) (0x98 + (a))
+#define BDK_FUS_FUSE_NUM_E_DLL3_PD_DELAYX(a) (0xac + (a))
+#define BDK_FUS_FUSE_NUM_E_DLL3_SETTINGSX(a) (0xa4 + (a))
+#define BDK_FUS_FUSE_NUM_E_DLL4_PD_DELAYX(a) (0xb8 + (a))
+#define BDK_FUS_FUSE_NUM_E_DLL4_SETTINGSX(a) (0xb0 + (a))
+#define BDK_FUS_FUSE_NUM_E_DLL5_PD_DELAYX(a) (0xc4 + (a))
+#define BDK_FUS_FUSE_NUM_E_DLL5_SETTINGSX(a) (0xbc + (a))
+#define BDK_FUS_FUSE_NUM_E_DLL6_PD_DELAYX(a) (0xd0 + (a))
+#define BDK_FUS_FUSE_NUM_E_DLL6_SETTINGSX(a) (0xc8 + (a))
+#define BDK_FUS_FUSE_NUM_E_DLL7_PD_DELAYX(a) (0xdc + (a))
+#define BDK_FUS_FUSE_NUM_E_DLL7_SETTINGSX(a) (0xd4 + (a))
+#define BDK_FUS_FUSE_NUM_E_DLL8_PD_DELAYX(a) (0xe8 + (a))
+#define BDK_FUS_FUSE_NUM_E_DLL8_SETTINGSX(a) (0xe0 + (a))
+#define BDK_FUS_FUSE_NUM_E_DLL9_PD_DELAYX(a) (0xf4 + (a))
+#define BDK_FUS_FUSE_NUM_E_DLL9_SETTINGSX(a) (0xec + (a))
+#define BDK_FUS_FUSE_NUM_E_DLL_INIT_SETTINGSX(a) (0x6c + (a))
+#define BDK_FUS_FUSE_NUM_E_DLL_LOCK_FREEZE (0x54)
+#define BDK_FUS_FUSE_NUM_E_DLL_REFRESH_RATEX(a) (0x55 + (a))
+#define BDK_FUS_FUSE_NUM_E_DRO_CRIPPLE (0x73)
+#define BDK_FUS_FUSE_NUM_E_EFUS_LCK_CFG (0x78)
+#define BDK_FUS_FUSE_NUM_E_EFUS_LCK_MAN (0x79)
+#define BDK_FUS_FUSE_NUM_E_EFUS_LCK_PRD (0x7a)
+#define BDK_FUS_FUSE_NUM_E_EMA0X(a) (0x10 + (a))
+#define BDK_FUS_FUSE_NUM_E_EMA1X(a) (0x16 + (a))
+#define BDK_FUS_FUSE_NUM_E_EMA2X(a) (0x1c + (a))
+#define BDK_FUS_FUSE_NUM_E_EMA3X(a) (0x23 + (a))
+#define BDK_FUS_FUSE_NUM_E_EMA4X(a) (0x25 + (a))
+#define BDK_FUS_FUSE_NUM_E_EMA5 (0x28)
+#define BDK_FUS_FUSE_NUM_E_FDFX(a) (0xa00 + (a))
+#define BDK_FUS_FUSE_NUM_E_GSER_ERCX(a) (0xf00 + (a))
+#define BDK_FUS_FUSE_NUM_E_LLC_CRIPPLEX(a) (0x216 + (a))
+#define BDK_FUS_FUSE_NUM_E_LMC_CRIPPLEX(a) (0x283 + (a))
+#define BDK_FUS_FUSE_NUM_E_LMC_HALF (0x203)
+#define BDK_FUS_FUSE_NUM_E_METAL_FIXX(a) (0x2c + (a))
+#define BDK_FUS_FUSE_NUM_E_MFG_INFOX(a) (0x400 + (a))
+#define BDK_FUS_FUSE_NUM_E_MLC_CRIPPLEX(a) (0x219 + (a))
+#define BDK_FUS_FUSE_NUM_E_NODE1TRAPENA (0x250)
+#define BDK_FUS_FUSE_NUM_E_NODFA_CP2 (0x21c)
+#define BDK_FUS_FUSE_NUM_E_NPC_CRIPPLE (0x280)
+#define BDK_FUS_FUSE_NUM_E_OCX_CRIPPLE (0x227)
+#define BDK_FUS_FUSE_NUM_E_PDFX(a) (0x640 + (a))
+#define BDK_FUS_FUSE_NUM_E_PEM_CRIPPLEX(a) (0x230 + (a))
+#define BDK_FUS_FUSE_NUM_E_PNR_INIT_MULX(a) (0x34 + (a))
+#define BDK_FUS_FUSE_NUM_E_PNR_INIT_PLL (0x3b)
+#define BDK_FUS_FUSE_NUM_E_PNR_MAX_MULX(a) (0x3c + (a))
+#define BDK_FUS_FUSE_NUM_E_POWER_LIMITX(a) (0x225 + (a))
+#define BDK_FUS_FUSE_NUM_E_PVTX(a) (0x680 + (a))
+#define BDK_FUS_FUSE_NUM_E_RAID_CRIPPLE (0x224)
+#define BDK_FUS_FUSE_NUM_E_REFCLK_CHECK (0x52)
+#define BDK_FUS_FUSE_NUM_E_REPAIRX(a) (0x1000 + (a))
+#define BDK_FUS_FUSE_NUM_E_ROM_INFOX(a) (0x3f0 + (a))
+#define BDK_FUS_FUSE_NUM_E_RSVD140X(a) (0x140 + (a))
+#define BDK_FUS_FUSE_NUM_E_RSVD205 (0x205)
+#define BDK_FUS_FUSE_NUM_E_RSVD251X(a) (0x251 + (a))
+#define BDK_FUS_FUSE_NUM_E_RSVD254 (0x254)
+#define BDK_FUS_FUSE_NUM_E_RSVD25BX(a) (0x25b + (a))
+#define BDK_FUS_FUSE_NUM_E_RSVD264X(a) (0x264 + (a))
+#define BDK_FUS_FUSE_NUM_E_RSVD281 (0x281)
+#define BDK_FUS_FUSE_NUM_E_RSVD282 (0x282)
+#define BDK_FUS_FUSE_NUM_E_RSVD28B (0x28b)
+#define BDK_FUS_FUSE_NUM_E_RSVD28F (0x28f)
+#define BDK_FUS_FUSE_NUM_E_RSVD29X(a) (0x29 + (a))
+#define BDK_FUS_FUSE_NUM_E_RSVD29DX(a) (0x29d + (a))
+#define BDK_FUS_FUSE_NUM_E_RSVD31X(a) (0x31 + (a))
+#define BDK_FUS_FUSE_NUM_E_RSVD74 (0x74)
+#define BDK_FUS_FUSE_NUM_E_RSVD780X(a) (0x780 + (a))
+#define BDK_FUS_FUSE_NUM_E_RSVD801X(a) (0x801 + (a))
+#define BDK_FUS_FUSE_NUM_E_RSVD914X(a) (0x914 + (a))
+#define BDK_FUS_FUSE_NUM_E_RSVDE00X(a) (0xe00 + (a))
+#define BDK_FUS_FUSE_NUM_E_RUN_PLATFORMX(a) (0x75 + (a))
+#define BDK_FUS_FUSE_NUM_E_RVU_CRIPPLE (0x28e)
+#define BDK_FUS_FUSE_NUM_E_SATA_CRIPPLEX(a) (0x260 + (a))
+#define BDK_FUS_FUSE_NUM_E_SERIALX(a) (0x500 + (a))
+#define BDK_FUS_FUSE_NUM_E_SSO_CRIPPLE (0x253)
+#define BDK_FUS_FUSE_NUM_E_TGGX(a) (0x600 + (a))
+#define BDK_FUS_FUSE_NUM_E_TS0_M_CX(a) (0x810 + (a))
+#define BDK_FUS_FUSE_NUM_E_TS0_N_OFFX(a) (0x81c + (a))
+#define BDK_FUS_FUSE_NUM_E_TS0_STROBE_COMP_DLYX(a) (0x827 + (a))
+#define BDK_FUS_FUSE_NUM_E_TS1_M_CX(a) (0x82a + (a))
+#define BDK_FUS_FUSE_NUM_E_TS1_N_OFFX(a) (0x836 + (a))
+#define BDK_FUS_FUSE_NUM_E_TS1_STROBE_COMP_DLYX(a) (0x841 + (a))
+#define BDK_FUS_FUSE_NUM_E_TS2_M_CX(a) (0x844 + (a))
+#define BDK_FUS_FUSE_NUM_E_TS2_N_OFFX(a) (0x850 + (a))
+#define BDK_FUS_FUSE_NUM_E_TS2_STROBE_COMP_DLYX(a) (0x85b + (a))
+#define BDK_FUS_FUSE_NUM_E_TS3_M_CX(a) (0x85e + (a))
+#define BDK_FUS_FUSE_NUM_E_TS3_N_OFFX(a) (0x86a + (a))
+#define BDK_FUS_FUSE_NUM_E_TS3_STROBE_COMP_DLYX(a) (0x875 + (a))
+#define BDK_FUS_FUSE_NUM_E_TS4_M_CX(a) (0x878 + (a))
+#define BDK_FUS_FUSE_NUM_E_TS4_N_OFFX(a) (0x884 + (a))
+#define BDK_FUS_FUSE_NUM_E_TS4_STROBE_COMP_DLYX(a) (0x88f + (a))
+#define BDK_FUS_FUSE_NUM_E_TS5_M_CX(a) (0x892 + (a))
+#define BDK_FUS_FUSE_NUM_E_TS5_N_OFFX(a) (0x89e + (a))
+#define BDK_FUS_FUSE_NUM_E_TS5_STROBE_COMP_DLYX(a) (0x8a9 + (a))
+#define BDK_FUS_FUSE_NUM_E_TS6_M_CX(a) (0x8ac + (a))
+#define BDK_FUS_FUSE_NUM_E_TS6_N_OFFX(a) (0x8b8 + (a))
+#define BDK_FUS_FUSE_NUM_E_TS6_STROBE_COMP_DLYX(a) (0x8c3 + (a))
+#define BDK_FUS_FUSE_NUM_E_TS7_M_CX(a) (0x8c6 + (a))
+#define BDK_FUS_FUSE_NUM_E_TS7_N_OFFX(a) (0x8d2 + (a))
+#define BDK_FUS_FUSE_NUM_E_TS7_STROBE_COMP_DLYX(a) (0x8dd + (a))
+#define BDK_FUS_FUSE_NUM_E_TS8_M_CX(a) (0x8e0 + (a))
+#define BDK_FUS_FUSE_NUM_E_TS8_N_OFFX(a) (0x8ec + (a))
+#define BDK_FUS_FUSE_NUM_E_TS8_STROBE_COMP_DLYX(a) (0x8f7 + (a))
+#define BDK_FUS_FUSE_NUM_E_TS9_M_CX(a) (0x8fa + (a))
+#define BDK_FUS_FUSE_NUM_E_TS9_N_OFFX(a) (0x906 + (a))
+#define BDK_FUS_FUSE_NUM_E_TS9_STROBE_COMP_DLYX(a) (0x911 + (a))
+#define BDK_FUS_FUSE_NUM_E_TS_CALBRATED (0x800)
+#define BDK_FUS_FUSE_NUM_E_USBDRD_CRIPPLEX(a) (0x28c + (a))
+#define BDK_FUS_FUSE_NUM_E_ZIP_CRIPPLEX(a) (0x206 + (a))
+#define BDK_FUS_FUSE_NUM_E_ZIP_CTL_DISABLE (0x204)
+
+/**
+ * Register (RSL) fus_bnk_dat#
+ *
+ * Fuse Bank Store Register
+ * The initial state of FUS_BNK_DAT() is as if bank6 were just read,
+ * i.e. DAT* = fus[895:768].
+ */
+union bdk_fus_bnk_datx
+{
+ uint64_t u;
+ struct bdk_fus_bnk_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t dat : 64; /**< [ 63: 0](R/W/H) Efuse bank store. For read operations, [DAT] gets the fus bank last read. For write
+ operations, the [DAT] determines which fuses to blow. */
+#else /* Word 0 - Little Endian */
+ uint64_t dat : 64; /**< [ 63: 0](R/W/H) Efuse bank store. For read operations, [DAT] gets the fus bank last read. For write
+ operations, the [DAT] determines which fuses to blow. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_fus_bnk_datx_s cn; */
+};
+typedef union bdk_fus_bnk_datx bdk_fus_bnk_datx_t;
+
+static inline uint64_t BDK_FUS_BNK_DATX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_FUS_BNK_DATX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e003001520ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("FUS_BNK_DATX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_FUS_BNK_DATX(a) bdk_fus_bnk_datx_t
+#define bustype_BDK_FUS_BNK_DATX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_FUS_BNK_DATX(a) "FUS_BNK_DATX"
+#define device_bar_BDK_FUS_BNK_DATX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_FUS_BNK_DATX(a) (a)
+#define arguments_BDK_FUS_BNK_DATX(a) (a),-1,-1,-1
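
A bank is 128 fuses wide, so its image spans the two words FUS_BNK_DAT(0..1). A minimal sketch of collecting a bank once a read has completed (the FUS_RCMD sequence appears below), assuming <bdk.h> supplies bdk_numa_local() and the BDK_CSR_READ accessor:

/* Gather the 128-bit image of the most recently read fuse bank. */
static inline void fus_bank_get(uint64_t dat[2])
{
    bdk_node_t node = bdk_numa_local();
    dat[0] = BDK_CSR_READ(node, BDK_FUS_BNK_DATX(0)); /* bank bits 63:0 */
    dat[1] = BDK_CSR_READ(node, BDK_FUS_BNK_DATX(1)); /* bank bits 127:64 */
}
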
+
+/**
+ * Register (RSL) fus_cache#
+ *
+ * Fuse Cache Register
+ * This register returns the cached state of every fuse, organized into 64-fuse
+ * chunks. Each bit corresponds to a fuse enumerated by FUS_FUSE_NUM_E.
+ */
+union bdk_fus_cachex
+{
+ uint64_t u;
+ struct bdk_fus_cachex_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](RO/H) Reads the cached fuse value.
+ Modifications to the cache will take effect on the next
+ chip domain reset and are lost on a cold domain reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](RO/H) Reads the cached fuse value.
+ Modifications to the cache will take effect on the next
+ chip domain reset and are lost on a cold domain reset. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_fus_cachex_s cn; */
+};
+typedef union bdk_fus_cachex bdk_fus_cachex_t;
+
+static inline uint64_t BDK_FUS_CACHEX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_FUS_CACHEX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=63))
+ return 0x87e003001000ll + 8ll * ((a) & 0x3f);
+ __bdk_csr_fatal("FUS_CACHEX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_FUS_CACHEX(a) bdk_fus_cachex_t
+#define bustype_BDK_FUS_CACHEX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_FUS_CACHEX(a) "FUS_CACHEX"
+#define device_bar_BDK_FUS_CACHEX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_FUS_CACHEX(a) (a)
+#define arguments_BDK_FUS_CACHEX(a) (a),-1,-1,-1
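
Since each FUS_CACHE(n) word caches fuses [64n+63:64n], a FUS_FUSE_NUM_E value splits into a word index and a bit position. A minimal sketch, assuming <bdk.h> is included:

/* Test one fuse in the cached image: word index fuse_num / 64,
   bit position fuse_num % 64. */
static inline int fus_cache_fuse_is_blown(unsigned fuse_num)
{
    bdk_node_t node = bdk_numa_local();
    uint64_t w = BDK_CSR_READ(node, BDK_FUS_CACHEX(fuse_num / 64));
    return (w >> (fuse_num % 64)) & 1;
}

For example, BDK_FUS_FUSE_NUM_E_LMC_HALF (0x203 = 515) lands in FUS_CACHE(8) bit 3.
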
+
+/**
+ * Register (RSL) fus_const
+ *
+ * Fuse Constants Register
+ */
+union bdk_fus_const
+{
+ uint64_t u;
+ struct bdk_fus_const_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t repair_banks : 8; /**< [ 15: 8](RO) Number of 128-bit memory repair banks present. */
+ uint64_t fuse_banks : 8; /**< [ 7: 0](RO) Number of 128-bit general purpose fuse banks present. */
+#else /* Word 0 - Little Endian */
+ uint64_t fuse_banks : 8; /**< [ 7: 0](RO) Number of 128-bit general purpose fuse banks present. */
+ uint64_t repair_banks : 8; /**< [ 15: 8](RO) Number of 128-bit memory repair banks present. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_fus_const_s cn; */
+};
+typedef union bdk_fus_const bdk_fus_const_t;
+
+#define BDK_FUS_CONST BDK_FUS_CONST_FUNC()
+static inline uint64_t BDK_FUS_CONST_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_FUS_CONST_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e003001578ll;
+ __bdk_csr_fatal("FUS_CONST", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_FUS_CONST bdk_fus_const_t
+#define bustype_BDK_FUS_CONST BDK_CSR_TYPE_RSL
+#define basename_BDK_FUS_CONST "FUS_CONST"
+#define device_bar_BDK_FUS_CONST 0x0 /* PF_BAR0 */
+#define busnum_BDK_FUS_CONST 0
+#define arguments_BDK_FUS_CONST -1,-1,-1,-1
+
+/**
+ * Register (RSL) fus_prog
+ *
+ * INTERNAL: Fuse Programming Register
+ */
+union bdk_fus_prog
+{
+ uint64_t u;
+ struct bdk_fus_prog_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t efuse : 1; /**< [ 15: 15](R/W) Efuse storage. When set, the data is written directly to the efuse
+ bank. When cleared, data is soft blown to local storage.
+ A soft blown fuse is subject to lockdown fuses.
+ Soft blown fuses will become active after a chip domain reset
+ but will not persist through a cold domain reset. */
+ uint64_t voltage : 1; /**< [ 14: 14](RO) Efuse programming voltage status. When set, EFUSE banks have
+ programming voltage applied. Required to be set when
+ programming fuses (i.e. [EFUSE] and [PROG] set). */
+ uint64_t reserved_13 : 1;
+ uint64_t prog : 1; /**< [ 12: 12](R/W/H) Internal:
+ When written to one by software, blow the fuse bank. Hardware will
+ clear the field when the program operation is complete.
+ To write a bank of fuses, software must write the fuse data into
+ FUS_BNK_DAT(). Then it writes the ADDR and EFUSE fields of this register
+ and sets the PROG bit. Hardware will clear the [PROG] when the write is
+ completed. New fuses will become active after a chip domain reset. */
+ uint64_t reserved_11 : 1;
+ uint64_t addr : 7; /**< [ 10: 4](R/W) Indicates which of the banks of 128 fuses to blow. Software
+ should not change this field while the FUS_PROG[PROG] bit is set. */
+ uint64_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_3 : 4;
+ uint64_t addr : 7; /**< [ 10: 4](R/W) Indicates which of the banks of 128 fuses to blow. Software
+ should not change this field while the FUS_PROG[PROG] bit is set. */
+ uint64_t reserved_11 : 1;
+ uint64_t prog : 1; /**< [ 12: 12](R/W/H) Internal:
+ When written to one by software, blow the fuse bank. Hardware will
+ clear the field when the program operation is complete.
+ To write a bank of fuses, software must write the fuse data into
+ FUS_BNK_DAT(). Then it writes the ADDR and EFUSE fields of this register
+ and sets the PROG bit. Hardware will clear the [PROG] when the write is
+ completed. New fuses will become active after a chip domain reset. */
+ uint64_t reserved_13 : 1;
+ uint64_t voltage : 1; /**< [ 14: 14](RO) Efuse programming voltage status. When set, EFUSE banks have
+ programming voltage applied. Required to be set when
+ programming fuses (i.e. [EFUSE] and [PROG] set). */
+ uint64_t efuse : 1; /**< [ 15: 15](R/W) Efuse storage. When set, the data is written directly to the efuse
+ bank. When cleared, data is soft blown to local storage.
+ A soft blown fuse is subject to lockdown fuses.
+ Soft blown fuses will become active after a chip domain reset
+ but will not persist through a cold domain reset. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_fus_prog_s cn; */
+};
+typedef union bdk_fus_prog bdk_fus_prog_t;
+
+#define BDK_FUS_PROG BDK_FUS_PROG_FUNC()
+static inline uint64_t BDK_FUS_PROG_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_FUS_PROG_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e003001510ll;
+ __bdk_csr_fatal("FUS_PROG", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_FUS_PROG bdk_fus_prog_t
+#define bustype_BDK_FUS_PROG BDK_CSR_TYPE_RSL
+#define basename_BDK_FUS_PROG "FUS_PROG"
+#define device_bar_BDK_FUS_PROG 0x0 /* PF_BAR0 */
+#define busnum_BDK_FUS_PROG 0
+#define arguments_BDK_FUS_PROG -1,-1,-1,-1
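
The [PROG] description above spells out the blow sequence: load FUS_BNK_DAT(), then set [ADDR], [EFUSE] and [PROG] in one write, and poll until hardware clears [PROG]. A minimal sketch, assuming the BDK_CSR_WAIT_FOR_FIELD helper from bdk-csr.h and an illustrative 100 ms timeout:

/* Hard-blow one 128-fuse bank. [VOLTAGE] must already read as set. */
static inline int fus_bank_blow(unsigned bank, uint64_t dat0, uint64_t dat1)
{
    bdk_node_t node = bdk_numa_local();
    bdk_fus_prog_t prog;

    BDK_CSR_WRITE(node, BDK_FUS_BNK_DATX(0), dat0);
    BDK_CSR_WRITE(node, BDK_FUS_BNK_DATX(1), dat1);

    prog.u = 0;
    prog.s.addr  = bank; /* which bank of 128 fuses to blow */
    prog.s.efuse = 1;    /* 1 = hard blow; 0 would soft blow to local storage */
    prog.s.prog  = 1;    /* start the program operation */
    BDK_CSR_WRITE(node, BDK_FUS_PROG, prog.u);

    /* Hardware clears [PROG] when the write completes. */
    return BDK_CSR_WAIT_FOR_FIELD(node, BDK_FUS_PROG, prog, ==, 0, 100000);
}

Per the field description above, newly blown fuses only become active after a chip domain reset.
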
+
+/**
+ * Register (RSL) fus_rcmd
+ *
+ * Fuse Read Command Register
+ * Read Fuse Banks.
+ */
+union bdk_fus_rcmd
+{
+ uint64_t u;
+ struct bdk_fus_rcmd_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t efuse : 1; /**< [ 15: 15](R/W) Efuse storage. When set, the return data is from the efuse
+ bank directly. When cleared data is read from the local storage. */
+ uint64_t voltage : 1; /**< [ 14: 14](RO) Efuse programming voltage status. When set, EFUSE banks have
+ programming voltage applied. Required to be cleared when
+ reading fuses directly (i.e. [EFUSE] and [PEND] set). */
+ uint64_t reserved_13 : 1;
+ uint64_t pend : 1; /**< [ 12: 12](R/W/H) Software sets this bit to one on a write operation that starts
+ the fuse read operation. Hardware clears this bit when the read
+ operation is complete and FUS_BNK_DAT() is valid.
+ FUS_READ_TIMES[RDSTB_WH] determines the time for the operation
+ to complete. */
+ uint64_t reserved_11 : 1;
+ uint64_t addr : 7; /**< [ 10: 4](R/W) Address. Specifies the bank address of 128 fuses to read.
+ Software should not change this field while [PEND]
+ is set. It must wait for the hardware to clear it. */
+ uint64_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_3 : 4;
+ uint64_t addr : 7; /**< [ 10: 4](R/W) Address. Specifies the bank address of 128 fuses to read.
+ Software should not change this field while [PEND]
+ is set. It must wait for the hardware to clear it. */
+ uint64_t reserved_11 : 1;
+ uint64_t pend : 1; /**< [ 12: 12](R/W/H) Software sets this bit to one on a write operation that starts
+ the fuse read operation. Hardware clears this bit when the read
+ operation is complete and FUS_BNK_DAT() is valid.
+ FUS_READ_TIMES[RDSTB_WH] determines the time for the operation
+ to complete. */
+ uint64_t reserved_13 : 1;
+ uint64_t voltage : 1; /**< [ 14: 14](RO) Efuse programming voltage status. When set, EFUSE banks have
+ programming voltage applied. Required to be cleared when
+ reading fuses directly (i.e. [EFUSE] and [PEND] set). */
+ uint64_t efuse : 1; /**< [ 15: 15](R/W) Efuse storage. When set, the return data is from the efuse
+ bank directly. When cleared data is read from the local storage. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_fus_rcmd_s cn; */
+};
+typedef union bdk_fus_rcmd bdk_fus_rcmd_t;
+
+#define BDK_FUS_RCMD BDK_FUS_RCMD_FUNC()
+static inline uint64_t BDK_FUS_RCMD_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_FUS_RCMD_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e003001500ll;
+ __bdk_csr_fatal("FUS_RCMD", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_FUS_RCMD bdk_fus_rcmd_t
+#define bustype_BDK_FUS_RCMD BDK_CSR_TYPE_RSL
+#define basename_BDK_FUS_RCMD "FUS_RCMD"
+#define device_bar_BDK_FUS_RCMD 0x0 /* PF_BAR0 */
+#define busnum_BDK_FUS_RCMD 0
+#define arguments_BDK_FUS_RCMD -1,-1,-1,-1
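
The read sequence is the mirror image: set [ADDR] and [PEND], wait for hardware to clear [PEND], then collect FUS_BNK_DAT(). A minimal sketch under the same assumptions (note [VOLTAGE] must be clear when reading the efuse bank directly):

/* Read one 128-fuse bank into dat[0..1]; returns 0 on success. */
static inline int fus_bank_read(unsigned bank, uint64_t dat[2])
{
    bdk_node_t node = bdk_numa_local();
    bdk_fus_rcmd_t rcmd;

    rcmd.u = 0;
    rcmd.s.addr  = bank; /* bank address of the 128 fuses to read */
    rcmd.s.efuse = 1;    /* 1 = read efuses directly; 0 = local storage */
    rcmd.s.pend  = 1;    /* start the read */
    BDK_CSR_WRITE(node, BDK_FUS_RCMD, rcmd.u);

    /* FUS_READ_TIMES[RDSTB_WH] governs the real latency; 10 ms is generous. */
    if (BDK_CSR_WAIT_FOR_FIELD(node, BDK_FUS_RCMD, pend, ==, 0, 10000))
        return -1;
    dat[0] = BDK_CSR_READ(node, BDK_FUS_BNK_DATX(0));
    dat[1] = BDK_CSR_READ(node, BDK_FUS_BNK_DATX(1));
    return 0;
}
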
+
+/**
+ * Register (RSL) fus_read_times
+ *
+ * Fuse Read Times Register
+ * The reset values correspond to accesses of internal fuses with PLL reference clock
+ * up to 115 MHz. If any of the formulas below result in a value less than 0x0, the
+ * corresponding timing parameter should be set to zero.
+ *
+ * Prior to issuing a read operation to the fuse banks (via FUS_RCMD),
+ * this register should be written with the timing parameters for the read.
+ * This register should not be written while FUS_RCMD[PEND] = 1.
+ */
+union bdk_fus_read_times
+{
+ uint64_t u;
+ struct bdk_fus_read_times_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t done : 4; /**< [ 31: 28](R/W) Hold time of CSB, PGENB, and LOAD with respect to falling edge
+ of STROBE for read and write mode in PLL_REF_CLK + 1 cycles.
+ Timing specs are th_CS = 6 ns, th_PG = 10 ns, th_LD_p = 7 ns.
+ Default of 0x0 yields 8.7 ns at 115 MHz. */
+ uint64_t ahd : 4; /**< [ 27: 24](R/W) Hold time of A with respect to falling edge of STROBE
+ for read and write modes in PLL_REF_CLK + 1 cycles.
+ Timing spec of tsu_A_r and tsu_A_p is 3 ns min.
+ Default of 0x0 yields 8.7 ns at 115 MHz. */
+ uint64_t wrstb_wh : 12; /**< [ 23: 12](R/W) Pulse width high of STROBE in write mode in PLL_REF_CLK + 1 cycles.
+ Timing spec of twh_SB_p is 9.8 us max.
+ Default of 0x3E8 yields 8.7 us at 115 MHz. */
+ uint64_t rdstb_wh : 4; /**< [ 11: 8](R/W) Pulse width high of STROBE in read mode in PLL_REF_CLK + 1 cycles.
+ Timing spec of twh_SB_p is 20 ns min.
+ Default of 0x2 yields 26.1 ns at 115 MHz. */
+ uint64_t asu : 4; /**< [ 7: 4](R/W) Setup time of A to rising edge of STROBE for read and write
+ modes in PLL_REF_CLK cycles.
+ Timing spec of tsu_A_r and tsu_A_p is 12 ns min.
+ Default of 0x1 yields 17.4 ns at 115 MHz. */
+ uint64_t setup : 4; /**< [ 3: 0](R/W) Setup time of CSB, PGENB, LOAD to rising edge of STROBE
+ in read and write modes in PLL_REF_CLK + 1 cycles.
+ tsu_CS = 16 ns, tsu_PG = 14 ns, tsu_LD_r = 10 ns.
+ Default of 0x0 yields 8.7 ns plus ASU cycles at 115 MHz. */
+#else /* Word 0 - Little Endian */
+ uint64_t setup : 4; /**< [ 3: 0](R/W) Setup time of CSB, PGENB, LOAD to rising edge of STROBE
+ in read and write modes in PLL_REF_CLK + 1 cycles.
+ tsu_CS = 16 ns, tsu_PG = 14 ns, tsu_LD_r = 10 ns.
+ Default of 0x0 yields 8.7 ns plus ASU cycles at 115 MHz. */
+ uint64_t asu : 4; /**< [ 7: 4](R/W) Setup time of A to rising edge of STROBE for read and write
+ modes in PLL_REF_CLK cycles.
+ Timing spec of tsu_A_r and tsu_A_p is 12 ns min.
+ Default of 0x1 yields 17.4 ns at 115 MHz. */
+ uint64_t rdstb_wh : 4; /**< [ 11: 8](R/W) Pulse width high of STROBE in read mode in PLL_REF_CLK + 1 cycles.
+ Timing spec of twh_SB_p is 20 ns min.
+ Default of 0x2 yields 26.1 ns at 115 MHz. */
+ uint64_t wrstb_wh : 12; /**< [ 23: 12](R/W) Pulse width high of STROBE in write mode in PLL_REF_CLK + 1 cycles.
+ Timing spec of twh_SB_p is 9.8 us max.
+ Default of 0x3E8 yields 8.7 us at 115 MHz. */
+ uint64_t ahd : 4; /**< [ 27: 24](R/W) Hold time of A with respect to falling edge of STROBE
+ for read and write modes in PLL_REF_CLK + 1 cycles.
+ Timing spec of tsu_A_r and tsu_A_p is 3 ns min.
+ Default of 0x0 yields 8.7 ns at 115 MHz. */
+ uint64_t done : 4; /**< [ 31: 28](R/W) Hold time of CSB, PGENB, and LOAD with respect to falling edge
+ of STROBE for read and write mode in PLL_REF_CLK + 1 cycles.
+ Timing specs are th_CS = 6 ns, th_PG = 10 ns, th_LD_p = 7 ns.
+ Default of 0x0 yields 8.7 ns at 115 MHz. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_fus_read_times_s cn; */
+};
+typedef union bdk_fus_read_times bdk_fus_read_times_t;
+
+#define BDK_FUS_READ_TIMES BDK_FUS_READ_TIMES_FUNC()
+static inline uint64_t BDK_FUS_READ_TIMES_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_FUS_READ_TIMES_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e003001570ll;
+ __bdk_csr_fatal("FUS_READ_TIMES", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_FUS_READ_TIMES bdk_fus_read_times_t
+#define bustype_BDK_FUS_READ_TIMES BDK_CSR_TYPE_RSL
+#define basename_BDK_FUS_READ_TIMES "FUS_READ_TIMES"
+#define device_bar_BDK_FUS_READ_TIMES 0x0 /* PF_BAR0 */
+#define busnum_BDK_FUS_READ_TIMES 0
+#define arguments_BDK_FUS_READ_TIMES -1,-1,-1,-1
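
The fields above encode a value N as N+1 PLL_REF_CLK cycles, so meeting a minimum time t at reference frequency f takes N = ceil(t*f) - 1. A worked sketch in integer ns/MHz; the [RDSTB_WH] default checks out: ceil(20 ns * 115 MHz) - 1 = 3 - 1 = 0x2, i.e. 3 cycles = 26.1 ns.

/* Timing-field value for an N+1-cycle parameter with minimum ns_min
   at reference clock ref_mhz. */
static inline unsigned fus_timing_field(unsigned ns_min, unsigned ref_mhz)
{
    unsigned cycles = (ns_min * ref_mhz + 999) / 1000; /* ceil(t * f) */
    return cycles ? cycles - 1 : 0;
}
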
+
+/**
+ * Register (RSL) fus_soft_repair
+ *
+ * INTERNAL: Fuse Soft Repair Register
+ *
+ * Internal:
+ * Aka `soft blow'. Upon reset fuse repairs are loaded into FUS_FUSE_NUM_E::REPAIR()
+ * fuses as they are loaded into the memories.
+ */
+union bdk_fus_soft_repair
+{
+ uint64_t u;
+ struct bdk_fus_soft_repair_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t auto_dly : 16; /**< [ 47: 32](R/W/H) Internal:
+ Autoblow Delay. Power supply ramp delay in 1 us increments from enabling
+ [AUTOBLOW] to programming first bit. */
+ uint64_t reserved_18_31 : 14;
+ uint64_t autoblow : 1; /**< [ 17: 17](R/W/H) Internal:
+ Set to initiate burning of defect fuses to fuse macro. Clears when fuses are
+ blown. */
+ uint64_t reserved_16 : 1;
+ uint64_t numdefects : 8; /**< [ 15: 8](RO/H) Internal:
+ After reset/BIST indicates the number of memory defects reported. Defects are
+ stored in fuses FUS_FUSE_NUM_E::REPAIR() from index [NUMREPAIRS]*32 to
+ ([NUMREPAIRS]*32 + [NUMDEFECTS]*32 - 1). */
+ uint64_t numrepairs : 8; /**< [ 7: 0](R/W) Internal:
+ Indicates the number of repairs loaded from repair mem to the memories on
+ the last chip/core/mcp/scp reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t numrepairs : 8; /**< [ 7: 0](R/W) Internal:
+ Indicates the number of repairs loaded from repair mem to the memories on
+ the last chip/core/mcp/scp reset. */
+ uint64_t numdefects : 8; /**< [ 15: 8](RO/H) Internal:
+ After reset/BIST indicates the number of memory defects reported. Defects are
+ stored in fuses FUS_FUSE_NUM_E::REPAIR() from index [NUMREPAIRS]*32 to
+ ([NUMREPAIRS]*32 + [NUMDEFECTS]*32 - 1). */
+ uint64_t reserved_16 : 1;
+ uint64_t autoblow : 1; /**< [ 17: 17](R/W/H) Internal:
+ Set to initiate burning of defect fuses to fuse macro. Clears when fuses are
+ blown. */
+ uint64_t reserved_18_31 : 14;
+ uint64_t auto_dly : 16; /**< [ 47: 32](R/W/H) Internal:
+ Autoblow Delay. Power supply ramp delay in 1 us increments from enabling
+ [AUTOBLOW] to programming first bit. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_fus_soft_repair_s cn; */
+};
+typedef union bdk_fus_soft_repair bdk_fus_soft_repair_t;
+
+#define BDK_FUS_SOFT_REPAIR BDK_FUS_SOFT_REPAIR_FUNC()
+static inline uint64_t BDK_FUS_SOFT_REPAIR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_FUS_SOFT_REPAIR_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e003001540ll;
+ __bdk_csr_fatal("FUS_SOFT_REPAIR", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_FUS_SOFT_REPAIR bdk_fus_soft_repair_t
+#define bustype_BDK_FUS_SOFT_REPAIR BDK_CSR_TYPE_RSL
+#define basename_BDK_FUS_SOFT_REPAIR "FUS_SOFT_REPAIR"
+#define device_bar_BDK_FUS_SOFT_REPAIR 0x0 /* PF_BAR0 */
+#define busnum_BDK_FUS_SOFT_REPAIR 0
+#define arguments_BDK_FUS_SOFT_REPAIR -1,-1,-1,-1
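
A minimal sketch of inspecting the repair bookkeeping above, using the BDK_CSR_INIT read idiom from bdk-csr.h:

/* Number of new memory defects BIST reported beyond the [NUMREPAIRS]
   already loaded into the memories. */
static inline unsigned fus_new_defects(void)
{
    BDK_CSR_INIT(sr, bdk_numa_local(), BDK_FUS_SOFT_REPAIR);
    return sr.s.numdefects;
}
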
+
+#endif /* __BDK_CSRS_FUS_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-fusf.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-fusf.h
new file mode 100644
index 0000000000..82584fc8de
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-fusf.h
@@ -0,0 +1,939 @@
+#ifndef __BDK_CSRS_FUSF_H__
+#define __BDK_CSRS_FUSF_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium FUSF.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Enumeration fusf_bar_e
+ *
+ * Field Fuse Base Address Register Enumeration
+ * Enumerates the base address registers.
+ */
+#define BDK_FUSF_BAR_E_FUSF_PF_BAR0 (0x87e004000000ll)
+#define BDK_FUSF_BAR_E_FUSF_PF_BAR0_SIZE 0x10000ull
+
+/**
+ * Enumeration fusf_fuse_num_e
+ *
+ * Field Fuse Fuse Number Enumeration
+ * Enumerates the fuse numbers.
+ */
+#define BDK_FUSF_FUSE_NUM_E_CRYPT_NO_DIS (0xe)
+#define BDK_FUSF_FUSE_NUM_E_CRYPT_SSK_DIS (0xf)
+#define BDK_FUSF_FUSE_NUM_E_DIS_HUK (0xd)
+#define BDK_FUSF_FUSE_NUM_E_EKX(a) (0x500 + (a))
+#define BDK_FUSF_FUSE_NUM_E_FJ_CORE0 (0xc)
+#define BDK_FUSF_FUSE_NUM_E_FJ_DIS (9)
+#define BDK_FUSF_FUSE_NUM_E_FJ_TIMEOUTX(a) (0xa + (a))
+#define BDK_FUSF_FUSE_NUM_E_FUSF_LCK (0)
+#define BDK_FUSF_FUSE_NUM_E_HUKX(a) (0x480 + (a))
+#define BDK_FUSF_FUSE_NUM_E_MFG_LCK (6)
+#define BDK_FUSF_FUSE_NUM_E_ROM_SCRIPT_DISABLE (0x1e)
+#define BDK_FUSF_FUSE_NUM_E_ROM_T_CNTX(a) (0x20 + (a))
+#define BDK_FUSF_FUSE_NUM_E_ROTPKX(a) (0x300 + (a))
+#define BDK_FUSF_FUSE_NUM_E_ROT_LCK (2)
+#define BDK_FUSF_FUSE_NUM_E_RSVD128X(a) (0x80 + (a))
+#define BDK_FUSF_FUSE_NUM_E_RSVD16X(a) (0x10 + (a))
+#define BDK_FUSF_FUSE_NUM_E_RSVD256X(a) (0x100 + (a))
+#define BDK_FUSF_FUSE_NUM_E_RSVD4X(a) (4 + (a))
+#define BDK_FUSF_FUSE_NUM_E_RSVD5 (5)
+#define BDK_FUSF_FUSE_NUM_E_RSVD512X(a) (0x200 + (a))
+#define BDK_FUSF_FUSE_NUM_E_RSVD64X(a) (0x40 + (a))
+#define BDK_FUSF_FUSE_NUM_E_RSVD7 (7)
+#define BDK_FUSF_FUSE_NUM_E_SPI_SAFEMODE (0x1f)
+#define BDK_FUSF_FUSE_NUM_E_SSKX(a) (0x400 + (a))
+#define BDK_FUSF_FUSE_NUM_E_SSK_LCK (1)
+#define BDK_FUSF_FUSE_NUM_E_SWX(a) (0x600 + (a))
+#define BDK_FUSF_FUSE_NUM_E_SW_LCK (3)
+#define BDK_FUSF_FUSE_NUM_E_TRUST_DIS (4)
+#define BDK_FUSF_FUSE_NUM_E_TRUST_LVL_CHK (7)
+#define BDK_FUSF_FUSE_NUM_E_TZ_FORCE2 (8)
+
+/**
+ * Register (RSL) fusf_bnk_dat#
+ *
+ * Field Fuse Bank Store Register
+ * The initial state of FUSF_BNK_DAT() is as if bank15 were just read,
+ * i.e. DAT* = fus[2047:1920].
+ */
+union bdk_fusf_bnk_datx
+{
+ uint64_t u;
+ struct bdk_fusf_bnk_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t dat : 64; /**< [ 63: 0](SR/W/H) Efuse bank store. For read operations, the DAT gets the fus bank last read. For write
+ operations, the DAT determines which fuses to blow. */
+#else /* Word 0 - Little Endian */
+ uint64_t dat : 64; /**< [ 63: 0](SR/W/H) Efuse bank store. For read operations, the DAT gets the fus bank last read. For write
+ operations, the DAT determines which fuses to blow. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_fusf_bnk_datx_s cn8; */
+ struct bdk_fusf_bnk_datx_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t dat : 64; /**< [ 63: 0](SR/W/H) Efuse bank store. For read operations, [DAT] gets the fus bank last read. For write
+ operations, [DAT] determines which fuses to blow. */
+#else /* Word 0 - Little Endian */
+ uint64_t dat : 64; /**< [ 63: 0](SR/W/H) Efuse bank store. For read operations, [DAT] gets the fus bank last read. For write
+ operations, [DAT] determines which fuses to blow. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_fusf_bnk_datx bdk_fusf_bnk_datx_t;
+
+static inline uint64_t BDK_FUSF_BNK_DATX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_FUSF_BNK_DATX(unsigned long a)
+{
+ if (a<=1)
+ return 0x87e004000120ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("FUSF_BNK_DATX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_FUSF_BNK_DATX(a) bdk_fusf_bnk_datx_t
+#define bustype_BDK_FUSF_BNK_DATX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_FUSF_BNK_DATX(a) "FUSF_BNK_DATX"
+#define device_bar_BDK_FUSF_BNK_DATX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_FUSF_BNK_DATX(a) (a)
+#define arguments_BDK_FUSF_BNK_DATX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) fusf_const
+ *
+ * Field Fuse Constants Register
+ */
+union bdk_fusf_const
+{
+ uint64_t u;
+ struct bdk_fusf_const_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t fuse_banks : 8; /**< [ 7: 0](SRO) Number of 128-bit field fuse banks present. */
+#else /* Word 0 - Little Endian */
+ uint64_t fuse_banks : 8; /**< [ 7: 0](SRO) Number of 128-bit field fuse banks present. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_fusf_const_s cn; */
+};
+typedef union bdk_fusf_const bdk_fusf_const_t;
+
+#define BDK_FUSF_CONST BDK_FUSF_CONST_FUNC()
+static inline uint64_t BDK_FUSF_CONST_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_FUSF_CONST_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e004000130ll;
+ __bdk_csr_fatal("FUSF_CONST", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_FUSF_CONST bdk_fusf_const_t
+#define bustype_BDK_FUSF_CONST BDK_CSR_TYPE_RSL
+#define basename_BDK_FUSF_CONST "FUSF_CONST"
+#define device_bar_BDK_FUSF_CONST 0x0 /* PF_BAR0 */
+#define busnum_BDK_FUSF_CONST 0
+#define arguments_BDK_FUSF_CONST -1,-1,-1,-1
+
+/**
+ * Register (RSL) fusf_ctl
+ *
+ * Field Fuse Control Register
+ */
+union bdk_fusf_ctl
+{
+ uint64_t u;
+ struct bdk_fusf_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t rom_t_cnt : 32; /**< [ 63: 32](SRO) ROM trusted counter. Reads field fuses FUSF_FUSE_NUM_E::ROM_T_CNT(). */
+ uint64_t spi_safemode : 1; /**< [ 31: 31](SRO) Reserved.
+ Internal:
+ SPI safemode. Reads field fuses FUSF_FUSE_NUM_E::SPI_SAFEMODE. */
+ uint64_t rom_script_disable : 1; /**< [ 30: 30](SRO) ROM script disable. Reads field fuses FUSF_FUSE_NUM_E::ROM_SCRIPT_DISABLE. */
+ uint64_t fuse16 : 14; /**< [ 29: 16](SRO) Reserved. */
+ uint64_t crypt_ssk_dis : 1; /**< [ 15: 15](SRO) SSK crypt disable. Reads field fuse FUSF_FUSE_NUM_E::CRYPT_SSK_DIS. */
+ uint64_t crypt_no_dis : 1; /**< [ 14: 14](SRO) No-crypt disable. Reads field fuse FUSF_FUSE_NUM_E::CRYPT_NO_DIS. */
+ uint64_t fj_dis_huk : 1; /**< [ 13: 13](SRO) Flash-jump HUK secret hiding. Reads field fuse FUSF_FUSE_NUM_E::DIS_HUK. */
+ uint64_t fj_core0 : 1; /**< [ 12: 12](SRO) Flash-jump core 0 only. Reads field fuse FUSF_FUSE_NUM_E::FJ_CORE0. */
+ uint64_t fj_timeout : 2; /**< [ 11: 10](SRO) Flash-jump timeout. Reads field fuse FUSF_FUSE_NUM_E::FJ_TIMEOUT(). */
+ uint64_t fj_dis : 1; /**< [ 9: 9](SRO) Flash-jump disable. Reads field fuse FUSF_FUSE_NUM_E::FJ_DIS. */
+ uint64_t tz_force2 : 1; /**< [ 8: 8](SRO) Trusted mode force override. Reads field fuse FUSF_FUSE_NUM_E::TZ_FORCE2. */
+ uint64_t trust_lvl_chk : 1; /**< [ 7: 7](SRO) Trust level check. Reads field fuse FUSF_FUSE_NUM_E::TRUST_LVL_CHK. */
+ uint64_t mfg_lck : 1; /**< [ 6: 6](SRO) Manufacturing lock. Reads field fuse FUSF_FUSE_NUM_E::MFG_LCK. */
+ uint64_t fuse5 : 1; /**< [ 5: 5](SRO) Unallocated fuse. */
+ uint64_t trust_dis : 1; /**< [ 4: 4](SRO) Disable Trustzone. Reads field fuse FUSF_FUSE_NUM_E::TRUST_DIS. */
+ uint64_t sw_lck : 1; /**< [ 3: 3](SRO) Software fuse lockdown. Reads field fuse FUSF_FUSE_NUM_E::SW_LCK. */
+ uint64_t rot_lck : 1; /**< [ 2: 2](SRO) Root-of-trust fuse lockdown. Reads field fuse FUSF_FUSE_NUM_E::ROT_LCK. */
+ uint64_t ssk_lck : 1; /**< [ 1: 1](SRO) Secret symmetric key fuse lockdown. Reads field fuse FUSF_FUSE_NUM_E::SSK_LCK. */
+ uint64_t fusf_lck : 1; /**< [ 0: 0](SRO) Total field fuse lockdown. Reads field fuse FUSF_FUSE_NUM_E::FUSF_LCK. */
+#else /* Word 0 - Little Endian */
+ uint64_t fusf_lck : 1; /**< [ 0: 0](SRO) Total field fuse lockdown. Reads field fuse FUSF_FUSE_NUM_E::FUSF_LCK. */
+ uint64_t ssk_lck : 1; /**< [ 1: 1](SRO) Secret symmetric key fuse lockdown. Reads field fuse FUSF_FUSE_NUM_E::SSK_LCK. */
+ uint64_t rot_lck : 1; /**< [ 2: 2](SRO) Root-of-trust fuse lockdown. Reads field fuse FUSF_FUSE_NUM_E::ROT_LCK. */
+ uint64_t sw_lck : 1; /**< [ 3: 3](SRO) Software fuse lockdown. Reads field fuse FUSF_FUSE_NUM_E::SW_LCK. */
+ uint64_t trust_dis : 1; /**< [ 4: 4](SRO) Disable Trustzone. Reads field fuse FUSF_FUSE_NUM_E::TRUST_DIS. */
+ uint64_t fuse5 : 1; /**< [ 5: 5](SRO) Unallocated fuse. */
+ uint64_t mfg_lck : 1; /**< [ 6: 6](SRO) Manufacturing lock. Reads field fuse FUSF_FUSE_NUM_E::MFG_LCK. */
+ uint64_t trust_lvl_chk : 1; /**< [ 7: 7](SRO) Trust level check. Reads field fuse FUSF_FUSE_NUM_E::TRUST_LVL_CHK. */
+ uint64_t tz_force2 : 1; /**< [ 8: 8](SRO) Trusted mode force override. Reads field fuse FUSF_FUSE_NUM_E::TZ_FORCE2. */
+ uint64_t fj_dis : 1; /**< [ 9: 9](SRO) Flash-jump disable. Reads field fuse FUSF_FUSE_NUM_E::FJ_DIS. */
+ uint64_t fj_timeout : 2; /**< [ 11: 10](SRO) Flash-jump timeout. Reads field fuse FUSF_FUSE_NUM_E::FJ_TIMEOUT(). */
+ uint64_t fj_core0 : 1; /**< [ 12: 12](SRO) Flash-jump core 0 only. Reads field fuse FUSF_FUSE_NUM_E::FJ_CORE0. */
+ uint64_t fj_dis_huk : 1; /**< [ 13: 13](SRO) Flash-jump HUK secret hiding. Reads field fuse FUSF_FUSE_NUM_E::DIS_HUK. */
+ uint64_t crypt_no_dis : 1; /**< [ 14: 14](SRO) No-crypt disable. Reads field fuse FUSF_FUSE_NUM_E::CRYPT_NO_DIS. */
+ uint64_t crypt_ssk_dis : 1; /**< [ 15: 15](SRO) SSK crypt disable. Reads field fuse FUSF_FUSE_NUM_E::CRYPT_SSK_DIS. */
+ uint64_t fuse16 : 14; /**< [ 29: 16](SRO) Reserved. */
+ uint64_t rom_script_disable : 1; /**< [ 30: 30](SRO) ROM script disable. Reads field fuses FUSF_FUSE_NUM_E::ROM_SCRIPT_DISABLE. */
+ uint64_t spi_safemode : 1; /**< [ 31: 31](SRO) Reserved.
+ Internal:
+ SPI safemode. Reads field fuses FUSF_FUSE_NUM_E::SPI_SAFEMODE. */
+ uint64_t rom_t_cnt : 32; /**< [ 63: 32](SRO) ROM trusted counter. Reads field fuses FUSF_FUSE_NUM_E::ROM_T_CNT(). */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_fusf_ctl_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t rom_t_cnt : 32; /**< [ 63: 32](SRO) ROM trusted counter. Reads field fuses FUSF_FUSE_NUM_E::ROM_T_CNT(). */
+ uint64_t reserved_16_31 : 16;
+ uint64_t crypt_ssk_dis : 1; /**< [ 15: 15](SRO) SSK crypt disable. Reads field fuse FUSF_FUSE_NUM_E::CRYPT_SSK_DIS. */
+ uint64_t crypt_no_dis : 1; /**< [ 14: 14](SRO) No-crypt disable. Reads field fuse FUSF_FUSE_NUM_E::CRYPT_NO_DIS. */
+ uint64_t fj_dis_huk : 1; /**< [ 13: 13](SRO) Flash-jump HUK secret hiding. Reads field fuse FUSF_FUSE_NUM_E::FJ_DIS_HUK. */
+ uint64_t fj_core0 : 1; /**< [ 12: 12](SRO) Flash-jump core 0 only. Reads field fuse FUSF_FUSE_NUM_E::FJ_CORE0. */
+ uint64_t fj_timeout : 2; /**< [ 11: 10](SRO) Flash-jump timeout. Reads field fuse FUSF_FUSE_NUM_E::FJ_TIMEOUT(). */
+ uint64_t fj_dis : 1; /**< [ 9: 9](SRO) Flash-jump disable. Reads field fuse FUSF_FUSE_NUM_E::FJ_DIS. */
+ uint64_t tz_force2 : 1; /**< [ 8: 8](SRO) Trusted mode force override. Reads field fuse FUSF_FUSE_NUM_E::TZ_FORCE2. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t sw_lck : 1; /**< [ 3: 3](SRO) Software fuse lockdown. Reads field fuse FUSF_FUSE_NUM_E::SW_LCK. */
+ uint64_t rot_lck : 1; /**< [ 2: 2](SRO) Root-of-trust fuse lockdown. Reads field fuse FUSF_FUSE_NUM_E::ROT_LCK. */
+ uint64_t ssk_lck : 1; /**< [ 1: 1](SRO) Secret symmetric key fuse lockdown. Reads field fuse FUSF_FUSE_NUM_E::SSK_LCK. */
+ uint64_t fusf_lck : 1; /**< [ 0: 0](SRO) Total field fuse lockdown. Reads field fuse FUSF_FUSE_NUM_E::FUSF_LCK. */
+#else /* Word 0 - Little Endian */
+ uint64_t fusf_lck : 1; /**< [ 0: 0](SRO) Total field fuse lockdown. Reads field fuse FUSF_FUSE_NUM_E::FUSF_LCK. */
+ uint64_t ssk_lck : 1; /**< [ 1: 1](SRO) Secret symmetric key fuse lockdown. Reads field fuse FUSF_FUSE_NUM_E::SSK_LCK. */
+ uint64_t rot_lck : 1; /**< [ 2: 2](SRO) Root-of-trust fuse lockdown. Reads field fuse FUSF_FUSE_NUM_E::ROT_LCK. */
+ uint64_t sw_lck : 1; /**< [ 3: 3](SRO) Software fuse lockdown. Reads field fuse FUSF_FUSE_NUM_E::SW_LCK. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t tz_force2 : 1; /**< [ 8: 8](SRO) Trusted mode force override. Reads field fuse FUSF_FUSE_NUM_E::TZ_FORCE2. */
+ uint64_t fj_dis : 1; /**< [ 9: 9](SRO) Flash-jump disable. Reads field fuse FUSF_FUSE_NUM_E::FJ_DIS. */
+ uint64_t fj_timeout : 2; /**< [ 11: 10](SRO) Flash-jump timeout. Reads field fuse FUSF_FUSE_NUM_E::FJ_TIMEOUT(). */
+ uint64_t fj_core0 : 1; /**< [ 12: 12](SRO) Flash-jump core 0 only. Reads field fuse FUSF_FUSE_NUM_E::FJ_CORE0. */
+ uint64_t fj_dis_huk : 1; /**< [ 13: 13](SRO) Flash-jump HUK secret hiding. Reads field fuse FUSF_FUSE_NUM_E::FJ_DIS_HUK. */
+ uint64_t crypt_no_dis : 1; /**< [ 14: 14](SRO) No-crypt disable. Reads field fuse FUSF_FUSE_NUM_E::CRYPT_NO_DIS. */
+ uint64_t crypt_ssk_dis : 1; /**< [ 15: 15](SRO) SSK crypt disable. Reads field fuse FUSF_FUSE_NUM_E::CRYPT_SSK_DIS. */
+ uint64_t reserved_16_31 : 16;
+ uint64_t rom_t_cnt : 32; /**< [ 63: 32](SRO) ROM trusted counter. Reads field fuses FUSF_FUSE_NUM_E::ROM_T_CNT(). */
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_fusf_ctl_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t rom_t_cnt : 32; /**< [ 63: 32](SRO) ROM trusted counter. Reads field fuses FUSF_FUSE_NUM_E::ROM_T_CNT(). */
+ uint64_t spi_safemode : 1; /**< [ 31: 31](SRO) Reserved.
+ Internal:
+ SPI safemode. Reads field fuses FUSF_FUSE_NUM_E::SPI_SAFEMODE. */
+ uint64_t rom_script_disable : 1; /**< [ 30: 30](SRO) ROM script disable. Reads field fuses FUSF_FUSE_NUM_E::ROM_SCRIPT_DISABLE. */
+ uint64_t fuse16 : 14; /**< [ 29: 16](SRO) Reserved. */
+ uint64_t crypt_ssk_dis : 1; /**< [ 15: 15](SRO) SSK crypt disable. Reads field fuse FUSF_FUSE_NUM_E::CRYPT_SSK_DIS. */
+ uint64_t crypt_no_dis : 1; /**< [ 14: 14](SRO) No-crypt disable. Reads field fuse FUSF_FUSE_NUM_E::CRYPT_NO_DIS. */
+ uint64_t fj_dis_huk : 1; /**< [ 13: 13](SRO) Flash-jump HUK secret hiding. Reads field fuse FUSF_FUSE_NUM_E::DIS_HUK. */
+ uint64_t fj_core0 : 1; /**< [ 12: 12](SRO) Flash-jump core 0 only. Reads field fuse FUSF_FUSE_NUM_E::FJ_CORE0. */
+ uint64_t fj_timeout : 2; /**< [ 11: 10](SRO) Flash-jump timeout. Reads field fuse FUSF_FUSE_NUM_E::FJ_TIMEOUT(). */
+ uint64_t fj_dis : 1; /**< [ 9: 9](SRO) Flash-jump disable. Reads field fuse FUSF_FUSE_NUM_E::FJ_DIS. */
+ uint64_t tz_force2 : 1; /**< [ 8: 8](SRO) Trusted mode force override. Reads field fuse FUSF_FUSE_NUM_E::TZ_FORCE2. */
+ uint64_t trust_lvl_chk : 1; /**< [ 7: 7](SRO) Trust level check. Reads field fuse FUSF_FUSE_NUM_E::TRUST_LVL_CHK. */
+ uint64_t mfg_lck : 1; /**< [ 6: 6](SRO) Manufacturing lock. Reads field fuse FUSF_FUSE_NUM_E::MFG_LCK. */
+ uint64_t fuse5 : 1; /**< [ 5: 5](SRO) Unallocated fuse. */
+ uint64_t trust_dis : 1; /**< [ 4: 4](SRO) Disable Trustzone. Reads field fuse FUSF_FUSE_NUM_E::TRUST_DIS. */
+ uint64_t sw_lck : 1; /**< [ 3: 3](SRO) Software fuse lockdown. Reads field fuse FUSF_FUSE_NUM_E::SW_LCK. */
+ uint64_t rot_lck : 1; /**< [ 2: 2](SRO) Root-of-trust fuse lockdown. Reads field fuse FUSF_FUSE_NUM_E::ROT_LCK. */
+ uint64_t ssk_lck : 1; /**< [ 1: 1](SRO) Secret symmetric key fuse lockdown. Reads field fuse FUSF_FUSE_NUM_E::SSK_LCK. */
+ uint64_t fusf_lck : 1; /**< [ 0: 0](SRO) Total field fuse lockdown. Reads field fuse FUSF_FUSE_NUM_E::FUSF_LCK. */
+#else /* Word 0 - Little Endian */
+ uint64_t fusf_lck : 1; /**< [ 0: 0](SRO) Total field fuse lockdown. Reads field fuse FUSF_FUSE_NUM_E::FUSF_LCK. */
+ uint64_t ssk_lck : 1; /**< [ 1: 1](SRO) Secret symmetric key fuse lockdown. Reads field fuse FUSF_FUSE_NUM_E::SSK_LCK. */
+ uint64_t rot_lck : 1; /**< [ 2: 2](SRO) Root-of-trust fuse lockdown. Reads field fuse FUSF_FUSE_NUM_E::ROT_LCK. */
+ uint64_t sw_lck : 1; /**< [ 3: 3](SRO) Software fuse lockdown. Reads field fuse FUSF_FUSE_NUM_E::SW_LCK. */
+ uint64_t trust_dis : 1; /**< [ 4: 4](SRO) Disable Trustzone. Reads field fuse FUSF_FUSE_NUM_E::TRUST_DIS. */
+ uint64_t fuse5 : 1; /**< [ 5: 5](SRO) Unallocated fuse. */
+ uint64_t mfg_lck : 1; /**< [ 6: 6](SRO) Manufacturing lock. Reads field fuse FUSF_FUSE_NUM_E::MFG_LCK. */
+ uint64_t trust_lvl_chk : 1; /**< [ 7: 7](SRO) Trust level check. Reads field fuse FUSF_FUSE_NUM_E::TRUST_LVL_CHK. */
+ uint64_t tz_force2 : 1; /**< [ 8: 8](SRO) Trusted mode force override. Reads field fuse FUSF_FUSE_NUM_E::TZ_FORCE2. */
+ uint64_t fj_dis : 1; /**< [ 9: 9](SRO) Flash-jump disable. Reads field fuse FUSF_FUSE_NUM_E::FJ_DIS. */
+ uint64_t fj_timeout : 2; /**< [ 11: 10](SRO) Flash-jump timeout. Reads field fuse FUSF_FUSE_NUM_E::FJ_TIMEOUT(). */
+ uint64_t fj_core0 : 1; /**< [ 12: 12](SRO) Flash-jump core 0 only. Reads field fuse FUSF_FUSE_NUM_E::FJ_CORE0. */
+ uint64_t fj_dis_huk : 1; /**< [ 13: 13](SRO) Flash-jump HUK secret hiding. Reads field fuse FUSF_FUSE_NUM_E::DIS_HUK. */
+ uint64_t crypt_no_dis : 1; /**< [ 14: 14](SRO) No-crypt disable. Reads field fuse FUSF_FUSE_NUM_E::CRYPT_NO_DIS. */
+ uint64_t crypt_ssk_dis : 1; /**< [ 15: 15](SRO) SSK crypt disable. Reads field fuse FUSF_FUSE_NUM_E::CRYPT_SSK_DIS. */
+ uint64_t fuse16 : 14; /**< [ 29: 16](SRO) Reserved. */
+ uint64_t rom_script_disable : 1; /**< [ 30: 30](SRO) ROM script disable. Reads field fuses FUSF_FUSE_NUM_E::ROM_SCRIPT_DISABLE. */
+ uint64_t spi_safemode : 1; /**< [ 31: 31](SRO) Reserved.
+ Internal:
+ SPI safemode. Reads field fuses FUSF_FUSE_NUM_E::SPI_SAFEMODE. */
+ uint64_t rom_t_cnt : 32; /**< [ 63: 32](SRO) ROM trusted counter. Reads field fuses FUSF_FUSE_NUM_E::ROM_T_CNT(). */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_fusf_ctl bdk_fusf_ctl_t;
+
+#define BDK_FUSF_CTL BDK_FUSF_CTL_FUNC()
+static inline uint64_t BDK_FUSF_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_FUSF_CTL_FUNC(void)
+{
+ return 0x87e004000000ll;
+}
+
+#define typedef_BDK_FUSF_CTL bdk_fusf_ctl_t
+#define bustype_BDK_FUSF_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_FUSF_CTL "FUSF_CTL"
+#define device_bar_BDK_FUSF_CTL 0x0 /* PF_BAR0 */
+#define busnum_BDK_FUSF_CTL 0
+#define arguments_BDK_FUSF_CTL -1,-1,-1,-1
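
Because FUSF_CTL mirrors the lockdown fuses in a single word, a secure-boot sanity check reduces to one read. A minimal sketch:

/* True once both the root-of-trust and total field-fuse lockdowns are
   blown; the other lock bits follow the layout above. */
static inline int fusf_fully_locked(void)
{
    BDK_CSR_INIT(ctl, bdk_numa_local(), BDK_FUSF_CTL);
    return ctl.s.rot_lck && ctl.s.fusf_lck;
}
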
+
+/**
+ * Register (RSL) fusf_eco
+ *
+ * INTERNAL: FUSF ECO Register
+ */
+union bdk_fusf_eco
+{
+ uint64_t u;
+ struct bdk_fusf_eco_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t eco_rw : 32; /**< [ 31: 0](R/W) ECO flops.
+ This field is always reinitialized on a chip domain reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t eco_rw : 32; /**< [ 31: 0](R/W) ECO flops.
+ This field is always reinitialized on a chip domain reset. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_fusf_eco_s cn; */
+};
+typedef union bdk_fusf_eco bdk_fusf_eco_t;
+
+#define BDK_FUSF_ECO BDK_FUSF_ECO_FUNC()
+static inline uint64_t BDK_FUSF_ECO_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_FUSF_ECO_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e004000118ll;
+ __bdk_csr_fatal("FUSF_ECO", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_FUSF_ECO bdk_fusf_eco_t
+#define bustype_BDK_FUSF_ECO BDK_CSR_TYPE_RSL
+#define basename_BDK_FUSF_ECO "FUSF_ECO"
+#define device_bar_BDK_FUSF_ECO 0x0 /* PF_BAR0 */
+#define busnum_BDK_FUSF_ECO 0
+#define arguments_BDK_FUSF_ECO -1,-1,-1,-1
+
+/**
+ * Register (RSL) fusf_ek#
+ *
+ * Field Fuse ECC Private Endorsement Key Registers
+ */
+union bdk_fusf_ekx
+{
+ uint64_t u;
+ struct bdk_fusf_ekx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t dat : 64; /**< [ 63: 0](SRO) ECC private endorsement key. Reads field fuses FUSF_FUSE_NUM_E::EK(). */
+#else /* Word 0 - Little Endian */
+ uint64_t dat : 64; /**< [ 63: 0](SRO) ECC private endorsement key. Reads field fuses FUSF_FUSE_NUM_E::EK(). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_fusf_ekx_s cn; */
+};
+typedef union bdk_fusf_ekx bdk_fusf_ekx_t;
+
+static inline uint64_t BDK_FUSF_EKX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_FUSF_EKX(unsigned long a)
+{
+ if (a<=3)
+ return 0x87e0040000a0ll + 8ll * ((a) & 0x3);
+ __bdk_csr_fatal("FUSF_EKX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_FUSF_EKX(a) bdk_fusf_ekx_t
+#define bustype_BDK_FUSF_EKX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_FUSF_EKX(a) "FUSF_EKX"
+#define device_bar_BDK_FUSF_EKX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_FUSF_EKX(a) (a)
+#define arguments_BDK_FUSF_EKX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) fusf_huk#
+ *
+ * Field Fuse Hardware Unique Key Registers
+ */
+union bdk_fusf_hukx
+{
+ uint64_t u;
+ struct bdk_fusf_hukx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t dat : 64; /**< [ 63: 0](SRO) Hardware unique key (HUK). Reads field fuses FUSF_FUSE_NUM_E::HUK(). */
+#else /* Word 0 - Little Endian */
+ uint64_t dat : 64; /**< [ 63: 0](SRO) Hardware unique key (HUK). Reads field fuses FUSF_FUSE_NUM_E::HUK(). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_fusf_hukx_s cn; */
+};
+typedef union bdk_fusf_hukx bdk_fusf_hukx_t;
+
+static inline uint64_t BDK_FUSF_HUKX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_FUSF_HUKX(unsigned long a)
+{
+ if (a<=1)
+ return 0x87e004000090ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("FUSF_HUKX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_FUSF_HUKX(a) bdk_fusf_hukx_t
+#define bustype_BDK_FUSF_HUKX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_FUSF_HUKX(a) "FUSF_HUKX"
+#define device_bar_BDK_FUSF_HUKX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_FUSF_HUKX(a) (a)
+#define arguments_BDK_FUSF_HUKX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) fusf_prog
+ *
+ * Field Fuse Programming Register
+ */
+union bdk_fusf_prog
+{
+ uint64_t u;
+ struct bdk_fusf_prog_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t efuse : 1; /**< [ 15: 15](SR/W) Efuse storage. When set, the data is written directly to the efuse
+ bank. When cleared, data is soft blown to local storage.
+ A soft blown fuse is subject to lockdown fuses.
+ Soft blown fuses will become active after a chip domain reset
+ but will not persist through a cold domain reset. */
+ uint64_t voltage : 1; /**< [ 14: 14](SRO) Efuse programming voltage status. When set, EFUSE banks have
+ programming voltage applied. Required to be set when
+ programming fuses (i.e. [EFUSE] and [PROG] set). */
+ uint64_t reserved_8_13 : 6;
+ uint64_t addr : 4; /**< [ 7: 4](SR/W) Indicates which bank of 128 fuses to blow. Software
+ should not change this field while the FUSF_PROG[PROG] bit is set. */
+ uint64_t reserved_3 : 1;
+ uint64_t prog_pin : 1; /**< [ 2: 2](SRO) Efuse program voltage (EFUS_PROG) is applied.
+ Internal:
+ Indicates state of pi_efuse_pgm_ext not pi_efuse_pgm_int. */
+ uint64_t sft : 1; /**< [ 1: 1](SR/W/H) When set with [PROG], causes only the local storage to change and will not blow
+ any fuses. Hardware will clear when the program operation is complete. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t sft : 1; /**< [ 1: 1](SR/W/H) When set with [PROG], causes only the local storage to change and will not blow
+ any fuses. Hardware will clear when the program operation is complete. */
+ uint64_t prog_pin : 1; /**< [ 2: 2](SRO) Efuse program voltage (EFUS_PROG) is applied.
+ Internal:
+ Indicates state of pi_efuse_pgm_ext not pi_efuse_pgm_int. */
+ uint64_t reserved_3 : 1;
+ uint64_t addr : 4; /**< [ 7: 4](SR/W) Indicates which bank of 128 fuses to blow. Software
+ should not change this field while the FUSF_PROG[PROG] bit is set. */
+ uint64_t reserved_8_13 : 6;
+ uint64_t voltage : 1; /**< [ 14: 14](SRO) Efuse programming voltage status. When set, EFUSE banks have
+ programming voltage applied. Required to be set when
+ programming fuses (i.e. [EFUSE] and [PROG] set). */
+ uint64_t efuse : 1; /**< [ 15: 15](SR/W) Efuse storage. When set, the data is written directly to the efuse
+ bank. When cleared, data is soft blown to local storage.
+ A soft blown fuse is subject to lockdown fuses.
+ Soft blown fuses will become active after a chip domain reset
+ but will not persist through a cold domain reset. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_fusf_prog_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t volt_en : 1; /**< [ 3: 3](SWO) Enable programming voltage. Asserts EFUSE_ENABLE_L open-drain output pin. */
+ uint64_t prog_pin : 1; /**< [ 2: 2](SRO) Efuse program voltage (EFUS_PROG) is applied.
+ Internal:
+ Indicates state of pi_efuse_pgm_ext not pi_efuse_pgm_int. */
+ uint64_t sft : 1; /**< [ 1: 1](SR/W/H) When set with [PROG], causes only the local storage to change and will not blow
+ any fuses. Hardware will clear when the program operation is complete. */
+ uint64_t prog : 1; /**< [ 0: 0](SR/W/H) When written to 1 by software, blow the fuse bank. Hardware clears this bit when
+ the program operation is complete.
+
+ To write a bank of fuses, software must set FUSF_WADR[ADDR] to the bank to be
+ programmed and then set each bit within FUSF_BNK_DATX to indicate which fuses to blow.
+
+ Once FUSF_WADR[ADDR] and the DAT registers are set up, software can write to
+ FUSF_PROG[PROG] to start the bank write and poll on [PROG]. Once [PROG] is clear,
+ the bank write is complete. MIO_FUS_READ_TIMES[WRSTB_WH] sets the time for the hardware to
+ clear this bit. A soft blow is still subject to lockdown fuses. After a
+ soft/warm reset, the chip behaves as though the fuses were actually blown. A
+ cold reset restores the actual fuse values. */
+#else /* Word 0 - Little Endian */
+ uint64_t prog : 1; /**< [ 0: 0](SR/W/H) When written to 1 by software, blow the fuse bank. Hardware clears this bit when
+ the program operation is complete.
+
+ To write a bank of fuses, software must set FUSF_WADR[ADDR] to the bank to be
+ programmed and then set each bit within FUSF_BNK_DATX to indicate which fuses to blow.
+
+ Once FUSF_WADR[ADDR] and the DAT registers are set up, software can write to
+ FUSF_PROG[PROG] to start the bank write and poll on [PROG]. Once [PROG] is clear,
+ the bank write is complete. MIO_FUS_READ_TIMES[WRSTB_WH] sets the time for the hardware to
+ clear this bit. A soft blow is still subject to lockdown fuses. After a
+ soft/warm reset, the chip behaves as though the fuses were actually blown. A
+ cold reset restores the actual fuse values. */
+ uint64_t sft : 1; /**< [ 1: 1](SR/W/H) When set with [PROG], causes only the local storage to change and will not blow
+ any fuses. Hardware will clear when the program operation is complete. */
+ uint64_t prog_pin : 1; /**< [ 2: 2](SRO) Efuse program voltage (EFUS_PROG) is applied.
+ Internal:
+ Indicates state of pi_efuse_pgm_ext not pi_efuse_pgm_int. */
+ uint64_t volt_en : 1; /**< [ 3: 3](SWO) Enable programming voltage. Asserts EFUSE_ENABLE_L open-drain output pin. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_fusf_prog_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t efuse : 1; /**< [ 15: 15](SR/W) Efuse storage. When set, the data is written directly to the efuse
+ bank. When cleared, data is soft blown to local storage.
+ A soft blown fuse is subject to lockdown fuses.
+ Soft blown fuses will become active after a chip domain reset
+ but will not persist through a cold domain reset. */
+ uint64_t voltage : 1; /**< [ 14: 14](SRO) Efuse programming voltage status. When set, EFUSE banks have
+ programming voltage applied. Required to be set when
+ programming fuses (i.e. [EFUSE] and [PROG] set). */
+ uint64_t volt_en : 1; /**< [ 13: 13](SR/W) Enable programming voltage. Asserts EFUSE_ENABLE_L open-drain output pin. */
+ uint64_t prog : 1; /**< [ 12: 12](SR/W/H) Internal:
+ When written to one by software, blow the fuse bank. Hardware will
+ clear the field when the program operation is complete.
+ To write a bank of fuses, software must write the fuse data into
+ FUSF_BNK_DAT(). Then it writes the ADDR and EFUSE fields of this register
+ and sets the PROG bit. Hardware will clear [PROG] when the write is
+ completed. New fuses will become active after a chip domain reset. */
+ uint64_t reserved_8_11 : 4;
+ uint64_t addr : 4; /**< [ 7: 4](SR/W) Indicates which bank of 128 fuses to blow. Software
+ should not change this field while the FUSF_PROG[PROG] bit is set. */
+ uint64_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_3 : 4;
+ uint64_t addr : 4; /**< [ 7: 4](SR/W) Indicates which bank of 128 fuses to blow. Software
+ should not change this field while the FUSF_PROG[PROG] bit is set. */
+ uint64_t reserved_8_11 : 4;
+ uint64_t prog : 1; /**< [ 12: 12](SR/W/H) Internal:
+ When written to one by software, blow the fuse bank. Hardware will
+ clear the field when the program operation is complete.
+ To write a bank of fuses, software must write the fuse data into
+ FUSF_BNK_DAT(). Then it writes the ADDR and EFUSE fields of this register
+ and sets the PROG bit. Hardware will clear [PROG] when the write is
+ completed. New fuses will become active after a chip domain reset. */
+ uint64_t volt_en : 1; /**< [ 13: 13](SR/W) Enable programming voltage. Asserts EFUSE_ENABLE_L open-drain output pin. */
+ uint64_t voltage : 1; /**< [ 14: 14](SRO) Efuse programming voltage status. When set, EFUSE banks have
+ programming voltage applied. Required to be set when
+ programming fuses (i.e. [EFUSE] and [PROG] set). */
+ uint64_t efuse : 1; /**< [ 15: 15](SR/W) Efuse storage. When set, the data is written directly to the efuse
+ bank. When cleared, data is soft blown to local storage.
+ A soft blown fuse is subject to lockdown fuses.
+ Soft blown fuses will become active after a chip domain reset
+ but will not persist through a cold domain reset. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_fusf_prog_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t volt_en : 1; /**< [ 3: 3](SR/W) Enable programming voltage. Asserts EFUSE_ENABLE_L open-drain output pin. */
+ uint64_t prog_pin : 1; /**< [ 2: 2](SRO) Efuse program voltage (EFUS_PROG) is applied.
+ Internal:
+ Indicates state of pi_efuse_pgm_ext not pi_efuse_pgm_int. */
+ uint64_t sft : 1; /**< [ 1: 1](SR/W/H) When set with [PROG], causes only the local storage to change and will not blow
+ any fuses. Hardware will clear when the program operation is complete. */
+ uint64_t prog : 1; /**< [ 0: 0](SR/W/H) When written to 1 by software, blow the fuse bank. Hardware clears this bit when
+ the program operation is complete.
+
+ To write a bank of fuses, software must set FUSF_WADR[ADDR] to the bank to be
+ programmed and then set each bit within FUSF_BNK_DATX to indicate which fuses to blow.
+
+ Once FUSF_WADR[ADDR] and the DAT registers are set up, software can write to
+ FUSF_PROG[PROG] to start the bank write and poll on [PROG]. Once [PROG] is clear,
+ the bank write is complete. MIO_FUS_READ_TIMES[WRSTB_WH] sets the time for the hardware to
+ clear this bit. A soft blow is still subject to lockdown fuses. After a
+ soft/warm reset, the chip behaves as though the fuses were actually blown. A
+ cold reset restores the actual fuse values. */
+#else /* Word 0 - Little Endian */
+ uint64_t prog : 1; /**< [ 0: 0](SR/W/H) When written to 1 by software, blow the fuse bank. Hardware clears this bit when
+ the program operation is complete.
+
+ To write a bank of fuses, software must set FUSF_WADR[ADDR] to the bank to be
+ programmed and then set each bit within FUSF_BNK_DATX to indicate which fuses to blow.
+
+ Once FUSF_WADR[ADDR] and the DAT registers are set up, software can write to
+ FUSF_PROG[PROG] to start the bank write and poll on [PROG]. Once [PROG] is clear,
+ the bank write is complete. MIO_FUS_READ_TIMES[WRSTB_WH] sets the time for the hardware to
+ clear this bit. A soft blow is still subject to lockdown fuses. After a
+ soft/warm reset, the chip behaves as though the fuses were actually blown. A
+ cold reset restores the actual fuse values. */
+ uint64_t sft : 1; /**< [ 1: 1](SR/W/H) When set with [PROG], causes only the local storage to change and will not blow
+ any fuses. Hardware will clear when the program operation is complete. */
+ uint64_t prog_pin : 1; /**< [ 2: 2](SRO) Efuse program voltage (EFUS_PROG) is applied.
+ Internal:
+ Indicates state of pi_efuse_pgm_ext not pi_efuse_pgm_int. */
+ uint64_t volt_en : 1; /**< [ 3: 3](SR/W) Enable programming voltage. Asserts EFUSE_ENABLE_L open-drain output pin. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_fusf_prog_cn81xx cn83xx; */
+ /* struct bdk_fusf_prog_cn81xx cn88xxp2; */
+};
+typedef union bdk_fusf_prog bdk_fusf_prog_t;
+
+#define BDK_FUSF_PROG BDK_FUSF_PROG_FUNC()
+static inline uint64_t BDK_FUSF_PROG_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_FUSF_PROG_FUNC(void)
+{
+ return 0x87e004000110ll;
+}
+
+#define typedef_BDK_FUSF_PROG bdk_fusf_prog_t
+#define bustype_BDK_FUSF_PROG BDK_CSR_TYPE_RSL
+#define basename_BDK_FUSF_PROG "FUSF_PROG"
+#define device_bar_BDK_FUSF_PROG 0x0 /* PF_BAR0 */
+#define busnum_BDK_FUSF_PROG 0
+#define arguments_BDK_FUSF_PROG -1,-1,-1,-1
+
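+/* Illustrative sketch of the CN8 bank-programming sequence described in
+ * FUSF_PROG[PROG] above. Assumes the BDK_CSR_READ/BDK_CSR_WRITE helpers and
+ * bdk_node_t from bdk-csr.h/bdk.h, the FUSF_WADR and FUSF_BNK_DAT() registers
+ * defined in this file, and that programming voltage is already applied. A
+ * hedged example, not a reference implementation; a real caller would bound
+ * the [PROG] poll with a timeout. */
+#if 0 /* example only */
+static void fusf_blow_bank_example(bdk_node_t node, int bank,
+                                   uint64_t dat0, uint64_t dat1)
+{
+    bdk_fusf_wadr_t wadr = { .u = 0 };
+    wadr.s.addr = bank;                              /* bank of 128 fuses */
+    BDK_CSR_WRITE(node, BDK_FUSF_WADR, wadr.u);
+    BDK_CSR_WRITE(node, BDK_FUSF_BNK_DATX(0), dat0); /* fuses 63..0 */
+    BDK_CSR_WRITE(node, BDK_FUSF_BNK_DATX(1), dat1); /* fuses 127..64 */
+
+    bdk_fusf_prog_t prog = { .u = 0 };
+    prog.cn88xxp1.prog = 1;                          /* start the bank write */
+    BDK_CSR_WRITE(node, BDK_FUSF_PROG, prog.u);
+    do /* hardware clears [PROG] when the bank write completes */
+        prog.u = BDK_CSR_READ(node, BDK_FUSF_PROG);
+    while (prog.cn88xxp1.prog);
+}
+#endif
+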
+/**
+ * Register (RSL) fusf_rcmd
+ *
+ * Field Fuse Read Command Register
+ * To read an efuse, software writes FUSF_RCMD[ADDR, PEND] with the byte address of
+ * the fuse in question, then polls FUSF_RCMD[PEND]. When [PEND] is clear and the
+ * efuse read went to the efuse banks (i.e. [EFUSE] was set on the read), software
+ * can read FUSF_BNK_DATx, which contains all 128 fuses in the bank associated with
+ * [ADDR].
+ */
+union bdk_fusf_rcmd
+{
+ uint64_t u;
+ struct bdk_fusf_rcmd_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_15_63 : 49;
+ uint64_t voltage : 1; /**< [ 14: 14](SRO) Efuse programming voltage status. When set, EFUSE banks have
+ programming voltage applied. Required to be cleared when
+ reading fuses directly (i.e. [EFUSE] and [PEND] set). */
+ uint64_t reserved_13 : 1;
+ uint64_t pend : 1; /**< [ 12: 12](SR/W/H) Software sets this bit on a write to start the FUSE read operation.
+ Hardware clears the bit when the read is complete and the DAT is
+ valid. MIO_FUS_READ_TIMES[RDSTB_WH] determines the time for this
+ operation. */
+ uint64_t reserved_11 : 1;
+ uint64_t addr_hi : 2; /**< [ 10: 9](SR/W) Upper fuse address bits to extend space beyond 2k fuses. Valid range is
+ 0x0. Enumerated by FUSF_FUSE_NUM_E\<9:8\>. */
+ uint64_t reserved_0_8 : 9;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_8 : 9;
+ uint64_t addr_hi : 2; /**< [ 10: 9](SR/W) Upper fuse address bits to extend space beyond 2k fuses. Valid range is
+ 0x0. Enumerated by FUSF_FUSE_NUM_E\<9:8\>. */
+ uint64_t reserved_11 : 1;
+ uint64_t pend : 1; /**< [ 12: 12](SR/W/H) Software sets this bit on a write to start the FUSE read operation.
+ Hardware clears the bit when the read is complete and the DAT is
+ valid. MIO_FUS_READ_TIMES[RDSTB_WH] determines the time for this
+ operation. */
+ uint64_t reserved_13 : 1;
+ uint64_t voltage : 1; /**< [ 14: 14](SRO) Efuse programming voltage status. When set, EFUSE banks have
+ programming voltage applied. Required to be cleared when
+ reading fuses directly (i.e. [EFUSE] and [PEND] set). */
+ uint64_t reserved_15_63 : 49;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_fusf_rcmd_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t reserved_16_23 : 8;
+ uint64_t reserved_13_15 : 3;
+ uint64_t pend : 1; /**< [ 12: 12](SR/W/H) Software sets this bit on a write to start the FUSE read operation.
+ Hardware clears the bit when the read is complete and the DAT is
+ valid. MIO_FUS_READ_TIMES[RDSTB_WH] determines the time for this
+ operation. */
+ uint64_t reserved_11 : 1;
+ uint64_t addr_hi : 2; /**< [ 10: 9](SR/W) Upper fuse address bits to extend space beyond 2k fuses. Valid range is
+ 0x0. Enumerated by FUSF_FUSE_NUM_E\<9:8\>. */
+ uint64_t efuse : 1; /**< [ 8: 8](SR/W) When set, return data from the efuse storage rather than the local storage.
+ Software should not change this field while FUSF_RCMD[PEND] is set.
+ It should wait for the hardware to clear the bit first. */
+ uint64_t addr : 8; /**< [ 7: 0](SR/W) The byte address of the fuse to read. Enumerated by FUSF_FUSE_NUM_E\<7:0\>. */
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 8; /**< [ 7: 0](SR/W) The byte address of the fuse to read. Enumerated by FUSF_FUSE_NUM_E\<7:0\>. */
+ uint64_t efuse : 1; /**< [ 8: 8](SR/W) When set, return data from the efuse storage rather than the local storage.
+ Software should not change this field while FUSF_RCMD[PEND] is set.
+ It should wait for the hardware to clear the bit first. */
+ uint64_t addr_hi : 2; /**< [ 10: 9](SR/W) Upper fuse address bits to extend space beyond 2k fuses. Valid range is
+ 0x0. Enumerated by FUSF_FUSE_NUM_E\<9:8\>. */
+ uint64_t reserved_11 : 1;
+ uint64_t pend : 1; /**< [ 12: 12](SR/W/H) Software sets this bit on a write to start the FUSE read operation.
+ Hardware clears the bit when the read is complete and the DAT is
+ valid. MIO_FUS_READ_TIMES[RDSTB_WH] determines the time for this
+ operation. */
+ uint64_t reserved_13_15 : 3;
+ uint64_t reserved_16_23 : 8;
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_fusf_rcmd_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t efuse : 1; /**< [ 15: 15](SR/W) Efuse storage. When set, the return data is from the efuse
+ bank directly. When cleared, data is read from the local storage. */
+ uint64_t voltage : 1; /**< [ 14: 14](SRO) Efuse programming voltage status. When set, EFUSE banks have
+ programming voltage applied. Required to be cleared when
+ reading fuses directly (i.e. [EFUSE] and [PEND] set). */
+ uint64_t reserved_13 : 1;
+ uint64_t pend : 1; /**< [ 12: 12](SR/W/H) Software sets this bit to one on a write operation that starts
+ the fuse read operation. Hardware clears this bit when the read
+ operation is complete and FUS_BNK_DAT() is valid.
+ FUS_READ_TIMES[RDSTB_WH] determines the time for the operation
+ to complete. */
+ uint64_t reserved_8_11 : 4;
+ uint64_t addr : 4; /**< [ 7: 4](SR/W) Address. Specifies the bank address of 128 fuses to read.
+ Software should not change this field while [PEND]
+ is set. It must wait for the hardware to clear it. */
+ uint64_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_3 : 4;
+ uint64_t addr : 4; /**< [ 7: 4](SR/W) Address. Specifies the bank address of 128 fuses to read.
+ Software should not change this field while [PEND]
+ is set. It must wait for the hardware to clear it. */
+ uint64_t reserved_8_11 : 4;
+ uint64_t pend : 1; /**< [ 12: 12](SR/W/H) Software sets this bit to one on a write operation that starts
+ the fuse read operation. Hardware clears this bit when the read
+ operation is complete and FUS_BNK_DAT() is valid.
+ FUS_READ_TIMES[RDSTB_WH] determines the time for the operation
+ to complete. */
+ uint64_t reserved_13 : 1;
+ uint64_t voltage : 1; /**< [ 14: 14](SRO) Efuse programming voltage status. When set, EFUSE banks have
+ programming voltage applied. Required to be cleared when
+ reading fuses directly (i.e. [EFUSE] and [PEND] set). */
+ uint64_t efuse : 1; /**< [ 15: 15](SR/W) Efuse storage. When set, the return data is from the efuse
+ bank directly. When cleared, data is read from the local storage. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_fusf_rcmd bdk_fusf_rcmd_t;
+
+#define BDK_FUSF_RCMD BDK_FUSF_RCMD_FUNC()
+static inline uint64_t BDK_FUSF_RCMD_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_FUSF_RCMD_FUNC(void)
+{
+ return 0x87e004000100ll;
+}
+
+#define typedef_BDK_FUSF_RCMD bdk_fusf_rcmd_t
+#define bustype_BDK_FUSF_RCMD BDK_CSR_TYPE_RSL
+#define basename_BDK_FUSF_RCMD "FUSF_RCMD"
+#define device_bar_BDK_FUSF_RCMD 0x0 /* PF_BAR0 */
+#define busnum_BDK_FUSF_RCMD 0
+#define arguments_BDK_FUSF_RCMD -1,-1,-1,-1
+
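+/* Illustrative sketch of the read flow described for FUSF_RCMD above, using
+ * the CN8 field layout. Assumes the BDK_CSR_* helpers and bdk_node_t from
+ * bdk-csr.h/bdk.h and the FUSF_BNK_DAT() registers defined earlier in this
+ * file. Example only; a real caller would bound the [PEND] poll with a
+ * timeout. */
+#if 0 /* example only */
+static void fusf_read_bank_example(bdk_node_t node, int byte_addr,
+                                   uint64_t dat[2])
+{
+    bdk_fusf_rcmd_t rcmd = { .u = 0 };
+    rcmd.cn8.addr = byte_addr; /* byte address, FUSF_FUSE_NUM_E<7:0> */
+    rcmd.cn8.efuse = 1;        /* read the efuse bank, not local storage */
+    rcmd.cn8.pend = 1;         /* writing [PEND] starts the read */
+    BDK_CSR_WRITE(node, BDK_FUSF_RCMD, rcmd.u);
+    do /* hardware clears [PEND] once FUSF_BNK_DAT() is valid */
+        rcmd.u = BDK_CSR_READ(node, BDK_FUSF_RCMD);
+    while (rcmd.cn8.pend);
+    dat[0] = BDK_CSR_READ(node, BDK_FUSF_BNK_DATX(0)); /* fuses 63..0 */
+    dat[1] = BDK_CSR_READ(node, BDK_FUSF_BNK_DATX(1)); /* fuses 127..64 */
+}
+#endif
+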
+/**
+ * Register (RSL) fusf_rotpk#
+ *
+ * Field Fuse Root-of-Trust Public Key Registers
+ */
+union bdk_fusf_rotpkx
+{
+ uint64_t u;
+ struct bdk_fusf_rotpkx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t dat : 64; /**< [ 63: 0](SRO) Hash of the root-of-trust public key (ROTPK). Reads field fuses FUSF_FUSE_NUM_E::ROTPK(). */
+#else /* Word 0 - Little Endian */
+ uint64_t dat : 64; /**< [ 63: 0](SRO) Hash of the root-of-trust public key (ROTPK). Reads field fuses FUSF_FUSE_NUM_E::ROTPK(). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_fusf_rotpkx_s cn; */
+};
+typedef union bdk_fusf_rotpkx bdk_fusf_rotpkx_t;
+
+static inline uint64_t BDK_FUSF_ROTPKX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_FUSF_ROTPKX(unsigned long a)
+{
+ if (a<=3)
+ return 0x87e004000060ll + 8ll * ((a) & 0x3);
+ __bdk_csr_fatal("FUSF_ROTPKX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_FUSF_ROTPKX(a) bdk_fusf_rotpkx_t
+#define bustype_BDK_FUSF_ROTPKX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_FUSF_ROTPKX(a) "FUSF_ROTPKX"
+#define device_bar_BDK_FUSF_ROTPKX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_FUSF_ROTPKX(a) (a)
+#define arguments_BDK_FUSF_ROTPKX(a) (a),-1,-1,-1
+
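+/* Illustrative sketch: assembling the ROTPK hash from the four FUSF_ROTPK()
+ * words defined above. Assumes BDK_CSR_READ and bdk_node_t from
+ * bdk-csr.h/bdk.h. Example only. */
+#if 0 /* example only */
+static void fusf_read_rotpk_example(bdk_node_t node, uint64_t rotpk[4])
+{
+    for (unsigned i = 0; i < 4; i++) /* FUSF_ROTPK(0..3) */
+        rotpk[i] = BDK_CSR_READ(node, BDK_FUSF_ROTPKX(i));
+}
+#endif
+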
+/**
+ * Register (RSL) fusf_ssk#
+ *
+ * Field Fuse Secret Symmetric Key Registers
+ */
+union bdk_fusf_sskx
+{
+ uint64_t u;
+ struct bdk_fusf_sskx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t dat : 64; /**< [ 63: 0](SRO) Secret symmetric key (SSK). Reads field fuses FUSF_FUSE_NUM_E::SSK(). */
+#else /* Word 0 - Little Endian */
+ uint64_t dat : 64; /**< [ 63: 0](SRO) Secret symmetric key (SSK). Reads field fuses FUSF_FUSE_NUM_E::SSK(). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_fusf_sskx_s cn; */
+};
+typedef union bdk_fusf_sskx bdk_fusf_sskx_t;
+
+static inline uint64_t BDK_FUSF_SSKX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_FUSF_SSKX(unsigned long a)
+{
+ if (a<=1)
+ return 0x87e004000080ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("FUSF_SSKX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_FUSF_SSKX(a) bdk_fusf_sskx_t
+#define bustype_BDK_FUSF_SSKX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_FUSF_SSKX(a) "FUSF_SSKX"
+#define device_bar_BDK_FUSF_SSKX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_FUSF_SSKX(a) (a)
+#define arguments_BDK_FUSF_SSKX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) fusf_sw#
+ *
+ * Field Fuse Software Fuses Registers
+ */
+union bdk_fusf_swx
+{
+ uint64_t u;
+ struct bdk_fusf_swx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t dat : 64; /**< [ 63: 0](SRO) Software assigned fuse data. Reads field fuses FUSF_FUSE_NUM_E::SW(). */
+#else /* Word 0 - Little Endian */
+ uint64_t dat : 64; /**< [ 63: 0](SRO) Software assigned fuse data. Reads field fuses FUSF_FUSE_NUM_E::SW(). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_fusf_swx_s cn; */
+};
+typedef union bdk_fusf_swx bdk_fusf_swx_t;
+
+static inline uint64_t BDK_FUSF_SWX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_FUSF_SWX(unsigned long a)
+{
+ if (a<=7)
+ return 0x87e0040000c0ll + 8ll * ((a) & 0x7);
+ __bdk_csr_fatal("FUSF_SWX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_FUSF_SWX(a) bdk_fusf_swx_t
+#define bustype_BDK_FUSF_SWX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_FUSF_SWX(a) "FUSF_SWX"
+#define device_bar_BDK_FUSF_SWX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_FUSF_SWX(a) (a)
+#define arguments_BDK_FUSF_SWX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) fusf_wadr
+ *
+ * Field Fuse Write Address Register
+ */
+union bdk_fusf_wadr
+{
+ uint64_t u;
+ struct bdk_fusf_wadr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t addr : 4; /**< [ 3: 0](SR/W) Indicates which of the banks of 128 fuses to blow. Enumerated by FUSF_FUSE_NUM_E\<10:7\>. */
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 4; /**< [ 3: 0](SR/W) Indicates which of the banks of 128 fuses to blow. Enumerated by FUSF_FUSE_NUM_E\<10:7\>. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_fusf_wadr_s cn; */
+};
+typedef union bdk_fusf_wadr bdk_fusf_wadr_t;
+
+#define BDK_FUSF_WADR BDK_FUSF_WADR_FUNC()
+static inline uint64_t BDK_FUSF_WADR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_FUSF_WADR_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x87e004000108ll;
+ __bdk_csr_fatal("FUSF_WADR", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_FUSF_WADR bdk_fusf_wadr_t
+#define bustype_BDK_FUSF_WADR BDK_CSR_TYPE_RSL
+#define basename_BDK_FUSF_WADR "FUSF_WADR"
+#define device_bar_BDK_FUSF_WADR 0x0 /* PF_BAR0 */
+#define busnum_BDK_FUSF_WADR 0
+#define arguments_BDK_FUSF_WADR -1,-1,-1,-1
+
+#endif /* __BDK_CSRS_FUSF_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-gpio.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-gpio.h
new file mode 100644
index 0000000000..d42e3d32e8
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-gpio.h
@@ -0,0 +1,2995 @@
+#ifndef __BDK_CSRS_GPIO_H__
+#define __BDK_CSRS_GPIO_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium GPIO.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Enumeration gpio_assigned_pin_e
+ *
+ * GPIO Assigned Pin Number Enumeration
+ * Enumerates GPIO pin numbers which have certain dedicated hardware and boot usage.
+ */
+#define BDK_GPIO_ASSIGNED_PIN_E_BOOT_REQ (9)
+#define BDK_GPIO_ASSIGNED_PIN_E_BOOT_WAIT (0xe)
+#define BDK_GPIO_ASSIGNED_PIN_E_EJTAG_TCK (0x13)
+#define BDK_GPIO_ASSIGNED_PIN_E_EJTAG_TDI (0x14)
+#define BDK_GPIO_ASSIGNED_PIN_E_EJTAG_TDO (0x12)
+#define BDK_GPIO_ASSIGNED_PIN_E_EJTAG_TMS (0x15)
+#define BDK_GPIO_ASSIGNED_PIN_E_EMMC_POWER (8)
+#define BDK_GPIO_ASSIGNED_PIN_E_FAILED (0xa)
+#define BDK_GPIO_ASSIGNED_PIN_E_FAIL_CODE (0xb)
+#define BDK_GPIO_ASSIGNED_PIN_E_NCSI_CRS_DV (0x1f)
+#define BDK_GPIO_ASSIGNED_PIN_E_NCSI_REF_CLK (0x1a)
+#define BDK_GPIO_ASSIGNED_PIN_E_NCSI_RXD0 (0x1c)
+#define BDK_GPIO_ASSIGNED_PIN_E_NCSI_RXD1 (0x1d)
+#define BDK_GPIO_ASSIGNED_PIN_E_NCSI_RX_ER (0x1e)
+#define BDK_GPIO_ASSIGNED_PIN_E_NCSI_TXD0 (0x18)
+#define BDK_GPIO_ASSIGNED_PIN_E_NCSI_TXD1 (0x19)
+#define BDK_GPIO_ASSIGNED_PIN_E_NCSI_TX_EN (0x1b)
+#define BDK_GPIO_ASSIGNED_PIN_E_PSPI_CLK (0x27)
+#define BDK_GPIO_ASSIGNED_PIN_E_PSPI_CS (0x2a)
+#define BDK_GPIO_ASSIGNED_PIN_E_PSPI_MISO (0x29)
+#define BDK_GPIO_ASSIGNED_PIN_E_PSPI_MOSI (0x28)
+#define BDK_GPIO_ASSIGNED_PIN_E_SPI1_CLK (0x24)
+#define BDK_GPIO_ASSIGNED_PIN_E_SPI1_CS0 (0x25)
+#define BDK_GPIO_ASSIGNED_PIN_E_SPI1_CS1 (0x26)
+#define BDK_GPIO_ASSIGNED_PIN_E_SPI1_IO0 (0x20)
+#define BDK_GPIO_ASSIGNED_PIN_E_SPI1_IO1 (0x21)
+#define BDK_GPIO_ASSIGNED_PIN_E_SPI1_IO2 (0x22)
+#define BDK_GPIO_ASSIGNED_PIN_E_SPI1_IO3 (0x23)
+#define BDK_GPIO_ASSIGNED_PIN_E_SWP_RESET_L (0x2c)
+#define BDK_GPIO_ASSIGNED_PIN_E_SWP_SPI1_CS3 (0x2b)
+#define BDK_GPIO_ASSIGNED_PIN_E_UART7_CTS_L (0x16)
+#define BDK_GPIO_ASSIGNED_PIN_E_UART7_RTS_L (0xc)
+#define BDK_GPIO_ASSIGNED_PIN_E_UART7_SIN (0x17)
+#define BDK_GPIO_ASSIGNED_PIN_E_UART7_SOUT (0xd)
+
+/**
+ * Enumeration gpio_bar_e
+ *
+ * GPIO Base Address Register Enumeration
+ * Enumerates the base address registers.
+ */
+#define BDK_GPIO_BAR_E_GPIO_PF_BAR0_CN8 (0x803000000000ll)
+#define BDK_GPIO_BAR_E_GPIO_PF_BAR0_CN8_SIZE 0x800000ull
+#define BDK_GPIO_BAR_E_GPIO_PF_BAR0_CN9 (0x803000000000ll)
+#define BDK_GPIO_BAR_E_GPIO_PF_BAR0_CN9_SIZE 0x10000ull
+#define BDK_GPIO_BAR_E_GPIO_PF_BAR4 (0x803000f00000ll)
+#define BDK_GPIO_BAR_E_GPIO_PF_BAR4_SIZE 0x100000ull
+
+/**
+ * Enumeration gpio_int_vec_e
+ *
+ * GPIO MSI-X Vector Enumeration
+ * Enumerates the MSI-X interrupt vectors.
+ */
+#define BDK_GPIO_INT_VEC_E_INTR_PINX_CN9(a) (0x36 + 2 * (a))
+#define BDK_GPIO_INT_VEC_E_INTR_PINX_CN81XX(a) (4 + 2 * (a))
+#define BDK_GPIO_INT_VEC_E_INTR_PINX_CN88XX(a) (0x30 + 2 * (a))
+#define BDK_GPIO_INT_VEC_E_INTR_PINX_CN83XX(a) (0x18 + 2 * (a))
+#define BDK_GPIO_INT_VEC_E_INTR_PINX_CLEAR_CN9(a) (0x37 + 2 * (a))
+#define BDK_GPIO_INT_VEC_E_INTR_PINX_CLEAR_CN81XX(a) (5 + 2 * (a))
+#define BDK_GPIO_INT_VEC_E_INTR_PINX_CLEAR_CN88XX(a) (0x31 + 2 * (a))
+#define BDK_GPIO_INT_VEC_E_INTR_PINX_CLEAR_CN83XX(a) (0x19 + 2 * (a))
+#define BDK_GPIO_INT_VEC_E_MC_INTR_PPX(a) (0 + (a))
+
+/**
+ * Enumeration gpio_pin_sel_e
+ *
+ * GPIO Pin Select Enumeration
+ * Enumerates the GPIO pin function selections for GPIO_BIT_CFG()[PIN_SEL].
+ *
+ * Each GPIO pin can be configured as input, output, or bidirectional (input/output),
+ * depending on the GPIO_PIN_SEL_E value used, as described in each value's
+ * description; an illustrative sketch follows these encodings. When a GPIO pin is
+ * used as an input, the value is provided to the described function and is also
+ * readable via GPIO_RX_DAT.
+ *
+ * Multiple GPIO pins may not be configured to point to the same input encoding, or
+ * the input result is unpredictable (e.g. GPIO_BIT_CFG(1)[PIN_SEL] and
+ * GPIO_BIT_CFG(2)[PIN_SEL] cannot both be 0x80).
+ *
+ * If a given select is not assigned to any pin, then that virtual input receives a
+ * logical zero. E.g. if no GPIO_BIT_CFG()[PIN_SEL] has the value ::OCLA_EXT_TRIGGER,
+ * then the GPIO will provide the OCLA block's external trigger input with the value of
+ * zero.
+ *
+ * Internal:
+ * FIXME keep PCM_* as reserved encodings.
+ */
+#define BDK_GPIO_PIN_SEL_E_BOOT_REQ (0x3e0)
+#define BDK_GPIO_PIN_SEL_E_BOOT_WAIT (0x3e1)
+#define BDK_GPIO_PIN_SEL_E_BTS_BFN_CLK (0x506)
+#define BDK_GPIO_PIN_SEL_E_BTS_BFN_IN (0x505)
+#define BDK_GPIO_PIN_SEL_E_BTS_BFN_OUT (0x510)
+#define BDK_GPIO_PIN_SEL_E_BTS_CGBFN_OUT (0x50d)
+#define BDK_GPIO_PIN_SEL_E_BTS_CGCLK_OUT (0x50e)
+#define BDK_GPIO_PIN_SEL_E_BTS_CGTENMS_OUT (0x50c)
+#define BDK_GPIO_PIN_SEL_E_BTS_DAC_CLK (0x511)
+#define BDK_GPIO_PIN_SEL_E_BTS_EXTREFX_CLK(a) (0x500 + (a))
+#define BDK_GPIO_PIN_SEL_E_BTS_PWM_DOUT (0x513)
+#define BDK_GPIO_PIN_SEL_E_BTS_PWM_SCLK (0x512)
+#define BDK_GPIO_PIN_SEL_E_BTS_RFP_IN (0x504)
+#define BDK_GPIO_PIN_SEL_E_BTS_RFP_OUT (0x50f)
+#define BDK_GPIO_PIN_SEL_E_BTS_TPX(a) (0x508 + (a))
+#define BDK_GPIO_PIN_SEL_E_CER_CATERR (0x3fb)
+#define BDK_GPIO_PIN_SEL_E_CER_ERR0 (0x3f8)
+#define BDK_GPIO_PIN_SEL_E_CER_ERR1 (0x3f9)
+#define BDK_GPIO_PIN_SEL_E_CER_ERR2 (0x3fa)
+#define BDK_GPIO_PIN_SEL_E_CGXX_LMACX_RX(a,b) (0x4a0 + 4 * (a) + (b))
+#define BDK_GPIO_PIN_SEL_E_CGXX_LMACX_RXTX(a,b) (0x4e0 + 4 * (a) + (b))
+#define BDK_GPIO_PIN_SEL_E_CGXX_LMACX_TX(a,b) (0x4c0 + 4 * (a) + (b))
+#define BDK_GPIO_PIN_SEL_E_CORE_RESET_IN (0x480)
+#define BDK_GPIO_PIN_SEL_E_CORE_RESET_OUT (0x481)
+#define BDK_GPIO_PIN_SEL_E_EJTAG_TCK (0x3f1)
+#define BDK_GPIO_PIN_SEL_E_EJTAG_TDI (0x3f0)
+#define BDK_GPIO_PIN_SEL_E_EJTAG_TDO (0x3f4)
+#define BDK_GPIO_PIN_SEL_E_EJTAG_TMS (0x3f2)
+#define BDK_GPIO_PIN_SEL_E_GPIO_CLKX_CN8(a) (5 + (a))
+#define BDK_GPIO_PIN_SEL_E_GPIO_CLKX_CN9(a) (0x260 + (a))
+#define BDK_GPIO_PIN_SEL_E_GPIO_CLK_SYNCEX(a) (3 + (a))
+#define BDK_GPIO_PIN_SEL_E_GPIO_PTP_CKOUT (1)
+#define BDK_GPIO_PIN_SEL_E_GPIO_PTP_PPS (2)
+#define BDK_GPIO_PIN_SEL_E_GPIO_PTP_SYSCK (8)
+#define BDK_GPIO_PIN_SEL_E_GPIO_SW (0)
+#define BDK_GPIO_PIN_SEL_E_LMCX_ECC_CN9(a) (0x3d0 + (a))
+#define BDK_GPIO_PIN_SEL_E_LMCX_ECC_CN81XX(a) (0x237 + (a))
+#define BDK_GPIO_PIN_SEL_E_LMCX_ECC_CN88XX(a) (0x70 + (a))
+#define BDK_GPIO_PIN_SEL_E_LMCX_ECC_CN83XX(a) (0x237 + (a))
+#define BDK_GPIO_PIN_SEL_E_MCDX_IN_CN9(a) (0x23f + (a))
+#define BDK_GPIO_PIN_SEL_E_MCDX_IN_CN81XX(a) (0x23f + (a))
+#define BDK_GPIO_PIN_SEL_E_MCDX_IN_CN88XX(a) (0xb0 + (a))
+#define BDK_GPIO_PIN_SEL_E_MCDX_IN_CN83XX(a) (0x23f + (a))
+#define BDK_GPIO_PIN_SEL_E_MCDX_OUT_CN9(a) (0x242 + (a))
+#define BDK_GPIO_PIN_SEL_E_MCDX_OUT_CN81XX(a) (0x242 + (a))
+#define BDK_GPIO_PIN_SEL_E_MCDX_OUT_CN88XX(a) (0x14 + (a))
+#define BDK_GPIO_PIN_SEL_E_MCDX_OUT_CN83XX(a) (0x242 + (a))
+#define BDK_GPIO_PIN_SEL_E_MCP_RESET_IN (0x482)
+#define BDK_GPIO_PIN_SEL_E_MCP_RESET_OUT (0x483)
+#define BDK_GPIO_PIN_SEL_E_NCSI_CRS_DV (0x258)
+#define BDK_GPIO_PIN_SEL_E_NCSI_REF_CLK (0x25c)
+#define BDK_GPIO_PIN_SEL_E_NCSI_RXDX(a) (0x25a + (a))
+#define BDK_GPIO_PIN_SEL_E_NCSI_RX_ER (0x259)
+#define BDK_GPIO_PIN_SEL_E_NCSI_TXDX(a) (0x25e + (a))
+#define BDK_GPIO_PIN_SEL_E_NCSI_TX_EN (0x25d)
+#define BDK_GPIO_PIN_SEL_E_OCLA_EXT_TRIGGER_CN9 (0x231)
+#define BDK_GPIO_PIN_SEL_E_OCLA_EXT_TRIGGER_CN81XX (0x231)
+#define BDK_GPIO_PIN_SEL_E_OCLA_EXT_TRIGGER_CN88XX (0x8a)
+#define BDK_GPIO_PIN_SEL_E_OCLA_EXT_TRIGGER_CN83XX (0x231)
+#define BDK_GPIO_PIN_SEL_E_PBUS_ADX(a) (0xfa + (a))
+#define BDK_GPIO_PIN_SEL_E_PBUS_ALEX(a) (0xe8 + (a))
+#define BDK_GPIO_PIN_SEL_E_PBUS_CEX(a) (0xec + (a))
+#define BDK_GPIO_PIN_SEL_E_PBUS_CLE (0xe0)
+#define BDK_GPIO_PIN_SEL_E_PBUS_DIR (0xe4)
+#define BDK_GPIO_PIN_SEL_E_PBUS_DMACKX(a) (0xe6 + (a))
+#define BDK_GPIO_PIN_SEL_E_PBUS_DMARQX(a) (0x11a + (a))
+#define BDK_GPIO_PIN_SEL_E_PBUS_OE (0xe3)
+#define BDK_GPIO_PIN_SEL_E_PBUS_WAIT (0xe1)
+#define BDK_GPIO_PIN_SEL_E_PBUS_WE (0xe2)
+#define BDK_GPIO_PIN_SEL_E_PCM_BCLKX(a) (0x246 + (a))
+#define BDK_GPIO_PIN_SEL_E_PCM_DATAX(a) (0x24c + (a))
+#define BDK_GPIO_PIN_SEL_E_PCM_FSYNCX(a) (0x24a + (a))
+#define BDK_GPIO_PIN_SEL_E_PSPI_CLK (0x28d)
+#define BDK_GPIO_PIN_SEL_E_PSPI_CS (0x290)
+#define BDK_GPIO_PIN_SEL_E_PSPI_MISO (0x28f)
+#define BDK_GPIO_PIN_SEL_E_PSPI_MOSI (0x28e)
+#define BDK_GPIO_PIN_SEL_E_PTP_EVTCNT (0x252)
+#define BDK_GPIO_PIN_SEL_E_PTP_EXT_CLK (0x250)
+#define BDK_GPIO_PIN_SEL_E_PTP_TSTMP (0x251)
+#define BDK_GPIO_PIN_SEL_E_SATAX_ACT_LED_CN9(a) (0x420 + (a))
+#define BDK_GPIO_PIN_SEL_E_SATAX_ACT_LED_CN81XX(a) (0x16a + (a))
+#define BDK_GPIO_PIN_SEL_E_SATAX_ACT_LED_CN88XX(a) (0x60 + (a))
+#define BDK_GPIO_PIN_SEL_E_SATAX_ACT_LED_CN83XX(a) (0x16a + (a))
+#define BDK_GPIO_PIN_SEL_E_SATAX_CP_DET_CN9(a) (0x440 + (a))
+#define BDK_GPIO_PIN_SEL_E_SATAX_CP_DET_CN81XX(a) (0x18b + (a))
+#define BDK_GPIO_PIN_SEL_E_SATAX_CP_DET_CN88XX(a) (0x90 + (a))
+#define BDK_GPIO_PIN_SEL_E_SATAX_CP_DET_CN83XX(a) (0x18b + (a))
+#define BDK_GPIO_PIN_SEL_E_SATAX_CP_POD_CN9(a) (0x400 + (a))
+#define BDK_GPIO_PIN_SEL_E_SATAX_CP_POD_CN81XX(a) (0x145 + (a))
+#define BDK_GPIO_PIN_SEL_E_SATAX_CP_POD_CN88XX(a) (0x50 + (a))
+#define BDK_GPIO_PIN_SEL_E_SATAX_CP_POD_CN83XX(a) (0x145 + (a))
+#define BDK_GPIO_PIN_SEL_E_SATAX_MP_SWITCH_CN9(a) (0x460 + (a))
+#define BDK_GPIO_PIN_SEL_E_SATAX_MP_SWITCH_CN81XX(a) (0x200 + (a))
+#define BDK_GPIO_PIN_SEL_E_SATAX_MP_SWITCH_CN88XX(a) (0xa0 + (a))
+#define BDK_GPIO_PIN_SEL_E_SATAX_MP_SWITCH_CN83XX(a) (0x200 + (a))
+#define BDK_GPIO_PIN_SEL_E_SATA_LAB_LB_CN9 (0x18a)
+#define BDK_GPIO_PIN_SEL_E_SATA_LAB_LB_CN81XX (0x18a)
+#define BDK_GPIO_PIN_SEL_E_SATA_LAB_LB_CN88XX (0x8e)
+#define BDK_GPIO_PIN_SEL_E_SATA_LAB_LB_CN83XX (0x18a)
+#define BDK_GPIO_PIN_SEL_E_SCP_RESET_IN (0x484)
+#define BDK_GPIO_PIN_SEL_E_SCP_RESET_OUT (0x485)
+#define BDK_GPIO_PIN_SEL_E_SGPIO_ACT_LEDX_CN9(a) (0x2c0 + (a))
+#define BDK_GPIO_PIN_SEL_E_SGPIO_ACT_LEDX_CN81XX(a) (0xf + (a))
+#define BDK_GPIO_PIN_SEL_E_SGPIO_ACT_LEDX_CN88XX(a) (0x20 + (a))
+#define BDK_GPIO_PIN_SEL_E_SGPIO_ACT_LEDX_CN83XX(a) (0xf + (a))
+#define BDK_GPIO_PIN_SEL_E_SGPIO_ERR_LEDX_CN9(a) (0x340 + (a))
+#define BDK_GPIO_PIN_SEL_E_SGPIO_ERR_LEDX_CN81XX(a) (0x90 + (a))
+#define BDK_GPIO_PIN_SEL_E_SGPIO_ERR_LEDX_CN88XX(a) (0x30 + (a))
+#define BDK_GPIO_PIN_SEL_E_SGPIO_ERR_LEDX_CN83XX(a) (0x90 + (a))
+#define BDK_GPIO_PIN_SEL_E_SGPIO_LOC_LEDX_CN9(a) (0x300 + (a))
+#define BDK_GPIO_PIN_SEL_E_SGPIO_LOC_LEDX_CN81XX(a) (0x50 + (a))
+#define BDK_GPIO_PIN_SEL_E_SGPIO_LOC_LEDX_CN88XX(a) (0x40 + (a))
+#define BDK_GPIO_PIN_SEL_E_SGPIO_LOC_LEDX_CN83XX(a) (0x50 + (a))
+#define BDK_GPIO_PIN_SEL_E_SGPIO_SCLOCK_CN9 (0x2a0)
+#define BDK_GPIO_PIN_SEL_E_SGPIO_SCLOCK_CN81XX (9)
+#define BDK_GPIO_PIN_SEL_E_SGPIO_SCLOCK_CN88XX (0xb)
+#define BDK_GPIO_PIN_SEL_E_SGPIO_SCLOCK_CN83XX (9)
+#define BDK_GPIO_PIN_SEL_E_SGPIO_SDATAINX_CN9(a) (0x380 + (a))
+#define BDK_GPIO_PIN_SEL_E_SGPIO_SDATAINX_CN81XX(a) (0xd0 + (a))
+#define BDK_GPIO_PIN_SEL_E_SGPIO_SDATAINX_CN88XX(a) (0x80 + (a))
+#define BDK_GPIO_PIN_SEL_E_SGPIO_SDATAINX_CN83XX(a) (0xd0 + (a))
+#define BDK_GPIO_PIN_SEL_E_SGPIO_SDATAOUTX_CN9(a) (0x2b0 + (a))
+#define BDK_GPIO_PIN_SEL_E_SGPIO_SDATAOUTX_CN81XX(a) (0xb + (a))
+#define BDK_GPIO_PIN_SEL_E_SGPIO_SDATAOUTX_CN88XX(a) (0x10 + (a))
+#define BDK_GPIO_PIN_SEL_E_SGPIO_SDATAOUTX_CN83XX(a) (0xb + (a))
+#define BDK_GPIO_PIN_SEL_E_SGPIO_SLOAD_CN9 (0x2a1)
+#define BDK_GPIO_PIN_SEL_E_SGPIO_SLOAD_CN81XX (0xa)
+#define BDK_GPIO_PIN_SEL_E_SGPIO_SLOAD_CN88XX (0xc)
+#define BDK_GPIO_PIN_SEL_E_SGPIO_SLOAD_CN83XX (0xa)
+#define BDK_GPIO_PIN_SEL_E_SPI0_CSX(a) (0x278 + (a))
+#define BDK_GPIO_PIN_SEL_E_SPI1_CLK (0x280)
+#define BDK_GPIO_PIN_SEL_E_SPI1_CSX(a) (0x284 + (a))
+#define BDK_GPIO_PIN_SEL_E_SPI1_IOX(a) (0x288 + (a))
+#define BDK_GPIO_PIN_SEL_E_SPI_CSX_CN81XX(a) (0x233 + (a))
+#define BDK_GPIO_PIN_SEL_E_SPI_CSX_CN88XX(a) (0x18 + (a))
+#define BDK_GPIO_PIN_SEL_E_SPI_CSX_CN83XX(a) (0x233 + (a))
+#define BDK_GPIO_PIN_SEL_E_TIMER_CN9 (0x11c)
+#define BDK_GPIO_PIN_SEL_E_TIMER_CN81XX (0x11c)
+#define BDK_GPIO_PIN_SEL_E_TIMER_CN88XX (0x8b)
+#define BDK_GPIO_PIN_SEL_E_TIMER_CN83XX (0x11c)
+#define BDK_GPIO_PIN_SEL_E_TIM_GPIO_CLK (0x230)
+#define BDK_GPIO_PIN_SEL_E_TWS_SCLX(a) (0x294 + (a))
+#define BDK_GPIO_PIN_SEL_E_TWS_SDAX(a) (0x29a + (a))
+#define BDK_GPIO_PIN_SEL_E_UARTX_CTS_CN8(a) (0x13f + (a))
+#define BDK_GPIO_PIN_SEL_E_UARTX_CTS_CN9(a) (0x3c0 + (a))
+#define BDK_GPIO_PIN_SEL_E_UARTX_DCD_CN8(a) (0x131 + (a))
+#define BDK_GPIO_PIN_SEL_E_UARTX_DCD_CN9(a) (0x3b0 + (a))
+#define BDK_GPIO_PIN_SEL_E_UARTX_DCD_N(a) (0x84 + (a))
+#define BDK_GPIO_PIN_SEL_E_UARTX_DSR_CN9(a) (0x3b8 + (a))
+#define BDK_GPIO_PIN_SEL_E_UARTX_DSR_CN81XX(a) (0x139 + (a))
+#define BDK_GPIO_PIN_SEL_E_UARTX_DSR_CN88XX(a) (0x88 + (a))
+#define BDK_GPIO_PIN_SEL_E_UARTX_DSR_CN83XX(a) (0x139 + (a))
+#define BDK_GPIO_PIN_SEL_E_UARTX_DTR_CN8(a) (0x11d + (a))
+#define BDK_GPIO_PIN_SEL_E_UARTX_DTR_CN9(a) (0x390 + (a))
+#define BDK_GPIO_PIN_SEL_E_UARTX_DTR_N(a) (9 + (a))
+#define BDK_GPIO_PIN_SEL_E_UARTX_RI_CN9(a) (0x3a8 + (a))
+#define BDK_GPIO_PIN_SEL_E_UARTX_RI_CN81XX(a) (0x129 + (a))
+#define BDK_GPIO_PIN_SEL_E_UARTX_RI_CN88XX(a) (0x86 + (a))
+#define BDK_GPIO_PIN_SEL_E_UARTX_RI_CN83XX(a) (0x129 + (a))
+#define BDK_GPIO_PIN_SEL_E_UARTX_RTS_CN8(a) (0x123 + (a))
+#define BDK_GPIO_PIN_SEL_E_UARTX_RTS_CN9(a) (0x398 + (a))
+#define BDK_GPIO_PIN_SEL_E_UARTX_SIN_CN8(a) (0x141 + (a))
+#define BDK_GPIO_PIN_SEL_E_UARTX_SIN_CN9(a) (0x3c8 + (a))
+#define BDK_GPIO_PIN_SEL_E_UARTX_SOUT_CN8(a) (0x125 + (a))
+#define BDK_GPIO_PIN_SEL_E_UARTX_SOUT_CN9(a) (0x3a0 + (a))
+#define BDK_GPIO_PIN_SEL_E_USBX_OVR_CRNT_CN9(a) (0x228 + (a))
+#define BDK_GPIO_PIN_SEL_E_USBX_OVR_CRNT_CN81XX(a) (0x228 + (a))
+#define BDK_GPIO_PIN_SEL_E_USBX_OVR_CRNT_CN88XX(a) (0x8c + (a))
+#define BDK_GPIO_PIN_SEL_E_USBX_OVR_CRNT_CN83XX(a) (0x228 + (a))
+#define BDK_GPIO_PIN_SEL_E_USBX_VBUS_CTRL_CN9(a) (0x220 + (a))
+#define BDK_GPIO_PIN_SEL_E_USBX_VBUS_CTRL_CN81XX(a) (0x220 + (a))
+#define BDK_GPIO_PIN_SEL_E_USBX_VBUS_CTRL_CN88XX(a) (0x74 + (a))
+#define BDK_GPIO_PIN_SEL_E_USBX_VBUS_CTRL_CN83XX(a) (0x220 + (a))
+
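+/* Illustrative sketch (referenced from the GPIO_PIN_SEL_E description above):
+ * configuring a pin as a software-controlled output by selecting
+ * GPIO_PIN_SEL_E::GPIO_SW in GPIO_BIT_CFG() and enabling its output driver,
+ * then driving it high through GPIO_TX_SET (both registers are assumed to be
+ * defined later in this file). Assumes BDK_CSR_WRITE and bdk_node_t from
+ * bdk-csr.h/bdk.h. Example only. */
+#if 0 /* example only */
+static void gpio_sw_output_example(bdk_node_t node, int pin)
+{
+    bdk_gpio_bit_cfgx_t cfg = { .u = 0 };
+    cfg.s.pin_sel = BDK_GPIO_PIN_SEL_E_GPIO_SW; /* software GPIO function */
+    cfg.s.tx_oe = 1;                            /* enable the output driver */
+    BDK_CSR_WRITE(node, BDK_GPIO_BIT_CFGX(pin), cfg.u);
+    BDK_CSR_WRITE(node, BDK_GPIO_TX_SET, 1ull << pin); /* drive the pin high */
+}
+#endif
+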
+/**
+ * Enumeration gpio_strap_pin_e
+ *
+ * GPIO Strap Pin Number Enumeration
+ * Enumerates GPIO pin numbers with their associated strap functions. The names of
+ * these values are used as the documented name of each
+ * strap. e.g. GPIO_STRAP_PIN_E::BOOT_METHOD0 describes the GPIO0/BOOT_METHOD0 strap.
+ */
+#define BDK_GPIO_STRAP_PIN_E_AVS_DISABLE (9)
+#define BDK_GPIO_STRAP_PIN_E_BOOT_METHOD0 (0)
+#define BDK_GPIO_STRAP_PIN_E_BOOT_METHOD1 (1)
+#define BDK_GPIO_STRAP_PIN_E_BOOT_METHOD2 (2)
+#define BDK_GPIO_STRAP_PIN_E_BOOT_METHOD3 (3)
+#define BDK_GPIO_STRAP_PIN_E_BOOT_METHOD4 (0xc)
+#define BDK_GPIO_STRAP_PIN_E_BOOT_METHOD5 (0xd)
+#define BDK_GPIO_STRAP_PIN_E_CCPI_NODE_ID (0xb)
+#define BDK_GPIO_STRAP_PIN_E_GSER_CLK0_TERM_SEL0 (0x10)
+#define BDK_GPIO_STRAP_PIN_E_GSER_CLK0_TERM_SEL1 (0x11)
+#define BDK_GPIO_STRAP_PIN_E_MCP_DBG_ON_GPIO (4)
+#define BDK_GPIO_STRAP_PIN_E_NCSI_ON_GPIO (5)
+#define BDK_GPIO_STRAP_PIN_E_PCIE0_EP_MODE (0x18)
+#define BDK_GPIO_STRAP_PIN_E_PCIE2_EP_MODE (0x19)
+#define BDK_GPIO_STRAP_PIN_E_TRUSTED_MODE (0xa)
+
+/**
+ * Register (NCB) gpio_bit_cfg#
+ *
+ * GPIO Bit Configuration Registers
+ * Each register provides configuration information for the corresponding GPIO pin.
+ *
+ * Each index is only accessible to the requestor(s) permitted with GPIO_BIT_PERMIT().
+ *
+ * When permitted, this register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_gpio_bit_cfgx
+{
+ uint64_t u;
+ struct bdk_gpio_bit_cfgx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_27_63 : 37;
+ uint64_t pin_sel : 11; /**< [ 26: 16](R/W) Selects which signal is reported to the GPIO output, or which signal the GPIO input
+ needs to connect to. Each GPIO pin can be configured as either input-only or output-only
+ depending on the function selected, as enumerated by GPIO_PIN_SEL_E. For GPIO input
+ selects, the GPIO signal used is after the glitch filter and XOR inverter (GPIO_BIT_CFG()[PIN_XOR]). */
+ uint64_t reserved_15 : 1;
+ uint64_t blink_en : 2; /**< [ 14: 13](R/W) GPIO pin output blink filter enable. This function is after the [PIN_SEL]
+ multiplexing but before the [PIN_XOR] inverter and [TX_OD] overdriver
+ conversion. When blink filter is enabled, the pin output will generate blinking
+ pattern based on configuration of GPIO_BLINK_CFG.
+ 0x0 = Disable blink filter.
+ 0x1 = Enable blink filter based on the start of activity.
+ 0x2 = Enable blink filter based on the end of activity, essentially based on
+ inversion of blink filter's input.
+ 0x3 = Disable blink filter. */
+ uint64_t tx_od : 1; /**< [ 12: 12](R/W) GPIO output open-drain conversion. This function is after PIN_SEL MUX
+ and [PIN_XOR] inverter.
+ When set, the pin output will connect to zero, and pin enable
+ will connect to the original pin output. With both [TX_OD] and [PIN_XOR] set, a transmit
+ of 1 will tristate the pin output driver to achieve open-drain function. */
+ uint64_t fil_sel : 4; /**< [ 11: 8](R/W) Filter select. Global counter bit-select (controls sample rate).
+ Filter and XOR inverter are also applicable to GPIO input muxing signals and interrupts. */
+ uint64_t fil_cnt : 4; /**< [ 7: 4](R/W) Filter count. Specifies the number of consecutive samples ([FIL_CNT]+1) to change state.
+ Zero to disable the filter.
+ Filter and XOR inverter are also applicable to GPIO input muxing signals and interrupts. */
+ uint64_t int_type : 1; /**< [ 3: 3](R/W) Type of interrupt when pin is an input and [INT_EN] set. When set, rising edge
+ interrupt, else level interrupt. The GPIO signal used to generate interrupt is after
+ the filter and after the XOR inverter. */
+ uint64_t int_en : 1; /**< [ 2: 2](R/W) Pin controls interrupt.
+
+ If set, assertions of this pin after [PIN_XOR] will set GPIO_INTR()[INTR].
+
+ If set and [INT_TYPE] is clear (level-sensitive), deassertions of this pin
+ after [PIN_XOR] will clear GPIO_INTR()[INTR].
+
+ This does NOT control if interrupts are enabled towards the MSI-X vector,
+ for that see GPIO_INTR()[INTR_ENA]. */
+ uint64_t pin_xor : 1; /**< [ 1: 1](R/W) GPIO data inversion. When set, inverts the receiving or transmitting GPIO signal.
+ For GPIO output, this inversion is after all GPIO [PIN_SEL] muxes. This inversion function
+ is applicable to any GPIO output mux signals, also used to control GPIO_RX_DAT.
+ For GPIO input, this inversion is before the GPIO [PIN_SEL] muxes, as used to control
+ GPIO interrupts. */
+ uint64_t tx_oe : 1; /**< [ 0: 0](R/W) Transmit output enable. When set to 1, the GPIO pin can be driven as an output
+ pin if GPIO_BIT_CFG()[PIN_SEL] selects GPIO_PIN_SEL_E::GPIO_SW. [TX_OE] is only
+ used along with GPIO_TX_SET or GPIO_TX_CLR, and [TX_OE] function is before
+ GPIO_BIT_CFG()[PIN_SEL] mux. */
+#else /* Word 0 - Little Endian */
+ uint64_t tx_oe : 1; /**< [ 0: 0](R/W) Transmit output enable. When set to 1, the GPIO pin can be driven as an output
+ pin if GPIO_BIT_CFG()[PIN_SEL] selects GPIO_PIN_SEL_E::GPIO_SW. [TX_OE] is only
+ used along with GPIO_TX_SET or GPIO_TX_CLR, and [TX_OE] function is before
+ GPIO_BIT_CFG()[PIN_SEL] mux. */
+ uint64_t pin_xor : 1; /**< [ 1: 1](R/W) GPIO data inversion. When set, inverts the receiving or transmitting GPIO signal.
+ For GPIO output, this inversion is after all GPIO [PIN_SEL] muxes. This inversion function
+ is applicable to any GPIO output mux signals, also used to control GPIO_RX_DAT.
+ For GPIO input, this inversion is before the GPIO [PIN_SEL] muxes, as used to control
+ GPIO interrupts. */
+ uint64_t int_en : 1; /**< [ 2: 2](R/W) Pin controls interrupt.
+
+ If set, assertions of this pin after [PIN_XOR] will set GPIO_INTR()[INTR].
+
+ If set and [INT_TYPE] is clear (level-sensitive), deassertions of this pin
+ after [PIN_XOR] will clear GPIO_INTR()[INTR].
+
+ This does NOT control if interrupts are enabled towards the MSI-X vector,
+ for that see GPIO_INTR()[INTR_ENA]. */
+ uint64_t int_type : 1; /**< [ 3: 3](R/W) Type of interrupt when pin is an input and [INT_EN] set. When set, rising edge
+ interrupt, else level interrupt. The GPIO signal used to generate interrupt is after
+ the filter and after the XOR inverter. */
+ uint64_t fil_cnt : 4; /**< [ 7: 4](R/W) Filter count. Specifies the number of consecutive samples ([FIL_CNT]+1) to change state.
+ Zero to disable the filter.
+ Filter and XOR inverter are also applicable to GPIO input muxing signals and interrupts. */
+ uint64_t fil_sel : 4; /**< [ 11: 8](R/W) Filter select. Global counter bit-select (controls sample rate).
+ Filter and XOR inverter are also applicable to GPIO input muxing signals and interrupts. */
+ uint64_t tx_od : 1; /**< [ 12: 12](R/W) GPIO output open-drain conversion. This function is after PIN_SEL MUX
+ and [PIN_XOR] inverter.
+ When set, the pin output will connect to zero, and pin enable
+ will connect to the original pin output. With both [TX_OD] and [PIN_XOR] set, a transmit
+ of 1 will tristate the pin output driver to achieve open-drain function. */
+ uint64_t blink_en : 2; /**< [ 14: 13](R/W) GPIO pin output blink filter enable. This function is after the [PIN_SEL]
+ multiplexing but before the [PIN_XOR] inverter and [TX_OD] overdriver
+ conversion. When blink filter is enabled, the pin output will generate blinking
+ pattern based on configuration of GPIO_BLINK_CFG.
+ 0x0 = Disable blink filter.
+ 0x1 = Enable blink filter based on the start of activity.
+ 0x2 = Enable blink filter based on the end of activity, essentially based on
+ inversion of blink filter's input.
+ 0x3 = Disable blink filter. */
+ uint64_t reserved_15 : 1;
+ uint64_t pin_sel : 11; /**< [ 26: 16](R/W) Selects which signal is reported to the GPIO output, or which signal the GPIO input
+ needs to connect to. Each GPIO pin can be configured as either input-only or output-only
+ depending on the function selected, as enumerated by GPIO_PIN_SEL_E. For GPIO input
+ selects, the GPIO signal used is after the glitch filter and XOR inverter (GPIO_BIT_CFG()[PIN_XOR]). */
+ uint64_t reserved_27_63 : 37;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_gpio_bit_cfgx_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_27_63 : 37;
+ uint64_t pin_sel : 11; /**< [ 26: 16](R/W) Selects which signal is reported to the GPIO output, or which signal the GPIO input
+ needs to connect to. Each GPIO pin can be configured as either input-only, output-only
+ or input/output depending on the function selected, as enumerated by
+ GPIO_PIN_SEL_E. For GPIO input selects, the GPIO signal used is after the glitch
+ filter and XOR inverter (GPIO_BIT_CFG()[PIN_XOR]). */
+ uint64_t reserved_15 : 1;
+ uint64_t blink_en : 2; /**< [ 14: 13](R/W) GPIO pin output blink filter enable. This function is after the [PIN_SEL]
+ multiplexing but before the [PIN_XOR] inverter and [TX_OD] overdriver
+ conversion. When blink filter is enabled, the pin output will generate blinking
+ pattern based on configuration of GPIO_BLINK_CFG.
+ 0x0 = Disable blink filter.
+ 0x1 = Enable blink filter based on the start of activity.
+ 0x2 = Enable blink filter based on the end of activity, essentially based on
+ inversion of blink filter's input.
+ 0x3 = Disable blink filter. */
+ uint64_t tx_od : 1; /**< [ 12: 12](R/W) GPIO output open-drain conversion. This function is after the [PIN_SEL]
+ multiplexing, [PIN_XOR] inverter, and [BLINK_EN] activity filter.
+ When set, the pin output will connect to zero, and pin enable
+ will connect to original pin output. With both [TX_OD] and [PIN_XOR] set, a transmit
+ of 1 will tristate the pin output driver to achieve open-drain function
+ with additional pullup on board. */
+ uint64_t fil_sel : 4; /**< [ 11: 8](R/W) Filter select. Global counter bit-select (controls sample rate).
+ Filter and XOR inverter are also applicable to GPIO input muxing signals and interrupts. */
+ uint64_t fil_cnt : 4; /**< [ 7: 4](R/W) Filter count. Specifies the number of consecutive samples ([FIL_CNT]+1) to change state.
+ Zero to disable the filter.
+ Filter and XOR inverter are also applicable to GPIO input muxing signals and interrupts. */
+ uint64_t int_type : 1; /**< [ 3: 3](R/W) Type of interrupt when pin is an input and [INT_EN] set. When set, rising edge
+ interrupt, else level interrupt. The GPIO signal used to generate interrupt is after
+ the filter and after the XOR inverter. */
+ uint64_t int_en : 1; /**< [ 2: 2](R/W) Pin controls interrupt.
+
+ If set, assertions of this pin after [PIN_XOR] will set GPIO_INTR()[INTR].
+
+ If set and [INT_TYPE] is clear (level-sensitive), deassertions of this pin
+ after [PIN_XOR] will clear GPIO_INTR()[INTR].
+
+ This does NOT control if interrupts are enabled towards the MSI-X vector,
+ for that see GPIO_INTR()[INTR_ENA_W1S]. */
+ uint64_t pin_xor : 1; /**< [ 1: 1](R/W) GPIO data inversion. When set, inverts the receiving or transmitting GPIO signal.
+ For GPIO output, this inversion is after all GPIO [PIN_SEL] muxes. This inversion function
+ is applicable to any GPIO output mux signals, also used to control GPIO_RX_DAT.
+ For GPIO input, this inversion is before the GPIO [PIN_SEL] muxes, as used to control
+ GPIO interrupts. */
+ uint64_t tx_oe : 1; /**< [ 0: 0](R/W) Transmit output enable. When set to 1, the GPIO pin can be driven as an output
+ pin if GPIO_BIT_CFG()[PIN_SEL] selects GPIO_PIN_SEL_E::GPIO_SW. [TX_OE] is only
+ used along with GPIO_TX_SET or GPIO_TX_CLR, and [TX_OE] function is before
+ GPIO_BIT_CFG()[PIN_SEL] mux. */
+#else /* Word 0 - Little Endian */
+ uint64_t tx_oe : 1; /**< [ 0: 0](R/W) Transmit output enable. When set to 1, the GPIO pin can be driven as an output
+ pin if GPIO_BIT_CFG()[PIN_SEL] selects GPIO_PIN_SEL_E::GPIO_SW. [TX_OE] is only
+ used along with GPIO_TX_SET or GPIO_TX_CLR, and [TX_OE] function is before
+ GPIO_BIT_CFG()[PIN_SEL] mux. */
+ uint64_t pin_xor : 1; /**< [ 1: 1](R/W) GPIO data inversion. When set, inverts the receiving or transmitting GPIO signal.
+ For GPIO output, this inversion is after all GPIO [PIN_SEL] muxes. This inversion function
+ is applicable to any GPIO output mux signals, also used to control GPIO_RX_DAT.
+ For GPIO input, this inversion is before the GPIO [PIN_SEL] muxes, as used to control
+ GPIO interrupts. */
+ uint64_t int_en : 1; /**< [ 2: 2](R/W) Pin controls interrupt.
+
+ If set, assertions of this pin after [PIN_XOR] will set GPIO_INTR()[INTR].
+
+ If set and [INT_TYPE] is clear (level-sensitive), deassertions of this pin
+ after [PIN_XOR] will clear GPIO_INTR()[INTR].
+
+ This does NOT control if interrupts are enabled towards the MSI-X vector,
+ for that see GPIO_INTR()[INTR_ENA_W1S]. */
+ uint64_t int_type : 1; /**< [ 3: 3](R/W) Type of interrupt when pin is an input and [INT_EN] set. When set, rising edge
+ interrupt, else level interrupt. The GPIO signal used to generate interrupt is after
+ the filter and after the XOR inverter. */
+ uint64_t fil_cnt : 4; /**< [ 7: 4](R/W) Filter count. Specifies the number of consecutive samples ([FIL_CNT]+1) to change state.
+ Zero to disable the filter.
+ Filter and XOR inverter are also applicable to GPIO input muxing signals and interrupts. */
+ uint64_t fil_sel : 4; /**< [ 11: 8](R/W) Filter select. Global counter bit-select (controls sample rate).
+ Filter and XOR inverter are also applicable to GPIO input muxing signals and interrupts. */
+ uint64_t tx_od : 1; /**< [ 12: 12](R/W) GPIO output open-drain conversion. This function is after the [PIN_SEL]
+ multiplexing, [PIN_XOR] inverter, and [BLINK_EN] activity filter.
+ When set, the pin output will connect to zero, and pin enable
+ will connect to original pin output. With both [TX_OD] and [PIN_XOR] set, a transmit
+ of 1 will tristate the pin output driver to achieve open-drain function
+ with additional pullup on board. */
+ uint64_t blink_en : 2; /**< [ 14: 13](R/W) GPIO pin output blink filter enable. This function is after the [PIN_SEL]
+ multiplexing but before the [PIN_XOR] inverter and [TX_OD] overdriver
+ conversion. When blink filter is enabled, the pin output will generate blinking
+ pattern based on configuration of GPIO_BLINK_CFG.
+ 0x0 = Disable blink filter.
+ 0x1 = Enable blink filter based on the start of activity.
+ 0x2 = Enable blink filter based on the end of activity, essentially based on
+ inversion of blink filter's input.
+ 0x3 = Disable blink filter. */
+ uint64_t reserved_15 : 1;
+ uint64_t pin_sel : 11; /**< [ 26: 16](R/W) Selects which signal is reported to the GPIO output, or which signal the GPIO input
+ needs to connect to. Each GPIO pin can be configured as either input-only, output-only
+ or input/output depending on the function selected, as enumerated by
+ GPIO_PIN_SEL_E. For GPIO input selects, the GPIO signal used is after the glitch
+ filter and XOR inverter (GPIO_BIT_CFG()[PIN_XOR]). */
+ uint64_t reserved_27_63 : 37;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_gpio_bit_cfgx_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_26_63 : 38;
+ uint64_t pin_sel : 10; /**< [ 25: 16](R/W) Selects which signal is reported to the GPIO output, or which signal the GPIO input
+ needs to connect to. Each GPIO pin can be configured as either input-only or output-only
+ depending on the function selected, as enumerated by GPIO_PIN_SEL_E. For GPIO input
+ selects, the GPIO signal used is after the glitch filter and XOR inverter (GPIO_BIT_CFG()[PIN_XOR]). */
+ uint64_t reserved_13_15 : 3;
+ uint64_t tx_od : 1; /**< [ 12: 12](R/W) GPIO output open-drain conversion. This function is after PIN_SEL MUX
+ and [PIN_XOR] inverter.
+ When set, the pin output will connect to zero, and pin enable
+ will connect to the original pin output. With both [TX_OD] and [PIN_XOR] set, a transmit
+ of 1 will tristate the pin output driver to achieve open-drain function. */
+ uint64_t fil_sel : 4; /**< [ 11: 8](R/W) Filter select. Global counter bit-select (controls sample rate).
+ Filter and XOR inverter are also applicable to GPIO input muxing signals and interrupts. */
+ uint64_t fil_cnt : 4; /**< [ 7: 4](R/W) Filter count. Specifies the number of consecutive samples ([FIL_CNT]+1) to change state.
+ Zero to disable the filter.
+ Filter and XOR inverter are also applicable to GPIO input muxing signals and interrupts. */
+ uint64_t int_type : 1; /**< [ 3: 3](R/W) Type of interrupt when pin is an input and [INT_EN] set. When set, rising edge
+ interrupt, else level interrupt. The GPIO signal used to generate interrupt is after
+ the filter and after the XOR inverter. */
+ uint64_t int_en : 1; /**< [ 2: 2](R/W) Pin controls interrupt.
+
+ If set, assertions of this pin after [PIN_XOR] will set GPIO_INTR()[INTR].
+
+ If set and [INT_TYPE] is clear (level-sensitive), deassertions of this pin
+ after [PIN_XOR] will clear GPIO_INTR()[INTR].
+
+ This does NOT control if interrupts are enabled towards the MSI-X vector,
+ for that see GPIO_INTR()[INTR_ENA]. */
+ uint64_t pin_xor : 1; /**< [ 1: 1](R/W) GPIO data inversion. When set, inverts the receiving or transmitting GPIO signal.
+ For GPIO output, this inversion is after all GPIO [PIN_SEL] muxes. This inversion function
+ is applicable to any GPIO output mux signals, also used to control GPIO_RX_DAT.
+ For GPIO input, this inversion is before the GPIO [PIN_SEL] muxes, as used to control
+ GPIO interrupts. */
+ uint64_t tx_oe : 1; /**< [ 0: 0](R/W) Transmit output enable. When set to 1, the GPIO pin can be driven as an output
+ pin if GPIO_BIT_CFG()[PIN_SEL] selects GPIO_PIN_SEL_E::GPIO_SW. [TX_OE] is only
+ used along with GPIO_TX_SET or GPIO_TX_CLR, and [TX_OE] function is before
+ GPIO_BIT_CFG()[PIN_SEL] mux. */
+#else /* Word 0 - Little Endian */
+ uint64_t tx_oe : 1; /**< [ 0: 0](R/W) Transmit output enable. When set to 1, the GPIO pin can be driven as an output
+ pin if GPIO_BIT_CFG()[PIN_SEL] selects GPIO_PIN_SEL_E::GPIO_SW. [TX_OE] is only
+ used along with GPIO_TX_SET or GPIO_TX_CLR, and [TX_OE] function is before
+ GPIO_BIT_CFG()[PIN_SEL] mux. */
+ uint64_t pin_xor : 1; /**< [ 1: 1](R/W) GPIO data inversion. When set, inverts the receiving or transmitting GPIO signal.
+ For GPIO output, this inversion is after all GPIO [PIN_SEL] muxes. This inversion function
+ is applicable to any GPIO output mux signals, also used to control GPIO_RX_DAT.
+ For GPIO input, this inversion is before the GPIO [PIN_SEL] muxes, as used to control
+ GPIO interrupts. */
+ uint64_t int_en : 1; /**< [ 2: 2](R/W) Pin controls interrupt.
+
+ If set, assertions of this pin after [PIN_XOR] will set GPIO_INTR()[INTR].
+
+ If set and [INT_TYPE] is clear (level-sensitive), deassertions of this pin
+ after [PIN_XOR] will clear GPIO_INTR()[INTR].
+
+ This does NOT control if interrupts are enabled towards the MSI-X vector,
+ for that see GPIO_INTR()[INTR_ENA]. */
+ uint64_t int_type : 1; /**< [ 3: 3](R/W) Type of interrupt when pin is an input and [INT_EN] set. When set, rising edge
+ interrupt, else level interrupt. The GPIO signal used to generate interrupt is after
+ the filter and after the XOR inverter. */
+ uint64_t fil_cnt : 4; /**< [ 7: 4](R/W) Filter count. Specifies the number of consecutive samples ([FIL_CNT]+1) to change state.
+ Zero to disable the filter.
+ Filter and XOR inverter are also applicable to GPIO input muxing signals and interrupts. */
+ uint64_t fil_sel : 4; /**< [ 11: 8](R/W) Filter select. Global counter bit-select (controls sample rate).
+ Filter and XOR inverter are also applicable to GPIO input muxing signals and interrupts. */
+ uint64_t tx_od : 1; /**< [ 12: 12](R/W) GPIO output open-drain conversion. This function is after the [PIN_SEL] mux
+ and [PIN_XOR] inverter.
+ When set, the pin output connects to zero, and the pin enable
+ connects to the original pin output. With both [TX_OD] and [PIN_XOR] set, a transmit
+ of 1 tristates the pin output driver to achieve the open-drain function. */
+ uint64_t reserved_13_15 : 3;
+ uint64_t pin_sel : 10; /**< [ 25: 16](R/W) Selects which signal is reported to the GPIO output, or which signal the GPIO input
+ needs to connect to. Each GPIO pin can be configured as input-only or output-only depending
+ on the function selected, as enumerated by GPIO_PIN_SEL_E. For GPIO input selects,
+ the GPIO signal used is after glitch filter and XOR inverter (GPIO_BIT_CFG()[PIN_XOR]). */
+ uint64_t reserved_26_63 : 38;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_gpio_bit_cfgx_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t pin_sel : 8; /**< [ 23: 16](R/W) Selects which signal is reported to the GPIO output, or which signal the GPIO input
+ needs to connect to. Each GPIO pin can be configured as input-only or output-only depending
+ on the function selected, as enumerated by GPIO_PIN_SEL_E. For GPIO input selects,
+ the GPIO signal used is after glitch filter and XOR inverter (GPIO_BIT_CFG()[PIN_XOR]). */
+ uint64_t reserved_13_15 : 3;
+ uint64_t tx_od : 1; /**< [ 12: 12](R/W) GPIO output open-drain conversion. This function is after the [PIN_SEL] mux
+ and [PIN_XOR] inverter.
+ When set, the pin output connects to zero, and the pin enable
+ connects to the original pin output. With both [TX_OD] and [PIN_XOR] set, a transmit
+ of 1 tristates the pin output driver to achieve the open-drain function. */
+ uint64_t fil_sel : 4; /**< [ 11: 8](R/W) Filter select. Global counter bit-select (controls sample rate).
+ Filter and XOR inverter are also applicable to GPIO input muxing signals and interrupts. */
+ uint64_t fil_cnt : 4; /**< [ 7: 4](R/W) Filter count. Specifies the number of consecutive samples ([FIL_CNT]+1) to change state.
+ Zero to disable the filter.
+ Filter and XOR inverter are also applicable to GPIO input muxing signals and interrupts. */
+ uint64_t int_type : 1; /**< [ 3: 3](R/W) Type of interrupt when pin is an input and [INT_EN] set. When set, rising edge
+ interrupt, else level interrupt. The GPIO signal used to generate interrupt is after
+ the filter and after the XOR inverter. */
+ uint64_t int_en : 1; /**< [ 2: 2](R/W) Pin controls interrupt.
+
+ If set, assertions of this pin after [PIN_XOR] will set GPIO_INTR()[INTR].
+
+ If set and [INT_TYPE] is clear (level-sensitive), deassertions of this pin
+ after [PIN_XOR] will clear GPIO_INTR()[INTR].
+
+ This does NOT control if interrupts are enabled towards the MSI-X vector,
+ for that see GPIO_INTR()[INTR_ENA]. */
+ uint64_t pin_xor : 1; /**< [ 1: 1](R/W) GPIO data inversion. When set, inverts the receiving or transmitting GPIO signal.
+ For GPIO output, this inversion is after all GPIO [PIN_SEL] muxes. This inversion function
+ is applicable to any GPIO output mux signals, also used to control GPIO_RX_DAT.
+ For GPIO input, this inversion is before the GPIO [PIN_SEL] muxes, as used to control
+ GPIO interrupts. */
+ uint64_t tx_oe : 1; /**< [ 0: 0](R/W) Transmit output enable. When set to 1, the GPIO pin can be driven as an output
+ pin if GPIO_BIT_CFG()[PIN_SEL] selects GPIO_PIN_SEL_E::GPIO_SW. [TX_OE] is only
+ used along with GPIO_TX_SET or GPIO_TX_CLR, and [TX_OE] function is before
+ GPIO_BIT_CFG()[PIN_SEL] mux. */
+#else /* Word 0 - Little Endian */
+ uint64_t tx_oe : 1; /**< [ 0: 0](R/W) Transmit output enable. When set to 1, the GPIO pin can be driven as an output
+ pin if GPIO_BIT_CFG()[PIN_SEL] selects GPIO_PIN_SEL_E::GPIO_SW. [TX_OE] is only
+ used along with GPIO_TX_SET or GPIO_TX_CLR, and [TX_OE] function is before
+ GPIO_BIT_CFG()[PIN_SEL] mux. */
+ uint64_t pin_xor : 1; /**< [ 1: 1](R/W) GPIO data inversion. When set, inverts the receiving or transmitting GPIO signal.
+ For GPIO output, this inversion is after all GPIO [PIN_SEL] muxes. This inversion function
+ is applicable to any GPIO output mux signals, also used to control GPIO_RX_DAT.
+ For GPIO input, this inversion is before the GPIO [PIN_SEL] muxes, as used to control
+ GPIO interrupts. */
+ uint64_t int_en : 1; /**< [ 2: 2](R/W) Pin controls interrupt.
+
+ If set, assertions of this pin after [PIN_XOR] will set GPIO_INTR()[INTR].
+
+ If set and [INT_TYPE] is clear (level-sensitive), deassertions of this pin
+ after [PIN_XOR] will clear GPIO_INTR()[INTR].
+
+ This does NOT control if interrupts are enabled towards the MSI-X vector,
+ for that see GPIO_INTR()[INTR_ENA]. */
+ uint64_t int_type : 1; /**< [ 3: 3](R/W) Type of interrupt when pin is an input and [INT_EN] set. When set, rising edge
+ interrupt, else level interrupt. The GPIO signal used to generate interrupt is after
+ the filter and after the XOR inverter. */
+ uint64_t fil_cnt : 4; /**< [ 7: 4](R/W) Filter count. Specifies the number of consecutive samples ([FIL_CNT]+1) to change state.
+ Zero to disable the filter.
+ Filter and XOR inverter are also applicable to GPIO input muxing signals and interrupts. */
+ uint64_t fil_sel : 4; /**< [ 11: 8](R/W) Filter select. Global counter bit-select (controls sample rate).
+ Filter and XOR inverter are also applicable to GPIO input muxing signals and interrupts. */
+ uint64_t tx_od : 1; /**< [ 12: 12](R/W) GPIO output open-drain conversion. This function is after the [PIN_SEL] mux
+ and [PIN_XOR] inverter.
+ When set, the pin output connects to zero, and the pin enable
+ connects to the original pin output. With both [TX_OD] and [PIN_XOR] set, a transmit
+ of 1 tristates the pin output driver to achieve the open-drain function. */
+ uint64_t reserved_13_15 : 3;
+ uint64_t pin_sel : 8; /**< [ 23: 16](R/W) Selects which signal is reported to the GPIO output, or which signal the GPIO input
+ needs to connect to. Each GPIO pin can be configured as input-only or output-only depending
+ on the function selected, as enumerated by GPIO_PIN_SEL_E. For GPIO input selects,
+ the GPIO signal used is after glitch filter and XOR inverter (GPIO_BIT_CFG()[PIN_XOR]). */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_gpio_bit_cfgx_cn81xx cn83xx; */
+};
+typedef union bdk_gpio_bit_cfgx bdk_gpio_bit_cfgx_t;
+
+static inline uint64_t BDK_GPIO_BIT_CFGX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GPIO_BIT_CFGX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=47))
+ return 0x803000000400ll + 8ll * ((a) & 0x3f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=79))
+ return 0x803000000400ll + 8ll * ((a) & 0x7f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=50))
+ return 0x803000000400ll + 8ll * ((a) & 0x3f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=63))
+ return 0x803000000400ll + 8ll * ((a) & 0x3f);
+ __bdk_csr_fatal("GPIO_BIT_CFGX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GPIO_BIT_CFGX(a) bdk_gpio_bit_cfgx_t
+#define bustype_BDK_GPIO_BIT_CFGX(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_GPIO_BIT_CFGX(a) "GPIO_BIT_CFGX"
+#define device_bar_BDK_GPIO_BIT_CFGX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GPIO_BIT_CFGX(a) (a)
+#define arguments_BDK_GPIO_BIT_CFGX(a) (a),-1,-1,-1
+
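+/*
+ * Editor's sketch (not part of the BDK import): configure a pin as a
+ * software-driven output by setting [TX_OE] and muxing the pin to the
+ * software function. Assumes the BDK_CSR_READ/BDK_CSR_WRITE helpers from
+ * bdk-csr.h and bdk_node_t from bdk-numa.h; the GPIO_PIN_SEL_E::GPIO_SW
+ * encoding is assumed to be 0 here.
+ */
+static inline void bdk_gpio_example_sw_output(bdk_node_t node, int pin)
+{
+ bdk_gpio_bit_cfgx_t cfg;
+ cfg.u = BDK_CSR_READ(node, BDK_GPIO_BIT_CFGX(pin));
+ cfg.s.pin_sel = 0; /* GPIO_PIN_SEL_E::GPIO_SW (assumed encoding) */
+ cfg.s.pin_xor = 0; /* no inversion */
+ cfg.s.tx_oe = 1;   /* drive the pin from GPIO_TX_SET/GPIO_TX_CLR */
+ BDK_CSR_WRITE(node, BDK_GPIO_BIT_CFGX(pin), cfg.u);
+}
+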
+/**
+ * Register (NCB) gpio_bit_permit#
+ *
+ * GPIO Bit Permit Register
+ * This register determines which requestor(s) are permitted to access which GPIO pins.
+ *
+ * This register is only accessible to the requestor(s) permitted with GPIO_PERMIT.
+ * (That is, only the GPIO_PERMIT permitted agent can change the permission settings of
+ * all requestors.)
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_gpio_bit_permitx
+{
+ uint64_t u;
+ struct bdk_gpio_bit_permitx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t permitdis : 4; /**< [ 3: 0](R/W) Each bit, if set, disables the given requestor from accessing the corresponding pin.
+ If a disabled requestor makes a request, the access becomes read-zero/write ignored.
+ \<0\> = Disable APs (non MCP/SCP) secure world from accessing the pin.
+ \<1\> = Disable APs (non MCP/SCP) nonsecure world from accessing the pin.
+ \<2\> = Disable XCP0 (SCP) from accessing the pin.
+ \<3\> = Disable XCP1 (MCP) from accessing the pin. */
+#else /* Word 0 - Little Endian */
+ uint64_t permitdis : 4; /**< [ 3: 0](R/W) Each bit, if set, disables the given requestor from accessing the corresponding pin.
+ If a disabled requestor makes a request, the access becomes read-zero/write ignored.
+ \<0\> = Disable APs (non MCP/SCP) secure world from accessing the pin.
+ \<1\> = Disable APs (non MCP/SCP) nonsecure world from accessing the pin.
+ \<2\> = Disable XCP0 (SCP) from accessing the pin.
+ \<3\> = Disable XCP1 (MCP) from accessing the pin. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gpio_bit_permitx_s cn; */
+};
+typedef union bdk_gpio_bit_permitx bdk_gpio_bit_permitx_t;
+
+static inline uint64_t BDK_GPIO_BIT_PERMITX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GPIO_BIT_PERMITX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=63))
+ return 0x803000002000ll + 8ll * ((a) & 0x3f);
+ __bdk_csr_fatal("GPIO_BIT_PERMITX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GPIO_BIT_PERMITX(a) bdk_gpio_bit_permitx_t
+#define bustype_BDK_GPIO_BIT_PERMITX(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_GPIO_BIT_PERMITX(a) "GPIO_BIT_PERMITX"
+#define device_bar_BDK_GPIO_BIT_PERMITX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GPIO_BIT_PERMITX(a) (a)
+#define arguments_BDK_GPIO_BIT_PERMITX(a) (a),-1,-1,-1
+
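+/*
+ * Editor's sketch (not part of the BDK import): restrict a pin so that only
+ * XCP0 (SCP) may access it, by setting the [PERMITDIS] bits for the AP
+ * secure world (<0>), AP nonsecure world (<1>), and XCP1/MCP (<3>). CN9XXX
+ * only; assumes the BDK_CSR_WRITE helper from bdk-csr.h.
+ */
+static inline void bdk_gpio_example_permit_scp_only(bdk_node_t node, int pin)
+{
+ bdk_gpio_bit_permitx_t permit;
+ permit.u = 0;
+ permit.s.permitdis = 0xb; /* disable <3>,<1>,<0>; leave <2> (SCP) enabled */
+ BDK_CSR_WRITE(node, BDK_GPIO_BIT_PERMITX(pin), permit.u);
+}
+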
+/**
+ * Register (NCB) gpio_blink_cfg
+ *
+ * GPIO Output Blinker Configuration Register
+ * This register configures the blink generator.
+ *
+ * This register is only accessible to the requestor(s) permitted with GPIO_PERMIT.
+ *
+ * When permitted, this register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_gpio_blink_cfg
+{
+ uint64_t u;
+ struct bdk_gpio_blink_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t force_off : 4; /**< [ 15: 12](R/W) Force activity off time. The minimum amount of time to disable the activity indicator if
+ it has been continually enabled for the [MAX_ON] time.
+ 0x0 = No minimum.
+ 0x1 = 1/8 second.
+ ...
+ 0xF = 15/8 seconds. */
+ uint64_t max_on : 4; /**< [ 11: 8](R/W) Maximum activity on time. The maximum amount of time to enable the activity indicator.
+ 0x0 = No maximum.
+ 0x1 = 1/4 second.
+ ...
+ 0xF = 15/4 seconds. */
+ uint64_t stretch_off : 4; /**< [ 7: 4](R/W) Stretch activity off. The minimum amount of time to disable the activity indicator.
+ 0x0 = No minimum.
+ 0x1 = 1/64 second.
+ ...
+ 0xF = 15/64 seconds. */
+ uint64_t stretch_on : 4; /**< [ 3: 0](R/W) Stretch activity on. The minimum amount of time to enable the activity indicator.
+ 0x0 = 1/64 second.
+ 0x1 = 2/64 second.
+ ...
+ 0xF = 16/64 seconds. */
+#else /* Word 0 - Little Endian */
+ uint64_t stretch_on : 4; /**< [ 3: 0](R/W) Stretch activity on. The minimum amount of time to enable the activity indicator.
+ 0x0 = 1/64 second.
+ 0x1 = 2/64 second.
+ ...
+ 0xF = 16/64 seconds. */
+ uint64_t stretch_off : 4; /**< [ 7: 4](R/W) Stretch activity off. The minimum amount of time to disable the activity indicator.
+ 0x0 = No minimum.
+ 0x1 = 1/64 second.
+ ...
+ 0xF = 15/64 seconds. */
+ uint64_t max_on : 4; /**< [ 11: 8](R/W) Maximum activity on time. The maximum amount of time to enable the activity indicator.
+ 0x0 = No maximum.
+ 0x1 = 1/4 second.
+ ...
+ 0xF = 15/4 seconds. */
+ uint64_t force_off : 4; /**< [ 15: 12](R/W) Force activity off time. The minimum amount of time to disable the activity indicator if
+ it has been continually enabled for the [MAX_ON] time.
+ 0x0 = No minimum.
+ 0x1 = 1/8 second.
+ ...
+ 0xF = 15/8 seconds. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gpio_blink_cfg_s cn; */
+};
+typedef union bdk_gpio_blink_cfg bdk_gpio_blink_cfg_t;
+
+#define BDK_GPIO_BLINK_CFG BDK_GPIO_BLINK_CFG_FUNC()
+static inline uint64_t BDK_GPIO_BLINK_CFG_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GPIO_BLINK_CFG_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x803000001440ll;
+ __bdk_csr_fatal("GPIO_BLINK_CFG", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_GPIO_BLINK_CFG bdk_gpio_blink_cfg_t
+#define bustype_BDK_GPIO_BLINK_CFG BDK_CSR_TYPE_NCB
+#define basename_BDK_GPIO_BLINK_CFG "GPIO_BLINK_CFG"
+#define device_bar_BDK_GPIO_BLINK_CFG 0x0 /* PF_BAR0 */
+#define busnum_BDK_GPIO_BLINK_CFG 0
+#define arguments_BDK_GPIO_BLINK_CFG -1,-1,-1,-1
+
+/**
+ * Register (NCB) gpio_blink_freq
+ *
+ * GPIO Blink Clock Register
+ * This register configures the blink generator.
+ *
+ * This register is only accessible to the requestor(s) permitted with GPIO_PERMIT.
+ *
+ * When permitted, this register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_gpio_blink_freq
+{
+ uint64_t u;
+ struct bdk_gpio_blink_freq_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_27_63 : 37;
+ uint64_t div : 27; /**< [ 26: 0](R/W) Number of 100 MHz reference clocks in 1/64th of a second. */
+#else /* Word 0 - Little Endian */
+ uint64_t div : 27; /**< [ 26: 0](R/W) Number of 100 MHz reference clocks in 1/64th of a second. */
+ uint64_t reserved_27_63 : 37;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gpio_blink_freq_s cn; */
+};
+typedef union bdk_gpio_blink_freq bdk_gpio_blink_freq_t;
+
+#define BDK_GPIO_BLINK_FREQ BDK_GPIO_BLINK_FREQ_FUNC()
+static inline uint64_t BDK_GPIO_BLINK_FREQ_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GPIO_BLINK_FREQ_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x803000001448ll;
+ __bdk_csr_fatal("GPIO_BLINK_FREQ", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_GPIO_BLINK_FREQ bdk_gpio_blink_freq_t
+#define bustype_BDK_GPIO_BLINK_FREQ BDK_CSR_TYPE_NCB
+#define basename_BDK_GPIO_BLINK_FREQ "GPIO_BLINK_FREQ"
+#define device_bar_BDK_GPIO_BLINK_FREQ 0x0 /* PF_BAR0 */
+#define busnum_BDK_GPIO_BLINK_FREQ 0
+#define arguments_BDK_GPIO_BLINK_FREQ -1,-1,-1,-1
+
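+/*
+ * Editor's sketch (not part of the BDK import): per the [DIV] description,
+ * the divider simply holds the number of 100 MHz reference clocks in 1/64th
+ * of a second, i.e. 100000000 / 64 = 1562500 (0x17D784). Assumes the
+ * BDK_CSR_WRITE helper from bdk-csr.h.
+ */
+static inline void bdk_gpio_example_blink_freq(bdk_node_t node)
+{
+ bdk_gpio_blink_freq_t freq;
+ freq.u = 0;
+ freq.s.div = 100000000 / 64; /* 1562500 reference clocks per 1/64 s */
+ BDK_CSR_WRITE(node, BDK_GPIO_BLINK_FREQ, freq.u);
+}
+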
+/**
+ * Register (NCB) gpio_cer_err_w1c
+ *
+ * GPIO Central Error Write-One-to-Clear Register
+ * Internal:
+ * FIXME algorithm int_w1c.
+ */
+union bdk_gpio_cer_err_w1c
+{
+ uint64_t u;
+ struct bdk_gpio_cer_err_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t caterr : 1; /**< [ 3: 3](R/W1C) Clear the selectable-GPIO GPIO_PIN_SEL_E::CER_CATERR output that indicates a
+ catastrophic error to the BMC. */
+ uint64_t err2 : 1; /**< [ 2: 2](R/W1C) Clear the selectable-GPIO GPIO_PIN_SEL_E::CER_ERR2 output that indicates an error to the BMC. */
+ uint64_t err1 : 1; /**< [ 1: 1](R/W1C) Clear the selectable-GPIO GPIO_PIN_SEL_E::CER_ERR1 output that indicates an error to the BMC. */
+ uint64_t err0 : 1; /**< [ 0: 0](R/W1C) Clear the selectable-GPIO GPIO_PIN_SEL_E::CER_ERR0 output that indicates an error to the BMC. */
+#else /* Word 0 - Little Endian */
+ uint64_t err0 : 1; /**< [ 0: 0](R/W1C) Clear the selectable-GPIO GPIO_PIN_SEL_E::CER_ERR0 output that indicates an error to the BMC. */
+ uint64_t err1 : 1; /**< [ 1: 1](R/W1C) Clear the selectable-GPIO GPIO_PIN_SEL_E::CER_ERR1 output that indicates an error to the BMC. */
+ uint64_t err2 : 1; /**< [ 2: 2](R/W1C) Clear the selectable-GPIO GPIO_PIN_SEL_E::CER_ERR2 output that indicates an error to the BMC. */
+ uint64_t caterr : 1; /**< [ 3: 3](R/W1C) Clear the selectable-GPIO GPIO_PIN_SEL_E::CER_CATERR output that indicates a
+ catastrophic error to the BMC. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gpio_cer_err_w1c_s cn; */
+};
+typedef union bdk_gpio_cer_err_w1c bdk_gpio_cer_err_w1c_t;
+
+#define BDK_GPIO_CER_ERR_W1C BDK_GPIO_CER_ERR_W1C_FUNC()
+static inline uint64_t BDK_GPIO_CER_ERR_W1C_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GPIO_CER_ERR_W1C_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x803000001608ll;
+ __bdk_csr_fatal("GPIO_CER_ERR_W1C", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_GPIO_CER_ERR_W1C bdk_gpio_cer_err_w1c_t
+#define bustype_BDK_GPIO_CER_ERR_W1C BDK_CSR_TYPE_NCB
+#define basename_BDK_GPIO_CER_ERR_W1C "GPIO_CER_ERR_W1C"
+#define device_bar_BDK_GPIO_CER_ERR_W1C 0x0 /* PF_BAR0 */
+#define busnum_BDK_GPIO_CER_ERR_W1C 0
+#define arguments_BDK_GPIO_CER_ERR_W1C -1,-1,-1,-1
+
+/**
+ * Register (NCB) gpio_cer_err_w1s
+ *
+ * GPIO Central Error Write-One-to-Set Register
+ * This register reports CER errors to GPIO pins, TBD.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_gpio_cer_err_w1s
+{
+ uint64_t u;
+ struct bdk_gpio_cer_err_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t caterr : 1; /**< [ 3: 3](R/W1S) Set the selectable-GPIO GPIO_PIN_SEL_E::CER_CATERR output, to indicate a
+ catastrophic error to the BMC. */
+ uint64_t err2 : 1; /**< [ 2: 2](R/W1S) Set the selectable-GPIO GPIO_PIN_SEL_E::CER_ERR2 output, to indicate an error to the BMC. */
+ uint64_t err1 : 1; /**< [ 1: 1](R/W1S) Set the selectable-GPIO GPIO_PIN_SEL_E::CER_ERR1 output, to indicate an error to the BMC. */
+ uint64_t err0 : 1; /**< [ 0: 0](R/W1S) Set the selectable-GPIO GPIO_PIN_SEL_E::CER_ERR0 output, to indicate an error to the BMC. */
+#else /* Word 0 - Little Endian */
+ uint64_t err0 : 1; /**< [ 0: 0](R/W1S) Set the selectable-GPIO GPIO_PIN_SEL_E::CER_ERR0 output, to indicate an error to the BMC. */
+ uint64_t err1 : 1; /**< [ 1: 1](R/W1S) Set the selectable-GPIO GPIO_PIN_SEL_E::CER_ERR1 output, to indicate an error to the BMC. */
+ uint64_t err2 : 1; /**< [ 2: 2](R/W1S) Set the selectable-GPIO GPIO_PIN_SEL_E::CER_ERR2 output, to indicate an error to the BMC. */
+ uint64_t caterr : 1; /**< [ 3: 3](R/W1S) Set the selectable-GPIO GPIO_PIN_SEL_E::CER_CATERR output, to indicate a
+ catastrophic error to the BMC. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gpio_cer_err_w1s_s cn; */
+};
+typedef union bdk_gpio_cer_err_w1s bdk_gpio_cer_err_w1s_t;
+
+#define BDK_GPIO_CER_ERR_W1S BDK_GPIO_CER_ERR_W1S_FUNC()
+static inline uint64_t BDK_GPIO_CER_ERR_W1S_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GPIO_CER_ERR_W1S_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x803000001600ll;
+ __bdk_csr_fatal("GPIO_CER_ERR_W1S", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_GPIO_CER_ERR_W1S bdk_gpio_cer_err_w1s_t
+#define bustype_BDK_GPIO_CER_ERR_W1S BDK_CSR_TYPE_NCB
+#define basename_BDK_GPIO_CER_ERR_W1S "GPIO_CER_ERR_W1S"
+#define device_bar_BDK_GPIO_CER_ERR_W1S 0x0 /* PF_BAR0 */
+#define busnum_BDK_GPIO_CER_ERR_W1S 0
+#define arguments_BDK_GPIO_CER_ERR_W1S -1,-1,-1,-1
+
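+/*
+ * Editor's sketch (not part of the BDK import): pulse the CER_ERR0
+ * indication toward the BMC by setting it through the W1S register and then
+ * clearing it through the W1C register (both registers share the same bit
+ * layout). Assumes the BDK_CSR_WRITE helper from bdk-csr.h.
+ */
+static inline void bdk_gpio_example_cer_err0_pulse(bdk_node_t node)
+{
+ bdk_gpio_cer_err_w1s_t err;
+ err.u = 0;
+ err.s.err0 = 1;
+ BDK_CSR_WRITE(node, BDK_GPIO_CER_ERR_W1S, err.u); /* assert CER_ERR0 */
+ BDK_CSR_WRITE(node, BDK_GPIO_CER_ERR_W1C, err.u); /* deassert CER_ERR0 */
+}
+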
+/**
+ * Register (NCB) gpio_clk_gen#
+ *
+ * GPIO Clock Generator Registers
+ * This register configures the clock generators. The number of generators is
+ * discoverable in GPIO_CONST[CLKGEN].
+ *
+ * This register is only accessible to the requestor(s) permitted with GPIO_PERMIT.
+ *
+ * When permitted, this register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_gpio_clk_genx
+{
+ uint64_t u;
+ struct bdk_gpio_clk_genx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t high : 32; /**< [ 63: 32](R/W) Determines the high period of the GPIO clock generator. [HIGH] defines the
+ number of coprocessor clocks in the GPIO clock high period. Writing [HIGH] = 0
+ changes the clock generator back to the original 50% duty cycle, which is backward
+ compatible. */
+ uint64_t n : 32; /**< [ 31: 0](R/W) Determines the frequency of the GPIO clock generator. N should be less than or equal to
+ 2^31-1.
+ The frequency of the GPIO clock generator equals the coprocessor-clock frequency times N
+ divided by 2^32.
+ Writing N = 0x0 stops the clock generator. */
+#else /* Word 0 - Little Endian */
+ uint64_t n : 32; /**< [ 31: 0](R/W) Determines the frequency of the GPIO clock generator. N should be less than or equal to
+ 2^31-1.
+ The frequency of the GPIO clock generator equals the coprocessor-clock frequency times N
+ divided by 2^32.
+ Writing N = 0x0 stops the clock generator. */
+ uint64_t high : 32; /**< [ 63: 32](R/W) Determines the high period of the GPIO clock generator. [HIGH] defines the
+ number of coprocessor clocks in the GPIO clock high period. Writing [HIGH] = 0
+ changes the clock generator back to the original 50% duty cycle, which is backward
+ compatible. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_gpio_clk_genx_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t n : 32; /**< [ 31: 0](R/W) Determines the frequency of the GPIO clock generator. N should be less than or equal to
+ 2^31-1.
+ The frequency of the GPIO clock generator equals the coprocessor-clock frequency times N
+ divided by 2^32.
+ Writing N = 0x0 stops the clock generator. */
+#else /* Word 0 - Little Endian */
+ uint64_t n : 32; /**< [ 31: 0](R/W) Determines the frequency of the GPIO clock generator. N should be less than or equal to
+ 2^31-1.
+ The frequency of the GPIO clock generator equals the coprocessor-clock frequency times N
+ divided by 2^32.
+ Writing N = 0x0 stops the clock generator. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_gpio_clk_genx_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t high : 32; /**< [ 63: 32](R/W) Determines the high period of the GPIO clock generator. [HIGH] defines the
+ number of coprocessor clocks in the GPIO clock high period. Writing [HIGH] = 0
+ changes the clock generator back to the original 50% duty cycle, which is backward
+ compatible. */
+ uint64_t n : 32; /**< [ 31: 0](R/W) Determines the frequency of the GPIO clock generator. [N] should be less than or
+ equal to 2^31-1. The frequency of the GPIO clock generator equals the
+ coprocessor-clock frequency times [N] divided by 2^32. Writing [N] = 0x0 stops
+ the clock generator. */
+#else /* Word 0 - Little Endian */
+ uint64_t n : 32; /**< [ 31: 0](R/W) Determines the frequency of the GPIO clock generator. [N] should be less than or
+ equal to 2^31-1. The frequency of the GPIO clock generator equals the
+ coprocessor-clock frequency times [N] divided by 2^32. Writing [N] = 0x0 stops
+ the clock generator. */
+ uint64_t high : 32; /**< [ 63: 32](R/W) Determines the high period of the GPIO clock generator. [HIGH] defines the
+ number of coprocessor clocks in the GPIO clock high period. Writing [HIGH] = 0
+ changes the clock generator back to the original 50% duty cycle, which is backward
+ compatible. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_gpio_clk_genx bdk_gpio_clk_genx_t;
+
+static inline uint64_t BDK_GPIO_CLK_GENX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GPIO_CLK_GENX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX) && (a<=3))
+ return 0x803000000040ll + 8ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x803000001800ll + 8ll * ((a) & 0x7);
+ __bdk_csr_fatal("GPIO_CLK_GENX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GPIO_CLK_GENX(a) bdk_gpio_clk_genx_t
+#define bustype_BDK_GPIO_CLK_GENX(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_GPIO_CLK_GENX(a) "GPIO_CLK_GENX"
+#define device_bar_BDK_GPIO_CLK_GENX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GPIO_CLK_GENX(a) (a)
+#define arguments_BDK_GPIO_CLK_GENX(a) (a),-1,-1,-1
+
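+/*
+ * Editor's sketch (not part of the BDK import): per the [N] description,
+ * f_out = f_coproc * N / 2^32, so N = f_target * 2^32 / f_coproc. For
+ * example, a 1 MHz output from an 800 MHz coprocessor clock needs
+ * N = 2^32 * 1000000 / 800000000 = 5368709. The coprocessor clock rate is a
+ * caller-supplied assumption here; assumes BDK_CSR_WRITE from bdk-csr.h.
+ */
+static inline void bdk_gpio_example_clk_gen(bdk_node_t node, int gen,
+ uint64_t coproc_hz, uint64_t target_hz)
+{
+ bdk_gpio_clk_genx_t cfg;
+ cfg.u = 0;
+ cfg.s.n = (target_hz << 32) / coproc_hz; /* must be <= 2^31-1 per [N] */
+ BDK_CSR_WRITE(node, BDK_GPIO_CLK_GENX(gen), cfg.u);
+}
+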
+/**
+ * Register (NCB) gpio_clk_synce#
+ *
+ * GPIO Clock SyncE Registers
+ * A GSER can be configured as a clock source. The GPIO block supports up to two
+ * unique clocks that can be sent out any GPIO pin, as configured when GPIO_BIT_CFG()[PIN_SEL] =
+ * GPIO_PIN_SEL_E::GPIO_CLK_SYNCE(0..1). The selected GSER SerDes clock can be divided
+ * by 20, 40, 80, or 160. Legal values depend on the number of SerDes.
+ *
+ * This register is only accessible to the requestor(s) permitted with GPIO_PERMIT.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_gpio_clk_syncex
+{
+ uint64_t u;
+ struct bdk_gpio_clk_syncex_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t qlm_sel : 4; /**< [ 11: 8](R/W) Selects which GSER(0..3) to source the clock from. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t div : 2; /**< [ 3: 2](R/W) GPIO internal clock division of the GSER SerDes recovered clock to create the
+ output clock. The maximum supported GPIO output frequency is 125 MHz.
+ 0x0 = Divide by 20.
+ 0x1 = Divide by 40.
+ 0x2 = Divide by 80.
+ 0x3 = Divide by 160. */
+ uint64_t lane_sel : 2; /**< [ 1: 0](R/W) Which RX lane within the GSER selected with [QLM_SEL] to use as the GPIO
+ internal QLMx clock. */
+#else /* Word 0 - Little Endian */
+ uint64_t lane_sel : 2; /**< [ 1: 0](R/W) Which RX lane within the GSER selected with [QLM_SEL] to use as the GPIO
+ internal QLMx clock. */
+ uint64_t div : 2; /**< [ 3: 2](R/W) GPIO internal clock division of the GSER SerDes recovered clock to create the
+ output clock. The maximum supported GPIO output frequency is 125 MHz.
+ 0x0 = Divide by 20.
+ 0x1 = Divide by 40.
+ 0x2 = Divide by 80.
+ 0x3 = Divide by 160. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t qlm_sel : 4; /**< [ 11: 8](R/W) Selects which GSER(0..3) to source the clock from. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_gpio_clk_syncex_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t qlm_sel : 4; /**< [ 11: 8](R/W) Selects which GSER to source the clock from. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t div : 2; /**< [ 3: 2](R/W) GPIO internal clock division of the GSER SerDes recovered clock to create the
+ output clock. The maximum supported GPIO output frequency is 125 MHz.
+ 0x0 = Divide by 20.
+ 0x1 = Divide by 40.
+ 0x2 = Divide by 80.
+ 0x3 = Divide by 160. */
+ uint64_t lane_sel : 2; /**< [ 1: 0](R/W) Which RX lane within the GSER selected with [QLM_SEL] to use as the GPIO
+ internal QLMx clock. Note that GSER 0..3 have four selections each while
+ GSER 4..6 have two selections each. */
+#else /* Word 0 - Little Endian */
+ uint64_t lane_sel : 2; /**< [ 1: 0](R/W) Which RX lane within the GSER selected with [QLM_SEL] to use as the GPIO
+ internal QLMx clock. Note that GSER 0..3 have four selections each while
+ GSER 4..6 have two selections each. */
+ uint64_t div : 2; /**< [ 3: 2](R/W) GPIO internal clock division of the GSER SerDes recovered clock to create the
+ output clock. The maximum supported GPIO output frequency is 125 MHz.
+ 0x0 = Divide by 20.
+ 0x1 = Divide by 40.
+ 0x2 = Divide by 80.
+ 0x3 = Divide by 160. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t qlm_sel : 4; /**< [ 11: 8](R/W) Selects which GSER to source the clock from. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } cn9;
+ /* struct bdk_gpio_clk_syncex_s cn81xx; */
+ struct bdk_gpio_clk_syncex_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t qlm_sel : 4; /**< [ 11: 8](R/W) Selects which GSER(0..7) to source the clock from. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t div : 2; /**< [ 3: 2](R/W) GPIO internal clock division of the GSER SerDes recovered clock to create the
+ output clock. The maximum supported GPIO output frequency is 125 MHz.
+ 0x0 = Divide by 20.
+ 0x1 = Divide by 40.
+ 0x2 = Divide by 80.
+ 0x3 = Divide by 160. */
+ uint64_t lane_sel : 2; /**< [ 1: 0](R/W) Which RX lane within the GSER selected with [QLM_SEL] to use as the GPIO
+ internal QLMx clock. */
+#else /* Word 0 - Little Endian */
+ uint64_t lane_sel : 2; /**< [ 1: 0](R/W) Which RX lane within the GSER selected with [QLM_SEL] to use as the GPIO
+ internal QLMx clock. */
+ uint64_t div : 2; /**< [ 3: 2](R/W) GPIO internal clock division of the GSER SerDes recovered clock to create the
+ output clock. The maximum supported GPIO output frequency is 125 MHz.
+ 0x0 = Divide by 20.
+ 0x1 = Divide by 40.
+ 0x2 = Divide by 80.
+ 0x3 = Divide by 160. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t qlm_sel : 4; /**< [ 11: 8](R/W) Selects which GSER(0..7) to source the clock from. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_gpio_clk_syncex_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t qlm_sel : 4; /**< [ 11: 8](R/W) Selects which GSER(0..6) to source the clock from. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t div : 2; /**< [ 3: 2](R/W) GPIO internal clock division of the GSER SerDes recovered clock to create the
+ output clock. The maximum supported GPIO output frequency is 125 MHz.
+ 0x0 = Divide by 20.
+ 0x1 = Divide by 40.
+ 0x2 = Divide by 80.
+ 0x3 = Divide by 160. */
+ uint64_t lane_sel : 2; /**< [ 1: 0](R/W) Which RX lane within the GSER selected with [QLM_SEL] to use as the GPIO
+ internal QLMx clock. Note that GSER 0..3 have four selections each while
+ GSER 4..6 have two selections each. */
+#else /* Word 0 - Little Endian */
+ uint64_t lane_sel : 2; /**< [ 1: 0](R/W) Which RX lane within the GSER selected with [QLM_SEL] to use as the GPIO
+ internal QLMx clock. Note that GSER 0..3 have four selections each while
+ GSER 4..6 have two selections each. */
+ uint64_t div : 2; /**< [ 3: 2](R/W) GPIO internal clock division of the GSER SerDes recovered clock to create the
+ output clock. The maximum supported GPIO output frequency is 125 MHz.
+ 0x0 = Divide by 20.
+ 0x1 = Divide by 40.
+ 0x2 = Divide by 80.
+ 0x3 = Divide by 160. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t qlm_sel : 4; /**< [ 11: 8](R/W) Selects which GSER(0..6) to source the clock from. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_gpio_clk_syncex bdk_gpio_clk_syncex_t;
+
+static inline uint64_t BDK_GPIO_CLK_SYNCEX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GPIO_CLK_SYNCEX(unsigned long a)
+{
+ if (a<=1)
+ return 0x803000000060ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("GPIO_CLK_SYNCEX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GPIO_CLK_SYNCEX(a) bdk_gpio_clk_syncex_t
+#define bustype_BDK_GPIO_CLK_SYNCEX(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_GPIO_CLK_SYNCEX(a) "GPIO_CLK_SYNCEX"
+#define device_bar_BDK_GPIO_CLK_SYNCEX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GPIO_CLK_SYNCEX(a) (a)
+#define arguments_BDK_GPIO_CLK_SYNCEX(a) (a),-1,-1,-1
+
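+/*
+ * Editor's sketch (not part of the BDK import): route a GSER recovered
+ * clock out as a SyncE reference by picking the GSER with [QLM_SEL], the RX
+ * lane with [LANE_SEL], and a divisor with [DIV]. The pin itself must
+ * separately be muxed with GPIO_BIT_CFG()[PIN_SEL] =
+ * GPIO_PIN_SEL_E::GPIO_CLK_SYNCE(0..1). Assumes BDK_CSR_WRITE from bdk-csr.h.
+ */
+static inline void bdk_gpio_example_clk_synce(bdk_node_t node, int idx,
+ int qlm, int lane)
+{
+ bdk_gpio_clk_syncex_t synce;
+ synce.u = 0;
+ synce.s.qlm_sel = qlm;
+ synce.s.lane_sel = lane;
+ synce.s.div = 0x3; /* divide the SerDes clock by 160 */
+ BDK_CSR_WRITE(node, BDK_GPIO_CLK_SYNCEX(idx), synce.u);
+}
+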
+/**
+ * Register (NCB) gpio_comp
+ *
+ * GPIO Compensation Register
+ */
+union bdk_gpio_comp
+{
+ uint64_t u;
+ struct bdk_gpio_comp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_11_63 : 53;
+ uint64_t pctl : 3; /**< [ 10: 8](R/W) GPIO bus driver PCTL. Suggested values:
+ 0x4 = 60 ohm.
+ 0x6 = 40 ohm.
+ 0x7 = 30 ohm. */
+ uint64_t reserved_3_7 : 5;
+ uint64_t nctl : 3; /**< [ 2: 0](R/W) GPIO bus driver NCTL. Suggested values:
+ 0x4 = 60 ohm.
+ 0x6 = 40 ohm.
+ 0x7 = 30 ohm. */
+#else /* Word 0 - Little Endian */
+ uint64_t nctl : 3; /**< [ 2: 0](R/W) GPIO bus driver NCTL. Suggested values:
+ 0x4 = 60 ohm.
+ 0x6 = 40 ohm.
+ 0x7 = 30 ohm. */
+ uint64_t reserved_3_7 : 5;
+ uint64_t pctl : 3; /**< [ 10: 8](R/W) GPIO bus driver PCTL. Suggested values:
+ 0x4 = 60 ohm.
+ 0x6 = 40 ohm.
+ 0x7 = 30 ohm. */
+ uint64_t reserved_11_63 : 53;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gpio_comp_s cn; */
+};
+typedef union bdk_gpio_comp bdk_gpio_comp_t;
+
+#define BDK_GPIO_COMP BDK_GPIO_COMP_FUNC()
+static inline uint64_t BDK_GPIO_COMP_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GPIO_COMP_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x803000000080ll;
+ __bdk_csr_fatal("GPIO_COMP", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_GPIO_COMP bdk_gpio_comp_t
+#define bustype_BDK_GPIO_COMP BDK_CSR_TYPE_NCB
+#define basename_BDK_GPIO_COMP "GPIO_COMP"
+#define device_bar_BDK_GPIO_COMP 0x0 /* PF_BAR0 */
+#define busnum_BDK_GPIO_COMP 0
+#define arguments_BDK_GPIO_COMP -1,-1,-1,-1
+
+/**
+ * Register (NCB) gpio_const
+ *
+ * GPIO Constants Register
+ * This register contains constants for software discovery.
+ *
+ * This register is accessible to all requestors (regardless of GPIO_PERMIT).
+ *
+ * When permitted, this register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_gpio_const
+{
+ uint64_t u;
+ struct bdk_gpio_const_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_20_63 : 44;
+ uint64_t clkgen : 4; /**< [ 19: 16](RO) Number of clock generators in GPIO. */
+ uint64_t pp : 8; /**< [ 15: 8](RO) Number of PP vectors in GPIO_INT_VEC_E::MC_INTR_PP(). */
+ uint64_t gpios : 8; /**< [ 7: 0](RO) Number of GPIOs implemented. */
+#else /* Word 0 - Little Endian */
+ uint64_t gpios : 8; /**< [ 7: 0](RO) Number of GPIOs implemented. */
+ uint64_t pp : 8; /**< [ 15: 8](RO) Number of PP vectors in GPIO_INT_VEC_E::MC_INTR_PP(). */
+ uint64_t clkgen : 4; /**< [ 19: 16](RO) Number of clock generators in GPIO. */
+ uint64_t reserved_20_63 : 44;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_gpio_const_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t pp : 8; /**< [ 15: 8](RO) Number of PP vectors in GPIO_INT_VEC_E::MC_INTR_PP(). */
+ uint64_t gpios : 8; /**< [ 7: 0](RO) Number of GPIOs implemented. */
+#else /* Word 0 - Little Endian */
+ uint64_t gpios : 8; /**< [ 7: 0](RO) Number of GPIOs implemented. */
+ uint64_t pp : 8; /**< [ 15: 8](RO) Number of PP vectors in GPIO_INT_VEC_E::MC_INTR_PP(). */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_gpio_const_s cn9; */
+};
+typedef union bdk_gpio_const bdk_gpio_const_t;
+
+#define BDK_GPIO_CONST BDK_GPIO_CONST_FUNC()
+static inline uint64_t BDK_GPIO_CONST_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GPIO_CONST_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x803000000090ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x803000000090ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x803000000090ll;
+ __bdk_csr_fatal("GPIO_CONST", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_GPIO_CONST bdk_gpio_const_t
+#define bustype_BDK_GPIO_CONST BDK_CSR_TYPE_NCB
+#define basename_BDK_GPIO_CONST "GPIO_CONST"
+#define device_bar_BDK_GPIO_CONST 0x0 /* PF_BAR0 */
+#define busnum_BDK_GPIO_CONST 0
+#define arguments_BDK_GPIO_CONST -1,-1,-1,-1
+
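+/*
+ * Editor's sketch (not part of the BDK import): discover the implemented
+ * GPIO count at runtime from GPIO_CONST[GPIOS]. Note GPIO_CONST is not
+ * modeled for CN88XX above, so callers there would need a fixed count
+ * instead. Assumes the BDK_CSR_READ helper from bdk-csr.h.
+ */
+static inline int bdk_gpio_example_num_pins(bdk_node_t node)
+{
+ bdk_gpio_const_t gpio_const;
+ gpio_const.u = BDK_CSR_READ(node, BDK_GPIO_CONST);
+ return gpio_const.s.gpios;
+}
+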
+/**
+ * Register (NCB) gpio_intr#
+ *
+ * GPIO Bit Interrupt Registers
+ * Each register provides interrupt information for the corresponding GPIO pin.
+ * GPIO_INTR() interrupts can be level or edge interrupts depending on GPIO_BIT_CFG()[INT_TYPE].
+ *
+ * Each index is only accessible to the requestor(s) permitted with GPIO_BIT_PERMIT().
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_gpio_intrx
+{
+ uint64_t u;
+ struct bdk_gpio_intrx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t intr_ena_w1s : 1; /**< [ 3: 3](R/W1S) GPIO signaled interrupt enable. Write one to set interrupt enable.
+ [INTR_ENA_W1S] and [INTR_ENA_W1C] both read the interrupt enable state. */
+ uint64_t intr_ena_w1c : 1; /**< [ 2: 2](R/W1C) GPIO signaled interrupt enable. Write one to clear interrupt enable.
+ [INTR_ENA_W1S] and [INTR_ENA_W1C] both read the interrupt enable state. */
+ uint64_t intr_w1s : 1; /**< [ 1: 1](R/W1S/H) GPIO signaled interrupt. If interrupts are edge-sensitive, write one to set; otherwise
+ the bit clears automatically when the GPIO pin deasserts.
+ [INTR_W1S] and [INTR] both read the interrupt state. */
+ uint64_t intr : 1; /**< [ 0: 0](R/W1C/H) GPIO signaled interrupt. If interrupts are edge-sensitive, write one to clear; otherwise
+ the bit clears automatically when the GPIO pin deasserts.
+ [INTR_W1S] and [INTR] both read the interrupt state.
+ An interrupt set event is sent when [INTR_ENA_W1S] reads as set. */
+#else /* Word 0 - Little Endian */
+ uint64_t intr : 1; /**< [ 0: 0](R/W1C/H) GPIO signaled interrupt. If interrupts are edge-sensitive, write one to clear; otherwise
+ the bit clears automatically when the GPIO pin deasserts.
+ [INTR_W1S] and [INTR] both read the interrupt state.
+ An interrupt set event is sent when [INTR_ENA_W1S] reads as set. */
+ uint64_t intr_w1s : 1; /**< [ 1: 1](R/W1S/H) GPIO signaled interrupt. If interrupts are edge-sensitive, write one to set; otherwise
+ the bit clears automatically when the GPIO pin deasserts.
+ [INTR_W1S] and [INTR] both read the interrupt state. */
+ uint64_t intr_ena_w1c : 1; /**< [ 2: 2](R/W1C) GPIO signaled interrupt enable. Write one to clear interrupt enable.
+ [INTR_ENA_W1S] and [INTR_ENA_W1C] both read the interrupt enable state. */
+ uint64_t intr_ena_w1s : 1; /**< [ 3: 3](R/W1S) GPIO signaled interrupt enable. Write one to set interrupt enable.
+ [INTR_ENA_W1S] and [INTR_ENA_W1C] both read the interrupt enable state. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gpio_intrx_s cn; */
+};
+typedef union bdk_gpio_intrx bdk_gpio_intrx_t;
+
+static inline uint64_t BDK_GPIO_INTRX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GPIO_INTRX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=47))
+ return 0x803000000800ll + 8ll * ((a) & 0x3f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=79))
+ return 0x803000000800ll + 8ll * ((a) & 0x7f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=50))
+ return 0x803000000800ll + 8ll * ((a) & 0x3f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=63))
+ return 0x803000000800ll + 8ll * ((a) & 0x3f);
+ __bdk_csr_fatal("GPIO_INTRX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GPIO_INTRX(a) bdk_gpio_intrx_t
+#define bustype_BDK_GPIO_INTRX(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_GPIO_INTRX(a) "GPIO_INTRX"
+#define device_bar_BDK_GPIO_INTRX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GPIO_INTRX(a) (a)
+#define arguments_BDK_GPIO_INTRX(a) (a),-1,-1,-1
+
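+/*
+ * Editor's sketch (not part of the BDK import): enable interrupt delivery
+ * for a pin, then acknowledge one edge-type occurrence.
+ * GPIO_BIT_CFG()[INT_EN] and [INT_TYPE] must already be configured; a
+ * level-type [INTR] clears itself when the pin deasserts. Assumes the
+ * BDK_CSR_WRITE helper from bdk-csr.h.
+ */
+static inline void bdk_gpio_example_intr_ack(bdk_node_t node, int pin)
+{
+ bdk_gpio_intrx_t intr;
+ intr.u = 0;
+ intr.s.intr_ena_w1s = 1; /* enable delivery toward the MSI-X vector */
+ BDK_CSR_WRITE(node, BDK_GPIO_INTRX(pin), intr.u);
+
+ intr.u = 0;
+ intr.s.intr = 1; /* W1C: acknowledge an edge interrupt */
+ BDK_CSR_WRITE(node, BDK_GPIO_INTRX(pin), intr.u);
+}
+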
+/**
+ * Register (NCB) gpio_io_ctl
+ *
+ * GPIO I/O Control Register
+ * This register is only accessible to the requestor(s) permitted with GPIO_PERMIT.
+ *
+ * When permitted, this register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_gpio_io_ctl
+{
+ uint64_t u;
+ struct bdk_gpio_io_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t drive2 : 2; /**< [ 11: 10](R/W) GPIO\<63:48\> pin output drive strength.
+ 0x0 = 2 mA.
+ 0x1 = 4 mA.
+ 0x2 = 8 mA.
+ 0x3 = 16 mA. */
+ uint64_t reserved_9 : 1;
+ uint64_t slew2 : 1; /**< [ 8: 8](R/W) GPIO\<63:48\> pin output slew rate control.
+ 0 = Low slew rate.
+ 1 = High slew rate. */
+ uint64_t drive1 : 2; /**< [ 7: 6](R/W) GPIO\<47:24\> pin output drive strength.
+ 0x0 = 2 mA.
+ 0x1 = 4 mA.
+ 0x2 = 8 mA.
+ 0x3 = 16 mA. */
+ uint64_t reserved_5 : 1;
+ uint64_t slew1 : 1; /**< [ 4: 4](R/W) GPIO\<47:24\> pin output slew rate control.
+ 0 = Low slew rate.
+ 1 = High slew rate. */
+ uint64_t drive0 : 2; /**< [ 3: 2](R/W) GPIO\<23:0\> pin output drive strength.
+ 0x0 = 2 mA.
+ 0x1 = 4 mA.
+ 0x2 = 8 mA.
+ 0x3 = 16 mA. */
+ uint64_t reserved_1 : 1;
+ uint64_t slew0 : 1; /**< [ 0: 0](R/W) GPIO\<23:0\> pin output slew rate control.
+ 0 = Low slew rate.
+ 1 = High slew rate. */
+#else /* Word 0 - Little Endian */
+ uint64_t slew0 : 1; /**< [ 0: 0](R/W) GPIO\<23:0\> pin output slew rate control.
+ 0 = Low slew rate.
+ 1 = High slew rate. */
+ uint64_t reserved_1 : 1;
+ uint64_t drive0 : 2; /**< [ 3: 2](R/W) GPIO\<23:0\> pin output drive strength.
+ 0x0 = 2 mA.
+ 0x1 = 4 mA.
+ 0x2 = 8 mA.
+ 0x3 = 16 mA. */
+ uint64_t slew1 : 1; /**< [ 4: 4](R/W) GPIO\<47:24\> pin output slew rate control.
+ 0 = Low slew rate.
+ 1 = High slew rate. */
+ uint64_t reserved_5 : 1;
+ uint64_t drive1 : 2; /**< [ 7: 6](R/W) GPIO\<47:24\> pin output drive strength.
+ 0x0 = 2 mA.
+ 0x1 = 4 mA.
+ 0x2 = 8 mA.
+ 0x3 = 16 mA. */
+ uint64_t slew2 : 1; /**< [ 8: 8](R/W) GPIO\<63:48\> pin output slew rate control.
+ 0 = Low slew rate.
+ 1 = High slew rate. */
+ uint64_t reserved_9 : 1;
+ uint64_t drive2 : 2; /**< [ 11: 10](R/W) GPIO\<63:48\> pin output drive strength.
+ 0x0 = 2 mA.
+ 0x1 = 4 mA.
+ 0x2 = 8 mA.
+ 0x3 = 16 mA. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gpio_io_ctl_s cn; */
+};
+typedef union bdk_gpio_io_ctl bdk_gpio_io_ctl_t;
+
+#define BDK_GPIO_IO_CTL BDK_GPIO_IO_CTL_FUNC()
+static inline uint64_t BDK_GPIO_IO_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GPIO_IO_CTL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x803000000080ll;
+ __bdk_csr_fatal("GPIO_IO_CTL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_GPIO_IO_CTL bdk_gpio_io_ctl_t
+#define bustype_BDK_GPIO_IO_CTL BDK_CSR_TYPE_NCB
+#define basename_BDK_GPIO_IO_CTL "GPIO_IO_CTL"
+#define device_bar_BDK_GPIO_IO_CTL 0x0 /* PF_BAR0 */
+#define busnum_BDK_GPIO_IO_CTL 0
+#define arguments_BDK_GPIO_IO_CTL -1,-1,-1,-1
+
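+/*
+ * Editor's sketch (not part of the BDK import): set all three pin banks
+ * (GPIO<23:0>, <47:24>, <63:48>) to 8 mA drive strength with low slew rate.
+ * CN9XXX only; assumes the BDK_CSR_WRITE helper from bdk-csr.h.
+ */
+static inline void bdk_gpio_example_io_ctl(bdk_node_t node)
+{
+ bdk_gpio_io_ctl_t ctl;
+ ctl.u = 0; /* slew0/1/2 = 0 selects the low slew rate */
+ ctl.s.drive0 = 0x2; /* GPIO<23:0>:  8 mA */
+ ctl.s.drive1 = 0x2; /* GPIO<47:24>: 8 mA */
+ ctl.s.drive2 = 0x2; /* GPIO<63:48>: 8 mA */
+ BDK_CSR_WRITE(node, BDK_GPIO_IO_CTL, ctl.u);
+}
+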
+/**
+ * Register (NCB) gpio_mc_intr#
+ *
+ * GPIO Bit Multicast Interrupt Registers
+ * Each register provides interrupt multicasting for GPIO(4..7).
+ *
+ * This register is only accessible to the requestor(s) permitted with GPIO_PERMIT.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_gpio_mc_intrx
+{
+ uint64_t u;
+ struct bdk_gpio_mc_intrx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t intr : 48; /**< [ 47: 0](R/W1C/H) GPIO interrupt for each core. When the corresponding GPIO pin (4..7) is edge-triggered and MULTI_CAST
+ is enabled, a GPIO assertion will set all 48 bits. Each bit is expected to be routed to
+ interrupt a different core using the CIU, and each core will then write one to clear its
+ corresponding bit in this register. */
+#else /* Word 0 - Little Endian */
+ uint64_t intr : 48; /**< [ 47: 0](R/W1C/H) GPIO interrupt for each core. When the corresponding GPIO pin (4..7) is edge-triggered and MULTI_CAST
+ is enabled, a GPIO assertion will set all 48 bits. Each bit is expected to be routed to
+ interrupt a different core using the CIU, and each core will then write one to clear its
+ corresponding bit in this register. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gpio_mc_intrx_s cn8; */
+ struct bdk_gpio_mc_intrx_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t intr : 24; /**< [ 23: 0](R/W1C/H) GPIO interrupt for each core. When the corresponding GPIO pin (4..7) is edge-triggered and GPIO_MULTI_CAST[EN]
+ is enabled, a GPIO assertion will set all 24 bits. Each bit is expected to be routed to
+ interrupt a different core using the CIU, and each core will then write one to clear its
+ corresponding bit in this register. */
+#else /* Word 0 - Little Endian */
+ uint64_t intr : 24; /**< [ 23: 0](R/W1C/H) GPIO interrupt for each core. When the corresponding GPIO pin (4..7) is edge-triggered and GPIO_MULTI_CAST[EN]
+ is enabled, a GPIO assertion will set all 24 bits. Each bit is expected to be routed to
+ interrupt a different core using the CIU, and each core will then write one to clear its
+ corresponding bit in this register. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_gpio_mc_intrx bdk_gpio_mc_intrx_t;
+
+static inline uint64_t BDK_GPIO_MC_INTRX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GPIO_MC_INTRX(unsigned long a)
+{
+ if ((a>=4)&&(a<=7))
+ return 0x803000001000ll + 8ll * ((a) & 0x7);
+ __bdk_csr_fatal("GPIO_MC_INTRX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GPIO_MC_INTRX(a) bdk_gpio_mc_intrx_t
+#define bustype_BDK_GPIO_MC_INTRX(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_GPIO_MC_INTRX(a) "GPIO_MC_INTRX"
+#define device_bar_BDK_GPIO_MC_INTRX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GPIO_MC_INTRX(a) (a)
+#define arguments_BDK_GPIO_MC_INTRX(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) gpio_mc_intr#_ena_w1c
+ *
+ * GPIO Bit Multicast Interrupt Registers
+ * This register clears interrupt enable bits.
+ */
+union bdk_gpio_mc_intrx_ena_w1c
+{
+ uint64_t u;
+ struct bdk_gpio_mc_intrx_ena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t intr : 48; /**< [ 47: 0](R/W1C/H) Reads or clears enable for GPIO_MC_INTR(4..7)[INTR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t intr : 48; /**< [ 47: 0](R/W1C/H) Reads or clears enable for GPIO_MC_INTR(4..7)[INTR]. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gpio_mc_intrx_ena_w1c_s cn8; */
+ struct bdk_gpio_mc_intrx_ena_w1c_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t intr : 24; /**< [ 23: 0](R/W1C/H) Reads or clears enable for GPIO_MC_INTR(4..7)[INTR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t intr : 24; /**< [ 23: 0](R/W1C/H) Reads or clears enable for GPIO_MC_INTR(4..7)[INTR]. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_gpio_mc_intrx_ena_w1c bdk_gpio_mc_intrx_ena_w1c_t;
+
+static inline uint64_t BDK_GPIO_MC_INTRX_ENA_W1C(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GPIO_MC_INTRX_ENA_W1C(unsigned long a)
+{
+ if ((a>=4)&&(a<=7))
+ return 0x803000001200ll + 8ll * ((a) & 0x7);
+ __bdk_csr_fatal("GPIO_MC_INTRX_ENA_W1C", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GPIO_MC_INTRX_ENA_W1C(a) bdk_gpio_mc_intrx_ena_w1c_t
+#define bustype_BDK_GPIO_MC_INTRX_ENA_W1C(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_GPIO_MC_INTRX_ENA_W1C(a) "GPIO_MC_INTRX_ENA_W1C"
+#define device_bar_BDK_GPIO_MC_INTRX_ENA_W1C(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GPIO_MC_INTRX_ENA_W1C(a) (a)
+#define arguments_BDK_GPIO_MC_INTRX_ENA_W1C(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) gpio_mc_intr#_ena_w1s
+ *
+ * GPIO Bit Multicast Interrupt Registers
+ * This register sets interrupt enable bits.
+ */
+union bdk_gpio_mc_intrx_ena_w1s
+{
+ uint64_t u;
+ struct bdk_gpio_mc_intrx_ena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t intr : 48; /**< [ 47: 0](R/W1S/H) Reads or sets enable for GPIO_MC_INTR(4..7)[INTR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t intr : 48; /**< [ 47: 0](R/W1S/H) Reads or sets enable for GPIO_MC_INTR(4..7)[INTR]. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gpio_mc_intrx_ena_w1s_s cn8; */
+ struct bdk_gpio_mc_intrx_ena_w1s_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t intr : 24; /**< [ 23: 0](R/W1S/H) Reads or sets enable for GPIO_MC_INTR(4..7)[INTR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t intr : 24; /**< [ 23: 0](R/W1S/H) Reads or sets enable for GPIO_MC_INTR(4..7)[INTR]. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_gpio_mc_intrx_ena_w1s bdk_gpio_mc_intrx_ena_w1s_t;
+
+static inline uint64_t BDK_GPIO_MC_INTRX_ENA_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GPIO_MC_INTRX_ENA_W1S(unsigned long a)
+{
+ if ((a>=4)&&(a<=7))
+ return 0x803000001300ll + 8ll * ((a) & 0x7);
+ __bdk_csr_fatal("GPIO_MC_INTRX_ENA_W1S", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GPIO_MC_INTRX_ENA_W1S(a) bdk_gpio_mc_intrx_ena_w1s_t
+#define bustype_BDK_GPIO_MC_INTRX_ENA_W1S(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_GPIO_MC_INTRX_ENA_W1S(a) "GPIO_MC_INTRX_ENA_W1S"
+#define device_bar_BDK_GPIO_MC_INTRX_ENA_W1S(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GPIO_MC_INTRX_ENA_W1S(a) (a)
+#define arguments_BDK_GPIO_MC_INTRX_ENA_W1S(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) gpio_mc_intr#_w1s
+ *
+ * GPIO Bit Multicast Interrupt Registers
+ * This register sets interrupt bits.
+ */
+union bdk_gpio_mc_intrx_w1s
+{
+ uint64_t u;
+ struct bdk_gpio_mc_intrx_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t intr : 48; /**< [ 47: 0](R/W1S/H) Reads or sets GPIO_MC_INTR(4..7)[INTR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t intr : 48; /**< [ 47: 0](R/W1S/H) Reads or sets GPIO_MC_INTR(4..7)[INTR]. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gpio_mc_intrx_w1s_s cn8; */
+ struct bdk_gpio_mc_intrx_w1s_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t intr : 24; /**< [ 23: 0](R/W1S/H) Reads or sets GPIO_MC_INTR(4..7)[INTR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t intr : 24; /**< [ 23: 0](R/W1S/H) Reads or sets GPIO_MC_INTR(4..7)[INTR]. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_gpio_mc_intrx_w1s bdk_gpio_mc_intrx_w1s_t;
+
+static inline uint64_t BDK_GPIO_MC_INTRX_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GPIO_MC_INTRX_W1S(unsigned long a)
+{
+ if ((a>=4)&&(a<=7))
+ return 0x803000001100ll + 8ll * ((a) & 0x7);
+ __bdk_csr_fatal("GPIO_MC_INTRX_W1S", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GPIO_MC_INTRX_W1S(a) bdk_gpio_mc_intrx_w1s_t
+#define bustype_BDK_GPIO_MC_INTRX_W1S(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_GPIO_MC_INTRX_W1S(a) "GPIO_MC_INTRX_W1S"
+#define device_bar_BDK_GPIO_MC_INTRX_W1S(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GPIO_MC_INTRX_W1S(a) (a)
+#define arguments_BDK_GPIO_MC_INTRX_W1S(a) (a),-1,-1,-1
+
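+/*
+ * Editor's sketch (not part of the BDK import): a core acknowledges its own
+ * multicast interrupt bit for GPIO pin 4..7 by writing one to its position
+ * in GPIO_MC_INTR(); 'core' is the bit index routed to this core. Assumes
+ * the BDK_CSR_WRITE helper from bdk-csr.h.
+ */
+static inline void bdk_gpio_example_mc_intr_ack(bdk_node_t node, int pin, int core)
+{
+ BDK_CSR_WRITE(node, BDK_GPIO_MC_INTRX(pin), 1ull << core);
+}
+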
+/**
+ * Register (NCB) gpio_misc_strap
+ *
+ * GPIO Misc Strap Value Register
+ * This register contains the miscellaneous strap state.
+ *
+ * This register is accessible to all requestors (regardless of GPIO_PERMIT).
+ *
+ * When permitted, this register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_gpio_misc_strap
+{
+ uint64_t u;
+ struct bdk_gpio_misc_strap_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_18_63 : 46;
+ uint64_t uart1_rts : 1; /**< [ 17: 17](RO/H) State of UART1_RTS_N pin strap sampled when DCOK asserts. */
+ uint64_t uart0_rts : 1; /**< [ 16: 16](RO/H) State of UART0_RTS_N pin strap sampled when DCOK asserts. */
+ uint64_t reserved_0_15 : 16;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_15 : 16;
+ uint64_t uart0_rts : 1; /**< [ 16: 16](RO/H) State of UART0_RTS_N pin strap sampled when DCOK asserts. */
+ uint64_t uart1_rts : 1; /**< [ 17: 17](RO/H) State of UART1_RTS_N pin strap sampled when DCOK asserts. */
+ uint64_t reserved_18_63 : 46;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gpio_misc_strap_s cn; */
+};
+typedef union bdk_gpio_misc_strap bdk_gpio_misc_strap_t;
+
+#define BDK_GPIO_MISC_STRAP BDK_GPIO_MISC_STRAP_FUNC()
+static inline uint64_t BDK_GPIO_MISC_STRAP_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GPIO_MISC_STRAP_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x803000000030ll;
+ __bdk_csr_fatal("GPIO_MISC_STRAP", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_GPIO_MISC_STRAP bdk_gpio_misc_strap_t
+#define bustype_BDK_GPIO_MISC_STRAP BDK_CSR_TYPE_NCB
+#define basename_BDK_GPIO_MISC_STRAP "GPIO_MISC_STRAP"
+#define device_bar_BDK_GPIO_MISC_STRAP 0x0 /* PF_BAR0 */
+#define busnum_BDK_GPIO_MISC_STRAP 0
+#define arguments_BDK_GPIO_MISC_STRAP -1,-1,-1,-1
+
+/**
+ * Register (NCB) gpio_misc_supply
+ *
+ * GPIO Misc Supply Value Register
+ * This register contains the state of the GPIO power supplies.
+ *
+ * This register is accessible to all requestors (regardless of GPIO_PERMIT).
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_gpio_misc_supply
+{
+ uint64_t u;
+ struct bdk_gpio_misc_supply_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_22_63 : 42;
+ uint64_t vdet_avs : 2; /**< [ 21: 20](RO/H) Sensed I/O power supply setting for AVS bus:
+ 0x0 = 3.3 V.
+ 0x1 = 2.5 V.
+ 0x2/0x3 = 1.8 V.
+ _ All other values reserved. */
+ uint64_t vdet_emmc : 2; /**< [ 19: 18](RO/H) Sensed I/O power supply setting for EMMC bus:
+ 0x0 = 3.3 V.
+ 0x1 = 2.5 V.
+ 0x2/0x3 = 1.8 V.
+ _ All other values reserved. */
+ uint64_t vdet_gpio0 : 2; /**< [ 17: 16](RO/H) Sensed I/O power supply setting for GPIO0..23.
+ 0x0 = 3.3 V.
+ 0x1 = 2.5 V.
+ 0x2/0x3 = 1.8 V.
+ _ All other values reserved. */
+ uint64_t vdet_gpio24 : 2; /**< [ 15: 14](RO/H) Sensed I/O power supply setting for GPIO24..47.
+ 0x0 = 3.3 V.
+ 0x1 = 2.5 V.
+ 0x2/0x3 = 1.8 V.
+ _ All other values reserved. */
+ uint64_t vdet_gpio48 : 2; /**< [ 13: 12](RO/H) Sensed I/O power supply setting for GPIO48..63.
+ 0x0 = 3.3 V.
+ 0x1 = 2.5 V.
+ 0x2/0x3 = 1.8 V.
+ _ All other values reserved. */
+ uint64_t vdet_io_e : 2; /**< [ 11: 10](RO/H) Sensed I/O power supply setting for generic east IO pins:
+ 0x0 = 3.3 V.
+ 0x1 = 2.5 V.
+ 0x2/0x3 = 1.8 V.
+ _ All other values reserved. */
+ uint64_t vdet_io_n : 2; /**< [ 9: 8](RO/H) Sensed I/O power supply setting for generic north IO pins:
+ 0x0 = 3.3 V.
+ 0x1 = 2.5 V.
+ 0x2/0x3 = 1.8 V.
+ _ All other values reserved. */
+ uint64_t vdet_pci : 2; /**< [ 7: 6](RO/H) Sensed I/O power supply setting for PCI IO pins:
+ 0x0 = 3.3 V.
+ 0x1 = 2.5 V.
+ 0x2/0x3 = 1.8 V.
+ _ All other values reserved. */
+ uint64_t vdet_smi : 2; /**< [ 5: 4](RO/H) Sensed I/O power supply setting for SMI bus:
+ 0x0 = 3.3 V.
+ 0x1 = 2.5 V.
+ 0x2/0x3 = 1.8 V.
+ _ All other values reserved. */
+ uint64_t vdet_spi : 2; /**< [ 3: 2](RO/H) Sensed I/O power supply setting for SPI0 bus:
+ 0x0 = 3.3 V.
+ 0x1 = 2.5 V.
+ 0x2/0x3 = 1.8 V.
+ _ All other values reserved. */
+ uint64_t vdet_tws_avs : 2; /**< [ 1: 0](RO/H) Sensed I/O power supply setting for TWSI and AVS:
+ 0x0 = 3.3 V.
+ 0x1 = 2.5 V.
+ 0x2/0x3 = 1.8 V.
+ _ All other values reserved. */
+#else /* Word 0 - Little Endian */
+ uint64_t vdet_tws_avs : 2; /**< [ 1: 0](RO/H) Sensed I/O power supply setting for TWSI and AVS:
+ 0x0 = 3.3 V.
+ 0x1 = 2.5 V.
+ 0x2/0x3 = 1.8 V.
+ _ All other values reserved. */
+ uint64_t vdet_spi : 2; /**< [ 3: 2](RO/H) Sensed I/O power supply setting for SPI0 bus:
+ 0x0 = 3.3 V.
+ 0x1 = 2.5 V.
+ 0x2/0x3 = 1.8 V.
+ _ All other values reserved. */
+ uint64_t vdet_smi : 2; /**< [ 5: 4](RO/H) Sensed I/O power supply setting for SMI bus:
+ 0x0 = 3.3 V.
+ 0x1 = 2.5 V.
+ 0x2/0x3 = 1.8 V.
+ _ All other values reserved. */
+ uint64_t vdet_pci : 2; /**< [ 7: 6](RO/H) Sensed I/O power supply setting for PCI IO pins:
+ 0x0 = 3.3 V.
+ 0x1 = 2.5 V.
+ 0x2/0x3 = 1.8 V.
+ _ All other values reserved. */
+ uint64_t vdet_io_n : 2; /**< [ 9: 8](RO/H) Sensed I/O power supply setting for generic north IO pins:
+ 0x0 = 3.3 V.
+ 0x1 = 2.5 V.
+ 0x2/0x3 = 1.8 V.
+ _ All other values reserved. */
+ uint64_t vdet_io_e : 2; /**< [ 11: 10](RO/H) Sensed I/O power supply setting for generic east IO pins:
+ 0x0 = 3.3 V.
+ 0x1 = 2.5 V.
+ 0x2/0x3 = 1.8 V.
+ _ All other values reserved. */
+ uint64_t vdet_gpio48 : 2; /**< [ 13: 12](RO/H) Sensed I/O power supply setting for GPIO48..63.
+ 0x0 = 3.3 V.
+ 0x1 = 2.5 V.
+ 0x2/0x3 = 1.8 V.
+ _ All other values reserved. */
+ uint64_t vdet_gpio24 : 2; /**< [ 15: 14](RO/H) Sensed I/O power supply setting for GPIO24..47.
+ 0x0 = 3.3 V.
+ 0x1 = 2.5 V.
+ 0x2/0x3 = 1.8 V.
+ _ All other values reserved. */
+ uint64_t vdet_gpio0 : 2; /**< [ 17: 16](RO/H) Sensed I/O power supply setting for GPIO0..23.
+ 0x0 = 3.3 V.
+ 0x1 = 2.5 V.
+ 0x2/0x3 = 1.8 V.
+ _ All other values reserved. */
+ uint64_t vdet_emmc : 2; /**< [ 19: 18](RO/H) Sensed I/O power supply setting for EMMC bus:
+ 0x0 = 3.3 V.
+ 0x1 = 2.5 V.
+ 0x2/0x3 = 1.8 V.
+ _ All other values reserved. */
+ uint64_t vdet_avs : 2; /**< [ 21: 20](RO/H) Sensed I/O power supply setting for AVS bus:
+ 0x0 = 3.3 V.
+ 0x1 = 2.5 V.
+ 0x2/0x3 = 1.8 V.
+ _ All other values reserved. */
+ uint64_t reserved_22_63 : 42;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gpio_misc_supply_s cn; */
+};
+typedef union bdk_gpio_misc_supply bdk_gpio_misc_supply_t;
+
+#define BDK_GPIO_MISC_SUPPLY BDK_GPIO_MISC_SUPPLY_FUNC()
+static inline uint64_t BDK_GPIO_MISC_SUPPLY_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GPIO_MISC_SUPPLY_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x803000000038ll;
+ __bdk_csr_fatal("GPIO_MISC_SUPPLY", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_GPIO_MISC_SUPPLY bdk_gpio_misc_supply_t
+#define bustype_BDK_GPIO_MISC_SUPPLY BDK_CSR_TYPE_NCB
+#define basename_BDK_GPIO_MISC_SUPPLY "GPIO_MISC_SUPPLY"
+#define device_bar_BDK_GPIO_MISC_SUPPLY 0x0 /* PF_BAR0 */
+#define busnum_BDK_GPIO_MISC_SUPPLY 0
+#define arguments_BDK_GPIO_MISC_SUPPLY -1,-1,-1,-1
+
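+/* Illustrative sketch (editorial, not part of the auto-generated definitions):
+ * decoding one of the 2-bit VDET supply encodings above into millivolts.
+ * Assumes the BDK_CSR_INIT accessor macro from bdk-csr.h; the helper name is
+ * hypothetical.
+ */
+static inline int bdk_gpio_example_spi_supply_mv(bdk_node_t node)
+{
+    BDK_CSR_INIT(supply, node, BDK_GPIO_MISC_SUPPLY);
+    switch (supply.s.vdet_spi)
+    {
+        case 0x0: return 3300; /* 3.3 V */
+        case 0x1: return 2500; /* 2.5 V */
+        default:  return 1800; /* 0x2 and 0x3 both indicate 1.8 V */
+    }
+}
+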
+/**
+ * Register (NCB) gpio_msix_pba#
+ *
+ * GPIO MSI-X Pending Bit Array Registers
+ * This register is the MSI-X PBA table; the bit number is indexed by the GPIO_INT_VEC_E enumeration.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_gpio_msix_pbax
+{
+ uint64_t u;
+ struct bdk_gpio_msix_pbax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pend : 64; /**< [ 63: 0](RO) Pending message for the associated GPIO_MSIX_VEC()_CTL, enumerated by
+ GPIO_INT_VEC_E. Bits
+ that have no associated GPIO_INT_VEC_E are 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t pend : 64; /**< [ 63: 0](RO) Pending message for the associated GPIO_MSIX_VEC()_CTL, enumerated by
+ GPIO_INT_VEC_E. Bits
+ that have no associated GPIO_INT_VEC_E are 0. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gpio_msix_pbax_s cn; */
+};
+typedef union bdk_gpio_msix_pbax bdk_gpio_msix_pbax_t;
+
+static inline uint64_t BDK_GPIO_MSIX_PBAX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GPIO_MSIX_PBAX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x803000ff0000ll + 8ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=2))
+ return 0x803000ff0000ll + 8ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=2))
+ return 0x803000ff0000ll + 8ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x803000ff0000ll + 8ll * ((a) & 0x3);
+ __bdk_csr_fatal("GPIO_MSIX_PBAX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GPIO_MSIX_PBAX(a) bdk_gpio_msix_pbax_t
+#define bustype_BDK_GPIO_MSIX_PBAX(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_GPIO_MSIX_PBAX(a) "GPIO_MSIX_PBAX"
+#define device_bar_BDK_GPIO_MSIX_PBAX(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GPIO_MSIX_PBAX(a) (a)
+#define arguments_BDK_GPIO_MSIX_PBAX(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) gpio_msix_vec#_addr
+ *
+ * GPIO MSI-X Vector-Table Address Register
+ * This register is the MSI-X vector table, indexed by the GPIO_INT_VEC_E enumeration.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_gpio_msix_vecx_addr
+{
+ uint64_t u;
+ struct bdk_gpio_msix_vecx_addr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_53_63 : 11;
+ uint64_t addr : 51; /**< [ 52: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_1 : 1;
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's GPIO_MSIX_VEC()_ADDR, GPIO_MSIX_VEC()_CTL, and corresponding
+ bit of GPIO_MSIX_PBA() are RAZ/WI and do not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_GPIO_VSEC_SCTL[MSIX_SEC] (for documentation, see PCCPF_XXX_VSEC_SCTL[MSIX_SEC])
+ is set, all vectors are secure and function as if [SECVEC] was set. */
+#else /* Word 0 - Little Endian */
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's GPIO_MSIX_VEC()_ADDR, GPIO_MSIX_VEC()_CTL, and corresponding
+ bit of GPIO_MSIX_PBA() are RAZ/WI and do not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_GPIO_VSEC_SCTL[MSIX_SEC] (for documentation, see PCCPF_XXX_VSEC_SCTL[MSIX_SEC])
+ is set, all vectors are secure and function as if [SECVEC] was set. */
+ uint64_t reserved_1 : 1;
+ uint64_t addr : 51; /**< [ 52: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_53_63 : 11;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_gpio_msix_vecx_addr_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_1 : 1;
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's GPIO_MSIX_VEC()_ADDR, GPIO_MSIX_VEC()_CTL, and corresponding
+ bit of GPIO_MSIX_PBA() are RAZ/WI and do not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_GPIO_VSEC_SCTL[MSIX_SEC] (for documentation, see PCCPF_XXX_VSEC_SCTL[MSIX_SEC])
+ is set, all vectors are secure and function as if [SECVEC] was set. */
+#else /* Word 0 - Little Endian */
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's GPIO_MSIX_VEC()_ADDR, GPIO_MSIX_VEC()_CTL, and corresponding
+ bit of GPIO_MSIX_PBA() are RAZ/WI and do not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_GPIO_VSEC_SCTL[MSIX_SEC] (for documentation, see PCCPF_XXX_VSEC_SCTL[MSIX_SEC])
+ is set, all vectors are secure and function as if [SECVEC] was set. */
+ uint64_t reserved_1 : 1;
+ uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_gpio_msix_vecx_addr_s cn9; */
+};
+typedef union bdk_gpio_msix_vecx_addr bdk_gpio_msix_vecx_addr_t;
+
+static inline uint64_t BDK_GPIO_MSIX_VECX_ADDR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GPIO_MSIX_VECX_ADDR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=99))
+ return 0x803000f00000ll + 0x10ll * ((a) & 0x7f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=183))
+ return 0x803000f00000ll + 0x10ll * ((a) & 0xff);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=149))
+ return 0x803000f00000ll + 0x10ll * ((a) & 0xff);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=181))
+ return 0x803000f00000ll + 0x10ll * ((a) & 0xff);
+ __bdk_csr_fatal("GPIO_MSIX_VECX_ADDR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GPIO_MSIX_VECX_ADDR(a) bdk_gpio_msix_vecx_addr_t
+#define bustype_BDK_GPIO_MSIX_VECX_ADDR(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_GPIO_MSIX_VECX_ADDR(a) "GPIO_MSIX_VECX_ADDR"
+#define device_bar_BDK_GPIO_MSIX_VECX_ADDR(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GPIO_MSIX_VECX_ADDR(a) (a)
+#define arguments_BDK_GPIO_MSIX_VECX_ADDR(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) gpio_msix_vec#_ctl
+ *
+ * GPIO MSI-X Vector-Table Control and Data Register
+ * This register is the MSI-X vector table, indexed by the GPIO_INT_VEC_E enumeration.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_gpio_msix_vecx_ctl
+{
+ uint64_t u;
+ struct bdk_gpio_msix_vecx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
+ uint64_t data : 32; /**< [ 31: 0](R/W) Data to use for MSI-X delivery of this vector. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 32; /**< [ 31: 0](R/W) Data to use for MSI-X delivery of this vector. */
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_gpio_msix_vecx_ctl_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t data : 20; /**< [ 19: 0](R/W) Data to use for MSI-X delivery of this vector. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 20; /**< [ 19: 0](R/W) Data to use for MSI-X delivery of this vector. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_gpio_msix_vecx_ctl_s cn9; */
+};
+typedef union bdk_gpio_msix_vecx_ctl bdk_gpio_msix_vecx_ctl_t;
+
+static inline uint64_t BDK_GPIO_MSIX_VECX_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GPIO_MSIX_VECX_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=99))
+ return 0x803000f00008ll + 0x10ll * ((a) & 0x7f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=183))
+ return 0x803000f00008ll + 0x10ll * ((a) & 0xff);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=149))
+ return 0x803000f00008ll + 0x10ll * ((a) & 0xff);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=181))
+ return 0x803000f00008ll + 0x10ll * ((a) & 0xff);
+ __bdk_csr_fatal("GPIO_MSIX_VECX_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GPIO_MSIX_VECX_CTL(a) bdk_gpio_msix_vecx_ctl_t
+#define bustype_BDK_GPIO_MSIX_VECX_CTL(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_GPIO_MSIX_VECX_CTL(a) "GPIO_MSIX_VECX_CTL"
+#define device_bar_BDK_GPIO_MSIX_VECX_CTL(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GPIO_MSIX_VECX_CTL(a) (a)
+#define arguments_BDK_GPIO_MSIX_VECX_CTL(a) (a),-1,-1,-1
+
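+/* Illustrative sketch (editorial, not part of the auto-generated definitions):
+ * programming one GPIO MSI-X vector table entry. Assumes the BDK_CSR_WRITE
+ * accessor macro from bdk-csr.h; the helper name and parameters are
+ * hypothetical, and real values come from the GPIO_INT_VEC_E enumeration and
+ * the interrupt controller's doorbell address.
+ */
+static inline void bdk_gpio_example_msix_setup(bdk_node_t node, int vec,
+                                               uint64_t msg_addr, uint32_t msg_data)
+{
+    /* [ADDR] holds IOVA bits <52:2>, so the address must be 4-byte aligned;
+       writing the raw value with the low bits clear also leaves [SECVEC]=0. */
+    BDK_CSR_WRITE(node, BDK_GPIO_MSIX_VECX_ADDR(vec), msg_addr & ~0x3ull);
+    /* Program the message payload; bit 32 ([MASK]) stays 0, enabling delivery. */
+    BDK_CSR_WRITE(node, BDK_GPIO_MSIX_VECX_CTL(vec), msg_data);
+}
+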
+/**
+ * Register (NCB) gpio_multi_cast
+ *
+ * GPIO Multicast Register
+ * This register enables multicast GPIO interrupts.
+ *
+ * This register is only accessible to the requestor(s) permitted with GPIO_PERMIT.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_gpio_multi_cast
+{
+ uint64_t u;
+ struct bdk_gpio_multi_cast_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t en : 1; /**< [ 0: 0](R/W) Enable GPIO interrupt multicast mode. When [EN] is set, GPIO\<7:4\> function in multicast
+ mode, allowing these four GPIOs to interrupt multiple cores. Multicast functionality allows
+ each GPIO to exist as a per-core interrupt, as opposed to a global interrupt. */
+#else /* Word 0 - Little Endian */
+ uint64_t en : 1; /**< [ 0: 0](R/W) Enable GPIO interrupt multicast mode. When [EN] is set, GPIO\<7:4\> function in multicast
+ mode, allowing these four GPIOs to interrupt multiple cores. Multicast functionality allows
+ each GPIO to exist as a per-core interrupt, as opposed to a global interrupt. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gpio_multi_cast_s cn; */
+};
+typedef union bdk_gpio_multi_cast bdk_gpio_multi_cast_t;
+
+#define BDK_GPIO_MULTI_CAST BDK_GPIO_MULTI_CAST_FUNC()
+static inline uint64_t BDK_GPIO_MULTI_CAST_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GPIO_MULTI_CAST_FUNC(void)
+{
+ return 0x803000000018ll;
+}
+
+#define typedef_BDK_GPIO_MULTI_CAST bdk_gpio_multi_cast_t
+#define bustype_BDK_GPIO_MULTI_CAST BDK_CSR_TYPE_NCB
+#define basename_BDK_GPIO_MULTI_CAST "GPIO_MULTI_CAST"
+#define device_bar_BDK_GPIO_MULTI_CAST 0x0 /* PF_BAR0 */
+#define busnum_BDK_GPIO_MULTI_CAST 0
+#define arguments_BDK_GPIO_MULTI_CAST -1,-1,-1,-1
+
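+/* Illustrative sketch (editorial, not part of the auto-generated definitions):
+ * enabling multicast delivery of GPIO<7:4> interrupts as described above.
+ * Assumes the BDK_CSR_MODIFY accessor macro from bdk-csr.h; the helper name
+ * is hypothetical.
+ */
+static inline void bdk_gpio_example_multicast_enable(bdk_node_t node)
+{
+    /* Read-modify-write so only [EN] changes. */
+    BDK_CSR_MODIFY(c, node, BDK_GPIO_MULTI_CAST, c.s.en = 1);
+}
+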
+/**
+ * Register (NCB) gpio_ocla_exten_trig
+ *
+ * GPIO OCLA External Trigger Register
+ * This register is only accessible to the requestor(s) permitted with GPIO_PERMIT.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_gpio_ocla_exten_trig
+{
+ uint64_t u;
+ struct bdk_gpio_ocla_exten_trig_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t m_trig : 1; /**< [ 0: 0](R/W) Manual trigger. Assert the OCLA trigger for GPIO-based triggering. This manual
+ trigger is ORed with the optional GPIO input pin selected with
+ GPIO_BIT_CFG()[PIN_SEL] = GPIO_PIN_SEL_E::OCLA_EXT_TRIGGER. */
+#else /* Word 0 - Little Endian */
+ uint64_t m_trig : 1; /**< [ 0: 0](R/W) Manual trigger. Assert the OCLA trigger for GPIO-based triggering. This manual
+ trigger is ORed with the optional GPIO input pin selected with
+ GPIO_BIT_CFG()[PIN_SEL] = GPIO_PIN_SEL_E::OCLA_EXT_TRIGGER. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gpio_ocla_exten_trig_s cn8; */
+ struct bdk_gpio_ocla_exten_trig_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t m_trig : 1; /**< [ 0: 0](R/W) Manual trigger. Assert the OCLA trigger for GPIO-based triggering. This manual
+ trigger is ORed with the optional GPIO input pin permitted with
+ GPIO_BIT_CFG()[PIN_SEL] = GPIO_PIN_SEL_E::OCLA_EXT_TRIGGER. */
+#else /* Word 0 - Little Endian */
+ uint64_t m_trig : 1; /**< [ 0: 0](R/W) Manual trigger. Assert the OCLA trigger for GPIO-based triggering. This manual
+ trigger is ORed with the optional GPIO input pin permitted with
+ GPIO_BIT_CFG()[PIN_SEL] = GPIO_PIN_SEL_E::OCLA_EXT_TRIGGER. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_gpio_ocla_exten_trig bdk_gpio_ocla_exten_trig_t;
+
+#define BDK_GPIO_OCLA_EXTEN_TRIG BDK_GPIO_OCLA_EXTEN_TRIG_FUNC()
+static inline uint64_t BDK_GPIO_OCLA_EXTEN_TRIG_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GPIO_OCLA_EXTEN_TRIG_FUNC(void)
+{
+ return 0x803000000020ll;
+}
+
+#define typedef_BDK_GPIO_OCLA_EXTEN_TRIG bdk_gpio_ocla_exten_trig_t
+#define bustype_BDK_GPIO_OCLA_EXTEN_TRIG BDK_CSR_TYPE_NCB
+#define basename_BDK_GPIO_OCLA_EXTEN_TRIG "GPIO_OCLA_EXTEN_TRIG"
+#define device_bar_BDK_GPIO_OCLA_EXTEN_TRIG 0x0 /* PF_BAR0 */
+#define busnum_BDK_GPIO_OCLA_EXTEN_TRIG 0
+#define arguments_BDK_GPIO_OCLA_EXTEN_TRIG -1,-1,-1,-1
+
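+/* Illustrative sketch (editorial, not part of the auto-generated definitions):
+ * firing the OCLA manual trigger, which is ORed with the optional external
+ * trigger pin. Assumes the BDK_CSR_MODIFY accessor macro from bdk-csr.h; the
+ * helper name is hypothetical.
+ */
+static inline void bdk_gpio_example_ocla_trigger(bdk_node_t node)
+{
+    BDK_CSR_MODIFY(c, node, BDK_GPIO_OCLA_EXTEN_TRIG, c.s.m_trig = 1);
+}
+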
+/**
+ * Register (NCB) gpio_permit
+ *
+ * GPIO Permit Register
+ * This register determines which requestor(s) are permitted to access which GPIO global
+ * registers.
+ *
+ * This register is only accessible to the requestor(s) permitted with GPIO_PERMIT.
+ * (That is, only the GPIO_PERMIT permitted agent can change the permission settings of
+ * all requestors.)
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_gpio_permit
+{
+ uint64_t u;
+ struct bdk_gpio_permit_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t permitdis : 4; /**< [ 3: 0](R/W) Each bit, if set, disables the given requestor from accessing GPIO global registers.
+ If a disabled requestor makes a request, the access becomes read-zero/write ignored.
+ \<0\> = Disable AP/NCSI/JTAG (non MCP/SCP) secure world from accessing GPIO global registers.
+ \<1\> = Disable AP/NCSI/JTAG (non MCP/SCP) nonsecure world from accessing GPIO global registers.
+ \<2\> = Disable XCP0 (SCP) from accessing GPIO global registers.
+ \<3\> = Disable XCP1 (MCP) from accessing GPIO global registers. */
+#else /* Word 0 - Little Endian */
+ uint64_t permitdis : 4; /**< [ 3: 0](R/W) Each bit, if set, disables the given requestor from accessing GPIO global registers.
+ If a disabled requestor makes a request, the access becomes read-zero/write ignored.
+ \<0\> = Disable AP/NCSI/JTAG (non MCP/SCP) secure world from accessing GPIO global registers.
+ \<1\> = Disable AP/NCSI/JTAG (non MCP/SCP) nonsecure world from accessing GPIO global registers.
+ \<2\> = Disable XCP0 (SCP) from accessing GPIO global registers.
+ \<3\> = Disable XCP1 (MCP) from accessing GPIO global registers. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gpio_permit_s cn; */
+};
+typedef union bdk_gpio_permit bdk_gpio_permit_t;
+
+#define BDK_GPIO_PERMIT BDK_GPIO_PERMIT_FUNC()
+static inline uint64_t BDK_GPIO_PERMIT_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GPIO_PERMIT_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x803000001500ll;
+ __bdk_csr_fatal("GPIO_PERMIT", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_GPIO_PERMIT bdk_gpio_permit_t
+#define bustype_BDK_GPIO_PERMIT BDK_CSR_TYPE_NCB
+#define basename_BDK_GPIO_PERMIT "GPIO_PERMIT"
+#define device_bar_BDK_GPIO_PERMIT 0x0 /* PF_BAR0 */
+#define busnum_BDK_GPIO_PERMIT 0
+#define arguments_BDK_GPIO_PERMIT -1,-1,-1,-1
+
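+/* Illustrative sketch (editorial, not part of the auto-generated definitions):
+ * locking the nonsecure AP world out of the GPIO global registers via
+ * [PERMITDIS]<1>, per the bit definitions above. Assumes the BDK_CSR_MODIFY
+ * accessor macro from bdk-csr.h; the helper name is hypothetical. Only a
+ * requestor still permitted by GPIO_PERMIT can make this write, and the
+ * register resolves only on CN9XXX parts.
+ */
+static inline void bdk_gpio_example_block_nonsecure(bdk_node_t node)
+{
+    BDK_CSR_MODIFY(c, node, BDK_GPIO_PERMIT, c.s.permitdis |= 1 << 1);
+}
+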
+/**
+ * Register (NCB) gpio_pkg_ver
+ *
+ * Chip Package Version Register
+ * This register reads the package version.
+ */
+union bdk_gpio_pkg_ver
+{
+ uint64_t u;
+ struct bdk_gpio_pkg_ver_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_3_63 : 61;
+ uint64_t pkg_ver : 3; /**< [ 2: 0](RO/H) Reads the package version straps, which are set by the package.
+ 0x0 = 47.5 x 47.5mm package non-SWP.
+ 0x1 = 40 x 40mm package.
+ 0x2 = 47.5 x 47.5mm package with SWP. */
+#else /* Word 0 - Little Endian */
+ uint64_t pkg_ver : 3; /**< [ 2: 0](RO/H) Reads the package version straps, which are set by the package.
+ 0x0 = 47.5 x 47.5mm package non-SWP.
+ 0x1 = 40 x 40mm package.
+ 0x2 = 47.5 x 47.5mm package with SWP. */
+ uint64_t reserved_3_63 : 61;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gpio_pkg_ver_s cn; */
+};
+typedef union bdk_gpio_pkg_ver bdk_gpio_pkg_ver_t;
+
+#define BDK_GPIO_PKG_VER BDK_GPIO_PKG_VER_FUNC()
+static inline uint64_t BDK_GPIO_PKG_VER_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GPIO_PKG_VER_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x803000001610ll;
+ __bdk_csr_fatal("GPIO_PKG_VER", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_GPIO_PKG_VER bdk_gpio_pkg_ver_t
+#define bustype_BDK_GPIO_PKG_VER BDK_CSR_TYPE_NCB
+#define basename_BDK_GPIO_PKG_VER "GPIO_PKG_VER"
+#define device_bar_BDK_GPIO_PKG_VER 0x0 /* PF_BAR0 */
+#define busnum_BDK_GPIO_PKG_VER 0
+#define arguments_BDK_GPIO_PKG_VER -1,-1,-1,-1
+
+/**
+ * Register (NCB) gpio_pspi_ctl
+ *
+ * GPIO Expansion ROM SPI Control Register
+ * This register is only accessible to the requestor(s) permitted with GPIO_PERMIT.
+ *
+ * When permitted, this register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_gpio_pspi_ctl
+{
+ uint64_t u;
+ struct bdk_gpio_pspi_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t pspi_gpio : 1; /**< [ 0: 0](R/W) PSPI GPIO reset override.
+ When set, this field causes GPIO pins 39-43 to maintain their
+ values through a chip reset. This bit is typically set when a PCIe expansion ROM
+ is required and a PEM has been configured as an endpoint.
+ When cleared, the GPIOs are reset during a chip domain reset.
+ This register is reset only on a cold domain reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t pspi_gpio : 1; /**< [ 0: 0](R/W) PSPI GPIO reset override.
+ When set, this field causes GPIO pins 39-43 to maintain their
+ values through a chip reset. This bit is typically set when a PCIe expansion ROM
+ is required and a PEM has been configured as an endpoint.
+ When cleared, the GPIOs are reset during a chip domain reset.
+ This register is reset only on a cold domain reset. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gpio_pspi_ctl_s cn; */
+};
+typedef union bdk_gpio_pspi_ctl bdk_gpio_pspi_ctl_t;
+
+#define BDK_GPIO_PSPI_CTL BDK_GPIO_PSPI_CTL_FUNC()
+static inline uint64_t BDK_GPIO_PSPI_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GPIO_PSPI_CTL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x803000000088ll;
+ __bdk_csr_fatal("GPIO_PSPI_CTL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_GPIO_PSPI_CTL bdk_gpio_pspi_ctl_t
+#define bustype_BDK_GPIO_PSPI_CTL BDK_CSR_TYPE_NCB
+#define basename_BDK_GPIO_PSPI_CTL "GPIO_PSPI_CTL"
+#define device_bar_BDK_GPIO_PSPI_CTL 0x0 /* PF_BAR0 */
+#define busnum_BDK_GPIO_PSPI_CTL 0
+#define arguments_BDK_GPIO_PSPI_CTL -1,-1,-1,-1
+
+/**
+ * Register (NCB) gpio_rx1_dat
+ *
+ * GPIO Receive Data Extension Register
+ * See GPIO_RX_DAT.
+ */
+union bdk_gpio_rx1_dat
+{
+ uint64_t u;
+ struct bdk_gpio_rx1_dat_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t dat : 32; /**< [ 31: 0](RO/H) GPIO read data. */
+#else /* Word 0 - Little Endian */
+ uint64_t dat : 32; /**< [ 31: 0](RO/H) GPIO read data. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_gpio_rx1_dat_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_29_63 : 35;
+ uint64_t dat : 29; /**< [ 28: 0](RO/H) GPIO read data. */
+#else /* Word 0 - Little Endian */
+ uint64_t dat : 29; /**< [ 28: 0](RO/H) GPIO read data. */
+ uint64_t reserved_29_63 : 35;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_gpio_rx1_dat_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t dat : 16; /**< [ 15: 0](RO/H) GPIO read data. */
+#else /* Word 0 - Little Endian */
+ uint64_t dat : 16; /**< [ 15: 0](RO/H) GPIO read data. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } cn83xx;
+ struct bdk_gpio_rx1_dat_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t dat : 32; /**< [ 31: 0](RO/H) GPIO read data. Unimplemented pin bits read as zero. */
+#else /* Word 0 - Little Endian */
+ uint64_t dat : 32; /**< [ 31: 0](RO/H) GPIO read data. Unimplemented pin bits read as zero. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_gpio_rx1_dat bdk_gpio_rx1_dat_t;
+
+#define BDK_GPIO_RX1_DAT BDK_GPIO_RX1_DAT_FUNC()
+static inline uint64_t BDK_GPIO_RX1_DAT_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GPIO_RX1_DAT_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x803000001400ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x803000001400ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x803000001400ll;
+ __bdk_csr_fatal("GPIO_RX1_DAT", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_GPIO_RX1_DAT bdk_gpio_rx1_dat_t
+#define bustype_BDK_GPIO_RX1_DAT BDK_CSR_TYPE_NCB
+#define basename_BDK_GPIO_RX1_DAT "GPIO_RX1_DAT"
+#define device_bar_BDK_GPIO_RX1_DAT 0x0 /* PF_BAR0 */
+#define busnum_BDK_GPIO_RX1_DAT 0
+#define arguments_BDK_GPIO_RX1_DAT -1,-1,-1,-1
+
+/**
+ * Register (NCB) gpio_rx_dat
+ *
+ * GPIO Receive Data Register
+ * This register contains the state of the GPIO pins, sampled after the glitch filter and the
+ * XOR inverter (GPIO_BIT_CFG()[PIN_XOR]). GPIO inputs always report to GPIO_RX_DAT regardless
+ * of the value of GPIO_BIT_CFG()[PIN_SEL].
+ * GPIO_RX_DAT reads GPIO input data for the first 64 GPIOs, and GPIO_RX1_DAT the remainder.
+ *
+ * Each bit in this register is only accessible to the requestor(s) permitted with
+ * GPIO_BIT_PERMIT(), but no error is reported for bits that are not permitted by
+ * GPIO_BIT_PERMIT().
+ *
+ * When permitted, this register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_gpio_rx_dat
+{
+ uint64_t u;
+ struct bdk_gpio_rx_dat_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t dat : 64; /**< [ 63: 0](RO/H) GPIO read data. */
+#else /* Word 0 - Little Endian */
+ uint64_t dat : 64; /**< [ 63: 0](RO/H) GPIO read data. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gpio_rx_dat_s cn9; */
+ struct bdk_gpio_rx_dat_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t dat : 48; /**< [ 47: 0](RO/H) GPIO read data. */
+#else /* Word 0 - Little Endian */
+ uint64_t dat : 48; /**< [ 47: 0](RO/H) GPIO read data. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_gpio_rx_dat_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_51_63 : 13;
+ uint64_t dat : 51; /**< [ 50: 0](RO/H) GPIO read data. */
+#else /* Word 0 - Little Endian */
+ uint64_t dat : 51; /**< [ 50: 0](RO/H) GPIO read data. */
+ uint64_t reserved_51_63 : 13;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_gpio_rx_dat_s cn83xx; */
+};
+typedef union bdk_gpio_rx_dat bdk_gpio_rx_dat_t;
+
+#define BDK_GPIO_RX_DAT BDK_GPIO_RX_DAT_FUNC()
+static inline uint64_t BDK_GPIO_RX_DAT_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GPIO_RX_DAT_FUNC(void)
+{
+ return 0x803000000000ll;
+}
+
+#define typedef_BDK_GPIO_RX_DAT bdk_gpio_rx_dat_t
+#define bustype_BDK_GPIO_RX_DAT BDK_CSR_TYPE_NCB
+#define basename_BDK_GPIO_RX_DAT "GPIO_RX_DAT"
+#define device_bar_BDK_GPIO_RX_DAT 0x0 /* PF_BAR0 */
+#define busnum_BDK_GPIO_RX_DAT 0
+#define arguments_BDK_GPIO_RX_DAT -1,-1,-1,-1
+
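+/* Illustrative sketch (editorial, not part of the auto-generated definitions):
+ * reading one input pin, using GPIO_RX_DAT for pins 0-63 and GPIO_RX1_DAT
+ * for higher pins. Assumes the BDK_CSR_READ accessor macro from bdk-csr.h;
+ * the helper name is hypothetical.
+ */
+static inline int bdk_gpio_example_read_pin(bdk_node_t node, int pin)
+{
+    if (pin < 64)
+        return (BDK_CSR_READ(node, BDK_GPIO_RX_DAT) >> pin) & 1;
+    return (BDK_CSR_READ(node, BDK_GPIO_RX1_DAT) >> (pin - 64)) & 1;
+}
+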
+/**
+ * Register (NCB) gpio_strap
+ *
+ * GPIO Strap Value Register
+ * This register contains strap data for the first 64 GPIOs, captured at the rising edge of
+ * DC_OK. GPIO_STRAP1 contains the remaining GPIOs.
+ *
+ * This register is accessible to all requestors (regardless of GPIO_PERMIT).
+ *
+ * When permitted, this register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_gpio_strap
+{
+ uint64_t u;
+ struct bdk_gpio_strap_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t strap : 64; /**< [ 63: 0](RO/H) GPIO strap data. */
+#else /* Word 0 - Little Endian */
+ uint64_t strap : 64; /**< [ 63: 0](RO/H) GPIO strap data. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_gpio_strap_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t strap : 64; /**< [ 63: 0](RO/H) GPIO strap data of GPIO pins less than 64. Unimplemented pin bits read as 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t strap : 64; /**< [ 63: 0](RO/H) GPIO strap data of GPIO pins less than 64. Unimplemented pin bits read as 0. */
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_gpio_strap_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t strap : 48; /**< [ 47: 0](RO/H) GPIO strap data of GPIO pins less than 64. Unimplemented pin bits read as 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t strap : 48; /**< [ 47: 0](RO/H) GPIO strap data of GPIO pins less than 64. Unimplemented pin bits read as 0. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_gpio_strap_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_51_63 : 13;
+ uint64_t strap : 51; /**< [ 50: 0](RO/H) GPIO strap data. */
+#else /* Word 0 - Little Endian */
+ uint64_t strap : 51; /**< [ 50: 0](RO/H) GPIO strap data. */
+ uint64_t reserved_51_63 : 13;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_gpio_strap_cn9 cn83xx; */
+};
+typedef union bdk_gpio_strap bdk_gpio_strap_t;
+
+#define BDK_GPIO_STRAP BDK_GPIO_STRAP_FUNC()
+static inline uint64_t BDK_GPIO_STRAP_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GPIO_STRAP_FUNC(void)
+{
+ return 0x803000000028ll;
+}
+
+#define typedef_BDK_GPIO_STRAP bdk_gpio_strap_t
+#define bustype_BDK_GPIO_STRAP BDK_CSR_TYPE_NCB
+#define basename_BDK_GPIO_STRAP "GPIO_STRAP"
+#define device_bar_BDK_GPIO_STRAP 0x0 /* PF_BAR0 */
+#define busnum_BDK_GPIO_STRAP 0
+#define arguments_BDK_GPIO_STRAP -1,-1,-1,-1
+
+/**
+ * Register (NCB) gpio_strap1
+ *
+ * GPIO Strap Value Register
+ * See GPIO_STRAP.
+ */
+union bdk_gpio_strap1
+{
+ uint64_t u;
+ struct bdk_gpio_strap1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t strap : 32; /**< [ 31: 0](RO/H) GPIO strap data of GPIO pins 64-79. Unimplemented pin bits read as 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t strap : 32; /**< [ 31: 0](RO/H) GPIO strap data of GPIO pins 64-79. Unimplemented pin bits read as 0. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_gpio_strap1_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_29_63 : 35;
+ uint64_t strap : 29; /**< [ 28: 0](RO/H) GPIO strap data of GPIO pins 64 and above. Unimplemented pin bits read as 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t strap : 29; /**< [ 28: 0](RO/H) GPIO strap data of GPIO pins 64 and above. Unimplemented pin bits read as 0. */
+ uint64_t reserved_29_63 : 35;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_gpio_strap1_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t strap : 16; /**< [ 15: 0](RO/H) GPIO strap data of GPIO pins 64-79. Unimplemented pin bits read as 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t strap : 16; /**< [ 15: 0](RO/H) GPIO strap data of GPIO pins 64-79. Unimplemented pin bits read as 0. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } cn83xx;
+ struct bdk_gpio_strap1_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t strap : 32; /**< [ 31: 0](RO/H) GPIO strap data of GPIO pins 64 and above. Unimplemented pin bits read as 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t strap : 32; /**< [ 31: 0](RO/H) GPIO strap data of GPIO pins 64 and above. Unimplemented pin bits read as 0. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_gpio_strap1 bdk_gpio_strap1_t;
+
+#define BDK_GPIO_STRAP1 BDK_GPIO_STRAP1_FUNC()
+static inline uint64_t BDK_GPIO_STRAP1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GPIO_STRAP1_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x803000001418ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x803000001418ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x803000001418ll;
+ __bdk_csr_fatal("GPIO_STRAP1", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_GPIO_STRAP1 bdk_gpio_strap1_t
+#define bustype_BDK_GPIO_STRAP1 BDK_CSR_TYPE_NCB
+#define basename_BDK_GPIO_STRAP1 "GPIO_STRAP1"
+#define device_bar_BDK_GPIO_STRAP1 0x0 /* PF_BAR0 */
+#define busnum_BDK_GPIO_STRAP1 0
+#define arguments_BDK_GPIO_STRAP1 -1,-1,-1,-1
+
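+/* Illustrative sketch (editorial, not part of the auto-generated definitions):
+ * reading one boot strap bit latched at the rising edge of DC_OK, using
+ * GPIO_STRAP for pins 0-63 and GPIO_STRAP1 for higher pins. Unlike
+ * GPIO_RX_DAT, this returns the latched strap value, not the live pin state.
+ * Assumes the BDK_CSR_READ accessor macro from bdk-csr.h; the helper name is
+ * hypothetical.
+ */
+static inline int bdk_gpio_example_read_strap(bdk_node_t node, int pin)
+{
+    if (pin < 64)
+        return (BDK_CSR_READ(node, BDK_GPIO_STRAP) >> pin) & 1;
+    return (BDK_CSR_READ(node, BDK_GPIO_STRAP1) >> (pin - 64)) & 1;
+}
+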
+/**
+ * Register (NCB) gpio_tx1_clr
+ *
+ * GPIO Transmit Clear Mask Register
+ * See GPIO_TX_CLR.
+ */
+union bdk_gpio_tx1_clr
+{
+ uint64_t u;
+ struct bdk_gpio_tx1_clr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t clr : 32; /**< [ 31: 0](R/W1C/H) Clear mask. Bit mask to indicate which GPIO_TX1_DAT bits to set to 0. When read, CLR
+ returns the GPIO_TX1_DAT storage. */
+#else /* Word 0 - Little Endian */
+ uint64_t clr : 32; /**< [ 31: 0](R/W1C/H) Clear mask. Bit mask to indicate which GPIO_TX1_DAT bits to set to 0. When read, CLR
+ returns the GPIO_TX1_DAT storage. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_gpio_tx1_clr_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_29_63 : 35;
+ uint64_t clr : 29; /**< [ 28: 0](R/W1C/H) Clear mask. Bit mask to indicate which GPIO_TX1_DAT bits to set to 0. When read, CLR
+ returns the GPIO_TX1_DAT storage. */
+#else /* Word 0 - Little Endian */
+ uint64_t clr : 29; /**< [ 28: 0](R/W1C/H) Clear mask. Bit mask to indicate which GPIO_TX1_DAT bits to set to 0. When read, CLR
+ returns the GPIO_TX1_DAT storage. */
+ uint64_t reserved_29_63 : 35;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_gpio_tx1_clr_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t clr : 16; /**< [ 15: 0](R/W1C/H) Clear mask. Bit mask to indicate which GPIO_TX1_DAT bits to set to 0. When read, CLR
+ returns the GPIO_TX1_DAT storage. */
+#else /* Word 0 - Little Endian */
+ uint64_t clr : 16; /**< [ 15: 0](R/W1C/H) Clear mask. Bit mask to indicate which GPIO_TX1_DAT bits to set to 0. When read, CLR
+ returns the GPIO_TX1_DAT storage. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } cn83xx;
+ struct bdk_gpio_tx1_clr_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t clr : 32; /**< [ 31: 0](R/W1C/H) Clear mask. Bit mask to indicate which GPIO_TX1_DAT bits to set to 0. When read, CLR
+ returns the GPIO_TX1_DAT storage. Unimplemented pin bits read as 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t clr : 32; /**< [ 31: 0](R/W1C/H) Clear mask. Bit mask to indicate which GPIO_TX1_DAT bits to set to 0. When read, CLR
+ returns the GPIO_TX1_DAT storage. Unimplemented pin bits read as 0. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_gpio_tx1_clr bdk_gpio_tx1_clr_t;
+
+#define BDK_GPIO_TX1_CLR BDK_GPIO_TX1_CLR_FUNC()
+static inline uint64_t BDK_GPIO_TX1_CLR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GPIO_TX1_CLR_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x803000001410ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x803000001410ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x803000001410ll;
+ __bdk_csr_fatal("GPIO_TX1_CLR", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_GPIO_TX1_CLR bdk_gpio_tx1_clr_t
+#define bustype_BDK_GPIO_TX1_CLR BDK_CSR_TYPE_NCB
+#define basename_BDK_GPIO_TX1_CLR "GPIO_TX1_CLR"
+#define device_bar_BDK_GPIO_TX1_CLR 0x0 /* PF_BAR0 */
+#define busnum_BDK_GPIO_TX1_CLR 0
+#define arguments_BDK_GPIO_TX1_CLR -1,-1,-1,-1
+
+/**
+ * Register (NCB) gpio_tx1_set
+ *
+ * GPIO Transmit Set Mask Register
+ * See GPIO_TX_SET.
+ */
+union bdk_gpio_tx1_set
+{
+ uint64_t u;
+ struct bdk_gpio_tx1_set_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t set : 32; /**< [ 31: 0](R/W1S/H) Set mask. Bit mask to indicate which GPIO_TX1_DAT bits to set to 1. When read, SET
+ returns the GPIO_TX1_DAT storage. */
+#else /* Word 0 - Little Endian */
+ uint64_t set : 32; /**< [ 31: 0](R/W1S/H) Set mask. Bit mask to indicate which GPIO_TX1_DAT bits to set to 1. When read, SET
+ returns the GPIO_TX1_DAT storage. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_gpio_tx1_set_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_29_63 : 35;
+ uint64_t set : 29; /**< [ 28: 0](R/W1S/H) Set mask. Bit mask to indicate which GPIO_TX1_DAT bits to set to 1. When read, SET
+ returns the GPIO_TX1_DAT storage. */
+#else /* Word 0 - Little Endian */
+ uint64_t set : 29; /**< [ 28: 0](R/W1S/H) Set mask. Bit mask to indicate which GPIO_TX1_DAT bits to set to 1. When read, SET
+ returns the GPIO_TX1_DAT storage. */
+ uint64_t reserved_29_63 : 35;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_gpio_tx1_set_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t set : 16; /**< [ 15: 0](R/W1S/H) Set mask. Bit mask to indicate which GPIO_TX1_DAT bits to set to 1. When read, SET
+ returns the GPIO_TX1_DAT storage. */
+#else /* Word 0 - Little Endian */
+ uint64_t set : 16; /**< [ 15: 0](R/W1S/H) Set mask. Bit mask to indicate which GPIO_TX1_DAT bits to set to 1. When read, SET
+ returns the GPIO_TX1_DAT storage. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } cn83xx;
+ struct bdk_gpio_tx1_set_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t set : 32; /**< [ 31: 0](R/W1S/H) Set mask. Bit mask to indicate which GPIO_TX1_DAT bits to set to 1. When read, SET
+ returns the GPIO_TX1_DAT storage. Unimplemented pin bits read as 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t set : 32; /**< [ 31: 0](R/W1S/H) Set mask. Bit mask to indicate which GPIO_TX1_DAT bits to set to 1. When read, SET
+ returns the GPIO_TX1_DAT storage. Unimplemented pin bits read as 0. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_gpio_tx1_set bdk_gpio_tx1_set_t;
+
+#define BDK_GPIO_TX1_SET BDK_GPIO_TX1_SET_FUNC()
+static inline uint64_t BDK_GPIO_TX1_SET_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GPIO_TX1_SET_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x803000001408ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x803000001408ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x803000001408ll;
+ __bdk_csr_fatal("GPIO_TX1_SET", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_GPIO_TX1_SET bdk_gpio_tx1_set_t
+#define bustype_BDK_GPIO_TX1_SET BDK_CSR_TYPE_NCB
+#define basename_BDK_GPIO_TX1_SET "GPIO_TX1_SET"
+#define device_bar_BDK_GPIO_TX1_SET 0x0 /* PF_BAR0 */
+#define busnum_BDK_GPIO_TX1_SET 0
+#define arguments_BDK_GPIO_TX1_SET -1,-1,-1,-1
+
+/**
+ * Register (NCB) gpio_tx_clr
+ *
+ * GPIO Transmit Clear Mask Register
+ * This register clears GPIO output data for the first 64 GPIOs, and GPIO_TX1_CLR the
+ * remainder.
+ *
+ * Each bit in this register is only accessible to the requestor(s) permitted with
+ * GPIO_BIT_PERMIT(), but no error is reported for bits that are not permitted by
+ * GPIO_BIT_PERMIT().
+ *
+ * When permitted, this register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_gpio_tx_clr
+{
+ uint64_t u;
+ struct bdk_gpio_tx_clr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t clr : 64; /**< [ 63: 0](R/W1C/H) Clear mask. Bit mask to indicate which GPIO_TX_DAT bits to set to 0. When read, [CLR]
+ returns the GPIO_TX_DAT storage. */
+#else /* Word 0 - Little Endian */
+ uint64_t clr : 64; /**< [ 63: 0](R/W1C/H) Clear mask. Bit mask to indicate which GPIO_TX_DAT bits to set to 0. When read, [CLR]
+ returns the GPIO_TX_DAT storage. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gpio_tx_clr_s cn9; */
+ struct bdk_gpio_tx_clr_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t clr : 48; /**< [ 47: 0](R/W1C/H) Clear mask. Bit mask to indicate which GPIO_TX_DAT bits to set to 0. When read, [CLR]
+ returns the GPIO_TX_DAT storage. */
+#else /* Word 0 - Little Endian */
+ uint64_t clr : 48; /**< [ 47: 0](R/W1C/H) Clear mask. Bit mask to indicate which GPIO_TX_DAT bits to set to 0. When read, [CLR]
+ returns the GPIO_TX_DAT storage. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_gpio_tx_clr_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_51_63 : 13;
+ uint64_t clr : 51; /**< [ 50: 0](R/W1C/H) Clear mask. Bit mask to indicate which GPIO_TX_DAT bits to set to 0. When read, [CLR]
+ returns the GPIO_TX_DAT storage. */
+#else /* Word 0 - Little Endian */
+ uint64_t clr : 51; /**< [ 50: 0](R/W1C/H) Clear mask. Bit mask to indicate which GPIO_TX_DAT bits to set to 0. When read, [CLR]
+ returns the GPIO_TX_DAT storage. */
+ uint64_t reserved_51_63 : 13;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_gpio_tx_clr_s cn83xx; */
+};
+typedef union bdk_gpio_tx_clr bdk_gpio_tx_clr_t;
+
+#define BDK_GPIO_TX_CLR BDK_GPIO_TX_CLR_FUNC()
+static inline uint64_t BDK_GPIO_TX_CLR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GPIO_TX_CLR_FUNC(void)
+{
+ return 0x803000000010ll;
+}
+
+#define typedef_BDK_GPIO_TX_CLR bdk_gpio_tx_clr_t
+#define bustype_BDK_GPIO_TX_CLR BDK_CSR_TYPE_NCB
+#define basename_BDK_GPIO_TX_CLR "GPIO_TX_CLR"
+#define device_bar_BDK_GPIO_TX_CLR 0x0 /* PF_BAR0 */
+#define busnum_BDK_GPIO_TX_CLR 0
+#define arguments_BDK_GPIO_TX_CLR -1,-1,-1,-1
+
+/**
+ * Register (NCB) gpio_tx_set
+ *
+ * GPIO Transmit Set Mask Register
+ * This register sets GPIO output data. GPIO_TX_SET sets the first 64 GPIOs, and
+ * GPIO_TX1_SET the remainder.
+ *
+ * Each bit in this register is only accessible to the requestor(s) permitted with
+ * GPIO_BIT_PERMIT(), but no error is reported for bits that are not permitted by
+ * GPIO_BIT_PERMIT().
+ *
+ * When permitted, this register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_gpio_tx_set
+{
+ uint64_t u;
+ struct bdk_gpio_tx_set_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t set : 64; /**< [ 63: 0](R/W1S/H) Set mask. Bit mask to indicate which GPIO_TX_DAT bits to set to 1. When read, [SET]
+ returns the GPIO_TX_DAT storage. */
+#else /* Word 0 - Little Endian */
+ uint64_t set : 64; /**< [ 63: 0](R/W1S/H) Set mask. Bit mask to indicate which GPIO_TX_DAT bits to set to 1. When read, [SET]
+ returns the GPIO_TX_DAT storage. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_gpio_tx_set_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t set : 64; /**< [ 63: 0](R/W1S/H) Set mask. Bit mask to indicate which GPIO_TX_DAT bits to set to 1. When read,
+ [SET] returns the GPIO_TX_DAT storage. */
+#else /* Word 0 - Little Endian */
+ uint64_t set : 64; /**< [ 63: 0](R/W1S/H) Set mask. Bit mask to indicate which GPIO_TX_DAT bits to set to 1. When read,
+ [SET] returns the GPIO_TX_DAT storage. */
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_gpio_tx_set_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t set : 48; /**< [ 47: 0](R/W1S/H) Set mask. Bit mask to indicate which GPIO_TX_DAT bits to set to 1. When read, [SET]
+ returns the GPIO_TX_DAT storage. */
+#else /* Word 0 - Little Endian */
+ uint64_t set : 48; /**< [ 47: 0](R/W1S/H) Set mask. Bit mask to indicate which GPIO_TX_DAT bits to set to 1. When read, [SET]
+ returns the GPIO_TX_DAT storage. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_gpio_tx_set_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_51_63 : 13;
+ uint64_t set : 51; /**< [ 50: 0](R/W1S/H) Set mask. Bit mask to indicate which GPIO_TX_DAT bits to set to 1. When read, [SET]
+ returns the GPIO_TX_DAT storage. */
+#else /* Word 0 - Little Endian */
+ uint64_t set : 51; /**< [ 50: 0](R/W1S/H) Set mask. Bit mask to indicate which GPIO_TX_DAT bits to set to 1. When read, [SET]
+ returns the GPIO_TX_DAT storage. */
+ uint64_t reserved_51_63 : 13;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_gpio_tx_set_s cn83xx; */
+};
+typedef union bdk_gpio_tx_set bdk_gpio_tx_set_t;
+
+#define BDK_GPIO_TX_SET BDK_GPIO_TX_SET_FUNC()
+static inline uint64_t BDK_GPIO_TX_SET_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GPIO_TX_SET_FUNC(void)
+{
+ return 0x803000000008ll;
+}
+
+#define typedef_BDK_GPIO_TX_SET bdk_gpio_tx_set_t
+#define bustype_BDK_GPIO_TX_SET BDK_CSR_TYPE_NCB
+#define basename_BDK_GPIO_TX_SET "GPIO_TX_SET"
+#define device_bar_BDK_GPIO_TX_SET 0x0 /* PF_BAR0 */
+#define busnum_BDK_GPIO_TX_SET 0
+#define arguments_BDK_GPIO_TX_SET -1,-1,-1,-1
+
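+/* Illustrative sketch (editorial, not part of the auto-generated definitions):
+ * driving an output pin with the W1S/W1C register pair, which updates
+ * GPIO_TX_DAT without a read-modify-write and so does not disturb other
+ * pins. Pins 0-63 use GPIO_TX_SET/GPIO_TX_CLR; higher pins use the
+ * GPIO_TX1_SET/GPIO_TX1_CLR extension registers. Assumes the BDK_CSR_WRITE
+ * accessor macro from bdk-csr.h; the helper name is hypothetical.
+ */
+static inline void bdk_gpio_example_drive_pin(bdk_node_t node, int pin, int value)
+{
+    uint64_t mask = 1ull << (pin & 63);
+    if (pin < 64)
+    {
+        if (value)
+            BDK_CSR_WRITE(node, BDK_GPIO_TX_SET, mask); /* W1S: set bit */
+        else
+            BDK_CSR_WRITE(node, BDK_GPIO_TX_CLR, mask); /* W1C: clear bit */
+    }
+    else
+    {
+        if (value)
+            BDK_CSR_WRITE(node, BDK_GPIO_TX1_SET, mask);
+        else
+            BDK_CSR_WRITE(node, BDK_GPIO_TX1_CLR, mask);
+    }
+}
+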
+#endif /* __BDK_CSRS_GPIO_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-gti.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-gti.h
new file mode 100644
index 0000000000..dcc8d35519
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-gti.h
@@ -0,0 +1,5352 @@
+#ifndef __BDK_CSRS_GTI_H__
+#define __BDK_CSRS_GTI_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium GTI.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Enumeration gti_bar_e
+ *
+ * GTI Base Address Register Enumeration
+ * Enumerates the base address registers.
+ */
+#define BDK_GTI_BAR_E_GTI_PF_BAR0_CN8 (0x844000000000ll)
+#define BDK_GTI_BAR_E_GTI_PF_BAR0_CN8_SIZE 0x800000ull
+#define BDK_GTI_BAR_E_GTI_PF_BAR0_CN9 (0x844000000000ll)
+#define BDK_GTI_BAR_E_GTI_PF_BAR0_CN9_SIZE 0x100000ull
+#define BDK_GTI_BAR_E_GTI_PF_BAR4 (0x84400f000000ll)
+#define BDK_GTI_BAR_E_GTI_PF_BAR4_SIZE 0x100000ull
+
+/**
+ * Enumeration gti_int_vec_e
+ *
+ * GTI MSI-X Vector Enumeration
+ * Enumerates the MSI-X interrupt vectors.
+ */
+#define BDK_GTI_INT_VEC_E_CORE_WDOGX_DEL3T(a) (0xa + (a))
+#define BDK_GTI_INT_VEC_E_CORE_WDOGX_INT_CN8(a) (0x3a + (a))
+#define BDK_GTI_INT_VEC_E_CORE_WDOGX_INT_CN9(a) (0x40 + (a))
+#define BDK_GTI_INT_VEC_E_ERROR (8)
+#define BDK_GTI_INT_VEC_E_MAILBOX_RX (7)
+#define BDK_GTI_INT_VEC_E_SECURE_WATCHDOG (4)
+#define BDK_GTI_INT_VEC_E_SECURE_WATCHDOG_CLEAR (5)
+#define BDK_GTI_INT_VEC_E_SPARE (9)
+#define BDK_GTI_INT_VEC_E_TX_TIMESTAMP (6)
+#define BDK_GTI_INT_VEC_E_WAKE (0)
+#define BDK_GTI_INT_VEC_E_WAKE_CLEAR (1)
+#define BDK_GTI_INT_VEC_E_WATCHDOG (2)
+#define BDK_GTI_INT_VEC_E_WATCHDOG_CLEAR (3)
+
+/**
+ * Register (NCB) gti_bp_test
+ *
+ * INTERNAL: GTI Backpressure Test Register
+ */
+union bdk_gti_bp_test
+{
+ uint64_t u;
+ struct bdk_gti_bp_test_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t enable : 2; /**< [ 63: 62](R/W) Enable test mode. For diagnostic use only.
+ Internal:
+ Once a bit is set, random backpressure is generated
+ at the corresponding point to allow for more frequent backpressure.
+ \<63\> = Limit the NCBO request FIFO, applying backpressure to CSR accesses to GTI registers
+ \<62\> = Limit the NCBI response FIFO, applying backpressure to responses for NCBO requests */
+ uint64_t reserved_24_61 : 38;
+ uint64_t bp_cfg : 4; /**< [ 23: 20](R/W) Backpressure weight. For diagnostic use only.
+ Internal:
+ There are 2 backpressure configuration bits per enable, with the two bits
+ defined as 0x0=100% of the time, 0x1=75% of the time, 0x2=50% of the time,
+ 0x3=25% of the time.
+ \<23:22\> = Config 1.
+ \<21:20\> = Config 0. */
+ uint64_t reserved_12_19 : 8;
+ uint64_t lfsr_freq : 12; /**< [ 11: 0](R/W) Test LFSR update frequency in coprocessor-clocks minus one. */
+#else /* Word 0 - Little Endian */
+ uint64_t lfsr_freq : 12; /**< [ 11: 0](R/W) Test LFSR update frequency in coprocessor-clocks minus one. */
+ uint64_t reserved_12_19 : 8;
+ uint64_t bp_cfg : 4; /**< [ 23: 20](R/W) Backpressure weight. For diagnostic use only.
+ Internal:
+ There are 2 backpressure configuration bits per enable, with the two bits
+ defined as 0x0=100% of the time, 0x1=75% of the time, 0x2=50% of the time,
+ 0x3=25% of the time.
+ \<23:22\> = Config 1.
+ \<21:20\> = Config 0. */
+ uint64_t reserved_24_61 : 38;
+ uint64_t enable : 2; /**< [ 63: 62](R/W) Enable test mode. For diagnostic use only.
+ Internal:
+ Once a bit is set, random backpressure is generated
+ at the corresponding point to allow for more frequent backpressure.
+ \<63\> = Limit the NCBO request FIFO, applying backpressure to CSR accesses to GTI registers
+ \<62\> = Limit the NCBI response FIFO, applying backpressure to responses for NCBO requests */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_bp_test_s cn; */
+};
+typedef union bdk_gti_bp_test bdk_gti_bp_test_t;
+
+#define BDK_GTI_BP_TEST BDK_GTI_BP_TEST_FUNC()
+static inline uint64_t BDK_GTI_BP_TEST_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_BP_TEST_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x8440000e0008ll;
+ __bdk_csr_fatal("GTI_BP_TEST", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_BP_TEST bdk_gti_bp_test_t
+#define bustype_BDK_GTI_BP_TEST BDK_CSR_TYPE_NCB
+#define basename_BDK_GTI_BP_TEST "GTI_BP_TEST"
+#define device_bar_BDK_GTI_BP_TEST 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_BP_TEST 0
+#define arguments_BDK_GTI_BP_TEST -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_bz_cidr0
+ *
+ * GTI Base Component Identification Register 0
+ */
+union bdk_gti_bz_cidr0
+{
+ uint32_t u;
+ struct bdk_gti_bz_cidr0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_bz_cidr0_s cn; */
+};
+typedef union bdk_gti_bz_cidr0 bdk_gti_bz_cidr0_t;
+
+#define BDK_GTI_BZ_CIDR0 BDK_GTI_BZ_CIDR0_FUNC()
+static inline uint64_t BDK_GTI_BZ_CIDR0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_BZ_CIDR0_FUNC(void)
+{
+ return 0x844000030ff0ll;
+}
+
+#define typedef_BDK_GTI_BZ_CIDR0 bdk_gti_bz_cidr0_t
+#define bustype_BDK_GTI_BZ_CIDR0 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_BZ_CIDR0 "GTI_BZ_CIDR0"
+#define device_bar_BDK_GTI_BZ_CIDR0 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_BZ_CIDR0 0
+#define arguments_BDK_GTI_BZ_CIDR0 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_bz_cidr1
+ *
+ * GTI Base Component Identification Register 1
+ */
+union bdk_gti_bz_cidr1
+{
+ uint32_t u;
+ struct bdk_gti_bz_cidr1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t cclass : 4; /**< [ 7: 4](RO) Component class. */
+ uint32_t preamble : 4; /**< [ 3: 0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+ uint32_t preamble : 4; /**< [ 3: 0](RO) Preamble identification value. */
+ uint32_t cclass : 4; /**< [ 7: 4](RO) Component class. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_bz_cidr1_s cn; */
+};
+typedef union bdk_gti_bz_cidr1 bdk_gti_bz_cidr1_t;
+
+#define BDK_GTI_BZ_CIDR1 BDK_GTI_BZ_CIDR1_FUNC()
+static inline uint64_t BDK_GTI_BZ_CIDR1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_BZ_CIDR1_FUNC(void)
+{
+ return 0x844000030ff4ll;
+}
+
+#define typedef_BDK_GTI_BZ_CIDR1 bdk_gti_bz_cidr1_t
+#define bustype_BDK_GTI_BZ_CIDR1 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_BZ_CIDR1 "GTI_BZ_CIDR1"
+#define device_bar_BDK_GTI_BZ_CIDR1 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_BZ_CIDR1 0
+#define arguments_BDK_GTI_BZ_CIDR1 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_bz_cidr2
+ *
+ * GTI Base Component Identification Register 2
+ */
+union bdk_gti_bz_cidr2
+{
+ uint32_t u;
+ struct bdk_gti_bz_cidr2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_bz_cidr2_s cn; */
+};
+typedef union bdk_gti_bz_cidr2 bdk_gti_bz_cidr2_t;
+
+#define BDK_GTI_BZ_CIDR2 BDK_GTI_BZ_CIDR2_FUNC()
+static inline uint64_t BDK_GTI_BZ_CIDR2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_BZ_CIDR2_FUNC(void)
+{
+ return 0x844000030ff8ll;
+}
+
+#define typedef_BDK_GTI_BZ_CIDR2 bdk_gti_bz_cidr2_t
+#define bustype_BDK_GTI_BZ_CIDR2 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_BZ_CIDR2 "GTI_BZ_CIDR2"
+#define device_bar_BDK_GTI_BZ_CIDR2 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_BZ_CIDR2 0
+#define arguments_BDK_GTI_BZ_CIDR2 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_bz_cidr3
+ *
+ * GTI Base Component Identification Register 3
+ */
+union bdk_gti_bz_cidr3
+{
+ uint32_t u;
+ struct bdk_gti_bz_cidr3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_bz_cidr3_s cn; */
+};
+typedef union bdk_gti_bz_cidr3 bdk_gti_bz_cidr3_t;
+
+#define BDK_GTI_BZ_CIDR3 BDK_GTI_BZ_CIDR3_FUNC()
+static inline uint64_t BDK_GTI_BZ_CIDR3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_BZ_CIDR3_FUNC(void)
+{
+ return 0x844000030ffcll;
+}
+
+#define typedef_BDK_GTI_BZ_CIDR3 bdk_gti_bz_cidr3_t
+#define bustype_BDK_GTI_BZ_CIDR3 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_BZ_CIDR3 "GTI_BZ_CIDR3"
+#define device_bar_BDK_GTI_BZ_CIDR3 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_BZ_CIDR3 0
+#define arguments_BDK_GTI_BZ_CIDR3 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_bz_cntp_ctl
+ *
+ * GTI Base Physical Timer Control Register
+ */
+union bdk_gti_bz_cntp_ctl
+{
+ uint32_t u;
+ struct bdk_gti_bz_cntp_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_3_31 : 29;
+ uint32_t istatus : 1; /**< [ 2: 2](RO/H) Status. */
+ uint32_t imask : 1; /**< [ 1: 1](R/W) Mask. */
+ uint32_t enable : 1; /**< [ 0: 0](R/W) Enable. */
+#else /* Word 0 - Little Endian */
+ uint32_t enable : 1; /**< [ 0: 0](R/W) Enable. */
+ uint32_t imask : 1; /**< [ 1: 1](R/W) Mask. */
+ uint32_t istatus : 1; /**< [ 2: 2](RO/H) Status. */
+ uint32_t reserved_3_31 : 29;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_bz_cntp_ctl_s cn; */
+};
+typedef union bdk_gti_bz_cntp_ctl bdk_gti_bz_cntp_ctl_t;
+
+#define BDK_GTI_BZ_CNTP_CTL BDK_GTI_BZ_CNTP_CTL_FUNC()
+static inline uint64_t BDK_GTI_BZ_CNTP_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_BZ_CNTP_CTL_FUNC(void)
+{
+ return 0x84400003002cll;
+}
+
+#define typedef_BDK_GTI_BZ_CNTP_CTL bdk_gti_bz_cntp_ctl_t
+#define bustype_BDK_GTI_BZ_CNTP_CTL BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_BZ_CNTP_CTL "GTI_BZ_CNTP_CTL"
+#define device_bar_BDK_GTI_BZ_CNTP_CTL 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_BZ_CNTP_CTL 0
+#define arguments_BDK_GTI_BZ_CNTP_CTL -1,-1,-1,-1
+
+/**
+ * Register (NCB) gti_bz_cntp_cval
+ *
+ * GTI Base Physical Timer Compare Value Register
+ */
+union bdk_gti_bz_cntp_cval
+{
+ uint64_t u;
+ struct bdk_gti_bz_cntp_cval_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W/H) Physical timer compare value. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W/H) Physical timer compare value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_bz_cntp_cval_s cn; */
+};
+typedef union bdk_gti_bz_cntp_cval bdk_gti_bz_cntp_cval_t;
+
+#define BDK_GTI_BZ_CNTP_CVAL BDK_GTI_BZ_CNTP_CVAL_FUNC()
+static inline uint64_t BDK_GTI_BZ_CNTP_CVAL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_BZ_CNTP_CVAL_FUNC(void)
+{
+ return 0x844000030020ll;
+}
+
+#define typedef_BDK_GTI_BZ_CNTP_CVAL bdk_gti_bz_cntp_cval_t
+#define bustype_BDK_GTI_BZ_CNTP_CVAL BDK_CSR_TYPE_NCB
+#define basename_BDK_GTI_BZ_CNTP_CVAL "GTI_BZ_CNTP_CVAL"
+#define device_bar_BDK_GTI_BZ_CNTP_CVAL 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_BZ_CNTP_CVAL 0
+#define arguments_BDK_GTI_BZ_CNTP_CVAL -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_bz_cntp_tval
+ *
+ * GTI Base Physical Timer Timer Value Register
+ */
+union bdk_gti_bz_cntp_tval
+{
+ uint32_t u;
+ struct bdk_gti_bz_cntp_tval_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t timervalue : 32; /**< [ 31: 0](R/W/H) Physical timer timer value. */
+#else /* Word 0 - Little Endian */
+ uint32_t timervalue : 32; /**< [ 31: 0](R/W/H) Physical timer timer value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_bz_cntp_tval_s cn; */
+};
+typedef union bdk_gti_bz_cntp_tval bdk_gti_bz_cntp_tval_t;
+
+#define BDK_GTI_BZ_CNTP_TVAL BDK_GTI_BZ_CNTP_TVAL_FUNC()
+static inline uint64_t BDK_GTI_BZ_CNTP_TVAL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_BZ_CNTP_TVAL_FUNC(void)
+{
+ return 0x844000030028ll;
+}
+
+#define typedef_BDK_GTI_BZ_CNTP_TVAL bdk_gti_bz_cntp_tval_t
+#define bustype_BDK_GTI_BZ_CNTP_TVAL BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_BZ_CNTP_TVAL "GTI_BZ_CNTP_TVAL"
+#define device_bar_BDK_GTI_BZ_CNTP_TVAL 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_BZ_CNTP_TVAL 0
+#define arguments_BDK_GTI_BZ_CNTP_TVAL -1,-1,-1,-1
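+
+/* Sketch (editorial addition, not vendor code): GTI_BZ_CNTP_CTL/CVAL/TVAL
+ * above follow the ARM generic-timer model. A plausible one-shot setup,
+ * assuming the BDK_CSR_READ/BDK_CSR_WRITE/BDK_CSR_MODIFY helpers from
+ * bdk-csr.h and a counter_hz value supplied by the platform:
+ *
+ *   uint64_t now = BDK_CSR_READ(node, BDK_GTI_CC_CNTCV);        // current count
+ *   BDK_CSR_WRITE(node, BDK_GTI_BZ_CNTP_CVAL, now + counter_hz); // fire ~1s from now
+ *   BDK_CSR_MODIFY(c, node, BDK_GTI_BZ_CNTP_CTL,
+ *                  c.s.imask = 0;    // unmask the interrupt
+ *                  c.s.enable = 1);  // start the comparison
+ *
+ * Writing CNTP_TVAL instead loads a 32-bit downcount relative to the current
+ * count, the usual alternative to an absolute CVAL.
+ */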
+
+/**
+ * Register (NCB32b) gti_bz_pidr0
+ *
+ * GTI Base Peripheral Identification Register 0
+ */
+union bdk_gti_bz_pidr0
+{
+ uint32_t u;
+ struct bdk_gti_bz_pidr0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t partnum0 : 8; /**< [ 7: 0](RO) Part number \<7:0\>. Indicates PCC_PIDR_PARTNUM0_E::GTI_BZ. */
+#else /* Word 0 - Little Endian */
+ uint32_t partnum0 : 8; /**< [ 7: 0](RO) Part number \<7:0\>. Indicates PCC_PIDR_PARTNUM0_E::GTI_BZ. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_bz_pidr0_s cn; */
+};
+typedef union bdk_gti_bz_pidr0 bdk_gti_bz_pidr0_t;
+
+#define BDK_GTI_BZ_PIDR0 BDK_GTI_BZ_PIDR0_FUNC()
+static inline uint64_t BDK_GTI_BZ_PIDR0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_BZ_PIDR0_FUNC(void)
+{
+ return 0x844000030fe0ll;
+}
+
+#define typedef_BDK_GTI_BZ_PIDR0 bdk_gti_bz_pidr0_t
+#define bustype_BDK_GTI_BZ_PIDR0 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_BZ_PIDR0 "GTI_BZ_PIDR0"
+#define device_bar_BDK_GTI_BZ_PIDR0 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_BZ_PIDR0 0
+#define arguments_BDK_GTI_BZ_PIDR0 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_bz_pidr1
+ *
+ * GTI Base Peripheral Identification Register 1
+ */
+union bdk_gti_bz_pidr1
+{
+ uint32_t u;
+ struct bdk_gti_bz_pidr1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t idcode : 4; /**< [ 7: 4](RO) JEP106 identification code \<3:0\>. Cavium code is 0x4C. */
+ uint32_t partnum1 : 4; /**< [ 3: 0](RO) Part number \<11:8\>. Indicates PCC_PIDR_PARTNUM1_E::COMP. */
+#else /* Word 0 - Little Endian */
+ uint32_t partnum1 : 4; /**< [ 3: 0](RO) Part number \<11:8\>. Indicates PCC_PIDR_PARTNUM1_E::COMP. */
+ uint32_t idcode : 4; /**< [ 7: 4](RO) JEP106 identification code \<3:0\>. Cavium code is 0x4C. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_bz_pidr1_s cn; */
+};
+typedef union bdk_gti_bz_pidr1 bdk_gti_bz_pidr1_t;
+
+#define BDK_GTI_BZ_PIDR1 BDK_GTI_BZ_PIDR1_FUNC()
+static inline uint64_t BDK_GTI_BZ_PIDR1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_BZ_PIDR1_FUNC(void)
+{
+ return 0x844000030fe4ll;
+}
+
+#define typedef_BDK_GTI_BZ_PIDR1 bdk_gti_bz_pidr1_t
+#define bustype_BDK_GTI_BZ_PIDR1 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_BZ_PIDR1 "GTI_BZ_PIDR1"
+#define device_bar_BDK_GTI_BZ_PIDR1 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_BZ_PIDR1 0
+#define arguments_BDK_GTI_BZ_PIDR1 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_bz_pidr2
+ *
+ * GTI Base Peripheral Identification Register 2
+ */
+union bdk_gti_bz_pidr2
+{
+ uint32_t u;
+ struct bdk_gti_bz_pidr2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t revision : 4; /**< [ 7: 4](RO) Architectural revision, as assigned by ARM. */
+ uint32_t jedec : 1; /**< [ 3: 3](RO) JEDEC assigned. */
+ uint32_t idcode : 3; /**< [ 2: 0](RO) JEP106 identification code \<6:4\>. Cavium code is 0x4C. */
+#else /* Word 0 - Little Endian */
+ uint32_t idcode : 3; /**< [ 2: 0](RO) JEP106 identification code \<6:4\>. Cavium code is 0x4C. */
+ uint32_t jedec : 1; /**< [ 3: 3](RO) JEDEC assigned. */
+ uint32_t revision : 4; /**< [ 7: 4](RO) Architectural revision, as assigned by ARM. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_bz_pidr2_s cn; */
+};
+typedef union bdk_gti_bz_pidr2 bdk_gti_bz_pidr2_t;
+
+#define BDK_GTI_BZ_PIDR2 BDK_GTI_BZ_PIDR2_FUNC()
+static inline uint64_t BDK_GTI_BZ_PIDR2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_BZ_PIDR2_FUNC(void)
+{
+ return 0x844000030fe8ll;
+}
+
+#define typedef_BDK_GTI_BZ_PIDR2 bdk_gti_bz_pidr2_t
+#define bustype_BDK_GTI_BZ_PIDR2 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_BZ_PIDR2 "GTI_BZ_PIDR2"
+#define device_bar_BDK_GTI_BZ_PIDR2 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_BZ_PIDR2 0
+#define arguments_BDK_GTI_BZ_PIDR2 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_bz_pidr3
+ *
+ * GTI Base Peripheral Identification Register 3
+ */
+union bdk_gti_bz_pidr3
+{
+ uint32_t u;
+ struct bdk_gti_bz_pidr3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t revand : 4; /**< [ 7: 4](RO) Manufacturer revision number. For CNXXXX always 0x0. */
+ uint32_t cust : 4; /**< [ 3: 0](RO) Customer modified. 0x1 = Overall product information should be consulted for
+ product, major and minor pass numbers. */
+#else /* Word 0 - Little Endian */
+ uint32_t cust : 4; /**< [ 3: 0](RO) Customer modified. 0x1 = Overall product information should be consulted for
+ product, major and minor pass numbers. */
+ uint32_t revand : 4; /**< [ 7: 4](RO) Manufacturer revision number. For CNXXXX always 0x0. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_bz_pidr3_s cn; */
+};
+typedef union bdk_gti_bz_pidr3 bdk_gti_bz_pidr3_t;
+
+#define BDK_GTI_BZ_PIDR3 BDK_GTI_BZ_PIDR3_FUNC()
+static inline uint64_t BDK_GTI_BZ_PIDR3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_BZ_PIDR3_FUNC(void)
+{
+ return 0x844000030fecll;
+}
+
+#define typedef_BDK_GTI_BZ_PIDR3 bdk_gti_bz_pidr3_t
+#define bustype_BDK_GTI_BZ_PIDR3 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_BZ_PIDR3 "GTI_BZ_PIDR3"
+#define device_bar_BDK_GTI_BZ_PIDR3 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_BZ_PIDR3 0
+#define arguments_BDK_GTI_BZ_PIDR3 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_bz_pidr4
+ *
+ * GTI Base Peripheral Identification Register 4
+ */
+union bdk_gti_bz_pidr4
+{
+ uint32_t u;
+ struct bdk_gti_bz_pidr4_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t pagecnt : 4; /**< [ 7: 4](RO) Number of log-2 4 KB blocks occupied. */
+ uint32_t jepcont : 4; /**< [ 3: 0](RO) JEP106 continuation code. Indicates Cavium. */
+#else /* Word 0 - Little Endian */
+ uint32_t jepcont : 4; /**< [ 3: 0](RO) JEP106 continuation code. Indicates Cavium. */
+ uint32_t pagecnt : 4; /**< [ 7: 4](RO) Number of log-2 4 KB blocks occupied. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_bz_pidr4_s cn; */
+};
+typedef union bdk_gti_bz_pidr4 bdk_gti_bz_pidr4_t;
+
+#define BDK_GTI_BZ_PIDR4 BDK_GTI_BZ_PIDR4_FUNC()
+static inline uint64_t BDK_GTI_BZ_PIDR4_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_BZ_PIDR4_FUNC(void)
+{
+ return 0x844000030fd0ll;
+}
+
+#define typedef_BDK_GTI_BZ_PIDR4 bdk_gti_bz_pidr4_t
+#define bustype_BDK_GTI_BZ_PIDR4 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_BZ_PIDR4 "GTI_BZ_PIDR4"
+#define device_bar_BDK_GTI_BZ_PIDR4 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_BZ_PIDR4 0
+#define arguments_BDK_GTI_BZ_PIDR4 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_bz_pidr5
+ *
+ * GTI Base Peripheral Identification Register 5
+ */
+union bdk_gti_bz_pidr5
+{
+ uint32_t u;
+ struct bdk_gti_bz_pidr5_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_bz_pidr5_s cn; */
+};
+typedef union bdk_gti_bz_pidr5 bdk_gti_bz_pidr5_t;
+
+#define BDK_GTI_BZ_PIDR5 BDK_GTI_BZ_PIDR5_FUNC()
+static inline uint64_t BDK_GTI_BZ_PIDR5_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_BZ_PIDR5_FUNC(void)
+{
+ return 0x844000030fd4ll;
+}
+
+#define typedef_BDK_GTI_BZ_PIDR5 bdk_gti_bz_pidr5_t
+#define bustype_BDK_GTI_BZ_PIDR5 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_BZ_PIDR5 "GTI_BZ_PIDR5"
+#define device_bar_BDK_GTI_BZ_PIDR5 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_BZ_PIDR5 0
+#define arguments_BDK_GTI_BZ_PIDR5 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_bz_pidr6
+ *
+ * GTI Base Peripheral Identification Register 6
+ */
+union bdk_gti_bz_pidr6
+{
+ uint32_t u;
+ struct bdk_gti_bz_pidr6_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_bz_pidr6_s cn; */
+};
+typedef union bdk_gti_bz_pidr6 bdk_gti_bz_pidr6_t;
+
+#define BDK_GTI_BZ_PIDR6 BDK_GTI_BZ_PIDR6_FUNC()
+static inline uint64_t BDK_GTI_BZ_PIDR6_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_BZ_PIDR6_FUNC(void)
+{
+ return 0x844000030fd8ll;
+}
+
+#define typedef_BDK_GTI_BZ_PIDR6 bdk_gti_bz_pidr6_t
+#define bustype_BDK_GTI_BZ_PIDR6 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_BZ_PIDR6 "GTI_BZ_PIDR6"
+#define device_bar_BDK_GTI_BZ_PIDR6 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_BZ_PIDR6 0
+#define arguments_BDK_GTI_BZ_PIDR6 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_bz_pidr7
+ *
+ * GTI Base Peripheral Identification Register 7
+ */
+union bdk_gti_bz_pidr7
+{
+ uint32_t u;
+ struct bdk_gti_bz_pidr7_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_bz_pidr7_s cn; */
+};
+typedef union bdk_gti_bz_pidr7 bdk_gti_bz_pidr7_t;
+
+#define BDK_GTI_BZ_PIDR7 BDK_GTI_BZ_PIDR7_FUNC()
+static inline uint64_t BDK_GTI_BZ_PIDR7_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_BZ_PIDR7_FUNC(void)
+{
+ return 0x844000030fdcll;
+}
+
+#define typedef_BDK_GTI_BZ_PIDR7 bdk_gti_bz_pidr7_t
+#define bustype_BDK_GTI_BZ_PIDR7 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_BZ_PIDR7 "GTI_BZ_PIDR7"
+#define device_bar_BDK_GTI_BZ_PIDR7 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_BZ_PIDR7 0
+#define arguments_BDK_GTI_BZ_PIDR7 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_cc_cidr0
+ *
+ * GTI Counter Control Component Identification Secure Register 0
+ */
+union bdk_gti_cc_cidr0
+{
+ uint32_t u;
+ struct bdk_gti_cc_cidr0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t preamble : 8; /**< [ 7: 0](SRO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+ uint32_t preamble : 8; /**< [ 7: 0](SRO) Preamble identification value. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_cc_cidr0_s cn; */
+};
+typedef union bdk_gti_cc_cidr0 bdk_gti_cc_cidr0_t;
+
+#define BDK_GTI_CC_CIDR0 BDK_GTI_CC_CIDR0_FUNC()
+static inline uint64_t BDK_GTI_CC_CIDR0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CC_CIDR0_FUNC(void)
+{
+ return 0x844000000ff0ll;
+}
+
+#define typedef_BDK_GTI_CC_CIDR0 bdk_gti_cc_cidr0_t
+#define bustype_BDK_GTI_CC_CIDR0 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CC_CIDR0 "GTI_CC_CIDR0"
+#define device_bar_BDK_GTI_CC_CIDR0 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CC_CIDR0 0
+#define arguments_BDK_GTI_CC_CIDR0 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_cc_cidr1
+ *
+ * GTI Counter Control Component Identification Secure Register 1
+ */
+union bdk_gti_cc_cidr1
+{
+ uint32_t u;
+ struct bdk_gti_cc_cidr1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t cclass : 4; /**< [ 7: 4](SRO) Component class. */
+ uint32_t preamble : 4; /**< [ 3: 0](SRO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+ uint32_t preamble : 4; /**< [ 3: 0](SRO) Preamble identification value. */
+ uint32_t cclass : 4; /**< [ 7: 4](SRO) Component class. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_cc_cidr1_s cn; */
+};
+typedef union bdk_gti_cc_cidr1 bdk_gti_cc_cidr1_t;
+
+#define BDK_GTI_CC_CIDR1 BDK_GTI_CC_CIDR1_FUNC()
+static inline uint64_t BDK_GTI_CC_CIDR1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CC_CIDR1_FUNC(void)
+{
+ return 0x844000000ff4ll;
+}
+
+#define typedef_BDK_GTI_CC_CIDR1 bdk_gti_cc_cidr1_t
+#define bustype_BDK_GTI_CC_CIDR1 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CC_CIDR1 "GTI_CC_CIDR1"
+#define device_bar_BDK_GTI_CC_CIDR1 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CC_CIDR1 0
+#define arguments_BDK_GTI_CC_CIDR1 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_cc_cidr2
+ *
+ * GTI Counter Control Component Identification Secure Register 2
+ */
+union bdk_gti_cc_cidr2
+{
+ uint32_t u;
+ struct bdk_gti_cc_cidr2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t preamble : 8; /**< [ 7: 0](SRO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+ uint32_t preamble : 8; /**< [ 7: 0](SRO) Preamble identification value. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_cc_cidr2_s cn; */
+};
+typedef union bdk_gti_cc_cidr2 bdk_gti_cc_cidr2_t;
+
+#define BDK_GTI_CC_CIDR2 BDK_GTI_CC_CIDR2_FUNC()
+static inline uint64_t BDK_GTI_CC_CIDR2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CC_CIDR2_FUNC(void)
+{
+ return 0x844000000ff8ll;
+}
+
+#define typedef_BDK_GTI_CC_CIDR2 bdk_gti_cc_cidr2_t
+#define bustype_BDK_GTI_CC_CIDR2 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CC_CIDR2 "GTI_CC_CIDR2"
+#define device_bar_BDK_GTI_CC_CIDR2 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CC_CIDR2 0
+#define arguments_BDK_GTI_CC_CIDR2 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_cc_cidr3
+ *
+ * GTI Counter Control Component Identification Secure Register 3
+ */
+union bdk_gti_cc_cidr3
+{
+ uint32_t u;
+ struct bdk_gti_cc_cidr3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t preamble : 8; /**< [ 7: 0](SRO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+ uint32_t preamble : 8; /**< [ 7: 0](SRO) Preamble identification value. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_cc_cidr3_s cn; */
+};
+typedef union bdk_gti_cc_cidr3 bdk_gti_cc_cidr3_t;
+
+#define BDK_GTI_CC_CIDR3 BDK_GTI_CC_CIDR3_FUNC()
+static inline uint64_t BDK_GTI_CC_CIDR3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CC_CIDR3_FUNC(void)
+{
+ return 0x844000000ffcll;
+}
+
+#define typedef_BDK_GTI_CC_CIDR3 bdk_gti_cc_cidr3_t
+#define bustype_BDK_GTI_CC_CIDR3 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CC_CIDR3 "GTI_CC_CIDR3"
+#define device_bar_BDK_GTI_CC_CIDR3 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CC_CIDR3 0
+#define arguments_BDK_GTI_CC_CIDR3 -1,-1,-1,-1
+
+/**
+ * Register (NCB) gti_cc_cntadd
+ *
+ * GTI Counter Control Atomic Add Secure Register
+ * Implementation defined register.
+ */
+union bdk_gti_cc_cntadd
+{
+ uint64_t u;
+ struct bdk_gti_cc_cntadd_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t cntadd : 64; /**< [ 63: 0](SWO) The value written to [CNTADD] is atomically added to GTI_CC_CNTCV. */
+#else /* Word 0 - Little Endian */
+ uint64_t cntadd : 64; /**< [ 63: 0](SWO) The value written to [CNTADD] is atomically added to GTI_CC_CNTCV. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_cc_cntadd_s cn; */
+};
+typedef union bdk_gti_cc_cntadd bdk_gti_cc_cntadd_t;
+
+#define BDK_GTI_CC_CNTADD BDK_GTI_CC_CNTADD_FUNC()
+static inline uint64_t BDK_GTI_CC_CNTADD_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CC_CNTADD_FUNC(void)
+{
+ return 0x8440000000c8ll;
+}
+
+#define typedef_BDK_GTI_CC_CNTADD bdk_gti_cc_cntadd_t
+#define bustype_BDK_GTI_CC_CNTADD BDK_CSR_TYPE_NCB
+#define basename_BDK_GTI_CC_CNTADD "GTI_CC_CNTADD"
+#define device_bar_BDK_GTI_CC_CNTADD 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CC_CNTADD 0
+#define arguments_BDK_GTI_CC_CNTADD -1,-1,-1,-1
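+
+/* Sketch (editorial addition, not vendor code): because [CNTADD] is a
+ * write-only atomic-add port onto GTI_CC_CNTCV, software can slew the system
+ * counter without a read-modify-write race. Assuming BDK_CSR_WRITE:
+ *
+ *   BDK_CSR_WRITE(node, BDK_GTI_CC_CNTADD, delta); // atomically adds delta ticks
+ */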
+
+/**
+ * Register (NCB32b) gti_cc_cntcr
+ *
+ * GTI Counter Control Secure Register
+ */
+union bdk_gti_cc_cntcr
+{
+ uint32_t u;
+ struct bdk_gti_cc_cntcr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_9_31 : 23;
+ uint32_t fcreq : 1; /**< [ 8: 8](SR/W) Frequency change request. Indicates the number of the entry in the frequency
+ table to select. Selecting an unimplemented entry, or an entry that contains
+ 0x0, has no effect on the counter.
+
+ For CNXXXX, which implements a single frequency table entry, must be 0x0. */
+ uint32_t reserved_2_7 : 6;
+ uint32_t hdbg : 1; /**< [ 1: 1](SR/W) System counter halt-on-debug enable.
+ 0 = System counter ignores halt-on-debug.
+ 1 = Asserted halt-on-debug signal halts system counter update. */
+ uint32_t en : 1; /**< [ 0: 0](SR/W) Enables the system counter. */
+#else /* Word 0 - Little Endian */
+ uint32_t en : 1; /**< [ 0: 0](SR/W) Enables the system counter. */
+ uint32_t hdbg : 1; /**< [ 1: 1](SR/W) System counter halt-on-debug enable.
+ 0 = System counter ignores halt-on-debug.
+ 1 = Asserted halt-on-debug signal halts system counter update. */
+ uint32_t reserved_2_7 : 6;
+ uint32_t fcreq : 1; /**< [ 8: 8](SR/W) Frequency change request. Indicates the number of the entry in the frequency
+ table to select. Selecting an unimplemented entry, or an entry that contains
+ 0x0, has no effect on the counter.
+
+ For CNXXXX, which implements a single frequency table entry, must be 0x0. */
+ uint32_t reserved_9_31 : 23;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_cc_cntcr_s cn; */
+};
+typedef union bdk_gti_cc_cntcr bdk_gti_cc_cntcr_t;
+
+#define BDK_GTI_CC_CNTCR BDK_GTI_CC_CNTCR_FUNC()
+static inline uint64_t BDK_GTI_CC_CNTCR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CC_CNTCR_FUNC(void)
+{
+ return 0x844000000000ll;
+}
+
+#define typedef_BDK_GTI_CC_CNTCR bdk_gti_cc_cntcr_t
+#define bustype_BDK_GTI_CC_CNTCR BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CC_CNTCR "GTI_CC_CNTCR"
+#define device_bar_BDK_GTI_CC_CNTCR 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CC_CNTCR 0
+#define arguments_BDK_GTI_CC_CNTCR -1,-1,-1,-1
+
+/**
+ * Register (NCB) gti_cc_cntcv
+ *
+ * GTI Counter Control Count Value Secure Register
+ */
+union bdk_gti_cc_cntcv
+{
+ uint64_t u;
+ struct bdk_gti_cc_cntcv_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t cnt : 64; /**< [ 63: 0](SR/W/H) System counter count value. The counter is also read-only accessible by the
+ nonsecure world with GTI_RD_CNTCV. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt : 64; /**< [ 63: 0](SR/W/H) System counter count value. The counter is also read-only accessible by the
+ nonsecure world with GTI_RD_CNTCV. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_cc_cntcv_s cn; */
+};
+typedef union bdk_gti_cc_cntcv bdk_gti_cc_cntcv_t;
+
+#define BDK_GTI_CC_CNTCV BDK_GTI_CC_CNTCV_FUNC()
+static inline uint64_t BDK_GTI_CC_CNTCV_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CC_CNTCV_FUNC(void)
+{
+ return 0x844000000008ll;
+}
+
+#define typedef_BDK_GTI_CC_CNTCV bdk_gti_cc_cntcv_t
+#define bustype_BDK_GTI_CC_CNTCV BDK_CSR_TYPE_NCB
+#define basename_BDK_GTI_CC_CNTCV "GTI_CC_CNTCV"
+#define device_bar_BDK_GTI_CC_CNTCV 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CC_CNTCV 0
+#define arguments_BDK_GTI_CC_CNTCV -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_cc_cntfid0
+ *
+ * GTI Counter Control Frequency Mode Table Secure Register 0
+ */
+union bdk_gti_cc_cntfid0
+{
+ uint32_t u;
+ struct bdk_gti_cc_cntfid0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t data : 32; /**< [ 31: 0](SR/W) Generic timer frequency mode table, index 0.
+ Programmed by boot software with the system counter clock frequency in Hertz.
+ See also GTI_CTL_CNTFRQ. */
+#else /* Word 0 - Little Endian */
+ uint32_t data : 32; /**< [ 31: 0](SR/W) Generic timer frequency mode table, index 0.
+ Programmed by boot software with the system counter clock frequency in Hertz.
+ See also GTI_CTL_CNTFRQ. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_cc_cntfid0_s cn; */
+};
+typedef union bdk_gti_cc_cntfid0 bdk_gti_cc_cntfid0_t;
+
+#define BDK_GTI_CC_CNTFID0 BDK_GTI_CC_CNTFID0_FUNC()
+static inline uint64_t BDK_GTI_CC_CNTFID0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CC_CNTFID0_FUNC(void)
+{
+ return 0x844000000020ll;
+}
+
+#define typedef_BDK_GTI_CC_CNTFID0 bdk_gti_cc_cntfid0_t
+#define bustype_BDK_GTI_CC_CNTFID0 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CC_CNTFID0 "GTI_CC_CNTFID0"
+#define device_bar_BDK_GTI_CC_CNTFID0 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CC_CNTFID0 0
+#define arguments_BDK_GTI_CC_CNTFID0 -1,-1,-1,-1
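+
+/* Sketch (editorial addition, not vendor code): boot software publishes the
+ * counter frequency in CNTFID0 before enabling the counter in GTI_CC_CNTCR.
+ * A minimal bring-up, assuming BDK_CSR_WRITE/BDK_CSR_MODIFY and a
+ * platform-supplied counter_hz:
+ *
+ *   BDK_CSR_WRITE(node, BDK_GTI_CC_CNTFID0, counter_hz); // table entry 0, in Hz
+ *   BDK_CSR_MODIFY(c, node, BDK_GTI_CC_CNTCR,
+ *                  c.s.en = 1);                          // start the system counter
+ */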
+
+/**
+ * Register (NCB32b) gti_cc_cntfid1
+ *
+ * GTI Counter Control Frequency Mode Table Secure Register 1
+ */
+union bdk_gti_cc_cntfid1
+{
+ uint32_t u;
+ struct bdk_gti_cc_cntfid1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t constant : 32; /**< [ 31: 0](SRO) Generic timer frequency mode table, index 1. Zero to mark the end of the table. */
+#else /* Word 0 - Little Endian */
+ uint32_t constant : 32; /**< [ 31: 0](SRO) Generic timer frequency mode table, index 1. Zero to mark the end of the table. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_cc_cntfid1_s cn; */
+};
+typedef union bdk_gti_cc_cntfid1 bdk_gti_cc_cntfid1_t;
+
+#define BDK_GTI_CC_CNTFID1 BDK_GTI_CC_CNTFID1_FUNC()
+static inline uint64_t BDK_GTI_CC_CNTFID1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CC_CNTFID1_FUNC(void)
+{
+ return 0x844000000024ll;
+}
+
+#define typedef_BDK_GTI_CC_CNTFID1 bdk_gti_cc_cntfid1_t
+#define bustype_BDK_GTI_CC_CNTFID1 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CC_CNTFID1 "GTI_CC_CNTFID1"
+#define device_bar_BDK_GTI_CC_CNTFID1 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CC_CNTFID1 0
+#define arguments_BDK_GTI_CC_CNTFID1 -1,-1,-1,-1
+
+/**
+ * Register (NCB) gti_cc_cntmb
+ *
+ * INTERNAL: GTI Counter Control Mailbox Secure Register
+ *
+ * Implementation defined register.
+ */
+union bdk_gti_cc_cntmb
+{
+ uint64_t u;
+ struct bdk_gti_cc_cntmb_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t mbox : 64; /**< [ 63: 0](SR/W) When written, GTI_CC_CNTCV is saved in GTI_CC_CNTMBTS.
+
+ For CCPI-enabled chips only.
+
+ Mailboxes are used as follows:
+
+ * An AP on node A does a store to node B's GTI_CC_CNTMB.
+
+ * As the store flies over CCPI/OCX on chip A, OCI signals GTI to capture a
+ transmit timestamp. GTI on chip A saves GTI_CC_CNTCV in GTI_CC_CNTMBTS, and sets
+ the GTI_CC_CNTMB_INT[TXTS] interrupt.
+
+ * As the store flies over CCPI/OCX on chip B, OCI signals GTI to capture a
+ receive timestamp. GTI on chip B saves GTI_CC_CNTCV in GTI_CC_CNTMBTS, and sets
+ the GTI_CC_CNTMB_INT[MBRX] interrupt.
+
+ * GTI on chip B writes GTI_CC_CNTMB with the mailbox value.
+
+ Note that if a CRC error occurs on the link during the store, the store will get
+ retried by CCPI resulting in multiple transmit timestamp captures and
+ TX_TIMESTAMP interrupts. */
+#else /* Word 0 - Little Endian */
+ uint64_t mbox : 64; /**< [ 63: 0](SR/W) When written, GTI_CC_CNTCV is saved in GTI_CC_CNTMBTS.
+
+ For CCPI-enabled chips only.
+
+ Mailboxes are used as follows:
+
+ * An AP on node A does a store to node B's GTI_CC_CNTMB.
+
+ * As the store flies over CCPI/OCX on chip A, OCI signals GTI to capture a
+ transmit timestamp. GTI on chip A saves GTI_CC_CNTCV in GTI_CC_CNTMBTS, and sets
+ the GTI_CC_CNTMB_INT[TXTS] interrupt.
+
+ * As the store flies over CCPI/OCX on chip B, OCI signals GTI to capture a
+ receive timestamp. GTI on chip B saves GTI_CC_CNTCV in GTI_CC_CNTMBTS, and sets
+ the GTI_CC_CNTMB_INT[MBRX] interrupt.
+
+ * GTI on chip B writes GTI_CC_CNTMB with the mailbox value.
+
+ Note that if a CRC error occurs on the link during the store, the store will get
+ retried by CCPI resulting in multiple transmit timestamp captures and
+ TX_TIMESTAMP interrupts. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_gti_cc_cntmb_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t mbox : 64; /**< [ 63: 0](RAZ) Reserved; for backwards compatibility. */
+#else /* Word 0 - Little Endian */
+ uint64_t mbox : 64; /**< [ 63: 0](RAZ) Reserved; for backwards compatibility. */
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_gti_cc_cntmb_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t mbox : 64; /**< [ 63: 0](SR/W) Reserved.
+ Internal:
+ When written, GTI_CC_CNTCV is saved in GTI_CC_CNTMBTS.
+
+ For CCPI-enabled chips only.
+
+ Mailboxes are used as follows:
+
+ * An AP on node A does a store to node B's GTI_CC_CNTMB.
+
+ * As the store flies over CCPI/OCX on chip A, OCI signals GTI to capture a
+ transmit timestamp. GTI on chip A saves GTI_CC_CNTCV in GTI_CC_CNTMBTS, and sets
+ the GTI_CC_CNTMB_INT[TXTS] interrupt.
+
+ * As the store flies over CCPI/OCX on chip B, OCI signals GTI to capture a
+ receive timestamp. GTI on chip B saves GTI_CC_CNTCV in GTI_CC_CNTMBTS, and sets
+ the GTI_CC_CNTMB_INT[MBRX] interrupt.
+
+ * GTI on chip B writes GTI_CC_CNTMB with the mailbox value.
+
+ Note that if a CRC error occurs on the link during the store, the store will get
+ retried by CCPI resulting in multiple transmit timestamp captures and
+ TX_TIMESTAMP interrupts. */
+#else /* Word 0 - Little Endian */
+ uint64_t mbox : 64; /**< [ 63: 0](SR/W) Reserved.
+ Internal:
+ When written, GTI_CC_CNTCV is saved in GTI_CC_CNTMBTS.
+
+ For CCPI-enabled chips only.
+
+ Mailboxes are used as follows:
+
+ * An AP on node A does a store to node B's GTI_CC_CNTMB.
+
+ * As the store flies over CCPI/OCX on chip A, OCI signals GTI to capture a
+ transmit timestamp. GTI on chip A saves GTI_CC_CNTCV in GTI_CC_CNTMBTS, and sets
+ the GTI_CC_CNTMB_INT[TXTS] interrupt.
+
+ * As the store flies over CCPI/OCX on chip B, OCI signals GTI to capture a
+ receive timestamp. GTI on chip B saves GTI_CC_CNTCV in GTI_CC_CNTMBTS, and sets
+ the GTI_CC_CNTMB_INT[MBRX] interrupt.
+
+ * GTI on chip B writes GTI_CC_CNTMB with the mailbox value.
+
+ Note that if a CRC error occurs on the link during the store, the store will get
+ retried by CCPI resulting in multiple transmit timestamp captures and
+ TX_TIMESTAMP interrupts. */
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_gti_cc_cntmb_s cn88xx; */
+ /* struct bdk_gti_cc_cntmb_cn81xx cn83xx; */
+};
+typedef union bdk_gti_cc_cntmb bdk_gti_cc_cntmb_t;
+
+#define BDK_GTI_CC_CNTMB BDK_GTI_CC_CNTMB_FUNC()
+static inline uint64_t BDK_GTI_CC_CNTMB_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CC_CNTMB_FUNC(void)
+{
+ return 0x8440000000d0ll;
+}
+
+#define typedef_BDK_GTI_CC_CNTMB bdk_gti_cc_cntmb_t
+#define bustype_BDK_GTI_CC_CNTMB BDK_CSR_TYPE_NCB
+#define basename_BDK_GTI_CC_CNTMB "GTI_CC_CNTMB"
+#define device_bar_BDK_GTI_CC_CNTMB 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CC_CNTMB 0
+#define arguments_BDK_GTI_CC_CNTMB -1,-1,-1,-1
+
+/**
+ * Register (NCB) gti_cc_cntmb_int
+ *
+ * INTERNAL: GTI Counter Control Mailbox Interrupt Register
+ *
+ * Implementation defined register.
+ */
+union bdk_gti_cc_cntmb_int
+{
+ uint64_t u;
+ struct bdk_gti_cc_cntmb_int_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t mbrx : 1; /**< [ 1: 1](SR/W1C/H) Mailbox receive interrupt. Set whenever GTI_CC_CNTMB is written. See
+ GTI_CC_CNTMB. */
+ uint64_t txts : 1; /**< [ 0: 0](SR/W1C/H) Transmit timestamp interrupt. Set whenever a transmit timestamp is captured in
+ GTI_CC_CNTMBTS. See GTI_CC_CNTMB. */
+#else /* Word 0 - Little Endian */
+ uint64_t txts : 1; /**< [ 0: 0](SR/W1C/H) Transmit timestamp interrupt. Set whenever a transmit timestamp is captured in
+ GTI_CC_CNTMBTS. See GTI_CC_CNTMB. */
+ uint64_t mbrx : 1; /**< [ 1: 1](SR/W1C/H) Mailbox receive interrupt. Set whenever GTI_CC_CNTMB is written. See
+ GTI_CC_CNTMB. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_cc_cntmb_int_s cn8; */
+ struct bdk_gti_cc_cntmb_int_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t mbrx : 1; /**< [ 1: 1](RAZ) Reserved; for backwards compatibility. */
+ uint64_t txts : 1; /**< [ 0: 0](RAZ) Reserved; for backwards compatibility. */
+#else /* Word 0 - Little Endian */
+ uint64_t txts : 1; /**< [ 0: 0](RAZ) Reserved; for backwards compatibility. */
+ uint64_t mbrx : 1; /**< [ 1: 1](RAZ) Reserved; for backwards compatibility. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_gti_cc_cntmb_int bdk_gti_cc_cntmb_int_t;
+
+#define BDK_GTI_CC_CNTMB_INT BDK_GTI_CC_CNTMB_INT_FUNC()
+static inline uint64_t BDK_GTI_CC_CNTMB_INT_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CC_CNTMB_INT_FUNC(void)
+{
+ return 0x8440000000e0ll;
+}
+
+#define typedef_BDK_GTI_CC_CNTMB_INT bdk_gti_cc_cntmb_int_t
+#define bustype_BDK_GTI_CC_CNTMB_INT BDK_CSR_TYPE_NCB
+#define basename_BDK_GTI_CC_CNTMB_INT "GTI_CC_CNTMB_INT"
+#define device_bar_BDK_GTI_CC_CNTMB_INT 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CC_CNTMB_INT 0
+#define arguments_BDK_GTI_CC_CNTMB_INT -1,-1,-1,-1
+
+/**
+ * Register (NCB) gti_cc_cntmb_int_ena_clr
+ *
+ * INTERNAL: GTI Counter Control Mailbox Interrupt Enable Clear Register
+ */
+union bdk_gti_cc_cntmb_int_ena_clr
+{
+ uint64_t u;
+ struct bdk_gti_cc_cntmb_int_ena_clr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t mbrx : 1; /**< [ 1: 1](SR/W1C/H) Reads or clears enable for GTI_CC_CNTMB_INT[MBRX]. */
+ uint64_t txts : 1; /**< [ 0: 0](SR/W1C/H) Reads or clears enable for GTI_CC_CNTMB_INT[TXTS]. */
+#else /* Word 0 - Little Endian */
+ uint64_t txts : 1; /**< [ 0: 0](SR/W1C/H) Reads or clears enable for GTI_CC_CNTMB_INT[TXTS]. */
+ uint64_t mbrx : 1; /**< [ 1: 1](SR/W1C/H) Reads or clears enable for GTI_CC_CNTMB_INT[MBRX]. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_cc_cntmb_int_ena_clr_s cn8; */
+ struct bdk_gti_cc_cntmb_int_ena_clr_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t mbrx : 1; /**< [ 1: 1](RAZ) Reserved; for backwards compatibility. */
+ uint64_t txts : 1; /**< [ 0: 0](RAZ) Reserved; for backwards compatibility. */
+#else /* Word 0 - Little Endian */
+ uint64_t txts : 1; /**< [ 0: 0](RAZ) Reserved; for backwards compatibility. */
+ uint64_t mbrx : 1; /**< [ 1: 1](RAZ) Reserved; for backwards compatibility. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_gti_cc_cntmb_int_ena_clr bdk_gti_cc_cntmb_int_ena_clr_t;
+
+#define BDK_GTI_CC_CNTMB_INT_ENA_CLR BDK_GTI_CC_CNTMB_INT_ENA_CLR_FUNC()
+static inline uint64_t BDK_GTI_CC_CNTMB_INT_ENA_CLR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CC_CNTMB_INT_ENA_CLR_FUNC(void)
+{
+ return 0x8440000000f0ll;
+}
+
+#define typedef_BDK_GTI_CC_CNTMB_INT_ENA_CLR bdk_gti_cc_cntmb_int_ena_clr_t
+#define bustype_BDK_GTI_CC_CNTMB_INT_ENA_CLR BDK_CSR_TYPE_NCB
+#define basename_BDK_GTI_CC_CNTMB_INT_ENA_CLR "GTI_CC_CNTMB_INT_ENA_CLR"
+#define device_bar_BDK_GTI_CC_CNTMB_INT_ENA_CLR 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CC_CNTMB_INT_ENA_CLR 0
+#define arguments_BDK_GTI_CC_CNTMB_INT_ENA_CLR -1,-1,-1,-1
+
+/**
+ * Register (NCB) gti_cc_cntmb_int_ena_set
+ *
+ * INTERNAL: GTI Counter Control Mailbox Interrupt Enable Set Register
+ */
+union bdk_gti_cc_cntmb_int_ena_set
+{
+ uint64_t u;
+ struct bdk_gti_cc_cntmb_int_ena_set_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t mbrx : 1; /**< [ 1: 1](SR/W1S/H) Reads or sets enable for GTI_CC_CNTMB_INT[MBRX]. */
+ uint64_t txts : 1; /**< [ 0: 0](SR/W1S/H) Reads or sets enable for GTI_CC_CNTMB_INT[TXTS]. */
+#else /* Word 0 - Little Endian */
+ uint64_t txts : 1; /**< [ 0: 0](SR/W1S/H) Reads or sets enable for GTI_CC_CNTMB_INT[TXTS]. */
+ uint64_t mbrx : 1; /**< [ 1: 1](SR/W1S/H) Reads or sets enable for GTI_CC_CNTMB_INT[MBRX]. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_cc_cntmb_int_ena_set_s cn8; */
+ struct bdk_gti_cc_cntmb_int_ena_set_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t mbrx : 1; /**< [ 1: 1](RAZ) Reserved; for backwards compatibility. */
+ uint64_t txts : 1; /**< [ 0: 0](RAZ) Reserved; for backwards compatibility. */
+#else /* Word 0 - Little Endian */
+ uint64_t txts : 1; /**< [ 0: 0](RAZ) Reserved; for backwards compatibility. */
+ uint64_t mbrx : 1; /**< [ 1: 1](RAZ) Reserved; for backwards compatibility. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_gti_cc_cntmb_int_ena_set bdk_gti_cc_cntmb_int_ena_set_t;
+
+#define BDK_GTI_CC_CNTMB_INT_ENA_SET BDK_GTI_CC_CNTMB_INT_ENA_SET_FUNC()
+static inline uint64_t BDK_GTI_CC_CNTMB_INT_ENA_SET_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CC_CNTMB_INT_ENA_SET_FUNC(void)
+{
+ return 0x8440000000f8ll;
+}
+
+#define typedef_BDK_GTI_CC_CNTMB_INT_ENA_SET bdk_gti_cc_cntmb_int_ena_set_t
+#define bustype_BDK_GTI_CC_CNTMB_INT_ENA_SET BDK_CSR_TYPE_NCB
+#define basename_BDK_GTI_CC_CNTMB_INT_ENA_SET "GTI_CC_CNTMB_INT_ENA_SET"
+#define device_bar_BDK_GTI_CC_CNTMB_INT_ENA_SET 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CC_CNTMB_INT_ENA_SET 0
+#define arguments_BDK_GTI_CC_CNTMB_INT_ENA_SET -1,-1,-1,-1
+
+/**
+ * Register (NCB) gti_cc_cntmb_int_set
+ *
+ * INTERNAL: GTI Counter Control Mailbox Interrupt Set Register
+ */
+union bdk_gti_cc_cntmb_int_set
+{
+ uint64_t u;
+ struct bdk_gti_cc_cntmb_int_set_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t mbrx : 1; /**< [ 1: 1](SR/W1S/H) Reads or sets GTI_CC_CNTMB_INT[MBRX]. */
+ uint64_t txts : 1; /**< [ 0: 0](SR/W1S/H) Reads or sets GTI_CC_CNTMB_INT[TXTS]. */
+#else /* Word 0 - Little Endian */
+ uint64_t txts : 1; /**< [ 0: 0](SR/W1S/H) Reads or sets GTI_CC_CNTMB_INT[TXTS]. */
+ uint64_t mbrx : 1; /**< [ 1: 1](SR/W1S/H) Reads or sets GTI_CC_CNTMB_INT[MBRX]. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_cc_cntmb_int_set_s cn8; */
+ struct bdk_gti_cc_cntmb_int_set_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t mbrx : 1; /**< [ 1: 1](RAZ) Reserved; for backwards compatibility. */
+ uint64_t txts : 1; /**< [ 0: 0](RAZ) Reserved; for backwards compatibility. */
+#else /* Word 0 - Little Endian */
+ uint64_t txts : 1; /**< [ 0: 0](RAZ) Reserved; for backwards compatibility. */
+ uint64_t mbrx : 1; /**< [ 1: 1](RAZ) Reserved; for backwards compatibility. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_gti_cc_cntmb_int_set bdk_gti_cc_cntmb_int_set_t;
+
+#define BDK_GTI_CC_CNTMB_INT_SET BDK_GTI_CC_CNTMB_INT_SET_FUNC()
+static inline uint64_t BDK_GTI_CC_CNTMB_INT_SET_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CC_CNTMB_INT_SET_FUNC(void)
+{
+ return 0x8440000000e8ll;
+}
+
+#define typedef_BDK_GTI_CC_CNTMB_INT_SET bdk_gti_cc_cntmb_int_set_t
+#define bustype_BDK_GTI_CC_CNTMB_INT_SET BDK_CSR_TYPE_NCB
+#define basename_BDK_GTI_CC_CNTMB_INT_SET "GTI_CC_CNTMB_INT_SET"
+#define device_bar_BDK_GTI_CC_CNTMB_INT_SET 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CC_CNTMB_INT_SET 0
+#define arguments_BDK_GTI_CC_CNTMB_INT_SET -1,-1,-1,-1
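+
+/* Sketch (editorial addition, not vendor code): GTI_CC_CNTMB_INT and its
+ * ENA_CLR/ENA_SET/SET companions form the usual BDK interrupt bank: write 1
+ * to a bit of ENA_SET to enable it, write 1 to the status register to
+ * acknowledge it. Assuming BDK_CSR_READ/BDK_CSR_WRITE:
+ *
+ *   BDK_CSR_WRITE(node, BDK_GTI_CC_CNTMB_INT_ENA_SET, 0x1);   // enable TXTS (bit 0)
+ *   uint64_t pend = BDK_CSR_READ(node, BDK_GTI_CC_CNTMB_INT); // pending causes
+ *   BDK_CSR_WRITE(node, BDK_GTI_CC_CNTMB_INT, pend);          // W1C: acknowledge them
+ */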
+
+/**
+ * Register (NCB) gti_cc_cntmbts
+ *
+ * INTERNAL: GTI Counter Control Mailbox Time Stamp Secure Register
+ *
+ * Implementation defined register.
+ */
+union bdk_gti_cc_cntmbts
+{
+ uint64_t u;
+ struct bdk_gti_cc_cntmbts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t timestamp : 64; /**< [ 63: 0](SRO/H) Mailbox time stamp. When GTI_CC_CNTMB is written, GTI_CC_CNTCV is saved in GTI_CC_CNTMBTS.
+ See GTI_CC_CNTMB.
+
+ For CCPI-enabled chips only. */
+#else /* Word 0 - Little Endian */
+ uint64_t timestamp : 64; /**< [ 63: 0](SRO/H) Mailbox time stamp. When GTI_CC_CNTMB is written, GTI_CC_CNTCV is saved in GTI_CC_CNTMBTS.
+ See GTI_CC_CNTMB.
+
+ For CCPI-enabled chips only. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_gti_cc_cntmbts_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t timestamp : 64; /**< [ 63: 0](RAZ) Reserved; for backwards compatibility. */
+#else /* Word 0 - Little Endian */
+ uint64_t timestamp : 64; /**< [ 63: 0](RAZ) Reserved; for backwards compatibility. */
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_gti_cc_cntmbts_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t timestamp : 64; /**< [ 63: 0](SRO/H) Reserved.
+ Internal:
+ Mailbox time stamp. When GTI_CC_CNTMB is written, GTI_CC_CNTCV is saved in GTI_CC_CNTMBTS.
+ See GTI_CC_CNTMB.
+
+ For CCPI-enabled chips only. */
+#else /* Word 0 - Little Endian */
+ uint64_t timestamp : 64; /**< [ 63: 0](SRO/H) Reserved.
+ Internal:
+ Mailbox time stamp. When GTI_CC_CNTMB is written, GTI_CC_CNTCV is saved in GTI_CC_CNTMBTS.
+ See GTI_CC_CNTMB.
+
+ For CCPI-enabled chips only. */
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_gti_cc_cntmbts_s cn88xx; */
+ /* struct bdk_gti_cc_cntmbts_cn81xx cn83xx; */
+};
+typedef union bdk_gti_cc_cntmbts bdk_gti_cc_cntmbts_t;
+
+#define BDK_GTI_CC_CNTMBTS BDK_GTI_CC_CNTMBTS_FUNC()
+static inline uint64_t BDK_GTI_CC_CNTMBTS_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CC_CNTMBTS_FUNC(void)
+{
+ return 0x8440000000d8ll;
+}
+
+#define typedef_BDK_GTI_CC_CNTMBTS bdk_gti_cc_cntmbts_t
+#define bustype_BDK_GTI_CC_CNTMBTS BDK_CSR_TYPE_NCB
+#define basename_BDK_GTI_CC_CNTMBTS "GTI_CC_CNTMBTS"
+#define device_bar_BDK_GTI_CC_CNTMBTS 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CC_CNTMBTS 0
+#define arguments_BDK_GTI_CC_CNTMBTS -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_cc_cntracc
+ *
+ * GTI Counter Control Count Rate Accumulator Secure Register
+ * Implementation defined register.
+ */
+union bdk_gti_cc_cntracc
+{
+ uint32_t u;
+ struct bdk_gti_cc_cntracc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t cntracc : 32; /**< [ 31: 0](SRO/H) Fractional bits of the system counter, GTI_RD_CNTCV. */
+#else /* Word 0 - Little Endian */
+ uint32_t cntracc : 32; /**< [ 31: 0](SRO/H) Fractional bits of the system counter, GTI_RD_CNTCV. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_cc_cntracc_s cn; */
+};
+typedef union bdk_gti_cc_cntracc bdk_gti_cc_cntracc_t;
+
+#define BDK_GTI_CC_CNTRACC BDK_GTI_CC_CNTRACC_FUNC()
+static inline uint64_t BDK_GTI_CC_CNTRACC_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CC_CNTRACC_FUNC(void)
+{
+ return 0x8440000000c4ll;
+}
+
+#define typedef_BDK_GTI_CC_CNTRACC bdk_gti_cc_cntracc_t
+#define bustype_BDK_GTI_CC_CNTRACC BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CC_CNTRACC "GTI_CC_CNTRACC"
+#define device_bar_BDK_GTI_CC_CNTRACC 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CC_CNTRACC 0
+#define arguments_BDK_GTI_CC_CNTRACC -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_cc_cntrate
+ *
+ * GTI Counter Control Count Rate Secure Register
+ * Implementation defined register.
+ */
+union bdk_gti_cc_cntrate
+{
+ uint32_t u;
+ struct bdk_gti_cc_cntrate_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t cntrate : 32; /**< [ 31: 0](SR/W) Sets the system counter count rate. A 32-bit fraction that is added to
+ GTI_CC_CNTRACC every source clock. */
+#else /* Word 0 - Little Endian */
+ uint32_t cntrate : 32; /**< [ 31: 0](SR/W) Sets the system counter count rate. A 32-bit fraction that is added to
+ GTI_CC_CNTRACC every source clock. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_cc_cntrate_s cn; */
+};
+typedef union bdk_gti_cc_cntrate bdk_gti_cc_cntrate_t;
+
+#define BDK_GTI_CC_CNTRATE BDK_GTI_CC_CNTRATE_FUNC()
+static inline uint64_t BDK_GTI_CC_CNTRATE_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CC_CNTRATE_FUNC(void)
+{
+ return 0x8440000000c0ll;
+}
+
+#define typedef_BDK_GTI_CC_CNTRATE bdk_gti_cc_cntrate_t
+#define bustype_BDK_GTI_CC_CNTRATE BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CC_CNTRATE "GTI_CC_CNTRATE"
+#define device_bar_BDK_GTI_CC_CNTRATE 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CC_CNTRATE 0
+#define arguments_BDK_GTI_CC_CNTRATE -1,-1,-1,-1
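+
+/* Worked example (editorial addition, not vendor code): CNTRATE is the
+ * 32-bit fraction added to GTI_CC_CNTRACC every source-clock cycle, so the
+ * counter advances at source_hz * CNTRATE / 2^32 ticks per second. For a
+ * 100 MHz count from an 800 MHz source clock:
+ *
+ *   uint32_t cntrate = (uint32_t)((100000000ull << 32) / 800000000); // 0x20000000 = 1/8
+ *   BDK_CSR_WRITE(node, BDK_GTI_CC_CNTRATE, cntrate);
+ */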
+
+/**
+ * Register (NCB32b) gti_cc_cntsr
+ *
+ * GTI Counter Control Status Secure Register
+ */
+union bdk_gti_cc_cntsr
+{
+ uint32_t u;
+ struct bdk_gti_cc_cntsr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_9_31 : 23;
+ uint32_t fcack : 1; /**< [ 8: 8](SRO/H) Frequency change acknowledge. Indicates the currently selected entry in the frequency
+ table.
+
+ For CNXXXX, which implements a single frequency table entry, always 0x0. */
+ uint32_t reserved_2_7 : 6;
+ uint32_t dbgh : 1; /**< [ 1: 1](SRO/H) Indicates whether the counter is halted because the halt-on-debug signal is asserted.
+ 0 = Counter is not halted.
+ 1 = Counter is halted. */
+ uint32_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0 : 1;
+ uint32_t dbgh : 1; /**< [ 1: 1](SRO/H) Indicates whether the counter is halted because the halt-on-debug signal is asserted.
+ 0 = Counter is not halted.
+ 1 = Counter is halted. */
+ uint32_t reserved_2_7 : 6;
+ uint32_t fcack : 1; /**< [ 8: 8](SRO/H) Frequency change acknowledge. Indicates the currently selected entry in the frequency
+ table.
+
+ For CNXXXX, which implements a single frequency table entry, always 0x0. */
+ uint32_t reserved_9_31 : 23;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_cc_cntsr_s cn; */
+};
+typedef union bdk_gti_cc_cntsr bdk_gti_cc_cntsr_t;
+
+#define BDK_GTI_CC_CNTSR BDK_GTI_CC_CNTSR_FUNC()
+static inline uint64_t BDK_GTI_CC_CNTSR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CC_CNTSR_FUNC(void)
+{
+ return 0x844000000004ll;
+}
+
+#define typedef_BDK_GTI_CC_CNTSR bdk_gti_cc_cntsr_t
+#define bustype_BDK_GTI_CC_CNTSR BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CC_CNTSR "GTI_CC_CNTSR"
+#define device_bar_BDK_GTI_CC_CNTSR 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CC_CNTSR 0
+#define arguments_BDK_GTI_CC_CNTSR -1,-1,-1,-1
+
+/**
+ * Register (NCB) gti_cc_imp_ctl
+ *
+ * GTI Counter Control Implementation Control Register
+ * Implementation defined register.
+ */
+union bdk_gti_cc_imp_ctl
+{
+ uint64_t u;
+ struct bdk_gti_cc_imp_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t clk_src : 1; /**< [ 0: 0](SR/W) Count source clock for GTI_CC_CNTRATE.
+ 0 = Coprocessor clock.
+ 1 = PTP PPS clock. See MIO_PTP_CLOCK_CFG[PPS]. */
+#else /* Word 0 - Little Endian */
+ uint64_t clk_src : 1; /**< [ 0: 0](SR/W) Count source clock for GTI_CC_CNTRATE.
+ 0 = Coprocessor clock.
+ 1 = PTP PPS clock. See MIO_PTP_CLOCK_CFG[PPS]. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_cc_imp_ctl_s cn; */
+};
+typedef union bdk_gti_cc_imp_ctl bdk_gti_cc_imp_ctl_t;
+
+#define BDK_GTI_CC_IMP_CTL BDK_GTI_CC_IMP_CTL_FUNC()
+static inline uint64_t BDK_GTI_CC_IMP_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CC_IMP_CTL_FUNC(void)
+{
+ return 0x844000000100ll;
+}
+
+#define typedef_BDK_GTI_CC_IMP_CTL bdk_gti_cc_imp_ctl_t
+#define bustype_BDK_GTI_CC_IMP_CTL BDK_CSR_TYPE_NCB
+#define basename_BDK_GTI_CC_IMP_CTL "GTI_CC_IMP_CTL"
+#define device_bar_BDK_GTI_CC_IMP_CTL 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CC_IMP_CTL 0
+#define arguments_BDK_GTI_CC_IMP_CTL -1,-1,-1,-1
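+
+/* Sketch (editorial addition, not vendor code): IMP_CTL[CLK_SRC] selects the
+ * source clock that drives the CNTRATE accumulator. Switching to the PTP PPS
+ * input, assuming BDK_CSR_MODIFY:
+ *
+ *   BDK_CSR_MODIFY(c, node, BDK_GTI_CC_IMP_CTL,
+ *                  c.s.clk_src = 1); // 0 = coprocessor clock, 1 = PTP PPS
+ */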
+
+/**
+ * Register (NCB32b) gti_cc_pidr0
+ *
+ * GTI Counter Control Peripheral Identification Secure Register 0
+ */
+union bdk_gti_cc_pidr0
+{
+ uint32_t u;
+ struct bdk_gti_cc_pidr0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t partnum0 : 8; /**< [ 7: 0](SRO) Part number \<7:0\>. Indicates PCC_PIDR_PARTNUM0_E::GTI_CC. */
+#else /* Word 0 - Little Endian */
+ uint32_t partnum0 : 8; /**< [ 7: 0](SRO) Part number \<7:0\>. Indicates PCC_PIDR_PARTNUM0_E::GTI_CC. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_cc_pidr0_s cn; */
+};
+typedef union bdk_gti_cc_pidr0 bdk_gti_cc_pidr0_t;
+
+#define BDK_GTI_CC_PIDR0 BDK_GTI_CC_PIDR0_FUNC()
+static inline uint64_t BDK_GTI_CC_PIDR0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CC_PIDR0_FUNC(void)
+{
+ return 0x844000000fe0ll;
+}
+
+#define typedef_BDK_GTI_CC_PIDR0 bdk_gti_cc_pidr0_t
+#define bustype_BDK_GTI_CC_PIDR0 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CC_PIDR0 "GTI_CC_PIDR0"
+#define device_bar_BDK_GTI_CC_PIDR0 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CC_PIDR0 0
+#define arguments_BDK_GTI_CC_PIDR0 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_cc_pidr1
+ *
+ * GTI Counter Control Peripheral Identification Secure Register 1
+ */
+union bdk_gti_cc_pidr1
+{
+ uint32_t u;
+ struct bdk_gti_cc_pidr1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t idcode : 4; /**< [ 7: 4](SRO) JEP106 identification code \<3:0\>. Cavium code is 0x4C. */
+ uint32_t partnum1 : 4; /**< [ 3: 0](SRO) Part number \<11:8\>. Indicates PCC_PIDR_PARTNUM1_E::COMP. */
+#else /* Word 0 - Little Endian */
+ uint32_t partnum1 : 4; /**< [ 3: 0](SRO) Part number \<11:8\>. Indicates PCC_PIDR_PARTNUM1_E::COMP. */
+ uint32_t idcode : 4; /**< [ 7: 4](SRO) JEP106 identification code \<3:0\>. Cavium code is 0x4C. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_cc_pidr1_s cn; */
+};
+typedef union bdk_gti_cc_pidr1 bdk_gti_cc_pidr1_t;
+
+#define BDK_GTI_CC_PIDR1 BDK_GTI_CC_PIDR1_FUNC()
+static inline uint64_t BDK_GTI_CC_PIDR1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CC_PIDR1_FUNC(void)
+{
+ return 0x844000000fe4ll;
+}
+
+#define typedef_BDK_GTI_CC_PIDR1 bdk_gti_cc_pidr1_t
+#define bustype_BDK_GTI_CC_PIDR1 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CC_PIDR1 "GTI_CC_PIDR1"
+#define device_bar_BDK_GTI_CC_PIDR1 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CC_PIDR1 0
+#define arguments_BDK_GTI_CC_PIDR1 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_cc_pidr2
+ *
+ * GTI Counter Control Peripheral Identification Secure Register 2
+ */
+union bdk_gti_cc_pidr2
+{
+ uint32_t u;
+ struct bdk_gti_cc_pidr2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t revision : 4; /**< [ 7: 4](SRO) Architectural revision, as assigned by ARM. */
+ uint32_t jedec : 1; /**< [ 3: 3](SRO) JEDEC assigned. */
+ uint32_t idcode : 3; /**< [ 2: 0](SRO) JEP106 identification code \<6:4\>. Cavium code is 0x4C. */
+#else /* Word 0 - Little Endian */
+ uint32_t idcode : 3; /**< [ 2: 0](SRO) JEP106 identification code \<6:4\>. Cavium code is 0x4C. */
+ uint32_t jedec : 1; /**< [ 3: 3](SRO) JEDEC assigned. */
+ uint32_t revision : 4; /**< [ 7: 4](SRO) Architectural revision, as assigned by ARM. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_cc_pidr2_s cn; */
+};
+typedef union bdk_gti_cc_pidr2 bdk_gti_cc_pidr2_t;
+
+#define BDK_GTI_CC_PIDR2 BDK_GTI_CC_PIDR2_FUNC()
+static inline uint64_t BDK_GTI_CC_PIDR2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CC_PIDR2_FUNC(void)
+{
+ return 0x844000000fe8ll;
+}
+
+#define typedef_BDK_GTI_CC_PIDR2 bdk_gti_cc_pidr2_t
+#define bustype_BDK_GTI_CC_PIDR2 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CC_PIDR2 "GTI_CC_PIDR2"
+#define device_bar_BDK_GTI_CC_PIDR2 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CC_PIDR2 0
+#define arguments_BDK_GTI_CC_PIDR2 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_cc_pidr3
+ *
+ * GTI Counter Control Peripheral Identification Secure Register 3
+ */
+union bdk_gti_cc_pidr3
+{
+ uint32_t u;
+ struct bdk_gti_cc_pidr3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t revand : 4; /**< [ 7: 4](SRO) Manufacturer revision number. For CNXXXX always 0x0. */
+ uint32_t cust : 4; /**< [ 3: 0](SRO) Customer modified. 0x1 = Overall product information should be consulted for
+ product, major and minor pass numbers. */
+#else /* Word 0 - Little Endian */
+ uint32_t cust : 4; /**< [ 3: 0](SRO) Customer modified. 0x1 = Overall product information should be consulted for
+ product, major and minor pass numbers. */
+ uint32_t revand : 4; /**< [ 7: 4](SRO) Manufacturer revision number. For CNXXXX always 0x0. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_cc_pidr3_s cn; */
+};
+typedef union bdk_gti_cc_pidr3 bdk_gti_cc_pidr3_t;
+
+#define BDK_GTI_CC_PIDR3 BDK_GTI_CC_PIDR3_FUNC()
+static inline uint64_t BDK_GTI_CC_PIDR3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CC_PIDR3_FUNC(void)
+{
+ return 0x844000000fecll;
+}
+
+#define typedef_BDK_GTI_CC_PIDR3 bdk_gti_cc_pidr3_t
+#define bustype_BDK_GTI_CC_PIDR3 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CC_PIDR3 "GTI_CC_PIDR3"
+#define device_bar_BDK_GTI_CC_PIDR3 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CC_PIDR3 0
+#define arguments_BDK_GTI_CC_PIDR3 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_cc_pidr4
+ *
+ * GTI Counter Control Peripheral Identification Secure Register 4
+ */
+union bdk_gti_cc_pidr4
+{
+ uint32_t u;
+ struct bdk_gti_cc_pidr4_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t pagecnt : 4; /**< [ 7: 4](SRO) Number of log-2 4 KB blocks occupied. */
+ uint32_t jepcont : 4; /**< [ 3: 0](SRO) JEP106 continuation code. Indicates Cavium. */
+#else /* Word 0 - Little Endian */
+ uint32_t jepcont : 4; /**< [ 3: 0](SRO) JEP106 continuation code. Indicates Cavium. */
+ uint32_t pagecnt : 4; /**< [ 7: 4](SRO) Number of log-2 4 KB blocks occupied. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_cc_pidr4_s cn; */
+};
+typedef union bdk_gti_cc_pidr4 bdk_gti_cc_pidr4_t;
+
+#define BDK_GTI_CC_PIDR4 BDK_GTI_CC_PIDR4_FUNC()
+static inline uint64_t BDK_GTI_CC_PIDR4_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CC_PIDR4_FUNC(void)
+{
+ return 0x844000000fd0ll;
+}
+
+#define typedef_BDK_GTI_CC_PIDR4 bdk_gti_cc_pidr4_t
+#define bustype_BDK_GTI_CC_PIDR4 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CC_PIDR4 "GTI_CC_PIDR4"
+#define device_bar_BDK_GTI_CC_PIDR4 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CC_PIDR4 0
+#define arguments_BDK_GTI_CC_PIDR4 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_cc_pidr5
+ *
+ * GTI Counter Control Peripheral Identification Secure Register 5
+ */
+union bdk_gti_cc_pidr5
+{
+ uint32_t u;
+ struct bdk_gti_cc_pidr5_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_cc_pidr5_s cn; */
+};
+typedef union bdk_gti_cc_pidr5 bdk_gti_cc_pidr5_t;
+
+#define BDK_GTI_CC_PIDR5 BDK_GTI_CC_PIDR5_FUNC()
+static inline uint64_t BDK_GTI_CC_PIDR5_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CC_PIDR5_FUNC(void)
+{
+ return 0x844000000fd4ll;
+}
+
+#define typedef_BDK_GTI_CC_PIDR5 bdk_gti_cc_pidr5_t
+#define bustype_BDK_GTI_CC_PIDR5 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CC_PIDR5 "GTI_CC_PIDR5"
+#define device_bar_BDK_GTI_CC_PIDR5 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CC_PIDR5 0
+#define arguments_BDK_GTI_CC_PIDR5 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_cc_pidr6
+ *
+ * GTI Counter Control Peripheral Identification Secure Register 6
+ */
+union bdk_gti_cc_pidr6
+{
+ uint32_t u;
+ struct bdk_gti_cc_pidr6_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_cc_pidr6_s cn; */
+};
+typedef union bdk_gti_cc_pidr6 bdk_gti_cc_pidr6_t;
+
+#define BDK_GTI_CC_PIDR6 BDK_GTI_CC_PIDR6_FUNC()
+static inline uint64_t BDK_GTI_CC_PIDR6_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CC_PIDR6_FUNC(void)
+{
+ return 0x844000000fd8ll;
+}
+
+#define typedef_BDK_GTI_CC_PIDR6 bdk_gti_cc_pidr6_t
+#define bustype_BDK_GTI_CC_PIDR6 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CC_PIDR6 "GTI_CC_PIDR6"
+#define device_bar_BDK_GTI_CC_PIDR6 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CC_PIDR6 0
+#define arguments_BDK_GTI_CC_PIDR6 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_cc_pidr7
+ *
+ * GTI Counter Control Peripheral Identification Secure Register 7
+ */
+union bdk_gti_cc_pidr7
+{
+ uint32_t u;
+ struct bdk_gti_cc_pidr7_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_cc_pidr7_s cn; */
+};
+typedef union bdk_gti_cc_pidr7 bdk_gti_cc_pidr7_t;
+
+#define BDK_GTI_CC_PIDR7 BDK_GTI_CC_PIDR7_FUNC()
+static inline uint64_t BDK_GTI_CC_PIDR7_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CC_PIDR7_FUNC(void)
+{
+ return 0x844000000fdcll;
+}
+
+#define typedef_BDK_GTI_CC_PIDR7 bdk_gti_cc_pidr7_t
+#define bustype_BDK_GTI_CC_PIDR7 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CC_PIDR7 "GTI_CC_PIDR7"
+#define device_bar_BDK_GTI_CC_PIDR7 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CC_PIDR7 0
+#define arguments_BDK_GTI_CC_PIDR7 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_const
+ *
+ * GTI Constants Register
+ */
+union bdk_gti_const
+{
+ uint32_t u;
+ struct bdk_gti_const_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_const_s cn; */
+};
+typedef union bdk_gti_const bdk_gti_const_t;
+
+#define BDK_GTI_CONST BDK_GTI_CONST_FUNC()
+static inline uint64_t BDK_GTI_CONST_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CONST_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x8440000e0004ll;
+ __bdk_csr_fatal("GTI_CONST", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_CONST bdk_gti_const_t
+#define bustype_BDK_GTI_CONST BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CONST "GTI_CONST"
+#define device_bar_BDK_GTI_CONST 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CONST 0
+#define arguments_BDK_GTI_CONST -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_ctl_cidr0
+ *
+ * GTI Control Component Identification Register 0
+ */
+union bdk_gti_ctl_cidr0
+{
+ uint32_t u;
+ struct bdk_gti_ctl_cidr0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_ctl_cidr0_s cn; */
+};
+typedef union bdk_gti_ctl_cidr0 bdk_gti_ctl_cidr0_t;
+
+#define BDK_GTI_CTL_CIDR0 BDK_GTI_CTL_CIDR0_FUNC()
+static inline uint64_t BDK_GTI_CTL_CIDR0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CTL_CIDR0_FUNC(void)
+{
+ return 0x844000020ff0ll;
+}
+
+#define typedef_BDK_GTI_CTL_CIDR0 bdk_gti_ctl_cidr0_t
+#define bustype_BDK_GTI_CTL_CIDR0 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CTL_CIDR0 "GTI_CTL_CIDR0"
+#define device_bar_BDK_GTI_CTL_CIDR0 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CTL_CIDR0 0
+#define arguments_BDK_GTI_CTL_CIDR0 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_ctl_cidr1
+ *
+ * GTI Control Component Identification Register 1
+ */
+union bdk_gti_ctl_cidr1
+{
+ uint32_t u;
+ struct bdk_gti_ctl_cidr1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t cclass : 4; /**< [ 7: 4](RO) Component class. */
+ uint32_t preamble : 4; /**< [ 3: 0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+ uint32_t preamble : 4; /**< [ 3: 0](RO) Preamble identification value. */
+ uint32_t cclass : 4; /**< [ 7: 4](RO) Component class. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_ctl_cidr1_s cn; */
+};
+typedef union bdk_gti_ctl_cidr1 bdk_gti_ctl_cidr1_t;
+
+#define BDK_GTI_CTL_CIDR1 BDK_GTI_CTL_CIDR1_FUNC()
+static inline uint64_t BDK_GTI_CTL_CIDR1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CTL_CIDR1_FUNC(void)
+{
+ return 0x844000020ff4ll;
+}
+
+#define typedef_BDK_GTI_CTL_CIDR1 bdk_gti_ctl_cidr1_t
+#define bustype_BDK_GTI_CTL_CIDR1 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CTL_CIDR1 "GTI_CTL_CIDR1"
+#define device_bar_BDK_GTI_CTL_CIDR1 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CTL_CIDR1 0
+#define arguments_BDK_GTI_CTL_CIDR1 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_ctl_cidr2
+ *
+ * GTI Control Component Identification Register 2
+ */
+union bdk_gti_ctl_cidr2
+{
+ uint32_t u;
+ struct bdk_gti_ctl_cidr2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_ctl_cidr2_s cn; */
+};
+typedef union bdk_gti_ctl_cidr2 bdk_gti_ctl_cidr2_t;
+
+#define BDK_GTI_CTL_CIDR2 BDK_GTI_CTL_CIDR2_FUNC()
+static inline uint64_t BDK_GTI_CTL_CIDR2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CTL_CIDR2_FUNC(void)
+{
+ return 0x844000020ff8ll;
+}
+
+#define typedef_BDK_GTI_CTL_CIDR2 bdk_gti_ctl_cidr2_t
+#define bustype_BDK_GTI_CTL_CIDR2 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CTL_CIDR2 "GTI_CTL_CIDR2"
+#define device_bar_BDK_GTI_CTL_CIDR2 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CTL_CIDR2 0
+#define arguments_BDK_GTI_CTL_CIDR2 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_ctl_cidr3
+ *
+ * GTI Control Component Identification Register 3
+ */
+union bdk_gti_ctl_cidr3
+{
+ uint32_t u;
+ struct bdk_gti_ctl_cidr3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_ctl_cidr3_s cn; */
+};
+typedef union bdk_gti_ctl_cidr3 bdk_gti_ctl_cidr3_t;
+
+#define BDK_GTI_CTL_CIDR3 BDK_GTI_CTL_CIDR3_FUNC()
+static inline uint64_t BDK_GTI_CTL_CIDR3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CTL_CIDR3_FUNC(void)
+{
+ return 0x844000020ffcll;
+}
+
+#define typedef_BDK_GTI_CTL_CIDR3 bdk_gti_ctl_cidr3_t
+#define bustype_BDK_GTI_CTL_CIDR3 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CTL_CIDR3 "GTI_CTL_CIDR3"
+#define device_bar_BDK_GTI_CTL_CIDR3 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CTL_CIDR3 0
+#define arguments_BDK_GTI_CTL_CIDR3 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_ctl_cntacr0
+ *
+ * GTI Control Access Control 0 Register
+ */
+union bdk_gti_ctl_cntacr0
+{
+ uint32_t u;
+ struct bdk_gti_ctl_cntacr0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t constant : 32; /**< [ 31: 0](RO) Access control 0. */
+#else /* Word 0 - Little Endian */
+ uint32_t constant : 32; /**< [ 31: 0](RO) Access control 0. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_ctl_cntacr0_s cn; */
+};
+typedef union bdk_gti_ctl_cntacr0 bdk_gti_ctl_cntacr0_t;
+
+#define BDK_GTI_CTL_CNTACR0 BDK_GTI_CTL_CNTACR0_FUNC()
+static inline uint64_t BDK_GTI_CTL_CNTACR0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CTL_CNTACR0_FUNC(void)
+{
+ return 0x844000020040ll;
+}
+
+#define typedef_BDK_GTI_CTL_CNTACR0 bdk_gti_ctl_cntacr0_t
+#define bustype_BDK_GTI_CTL_CNTACR0 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CTL_CNTACR0 "GTI_CTL_CNTACR0"
+#define device_bar_BDK_GTI_CTL_CNTACR0 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CTL_CNTACR0 0
+#define arguments_BDK_GTI_CTL_CNTACR0 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_ctl_cntfrq
+ *
+ * GTI Control Counter Frequency Secure Register
+ */
+union bdk_gti_ctl_cntfrq
+{
+ uint32_t u;
+ struct bdk_gti_ctl_cntfrq_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t data : 32; /**< [ 31: 0](SR/W) Programmed by boot software with the system counter clock frequency in Hertz.
+ See also GTI_CC_CNTFID0. */
+#else /* Word 0 - Little Endian */
+ uint32_t data : 32; /**< [ 31: 0](SR/W) Programmed by boot software with the system counter clock frequency in Hertz.
+ See also GTI_CC_CNTFID0. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_ctl_cntfrq_s cn; */
+};
+typedef union bdk_gti_ctl_cntfrq bdk_gti_ctl_cntfrq_t;
+
+#define BDK_GTI_CTL_CNTFRQ BDK_GTI_CTL_CNTFRQ_FUNC()
+static inline uint64_t BDK_GTI_CTL_CNTFRQ_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CTL_CNTFRQ_FUNC(void)
+{
+ return 0x844000020000ll;
+}
+
+#define typedef_BDK_GTI_CTL_CNTFRQ bdk_gti_ctl_cntfrq_t
+#define bustype_BDK_GTI_CTL_CNTFRQ BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CTL_CNTFRQ "GTI_CTL_CNTFRQ"
+#define device_bar_BDK_GTI_CTL_CNTFRQ 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CTL_CNTFRQ 0
+#define arguments_BDK_GTI_CTL_CNTFRQ -1,-1,-1,-1
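+
+/* Illustrative sketch (editor's addition, not part of the imported BDK
+ * sources): boot software publishes the system counter frequency by
+ * writing it to GTI_CTL_CNTFRQ and, per the field description above,
+ * keeping GTI_CC_CNTFID0 consistent. Flat node-0 MMIO is assumed here;
+ * the real BDK goes through its node-aware CSR accessors instead. */
+static inline void example_set_gti_cntfrq(uint32_t hz)
+{
+ volatile uint32_t *cntfrq = (volatile uint32_t *)BDK_GTI_CTL_CNTFRQ;
+ *cntfrq = hz; /* e.g. 100000000 for a 100 MHz system counter */
+}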
+
+/**
+ * Register (NCB32b) gti_ctl_cntnsar
+ *
+ * GTI Control Counter Nonsecure Access Secure Register
+ */
+union bdk_gti_ctl_cntnsar
+{
+ uint32_t u;
+ struct bdk_gti_ctl_cntnsar_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t constant : 32; /**< [ 31: 0](SRO) Counter nonsecure access. */
+#else /* Word 0 - Little Endian */
+ uint32_t constant : 32; /**< [ 31: 0](SRO) Counter nonsecure access. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_ctl_cntnsar_s cn; */
+};
+typedef union bdk_gti_ctl_cntnsar bdk_gti_ctl_cntnsar_t;
+
+#define BDK_GTI_CTL_CNTNSAR BDK_GTI_CTL_CNTNSAR_FUNC()
+static inline uint64_t BDK_GTI_CTL_CNTNSAR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CTL_CNTNSAR_FUNC(void)
+{
+ return 0x844000020004ll;
+}
+
+#define typedef_BDK_GTI_CTL_CNTNSAR bdk_gti_ctl_cntnsar_t
+#define bustype_BDK_GTI_CTL_CNTNSAR BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CTL_CNTNSAR "GTI_CTL_CNTNSAR"
+#define device_bar_BDK_GTI_CTL_CNTNSAR 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CTL_CNTNSAR 0
+#define arguments_BDK_GTI_CTL_CNTNSAR -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_ctl_cnttidr
+ *
+ * GTI Control Counter Timer ID Register
+ */
+union bdk_gti_ctl_cnttidr
+{
+ uint32_t u;
+ struct bdk_gti_ctl_cnttidr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t constant : 32; /**< [ 31: 0](RO) Counter timer ID. */
+#else /* Word 0 - Little Endian */
+ uint32_t constant : 32; /**< [ 31: 0](RO) Counter timer ID. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_ctl_cnttidr_s cn; */
+};
+typedef union bdk_gti_ctl_cnttidr bdk_gti_ctl_cnttidr_t;
+
+#define BDK_GTI_CTL_CNTTIDR BDK_GTI_CTL_CNTTIDR_FUNC()
+static inline uint64_t BDK_GTI_CTL_CNTTIDR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CTL_CNTTIDR_FUNC(void)
+{
+ return 0x844000020008ll;
+}
+
+#define typedef_BDK_GTI_CTL_CNTTIDR bdk_gti_ctl_cnttidr_t
+#define bustype_BDK_GTI_CTL_CNTTIDR BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CTL_CNTTIDR "GTI_CTL_CNTTIDR"
+#define device_bar_BDK_GTI_CTL_CNTTIDR 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CTL_CNTTIDR 0
+#define arguments_BDK_GTI_CTL_CNTTIDR -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_ctl_pidr0
+ *
+ * GTI Control Peripheral Identification Register 0
+ */
+union bdk_gti_ctl_pidr0
+{
+ uint32_t u;
+ struct bdk_gti_ctl_pidr0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t partnum0 : 8; /**< [ 7: 0](RO) Part number \<7:0\>. Indicates PCC_PIDR_PARTNUM0_E::GTI_CTL. */
+#else /* Word 0 - Little Endian */
+ uint32_t partnum0 : 8; /**< [ 7: 0](RO) Part number \<7:0\>. Indicates PCC_PIDR_PARTNUM0_E::GTI_CTL. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_ctl_pidr0_s cn; */
+};
+typedef union bdk_gti_ctl_pidr0 bdk_gti_ctl_pidr0_t;
+
+#define BDK_GTI_CTL_PIDR0 BDK_GTI_CTL_PIDR0_FUNC()
+static inline uint64_t BDK_GTI_CTL_PIDR0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CTL_PIDR0_FUNC(void)
+{
+ return 0x844000020fe0ll;
+}
+
+#define typedef_BDK_GTI_CTL_PIDR0 bdk_gti_ctl_pidr0_t
+#define bustype_BDK_GTI_CTL_PIDR0 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CTL_PIDR0 "GTI_CTL_PIDR0"
+#define device_bar_BDK_GTI_CTL_PIDR0 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CTL_PIDR0 0
+#define arguments_BDK_GTI_CTL_PIDR0 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_ctl_pidr1
+ *
+ * GTI Control Peripheral Identification Register 1
+ */
+union bdk_gti_ctl_pidr1
+{
+ uint32_t u;
+ struct bdk_gti_ctl_pidr1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t idcode : 4; /**< [ 7: 4](RO) JEP106 identification code \<3:0\>. Cavium code is 0x4C. */
+ uint32_t partnum1 : 4; /**< [ 3: 0](RO) Part number \<11:8\>. Indicates PCC_PIDR_PARTNUM1_E::COMP. */
+#else /* Word 0 - Little Endian */
+ uint32_t partnum1 : 4; /**< [ 3: 0](RO) Part number \<11:8\>. Indicates PCC_PIDR_PARTNUM1_E::COMP. */
+ uint32_t idcode : 4; /**< [ 7: 4](RO) JEP106 identification code \<3:0\>. Cavium code is 0x4C. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_ctl_pidr1_s cn; */
+};
+typedef union bdk_gti_ctl_pidr1 bdk_gti_ctl_pidr1_t;
+
+#define BDK_GTI_CTL_PIDR1 BDK_GTI_CTL_PIDR1_FUNC()
+static inline uint64_t BDK_GTI_CTL_PIDR1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CTL_PIDR1_FUNC(void)
+{
+ return 0x844000020fe4ll;
+}
+
+#define typedef_BDK_GTI_CTL_PIDR1 bdk_gti_ctl_pidr1_t
+#define bustype_BDK_GTI_CTL_PIDR1 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CTL_PIDR1 "GTI_CTL_PIDR1"
+#define device_bar_BDK_GTI_CTL_PIDR1 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CTL_PIDR1 0
+#define arguments_BDK_GTI_CTL_PIDR1 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_ctl_pidr2
+ *
+ * GTI Control Peripheral Identification Register 2
+ */
+union bdk_gti_ctl_pidr2
+{
+ uint32_t u;
+ struct bdk_gti_ctl_pidr2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t revision : 4; /**< [ 7: 4](RO) Architectural revision, as assigned by ARM. */
+ uint32_t jedec : 1; /**< [ 3: 3](RO) JEDEC assigned. */
+ uint32_t idcode : 3; /**< [ 2: 0](RO) JEP106 identification code \<6:4\>. Cavium code is 0x4C. */
+#else /* Word 0 - Little Endian */
+ uint32_t idcode : 3; /**< [ 2: 0](RO) JEP106 identification code \<6:4\>. Cavium code is 0x4C. */
+ uint32_t jedec : 1; /**< [ 3: 3](RO) JEDEC assigned. */
+ uint32_t revision : 4; /**< [ 7: 4](RO) Architectural revision, as assigned by ARM. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_ctl_pidr2_s cn; */
+};
+typedef union bdk_gti_ctl_pidr2 bdk_gti_ctl_pidr2_t;
+
+#define BDK_GTI_CTL_PIDR2 BDK_GTI_CTL_PIDR2_FUNC()
+static inline uint64_t BDK_GTI_CTL_PIDR2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CTL_PIDR2_FUNC(void)
+{
+ return 0x844000020fe8ll;
+}
+
+#define typedef_BDK_GTI_CTL_PIDR2 bdk_gti_ctl_pidr2_t
+#define bustype_BDK_GTI_CTL_PIDR2 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CTL_PIDR2 "GTI_CTL_PIDR2"
+#define device_bar_BDK_GTI_CTL_PIDR2 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CTL_PIDR2 0
+#define arguments_BDK_GTI_CTL_PIDR2 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_ctl_pidr3
+ *
+ * GTI Control Peripheral Identification Register 3
+ */
+union bdk_gti_ctl_pidr3
+{
+ uint32_t u;
+ struct bdk_gti_ctl_pidr3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t revand : 4; /**< [ 7: 4](RO) Manufacturer revision number. For CNXXXX always 0x0. */
+ uint32_t cust : 4; /**< [ 3: 0](RO) Customer modified. 0x1 = Overall product information should be consulted for
+ product, major and minor pass numbers. */
+#else /* Word 0 - Little Endian */
+ uint32_t cust : 4; /**< [ 3: 0](RO) Customer modified. 0x1 = Overall product information should be consulted for
+ product, major and minor pass numbers. */
+ uint32_t revand : 4; /**< [ 7: 4](RO) Manufacturer revision number. For CNXXXX always 0x0. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_ctl_pidr3_s cn; */
+};
+typedef union bdk_gti_ctl_pidr3 bdk_gti_ctl_pidr3_t;
+
+#define BDK_GTI_CTL_PIDR3 BDK_GTI_CTL_PIDR3_FUNC()
+static inline uint64_t BDK_GTI_CTL_PIDR3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CTL_PIDR3_FUNC(void)
+{
+ return 0x844000020fecll;
+}
+
+#define typedef_BDK_GTI_CTL_PIDR3 bdk_gti_ctl_pidr3_t
+#define bustype_BDK_GTI_CTL_PIDR3 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CTL_PIDR3 "GTI_CTL_PIDR3"
+#define device_bar_BDK_GTI_CTL_PIDR3 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CTL_PIDR3 0
+#define arguments_BDK_GTI_CTL_PIDR3 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_ctl_pidr4
+ *
+ * GTI Control Peripheral Identification Register 4
+ */
+union bdk_gti_ctl_pidr4
+{
+ uint32_t u;
+ struct bdk_gti_ctl_pidr4_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t pagecnt : 4; /**< [ 7: 4](RO) Log2 of the number of 4 KB blocks occupied. */
+ uint32_t jepcont : 4; /**< [ 3: 0](RO) JEP106 continuation code. Indicates Cavium. */
+#else /* Word 0 - Little Endian */
+ uint32_t jepcont : 4; /**< [ 3: 0](RO) JEP106 continuation code. Indicates Cavium. */
+ uint32_t pagecnt : 4; /**< [ 7: 4](RO) Log2 of the number of 4 KB blocks occupied. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_ctl_pidr4_s cn; */
+};
+typedef union bdk_gti_ctl_pidr4 bdk_gti_ctl_pidr4_t;
+
+#define BDK_GTI_CTL_PIDR4 BDK_GTI_CTL_PIDR4_FUNC()
+static inline uint64_t BDK_GTI_CTL_PIDR4_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CTL_PIDR4_FUNC(void)
+{
+ return 0x844000020fd0ll;
+}
+
+#define typedef_BDK_GTI_CTL_PIDR4 bdk_gti_ctl_pidr4_t
+#define bustype_BDK_GTI_CTL_PIDR4 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CTL_PIDR4 "GTI_CTL_PIDR4"
+#define device_bar_BDK_GTI_CTL_PIDR4 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CTL_PIDR4 0
+#define arguments_BDK_GTI_CTL_PIDR4 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_ctl_pidr5
+ *
+ * GTI Control Peripheral Identification Register 5
+ */
+union bdk_gti_ctl_pidr5
+{
+ uint32_t u;
+ struct bdk_gti_ctl_pidr5_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_ctl_pidr5_s cn; */
+};
+typedef union bdk_gti_ctl_pidr5 bdk_gti_ctl_pidr5_t;
+
+#define BDK_GTI_CTL_PIDR5 BDK_GTI_CTL_PIDR5_FUNC()
+static inline uint64_t BDK_GTI_CTL_PIDR5_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CTL_PIDR5_FUNC(void)
+{
+ return 0x844000020fd4ll;
+}
+
+#define typedef_BDK_GTI_CTL_PIDR5 bdk_gti_ctl_pidr5_t
+#define bustype_BDK_GTI_CTL_PIDR5 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CTL_PIDR5 "GTI_CTL_PIDR5"
+#define device_bar_BDK_GTI_CTL_PIDR5 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CTL_PIDR5 0
+#define arguments_BDK_GTI_CTL_PIDR5 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_ctl_pidr6
+ *
+ * GTI Control Peripheral Identification Register 6
+ */
+union bdk_gti_ctl_pidr6
+{
+ uint32_t u;
+ struct bdk_gti_ctl_pidr6_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_ctl_pidr6_s cn; */
+};
+typedef union bdk_gti_ctl_pidr6 bdk_gti_ctl_pidr6_t;
+
+#define BDK_GTI_CTL_PIDR6 BDK_GTI_CTL_PIDR6_FUNC()
+static inline uint64_t BDK_GTI_CTL_PIDR6_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CTL_PIDR6_FUNC(void)
+{
+ return 0x844000020fd8ll;
+}
+
+#define typedef_BDK_GTI_CTL_PIDR6 bdk_gti_ctl_pidr6_t
+#define bustype_BDK_GTI_CTL_PIDR6 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CTL_PIDR6 "GTI_CTL_PIDR6"
+#define device_bar_BDK_GTI_CTL_PIDR6 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CTL_PIDR6 0
+#define arguments_BDK_GTI_CTL_PIDR6 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_ctl_pidr7
+ *
+ * GTI Control Peripheral Identification Register 7
+ */
+union bdk_gti_ctl_pidr7
+{
+ uint32_t u;
+ struct bdk_gti_ctl_pidr7_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_ctl_pidr7_s cn; */
+};
+typedef union bdk_gti_ctl_pidr7 bdk_gti_ctl_pidr7_t;
+
+#define BDK_GTI_CTL_PIDR7 BDK_GTI_CTL_PIDR7_FUNC()
+static inline uint64_t BDK_GTI_CTL_PIDR7_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CTL_PIDR7_FUNC(void)
+{
+ return 0x844000020fdcll;
+}
+
+#define typedef_BDK_GTI_CTL_PIDR7 bdk_gti_ctl_pidr7_t
+#define bustype_BDK_GTI_CTL_PIDR7 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_CTL_PIDR7 "GTI_CTL_PIDR7"
+#define device_bar_BDK_GTI_CTL_PIDR7 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CTL_PIDR7 0
+#define arguments_BDK_GTI_CTL_PIDR7 -1,-1,-1,-1
+
+/**
+ * Register (NCB) gti_cwd_del3t
+ *
+ * GTI Per-core Watchdog DEL3T Interrupt Register
+ * Generic timer per-core watchdog DEL3T interrupts.
+ */
+union bdk_gti_cwd_del3t
+{
+ uint64_t u;
+ struct bdk_gti_cwd_del3t_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_54_63 : 10;
+ uint64_t core : 54; /**< [ 53: 0](R/W1C/H) Per-core watchdog DEL3T interrupt. */
+#else /* Word 0 - Little Endian */
+ uint64_t core : 54; /**< [ 53: 0](R/W1C/H) Per-core watchdog DEL3T interrupt. */
+ uint64_t reserved_54_63 : 10;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_gti_cwd_del3t_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t core : 48; /**< [ 47: 0](R/W1C/H) Per-core watchdog DEL3T interrupt. */
+#else /* Word 0 - Little Endian */
+ uint64_t core : 48; /**< [ 47: 0](R/W1C/H) Per-core watchdog DEL3T interrupt. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_gti_cwd_del3t_s cn9; */
+};
+typedef union bdk_gti_cwd_del3t bdk_gti_cwd_del3t_t;
+
+#define BDK_GTI_CWD_DEL3T BDK_GTI_CWD_DEL3T_FUNC()
+static inline uint64_t BDK_GTI_CWD_DEL3T_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CWD_DEL3T_FUNC(void)
+{
+ return 0x844000040220ll;
+}
+
+#define typedef_BDK_GTI_CWD_DEL3T bdk_gti_cwd_del3t_t
+#define bustype_BDK_GTI_CWD_DEL3T BDK_CSR_TYPE_NCB
+#define basename_BDK_GTI_CWD_DEL3T "GTI_CWD_DEL3T"
+#define device_bar_BDK_GTI_CWD_DEL3T 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CWD_DEL3T 0
+#define arguments_BDK_GTI_CWD_DEL3T -1,-1,-1,-1
+
+/**
+ * Register (NCB) gti_cwd_del3t_ena_clr
+ *
+ * GTI Per-core Watchdog DEL3T Interrupt Enable Clear Register
+ * This register clears interrupt enable bits.
+ */
+union bdk_gti_cwd_del3t_ena_clr
+{
+ uint64_t u;
+ struct bdk_gti_cwd_del3t_ena_clr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_54_63 : 10;
+ uint64_t core : 54; /**< [ 53: 0](R/W1C/H) Reads or clears enable for GTI_CWD_DEL3T[CORE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t core : 54; /**< [ 53: 0](R/W1C/H) Reads or clears enable for GTI_CWD_DEL3T[CORE]. */
+ uint64_t reserved_54_63 : 10;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_gti_cwd_del3t_ena_clr_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t core : 48; /**< [ 47: 0](R/W1C/H) Reads or clears enable for GTI_CWD_DEL3T[CORE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t core : 48; /**< [ 47: 0](R/W1C/H) Reads or clears enable for GTI_CWD_DEL3T[CORE]. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_gti_cwd_del3t_ena_clr_s cn9; */
+};
+typedef union bdk_gti_cwd_del3t_ena_clr bdk_gti_cwd_del3t_ena_clr_t;
+
+#define BDK_GTI_CWD_DEL3T_ENA_CLR BDK_GTI_CWD_DEL3T_ENA_CLR_FUNC()
+static inline uint64_t BDK_GTI_CWD_DEL3T_ENA_CLR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CWD_DEL3T_ENA_CLR_FUNC(void)
+{
+ return 0x844000040230ll;
+}
+
+#define typedef_BDK_GTI_CWD_DEL3T_ENA_CLR bdk_gti_cwd_del3t_ena_clr_t
+#define bustype_BDK_GTI_CWD_DEL3T_ENA_CLR BDK_CSR_TYPE_NCB
+#define basename_BDK_GTI_CWD_DEL3T_ENA_CLR "GTI_CWD_DEL3T_ENA_CLR"
+#define device_bar_BDK_GTI_CWD_DEL3T_ENA_CLR 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CWD_DEL3T_ENA_CLR 0
+#define arguments_BDK_GTI_CWD_DEL3T_ENA_CLR -1,-1,-1,-1
+
+/**
+ * Register (NCB) gti_cwd_del3t_ena_set
+ *
+ * GTI Per-core Watchdog DEL3T Interrupt Enable Set Register
+ * This register sets interrupt enable bits.
+ */
+union bdk_gti_cwd_del3t_ena_set
+{
+ uint64_t u;
+ struct bdk_gti_cwd_del3t_ena_set_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_54_63 : 10;
+ uint64_t core : 54; /**< [ 53: 0](R/W1S/H) Reads or sets enable for GTI_CWD_DEL3T[CORE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t core : 54; /**< [ 53: 0](R/W1S/H) Reads or sets enable for GTI_CWD_DEL3T[CORE]. */
+ uint64_t reserved_54_63 : 10;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_gti_cwd_del3t_ena_set_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t core : 48; /**< [ 47: 0](R/W1S/H) Reads or sets enable for GTI_CWD_DEL3T[CORE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t core : 48; /**< [ 47: 0](R/W1S/H) Reads or sets enable for GTI_CWD_DEL3T[CORE]. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_gti_cwd_del3t_ena_set_s cn9; */
+};
+typedef union bdk_gti_cwd_del3t_ena_set bdk_gti_cwd_del3t_ena_set_t;
+
+#define BDK_GTI_CWD_DEL3T_ENA_SET BDK_GTI_CWD_DEL3T_ENA_SET_FUNC()
+static inline uint64_t BDK_GTI_CWD_DEL3T_ENA_SET_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CWD_DEL3T_ENA_SET_FUNC(void)
+{
+ return 0x844000040238ll;
+}
+
+#define typedef_BDK_GTI_CWD_DEL3T_ENA_SET bdk_gti_cwd_del3t_ena_set_t
+#define bustype_BDK_GTI_CWD_DEL3T_ENA_SET BDK_CSR_TYPE_NCB
+#define basename_BDK_GTI_CWD_DEL3T_ENA_SET "GTI_CWD_DEL3T_ENA_SET"
+#define device_bar_BDK_GTI_CWD_DEL3T_ENA_SET 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CWD_DEL3T_ENA_SET 0
+#define arguments_BDK_GTI_CWD_DEL3T_ENA_SET -1,-1,-1,-1
+
+/**
+ * Register (NCB) gti_cwd_del3t_set
+ *
+ * GTI Per-core Watchdog DEL3T Interrupt Set Register
+ * This register sets interrupt bits.
+ */
+union bdk_gti_cwd_del3t_set
+{
+ uint64_t u;
+ struct bdk_gti_cwd_del3t_set_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_54_63 : 10;
+ uint64_t core : 54; /**< [ 53: 0](R/W1S/H) Reads or sets GTI_CWD_DEL3T[CORE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t core : 54; /**< [ 53: 0](R/W1S/H) Reads or sets GTI_CWD_DEL3T[CORE]. */
+ uint64_t reserved_54_63 : 10;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_gti_cwd_del3t_set_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t core : 48; /**< [ 47: 0](R/W1S/H) Reads or sets GTI_CWD_DEL3T[CORE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t core : 48; /**< [ 47: 0](R/W1S/H) Reads or sets GTI_CWD_DEL3T[CORE]. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_gti_cwd_del3t_set_s cn9; */
+};
+typedef union bdk_gti_cwd_del3t_set bdk_gti_cwd_del3t_set_t;
+
+#define BDK_GTI_CWD_DEL3T_SET BDK_GTI_CWD_DEL3T_SET_FUNC()
+static inline uint64_t BDK_GTI_CWD_DEL3T_SET_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CWD_DEL3T_SET_FUNC(void)
+{
+ return 0x844000040228ll;
+}
+
+#define typedef_BDK_GTI_CWD_DEL3T_SET bdk_gti_cwd_del3t_set_t
+#define bustype_BDK_GTI_CWD_DEL3T_SET BDK_CSR_TYPE_NCB
+#define basename_BDK_GTI_CWD_DEL3T_SET "GTI_CWD_DEL3T_SET"
+#define device_bar_BDK_GTI_CWD_DEL3T_SET 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CWD_DEL3T_SET 0
+#define arguments_BDK_GTI_CWD_DEL3T_SET -1,-1,-1,-1
+
+/**
+ * Register (NCB) gti_cwd_int
+ *
+ * GTI Per-core Watchdog Interrupt Register
+ * Generic timer per-core watchdog interrupts.
+ */
+union bdk_gti_cwd_int
+{
+ uint64_t u;
+ struct bdk_gti_cwd_int_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_54_63 : 10;
+ uint64_t core : 54; /**< [ 53: 0](R/W1C/H) Per-core watchdog interrupt. */
+#else /* Word 0 - Little Endian */
+ uint64_t core : 54; /**< [ 53: 0](R/W1C/H) Per-core watchdog interrupt. */
+ uint64_t reserved_54_63 : 10;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_gti_cwd_int_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t core : 48; /**< [ 47: 0](R/W1C/H) Per-core watchdog interrupt. */
+#else /* Word 0 - Little Endian */
+ uint64_t core : 48; /**< [ 47: 0](R/W1C/H) Per-core watchdog interrupt. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_gti_cwd_int_s cn9; */
+};
+typedef union bdk_gti_cwd_int bdk_gti_cwd_int_t;
+
+#define BDK_GTI_CWD_INT BDK_GTI_CWD_INT_FUNC()
+static inline uint64_t BDK_GTI_CWD_INT_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CWD_INT_FUNC(void)
+{
+ return 0x844000040200ll;
+}
+
+#define typedef_BDK_GTI_CWD_INT bdk_gti_cwd_int_t
+#define bustype_BDK_GTI_CWD_INT BDK_CSR_TYPE_NCB
+#define basename_BDK_GTI_CWD_INT "GTI_CWD_INT"
+#define device_bar_BDK_GTI_CWD_INT 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CWD_INT 0
+#define arguments_BDK_GTI_CWD_INT -1,-1,-1,-1
+
+/**
+ * Register (NCB) gti_cwd_int_ena_clr
+ *
+ * GTI Per-core Watchdog Interrupt Enable Clear Register
+ * This register clears interrupt enable bits.
+ */
+union bdk_gti_cwd_int_ena_clr
+{
+ uint64_t u;
+ struct bdk_gti_cwd_int_ena_clr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_54_63 : 10;
+ uint64_t core : 54; /**< [ 53: 0](R/W1C/H) Reads or clears enable for GTI_CWD_INT[CORE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t core : 54; /**< [ 53: 0](R/W1C/H) Reads or clears enable for GTI_CWD_INT[CORE]. */
+ uint64_t reserved_54_63 : 10;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_gti_cwd_int_ena_clr_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t core : 48; /**< [ 47: 0](R/W1C/H) Reads or clears enable for GTI_CWD_INT[CORE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t core : 48; /**< [ 47: 0](R/W1C/H) Reads or clears enable for GTI_CWD_INT[CORE]. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_gti_cwd_int_ena_clr_s cn9; */
+};
+typedef union bdk_gti_cwd_int_ena_clr bdk_gti_cwd_int_ena_clr_t;
+
+#define BDK_GTI_CWD_INT_ENA_CLR BDK_GTI_CWD_INT_ENA_CLR_FUNC()
+static inline uint64_t BDK_GTI_CWD_INT_ENA_CLR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CWD_INT_ENA_CLR_FUNC(void)
+{
+ return 0x844000040210ll;
+}
+
+#define typedef_BDK_GTI_CWD_INT_ENA_CLR bdk_gti_cwd_int_ena_clr_t
+#define bustype_BDK_GTI_CWD_INT_ENA_CLR BDK_CSR_TYPE_NCB
+#define basename_BDK_GTI_CWD_INT_ENA_CLR "GTI_CWD_INT_ENA_CLR"
+#define device_bar_BDK_GTI_CWD_INT_ENA_CLR 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CWD_INT_ENA_CLR 0
+#define arguments_BDK_GTI_CWD_INT_ENA_CLR -1,-1,-1,-1
+
+/**
+ * Register (NCB) gti_cwd_int_ena_set
+ *
+ * GTI Per-core Watchdog Interrupt Enable Set Register
+ * This register sets interrupt enable bits.
+ */
+union bdk_gti_cwd_int_ena_set
+{
+ uint64_t u;
+ struct bdk_gti_cwd_int_ena_set_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_54_63 : 10;
+ uint64_t core : 54; /**< [ 53: 0](R/W1S/H) Reads or sets enable for GTI_CWD_INT[CORE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t core : 54; /**< [ 53: 0](R/W1S/H) Reads or sets enable for GTI_CWD_INT[CORE]. */
+ uint64_t reserved_54_63 : 10;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_gti_cwd_int_ena_set_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t core : 48; /**< [ 47: 0](R/W1S/H) Reads or sets enable for GTI_CWD_INT[CORE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t core : 48; /**< [ 47: 0](R/W1S/H) Reads or sets enable for GTI_CWD_INT[CORE]. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_gti_cwd_int_ena_set_s cn9; */
+};
+typedef union bdk_gti_cwd_int_ena_set bdk_gti_cwd_int_ena_set_t;
+
+#define BDK_GTI_CWD_INT_ENA_SET BDK_GTI_CWD_INT_ENA_SET_FUNC()
+static inline uint64_t BDK_GTI_CWD_INT_ENA_SET_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CWD_INT_ENA_SET_FUNC(void)
+{
+ return 0x844000040218ll;
+}
+
+#define typedef_BDK_GTI_CWD_INT_ENA_SET bdk_gti_cwd_int_ena_set_t
+#define bustype_BDK_GTI_CWD_INT_ENA_SET BDK_CSR_TYPE_NCB
+#define basename_BDK_GTI_CWD_INT_ENA_SET "GTI_CWD_INT_ENA_SET"
+#define device_bar_BDK_GTI_CWD_INT_ENA_SET 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CWD_INT_ENA_SET 0
+#define arguments_BDK_GTI_CWD_INT_ENA_SET -1,-1,-1,-1
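+
+/* Illustrative sketch (editor's addition, not part of the imported BDK
+ * sources): the *_ENA_SET/*_ENA_CLR register pairs above follow the usual
+ * write-1-to-set / write-1-to-clear convention, so enabling the watchdog
+ * interrupt for a single core is one W1S write. Flat node-0 MMIO is
+ * assumed. */
+static inline void example_enable_cwd_int(unsigned int core)
+{
+ volatile uint64_t *ena_set = (volatile uint64_t *)BDK_GTI_CWD_INT_ENA_SET;
+ *ena_set = 1ull << core; /* W1S: sets only this core's enable bit */
+}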
+
+/**
+ * Register (NCB) gti_cwd_int_set
+ *
+ * GTI Per-core Watchdog Interrupt Set Register
+ * This register sets interrupt bits.
+ */
+union bdk_gti_cwd_int_set
+{
+ uint64_t u;
+ struct bdk_gti_cwd_int_set_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_54_63 : 10;
+ uint64_t core : 54; /**< [ 53: 0](R/W1S/H) Reads or sets GTI_CWD_INT[CORE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t core : 54; /**< [ 53: 0](R/W1S/H) Reads or sets GTI_CWD_INT[CORE]. */
+ uint64_t reserved_54_63 : 10;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_gti_cwd_int_set_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t core : 48; /**< [ 47: 0](R/W1S/H) Reads or sets GTI_CWD_INT[CORE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t core : 48; /**< [ 47: 0](R/W1S/H) Reads or sets GTI_CWD_INT[CORE]. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_gti_cwd_int_set_s cn9; */
+};
+typedef union bdk_gti_cwd_int_set bdk_gti_cwd_int_set_t;
+
+#define BDK_GTI_CWD_INT_SET BDK_GTI_CWD_INT_SET_FUNC()
+static inline uint64_t BDK_GTI_CWD_INT_SET_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CWD_INT_SET_FUNC(void)
+{
+ return 0x844000040208ll;
+}
+
+#define typedef_BDK_GTI_CWD_INT_SET bdk_gti_cwd_int_set_t
+#define bustype_BDK_GTI_CWD_INT_SET BDK_CSR_TYPE_NCB
+#define basename_BDK_GTI_CWD_INT_SET "GTI_CWD_INT_SET"
+#define device_bar_BDK_GTI_CWD_INT_SET 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CWD_INT_SET 0
+#define arguments_BDK_GTI_CWD_INT_SET -1,-1,-1,-1
+
+/**
+ * Register (NCB) gti_cwd_poke#
+ *
+ * GTI Per-core Watchdog Poke Registers
+ * Per-core watchdog poke. Writing any value to this register does the following:
+ * * Clears any pending interrupt generated by the associated watchdog.
+ * * Resets GTI_CWD_WDOG()[STATE] to 0x0.
+ * * Sets GTI_CWD_WDOG()[CNT] to (GTI_CWD_WDOG()[LEN] \<\< 8).
+ *
+ * Reading this register returns the associated GTI_CWD_WDOG() register.
+ */
+union bdk_gti_cwd_pokex
+{
+ uint64_t u;
+ struct bdk_gti_cwd_pokex_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t zero : 18; /**< [ 63: 46](WO) Reserved. */
+ uint64_t gstop : 1; /**< [ 45: 45](RO/H) Global-stop enable. */
+ uint64_t dstop : 1; /**< [ 44: 44](RO/H) Debug-stop enable. */
+ uint64_t cnt : 24; /**< [ 43: 20](RO/H) Number of 1024-cycle intervals until next watchdog expiration. Set on write to
+ associated GTI_CWD_POKE(). */
+ uint64_t len : 16; /**< [ 19: 4](RO/H) Watchdog time-expiration length. The most-significant 16 bits of a 24-bit value to be
+ decremented every 1024 cycles. */
+ uint64_t state : 2; /**< [ 3: 2](RO/H) Watchdog state. The number of watchdog time expirations since last core poke. Cleared on
+ write to associated GTI_CWD_POKE(). */
+ uint64_t mode : 2; /**< [ 1: 0](RO/H) Watchdog mode:
+ 0x0 = Off.
+ 0x1 = Interrupt only.
+ 0x2 = Interrupt + DEL3T.
+ 0x3 = Interrupt + DEL3T + soft reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t mode : 2; /**< [ 1: 0](RO/H) Watchdog mode:
+ 0x0 = Off.
+ 0x1 = Interrupt only.
+ 0x2 = Interrupt + DEL3T.
+ 0x3 = Interrupt + DEL3T + soft reset. */
+ uint64_t state : 2; /**< [ 3: 2](RO/H) Watchdog state. The number of watchdog time expirations since last core poke. Cleared on
+ write to associated GTI_CWD_POKE(). */
+ uint64_t len : 16; /**< [ 19: 4](RO/H) Watchdog time-expiration length. The most-significant 16 bits of a 24-bit value to be
+ decremented every 1024 cycles. */
+ uint64_t cnt : 24; /**< [ 43: 20](RO/H) Number of 1024-cycle intervals until next watchdog expiration. Set on write to
+ associated GTI_CWD_POKE(). */
+ uint64_t dstop : 1; /**< [ 44: 44](RO/H) Debug-stop enable. */
+ uint64_t gstop : 1; /**< [ 45: 45](RO/H) Global-stop enable. */
+ uint64_t zero : 18; /**< [ 63: 46](WO) Reserved. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_cwd_pokex_s cn; */
+};
+typedef union bdk_gti_cwd_pokex bdk_gti_cwd_pokex_t;
+
+static inline uint64_t BDK_GTI_CWD_POKEX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CWD_POKEX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX) && (a<=47))
+ return 0x844000050000ll + 8ll * ((a) & 0x3f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=53))
+ return 0x844000050000ll + 8ll * ((a) & 0x3f);
+ __bdk_csr_fatal("GTI_CWD_POKEX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_CWD_POKEX(a) bdk_gti_cwd_pokex_t
+#define bustype_BDK_GTI_CWD_POKEX(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_GTI_CWD_POKEX(a) "GTI_CWD_POKEX"
+#define device_bar_BDK_GTI_CWD_POKEX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CWD_POKEX(a) (a)
+#define arguments_BDK_GTI_CWD_POKEX(a) (a),-1,-1,-1
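+
+/* Illustrative sketch (editor's addition, not part of the imported BDK
+ * sources): kicking the per-core watchdog. As described above, a write of
+ * any value to GTI_CWD_POKE(core) clears a pending watchdog interrupt,
+ * resets STATE, and reloads CNT from LEN. Flat node-0 MMIO is assumed. */
+static inline void example_poke_core_watchdog(unsigned long core)
+{
+ volatile uint64_t *poke = (volatile uint64_t *)BDK_GTI_CWD_POKEX(core);
+ *poke = 0; /* the written value is ignored; the write itself is the poke */
+}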
+
+/**
+ * Register (NCB) gti_cwd_wdog#
+ *
+ * GTI Per-core Watchdog Registers
+ */
+union bdk_gti_cwd_wdogx
+{
+ uint64_t u;
+ struct bdk_gti_cwd_wdogx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_46_63 : 18;
+ uint64_t gstop : 1; /**< [ 45: 45](R/W) Global-stop enable. */
+ uint64_t dstop : 1; /**< [ 44: 44](R/W) Debug-stop enable. */
+ uint64_t cnt : 24; /**< [ 43: 20](R/W/H) Number of 1024-cycle intervals until next watchdog expiration. Set on write to
+ associated GTI_CWD_POKE().
+
+ Typically on each write to GTI_CWD_WDOG(), [CNT] should be set to [LEN] * 256. */
+ uint64_t len : 16; /**< [ 19: 4](R/W) Watchdog time-expiration length. The most-significant 16 bits of a 24-bit value to be
+ decremented every 1024 cycles. */
+ uint64_t state : 2; /**< [ 3: 2](R/W/H) Watchdog state. The number of watchdog time expirations since last core poke. Cleared on
+ write to associated GTI_CWD_POKE(). */
+ uint64_t mode : 2; /**< [ 1: 0](R/W) Watchdog mode:
+ 0x0 = Off.
+ 0x1 = Interrupt only.
+ 0x2 = Interrupt + DEL3T.
+ 0x3 = Interrupt + DEL3T + soft reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t mode : 2; /**< [ 1: 0](R/W) Watchdog mode:
+ 0x0 = Off.
+ 0x1 = Interrupt only.
+ 0x2 = Interrupt + DEL3T.
+ 0x3 = Interrupt + DEL3T + soft reset. */
+ uint64_t state : 2; /**< [ 3: 2](R/W/H) Watchdog state. The number of watchdog time expirations since last core poke. Cleared on
+ write to associated GTI_CWD_POKE(). */
+ uint64_t len : 16; /**< [ 19: 4](R/W) Watchdog time-expiration length. The most-significant 16 bits of a 24-bit value to be
+ decremented every 1024 cycles. */
+ uint64_t cnt : 24; /**< [ 43: 20](R/W/H) Number of 1024-cycle intervals until next watchdog expiration. Set on write to
+ associated GTI_CWD_POKE().
+
+ Typically on each write to GTI_CWD_WDOG(), [CNT] should be set to [LEN] * 256. */
+ uint64_t dstop : 1; /**< [ 44: 44](R/W) Debug-stop enable. */
+ uint64_t gstop : 1; /**< [ 45: 45](R/W) Global-stop enable. */
+ uint64_t reserved_46_63 : 18;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_cwd_wdogx_s cn8; */
+ struct bdk_gti_cwd_wdogx_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_46_63 : 18;
+ uint64_t gstop : 1; /**< [ 45: 45](R/W) Global-stop enable. */
+ uint64_t dstop : 1; /**< [ 44: 44](R/W) Debug-stop enable. */
+ uint64_t cnt : 24; /**< [ 43: 20](R/W/H) Number of one-microsecond intervals until next watchdog expiration.
+ Set on write to associated GTI_CWD_POKE().
+ Typically on each write to GTI_CWD_WDOG(), [CNT] should be set to [LEN] * 256. */
+ uint64_t len : 16; /**< [ 19: 4](R/W) Watchdog time-expiration length. The most-significant 16 bits of a 24-bit value to be
+ decremented every microsecond. */
+ uint64_t state : 2; /**< [ 3: 2](R/W/H) Watchdog state. The number of watchdog time expirations since last core poke. Cleared on
+ write to associated GTI_CWD_POKE(). */
+ uint64_t mode : 2; /**< [ 1: 0](R/W) Watchdog mode:
+ 0x0 = Off.
+ 0x1 = Interrupt only.
+ 0x2 = Interrupt + DEL3T.
+ 0x3 = Interrupt + DEL3T + soft reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t mode : 2; /**< [ 1: 0](R/W) Watchdog mode:
+ 0x0 = Off.
+ 0x1 = Interrupt only.
+ 0x2 = Interrupt + DEL3T.
+ 0x3 = Interrupt + DEL3T + soft reset. */
+ uint64_t state : 2; /**< [ 3: 2](R/W/H) Watchdog state. The number of watchdog time expirations since last core poke. Cleared on
+ write to associated GTI_CWD_POKE(). */
+ uint64_t len : 16; /**< [ 19: 4](R/W) Watchdog time-expiration length. The most-significant 16 bits of a 24-bit value to be
+ decremented every microsecond. */
+ uint64_t cnt : 24; /**< [ 43: 20](R/W/H) Number of one-microsecond intervals until next watchdog expiration.
+ Set on write to associated GTI_CWD_POKE().
+ Typically on each write to GTI_CWD_WDOG(), [CNT] should be set to [LEN] * 256. */
+ uint64_t dstop : 1; /**< [ 44: 44](R/W) Debug-stop enable. */
+ uint64_t gstop : 1; /**< [ 45: 45](R/W) Global-stop enable. */
+ uint64_t reserved_46_63 : 18;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_gti_cwd_wdogx bdk_gti_cwd_wdogx_t;
+
+static inline uint64_t BDK_GTI_CWD_WDOGX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_CWD_WDOGX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX) && (a<=47))
+ return 0x844000040000ll + 8ll * ((a) & 0x3f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=53))
+ return 0x844000040000ll + 8ll * ((a) & 0x3f);
+ __bdk_csr_fatal("GTI_CWD_WDOGX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_CWD_WDOGX(a) bdk_gti_cwd_wdogx_t
+#define bustype_BDK_GTI_CWD_WDOGX(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_GTI_CWD_WDOGX(a) "GTI_CWD_WDOGX"
+#define device_bar_BDK_GTI_CWD_WDOGX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_CWD_WDOGX(a) (a)
+#define arguments_BDK_GTI_CWD_WDOGX(a) (a),-1,-1,-1
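+
+/* Illustrative sketch (editor's addition, not part of the imported BDK
+ * sources): arming the per-core watchdog. [LEN] holds the upper 16 bits of
+ * the 24-bit expiration count, so after a poke the effective count is
+ * LEN << 8 ticks (1024-cycle intervals on CN8, microseconds on CN9).
+ * Mode 0x3 selects the full interrupt -> DEL3T -> soft-reset escalation. */
+static inline void example_arm_core_watchdog(unsigned long core, uint16_t len)
+{
+ bdk_gti_cwd_wdogx_t wdog;
+ wdog.u = 0;
+ wdog.s.mode = 0x3; /* interrupt + DEL3T + soft reset */
+ wdog.s.len = len; /* expiration = len << 8 ticks */
+ *(volatile uint64_t *)BDK_GTI_CWD_WDOGX(core) = wdog.u;
+ /* A subsequent write to GTI_CWD_POKE(core) loads CNT with LEN << 8. */
+}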
+
+/**
+ * Register (NCB) gti_err_bist_status
+ *
+ * GTI BIST Status Register
+ */
+union bdk_gti_err_bist_status
+{
+ uint64_t u;
+ struct bdk_gti_err_bist_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t ram : 2; /**< [ 1: 0](RO/H) BIST status. */
+#else /* Word 0 - Little Endian */
+ uint64_t ram : 2; /**< [ 1: 0](RO/H) BIST status. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_err_bist_status_s cn; */
+};
+typedef union bdk_gti_err_bist_status bdk_gti_err_bist_status_t;
+
+#define BDK_GTI_ERR_BIST_STATUS BDK_GTI_ERR_BIST_STATUS_FUNC()
+static inline uint64_t BDK_GTI_ERR_BIST_STATUS_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_ERR_BIST_STATUS_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x8440000f0030ll;
+ __bdk_csr_fatal("GTI_ERR_BIST_STATUS", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_ERR_BIST_STATUS bdk_gti_err_bist_status_t
+#define bustype_BDK_GTI_ERR_BIST_STATUS BDK_CSR_TYPE_NCB
+#define basename_BDK_GTI_ERR_BIST_STATUS "GTI_ERR_BIST_STATUS"
+#define device_bar_BDK_GTI_ERR_BIST_STATUS 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_ERR_BIST_STATUS 0
+#define arguments_BDK_GTI_ERR_BIST_STATUS -1,-1,-1,-1
+
+/**
+ * Register (NCB) gti_err_ecc_disable
+ *
+ * GTI ECC Disable Register
+ */
+union bdk_gti_err_ecc_disable
+{
+ uint64_t u;
+ struct bdk_gti_err_ecc_disable_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t ram : 2; /**< [ 1: 0](R/W) Each bit disables correction of respective RAM. */
+#else /* Word 0 - Little Endian */
+ uint64_t ram : 2; /**< [ 1: 0](R/W) Each bit disables correction of respective RAM. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_err_ecc_disable_s cn; */
+};
+typedef union bdk_gti_err_ecc_disable bdk_gti_err_ecc_disable_t;
+
+#define BDK_GTI_ERR_ECC_DISABLE BDK_GTI_ERR_ECC_DISABLE_FUNC()
+static inline uint64_t BDK_GTI_ERR_ECC_DISABLE_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_ERR_ECC_DISABLE_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x8440000f0020ll;
+ __bdk_csr_fatal("GTI_ERR_ECC_DISABLE", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_ERR_ECC_DISABLE bdk_gti_err_ecc_disable_t
+#define bustype_BDK_GTI_ERR_ECC_DISABLE BDK_CSR_TYPE_NCB
+#define basename_BDK_GTI_ERR_ECC_DISABLE "GTI_ERR_ECC_DISABLE"
+#define device_bar_BDK_GTI_ERR_ECC_DISABLE 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_ERR_ECC_DISABLE 0
+#define arguments_BDK_GTI_ERR_ECC_DISABLE -1,-1,-1,-1
+
+/**
+ * Register (NCB) gti_err_ecc_flip
+ *
+ * GTI ECC Flip Register
+ */
+union bdk_gti_err_ecc_flip
+{
+ uint64_t u;
+ struct bdk_gti_err_ecc_flip_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_34_63 : 30;
+ uint64_t ramx : 2; /**< [ 33: 32](R/W) Each bit flips a second bit of syndrome in the respective RAM. */
+ uint64_t reserved_2_31 : 30;
+ uint64_t ram : 2; /**< [ 1: 0](R/W) Each bit flips the first bit of syndrome in the respective RAM. */
+#else /* Word 0 - Little Endian */
+ uint64_t ram : 2; /**< [ 1: 0](R/W) Each bit flips the first bit of syndrome in the respective RAM. */
+ uint64_t reserved_2_31 : 30;
+ uint64_t ramx : 2; /**< [ 33: 32](R/W) Each bit flips a second bit of syndrome in the respective RAM. */
+ uint64_t reserved_34_63 : 30;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_err_ecc_flip_s cn; */
+};
+typedef union bdk_gti_err_ecc_flip bdk_gti_err_ecc_flip_t;
+
+#define BDK_GTI_ERR_ECC_FLIP BDK_GTI_ERR_ECC_FLIP_FUNC()
+static inline uint64_t BDK_GTI_ERR_ECC_FLIP_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_ERR_ECC_FLIP_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x8440000f0028ll;
+ __bdk_csr_fatal("GTI_ERR_ECC_FLIP", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_ERR_ECC_FLIP bdk_gti_err_ecc_flip_t
+#define bustype_BDK_GTI_ERR_ECC_FLIP BDK_CSR_TYPE_NCB
+#define basename_BDK_GTI_ERR_ECC_FLIP "GTI_ERR_ECC_FLIP"
+#define device_bar_BDK_GTI_ERR_ECC_FLIP 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_ERR_ECC_FLIP 0
+#define arguments_BDK_GTI_ERR_ECC_FLIP -1,-1,-1,-1
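+
+/* Illustrative sketch (editor's addition, not part of the imported BDK
+ * sources): ECC error injection on CN8-only parts. Setting a RAM's bit in
+ * GTI_ERR_ECC_FLIP[RAM] corrupts one syndrome bit, which should surface as
+ * a single-bit error in GTI_ERR_INT[SBE]; setting the matching [RAMX] bit
+ * as well injects a double-bit (DBE) error. Flat node-0 MMIO is assumed. */
+static inline void example_inject_gti_sbe(unsigned int ram /* 0 or 1 */)
+{
+ volatile uint64_t *flip = (volatile uint64_t *)BDK_GTI_ERR_ECC_FLIP;
+ *flip = 1ull << (ram & 1); /* flip the first syndrome bit of that RAM */
+}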
+
+/**
+ * Register (NCB) gti_err_int
+ *
+ * INTERNAL: GTI Error Interrupt Register
+ */
+union bdk_gti_err_int
+{
+ uint64_t u;
+ struct bdk_gti_err_int_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_34_63 : 30;
+ uint64_t dbe : 2; /**< [ 33: 32](R/W1C/H) Double bit error. */
+ uint64_t reserved_2_31 : 30;
+ uint64_t sbe : 2; /**< [ 1: 0](R/W1C/H) Single bit error. */
+#else /* Word 0 - Little Endian */
+ uint64_t sbe : 2; /**< [ 1: 0](R/W1C/H) Single bit error. */
+ uint64_t reserved_2_31 : 30;
+ uint64_t dbe : 2; /**< [ 33: 32](R/W1C/H) Double bit error. */
+ uint64_t reserved_34_63 : 30;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_err_int_s cn8; */
+ struct bdk_gti_err_int_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_34_63 : 30;
+ uint64_t dbe : 2; /**< [ 33: 32](RAZ) Reserved; for backwards compatibility. */
+ uint64_t reserved_2_31 : 30;
+ uint64_t sbe : 2; /**< [ 1: 0](RAZ) Reserved; for backwards compatibility. */
+#else /* Word 0 - Little Endian */
+ uint64_t sbe : 2; /**< [ 1: 0](RAZ) Reserved; for backwards compatibility. */
+ uint64_t reserved_2_31 : 30;
+ uint64_t dbe : 2; /**< [ 33: 32](RAZ) Reserved; for backwards compatibility. */
+ uint64_t reserved_34_63 : 30;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_gti_err_int bdk_gti_err_int_t;
+
+#define BDK_GTI_ERR_INT BDK_GTI_ERR_INT_FUNC()
+static inline uint64_t BDK_GTI_ERR_INT_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_ERR_INT_FUNC(void)
+{
+ return 0x8440000f0000ll;
+}
+
+#define typedef_BDK_GTI_ERR_INT bdk_gti_err_int_t
+#define bustype_BDK_GTI_ERR_INT BDK_CSR_TYPE_NCB
+#define basename_BDK_GTI_ERR_INT "GTI_ERR_INT"
+#define device_bar_BDK_GTI_ERR_INT 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_ERR_INT 0
+#define arguments_BDK_GTI_ERR_INT -1,-1,-1,-1
+
+/**
+ * Register (NCB) gti_err_int_ena_clr
+ *
+ * INTERNAL: GTI Error Interrupt Enable Clear Register
+ */
+union bdk_gti_err_int_ena_clr
+{
+ uint64_t u;
+ struct bdk_gti_err_int_ena_clr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_34_63 : 30;
+ uint64_t dbe : 2; /**< [ 33: 32](R/W1C/H) Reads or clears enable for GTI_ERR_INT[DBE]. */
+ uint64_t reserved_2_31 : 30;
+ uint64_t sbe : 2; /**< [ 1: 0](R/W1C/H) Reads or clears enable for GTI_ERR_INT[SBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t sbe : 2; /**< [ 1: 0](R/W1C/H) Reads or clears enable for GTI_ERR_INT[SBE]. */
+ uint64_t reserved_2_31 : 30;
+ uint64_t dbe : 2; /**< [ 33: 32](R/W1C/H) Reads or clears enable for GTI_ERR_INT[DBE]. */
+ uint64_t reserved_34_63 : 30;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_err_int_ena_clr_s cn8; */
+ struct bdk_gti_err_int_ena_clr_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_34_63 : 30;
+ uint64_t dbe : 2; /**< [ 33: 32](RAZ) Reserved; for backwards compatibility. */
+ uint64_t reserved_2_31 : 30;
+ uint64_t sbe : 2; /**< [ 1: 0](RAZ) Reserved; for backwards compatibility. */
+#else /* Word 0 - Little Endian */
+ uint64_t sbe : 2; /**< [ 1: 0](RAZ) Reserved; for backwards compatibility. */
+ uint64_t reserved_2_31 : 30;
+ uint64_t dbe : 2; /**< [ 33: 32](RAZ) Reserved; for backwards compatibility. */
+ uint64_t reserved_34_63 : 30;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_gti_err_int_ena_clr bdk_gti_err_int_ena_clr_t;
+
+#define BDK_GTI_ERR_INT_ENA_CLR BDK_GTI_ERR_INT_ENA_CLR_FUNC()
+static inline uint64_t BDK_GTI_ERR_INT_ENA_CLR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_ERR_INT_ENA_CLR_FUNC(void)
+{
+ return 0x8440000f0010ll;
+}
+
+#define typedef_BDK_GTI_ERR_INT_ENA_CLR bdk_gti_err_int_ena_clr_t
+#define bustype_BDK_GTI_ERR_INT_ENA_CLR BDK_CSR_TYPE_NCB
+#define basename_BDK_GTI_ERR_INT_ENA_CLR "GTI_ERR_INT_ENA_CLR"
+#define device_bar_BDK_GTI_ERR_INT_ENA_CLR 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_ERR_INT_ENA_CLR 0
+#define arguments_BDK_GTI_ERR_INT_ENA_CLR -1,-1,-1,-1
+
+/**
+ * Register (NCB) gti_err_int_ena_set
+ *
+ * INTERNAL: GTI Error Interrupt Enable Set Register
+ */
+union bdk_gti_err_int_ena_set
+{
+ uint64_t u;
+ struct bdk_gti_err_int_ena_set_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_34_63 : 30;
+ uint64_t dbe : 2; /**< [ 33: 32](R/W1S/H) Reads or sets enable for GTI_ERR_INT[DBE]. */
+ uint64_t reserved_2_31 : 30;
+ uint64_t sbe : 2; /**< [ 1: 0](R/W1S/H) Reads or sets enable for GTI_ERR_INT[SBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t sbe : 2; /**< [ 1: 0](R/W1S/H) Reads or sets enable for GTI_ERR_INT[SBE]. */
+ uint64_t reserved_2_31 : 30;
+ uint64_t dbe : 2; /**< [ 33: 32](R/W1S/H) Reads or sets enable for GTI_ERR_INT[DBE]. */
+ uint64_t reserved_34_63 : 30;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_err_int_ena_set_s cn8; */
+ struct bdk_gti_err_int_ena_set_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_34_63 : 30;
+ uint64_t dbe : 2; /**< [ 33: 32](RAZ) Reserved; for backwards compatibility. */
+ uint64_t reserved_2_31 : 30;
+ uint64_t sbe : 2; /**< [ 1: 0](RAZ) Reserved; for backwards compatibility. */
+#else /* Word 0 - Little Endian */
+ uint64_t sbe : 2; /**< [ 1: 0](RAZ) Reserved; for backwards compatibility. */
+ uint64_t reserved_2_31 : 30;
+ uint64_t dbe : 2; /**< [ 33: 32](RAZ) Reserved; for backwards compatibility. */
+ uint64_t reserved_34_63 : 30;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_gti_err_int_ena_set bdk_gti_err_int_ena_set_t;
+
+#define BDK_GTI_ERR_INT_ENA_SET BDK_GTI_ERR_INT_ENA_SET_FUNC()
+static inline uint64_t BDK_GTI_ERR_INT_ENA_SET_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_ERR_INT_ENA_SET_FUNC(void)
+{
+ return 0x8440000f0018ll;
+}
+
+#define typedef_BDK_GTI_ERR_INT_ENA_SET bdk_gti_err_int_ena_set_t
+#define bustype_BDK_GTI_ERR_INT_ENA_SET BDK_CSR_TYPE_NCB
+#define basename_BDK_GTI_ERR_INT_ENA_SET "GTI_ERR_INT_ENA_SET"
+#define device_bar_BDK_GTI_ERR_INT_ENA_SET 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_ERR_INT_ENA_SET 0
+#define arguments_BDK_GTI_ERR_INT_ENA_SET -1,-1,-1,-1
+
+/**
+ * Register (NCB) gti_err_int_set
+ *
+ * INTERNAL: GTI Error Interrupt Set Register
+ */
+union bdk_gti_err_int_set
+{
+ uint64_t u;
+ struct bdk_gti_err_int_set_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_34_63 : 30;
+ uint64_t dbe : 2; /**< [ 33: 32](R/W1S/H) Reads or sets GTI_ERR_INT[DBE]. */
+ uint64_t reserved_2_31 : 30;
+ uint64_t sbe : 2; /**< [ 1: 0](R/W1S/H) Reads or sets GTI_ERR_INT[SBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t sbe : 2; /**< [ 1: 0](R/W1S/H) Reads or sets GTI_ERR_INT[SBE]. */
+ uint64_t reserved_2_31 : 30;
+ uint64_t dbe : 2; /**< [ 33: 32](R/W1S/H) Reads or sets GTI_ERR_INT[DBE]. */
+ uint64_t reserved_34_63 : 30;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_err_int_set_s cn8; */
+ struct bdk_gti_err_int_set_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_34_63 : 30;
+ uint64_t dbe : 2; /**< [ 33: 32](RAZ) Reserved; for backwards compatibility. */
+ uint64_t reserved_2_31 : 30;
+ uint64_t sbe : 2; /**< [ 1: 0](RAZ) Reserved; for backwards compatibility. */
+#else /* Word 0 - Little Endian */
+ uint64_t sbe : 2; /**< [ 1: 0](RAZ) Reserved; for backwards compatibility. */
+ uint64_t reserved_2_31 : 30;
+ uint64_t dbe : 2; /**< [ 33: 32](RAZ) Reserved; for backwards compatibility. */
+ uint64_t reserved_34_63 : 30;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_gti_err_int_set bdk_gti_err_int_set_t;
+
+#define BDK_GTI_ERR_INT_SET BDK_GTI_ERR_INT_SET_FUNC()
+static inline uint64_t BDK_GTI_ERR_INT_SET_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_ERR_INT_SET_FUNC(void)
+{
+ return 0x8440000f0008ll;
+}
+
+#define typedef_BDK_GTI_ERR_INT_SET bdk_gti_err_int_set_t
+#define bustype_BDK_GTI_ERR_INT_SET BDK_CSR_TYPE_NCB
+#define basename_BDK_GTI_ERR_INT_SET "GTI_ERR_INT_SET"
+#define device_bar_BDK_GTI_ERR_INT_SET 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_ERR_INT_SET 0
+#define arguments_BDK_GTI_ERR_INT_SET -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_force_clken
+ *
+ * GTI Force Clock Enable Register
+ */
+union bdk_gti_force_clken
+{
+ uint32_t u;
+ struct bdk_gti_force_clken_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_1_31 : 31;
+ uint32_t clken : 1; /**< [ 0: 0](R/W) Force the conditional clocking within GTI to be always on. For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint32_t clken : 1; /**< [ 0: 0](R/W) Force the conditional clocking within GTI to be always on. For diagnostic use only. */
+ uint32_t reserved_1_31 : 31;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_force_clken_s cn; */
+};
+typedef union bdk_gti_force_clken bdk_gti_force_clken_t;
+
+#define BDK_GTI_FORCE_CLKEN BDK_GTI_FORCE_CLKEN_FUNC()
+static inline uint64_t BDK_GTI_FORCE_CLKEN_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_FORCE_CLKEN_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x8440000e0000ll;
+ __bdk_csr_fatal("GTI_FORCE_CLKEN", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_FORCE_CLKEN bdk_gti_force_clken_t
+#define bustype_BDK_GTI_FORCE_CLKEN BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_FORCE_CLKEN "GTI_FORCE_CLKEN"
+#define device_bar_BDK_GTI_FORCE_CLKEN 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_FORCE_CLKEN 0
+#define arguments_BDK_GTI_FORCE_CLKEN -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_imp_const
+ *
+ * GTI Implementation Constant Register
+ */
+union bdk_gti_imp_const
+{
+ uint32_t u;
+ struct bdk_gti_imp_const_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t wdogs : 8; /**< [ 7: 0](RO) The number of watchdog timers implemented. */
+#else /* Word 0 - Little Endian */
+ uint32_t wdogs : 8; /**< [ 7: 0](RO) The number of watchdog timers implemented. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_imp_const_s cn; */
+};
+typedef union bdk_gti_imp_const bdk_gti_imp_const_t;
+
+#define BDK_GTI_IMP_CONST BDK_GTI_IMP_CONST_FUNC()
+static inline uint64_t BDK_GTI_IMP_CONST_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_IMP_CONST_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x8440000e0010ll;
+ __bdk_csr_fatal("GTI_IMP_CONST", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_IMP_CONST bdk_gti_imp_const_t
+#define bustype_BDK_GTI_IMP_CONST BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_IMP_CONST "GTI_IMP_CONST"
+#define device_bar_BDK_GTI_IMP_CONST 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_IMP_CONST 0
+#define arguments_BDK_GTI_IMP_CONST -1,-1,-1,-1
+
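+/* Sketch (assumes BDK_CSR_INIT from bdk-csr.h): query how many watchdog
+ * timers the part implements. On CN8XXX this register does not exist; the
+ * GTI_WC()/GTI_WR() accessors below take indices 0..1, so two is assumed. */
+static inline int bdk_gti_num_wdogs(bdk_node_t node)
+{
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+        return 2; /* assumption based on the index range of the WC/WR frames */
+    BDK_CSR_INIT(imp, node, BDK_GTI_IMP_CONST);
+    return imp.s.wdogs;
+}
+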
+/**
+ * Register (NCB) gti_msix_pba#
+ *
+ * GTI MSI-X Pending Bit Array Registers
+ * This register is the MSI-X PBA table; the bit number is indexed by the GTI_INT_VEC_E enumeration.
+ */
+union bdk_gti_msix_pbax
+{
+ uint64_t u;
+ struct bdk_gti_msix_pbax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pend : 64; /**< [ 63: 0](RO/H) Pending message for each interrupt, enumerated by GTI_INT_VEC_E.
+ Bits that have no associated GTI_INT_VEC_E are zero. */
+#else /* Word 0 - Little Endian */
+ uint64_t pend : 64; /**< [ 63: 0](RO/H) Pending message for each interrupt, enumerated by GTI_INT_VEC_E.
+ Bits that have no associated GTI_INT_VEC_E are zero. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_msix_pbax_s cn; */
+};
+typedef union bdk_gti_msix_pbax bdk_gti_msix_pbax_t;
+
+static inline uint64_t BDK_GTI_MSIX_PBAX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_MSIX_PBAX(unsigned long a)
+{
+ if (a<=1)
+ return 0x84400f0f0000ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("GTI_MSIX_PBAX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_MSIX_PBAX(a) bdk_gti_msix_pbax_t
+#define bustype_BDK_GTI_MSIX_PBAX(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_GTI_MSIX_PBAX(a) "GTI_MSIX_PBAX"
+#define device_bar_BDK_GTI_MSIX_PBAX(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GTI_MSIX_PBAX(a) (a)
+#define arguments_BDK_GTI_MSIX_PBAX(a) (a),-1,-1,-1
+
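+/* Sketch (assumes BDK_CSR_READ from bdk-csr.h): test whether one interrupt is
+ * pending. Index 0..1 per the address function above; mapping vector n to bit
+ * (n % 64) of word (n / 64) under the GTI_INT_VEC_E numbering is the standard
+ * MSI-X PBA layout and is assumed here. */
+static inline int bdk_gti_msix_is_pending(bdk_node_t node, unsigned vec)
+{
+    uint64_t pba = BDK_CSR_READ(node, BDK_GTI_MSIX_PBAX(vec / 64));
+    return (pba >> (vec % 64)) & 1;
+}
+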
+/**
+ * Register (NCB) gti_msix_vec#_addr
+ *
+ * GTI MSI-X Vector Table Address Registers
+ * This register is the MSI-X vector table, indexed by the GTI_INT_VEC_E enumeration.
+ */
+union bdk_gti_msix_vecx_addr
+{
+ uint64_t u;
+ struct bdk_gti_msix_vecx_addr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_53_63 : 11;
+ uint64_t addr : 51; /**< [ 52: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_1 : 1;
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's GTI_MSIX_VEC()_ADDR, GTI_MSIX_VEC()_CTL, and corresponding
+ bit of GTI_MSIX_PBA() are RAZ/WI and do not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_GTI_VSEC_SCTL[MSIX_SEC] (for documentation, see PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+ set, all vectors are secure and function as if [SECVEC] was set. */
+#else /* Word 0 - Little Endian */
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's GTI_MSIX_VEC()_ADDR, GTI_MSIX_VEC()_CTL, and corresponding
+ bit of GTI_MSIX_PBA() are RAZ/WI and do not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_GTI_VSEC_SCTL[MSIX_SEC] (for documentation, see PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+ set, all vectors are secure and function as if [SECVEC] was set. */
+ uint64_t reserved_1 : 1;
+ uint64_t addr : 51; /**< [ 52: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_53_63 : 11;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_gti_msix_vecx_addr_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_1 : 1;
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's GTI_MSIX_VEC()_ADDR, GTI_MSIX_VEC()_CTL, and corresponding
+ bit of GTI_MSIX_PBA() are RAZ/WI and do not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_GTI_VSEC_SCTL[MSIX_SEC] (for documentation, see PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+ set, all vectors are secure and function as if [SECVEC] was set. */
+#else /* Word 0 - Little Endian */
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's GTI_MSIX_VEC()_ADDR, GTI_MSIX_VEC()_CTL, and corresponding
+ bit of GTI_MSIX_PBA() are RAZ/WI and do not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_GTI_VSEC_SCTL[MSIX_SEC] (for documentation, see PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+ set, all vectors are secure and function as if [SECVEC] was set. */
+ uint64_t reserved_1 : 1;
+ uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_gti_msix_vecx_addr_s cn9; */
+};
+typedef union bdk_gti_msix_vecx_addr bdk_gti_msix_vecx_addr_t;
+
+static inline uint64_t BDK_GTI_MSIX_VECX_ADDR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_MSIX_VECX_ADDR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX) && (a<=105))
+ return 0x84400f000000ll + 0x10ll * ((a) & 0x7f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=117))
+ return 0x84400f000000ll + 0x10ll * ((a) & 0x7f);
+ __bdk_csr_fatal("GTI_MSIX_VECX_ADDR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_MSIX_VECX_ADDR(a) bdk_gti_msix_vecx_addr_t
+#define bustype_BDK_GTI_MSIX_VECX_ADDR(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_GTI_MSIX_VECX_ADDR(a) "GTI_MSIX_VECX_ADDR"
+#define device_bar_BDK_GTI_MSIX_VECX_ADDR(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GTI_MSIX_VECX_ADDR(a) (a)
+#define arguments_BDK_GTI_MSIX_VECX_ADDR(a) (a),-1,-1,-1
+
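+/* Sketch (assumes BDK_CSR_WRITE from bdk-csr.h): program the delivery address
+ * for one vector. [ADDR] holds IOVA bits <52:2> (<48:2> on CN8XXX), so the
+ * IOVA passed in should be 4-byte aligned. Illustrative only. */
+static inline void bdk_gti_msix_set_addr(bdk_node_t node, unsigned vec,
+                                         uint64_t iova, int secure)
+{
+    bdk_gti_msix_vecx_addr_t va;
+    va.u = 0;
+    va.s.addr = iova >> 2;
+    va.s.secvec = !!secure; /* see the [SECVEC] access rules above */
+    BDK_CSR_WRITE(node, BDK_GTI_MSIX_VECX_ADDR(vec), va.u);
+}
+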
+/**
+ * Register (NCB) gti_msix_vec#_ctl
+ *
+ * GTI MSI-X Vector Table Control and Data Registers
+ * This register is the MSI-X vector table, indexed by the GTI_INT_VEC_E enumeration.
+ */
+union bdk_gti_msix_vecx_ctl
+{
+ uint64_t u;
+ struct bdk_gti_msix_vecx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts will be sent to this vector. */
+ uint64_t data : 32; /**< [ 31: 0](R/W) Data to use for MSI-X delivery of this vector. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 32; /**< [ 31: 0](R/W) Data to use for MSI-X delivery of this vector. */
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts will be sent to this vector. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_gti_msix_vecx_ctl_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts will be sent to this vector. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t data : 20; /**< [ 19: 0](R/W) Data to use for MSI-X delivery of this vector. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 20; /**< [ 19: 0](R/W) Data to use for MSI-X delivery of this vector. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts will be sent to this vector. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_gti_msix_vecx_ctl_s cn9; */
+};
+typedef union bdk_gti_msix_vecx_ctl bdk_gti_msix_vecx_ctl_t;
+
+static inline uint64_t BDK_GTI_MSIX_VECX_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_MSIX_VECX_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX) && (a<=105))
+ return 0x84400f000008ll + 0x10ll * ((a) & 0x7f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=117))
+ return 0x84400f000008ll + 0x10ll * ((a) & 0x7f);
+ __bdk_csr_fatal("GTI_MSIX_VECX_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_MSIX_VECX_CTL(a) bdk_gti_msix_vecx_ctl_t
+#define bustype_BDK_GTI_MSIX_VECX_CTL(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_GTI_MSIX_VECX_CTL(a) "GTI_MSIX_VECX_CTL"
+#define device_bar_BDK_GTI_MSIX_VECX_CTL(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GTI_MSIX_VECX_CTL(a) (a)
+#define arguments_BDK_GTI_MSIX_VECX_CTL(a) (a),-1,-1,-1
+
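+/* Sketch (assumes BDK_CSR_WRITE from bdk-csr.h): set the message data for a
+ * vector and optionally mask it. [DATA] is 20 bits on CN8XXX and 32 bits on
+ * CN9XXX; the common `s` view above uses the wider layout. */
+static inline void bdk_gti_msix_set_ctl(bdk_node_t node, unsigned vec,
+                                        uint32_t data, int masked)
+{
+    bdk_gti_msix_vecx_ctl_t ctl;
+    ctl.u = 0;
+    ctl.s.data = data;
+    ctl.s.mask = !!masked; /* while set, delivery of this vector is held off */
+    BDK_CSR_WRITE(node, BDK_GTI_MSIX_VECX_CTL(vec), ctl.u);
+}
+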
+/**
+ * Register (NCB32b) gti_rd_cidr0
+ *
+ * GTI Counter Read Component Identification Register 0
+ */
+union bdk_gti_rd_cidr0
+{
+ uint32_t u;
+ struct bdk_gti_rd_cidr0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_rd_cidr0_s cn; */
+};
+typedef union bdk_gti_rd_cidr0 bdk_gti_rd_cidr0_t;
+
+#define BDK_GTI_RD_CIDR0 BDK_GTI_RD_CIDR0_FUNC()
+static inline uint64_t BDK_GTI_RD_CIDR0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_RD_CIDR0_FUNC(void)
+{
+ return 0x844000010ff0ll;
+}
+
+#define typedef_BDK_GTI_RD_CIDR0 bdk_gti_rd_cidr0_t
+#define bustype_BDK_GTI_RD_CIDR0 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_RD_CIDR0 "GTI_RD_CIDR0"
+#define device_bar_BDK_GTI_RD_CIDR0 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_RD_CIDR0 0
+#define arguments_BDK_GTI_RD_CIDR0 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_rd_cidr1
+ *
+ * GTI Counter Read Component Identification Register 1
+ */
+union bdk_gti_rd_cidr1
+{
+ uint32_t u;
+ struct bdk_gti_rd_cidr1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t cclass : 4; /**< [ 7: 4](RO) Component class. */
+ uint32_t preamble : 4; /**< [ 3: 0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+ uint32_t preamble : 4; /**< [ 3: 0](RO) Preamble identification value. */
+ uint32_t cclass : 4; /**< [ 7: 4](RO) Component class. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_rd_cidr1_s cn; */
+};
+typedef union bdk_gti_rd_cidr1 bdk_gti_rd_cidr1_t;
+
+#define BDK_GTI_RD_CIDR1 BDK_GTI_RD_CIDR1_FUNC()
+static inline uint64_t BDK_GTI_RD_CIDR1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_RD_CIDR1_FUNC(void)
+{
+ return 0x844000010ff4ll;
+}
+
+#define typedef_BDK_GTI_RD_CIDR1 bdk_gti_rd_cidr1_t
+#define bustype_BDK_GTI_RD_CIDR1 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_RD_CIDR1 "GTI_RD_CIDR1"
+#define device_bar_BDK_GTI_RD_CIDR1 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_RD_CIDR1 0
+#define arguments_BDK_GTI_RD_CIDR1 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_rd_cidr2
+ *
+ * GTI Counter Read Component Identification Register 2
+ */
+union bdk_gti_rd_cidr2
+{
+ uint32_t u;
+ struct bdk_gti_rd_cidr2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_rd_cidr2_s cn; */
+};
+typedef union bdk_gti_rd_cidr2 bdk_gti_rd_cidr2_t;
+
+#define BDK_GTI_RD_CIDR2 BDK_GTI_RD_CIDR2_FUNC()
+static inline uint64_t BDK_GTI_RD_CIDR2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_RD_CIDR2_FUNC(void)
+{
+ return 0x844000010ff8ll;
+}
+
+#define typedef_BDK_GTI_RD_CIDR2 bdk_gti_rd_cidr2_t
+#define bustype_BDK_GTI_RD_CIDR2 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_RD_CIDR2 "GTI_RD_CIDR2"
+#define device_bar_BDK_GTI_RD_CIDR2 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_RD_CIDR2 0
+#define arguments_BDK_GTI_RD_CIDR2 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_rd_cidr3
+ *
+ * GTI Counter Read Component Identification Register 3
+ */
+union bdk_gti_rd_cidr3
+{
+ uint32_t u;
+ struct bdk_gti_rd_cidr3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_rd_cidr3_s cn; */
+};
+typedef union bdk_gti_rd_cidr3 bdk_gti_rd_cidr3_t;
+
+#define BDK_GTI_RD_CIDR3 BDK_GTI_RD_CIDR3_FUNC()
+static inline uint64_t BDK_GTI_RD_CIDR3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_RD_CIDR3_FUNC(void)
+{
+ return 0x844000010ffcll;
+}
+
+#define typedef_BDK_GTI_RD_CIDR3 bdk_gti_rd_cidr3_t
+#define bustype_BDK_GTI_RD_CIDR3 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_RD_CIDR3 "GTI_RD_CIDR3"
+#define device_bar_BDK_GTI_RD_CIDR3 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_RD_CIDR3 0
+#define arguments_BDK_GTI_RD_CIDR3 -1,-1,-1,-1
+
+/**
+ * Register (NCB) gti_rd_cntcv
+ *
+ * GTI Counter Read Value Register
+ */
+union bdk_gti_rd_cntcv
+{
+ uint64_t u;
+ struct bdk_gti_rd_cntcv_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t cnt : 64; /**< [ 63: 0](RO/H) System counter count value. The counter is writable with GTI_CC_CNTCV. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt : 64; /**< [ 63: 0](RO/H) System counter count value. The counter is writable with GTI_CC_CNTCV. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_rd_cntcv_s cn; */
+};
+typedef union bdk_gti_rd_cntcv bdk_gti_rd_cntcv_t;
+
+#define BDK_GTI_RD_CNTCV BDK_GTI_RD_CNTCV_FUNC()
+static inline uint64_t BDK_GTI_RD_CNTCV_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_RD_CNTCV_FUNC(void)
+{
+ return 0x844000010000ll;
+}
+
+#define typedef_BDK_GTI_RD_CNTCV bdk_gti_rd_cntcv_t
+#define bustype_BDK_GTI_RD_CNTCV BDK_CSR_TYPE_NCB
+#define basename_BDK_GTI_RD_CNTCV "GTI_RD_CNTCV"
+#define device_bar_BDK_GTI_RD_CNTCV 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_RD_CNTCV 0
+#define arguments_BDK_GTI_RD_CNTCV -1,-1,-1,-1
+
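+/* Sketch (assumes BDK_CSR_READ from bdk-csr.h): sample the free-running
+ * system counter through this read-only alias; the count itself is written
+ * via GTI_CC_CNTCV, as noted in the field description above. */
+static inline uint64_t bdk_gti_read_cntcv(bdk_node_t node)
+{
+    return BDK_CSR_READ(node, BDK_GTI_RD_CNTCV);
+}
+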
+/**
+ * Register (NCB32b) gti_rd_pidr0
+ *
+ * GTI Counter Read Peripheral Identification Register 0
+ */
+union bdk_gti_rd_pidr0
+{
+ uint32_t u;
+ struct bdk_gti_rd_pidr0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t partnum0 : 8; /**< [ 7: 0](RO) Part number \<7:0\>. Indicates PCC_PIDR_PARTNUM0_E::GTI_RD. */
+#else /* Word 0 - Little Endian */
+ uint32_t partnum0 : 8; /**< [ 7: 0](RO) Part number \<7:0\>. Indicates PCC_PIDR_PARTNUM0_E::GTI_RD. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_rd_pidr0_s cn; */
+};
+typedef union bdk_gti_rd_pidr0 bdk_gti_rd_pidr0_t;
+
+#define BDK_GTI_RD_PIDR0 BDK_GTI_RD_PIDR0_FUNC()
+static inline uint64_t BDK_GTI_RD_PIDR0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_RD_PIDR0_FUNC(void)
+{
+ return 0x844000010fe0ll;
+}
+
+#define typedef_BDK_GTI_RD_PIDR0 bdk_gti_rd_pidr0_t
+#define bustype_BDK_GTI_RD_PIDR0 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_RD_PIDR0 "GTI_RD_PIDR0"
+#define device_bar_BDK_GTI_RD_PIDR0 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_RD_PIDR0 0
+#define arguments_BDK_GTI_RD_PIDR0 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_rd_pidr1
+ *
+ * GTI Counter Read Peripheral Identification Register 1
+ */
+union bdk_gti_rd_pidr1
+{
+ uint32_t u;
+ struct bdk_gti_rd_pidr1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t idcode : 4; /**< [ 7: 4](RO) JEP106 identification code \<3:0\>. Cavium code is 0x4C. */
+ uint32_t partnum1 : 4; /**< [ 3: 0](RO) Part number \<11:8\>. Indicates PCC_PIDR_PARTNUM1_E::COMP. */
+#else /* Word 0 - Little Endian */
+ uint32_t partnum1 : 4; /**< [ 3: 0](RO) Part number \<11:8\>. Indicates PCC_PIDR_PARTNUM1_E::COMP. */
+ uint32_t idcode : 4; /**< [ 7: 4](RO) JEP106 identification code \<3:0\>. Cavium code is 0x4C. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_rd_pidr1_s cn; */
+};
+typedef union bdk_gti_rd_pidr1 bdk_gti_rd_pidr1_t;
+
+#define BDK_GTI_RD_PIDR1 BDK_GTI_RD_PIDR1_FUNC()
+static inline uint64_t BDK_GTI_RD_PIDR1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_RD_PIDR1_FUNC(void)
+{
+ return 0x844000010fe4ll;
+}
+
+#define typedef_BDK_GTI_RD_PIDR1 bdk_gti_rd_pidr1_t
+#define bustype_BDK_GTI_RD_PIDR1 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_RD_PIDR1 "GTI_RD_PIDR1"
+#define device_bar_BDK_GTI_RD_PIDR1 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_RD_PIDR1 0
+#define arguments_BDK_GTI_RD_PIDR1 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_rd_pidr2
+ *
+ * GTI Counter Read Peripheral Identification Register 2
+ */
+union bdk_gti_rd_pidr2
+{
+ uint32_t u;
+ struct bdk_gti_rd_pidr2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t revision : 4; /**< [ 7: 4](RO) Architectural revision, as assigned by ARM. */
+ uint32_t jedec : 1; /**< [ 3: 3](RO) JEDEC assigned. */
+ uint32_t idcode : 3; /**< [ 2: 0](RO) JEP106 identification code \<6:4\>. Cavium code is 0x4C. */
+#else /* Word 0 - Little Endian */
+ uint32_t idcode : 3; /**< [ 2: 0](RO) JEP106 identification code \<6:4\>. Cavium code is 0x4C. */
+ uint32_t jedec : 1; /**< [ 3: 3](RO) JEDEC assigned. */
+ uint32_t revision : 4; /**< [ 7: 4](RO) Architectural revision, as assigned by ARM. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_rd_pidr2_s cn; */
+};
+typedef union bdk_gti_rd_pidr2 bdk_gti_rd_pidr2_t;
+
+#define BDK_GTI_RD_PIDR2 BDK_GTI_RD_PIDR2_FUNC()
+static inline uint64_t BDK_GTI_RD_PIDR2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_RD_PIDR2_FUNC(void)
+{
+ return 0x844000010fe8ll;
+}
+
+#define typedef_BDK_GTI_RD_PIDR2 bdk_gti_rd_pidr2_t
+#define bustype_BDK_GTI_RD_PIDR2 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_RD_PIDR2 "GTI_RD_PIDR2"
+#define device_bar_BDK_GTI_RD_PIDR2 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_RD_PIDR2 0
+#define arguments_BDK_GTI_RD_PIDR2 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_rd_pidr3
+ *
+ * GTI Counter Read Peripheral Identification Register 3
+ */
+union bdk_gti_rd_pidr3
+{
+ uint32_t u;
+ struct bdk_gti_rd_pidr3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t revand : 4; /**< [ 7: 4](RO) Manufacturer revision number. For CNXXXX always 0x0. */
+ uint32_t cust : 4; /**< [ 3: 0](RO) Customer modified. 0x1 = Overall product information should be consulted for
+ product, major and minor pass numbers. */
+#else /* Word 0 - Little Endian */
+ uint32_t cust : 4; /**< [ 3: 0](RO) Customer modified. 0x1 = Overall product information should be consulted for
+ product, major and minor pass numbers. */
+ uint32_t revand : 4; /**< [ 7: 4](RO) Manufacturer revision number. For CNXXXX always 0x0. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_rd_pidr3_s cn; */
+};
+typedef union bdk_gti_rd_pidr3 bdk_gti_rd_pidr3_t;
+
+#define BDK_GTI_RD_PIDR3 BDK_GTI_RD_PIDR3_FUNC()
+static inline uint64_t BDK_GTI_RD_PIDR3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_RD_PIDR3_FUNC(void)
+{
+ return 0x844000010fecll;
+}
+
+#define typedef_BDK_GTI_RD_PIDR3 bdk_gti_rd_pidr3_t
+#define bustype_BDK_GTI_RD_PIDR3 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_RD_PIDR3 "GTI_RD_PIDR3"
+#define device_bar_BDK_GTI_RD_PIDR3 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_RD_PIDR3 0
+#define arguments_BDK_GTI_RD_PIDR3 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_rd_pidr4
+ *
+ * GTI Counter Read Peripheral Identification Register 4
+ */
+union bdk_gti_rd_pidr4
+{
+ uint32_t u;
+ struct bdk_gti_rd_pidr4_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t pagecnt : 4; /**< [ 7: 4](RO) Log2 of the number of 4 KB blocks occupied. */
+ uint32_t jepcont : 4; /**< [ 3: 0](RO) JEP106 continuation code. Indicates Cavium. */
+#else /* Word 0 - Little Endian */
+ uint32_t jepcont : 4; /**< [ 3: 0](RO) JEP106 continuation code. Indicates Cavium. */
+ uint32_t pagecnt : 4; /**< [ 7: 4](RO) Log2 of the number of 4 KB blocks occupied. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_rd_pidr4_s cn; */
+};
+typedef union bdk_gti_rd_pidr4 bdk_gti_rd_pidr4_t;
+
+#define BDK_GTI_RD_PIDR4 BDK_GTI_RD_PIDR4_FUNC()
+static inline uint64_t BDK_GTI_RD_PIDR4_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_RD_PIDR4_FUNC(void)
+{
+ return 0x844000010fd0ll;
+}
+
+#define typedef_BDK_GTI_RD_PIDR4 bdk_gti_rd_pidr4_t
+#define bustype_BDK_GTI_RD_PIDR4 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_RD_PIDR4 "GTI_RD_PIDR4"
+#define device_bar_BDK_GTI_RD_PIDR4 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_RD_PIDR4 0
+#define arguments_BDK_GTI_RD_PIDR4 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_rd_pidr5
+ *
+ * GTI Counter Read Peripheral Identification Register 5
+ */
+union bdk_gti_rd_pidr5
+{
+ uint32_t u;
+ struct bdk_gti_rd_pidr5_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_rd_pidr5_s cn; */
+};
+typedef union bdk_gti_rd_pidr5 bdk_gti_rd_pidr5_t;
+
+#define BDK_GTI_RD_PIDR5 BDK_GTI_RD_PIDR5_FUNC()
+static inline uint64_t BDK_GTI_RD_PIDR5_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_RD_PIDR5_FUNC(void)
+{
+ return 0x844000010fd4ll;
+}
+
+#define typedef_BDK_GTI_RD_PIDR5 bdk_gti_rd_pidr5_t
+#define bustype_BDK_GTI_RD_PIDR5 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_RD_PIDR5 "GTI_RD_PIDR5"
+#define device_bar_BDK_GTI_RD_PIDR5 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_RD_PIDR5 0
+#define arguments_BDK_GTI_RD_PIDR5 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_rd_pidr6
+ *
+ * GTI Counter Read Peripheral Identification Register 6
+ */
+union bdk_gti_rd_pidr6
+{
+ uint32_t u;
+ struct bdk_gti_rd_pidr6_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_rd_pidr6_s cn; */
+};
+typedef union bdk_gti_rd_pidr6 bdk_gti_rd_pidr6_t;
+
+#define BDK_GTI_RD_PIDR6 BDK_GTI_RD_PIDR6_FUNC()
+static inline uint64_t BDK_GTI_RD_PIDR6_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_RD_PIDR6_FUNC(void)
+{
+ return 0x844000010fd8ll;
+}
+
+#define typedef_BDK_GTI_RD_PIDR6 bdk_gti_rd_pidr6_t
+#define bustype_BDK_GTI_RD_PIDR6 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_RD_PIDR6 "GTI_RD_PIDR6"
+#define device_bar_BDK_GTI_RD_PIDR6 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_RD_PIDR6 0
+#define arguments_BDK_GTI_RD_PIDR6 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_rd_pidr7
+ *
+ * GTI Counter Read Peripheral Identification Register 7
+ */
+union bdk_gti_rd_pidr7
+{
+ uint32_t u;
+ struct bdk_gti_rd_pidr7_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_rd_pidr7_s cn; */
+};
+typedef union bdk_gti_rd_pidr7 bdk_gti_rd_pidr7_t;
+
+#define BDK_GTI_RD_PIDR7 BDK_GTI_RD_PIDR7_FUNC()
+static inline uint64_t BDK_GTI_RD_PIDR7_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_RD_PIDR7_FUNC(void)
+{
+ return 0x844000010fdcll;
+}
+
+#define typedef_BDK_GTI_RD_PIDR7 bdk_gti_rd_pidr7_t
+#define bustype_BDK_GTI_RD_PIDR7 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_RD_PIDR7 "GTI_RD_PIDR7"
+#define device_bar_BDK_GTI_RD_PIDR7 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_RD_PIDR7 0
+#define arguments_BDK_GTI_RD_PIDR7 -1,-1,-1,-1
+
+/**
+ * Register (NCB) gti_scratch
+ *
+ * INTERNAL: GTI Scratch Register
+ */
+union bdk_gti_scratch
+{
+ uint64_t u;
+ struct bdk_gti_scratch_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t scratch : 64; /**< [ 63: 0](R/W) Scratch register. */
+#else /* Word 0 - Little Endian */
+ uint64_t scratch : 64; /**< [ 63: 0](R/W) Scratch register. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_scratch_s cn; */
+};
+typedef union bdk_gti_scratch bdk_gti_scratch_t;
+
+#define BDK_GTI_SCRATCH BDK_GTI_SCRATCH_FUNC()
+static inline uint64_t BDK_GTI_SCRATCH_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_SCRATCH_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x8440000e0018ll;
+ __bdk_csr_fatal("GTI_SCRATCH", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_SCRATCH bdk_gti_scratch_t
+#define bustype_BDK_GTI_SCRATCH BDK_CSR_TYPE_NCB
+#define basename_BDK_GTI_SCRATCH "GTI_SCRATCH"
+#define device_bar_BDK_GTI_SCRATCH 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_SCRATCH 0
+#define arguments_BDK_GTI_SCRATCH -1,-1,-1,-1
+
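+/* Sketch (assumes BDK_CSR_READ/BDK_CSR_WRITE from bdk-csr.h): a trivial
+ * write/read-back check of the CN9XXX-only scratch register, e.g. to verify
+ * NCB access to the block during bring-up. Illustrative only. */
+static inline int bdk_gti_scratch_selftest(bdk_node_t node)
+{
+    const uint64_t pattern = 0x5aa55aa55aa55aa5ull;
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+        return 1; /* register not present; nothing to test */
+    BDK_CSR_WRITE(node, BDK_GTI_SCRATCH, pattern);
+    return BDK_CSR_READ(node, BDK_GTI_SCRATCH) == pattern;
+}
+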
+/**
+ * Register (NCB32b) gti_wc#_cidr0
+ *
+ * GTI Watchdog Control Component Identification Register 0
+ */
+union bdk_gti_wcx_cidr0
+{
+ uint32_t u;
+ struct bdk_gti_wcx_cidr0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_wcx_cidr0_s cn; */
+};
+typedef union bdk_gti_wcx_cidr0 bdk_gti_wcx_cidr0_t;
+
+static inline uint64_t BDK_GTI_WCX_CIDR0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_WCX_CIDR0(unsigned long a)
+{
+ if (a<=1)
+ return 0x844000080ff0ll + 0x20000ll * ((a) & 0x1);
+ __bdk_csr_fatal("GTI_WCX_CIDR0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_WCX_CIDR0(a) bdk_gti_wcx_cidr0_t
+#define bustype_BDK_GTI_WCX_CIDR0(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_WCX_CIDR0(a) "GTI_WCX_CIDR0"
+#define device_bar_BDK_GTI_WCX_CIDR0(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_WCX_CIDR0(a) (a)
+#define arguments_BDK_GTI_WCX_CIDR0(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_wc#_cidr1
+ *
+ * GTI Watchdog Control Component Identification Register 1
+ */
+union bdk_gti_wcx_cidr1
+{
+ uint32_t u;
+ struct bdk_gti_wcx_cidr1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t cclass : 4; /**< [ 7: 4](RO) Component class. */
+ uint32_t preamble : 4; /**< [ 3: 0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+ uint32_t preamble : 4; /**< [ 3: 0](RO) Preamble identification value. */
+ uint32_t cclass : 4; /**< [ 7: 4](RO) Component class. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_wcx_cidr1_s cn; */
+};
+typedef union bdk_gti_wcx_cidr1 bdk_gti_wcx_cidr1_t;
+
+static inline uint64_t BDK_GTI_WCX_CIDR1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_WCX_CIDR1(unsigned long a)
+{
+ if (a<=1)
+ return 0x844000080ff4ll + 0x20000ll * ((a) & 0x1);
+ __bdk_csr_fatal("GTI_WCX_CIDR1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_WCX_CIDR1(a) bdk_gti_wcx_cidr1_t
+#define bustype_BDK_GTI_WCX_CIDR1(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_WCX_CIDR1(a) "GTI_WCX_CIDR1"
+#define device_bar_BDK_GTI_WCX_CIDR1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_WCX_CIDR1(a) (a)
+#define arguments_BDK_GTI_WCX_CIDR1(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_wc#_cidr2
+ *
+ * GTI Watchdog Control Component Identification Register 2
+ */
+union bdk_gti_wcx_cidr2
+{
+ uint32_t u;
+ struct bdk_gti_wcx_cidr2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_wcx_cidr2_s cn; */
+};
+typedef union bdk_gti_wcx_cidr2 bdk_gti_wcx_cidr2_t;
+
+static inline uint64_t BDK_GTI_WCX_CIDR2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_WCX_CIDR2(unsigned long a)
+{
+ if (a<=1)
+ return 0x844000080ff8ll + 0x20000ll * ((a) & 0x1);
+ __bdk_csr_fatal("GTI_WCX_CIDR2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_WCX_CIDR2(a) bdk_gti_wcx_cidr2_t
+#define bustype_BDK_GTI_WCX_CIDR2(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_WCX_CIDR2(a) "GTI_WCX_CIDR2"
+#define device_bar_BDK_GTI_WCX_CIDR2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_WCX_CIDR2(a) (a)
+#define arguments_BDK_GTI_WCX_CIDR2(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_wc#_cidr3
+ *
+ * GTI Watchdog Control Component Identification Register 3
+ */
+union bdk_gti_wcx_cidr3
+{
+ uint32_t u;
+ struct bdk_gti_wcx_cidr3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_wcx_cidr3_s cn; */
+};
+typedef union bdk_gti_wcx_cidr3 bdk_gti_wcx_cidr3_t;
+
+static inline uint64_t BDK_GTI_WCX_CIDR3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_WCX_CIDR3(unsigned long a)
+{
+ if (a<=1)
+ return 0x844000080ffcll + 0x20000ll * ((a) & 0x1);
+ __bdk_csr_fatal("GTI_WCX_CIDR3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_WCX_CIDR3(a) bdk_gti_wcx_cidr3_t
+#define bustype_BDK_GTI_WCX_CIDR3(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_WCX_CIDR3(a) "GTI_WCX_CIDR3"
+#define device_bar_BDK_GTI_WCX_CIDR3(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_WCX_CIDR3(a) (a)
+#define arguments_BDK_GTI_WCX_CIDR3(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_wc#_pidr0
+ *
+ * GTI Watchdog Control Peripheral Identification Register 0
+ * GTI_WC(0) accesses the secure watchdog and is accessible only by the
+ * secure world. GTI_WC(1) accesses the nonsecure watchdog.
+ */
+union bdk_gti_wcx_pidr0
+{
+ uint32_t u;
+ struct bdk_gti_wcx_pidr0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t partnum0 : 8; /**< [ 7: 0](RO) Part number \<7:0\>. Indicates PCC_PIDR_PARTNUM0_E::GTI_WC. */
+#else /* Word 0 - Little Endian */
+ uint32_t partnum0 : 8; /**< [ 7: 0](RO) Part number \<7:0\>. Indicates PCC_PIDR_PARTNUM0_E::GTI_WC. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_wcx_pidr0_s cn; */
+};
+typedef union bdk_gti_wcx_pidr0 bdk_gti_wcx_pidr0_t;
+
+static inline uint64_t BDK_GTI_WCX_PIDR0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_WCX_PIDR0(unsigned long a)
+{
+ if (a<=1)
+ return 0x844000080fe0ll + 0x20000ll * ((a) & 0x1);
+ __bdk_csr_fatal("GTI_WCX_PIDR0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_WCX_PIDR0(a) bdk_gti_wcx_pidr0_t
+#define bustype_BDK_GTI_WCX_PIDR0(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_WCX_PIDR0(a) "GTI_WCX_PIDR0"
+#define device_bar_BDK_GTI_WCX_PIDR0(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_WCX_PIDR0(a) (a)
+#define arguments_BDK_GTI_WCX_PIDR0(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_wc#_pidr1
+ *
+ * GTI Watchdog Control Peripheral Identification Register 1
+ * GTI_WC(0) accesses the secure watchdog and is accessible only by the
+ * secure world. GTI_WC(1) accesses the nonsecure watchdog.
+ */
+union bdk_gti_wcx_pidr1
+{
+ uint32_t u;
+ struct bdk_gti_wcx_pidr1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t idcode : 4; /**< [ 7: 4](RO) JEP106 identification code \<3:0\>. Cavium code is 0x4C. */
+ uint32_t partnum1 : 4; /**< [ 3: 0](RO) Part number \<11:8\>. Indicates PCC_PIDR_PARTNUM1_E::COMP. */
+#else /* Word 0 - Little Endian */
+ uint32_t partnum1 : 4; /**< [ 3: 0](RO) Part number \<11:8\>. Indicates PCC_PIDR_PARTNUM1_E::COMP. */
+ uint32_t idcode : 4; /**< [ 7: 4](RO) JEP106 identification code \<3:0\>. Cavium code is 0x4C. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_wcx_pidr1_s cn; */
+};
+typedef union bdk_gti_wcx_pidr1 bdk_gti_wcx_pidr1_t;
+
+static inline uint64_t BDK_GTI_WCX_PIDR1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_WCX_PIDR1(unsigned long a)
+{
+ if (a<=1)
+ return 0x844000080fe4ll + 0x20000ll * ((a) & 0x1);
+ __bdk_csr_fatal("GTI_WCX_PIDR1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_WCX_PIDR1(a) bdk_gti_wcx_pidr1_t
+#define bustype_BDK_GTI_WCX_PIDR1(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_WCX_PIDR1(a) "GTI_WCX_PIDR1"
+#define device_bar_BDK_GTI_WCX_PIDR1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_WCX_PIDR1(a) (a)
+#define arguments_BDK_GTI_WCX_PIDR1(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_wc#_pidr2
+ *
+ * GTI Watchdog Control Peripheral Identification Register 2
+ * GTI_WC(0) accesses the secure watchdog and is accessible only by the
+ * secure world. GTI_WC(1) accesses the nonsecure watchdog.
+ */
+union bdk_gti_wcx_pidr2
+{
+ uint32_t u;
+ struct bdk_gti_wcx_pidr2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t revision : 4; /**< [ 7: 4](RO) Architectural revision, as assigned by ARM. */
+ uint32_t jedec : 1; /**< [ 3: 3](RO) JEDEC assigned. */
+ uint32_t idcode : 3; /**< [ 2: 0](RO) JEP106 identification code \<6:4\>. Cavium code is 0x4C. */
+#else /* Word 0 - Little Endian */
+ uint32_t idcode : 3; /**< [ 2: 0](RO) JEP106 identification code \<6:4\>. Cavium code is 0x4C. */
+ uint32_t jedec : 1; /**< [ 3: 3](RO) JEDEC assigned. */
+ uint32_t revision : 4; /**< [ 7: 4](RO) Architectural revision, as assigned by ARM. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_wcx_pidr2_s cn; */
+};
+typedef union bdk_gti_wcx_pidr2 bdk_gti_wcx_pidr2_t;
+
+static inline uint64_t BDK_GTI_WCX_PIDR2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_WCX_PIDR2(unsigned long a)
+{
+ if (a<=1)
+ return 0x844000080fe8ll + 0x20000ll * ((a) & 0x1);
+ __bdk_csr_fatal("GTI_WCX_PIDR2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_WCX_PIDR2(a) bdk_gti_wcx_pidr2_t
+#define bustype_BDK_GTI_WCX_PIDR2(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_WCX_PIDR2(a) "GTI_WCX_PIDR2"
+#define device_bar_BDK_GTI_WCX_PIDR2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_WCX_PIDR2(a) (a)
+#define arguments_BDK_GTI_WCX_PIDR2(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_wc#_pidr3
+ *
+ * GTI Watchdog Control Peripheral Identification Register 3
+ * GTI_WC(0) accesses the secure watchdog and is accessible only by the
+ * secure world. GTI_WC(1) accesses the nonsecure watchdog.
+ */
+union bdk_gti_wcx_pidr3
+{
+ uint32_t u;
+ struct bdk_gti_wcx_pidr3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t revand : 4; /**< [ 7: 4](RO) Manufacturer revision number. For CNXXXX always 0x0. */
+ uint32_t cust : 4; /**< [ 3: 0](RO) Customer modified. 0x1 = Overall product information should be consulted for
+ product, major and minor pass numbers. */
+#else /* Word 0 - Little Endian */
+ uint32_t cust : 4; /**< [ 3: 0](RO) Customer modified. 0x1 = Overall product information should be consulted for
+ product, major and minor pass numbers. */
+ uint32_t revand : 4; /**< [ 7: 4](RO) Manufacturer revision number. For CNXXXX always 0x0. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_wcx_pidr3_s cn; */
+};
+typedef union bdk_gti_wcx_pidr3 bdk_gti_wcx_pidr3_t;
+
+static inline uint64_t BDK_GTI_WCX_PIDR3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_WCX_PIDR3(unsigned long a)
+{
+ if (a<=1)
+ return 0x844000080fecll + 0x20000ll * ((a) & 0x1);
+ __bdk_csr_fatal("GTI_WCX_PIDR3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_WCX_PIDR3(a) bdk_gti_wcx_pidr3_t
+#define bustype_BDK_GTI_WCX_PIDR3(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_WCX_PIDR3(a) "GTI_WCX_PIDR3"
+#define device_bar_BDK_GTI_WCX_PIDR3(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_WCX_PIDR3(a) (a)
+#define arguments_BDK_GTI_WCX_PIDR3(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_wc#_pidr4
+ *
+ * GTI Watchdog Control Peripheral Identification Register 4
+ * GTI_WC(0) accesses the secure watchdog and is accessible only by the
+ * secure world. GTI_WC(1) accesses the nonsecure watchdog.
+ */
+union bdk_gti_wcx_pidr4
+{
+ uint32_t u;
+ struct bdk_gti_wcx_pidr4_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t pagecnt : 4; /**< [ 7: 4](RO) Log2 of the number of 4 KB blocks occupied. */
+ uint32_t jepcont : 4; /**< [ 3: 0](RO) JEP106 continuation code. Indicates Cavium. */
+#else /* Word 0 - Little Endian */
+ uint32_t jepcont : 4; /**< [ 3: 0](RO) JEP106 continuation code. Indicates Cavium. */
+ uint32_t pagecnt : 4; /**< [ 7: 4](RO) Log2 of the number of 4 KB blocks occupied. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_wcx_pidr4_s cn; */
+};
+typedef union bdk_gti_wcx_pidr4 bdk_gti_wcx_pidr4_t;
+
+static inline uint64_t BDK_GTI_WCX_PIDR4(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_WCX_PIDR4(unsigned long a)
+{
+ if (a<=1)
+ return 0x844000080fd0ll + 0x20000ll * ((a) & 0x1);
+ __bdk_csr_fatal("GTI_WCX_PIDR4", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_WCX_PIDR4(a) bdk_gti_wcx_pidr4_t
+#define bustype_BDK_GTI_WCX_PIDR4(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_WCX_PIDR4(a) "GTI_WCX_PIDR4"
+#define device_bar_BDK_GTI_WCX_PIDR4(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_WCX_PIDR4(a) (a)
+#define arguments_BDK_GTI_WCX_PIDR4(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_wc#_pidr5
+ *
+ * GTI Watchdog Control Peripheral Identification Register 5
+ */
+union bdk_gti_wcx_pidr5
+{
+ uint32_t u;
+ struct bdk_gti_wcx_pidr5_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_wcx_pidr5_s cn; */
+};
+typedef union bdk_gti_wcx_pidr5 bdk_gti_wcx_pidr5_t;
+
+static inline uint64_t BDK_GTI_WCX_PIDR5(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_WCX_PIDR5(unsigned long a)
+{
+ if (a<=1)
+ return 0x844000080fd4ll + 0x20000ll * ((a) & 0x1);
+ __bdk_csr_fatal("GTI_WCX_PIDR5", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_WCX_PIDR5(a) bdk_gti_wcx_pidr5_t
+#define bustype_BDK_GTI_WCX_PIDR5(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_WCX_PIDR5(a) "GTI_WCX_PIDR5"
+#define device_bar_BDK_GTI_WCX_PIDR5(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_WCX_PIDR5(a) (a)
+#define arguments_BDK_GTI_WCX_PIDR5(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_wc#_pidr6
+ *
+ * GTI Watchdog Control Peripheral Identification Register 6
+ */
+union bdk_gti_wcx_pidr6
+{
+ uint32_t u;
+ struct bdk_gti_wcx_pidr6_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_wcx_pidr6_s cn; */
+};
+typedef union bdk_gti_wcx_pidr6 bdk_gti_wcx_pidr6_t;
+
+static inline uint64_t BDK_GTI_WCX_PIDR6(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_WCX_PIDR6(unsigned long a)
+{
+ if (a<=1)
+ return 0x844000080fd8ll + 0x20000ll * ((a) & 0x1);
+ __bdk_csr_fatal("GTI_WCX_PIDR6", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_WCX_PIDR6(a) bdk_gti_wcx_pidr6_t
+#define bustype_BDK_GTI_WCX_PIDR6(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_WCX_PIDR6(a) "GTI_WCX_PIDR6"
+#define device_bar_BDK_GTI_WCX_PIDR6(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_WCX_PIDR6(a) (a)
+#define arguments_BDK_GTI_WCX_PIDR6(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_wc#_pidr7
+ *
+ * GTI Watchdog Control Peripheral Identification Register 7
+ */
+union bdk_gti_wcx_pidr7
+{
+ uint32_t u;
+ struct bdk_gti_wcx_pidr7_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_wcx_pidr7_s cn; */
+};
+typedef union bdk_gti_wcx_pidr7 bdk_gti_wcx_pidr7_t;
+
+static inline uint64_t BDK_GTI_WCX_PIDR7(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_WCX_PIDR7(unsigned long a)
+{
+ if (a<=1)
+ return 0x844000080fdcll + 0x20000ll * ((a) & 0x1);
+ __bdk_csr_fatal("GTI_WCX_PIDR7", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_WCX_PIDR7(a) bdk_gti_wcx_pidr7_t
+#define bustype_BDK_GTI_WCX_PIDR7(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_WCX_PIDR7(a) "GTI_WCX_PIDR7"
+#define device_bar_BDK_GTI_WCX_PIDR7(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_WCX_PIDR7(a) (a)
+#define arguments_BDK_GTI_WCX_PIDR7(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_wc#_w_iidr
+ *
+ * GTI Watchdog Control Interface Identification Register
+ * GTI_WC(0) accesses the secure watchdog and is accessible only by the
+ * secure world. GTI_WC(1) accesses the nonsecure watchdog.
+ */
+union bdk_gti_wcx_w_iidr
+{
+ uint32_t u;
+ struct bdk_gti_wcx_w_iidr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t productid : 8; /**< [ 31: 24](RO) An implementation defined product number for the device.
+ In CNXXXX, enumerated by PCC_PROD_E. */
+ uint32_t variant : 4; /**< [ 23: 20](RO) Variant field.
+ Note in the SBSA this is defined as part of the product identification.
+ In CNXXXX, the major pass number. */
+ uint32_t arch : 4; /**< [ 19: 16](RO) Architecture revision. 0x0 = SBSA 1.0 watchdogs. */
+ uint32_t revision : 4; /**< [ 15: 12](RO) Indicates the minor revision of the product.
+ In CNXXXX, the minor pass number. */
+ uint32_t implementer : 12; /**< [ 11: 0](RO) Indicates the implementer: 0x34C = Cavium. */
+#else /* Word 0 - Little Endian */
+ uint32_t implementer : 12; /**< [ 11: 0](RO) Indicates the implementer: 0x34C = Cavium. */
+ uint32_t revision : 4; /**< [ 15: 12](RO) Indicates the minor revision of the product.
+ In CNXXXX, the minor pass number. */
+ uint32_t arch : 4; /**< [ 19: 16](RO) Architecture revision. 0x0 = SBSA 1.0 watchdogs. */
+ uint32_t variant : 4; /**< [ 23: 20](RO) Variant field.
+ Note in the SBSA this is defined as part of the product identification.
+ In CNXXXX, the major pass number. */
+ uint32_t productid : 8; /**< [ 31: 24](RO) An implementation defined product number for the device.
+ In CNXXXX, enumerated by PCC_PROD_E. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_wcx_w_iidr_s cn; */
+};
+typedef union bdk_gti_wcx_w_iidr bdk_gti_wcx_w_iidr_t;
+
+static inline uint64_t BDK_GTI_WCX_W_IIDR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_WCX_W_IIDR(unsigned long a)
+{
+ if (a<=1)
+ return 0x844000080fccll + 0x20000ll * ((a) & 0x1);
+ __bdk_csr_fatal("GTI_WCX_W_IIDR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_WCX_W_IIDR(a) bdk_gti_wcx_w_iidr_t
+#define bustype_BDK_GTI_WCX_W_IIDR(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_WCX_W_IIDR(a) "GTI_WCX_W_IIDR"
+#define device_bar_BDK_GTI_WCX_W_IIDR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_WCX_W_IIDR(a) (a)
+#define arguments_BDK_GTI_WCX_W_IIDR(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_wc#_wcs
+ *
+ * GTI Watchdog Control and Status Register
+ * GTI_WC(0) accesses the secure watchdog and is accessible only by the
+ * secure world. GTI_WC(1) accesses the nonsecure watchdog.
+ */
+union bdk_gti_wcx_wcs
+{
+ uint32_t u;
+ struct bdk_gti_wcx_wcs_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_3_31 : 29;
+ uint32_t ws1 : 1; /**< [ 2: 2](RO/H) Watchdog signal 1 (WS1). */
+ uint32_t ws0 : 1; /**< [ 1: 1](RO/H) Watchdog signal 0 (WS0). */
+ uint32_t en : 1; /**< [ 0: 0](R/W) Enable. */
+#else /* Word 0 - Little Endian */
+ uint32_t en : 1; /**< [ 0: 0](R/W) Enable. */
+ uint32_t ws0 : 1; /**< [ 1: 1](RO/H) Watchdog signal 0 (WS0). */
+ uint32_t ws1 : 1; /**< [ 2: 2](RO/H) Watchdog signal 1 (WS1). */
+ uint32_t reserved_3_31 : 29;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_wcx_wcs_s cn; */
+};
+typedef union bdk_gti_wcx_wcs bdk_gti_wcx_wcs_t;
+
+static inline uint64_t BDK_GTI_WCX_WCS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_WCX_WCS(unsigned long a)
+{
+ if (a<=1)
+ return 0x844000080000ll + 0x20000ll * ((a) & 0x1);
+ __bdk_csr_fatal("GTI_WCX_WCS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_WCX_WCS(a) bdk_gti_wcx_wcs_t
+#define bustype_BDK_GTI_WCX_WCS(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_WCX_WCS(a) "GTI_WCX_WCS"
+#define device_bar_BDK_GTI_WCX_WCS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_WCX_WCS(a) (a)
+#define arguments_BDK_GTI_WCX_WCS(a) (a),-1,-1,-1
+
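+/* Sketch (assumes BDK_CSR_MODIFY from bdk-csr.h): enable or disable one
+ * watchdog. Index 0 is the secure watchdog, 1 the nonsecure one, per the
+ * register description above; [WS0]/[WS1] report the timeout stages. */
+static inline void bdk_gti_wdog_enable(bdk_node_t node, unsigned wdog, int en)
+{
+    BDK_CSR_MODIFY(c, node, BDK_GTI_WCX_WCS(wdog), c.s.en = !!en);
+}
+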
+/**
+ * Register (NCB) gti_wc#_wcv
+ *
+ * GTI Watchdog Control Compare Value Register
+ * GTI_WC(0) accesses the secure watchdog and is accessible only by the
+ * secure world. GTI_WC(1) accesses the nonsecure watchdog.
+ */
+union bdk_gti_wcx_wcv
+{
+ uint64_t u;
+ struct bdk_gti_wcx_wcv_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t wdcv : 64; /**< [ 63: 0](R/W/H) Watchdog compare value. */
+#else /* Word 0 - Little Endian */
+ uint64_t wdcv : 64; /**< [ 63: 0](R/W/H) Watchdog compare value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_wcx_wcv_s cn; */
+};
+typedef union bdk_gti_wcx_wcv bdk_gti_wcx_wcv_t;
+
+static inline uint64_t BDK_GTI_WCX_WCV(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_WCX_WCV(unsigned long a)
+{
+ if (a<=1)
+ return 0x844000080010ll + 0x20000ll * ((a) & 0x1);
+ __bdk_csr_fatal("GTI_WCX_WCV", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_WCX_WCV(a) bdk_gti_wcx_wcv_t
+#define bustype_BDK_GTI_WCX_WCV(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_GTI_WCX_WCV(a) "GTI_WCX_WCV"
+#define device_bar_BDK_GTI_WCX_WCV(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_WCX_WCV(a) (a)
+#define arguments_BDK_GTI_WCX_WCV(a) (a),-1,-1,-1
+
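+/* Sketch (assumes BDK_CSR_READ from bdk-csr.h): ticks remaining until the
+ * watchdog compare value is reached, measured against the system counter
+ * read through GTI_RD_CNTCV above. Illustrative only. */
+static inline uint64_t bdk_gti_wdog_ticks_left(bdk_node_t node, unsigned wdog)
+{
+    uint64_t cmp = BDK_CSR_READ(node, BDK_GTI_WCX_WCV(wdog));
+    uint64_t now = BDK_CSR_READ(node, BDK_GTI_RD_CNTCV);
+    return (cmp > now) ? cmp - now : 0;
+}
+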
+/**
+ * Register (NCB32b) gti_wc#_wor
+ *
+ * GTI Watchdog Control Offset Register
+ * GTI_WC(0) accesses the secure watchdog and is accessible only by the
+ * secure world. GTI_WC(1) accesses the nonsecure watchdog.
+ */
+union bdk_gti_wcx_wor
+{
+ uint32_t u;
+ struct bdk_gti_wcx_wor_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t offset : 32; /**< [ 31: 0](R/W/H) Watchdog offset. */
+#else /* Word 0 - Little Endian */
+ uint32_t offset : 32; /**< [ 31: 0](R/W/H) Watchdog offset. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_wcx_wor_s cn; */
+};
+typedef union bdk_gti_wcx_wor bdk_gti_wcx_wor_t;
+
+static inline uint64_t BDK_GTI_WCX_WOR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_WCX_WOR(unsigned long a)
+{
+ if (a<=1)
+ return 0x844000080008ll + 0x20000ll * ((a) & 0x1);
+ __bdk_csr_fatal("GTI_WCX_WOR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_WCX_WOR(a) bdk_gti_wcx_wor_t
+#define bustype_BDK_GTI_WCX_WOR(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_WCX_WOR(a) "GTI_WCX_WOR"
+#define device_bar_BDK_GTI_WCX_WOR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_WCX_WOR(a) (a)
+#define arguments_BDK_GTI_WCX_WOR(a) (a),-1,-1,-1
+
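+/* Sketch (assumes BDK_CSR_WRITE from bdk-csr.h): program the watchdog timeout
+ * offset. In the SBSA 1.0 watchdog model reported by GTI_WC()_W_IIDR[ARCH]
+ * above, a write to WOR also reloads the compare value to the current count
+ * plus [OFFSET]; treat that reload behavior as an assumption here. */
+static inline void bdk_gti_wdog_set_timeout(bdk_node_t node, unsigned wdog,
+                                            uint32_t ticks)
+{
+    BDK_CSR_WRITE(node, BDK_GTI_WCX_WOR(wdog), ticks);
+}
+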
+/**
+ * Register (NCB32b) gti_wr#_cidr0
+ *
+ * GTI Watchdog Refresh Component Identification Register 0
+ */
+union bdk_gti_wrx_cidr0
+{
+ uint32_t u;
+ struct bdk_gti_wrx_cidr0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_wrx_cidr0_s cn; */
+};
+typedef union bdk_gti_wrx_cidr0 bdk_gti_wrx_cidr0_t;
+
+static inline uint64_t BDK_GTI_WRX_CIDR0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_WRX_CIDR0(unsigned long a)
+{
+ if (a<=1)
+ return 0x844000090ff0ll + 0x20000ll * ((a) & 0x1);
+ __bdk_csr_fatal("GTI_WRX_CIDR0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_WRX_CIDR0(a) bdk_gti_wrx_cidr0_t
+#define bustype_BDK_GTI_WRX_CIDR0(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_WRX_CIDR0(a) "GTI_WRX_CIDR0"
+#define device_bar_BDK_GTI_WRX_CIDR0(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_WRX_CIDR0(a) (a)
+#define arguments_BDK_GTI_WRX_CIDR0(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_wr#_cidr1
+ *
+ * GTI Watchdog Refresh Component Identification Register 1
+ */
+union bdk_gti_wrx_cidr1
+{
+ uint32_t u;
+ struct bdk_gti_wrx_cidr1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t cclass : 4; /**< [ 7: 4](RO) Component class. */
+ uint32_t preamble : 4; /**< [ 3: 0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+ uint32_t preamble : 4; /**< [ 3: 0](RO) Preamble identification value. */
+ uint32_t cclass : 4; /**< [ 7: 4](RO) Component class. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_wrx_cidr1_s cn; */
+};
+typedef union bdk_gti_wrx_cidr1 bdk_gti_wrx_cidr1_t;
+
+static inline uint64_t BDK_GTI_WRX_CIDR1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_WRX_CIDR1(unsigned long a)
+{
+ if (a<=1)
+ return 0x844000090ff4ll + 0x20000ll * ((a) & 0x1);
+ __bdk_csr_fatal("GTI_WRX_CIDR1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_WRX_CIDR1(a) bdk_gti_wrx_cidr1_t
+#define bustype_BDK_GTI_WRX_CIDR1(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_WRX_CIDR1(a) "GTI_WRX_CIDR1"
+#define device_bar_BDK_GTI_WRX_CIDR1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_WRX_CIDR1(a) (a)
+#define arguments_BDK_GTI_WRX_CIDR1(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_wr#_cidr2
+ *
+ * GTI Watchdog Refresh Component Identification Register 2
+ */
+union bdk_gti_wrx_cidr2
+{
+ uint32_t u;
+ struct bdk_gti_wrx_cidr2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_wrx_cidr2_s cn; */
+};
+typedef union bdk_gti_wrx_cidr2 bdk_gti_wrx_cidr2_t;
+
+static inline uint64_t BDK_GTI_WRX_CIDR2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_WRX_CIDR2(unsigned long a)
+{
+ if (a<=1)
+ return 0x844000090ff8ll + 0x20000ll * ((a) & 0x1);
+ __bdk_csr_fatal("GTI_WRX_CIDR2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_WRX_CIDR2(a) bdk_gti_wrx_cidr2_t
+#define bustype_BDK_GTI_WRX_CIDR2(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_WRX_CIDR2(a) "GTI_WRX_CIDR2"
+#define device_bar_BDK_GTI_WRX_CIDR2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_WRX_CIDR2(a) (a)
+#define arguments_BDK_GTI_WRX_CIDR2(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_wr#_cidr3
+ *
+ * GTI Watchdog Refresh Component Identification Register 3
+ */
+union bdk_gti_wrx_cidr3
+{
+ uint32_t u;
+ struct bdk_gti_wrx_cidr3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_wrx_cidr3_s cn; */
+};
+typedef union bdk_gti_wrx_cidr3 bdk_gti_wrx_cidr3_t;
+
+static inline uint64_t BDK_GTI_WRX_CIDR3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_WRX_CIDR3(unsigned long a)
+{
+ if (a<=1)
+ return 0x844000090ffcll + 0x20000ll * ((a) & 0x1);
+ __bdk_csr_fatal("GTI_WRX_CIDR3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_WRX_CIDR3(a) bdk_gti_wrx_cidr3_t
+#define bustype_BDK_GTI_WRX_CIDR3(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_WRX_CIDR3(a) "GTI_WRX_CIDR3"
+#define device_bar_BDK_GTI_WRX_CIDR3(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_WRX_CIDR3(a) (a)
+#define arguments_BDK_GTI_WRX_CIDR3(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_wr#_pidr0
+ *
+ * GTI Watchdog Refresh Peripheral Identification Register 0
+ */
+union bdk_gti_wrx_pidr0
+{
+ uint32_t u;
+ struct bdk_gti_wrx_pidr0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t partnum0 : 8; /**< [ 7: 0](RO) Part number \<7:0\>. Indicates PCC_PIDR_PARTNUM0_E::GTI_WR. */
+#else /* Word 0 - Little Endian */
+ uint32_t partnum0 : 8; /**< [ 7: 0](RO) Part number \<7:0\>. Indicates PCC_PIDR_PARTNUM0_E::GTI_WR. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_wrx_pidr0_s cn; */
+};
+typedef union bdk_gti_wrx_pidr0 bdk_gti_wrx_pidr0_t;
+
+static inline uint64_t BDK_GTI_WRX_PIDR0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_WRX_PIDR0(unsigned long a)
+{
+ if (a<=1)
+ return 0x844000090fe0ll + 0x20000ll * ((a) & 0x1);
+ __bdk_csr_fatal("GTI_WRX_PIDR0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_WRX_PIDR0(a) bdk_gti_wrx_pidr0_t
+#define bustype_BDK_GTI_WRX_PIDR0(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_WRX_PIDR0(a) "GTI_WRX_PIDR0"
+#define device_bar_BDK_GTI_WRX_PIDR0(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_WRX_PIDR0(a) (a)
+#define arguments_BDK_GTI_WRX_PIDR0(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_wr#_pidr1
+ *
+ * GTI Watchdog Refresh Peripheral Identification Register 1
+ */
+union bdk_gti_wrx_pidr1
+{
+ uint32_t u;
+ struct bdk_gti_wrx_pidr1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t idcode : 4; /**< [ 7: 4](RO) JEP106 identification code \<3:0\>. Cavium code is 0x4C. */
+ uint32_t partnum1 : 4; /**< [ 3: 0](RO) Part number \<11:8\>. Indicates PCC_PIDR_PARTNUM1_E::COMP. */
+#else /* Word 0 - Little Endian */
+ uint32_t partnum1 : 4; /**< [ 3: 0](RO) Part number \<11:8\>. Indicates PCC_PIDR_PARTNUM1_E::COMP. */
+ uint32_t idcode : 4; /**< [ 7: 4](RO) JEP106 identification code \<3:0\>. Cavium code is 0x4C. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_wrx_pidr1_s cn; */
+};
+typedef union bdk_gti_wrx_pidr1 bdk_gti_wrx_pidr1_t;
+
+static inline uint64_t BDK_GTI_WRX_PIDR1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_WRX_PIDR1(unsigned long a)
+{
+ if (a<=1)
+ return 0x844000090fe4ll + 0x20000ll * ((a) & 0x1);
+ __bdk_csr_fatal("GTI_WRX_PIDR1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_WRX_PIDR1(a) bdk_gti_wrx_pidr1_t
+#define bustype_BDK_GTI_WRX_PIDR1(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_WRX_PIDR1(a) "GTI_WRX_PIDR1"
+#define device_bar_BDK_GTI_WRX_PIDR1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_WRX_PIDR1(a) (a)
+#define arguments_BDK_GTI_WRX_PIDR1(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_wr#_pidr2
+ *
+ * GTI Watchdog Refresh Peripheral Identification Register 2
+ */
+union bdk_gti_wrx_pidr2
+{
+ uint32_t u;
+ struct bdk_gti_wrx_pidr2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t revision : 4; /**< [ 7: 4](RO) Architectural revision, as assigned by ARM. */
+ uint32_t jedec : 1; /**< [ 3: 3](RO) JEDEC assigned. */
+ uint32_t idcode : 3; /**< [ 2: 0](RO) JEP106 identification code \<6:4\>. Cavium code is 0x4C. */
+#else /* Word 0 - Little Endian */
+ uint32_t idcode : 3; /**< [ 2: 0](RO) JEP106 identification code \<6:4\>. Cavium code is 0x4C. */
+ uint32_t jedec : 1; /**< [ 3: 3](RO) JEDEC assigned. */
+ uint32_t revision : 4; /**< [ 7: 4](RO) Architectural revision, as assigned by ARM. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_wrx_pidr2_s cn; */
+};
+typedef union bdk_gti_wrx_pidr2 bdk_gti_wrx_pidr2_t;
+
+static inline uint64_t BDK_GTI_WRX_PIDR2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_WRX_PIDR2(unsigned long a)
+{
+ if (a<=1)
+ return 0x844000090fe8ll + 0x20000ll * ((a) & 0x1);
+ __bdk_csr_fatal("GTI_WRX_PIDR2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_WRX_PIDR2(a) bdk_gti_wrx_pidr2_t
+#define bustype_BDK_GTI_WRX_PIDR2(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_WRX_PIDR2(a) "GTI_WRX_PIDR2"
+#define device_bar_BDK_GTI_WRX_PIDR2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_WRX_PIDR2(a) (a)
+#define arguments_BDK_GTI_WRX_PIDR2(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_wr#_pidr3
+ *
+ * GTI Watchdog Refresh Peripheral Identification Register 3
+ */
+union bdk_gti_wrx_pidr3
+{
+ uint32_t u;
+ struct bdk_gti_wrx_pidr3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t revand : 4; /**< [ 7: 4](RO) Manufacturer revision number. For CNXXXX always 0x0. */
+ uint32_t cust : 4; /**< [ 3: 0](RO) Customer modified. 0x1 = Overall product information should be consulted for
+ product, major and minor pass numbers. */
+#else /* Word 0 - Little Endian */
+ uint32_t cust : 4; /**< [ 3: 0](RO) Customer modified. 0x1 = Overall product information should be consulted for
+ product, major and minor pass numbers. */
+ uint32_t revand : 4; /**< [ 7: 4](RO) Manufacturer revision number. For CNXXXX always 0x0. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_wrx_pidr3_s cn; */
+};
+typedef union bdk_gti_wrx_pidr3 bdk_gti_wrx_pidr3_t;
+
+static inline uint64_t BDK_GTI_WRX_PIDR3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_WRX_PIDR3(unsigned long a)
+{
+ if (a<=1)
+ return 0x844000090fecll + 0x20000ll * ((a) & 0x1);
+ __bdk_csr_fatal("GTI_WRX_PIDR3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_WRX_PIDR3(a) bdk_gti_wrx_pidr3_t
+#define bustype_BDK_GTI_WRX_PIDR3(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_WRX_PIDR3(a) "GTI_WRX_PIDR3"
+#define device_bar_BDK_GTI_WRX_PIDR3(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_WRX_PIDR3(a) (a)
+#define arguments_BDK_GTI_WRX_PIDR3(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_wr#_pidr4
+ *
+ * GTI Watchdog Refresh Peripheral Identification Register 4
+ */
+union bdk_gti_wrx_pidr4
+{
+ uint32_t u;
+ struct bdk_gti_wrx_pidr4_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t pagecnt : 4; /**< [ 7: 4](RO) Log2 of the number of 4 KB blocks occupied. */
+ uint32_t jepcont : 4; /**< [ 3: 0](RO) JEP106 continuation code. Indicates Cavium. */
+#else /* Word 0 - Little Endian */
+ uint32_t jepcont : 4; /**< [ 3: 0](RO) JEP106 continuation code. Indicates Cavium. */
+ uint32_t pagecnt : 4; /**< [ 7: 4](RO) Log2 of the number of 4 KB blocks occupied. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_wrx_pidr4_s cn; */
+};
+typedef union bdk_gti_wrx_pidr4 bdk_gti_wrx_pidr4_t;
+
+static inline uint64_t BDK_GTI_WRX_PIDR4(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_WRX_PIDR4(unsigned long a)
+{
+ if (a<=1)
+ return 0x844000090fd0ll + 0x20000ll * ((a) & 0x1);
+ __bdk_csr_fatal("GTI_WRX_PIDR4", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_WRX_PIDR4(a) bdk_gti_wrx_pidr4_t
+#define bustype_BDK_GTI_WRX_PIDR4(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_WRX_PIDR4(a) "GTI_WRX_PIDR4"
+#define device_bar_BDK_GTI_WRX_PIDR4(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_WRX_PIDR4(a) (a)
+#define arguments_BDK_GTI_WRX_PIDR4(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_wr#_pidr5
+ *
+ * GTI Watchdog Refresh Peripheral Identification Register 5
+ */
+union bdk_gti_wrx_pidr5
+{
+ uint32_t u;
+ struct bdk_gti_wrx_pidr5_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_wrx_pidr5_s cn; */
+};
+typedef union bdk_gti_wrx_pidr5 bdk_gti_wrx_pidr5_t;
+
+static inline uint64_t BDK_GTI_WRX_PIDR5(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_WRX_PIDR5(unsigned long a)
+{
+ if (a<=1)
+ return 0x844000090fd4ll + 0x20000ll * ((a) & 0x1);
+ __bdk_csr_fatal("GTI_WRX_PIDR5", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_WRX_PIDR5(a) bdk_gti_wrx_pidr5_t
+#define bustype_BDK_GTI_WRX_PIDR5(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_WRX_PIDR5(a) "GTI_WRX_PIDR5"
+#define device_bar_BDK_GTI_WRX_PIDR5(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_WRX_PIDR5(a) (a)
+#define arguments_BDK_GTI_WRX_PIDR5(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_wr#_pidr6
+ *
+ * GTI Watchdog Refresh Peripheral Identification Register 6
+ */
+union bdk_gti_wrx_pidr6
+{
+ uint32_t u;
+ struct bdk_gti_wrx_pidr6_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_wrx_pidr6_s cn; */
+};
+typedef union bdk_gti_wrx_pidr6 bdk_gti_wrx_pidr6_t;
+
+static inline uint64_t BDK_GTI_WRX_PIDR6(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_WRX_PIDR6(unsigned long a)
+{
+ if (a<=1)
+ return 0x844000090fd8ll + 0x20000ll * ((a) & 0x1);
+ __bdk_csr_fatal("GTI_WRX_PIDR6", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_WRX_PIDR6(a) bdk_gti_wrx_pidr6_t
+#define bustype_BDK_GTI_WRX_PIDR6(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_WRX_PIDR6(a) "GTI_WRX_PIDR6"
+#define device_bar_BDK_GTI_WRX_PIDR6(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_WRX_PIDR6(a) (a)
+#define arguments_BDK_GTI_WRX_PIDR6(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_wr#_pidr7
+ *
+ * GTI Watchdog Refresh Peripheral Identification Register 7
+ */
+union bdk_gti_wrx_pidr7
+{
+ uint32_t u;
+ struct bdk_gti_wrx_pidr7_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_wrx_pidr7_s cn; */
+};
+typedef union bdk_gti_wrx_pidr7 bdk_gti_wrx_pidr7_t;
+
+static inline uint64_t BDK_GTI_WRX_PIDR7(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_WRX_PIDR7(unsigned long a)
+{
+ if (a<=1)
+ return 0x844000090fdcll + 0x20000ll * ((a) & 0x1);
+ __bdk_csr_fatal("GTI_WRX_PIDR7", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_WRX_PIDR7(a) bdk_gti_wrx_pidr7_t
+#define bustype_BDK_GTI_WRX_PIDR7(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_WRX_PIDR7(a) "GTI_WRX_PIDR7"
+#define device_bar_BDK_GTI_WRX_PIDR7(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_WRX_PIDR7(a) (a)
+#define arguments_BDK_GTI_WRX_PIDR7(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gti_wr#_w_iidr
+ *
+ * GTI Watchdog Refresh Interface Identification Register
+ * GTI_WR(0) accesses the secure watchdog and is accessible only by the
+ * secure world. GTI_WR(1) accesses the nonsecure watchdog.
+ */
+union bdk_gti_wrx_w_iidr
+{
+ uint32_t u;
+ struct bdk_gti_wrx_w_iidr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t productid : 8; /**< [ 31: 24](RO) An implementation defined product number for the device.
+ In CNXXXX, enumerated by PCC_PROD_E. */
+ uint32_t variant : 4; /**< [ 23: 20](RO) Variant field.
+ Note in the SBSA this is defined as part of the product identification.
+ In CNXXXX, the major pass number. */
+ uint32_t arch : 4; /**< [ 19: 16](RO) Architecture revision. 0x0 = SBSA 1.0 watchdogs. */
+ uint32_t revision : 4; /**< [ 15: 12](RO) Indicates the minor revision of the product.
+ In CNXXXX, the minor pass number. */
+ uint32_t implementer : 12; /**< [ 11: 0](RO) Indicates the implementer: 0x34C = Cavium. */
+#else /* Word 0 - Little Endian */
+ uint32_t implementer : 12; /**< [ 11: 0](RO) Indicates the implementer: 0x34C = Cavium. */
+ uint32_t revision : 4; /**< [ 15: 12](RO) Indicates the minor revision of the product.
+ In CNXXXX, the minor pass number. */
+ uint32_t arch : 4; /**< [ 19: 16](RO) Architecture revision. 0x0 = SBSA 1.0 watchdogs. */
+ uint32_t variant : 4; /**< [ 23: 20](RO) Variant field.
+ Note in the SBSA this is defined as part of the product identification.
+ In CNXXXX, the major pass number. */
+ uint32_t productid : 8; /**< [ 31: 24](RO) An implementation defined product number for the device.
+ In CNXXXX, enumerated by PCC_PROD_E. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_wrx_w_iidr_s cn; */
+};
+typedef union bdk_gti_wrx_w_iidr bdk_gti_wrx_w_iidr_t;
+
+static inline uint64_t BDK_GTI_WRX_W_IIDR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_WRX_W_IIDR(unsigned long a)
+{
+ if (a<=1)
+ return 0x844000090fccll + 0x20000ll * ((a) & 0x1);
+ __bdk_csr_fatal("GTI_WRX_W_IIDR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_WRX_W_IIDR(a) bdk_gti_wrx_w_iidr_t
+#define bustype_BDK_GTI_WRX_W_IIDR(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_WRX_W_IIDR(a) "GTI_WRX_W_IIDR"
+#define device_bar_BDK_GTI_WRX_W_IIDR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_WRX_W_IIDR(a) (a)
+#define arguments_BDK_GTI_WRX_W_IIDR(a) (a),-1,-1,-1
+
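+/* Illustrative sketch (not part of the generated header): decoding
+ * GTI_WR(1)_W_IIDR through the union defined above. The direct volatile
+ * load assumes the CSR space is identity-mapped; BDK clients would
+ * normally go through the BDK CSR accessor macros instead. */
+static inline int example_gti_wr_implementer_is_cavium(void)
+{
+    bdk_gti_wrx_w_iidr_t iidr;
+    iidr.u = *(volatile uint32_t *)BDK_GTI_WRX_W_IIDR(1); /* nonsecure watchdog */
+    return iidr.s.implementer == 0x34c; /* 0x34C = Cavium */
+}
+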
+/**
+ * Register (NCB32b) gti_wr#_wrr
+ *
+ * GTI Watchdog Refresh Register
+ * GTI_WR(0) accesses the secure watchdog and is accessible only by the
+ * secure world. GTI_WR(1) accesses the nonsecure watchdog.
+ */
+union bdk_gti_wrx_wrr
+{
+ uint32_t u;
+ struct bdk_gti_wrx_wrr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t zero : 32; /**< [ 31: 0](WO) Watchdog refresh. */
+#else /* Word 0 - Little Endian */
+ uint32_t zero : 32; /**< [ 31: 0](WO) Watchdog refresh. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gti_wrx_wrr_s cn; */
+};
+typedef union bdk_gti_wrx_wrr bdk_gti_wrx_wrr_t;
+
+static inline uint64_t BDK_GTI_WRX_WRR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GTI_WRX_WRR(unsigned long a)
+{
+ if (a<=1)
+ return 0x844000090000ll + 0x20000ll * ((a) & 0x1);
+ __bdk_csr_fatal("GTI_WRX_WRR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GTI_WRX_WRR(a) bdk_gti_wrx_wrr_t
+#define bustype_BDK_GTI_WRX_WRR(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GTI_WRX_WRR(a) "GTI_WRX_WRR"
+#define device_bar_BDK_GTI_WRX_WRR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GTI_WRX_WRR(a) (a)
+#define arguments_BDK_GTI_WRX_WRR(a) (a),-1,-1,-1
+
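+/* Illustrative sketch (not part of the generated header): refreshing a
+ * watchdog by storing to GTI_WR()_WRR. The write data is ignored, so any
+ * store refreshes. The direct volatile store assumes identity-mapped CSRs;
+ * BDK clients would normally use the BDK CSR accessor macros. */
+static inline void example_gti_watchdog_refresh(unsigned long wdog)
+{
+    /* wdog = 0 selects the secure watchdog, 1 the nonsecure watchdog. */
+    *(volatile uint32_t *)BDK_GTI_WRX_WRR(wdog) = 0;
+}
+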
+#endif /* __BDK_CSRS_GTI_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-l2c.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-l2c.h
new file mode 100644
index 0000000000..3b32bc59f7
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-l2c.h
@@ -0,0 +1,2637 @@
+#ifndef __BDK_CSRS_L2C_H__
+#define __BDK_CSRS_L2C_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium L2C.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Enumeration inv_cmd_e
+ *
+ * INTERNAL: INV Command Enumeration
+ *
+ * Enumerates the different INV command encodings.
+ */
+#define BDK_INV_CMD_E_ALLEX (0xc)
+#define BDK_INV_CMD_E_ASIDE1 (0xd)
+#define BDK_INV_CMD_E_GBLSYNC (0xf)
+#define BDK_INV_CMD_E_IALLU (9)
+#define BDK_INV_CMD_E_INV (8)
+#define BDK_INV_CMD_E_IPAS2E1 (7)
+#define BDK_INV_CMD_E_IVAU (4)
+#define BDK_INV_CMD_E_NOP (0)
+#define BDK_INV_CMD_E_SEV (0xe)
+#define BDK_INV_CMD_E_VAAE1 (6)
+#define BDK_INV_CMD_E_VAEX (5)
+#define BDK_INV_CMD_E_VMALLE1 (0xa)
+#define BDK_INV_CMD_E_VMALLS12 (0xb)
+
+/**
+ * Enumeration ioc_cmd_e
+ *
+ * INTERNAL: IOC Command Enumeration
+ *
+ * Enumerates the different IOC command encodings.
+ */
+#define BDK_IOC_CMD_E_ADDR (6)
+#define BDK_IOC_CMD_E_IAADD (0xc)
+#define BDK_IOC_CMD_E_IACAS (0xa)
+#define BDK_IOC_CMD_E_IACLR (0xd)
+#define BDK_IOC_CMD_E_IASET (0xe)
+#define BDK_IOC_CMD_E_IASWP (0xb)
+#define BDK_IOC_CMD_E_IDLE (0)
+#define BDK_IOC_CMD_E_LMTST (3)
+#define BDK_IOC_CMD_E_LOAD (2)
+#define BDK_IOC_CMD_E_SLILD (8)
+#define BDK_IOC_CMD_E_SLIST (7)
+#define BDK_IOC_CMD_E_STORE (1)
+#define BDK_IOC_CMD_E_STOREP (9)
+
+/**
+ * Enumeration ior_cmd_e
+ *
+ * INTERNAL: IOR Command Enumeration
+ *
+ * Enumerates the different IOR command encodings.
+ */
+#define BDK_IOR_CMD_E_DATA (1)
+#define BDK_IOR_CMD_E_IDLE (0)
+#define BDK_IOR_CMD_E_SLIRSP (3)
+
+/**
+ * Enumeration l2c_bar_e
+ *
+ * L2C Base Address Register Enumeration
+ * Enumerates the base address registers.
+ */
+#define BDK_L2C_BAR_E_L2C_PF_BAR0 (0x87e080800000ll)
+#define BDK_L2C_BAR_E_L2C_PF_BAR0_SIZE 0x800000ull
+
+/**
+ * Enumeration l2c_dat_errprio_e
+ *
+ * L2C Quad Error Priority Enumeration
+ * Enumerates the different quad error priorities.
+ */
+#define BDK_L2C_DAT_ERRPRIO_E_FBFDBE (4)
+#define BDK_L2C_DAT_ERRPRIO_E_FBFSBE (1)
+#define BDK_L2C_DAT_ERRPRIO_E_L2DDBE (6)
+#define BDK_L2C_DAT_ERRPRIO_E_L2DSBE (3)
+#define BDK_L2C_DAT_ERRPRIO_E_NBE (0)
+#define BDK_L2C_DAT_ERRPRIO_E_SBFDBE (5)
+#define BDK_L2C_DAT_ERRPRIO_E_SBFSBE (2)
+
+/**
+ * Enumeration l2c_tad_prf_sel_e
+ *
+ * L2C TAD Performance Counter Select Enumeration
+ * Enumerates the different TAD performance counter selects.
+ */
+#define BDK_L2C_TAD_PRF_SEL_E_L2T_HIT (1)
+#define BDK_L2C_TAD_PRF_SEL_E_L2T_MISS (2)
+#define BDK_L2C_TAD_PRF_SEL_E_L2T_NOALLOC (3)
+#define BDK_L2C_TAD_PRF_SEL_E_L2_OPEN_OCI (0x48)
+#define BDK_L2C_TAD_PRF_SEL_E_L2_RTG_VIC (0x44)
+#define BDK_L2C_TAD_PRF_SEL_E_L2_VIC (4)
+#define BDK_L2C_TAD_PRF_SEL_E_LFB_OCC (7)
+#define BDK_L2C_TAD_PRF_SEL_E_LMC_WR (0x4e)
+#define BDK_L2C_TAD_PRF_SEL_E_LMC_WR_SBLKDTY (0x4f)
+#define BDK_L2C_TAD_PRF_SEL_E_LOOKUP (0x40)
+#define BDK_L2C_TAD_PRF_SEL_E_LOOKUP_ALL (0x44)
+#define BDK_L2C_TAD_PRF_SEL_E_LOOKUP_MIB (0x43)
+#define BDK_L2C_TAD_PRF_SEL_E_LOOKUP_XMC_LCL (0x41)
+#define BDK_L2C_TAD_PRF_SEL_E_LOOKUP_XMC_RMT (0x42)
+#define BDK_L2C_TAD_PRF_SEL_E_NONE (0)
+#define BDK_L2C_TAD_PRF_SEL_E_OCI_FLDX_TAG_E_DAT (0x6d)
+#define BDK_L2C_TAD_PRF_SEL_E_OCI_FLDX_TAG_E_NODAT (0x6c)
+#define BDK_L2C_TAD_PRF_SEL_E_OCI_FWD_CYC_HIT (0x69)
+#define BDK_L2C_TAD_PRF_SEL_E_OCI_FWD_RACE (0x6a)
+#define BDK_L2C_TAD_PRF_SEL_E_OCI_HAKS (0x6b)
+#define BDK_L2C_TAD_PRF_SEL_E_OCI_RLDD (0x6e)
+#define BDK_L2C_TAD_PRF_SEL_E_OCI_RLDD_PEMD (0x6f)
+#define BDK_L2C_TAD_PRF_SEL_E_OCI_RRQ_DAT_CNT (0x70)
+#define BDK_L2C_TAD_PRF_SEL_E_OCI_RRQ_DAT_DMASK (0x71)
+#define BDK_L2C_TAD_PRF_SEL_E_OCI_RSP_DAT_CNT (0x72)
+#define BDK_L2C_TAD_PRF_SEL_E_OCI_RSP_DAT_DMASK (0x73)
+#define BDK_L2C_TAD_PRF_SEL_E_OCI_RSP_DAT_VICD_CNT (0x74)
+#define BDK_L2C_TAD_PRF_SEL_E_OCI_RSP_DAT_VICD_DMASK (0x75)
+#define BDK_L2C_TAD_PRF_SEL_E_OCI_RTG_WAIT (0x68)
+#define BDK_L2C_TAD_PRF_SEL_E_OPEN_CCPI (0xa)
+#define BDK_L2C_TAD_PRF_SEL_E_QDX_BNKS(a) (0x82 + 0x10 * (a))
+#define BDK_L2C_TAD_PRF_SEL_E_QDX_IDX(a) (0x80 + 0x10 * (a))
+#define BDK_L2C_TAD_PRF_SEL_E_QDX_RDAT(a) (0x81 + 0x10 * (a))
+#define BDK_L2C_TAD_PRF_SEL_E_QDX_WDAT(a) (0x83 + 0x10 * (a))
+#define BDK_L2C_TAD_PRF_SEL_E_RTG_ALC (0x5d)
+#define BDK_L2C_TAD_PRF_SEL_E_RTG_ALC_EVICT (0x76)
+#define BDK_L2C_TAD_PRF_SEL_E_RTG_ALC_HIT (0x5e)
+#define BDK_L2C_TAD_PRF_SEL_E_RTG_ALC_HITWB (0x5f)
+#define BDK_L2C_TAD_PRF_SEL_E_RTG_ALC_VIC (0x77)
+#define BDK_L2C_TAD_PRF_SEL_E_RTG_HIT (0x41)
+#define BDK_L2C_TAD_PRF_SEL_E_RTG_MISS (0x42)
+#define BDK_L2C_TAD_PRF_SEL_E_SC_FAIL (5)
+#define BDK_L2C_TAD_PRF_SEL_E_SC_PASS (6)
+#define BDK_L2C_TAD_PRF_SEL_E_STC_LCL (0x64)
+#define BDK_L2C_TAD_PRF_SEL_E_STC_LCL_FAIL (0x65)
+#define BDK_L2C_TAD_PRF_SEL_E_STC_RMT (0x62)
+#define BDK_L2C_TAD_PRF_SEL_E_STC_RMT_FAIL (0x63)
+#define BDK_L2C_TAD_PRF_SEL_E_STC_TOTAL (0x60)
+#define BDK_L2C_TAD_PRF_SEL_E_STC_TOTAL_FAIL (0x61)
+#define BDK_L2C_TAD_PRF_SEL_E_TAG_ALC_HIT (0x48)
+#define BDK_L2C_TAD_PRF_SEL_E_TAG_ALC_LCL_CLNVIC (0x59)
+#define BDK_L2C_TAD_PRF_SEL_E_TAG_ALC_LCL_DTYVIC (0x5a)
+#define BDK_L2C_TAD_PRF_SEL_E_TAG_ALC_LCL_EVICT (0x58)
+#define BDK_L2C_TAD_PRF_SEL_E_TAG_ALC_MISS (0x49)
+#define BDK_L2C_TAD_PRF_SEL_E_TAG_ALC_NALC (0x4a)
+#define BDK_L2C_TAD_PRF_SEL_E_TAG_ALC_RMT_EVICT (0x5b)
+#define BDK_L2C_TAD_PRF_SEL_E_TAG_ALC_RMT_VIC (0x5c)
+#define BDK_L2C_TAD_PRF_SEL_E_TAG_ALC_RTG_HIT (0x50)
+#define BDK_L2C_TAD_PRF_SEL_E_TAG_ALC_RTG_HITE (0x51)
+#define BDK_L2C_TAD_PRF_SEL_E_TAG_ALC_RTG_HITS (0x52)
+#define BDK_L2C_TAD_PRF_SEL_E_TAG_ALC_RTG_MISS (0x53)
+#define BDK_L2C_TAD_PRF_SEL_E_TAG_NALC_HIT (0x4b)
+#define BDK_L2C_TAD_PRF_SEL_E_TAG_NALC_MISS (0x4c)
+#define BDK_L2C_TAD_PRF_SEL_E_TAG_NALC_RTG_HIT (0x54)
+#define BDK_L2C_TAD_PRF_SEL_E_TAG_NALC_RTG_HITE (0x56)
+#define BDK_L2C_TAD_PRF_SEL_E_TAG_NALC_RTG_HITS (0x57)
+#define BDK_L2C_TAD_PRF_SEL_E_TAG_NALC_RTG_MISS (0x55)
+#define BDK_L2C_TAD_PRF_SEL_E_WAIT_LFB (8)
+#define BDK_L2C_TAD_PRF_SEL_E_WAIT_VAB (9)
+
+/**
+ * Enumeration l2c_tag_errprio_e
+ *
+ * L2C Tag Error Priority Enumeration
+ * Enumerates the different TAG error priorities.
+ */
+#define BDK_L2C_TAG_ERRPRIO_E_DBE (3)
+#define BDK_L2C_TAG_ERRPRIO_E_NBE (0)
+#define BDK_L2C_TAG_ERRPRIO_E_NOWAY (1)
+#define BDK_L2C_TAG_ERRPRIO_E_SBE (2)
+
+/**
+ * Enumeration oci_ireq_cmd_e
+ *
+ * INTERNAL: OCI IREQ Command Enumeration
+ */
+#define BDK_OCI_IREQ_CMD_E_IAADD (0x10)
+#define BDK_OCI_IREQ_CMD_E_IACAS (0x15)
+#define BDK_OCI_IREQ_CMD_E_IACLR (0x12)
+#define BDK_OCI_IREQ_CMD_E_IASET (0x13)
+#define BDK_OCI_IREQ_CMD_E_IASWP (0x14)
+#define BDK_OCI_IREQ_CMD_E_IDLE (0x1f)
+#define BDK_OCI_IREQ_CMD_E_IOBADDR (6)
+#define BDK_OCI_IREQ_CMD_E_IOBADDRA (7)
+#define BDK_OCI_IREQ_CMD_E_IOBLD (0)
+#define BDK_OCI_IREQ_CMD_E_IOBST (2)
+#define BDK_OCI_IREQ_CMD_E_IOBSTA (3)
+#define BDK_OCI_IREQ_CMD_E_IOBSTP (4)
+#define BDK_OCI_IREQ_CMD_E_IOBSTPA (5)
+#define BDK_OCI_IREQ_CMD_E_LMTST (8)
+#define BDK_OCI_IREQ_CMD_E_LMTSTA (9)
+#define BDK_OCI_IREQ_CMD_E_SLILD (0x1c)
+#define BDK_OCI_IREQ_CMD_E_SLIST (0x1d)
+
+/**
+ * Enumeration oci_irsp_cmd_e
+ *
+ * INTERNAL: OCI IRSP Command Enumeration
+ */
+#define BDK_OCI_IRSP_CMD_E_IDLE (0x1f)
+#define BDK_OCI_IRSP_CMD_E_IOBACK (1)
+#define BDK_OCI_IRSP_CMD_E_IOBRSP (0)
+#define BDK_OCI_IRSP_CMD_E_SLIRSP (2)
+
+/**
+ * Enumeration oci_mfwd_cmd_e
+ *
+ * INTERNAL: OCI MFWD Command Enumeration
+ */
+#define BDK_OCI_MFWD_CMD_E_FEVX_EH (0xb)
+#define BDK_OCI_MFWD_CMD_E_FEVX_OH (0xc)
+#define BDK_OCI_MFWD_CMD_E_FLDRO_E (0)
+#define BDK_OCI_MFWD_CMD_E_FLDRO_O (1)
+#define BDK_OCI_MFWD_CMD_E_FLDRS_E (2)
+#define BDK_OCI_MFWD_CMD_E_FLDRS_EH (4)
+#define BDK_OCI_MFWD_CMD_E_FLDRS_O (3)
+#define BDK_OCI_MFWD_CMD_E_FLDRS_OH (5)
+#define BDK_OCI_MFWD_CMD_E_FLDT_E (6)
+#define BDK_OCI_MFWD_CMD_E_FLDX_E (7)
+#define BDK_OCI_MFWD_CMD_E_FLDX_EH (9)
+#define BDK_OCI_MFWD_CMD_E_FLDX_O (8)
+#define BDK_OCI_MFWD_CMD_E_FLDX_OH (0xa)
+#define BDK_OCI_MFWD_CMD_E_IDLE (0x1f)
+#define BDK_OCI_MFWD_CMD_E_SINV (0xd)
+#define BDK_OCI_MFWD_CMD_E_SINV_H (0xe)
+
+/**
+ * Enumeration oci_mreq_cmd_e
+ *
+ * INTERNAL: OCI MREQ Command Enumeration
+ */
+#define BDK_OCI_MREQ_CMD_E_GINV (0x14)
+#define BDK_OCI_MREQ_CMD_E_GSYNC (0x18)
+#define BDK_OCI_MREQ_CMD_E_IDLE (0x1f)
+#define BDK_OCI_MREQ_CMD_E_RADD (0xd)
+#define BDK_OCI_MREQ_CMD_E_RC2D_O (6)
+#define BDK_OCI_MREQ_CMD_E_RC2D_S (7)
+#define BDK_OCI_MREQ_CMD_E_RCAS (0x13)
+#define BDK_OCI_MREQ_CMD_E_RCAS_O (0x15)
+#define BDK_OCI_MREQ_CMD_E_RCAS_S (0x16)
+#define BDK_OCI_MREQ_CMD_E_RCLR (0x12)
+#define BDK_OCI_MREQ_CMD_E_RDEC (0xf)
+#define BDK_OCI_MREQ_CMD_E_REOR (0xb)
+#define BDK_OCI_MREQ_CMD_E_RINC (0xe)
+#define BDK_OCI_MREQ_CMD_E_RLDD (0)
+#define BDK_OCI_MREQ_CMD_E_RLDI (1)
+#define BDK_OCI_MREQ_CMD_E_RLDT (2)
+#define BDK_OCI_MREQ_CMD_E_RLDWB (4)
+#define BDK_OCI_MREQ_CMD_E_RLDX (5)
+#define BDK_OCI_MREQ_CMD_E_RLDY (3)
+#define BDK_OCI_MREQ_CMD_E_RSET (0x11)
+#define BDK_OCI_MREQ_CMD_E_RSMAX (0x1b)
+#define BDK_OCI_MREQ_CMD_E_RSMIN (0x1c)
+#define BDK_OCI_MREQ_CMD_E_RSTC (0x17)
+#define BDK_OCI_MREQ_CMD_E_RSTC_O (0x19)
+#define BDK_OCI_MREQ_CMD_E_RSTC_S (0x1a)
+#define BDK_OCI_MREQ_CMD_E_RSTP (0xa)
+#define BDK_OCI_MREQ_CMD_E_RSTT (8)
+#define BDK_OCI_MREQ_CMD_E_RSTY (9)
+#define BDK_OCI_MREQ_CMD_E_RSWP (0x10)
+#define BDK_OCI_MREQ_CMD_E_RUMAX (0x1d)
+#define BDK_OCI_MREQ_CMD_E_RUMIN (0x1e)
+
+/**
+ * Enumeration oci_mrsp_cmd_e
+ *
+ * INTERNAL: OCI MRSP Command Enumeration
+ */
+#define BDK_OCI_MRSP_CMD_E_GSDN (0x18)
+#define BDK_OCI_MRSP_CMD_E_HAKD (4)
+#define BDK_OCI_MRSP_CMD_E_HAKI (6)
+#define BDK_OCI_MRSP_CMD_E_HAKN_S (5)
+#define BDK_OCI_MRSP_CMD_E_HAKS (7)
+#define BDK_OCI_MRSP_CMD_E_HAKV (8)
+#define BDK_OCI_MRSP_CMD_E_IDLE (0x1f)
+#define BDK_OCI_MRSP_CMD_E_P2DF (0xd)
+#define BDK_OCI_MRSP_CMD_E_PACK (0xc)
+#define BDK_OCI_MRSP_CMD_E_PATM (0xb)
+#define BDK_OCI_MRSP_CMD_E_PEMD (0xa)
+#define BDK_OCI_MRSP_CMD_E_PSHA (9)
+#define BDK_OCI_MRSP_CMD_E_VICC (1)
+#define BDK_OCI_MRSP_CMD_E_VICD (0)
+#define BDK_OCI_MRSP_CMD_E_VICDHI (3)
+#define BDK_OCI_MRSP_CMD_E_VICS (2)
+
+/**
+ * Enumeration rsc_cmd_e
+ *
+ * INTERNAL: RSC Command Enumeration
+ *
+ * Enumerates the different RSC command encodings.
+ */
+#define BDK_RSC_CMD_E_FLDN (3)
+#define BDK_RSC_CMD_E_GSDN (2)
+#define BDK_RSC_CMD_E_IACK (5)
+#define BDK_RSC_CMD_E_IFDN (1)
+#define BDK_RSC_CMD_E_NOP (0)
+#define BDK_RSC_CMD_E_SCDN (6)
+#define BDK_RSC_CMD_E_SCFL (7)
+#define BDK_RSC_CMD_E_STDN (4)
+
+/**
+ * Enumeration xmc_cmd_e
+ *
+ * INTERNAL: XMC Command Enumeration
+ *
+ * Enumerates the different XMC command encodings.
+ */
+#define BDK_XMC_CMD_E_AADD (0x28)
+#define BDK_XMC_CMD_E_ACAS (0x26)
+#define BDK_XMC_CMD_E_ACLR (0x29)
+#define BDK_XMC_CMD_E_ADEC (0x25)
+#define BDK_XMC_CMD_E_AEOR (0x2a)
+#define BDK_XMC_CMD_E_AINC (0x24)
+#define BDK_XMC_CMD_E_ALLEX (0x3c)
+#define BDK_XMC_CMD_E_ASET (0x2b)
+#define BDK_XMC_CMD_E_ASIDE1 (0x3d)
+#define BDK_XMC_CMD_E_ASMAX (0x2c)
+#define BDK_XMC_CMD_E_ASMIN (0x2d)
+#define BDK_XMC_CMD_E_ASWP (0x27)
+#define BDK_XMC_CMD_E_AUMAX (0x2e)
+#define BDK_XMC_CMD_E_AUMIN (0x2f)
+#define BDK_XMC_CMD_E_DWB (5)
+#define BDK_XMC_CMD_E_GBLSYNC (0x3f)
+#define BDK_XMC_CMD_E_IAADD (0x68)
+#define BDK_XMC_CMD_E_IACAS (0x66)
+#define BDK_XMC_CMD_E_IACLR (0x69)
+#define BDK_XMC_CMD_E_IALLU (0x39)
+#define BDK_XMC_CMD_E_IASET (0x6b)
+#define BDK_XMC_CMD_E_IASWP (0x67)
+#define BDK_XMC_CMD_E_INVL2 (0x1c)
+#define BDK_XMC_CMD_E_IOBADDR (0x43)
+#define BDK_XMC_CMD_E_IOBADDRA (0x53)
+#define BDK_XMC_CMD_E_IOBLD (0x40)
+#define BDK_XMC_CMD_E_IOBST (0x41)
+#define BDK_XMC_CMD_E_IOBSTA (0x51)
+#define BDK_XMC_CMD_E_IOBSTP (0x42)
+#define BDK_XMC_CMD_E_IOBSTPA (0x52)
+#define BDK_XMC_CMD_E_IPAS2E1 (0x37)
+#define BDK_XMC_CMD_E_IVAU (0x34)
+#define BDK_XMC_CMD_E_LCKL2 (0x1f)
+#define BDK_XMC_CMD_E_LDD (8)
+#define BDK_XMC_CMD_E_LDDT (0xc)
+#define BDK_XMC_CMD_E_LDE (0xb)
+#define BDK_XMC_CMD_E_LDI (2)
+#define BDK_XMC_CMD_E_LDP (7)
+#define BDK_XMC_CMD_E_LDT (1)
+#define BDK_XMC_CMD_E_LDWB (0xd)
+#define BDK_XMC_CMD_E_LDY (6)
+#define BDK_XMC_CMD_E_LMTST (0x45)
+#define BDK_XMC_CMD_E_LMTSTA (0x55)
+#define BDK_XMC_CMD_E_LTGL2I (0x19)
+#define BDK_XMC_CMD_E_NOP (0)
+#define BDK_XMC_CMD_E_PL2 (3)
+#define BDK_XMC_CMD_E_PL2T (0x16)
+#define BDK_XMC_CMD_E_PS2 (0xa)
+#define BDK_XMC_CMD_E_PS2T (0x17)
+#define BDK_XMC_CMD_E_PSL1 (9)
+#define BDK_XMC_CMD_E_RPL2 (4)
+#define BDK_XMC_CMD_E_RSTP (0xf)
+#define BDK_XMC_CMD_E_SEV (0x3e)
+#define BDK_XMC_CMD_E_STC (0x13)
+#define BDK_XMC_CMD_E_STF (0x10)
+#define BDK_XMC_CMD_E_STFIL1 (0x14)
+#define BDK_XMC_CMD_E_STGL2I (0x1a)
+#define BDK_XMC_CMD_E_STP (0x12)
+#define BDK_XMC_CMD_E_STT (0x11)
+#define BDK_XMC_CMD_E_STTIL1 (0x15)
+#define BDK_XMC_CMD_E_STY (0xe)
+#define BDK_XMC_CMD_E_VAAE1 (0x36)
+#define BDK_XMC_CMD_E_VAEX (0x35)
+#define BDK_XMC_CMD_E_VMALLE1 (0x3a)
+#define BDK_XMC_CMD_E_VMALLS12 (0x3b)
+#define BDK_XMC_CMD_E_WBIL2 (0x1d)
+#define BDK_XMC_CMD_E_WBIL2I (0x18)
+#define BDK_XMC_CMD_E_WBL2 (0x1e)
+#define BDK_XMC_CMD_E_WBL2I (0x1b)
+
+/**
+ * Register (RSL) l2c_asc_region#_attr
+ *
+ * L2C Address Space Control Region Attributes Registers
+ */
+union bdk_l2c_asc_regionx_attr
+{
+ uint64_t u;
+ struct bdk_l2c_asc_regionx_attr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t s_en : 1; /**< [ 1: 1](R/W) Enables secure access to region.
+ Undefined if both [S_EN] and [NS_EN] are set for the same region. */
+ uint64_t ns_en : 1; /**< [ 0: 0](R/W) Enables nonsecure access to region.
+ Undefined if both [S_EN] and [NS_EN] are set for the same region.
+ See also DFA_ASC_REGION()_ATTR[NS_EN]. */
+#else /* Word 0 - Little Endian */
+ uint64_t ns_en : 1; /**< [ 0: 0](R/W) Enables nonsecure access to region.
+ Undefined if both [S_EN] and [NS_EN] are set for the same region.
+ See also DFA_ASC_REGION()_ATTR[NS_EN]. */
+ uint64_t s_en : 1; /**< [ 1: 1](R/W) Enables secure access to region.
+ Undefined if both [S_EN] and [NS_EN] are set for the same region. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_l2c_asc_regionx_attr_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t s_en : 1; /**< [ 1: 1](R/W) Enables secure access to region.
+ Undefined if both [S_EN] and [NS_EN] are set for the same region. */
+ uint64_t ns_en : 1; /**< [ 0: 0](R/W) Enables nonsecure access to region.
+ Undefined if both [S_EN] and [NS_EN] are set for the same region.
+
+ Internal:
+ See also DFA_ASC_REGION()_ATTR[NS_EN]. */
+#else /* Word 0 - Little Endian */
+ uint64_t ns_en : 1; /**< [ 0: 0](R/W) Enables nonsecure access to region.
+ Undefined if both [S_EN] and [NS_EN] are set for the same region.
+
+ Internal:
+ See also DFA_ASC_REGION()_ATTR[NS_EN]. */
+ uint64_t s_en : 1; /**< [ 1: 1](R/W) Enables secure access to region.
+ Undefined if both [S_EN] and [NS_EN] are set for the same region. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_l2c_asc_regionx_attr_s cn88xx; */
+ /* struct bdk_l2c_asc_regionx_attr_cn81xx cn83xx; */
+};
+typedef union bdk_l2c_asc_regionx_attr bdk_l2c_asc_regionx_attr_t;
+
+static inline uint64_t BDK_L2C_ASC_REGIONX_ATTR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_ASC_REGIONX_ATTR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX) && (a<=3))
+ return 0x87e080801010ll + 0x40ll * ((a) & 0x3);
+ __bdk_csr_fatal("L2C_ASC_REGIONX_ATTR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_ASC_REGIONX_ATTR(a) bdk_l2c_asc_regionx_attr_t
+#define bustype_BDK_L2C_ASC_REGIONX_ATTR(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_ASC_REGIONX_ATTR(a) "L2C_ASC_REGIONX_ATTR"
+#define device_bar_BDK_L2C_ASC_REGIONX_ATTR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_ASC_REGIONX_ATTR(a) (a)
+#define arguments_BDK_L2C_ASC_REGIONX_ATTR(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) l2c_asc_region#_end
+ *
+ * L2C Address Space Control Region End Address Registers
+ */
+union bdk_l2c_asc_regionx_end
+{
+ uint64_t u;
+ struct bdk_l2c_asc_regionx_end_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_40_63 : 24;
+ uint64_t addr : 20; /**< [ 39: 20](R/W) Node-local physical address \<39:20\> marking the inclusive end of the corresponding ASC
+ region.
+ Note that the region includes this address.
+ Software must ensure that regions do not overlap.
+ To specify an empty region, clear both the [S_EN] and [NS_EN] fields of
+ the corresponding L2C_ASC_REGION()_ATTR register. */
+ uint64_t reserved_0_19 : 20;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_19 : 20;
+ uint64_t addr : 20; /**< [ 39: 20](R/W) Node-local physical address \<39:20\> marking the inclusive end of the corresponding ASC
+ region.
+ Note that the region includes this address.
+ Software must ensure that regions do not overlap.
+ To specify an empty region, clear both the [S_EN] and [NS_EN] fields of
+ the corresponding L2C_ASC_REGION()_ATTR register. */
+ uint64_t reserved_40_63 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_asc_regionx_end_s cn; */
+};
+typedef union bdk_l2c_asc_regionx_end bdk_l2c_asc_regionx_end_t;
+
+static inline uint64_t BDK_L2C_ASC_REGIONX_END(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_ASC_REGIONX_END(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX) && (a<=3))
+ return 0x87e080801008ll + 0x40ll * ((a) & 0x3);
+ __bdk_csr_fatal("L2C_ASC_REGIONX_END", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_ASC_REGIONX_END(a) bdk_l2c_asc_regionx_end_t
+#define bustype_BDK_L2C_ASC_REGIONX_END(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_ASC_REGIONX_END(a) "L2C_ASC_REGIONX_END"
+#define device_bar_BDK_L2C_ASC_REGIONX_END(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_ASC_REGIONX_END(a) (a)
+#define arguments_BDK_L2C_ASC_REGIONX_END(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) l2c_asc_region#_start
+ *
+ * L2C Address Space Control Region Start Address Registers
+ */
+union bdk_l2c_asc_regionx_start
+{
+ uint64_t u;
+ struct bdk_l2c_asc_regionx_start_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_40_63 : 24;
+ uint64_t addr : 20; /**< [ 39: 20](R/W) Node-local physical address \<39:20\> marking the start of the corresponding ASC region.
+ Software must ensure that regions do not overlap. */
+ uint64_t reserved_0_19 : 20;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_19 : 20;
+ uint64_t addr : 20; /**< [ 39: 20](R/W) Node-local physical address \<39:20\> marking the start of the corresponding ASC region.
+ Software must ensure that regions do not overlap. */
+ uint64_t reserved_40_63 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_asc_regionx_start_s cn; */
+};
+typedef union bdk_l2c_asc_regionx_start bdk_l2c_asc_regionx_start_t;
+
+static inline uint64_t BDK_L2C_ASC_REGIONX_START(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_ASC_REGIONX_START(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX) && (a<=3))
+ return 0x87e080801000ll + 0x40ll * ((a) & 0x3);
+ __bdk_csr_fatal("L2C_ASC_REGIONX_START", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_ASC_REGIONX_START(a) bdk_l2c_asc_regionx_start_t
+#define bustype_BDK_L2C_ASC_REGIONX_START(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_ASC_REGIONX_START(a) "L2C_ASC_REGIONX_START"
+#define device_bar_BDK_L2C_ASC_REGIONX_START(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_ASC_REGIONX_START(a) (a)
+#define arguments_BDK_L2C_ASC_REGIONX_START(a) (a),-1,-1,-1
+
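+/* Illustrative sketch (not part of the generated header): opening ASC
+ * region 0 for nonsecure accesses to [base, base + size). START and END
+ * hold physical address bits <39:20>, and END is inclusive, so the last
+ * byte of the window selects END. Setting both S_EN and NS_EN for one
+ * region is undefined, so only NS_EN is written here. The direct volatile
+ * stores assume identity-mapped CSRs. */
+static inline void example_l2c_asc_open_ns_region0(uint64_t base, uint64_t size)
+{
+    const uint64_t mask = 0xfffffull << 20;              /* ADDR<39:20> */
+    *(volatile uint64_t *)BDK_L2C_ASC_REGIONX_START(0) = base & mask;
+    *(volatile uint64_t *)BDK_L2C_ASC_REGIONX_END(0)   = (base + size - 1) & mask;
+    *(volatile uint64_t *)BDK_L2C_ASC_REGIONX_ATTR(0)  = 1ull << 0; /* NS_EN */
+}
+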
+/**
+ * Register (RSL) l2c_cbc#_bist_status
+ *
+ * L2C CBC BIST Status Registers
+ */
+union bdk_l2c_cbcx_bist_status
+{
+ uint64_t u;
+ struct bdk_l2c_cbcx_bist_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_37_63 : 27;
+ uint64_t mibfl : 5; /**< [ 36: 32](RO/H) BIST failure status for various MIB memories. ({XMD, IPM, IRM, MXD, MXN}) */
+ uint64_t rsdfl : 32; /**< [ 31: 0](RO/H) BIST failure status for RSDQW0-31. */
+#else /* Word 0 - Little Endian */
+ uint64_t rsdfl : 32; /**< [ 31: 0](RO/H) BIST failure status for RSDQW0-31. */
+ uint64_t mibfl : 5; /**< [ 36: 32](RO/H) BIST failure status for various MIB memories. ({XMD, IPM, IRM, MXD, MXN}) */
+ uint64_t reserved_37_63 : 27;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_cbcx_bist_status_s cn; */
+};
+typedef union bdk_l2c_cbcx_bist_status bdk_l2c_cbcx_bist_status_t;
+
+static inline uint64_t BDK_L2C_CBCX_BIST_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_CBCX_BIST_STATUS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0580a0000ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0580a0000ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e0580a0000ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("L2C_CBCX_BIST_STATUS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_CBCX_BIST_STATUS(a) bdk_l2c_cbcx_bist_status_t
+#define bustype_BDK_L2C_CBCX_BIST_STATUS(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_CBCX_BIST_STATUS(a) "L2C_CBCX_BIST_STATUS"
+#define device_bar_BDK_L2C_CBCX_BIST_STATUS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_CBCX_BIST_STATUS(a) (a)
+#define arguments_BDK_L2C_CBCX_BIST_STATUS(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) l2c_cbc#_dll
+ *
+ * L2C CBC DLL Observability Register
+ * Register for DLL observability.
+ */
+union bdk_l2c_cbcx_dll
+{
+ uint64_t u;
+ struct bdk_l2c_cbcx_dll_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_60_63 : 4;
+ uint64_t max_dll_setting : 12; /**< [ 59: 48](RO/H) Max reported DLL setting. */
+ uint64_t min_dll_setting : 12; /**< [ 47: 36](RO/H) Min reported DLL setting. */
+ uint64_t pd_pos_rclk_refclk : 1; /**< [ 35: 35](RO/H) Phase detector output. */
+ uint64_t pdl_rclk_refclk : 1; /**< [ 34: 34](RO/H) Phase detector output. */
+ uint64_t pdr_rclk_refclk : 1; /**< [ 33: 33](RO/H) Phase detector output. */
+ uint64_t reserved_32 : 1;
+ uint64_t dly_elem_enable : 16; /**< [ 31: 16](RO/H) Delay element enable. */
+ uint64_t dll_setting : 12; /**< [ 15: 4](RO/H) DLL setting. */
+ uint64_t reserved_1_3 : 3;
+ uint64_t dll_lock : 1; /**< [ 0: 0](RO/H) DLL locked. */
+#else /* Word 0 - Little Endian */
+ uint64_t dll_lock : 1; /**< [ 0: 0](RO/H) DLL locked. */
+ uint64_t reserved_1_3 : 3;
+ uint64_t dll_setting : 12; /**< [ 15: 4](RO/H) DLL setting. */
+ uint64_t dly_elem_enable : 16; /**< [ 31: 16](RO/H) Delay element enable. */
+ uint64_t reserved_32 : 1;
+ uint64_t pdr_rclk_refclk : 1; /**< [ 33: 33](RO/H) Phase detector output. */
+ uint64_t pdl_rclk_refclk : 1; /**< [ 34: 34](RO/H) Phase detector output. */
+ uint64_t pd_pos_rclk_refclk : 1; /**< [ 35: 35](RO/H) Phase detector output. */
+ uint64_t min_dll_setting : 12; /**< [ 47: 36](RO/H) Min reported DLL setting. */
+ uint64_t max_dll_setting : 12; /**< [ 59: 48](RO/H) Max reported DLL setting. */
+ uint64_t reserved_60_63 : 4;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_cbcx_dll_s cn; */
+};
+typedef union bdk_l2c_cbcx_dll bdk_l2c_cbcx_dll_t;
+
+static inline uint64_t BDK_L2C_CBCX_DLL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_CBCX_DLL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e058040000ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e058040000ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e058040000ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("L2C_CBCX_DLL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_CBCX_DLL(a) bdk_l2c_cbcx_dll_t
+#define bustype_BDK_L2C_CBCX_DLL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_CBCX_DLL(a) "L2C_CBCX_DLL"
+#define device_bar_BDK_L2C_CBCX_DLL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_CBCX_DLL(a) (a)
+#define arguments_BDK_L2C_CBCX_DLL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) l2c_cbc#_iocerr
+ *
+ * L2C CBC Error Information Registers
+ * Reserved.
+ */
+union bdk_l2c_cbcx_iocerr
+{
+ uint64_t u;
+ struct bdk_l2c_cbcx_iocerr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_cbcx_iocerr_s cn; */
+};
+typedef union bdk_l2c_cbcx_iocerr bdk_l2c_cbcx_iocerr_t;
+
+static inline uint64_t BDK_L2C_CBCX_IOCERR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_CBCX_IOCERR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e058080010ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e058080010ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e058080010ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("L2C_CBCX_IOCERR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_CBCX_IOCERR(a) bdk_l2c_cbcx_iocerr_t
+#define bustype_BDK_L2C_CBCX_IOCERR(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_CBCX_IOCERR(a) "L2C_CBCX_IOCERR"
+#define device_bar_BDK_L2C_CBCX_IOCERR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_CBCX_IOCERR(a) (a)
+#define arguments_BDK_L2C_CBCX_IOCERR(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) l2c_cbc#_iodisocierr
+ *
+ * L2C CBC IODISOCI Error Information Registers
+ * This register records error information associated with IORDDISOCI/IOWRDISOCI interrupts.
+ * IOWRDISOCI events take priority over previously captured IORDDISOCI events. Of the available
+ * I/O transactions, some commands will either set [IORDDISOCI], set [IOWRDISOCI], or set both
+ * [IORDDISOCI] and [IOWRDISOCI]. See L2C_CBC()_INT_W1C for information about which I/O
+ * transactions may result in IORDDISOCI/IOWRDISOCI interrupts.
+ */
+union bdk_l2c_cbcx_iodisocierr
+{
+ uint64_t u;
+ struct bdk_l2c_cbcx_iodisocierr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t iorddisoci : 1; /**< [ 63: 63](RO/H) Logged information is for an IORDDISOCI error. */
+ uint64_t iowrdisoci : 1; /**< [ 62: 62](RO/H) Logged information is for an IOWRDISOCI error. */
+ uint64_t reserved_59_61 : 3;
+ uint64_t cmd : 7; /**< [ 58: 52](RO/H) Encoding of XMC command.
+ Internal:
+ Enumerated by XMC_CMD_E. */
+ uint64_t ppvid : 6; /**< [ 51: 46](RO/H) CMB source PPVID. */
+ uint64_t node : 2; /**< [ 45: 44](RO/H) Destination node ID. */
+ uint64_t did : 8; /**< [ 43: 36](RO/H) Destination device ID. */
+ uint64_t addr : 36; /**< [ 35: 0](RO/H) I/O address. */
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 36; /**< [ 35: 0](RO/H) I/O address. */
+ uint64_t did : 8; /**< [ 43: 36](RO/H) Destination device ID. */
+ uint64_t node : 2; /**< [ 45: 44](RO/H) Destination node ID. */
+ uint64_t ppvid : 6; /**< [ 51: 46](RO/H) CMB source PPVID. */
+ uint64_t cmd : 7; /**< [ 58: 52](RO/H) Encoding of XMC command.
+ Internal:
+ Enumerated by XMC_CMD_E. */
+ uint64_t reserved_59_61 : 3;
+ uint64_t iowrdisoci : 1; /**< [ 62: 62](RO/H) Logged information is for an IOWRDISOCI error. */
+ uint64_t iorddisoci : 1; /**< [ 63: 63](RO/H) Logged information is for an IORDDISOCI error. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_cbcx_iodisocierr_s cn; */
+};
+typedef union bdk_l2c_cbcx_iodisocierr bdk_l2c_cbcx_iodisocierr_t;
+
+static inline uint64_t BDK_L2C_CBCX_IODISOCIERR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_CBCX_IODISOCIERR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e058080008ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e058080008ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e058080008ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("L2C_CBCX_IODISOCIERR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_CBCX_IODISOCIERR(a) bdk_l2c_cbcx_iodisocierr_t
+#define bustype_BDK_L2C_CBCX_IODISOCIERR(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_CBCX_IODISOCIERR(a) "L2C_CBCX_IODISOCIERR"
+#define device_bar_BDK_L2C_CBCX_IODISOCIERR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_CBCX_IODISOCIERR(a) (a)
+#define arguments_BDK_L2C_CBCX_IODISOCIERR(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) l2c_cbc#_miberr
+ *
+ * L2C CBC MIB Error Information Registers
+ * This register records error information for all CBC MIB errors. An error locks the INDEX and
+ * [SYN] fields and sets the bit corresponding to the error received. MIBDBE errors take priority
+ * and overwrite an earlier logged MIBSBE error. Only one of [MIBSBE]/[MIBDBE] is set at any
+ * given time and serves to document which error the INDEX/[SYN] is associated with. The syndrome
+ * is recorded for DBE errors, though the utility of the value is not clear.
+ */
+union bdk_l2c_cbcx_miberr
+{
+ uint64_t u;
+ struct bdk_l2c_cbcx_miberr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t mibdbe : 1; /**< [ 63: 63](RO/H) INDEX/SYN corresponds to a double-bit MIB ECC error. */
+ uint64_t mibsbe : 1; /**< [ 62: 62](RO/H) INDEX/SYN corresponds to a single-bit MIB ECC error. */
+ uint64_t reserved_40_61 : 22;
+ uint64_t syn : 8; /**< [ 39: 32](RO/H) Error syndrome. */
+ uint64_t reserved_3_31 : 29;
+ uint64_t memid : 2; /**< [ 2: 1](RO/H) Indicates the memory that had the error.
+ 0x0 = Error from MXB_VC_MRN, MXB_VC_MFN, MXB_VC_MPN VCs.
+ 0x1 = Error from MXB_VC_MRD, MXB_VC_MPD VCs.
+ 0x2 = Error from MXB_VC_IRM VC.
+ 0x3 = Error from MXB_VC_IPM VC. */
+ uint64_t mibnum : 1; /**< [ 0: 0](RO/H) Indicates the MIB bus that had the error. */
+#else /* Word 0 - Little Endian */
+ uint64_t mibnum : 1; /**< [ 0: 0](RO/H) Indicates the MIB bus that had the error. */
+ uint64_t memid : 2; /**< [ 2: 1](RO/H) Indicates the memory that had the error.
+ 0x0 = Error from MXB_VC_MRN, MXB_VC_MFN, MXB_VC_MPN VCs.
+ 0x1 = Error from MXB_VC_MRD, MXB_VC_MPD VCs.
+ 0x2 = Error from MXB_VC_IRM VC.
+ 0x3 = Error from MXB_VC_IPM VC. */
+ uint64_t reserved_3_31 : 29;
+ uint64_t syn : 8; /**< [ 39: 32](RO/H) Error syndrome. */
+ uint64_t reserved_40_61 : 22;
+ uint64_t mibsbe : 1; /**< [ 62: 62](RO/H) INDEX/SYN corresponds to a single-bit MIB ECC error. */
+ uint64_t mibdbe : 1; /**< [ 63: 63](RO/H) INDEX/SYN corresponds to a double-bit MIB ECC error. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_cbcx_miberr_s cn; */
+};
+typedef union bdk_l2c_cbcx_miberr bdk_l2c_cbcx_miberr_t;
+
+static inline uint64_t BDK_L2C_CBCX_MIBERR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_CBCX_MIBERR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e058080020ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e058080020ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e058080020ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("L2C_CBCX_MIBERR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_CBCX_MIBERR(a) bdk_l2c_cbcx_miberr_t
+#define bustype_BDK_L2C_CBCX_MIBERR(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_CBCX_MIBERR(a) "L2C_CBCX_MIBERR"
+#define device_bar_BDK_L2C_CBCX_MIBERR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_CBCX_MIBERR(a) (a)
+#define arguments_BDK_L2C_CBCX_MIBERR(a) (a),-1,-1,-1
+
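+/* Illustrative sketch (not part of the generated header): classifying a
+ * logged CBC MIB error. Only one of [MIBSBE]/[MIBDBE] is set at a time,
+ * and a DBE overwrites an earlier SBE, so checking MIBDBE first reports
+ * the more severe error; L2C_CBC()_RSDERR below follows the same pattern
+ * with [RSDSBE]/[RSDDBE]. The direct volatile load assumes identity-mapped
+ * CSRs. */
+static inline int example_l2c_cbc_mib_error_severity(unsigned long cbc)
+{
+    bdk_l2c_cbcx_miberr_t err;
+    err.u = *(volatile uint64_t *)BDK_L2C_CBCX_MIBERR(cbc);
+    if (err.s.mibdbe)
+        return 2; /* double-bit ECC error logged */
+    if (err.s.mibsbe)
+        return 1; /* single-bit ECC error logged */
+    return 0;     /* no error logged */
+}
+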
+/**
+ * Register (RSL) l2c_cbc#_rsderr
+ *
+ * L2C CBC RSD Error Information Registers
+ * This register records error information for all CBC RSD errors.
+ * An error locks the INDEX and [SYN] fields and sets the bit corresponding to the error received.
+ * RSDDBE errors take priority and overwrite an earlier logged RSDSBE error. Only one of
+ * [RSDSBE]/[RSDDBE] is set at any given time and serves to document which error the INDEX/[SYN]
+ * is associated with.
+ * The syndrome is recorded for DBE errors, though the utility of the value is not clear.
+ */
+union bdk_l2c_cbcx_rsderr
+{
+ uint64_t u;
+ struct bdk_l2c_cbcx_rsderr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t rsddbe : 1; /**< [ 63: 63](RO/H) INDEX/SYN corresponds to a double-bit RSD ECC error. */
+ uint64_t rsdsbe : 1; /**< [ 62: 62](RO/H) INDEX/SYN corresponds to a single-bit RSD ECC error. */
+ uint64_t reserved_40_61 : 22;
+ uint64_t syn : 8; /**< [ 39: 32](RO/H) Error syndrome. */
+ uint64_t reserved_9_31 : 23;
+ uint64_t tadnum : 3; /**< [ 8: 6](RO/H) Indicates the TAD FIFO containing the error. */
+ uint64_t qwnum : 2; /**< [ 5: 4](RO/H) Indicates the QW containing the error. */
+ uint64_t rsdnum : 4; /**< [ 3: 0](RO/H) Indicates the RSD that had the error. */
+#else /* Word 0 - Little Endian */
+ uint64_t rsdnum : 4; /**< [ 3: 0](RO/H) Indicates the RSD that had the error. */
+ uint64_t qwnum : 2; /**< [ 5: 4](RO/H) Indicates the QW containing the error. */
+ uint64_t tadnum : 3; /**< [ 8: 6](RO/H) Indicates the TAD FIFO containing the error. */
+ uint64_t reserved_9_31 : 23;
+ uint64_t syn : 8; /**< [ 39: 32](RO/H) Error syndrome. */
+ uint64_t reserved_40_61 : 22;
+ uint64_t rsdsbe : 1; /**< [ 62: 62](RO/H) INDEX/SYN corresponds to a single-bit RSD ECC error. */
+ uint64_t rsddbe : 1; /**< [ 63: 63](RO/H) INDEX/SYN corresponds to a double-bit RSD ECC error. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_cbcx_rsderr_s cn; */
+};
+typedef union bdk_l2c_cbcx_rsderr bdk_l2c_cbcx_rsderr_t;
+
+static inline uint64_t BDK_L2C_CBCX_RSDERR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_CBCX_RSDERR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e058080018ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e058080018ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e058080018ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("L2C_CBCX_RSDERR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_CBCX_RSDERR(a) bdk_l2c_cbcx_rsderr_t
+#define bustype_BDK_L2C_CBCX_RSDERR(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_CBCX_RSDERR(a) "L2C_CBCX_RSDERR"
+#define device_bar_BDK_L2C_CBCX_RSDERR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_CBCX_RSDERR(a) (a)
+#define arguments_BDK_L2C_CBCX_RSDERR(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) l2c_ctl
+ *
+ * L2C Control Register
+ */
+union bdk_l2c_ctl
+{
+ uint64_t u;
+ struct bdk_l2c_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t ocla_qos : 3; /**< [ 31: 29](R/W) QOS level for the transactions from OCLA to L2C. */
+ uint64_t reserved_28 : 1;
+ uint64_t disstgl2i : 1; /**< [ 27: 27](R/W) Disable STGL2Is from changing the tags. */
+ uint64_t reserved_25_26 : 2;
+ uint64_t discclk : 1; /**< [ 24: 24](R/W) Disable conditional clocking in L2C PNR blocks. */
+ uint64_t reserved_16_23 : 8;
+ uint64_t rsp_arb_mode : 1; /**< [ 15: 15](R/W) Arbitration mode for RSC/RSD bus. 0 = round-robin; 1 = static priority.
+ 1. IOR data.
+ 2. STIN/FILLs.
+ 3. STDN/SCDN/SCFL. */
+ uint64_t xmc_arb_mode : 1; /**< [ 14: 14](R/W) Arbitration mode for ADD bus QOS queues. 0 = fully determined through QOS, 1 = QOS0
+ highest priority; QOS 1-7 use normal mode. */
+ uint64_t rdf_cnt : 8; /**< [ 13: 6](R/W) Defines the sample point of the LMC response data in the DDR-clock/core-clock crossing.
+ For optimal performance set to
+ 10 * (DDR-clock period/core-clock period) - 1.
+ To disable set to 0. All other values are reserved. */
+ uint64_t disdwb : 1; /**< [ 5: 5](R/W) Suppresses DWB and INVL2 commands, effectively turning them into NOPs.
+ Internal:
+ The real behavior is DWB and INVL2 commands are forced to look like STGL2I commands with
+ DISSTGL2I set. */
+ uint64_t disgsyncto : 1; /**< [ 4: 4](R/W) Disable global sync timeout. */
+ uint64_t disldwb : 1; /**< [ 3: 3](R/W) Suppresses the DWB functionality of any received LDWB, effectively turning them into LDTs. */
+ uint64_t dissblkdty : 1; /**< [ 2: 2](R/W) Disable bandwidth optimization between L2 and LMC and MOB which only transfers modified
+ sub-blocks when possible. In a CCPI system all nodes must use the same setting of
+ DISSBLKDTY or operation is undefined. */
+ uint64_t disecc : 1; /**< [ 1: 1](R/W) Tag and data ECC disable. */
+ uint64_t disidxalias : 1; /**< [ 0: 0](R/W) Index alias disable. */
+#else /* Word 0 - Little Endian */
+ uint64_t disidxalias : 1; /**< [ 0: 0](R/W) Index alias disable. */
+ uint64_t disecc : 1; /**< [ 1: 1](R/W) Tag and data ECC disable. */
+ uint64_t dissblkdty : 1; /**< [ 2: 2](R/W) Disable bandwidth optimization between L2 and LMC and MOB which only transfers modified
+ sub-blocks when possible. In a CCPI system all nodes must use the same setting of
+ DISSBLKDTY or operation is undefined. */
+ uint64_t disldwb : 1; /**< [ 3: 3](R/W) Suppresses the DWB functionality of any received LDWB, effectively turning them into LDTs. */
+ uint64_t disgsyncto : 1; /**< [ 4: 4](R/W) Disable global sync timeout. */
+ uint64_t disdwb : 1; /**< [ 5: 5](R/W) Suppresses DWB and INVL2 commands, effectively turning them into NOPs.
+ Internal:
+ The real behavior is DWB and INVL2 commands are forced to look like STGL2I commands with
+ DISSTGL2I set. */
+ uint64_t rdf_cnt : 8; /**< [ 13: 6](R/W) Defines the sample point of the LMC response data in the DDR-clock/core-clock crossing.
+ For optimal performance set to
+ 10 * (DDR-clock period/core-clock period) - 1.
+ To disable set to 0. All other values are reserved. */
+ uint64_t xmc_arb_mode : 1; /**< [ 14: 14](R/W) Arbitration mode for ADD bus QOS queues. 0 = fully determined through QOS, 1 = QOS0
+ highest priority; QOS 1-7 use normal mode. */
+ uint64_t rsp_arb_mode : 1; /**< [ 15: 15](R/W) Arbitration mode for RSC/RSD bus. 0 = round-robin; 1 = static priority.
+ 1. IOR data.
+ 2. STIN/FILLs.
+ 3. STDN/SCDN/SCFL. */
+ uint64_t reserved_16_23 : 8;
+ uint64_t discclk : 1; /**< [ 24: 24](R/W) Disable conditional clocking in L2C PNR blocks. */
+ uint64_t reserved_25_26 : 2;
+ uint64_t disstgl2i : 1; /**< [ 27: 27](R/W) Disable STGL2Is from changing the tags. */
+ uint64_t reserved_28 : 1;
+ uint64_t ocla_qos : 3; /**< [ 31: 29](R/W) QOS level for the transactions from OCLA to L2C. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_l2c_ctl_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t ocla_qos : 3; /**< [ 31: 29](R/W) QOS level for the transactions from OCLA to L2C. */
+ uint64_t reserved_28 : 1;
+ uint64_t disstgl2i : 1; /**< [ 27: 27](R/W) Disable STGL2Is from changing the tags. */
+ uint64_t reserved_25_26 : 2;
+ uint64_t discclk : 1; /**< [ 24: 24](R/W) Disable conditional clocking in L2C PNR blocks. */
+ uint64_t reserved_16_23 : 8;
+ uint64_t rsp_arb_mode : 1; /**< [ 15: 15](R/W) Arbitration mode for RSC/RSD bus. 0 = round-robin; 1 = static priority.
+ 1. IOR data.
+ 2. STIN/FILLs.
+ 3. STDN/SCDN/SCFL. */
+ uint64_t xmc_arb_mode : 1; /**< [ 14: 14](R/W) Arbitration mode for ADD bus QOS queues. 0 = fully determined through QOS, 1 = QOS0
+ highest priority; QOS 1-7 use normal mode. */
+ uint64_t rdf_cnt : 8; /**< [ 13: 6](R/W) Defines the sample point of the LMC response data in the DDR-clock/core-clock crossing.
+ For optimal performance set to
+ 10 * (DDR-clock period/core-clock period) - 1.
+ To disable set to 0. All other values are reserved. */
+ uint64_t reserved_5 : 1;
+ uint64_t reserved_4 : 1;
+ uint64_t disldwb : 1; /**< [ 3: 3](R/W) Suppresses the DWB functionality of any received LDWB, effectively turning them into LDTs. */
+ uint64_t dissblkdty : 1; /**< [ 2: 2](R/W) Disable bandwidth optimization between L2 and LMC and MOB which only transfers modified
+ sub-blocks when possible. In a CCPI system all nodes must use the same setting of
+ DISSBLKDTY or operation is undefined. */
+ uint64_t disecc : 1; /**< [ 1: 1](R/W) Tag and data ECC disable. */
+ uint64_t disidxalias : 1; /**< [ 0: 0](R/W) Index alias disable. */
+#else /* Word 0 - Little Endian */
+ uint64_t disidxalias : 1; /**< [ 0: 0](R/W) Index alias disable. */
+ uint64_t disecc : 1; /**< [ 1: 1](R/W) Tag and data ECC disable. */
+ uint64_t dissblkdty : 1; /**< [ 2: 2](R/W) Disable bandwidth optimization between L2 and LMC and MOB which only transfers modified
+ sub-blocks when possible. In a CCPI system all nodes must use the same setting of
+ DISSBLKDTY or operation is undefined. */
+ uint64_t disldwb : 1; /**< [ 3: 3](R/W) Suppresses the DWB functionality of any received LDWB, effectively turning them into LDTs. */
+ uint64_t reserved_4 : 1;
+ uint64_t reserved_5 : 1;
+ uint64_t rdf_cnt : 8; /**< [ 13: 6](R/W) Defines the sample point of the LMC response data in the DDR-clock/core-clock crossing.
+ For optimal performance set to
+ 10 * (DDR-clock period/core-clock period) - 1.
+ To disable set to 0. All other values are reserved. */
+ uint64_t xmc_arb_mode : 1; /**< [ 14: 14](R/W) Arbitration mode for ADD bus QOS queues. 0 = fully determined through QOS, 1 = QOS0
+ highest priority; QOS 1-7 use normal mode. */
+ uint64_t rsp_arb_mode : 1; /**< [ 15: 15](R/W) Arbitration mode for RSC/RSD bus. 0 = round-robin; 1 = static priority.
+ 1. IOR data.
+ 2. STIN/FILLs.
+ 3. STDN/SCDN/SCFL. */
+ uint64_t reserved_16_23 : 8;
+ uint64_t discclk : 1; /**< [ 24: 24](R/W) Disable conditional clocking in L2C PNR blocks. */
+ uint64_t reserved_25_26 : 2;
+ uint64_t disstgl2i : 1; /**< [ 27: 27](R/W) Disable STGL2Is from changing the tags. */
+ uint64_t reserved_28 : 1;
+ uint64_t ocla_qos : 3; /**< [ 31: 29](R/W) QOS level for the transactions from OCLA to L2C. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ /* struct bdk_l2c_ctl_s cn81xx; */
+ /* struct bdk_l2c_ctl_s cn83xx; */
+ struct bdk_l2c_ctl_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t ocla_qos : 3; /**< [ 31: 29](R/W) QOS level for the transactions from OCLA to L2C. */
+ uint64_t reserved_28 : 1;
+ uint64_t disstgl2i : 1; /**< [ 27: 27](R/W) Disable STGL2Is from changing the tags. */
+ uint64_t reserved_25_26 : 2;
+ uint64_t discclk : 1; /**< [ 24: 24](R/W) Disable conditional clocking in L2C PNR blocks. */
+ uint64_t reserved_16_23 : 8;
+ uint64_t rsp_arb_mode : 1; /**< [ 15: 15](R/W) Arbitration mode for RSC/RSD bus. 0 = round-robin; 1 = static priority.
+ 1. IOR data.
+ 2. STIN/FILLs.
+ 3. STDN/SCDN/SCFL. */
+ uint64_t xmc_arb_mode : 1; /**< [ 14: 14](R/W) Arbitration mode for ADD bus QOS queues. 0 = fully determined through QOS, 1 = QOS0
+ highest priority; QOS 1-7 use normal mode. */
+ uint64_t rdf_cnt : 8; /**< [ 13: 6](R/W) Defines the sample point of the LMC response data in the DDR-clock/core-clock crossing.
+ For optimal performance set to
+ 10 * (DDR-clock period/core-clock period) - 1.
+ To disable set to 0. All other values are reserved. */
+ uint64_t reserved_5 : 1;
+ uint64_t disgsyncto : 1; /**< [ 4: 4](R/W) Disable global sync timeout. */
+ uint64_t disldwb : 1; /**< [ 3: 3](R/W) Suppresses the DWB functionality of any received LDWB, effectively turning them into LDTs. */
+ uint64_t dissblkdty : 1; /**< [ 2: 2](R/W) Disable bandwidth optimization between L2 and LMC and MOB which only transfers modified
+ sub-blocks when possible. In a CCPI system all nodes must use the same setting of
+ DISSBLKDTY or operation is undefined. */
+ uint64_t disecc : 1; /**< [ 1: 1](R/W) Tag and data ECC disable. */
+ uint64_t disidxalias : 1; /**< [ 0: 0](R/W) Index alias disable. */
+#else /* Word 0 - Little Endian */
+ uint64_t disidxalias : 1; /**< [ 0: 0](R/W) Index alias disable. */
+ uint64_t disecc : 1; /**< [ 1: 1](R/W) Tag and data ECC disable. */
+ uint64_t dissblkdty : 1; /**< [ 2: 2](R/W) Disable bandwidth optimization between L2 and LMC and MOB which only transfers modified
+ sub-blocks when possible. In a CCPI system all nodes must use the same setting of
+ DISSBLKDTY or operation is undefined. */
+ uint64_t disldwb : 1; /**< [ 3: 3](R/W) Suppresses the DWB functionality of any received LDWB, effectively turning them into LDTs. */
+ uint64_t disgsyncto : 1; /**< [ 4: 4](R/W) Disable global sync timeout. */
+ uint64_t reserved_5 : 1;
+ uint64_t rdf_cnt : 8; /**< [ 13: 6](R/W) Defines the sample point of the LMC response data in the DDR-clock/core-clock crossing.
+ For optimal performance set to
+ 10 * (DDR-clock period/core-clock period) - 1.
+ To disable set to 0. All other values are reserved. */
+ uint64_t xmc_arb_mode : 1; /**< [ 14: 14](R/W) Arbitration mode for ADD bus QOS queues. 0 = fully determined through QOS, 1 = QOS0
+ highest priority; QOS 1-7 use normal mode. */
+ uint64_t rsp_arb_mode : 1; /**< [ 15: 15](R/W) Arbitration mode for RSC/RSD bus. 0 = round-robin; 1 = static priority.
+ 1. IOR data.
+ 2. STIN/FILLs.
+ 3. STDN/SCDN/SCFL. */
+ uint64_t reserved_16_23 : 8;
+ uint64_t discclk : 1; /**< [ 24: 24](R/W) Disable conditional clocking in L2C PNR blocks. */
+ uint64_t reserved_25_26 : 2;
+ uint64_t disstgl2i : 1; /**< [ 27: 27](R/W) Disable STGL2Is from changing the tags. */
+ uint64_t reserved_28 : 1;
+ uint64_t ocla_qos : 3; /**< [ 31: 29](R/W) QOS level for the transactions from OCLA to L2C. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_l2c_ctl bdk_l2c_ctl_t;
+
+#define BDK_L2C_CTL BDK_L2C_CTL_FUNC()
+static inline uint64_t BDK_L2C_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_CTL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x87e080800000ll;
+ __bdk_csr_fatal("L2C_CTL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_CTL bdk_l2c_ctl_t
+#define bustype_BDK_L2C_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_CTL "L2C_CTL"
+#define device_bar_BDK_L2C_CTL 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_CTL 0
+#define arguments_BDK_L2C_CTL -1,-1,-1,-1
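+
+/*
+ * Illustrative sketch (not part of the imported BDK sources): a typical
+ * read-modify-write of L2C_CTL through the union above. It assumes the
+ * BDK_CSR_INIT/BDK_CSR_WRITE accessor macros from bdk-csr.h and the
+ * bdk_node_t type from bdk-numa.h.
+ */
+static inline void example_l2c_ctl_disable_idx_alias(bdk_node_t node)
+{
+    BDK_CSR_INIT(ctl, node, BDK_L2C_CTL);    /* snapshot the current value */
+    ctl.s.disidxalias = 1;                   /* set [DISIDXALIAS] (bit 0) */
+    BDK_CSR_WRITE(node, BDK_L2C_CTL, ctl.u); /* write the whole word back */
+}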
+
+/**
+ * Register (RSL) l2c_ecc_ctl
+ *
+ * L2C ECC Control Register
+ * Flip ECC bits to generate single-bit or double-bit ECC errors in all instances of a given
+ * memory type. Encodings are as follows.
+ * 0x0 = No error.
+ * 0x1 = Single-bit error on ECC\<0\>.
+ * 0x2 = Single-bit error on ECC\<1\>.
+ * 0x3 = Double-bit error on ECC\<1:0\>.
+ *
+ * L2DFLIP allows software to generate L2DSBE, L2DDBE, VBFSBE, and VBFDBE errors for the purposes
+ * of testing error handling code. When one (or both) of these bits are set, a PL2 that misses in
+ * the L2 will fill with the appropriate error in the first two OWs of the fill. Software can
+ * determine which OW pair gets the error by choosing the desired fill order (address\<6:5\>). A
+ * PL2 that hits in the L2 will not inject any errors. Therefore sending a WBIL2 prior to the PL2
+ * is recommended to make a miss likely. (If multiple processors are involved, software must be
+ * sure that no other processor or I/O device can bring the block into the L2).
+ *
+ * To generate a VBFSBE or VBFDBE, software must first get the cache block into the cache with an
+ * error using a PL2 that misses the L2. Then a store partial to a portion of the cache block
+ * without the error must change the block to dirty. Then, a subsequent WBL2/WBIL2/victim will
+ * trigger the VBFSBE/VBFDBE error.
+ */
+union bdk_l2c_ecc_ctl
+{
+ uint64_t u;
+ struct bdk_l2c_ecc_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t mibflip : 2; /**< [ 11: 10](R/W) Generate an ECC error in the MIB. See note above. */
+ uint64_t l2dflip : 2; /**< [ 9: 8](R/W) Generate an ECC error in the L2D. See note above. */
+ uint64_t l2tflip : 2; /**< [ 7: 6](R/W) Generate an ECC error in the L2T. */
+ uint64_t rdfflip : 2; /**< [ 5: 4](R/W) Generate an ECC error in RDF memory. */
+ uint64_t xmdflip : 2; /**< [ 3: 2](R/W) Generate an ECC error in all corresponding CBC XMD memories. */
+ uint64_t reserved_0_1 : 2;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_1 : 2;
+ uint64_t xmdflip : 2; /**< [ 3: 2](R/W) Generate an ECC error in all corresponding CBC XMD memories. */
+ uint64_t rdfflip : 2; /**< [ 5: 4](R/W) Generate an ECC error in RDF memory. */
+ uint64_t l2tflip : 2; /**< [ 7: 6](R/W) Generate an ECC error in the L2T. */
+ uint64_t l2dflip : 2; /**< [ 9: 8](R/W) Generate an ECC error in the L2D. See note above. */
+ uint64_t mibflip : 2; /**< [ 11: 10](R/W) Generate an ECC error in the MIB. See note above. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_ecc_ctl_s cn; */
+};
+typedef union bdk_l2c_ecc_ctl bdk_l2c_ecc_ctl_t;
+
+#define BDK_L2C_ECC_CTL BDK_L2C_ECC_CTL_FUNC()
+static inline uint64_t BDK_L2C_ECC_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_ECC_CTL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x87e080800010ll;
+ __bdk_csr_fatal("L2C_ECC_CTL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_ECC_CTL bdk_l2c_ecc_ctl_t
+#define bustype_BDK_L2C_ECC_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_ECC_CTL "L2C_ECC_CTL"
+#define device_bar_BDK_L2C_ECC_CTL 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_ECC_CTL 0
+#define arguments_BDK_L2C_ECC_CTL -1,-1,-1,-1
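+
+/*
+ * Illustrative sketch (not part of the imported BDK sources): arm a single-bit
+ * L2D ECC error per the L2DFLIP note above. A subsequent PL2 that misses in the
+ * L2 (e.g. after a WBIL2 to the line) fills with the flipped ECC. Assumes the
+ * BDK_CSR_INIT/BDK_CSR_WRITE accessor macros from bdk-csr.h.
+ */
+static inline void example_l2c_inject_l2d_sbe(bdk_node_t node)
+{
+    BDK_CSR_INIT(ecc, node, BDK_L2C_ECC_CTL);
+    ecc.s.l2dflip = 0x1;                     /* single-bit error on ECC<0> */
+    BDK_CSR_WRITE(node, BDK_L2C_ECC_CTL, ecc.u);
+}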
+
+/**
+ * Register (RSL) l2c_mci#_bist_status
+ *
+ * Level 2 MCI BIST Status (DCLK) Registers
+ * If clear BIST is desired, [CLEAR_BIST] must be written to 1 before [START_BIST] is
+ * written to 1 using a separate CSR write operation. [CLEAR_BIST] must not be changed
+ * after writing [START_BIST] to 1 until the BIST operation completes (indicated by
+ * [START_BIST] returning to 0) or operation is undefined.
+ */
+union bdk_l2c_mcix_bist_status
+{
+ uint64_t u;
+ struct bdk_l2c_mcix_bist_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t start_bist : 1; /**< [ 63: 63](R/W/H) When written to 1, starts BIST. Remains 1 until BIST is complete. */
+ uint64_t clear_bist : 1; /**< [ 62: 62](R/W) When BIST is triggered, run clear BIST. */
+ uint64_t reserved_2_61 : 60;
+ uint64_t vbffl : 2; /**< [ 1: 0](RO/H) BIST failure status for VBF0-1. */
+#else /* Word 0 - Little Endian */
+ uint64_t vbffl : 2; /**< [ 1: 0](RO/H) BIST failure status for VBF0-1. */
+ uint64_t reserved_2_61 : 60;
+ uint64_t clear_bist : 1; /**< [ 62: 62](R/W) When BIST is triggered, run clear BIST. */
+ uint64_t start_bist : 1; /**< [ 63: 63](R/W/H) When written to 1, starts BIST. Remains 1 until BIST is complete. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_mcix_bist_status_s cn; */
+};
+typedef union bdk_l2c_mcix_bist_status bdk_l2c_mcix_bist_status_t;
+
+static inline uint64_t BDK_L2C_MCIX_BIST_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_MCIX_BIST_STATUS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e05c020000ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=2))
+ return 0x87e05c020000ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e05c020000ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("L2C_MCIX_BIST_STATUS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_MCIX_BIST_STATUS(a) bdk_l2c_mcix_bist_status_t
+#define bustype_BDK_L2C_MCIX_BIST_STATUS(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_MCIX_BIST_STATUS(a) "L2C_MCIX_BIST_STATUS"
+#define device_bar_BDK_L2C_MCIX_BIST_STATUS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_MCIX_BIST_STATUS(a) (a)
+#define arguments_BDK_L2C_MCIX_BIST_STATUS(a) (a),-1,-1,-1
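+
+/*
+ * Illustrative sketch (not part of the imported BDK sources): the two-step
+ * clear-BIST sequence described above -- [CLEAR_BIST] is set in its own CSR
+ * write before [START_BIST], then software polls [START_BIST] until it returns
+ * to 0. Assumes BDK_CSR_MODIFY/BDK_CSR_WAIT_FOR_FIELD from bdk-csr.h.
+ */
+static inline int example_l2c_mci_clear_bist(bdk_node_t node, unsigned long mci)
+{
+    BDK_CSR_MODIFY(c, node, BDK_L2C_MCIX_BIST_STATUS(mci), c.s.clear_bist = 1);
+    BDK_CSR_MODIFY(c, node, BDK_L2C_MCIX_BIST_STATUS(mci), c.s.start_bist = 1);
+    /* Returns nonzero if BIST did not complete within the timeout. */
+    return BDK_CSR_WAIT_FOR_FIELD(node, BDK_L2C_MCIX_BIST_STATUS(mci),
+                                  start_bist, ==, 0, 10000);
+}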
+
+/**
+ * Register (RSL) l2c_mci#_err
+ *
+ * L2C MCI Error Information Registers
+ * This register records error information for all MCI errors.
+ * An error locks [VBF4], [INDEX], [SYN0] and [SYN1] and sets the bit corresponding to the error
+ * received. VBFDBE errors take priority and will overwrite an earlier logged VBFSBE error. The
+ * information from exactly one VBF read is present at any given time and serves to document
+ * which error(s) were present in the read with the highest priority error.
+ * The syndrome is recorded for DBE errors.
+ */
+union bdk_l2c_mcix_err
+{
+ uint64_t u;
+ struct bdk_l2c_mcix_err_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t vbfdbe1 : 1; /**< [ 63: 63](RO/H) INDEX/SYN1 corresponds to a double-bit VBF ECC error. */
+ uint64_t vbfdbe0 : 1; /**< [ 62: 62](RO/H) INDEX/SYN0 corresponds to a double-bit VBF ECC error. */
+ uint64_t vbfsbe1 : 1; /**< [ 61: 61](RO/H) INDEX/SYN1 corresponds to a single-bit VBF ECC error. */
+ uint64_t vbfsbe0 : 1; /**< [ 60: 60](RO/H) INDEX/SYN0 corresponds to a single-bit VBF ECC error. */
+ uint64_t reserved_48_59 : 12;
+ uint64_t syn1 : 8; /**< [ 47: 40](RO/H) Error syndrome for QW1 ([127:64]).
+ Records only on single bit errors.
+
+ Internal:
+ See bug26334. */
+ uint64_t syn0 : 8; /**< [ 39: 32](RO/H) Error syndrome for QW0 ([63:0]).
+ Records only on single bit errors.
+
+ Internal:
+ See bug26334. */
+ uint64_t reserved_12_31 : 20;
+ uint64_t vbf4 : 1; /**< [ 11: 11](RO/H) When 1, errors were from VBF (4+a); when 0, from VBF (0+a). */
+ uint64_t index : 7; /**< [ 10: 4](RO/H) VBF index which was read and had the error(s). */
+ uint64_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_3 : 4;
+ uint64_t index : 7; /**< [ 10: 4](RO/H) VBF index which was read and had the error(s). */
+ uint64_t vbf4 : 1; /**< [ 11: 11](RO/H) When 1, errors were from VBF (4+a); when 0, from VBF (0+a). */
+ uint64_t reserved_12_31 : 20;
+ uint64_t syn0 : 8; /**< [ 39: 32](RO/H) Error syndrome for QW0 ([63:0]).
+ Records only on single bit errors.
+
+ Internal:
+ See bug26334. */
+ uint64_t syn1 : 8; /**< [ 47: 40](RO/H) Error syndrome for QW1 ([127:64]).
+ Records only on single bit errors.
+
+ Internal:
+ See bug26334. */
+ uint64_t reserved_48_59 : 12;
+ uint64_t vbfsbe0 : 1; /**< [ 60: 60](RO/H) INDEX/SYN0 corresponds to a single-bit VBF ECC error. */
+ uint64_t vbfsbe1 : 1; /**< [ 61: 61](RO/H) INDEX/SYN1 corresponds to a single-bit VBF ECC error. */
+ uint64_t vbfdbe0 : 1; /**< [ 62: 62](RO/H) INDEX/SYN0 corresponds to a double-bit VBF ECC error. */
+ uint64_t vbfdbe1 : 1; /**< [ 63: 63](RO/H) INDEX/SYN1 corresponds to a double-bit VBF ECC error. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_mcix_err_s cn; */
+};
+typedef union bdk_l2c_mcix_err bdk_l2c_mcix_err_t;
+
+static inline uint64_t BDK_L2C_MCIX_ERR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_MCIX_ERR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e05c010000ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=2))
+ return 0x87e05c010000ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e05c010000ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("L2C_MCIX_ERR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_MCIX_ERR(a) bdk_l2c_mcix_err_t
+#define bustype_BDK_L2C_MCIX_ERR(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_MCIX_ERR(a) "L2C_MCIX_ERR"
+#define device_bar_BDK_L2C_MCIX_ERR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_MCIX_ERR(a) (a)
+#define arguments_BDK_L2C_MCIX_ERR(a) (a),-1,-1,-1
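+
+/*
+ * Illustrative sketch (not part of the imported BDK sources): decode the logged
+ * MCI error information above. Assumes BDK_CSR_INIT from bdk-csr.h and printf
+ * from the BDK environment.
+ */
+static inline void example_l2c_mci_report_err(bdk_node_t node, unsigned long mci)
+{
+    BDK_CSR_INIT(err, node, BDK_L2C_MCIX_ERR(mci));
+    if (err.s.vbfdbe0 || err.s.vbfdbe1)
+        printf("MCI%lu: double-bit VBF error, VBF%lu index %u\n",
+               mci, (err.s.vbf4 ? 4 : 0) + mci, (unsigned)err.s.index);
+    else if (err.s.vbfsbe0 || err.s.vbfsbe1)
+        printf("MCI%lu: single-bit VBF error, syndromes 0x%x/0x%x\n",
+               mci, (unsigned)err.s.syn0, (unsigned)err.s.syn1);
+}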
+
+/**
+ * Register (RSL) l2c_oci_ctl
+ *
+ * L2C CCPI Control Register
+ */
+union bdk_l2c_oci_ctl
+{
+ uint64_t u;
+ struct bdk_l2c_oci_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_31_63 : 33;
+ uint64_t ncpend : 1; /**< [ 30: 30](RO/H) An indication that a node change is pending. Hardware sets this bit when
+ OCX_COM_NODE[ID] is changed and clears the bit when the node change has taken
+ effect. */
+ uint64_t lock_local_cas : 1; /**< [ 29: 29](RO) Reserved. */
+ uint64_t lock_local_stc : 1; /**< [ 28: 28](RO) Reserved. */
+ uint64_t lock_local_pp : 1; /**< [ 27: 27](RO) Reserved. */
+ uint64_t lngtolen : 5; /**< [ 26: 22](R/W) Reserved.
+ Internal:
+ This only controls the GSYNC timeout in the L2C_CBCs in non-OCI chips. */
+ uint64_t shtolen : 5; /**< [ 21: 17](RO) Reserved. */
+ uint64_t shtoioen : 1; /**< [ 16: 16](RO) Reserved. */
+ uint64_t shtoen : 3; /**< [ 15: 13](RO) Reserved. */
+ uint64_t shto : 1; /**< [ 12: 12](RO) Reserved. */
+ uint64_t inv_mode : 2; /**< [ 11: 10](RO) Reserved. */
+ uint64_t cas_fdx : 1; /**< [ 9: 9](RO) Reserved. */
+ uint64_t rldd_psha : 1; /**< [ 8: 8](RO) Reserved. */
+ uint64_t lock_local_iob : 1; /**< [ 7: 7](RO) Reserved. */
+ uint64_t iofrcl : 1; /**< [ 6: 6](RO) Reserved. */
+ uint64_t reserved_4_5 : 2;
+ uint64_t enaoci : 4; /**< [ 3: 0](RO) CCPI is not present. Any attempt to enable it will be ignored. */
+#else /* Word 0 - Little Endian */
+ uint64_t enaoci : 4; /**< [ 3: 0](RO) CCPI is not present. Any attempt to enable it will be ignored. */
+ uint64_t reserved_4_5 : 2;
+ uint64_t iofrcl : 1; /**< [ 6: 6](RO) Reserved. */
+ uint64_t lock_local_iob : 1; /**< [ 7: 7](RO) Reserved. */
+ uint64_t rldd_psha : 1; /**< [ 8: 8](RO) Reserved. */
+ uint64_t cas_fdx : 1; /**< [ 9: 9](RO) Reserved. */
+ uint64_t inv_mode : 2; /**< [ 11: 10](RO) Reserved. */
+ uint64_t shto : 1; /**< [ 12: 12](RO) Reserved. */
+ uint64_t shtoen : 3; /**< [ 15: 13](RO) Reserved. */
+ uint64_t shtoioen : 1; /**< [ 16: 16](RO) Reserved. */
+ uint64_t shtolen : 5; /**< [ 21: 17](RO) Reserved. */
+ uint64_t lngtolen : 5; /**< [ 26: 22](R/W) Reserved.
+ Internal:
+ This only controls the GSYNC timeout in the L2C_CBCs in non-OCI chips. */
+ uint64_t lock_local_pp : 1; /**< [ 27: 27](RO) Reserved. */
+ uint64_t lock_local_stc : 1; /**< [ 28: 28](RO) Reserved. */
+ uint64_t lock_local_cas : 1; /**< [ 29: 29](RO) Reserved. */
+ uint64_t ncpend : 1; /**< [ 30: 30](RO/H) An indication that a node change is pending. Hardware sets this bit when
+ OCX_COM_NODE[ID] is changed and clears the bit when the node change has taken
+ effect. */
+ uint64_t reserved_31_63 : 33;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_l2c_oci_ctl_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_31_63 : 33;
+ uint64_t reserved_30 : 1;
+ uint64_t lock_local_cas : 1; /**< [ 29: 29](R/W) When set, L2 CAS operations to remote addresses which miss at the requester will be
+ performed locally (if possible) on the requesting node. Default operation will instead
+ send the CAS request to be performed on the home node. For STC ops [LOCK_LOCAL_STC]. */
+ uint64_t lock_local_stc : 1; /**< [ 28: 28](R/W) When set, L2 STC operations to remote addresses which miss at the requester will be
+ performed locally (if possible) on the requesting node. Default operation will instead
+ send the STC request to be performed on the home node. For CAS ops [LOCK_LOCAL_CAS]. */
+ uint64_t lock_local_pp : 1; /**< [ 27: 27](R/W) When clear, core-initiated L2 atomic operations (excluding CAS/STC) to remote
+ addresses which miss at the requester will send the atomic request to be performed on the
+ home node. Default operation will instead be performed locally on the requesting node.
+ For requests initiated by the IOB and for STC and CAS ops, see
+ [LOCK_LOCAL_IOB]/[LOCK_LOCAL_STC]/[LOCK_LOCAL_CAS]. */
+ uint64_t lngtolen : 5; /**< [ 26: 22](R/W) Selects the bit in the counter for the long timeout value (timeout used when [SHTO] is
+ clear). Values supported are between 11 and 29 (for timeout values between 2^11 and
+ 2^29). Actual timeout is between 1x and 2x this interval. For example, if [LNGTOLEN] = 28
+ (the reset value), the timeout is between 256M and 512M core clocks. Note: a value of 0
+ disables this timer. */
+ uint64_t shtolen : 5; /**< [ 21: 17](R/W) Selects the bit in the counter for the short timeout value (timeout used when [SHTO] is
+ set). Values supported are between 9 and 29 (for timeout values between 2^9 and 2^29).
+ Actual timeout is between 1x and 2x this interval. For example, if [SHTOLEN] = 14 (the
+ reset value), the timeout is between 16K and 32K core clocks. Note: a value of 0 disables
+ this timer. */
+ uint64_t shtoioen : 1; /**< [ 16: 16](R/W) When set, if any core issues an IO load, atomic, acking store, acking IOBADDR, or
+ acking LMTST to a node that doesn't exist (existence defined by the ENAOCI bits), then the
+ hardware sets [SHTO]. */
+ uint64_t shtoen : 3; /**< [ 15: 13](R/W) When set, if the corresponding CCPI link is down, the hardware sets [SHTO].
+ See OCX_COM_LINK(0..2)_CTL for a description of what events can contribute to the
+ link_down condition. */
+ uint64_t shto : 1; /**< [ 12: 12](R/W/H) Use short timeout intervals. When set, core uses SDIDTTO for both DID and commit counter
+ timeouts, rather than DIDTTO/DIDTTO2. Similarly, L2C will use short instead of long
+ timeout. */
+ uint64_t inv_mode : 2; /**< [ 11: 10](R/W) Describes how aggressive to be when waiting for local invalidates before sending CCPI
+ responses which act like commits at the remote.
+ 0x0 = Conservative mode, waits until all local invalidates have been sent by their
+ respective CBCs to the cores.
+ 0x1 = Moderate mode, waits until all local invalidates have been sent to their respective
+ CBCs, but not necessarily actually sent to the cores themselves.
+ 0x2 = Aggressive mode, does not wait for local invalidates to begin their processing. */
+ uint64_t cas_fdx : 1; /**< [ 9: 9](R/W) When set, L2 STC/CAS operations performed at the home will immediately bring the block
+ exclusive into the home. Default operation is to first request the block shared and only
+ invalidate the remote if the compare succeeds. */
+ uint64_t rldd_psha : 1; /**< [ 8: 8](R/W) When set, RLDD is assumed to return a shared response (PSHA). Default operation assumes an
+ exclusive response (PEMD). Note that an incorrect assumption only causes an extra tag
+ write to be done upon receiving the response. */
+ uint64_t lock_local_iob : 1; /**< [ 7: 7](R/W) When set, L2 atomic operations (excluding CAS/STC) initiated by IOB to remote addresses
+ which miss at the requester are performed locally on the requesting node. When clear, the
+ operation instead sends the atomic request to be performed on the home node. For requests
+ initiated by a core, and for STC and CAS ops, see
+ [LOCK_LOCAL_PP]/[LOCK_LOCAL_STC]/[LOCK_LOCAL_CAS].
+ Default is set to 1 (local locks). */
+ uint64_t iofrcl : 1; /**< [ 6: 6](R/W) When set, L2C services all I/O read and write operations on the local node, regardless of
+ the value of the node ID bits in the physical address. During normal operation this bit is
+ expected to be 0. Will only transition from 1 to 0, never from 0 to 1. */
+ uint64_t reserved_4_5 : 2;
+ uint64_t enaoci : 4; /**< [ 3: 0](R/W) Enable CCPI processing (one bit per node_id). When set, perform CCPI
+ processing. When clear, CCPI memory writes are blocked and CCPI memory reads
+ return unpredictable data. When clear,
+ CCPI I/O requests and MOC references are processed and sent to OCX where they are
+ ultimately discarded. RDDISOCI/WRDISOCI/IORDDISOCI/IOWRDISOCI interrupts occur if and only
+ if the corresponding ENAOCI\<node\> bit is clear. References to the local node (configured
+ via OCX_COM_NODE[ID]) ignore the value of ENAOCI\<node\> because no CCPI processing is
+ required. Similarly, all I/O references ignore the value of ENAOCI when
+ L2C_OCI_CTL[IOFRCL] is set. */
+#else /* Word 0 - Little Endian */
+ uint64_t enaoci : 4; /**< [ 3: 0](R/W) Enable CCPI processing (one bit per node_id). When set, perform CCPI
+ processing. When clear, CCPI memory writes are blocked and CCPI memory reads
+ return unpredictable data. When clear,
+ CCPI I/O requests and MOC references are processed and sent to OCX where they are
+ ultimately discarded. RDDISOCI/WRDISOCI/IORDDISOCI/IOWRDISOCI interrupts occur if and only
+ if the corresponding ENAOCI\<node\> bit is clear. References to the local node (configured
+ via OCX_COM_NODE[ID]) ignore the value of ENAOCI\<node\> because no CCPI processing is
+ required. Similarly, all I/O references ignore the value of ENAOCI when
+ L2C_OCI_CTL[IOFRCL] is set. */
+ uint64_t reserved_4_5 : 2;
+ uint64_t iofrcl : 1; /**< [ 6: 6](R/W) When set, L2C services all I/O read and write operations on the local node, regardless of
+ the value of the node ID bits in the physical address. During normal operation this bit is
+ expected to be 0. Will only transition from 1 to 0, never from 0 to 1. */
+ uint64_t lock_local_iob : 1; /**< [ 7: 7](R/W) When set, L2 atomic operations (excluding CAS/STC) initiated by IOB to remote addresses
+ which miss at the requester are performed locally on the requesting node. When clear, the
+ operation instead sends the atomic request to be performed on the home node. For requests
+ initiated by a core, and for STC and CAS ops, see
+ [LOCK_LOCAL_PP]/[LOCK_LOCAL_STC]/[LOCK_LOCAL_CAS].
+ Default is set to 1 (local locks). */
+ uint64_t rldd_psha : 1; /**< [ 8: 8](R/W) When set, RLDD is assumed to return a shared response (PSHA). Default operation assumes an
+ exclusive response (PEMD). Note that an incorrect assumption only causes an extra tag
+ write to be done upon receiving the response. */
+ uint64_t cas_fdx : 1; /**< [ 9: 9](R/W) When set, L2 STC/CAS operations performed at the home will immediately bring the block
+ exclusive into the home. Default operation is to first request the block shared and only
+ invalidate the remote if the compare succeeds. */
+ uint64_t inv_mode : 2; /**< [ 11: 10](R/W) Describes how aggressive to be when waiting for local invalidates before sending CCPI
+ responses which act like commits at the remote.
+ 0x0 = Conservative mode, waits until all local invalidates have been sent by their
+ respective CBCs to the cores.
+ 0x1 = Moderate mode, waits until all local invalidates have been sent to their respective
+ CBCs, but not necessarily actually sent to the cores themselves.
+ 0x2 = Aggressive mode, does not wait for local invalidates to begin their processing. */
+ uint64_t shto : 1; /**< [ 12: 12](R/W/H) Use short timeout intervals. When set, core uses SDIDTTO for both DID and commit counter
+ timeouts, rather than DIDTTO/DIDTTO2. Similarly, L2C will use short instead of long
+ timeout. */
+ uint64_t shtoen : 3; /**< [ 15: 13](R/W) When set, if the corresponding CCPI link is down, the hardware sets [SHTO].
+ See OCX_COM_LINK(0..2)_CTL for a description of what events can contribute to the
+ link_down condition. */
+ uint64_t shtoioen : 1; /**< [ 16: 16](R/W) When set, if any core issues an IO load, atomic, acking store, acking IOBADDR, or
+ acking LMTST to a node that doesn't exist (existence defined by the ENAOCI bits), then the
+ hardware sets [SHTO]. */
+ uint64_t shtolen : 5; /**< [ 21: 17](R/W) Selects the bit in the counter for the short timeout value (timeout used when [SHTO] is
+ set). Values supported are between 9 and 29 (for timeout values between 2^9 and 2^29).
+ Actual timeout is between 1x and 2x this interval. For example, if [SHTOLEN] = 14 (the
+ reset value), the timeout is between 16K and 32K core clocks. Note: a value of 0 disables
+ this timer. */
+ uint64_t lngtolen : 5; /**< [ 26: 22](R/W) Selects the bit in the counter for the long timeout value (timeout used when [SHTO] is
+ clear). Values supported are between 11 and 29 (for timeout values between 2^11 and
+ 2^29). Actual timeout is between 1x and 2x this interval. For example, if [LNGTOLEN] = 28
+ (the reset value), the timeout is between 256M and 512M core clocks. Note: a value of 0
+ disables this timer. */
+ uint64_t lock_local_pp : 1; /**< [ 27: 27](R/W) When clear, core-initiated L2 atomic operations (excluding CAS/STC) to remote
+ addresses which miss at the requester will send the atomic request to be performed on the
+ home node. Default operation will instead be performed locally on the requesting node.
+ For requests initiated by the IOB and for STC and CAS ops, see
+ [LOCK_LOCAL_IOB]/[LOCK_LOCAL_STC]/[LOCK_LOCAL_CAS]. */
+ uint64_t lock_local_stc : 1; /**< [ 28: 28](R/W) When set, L2 STC operations to remote addresses which miss at the requester will be
+ performed locally (if possible) on the requesting node. Default operation will instead
+ send the STC request to be performed on the home node. For CAS ops [LOCK_LOCAL_CAS]. */
+ uint64_t lock_local_cas : 1; /**< [ 29: 29](R/W) When set, L2 CAS operations to remote addresses which miss at the requester will be
+ performed locally (if possible) on the requesting node. Default operation will instead
+ send the CAS request to be performed on the home node. For STC ops [LOCK_LOCAL_STC]. */
+ uint64_t reserved_30 : 1;
+ uint64_t reserved_31_63 : 33;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ /* struct bdk_l2c_oci_ctl_s cn81xx; */
+ /* struct bdk_l2c_oci_ctl_s cn83xx; */
+ struct bdk_l2c_oci_ctl_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_31_63 : 33;
+ uint64_t ncpend : 1; /**< [ 30: 30](RO/H) An indication that a node change is pending. Hardware sets this bit when
+ OCX_COM_NODE[ID] is changed and clears the bit when the node change has taken
+ effect. */
+ uint64_t lock_local_cas : 1; /**< [ 29: 29](R/W) When set, L2 CAS operations to remote addresses which miss at the requester will be
+ performed locally (if possible) on the requesting node. Default operation will instead
+ send the CAS request to be performed on the home node. For STC ops [LOCK_LOCAL_STC]. */
+ uint64_t lock_local_stc : 1; /**< [ 28: 28](R/W) When set, L2 STC operations to remote addresses which miss at the requester will be
+ performed locally (if possible) on the requesting node. Default operation will instead
+ send the STC request to be performed on the home node. For CAS ops [LOCK_LOCAL_CAS]. */
+ uint64_t lock_local_pp : 1; /**< [ 27: 27](R/W) When clear, core-initiated L2 atomic operations (excluding CAS/STC) to remote
+ addresses which miss at the requester will send the atomic request to be performed on the
+ home node. Default operation will instead be performed locally on the requesting node.
+ For requests initiated by the IOB and for STC and CAS ops, see
+ [LOCK_LOCAL_IOB]/[LOCK_LOCAL_STC]/[LOCK_LOCAL_CAS]. */
+ uint64_t lngtolen : 5; /**< [ 26: 22](R/W) Selects the bit in the counter for the long timeout value (timeout used when [SHTO] is
+ clear). Values supported are between 11 and 29 (for timeout values between 2^11 and
+ 2^29). Actual timeout is between 1x and 2x this interval. For example, if [LNGTOLEN] = 28
+ (the reset value), the timeout is between 256M and 512M core clocks. Note: a value of 0
+ disables this timer. */
+ uint64_t shtolen : 5; /**< [ 21: 17](R/W) Selects the bit in the counter for the short timeout value (timeout used when [SHTO] is
+ set). Values supported are between 9 and 29 (for timeout values between 2^9 and 2^29).
+ Actual timeout is between 1x and 2x this interval. For example, if [SHTOLEN] = 14 (the
+ reset value), the timeout is between 16K and 32K core clocks. Note: a value of 0 disables
+ this timer. */
+ uint64_t shtoioen : 1; /**< [ 16: 16](R/W) When set, if any core issues an IO load, atomic, acking store, acking IOBADDR, or
+ acking LMTST to a node that doesn't exist (existence defined by the ENAOCI bits), then the
+ hardware sets [SHTO]. */
+ uint64_t shtoen : 3; /**< [ 15: 13](R/W) When set, if the corresponding CCPI link is down, the hardware sets [SHTO].
+ See OCX_COM_LINK(0..2)_CTL for a description of what events can contribute to the
+ link_down condition. */
+ uint64_t shto : 1; /**< [ 12: 12](R/W/H) Use short timeout intervals. When set, core uses SDIDTTO for both DID and commit counter
+ timeouts, rather than DIDTTO/DIDTTO2. Similarly, L2C will use short instead of long
+ timeout. */
+ uint64_t inv_mode : 2; /**< [ 11: 10](R/W) Describes how aggressive to be when waiting for local invalidates before sending CCPI
+ responses which act like commits at the remote.
+ 0x0 = Conservative mode, waits until all local invalidates have been sent by their
+ respective CBCs to the cores.
+ 0x1 = Moderate mode, waits until all local invalidates have been sent to their respective
+ CBCs, but not necessarily actually sent to the cores themselves.
+ 0x2 = Aggressive mode, does not wait for local invalidates to begin their processing. */
+ uint64_t cas_fdx : 1; /**< [ 9: 9](R/W) When set, L2 STC/CAS operations performed at the home will immediately bring the block
+ exclusive into the home. Default operation is to first request the block shared and only
+ invalidate the remote if the compare succeeds. */
+ uint64_t rldd_psha : 1; /**< [ 8: 8](R/W) When set, RLDD is assumed to return a shared response (PSHA). Default operation assumes an
+ exclusive response (PEMD). Note that an incorrect assumption only causes an extra tag
+ write to be done upon receiving the response. */
+ uint64_t lock_local_iob : 1; /**< [ 7: 7](R/W) When set, L2 atomic operations (excluding CAS/STC) initiated by IOB to remote addresses
+ which miss at the requester are performed locally on the requesting node. When clear, the
+ operation instead sends the atomic request to be performed on the home node. For requests
+ initiated by a core, and for STC and CAS ops, see
+ [LOCK_LOCAL_PP]/[LOCK_LOCAL_STC]/[LOCK_LOCAL_CAS].
+ Default is set to 1 (local locks). */
+ uint64_t iofrcl : 1; /**< [ 6: 6](R/W) When set, L2C services all I/O read and write operations on the local node, regardless of
+ the value of the node ID bits in the physical address. During normal operation this bit is
+ expected to be 0. Will only transition from 1 to 0, never from 0 to 1. */
+ uint64_t reserved_4_5 : 2;
+ uint64_t enaoci : 4; /**< [ 3: 0](R/W) Enable CCPI processing (one bit per node_id). When set, perform CCPI
+ processing. When clear, CCPI memory writes are blocked and CCPI memory reads
+ return unpredictable data. When clear,
+ CCPI I/O requests and MOC references are processed and sent to OCX where they are
+ ultimately discarded. RDDISOCI/WRDISOCI/IORDDISOCI/IOWRDISOCI interrupts occur if and only
+ if the corresponding ENAOCI\<node\> bit is clear. References to the local node (configured
+ via OCX_COM_NODE[ID]) ignore the value of ENAOCI\<node\> because no CCPI processing is
+ required. Similarly, all I/O references ignore the value of ENAOCI when
+ L2C_OCI_CTL[IOFRCL] is set. */
+#else /* Word 0 - Little Endian */
+ uint64_t enaoci : 4; /**< [ 3: 0](R/W) Enable CCPI processing (one bit per node_id). When set, perform CCPI
+ processing. When clear, CCPI memory writes are blocked and CCPI memory reads
+ return unpredictable data. When clear,
+ CCPI I/O requests and MOC references are processed and sent to OCX where they are
+ ultimately discarded. RDDISOCI/WRDISOCI/IORDDISOCI/IOWRDISOCI interrupts occur if and only
+ if the corresponding ENAOCI\<node\> bit is clear. References to the local node (configured
+ via OCX_COM_NODE[ID]) ignore the value of ENAOCI\<node\> because no CCPI processing is
+ required. Similarly, all I/O references ignore the value of ENAOCI when
+ L2C_OCI_CTL[IOFRCL] is set. */
+ uint64_t reserved_4_5 : 2;
+ uint64_t iofrcl : 1; /**< [ 6: 6](R/W) When set, L2C services all I/O read and write operations on the local node, regardless of
+ the value of the node ID bits in the physical address. During normal operation this bit is
+ expected to be 0. Will only transition from 1 to 0, never from 0 to 1. */
+ uint64_t lock_local_iob : 1; /**< [ 7: 7](R/W) When set, L2 atomic operations (excluding CAS/STC) initiated by IOB to remote addresses
+ which miss at the requester are performed locally on the requesting node. When clear, the
+ operation instead sends the atomic request to be performed on the home node. For requests
+ initiated by a core, and for STC and CAS ops, see
+ [LOCK_LOCAL_PP]/[LOCK_LOCAL_STC]/[LOCK_LOCAL_CAS].
+ Default is set to 1 (local locks). */
+ uint64_t rldd_psha : 1; /**< [ 8: 8](R/W) When set, RLDD is assumed to return a shared response (PSHA). Default operation assumes an
+ exclusive response (PEMD). Note that an incorrect assumption only causes an extra tag
+ write to be done upon receiving the response. */
+ uint64_t cas_fdx : 1; /**< [ 9: 9](R/W) When set, L2 STC/CAS operations performed at the home will immediately bring the block
+ exclusive into the home. Default operation is to first request the block shared and only
+ invalidate the remote if the compare succeeds. */
+ uint64_t inv_mode : 2; /**< [ 11: 10](R/W) Describes how aggressive to be when waiting for local invalidates before sending CCPI
+ responses which act like commits at the remote.
+ 0x0 = Conservative mode, waits until all local invalidates have been sent by their
+ respective CBCs to the cores.
+ 0x1 = Moderate mode, waits until all local invalidates have been sent to their respective
+ CBCs, but not necessarily actually sent to the cores themselves.
+ 0x2 = Aggressive mode, does not wait for local invalidates to begin their processing. */
+ uint64_t shto : 1; /**< [ 12: 12](R/W/H) Use short timeout intervals. When set, core uses SDIDTTO for both DID and commit counter
+ timeouts, rather than DIDTTO/DIDTTO2. Similarly, L2C will use short instead of long
+ timeout. */
+ uint64_t shtoen : 3; /**< [ 15: 13](R/W) When set, if the corresponding CCPI link is down, the hardware sets [SHTO].
+ See OCX_COM_LINK(0..2)_CTL for a description of what events can contribute to the
+ link_down condition. */
+ uint64_t shtoioen : 1; /**< [ 16: 16](R/W) When set, if any core issues an IO load, atomic, acking store, acking IOBADDR, or
+ acking LMTST to a node that doesn't exist (existence defined by the ENAOCI bits), then the
+ hardware sets [SHTO]. */
+ uint64_t shtolen : 5; /**< [ 21: 17](R/W) Selects the bit in the counter for the short timeout value (timeout used when [SHTO] is
+ set). Values supported are between 9 and 29 (for timeout values between 2^9 and 2^29).
+ Actual timeout is between 1x and 2x this interval. For example, if [SHTOLEN] = 14 (the
+ reset value), the timeout is between 16K and 32K core clocks. Note: a value of 0 disables
+ this timer. */
+ uint64_t lngtolen : 5; /**< [ 26: 22](R/W) Selects the bit in the counter for the long timeout value (timeout used when [SHTO] is
+ clear). Values supported are between 11 and 29 (for timeout values between 2^11 and
+ 2^29). Actual timeout is between 1x and 2x this interval. For example, if [LNGTOLEN] = 28
+ (the reset value), the timeout is between 256M and 512M core clocks. Note: a value of 0
+ disables this timer. */
+ uint64_t lock_local_pp : 1; /**< [ 27: 27](R/W) When clear, core-initiated L2 atomic operations (excluding CAS/STC) to remote
+ addresses which miss at the requester will send the atomic request to be performed on the
+ home node. Default operation will instead be performed locally on the requesting node.
+ For requests initiated by the IOB and for STC and CAS ops, see
+ [LOCK_LOCAL_IOB]/[LOCK_LOCAL_STC]/[LOCK_LOCAL_CAS]. */
+ uint64_t lock_local_stc : 1; /**< [ 28: 28](R/W) When set, L2 STC operations to remote addresses which miss at the requester will be
+ performed locally (if possible) on the requesting node. Default operation will instead
+ send the STC request to be performed on the home node. For CAS ops [LOCK_LOCAL_CAS]. */
+ uint64_t lock_local_cas : 1; /**< [ 29: 29](R/W) When set, L2 CAS operations to remote addresses which miss at the requester will be
+ performed locally (if possible) on the requesting node. Default operation will instead
+ send the CAS request to be performed on the home node. For STC ops [LOCK_LOCAL_STC]. */
+ uint64_t ncpend : 1; /**< [ 30: 30](RO/H) An indication that a node change is pending. Hardware sets this bit when
+ OCX_COM_NODE[ID] is changed and clears the bit when the node change has taken
+ effect. */
+ uint64_t reserved_31_63 : 33;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_l2c_oci_ctl bdk_l2c_oci_ctl_t;
+
+#define BDK_L2C_OCI_CTL BDK_L2C_OCI_CTL_FUNC()
+static inline uint64_t BDK_L2C_OCI_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_OCI_CTL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x87e080800020ll;
+ __bdk_csr_fatal("L2C_OCI_CTL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_OCI_CTL bdk_l2c_oci_ctl_t
+#define bustype_BDK_L2C_OCI_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_OCI_CTL "L2C_OCI_CTL"
+#define device_bar_BDK_L2C_OCI_CTL 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_OCI_CTL 0
+#define arguments_BDK_L2C_OCI_CTL -1,-1,-1,-1
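+
+/*
+ * Illustrative sketch (not part of the imported BDK sources): after changing
+ * OCX_COM_NODE[ID], software can poll [NCPEND] until the node change has taken
+ * effect, per the field description above. Assumes BDK_CSR_WAIT_FOR_FIELD from
+ * bdk-csr.h.
+ */
+static inline int example_l2c_wait_node_change(bdk_node_t node)
+{
+    /* Returns nonzero if the change is still pending after the timeout. */
+    return BDK_CSR_WAIT_FOR_FIELD(node, BDK_L2C_OCI_CTL, ncpend, ==, 0, 1000);
+}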
+
+/**
+ * Register (RSL) l2c_qos_pp#
+ *
+ * L2C Core QOS Level Registers
+ */
+union bdk_l2c_qos_ppx
+{
+ uint64_t u;
+ struct bdk_l2c_qos_ppx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_3_63 : 61;
+ uint64_t lvl : 3; /**< [ 2: 0](R/W) QOS level to use for this core. */
+#else /* Word 0 - Little Endian */
+ uint64_t lvl : 3; /**< [ 2: 0](R/W) QOS level to use for this core. */
+ uint64_t reserved_3_63 : 61;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_qos_ppx_s cn; */
+};
+typedef union bdk_l2c_qos_ppx bdk_l2c_qos_ppx_t;
+
+static inline uint64_t BDK_L2C_QOS_PPX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_QOS_PPX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e080880000ll + 8ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+ return 0x87e080880000ll + 8ll * ((a) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+ return 0x87e080880000ll + 8ll * ((a) & 0x3f);
+ __bdk_csr_fatal("L2C_QOS_PPX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_QOS_PPX(a) bdk_l2c_qos_ppx_t
+#define bustype_BDK_L2C_QOS_PPX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_QOS_PPX(a) "L2C_QOS_PPX"
+#define device_bar_BDK_L2C_QOS_PPX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_QOS_PPX(a) (a)
+#define arguments_BDK_L2C_QOS_PPX(a) (a),-1,-1,-1
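+
+/*
+ * Illustrative sketch (not part of the imported BDK sources): assign a QOS
+ * level (0-7) to one core. Assumes BDK_CSR_WRITE from bdk-csr.h.
+ */
+static inline void example_l2c_set_core_qos(bdk_node_t node, unsigned long core,
+                                            unsigned level)
+{
+    BDK_CSR_WRITE(node, BDK_L2C_QOS_PPX(core), level & 0x7); /* [LVL] is 3 bits */
+}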
+
+/**
+ * Register (RSL) l2c_qos_wgt
+ *
+ * L2C QOS Weight Register
+ */
+union bdk_l2c_qos_wgt
+{
+ uint64_t u;
+ struct bdk_l2c_qos_wgt_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t wgt7 : 8; /**< [ 63: 56](R/W) Weight for QOS level 7. */
+ uint64_t wgt6 : 8; /**< [ 55: 48](R/W) Weight for QOS level 6. */
+ uint64_t wgt5 : 8; /**< [ 47: 40](R/W) Weight for QOS level 5. */
+ uint64_t wgt4 : 8; /**< [ 39: 32](R/W) Weight for QOS level 4. */
+ uint64_t wgt3 : 8; /**< [ 31: 24](R/W) Weight for QOS level 3. */
+ uint64_t wgt2 : 8; /**< [ 23: 16](R/W) Weight for QOS level 2. */
+ uint64_t wgt1 : 8; /**< [ 15: 8](R/W) Weight for QOS level 1. */
+ uint64_t wgt0 : 8; /**< [ 7: 0](R/W) Weight for QOS level 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t wgt0 : 8; /**< [ 7: 0](R/W) Weight for QOS level 0. */
+ uint64_t wgt1 : 8; /**< [ 15: 8](R/W) Weight for QOS level 1. */
+ uint64_t wgt2 : 8; /**< [ 23: 16](R/W) Weight for QOS level 2. */
+ uint64_t wgt3 : 8; /**< [ 31: 24](R/W) Weight for QOS level 3. */
+ uint64_t wgt4 : 8; /**< [ 39: 32](R/W) Weight for QOS level 4. */
+ uint64_t wgt5 : 8; /**< [ 47: 40](R/W) Weight for QOS level 5. */
+ uint64_t wgt6 : 8; /**< [ 55: 48](R/W) Weight for QOS level 6. */
+ uint64_t wgt7 : 8; /**< [ 63: 56](R/W) Weight for QOS level 7. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_qos_wgt_s cn; */
+};
+typedef union bdk_l2c_qos_wgt bdk_l2c_qos_wgt_t;
+
+#define BDK_L2C_QOS_WGT BDK_L2C_QOS_WGT_FUNC()
+static inline uint64_t BDK_L2C_QOS_WGT_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_QOS_WGT_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x87e080800008ll;
+ __bdk_csr_fatal("L2C_QOS_WGT", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_QOS_WGT bdk_l2c_qos_wgt_t
+#define bustype_BDK_L2C_QOS_WGT BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_QOS_WGT "L2C_QOS_WGT"
+#define device_bar_BDK_L2C_QOS_WGT 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_QOS_WGT 0
+#define arguments_BDK_L2C_QOS_WGT -1,-1,-1,-1
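+
+/*
+ * Illustrative sketch (not part of the imported BDK sources): weight the eight
+ * QOS levels, giving level 7 the largest arbitration share. The specific
+ * weight values here are arbitrary. Assumes BDK_CSR_INIT/BDK_CSR_WRITE from
+ * bdk-csr.h.
+ */
+static inline void example_l2c_set_qos_weights(bdk_node_t node)
+{
+    BDK_CSR_INIT(wgt, node, BDK_L2C_QOS_WGT);
+    wgt.s.wgt0 = 0x01;   /* lowest-priority level gets the smallest weight */
+    wgt.s.wgt7 = 0xff;   /* highest-priority level gets the largest weight */
+    BDK_CSR_WRITE(node, BDK_L2C_QOS_WGT, wgt.u);
+}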
+
+/**
+ * Register (RSL) l2c_tad#_dll
+ *
+ * L2C TAD DLL Observability Register
+ * This register provides the parameters for DLL observability.
+ */
+union bdk_l2c_tadx_dll
+{
+ uint64_t u;
+ struct bdk_l2c_tadx_dll_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_60_63 : 4;
+ uint64_t max_dll_setting : 12; /**< [ 59: 48](RO/H) Max reported DLL setting. */
+ uint64_t min_dll_setting : 12; /**< [ 47: 36](RO/H) Min reported DLL setting. */
+ uint64_t pd_pos_rclk_refclk : 1; /**< [ 35: 35](RO/H) Phase detector output. */
+ uint64_t pdl_rclk_refclk : 1; /**< [ 34: 34](RO/H) Phase detector output. */
+ uint64_t pdr_rclk_refclk : 1; /**< [ 33: 33](RO/H) Phase detector output. */
+ uint64_t reserved_32 : 1;
+ uint64_t dly_elem_enable : 16; /**< [ 31: 16](RO/H) Delay element enable. */
+ uint64_t dll_setting : 12; /**< [ 15: 4](RO/H) DLL setting. */
+ uint64_t reserved_1_3 : 3;
+ uint64_t dll_lock : 1; /**< [ 0: 0](RO/H) DLL lock: 1 = locked, 0 = unlocked. */
+#else /* Word 0 - Little Endian */
+ uint64_t dll_lock : 1; /**< [ 0: 0](RO/H) DLL lock: 1 = locked, 0 = unlocked. */
+ uint64_t reserved_1_3 : 3;
+ uint64_t dll_setting : 12; /**< [ 15: 4](RO/H) DLL setting. */
+ uint64_t dly_elem_enable : 16; /**< [ 31: 16](RO/H) Delay element enable. */
+ uint64_t reserved_32 : 1;
+ uint64_t pdr_rclk_refclk : 1; /**< [ 33: 33](RO/H) Phase detector output. */
+ uint64_t pdl_rclk_refclk : 1; /**< [ 34: 34](RO/H) Phase detector output. */
+ uint64_t pd_pos_rclk_refclk : 1; /**< [ 35: 35](RO/H) Phase detector output. */
+ uint64_t min_dll_setting : 12; /**< [ 47: 36](RO/H) Min reported DLL setting. */
+ uint64_t max_dll_setting : 12; /**< [ 59: 48](RO/H) Max reported DLL setting. */
+ uint64_t reserved_60_63 : 4;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_tadx_dll_s cn; */
+};
+typedef union bdk_l2c_tadx_dll bdk_l2c_tadx_dll_t;
+
+static inline uint64_t BDK_L2C_TADX_DLL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_TADX_DLL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e050030000ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e050030000ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=7))
+ return 0x87e050030000ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("L2C_TADX_DLL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_TADX_DLL(a) bdk_l2c_tadx_dll_t
+#define bustype_BDK_L2C_TADX_DLL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_TADX_DLL(a) "L2C_TADX_DLL"
+#define device_bar_BDK_L2C_TADX_DLL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_TADX_DLL(a) (a)
+#define arguments_BDK_L2C_TADX_DLL(a) (a),-1,-1,-1
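+
+/*
+ * Illustrative sketch (not part of the imported BDK sources): check whether a
+ * TAD's DLL reports lock. Assumes BDK_CSR_INIT from bdk-csr.h.
+ */
+static inline int example_l2c_tad_dll_locked(bdk_node_t node, unsigned long tad)
+{
+    BDK_CSR_INIT(dll, node, BDK_L2C_TADX_DLL(tad));
+    return dll.s.dll_lock;                 /* 1 = locked, 0 = unlocked */
+}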
+
+/**
+ * Register (RSL) l2c_tad#_err
+ *
+ * L2C TAD Request Error Info Registers
+ * This register records error information for *DISOCI and *NXM
+ * interrupts. The NXM logic only applies to local addresses. A command for
+ * a remote address does not cause a [RDNXM]/[WRNXM] on the requesting node, but
+ * may on the remote node. Similarly, [RDDISOCI]/[WRDISOCI] is always for a remote
+ * address. The first [WRDISOCI]/[WRNXM] error will lock the register until the
+ * logged error type is cleared; [RDDISOCI]/[RDNXM] never locks the register.
+ */
+union bdk_l2c_tadx_err
+{
+ uint64_t u;
+ struct bdk_l2c_tadx_err_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t rdnxm : 1; /**< [ 63: 63](RO/H) Logged information is for a L2C_TAD()_INT_W1C[RDNXM] error. */
+ uint64_t wrnxm : 1; /**< [ 62: 62](RO/H) Logged information is for a L2C_TAD()_INT_W1C[WRNXM] error. */
+ uint64_t rddisoci : 1; /**< [ 61: 61](RO/H) Logged information is for a L2C_TAD()_INT_W1C[RDDISOCI] error. */
+ uint64_t wrdisoci : 1; /**< [ 60: 60](RO/H) Logged information is for a L2C_TAD()_INT_W1C[WRDISOCI] error. */
+ uint64_t nonsec : 1; /**< [ 59: 59](RO/H) Nonsecure (NS) bit of request causing error. */
+ uint64_t cmd : 8; /**< [ 58: 51](RO/H) Encoding of XMC or CCPI command causing error.
+ Internal:
+ If CMD\<7\>==1, use XMC_CMD_E to
+ decode CMD\<6:0\>. If CMD\<7:5\>==0, use OCI_MREQ_CMD_E to decode CMD\<4:0\>. If CMD\<7:5\>==1,
+ use OCI_MFWD_CMD_E to decode CMD\<4:0\>. If CMD\<7:5\>==2, use OCI_MRSP_CMD_E to decode
+ CMD\<4:0\>. */
+ uint64_t source : 7; /**< [ 50: 44](RO/H) XMC source of request causing error. If [SOURCE]\<6\>==0, then [SOURCE]\<5:0\> is
+ PPID, else [SOURCE]\<3:0\> is BUSID of the IOB which made the request. If
+ [CMD]\<7\>==0, this field is unpredictable. */
+ uint64_t reserved_42_43 : 2;
+ uint64_t node : 2; /**< [ 41: 40](RO/H) CCPI node of XMC request causing error. For *NXM errors [NODE] is always the node that
+ generated request causing the error (*NXM errors are logged at the home node). For *DISOCI
+ errors, is the NODE the request is directed to (DISOCI request is always the current
+ node). */
+ uint64_t addr : 40; /**< [ 39: 0](RO/H) XMC address causing the error. [ADDR]\<6:0\> is unpredictable for *DISOCI and *NXM
+ errors. This field is the physical address after index aliasing (if enabled). */
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 40; /**< [ 39: 0](RO/H) XMC address causing the error. [ADDR]\<6:0\> is unpredictable for *DISOCI and *NXM
+ errors. This field is the physical address after index aliasing (if enabled). */
+ uint64_t node : 2; /**< [ 41: 40](RO/H) CCPI node of XMC request causing error. For *NXM errors [NODE] is always the node that
+ generated request causing the error (*NXM errors are logged at the home node). For *DISOCI
+ errors, is the NODE the request is directed to (DISOCI request is always the current
+ node). */
+ uint64_t reserved_42_43 : 2;
+ uint64_t source : 7; /**< [ 50: 44](RO/H) XMC source of request causing error. If [SOURCE]\<6\>==0, then [SOURCE]\<5:0\> is
+ PPID, else [SOURCE]\<3:0\> is BUSID of the IOB which made the request. If
+ [CMD]\<7\>==0, this field is unpredictable. */
+ uint64_t cmd : 8; /**< [ 58: 51](RO/H) Encoding of XMC or CCPI command causing error.
+ Internal:
+ If CMD\<7\>==1, use XMC_CMD_E to
+ decode CMD\<6:0\>. If CMD\<7:5\>==0, use OCI_MREQ_CMD_E to decode CMD\<4:0\>. If CMD\<7:5\>==1,
+ use OCI_MFWD_CMD_E to decode CMD\<4:0\>. If CMD\<7:5\>==2, use OCI_MRSP_CMD_E to decode
+ CMD\<4:0\>. */
+ uint64_t nonsec : 1; /**< [ 59: 59](RO/H) Nonsecure (NS) bit of request causing error. */
+ uint64_t wrdisoci : 1; /**< [ 60: 60](RO/H) Logged information is for a L2C_TAD()_INT_W1C[WRDISOCI] error. */
+ uint64_t rddisoci : 1; /**< [ 61: 61](RO/H) Logged information is for a L2C_TAD()_INT_W1C[RDDISOCI] error. */
+ uint64_t wrnxm : 1; /**< [ 62: 62](RO/H) Logged information is for a L2C_TAD()_INT_W1C[WRNXM] error. */
+ uint64_t rdnxm : 1; /**< [ 63: 63](RO/H) Logged information is for a L2C_TAD()_INT_W1C[RDNXM] error. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_tadx_err_s cn; */
+};
+typedef union bdk_l2c_tadx_err bdk_l2c_tadx_err_t;
+
+static inline uint64_t BDK_L2C_TADX_ERR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_TADX_ERR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e050060000ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e050060000ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=7))
+ return 0x87e050060000ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("L2C_TADX_ERR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_TADX_ERR(a) bdk_l2c_tadx_err_t
+#define bustype_BDK_L2C_TADX_ERR(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_TADX_ERR(a) "L2C_TADX_ERR"
+#define device_bar_BDK_L2C_TADX_ERR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_TADX_ERR(a) (a)
+#define arguments_BDK_L2C_TADX_ERR(a) (a),-1,-1,-1
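+
+/*
+ * Illustrative sketch (not part of the imported BDK sources): report the
+ * request logged for a *NXM/*DISOCI interrupt. Assumes BDK_CSR_INIT from
+ * bdk-csr.h and printf from the BDK environment.
+ */
+static inline void example_l2c_tad_report_err(bdk_node_t node, unsigned long tad)
+{
+    BDK_CSR_INIT(err, node, BDK_L2C_TADX_ERR(tad));
+    if (err.s.rdnxm || err.s.wrnxm || err.s.rddisoci || err.s.wrdisoci)
+        printf("TAD%lu: %s %s error, node %u, cmd 0x%x, addr 0x%llx\n",
+               tad,
+               (err.s.rdnxm || err.s.rddisoci) ? "read" : "write",
+               (err.s.rdnxm || err.s.wrnxm) ? "NXM" : "DISOCI",
+               (unsigned)err.s.node, (unsigned)err.s.cmd,
+               (unsigned long long)err.s.addr);
+}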
+
+/**
+ * Register (RSL) l2c_tad#_pfc#
+ *
+ * L2C TAD Performance Counter Registers
+ */
+union bdk_l2c_tadx_pfcx
+{
+ uint64_t u;
+ struct bdk_l2c_tadx_pfcx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W/H) Current counter value. */
+#else /* Word 0 - Little Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W/H) Current counter value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_tadx_pfcx_s cn; */
+};
+typedef union bdk_l2c_tadx_pfcx bdk_l2c_tadx_pfcx_t;
+
+static inline uint64_t BDK_L2C_TADX_PFCX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_TADX_PFCX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=3)))
+ return 0x87e050010100ll + 0x1000000ll * ((a) & 0x0) + 8ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e050010100ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=7) && (b<=3)))
+ return 0x87e050010100ll + 0x1000000ll * ((a) & 0x7) + 8ll * ((b) & 0x3);
+ __bdk_csr_fatal("L2C_TADX_PFCX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_L2C_TADX_PFCX(a,b) bdk_l2c_tadx_pfcx_t
+#define bustype_BDK_L2C_TADX_PFCX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_TADX_PFCX(a,b) "L2C_TADX_PFCX"
+#define device_bar_BDK_L2C_TADX_PFCX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_TADX_PFCX(a,b) (a)
+#define arguments_BDK_L2C_TADX_PFCX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) l2c_tad#_prf
+ *
+ * L2C TAD Performance Counter Control Registers
+ * All four counters are equivalent and can use any of the defined selects.
+ */
+union bdk_l2c_tadx_prf
+{
+ uint64_t u;
+ struct bdk_l2c_tadx_prf_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t cnt3sel : 8; /**< [ 31: 24](R/W) Selects event to count for L2C_TAD(0)_PFC(3). Enumerated by L2C_TAD_PRF_SEL_E. */
+ uint64_t cnt2sel : 8; /**< [ 23: 16](R/W) Selects event to count for L2C_TAD(0)_PFC(2). Enumerated by L2C_TAD_PRF_SEL_E. */
+ uint64_t cnt1sel : 8; /**< [ 15: 8](R/W) Selects event to count for L2C_TAD(0)_PFC(1). Enumerated by L2C_TAD_PRF_SEL_E. */
+ uint64_t cnt0sel : 8; /**< [ 7: 0](R/W) Selects event to count for L2C_TAD(0)_PFC(0). Enumerated by L2C_TAD_PRF_SEL_E. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt0sel : 8; /**< [ 7: 0](R/W) Selects event to count for L2C_TAD(0)_PFC(0). Enumerated by L2C_TAD_PRF_SEL_E. */
+ uint64_t cnt1sel : 8; /**< [ 15: 8](R/W) Selects event to count for L2C_TAD(0)_PFC(1). Enumerated by L2C_TAD_PRF_SEL_E. */
+ uint64_t cnt2sel : 8; /**< [ 23: 16](R/W) Selects event to count for L2C_TAD(0)_PFC(2). Enumerated by L2C_TAD_PRF_SEL_E. */
+ uint64_t cnt3sel : 8; /**< [ 31: 24](R/W) Selects event to count for L2C_TAD(0)_PFC(3). Enumerated by L2C_TAD_PRF_SEL_E. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_tadx_prf_s cn81xx; */
+ struct bdk_l2c_tadx_prf_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t cnt3sel : 8; /**< [ 31: 24](R/W) Selects event to count for L2C_TAD(0..7)_PFC(3). Enumerated by L2C_TAD_PRF_SEL_E. */
+ uint64_t cnt2sel : 8; /**< [ 23: 16](R/W) Selects event to count for L2C_TAD(0..7)_PFC(2). Enumerated by L2C_TAD_PRF_SEL_E. */
+ uint64_t cnt1sel : 8; /**< [ 15: 8](R/W) Selects event to count for L2C_TAD(0..7)_PFC(1). Enumerated by L2C_TAD_PRF_SEL_E. */
+ uint64_t cnt0sel : 8; /**< [ 7: 0](R/W) Selects event to count for L2C_TAD(0..7)_PFC(0). Enumerated by L2C_TAD_PRF_SEL_E. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt0sel : 8; /**< [ 7: 0](R/W) Selects event to count for L2C_TAD(0..7)_PFC(0). Enumerated by L2C_TAD_PRF_SEL_E. */
+ uint64_t cnt1sel : 8; /**< [ 15: 8](R/W) Selects event to count for L2C_TAD(0..7)_PFC(1). Enumerated by L2C_TAD_PRF_SEL_E. */
+ uint64_t cnt2sel : 8; /**< [ 23: 16](R/W) Selects event to count for L2C_TAD(0..7)_PFC(2). Enumerated by L2C_TAD_PRF_SEL_E. */
+ uint64_t cnt3sel : 8; /**< [ 31: 24](R/W) Selects event to count for L2C_TAD(0..7)_PFC(3). Enumerated by L2C_TAD_PRF_SEL_E. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_l2c_tadx_prf_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t cnt3sel : 8; /**< [ 31: 24](R/W) Selects event to count for L2C_TAD(0..3)_PFC(3). Enumerated by L2C_TAD_PRF_SEL_E. */
+ uint64_t cnt2sel : 8; /**< [ 23: 16](R/W) Selects event to count for L2C_TAD(0..3)_PFC(2). Enumerated by L2C_TAD_PRF_SEL_E. */
+ uint64_t cnt1sel : 8; /**< [ 15: 8](R/W) Selects event to count for L2C_TAD(0..3)_PFC(1). Enumerated by L2C_TAD_PRF_SEL_E. */
+ uint64_t cnt0sel : 8; /**< [ 7: 0](R/W) Selects event to count for L2C_TAD(0..3)_PFC(0). Enumerated by L2C_TAD_PRF_SEL_E. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt0sel : 8; /**< [ 7: 0](R/W) Selects event to count for L2C_TAD(0..3)_PFC(0). Enumerated by L2C_TAD_PRF_SEL_E. */
+ uint64_t cnt1sel : 8; /**< [ 15: 8](R/W) Selects event to count for L2C_TAD(0..3)_PFC(1). Enumerated by L2C_TAD_PRF_SEL_E. */
+ uint64_t cnt2sel : 8; /**< [ 23: 16](R/W) Selects event to count for L2C_TAD(0..3)_PFC(2). Enumerated by L2C_TAD_PRF_SEL_E. */
+ uint64_t cnt3sel : 8; /**< [ 31: 24](R/W) Selects event to count for L2C_TAD(0..3)_PFC(3). Enumerated by L2C_TAD_PRF_SEL_E. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_l2c_tadx_prf bdk_l2c_tadx_prf_t;
+
+static inline uint64_t BDK_L2C_TADX_PRF(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_TADX_PRF(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e050010000ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e050010000ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=7))
+ return 0x87e050010000ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("L2C_TADX_PRF", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_TADX_PRF(a) bdk_l2c_tadx_prf_t
+#define bustype_BDK_L2C_TADX_PRF(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_TADX_PRF(a) "L2C_TADX_PRF"
+#define device_bar_BDK_L2C_TADX_PRF(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_TADX_PRF(a) (a)
+#define arguments_BDK_L2C_TADX_PRF(a) (a),-1,-1,-1
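+
+/*
+ * Illustrative sketch (not part of the imported BDK sources): point counter 0
+ * of one TAD at an event (an L2C_TAD_PRF_SEL_E value), clear it, and read it
+ * back after a workload. Assumes BDK_CSR_MODIFY/BDK_CSR_WRITE/BDK_CSR_READ
+ * from bdk-csr.h.
+ */
+static inline void example_l2c_tad_pfc_start(bdk_node_t node, unsigned long tad,
+                                             uint8_t event)
+{
+    BDK_CSR_MODIFY(prf, node, BDK_L2C_TADX_PRF(tad), prf.s.cnt0sel = event);
+    BDK_CSR_WRITE(node, BDK_L2C_TADX_PFCX(tad, 0), 0);  /* zero counter 0 */
+}
+
+static inline uint64_t example_l2c_tad_pfc_read(bdk_node_t node, unsigned long tad)
+{
+    return BDK_CSR_READ(node, BDK_L2C_TADX_PFCX(tad, 0));
+}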
+
+/**
+ * Register (RSL) l2c_tad#_stat
+ *
+ * L2C TAD Status Registers
+ * This register holds information about the instantaneous state of the TAD.
+ */
+union bdk_l2c_tadx_stat
+{
+ uint64_t u;
+ struct bdk_l2c_tadx_stat_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t lfb_valid_cnt : 6; /**< [ 13: 8](RO/H) The number of LFBs in use. */
+ uint64_t reserved_5_7 : 3;
+ uint64_t vbf_inuse_cnt : 5; /**< [ 4: 0](RO/H) The number of MCI VBFs in use. */
+#else /* Word 0 - Little Endian */
+ uint64_t vbf_inuse_cnt : 5; /**< [ 4: 0](RO/H) The number of MCI VBFs in use. */
+ uint64_t reserved_5_7 : 3;
+ uint64_t lfb_valid_cnt : 6; /**< [ 13: 8](RO/H) The number of LFBs in use. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_tadx_stat_s cn; */
+};
+typedef union bdk_l2c_tadx_stat bdk_l2c_tadx_stat_t;
+
+static inline uint64_t BDK_L2C_TADX_STAT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_TADX_STAT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e050020008ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e050020008ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=7))
+ return 0x87e050020008ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("L2C_TADX_STAT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_TADX_STAT(a) bdk_l2c_tadx_stat_t
+#define bustype_BDK_L2C_TADX_STAT(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_TADX_STAT(a) "L2C_TADX_STAT"
+#define device_bar_BDK_L2C_TADX_STAT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_TADX_STAT(a) (a)
+#define arguments_BDK_L2C_TADX_STAT(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) l2c_tad#_tag
+ *
+ * L2C TAD Tag Data Registers
+ * This register holds the tag information for LTGL2I and STGL2I commands.
+ */
+union bdk_l2c_tadx_tag
+{
+ uint64_t u;
+ struct bdk_l2c_tadx_tag_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t sblkdty : 4; /**< [ 63: 60](R/W/H) Sub-block dirty bits. Ignored/loaded with 0 for RTG accesses. If [TS] is invalid
+ (0), [SBLKDTY] must be 0 or operation is undefined. */
+ uint64_t reserved_59 : 1;
+ uint64_t nonsec : 1; /**< [ 58: 58](R/W/H) Nonsecure bit. */
+ uint64_t businfo : 9; /**< [ 57: 49](R/W/H) The bus information bits. Ignored/loaded with 0 for RTG accesses. */
+ uint64_t ecc : 7; /**< [ 48: 42](R/W/H) The tag ECC. This field is undefined if L2C_CTL[DISECC] is not 1 when the LTGL2I reads the tags. */
+ uint64_t reserved_6_41 : 36;
+ uint64_t node : 2; /**< [ 5: 4](RAZ) Reserved. */
+ uint64_t ts : 2; /**< [ 3: 2](R/W/H) The tag state.
+ 0x0 = Invalid.
+ 0x1 = Shared.
+ 0x2 = Exclusive.
+
+ Note that a local address will never have the value of exclusive as that state
+ is encoded as shared in the tag and invalid in the RTG. */
+ uint64_t used : 1; /**< [ 1: 1](R/W/H) The LRU use bit. If setting the [LOCK] bit, the USE bit should also be set or
+ the operation is undefined. Ignored/loaded with 0 for RTG accesses. */
+ uint64_t lock : 1; /**< [ 0: 0](R/W/H) The lock bit. If setting the [LOCK] bit, the USE bit should also be set or the
+ operation is undefined. Ignored/loaded with 0 for RTG accesses. */
+#else /* Word 0 - Little Endian */
+ uint64_t lock : 1; /**< [ 0: 0](R/W/H) The lock bit. If setting the [LOCK] bit, the USE bit should also be set or the
+ operation is undefined. Ignored/loaded with 0 for RTG accesses. */
+ uint64_t used : 1; /**< [ 1: 1](R/W/H) The LRU use bit. If setting the [LOCK] bit, the USE bit should also be set or
+ the operation is undefined. Ignored/loaded with 0 for RTG accesses. */
+ uint64_t ts : 2; /**< [ 3: 2](R/W/H) The tag state.
+ 0x0 = Invalid.
+ 0x1 = Shared.
+ 0x2 = Exclusive.
+
+ Note that a local address will never have the value of exclusive as that state
+ is encoded as shared in the tag and invalid in the RTG. */
+ uint64_t node : 2; /**< [ 5: 4](RAZ) Reserved. */
+ uint64_t reserved_6_41 : 36;
+ uint64_t ecc : 7; /**< [ 48: 42](R/W/H) The tag ECC. This field is undefined if L2C_CTL[DISECC] is not 1 when the LTGL2I reads the tags. */
+ uint64_t businfo : 9; /**< [ 57: 49](R/W/H) The bus information bits. Ignored/loaded with 0 for RTG accesses. */
+ uint64_t nonsec : 1; /**< [ 58: 58](R/W/H) Nonsecure bit. */
+ uint64_t reserved_59 : 1;
+ uint64_t sblkdty : 4; /**< [ 63: 60](R/W/H) Sub-block dirty bits. Ignored/loaded with 0 for RTG accesses. If [TS] is invalid
+ (0), [SBLKDTY] must be 0 or operation is undefined. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_l2c_tadx_tag_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t sblkdty : 4; /**< [ 63: 60](R/W/H) Sub-block dirty bits. Ignored/loaded with 0 for RTG accesses. If [TS] is invalid
+ (0), [SBLKDTY] must be 0 or operation is undefined. */
+ uint64_t reserved_59 : 1;
+ uint64_t nonsec : 1; /**< [ 58: 58](R/W/H) Nonsecure bit. */
+ uint64_t reserved_57 : 1;
+ uint64_t businfo : 8; /**< [ 56: 49](R/W/H) The bus information bits. Ignored/loaded with 0 for RTG accesses. */
+ uint64_t ecc : 7; /**< [ 48: 42](R/W/H) The tag ECC. This field is undefined if L2C_CTL[DISECC] is not 1 when the LTGL2I reads the tags. */
+ uint64_t reserved_40_41 : 2;
+ uint64_t tag : 23; /**< [ 39: 17](R/W/H) The tag. TAG\<39:17\> holds the corresponding bits of the L2C+LMC internal L2/DRAM byte
+ address. */
+ uint64_t reserved_6_16 : 11;
+ uint64_t node : 2; /**< [ 5: 4](RAZ) Reserved. */
+ uint64_t ts : 2; /**< [ 3: 2](R/W/H) The tag state.
+ 0x0 = Invalid.
+ 0x1 = Shared.
+ 0x2 = Exclusive.
+
+ Note that a local address will never have the value of exclusive as that state
+ is encoded as shared in the tag and invalid in the RTG. */
+ uint64_t used : 1; /**< [ 1: 1](R/W/H) The LRU use bit. If setting the [LOCK] bit, the USE bit should also be set or
+ the operation is undefined. Ignored/loaded with 0 for RTG accesses. */
+ uint64_t lock : 1; /**< [ 0: 0](R/W/H) The lock bit. If setting the [LOCK] bit, the USE bit should also be set or the
+ operation is undefined. Ignored/loaded with 0 for RTG accesses. */
+#else /* Word 0 - Little Endian */
+ uint64_t lock : 1; /**< [ 0: 0](R/W/H) The lock bit. If setting the [LOCK] bit, the USE bit should also be set or the
+ operation is undefined. Ignored/loaded with 0 for RTG accesses. */
+ uint64_t used : 1; /**< [ 1: 1](R/W/H) The LRU use bit. If setting the [LOCK] bit, the USE bit should also be set or
+ the operation is undefined. Ignored/loaded with 0 for RTG accesses. */
+ uint64_t ts : 2; /**< [ 3: 2](R/W/H) The tag state.
+ 0x0 = Invalid.
+ 0x1 = Shared.
+ 0x2 = Exclusive.
+
+ Note that a local address will never have the value of exclusive as that state
+ is encoded as shared in the tag and invalid in the RTG. */
+ uint64_t node : 2; /**< [ 5: 4](RAZ) Reserved. */
+ uint64_t reserved_6_16 : 11;
+ uint64_t tag : 23; /**< [ 39: 17](R/W/H) The tag. TAG\<39:17\> holds the corresponding bits of the L2C+LMC internal L2/DRAM byte
+ address. */
+ uint64_t reserved_40_41 : 2;
+ uint64_t ecc : 7; /**< [ 48: 42](R/W/H) The tag ECC. This field is undefined if L2C_CTL[DISECC] is not 1 when the LTGL2I reads the tags. */
+ uint64_t businfo : 8; /**< [ 56: 49](R/W/H) The bus information bits. Ignored/loaded with 0 for RTG accesses. */
+ uint64_t reserved_57 : 1;
+ uint64_t nonsec : 1; /**< [ 58: 58](R/W/H) Nonsecure bit. */
+ uint64_t reserved_59 : 1;
+ uint64_t sblkdty : 4; /**< [ 63: 60](R/W/H) Sub-block dirty bits. Ignored/loaded with 0 for RTG accesses. If [TS] is invalid
+ (0), [SBLKDTY] must be 0 or operation is undefined. */
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_l2c_tadx_tag_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t sblkdty : 4; /**< [ 63: 60](R/W/H) Sub-block dirty bits. Ignored/loaded with 0 for RTG accesses. If [TS] is invalid
+ (0), [SBLKDTY] must be 0 or operation is undefined. */
+ uint64_t reserved_59 : 1;
+ uint64_t nonsec : 1; /**< [ 58: 58](R/W/H) Nonsecure bit. */
+ uint64_t businfo : 9; /**< [ 57: 49](R/W/H) The bus information bits. Ignored/loaded with 0 for RTG accesses. */
+ uint64_t ecc : 7; /**< [ 48: 42](R/W/H) The tag ECC. This field is undefined if L2C_CTL[DISECC] is not 1 when the LTGL2I reads the tags. */
+ uint64_t tag : 22; /**< [ 41: 20](R/W/H) The tag. TAG\<39:20\> holds the corresponding bits of the L2C+LMC internal L2/DRAM byte
+ address. TAG\<41:40\> is the CCPI node of the address. The RTG must always have
+ TAG\<41:40\> equal to the current node, or the operation is undefined. */
+ uint64_t reserved_6_19 : 14;
+ uint64_t node : 2; /**< [ 5: 4](R/W/H) The node ID for the remote node which holds this block. Ignored/loaded with 0 for TAG accesses. */
+ uint64_t ts : 2; /**< [ 3: 2](R/W/H) The tag state.
+ 0x0 = Invalid.
+ 0x1 = Shared.
+ 0x2 = Exclusive.
+
+ Note that a local address will never have the value of exclusive as that state
+ is encoded as shared in the tag and invalid in the RTG. */
+ uint64_t used : 1; /**< [ 1: 1](R/W/H) The LRU use bit. If setting the [LOCK] bit, the USE bit should also be set or
+ the operation is undefined. Ignored/loaded with 0 for RTG accesses. */
+ uint64_t lock : 1; /**< [ 0: 0](R/W/H) The lock bit. If setting the [LOCK] bit, the USE bit should also be set or the
+ operation is undefined. Ignored/loaded with 0 for RTG accesses. */
+#else /* Word 0 - Little Endian */
+ uint64_t lock : 1; /**< [ 0: 0](R/W/H) The lock bit. If setting the [LOCK] bit, the USE bit should also be set or the
+ operation is undefined. Ignored/loaded with 0 for RTG accesses. */
+ uint64_t used : 1; /**< [ 1: 1](R/W/H) The LRU use bit. If setting the [LOCK] bit, the USE bit should also be set or
+ the operation is undefined. Ignored/loaded with 0 for RTG accesses. */
+ uint64_t ts : 2; /**< [ 3: 2](R/W/H) The tag state.
+ 0x0 = Invalid.
+ 0x1 = Shared.
+ 0x2 = Exclusive.
+
+ Note that a local address will never have the value of exclusive as that state
+ is encoded as shared in the tag and invalid in the RTG. */
+ uint64_t node : 2; /**< [ 5: 4](R/W/H) The node ID for the remote node which holds this block. Ignored/loaded with 0 for TAG accesses. */
+ uint64_t reserved_6_19 : 14;
+ uint64_t tag : 22; /**< [ 41: 20](R/W/H) The tag. TAG\<39:20\> holds the corresponding bits of the L2C+LMC internal L2/DRAM byte
+ address. TAG\<41:40\> is the CCPI node of the address. The RTG must always have
+ TAG\<41:40\> equal to the current node, or the operation is undefined. */
+ uint64_t ecc : 7; /**< [ 48: 42](R/W/H) The tag ECC. This field is undefined if L2C_CTL[DISECC] is not 1 when the LTGL2I reads the tags. */
+ uint64_t businfo : 9; /**< [ 57: 49](R/W/H) The bus information bits. Ignored/loaded with 0 for RTG accesses. */
+ uint64_t nonsec : 1; /**< [ 58: 58](R/W/H) Nonsecure bit. */
+ uint64_t reserved_59 : 1;
+ uint64_t sblkdty : 4; /**< [ 63: 60](R/W/H) Sub-block dirty bits. Ignored/loaded with 0 for RTG accesses. If [TS] is invalid
+ (0), [SBLKDTY] must be 0 or operation is undefined. */
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_l2c_tadx_tag_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t sblkdty : 4; /**< [ 63: 60](R/W/H) Sub-block dirty bits. Ignored/loaded with 0 for RTG accesses. If [TS] is invalid
+ (0), [SBLKDTY] must be 0 or operation is undefined. */
+ uint64_t reserved_59 : 1;
+ uint64_t nonsec : 1; /**< [ 58: 58](R/W/H) Nonsecure bit. */
+ uint64_t businfo : 9; /**< [ 57: 49](R/W/H) The bus information bits. Ignored/loaded with 0 for RTG accesses. */
+ uint64_t ecc : 7; /**< [ 48: 42](R/W/H) The tag ECC. This field is undefined if L2C_CTL[DISECC] is not 1 when the LTGL2I reads the tags. */
+ uint64_t reserved_40_41 : 2;
+ uint64_t tag : 22; /**< [ 39: 18](R/W/H) The tag. TAG\<39:18\> holds the corresponding bits of the L2C+LMC internal L2/DRAM byte
+ address. */
+ uint64_t reserved_6_17 : 12;
+ uint64_t node : 2; /**< [ 5: 4](RAZ) Reserved. */
+ uint64_t ts : 2; /**< [ 3: 2](R/W/H) The tag state.
+ 0x0 = Invalid.
+ 0x1 = Shared.
+ 0x2 = Exclusive.
+
+ Note that a local address will never have the value of exclusive as that state
+ is encoded as shared in the tag and invalid in the RTG. */
+ uint64_t used : 1; /**< [ 1: 1](R/W/H) The LRU use bit. If setting the [LOCK] bit, the USE bit should also be set or
+ the operation is undefined. Ignored/loaded with 0 for RTG accesses. */
+ uint64_t lock : 1; /**< [ 0: 0](R/W/H) The lock bit. If setting the [LOCK] bit, the USE bit should also be set or the
+ operation is undefined. Ignored/loaded with 0 for RTG accesses. */
+#else /* Word 0 - Little Endian */
+ uint64_t lock : 1; /**< [ 0: 0](R/W/H) The lock bit. If setting the [LOCK] bit, the USE bit should also be set or the
+ operation is undefined. Ignored/loaded with 0 for RTG accesses. */
+ uint64_t used : 1; /**< [ 1: 1](R/W/H) The LRU use bit. If setting the [LOCK] bit, the USE bit should also be set or
+ the operation is undefined. Ignored/loaded with 0 for RTG accesses. */
+ uint64_t ts : 2; /**< [ 3: 2](R/W/H) The tag state.
+ 0x0 = Invalid.
+ 0x1 = Shared.
+ 0x2 = Exclusive.
+
+ Note that a local address will never have the value of exclusive as that state
+ is encoded as shared in the tag and invalid in the RTG. */
+ uint64_t node : 2; /**< [ 5: 4](RAZ) Reserved. */
+ uint64_t reserved_6_17 : 12;
+ uint64_t tag : 22; /**< [ 39: 18](R/W/H) The tag. TAG\<39:18\> holds the corresponding bits of the L2C+LMC internal L2/DRAM byte
+ address. */
+ uint64_t reserved_40_41 : 2;
+ uint64_t ecc : 7; /**< [ 48: 42](R/W/H) The tag ECC. This field is undefined if L2C_CTL[DISECC] is not 1 when the LTGL2I reads the tags. */
+ uint64_t businfo : 9; /**< [ 57: 49](R/W/H) The bus information bits. Ignored/loaded with 0 for RTG accesses. */
+ uint64_t nonsec : 1; /**< [ 58: 58](R/W/H) Nonsecure bit. */
+ uint64_t reserved_59 : 1;
+ uint64_t sblkdty : 4; /**< [ 63: 60](R/W/H) Sub-block dirty bits. Ignored/loaded with 0 for RTG accesses. If [TS] is invalid
+ (0), [SBLKDTY] must be 0 or operation is undefined. */
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_l2c_tadx_tag bdk_l2c_tadx_tag_t;
+
+static inline uint64_t BDK_L2C_TADX_TAG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_TADX_TAG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e050020000ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e050020000ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=7))
+ return 0x87e050020000ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("L2C_TADX_TAG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_TADX_TAG(a) bdk_l2c_tadx_tag_t
+#define bustype_BDK_L2C_TADX_TAG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_TADX_TAG(a) "L2C_TADX_TAG"
+#define device_bar_BDK_L2C_TADX_TAG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_TADX_TAG(a) (a)
+#define arguments_BDK_L2C_TADX_TAG(a) (a),-1,-1,-1
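+
+/* Usage sketch (illustrative): decoding the tag state captured by an LTGL2I
+ * command. Only fields common to the .s view are used, so the sketch is
+ * independent of the CN81XX/CN83XX/CN88XX layout differences above. Assumes
+ * the BDK CSR accessors from bdk.h; the function name is hypothetical.
+ *
+ *   static const char *tad_tag_state(bdk_node_t node, int tad)
+ *   {
+ *       bdk_l2c_tadx_tag_t tag;
+ *       tag.u = BDK_CSR_READ(node, BDK_L2C_TADX_TAG(tad));
+ *       switch (tag.s.ts)
+ *       {
+ *           case 0x0: return "Invalid";
+ *           case 0x1: return "Shared";
+ *           case 0x2: return "Exclusive";
+ *           default:  return "Reserved";
+ *       }
+ *   }
+ */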
+
+/**
+ * Register (RSL) l2c_tad#_timeout
+ *
+ * L2C TAD LFB Timeout Info Registers
+ * This register records error information for an LFBTO (LFB TimeOut). The first LFBTO error
+ * locks the register until the logged error type is cleared. If multiple LFBs timed out
+ * simultaneously, this register contains the information from the lowest-numbered LFB that
+ * timed out. The logged address can be either the original transaction address or the
+ * replacement address (if both could have timed out, the transaction address is logged).
+ */
+union bdk_l2c_tadx_timeout
+{
+ uint64_t u;
+ struct bdk_l2c_tadx_timeout_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t infolfb : 1; /**< [ 63: 63](RO/H) Logged address information is for the original LFB transaction. */
+ uint64_t infovab : 1; /**< [ 62: 62](RO/H) Logged address information is for the VAB (replacement). If both this and
+ [INFOLFB] are set, then both could have timed out, but the information captured
+ is from the original LFB. */
+ uint64_t reserved_57_61 : 5;
+ uint64_t lfbnum : 5; /**< [ 56: 52](RO/H) The LFB number of the entry that timed out and has its information captured in this register. */
+ uint64_t cmd : 8; /**< [ 51: 44](RO/H) Encoding of XMC or CCPI command causing error.
+ Internal:
+ If CMD\<7\>==1, use XMC_CMD_E to decode CMD\<6:0\>. If CMD\<7:5\>==0, use
+ OCI_MREQ_CMD_E to
+ decode CMD\<4:0\>. If CMD\<7:5\>==1, use OCI_MFWD_CMD_E to decode CMD\<4:0\>. If CMD\<7:5\>==2,
+ use OCI_MRSP_CMD_E to decode CMD\<4:0\>. */
+ uint64_t reserved_42_43 : 2;
+ uint64_t node : 2; /**< [ 41: 40](RO/H) Home node of the address causing the error. Similar to [ADDR], this can be the
+ request address (if [INFOLFB] is set) or the replacement address (if [INFOLFB] is
+ clear and [INFOVAB] is set). */
+ uint64_t addr : 33; /**< [ 39: 7](RO/H) Cache line address causing the error. This can be either the request address
+ (if [INFOLFB] is set) or the replacement address (if [INFOLFB] is clear and
+ [INFOVAB] is set). This address is a physical address. L2C performs index
+ aliasing (if enabled) on the written address and uses that for the command.
+ This index-aliased address is what is returned on a read of L2C_XMC_CMD. */
+ uint64_t reserved_0_6 : 7;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_6 : 7;
+ uint64_t addr : 33; /**< [ 39: 7](RO/H) Cache line address causing the error. This can be either the request address
+ (if [INFOLFB] is set) or the replacement address (if [INFOLFB] is clear and
+ [INFOVAB] is set). This address is a physical address. L2C performs index
+ aliasing (if enabled) on the written address and uses that for the command.
+ This index-aliased address is what is returned on a read of L2C_XMC_CMD. */
+ uint64_t node : 2; /**< [ 41: 40](RO/H) Home node of the address causing the error. Similar to [ADDR], this can be the
+ request address (if [INFOLFB] is set) or the replacement address (if [INFOLFB] is
+ clear and [INFOVAB] is set). */
+ uint64_t reserved_42_43 : 2;
+ uint64_t cmd : 8; /**< [ 51: 44](RO/H) Encoding of XMC or CCPI command causing error.
+ Internal:
+ If CMD\<7\>==1, use XMC_CMD_E to decode CMD\<6:0\>. If CMD\<7:5\>==0, use
+ OCI_MREQ_CMD_E to
+ decode CMD\<4:0\>. If CMD\<7:5\>==1, use OCI_MFWD_CMD_E to decode CMD\<4:0\>. If CMD\<7:5\>==2,
+ use OCI_MRSP_CMD_E to decode CMD\<4:0\>. */
+ uint64_t lfbnum : 5; /**< [ 56: 52](RO/H) The LFB number of the entry that timed out and has its information captured in this register. */
+ uint64_t reserved_57_61 : 5;
+ uint64_t infovab : 1; /**< [ 62: 62](RO/H) Logged address information is for the VAB (replacement). If both this and
+ [INFOLFB] are set, then both could have timed out, but the information captured
+ is from the original LFB. */
+ uint64_t infolfb : 1; /**< [ 63: 63](RO/H) Logged address information is for the original LFB transaction. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_tadx_timeout_s cn; */
+};
+typedef union bdk_l2c_tadx_timeout bdk_l2c_tadx_timeout_t;
+
+static inline uint64_t BDK_L2C_TADX_TIMEOUT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_TADX_TIMEOUT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e050050100ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e050050100ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=7))
+ return 0x87e050050100ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("L2C_TADX_TIMEOUT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_TADX_TIMEOUT(a) bdk_l2c_tadx_timeout_t
+#define bustype_BDK_L2C_TADX_TIMEOUT(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_TADX_TIMEOUT(a) "L2C_TADX_TIMEOUT"
+#define device_bar_BDK_L2C_TADX_TIMEOUT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_TADX_TIMEOUT(a) (a)
+#define arguments_BDK_L2C_TADX_TIMEOUT(a) (a),-1,-1,-1
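+
+/* Usage sketch (illustrative): decoding a logged LFB timeout. [ADDR] holds
+ * address bits \<39:7\>, so the field value is shifted left by 7 to recover
+ * the physical byte address of the cache line. Assumes BDK_CSR_READ from
+ * bdk.h; the function name is hypothetical.
+ *
+ *   static void report_lfb_timeout(bdk_node_t node, int tad)
+ *   {
+ *       bdk_l2c_tadx_timeout_t to;
+ *       to.u = BDK_CSR_READ(node, BDK_L2C_TADX_TIMEOUT(tad));
+ *       if (to.s.infolfb || to.s.infovab)
+ *           printf("TAD%d LFB %d timed out, %s address 0x%llx\n", tad,
+ *                  (int)to.s.lfbnum,
+ *                  to.s.infolfb ? "transaction" : "replacement",
+ *                  (unsigned long long)to.s.addr << 7);
+ *   }
+ */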
+
+/**
+ * Register (RSL) l2c_tad#_timetwo
+ *
+ * L2C TAD LFB Timeout Count Registers
+ * This register records the number of LFB entries that have timed out.
+ */
+union bdk_l2c_tadx_timetwo
+{
+ uint64_t u;
+ struct bdk_l2c_tadx_timetwo_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t sid : 4; /**< [ 32: 29](RO/H) Source ID of the original request, that is, the 'source' of the request. This is
+ only valid if the request is a local request (valid if L2C_TAD()_TIMEOUT[CMD] is an
+ XMC request; not relevant if it is a CCPI request). */
+ uint64_t busid : 4; /**< [ 28: 25](RO/H) Bus ID of the original request, that is, the 'source' of the request. */
+ uint64_t vabst : 3; /**< [ 24: 22](RO/H) This is the LFB internal state if [INFOLFB] is set; otherwise it contains the VAB
+ internal state if [INFOVAB] is set. */
+ uint64_t lfbst : 14; /**< [ 21: 8](RO/H) This is the LFB internal state if [INFOLFB] is set; otherwise it contains the VAB
+ internal state if [INFOVAB] is set. */
+ uint64_t tocnt : 8; /**< [ 7: 0](RO/H) A running count of LFB timeouts; the count saturates at 0xFF and clears when the
+ LFBTO interrupt is cleared. */
+#else /* Word 0 - Little Endian */
+ uint64_t tocnt : 8; /**< [ 7: 0](RO/H) A running count of LFB timeouts; the count saturates at 0xFF and clears when the
+ LFBTO interrupt is cleared. */
+ uint64_t lfbst : 14; /**< [ 21: 8](RO/H) This is the LFB internal state if [INFOLFB] is set; otherwise it contains the VAB
+ internal state if [INFOVAB] is set. */
+ uint64_t vabst : 3; /**< [ 24: 22](RO/H) This is the LFB internal state if [INFOLFB] is set; otherwise it contains the VAB
+ internal state if [INFOVAB] is set. */
+ uint64_t busid : 4; /**< [ 28: 25](RO/H) Bus ID of the original request, that is, the 'source' of the request. */
+ uint64_t sid : 4; /**< [ 32: 29](RO/H) Source ID of the original request, that is, the 'source' of the request. This is
+ only valid if the request is a local request (valid if L2C_TAD()_TIMEOUT[CMD] is an
+ XMC request; not relevant if it is a CCPI request). */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_tadx_timetwo_s cn; */
+};
+typedef union bdk_l2c_tadx_timetwo bdk_l2c_tadx_timetwo_t;
+
+static inline uint64_t BDK_L2C_TADX_TIMETWO(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_TADX_TIMETWO(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e050050000ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e050050000ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=7))
+ return 0x87e050050000ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("L2C_TADX_TIMETWO", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_TADX_TIMETWO(a) bdk_l2c_tadx_timetwo_t
+#define bustype_BDK_L2C_TADX_TIMETWO(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_TADX_TIMETWO(a) "L2C_TADX_TIMETWO"
+#define device_bar_BDK_L2C_TADX_TIMETWO(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_TADX_TIMETWO(a) (a)
+#define arguments_BDK_L2C_TADX_TIMETWO(a) (a),-1,-1,-1
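+
+/* Usage sketch (illustrative): sampling the saturating timeout counter.
+ * [TOCNT] saturates at 0xFF and clears when the LFBTO interrupt is
+ * acknowledged, so it should be read before clearing that interrupt.
+ * Assumes BDK_CSR_READ from bdk.h.
+ *
+ *   bdk_l2c_tadx_timetwo_t t2;
+ *   t2.u = BDK_CSR_READ(node, BDK_L2C_TADX_TIMETWO(tad));
+ *   int timeouts = t2.s.tocnt;    // 0xFF means 255 or more
+ */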
+
+/**
+ * Register (RSL) l2c_tad_ctl
+ *
+ * L2C TAD Control Register
+ * In CNXXXX, [MAXLFB], [EXLRQ], [EXRRQ], [EXFWD], and [EXVIC] refer to half-TAD
+ * LFBs/VABs. Therefore, even though there are 32 LFBs/VABs in a full TAD, the number
+ * applies to both halves.
+ * * If [MAXLFB] != 0, [VBF_THRESH] should be less than [MAXLFB].
+ * * If [MAXVBF] != 0, [VBF_THRESH] should be less than [MAXVBF].
+ * * If [MAXLFB] != 0, [EXLRQ] + [EXRRQ] + [EXFWD] + [EXVIC] must be less than or equal to [MAXLFB] - 4.
+ * * If [MAXLFB] == 0, [EXLRQ] + [EXRRQ] + [EXFWD] + [EXVIC] must be less than or equal to 12.
+ */
+union bdk_l2c_tad_ctl
+{
+ uint64_t u;
+ struct bdk_l2c_tad_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t frcnalc : 1; /**< [ 32: 32](R/W) When set, all cache accesses are forced to not allocate in the local L2. */
+ uint64_t disrstp : 1; /**< [ 31: 31](RO) Reserved. */
+ uint64_t wtlmcwrdn : 1; /**< [ 30: 30](R/W) Be more conservative with LFB done relative to LMC writes. */
+ uint64_t wtinvdn : 1; /**< [ 29: 29](R/W) Be more conservative with LFB done relative to invalidates. */
+ uint64_t wtfilldn : 1; /**< [ 28: 28](R/W) Be more conservative with LFB done relative to fills. */
+ uint64_t exlrq : 4; /**< [ 27: 24](RO) Reserved. */
+ uint64_t exrrq : 4; /**< [ 23: 20](RO) Reserved. */
+ uint64_t exfwd : 4; /**< [ 19: 16](RO) Reserved. */
+ uint64_t exvic : 4; /**< [ 15: 12](RO) Reserved. */
+ uint64_t vbf_thresh : 4; /**< [ 11: 8](R/W) VBF threshold. When the number of in-use VBFs exceeds this number the L2C TAD increases
+ the priority of all its write operations in the LMC. */
+ uint64_t maxvbf : 4; /**< [ 7: 4](R/W) Maximum VBFs in use at once (0 means 16, 1-15 as expected). */
+ uint64_t maxlfb : 4; /**< [ 3: 0](R/W) Maximum VABs/LFBs in use at once (0 means 16, 1-15 as expected). */
+#else /* Word 0 - Little Endian */
+ uint64_t maxlfb : 4; /**< [ 3: 0](R/W) Maximum VABs/LFBs in use at once (0 means 16, 1-15 as expected). */
+ uint64_t maxvbf : 4; /**< [ 7: 4](R/W) Maximum VBFs in use at once (0 means 16, 1-15 as expected). */
+ uint64_t vbf_thresh : 4; /**< [ 11: 8](R/W) VBF threshold. When the number of in-use VBFs exceeds this number the L2C TAD increases
+ the priority of all its write operations in the LMC. */
+ uint64_t exvic : 4; /**< [ 15: 12](RO) Reserved. */
+ uint64_t exfwd : 4; /**< [ 19: 16](RO) Reserved. */
+ uint64_t exrrq : 4; /**< [ 23: 20](RO) Reserved. */
+ uint64_t exlrq : 4; /**< [ 27: 24](RO) Reserved. */
+ uint64_t wtfilldn : 1; /**< [ 28: 28](R/W) Be more conservative with LFB done relative to fills. */
+ uint64_t wtinvdn : 1; /**< [ 29: 29](R/W) Be more conservative with LFB done relative to invalidates. */
+ uint64_t wtlmcwrdn : 1; /**< [ 30: 30](R/W) Be more conservative with LFB done relative to LMC writes. */
+ uint64_t disrstp : 1; /**< [ 31: 31](RO) Reserved. */
+ uint64_t frcnalc : 1; /**< [ 32: 32](R/W) When set, all cache accesses are forced to not allocate in the local L2. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_tad_ctl_s cn81xx; */
+ struct bdk_l2c_tad_ctl_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t frcnalc : 1; /**< [ 32: 32](R/W) When set, all cache accesses are forced to not allocate in the local L2. */
+ uint64_t disrstp : 1; /**< [ 31: 31](R/W) When set, if the L2 receives an RSTP XMC command, it treats it as a STP. */
+ uint64_t wtlmcwrdn : 1; /**< [ 30: 30](R/W) Be more conservative with LFB done relative to LMC writes. */
+ uint64_t wtinvdn : 1; /**< [ 29: 29](R/W) Be more conservative with LFB done relative to invalidates. */
+ uint64_t wtfilldn : 1; /**< [ 28: 28](R/W) Be more conservative with LFB done relative to fills. */
+ uint64_t exlrq : 4; /**< [ 27: 24](R/W) Extra LFBs to reserve for locally generated XMC commands. None are reserved for functional
+ correctness. Ignored if L2C_OCI_CTL[ENAOCI] is 0. */
+ uint64_t exrrq : 4; /**< [ 23: 20](R/W) Extra LFBs to reserve for Rxxx CCPI commands beyond the 1 required for CCPI protocol
+ functional correctness. Ignored if L2C_OCI_CTL[ENAOCI] is 0. */
+ uint64_t exfwd : 4; /**< [ 19: 16](R/W) Extra LFBs to reserve for Fxxx/SINV CCPI commands beyond the 1 required for CCPI protocol
+ functional correctness. Ignored if L2C_OCI_CTL[ENAOCI] is 0. */
+ uint64_t exvic : 4; /**< [ 15: 12](R/W) Extra LFBs to reserve for VICx CCPI commands beyond the 1 required for CCPI protocol
+ functional correctness. Ignored if L2C_OCI_CTL[ENAOCI] is 0. */
+ uint64_t vbf_thresh : 4; /**< [ 11: 8](R/W) VBF threshold. When the number of in-use VBFs exceeds this number the L2C TAD increases
+ the priority of all its write operations in the LMC. */
+ uint64_t maxvbf : 4; /**< [ 7: 4](R/W) Maximum VBFs in use at once (0 means 16, 1-15 as expected). */
+ uint64_t maxlfb : 4; /**< [ 3: 0](R/W) Maximum VABs/LFBs in use at once (0 means 16, 1-15 as expected). */
+#else /* Word 0 - Little Endian */
+ uint64_t maxlfb : 4; /**< [ 3: 0](R/W) Maximum VABs/LFBs in use at once (0 means 16, 1-15 as expected). */
+ uint64_t maxvbf : 4; /**< [ 7: 4](R/W) Maximum VBFs in use at once (0 means 16, 1-15 as expected). */
+ uint64_t vbf_thresh : 4; /**< [ 11: 8](R/W) VBF threshold. When the number of in-use VBFs exceeds this number the L2C TAD increases
+ the priority of all its write operations in the LMC. */
+ uint64_t exvic : 4; /**< [ 15: 12](R/W) Extra LFBs to reserve for VICx CCPI commands beyond the 1 required for CCPI protocol
+ functional correctness. Ignored if L2C_OCI_CTL[ENAOCI] is 0. */
+ uint64_t exfwd : 4; /**< [ 19: 16](R/W) Extra LFBs to reserve for Fxxx/SINV CCPI commands beyond the 1 required for CCPI protocol
+ functional correctness. Ignored if L2C_OCI_CTL[ENAOCI] is 0. */
+ uint64_t exrrq : 4; /**< [ 23: 20](R/W) Extra LFBs to reserve for Rxxx CCPI commands beyond the 1 required for CCPI protocol
+ functional correctness. Ignored if L2C_OCI_CTL[ENAOCI] is 0. */
+ uint64_t exlrq : 4; /**< [ 27: 24](R/W) Extra LFBs to reserve for locally generated XMC commands. None are reserved for functional
+ correctness. Ignored if L2C_OCI_CTL[ENAOCI] is 0. */
+ uint64_t wtfilldn : 1; /**< [ 28: 28](R/W) Be more conservative with LFB done relative to fills. */
+ uint64_t wtinvdn : 1; /**< [ 29: 29](R/W) Be more conservative with LFB done relative to invalidates. */
+ uint64_t wtlmcwrdn : 1; /**< [ 30: 30](R/W) Be more conservative with LFB done relative to LMC writes. */
+ uint64_t disrstp : 1; /**< [ 31: 31](R/W) When set, if the L2 receives an RSTP XMC command, it treats it as a STP. */
+ uint64_t frcnalc : 1; /**< [ 32: 32](R/W) When set, all cache accesses are forced to not allocate in the local L2. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_l2c_tad_ctl_s cn83xx; */
+};
+typedef union bdk_l2c_tad_ctl bdk_l2c_tad_ctl_t;
+
+#define BDK_L2C_TAD_CTL BDK_L2C_TAD_CTL_FUNC()
+static inline uint64_t BDK_L2C_TAD_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_TAD_CTL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x87e080800018ll;
+ __bdk_csr_fatal("L2C_TAD_CTL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_TAD_CTL bdk_l2c_tad_ctl_t
+#define bustype_BDK_L2C_TAD_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_TAD_CTL "L2C_TAD_CTL"
+#define device_bar_BDK_L2C_TAD_CTL 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_TAD_CTL 0
+#define arguments_BDK_L2C_TAD_CTL -1,-1,-1,-1
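+
+/* Usage sketch (illustrative): programming [VBF_THRESH] while honoring the
+ * constraints listed above. BDK_CSR_MODIFY is the BDK's read-modify-write
+ * helper from bdk.h; the threshold value is an arbitrary example, not a
+ * recommendation.
+ *
+ *   BDK_CSR_MODIFY(c, node, BDK_L2C_TAD_CTL,
+ *       c.s.maxvbf = 0;           // 0 means 16 VBFs
+ *       c.s.vbf_thresh = 8);      // must be < [MAXVBF] when [MAXVBF] != 0
+ */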
+
+/**
+ * Register (RSL) l2c_wpar_iob#
+ *
+ * L2C IOB Way Partitioning Registers
+ */
+union bdk_l2c_wpar_iobx
+{
+ uint64_t u;
+ struct bdk_l2c_wpar_iobx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t mask : 16; /**< [ 15: 0](R/W/H) Way partitioning mask (1 means do not use). The read value of [MASK] includes bits set
+ because of the L2C cripple fuses. */
+#else /* Word 0 - Little Endian */
+ uint64_t mask : 16; /**< [ 15: 0](R/W/H) Way partitioning mask (1 means do not use). The read value of [MASK] includes bits set
+ because of the L2C cripple fuses. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_wpar_iobx_s cn; */
+};
+typedef union bdk_l2c_wpar_iobx bdk_l2c_wpar_iobx_t;
+
+static inline uint64_t BDK_L2C_WPAR_IOBX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_WPAR_IOBX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX) && (a<=15))
+ return 0x87e080840200ll + 8ll * ((a) & 0xf);
+ __bdk_csr_fatal("L2C_WPAR_IOBX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_WPAR_IOBX(a) bdk_l2c_wpar_iobx_t
+#define bustype_BDK_L2C_WPAR_IOBX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_WPAR_IOBX(a) "L2C_WPAR_IOBX"
+#define device_bar_BDK_L2C_WPAR_IOBX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_WPAR_IOBX(a) (a)
+#define arguments_BDK_L2C_WPAR_IOBX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) l2c_wpar_pp#
+ *
+ * L2C Core Way Partitioning Registers
+ */
+union bdk_l2c_wpar_ppx
+{
+ uint64_t u;
+ struct bdk_l2c_wpar_ppx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t mask : 16; /**< [ 15: 0](R/W/H) Way partitioning mask (1 means do not use). The read value of [MASK] includes bits set
+ because of the L2C cripple fuses. */
+#else /* Word 0 - Little Endian */
+ uint64_t mask : 16; /**< [ 15: 0](R/W/H) Way partitioning mask (1 means do not use). The read value of [MASK] includes bits set
+ because of the L2C cripple fuses. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_wpar_ppx_s cn; */
+};
+typedef union bdk_l2c_wpar_ppx bdk_l2c_wpar_ppx_t;
+
+static inline uint64_t BDK_L2C_WPAR_PPX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_WPAR_PPX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e080840000ll + 8ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+ return 0x87e080840000ll + 8ll * ((a) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+ return 0x87e080840000ll + 8ll * ((a) & 0x3f);
+ __bdk_csr_fatal("L2C_WPAR_PPX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_WPAR_PPX(a) bdk_l2c_wpar_ppx_t
+#define bustype_BDK_L2C_WPAR_PPX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_WPAR_PPX(a) "L2C_WPAR_PPX"
+#define device_bar_BDK_L2C_WPAR_PPX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_WPAR_PPX(a) (a)
+#define arguments_BDK_L2C_WPAR_PPX(a) (a),-1,-1,-1
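+
+/* Usage sketch (illustrative): way-partitioning core 0 away from the upper
+ * eight L2 ways by setting the mask bits for the ways it must not allocate
+ * into. The read-back value may include extra bits forced by the L2C
+ * cripple fuses. Assumes BDK_CSR_WRITE/BDK_CSR_READ from bdk.h.
+ *
+ *   BDK_CSR_WRITE(node, BDK_L2C_WPAR_PPX(0), 0xff00);  // ways 8-15 blocked
+ *   uint64_t mask = BDK_CSR_READ(node, BDK_L2C_WPAR_PPX(0)) & 0xffff;
+ */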
+
+#endif /* __BDK_CSRS_L2C_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-l2c_cbc.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-l2c_cbc.h
new file mode 100644
index 0000000000..80da3a1588
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-l2c_cbc.h
@@ -0,0 +1,1282 @@
+#ifndef __BDK_CSRS_L2C_CBC_H__
+#define __BDK_CSRS_L2C_CBC_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium L2C_CBC.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Enumeration l2c_cbc_bar_e
+ *
+ * L2C CBC Base Address Register Enumeration
+ * Enumerates the base address registers.
+ */
+#define BDK_L2C_CBC_BAR_E_L2C_CBCX_PF_BAR0(a) (0x87e058000000ll + 0x1000000ll * (a))
+#define BDK_L2C_CBC_BAR_E_L2C_CBCX_PF_BAR0_SIZE 0x800000ull
+#define BDK_L2C_CBC_BAR_E_L2C_CBCX_PF_BAR4(a) (0x87e058f00000ll + 0x1000000ll * (a))
+#define BDK_L2C_CBC_BAR_E_L2C_CBCX_PF_BAR4_SIZE 0x100000ull
+
+/**
+ * Enumeration l2c_cbc_int_vec_e
+ *
+ * L2C CBC MSI-X Vector Enumeration
+ * Enumerates the MSI-X interrupt vectors.
+ */
+#define BDK_L2C_CBC_INT_VEC_E_INTS (0)
+
+/**
+ * Register (RSL) l2c_cbc#_int_ena_w1c
+ *
+ * L2C CBC Interrupt Enable Clear Registers
+ * This register clears interrupt enable bits.
+ */
+union bdk_l2c_cbcx_int_ena_w1c
+{
+ uint64_t u;
+ struct bdk_l2c_cbcx_int_ena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t gsyncto : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for L2C_CBC(0..1)_INT_W1C[GSYNCTO].
+ Internal:
+ The CBC global sync timeout only, so not an OCI timeout. */
+ uint64_t iowrdisoci : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[IOWRDISOCI]. */
+ uint64_t iorddisoci : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[IORDDISOCI]. */
+ uint64_t mibdbe : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[MIBDBE]. */
+ uint64_t mibsbe : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[MIBSBE]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t rsddbe : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[RSDDBE]. */
+ uint64_t rsdsbe : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[RSDSBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rsdsbe : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[RSDSBE]. */
+ uint64_t rsddbe : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[RSDDBE]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t mibsbe : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[MIBSBE]. */
+ uint64_t mibdbe : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[MIBDBE]. */
+ uint64_t iorddisoci : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[IORDDISOCI]. */
+ uint64_t iowrdisoci : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[IOWRDISOCI]. */
+ uint64_t gsyncto : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for L2C_CBC(0..1)_INT_W1C[GSYNCTO].
+ Internal:
+ The CBC global sync timeout only, so not an OCI timeout. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_l2c_cbcx_int_ena_w1c_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t reserved_8 : 1;
+ uint64_t iowrdisoci : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[IOWRDISOCI]. */
+ uint64_t iorddisoci : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[IORDDISOCI]. */
+ uint64_t mibdbe : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[MIBDBE]. */
+ uint64_t mibsbe : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[MIBSBE]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t rsddbe : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[RSDDBE]. */
+ uint64_t rsdsbe : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[RSDSBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rsdsbe : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[RSDSBE]. */
+ uint64_t rsddbe : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[RSDDBE]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t mibsbe : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[MIBSBE]. */
+ uint64_t mibdbe : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[MIBDBE]. */
+ uint64_t iorddisoci : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[IORDDISOCI]. */
+ uint64_t iowrdisoci : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[IOWRDISOCI]. */
+ uint64_t reserved_8 : 1;
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_l2c_cbcx_int_ena_w1c_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t gsyncto : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for L2C_CBC(0)_INT_W1C[GSYNCTO].
+ Internal:
+ The CBC global sync timeout only, so not an OCI timeout. */
+ uint64_t iowrdisoci : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for L2C_CBC(0)_INT_W1C[IOWRDISOCI]. */
+ uint64_t iorddisoci : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for L2C_CBC(0)_INT_W1C[IORDDISOCI]. */
+ uint64_t mibdbe : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for L2C_CBC(0)_INT_W1C[MIBDBE]. */
+ uint64_t mibsbe : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for L2C_CBC(0)_INT_W1C[MIBSBE]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t rsddbe : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for L2C_CBC(0)_INT_W1C[RSDDBE]. */
+ uint64_t rsdsbe : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for L2C_CBC(0)_INT_W1C[RSDSBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rsdsbe : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for L2C_CBC(0)_INT_W1C[RSDSBE]. */
+ uint64_t rsddbe : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for L2C_CBC(0)_INT_W1C[RSDDBE]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t mibsbe : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for L2C_CBC(0)_INT_W1C[MIBSBE]. */
+ uint64_t mibdbe : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for L2C_CBC(0)_INT_W1C[MIBDBE]. */
+ uint64_t iorddisoci : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for L2C_CBC(0)_INT_W1C[IORDDISOCI]. */
+ uint64_t iowrdisoci : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for L2C_CBC(0)_INT_W1C[IOWRDISOCI]. */
+ uint64_t gsyncto : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for L2C_CBC(0)_INT_W1C[GSYNCTO].
+ Internal:
+ The CBC global sync timeout only, so not an OCI timeout. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_l2c_cbcx_int_ena_w1c_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t gsyncto : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for L2C_CBC(0..1)_INT_W1C[GSYNCTO].
+ Internal:
+ The CBC global sync timeout only, so not an OCI timeout. */
+ uint64_t iowrdisoci : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for L2C_CBC(0..1)_INT_W1C[IOWRDISOCI]. */
+ uint64_t iorddisoci : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for L2C_CBC(0..1)_INT_W1C[IORDDISOCI]. */
+ uint64_t mibdbe : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for L2C_CBC(0..1)_INT_W1C[MIBDBE]. */
+ uint64_t mibsbe : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for L2C_CBC(0..1)_INT_W1C[MIBSBE]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t rsddbe : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for L2C_CBC(0..1)_INT_W1C[RSDDBE]. */
+ uint64_t rsdsbe : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for L2C_CBC(0..1)_INT_W1C[RSDSBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rsdsbe : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for L2C_CBC(0..1)_INT_W1C[RSDSBE]. */
+ uint64_t rsddbe : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for L2C_CBC(0..1)_INT_W1C[RSDDBE]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t mibsbe : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for L2C_CBC(0..1)_INT_W1C[MIBSBE]. */
+ uint64_t mibdbe : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for L2C_CBC(0..1)_INT_W1C[MIBDBE]. */
+ uint64_t iorddisoci : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for L2C_CBC(0..1)_INT_W1C[IORDDISOCI]. */
+ uint64_t iowrdisoci : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for L2C_CBC(0..1)_INT_W1C[IOWRDISOCI]. */
+ uint64_t gsyncto : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for L2C_CBC(0..1)_INT_W1C[GSYNCTO].
+ Internal:
+ The CBC global sync timeout only, so not an OCI timeout. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } cn83xx;
+ struct bdk_l2c_cbcx_int_ena_w1c_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t gsyncto : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[GSYNCTO].
+ Internal:
+ The CBC global sync timeout only, so not an OCI timeout. */
+ uint64_t iowrdisoci : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[IOWRDISOCI]. */
+ uint64_t iorddisoci : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[IORDDISOCI]. */
+ uint64_t mibdbe : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[MIBDBE]. */
+ uint64_t mibsbe : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[MIBSBE]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t rsddbe : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[RSDDBE]. */
+ uint64_t rsdsbe : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[RSDSBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rsdsbe : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[RSDSBE]. */
+ uint64_t rsddbe : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[RSDDBE]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t mibsbe : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[MIBSBE]. */
+ uint64_t mibdbe : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[MIBDBE]. */
+ uint64_t iorddisoci : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[IORDDISOCI]. */
+ uint64_t iowrdisoci : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[IOWRDISOCI]. */
+ uint64_t gsyncto : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for L2C_CBC(0..3)_INT_W1C[GSYNCTO].
+ Internal:
+ The CBC global sync timeout only, so not an OCI timeout. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_l2c_cbcx_int_ena_w1c bdk_l2c_cbcx_int_ena_w1c_t;
+
+static inline uint64_t BDK_L2C_CBCX_INT_ENA_W1C(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_CBCX_INT_ENA_W1C(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e058060020ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e058060020ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e058060020ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("L2C_CBCX_INT_ENA_W1C", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_CBCX_INT_ENA_W1C(a) bdk_l2c_cbcx_int_ena_w1c_t
+#define bustype_BDK_L2C_CBCX_INT_ENA_W1C(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_CBCX_INT_ENA_W1C(a) "L2C_CBCX_INT_ENA_W1C"
+#define device_bar_BDK_L2C_CBCX_INT_ENA_W1C(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_CBCX_INT_ENA_W1C(a) (a)
+#define arguments_BDK_L2C_CBCX_INT_ENA_W1C(a) (a),-1,-1,-1
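+
+/* Usage sketch (illustrative): with W1C semantics, writing a one clears the
+ * corresponding enable, and written zeros leave the other enables untouched.
+ * Here the single-bit-error enables are cleared, e.g. while software counts
+ * corrected errors by polling instead. Assumes BDK_CSR_WRITE from bdk.h.
+ *
+ *   bdk_l2c_cbcx_int_ena_w1c_t dis = { .u = 0 };
+ *   dis.s.rsdsbe = 1;
+ *   dis.s.mibsbe = 1;
+ *   BDK_CSR_WRITE(node, BDK_L2C_CBCX_INT_ENA_W1C(0), dis.u);
+ */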
+
+/**
+ * Register (RSL) l2c_cbc#_int_ena_w1s
+ *
+ * L2C CBC Interrupt Enable Set Registers
+ * This register sets interrupt enable bits.
+ */
+union bdk_l2c_cbcx_int_ena_w1s
+{
+ uint64_t u;
+ struct bdk_l2c_cbcx_int_ena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t gsyncto : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for L2C_CBC(0..1)_INT_W1C[GSYNCTO].
+ Internal:
+ The CBC global sync timeout only, so not an OCI timeout. */
+ uint64_t iowrdisoci : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[IOWRDISOCI]. */
+ uint64_t iorddisoci : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[IORDDISOCI]. */
+ uint64_t mibdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[MIBDBE]. */
+ uint64_t mibsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[MIBSBE]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t rsddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[RSDDBE]. */
+ uint64_t rsdsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[RSDSBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rsdsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[RSDSBE]. */
+ uint64_t rsddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[RSDDBE]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t mibsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[MIBSBE]. */
+ uint64_t mibdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[MIBDBE]. */
+ uint64_t iorddisoci : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[IORDDISOCI]. */
+ uint64_t iowrdisoci : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[IOWRDISOCI]. */
+ uint64_t gsyncto : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for L2C_CBC(0..1)_INT_W1C[GSYNCTO].
+ Internal:
+ The CBC global sync timeout only, so not an OCI timeout. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_l2c_cbcx_int_ena_w1s_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t reserved_8 : 1;
+ uint64_t iowrdisoci : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[IOWRDISOCI]. */
+ uint64_t iorddisoci : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[IORDDISOCI]. */
+ uint64_t mibdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[MIBDBE]. */
+ uint64_t mibsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[MIBSBE]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t rsddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[RSDDBE]. */
+ uint64_t rsdsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[RSDSBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rsdsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[RSDSBE]. */
+ uint64_t rsddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[RSDDBE]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t mibsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[MIBSBE]. */
+ uint64_t mibdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[MIBDBE]. */
+ uint64_t iorddisoci : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[IORDDISOCI]. */
+ uint64_t iowrdisoci : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[IOWRDISOCI]. */
+ uint64_t reserved_8 : 1;
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_l2c_cbcx_int_ena_w1s_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t gsyncto : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for L2C_CBC(0)_INT_W1C[GSYNCTO].
+ Internal:
+ The CBC global sync timeout only, so not an OCI timeout. */
+ uint64_t iowrdisoci : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for L2C_CBC(0)_INT_W1C[IOWRDISOCI]. */
+ uint64_t iorddisoci : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for L2C_CBC(0)_INT_W1C[IORDDISOCI]. */
+ uint64_t mibdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for L2C_CBC(0)_INT_W1C[MIBDBE]. */
+ uint64_t mibsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for L2C_CBC(0)_INT_W1C[MIBSBE]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t rsddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for L2C_CBC(0)_INT_W1C[RSDDBE]. */
+ uint64_t rsdsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for L2C_CBC(0)_INT_W1C[RSDSBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rsdsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for L2C_CBC(0)_INT_W1C[RSDSBE]. */
+ uint64_t rsddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for L2C_CBC(0)_INT_W1C[RSDDBE]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t mibsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for L2C_CBC(0)_INT_W1C[MIBSBE]. */
+ uint64_t mibdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for L2C_CBC(0)_INT_W1C[MIBDBE]. */
+ uint64_t iorddisoci : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for L2C_CBC(0)_INT_W1C[IORDDISOCI]. */
+ uint64_t iowrdisoci : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for L2C_CBC(0)_INT_W1C[IOWRDISOCI]. */
+ uint64_t gsyncto : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for L2C_CBC(0)_INT_W1C[GSYNCTO].
+ Internal:
+ The CBC global sync timeout only, so not an OCI timeout. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_l2c_cbcx_int_ena_w1s_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t gsyncto : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for L2C_CBC(0..1)_INT_W1C[GSYNCTO].
+ Internal:
+ The CBC global sync timeout only, so not an OCI timeout. */
+ uint64_t iowrdisoci : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for L2C_CBC(0..1)_INT_W1C[IOWRDISOCI]. */
+ uint64_t iorddisoci : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for L2C_CBC(0..1)_INT_W1C[IORDDISOCI]. */
+ uint64_t mibdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for L2C_CBC(0..1)_INT_W1C[MIBDBE]. */
+ uint64_t mibsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for L2C_CBC(0..1)_INT_W1C[MIBSBE]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t rsddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for L2C_CBC(0..1)_INT_W1C[RSDDBE]. */
+ uint64_t rsdsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for L2C_CBC(0..1)_INT_W1C[RSDSBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rsdsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for L2C_CBC(0..1)_INT_W1C[RSDSBE]. */
+ uint64_t rsddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for L2C_CBC(0..1)_INT_W1C[RSDDBE]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t mibsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for L2C_CBC(0..1)_INT_W1C[MIBSBE]. */
+ uint64_t mibdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for L2C_CBC(0..1)_INT_W1C[MIBDBE]. */
+ uint64_t iorddisoci : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for L2C_CBC(0..1)_INT_W1C[IORDDISOCI]. */
+ uint64_t iowrdisoci : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for L2C_CBC(0..1)_INT_W1C[IOWRDISOCI]. */
+ uint64_t gsyncto : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for L2C_CBC(0..1)_INT_W1C[GSYNCTO].
+ Internal:
+ The CBC global sync timeout only, so not an OCI timeout. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } cn83xx;
+ struct bdk_l2c_cbcx_int_ena_w1s_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t gsyncto : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[GSYNCTO].
+ Internal:
+ The CBC global sync timeout only, so not an OCI timeout. */
+ uint64_t iowrdisoci : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[IOWRDISOCI]. */
+ uint64_t iorddisoci : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[IORDDISOCI]. */
+ uint64_t mibdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[MIBDBE]. */
+ uint64_t mibsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[MIBSBE]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t rsddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[RSDDBE]. */
+ uint64_t rsdsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[RSDSBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rsdsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[RSDSBE]. */
+ uint64_t rsddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[RSDDBE]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t mibsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[MIBSBE]. */
+ uint64_t mibdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[MIBDBE]. */
+ uint64_t iorddisoci : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[IORDDISOCI]. */
+ uint64_t iowrdisoci : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[IOWRDISOCI]. */
+ uint64_t gsyncto : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for L2C_CBC(0..3)_INT_W1C[GSYNCTO].
+ Internal:
+ The CBC global sync timeout only, so not an OCI timeout. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_l2c_cbcx_int_ena_w1s bdk_l2c_cbcx_int_ena_w1s_t;
+
+static inline uint64_t BDK_L2C_CBCX_INT_ENA_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_CBCX_INT_ENA_W1S(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e058060028ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e058060028ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e058060028ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("L2C_CBCX_INT_ENA_W1S", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_CBCX_INT_ENA_W1S(a) bdk_l2c_cbcx_int_ena_w1s_t
+#define bustype_BDK_L2C_CBCX_INT_ENA_W1S(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_CBCX_INT_ENA_W1S(a) "L2C_CBCX_INT_ENA_W1S"
+#define device_bar_BDK_L2C_CBCX_INT_ENA_W1S(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_CBCX_INT_ENA_W1S(a) (a)
+#define arguments_BDK_L2C_CBCX_INT_ENA_W1S(a) (a),-1,-1,-1
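+
+/* Usage sketch (illustrative): the W1S companion re-enables the same
+ * interrupts; reading either register returns the current enable state.
+ * Assumes BDK_CSR_WRITE from bdk.h.
+ *
+ *   bdk_l2c_cbcx_int_ena_w1s_t ena = { .u = 0 };
+ *   ena.s.rsddbe = 1;    // enable RSD double-bit-error interrupt
+ *   ena.s.mibdbe = 1;    // enable MIB double-bit-error interrupt
+ *   BDK_CSR_WRITE(node, BDK_L2C_CBCX_INT_ENA_W1S(0), ena.u);
+ */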
+
+/**
+ * Register (RSL) l2c_cbc#_int_w1c
+ *
+ * L2C CBC Interrupt Registers
+ * This register is for CBC-based interrupts.
+ */
+union bdk_l2c_cbcx_int_w1c
+{
+ uint64_t u;
+ struct bdk_l2c_cbcx_int_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t gsyncto : 1; /**< [ 8: 8](R/W1C/H) Global sync timeout. Should never assert; for diagnostic use only.
+ Internal:
+ The CBC global sync timeout only, so not an OCI timeout. */
+ uint64_t iowrdisoci : 1; /**< [ 7: 7](R/W1C/H) Illegal I/O write operation to a remote node with L2C_OCI_CTL[ENAOCI][node]
+ clear. See L2C_CBC()_IODISOCIERR for logged information. This interrupt applies
+ to IOBST, IOBSTP, IOBADDR, IASET, IACLR, IAADD, IASWP, IACAS, and LMTST XMC
+ commands.
+ During normal hardware operation, this indicates a software failure and may be
+ considered fatal. */
+ uint64_t iorddisoci : 1; /**< [ 6: 6](R/W1C/H) Illegal I/O read operation to a remote node with L2C_OCI_CTL[ENAOCI][node]
+ clear. See L2C_CBC()_IODISOCIERR for logged information. This interrupt applies
+ to IOBLD, IASET, IACLR, IAADD, IASWP, and IACAS XMC commands.
+ During normal hardware operation, this indicates a software failure and may be
+ considered fatal. */
+ uint64_t mibdbe : 1; /**< [ 5: 5](R/W1C/H) MIB double-bit error occurred. See L2C_CBC()_MIBERR for logged information.
+ This indicates a hardware failure and may be considered fatal. */
+ uint64_t mibsbe : 1; /**< [ 4: 4](R/W1C/H) MIB single-bit error occurred. See L2C_CBC()_MIBERR for logged
+ information. Hardware corrected the failure. Software may choose to count these
+ single-bit errors. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t rsddbe : 1; /**< [ 1: 1](R/W1C/H) RSD double-bit error occurred. See L2C_CBC()_RSDERR for logged information.
+ This indicates a hardware failure and may be considered fatal. */
+ uint64_t rsdsbe : 1; /**< [ 0: 0](R/W1C/H) RSD single-bit error occurred. See L2C_CBC()_RSDERR for logged
+ information. Hardware automatically corrected the error. Software may choose to
+ count the number of these single-bit errors. */
+#else /* Word 0 - Little Endian */
+ uint64_t rsdsbe : 1; /**< [ 0: 0](R/W1C/H) RSD single-bit error occurred. See L2C_CBC()_RSDERR for logged
+ information. Hardware automatically corrected the error. Software may choose to
+ count the number of these single-bit errors. */
+ uint64_t rsddbe : 1; /**< [ 1: 1](R/W1C/H) RSD double-bit error occurred. See L2C_CBC()_RSDERR for logged information.
+ This is an indication of a hardware failure and may be considered fatal. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t mibsbe : 1; /**< [ 4: 4](R/W1C/H) MIB single-bit error occurred. See L2C_CBC()_MIBERR for logged
+ information. Hardware corrected the failure. Software may choose to count these
+ single-bit errors. */
+ uint64_t mibdbe : 1; /**< [ 5: 5](R/W1C/H) MIB double-bit error occurred. See L2C_CBC()_MIBERR for logged information.
+ This is an indication of a hardware failure and may be considered fatal. */
+ uint64_t iorddisoci : 1; /**< [ 6: 6](R/W1C/H) Illegal I/O read operation to a remote node with L2C_OCI_CTL[ENAOCI][node]
+ clear. See L2C_CBC()_IODISOCIERR for logged information. This interrupt applies
+ to IOBLD, IASET, IACLR, IAADD, IASWP, and IACAS XMC commands.
+ During normal hardware operation, this is an indication of a software failure
+ and may be considered fatal. */
+ uint64_t iowrdisoci : 1; /**< [ 7: 7](R/W1C/H) Illegal I/O write operation to a remote node with L2C_OCI_CTL[ENAOCI][node]
+ clear. See L2C_CBC()_IODISOCIERR for logged information. This interrupt applies
+ to IOBST, IOBSTP, IOBADDR, IASET, IACLR, IAADD, IASWP, IACAS, and LMTST XMC
+ commands.
+ During normal hardware operation, this is an indication of a software failure
+ and may be considered fatal. */
+ uint64_t gsyncto : 1; /**< [ 8: 8](R/W1C/H) Global sync timeout. Should never assert, for diagnostic use only.
+ Internal:
+ The CBC global sync timeout only, so not an OCI timeout. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_l2c_cbcx_int_w1c_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t reserved_8 : 1;
+ uint64_t iowrdisoci : 1; /**< [ 7: 7](R/W1C/H) Illegal I/O write operation to a remote node with L2C_OCI_CTL[ENAOCI][node]
+ clear. See L2C_CBC()_IODISOCIERR for logged information. This interrupt applies
+ to IOBST, IOBSTP, IOBADDR, IASET, IACLR, IAADD, IASWP, IACAS, and LMTST XMC
+ commands.
+ During normal hardware operation, this is an indication of a software failure
+ and may be considered fatal. */
+ uint64_t iorddisoci : 1; /**< [ 6: 6](R/W1C/H) Illegal I/O read operation to a remote node with L2C_OCI_CTL[ENAOCI][node]
+ clear. See L2C_CBC()_IODISOCIERR for logged information. This interrupt applies
+ to IOBLD, IASET, IACLR, IAADD, IASWP, and IACAS XMC commands.
+ During normal hardware operation, this is an indication of a software failure
+ and may be considered fatal. */
+ uint64_t mibdbe : 1; /**< [ 5: 5](R/W1C/H) MIB double-bit error occurred. See L2C_CBC()_MIBERR for logged information.
+ This is an indication of a hardware failure and may be considered fatal. */
+ uint64_t mibsbe : 1; /**< [ 4: 4](R/W1C/H) MIB single-bit error occurred. See L2C_CBC()_MIBERR for logged
+ information. Hardware corrected the failure. Software may choose to count these
+ single-bit errors. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t rsddbe : 1; /**< [ 1: 1](R/W1C/H) RSD double-bit error occurred. See L2C_CBC()_RSDERR for logged information.
+ This is an indication of a hardware failure and may be considered fatal. */
+ uint64_t rsdsbe : 1; /**< [ 0: 0](R/W1C/H) RSD single-bit error occurred. See L2C_CBC()_RSDERR for logged
+ information. Hardware automatically corrected the error. Software may choose to
+ count the number of these single-bit errors. */
+#else /* Word 0 - Little Endian */
+ uint64_t rsdsbe : 1; /**< [ 0: 0](R/W1C/H) RSD single-bit error occurred. See L2C_CBC()_RSDERR for logged
+ information. Hardware automatically corrected the error. Software may choose to
+ count the number of these single-bit errors. */
+ uint64_t rsddbe : 1; /**< [ 1: 1](R/W1C/H) RSD double-bit error occurred. See L2C_CBC()_RSDERR for logged information.
+ This is an indication of a hardware failure and may be considered fatal. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t mibsbe : 1; /**< [ 4: 4](R/W1C/H) MIB single-bit error occurred. See L2C_CBC()_MIBERR for logged
+ information. Hardware corrected the failure. Software may choose to count these
+ single-bit errors. */
+ uint64_t mibdbe : 1; /**< [ 5: 5](R/W1C/H) MIB double-bit error occurred. See L2C_CBC()_MIBERR for logged information.
+ This is an indication of a hardware failure and may be considered fatal. */
+ uint64_t iorddisoci : 1; /**< [ 6: 6](R/W1C/H) Illegal I/O read operation to a remote node with L2C_OCI_CTL[ENAOCI][node]
+ clear. See L2C_CBC()_IODISOCIERR for logged information. This interrupt applies
+ to IOBLD, IASET, IACLR, IAADD, IASWP, and IACAS XMC commands.
+ During normal hardware operation, this is an indication of a software failure
+ and may be considered fatal. */
+ uint64_t iowrdisoci : 1; /**< [ 7: 7](R/W1C/H) Illegal I/O write operation to a remote node with L2C_OCI_CTL[ENAOCI][node]
+ clear. See L2C_CBC()_IODISOCIERR for logged information. This interrupt applies
+ to IOBST, IOBSTP, IOBADDR, IASET, IACLR, IAADD, IASWP, IACAS, and LMTST XMC
+ commands.
+ During normal hardware operation, this is an indication of a software failure
+ and may be considered fatal. */
+ uint64_t reserved_8 : 1;
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ /* struct bdk_l2c_cbcx_int_w1c_s cn81xx; */
+ /* struct bdk_l2c_cbcx_int_w1c_s cn83xx; */
+ /* struct bdk_l2c_cbcx_int_w1c_s cn88xxp2; */
+};
+typedef union bdk_l2c_cbcx_int_w1c bdk_l2c_cbcx_int_w1c_t;
+
+static inline uint64_t BDK_L2C_CBCX_INT_W1C(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_CBCX_INT_W1C(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e058060000ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e058060000ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e058060000ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("L2C_CBCX_INT_W1C", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_CBCX_INT_W1C(a) bdk_l2c_cbcx_int_w1c_t
+#define bustype_BDK_L2C_CBCX_INT_W1C(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_CBCX_INT_W1C(a) "L2C_CBCX_INT_W1C"
+#define device_bar_BDK_L2C_CBCX_INT_W1C(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_CBCX_INT_W1C(a) (a)
+#define arguments_BDK_L2C_CBCX_INT_W1C(a) (a),-1,-1,-1
+
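+/* Usage sketch (editorial illustration, not part of the auto-generated source):
+ * acknowledging pending CBC interrupts. Reading this W1C register returns the
+ * pending bits; writing that value back clears exactly the bits that were set,
+ * so an interrupt arriving in between is not lost. BDK_CSR_INIT/BDK_CSR_WRITE
+ * are assumed from bdk-csr.h.
+ *
+ * @code
+ * static uint64_t l2c_cbc_ack_irqs(bdk_node_t node, int cbc)
+ * {
+ *     BDK_CSR_INIT(pend, node, BDK_L2C_CBCX_INT_W1C(cbc)); // read pending bits
+ *     BDK_CSR_WRITE(node, BDK_L2C_CBCX_INT_W1C(cbc), pend.u); // W1C: clear them
+ *     return pend.u; // caller may log which sources fired
+ * }
+ * @endcode
+ */
+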
+/**
+ * Register (RSL) l2c_cbc#_int_w1s
+ *
+ * L2C CBC Interrupt Set Registers
+ * This register sets interrupt bits.
+ */
+union bdk_l2c_cbcx_int_w1s
+{
+ uint64_t u;
+ struct bdk_l2c_cbcx_int_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t gsyncto : 1; /**< [ 8: 8](R/W1S/H) Reads or sets L2C_CBC(0..1)_INT_W1C[GSYNCTO].
+ Internal:
+ The CBC global sync timeout only, so not an OCI timeout. */
+ uint64_t iowrdisoci : 1; /**< [ 7: 7](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[IOWRDISOCI]. */
+ uint64_t iorddisoci : 1; /**< [ 6: 6](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[IORDDISOCI]. */
+ uint64_t mibdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[MIBDBE]. */
+ uint64_t mibsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[MIBSBE]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t rsddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[RSDDBE]. */
+ uint64_t rsdsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[RSDSBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rsdsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[RSDSBE]. */
+ uint64_t rsddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[RSDDBE]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t mibsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[MIBSBE]. */
+ uint64_t mibdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[MIBDBE]. */
+ uint64_t iorddisoci : 1; /**< [ 6: 6](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[IORDDISOCI]. */
+ uint64_t iowrdisoci : 1; /**< [ 7: 7](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[IOWRDISOCI]. */
+ uint64_t gsyncto : 1; /**< [ 8: 8](R/W1S/H) Reads or sets L2C_CBC(0..1)_INT_W1C[GSYNCTO].
+ Internal:
+ The CBC global sync timeout only, so not an OCI timeout. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_l2c_cbcx_int_w1s_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t reserved_8 : 1;
+ uint64_t iowrdisoci : 1; /**< [ 7: 7](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[IOWRDISOCI]. */
+ uint64_t iorddisoci : 1; /**< [ 6: 6](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[IORDDISOCI]. */
+ uint64_t mibdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[MIBDBE]. */
+ uint64_t mibsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[MIBSBE]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t rsddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[RSDDBE]. */
+ uint64_t rsdsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[RSDSBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rsdsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[RSDSBE]. */
+ uint64_t rsddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[RSDDBE]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t mibsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[MIBSBE]. */
+ uint64_t mibdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[MIBDBE]. */
+ uint64_t iorddisoci : 1; /**< [ 6: 6](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[IORDDISOCI]. */
+ uint64_t iowrdisoci : 1; /**< [ 7: 7](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[IOWRDISOCI]. */
+ uint64_t reserved_8 : 1;
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_l2c_cbcx_int_w1s_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t gsyncto : 1; /**< [ 8: 8](R/W1S/H) Reads or sets L2C_CBC(0)_INT_W1C[GSYNCTO].
+ Internal:
+ The CBC global sync timeout only, so not an OCI timeout. */
+ uint64_t iowrdisoci : 1; /**< [ 7: 7](R/W1S/H) Reads or sets L2C_CBC(0)_INT_W1C[IOWRDISOCI]. */
+ uint64_t iorddisoci : 1; /**< [ 6: 6](R/W1S/H) Reads or sets L2C_CBC(0)_INT_W1C[IORDDISOCI]. */
+ uint64_t mibdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets L2C_CBC(0)_INT_W1C[MIBDBE]. */
+ uint64_t mibsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets L2C_CBC(0)_INT_W1C[MIBSBE]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t rsddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets L2C_CBC(0)_INT_W1C[RSDDBE]. */
+ uint64_t rsdsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets L2C_CBC(0)_INT_W1C[RSDSBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rsdsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets L2C_CBC(0)_INT_W1C[RSDSBE]. */
+ uint64_t rsddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets L2C_CBC(0)_INT_W1C[RSDDBE]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t mibsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets L2C_CBC(0)_INT_W1C[MIBSBE]. */
+ uint64_t mibdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets L2C_CBC(0)_INT_W1C[MIBDBE]. */
+ uint64_t iorddisoci : 1; /**< [ 6: 6](R/W1S/H) Reads or sets L2C_CBC(0)_INT_W1C[IORDDISOCI]. */
+ uint64_t iowrdisoci : 1; /**< [ 7: 7](R/W1S/H) Reads or sets L2C_CBC(0)_INT_W1C[IOWRDISOCI]. */
+ uint64_t gsyncto : 1; /**< [ 8: 8](R/W1S/H) Reads or sets L2C_CBC(0)_INT_W1C[GSYNCTO].
+ Internal:
+ The CBC global sync timeout only, so not an OCI timeout. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_l2c_cbcx_int_w1s_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t gsyncto : 1; /**< [ 8: 8](R/W1S/H) Reads or sets L2C_CBC(0..1)_INT_W1C[GSYNCTO].
+ Internal:
+ The CBC global sync timeout only, so not an OCI timeout. */
+ uint64_t iowrdisoci : 1; /**< [ 7: 7](R/W1S/H) Reads or sets L2C_CBC(0..1)_INT_W1C[IOWRDISOCI]. */
+ uint64_t iorddisoci : 1; /**< [ 6: 6](R/W1S/H) Reads or sets L2C_CBC(0..1)_INT_W1C[IORDDISOCI]. */
+ uint64_t mibdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets L2C_CBC(0..1)_INT_W1C[MIBDBE]. */
+ uint64_t mibsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets L2C_CBC(0..1)_INT_W1C[MIBSBE]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t rsddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets L2C_CBC(0..1)_INT_W1C[RSDDBE]. */
+ uint64_t rsdsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets L2C_CBC(0..1)_INT_W1C[RSDSBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rsdsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets L2C_CBC(0..1)_INT_W1C[RSDSBE]. */
+ uint64_t rsddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets L2C_CBC(0..1)_INT_W1C[RSDDBE]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t mibsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets L2C_CBC(0..1)_INT_W1C[MIBSBE]. */
+ uint64_t mibdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets L2C_CBC(0..1)_INT_W1C[MIBDBE]. */
+ uint64_t iorddisoci : 1; /**< [ 6: 6](R/W1S/H) Reads or sets L2C_CBC(0..1)_INT_W1C[IORDDISOCI]. */
+ uint64_t iowrdisoci : 1; /**< [ 7: 7](R/W1S/H) Reads or sets L2C_CBC(0..1)_INT_W1C[IOWRDISOCI]. */
+ uint64_t gsyncto : 1; /**< [ 8: 8](R/W1S/H) Reads or sets L2C_CBC(0..1)_INT_W1C[GSYNCTO].
+ Internal:
+ The CBC global sync timeout only, so not an OCI timeout. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } cn83xx;
+ struct bdk_l2c_cbcx_int_w1s_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t gsyncto : 1; /**< [ 8: 8](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[GSYNCTO].
+ Internal:
+ The CBC global sync timeout only, so not an OCI timeout. */
+ uint64_t iowrdisoci : 1; /**< [ 7: 7](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[IOWRDISOCI]. */
+ uint64_t iorddisoci : 1; /**< [ 6: 6](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[IORDDISOCI]. */
+ uint64_t mibdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[MIBDBE]. */
+ uint64_t mibsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[MIBSBE]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t rsddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[RSDDBE]. */
+ uint64_t rsdsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[RSDSBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rsdsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[RSDSBE]. */
+ uint64_t rsddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[RSDDBE]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t mibsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[MIBSBE]. */
+ uint64_t mibdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[MIBDBE]. */
+ uint64_t iorddisoci : 1; /**< [ 6: 6](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[IORDDISOCI]. */
+ uint64_t iowrdisoci : 1; /**< [ 7: 7](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[IOWRDISOCI]. */
+ uint64_t gsyncto : 1; /**< [ 8: 8](R/W1S/H) Reads or sets L2C_CBC(0..3)_INT_W1C[GSYNCTO].
+ Internal:
+ The CBC global sync timeout only, so not an OCI timeout. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_l2c_cbcx_int_w1s bdk_l2c_cbcx_int_w1s_t;
+
+static inline uint64_t BDK_L2C_CBCX_INT_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_CBCX_INT_W1S(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e058060008ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e058060008ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e058060008ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("L2C_CBCX_INT_W1S", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_CBCX_INT_W1S(a) bdk_l2c_cbcx_int_w1s_t
+#define bustype_BDK_L2C_CBCX_INT_W1S(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_CBCX_INT_W1S(a) "L2C_CBCX_INT_W1S"
+#define device_bar_BDK_L2C_CBCX_INT_W1S(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_CBCX_INT_W1S(a) (a)
+#define arguments_BDK_L2C_CBCX_INT_W1S(a) (a),-1,-1,-1
+
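+/* Usage sketch (editorial illustration, not part of the auto-generated source):
+ * the W1S alias is useful for interrupt-path testing, since setting a bit here
+ * makes the corresponding bit pend in L2C_CBC(x)_INT_W1C as if the hardware
+ * event had occurred.
+ *
+ * @code
+ * static void l2c_cbc_inject_rsdsbe(bdk_node_t node, int cbc)
+ * {
+ *     bdk_l2c_cbcx_int_w1s_t w1s = { .u = 0 };
+ *     w1s.s.rsdsbe = 1; // simulate an RSD single-bit error
+ *     BDK_CSR_WRITE(node, BDK_L2C_CBCX_INT_W1S(cbc), w1s.u);
+ *     // A handler should now see RSDSBE pending in L2C_CBC(x)_INT_W1C.
+ * }
+ * @endcode
+ */
+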
+/**
+ * Register (RSL) l2c_cbc#_inv#_pfc
+ *
+ * L2C CBC INV Performance Counter Registers
+ */
+union bdk_l2c_cbcx_invx_pfc
+{
+ uint64_t u;
+ struct bdk_l2c_cbcx_invx_pfc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W/H) Current counter value. */
+#else /* Word 0 - Little Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W/H) Current counter value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_cbcx_invx_pfc_s cn; */
+};
+typedef union bdk_l2c_cbcx_invx_pfc bdk_l2c_cbcx_invx_pfc_t;
+
+static inline uint64_t BDK_L2C_CBCX_INVX_PFC(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_CBCX_INVX_PFC(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b==0)))
+ return 0x87e058000020ll + 0x1000000ll * ((a) & 0x0) + 0x40ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=1)))
+ return 0x87e058000020ll + 0x1000000ll * ((a) & 0x1) + 0x40ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b<=1)))
+ return 0x87e058000020ll + 0x1000000ll * ((a) & 0x3) + 0x40ll * ((b) & 0x1);
+ __bdk_csr_fatal("L2C_CBCX_INVX_PFC", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_L2C_CBCX_INVX_PFC(a,b) bdk_l2c_cbcx_invx_pfc_t
+#define bustype_BDK_L2C_CBCX_INVX_PFC(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_CBCX_INVX_PFC(a,b) "L2C_CBCX_INVX_PFC"
+#define device_bar_BDK_L2C_CBCX_INVX_PFC(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_CBCX_INVX_PFC(a,b) (a)
+#define arguments_BDK_L2C_CBCX_INVX_PFC(a,b) (a),(b),-1,-1
+
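+/* Usage sketch (editorial illustration, not part of the auto-generated source):
+ * the CBC performance counters are plain 64-bit R/W counters, so one way to
+ * measure an interval is to zero the counter, run the workload, then read it
+ * back. Shown for INV(0); the IOC/IOR/RSC/RSD/XMC/XMD counters below follow
+ * the same pattern. run_workload() is a hypothetical placeholder.
+ *
+ * @code
+ * static uint64_t l2c_cbc_count_invs(bdk_node_t node, int cbc)
+ * {
+ *     BDK_CSR_WRITE(node, BDK_L2C_CBCX_INVX_PFC(cbc, 0), 0); // reset counter
+ *     run_workload(); // hypothetical: the activity being measured
+ *     return BDK_CSR_READ(node, BDK_L2C_CBCX_INVX_PFC(cbc, 0));
+ * }
+ * @endcode
+ */
+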
+/**
+ * Register (RSL) l2c_cbc#_ioc#_pfc
+ *
+ * L2C CBC IOC Performance Counter Registers
+ */
+union bdk_l2c_cbcx_iocx_pfc
+{
+ uint64_t u;
+ struct bdk_l2c_cbcx_iocx_pfc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W/H) Current counter value. */
+#else /* Word 0 - Little Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W/H) Current counter value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_cbcx_iocx_pfc_s cn; */
+};
+typedef union bdk_l2c_cbcx_iocx_pfc bdk_l2c_cbcx_iocx_pfc_t;
+
+static inline uint64_t BDK_L2C_CBCX_IOCX_PFC(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_CBCX_IOCX_PFC(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b==0)))
+ return 0x87e058000028ll + 0x1000000ll * ((a) & 0x0) + 0x40ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b==0)))
+ return 0x87e058000028ll + 0x1000000ll * ((a) & 0x1) + 0x40ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b==0)))
+ return 0x87e058000028ll + 0x1000000ll * ((a) & 0x3) + 0x40ll * ((b) & 0x0);
+ __bdk_csr_fatal("L2C_CBCX_IOCX_PFC", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_L2C_CBCX_IOCX_PFC(a,b) bdk_l2c_cbcx_iocx_pfc_t
+#define bustype_BDK_L2C_CBCX_IOCX_PFC(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_CBCX_IOCX_PFC(a,b) "L2C_CBCX_IOCX_PFC"
+#define device_bar_BDK_L2C_CBCX_IOCX_PFC(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_CBCX_IOCX_PFC(a,b) (a)
+#define arguments_BDK_L2C_CBCX_IOCX_PFC(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) l2c_cbc#_ior#_pfc
+ *
+ * L2C CBC IOR Performance Counter Registers
+ */
+union bdk_l2c_cbcx_iorx_pfc
+{
+ uint64_t u;
+ struct bdk_l2c_cbcx_iorx_pfc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W/H) Current counter value. */
+#else /* Word 0 - Little Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W/H) Current counter value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_cbcx_iorx_pfc_s cn; */
+};
+typedef union bdk_l2c_cbcx_iorx_pfc bdk_l2c_cbcx_iorx_pfc_t;
+
+static inline uint64_t BDK_L2C_CBCX_IORX_PFC(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_CBCX_IORX_PFC(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b==0)))
+ return 0x87e058000030ll + 0x1000000ll * ((a) & 0x0) + 0x40ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b==0)))
+ return 0x87e058000030ll + 0x1000000ll * ((a) & 0x1) + 0x40ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b==0)))
+ return 0x87e058000030ll + 0x1000000ll * ((a) & 0x3) + 0x40ll * ((b) & 0x0);
+ __bdk_csr_fatal("L2C_CBCX_IORX_PFC", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_L2C_CBCX_IORX_PFC(a,b) bdk_l2c_cbcx_iorx_pfc_t
+#define bustype_BDK_L2C_CBCX_IORX_PFC(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_CBCX_IORX_PFC(a,b) "L2C_CBCX_IORX_PFC"
+#define device_bar_BDK_L2C_CBCX_IORX_PFC(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_CBCX_IORX_PFC(a,b) (a)
+#define arguments_BDK_L2C_CBCX_IORX_PFC(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) l2c_cbc#_msix_pba#
+ *
+ * L2C CBC MSI-X Pending Bit Array Registers
+ * This register is the MSI-X PBA table; the bit number is indexed by the L2C_CBC_INT_VEC_E
+ * enumeration.
+ */
+union bdk_l2c_cbcx_msix_pbax
+{
+ uint64_t u;
+ struct bdk_l2c_cbcx_msix_pbax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pend : 64; /**< [ 63: 0](RO) Pending message for the associated L2C_CBC()_MSIX_VEC()_CTL, enumerated by
+ L2C_CBC_INT_VEC_E. Bits that have no associated L2C_CBC_INT_VEC_E are 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t pend : 64; /**< [ 63: 0](RO) Pending message for the associated L2C_CBC()_MSIX_VEC()_CTL, enumerated by
+ L2C_CBC_INT_VEC_E. Bits that have no associated L2C_CBC_INT_VEC_E are 0. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_cbcx_msix_pbax_s cn; */
+};
+typedef union bdk_l2c_cbcx_msix_pbax bdk_l2c_cbcx_msix_pbax_t;
+
+static inline uint64_t BDK_L2C_CBCX_MSIX_PBAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_CBCX_MSIX_PBAX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b==0)))
+ return 0x87e058ff0000ll + 0x1000000ll * ((a) & 0x0) + 8ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b==0)))
+ return 0x87e058ff0000ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b==0)))
+ return 0x87e058ff0000ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x0);
+ __bdk_csr_fatal("L2C_CBCX_MSIX_PBAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_L2C_CBCX_MSIX_PBAX(a,b) bdk_l2c_cbcx_msix_pbax_t
+#define bustype_BDK_L2C_CBCX_MSIX_PBAX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_CBCX_MSIX_PBAX(a,b) "L2C_CBCX_MSIX_PBAX"
+#define device_bar_BDK_L2C_CBCX_MSIX_PBAX(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_L2C_CBCX_MSIX_PBAX(a,b) (a)
+#define arguments_BDK_L2C_CBCX_MSIX_PBAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) l2c_cbc#_msix_vec#_addr
+ *
+ * L2C CBC MSI-X Vector-Table Address Register
+ * This register is the MSI-X vector table, indexed by the L2C_CBC_INT_VEC_E enumeration.
+ */
+union bdk_l2c_cbcx_msix_vecx_addr
+{
+ uint64_t u;
+ struct bdk_l2c_cbcx_msix_vecx_addr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_1 : 1;
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's L2C_CBC()_MSIX_VEC()_ADDR, L2C_CBC()_MSIX_VEC()_CTL, and corresponding
+ bit of L2C_CBC()_MSIX_PBA() are RAZ/WI and do not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_L2C_CBC_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+ set, all vectors are secure and function as if [SECVEC] was set. */
+#else /* Word 0 - Little Endian */
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's L2C_CBC()_MSIX_VEC()_ADDR, L2C_CBC()_MSIX_VEC()_CTL, and corresponding
+ bit of L2C_CBC()_MSIX_PBA() are RAZ/WI and do not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_L2C_CBC_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+ set, all vectors are secure and function as if [SECVEC] was set. */
+ uint64_t reserved_1 : 1;
+ uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_cbcx_msix_vecx_addr_s cn; */
+};
+typedef union bdk_l2c_cbcx_msix_vecx_addr bdk_l2c_cbcx_msix_vecx_addr_t;
+
+static inline uint64_t BDK_L2C_CBCX_MSIX_VECX_ADDR(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_CBCX_MSIX_VECX_ADDR(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b==0)))
+ return 0x87e058f00000ll + 0x1000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b==0)))
+ return 0x87e058f00000ll + 0x1000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b==0)))
+ return 0x87e058f00000ll + 0x1000000ll * ((a) & 0x3) + 0x10ll * ((b) & 0x0);
+ __bdk_csr_fatal("L2C_CBCX_MSIX_VECX_ADDR", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_L2C_CBCX_MSIX_VECX_ADDR(a,b) bdk_l2c_cbcx_msix_vecx_addr_t
+#define bustype_BDK_L2C_CBCX_MSIX_VECX_ADDR(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_CBCX_MSIX_VECX_ADDR(a,b) "L2C_CBCX_MSIX_VECX_ADDR"
+#define device_bar_BDK_L2C_CBCX_MSIX_VECX_ADDR(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_L2C_CBCX_MSIX_VECX_ADDR(a,b) (a)
+#define arguments_BDK_L2C_CBCX_MSIX_VECX_ADDR(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) l2c_cbc#_msix_vec#_ctl
+ *
+ * L2C CBC MSI-X Vector-Table Control and Data Register
+ * This register is the MSI-X vector table, indexed by the L2C_CBC_INT_VEC_E enumeration.
+ */
+union bdk_l2c_cbcx_msix_vecx_ctl
+{
+ uint64_t u;
+ struct bdk_l2c_cbcx_msix_vecx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t data : 20; /**< [ 19: 0](R/W) Data to use for MSI-X delivery of this vector. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 20; /**< [ 19: 0](R/W) Data to use for MSI-X delivery of this vector. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_cbcx_msix_vecx_ctl_s cn; */
+};
+typedef union bdk_l2c_cbcx_msix_vecx_ctl bdk_l2c_cbcx_msix_vecx_ctl_t;
+
+static inline uint64_t BDK_L2C_CBCX_MSIX_VECX_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_CBCX_MSIX_VECX_CTL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b==0)))
+ return 0x87e058f00008ll + 0x1000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b==0)))
+ return 0x87e058f00008ll + 0x1000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b==0)))
+ return 0x87e058f00008ll + 0x1000000ll * ((a) & 0x3) + 0x10ll * ((b) & 0x0);
+ __bdk_csr_fatal("L2C_CBCX_MSIX_VECX_CTL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_L2C_CBCX_MSIX_VECX_CTL(a,b) bdk_l2c_cbcx_msix_vecx_ctl_t
+#define bustype_BDK_L2C_CBCX_MSIX_VECX_CTL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_CBCX_MSIX_VECX_CTL(a,b) "L2C_CBCX_MSIX_VECX_CTL"
+#define device_bar_BDK_L2C_CBCX_MSIX_VECX_CTL(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_L2C_CBCX_MSIX_VECX_CTL(a,b) (a)
+#define arguments_BDK_L2C_CBCX_MSIX_VECX_CTL(a,b) (a),(b),-1,-1
+
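+/* Usage sketch (editorial illustration, not part of the auto-generated source):
+ * programming MSI-X vector 0 (L2C_CBC_INT_VEC_E_INTS) with a delivery address
+ * and payload, then leaving it unmasked. The address/data values are
+ * placeholders; real values come from the GIC/ITS setup, and [SECVEC] is left
+ * 0 here (nonsecure-accessible).
+ *
+ * @code
+ * static void l2c_cbc_setup_msix(bdk_node_t node, int cbc,
+ *                                uint64_t msg_addr, uint32_t msg_data)
+ * {
+ *     bdk_l2c_cbcx_msix_vecx_addr_t va = { .u = 0 };
+ *     va.s.addr = msg_addr >> 2; // register holds IOVA bits [48:2]
+ *     BDK_CSR_WRITE(node, BDK_L2C_CBCX_MSIX_VECX_ADDR(cbc, 0), va.u);
+ *
+ *     bdk_l2c_cbcx_msix_vecx_ctl_t ctl = { .u = 0 };
+ *     ctl.s.data = msg_data & 0xfffff; // 20-bit message payload
+ *     ctl.s.mask = 0;                  // allow delivery on this vector
+ *     BDK_CSR_WRITE(node, BDK_L2C_CBCX_MSIX_VECX_CTL(cbc, 0), ctl.u);
+ * }
+ * @endcode
+ */
+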
+/**
+ * Register (RSL) l2c_cbc#_rsc#_pfc
+ *
+ * L2C CBC COMMIT Bus Performance Counter Registers
+ */
+union bdk_l2c_cbcx_rscx_pfc
+{
+ uint64_t u;
+ struct bdk_l2c_cbcx_rscx_pfc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W/H) Current counter value. */
+#else /* Word 0 - Little Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W/H) Current counter value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_cbcx_rscx_pfc_s cn; */
+};
+typedef union bdk_l2c_cbcx_rscx_pfc bdk_l2c_cbcx_rscx_pfc_t;
+
+static inline uint64_t BDK_L2C_CBCX_RSCX_PFC(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_CBCX_RSCX_PFC(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=1)))
+ return 0x87e058000010ll + 0x1000000ll * ((a) & 0x0) + 0x40ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=2)))
+ return 0x87e058000010ll + 0x1000000ll * ((a) & 0x1) + 0x40ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b<=2)))
+ return 0x87e058000010ll + 0x1000000ll * ((a) & 0x3) + 0x40ll * ((b) & 0x3);
+ __bdk_csr_fatal("L2C_CBCX_RSCX_PFC", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_L2C_CBCX_RSCX_PFC(a,b) bdk_l2c_cbcx_rscx_pfc_t
+#define bustype_BDK_L2C_CBCX_RSCX_PFC(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_CBCX_RSCX_PFC(a,b) "L2C_CBCX_RSCX_PFC"
+#define device_bar_BDK_L2C_CBCX_RSCX_PFC(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_CBCX_RSCX_PFC(a,b) (a)
+#define arguments_BDK_L2C_CBCX_RSCX_PFC(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) l2c_cbc#_rsd#_pfc
+ *
+ * L2C CBC FILL Bus Performance Counter Registers
+ */
+union bdk_l2c_cbcx_rsdx_pfc
+{
+ uint64_t u;
+ struct bdk_l2c_cbcx_rsdx_pfc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W/H) Current counter value. */
+#else /* Word 0 - Little Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W/H) Current counter value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_cbcx_rsdx_pfc_s cn; */
+};
+typedef union bdk_l2c_cbcx_rsdx_pfc bdk_l2c_cbcx_rsdx_pfc_t;
+
+static inline uint64_t BDK_L2C_CBCX_RSDX_PFC(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_CBCX_RSDX_PFC(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=1)))
+ return 0x87e058000018ll + 0x1000000ll * ((a) & 0x0) + 0x40ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=2)))
+ return 0x87e058000018ll + 0x1000000ll * ((a) & 0x1) + 0x40ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b<=2)))
+ return 0x87e058000018ll + 0x1000000ll * ((a) & 0x3) + 0x40ll * ((b) & 0x3);
+ __bdk_csr_fatal("L2C_CBCX_RSDX_PFC", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_L2C_CBCX_RSDX_PFC(a,b) bdk_l2c_cbcx_rsdx_pfc_t
+#define bustype_BDK_L2C_CBCX_RSDX_PFC(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_CBCX_RSDX_PFC(a,b) "L2C_CBCX_RSDX_PFC"
+#define device_bar_BDK_L2C_CBCX_RSDX_PFC(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_CBCX_RSDX_PFC(a,b) (a)
+#define arguments_BDK_L2C_CBCX_RSDX_PFC(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) l2c_cbc#_scratch
+ *
+ * INTERNAL: L2C CBC General Purpose Scratch Register
+ *
+ * These registers are only reset by hardware during chip cold reset. The values of the CSR
+ * fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_l2c_cbcx_scratch
+{
+ uint64_t u;
+ struct bdk_l2c_cbcx_scratch_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t scratch : 7; /**< [ 7: 1](R/W) General purpose scratch register. */
+ uint64_t invdly : 1; /**< [ 0: 0](R/W) Delays all invalidates for 9 cycles after a broadcast invalidate. */
+#else /* Word 0 - Little Endian */
+ uint64_t invdly : 1; /**< [ 0: 0](R/W) Delays all invalidates for 9 cycles after a broadcast invalidate. */
+ uint64_t scratch : 7; /**< [ 7: 1](R/W) General purpose scratch register. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_cbcx_scratch_s cn; */
+};
+typedef union bdk_l2c_cbcx_scratch bdk_l2c_cbcx_scratch_t;
+
+static inline uint64_t BDK_L2C_CBCX_SCRATCH(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_CBCX_SCRATCH(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0580d0000ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0580d0000ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e0580d0000ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("L2C_CBCX_SCRATCH", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_CBCX_SCRATCH(a) bdk_l2c_cbcx_scratch_t
+#define bustype_BDK_L2C_CBCX_SCRATCH(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_CBCX_SCRATCH(a) "L2C_CBCX_SCRATCH"
+#define device_bar_BDK_L2C_CBCX_SCRATCH(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_CBCX_SCRATCH(a) (a)
+#define arguments_BDK_L2C_CBCX_SCRATCH(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) l2c_cbc#_xmc#_pfc
+ *
+ * L2C CBC ADD Bus Performance Counter Registers
+ */
+union bdk_l2c_cbcx_xmcx_pfc
+{
+ uint64_t u;
+ struct bdk_l2c_cbcx_xmcx_pfc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W/H) Current counter value. */
+#else /* Word 0 - Little Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W/H) Current counter value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_cbcx_xmcx_pfc_s cn; */
+};
+typedef union bdk_l2c_cbcx_xmcx_pfc bdk_l2c_cbcx_xmcx_pfc_t;
+
+static inline uint64_t BDK_L2C_CBCX_XMCX_PFC(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_CBCX_XMCX_PFC(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=1)))
+ return 0x87e058000000ll + 0x1000000ll * ((a) & 0x0) + 0x40ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=2)))
+ return 0x87e058000000ll + 0x1000000ll * ((a) & 0x1) + 0x40ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b<=2)))
+ return 0x87e058000000ll + 0x1000000ll * ((a) & 0x3) + 0x40ll * ((b) & 0x3);
+ __bdk_csr_fatal("L2C_CBCX_XMCX_PFC", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_L2C_CBCX_XMCX_PFC(a,b) bdk_l2c_cbcx_xmcx_pfc_t
+#define bustype_BDK_L2C_CBCX_XMCX_PFC(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_CBCX_XMCX_PFC(a,b) "L2C_CBCX_XMCX_PFC"
+#define device_bar_BDK_L2C_CBCX_XMCX_PFC(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_CBCX_XMCX_PFC(a,b) (a)
+#define arguments_BDK_L2C_CBCX_XMCX_PFC(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) l2c_cbc#_xmc_cmd
+ *
+ * L2C CBC ADD Bus Command Register
+ * Note the following:
+ *
+ * The ADD bus command chosen must not be an IOB-destined command, or the operation is UNDEFINED.
+ *
+ * The ADD bus command will have SID forced to IOB, DID forced to L2C, no virtualization checks
+ * performed (always pass), and xmdmsk forced to 0. Note that this implies that commands that
+ * REQUIRE a STORE cycle (STP, STC, SAA, FAA, FAS) should not be used, or the results are
+ * unpredictable. SID = IOB means that the way partitioning used for the command is
+ * L2C_WPAR_IOB(). The L2C_QOS_PP() registers are not used for these commands.
+ *
+ * Any FILL responses generated by the ADD bus command are ignored. Generated STINs, however,
+ * will correctly invalidate the required cores.
+ *
+ * A write that arrives while [INUSE] is set will block until [INUSE] clears. This
+ * gives software two options when needing to issue a stream of write operations to L2C_XMC_CMD:
+ * polling on [INUSE], or allowing hardware to handle the interlock -- at the expense of
+ * locking up the RSL bus for potentially tens of cycles at a time while waiting for an available
+ * LFB/VAB entry. Note that when [INUSE] clears, the only ordering it implies is that
+ * software can send another ADD bus command. Subsequent commands may complete out of order
+ * relative to earlier commands.
+ *
+ * The address written to L2C_XMC_CMD is a physical address. L2C performs index
+ * aliasing (if enabled) on the written address and uses that for the command. This
+ * index-aliased address is what is returned on a read of L2C_XMC_CMD.
+ */
+union bdk_l2c_cbcx_xmc_cmd
+{
+ uint64_t u;
+ struct bdk_l2c_cbcx_xmc_cmd_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t inuse : 1; /**< [ 63: 63](RO/H) Set to 1 by hardware upon receiving a write; cleared when command has issued (not
+ necessarily completed, but ordered relative to other traffic) and hardware can accept
+ another command. */
+ uint64_t cmd : 7; /**< [ 62: 56](R/W) Command to use for simulated ADD bus request. A new request can be accepted once [INUSE] is clear. */
+ uint64_t reserved_49_55 : 7;
+ uint64_t nonsec : 1; /**< [ 48: 48](R/W) Nonsecure bit to use for simulated ADD bus request. */
+ uint64_t reserved_47 : 1;
+ uint64_t qos : 3; /**< [ 46: 44](R/W) QOS level to use for simulated ADD bus request. */
+ uint64_t reserved_42_43 : 2;
+ uint64_t node : 2; /**< [ 41: 40](R/W) CCPI node to use for simulated ADD bus request. */
+ uint64_t addr : 40; /**< [ 39: 0](R/W) Address to use for simulated ADD bus request. (The address written to
+ L2C_CBC()_XMC_CMD is a physical address. L2C performs index aliasing (if
+ enabled) on the written address and uses that for the command. This
+ index-aliased address is what is returned on a read of L2C_CBC()_XMC_CMD.) */
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 40; /**< [ 39: 0](R/W) Address to use for simulated ADD bus request. (The address written to
+ L2C_CBC()_XMC_CMD is a physical address. L2C performs index aliasing (if
+ enabled) on the written address and uses that for the command. This
+ index-aliased address is what is returned on a read of L2C_CBC()_XMC_CMD.) */
+ uint64_t node : 2; /**< [ 41: 40](R/W) CCPI node to use for simulated ADD bus request. */
+ uint64_t reserved_42_43 : 2;
+ uint64_t qos : 3; /**< [ 46: 44](R/W) QOS level to use for simulated ADD bus request. */
+ uint64_t reserved_47 : 1;
+ uint64_t nonsec : 1; /**< [ 48: 48](R/W) Nonsecure bit to use for simulated ADD bus request. */
+ uint64_t reserved_49_55 : 7;
+ uint64_t cmd : 7; /**< [ 62: 56](R/W) Command to use for simulated ADD bus request. A new request can be accepted once [INUSE] is clear. */
+ uint64_t inuse : 1; /**< [ 63: 63](RO/H) Set to 1 by hardware upon receiving a write; cleared when command has issued (not
+ necessarily completed, but ordered relative to other traffic) and hardware can accept
+ another command. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_cbcx_xmc_cmd_s cn; */
+};
+typedef union bdk_l2c_cbcx_xmc_cmd bdk_l2c_cbcx_xmc_cmd_t;
+
+static inline uint64_t BDK_L2C_CBCX_XMC_CMD(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_CBCX_XMC_CMD(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0580c0000ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0580c0000ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e0580c0000ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("L2C_CBCX_XMC_CMD", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_CBCX_XMC_CMD(a) bdk_l2c_cbcx_xmc_cmd_t
+#define bustype_BDK_L2C_CBCX_XMC_CMD(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_CBCX_XMC_CMD(a) "L2C_CBCX_XMC_CMD"
+#define device_bar_BDK_L2C_CBCX_XMC_CMD(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_CBCX_XMC_CMD(a) (a)
+#define arguments_BDK_L2C_CBCX_XMC_CMD(a) (a),-1,-1,-1
+
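+/* Usage sketch (editorial illustration, not part of the auto-generated source):
+ * issuing a simulated ADD bus command while explicitly polling [INUSE], per
+ * the note above, rather than letting a blocked RSL write stall the bus. The
+ * command encoding is assumed to be a valid non-IOB, non-STORE XMC command.
+ *
+ * @code
+ * static void l2c_cbc_xmc_issue(bdk_node_t node, int cbc, int cmd,
+ *                               uint64_t paddr)
+ * {
+ *     bdk_l2c_cbcx_xmc_cmd_t xmc;
+ *     do { // wait until hardware can accept another command
+ *         xmc.u = BDK_CSR_READ(node, BDK_L2C_CBCX_XMC_CMD(cbc));
+ *     } while (xmc.s.inuse);
+ *     xmc.u = 0;
+ *     xmc.s.cmd = cmd;    // 7-bit command encoding
+ *     xmc.s.addr = paddr; // low 40 bits; L2C applies index aliasing
+ *     BDK_CSR_WRITE(node, BDK_L2C_CBCX_XMC_CMD(cbc), xmc.u);
+ * }
+ * @endcode
+ */
+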
+/**
+ * Register (RSL) l2c_cbc#_xmd#_pfc
+ *
+ * L2C CBC STORE Bus Performance Counter Registers
+ */
+union bdk_l2c_cbcx_xmdx_pfc
+{
+ uint64_t u;
+ struct bdk_l2c_cbcx_xmdx_pfc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W/H) Current counter value. */
+#else /* Word 0 - Little Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W/H) Current counter value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_cbcx_xmdx_pfc_s cn; */
+};
+typedef union bdk_l2c_cbcx_xmdx_pfc bdk_l2c_cbcx_xmdx_pfc_t;
+
+static inline uint64_t BDK_L2C_CBCX_XMDX_PFC(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_CBCX_XMDX_PFC(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=1)))
+ return 0x87e058000008ll + 0x1000000ll * ((a) & 0x0) + 0x40ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=2)))
+ return 0x87e058000008ll + 0x1000000ll * ((a) & 0x1) + 0x40ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b<=2)))
+ return 0x87e058000008ll + 0x1000000ll * ((a) & 0x3) + 0x40ll * ((b) & 0x3);
+ __bdk_csr_fatal("L2C_CBCX_XMDX_PFC", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_L2C_CBCX_XMDX_PFC(a,b) bdk_l2c_cbcx_xmdx_pfc_t
+#define bustype_BDK_L2C_CBCX_XMDX_PFC(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_CBCX_XMDX_PFC(a,b) "L2C_CBCX_XMDX_PFC"
+#define device_bar_BDK_L2C_CBCX_XMDX_PFC(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_CBCX_XMDX_PFC(a,b) (a)
+#define arguments_BDK_L2C_CBCX_XMDX_PFC(a,b) (a),(b),-1,-1
+
+#endif /* __BDK_CSRS_L2C_CBC_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-l2c_tad.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-l2c_tad.h
new file mode 100644
index 0000000000..0d70555565
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-l2c_tad.h
@@ -0,0 +1,2749 @@
+#ifndef __BDK_CSRS_L2C_TAD_H__
+#define __BDK_CSRS_L2C_TAD_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium L2C_TAD.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Enumeration l2c_tad_bar_e
+ *
+ * L2C TAD Base Address Register Enumeration
+ * Enumerates the base address registers.
+ */
+#define BDK_L2C_TAD_BAR_E_L2C_TADX_PF_BAR0(a) (0x87e050000000ll + 0x1000000ll * (a))
+#define BDK_L2C_TAD_BAR_E_L2C_TADX_PF_BAR0_SIZE 0x800000ull
+#define BDK_L2C_TAD_BAR_E_L2C_TADX_PF_BAR4(a) (0x87e050f00000ll + 0x1000000ll * (a))
+#define BDK_L2C_TAD_BAR_E_L2C_TADX_PF_BAR4_SIZE 0x100000ull
+
+/**
+ * Enumeration l2c_tad_int_vec_e
+ *
+ * L2C TAD MSI-X Vector Enumeration
+ * Enumerates the MSI-X interrupt vectors.
+ */
+#define BDK_L2C_TAD_INT_VEC_E_INTS (0)
+
+/**
+ * Register (RSL) l2c_tad#_int_ena_w1c
+ *
+ * L2C TAD Interrupt Enable Clear Registers
+ * This register clears interrupt enable bits.
+ */
+union bdk_l2c_tadx_int_ena_w1c
+{
+ uint64_t u;
+ struct bdk_l2c_tadx_int_ena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[WRDISOCI]. */
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[RDDISOCI]. */
+ uint64_t rtgdbe : 1; /**< [ 33: 33](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[RTGDBE]. */
+ uint64_t rtgsbe : 1; /**< [ 32: 32](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[RTGSBE]. */
+ uint64_t reserved_19_31 : 13;
+ uint64_t gsyncto : 1; /**< [ 18: 18](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[GSYNCTO]. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[LFBTO]. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[WRDISLMC]. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[RDDISLMC]. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[RDNXM]. */
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[WRNXM]. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t noway : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[NOWAY]. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[TAGDBE]. */
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[TAGSBE]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[FBFDBE]. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[FBFSBE]. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[SBFDBE]. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[SBFSBE]. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[L2DDBE]. */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[L2DSBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[L2DSBE]. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[L2DDBE]. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[SBFSBE]. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[SBFDBE]. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[FBFSBE]. */
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[FBFDBE]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[TAGSBE]. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[TAGDBE]. */
+ uint64_t noway : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[NOWAY]. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[WRNXM]. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[RDNXM]. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[RDDISLMC]. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[WRDISLMC]. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[LFBTO]. */
+ uint64_t gsyncto : 1; /**< [ 18: 18](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[GSYNCTO]. */
+ uint64_t reserved_19_31 : 13;
+ uint64_t rtgsbe : 1; /**< [ 32: 32](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[RTGSBE]. */
+ uint64_t rtgdbe : 1; /**< [ 33: 33](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[RTGDBE]. */
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[RDDISOCI]. */
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[WRDISOCI]. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_l2c_tadx_int_ena_w1c_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[WRDISOCI]. */
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[RDDISOCI]. */
+ uint64_t rtgdbe : 1; /**< [ 33: 33](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[RTGDBE]. */
+ uint64_t rtgsbe : 1; /**< [ 32: 32](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[RTGSBE]. */
+ uint64_t reserved_19_31 : 13;
+ uint64_t reserved_18 : 1;
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[LFBTO]. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[WRDISLMC]. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[RDDISLMC]. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[RDNXM]. */
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[WRNXM]. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t noway : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[NOWAY]. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[TAGDBE]. */
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[TAGSBE]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[FBFDBE]. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[FBFSBE]. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[SBFDBE]. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[SBFSBE]. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[L2DDBE]. */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[L2DSBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[L2DSBE]. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[L2DDBE]. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[SBFSBE]. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[SBFDBE]. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[FBFSBE]. */
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[FBFDBE]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[TAGSBE]. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[TAGDBE]. */
+ uint64_t noway : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[NOWAY]. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[WRNXM]. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[RDNXM]. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[RDDISLMC]. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[WRDISLMC]. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[LFBTO]. */
+ uint64_t reserved_18 : 1;
+ uint64_t reserved_19_31 : 13;
+ uint64_t rtgsbe : 1; /**< [ 32: 32](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[RTGSBE]. */
+ uint64_t rtgdbe : 1; /**< [ 33: 33](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[RTGDBE]. */
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[RDDISOCI]. */
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[WRDISOCI]. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_l2c_tadx_int_ena_w1c_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1C/H) Reads or clears enable for L2C_TAD(0)_INT_W1C[WRDISOCI]. */
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1C/H) Reads or clears enable for L2C_TAD(0)_INT_W1C[RDDISOCI]. */
+ uint64_t reserved_19_33 : 15;
+ uint64_t gsyncto : 1; /**< [ 18: 18](R/W1C/H) Reads or clears enable for L2C_TAD(0)_INT_W1C[GSYNCTO]. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1C/H) Reads or clears enable for L2C_TAD(0)_INT_W1C[LFBTO]. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1C/H) Reads or clears enable for L2C_TAD(0)_INT_W1C[WRDISLMC]. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1C/H) Reads or clears enable for L2C_TAD(0)_INT_W1C[RDDISLMC]. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1C/H) Reads or clears enable for L2C_TAD(0)_INT_W1C[RDNXM]. */
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for L2C_TAD(0)_INT_W1C[WRNXM]. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t noway : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for L2C_TAD(0)_INT_W1C[NOWAY]. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for L2C_TAD(0)_INT_W1C[TAGDBE]. */
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for L2C_TAD(0)_INT_W1C[TAGSBE]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for L2C_TAD(0)_INT_W1C[FBFDBE]. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for L2C_TAD(0)_INT_W1C[FBFSBE]. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for L2C_TAD(0)_INT_W1C[SBFDBE]. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for L2C_TAD(0)_INT_W1C[SBFSBE]. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for L2C_TAD(0)_INT_W1C[L2DDBE]. */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for L2C_TAD(0)_INT_W1C[L2DSBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for L2C_TAD(0)_INT_W1C[L2DSBE]. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for L2C_TAD(0)_INT_W1C[L2DDBE]. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for L2C_TAD(0)_INT_W1C[SBFSBE]. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for L2C_TAD(0)_INT_W1C[SBFDBE]. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for L2C_TAD(0)_INT_W1C[FBFSBE]. */
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for L2C_TAD(0)_INT_W1C[FBFDBE]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for L2C_TAD(0)_INT_W1C[TAGSBE]. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for L2C_TAD(0)_INT_W1C[TAGDBE]. */
+ uint64_t noway : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for L2C_TAD(0)_INT_W1C[NOWAY]. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for L2C_TAD(0)_INT_W1C[WRNXM]. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1C/H) Reads or clears enable for L2C_TAD(0)_INT_W1C[RDNXM]. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1C/H) Reads or clears enable for L2C_TAD(0)_INT_W1C[RDDISLMC]. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1C/H) Reads or clears enable for L2C_TAD(0)_INT_W1C[WRDISLMC]. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1C/H) Reads or clears enable for L2C_TAD(0)_INT_W1C[LFBTO]. */
+ uint64_t gsyncto : 1; /**< [ 18: 18](R/W1C/H) Reads or clears enable for L2C_TAD(0)_INT_W1C[GSYNCTO]. */
+ uint64_t reserved_19_33 : 15;
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1C/H) Reads or clears enable for L2C_TAD(0)_INT_W1C[RDDISOCI]. */
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1C/H) Reads or clears enable for L2C_TAD(0)_INT_W1C[WRDISOCI]. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_l2c_tadx_int_ena_w1c_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[WRDISOCI]. */
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[RDDISOCI]. */
+ uint64_t reserved_19_33 : 15;
+ uint64_t gsyncto : 1; /**< [ 18: 18](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[GSYNCTO]. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[LFBTO]. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[WRDISLMC]. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[RDDISLMC]. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[RDNXM]. */
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[WRNXM]. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t noway : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[NOWAY]. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[TAGDBE]. */
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[TAGSBE]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[FBFDBE]. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[FBFSBE]. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[SBFDBE]. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[SBFSBE]. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[L2DDBE]. */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[L2DSBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[L2DSBE]. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[L2DDBE]. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[SBFSBE]. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[SBFDBE]. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[FBFSBE]. */
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[FBFDBE]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[TAGSBE]. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[TAGDBE]. */
+ uint64_t noway : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[NOWAY]. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[WRNXM]. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[RDNXM]. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[RDDISLMC]. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[WRDISLMC]. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[LFBTO]. */
+ uint64_t gsyncto : 1; /**< [ 18: 18](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[GSYNCTO]. */
+ uint64_t reserved_19_33 : 15;
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[RDDISOCI]. */
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1C/H) Reads or clears enable for L2C_TAD(0..3)_INT_W1C[WRDISOCI]. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } cn83xx;
+ struct bdk_l2c_tadx_int_ena_w1c_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[WRDISOCI]. */
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[RDDISOCI]. */
+ uint64_t rtgdbe : 1; /**< [ 33: 33](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[RTGDBE]. */
+ uint64_t rtgsbe : 1; /**< [ 32: 32](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[RTGSBE]. */
+ uint64_t reserved_19_31 : 13;
+ uint64_t gsyncto : 1; /**< [ 18: 18](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[GSYNCTO]. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[LFBTO]. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[WRDISLMC]. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[RDDISLMC]. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[RDNXM]. */
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[WRNXM]. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t noway : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[NOWAY]. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[TAGDBE]. */
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[TAGSBE]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[FBFDBE]. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[FBFSBE]. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[SBFDBE]. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[SBFSBE]. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[L2DDBE]. */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[L2DSBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[L2DSBE]. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[L2DDBE]. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[SBFSBE]. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[SBFDBE]. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[FBFSBE]. */
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[FBFDBE]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[TAGSBE]. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[TAGDBE]. */
+ uint64_t noway : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[NOWAY]. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[WRNXM]. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[RDNXM]. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[RDDISLMC]. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[WRDISLMC]. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[LFBTO]. */
+ uint64_t gsyncto : 1; /**< [ 18: 18](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[GSYNCTO]. */
+ uint64_t reserved_19_31 : 13;
+ uint64_t rtgsbe : 1; /**< [ 32: 32](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[RTGSBE]. */
+ uint64_t rtgdbe : 1; /**< [ 33: 33](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[RTGDBE]. */
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[RDDISOCI]. */
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1C/H) Reads or clears enable for L2C_TAD(0..7)_INT_W1C[WRDISOCI]. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_l2c_tadx_int_ena_w1c bdk_l2c_tadx_int_ena_w1c_t;
+
+static inline uint64_t BDK_L2C_TADX_INT_ENA_W1C(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_TADX_INT_ENA_W1C(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e050040020ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e050040020ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=7))
+ return 0x87e050040020ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("L2C_TADX_INT_ENA_W1C", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_TADX_INT_ENA_W1C(a) bdk_l2c_tadx_int_ena_w1c_t
+#define bustype_BDK_L2C_TADX_INT_ENA_W1C(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_TADX_INT_ENA_W1C(a) "L2C_TADX_INT_ENA_W1C"
+#define device_bar_BDK_L2C_TADX_INT_ENA_W1C(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_TADX_INT_ENA_W1C(a) (a)
+#define arguments_BDK_L2C_TADX_INT_ENA_W1C(a) (a),-1,-1,-1
+
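+/* Usage sketch (editor's addition, not part of the imported BDK source):
+ * masking one interrupt cause on a TAD through this W1C enable register,
+ * assuming the generic BDK_CSR_WRITE helper from bdk-csr.h. Writing a one
+ * to a field clears that enable; zero bits leave the other enables alone.
+ */
+static inline void bdk_l2c_tad_mask_noway(bdk_node_t node, int tad)
+{
+    bdk_l2c_tadx_int_ena_w1c_t w1c;
+    w1c.u = 0;
+    w1c.s.noway = 1; /* W1C: a one here disables the NOWAY interrupt */
+    BDK_CSR_WRITE(node, BDK_L2C_TADX_INT_ENA_W1C(tad), w1c.u);
+}
+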
+/**
+ * Register (RSL) l2c_tad#_int_ena_w1s
+ *
+ * L2C TAD Interrupt Enable Set Registers
+ * This register sets interrupt enable bits.
+ */
+union bdk_l2c_tadx_int_ena_w1s
+{
+ uint64_t u;
+ struct bdk_l2c_tadx_int_ena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[WRDISOCI]. */
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[RDDISOCI]. */
+ uint64_t rtgdbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[RTGDBE]. */
+ uint64_t rtgsbe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[RTGSBE]. */
+ uint64_t reserved_19_31 : 13;
+ uint64_t gsyncto : 1; /**< [ 18: 18](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[GSYNCTO]. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[LFBTO]. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[WRDISLMC]. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[RDDISLMC]. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[RDNXM]. */
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[WRNXM]. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t noway : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[NOWAY]. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[TAGDBE]. */
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[TAGSBE]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[FBFDBE]. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[FBFSBE]. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[SBFDBE]. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[SBFSBE]. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[L2DDBE]. */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[L2DSBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[L2DSBE]. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[L2DDBE]. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[SBFSBE]. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[SBFDBE]. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[FBFSBE]. */
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[FBFDBE]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[TAGSBE]. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[TAGDBE]. */
+ uint64_t noway : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[NOWAY]. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[WRNXM]. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[RDNXM]. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[RDDISLMC]. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[WRDISLMC]. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[LFBTO]. */
+ uint64_t gsyncto : 1; /**< [ 18: 18](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[GSYNCTO]. */
+ uint64_t reserved_19_31 : 13;
+ uint64_t rtgsbe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[RTGSBE]. */
+ uint64_t rtgdbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[RTGDBE]. */
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[RDDISOCI]. */
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[WRDISOCI]. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_l2c_tadx_int_ena_w1s_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[WRDISOCI]. */
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[RDDISOCI]. */
+ uint64_t rtgdbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[RTGDBE]. */
+ uint64_t rtgsbe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[RTGSBE]. */
+ uint64_t reserved_19_31 : 13;
+ uint64_t reserved_18 : 1;
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[LFBTO]. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[WRDISLMC]. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[RDDISLMC]. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[RDNXM]. */
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[WRNXM]. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t noway : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[NOWAY]. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[TAGDBE]. */
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[TAGSBE]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[FBFDBE]. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[FBFSBE]. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[SBFDBE]. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[SBFSBE]. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[L2DDBE]. */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[L2DSBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[L2DSBE]. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[L2DDBE]. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[SBFSBE]. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[SBFDBE]. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[FBFSBE]. */
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[FBFDBE]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[TAGSBE]. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[TAGDBE]. */
+ uint64_t noway : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[NOWAY]. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[WRNXM]. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[RDNXM]. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[RDDISLMC]. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[WRDISLMC]. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[LFBTO]. */
+ uint64_t reserved_18 : 1;
+ uint64_t reserved_19_31 : 13;
+ uint64_t rtgsbe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[RTGSBE]. */
+ uint64_t rtgdbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[RTGDBE]. */
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[RDDISOCI]. */
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[WRDISOCI]. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_l2c_tadx_int_ena_w1s_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1S/H) Reads or sets enable for L2C_TAD(0)_INT_W1C[WRDISOCI]. */
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1S/H) Reads or sets enable for L2C_TAD(0)_INT_W1C[RDDISOCI]. */
+ uint64_t reserved_19_33 : 15;
+ uint64_t gsyncto : 1; /**< [ 18: 18](R/W1S/H) Reads or sets enable for L2C_TAD(0)_INT_W1C[GSYNCTO]. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1S/H) Reads or sets enable for L2C_TAD(0)_INT_W1C[LFBTO]. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1S/H) Reads or sets enable for L2C_TAD(0)_INT_W1C[WRDISLMC]. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1S/H) Reads or sets enable for L2C_TAD(0)_INT_W1C[RDDISLMC]. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for L2C_TAD(0)_INT_W1C[RDNXM]. */
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for L2C_TAD(0)_INT_W1C[WRNXM]. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t noway : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for L2C_TAD(0)_INT_W1C[NOWAY]. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for L2C_TAD(0)_INT_W1C[TAGDBE]. */
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for L2C_TAD(0)_INT_W1C[TAGSBE]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for L2C_TAD(0)_INT_W1C[FBFDBE]. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for L2C_TAD(0)_INT_W1C[FBFSBE]. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for L2C_TAD(0)_INT_W1C[SBFDBE]. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for L2C_TAD(0)_INT_W1C[SBFSBE]. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for L2C_TAD(0)_INT_W1C[L2DDBE]. */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for L2C_TAD(0)_INT_W1C[L2DSBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for L2C_TAD(0)_INT_W1C[L2DSBE]. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for L2C_TAD(0)_INT_W1C[L2DDBE]. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for L2C_TAD(0)_INT_W1C[SBFSBE]. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for L2C_TAD(0)_INT_W1C[SBFDBE]. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for L2C_TAD(0)_INT_W1C[FBFSBE]. */
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for L2C_TAD(0)_INT_W1C[FBFDBE]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for L2C_TAD(0)_INT_W1C[TAGSBE]. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for L2C_TAD(0)_INT_W1C[TAGDBE]. */
+ uint64_t noway : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for L2C_TAD(0)_INT_W1C[NOWAY]. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for L2C_TAD(0)_INT_W1C[WRNXM]. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for L2C_TAD(0)_INT_W1C[RDNXM]. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1S/H) Reads or sets enable for L2C_TAD(0)_INT_W1C[RDDISLMC]. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1S/H) Reads or sets enable for L2C_TAD(0)_INT_W1C[WRDISLMC]. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1S/H) Reads or sets enable for L2C_TAD(0)_INT_W1C[LFBTO]. */
+ uint64_t gsyncto : 1; /**< [ 18: 18](R/W1S/H) Reads or sets enable for L2C_TAD(0)_INT_W1C[GSYNCTO]. */
+ uint64_t reserved_19_33 : 15;
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1S/H) Reads or sets enable for L2C_TAD(0)_INT_W1C[RDDISOCI]. */
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1S/H) Reads or sets enable for L2C_TAD(0)_INT_W1C[WRDISOCI]. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_l2c_tadx_int_ena_w1s_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1S/H) Reads or sets enable for L2C_TAD(0..3)_INT_W1C[WRDISOCI]. */
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1S/H) Reads or sets enable for L2C_TAD(0..3)_INT_W1C[RDDISOCI]. */
+ uint64_t reserved_19_33 : 15;
+ uint64_t gsyncto : 1; /**< [ 18: 18](R/W1S/H) Reads or sets enable for L2C_TAD(0..3)_INT_W1C[GSYNCTO]. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1S/H) Reads or sets enable for L2C_TAD(0..3)_INT_W1C[LFBTO]. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1S/H) Reads or sets enable for L2C_TAD(0..3)_INT_W1C[WRDISLMC]. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1S/H) Reads or sets enable for L2C_TAD(0..3)_INT_W1C[RDDISLMC]. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for L2C_TAD(0..3)_INT_W1C[RDNXM]. */
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for L2C_TAD(0..3)_INT_W1C[WRNXM]. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t noway : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for L2C_TAD(0..3)_INT_W1C[NOWAY]. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for L2C_TAD(0..3)_INT_W1C[TAGDBE]. */
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for L2C_TAD(0..3)_INT_W1C[TAGSBE]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for L2C_TAD(0..3)_INT_W1C[FBFDBE]. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for L2C_TAD(0..3)_INT_W1C[FBFSBE]. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for L2C_TAD(0..3)_INT_W1C[SBFDBE]. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for L2C_TAD(0..3)_INT_W1C[SBFSBE]. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for L2C_TAD(0..3)_INT_W1C[L2DDBE]. */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for L2C_TAD(0..3)_INT_W1C[L2DSBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for L2C_TAD(0..3)_INT_W1C[L2DSBE]. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for L2C_TAD(0..3)_INT_W1C[L2DDBE]. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for L2C_TAD(0..3)_INT_W1C[SBFSBE]. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for L2C_TAD(0..3)_INT_W1C[SBFDBE]. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for L2C_TAD(0..3)_INT_W1C[FBFSBE]. */
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for L2C_TAD(0..3)_INT_W1C[FBFDBE]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for L2C_TAD(0..3)_INT_W1C[TAGSBE]. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for L2C_TAD(0..3)_INT_W1C[TAGDBE]. */
+ uint64_t noway : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for L2C_TAD(0..3)_INT_W1C[NOWAY]. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for L2C_TAD(0..3)_INT_W1C[WRNXM]. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for L2C_TAD(0..3)_INT_W1C[RDNXM]. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1S/H) Reads or sets enable for L2C_TAD(0..3)_INT_W1C[RDDISLMC]. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1S/H) Reads or sets enable for L2C_TAD(0..3)_INT_W1C[WRDISLMC]. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1S/H) Reads or sets enable for L2C_TAD(0..3)_INT_W1C[LFBTO]. */
+ uint64_t gsyncto : 1; /**< [ 18: 18](R/W1S/H) Reads or sets enable for L2C_TAD(0..3)_INT_W1C[GSYNCTO]. */
+ uint64_t reserved_19_33 : 15;
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1S/H) Reads or sets enable for L2C_TAD(0..3)_INT_W1C[RDDISOCI]. */
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1S/H) Reads or sets enable for L2C_TAD(0..3)_INT_W1C[WRDISOCI]. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } cn83xx;
+ struct bdk_l2c_tadx_int_ena_w1s_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[WRDISOCI]. */
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[RDDISOCI]. */
+ uint64_t rtgdbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[RTGDBE]. */
+ uint64_t rtgsbe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[RTGSBE]. */
+ uint64_t reserved_19_31 : 13;
+ uint64_t gsyncto : 1; /**< [ 18: 18](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[GSYNCTO]. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[LFBTO]. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[WRDISLMC]. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[RDDISLMC]. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[RDNXM]. */
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[WRNXM]. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t noway : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[NOWAY]. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[TAGDBE]. */
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[TAGSBE]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[FBFDBE]. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[FBFSBE]. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[SBFDBE]. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[SBFSBE]. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[L2DDBE]. */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[L2DSBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[L2DSBE]. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[L2DDBE]. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[SBFSBE]. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[SBFDBE]. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[FBFSBE]. */
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[FBFDBE]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[TAGSBE]. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[TAGDBE]. */
+ uint64_t noway : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[NOWAY]. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[WRNXM]. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[RDNXM]. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[RDDISLMC]. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[WRDISLMC]. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[LFBTO]. */
+ uint64_t gsyncto : 1; /**< [ 18: 18](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[GSYNCTO]. */
+ uint64_t reserved_19_31 : 13;
+ uint64_t rtgsbe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[RTGSBE]. */
+ uint64_t rtgdbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[RTGDBE]. */
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[RDDISOCI]. */
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1S/H) Reads or sets enable for L2C_TAD(0..7)_INT_W1C[WRDISOCI]. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_l2c_tadx_int_ena_w1s bdk_l2c_tadx_int_ena_w1s_t;
+
+static inline uint64_t BDK_L2C_TADX_INT_ENA_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_TADX_INT_ENA_W1S(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e050040028ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e050040028ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=7))
+ return 0x87e050040028ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("L2C_TADX_INT_ENA_W1S", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_TADX_INT_ENA_W1S(a) bdk_l2c_tadx_int_ena_w1s_t
+#define bustype_BDK_L2C_TADX_INT_ENA_W1S(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_TADX_INT_ENA_W1S(a) "L2C_TADX_INT_ENA_W1S"
+#define device_bar_BDK_L2C_TADX_INT_ENA_W1S(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_TADX_INT_ENA_W1S(a) (a)
+#define arguments_BDK_L2C_TADX_INT_ENA_W1S(a) (a),-1,-1,-1
+
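+/* Usage sketch (editor's addition, not part of the imported BDK source):
+ * enabling the double-bit ECC error interrupts on every TAD through this
+ * W1S register. Assumes the generic BDK_CSR_WRITE helper from bdk-csr.h;
+ * the caller supplies the TAD count (1 on CN81XX, 4 on CN83XX, 8 on
+ * CN88XX, matching the address function above).
+ */
+static inline void bdk_l2c_tad_enable_dbe(bdk_node_t node, unsigned num_tads)
+{
+    for (unsigned tad = 0; tad < num_tads; tad++)
+    {
+        bdk_l2c_tadx_int_ena_w1s_t w1s;
+        w1s.u = 0;
+        w1s.s.l2ddbe = 1; /* L2 data double-bit error */
+        w1s.s.tagdbe = 1; /* tag double-bit error */
+        w1s.s.fbfdbe = 1; /* fill-buffer double-bit error */
+        w1s.s.sbfdbe = 1; /* store-buffer double-bit error */
+        BDK_CSR_WRITE(node, BDK_L2C_TADX_INT_ENA_W1S(tad), w1s.u);
+    }
+}
+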
+/**
+ * Register (RSL) l2c_tad#_int_w1c
+ *
+ * L2C TAD Interrupt Registers
+ * This register is for TAD-based interrupts.
+ */
+union bdk_l2c_tadx_int_w1c
+{
+ uint64_t u;
+ struct bdk_l2c_tadx_int_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1C/H) Illegal write operation to a remote node with L2C_OCI_CTL[ENAOCI][node] clear. See
+ L2C_TAD()_ERR for logged information.
+ During normal hardware operation, an indication of a software failure and may be
+ considered fatal. */
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1C/H) Illegal read operation to a remote node with L2C_OCI_CTL[ENAOCI][node]
+ clear. Note [RDDISOCI] interrupts can occur during normal operation as the cores
+ are allowed to prefetch to nonexistent memory locations. Therefore, [RDDISOCI]
+ is for informational purposes only. See L2C_TAD()_ERR for logged information. */
+ uint64_t rtgdbe : 1; /**< [ 33: 33](R/W1C/H) RTG double-bit error.
+ See L2C_TAD()_RTG_ERR for logged information.
+ An indication of a hardware failure and may be considered fatal. */
+ uint64_t rtgsbe : 1; /**< [ 32: 32](R/W1C/H) RTG single-bit error on a read. See L2C_TAD()_RTG_ERR for logged
+ information. When [RTGSBE] is set, hardware corrected the error before using the
+ RTG tag, but did not correct any stored value. When [RTGSBE] is set, software
+ should eject the RTG location indicated by the corresponding
+ L2C_TAD()_RTG_ERR[WAY,L2IDX] (via a SYS CVMCACHEWBIL2I instruction below)
+ before clearing [RTGSBE]. Otherwise, hardware may encounter the error again the
+ next time the same RTG location is referenced. Software may also choose to count
+ the number of these single-bit errors.
+
+ The SYS CVMCACHEWBIL2I instruction payload should have:
+ \<pre\>
+ payload\<24\> = 1
+ payload\<23:20\> = L2C_TAD()_RTG_ERR[WAY]
+ payload\<19:7\> = L2C_TAD()_RTG_ERR[L2IDX]
+ \</pre\>
+ Note that L2C_CTL[DISIDXALIAS] has no effect on the payload. */
+ uint64_t reserved_19_31 : 13;
+ uint64_t gsyncto : 1; /**< [ 18: 18](R/W1C/H) Global sync timeout. Should not occur during normal operation. This may be an
+ indication of hardware failure, and may be considered fatal. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1C/H) One or more LFB entries encountered a timeout condition. When an [LFBTO]
+ timeout occurs, L2C_TAD()_TIMEOUT is loaded with info from the first LFB that
+ timed out; if multiple LFBs timed out simultaneously, it captures info from
+ the lowest-numbered LFB that timed out.
+ Should not occur during normal operation. OCI/CCPI link failures may cause this
+ failure. This may be an indication of hardware failure, and may be considered
+ fatal. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1C/H) Illegal write to disabled LMC error. A DRAM write arrived before LMC was enabled.
+ Should not occur during normal operation.
+ This may be considered fatal. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1C/H) Illegal read to disabled LMC error. A DRAM read arrived before LMC was enabled.
+ Should not occur during normal operation.
+ This may be considered fatal. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1C/H) Read reference outside all the defined and enabled address space
+ control (ASC) regions, or secure read reference to an ASC region
+ not enabled for secure access, or nonsecure read reference to an ASC
+ region not enabled for nonsecure access.
+ [RDNXM] interrupts can occur during normal operation as the cores are
+ allowed to prefetch to nonexistent memory locations. Therefore,
+ [RDNXM] is for informational purposes only.
+ See L2C_TAD()_ERR for logged information.
+ See L2C_ASC_REGION()_START, L2C_ASC_REGION()_END, and
+ L2C_ASC_REGION()_ATTR for ASC region specification. */
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1C/H) Write reference outside all the defined and enabled address space
+ control (ASC) regions, or secure write reference to an ASC region
+ not enabled for secure access, or nonsecure write reference to an
+ ASC region not enabled for nonsecure access.
+ This may be an indication of software
+ failure, and may be considered fatal.
+ See L2C_TAD()_ERR for logged information.
+ See L2C_ASC_REGION()_START, L2C_ASC_REGION()_END, and
+ L2C_ASC_REGION()_ATTR for ASC region specification. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t noway : 1; /**< [ 10: 10](R/W1C/H) No way was available for allocation. L2C sets [NOWAY] during its processing of a
+ transaction whenever it needed/wanted to allocate a WAY in the L2 cache, but was
+ unable to. When this bit = 1, it is (generally) not an indication that L2C
+ failed to complete transactions. Rather, it is a hint of possible performance
+ degradation. (For example, L2C must read-modify-write DRAM for every
+ transaction that updates some, but not all, of the bytes in a cache block,
+ misses in the L2 cache, and cannot allocate a WAY.) There is one 'failure' case
+ where L2C sets [NOWAY]: when it cannot leave a block locked in the L2 cache as
+ part of a LCKL2 transaction. See L2C_TTG()_ERR for logged information. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1C/H) TAG double-bit error occurred. See L2C_TTG()_ERR for logged information.
+ This is an indication of a hardware failure and may be considered fatal. */
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1C/H) TAG single-bit error on a read. See L2C_TAD()_TTG_ERR for logged
+ information. When [TAGSBE] is set, hardware corrected the error before using the
+ tag, but did not correct any stored value. When [TAGSBE] is set, software should
+ eject the TAG location indicated by the corresponding
+ L2C_TAD()_TTG_ERR[WAY,L2IDX] (via a SYS CVMCACHEWBIL2I instruction below)
+ before clearing [TAGSBE]. Otherwise, hardware may encounter the error again the
+ next time the same TAG location is referenced. Software may also choose to count
+ the number of these single-bit errors.
+
+ The SYS CVMCACHEWBIL2I instruction payload should have:
+ \<pre\>
+ payload\<24\> = 0
+ payload\<23:20\> = L2C_TAD()_TTG_ERR[WAY]
+ payload\<19:7\> = L2C_TAD()_TTG_ERR[L2IDX]
+ \</pre\>
+ Note that L2C_CTL[DISIDXALIAS] has no effect on this payload. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1C/H) FBF double-bit error occurred. See L2C_TAD()_TQD_ERR for logged information. An
+ indication of a hardware failure and may be considered fatal. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1C/H) FBF single-bit error on a read. See L2C_TAD()_TQD_ERR for logged
+ information. Hardware automatically corrected the error. Software may choose to
+ count the number of these single-bit errors. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1C/H) SBF double-bit error occurred. See L2C_TAD()_TQD_ERR for logged information. An
+ indication of a hardware failure and may be considered fatal. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1C/H) SBF single-bit error on a read. See L2C_TAD()_TQD_ERR for logged
+ information. Hardware automatically corrected the error. Software may choose to
+ count the number of these single-bit errors. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1C/H) L2D double-bit error occurred. See L2C_TAD()_TQD_ERR for logged information. An
+ indication of a hardware failure and may be considered fatal. */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1C/H) L2D single-bit error on a read. See L2C_TAD()_TQD_ERR for logged
+ information. When [L2DSBE] is set, hardware corrected the error before using the
+ data, but did not correct any stored value. When [L2DSBE] is set, software
+ should eject the cache block indicated by the corresponding
+ L2C_TAD()_TQD_ERR[QDNUM,L2DIDX] (via a SYS CVMCACHEWBIL2I instruction below)
+ before clearing [L2DSBE]. Otherwise, hardware may encounter the error again the
+ next time the same L2D location is referenced. Software may also choose to count
+ the number of these single-bit errors.
+
+ The SYS CVMCACHEWBIL2I instruction payload should have:
+ \<pre\>
+ payload\<24\> = 0
+ payload\<23:20\> = L2C_TAD()_TQD_ERR[L2DIDX]\<10:7\> // way
+ payload\<19:13\> = L2C_TAD()_TQD_ERR[L2DIDX]\<6:0\> // index\<12:6\>
+ payload\<12:11\> = L2C_TAD()_TQD_ERR[L2DIDX]\<12:11\> // index\<5:4\>
+ payload\<10\> = L2C_TAD()_TQD_ERR[QDNUM]\<2\> // index\<3\>
+ payload\<9:7\> = tad // index\<2:0\>
+ \</pre\>
+
+ where tad is the TAD index from this CSR. Note that L2C_CTL[DISIDXALIAS] has no
+ effect on the payload. */
+#else /* Word 0 - Little Endian */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1C/H) L2D single-bit error on a read. See L2C_TAD()_TQD_ERR for logged
+ information. When [L2DSBE] is set, hardware corrected the error before using the
+ data, but did not correct any stored value. When [L2DSBE] is set, software
+ should eject the cache block indicated by the corresponding
+ L2C_TAD()_TQD_ERR[QDNUM,L2DIDX] (via a SYS CVMCACHEWBIL2I instruction below)
+ before clearing [L2DSBE]. Otherwise, hardware may encounter the error again the
+ next time the same L2D location is referenced. Software may also choose to count
+ the number of these single-bit errors.
+
+ The SYS CVMCACHEWBIL2I instruction payload should have:
+ \<pre\>
+ payload\<24\> = 0
+ payload\<23:20\> = L2C_TAD()_TQD_ERR[L2DIDX]\<10:7\> // way
+ payload\<19:13\> = L2C_TAD()_TQD_ERR[L2DIDX]\<6:0\> // index\<12:6\>
+ payload\<12:11\> = L2C_TAD()_TQD_ERR[L2DIDX]\<12:11\> // index\<5:4\>
+ payload\<10\> = L2C_TAD()_TQD_ERR[QDNUM]\<2\> // index\<3\>
+ payload\<9:7\> = tad // index\<2:0\>
+ \</pre\>
+
+ where tad is the TAD index from this CSR. Note that L2C_CTL[DISIDXALIAS] has no
+ effect on the payload. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1C/H) L2D double-bit error occurred. See L2C_TAD()_TQD_ERR for logged information. An
+ indication of a hardware failure and may be considered fatal. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1C/H) SBF single-bit error on a read. See L2C_TAD()_TQD_ERR for logged
+ information. Hardware automatically corrected the error. Software may choose to
+ count the number of these single-bit errors. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1C/H) SBF double-bit error occurred. See L2C_TAD()_TQD_ERR for logged information. An
+ indication of a hardware failure and may be considered fatal. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1C/H) FBF single-bit error on a read. See L2C_TAD()_TQD_ERR for logged
+ information. Hardware automatically corrected the error. Software may choose to
+ count the number of these single-bit errors. */
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1C/H) FBF double-bit error occurred. See L2C_TAD()_TQD_ERR for logged information. An
+ indication of a hardware failure and may be considered fatal. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1C/H) TAG single-bit error on a read. See L2C_TAD()_TTG_ERR for logged
+ information. When [TAGSBE] is set, hardware corrected the error before using the
+ tag, but did not correct any stored value. When [TAGSBE] is set, software should
+ eject the TAG location indicated by the corresponding
+ L2C_TAD()_TTG_ERR[WAY,L2IDX] (via a SYS CVMCACHEWBIL2I instruction below)
+ before clearing [TAGSBE]. Otherwise, hardware may encounter the error again the
+ next time the same TAG location is referenced. Software may also choose to count
+ the number of these single-bit errors.
+
+ The SYS CVMCACHEWBIL2I instruction payload should have:
+ \<pre\>
+ payload\<24\> = 0
+ payload\<23:20\> = L2C_TAD()_TTG_ERR[WAY]
+ payload\<19:7\> = L2C_TAD()_TTG_ERR[L2IDX]
+ \</pre\>
+ Note that L2C_CTL[DISIDXALIAS] has no effect on this payload. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1C/H) TAG double-bit error occurred. See L2C_TTG()_ERR for logged information.
+ This is an indication of a hardware failure and may be considered fatal. */
+ uint64_t noway : 1; /**< [ 10: 10](R/W1C/H) No way was available for allocation. L2C sets [NOWAY] during its processing of a
+ transaction whenever it needed/wanted to allocate a WAY in the L2 cache, but was
+ unable to. When this bit = 1, it is (generally) not an indication that L2C
+ failed to complete transactions. Rather, it is a hint of possible performance
+ degradation. (For example, L2C must read-modify-write DRAM for every
+ transaction that updates some, but not all, of the bytes in a cache block,
+ misses in the L2 cache, and cannot allocate a WAY.) There is one 'failure' case
+ where L2C sets [NOWAY]: when it cannot leave a block locked in the L2 cache as
+ part of a LCKL2 transaction. See L2C_TTG()_ERR for logged information. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1C/H) Write reference outside all the defined and enabled address space
+ control (ASC) regions, or secure write reference to an ASC region
+ not enabled for secure access, or nonsecure write reference to an
+ ASC region not enabled for nonsecure access.
+ This may be an indication of software
+ failure, and may be considered fatal.
+ See L2C_TAD()_ERR for logged information.
+ See L2C_ASC_REGION()_START, L2C_ASC_REGION()_END, and
+ L2C_ASC_REGION()_ATTR for ASC region specification. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1C/H) Read reference outside all the defined and enabled address space
+ control (ASC) regions, or secure read reference to an ASC region
+ not enabled for secure access, or nonsecure read reference to an ASC
+ region not enabled for nonsecure access.
+ [RDNXM] interrupts can occur during normal operation as the cores are
+ allowed to prefetch to nonexistent memory locations. Therefore,
+ [RDNXM] is for informational purposes only.
+ See L2C_TAD()_ERR for logged information.
+ See L2C_ASC_REGION()_START, L2C_ASC_REGION()_END, and
+ L2C_ASC_REGION()_ATTR for ASC region specification. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1C/H) Illegal read to disabled LMC error. A DRAM read arrived before LMC was enabled.
+ Should not occur during normal operation.
+ This may be considered fatal. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1C/H) Illegal write to disabled LMC error. A DRAM write arrived before LMC was enabled.
+ Should not occur during normal operation.
+ This may be considered fatal. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1C/H) One or more LFB entries encountered a timeout condition. When an [LFBTO]
+ timeout occurs, L2C_TAD()_TIMEOUT is loaded with info from the first LFB that
+ timed out; if multiple LFBs timed out simultaneously, it captures info from
+ the lowest-numbered LFB that timed out.
+ Should not occur during normal operation. OCI/CCPI link failures may cause this
+ failure. This may be an indication of hardware failure, and may be considered
+ fatal. */
+ uint64_t gsyncto : 1; /**< [ 18: 18](R/W1C/H) Global sync timeout. Should not occur during normal operation. This may be an
+ indication of hardware failure, and may be considered fatal. */
+ uint64_t reserved_19_31 : 13;
+ uint64_t rtgsbe : 1; /**< [ 32: 32](R/W1C/H) RTG single-bit error on a read. See L2C_TAD()_RTG_ERR for logged
+ information. When [RTGSBE] is set, hardware corrected the error before using the
+ RTG tag, but did not correct any stored value. When [RTGSBE] is set, software
+ should eject the RTG location indicated by the corresponding
+ L2C_TAD()_RTG_ERR[WAY,L2IDX] (via a SYS CVMCACHEWBIL2I instruction below)
+ before clearing [RTGSBE]. Otherwise, hardware may encounter the error again the
+ next time the same RTG location is referenced. Software may also choose to count
+ the number of these single-bit errors.
+
+ The SYS CVMCACHEWBIL2I instruction payload should have:
+ \<pre\>
+ payload\<24\> = 1
+ payload\<23:20\> = L2C_TAD()_RTG_ERR[WAY]
+ payload\<19:7\> = L2C_TAD()_RTG_ERR[L2IDX]
+ \</pre\>
+ Note that L2C_CTL[DISIDXALIAS] has no effect on the payload. */
+ uint64_t rtgdbe : 1; /**< [ 33: 33](R/W1C/H) RTG double-bit error.
+ See L2C_TAD()_RTG_ERR for logged information.
+ An indication of a hardware failure and may be considered fatal. */
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1C/H) Illegal read operation to a remote node with L2C_OCI_CTL[ENAOCI][node]
+ clear. Note [RDDISOCI] interrupts can occur during normal operation as the cores
+ are allowed to prefetch to nonexistent memory locations. Therefore, [RDDISOCI]
+ is for informational purposes only. See L2C_TAD()_ERR for logged information. */
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1C/H) Illegal write operation to a remote node with L2C_OCI_CTL[ENAOCI][node] clear. See
+ L2C_TAD()_ERR for logged information.
+ During normal hardware operation, an indication of a software failure and may be
+ considered fatal. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_l2c_tadx_int_w1c_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1C/H) Illegal write operation to a remote node with L2C_OCI_CTL[ENAOCI][node] clear. See
+ L2C_TAD()_ERR for logged information.
+ During normal hardware operation, an indication of a software failure and may be
+ considered fatal. */
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1C/H) Illegal read operation to a remote node with L2C_OCI_CTL[ENAOCI][node]
+ clear. Note [RDDISOCI] interrupts can occur during normal operation as the cores
+ are allowed to prefetch to nonexistent memory locations. Therefore, [RDDISOCI]
+ is for informational purposes only. See L2C_TAD()_ERR for logged information. */
+ uint64_t rtgdbe : 1; /**< [ 33: 33](R/W1C/H) RTG double-bit error.
+ See L2C_TAD()_RTG_ERR for logged information.
+ An indication of a hardware failure and may be considered fatal. */
+ uint64_t rtgsbe : 1; /**< [ 32: 32](R/W1C/H) RTG single-bit error on a read. See L2C_TAD()_RTG_ERR for logged
+ information. When [RTGSBE] is set, hardware corrected the error before using the
+ RTG tag, but did not correct any stored value. When [RTGSBE] is set, software
+ should eject the RTG location indicated by the corresponding
+ L2C_TAD()_RTG_ERR[WAY,L2IDX] (via a SYS CVMCACHEWBIL2I instruction below)
+ before clearing [RTGSBE]. Otherwise, hardware may encounter the error again the
+ next time the same RTG location is referenced. Software may also choose to count
+ the number of these single-bit errors.
+
+ The SYS CVMCACHEWBIL2I instruction payload should have:
+ \<pre\>
+ payload\<24\> = 1
+ payload\<23:20\> = L2C_TAD()_RTG_ERR[WAY]
+ payload\<19:7\> = L2C_TAD()_RTG_ERR[L2IDX]
+ \</pre\>
+ Note that L2C_CTL[DISIDXALIAS] has no effect on the payload. */
+ uint64_t reserved_19_31 : 13;
+ uint64_t reserved_18 : 1;
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1C/H) One or more LFB entries encountered a timeout condition. When an [LFBTO]
+ timeout occurs, L2C_TAD()_TIMEOUT is loaded with info from the first LFB that
+ timed out; if multiple LFBs timed out simultaneously, it captures info from
+ the lowest-numbered LFB that timed out.
+ Should not occur during normal operation. OCI/CCPI link failures may cause this
+ failure. This may be an indication of hardware failure, and may be considered
+ fatal. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1C/H) Illegal write to disabled LMC error. A DRAM write arrived before LMC was enabled.
+ Should not occur during normal operation.
+ This may be considered fatal. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1C/H) Illegal read to disabled LMC error. A DRAM read arrived before LMC was enabled.
+ Should not occur during normal operation.
+ This may be considered fatal. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1C/H) Read reference outside all the defined and enabled address space
+ control (ASC) regions, or secure read reference to an ASC region
+ not enabled for secure access, or nonsecure read reference to an ASC
+ region not enabled for nonsecure access.
+ [RDNXM] interrupts can occur during normal operation as the cores are
+ allowed to prefetch to nonexistent memory locations. Therefore,
+ [RDNXM] is for informational purposes only.
+ See L2C_TAD()_ERR for logged information.
+ See L2C_ASC_REGION()_START, L2C_ASC_REGION()_END, and
+ L2C_ASC_REGION()_ATTR for ASC region specification. */
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1C/H) Write reference outside all the defined and enabled address space
+ control (ASC) regions, or secure write reference to an ASC region
+ not enabled for secure access, or nonsecure write reference to an
+ ASC region not enabled for nonsecure access.
+ This may be an indication of software
+ failure, and may be considered fatal.
+ See L2C_TAD()_ERR for logged information.
+ See L2C_ASC_REGION()_START, L2C_ASC_REGION()_END, and
+ L2C_ASC_REGION()_ATTR for ASC region specification. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t noway : 1; /**< [ 10: 10](R/W1C/H) No way was available for allocation. L2C sets [NOWAY] during its processing of a
+ transaction whenever it needed/wanted to allocate a WAY in the L2 cache, but was
+ unable to. When this bit = 1, it is (generally) not an indication that L2C
+ failed to complete transactions. Rather, it is a hint of possible performance
+ degradation. (For example, L2C must read-modify-write DRAM for every
+ transaction that updates some, but not all, of the bytes in a cache block,
+ misses in the L2 cache, and cannot allocate a WAY.) There is one 'failure' case
+ where L2C sets [NOWAY]: when it cannot leave a block locked in the L2 cache as
+ part of a LCKL2 transaction. See L2C_TTG()_ERR for logged information. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1C/H) TAG double-bit error occurred. See L2C_TTG()_ERR for logged information.
+ This is an indication of a hardware failure and may be considered fatal. */
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1C/H) TAG single-bit error on a read. See L2C_TAD()_TTG_ERR for logged
+ information. When [TAGSBE] is set, hardware corrected the error before using the
+ tag, but did not correct any stored value. When [TAGSBE] is set, software should
+ eject the TAG location indicated by the corresponding
+ L2C_TAD()_TTG_ERR[WAY,L2IDX] (via a SYS CVMCACHEWBIL2I instruction below)
+ before clearing [TAGSBE]. Otherwise, hardware may encounter the error again the
+ next time the same TAG location is referenced. Software may also choose to count
+ the number of these single-bit errors.
+
+ The SYS CVMCACHEWBIL2I instruction payload should have:
+ \<pre\>
+ payload\<24\> = 0
+ payload\<23:20\> = L2C_TAD()_TTG_ERR[WAY]
+ payload\<19:7\> = L2C_TAD()_TTG_ERR[L2IDX]
+ \</pre\>
+ Note that L2C_CTL[DISIDXALIAS] has no effect on this payload. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1C/H) FBF double-bit error occurred. See L2C_TAD()_TQD_ERR for logged information. An
+ indication of a hardware failure and may be considered fatal. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1C/H) FBF single-bit error on a read. See L2C_TAD()_TQD_ERR for logged
+ information. Hardware automatically corrected the error. Software may choose to
+ count the number of these single-bit errors. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1C/H) SBF double-bit error occurred. See L2C_TAD()_TQD_ERR for logged information. An
+ indication of a hardware failure and may be considered fatal. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1C/H) SBF single-bit error on a read. See L2C_TAD()_TQD_ERR for logged
+ information. Hardware automatically corrected the error. Software may choose to
+ count the number of these single-bit errors. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1C/H) L2D double-bit error occurred. See L2C_TAD()_TQD_ERR for logged information. An
+ indication of a hardware failure and may be considered fatal. */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1C/H) L2D single-bit error on a read. See L2C_TAD()_TQD_ERR for logged
+ information. When [L2DSBE] is set, hardware corrected the error before using the
+ data, but did not correct any stored value. When [L2DSBE] is set, software
+ should eject the cache block indicated by the corresponding
+ L2C_TAD()_TQD_ERR[QDNUM,L2DIDX] (via a SYS CVMCACHEWBIL2I instruction below)
+ before clearing [L2DSBE]. Otherwise, hardware may encounter the error again the
+ next time the same L2D location is referenced. Software may also choose to count
+ the number of these single-bit errors.
+
+ The SYS CVMCACHEWBIL2I instruction payload should have:
+ \<pre\>
+ payload\<24\> = 0
+ payload\<23:20\> = L2C_TAD()_TQD_ERR[L2DIDX]\<10:7\> // way
+ payload\<19:13\> = L2C_TAD()_TQD_ERR[L2DIDX]\<6:0\> // index\<12:6\>
+ payload\<12:11\> = L2C_TAD()_TQD_ERR[L2DIDX]\<12:11\> // index\<5:4\>
+ payload\<10\> = L2C_TAD()_TQD_ERR[QDNUM]\<2\> // index\<3\>
+ payload\<9:7\> = tad // index\<2:0\>
+ \</pre\>
+
+ where tad is the TAD index from this CSR. Note that L2C_CTL[DISIDXALIAS] has no
+ effect on the payload. */
+#else /* Word 0 - Little Endian */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1C/H) L2D single-bit error on a read. See L2C_TAD()_TQD_ERR for logged
+ information. When [L2DSBE] is set, hardware corrected the error before using the
+ data, but did not correct any stored value. When [L2DSBE] is set, software
+ should eject the cache block indicated by the corresponding
+ L2C_TAD()_TQD_ERR[QDNUM,L2DIDX] (via a SYS CVMCACHEWBIL2I instruction below)
+ before clearing [L2DSBE]. Otherwise, hardware may encounter the error again the
+ next time the same L2D location is referenced. Software may also choose to count
+ the number of these single-bit errors.
+
+ The SYS CVMCACHEWBIL2I instruction payload should have:
+ \<pre\>
+ payload\<24\> = 0
+ payload\<23:20\> = L2C_TAD()_TQD_ERR[L2DIDX]\<10:7\> // way
+ payload\<19:13\> = L2C_TAD()_TQD_ERR[L2DIDX]\<6:0\> // index\<12:6\>
+ payload\<12:11\> = L2C_TAD()_TQD_ERR[L2DIDX]\<12:11\> // index\<5:4\>
+ payload\<10\> = L2C_TAD()_TQD_ERR[QDNUM]\<2\> // index\<3\>
+ payload\<9:7\> = tad // index\<2:0\>
+ \</pre\>
+
+ where tad is the TAD index from this CSR. Note that L2C_CTL[DISIDXALIAS] has no
+ effect on the payload. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1C/H) L2D double-bit error occurred. See L2C_TAD()_TQD_ERR for logged information. An
+ indication of a hardware failure and may be considered fatal. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1C/H) SBF single-bit error on a read. See L2C_TAD()_TQD_ERR for logged
+ information. Hardware automatically corrected the error. Software may choose to
+ count the number of these single-bit errors. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1C/H) SBF double-bit error occurred. See L2C_TAD()_TQD_ERR for logged information. An
+ indication of a hardware failure and may be considered fatal. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1C/H) FBF single-bit error on a read. See L2C_TAD()_TQD_ERR for logged
+ information. Hardware automatically corrected the error. Software may choose to
+ count the number of these single-bit errors. */
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1C/H) FBF double-bit error occurred. See L2C_TAD()_TQD_ERR for logged information. An
+ indication of a hardware failure and may be considered fatal. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1C/H) TAG single-bit error on a read. See L2C_TAD()_TTG_ERR for logged
+ information. When [TAGSBE] is set, hardware corrected the error before using the
+ tag, but did not correct any stored value. When [TAGSBE] is set, software should
+ eject the TAG location indicated by the corresponding
+ L2C_TAD()_TTG_ERR[WAY,L2IDX] (via a SYS CVMCACHEWBIL2I instruction below)
+ before clearing [TAGSBE]. Otherwise, hardware may encounter the error again the
+ next time the same TAG location is referenced. Software may also choose to count
+ the number of these single-bit errors.
+
+ The SYS CVMCACHEWBIL2I instruction payload should have:
+ \<pre\>
+ payload\<24\> = 0
+ payload\<23:20\> = L2C_TAD()_TTG_ERR[WAY]
+ payload\<19:7\> = L2C_TAD()_TTG_ERR[L2IDX]
+ \</pre\>
+ Note that L2C_CTL[DISIDXALIAS] has no effect on this payload. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1C/H) TAG double-bit error occurred. See L2C_TTG()_ERR for logged information.
+ This is an indication of a hardware failure and may be considered fatal. */
+ uint64_t noway : 1; /**< [ 10: 10](R/W1C/H) No way was available for allocation. L2C sets [NOWAY] during its processing of a
+ transaction whenever it needed/wanted to allocate a WAY in the L2 cache, but was
+ unable to. When this bit = 1, it is (generally) not an indication that L2C
+ failed to complete transactions. Rather, it is a hint of possible performance
+ degradation. (For example, L2C must read-modify-write DRAM for every
+ transaction that updates some, but not all, of the bytes in a cache block,
+ misses in the L2 cache, and cannot allocate a WAY.) There is one 'failure' case
+ where L2C sets [NOWAY]: when it cannot leave a block locked in the L2 cache as
+ part of a LCKL2 transaction. See L2C_TTG()_ERR for logged information. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1C/H) Write reference outside all the defined and enabled address space
+ control (ASC) regions, or secure write reference to an ASC region
+ not enabled for secure access, or nonsecure write reference to an
+ ASC region not enabled for nonsecure access.
+ This may be an indication of software
+ failure, and may be considered fatal.
+ See L2C_TAD()_ERR for logged information.
+ See L2C_ASC_REGION()_START, L2C_ASC_REGION()_END, and
+ L2C_ASC_REGION()_ATTR for ASC region specification. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1C/H) Read reference outside all the defined and enabled address space
+ control (ASC) regions, or secure read reference to an ASC region
+ not enabled for secure access, or nonsecure read reference to an ASC
+ region not enabled for nonsecure access.
+ [RDNXM] interrupts can occur during normal operation as the cores are
+ allowed to prefetch to nonexistent memory locations. Therefore,
+ [RDNXM] is for informational purposes only.
+ See L2C_TAD()_ERR for logged information.
+ See L2C_ASC_REGION()_START, L2C_ASC_REGION()_END, and
+ L2C_ASC_REGION()_ATTR for ASC region specification. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1C/H) Illegal read to disabled LMC error. A DRAM read arrived before LMC was enabled.
+ Should not occur during normal operation.
+ This may be considered fatal. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1C/H) Illegal write to disabled LMC error. A DRAM write arrived before LMC was enabled.
+ Should not occur during normal operation.
+ This may be considered fatal. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1C/H) One or more LFB entries have encountered a timeout condition. When the [LFBTO]
+ timeout condition occurs, L2C_TAD()_TIMEOUT is loaded with info from the first
+ LFB that timed out. If multiple LFBs timed out simultaneously, it captures info
+ from the lowest LFB number that timed out.
+ Should not occur during normal operation. OCI/CCPI link failures may cause this
+ failure. This may be an indication of hardware failure, and may be considered
+ fatal. */
+ uint64_t reserved_18 : 1;
+ uint64_t reserved_19_31 : 13;
+ uint64_t rtgsbe : 1; /**< [ 32: 32](R/W1C/H) RTG single-bit error on a read. See L2C_TAD()_RTG_ERR for logged
+ information. When [RTGSBE] is set, hardware corrected the error before using the
+ RTG tag, but did not correct any stored value. When [RTGSBE] is set, software
+ should eject the RTG location indicated by the corresponding
+ L2C_TAD()_RTG_ERR[WAY,L2IDX] (via a SYS CVMCACHEWBIL2I instruction below)
+ before clearing [RTGSBE]. Otherwise, hardware may encounter the error again the
+ next time the same RTG location is referenced. Software may also choose to count
+ the number of these single-bit errors.
+
+ The SYS CVMCACHEWBIL2I instruction payload should have:
+ \<pre\>
+ payload\<24\> = 1
+ payload\<23:20\> = L2C_TAD()_RTG_ERR[WAY]
+ payload\<19:7\> = L2C_TAD()_RTG_ERR[L2IDX]
+ \</pre\>
+ Note that L2C_CTL[DISIDXALIAS] has no effect on the payload. */
+ uint64_t rtgdbe : 1; /**< [ 33: 33](R/W1C/H) RTG double-bit error.
+ See L2C_TAD()_RTG_ERR for logged information.
+ An indication of a hardware failure and may be considered fatal. */
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1C/H) Illegal read operation to a remote node with L2C_OCI_CTL[ENAOCI][node]
+ clear. Note [RDDISOCI] interrupts can occur during normal operation as the cores
+ are allowed to prefetch to nonexistent memory locations. Therefore, [RDDISOCI]
+ is for informational purposes only. See L2C_TAD()_ERR for logged information. */
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1C/H) Illegal write operation to a remote node with L2C_OCI_CTL[ENAOCI][node] clear. See
+ L2C_TAD()_ERR for logged information.
+ During normal hardware operation, an indication of a software failure and may be
+ considered fatal. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_l2c_tadx_int_w1c_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1C/H) Illegal write operation to a remote node with L2C_OCI_CTL[ENAOCI][node] clear. See
+ L2C_TAD()_ERR for logged information.
+ During normal hardware operation, an indication of a software failure and may be
+ considered fatal. */
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1C/H) Illegal read operation to a remote node with L2C_OCI_CTL[ENAOCI][node]
+ clear. Note [RDDISOCI] interrupts can occur during normal operation as the cores
+ are allowed to prefetch to nonexistent memory locations. Therefore, [RDDISOCI]
+ is for informational purposes only. See L2C_TAD()_ERR for logged information. */
+ uint64_t reserved_19_33 : 15;
+ uint64_t gsyncto : 1; /**< [ 18: 18](R/W1C/H) Global sync timeout. Should not occur during normal operation. This may be an
+ indication of hardware failure, and may be considered fatal. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1C/H) One or more LFB entries have encountered a timeout condition. When the [LFBTO]
+ timeout condition occurs, L2C_TAD()_TIMEOUT is loaded with info from the first
+ LFB that timed out. If multiple LFBs timed out simultaneously, it captures info
+ from the lowest LFB number that timed out.
+ Should not occur during normal operation. OCI/CCPI link failures may cause this
+ failure. This may be an indication of hardware failure, and may be considered
+ fatal. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1C/H) Illegal write to disabled LMC error. A DRAM write arrived before LMC was enabled.
+ Should not occur during normal operation.
+ This may be considered fatal. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1C/H) Illegal read to disabled LMC error. A DRAM read arrived before LMC was enabled.
+ Should not occur during normal operation.
+ This may be considered fatal. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1C/H) Read reference outside all the defined and enabled address space
+ control (ASC) regions, or secure read reference to an ASC region
+ not enabled for secure access, or nonsecure read reference to an ASC
+ region not enabled for nonsecure access.
+ [RDNXM] interrupts can occur during normal operation as the cores are
+ allowed to prefetch to nonexistent memory locations. Therefore,
+ [RDNXM] is for informational purposes only.
+ See L2C_TAD()_ERR for logged information.
+ See L2C_ASC_REGION()_START, L2C_ASC_REGION()_END, and
+ L2C_ASC_REGION()_ATTR for ASC region specification. */
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1C/H) Write reference outside all the defined and enabled address space
+ control (ASC) regions, or secure write reference to an ASC region
+ not enabled for secure access, or nonsecure write reference to an
+ ASC region not enabled for nonsecure access.
+ This may be an indication of software
+ failure, and may be considered fatal.
+ See L2C_TAD()_ERR for logged information.
+ See L2C_ASC_REGION()_START, L2C_ASC_REGION()_END, and
+ L2C_ASC_REGION()_ATTR for ASC region specification. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t noway : 1; /**< [ 10: 10](R/W1C/H) No way was available for allocation. L2C sets [NOWAY] during its processing of a
+ transaction whenever it needed/wanted to allocate a WAY in the L2 cache, but was
+ unable to. When this bit = 1, it is (generally) not an indication that L2C
+ failed to complete transactions. Rather, it is a hint of possible performance
+ degradation. (For example, L2C must read-modify-write DRAM for every
+ transaction that updates some, but not all, of the bytes in a cache block,
+ misses in the L2 cache, and cannot allocate a WAY.) There is one 'failure' case
+ where L2C sets [NOWAY]: when it cannot leave a block locked in the L2 cache as
+ part of a LCKL2 transaction. See L2C_TTG()_ERR for logged information. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1C/H) TAG double-bit error occurred. See L2C_TTG()_ERR for logged information.
+ This is an indication of a hardware failure and may be considered fatal. */
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1C/H) TAG single-bit error on a read. See L2C_TAD()_TTG_ERR for logged
+ information. When [TAGSBE] is set, hardware corrected the error before using the
+ tag, but did not correct any stored value. When [TAGSBE] is set, software should
+ eject the TAG location indicated by the corresponding
+ L2C_TAD()_TTG_ERR[WAY,L2IDX] (via a SYS CVMCACHEWBIL2I instruction below)
+ before clearing [TAGSBE]. Otherwise, hardware may encounter the error again the
+ next time the same TAG location is referenced. Software may also choose to count
+ the number of these single-bit errors.
+
+ The SYS CVMCACHEWBIL2I instruction payload should have:
+ \<pre\>
+ payload\<24\> = 0
+ payload\<23:20\> = L2C_TAD()_TTG_ERR[WAY]
+ payload\<19:7\> = L2C_TAD()_TTG_ERR[L2IDX]
+ \</pre\>
+ Note that L2C_CTL[DISIDXALIAS] has no effect on this payload. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1C/H) FBF double-bit error occurred. See L2C_TAD()_TQD_ERR for logged information. An
+ indication of a hardware failure and may be considered fatal. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1C/H) FBF single-bit error on a read. See L2C_TAD()_TQD_ERR for logged
+ information. Hardware automatically corrected the error. Software may choose to
+ count the number of these single-bit errors. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1C/H) SBF double-bit error occurred. See L2C_TAD()_TQD_ERR for logged information. An
+ indication of a hardware failure and may be considered fatal. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1C/H) SBF single-bit error on a read. See L2C_TAD()_TQD_ERR for logged
+ information. Hardware automatically corrected the error. Software may choose to
+ count the number of these single-bit errors. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1C/H) L2D double-bit error occurred. See L2C_TAD()_TQD_ERR for logged information. An
+ indication of a hardware failure and may be considered fatal. */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1C/H) L2D single-bit error on a read. See L2C_TAD()_TQD_ERR for logged
+ information. When [L2DSBE] is set, hardware corrected the error before using the
+ data, but did not correct any stored value. When [L2DSBE] is set, software
+ should eject the cache block indicated by the corresponding
+ L2C_TAD()_TQD_ERR[QDNUM,L2DIDX] (via a SYS CVMCACHEWBIL2I instruction below)
+ before clearing [L2DSBE]. Otherwise, hardware may encounter the error again the
+ next time the same L2D location is referenced. Software may also choose to count
+ the number of these single-bit errors.
+
+ The SYS CVMCACHEWBIL2I instruction payload should have:
+ \<pre\>
+ payload\<24:21\> = 0
+ payload\<20:17\> = L2C_TAD()_TQD_ERR[L2DIDX]\<10:7\> // way
+ payload\<16:10\> = L2C_TAD()_TQD_ERR[L2DIDX]\<6:0\> // index\<9:3\>
+ payload\<9:8\> = L2C_TAD()_TQD_ERR[L2DIDX]\<12:11\> // index\<2:1\>
+ payload\<7\> = L2C_TAD()_TQD_ERR[QDNUM]\<2\> // index\<0\>
+ \</pre\>
+
+ Note that L2C_CTL[DISIDXALIAS] has no effect on the payload. */
+#else /* Word 0 - Little Endian */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1C/H) L2D single-bit error on a read. See L2C_TAD()_TQD_ERR for logged
+ information. When [L2DSBE] is set, hardware corrected the error before using the
+ data, but did not correct any stored value. When [L2DSBE] is set, software
+ should eject the cache block indicated by the corresponding
+ L2C_TAD()_TQD_ERR[QDNUM,L2DIDX] (via a SYS CVMCACHEWBIL2I instruction below)
+ before clearing [L2DSBE]. Otherwise, hardware may encounter the error again the
+ next time the same L2D location is referenced. Software may also choose to count
+ the number of these single-bit errors.
+
+ The SYS CVMCACHEWBIL2I instruction payload should have:
+ \<pre\>
+ payload\<24:21\> = 0
+ payload\<20:17\> = L2C_TAD()_TQD_ERR[L2DIDX]\<10:7\> // way
+ payload\<16:10\> = L2C_TAD()_TQD_ERR[L2DIDX]\<6:0\> // index\<9:3\>
+ payload\<9:8\> = L2C_TAD()_TQD_ERR[L2DIDX]\<12:11\> // index\<2:1\>
+ payload\<7\> = L2C_TAD()_TQD_ERR[QDNUM]\<2\> // index\<0\>
+ \</pre\>
+
+ Note that L2C_CTL[DISIDXALIAS] has no effect on the payload. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1C/H) L2D double-bit error occurred. See L2C_TAD()_TQD_ERR for logged information. An
+ indication of a hardware failure and may be considered fatal. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1C/H) SBF single-bit error on a read. See L2C_TAD()_TQD_ERR for logged
+ information. Hardware automatically corrected the error. Software may choose to
+ count the number of these single-bit errors. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1C/H) SBF double-bit error occurred. See L2C_TAD()_TQD_ERR for logged information. An
+ indication of a hardware failure and may be considered fatal. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1C/H) FBF single-bit error on a read. See L2C_TAD()_TQD_ERR for logged
+ information. Hardware automatically corrected the error. Software may choose to
+ count the number of these single-bit errors. */
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1C/H) FBF double-bit error occurred. See L2C_TAD()_TQD_ERR for logged information. An
+ indication of a hardware failure and may be considered fatal. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1C/H) TAG single-bit error on a read. See L2C_TAD()_TTG_ERR for logged
+ information. When [TAGSBE] is set, hardware corrected the error before using the
+ tag, but did not correct any stored value. When [TAGSBE] is set, software should
+ eject the TAG location indicated by the corresponding
+ L2C_TAD()_TTG_ERR[WAY,L2IDX] (via a SYS CVMCACHEWBIL2I instruction below)
+ before clearing [TAGSBE]. Otherwise, hardware may encounter the error again the
+ next time the same TAG location is referenced. Software may also choose to count
+ the number of these single-bit errors.
+
+ The SYS CVMCACHEWBIL2I instruction payload should have:
+ \<pre\>
+ payload\<24\> = 0
+ payload\<23:20\> = L2C_TAD()_TTG_ERR[WAY]
+ payload\<19:7\> = L2C_TAD()_TTG_ERR[L2IDX]
+ \</pre\>
+ Note that L2C_CTL[DISIDXALIAS] has no effect on this payload. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1C/H) TAG double-bit error occurred. See L2C_TTG()_ERR for logged information.
+ This is an indication of a hardware failure and may be considered fatal. */
+ uint64_t noway : 1; /**< [ 10: 10](R/W1C/H) No way was available for allocation. L2C sets [NOWAY] during its processing of a
+ transaction whenever it needed/wanted to allocate a WAY in the L2 cache, but was
+ unable to. When this bit = 1, it is (generally) not an indication that L2C
+ failed to complete transactions. Rather, it is a hint of possible performance
+ degradation. (For example, L2C must read-modify-write DRAM for every
+ transaction that updates some, but not all, of the bytes in a cache block,
+ misses in the L2 cache, and cannot allocate a WAY.) There is one 'failure' case
+ where L2C sets [NOWAY]: when it cannot leave a block locked in the L2 cache as
+ part of a LCKL2 transaction. See L2C_TTG()_ERR for logged information. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1C/H) Write reference outside all the defined and enabled address space
+ control (ASC) regions, or secure write reference to an ASC region
+ not enabled for secure access, or nonsecure write reference to an
+ ASC region not enabled for nonsecure access.
+ This may be an indication of software
+ failure, and may be considered fatal.
+ See L2C_TAD()_ERR for logged information.
+ See L2C_ASC_REGION()_START, L2C_ASC_REGION()_END, and
+ L2C_ASC_REGION()_ATTR for ASC region specification. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1C/H) Read reference outside all the defined and enabled address space
+ control (ASC) regions, or secure read reference to an ASC region
+ not enabled for secure access, or nonsecure read reference to an ASC
+ region not enabled for nonsecure access.
+ [RDNXM] interrupts can occur during normal operation as the cores are
+ allowed to prefetch to nonexistent memory locations. Therefore,
+ [RDNXM] is for informational purposes only.
+ See L2C_TAD()_ERR for logged information.
+ See L2C_ASC_REGION()_START, L2C_ASC_REGION()_END, and
+ L2C_ASC_REGION()_ATTR for ASC region specification. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1C/H) Illegal read to disabled LMC error. A DRAM read arrived before LMC was enabled.
+ Should not occur during normal operation.
+ This may be considered fatal. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1C/H) Illegal write to disabled LMC error. A DRAM write arrived before LMC was enabled.
+ Should not occur during normal operation.
+ This may be considered fatal. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1C/H) One or more LFB entries have encountered a timeout condition. When the [LFBTO]
+ timeout condition occurs, L2C_TAD()_TIMEOUT is loaded with info from the first
+ LFB that timed out. If multiple LFBs timed out simultaneously, it captures info
+ from the lowest LFB number that timed out.
+ Should not occur during normal operation. OCI/CCPI link failures may cause this
+ failure. This may be an indication of hardware failure, and may be considered
+ fatal. */
+ uint64_t gsyncto : 1; /**< [ 18: 18](R/W1C/H) Global sync timeout. Should not occur during normal operation. This may be an
+ indication of hardware failure, and may be considered fatal. */
+ uint64_t reserved_19_33 : 15;
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1C/H) Illegal read operation to a remote node with L2C_OCI_CTL[ENAOCI][node]
+ clear. Note [RDDISOCI] interrupts can occur during normal operation as the cores
+ are allowed to prefetch to nonexistent memory locations. Therefore, [RDDISOCI]
+ is for informational purposes only. See L2C_TAD()_ERR for logged information. */
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1C/H) Illegal write operation to a remote node with L2C_OCI_CTL[ENAOCI][node] clear. See
+ L2C_TAD()_ERR for logged information.
+ During normal hardware operation, an indication of a software failure and may be
+ considered fatal. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_l2c_tadx_int_w1c_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1C/H) Illegal write operation to a remote node with L2C_OCI_CTL[ENAOCI][node] clear. See
+ L2C_TAD()_ERR for logged information.
+ During normal hardware operation, an indication of a software failure and may be
+ considered fatal. */
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1C/H) Illegal read operation to a remote node with L2C_OCI_CTL[ENAOCI][node]
+ clear. Note [RDDISOCI] interrupts can occur during normal operation as the cores
+ are allowed to prefetch to nonexistent memory locations. Therefore, [RDDISOCI]
+ is for informational purposes only. See L2C_TAD()_ERR for logged information. */
+ uint64_t reserved_19_33 : 15;
+ uint64_t gsyncto : 1; /**< [ 18: 18](R/W1C/H) Global sync timeout. Should not occur during normal operation. This may be an
+ indication of hardware failure, and may be considered fatal. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1C/H) One or more LFB entries have encountered a timeout condition. When the [LFBTO]
+ timeout condition occurs, L2C_TAD()_TIMEOUT is loaded with info from the first
+ LFB that timed out. If multiple LFBs timed out simultaneously, it captures info
+ from the lowest LFB number that timed out.
+ Should not occur during normal operation. OCI/CCPI link failures may cause this
+ failure. This may be an indication of hardware failure, and may be considered
+ fatal. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1C/H) Illegal write to disabled LMC error. A DRAM write arrived before LMC was enabled.
+ Should not occur during normal operation.
+ This may be considered fatal. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1C/H) Illegal read to disabled LMC error. A DRAM read arrived before LMC was enabled.
+ Should not occur during normal operation.
+ This may be considered fatal. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1C/H) Read reference outside all the defined and enabled address space
+ control (ASC) regions, or secure read reference to an ASC region
+ not enabled for secure access, or nonsecure read reference to an ASC
+ region not enabled for nonsecure access.
+ [RDNXM] interrupts can occur during normal operation as the cores are
+ allowed to prefetch to nonexistent memory locations. Therefore,
+ [RDNXM] is for informational purposes only.
+ See L2C_TAD()_ERR for logged information.
+ See L2C_ASC_REGION()_START, L2C_ASC_REGION()_END, and
+ L2C_ASC_REGION()_ATTR for ASC region specification. */
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1C/H) Write reference outside all the defined and enabled address space
+ control (ASC) regions, or secure write reference to an ASC region
+ not enabled for secure access, or nonsecure write reference to an
+ ASC region not enabled for nonsecure access.
+ This may be an indication of software
+ failure, and may be considered fatal.
+ See L2C_TAD()_ERR for logged information.
+ See L2C_ASC_REGION()_START, L2C_ASC_REGION()_END, and
+ L2C_ASC_REGION()_ATTR for ASC region specification. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t noway : 1; /**< [ 10: 10](R/W1C/H) No way was available for allocation. L2C sets [NOWAY] during its processing of a
+ transaction whenever it needed/wanted to allocate a WAY in the L2 cache, but was
+ unable to. When this bit = 1, it is (generally) not an indication that L2C
+ failed to complete transactions. Rather, it is a hint of possible performance
+ degradation. (For example, L2C must read-modify-write DRAM for every
+ transaction that updates some, but not all, of the bytes in a cache block,
+ misses in the L2 cache, and cannot allocate a WAY.) There is one 'failure' case
+ where L2C sets [NOWAY]: when it cannot leave a block locked in the L2 cache as
+ part of a LCKL2 transaction. See L2C_TTG()_ERR for logged information. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1C/H) TAG double-bit error occurred. See L2C_TTG()_ERR for logged information.
+ This is an indication of a hardware failure and may be considered fatal. */
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1C/H) TAG single-bit error on a read. See L2C_TAD()_TTG_ERR for logged
+ information. When [TAGSBE] is set, hardware corrected the error before using the
+ tag, but did not correct any stored value. When [TAGSBE] is set, software should
+ eject the TAG location indicated by the corresponding
+ L2C_TAD()_TTG_ERR[WAY,L2IDX] (via a SYS CVMCACHEWBIL2I instruction below)
+ before clearing [TAGSBE]. Otherwise, hardware may encounter the error again the
+ next time the same TAG location is referenced. Software may also choose to count
+ the number of these single-bit errors.
+
+ The SYS CVMCACHEWBIL2I instruction payload should have:
+ \<pre\>
+ payload\<24\> = 0
+ payload\<23:20\> = L2C_TAD()_TTG_ERR[WAY]
+ payload\<19:7\> = L2C_TAD()_TTG_ERR[L2IDX]
+ \</pre\>
+ Note that L2C_CTL[DISIDXALIAS] has no effect on this payload. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1C/H) FBF double-bit error occurred. See L2C_TAD()_TQD_ERR for logged information. An
+ indication of a hardware failure and may be considered fatal. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1C/H) FBF single-bit error on a read. See L2C_TAD()_TQD_ERR for logged
+ information. Hardware automatically corrected the error. Software may choose to
+ count the number of these single-bit errors. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1C/H) SBF double-bit error occurred. See L2C_TAD()_TQD_ERR for logged information. An
+ indication of a hardware failure and may be considered fatal. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1C/H) SBF single-bit error on a read. See L2C_TAD()_TQD_ERR for logged
+ information. Hardware automatically corrected the error. Software may choose to
+ count the number of these single-bit errors. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1C/H) L2D double-bit error occurred. See L2C_TAD()_TQD_ERR for logged information. An
+ indication of a hardware failure and may be considered fatal. */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1C/H) L2D single-bit error on a read. See L2C_TAD()_TQD_ERR for logged
+ information. When [L2DSBE] is set, hardware corrected the error before using the
+ data, but did not correct any stored value. When [L2DSBE] is set, software
+ should eject the cache block indicated by the corresponding
+ L2C_TAD()_TQD_ERR[QDNUM,L2DIDX] (via a SYS CVMCACHEWBIL2I instruction below)
+ before clearing [L2DSBE]. Otherwise, hardware may encounter the error again the
+ next time the same L2D location is referenced. Software may also choose to count
+ the number of these single-bit errors.
+
+ The SYS CVMCACHEWBIL2I instruction payload should have:
+ \<pre\>
+ payload\<24:23\> = 0
+ payload\<22:19\> = L2C_TAD()_TQD_ERR[L2DIDX]\<10:7\> // way
+ payload\<18:12\> = L2C_TAD()_TQD_ERR[L2DIDX]\<6:0\> // index\<11:5\>
+ payload\<11:10\> = L2C_TAD()_TQD_ERR[L2DIDX]\<12:11\> // index\<4:3\>
+ payload\<9\> = L2C_TAD()_TQD_ERR[QDNUM]\<2\> // index\<2\>
+ payload\<8:7\> = tad // index\<1:0\>
+ \</pre\>
+
+ where tad is the TAD index from this CSR. Note that L2C_CTL[DISIDXALIAS] has no
+ effect on the payload. */
+#else /* Word 0 - Little Endian */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1C/H) L2D single-bit error on a read. See L2C_TAD()_TQD_ERR for logged
+ information. When [L2DSBE] is set, hardware corrected the error before using the
+ data, but did not correct any stored value. When [L2DSBE] is set, software
+ should eject the cache block indicated by the corresponding
+ L2C_TAD()_TQD_ERR[QDNUM,L2DIDX] (via a SYS CVMCACHEWBIL2I instruction below)
+ before clearing [L2DSBE]. Otherwise, hardware may encounter the error again the
+ next time the same L2D location is referenced. Software may also choose to count
+ the number of these single-bit errors.
+
+ The SYS CVMCACHEWBIL2I instruction payload should have:
+ \<pre\>
+ payload\<24:23\> = 0
+ payload\<22:19\> = L2C_TAD()_TQD_ERR[L2DIDX]\<10:7\> // way
+ payload\<18:12\> = L2C_TAD()_TQD_ERR[L2DIDX]\<6:0\> // index\<11:5\>
+ payload\<11:10\> = L2C_TAD()_TQD_ERR[L2DIDX]\<12:11\> // index\<4:3\>
+ payload\<9\> = L2C_TAD()_TQD_ERR[QDNUM]\<2\> // index\<2\>
+ payload\<8:7\> = tad // index\<1:0\>
+ \</pre\>
+
+ where tad is the TAD index from this CSR. Note that L2C_CTL[DISIDXALIAS] has no
+ effect on the payload. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1C/H) L2D double-bit error occurred. See L2C_TAD()_TQD_ERR for logged information. An
+ indication of a hardware failure and may be considered fatal. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1C/H) SBF single-bit error on a read. See L2C_TAD()_TQD_ERR for logged
+ information. Hardware automatically corrected the error. Software may choose to
+ count the number of these single-bit errors. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1C/H) SBF double-bit error occurred. See L2C_TAD()_TQD_ERR for logged information. An
+ indication of a hardware failure and may be considered fatal. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1C/H) FBF single-bit error on a read. See L2C_TAD()_TQD_ERR for logged
+ information. Hardware automatically corrected the error. Software may choose to
+ count the number of these single-bit errors. */
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1C/H) FBF double-bit error occurred. See L2C_TAD()_TQD_ERR for logged information. An
+ indication of a hardware failure and may be considered fatal. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1C/H) TAG single-bit error on a read. See L2C_TAD()_TTG_ERR for logged
+ information. When [TAGSBE] is set, hardware corrected the error before using the
+ tag, but did not correct any stored value. When [TAGSBE] is set, software should
+ eject the TAG location indicated by the corresponding
+ L2C_TAD()_TTG_ERR[WAY,L2IDX] (via a SYS CVMCACHEWBIL2I instruction below)
+ before clearing [TAGSBE]. Otherwise, hardware may encounter the error again the
+ next time the same TAG location is referenced. Software may also choose to count
+ the number of these single-bit errors.
+
+ The SYS CVMCACHEWBIL2I instruction payload should have:
+ \<pre\>
+ payload\<24\> = 0
+ payload\<23:20\> = L2C_TAD()_TTG_ERR[WAY]
+ payload\<19:7\> = L2C_TAD()_TTG_ERR[L2IDX]
+ \</pre\>
+ Note that L2C_CTL[DISIDXALIAS] has no effect on this payload. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1C/H) TAG double-bit error occurred. See L2C_TTG()_ERR for logged information.
+ This is an indication of a hardware failure and may be considered fatal. */
+ uint64_t noway : 1; /**< [ 10: 10](R/W1C/H) No way was available for allocation. L2C sets [NOWAY] during its processing of a
+ transaction whenever it needed/wanted to allocate a WAY in the L2 cache, but was
+ unable to. When this bit = 1, it is (generally) not an indication that L2C
+ failed to complete transactions. Rather, it is a hint of possible performance
+ degradation. (For example, L2C must read-modify-write DRAM for every
+ transaction that updates some, but not all, of the bytes in a cache block,
+ misses in the L2 cache, and cannot allocate a WAY.) There is one 'failure' case
+ where L2C sets [NOWAY]: when it cannot leave a block locked in the L2 cache as
+ part of a LCKL2 transaction. See L2C_TTG()_ERR for logged information. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1C/H) Write reference outside all the defined and enabled address space
+ control (ASC) regions, or secure write reference to an ASC region
+ not enabled for secure access, or nonsecure write reference to an
+ ASC region not enabled for nonsecure access.
+ This may be an indication of software
+ failure, and may be considered fatal.
+ See L2C_TAD()_ERR for logged information.
+ See L2C_ASC_REGION()_START, L2C_ASC_REGION()_END, and
+ L2C_ASC_REGION()_ATTR for ASC region specification. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1C/H) Read reference outside all the defined and enabled address space
+ control (ASC) regions, or secure read reference to an ASC region
+ not enabled for secure access, or nonsecure read reference to an ASC
+ region not enabled for nonsecure access.
+ [RDNXM] interrupts can occur during normal operation as the cores are
+ allowed to prefetch to nonexistent memory locations. Therefore,
+ [RDNXM] is for informational purposes only.
+ See L2C_TAD()_ERR for logged information.
+ See L2C_ASC_REGION()_START, L2C_ASC_REGION()_END, and
+ L2C_ASC_REGION()_ATTR for ASC region specification. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1C/H) Illegal read to disabled LMC error. A DRAM read arrived before LMC was enabled.
+ Should not occur during normal operation.
+ This may be considered fatal. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1C/H) Illegal write to disabled LMC error. A DRAM write arrived before LMC was enabled.
+ Should not occur during normal operation.
+ This may be considered fatal. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1C/H) One or more LFB entries have encountered a timeout condition. When the [LFBTO]
+ timeout condition occurs, L2C_TAD()_TIMEOUT is loaded with info from the first
+ LFB that timed out. If multiple LFBs timed out simultaneously, it captures info
+ from the lowest LFB number that timed out.
+ Should not occur during normal operation. OCI/CCPI link failures may cause this
+ failure. This may be an indication of hardware failure, and may be considered
+ fatal. */
+ uint64_t gsyncto : 1; /**< [ 18: 18](R/W1C/H) Global sync timeout. Should not occur during normal operation. This may be an
+ indication of hardware failure, and may be considered fatal. */
+ uint64_t reserved_19_33 : 15;
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1C/H) Illegal read operation to a remote node with L2C_OCI_CTL[ENAOCI][node]
+ clear. Note [RDDISOCI] interrupts can occur during normal operation as the cores
+ are allowed to prefetch to nonexistent memory locations. Therefore, [RDDISOCI]
+ is for informational purposes only. See L2C_TAD()_ERR for logged information. */
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1C/H) Illegal write operation to a remote node with L2C_OCI_CTL[ENAOCI][node] clear. See
+ L2C_TAD()_ERR for logged information.
+ During normal hardware operation, an indication of a software failure and may be
+ considered fatal. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } cn83xx;
+ struct bdk_l2c_tadx_int_w1c_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1C/H) Illegal write operation to a remote node with L2C_OCI_CTL[ENAOCI][node] clear. See
+ L2C_TAD()_ERR for logged information.
+ During normal hardware operation, an indication of a software failure and may be
+ considered fatal. */
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1C/H) Illegal read operation to a remote node with L2C_OCI_CTL[ENAOCI][node]
+ clear. Note [RDDISOCI] interrupts can occur during normal operation as the cores
+ are allowed to prefetch to nonexistent memory locations. Therefore, [RDDISOCI]
+ is for informational purposes only. See L2C_TAD()_ERR for logged information. */
+ uint64_t rtgdbe : 1; /**< [ 33: 33](R/W1C/H) RTG double-bit error.
+ See L2C_TAD()_RTG_ERR for logged information.
+ An indication of a hardware failure and may be considered fatal. */
+ uint64_t rtgsbe : 1; /**< [ 32: 32](R/W1C/H) RTG single-bit error on a read. See L2C_TAD()_RTG_ERR for logged
+ information. When [RTGSBE] is set, hardware corrected the error before using the
+ RTG tag, but did not correct any stored value. When [RTGSBE] is set, software
+ should eject the RTG location indicated by the corresponding
+ L2C_TAD()_RTG_ERR[WAY,L2IDX] (via a SYS CVMCACHEWBIL2I instruction below)
+ before clearing [RTGSBE]. Otherwise, hardware may encounter the error again the
+ next time the same RTG location is referenced. Software may also choose to count
+ the number of these single-bit errors.
+
+ The SYS CVMCACHEWBIL2I instruction payload should have:
+ \<pre\>
+ payload\<24\> = 1
+ payload\<23:20\> = L2C_TAD()_RTG_ERR[WAY]
+ payload\<19:7\> = L2C_TAD()_RTG_ERR[L2IDX]
+ \</pre\>
+ Note that L2C_CTL[DISIDXALIAS] has no effect on the payload. */
+ uint64_t reserved_19_31 : 13;
+ uint64_t gsyncto : 1; /**< [ 18: 18](R/W1C/H) Global sync OCI timeout. Should not occur during normal operation. OCI/CCPI link
+ failures may cause this failure. This may be an indication of hardware failure,
+ and may be considered fatal. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1C/H) One or more LFB entries have encountered a timeout condition. When the [LFBTO]
+ timeout condition occurs, L2C_TAD()_TIMEOUT is loaded with info from the first
+ LFB that timed out. If multiple LFBs timed out simultaneously, it captures info
+ from the lowest LFB number that timed out.
+ Should not occur during normal operation. OCI/CCPI link failures may cause this
+ failure. This may be an indication of hardware failure, and may be considered
+ fatal. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1C/H) Illegal write to disabled LMC error. A DRAM write arrived before LMC was enabled.
+ Should not occur during normal operation.
+ This may be considered fatal. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1C/H) Illegal read to disabled LMC error. A DRAM read arrived before LMC was enabled.
+ Should not occur during normal operation.
+ This may be considered fatal. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1C/H) Read reference outside all the defined and enabled address space
+ control (ASC) regions, or secure read reference to an ASC region
+ not enabled for secure access, or nonsecure read reference to an ASC
+ region not enabled for nonsecure access.
+ [RDNXM] interrupts can occur during normal operation as the cores are
+ allowed to prefetch to nonexistent memory locations. Therefore,
+ [RDNXM] is for informational purposes only.
+ See L2C_TAD()_ERR for logged information.
+ See L2C_ASC_REGION()_START, L2C_ASC_REGION()_END, and
+ L2C_ASC_REGION()_ATTR for ASC region specification. */
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1C/H) Write reference outside all the defined and enabled address space
+ control (ASC) regions, or secure write reference to an ASC region
+ not enabled for secure access, or nonsecure write reference to an
+ ASC region not enabled for nonsecure access.
+ This may be an indication of software
+ failure, and may be considered fatal.
+ See L2C_TAD()_ERR for logged information.
+ See L2C_ASC_REGION()_START, L2C_ASC_REGION()_END, and
+ L2C_ASC_REGION()_ATTR for ASC region specification. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t noway : 1; /**< [ 10: 10](R/W1C/H) No way was available for allocation. L2C sets [NOWAY] during its processing of a
+ transaction whenever it needed/wanted to allocate a WAY in the L2 cache, but was
+ unable to. When this bit = 1, it is (generally) not an indication that L2C
+ failed to complete transactions. Rather, it is a hint of possible performance
+ degradation. (For example, L2C must read-modify-write DRAM for every
+ transaction that updates some, but not all, of the bytes in a cache block,
+ misses in the L2 cache, and cannot allocate a WAY.) There is one 'failure' case
+ where L2C sets [NOWAY]: when it cannot leave a block locked in the L2 cache as
+ part of a LCKL2 transaction. See L2C_TTG()_ERR for logged information. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1C/H) TAG double-bit error occurred. See L2C_TTG()_ERR for logged information.
+ This is an indication of a hardware failure and may be considered fatal. */
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1C/H) TAG single-bit error on a read. See L2C_TAD()_TTG_ERR for logged
+ information. When [TAGSBE] is set, hardware corrected the error before using the
+ tag, but did not correct any stored value. When [TAGSBE] is set, software should
+ eject the TAG location indicated by the corresponding
+ L2C_TAD()_TTG_ERR[WAY,L2IDX] (via a SYS CVMCACHEWBIL2I instruction below)
+ before clearing [TAGSBE]. Otherwise, hardware may encounter the error again the
+ next time the same TAG location is referenced. Software may also choose to count
+ the number of these single-bit errors.
+
+ The SYS CVMCACHEWBIL2I instruction payload should have:
+ \<pre\>
+ payload\<24\> = 0
+ payload\<23:20\> = L2C_TAD()_TTG_ERR[WAY]
+ payload\<19:7\> = L2C_TAD()_TTG_ERR[L2IDX]
+ \</pre\>
+ Note that L2C_CTL[DISIDXALIAS] has no effect on this payload. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1C/H) FBF double-bit error occurred. See L2C_TAD()_TQD_ERR for logged information. An
+ indication of a hardware failure and may be considered fatal. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1C/H) FBF single-bit error on a read. See L2C_TAD()_TQD_ERR for logged
+ information. Hardware automatically corrected the error. Software may choose to
+ count the number of these single-bit errors. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1C/H) SBF double-bit error occurred. See L2C_TAD()_TQD_ERR for logged information. An
+ indication of a hardware failure and may be considered fatal. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1C/H) SBF single-bit error on a read. See L2C_TAD()_TQD_ERR for logged
+ information. Hardware automatically corrected the error. Software may choose to
+ count the number of these single-bit errors. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1C/H) L2D double-bit error occurred. See L2C_TAD()_TQD_ERR for logged information. An
+ indication of a hardware failure and may be considered fatal. */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1C/H) L2D single-bit error on a read. See L2C_TAD()_TQD_ERR for logged
+ information. When [L2DSBE] is set, hardware corrected the error before using the
+ data, but did not correct any stored value. When [L2DSBE] is set, software
+ should eject the cache block indicated by the corresponding
+ L2C_TAD()_TQD_ERR[QDNUM,L2DIDX] (via a SYS CVMCACHEWBIL2I instruction below)
+ before clearing [L2DSBE]. Otherwise, hardware may encounter the error again the
+ next time the same L2D location is referenced. Software may also choose to count
+ the number of these single-bit errors.
+
+ The SYS CVMCACHEWBIL2I instruction payload should have:
+ \<pre\>
+ payload\<24\> = 0
+ payload\<23:20\> = L2C_TAD()_TQD_ERR[L2DIDX]\<10:7\> // way
+ payload\<19:13\> = L2C_TAD()_TQD_ERR[L2DIDX]\<6:0\> // index\<12:6\>
+ payload\<12:11\> = L2C_TAD()_TQD_ERR[L2DIDX]\<12:11\> // index\<5:4\>
+ payload\<10\> = L2C_TAD()_TQD_ERR[QDNUM]\<2\> // index\<3\>
+ payload\<9:7\> = tad // index\<2:0\>
+ \</pre\>
+
+ where tad is the TAD index from this CSR. Note that L2C_CTL[DISIDXALIAS] has no
+ effect on the payload. */
+#else /* Word 0 - Little Endian */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1C/H) L2D single-bit error on a read. See L2C_TAD()_TQD_ERR for logged
+ information. When [L2DSBE] is set, hardware corrected the error before using the
+ data, but did not correct any stored value. When [L2DSBE] is set, software
+ should eject the cache block indicated by the corresponding
+ L2C_TAD()_TQD_ERR[QDNUM,L2DIDX] (via a SYS CVMCACHEWBIL2I instruction below)
+ before clearing [L2DSBE]. Otherwise, hardware may encounter the error again the
+ next time the same L2D location is referenced. Software may also choose to count
+ the number of these single-bit errors.
+
+ The SYS CVMCACHEWBIL2I instruction payload should have:
+ \<pre\>
+ payload\<24\> = 0
+ payload\<23:20\> = L2C_TAD()_TQD_ERR[L2DIDX]\<10:7\> // way
+ payload\<19:13\> = L2C_TAD()_TQD_ERR[L2DIDX]\<6:0\> // index\<12:6\>
+ payload\<12:11\> = L2C_TAD()_TQD_ERR[L2DIDX]\<12:11\> // index\<5:4\>
+ payload\<10\> = L2C_TAD()_TQD_ERR[QDNUM]\<2\> // index\<3\>
+ payload\<9:7\> = tad // index\<2:0\>
+ \</pre\>
+
+ where tad is the TAD index from this CSR. Note that L2C_CTL[DISIDXALIAS] has no
+ effect on the payload. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1C/H) L2D double-bit error occurred. See L2C_TAD()_TQD_ERR for logged information. An
+ indication of a hardware failure and may be considered fatal. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1C/H) SBF single-bit error on a read. See L2C_TAD()_TQD_ERR for logged
+ information. Hardware automatically corrected the error. Software may choose to
+ count the number of these single-bit errors. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1C/H) SBF double-bit error occurred. See L2C_TAD()_TQD_ERR for logged information. An
+ indication of a hardware failure and may be considered fatal. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1C/H) FBF single-bit error on a read. See L2C_TAD()_TQD_ERR for logged
+ information. Hardware automatically corrected the error. Software may choose to
+ count the number of these single-bit errors. */
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1C/H) FBF double-bit error occurred. See L2C_TAD()_TQD_ERR for logged information. An
+ indication of a hardware failure and may be considered fatal. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1C/H) TAG single-bit error on a read. See L2C_TAD()_TTG_ERR for logged
+ information. When [TAGSBE] is set, hardware corrected the error before using the
+ tag, but did not correct any stored value. When [TAGSBE] is set, software should
+ eject the TAG location indicated by the corresponding
+ L2C_TAD()_TTG_ERR[WAY,L2IDX] (via a SYS CVMCACHEWBIL2I instruction below)
+ before clearing [TAGSBE]. Otherwise, hardware may encounter the error again the
+ next time the same TAG location is referenced. Software may also choose to count
+ the number of these single-bit errors.
+
+ The SYS CVMCACHEWBIL2I instruction payload should have:
+ \<pre\>
+ payload\<24\> = 0
+ payload\<23:20\> = L2C_TAD()_TTG_ERR[WAY]
+ payload\<19:7\> = L2C_TAD()_TTG_ERR[L2IDX]
+ \</pre\>
+ Note that L2C_CTL[DISIDXALIAS] has no effect on this payload. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1C/H) TAG double-bit error occurred. See L2C_TTG()_ERR for logged information.
+ This is an indication of a hardware failure and may be considered fatal. */
+ uint64_t noway : 1; /**< [ 10: 10](R/W1C/H) No way was available for allocation. L2C sets [NOWAY] during its processing of a
+ transaction whenever it needed/wanted to allocate a WAY in the L2 cache, but was
+ unable to. When this bit = 1, it is (generally) not an indication that L2C
+ failed to complete transactions. Rather, it is a hint of possible performance
+ degradation. (For example, L2C must read-modify-write DRAM for every
+ transaction that updates some, but not all, of the bytes in a cache block,
+ misses in the L2 cache, and cannot allocate a WAY.) There is one 'failure' case
+ where L2C sets [NOWAY]: when it cannot leave a block locked in the L2 cache as
+ part of a LCKL2 transaction. See L2C_TTG()_ERR for logged information. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1C/H) Write reference outside all the defined and enabled address space
+ control (ASC) regions, or secure write reference to an ASC region
+ not enabled for secure access, or nonsecure write reference to an
+ ASC region not enabled for nonsecure access.
+ This may be an indication of software
+ failure, and may be considered fatal.
+ See L2C_TAD()_ERR for logged information.
+ See L2C_ASC_REGION()_START, L2C_ASC_REGION()_END, and
+ L2C_ASC_REGION()_ATTR for ASC region specification. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1C/H) Read reference outside all the defined and enabled address space
+ control (ASC) regions, or secure read reference to an ASC region
+ not enabled for secure access, or nonsecure read reference to an ASC
+ region not enabled for nonsecure access.
+ [RDNXM] interrupts can occur during normal operation as the cores are
+ allowed to prefetch to nonexistent memory locations. Therefore,
+ [RDNXM] is for informational purposes only.
+ See L2C_TAD()_ERR for logged information.
+ See L2C_ASC_REGION()_START, L2C_ASC_REGION()_END, and
+ L2C_ASC_REGION()_ATTR for ASC region specification. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1C/H) Illegal read to disabled LMC error. A DRAM read arrived before LMC was enabled.
+ Should not occur during normal operation.
+ This may be considered fatal. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1C/H) Illegal write to disabled LMC error. A DRAM write arrived before LMC was enabled.
+ Should not occur during normal operation.
+ This may be considered fatal. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1C/H) One or more LFB entries have encountered a timeout condition. When the [LFBTO]
+ timeout condition occurs, L2C_TAD()_TIMEOUT is loaded with info from the first
+ LFB that timed out. If multiple LFBs timed out simultaneously, it captures info
+ from the lowest LFB number that timed out.
+ Should not occur during normal operation. OCI/CCPI link failures may cause this
+ failure. This may be an indication of hardware failure, and may be considered
+ fatal. */
+ uint64_t gsyncto : 1; /**< [ 18: 18](R/W1C/H) Global sync OCI timeout. Should not occur during normal operation. OCI/CCPI link
+ failures may cause this failure. This may be an indication of hardware failure,
+ and may be considered fatal. */
+ uint64_t reserved_19_31 : 13;
+ uint64_t rtgsbe : 1; /**< [ 32: 32](R/W1C/H) RTG single-bit error on a read. See L2C_TAD()_RTG_ERR for logged
+ information. When [RTGSBE] is set, hardware corrected the error before using the
+ RTG tag, but did not correct any stored value. When [RTGSBE] is set, software
+ should eject the RTG location indicated by the corresponding
+ L2C_TAD()_RTG_ERR[WAY,L2IDX] (via a SYS CVMCACHEWBIL2I instruction below)
+ before clearing [RTGSBE]. Otherwise, hardware may encounter the error again the
+ next time the same RTG location is referenced. Software may also choose to count
+ the number of these single-bit errors.
+
+ The SYS CVMCACHEWBIL2I instruction payload should have:
+ \<pre\>
+ payload\<24\> = 1
+ payload\<23:20\> = L2C_TAD()_RTG_ERR[WAY]
+ payload\<19:7\> = L2C_TAD()_RTG_ERR[L2IDX]
+ \</pre\>
+ Note that L2C_CTL[DISIDXALIAS] has no effect on the payload. */
+ uint64_t rtgdbe : 1; /**< [ 33: 33](R/W1C/H) RTG double-bit error.
+ See L2C_TAD()_RTG_ERR for logged information.
+ An indication of a hardware failure and may be considered fatal. */
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1C/H) Illegal read operation to a remote node with L2C_OCI_CTL[ENAOCI][node]
+ clear. Note [RDDISOCI] interrupts can occur during normal operation as the cores
+ are allowed to prefetch to nonexistent memory locations. Therefore, [RDDISOCI]
+ is for informational purposes only. See L2C_TAD()_ERR for logged information. */
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1C/H) Illegal write operation to a remote node with L2C_OCI_CTL[ENAOCI][node] clear. See
+ L2C_TAD()_ERR for logged information.
+ During normal hardware operation, an indication of a software failure and may be
+ considered fatal. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_l2c_tadx_int_w1c bdk_l2c_tadx_int_w1c_t;
+
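+/* Illustrative sketch (a hypothetical helper, not a BDK API): the [L2DSBE],
+ * [TAGSBE], and [RTGSBE] descriptions above specify, per model, how the SYS
+ * CVMCACHEWBIL2I payload is assembled from L2C_TAD()_TQD_ERR fields. Assuming
+ * l2didx and qdnum hold values read from that CSR, and tad is the TAD index of
+ * this CSR, the CN88XX L2DSBE payload documented above could be built as:
+ *
+ *   static inline uint64_t example_cn88xx_l2dsbe_wbil2i_payload(uint64_t l2didx,
+ *                                                               uint64_t qdnum,
+ *                                                               uint64_t tad)
+ *   {
+ *       uint64_t payload = 0;                    // payload<24> = 0
+ *       payload |= ((l2didx >> 7) & 0xf) << 20;  // payload<23:20> = L2DIDX<10:7> (way)
+ *       payload |= (l2didx & 0x7f) << 13;        // payload<19:13> = L2DIDX<6:0>
+ *       payload |= ((l2didx >> 11) & 0x3) << 11; // payload<12:11> = L2DIDX<12:11>
+ *       payload |= ((qdnum >> 2) & 0x1) << 10;   // payload<10>    = QDNUM<2>
+ *       payload |= (tad & 0x7) << 7;             // payload<9:7>   = TAD index
+ *       return payload;
+ *   }
+ *
+ * CN81XX and CN83XX carry fewer TADs, so their bit positions differ exactly as
+ * documented in the per-model structs above. */
+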
+static inline uint64_t BDK_L2C_TADX_INT_W1C(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_TADX_INT_W1C(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e050040000ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e050040000ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=7))
+ return 0x87e050040000ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("L2C_TADX_INT_W1C", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_TADX_INT_W1C(a) bdk_l2c_tadx_int_w1c_t
+#define bustype_BDK_L2C_TADX_INT_W1C(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_TADX_INT_W1C(a) "L2C_TADX_INT_W1C"
+#define device_bar_BDK_L2C_TADX_INT_W1C(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_TADX_INT_W1C(a) (a)
+#define arguments_BDK_L2C_TADX_INT_W1C(a) (a),-1,-1,-1
+
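+/* Illustrative usage sketch (a hypothetical helper, not a BDK API), assuming
+ * the BDK's generic BDK_CSR_READ(node, csr) and BDK_CSR_WRITE(node, csr, value)
+ * accessors used elsewhere in this import: read the TAD interrupt summary and
+ * acknowledge a corrected L2D single-bit error via the R/W1C bit.
+ *
+ *   static inline void example_ack_l2dsbe(bdk_node_t node, int tad)
+ *   {
+ *       bdk_l2c_tadx_int_w1c_t w1c;
+ *       w1c.u = BDK_CSR_READ(node, BDK_L2C_TADX_INT_W1C(tad));
+ *       if (w1c.s.l2dsbe)
+ *       {
+ *           w1c.u = 0;
+ *           w1c.s.l2dsbe = 1; // writing 1 clears the W1C bit
+ *           BDK_CSR_WRITE(node, BDK_L2C_TADX_INT_W1C(tad), w1c.u);
+ *       }
+ *   }
+ */
+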
+/**
+ * Register (RSL) l2c_tad#_int_w1s
+ *
+ * L2C TAD Interrupt Set Registers
+ * This register sets interrupt bits.
+ */
+union bdk_l2c_tadx_int_w1s
+{
+ uint64_t u;
+ struct bdk_l2c_tadx_int_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[WRDISOCI]. */
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[RDDISOCI]. */
+ uint64_t rtgdbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[RTGDBE]. */
+ uint64_t rtgsbe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[RTGSBE]. */
+ uint64_t reserved_19_31 : 13;
+ uint64_t gsyncto : 1; /**< [ 18: 18](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[GSYNCTO]. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[LFBTO]. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[WRDISLMC]. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[RDDISLMC]. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[RDNXM]. */
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[WRNXM]. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t noway : 1; /**< [ 10: 10](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[NOWAY]. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[TAGDBE]. */
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[TAGSBE]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[FBFDBE]. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[FBFSBE]. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[SBFDBE]. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[SBFSBE]. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[L2DDBE]. */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[L2DSBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[L2DSBE]. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[L2DDBE]. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[SBFSBE]. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[SBFDBE]. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[FBFSBE]. */
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[FBFDBE]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[TAGSBE]. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[TAGDBE]. */
+ uint64_t noway : 1; /**< [ 10: 10](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[NOWAY]. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[WRNXM]. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[RDNXM]. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[RDDISLMC]. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[WRDISLMC]. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[LFBTO]. */
+ uint64_t gsyncto : 1; /**< [ 18: 18](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[GSYNCTO]. */
+ uint64_t reserved_19_31 : 13;
+ uint64_t rtgsbe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[RTGSBE]. */
+ uint64_t rtgdbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[RTGDBE]. */
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[RDDISOCI]. */
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[WRDISOCI]. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_l2c_tadx_int_w1s_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[WRDISOCI]. */
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[RDDISOCI]. */
+ uint64_t rtgdbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[RTGDBE]. */
+ uint64_t rtgsbe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[RTGSBE]. */
+ uint64_t reserved_19_31 : 13;
+ uint64_t reserved_18 : 1;
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[LFBTO]. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[WRDISLMC]. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[RDDISLMC]. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[RDNXM]. */
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[WRNXM]. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t noway : 1; /**< [ 10: 10](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[NOWAY]. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[TAGDBE]. */
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[TAGSBE]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[FBFDBE]. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[FBFSBE]. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[SBFDBE]. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[SBFSBE]. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[L2DDBE]. */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[L2DSBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[L2DSBE]. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[L2DDBE]. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[SBFSBE]. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[SBFDBE]. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[FBFSBE]. */
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[FBFDBE]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[TAGSBE]. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[TAGDBE]. */
+ uint64_t noway : 1; /**< [ 10: 10](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[NOWAY]. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[WRNXM]. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[RDNXM]. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[RDDISLMC]. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[WRDISLMC]. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[LFBTO]. */
+ uint64_t reserved_18 : 1;
+ uint64_t reserved_19_31 : 13;
+ uint64_t rtgsbe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[RTGSBE]. */
+ uint64_t rtgdbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[RTGDBE]. */
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[RDDISOCI]. */
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[WRDISOCI]. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_l2c_tadx_int_w1s_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1S/H) Reads or sets L2C_TAD(0)_INT_W1C[WRDISOCI]. */
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1S/H) Reads or sets L2C_TAD(0)_INT_W1C[RDDISOCI]. */
+ uint64_t reserved_19_33 : 15;
+ uint64_t gsyncto : 1; /**< [ 18: 18](R/W1S/H) Reads or sets L2C_TAD(0)_INT_W1C[GSYNCTO]. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1S/H) Reads or sets L2C_TAD(0)_INT_W1C[LFBTO]. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1S/H) Reads or sets L2C_TAD(0)_INT_W1C[WRDISLMC]. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1S/H) Reads or sets L2C_TAD(0)_INT_W1C[RDDISLMC]. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1S/H) Reads or sets L2C_TAD(0)_INT_W1C[RDNXM]. */
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1S/H) Reads or sets L2C_TAD(0)_INT_W1C[WRNXM]. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t noway : 1; /**< [ 10: 10](R/W1S/H) Reads or sets L2C_TAD(0)_INT_W1C[NOWAY]. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets L2C_TAD(0)_INT_W1C[TAGDBE]. */
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets L2C_TAD(0)_INT_W1C[TAGSBE]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets L2C_TAD(0)_INT_W1C[FBFDBE]. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets L2C_TAD(0)_INT_W1C[FBFSBE]. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets L2C_TAD(0)_INT_W1C[SBFDBE]. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets L2C_TAD(0)_INT_W1C[SBFSBE]. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets L2C_TAD(0)_INT_W1C[L2DDBE]. */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets L2C_TAD(0)_INT_W1C[L2DSBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets L2C_TAD(0)_INT_W1C[L2DSBE]. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets L2C_TAD(0)_INT_W1C[L2DDBE]. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets L2C_TAD(0)_INT_W1C[SBFSBE]. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets L2C_TAD(0)_INT_W1C[SBFDBE]. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets L2C_TAD(0)_INT_W1C[FBFSBE]. */
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets L2C_TAD(0)_INT_W1C[FBFDBE]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets L2C_TAD(0)_INT_W1C[TAGSBE]. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets L2C_TAD(0)_INT_W1C[TAGDBE]. */
+ uint64_t noway : 1; /**< [ 10: 10](R/W1S/H) Reads or sets L2C_TAD(0)_INT_W1C[NOWAY]. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1S/H) Reads or sets L2C_TAD(0)_INT_W1C[WRNXM]. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1S/H) Reads or sets L2C_TAD(0)_INT_W1C[RDNXM]. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1S/H) Reads or sets L2C_TAD(0)_INT_W1C[RDDISLMC]. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1S/H) Reads or sets L2C_TAD(0)_INT_W1C[WRDISLMC]. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1S/H) Reads or sets L2C_TAD(0)_INT_W1C[LFBTO]. */
+ uint64_t gsyncto : 1; /**< [ 18: 18](R/W1S/H) Reads or sets L2C_TAD(0)_INT_W1C[GSYNCTO]. */
+ uint64_t reserved_19_33 : 15;
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1S/H) Reads or sets L2C_TAD(0)_INT_W1C[RDDISOCI]. */
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1S/H) Reads or sets L2C_TAD(0)_INT_W1C[WRDISOCI]. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_l2c_tadx_int_w1s_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[WRDISOCI]. */
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[RDDISOCI]. */
+ uint64_t reserved_19_33 : 15;
+ uint64_t gsyncto : 1; /**< [ 18: 18](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[GSYNCTO]. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[LFBTO]. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[WRDISLMC]. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[RDDISLMC]. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[RDNXM]. */
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[WRNXM]. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t noway : 1; /**< [ 10: 10](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[NOWAY]. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[TAGDBE]. */
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[TAGSBE]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[FBFDBE]. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[FBFSBE]. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[SBFDBE]. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[SBFSBE]. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[L2DDBE]. */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[L2DSBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[L2DSBE]. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[L2DDBE]. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[SBFSBE]. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[SBFDBE]. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[FBFSBE]. */
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[FBFDBE]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[TAGSBE]. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[TAGDBE]. */
+ uint64_t noway : 1; /**< [ 10: 10](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[NOWAY]. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[WRNXM]. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[RDNXM]. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[RDDISLMC]. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[WRDISLMC]. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[LFBTO]. */
+ uint64_t gsyncto : 1; /**< [ 18: 18](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[GSYNCTO]. */
+ uint64_t reserved_19_33 : 15;
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[RDDISOCI]. */
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1S/H) Reads or sets L2C_TAD(0..3)_INT_W1C[WRDISOCI]. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } cn83xx;
+ struct bdk_l2c_tadx_int_w1s_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[WRDISOCI]. */
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[RDDISOCI]. */
+ uint64_t rtgdbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[RTGDBE]. */
+ uint64_t rtgsbe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[RTGSBE]. */
+ uint64_t reserved_19_31 : 13;
+ uint64_t gsyncto : 1; /**< [ 18: 18](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[GSYNCTO]. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[LFBTO]. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[WRDISLMC]. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[RDDISLMC]. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[RDNXM]. */
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[WRNXM]. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t noway : 1; /**< [ 10: 10](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[NOWAY]. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[TAGDBE]. */
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[TAGSBE]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[FBFDBE]. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[FBFSBE]. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[SBFDBE]. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[SBFSBE]. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[L2DDBE]. */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[L2DSBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t l2dsbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[L2DSBE]. */
+ uint64_t l2ddbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[L2DDBE]. */
+ uint64_t sbfsbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[SBFSBE]. */
+ uint64_t sbfdbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[SBFDBE]. */
+ uint64_t fbfsbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[FBFSBE]. */
+ uint64_t fbfdbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[FBFDBE]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t tagsbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[TAGSBE]. */
+ uint64_t tagdbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[TAGDBE]. */
+ uint64_t noway : 1; /**< [ 10: 10](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[NOWAY]. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t wrnxm : 1; /**< [ 13: 13](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[WRNXM]. */
+ uint64_t rdnxm : 1; /**< [ 14: 14](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[RDNXM]. */
+ uint64_t rddislmc : 1; /**< [ 15: 15](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[RDDISLMC]. */
+ uint64_t wrdislmc : 1; /**< [ 16: 16](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[WRDISLMC]. */
+ uint64_t lfbto : 1; /**< [ 17: 17](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[LFBTO]. */
+ uint64_t gsyncto : 1; /**< [ 18: 18](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[GSYNCTO]. */
+ uint64_t reserved_19_31 : 13;
+ uint64_t rtgsbe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[RTGSBE]. */
+ uint64_t rtgdbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[RTGDBE]. */
+ uint64_t rddisoci : 1; /**< [ 34: 34](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[RDDISOCI]. */
+ uint64_t wrdisoci : 1; /**< [ 35: 35](R/W1S/H) Reads or sets L2C_TAD(0..7)_INT_W1C[WRDISOCI]. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_l2c_tadx_int_w1s bdk_l2c_tadx_int_w1s_t;
+
+static inline uint64_t BDK_L2C_TADX_INT_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_TADX_INT_W1S(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e050040008ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e050040008ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=7))
+ return 0x87e050040008ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("L2C_TADX_INT_W1S", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_TADX_INT_W1S(a) bdk_l2c_tadx_int_w1s_t
+#define bustype_BDK_L2C_TADX_INT_W1S(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_TADX_INT_W1S(a) "L2C_TADX_INT_W1S"
+#define device_bar_BDK_L2C_TADX_INT_W1S(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_TADX_INT_W1S(a) (a)
+#define arguments_BDK_L2C_TADX_INT_W1S(a) (a),-1,-1,-1
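+
+/* Usage sketch (illustrative, not part of the auto-generated register
+ * definitions): the W1C/W1S pair above follows the usual interrupt idiom --
+ * writing a one to a W1C bit acknowledges (clears) it, while writing a one
+ * to the matching W1S bit forces it, e.g. for interrupt testing. A minimal
+ * sketch, assuming the BDK_CSR_INIT/BDK_CSR_WRITE helpers from bdk-csr.h:
+ */
+#if 0 /* example only -- not compiled */
+static void l2c_tad_ack_ecc_interrupts(bdk_node_t node, int tad)
+{
+    /* Snapshot the currently pending interrupt bits for this TAD. */
+    BDK_CSR_INIT(pending, node, BDK_L2C_TADX_INT_W1C(tad));
+    /* Writing the snapshot back clears exactly the bits that were set. */
+    if (pending.u)
+        BDK_CSR_WRITE(node, BDK_L2C_TADX_INT_W1C(tad), pending.u);
+}
+#endif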
+
+/**
+ * Register (RSL) l2c_tad#_msix_pba#
+ *
+ * L2C TAD MSI-X Pending Bit Array Registers
+ * This register is the MSI-X PBA table; the bit number is indexed by the L2C_TAD_INT_VEC_E
+ * enumeration.
+ */
+union bdk_l2c_tadx_msix_pbax
+{
+ uint64_t u;
+ struct bdk_l2c_tadx_msix_pbax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pend : 64; /**< [ 63: 0](RO) Pending message for the associated L2C_TAD()_MSIX_VEC()_CTL, enumerated by
+ L2C_TAD_INT_VEC_E. Bits
+ that have no associated L2C_TAD_INT_VEC_E are 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t pend : 64; /**< [ 63: 0](RO) Pending message for the associated L2C_TAD()_MSIX_VEC()_CTL, enumerated by
+ L2C_TAD_INT_VEC_E. Bits
+ that have no associated L2C_TAD_INT_VEC_E are 0. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_tadx_msix_pbax_s cn; */
+};
+typedef union bdk_l2c_tadx_msix_pbax bdk_l2c_tadx_msix_pbax_t;
+
+static inline uint64_t BDK_L2C_TADX_MSIX_PBAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_TADX_MSIX_PBAX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b==0)))
+ return 0x87e050ff0000ll + 0x1000000ll * ((a) & 0x0) + 8ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b==0)))
+ return 0x87e050ff0000ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=7) && (b==0)))
+ return 0x87e050ff0000ll + 0x1000000ll * ((a) & 0x7) + 8ll * ((b) & 0x0);
+ __bdk_csr_fatal("L2C_TADX_MSIX_PBAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_L2C_TADX_MSIX_PBAX(a,b) bdk_l2c_tadx_msix_pbax_t
+#define bustype_BDK_L2C_TADX_MSIX_PBAX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_TADX_MSIX_PBAX(a,b) "L2C_TADX_MSIX_PBAX"
+#define device_bar_BDK_L2C_TADX_MSIX_PBAX(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_L2C_TADX_MSIX_PBAX(a,b) (a)
+#define arguments_BDK_L2C_TADX_MSIX_PBAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) l2c_tad#_msix_vec#_addr
+ *
+ * L2C TAD MSI-X Vector-Table Address Register
+ * This register is the MSI-X vector table, indexed by the L2C_TAD_INT_VEC_E enumeration.
+ */
+union bdk_l2c_tadx_msix_vecx_addr
+{
+ uint64_t u;
+ struct bdk_l2c_tadx_msix_vecx_addr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_1 : 1;
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's L2C_TAD()_MSIX_VEC()_ADDR, L2C_TAD()_MSIX_VEC()_CTL, and corresponding
+                                                                 bit of L2C_TAD()_MSIX_PBA() are RAZ/WI and do not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_L2C_TAD_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+ set, all vectors are secure and function as if [SECVEC] was set. */
+#else /* Word 0 - Little Endian */
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's L2C_TAD()_MSIX_VEC()_ADDR, L2C_TAD()_MSIX_VEC()_CTL, and corresponding
+                                                                 bit of L2C_TAD()_MSIX_PBA() are RAZ/WI and do not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_L2C_TAD_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+ set, all vectors are secure and function as if [SECVEC] was set. */
+ uint64_t reserved_1 : 1;
+ uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_tadx_msix_vecx_addr_s cn; */
+};
+typedef union bdk_l2c_tadx_msix_vecx_addr bdk_l2c_tadx_msix_vecx_addr_t;
+
+static inline uint64_t BDK_L2C_TADX_MSIX_VECX_ADDR(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_TADX_MSIX_VECX_ADDR(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b==0)))
+ return 0x87e050f00000ll + 0x1000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b==0)))
+ return 0x87e050f00000ll + 0x1000000ll * ((a) & 0x3) + 0x10ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=7) && (b==0)))
+ return 0x87e050f00000ll + 0x1000000ll * ((a) & 0x7) + 0x10ll * ((b) & 0x0);
+ __bdk_csr_fatal("L2C_TADX_MSIX_VECX_ADDR", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_L2C_TADX_MSIX_VECX_ADDR(a,b) bdk_l2c_tadx_msix_vecx_addr_t
+#define bustype_BDK_L2C_TADX_MSIX_VECX_ADDR(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_TADX_MSIX_VECX_ADDR(a,b) "L2C_TADX_MSIX_VECX_ADDR"
+#define device_bar_BDK_L2C_TADX_MSIX_VECX_ADDR(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_L2C_TADX_MSIX_VECX_ADDR(a,b) (a)
+#define arguments_BDK_L2C_TADX_MSIX_VECX_ADDR(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) l2c_tad#_msix_vec#_ctl
+ *
+ * L2C TAD MSI-X Vector-Table Control and Data Register
+ * This register is the MSI-X vector table, indexed by the L2C_TAD_INT_VEC_E enumeration.
+ */
+union bdk_l2c_tadx_msix_vecx_ctl
+{
+ uint64_t u;
+ struct bdk_l2c_tadx_msix_vecx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t data : 20; /**< [ 19: 0](R/W) Data to use for MSI-X delivery of this vector. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 20; /**< [ 19: 0](R/W) Data to use for MSI-X delivery of this vector. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_tadx_msix_vecx_ctl_s cn; */
+};
+typedef union bdk_l2c_tadx_msix_vecx_ctl bdk_l2c_tadx_msix_vecx_ctl_t;
+
+static inline uint64_t BDK_L2C_TADX_MSIX_VECX_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_TADX_MSIX_VECX_CTL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b==0)))
+ return 0x87e050f00008ll + 0x1000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b==0)))
+ return 0x87e050f00008ll + 0x1000000ll * ((a) & 0x3) + 0x10ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=7) && (b==0)))
+ return 0x87e050f00008ll + 0x1000000ll * ((a) & 0x7) + 0x10ll * ((b) & 0x0);
+ __bdk_csr_fatal("L2C_TADX_MSIX_VECX_CTL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_L2C_TADX_MSIX_VECX_CTL(a,b) bdk_l2c_tadx_msix_vecx_ctl_t
+#define bustype_BDK_L2C_TADX_MSIX_VECX_CTL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_TADX_MSIX_VECX_CTL(a,b) "L2C_TADX_MSIX_VECX_CTL"
+#define device_bar_BDK_L2C_TADX_MSIX_VECX_CTL(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_L2C_TADX_MSIX_VECX_CTL(a,b) (a)
+#define arguments_BDK_L2C_TADX_MSIX_VECX_CTL(a,b) (a),(b),-1,-1
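+
+/* Usage sketch (illustrative, not part of the auto-generated register
+ * definitions): programming one MSI-X vector means writing the delivery
+ * IOVA into L2C_TAD()_MSIX_VEC()_ADDR, the message data into
+ * L2C_TAD()_MSIX_VEC()_CTL[DATA], and clearing [MASK]. Assumes the
+ * BDK_CSR_MODIFY helper from bdk-csr.h; `iova` and `msg_data` are
+ * hypothetical caller-supplied values:
+ */
+#if 0 /* example only -- not compiled */
+static void l2c_tad_setup_msix_vec(bdk_node_t node, int tad, int vec,
+                                   uint64_t iova, uint32_t msg_data)
+{
+    /* [ADDR] holds IOVA<48:2>, so drop the two low (zero) address bits. */
+    BDK_CSR_MODIFY(c, node, BDK_L2C_TADX_MSIX_VECX_ADDR(tad, vec),
+                   c.s.addr = iova >> 2);
+    BDK_CSR_MODIFY(c, node, BDK_L2C_TADX_MSIX_VECX_CTL(tad, vec),
+                   c.s.data = msg_data;
+                   c.s.mask = 0 /* deliver interrupts for this vector */);
+}
+#endif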
+
+/**
+ * Register (RSL) l2c_tad#_rtg_err
+ *
+ * Level 2 Cache TAD RTG Error Information Registers
+ * This register records error information for all RTG SBE/DBE errors.
+ * The priority of errors (lowest to highest) is SBE, DBE. An error locks [SYN], [WAY],
+ * and [L2IDX] for equal or lower priority errors until cleared by software.
+ * The syndrome is recorded for DBE errors, though the utility of the value is not clear.
+ * [L2IDX]\<19:7\> is the L2 block index associated with the command which had no way to allocate.
+ */
+union bdk_l2c_tadx_rtg_err
+{
+ uint64_t u;
+ struct bdk_l2c_tadx_rtg_err_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t rtgdbe : 1; /**< [ 63: 63](RO/H) Information refers to a double-bit RTG ECC error. */
+ uint64_t rtgsbe : 1; /**< [ 62: 62](RO/H) Information refers to a single-bit RTG ECC error. */
+ uint64_t reserved_39_61 : 23;
+ uint64_t syn : 7; /**< [ 38: 32](RO/H) Syndrome for the single-bit error. */
+ uint64_t reserved_24_31 : 8;
+ uint64_t way : 4; /**< [ 23: 20](RO/H) Way of the L2 block containing the error. */
+ uint64_t l2idx : 13; /**< [ 19: 7](RO/H) Index of the L2 block containing the error.
+ See L2C_TAD()_INT_W1C[RTGSBE] for an important use of this field. */
+ uint64_t reserved_0_6 : 7;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_6 : 7;
+ uint64_t l2idx : 13; /**< [ 19: 7](RO/H) Index of the L2 block containing the error.
+ See L2C_TAD()_INT_W1C[RTGSBE] for an important use of this field. */
+ uint64_t way : 4; /**< [ 23: 20](RO/H) Way of the L2 block containing the error. */
+ uint64_t reserved_24_31 : 8;
+ uint64_t syn : 7; /**< [ 38: 32](RO/H) Syndrome for the single-bit error. */
+ uint64_t reserved_39_61 : 23;
+ uint64_t rtgsbe : 1; /**< [ 62: 62](RO/H) Information refers to a single-bit RTG ECC error. */
+ uint64_t rtgdbe : 1; /**< [ 63: 63](RO/H) Information refers to a double-bit RTG ECC error. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_tadx_rtg_err_s cn; */
+};
+typedef union bdk_l2c_tadx_rtg_err bdk_l2c_tadx_rtg_err_t;
+
+static inline uint64_t BDK_L2C_TADX_RTG_ERR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_TADX_RTG_ERR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X) && (a<=7))
+ return 0x87e050060300ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("L2C_TADX_RTG_ERR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_TADX_RTG_ERR(a) bdk_l2c_tadx_rtg_err_t
+#define bustype_BDK_L2C_TADX_RTG_ERR(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_TADX_RTG_ERR(a) "L2C_TADX_RTG_ERR"
+#define device_bar_BDK_L2C_TADX_RTG_ERR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_TADX_RTG_ERR(a) (a)
+#define arguments_BDK_L2C_TADX_RTG_ERR(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) l2c_tad#_tbf_bist_status
+ *
+ * L2C TAD Quad Buffer BIST Status Registers
+ */
+union bdk_l2c_tadx_tbf_bist_status
+{
+ uint64_t u;
+ struct bdk_l2c_tadx_tbf_bist_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t vbffl : 16; /**< [ 63: 48](RO/H) BIST failure status for VBF ({QD7H1,QD7H0, ... , QD0H1, QD0H0}). */
+ uint64_t sbffl : 16; /**< [ 47: 32](RO/H) BIST failure status for SBF ({QD7H1,QD7H0, ... , QD0H1, QD0H0}). */
+ uint64_t fbfrspfl : 16; /**< [ 31: 16](RO/H) BIST failure status for FBF RSP port ({QD7H1,QD7H0, ... , QD0H1, QD0H0}). */
+ uint64_t fbfwrpfl : 16; /**< [ 15: 0](RO/H) BIST failure status for FBF WRP port ({QD7H1,QD7H0, ... , QD0H1, QD0H0}). */
+#else /* Word 0 - Little Endian */
+ uint64_t fbfwrpfl : 16; /**< [ 15: 0](RO/H) BIST failure status for FBF WRP port ({QD7H1,QD7H0, ... , QD0H1, QD0H0}). */
+ uint64_t fbfrspfl : 16; /**< [ 31: 16](RO/H) BIST failure status for FBF RSP port ({QD7H1,QD7H0, ... , QD0H1, QD0H0}). */
+ uint64_t sbffl : 16; /**< [ 47: 32](RO/H) BIST failure status for SBF ({QD7H1,QD7H0, ... , QD0H1, QD0H0}). */
+ uint64_t vbffl : 16; /**< [ 63: 48](RO/H) BIST failure status for VBF ({QD7H1,QD7H0, ... , QD0H1, QD0H0}). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_tadx_tbf_bist_status_s cn; */
+};
+typedef union bdk_l2c_tadx_tbf_bist_status bdk_l2c_tadx_tbf_bist_status_t;
+
+static inline uint64_t BDK_L2C_TADX_TBF_BIST_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_TADX_TBF_BIST_STATUS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e050070000ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e050070000ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=7))
+ return 0x87e050070000ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("L2C_TADX_TBF_BIST_STATUS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_TADX_TBF_BIST_STATUS(a) bdk_l2c_tadx_tbf_bist_status_t
+#define bustype_BDK_L2C_TADX_TBF_BIST_STATUS(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_TADX_TBF_BIST_STATUS(a) "L2C_TADX_TBF_BIST_STATUS"
+#define device_bar_BDK_L2C_TADX_TBF_BIST_STATUS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_TADX_TBF_BIST_STATUS(a) (a)
+#define arguments_BDK_L2C_TADX_TBF_BIST_STATUS(a) (a),-1,-1,-1
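+
+/* Usage sketch (illustrative, not part of the auto-generated register
+ * definitions): each 16-bit field above carries one fail bit per quad-half,
+ * ordered {QD7H1,QD7H0, ... , QD0H1,QD0H0}, so any nonzero field pinpoints
+ * the failing buffer. A sketch assuming BDK_CSR_INIT from bdk-csr.h:
+ */
+#if 0 /* example only -- not compiled */
+static int l2c_tad_tbf_bist_failed(bdk_node_t node, int tad)
+{
+    BDK_CSR_INIT(bist, node, BDK_L2C_TADX_TBF_BIST_STATUS(tad));
+    /* Any set bit in any field means at least one quad-half failed BIST. */
+    return (bist.s.vbffl | bist.s.sbffl |
+            bist.s.fbfrspfl | bist.s.fbfwrpfl) != 0;
+}
+#endif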
+
+/**
+ * Register (RSL) l2c_tad#_tdt_bist_status
+ *
+ * L2C TAD Data BIST Status Registers
+ */
+union bdk_l2c_tadx_tdt_bist_status
+{
+ uint64_t u;
+ struct bdk_l2c_tadx_tdt_bist_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t l2dfl : 16; /**< [ 15: 0](RO/H) BIST failure status for L2D ({QD7H1,QD7H0, ... , QD0H1, QD0H0}). */
+#else /* Word 0 - Little Endian */
+ uint64_t l2dfl : 16; /**< [ 15: 0](RO/H) BIST failure status for L2D ({QD7H1,QD7H0, ... , QD0H1, QD0H0}). */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_tadx_tdt_bist_status_s cn; */
+};
+typedef union bdk_l2c_tadx_tdt_bist_status bdk_l2c_tadx_tdt_bist_status_t;
+
+static inline uint64_t BDK_L2C_TADX_TDT_BIST_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_TADX_TDT_BIST_STATUS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e050070100ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e050070100ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=7))
+ return 0x87e050070100ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("L2C_TADX_TDT_BIST_STATUS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_TADX_TDT_BIST_STATUS(a) bdk_l2c_tadx_tdt_bist_status_t
+#define bustype_BDK_L2C_TADX_TDT_BIST_STATUS(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_TADX_TDT_BIST_STATUS(a) "L2C_TADX_TDT_BIST_STATUS"
+#define device_bar_BDK_L2C_TADX_TDT_BIST_STATUS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_TADX_TDT_BIST_STATUS(a) (a)
+#define arguments_BDK_L2C_TADX_TDT_BIST_STATUS(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) l2c_tad#_tqd_err
+ *
+ * L2C TAD Quad Error Information Registers
+ * This register records error information for all L2D/SBF/FBF errors.
+ * An error locks the [L2DIDX] and [SYN] fields and sets the bit corresponding to the error
+ * received.
+ * DBE errors take priority and overwrite an earlier logged SBE error. Only one of SBE/DBE is set
+ * at any given time and serves to document which error the [L2DIDX]/[SYN] is associated with.
+ * The syndrome is recorded for DBE errors, though the utility of the value is not clear.
+ */
+union bdk_l2c_tadx_tqd_err
+{
+ uint64_t u;
+ struct bdk_l2c_tadx_tqd_err_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t l2ddbe : 1; /**< [ 63: 63](RO/H) L2DIDX/SYN corresponds to a double-bit L2D ECC error. */
+ uint64_t sbfdbe : 1; /**< [ 62: 62](RO/H) L2DIDX/SYN corresponds to a double-bit SBF ECC error. */
+ uint64_t fbfdbe : 1; /**< [ 61: 61](RO/H) L2DIDX/SYN corresponds to a double-bit FBF ECC error. */
+ uint64_t l2dsbe : 1; /**< [ 60: 60](RO/H) L2DIDX/SYN corresponds to a single-bit L2D ECC error. */
+ uint64_t sbfsbe : 1; /**< [ 59: 59](RO/H) L2DIDX/SYN corresponds to a single-bit SBF ECC error. */
+ uint64_t fbfsbe : 1; /**< [ 58: 58](RO/H) L2DIDX/SYN corresponds to a single-bit FBF ECC error. */
+ uint64_t reserved_40_57 : 18;
+ uint64_t syn : 8; /**< [ 39: 32](RO/H) Error syndrome. */
+ uint64_t reserved_18_31 : 14;
+ uint64_t qdnum : 3; /**< [ 17: 15](RO/H) Quad containing the error. */
+        uint64_t qdhlf                 : 1;  /**< [ 14: 14](RO/H) Quad half of the quad containing the error. */
+ uint64_t l2didx : 14; /**< [ 13: 0](RO/H) For L2D errors, index within the quad-half containing the error. For SBF and FBF errors
+ \<13:5\> is 0x0 and \<4:0\> is the index of the error (\<4:1\> is lfbnum\<3:0\>, \<0\> is addr\<5\>).
+ See L2C_TAD()_INT_W1C[L2DSBE] for an important use of this field. */
+#else /* Word 0 - Little Endian */
+ uint64_t l2didx : 14; /**< [ 13: 0](RO/H) For L2D errors, index within the quad-half containing the error. For SBF and FBF errors
+ \<13:5\> is 0x0 and \<4:0\> is the index of the error (\<4:1\> is lfbnum\<3:0\>, \<0\> is addr\<5\>).
+ See L2C_TAD()_INT_W1C[L2DSBE] for an important use of this field. */
+        uint64_t qdhlf                 : 1;  /**< [ 14: 14](RO/H) Quad half of the quad containing the error. */
+ uint64_t qdnum : 3; /**< [ 17: 15](RO/H) Quad containing the error. */
+ uint64_t reserved_18_31 : 14;
+ uint64_t syn : 8; /**< [ 39: 32](RO/H) Error syndrome. */
+ uint64_t reserved_40_57 : 18;
+ uint64_t fbfsbe : 1; /**< [ 58: 58](RO/H) L2DIDX/SYN corresponds to a single-bit FBF ECC error. */
+ uint64_t sbfsbe : 1; /**< [ 59: 59](RO/H) L2DIDX/SYN corresponds to a single-bit SBF ECC error. */
+ uint64_t l2dsbe : 1; /**< [ 60: 60](RO/H) L2DIDX/SYN corresponds to a single-bit L2D ECC error. */
+ uint64_t fbfdbe : 1; /**< [ 61: 61](RO/H) L2DIDX/SYN corresponds to a double-bit FBF ECC error. */
+ uint64_t sbfdbe : 1; /**< [ 62: 62](RO/H) L2DIDX/SYN corresponds to a double-bit SBF ECC error. */
+ uint64_t l2ddbe : 1; /**< [ 63: 63](RO/H) L2DIDX/SYN corresponds to a double-bit L2D ECC error. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_l2c_tadx_tqd_err_s cn; */
+};
+typedef union bdk_l2c_tadx_tqd_err bdk_l2c_tadx_tqd_err_t;
+
+static inline uint64_t BDK_L2C_TADX_TQD_ERR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_TADX_TQD_ERR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e050060100ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e050060100ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=7))
+ return 0x87e050060100ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("L2C_TADX_TQD_ERR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_TADX_TQD_ERR(a) bdk_l2c_tadx_tqd_err_t
+#define bustype_BDK_L2C_TADX_TQD_ERR(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_TADX_TQD_ERR(a) "L2C_TADX_TQD_ERR"
+#define device_bar_BDK_L2C_TADX_TQD_ERR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_TADX_TQD_ERR(a) (a)
+#define arguments_BDK_L2C_TADX_TQD_ERR(a) (a),-1,-1,-1
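+
+/* Decode sketch (illustrative, not part of the auto-generated register
+ * definitions): after an L2D/SBF/FBF ECC interrupt, the locked fields above
+ * identify the failing quad, quad-half, index, and syndrome. Assumes
+ * BDK_CSR_INIT from bdk-csr.h and the BDK's printf:
+ */
+#if 0 /* example only -- not compiled */
+static void l2c_tad_report_tqd_err(bdk_node_t node, int tad)
+{
+    BDK_CSR_INIT(err, node, BDK_L2C_TADX_TQD_ERR(tad));
+    if (err.s.l2dsbe || err.s.l2ddbe)
+        printf("TAD%d: L2D %s-bit ECC error, quad %u half %u idx 0x%x syn 0x%x\n",
+               tad, err.s.l2ddbe ? "double" : "single",
+               (unsigned)err.s.qdnum, (unsigned)err.s.qdhlf,
+               (unsigned)err.s.l2didx, (unsigned)err.s.syn);
+}
+#endif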
+
+/**
+ * Register (RSL) l2c_tad#_ttg_bist_status
+ *
+ * L2C TAD Tag BIST Status Registers
+ */
+union bdk_l2c_tadx_ttg_bist_status
+{
+ uint64_t u;
+ struct bdk_l2c_tadx_ttg_bist_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_50_63 : 14;
+ uint64_t xmdmskfl : 2; /**< [ 49: 48](RO/H) BIST failure status for RSTP XMDMSK memories. */
+ uint64_t rtgfl : 16; /**< [ 47: 32](RO/H) BIST failure status for RTG ways. */
+ uint64_t reserved_18_31 : 14;
+ uint64_t lrulfbfl : 1; /**< [ 17: 17](RO) Reserved, always zero. */
+ uint64_t lrufl : 1; /**< [ 16: 16](RO/H) BIST failure status for tag LRU. */
+ uint64_t tagfl : 16; /**< [ 15: 0](RO/H) BIST failure status for TAG ways. */
+#else /* Word 0 - Little Endian */
+ uint64_t tagfl : 16; /**< [ 15: 0](RO/H) BIST failure status for TAG ways. */
+ uint64_t lrufl : 1; /**< [ 16: 16](RO/H) BIST failure status for tag LRU. */
+ uint64_t lrulfbfl : 1; /**< [ 17: 17](RO) Reserved, always zero. */
+ uint64_t reserved_18_31 : 14;
+ uint64_t rtgfl : 16; /**< [ 47: 32](RO/H) BIST failure status for RTG ways. */
+ uint64_t xmdmskfl : 2; /**< [ 49: 48](RO/H) BIST failure status for RSTP XMDMSK memories. */
+ uint64_t reserved_50_63 : 14;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_l2c_tadx_ttg_bist_status_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_50_63 : 14;
+ uint64_t xmdmskfl : 2; /**< [ 49: 48](RO/H) Reserved, always zero. */
+ uint64_t reserved_18_47 : 30;
+ uint64_t lrulfbfl : 1; /**< [ 17: 17](RO) Reserved, always zero. */
+ uint64_t lrufl : 1; /**< [ 16: 16](RO/H) BIST failure status for tag LRU. */
+ uint64_t tagfl : 16; /**< [ 15: 0](RO/H) BIST failure status for TAG ways. */
+#else /* Word 0 - Little Endian */
+ uint64_t tagfl : 16; /**< [ 15: 0](RO/H) BIST failure status for TAG ways. */
+ uint64_t lrufl : 1; /**< [ 16: 16](RO/H) BIST failure status for tag LRU. */
+ uint64_t lrulfbfl : 1; /**< [ 17: 17](RO) Reserved, always zero. */
+ uint64_t reserved_18_47 : 30;
+ uint64_t xmdmskfl : 2; /**< [ 49: 48](RO/H) Reserved, always zero. */
+ uint64_t reserved_50_63 : 14;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_l2c_tadx_ttg_bist_status_s cn88xx; */
+ struct bdk_l2c_tadx_ttg_bist_status_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_50_63 : 14;
+ uint64_t xmdmskfl : 2; /**< [ 49: 48](RO/H) BIST failure status for RSTP XMDMSK memories. */
+ uint64_t reserved_18_47 : 30;
+ uint64_t lrulfbfl : 1; /**< [ 17: 17](RO) Reserved, always zero. */
+ uint64_t lrufl : 1; /**< [ 16: 16](RO/H) BIST failure status for tag LRU. */
+ uint64_t tagfl : 16; /**< [ 15: 0](RO/H) BIST failure status for TAG ways. */
+#else /* Word 0 - Little Endian */
+ uint64_t tagfl : 16; /**< [ 15: 0](RO/H) BIST failure status for TAG ways. */
+ uint64_t lrufl : 1; /**< [ 16: 16](RO/H) BIST failure status for tag LRU. */
+ uint64_t lrulfbfl : 1; /**< [ 17: 17](RO) Reserved, always zero. */
+ uint64_t reserved_18_47 : 30;
+ uint64_t xmdmskfl : 2; /**< [ 49: 48](RO/H) BIST failure status for RSTP XMDMSK memories. */
+ uint64_t reserved_50_63 : 14;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_l2c_tadx_ttg_bist_status bdk_l2c_tadx_ttg_bist_status_t;
+
+static inline uint64_t BDK_L2C_TADX_TTG_BIST_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_TADX_TTG_BIST_STATUS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e050070200ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e050070200ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=7))
+ return 0x87e050070200ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("L2C_TADX_TTG_BIST_STATUS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_TADX_TTG_BIST_STATUS(a) bdk_l2c_tadx_ttg_bist_status_t
+#define bustype_BDK_L2C_TADX_TTG_BIST_STATUS(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_TADX_TTG_BIST_STATUS(a) "L2C_TADX_TTG_BIST_STATUS"
+#define device_bar_BDK_L2C_TADX_TTG_BIST_STATUS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_TADX_TTG_BIST_STATUS(a) (a)
+#define arguments_BDK_L2C_TADX_TTG_BIST_STATUS(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) l2c_tad#_ttg_err
+ *
+ * L2C TAD Tag Error Information Registers
+ * This register records error information for all TAG SBE/DBE/NOWAY errors.
+ * The priority of errors (lowest to highest) is NOWAY, SBE, DBE. An error locks [SYN], [WAY],
+ * and [L2IDX] for equal or lower priority errors until cleared by software.
+ * The syndrome is recorded for DBE errors, though the utility of the value is not clear.
+ * A NOWAY error does not change the value of the [SYN] field, and leaves [WAY] unpredictable.
+ * [L2IDX]\<19:7\> is the L2 block index associated with the command which had no way to allocate.
+ */
+union bdk_l2c_tadx_ttg_err
+{
+ uint64_t u;
+ struct bdk_l2c_tadx_ttg_err_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t tagdbe : 1; /**< [ 63: 63](RO/H) Information refers to a double-bit TAG ECC error. */
+ uint64_t tagsbe : 1; /**< [ 62: 62](RO/H) Information refers to a single-bit TAG ECC error. */
+ uint64_t noway : 1; /**< [ 61: 61](RO/H) Information refers to a NOWAY error. */
+ uint64_t reserved_39_60 : 22;
+ uint64_t syn : 7; /**< [ 38: 32](RO/H) Syndrome for the single-bit error. */
+ uint64_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_31 : 32;
+ uint64_t syn : 7; /**< [ 38: 32](RO/H) Syndrome for the single-bit error. */
+ uint64_t reserved_39_60 : 22;
+ uint64_t noway : 1; /**< [ 61: 61](RO/H) Information refers to a NOWAY error. */
+ uint64_t tagsbe : 1; /**< [ 62: 62](RO/H) Information refers to a single-bit TAG ECC error. */
+ uint64_t tagdbe : 1; /**< [ 63: 63](RO/H) Information refers to a double-bit TAG ECC error. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_l2c_tadx_ttg_err_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t tagdbe : 1; /**< [ 63: 63](RO/H) Information refers to a double-bit TAG ECC error. */
+ uint64_t tagsbe : 1; /**< [ 62: 62](RO/H) Information refers to a single-bit TAG ECC error. */
+ uint64_t noway : 1; /**< [ 61: 61](RO/H) Information refers to a NOWAY error. */
+ uint64_t reserved_39_60 : 22;
+ uint64_t syn : 7; /**< [ 38: 32](RO/H) Syndrome for the single-bit error. */
+ uint64_t reserved_21_31 : 11;
+ uint64_t way : 4; /**< [ 20: 17](RO/H) Way of the L2 block containing the error. */
+ uint64_t l2idx : 10; /**< [ 16: 7](RO/H) Index of the L2 block containing the error.
+ See L2C_TAD()_INT_W1C[TAGSBE] for an important use of this field. */
+ uint64_t reserved_0_6 : 7;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_6 : 7;
+ uint64_t l2idx : 10; /**< [ 16: 7](RO/H) Index of the L2 block containing the error.
+ See L2C_TAD()_INT_W1C[TAGSBE] for an important use of this field. */
+ uint64_t way : 4; /**< [ 20: 17](RO/H) Way of the L2 block containing the error. */
+ uint64_t reserved_21_31 : 11;
+ uint64_t syn : 7; /**< [ 38: 32](RO/H) Syndrome for the single-bit error. */
+ uint64_t reserved_39_60 : 22;
+ uint64_t noway : 1; /**< [ 61: 61](RO/H) Information refers to a NOWAY error. */
+ uint64_t tagsbe : 1; /**< [ 62: 62](RO/H) Information refers to a single-bit TAG ECC error. */
+ uint64_t tagdbe : 1; /**< [ 63: 63](RO/H) Information refers to a double-bit TAG ECC error. */
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_l2c_tadx_ttg_err_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t tagdbe : 1; /**< [ 63: 63](RO/H) Information refers to a double-bit TAG ECC error. */
+ uint64_t tagsbe : 1; /**< [ 62: 62](RO/H) Information refers to a single-bit TAG ECC error. */
+ uint64_t noway : 1; /**< [ 61: 61](RO/H) Information refers to a NOWAY error. */
+ uint64_t reserved_39_60 : 22;
+ uint64_t syn : 7; /**< [ 38: 32](RO/H) Syndrome for the single-bit error. */
+ uint64_t reserved_24_31 : 8;
+ uint64_t way : 4; /**< [ 23: 20](RO/H) Way of the L2 block containing the error. */
+ uint64_t l2idx : 13; /**< [ 19: 7](RO/H) Index of the L2 block containing the error.
+ See L2C_TAD()_INT_W1C[TAGSBE] for an important use of this field. */
+ uint64_t reserved_0_6 : 7;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_6 : 7;
+ uint64_t l2idx : 13; /**< [ 19: 7](RO/H) Index of the L2 block containing the error.
+ See L2C_TAD()_INT_W1C[TAGSBE] for an important use of this field. */
+ uint64_t way : 4; /**< [ 23: 20](RO/H) Way of the L2 block containing the error. */
+ uint64_t reserved_24_31 : 8;
+ uint64_t syn : 7; /**< [ 38: 32](RO/H) Syndrome for the single-bit error. */
+ uint64_t reserved_39_60 : 22;
+ uint64_t noway : 1; /**< [ 61: 61](RO/H) Information refers to a NOWAY error. */
+ uint64_t tagsbe : 1; /**< [ 62: 62](RO/H) Information refers to a single-bit TAG ECC error. */
+ uint64_t tagdbe : 1; /**< [ 63: 63](RO/H) Information refers to a double-bit TAG ECC error. */
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_l2c_tadx_ttg_err_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t tagdbe : 1; /**< [ 63: 63](RO/H) Information refers to a double-bit TAG ECC error. */
+ uint64_t tagsbe : 1; /**< [ 62: 62](RO/H) Information refers to a single-bit TAG ECC error. */
+ uint64_t noway : 1; /**< [ 61: 61](RO/H) Information refers to a NOWAY error. */
+ uint64_t reserved_39_60 : 22;
+ uint64_t syn : 7; /**< [ 38: 32](RO/H) Syndrome for the single-bit error. */
+ uint64_t reserved_23_31 : 9;
+ uint64_t way : 4; /**< [ 22: 19](RO/H) Way of the L2 block containing the error. */
+ uint64_t l2idx : 12; /**< [ 18: 7](RO/H) Index of the L2 block containing the error.
+ See L2C_TAD()_INT_W1C[TAGSBE] for an important use of this field. */
+ uint64_t reserved_0_6 : 7;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_6 : 7;
+ uint64_t l2idx : 12; /**< [ 18: 7](RO/H) Index of the L2 block containing the error.
+ See L2C_TAD()_INT_W1C[TAGSBE] for an important use of this field. */
+ uint64_t way : 4; /**< [ 22: 19](RO/H) Way of the L2 block containing the error. */
+ uint64_t reserved_23_31 : 9;
+ uint64_t syn : 7; /**< [ 38: 32](RO/H) Syndrome for the single-bit error. */
+ uint64_t reserved_39_60 : 22;
+ uint64_t noway : 1; /**< [ 61: 61](RO/H) Information refers to a NOWAY error. */
+ uint64_t tagsbe : 1; /**< [ 62: 62](RO/H) Information refers to a single-bit TAG ECC error. */
+ uint64_t tagdbe : 1; /**< [ 63: 63](RO/H) Information refers to a double-bit TAG ECC error. */
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_l2c_tadx_ttg_err bdk_l2c_tadx_ttg_err_t;
+
+static inline uint64_t BDK_L2C_TADX_TTG_ERR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_L2C_TADX_TTG_ERR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e050060200ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e050060200ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=7))
+ return 0x87e050060200ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("L2C_TADX_TTG_ERR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_L2C_TADX_TTG_ERR(a) bdk_l2c_tadx_ttg_err_t
+#define bustype_BDK_L2C_TADX_TTG_ERR(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_L2C_TADX_TTG_ERR(a) "L2C_TADX_TTG_ERR"
+#define device_bar_BDK_L2C_TADX_TTG_ERR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_L2C_TADX_TTG_ERR(a) (a)
+#define arguments_BDK_L2C_TADX_TTG_ERR(a) (a),-1,-1,-1
+
+#endif /* __BDK_CSRS_L2C_TAD_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-lmc.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-lmc.h
new file mode 100644
index 0000000000..b7e01a32db
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-lmc.h
@@ -0,0 +1,21359 @@
+#ifndef __BDK_CSRS_LMC_H__
+#define __BDK_CSRS_LMC_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium LMC.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Enumeration lmc_bar_e
+ *
+ * LMC Base Address Register Enumeration
+ * Enumerates the base address registers.
+ */
+#define BDK_LMC_BAR_E_LMCX_PF_BAR0(a) (0x87e088000000ll + 0x1000000ll * (a))
+#define BDK_LMC_BAR_E_LMCX_PF_BAR0_SIZE 0x800000ull
+#define BDK_LMC_BAR_E_LMCX_PF_BAR4(a) (0x87e088f00000ll + 0x1000000ll * (a))
+#define BDK_LMC_BAR_E_LMCX_PF_BAR4_SIZE 0x100000ull
+
+/**
+ * Enumeration lmc_int_vec_e
+ *
+ * LMC MSI-X Vector Enumeration
+ * Enumerates the MSI-X interrupt vectors.
+ */
+#define BDK_LMC_INT_VEC_E_INTS (0)
+
+/**
+ * Enumeration lmc_psb_acc_e
+ *
+ * LMC Power Serial Bus Accumulator Enumeration
+ * Enumerates the PSB accumulators for LMC slaves, which correspond to index {b} of
+ * PSBS_SYS()_ACCUM().
+ */
+#define BDK_LMC_PSB_ACC_E_DCLK_EN (0)
+#define BDK_LMC_PSB_ACC_E_RSVD3 (3)
+#define BDK_LMC_PSB_ACC_E_RX_ACTIVE (2)
+#define BDK_LMC_PSB_ACC_E_TX_ACTIVE (1)
+
+/**
+ * Enumeration lmc_psb_event_e
+ *
+ * LMC Power Serial Bus Event Enumeration
+ * Enumerates the event numbers for LMC slaves, which correspond to index {b} of
+ * PSBS_SYS()_EVENT()_CFG.
+ */
+#define BDK_LMC_PSB_EVENT_E_DCLK_EN (0)
+#define BDK_LMC_PSB_EVENT_E_RX_ACTIVE (2)
+#define BDK_LMC_PSB_EVENT_E_TX_ACTIVE (1)
+
+/**
+ * Enumeration lmc_seq_sel_e
+ *
+ * LMC Sequence Select Enumeration
+ * Enumerates the different values of LMC()_SEQ_CTL[SEQ_SEL].
+ */
+#define BDK_LMC_SEQ_SEL_E_INIT (0)
+#define BDK_LMC_SEQ_SEL_E_MPR_RW (9)
+#define BDK_LMC_SEQ_SEL_E_MRW (8)
+#define BDK_LMC_SEQ_SEL_E_OFFSET_TRAINING (0xb)
+#define BDK_LMC_SEQ_SEL_E_PPR (0xf)
+#define BDK_LMC_SEQ_SEL_E_RCD_INIT (7)
+#define BDK_LMC_SEQ_SEL_E_READ_LEVEL (1)
+#define BDK_LMC_SEQ_SEL_E_RW_TRAINING (0xe)
+#define BDK_LMC_SEQ_SEL_E_SREF_ENTRY (2)
+#define BDK_LMC_SEQ_SEL_E_SREF_EXIT (3)
+#define BDK_LMC_SEQ_SEL_E_VREF_INT (0xa)
+#define BDK_LMC_SEQ_SEL_E_WRITE_LEVEL (6)
+
+/**
+ * Register (RSL) lmc#_adr_scramble
+ *
+ * LMC Address Scramble Register
+ * These registers set the aliasing that uses the lowest, legal chip select(s).
+ */
+union bdk_lmcx_adr_scramble
+{
+ uint64_t u;
+ struct bdk_lmcx_adr_scramble_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t key : 64; /**< [ 63: 0](R/W/H) Scramble key for addresses. Clear this field to zero to disable. To enable
+ address scrambling, this key should be set to a value generated from a
+ cryptographically-secure random number generator such as RNM_RANDOM. */
+#else /* Word 0 - Little Endian */
+ uint64_t key : 64; /**< [ 63: 0](R/W/H) Scramble key for addresses. Clear this field to zero to disable. To enable
+ address scrambling, this key should be set to a value generated from a
+ cryptographically-secure random number generator such as RNM_RANDOM. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_adr_scramble_s cn; */
+};
+typedef union bdk_lmcx_adr_scramble bdk_lmcx_adr_scramble_t;
+
+static inline uint64_t BDK_LMCX_ADR_SCRAMBLE(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_ADR_SCRAMBLE(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000328ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_ADR_SCRAMBLE", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_ADR_SCRAMBLE(a) bdk_lmcx_adr_scramble_t
+#define bustype_BDK_LMCX_ADR_SCRAMBLE(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_ADR_SCRAMBLE(a) "LMCX_ADR_SCRAMBLE"
+#define device_bar_BDK_LMCX_ADR_SCRAMBLE(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_ADR_SCRAMBLE(a) (a)
+#define arguments_BDK_LMCX_ADR_SCRAMBLE(a) (a),-1,-1,-1
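+
+/* Usage sketch (illustrative, not part of the auto-generated register
+ * definitions): per the field comment above, scrambling is enabled by
+ * loading [KEY] with a cryptographically random value and disabled by
+ * writing zero. Assumes bdk_rng_get_random64() (the BDK's RNM_RANDOM
+ * reader) and BDK_CSR_WRITE from bdk-csr.h:
+ */
+#if 0 /* example only -- not compiled */
+static void lmc_enable_adr_scramble(bdk_node_t node, int lmc)
+{
+    uint64_t key = bdk_rng_get_random64(); /* sourced from RNM_RANDOM */
+    BDK_CSR_WRITE(node, BDK_LMCX_ADR_SCRAMBLE(lmc), key);
+}
+#endif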
+
+/**
+ * Register (RSL) lmc#_bank_conflict1
+ *
+ * LMC Bank Conflict1 Counter Register
+ */
+union bdk_lmcx_bank_conflict1
+{
+ uint64_t u;
+ struct bdk_lmcx_bank_conflict1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t cnt                   : 64; /**< [ 63:  0](RO/H) Bank conflict counter. A 64-bit counter that increments every DCLK
+                                                                 cycle when LMC cannot issue R/W operations to the DRAM due to a
+                                                                 bank conflict. It increments only when not all eight in-flight
+                                                                 buffers are utilized. */
+#else /* Word 0 - Little Endian */
+        uint64_t cnt                   : 64; /**< [ 63:  0](RO/H) Bank conflict counter. A 64-bit counter that increments every DCLK
+                                                                 cycle when LMC cannot issue R/W operations to the DRAM due to a
+                                                                 bank conflict. It increments only when not all eight in-flight
+                                                                 buffers are utilized. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_bank_conflict1_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t cnt                   : 64; /**< [ 63:  0](RO/H) Bank conflict counter. A 64-bit counter that increments every dclk
+                                                                 cycle when LMC cannot issue R/W operations to the DRAM due to a
+                                                                 bank conflict. It increments only when not all eight in-flight
+                                                                 buffers are utilized. */
+#else /* Word 0 - Little Endian */
+        uint64_t cnt                   : 64; /**< [ 63:  0](RO/H) Bank conflict counter. A 64-bit counter that increments every dclk
+                                                                 cycle when LMC cannot issue R/W operations to the DRAM due to a
+                                                                 bank conflict. It increments only when not all eight in-flight
+                                                                 buffers are utilized. */
+#endif /* Word 0 - End */
+ } cn9;
+ /* struct bdk_lmcx_bank_conflict1_s cn81xx; */
+ /* struct bdk_lmcx_bank_conflict1_s cn88xx; */
+ /* struct bdk_lmcx_bank_conflict1_cn9 cn83xx; */
+};
+typedef union bdk_lmcx_bank_conflict1 bdk_lmcx_bank_conflict1_t;
+
+static inline uint64_t BDK_LMCX_BANK_CONFLICT1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_BANK_CONFLICT1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000360ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000360ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X) && (a<=3))
+ return 0x87e088000360ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000360ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_BANK_CONFLICT1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_BANK_CONFLICT1(a) bdk_lmcx_bank_conflict1_t
+#define bustype_BDK_LMCX_BANK_CONFLICT1(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_BANK_CONFLICT1(a) "LMCX_BANK_CONFLICT1"
+#define device_bar_BDK_LMCX_BANK_CONFLICT1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_BANK_CONFLICT1(a) (a)
+#define arguments_BDK_LMCX_BANK_CONFLICT1(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) lmc#_bank_conflict2
+ *
+ * LMC Bank Conflict2 Counter Register
+ */
+union bdk_lmcx_bank_conflict2
+{
+ uint64_t u;
+ struct bdk_lmcx_bank_conflict2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t cnt : 64; /**< [ 63: 0](RO/H) Bank conflict counter. A 64-bit counter that increments every DCLK
+ cycle when LMC could not issue R/W operations to the DRAM due to
+ a bank conflict. This increments only when fewer than four in-flight
+ buffers are occupied. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt : 64; /**< [ 63: 0](RO/H) Bank conflict counter. A 64-bit counter that increments every DCLK
+ cycle when LMC could not issue R/W operations to the DRAM due to
+ a bank conflict. This increments only when fewer than four in-flight
+ buffers are occupied. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_bank_conflict2_s cn9; */
+ /* struct bdk_lmcx_bank_conflict2_s cn81xx; */
+ /* struct bdk_lmcx_bank_conflict2_s cn88xx; */
+ struct bdk_lmcx_bank_conflict2_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t cnt : 64; /**< [ 63: 0](RO/H) Bank conflict counter. A 64-bit counter that increments every DCLK
+ cycle when LMC could not issue R/W operations to the DRAM due to
+ a bank conflict. This increments only when fewer than four in-flight
+ buffers are occupied. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt : 64; /**< [ 63: 0](RO/H) Bank conflict counter. A 64-bit counter that increments every DCLK
+ cycle when LMC could not issue R/W operations to the DRAM due to
+ a bank conflict. This increments only when fewer than four in-flight
+ buffers are occupied. */
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_lmcx_bank_conflict2 bdk_lmcx_bank_conflict2_t;
+
+static inline uint64_t BDK_LMCX_BANK_CONFLICT2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_BANK_CONFLICT2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000368ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000368ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X) && (a<=3))
+ return 0x87e088000368ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000368ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_BANK_CONFLICT2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_BANK_CONFLICT2(a) bdk_lmcx_bank_conflict2_t
+#define bustype_BDK_LMCX_BANK_CONFLICT2(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_BANK_CONFLICT2(a) "LMCX_BANK_CONFLICT2"
+#define device_bar_BDK_LMCX_BANK_CONFLICT2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_BANK_CONFLICT2(a) (a)
+#define arguments_BDK_LMCX_BANK_CONFLICT2(a) (a),-1,-1,-1
+
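+/* Sketch: the two conflict counters qualify the same stall condition by
+ * in-flight buffer occupancy (see the CNT descriptions above), so sampling
+ * both over an interval separates the two occupancy cases. Assuming
+ * BDK_CSR_READ() and the bdk_wait_usec() delay helper:
+ *
+ *   uint64_t c1 = BDK_CSR_READ(node, BDK_LMCX_BANK_CONFLICT1(lmc));
+ *   uint64_t c2 = BDK_CSR_READ(node, BDK_LMCX_BANK_CONFLICT2(lmc));
+ *   bdk_wait_usec(1000000); // 1 s sample window (example value)
+ *   c1 = BDK_CSR_READ(node, BDK_LMCX_BANK_CONFLICT1(lmc)) - c1;
+ *   c2 = BDK_CSR_READ(node, BDK_LMCX_BANK_CONFLICT2(lmc)) - c2;
+ */
+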
+/**
+ * Register (RSL) lmc#_bist_ctl
+ *
+ * LMC BIST Control Register
+ * This register has fields to control BIST operation.
+ */
+union bdk_lmcx_bist_ctl
+{
+ uint64_t u;
+ struct bdk_lmcx_bist_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t macram_bist_status : 1; /**< [ 4: 4](RO/H) Maximum Activate Counts (MAC) RAM BIST status;
+ one means fail. */
+ uint64_t dlcram_bist_status : 1; /**< [ 3: 3](RO/H) DLC RAM BIST status; one means fail. */
+ uint64_t dlcram_bist_done : 1; /**< [ 2: 2](RO/H) DLC and MAC RAM BIST complete indication;
+ one means both RAMs have completed. */
+ uint64_t start_bist : 1; /**< [ 1: 1](R/W) Start BIST on DLC and MAC memory. */
+ uint64_t clear_bist : 1; /**< [ 0: 0](R/W) Start clear BIST on DLC and MAC memory. */
+#else /* Word 0 - Little Endian */
+ uint64_t clear_bist : 1; /**< [ 0: 0](R/W) Start clear BIST on DLC and MAC memory. */
+ uint64_t start_bist : 1; /**< [ 1: 1](R/W) Start BIST on DLC and MAC memory. */
+ uint64_t dlcram_bist_done : 1; /**< [ 2: 2](RO/H) DLC and MAC RAM BIST complete indication;
+ one means both RAMs have completed. */
+ uint64_t dlcram_bist_status : 1; /**< [ 3: 3](RO/H) DLC RAM BIST status; one means fail. */
+ uint64_t macram_bist_status : 1; /**< [ 4: 4](RO/H) Maximum Activate Counts (MAC) RAM BIST status;
+ one means fail. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_bist_ctl_s cn; */
+};
+typedef union bdk_lmcx_bist_ctl bdk_lmcx_bist_ctl_t;
+
+static inline uint64_t BDK_LMCX_BIST_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_BIST_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000100ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000100ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000100ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_BIST_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_BIST_CTL(a) bdk_lmcx_bist_ctl_t
+#define bustype_BDK_LMCX_BIST_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_BIST_CTL(a) "LMCX_BIST_CTL"
+#define device_bar_BDK_LMCX_BIST_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_BIST_CTL(a) (a)
+#define arguments_BDK_LMCX_BIST_CTL(a) (a),-1,-1,-1
+
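+/* Sketch: a BIST run is start, wait for done, then check the two status
+ * bits (one means fail). A minimal sketch, assuming the BDK_CSR_MODIFY(),
+ * BDK_CSR_WAIT_FOR_FIELD(), and BDK_CSR_INIT() helpers from bdk-csr.h:
+ *
+ *   BDK_CSR_MODIFY(c, node, BDK_LMCX_BIST_CTL(lmc), c.s.start_bist = 1);
+ *   if (BDK_CSR_WAIT_FOR_FIELD(node, BDK_LMCX_BIST_CTL(lmc),
+ *                              dlcram_bist_done, ==, 1, 10000))
+ *       return -1; // timed out (10 ms is an example budget)
+ *   BDK_CSR_INIT(bist, node, BDK_LMCX_BIST_CTL(lmc));
+ *   int failed = bist.s.dlcram_bist_status | bist.s.macram_bist_status;
+ */
+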
+/**
+ * Register (RSL) lmc#_char_ctl
+ *
+ * INTERNAL: LMC Characterization Control Register
+ *
+ * This register provides an assortment of various control fields needed to characterize the DDR4
+ * interface.
+ */
+union bdk_lmcx_char_ctl
+{
+ uint64_t u;
+ struct bdk_lmcx_char_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_54_63 : 10;
+ uint64_t dq_char_byte_check : 1; /**< [ 53: 53](R/W) When set, LMC performs loopback pattern check on a byte. The selection of the byte is
+ controlled by LMC()_CHAR_CTL[DQ_CHAR_BYTE_SEL]. */
+ uint64_t dq_char_check_lock : 1; /**< [ 52: 52](RO/H) Indicates if a lock has been achieved. Is set to one only if a lock is achieved
+ during the LFSR priming period after [DQ_CHAR_CHECK_ENABLE] is set to one, and is
+ forced back to zero when [DQ_CHAR_CHECK_ENABLE] is set to zero. */
+ uint64_t dq_char_check_enable : 1; /**< [ 51: 51](R/W) Enable DQ pattern check. The transition from disabled to enabled clears
+ LMC()_CHAR_DQ_ERR_COUNT. */
+ uint64_t dq_char_bit_sel : 3; /**< [ 50: 48](R/W) Select a bit within the byte for DQ characterization pattern check. */
+ uint64_t dq_char_byte_sel : 4; /**< [ 47: 44](R/W) Select a byte of data for DQ characterization pattern check. */
+ uint64_t dr : 1; /**< [ 43: 43](R/W) Pattern at data rate (not clock rate). */
+ uint64_t skew_on : 1; /**< [ 42: 42](R/W) Skew adjacent bits. */
+ uint64_t en : 1; /**< [ 41: 41](R/W) Enable characterization. */
+ uint64_t sel : 1; /**< [ 40: 40](R/W) Pattern select: 0 = PRBS, 1 = programmable pattern. */
+ uint64_t prog : 8; /**< [ 39: 32](R/W) Programmable pattern. */
+ uint64_t prbs : 32; /**< [ 31: 0](R/W) PRBS polynomial. */
+#else /* Word 0 - Little Endian */
+ uint64_t prbs : 32; /**< [ 31: 0](R/W) PRBS polynomial. */
+ uint64_t prog : 8; /**< [ 39: 32](R/W) Programmable pattern. */
+ uint64_t sel : 1; /**< [ 40: 40](R/W) Pattern select: 0 = PRBS, 1 = programmable pattern. */
+ uint64_t en : 1; /**< [ 41: 41](R/W) Enable characterization. */
+ uint64_t skew_on : 1; /**< [ 42: 42](R/W) Skew adjacent bits. */
+ uint64_t dr : 1; /**< [ 43: 43](R/W) Pattern at data rate (not clock rate). */
+ uint64_t dq_char_byte_sel : 4; /**< [ 47: 44](R/W) Select a byte of data for DQ characterization pattern check. */
+ uint64_t dq_char_bit_sel : 3; /**< [ 50: 48](R/W) Select a bit within the byte for DQ characterization pattern check. */
+ uint64_t dq_char_check_enable : 1; /**< [ 51: 51](R/W) Enable DQ pattern check. The transition from disabled to enabled clears
+ LMC()_CHAR_DQ_ERR_COUNT. */
+ uint64_t dq_char_check_lock : 1; /**< [ 52: 52](RO/H) Indicates if a lock has been achieved. Is set to one only if a lock is achieved
+ during the LFSR priming period after [DQ_CHAR_CHECK_ENABLE] is set to one, and is
+ forced back to zero when [DQ_CHAR_CHECK_ENABLE] is set to zero. */
+ uint64_t dq_char_byte_check : 1; /**< [ 53: 53](R/W) When set, LMC performs loopback pattern check on a byte. The selection of the byte is
+ controlled by LMC()_CHAR_CTL[DQ_CHAR_BYTE_SEL]. */
+ uint64_t reserved_54_63 : 10;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_char_ctl_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_54_63 : 10;
+ uint64_t dq_char_byte_check : 1; /**< [ 53: 53](RO) Reserved. */
+ uint64_t dq_char_check_lock : 1; /**< [ 52: 52](RO/H) Indicates if a lock has been achieved. Is set to one only if a lock is achieved
+ during the LFSR priming period after [DQ_CHAR_CHECK_ENABLE] is set to one, and is
+ forced back to zero when [DQ_CHAR_CHECK_ENABLE] is set to zero. */
+ uint64_t dq_char_check_enable : 1; /**< [ 51: 51](R/W) Enable DQ pattern check. The transition from disabled to enabled clears
+ LMC()_CHAR_DQ_ERR_COUNT. */
+ uint64_t dq_char_bit_sel : 3; /**< [ 50: 48](R/W) Select a bit within the byte for DQ characterization pattern check. */
+ uint64_t dq_char_byte_sel : 4; /**< [ 47: 44](R/W) Select a byte of data for DQ characterization pattern check. */
+ uint64_t dr : 1; /**< [ 43: 43](R/W) Pattern at data rate (not clock rate). */
+ uint64_t skew_on : 1; /**< [ 42: 42](R/W) Skew adjacent bits. */
+ uint64_t en : 1; /**< [ 41: 41](R/W) Enable characterization. */
+ uint64_t sel : 1; /**< [ 40: 40](R/W) Pattern select: 0 = PRBS, 1 = programmable pattern. */
+ uint64_t prog : 8; /**< [ 39: 32](R/W) Programmable pattern. */
+ uint64_t prbs : 32; /**< [ 31: 0](R/W) PRBS polynomial. */
+#else /* Word 0 - Little Endian */
+ uint64_t prbs : 32; /**< [ 31: 0](R/W) PRBS polynomial. */
+ uint64_t prog : 8; /**< [ 39: 32](R/W) Programmable pattern. */
+ uint64_t sel : 1; /**< [ 40: 40](R/W) Pattern select: 0 = PRBS, 1 = programmable pattern. */
+ uint64_t en : 1; /**< [ 41: 41](R/W) Enable characterization. */
+ uint64_t skew_on : 1; /**< [ 42: 42](R/W) Skew adjacent bits. */
+ uint64_t dr : 1; /**< [ 43: 43](R/W) Pattern at data rate (not clock rate). */
+ uint64_t dq_char_byte_sel : 4; /**< [ 47: 44](R/W) Select a byte of data for DQ characterization pattern check. */
+ uint64_t dq_char_bit_sel : 3; /**< [ 50: 48](R/W) Select a bit within the byte for DQ characterization pattern check. */
+ uint64_t dq_char_check_enable : 1; /**< [ 51: 51](R/W) Enable DQ pattern check. The transition from disabled to enabled clears
+ LMC()_CHAR_DQ_ERR_COUNT. */
+ uint64_t dq_char_check_lock : 1; /**< [ 52: 52](RO/H) Indicates if a lock has been achieved. Is set to one only if a lock is achieved
+ during the LFSR priming period after [DQ_CHAR_CHECK_ENABLE] is set to one, and is
+ forced back to zero when [DQ_CHAR_CHECK_ENABLE] is set to zero. */
+ uint64_t dq_char_byte_check : 1; /**< [ 53: 53](RO) Reserved. */
+ uint64_t reserved_54_63 : 10;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ /* struct bdk_lmcx_char_ctl_s cn9; */
+ /* struct bdk_lmcx_char_ctl_s cn81xx; */
+ /* struct bdk_lmcx_char_ctl_s cn83xx; */
+ /* struct bdk_lmcx_char_ctl_s cn88xxp2; */
+};
+typedef union bdk_lmcx_char_ctl bdk_lmcx_char_ctl_t;
+
+static inline uint64_t BDK_LMCX_CHAR_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_CHAR_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000220ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000220ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000220ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000220ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_CHAR_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_CHAR_CTL(a) bdk_lmcx_char_ctl_t
+#define bustype_BDK_LMCX_CHAR_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_CHAR_CTL(a) "LMCX_CHAR_CTL"
+#define device_bar_BDK_LMCX_CHAR_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_CHAR_CTL(a) (a)
+#define arguments_BDK_LMCX_CHAR_CTL(a) (a),-1,-1,-1
+
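+/* Sketch: characterization is programmed through the typed union view,
+ * e.g. enabling a PRBS check on one DQ bit. Field names match the struct
+ * above; the byte/bit selections are example values, and the accessor is
+ * the usual BDK_CSR_MODIFY() assumed from bdk-csr.h:
+ *
+ *   BDK_CSR_MODIFY(c, node, BDK_LMCX_CHAR_CTL(lmc),
+ *       c.s.sel = 0;                   // 0 = PRBS pattern
+ *       c.s.en = 1;                    // enable characterization
+ *       c.s.dq_char_byte_sel = 2;      // byte lane 2 (example)
+ *       c.s.dq_char_bit_sel = 5;       // bit 5 within the byte (example)
+ *       c.s.dq_char_check_enable = 1); // 0->1 clears CHAR_DQ_ERR_COUNT
+ */
+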
+/**
+ * Register (RSL) lmc#_char_dq_err_count
+ *
+ * INTERNAL: LMC DDR Characterization Error Count Register
+ *
+ * This register holds the running error count from the DQ characterization
+ * pattern check; it is cleared when LMC()_CHAR_CTL[DQ_CHAR_CHECK_ENABLE]
+ * transitions from zero to one.
+ */
+union bdk_lmcx_char_dq_err_count
+{
+ uint64_t u;
+ struct bdk_lmcx_char_dq_err_count_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_40_63 : 24;
+ uint64_t dq_err_count : 40; /**< [ 39: 0](RO/H) DQ error count. */
+#else /* Word 0 - Little Endian */
+ uint64_t dq_err_count : 40; /**< [ 39: 0](RO/H) DQ error count. */
+ uint64_t reserved_40_63 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_char_dq_err_count_s cn; */
+};
+typedef union bdk_lmcx_char_dq_err_count bdk_lmcx_char_dq_err_count_t;
+
+static inline uint64_t BDK_LMCX_CHAR_DQ_ERR_COUNT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_CHAR_DQ_ERR_COUNT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000040ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000040ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000040ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000040ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_CHAR_DQ_ERR_COUNT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_CHAR_DQ_ERR_COUNT(a) bdk_lmcx_char_dq_err_count_t
+#define bustype_BDK_LMCX_CHAR_DQ_ERR_COUNT(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_CHAR_DQ_ERR_COUNT(a) "LMCX_CHAR_DQ_ERR_COUNT"
+#define device_bar_BDK_LMCX_CHAR_DQ_ERR_COUNT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_CHAR_DQ_ERR_COUNT(a) (a)
+#define arguments_BDK_LMCX_CHAR_DQ_ERR_COUNT(a) (a),-1,-1,-1
+
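+/* Sketch: after a pattern check, software reads the 40-bit count back
+ * through the same typed view ([DQ_ERR_COUNT] is RO/H, i.e. written by
+ * hardware). Assuming the BDK_CSR_INIT() helper:
+ *
+ *   BDK_CSR_INIT(errs, node, BDK_LMCX_CHAR_DQ_ERR_COUNT(lmc));
+ *   uint64_t dq_errors = errs.s.dq_err_count; // zero means a clean run
+ */
+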
+/**
+ * Register (RSL) lmc#_char_mask0
+ *
+ * LMC Characterization Mask Register 0
+ * This register provides an assortment of various control fields needed to characterize the
+ * DDR4 interface.
+ * It is also used to corrupt the write data bits when the ECC corrupt logic generator is enabled.
+ */
+union bdk_lmcx_char_mask0
+{
+ uint64_t u;
+ struct bdk_lmcx_char_mask0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t mask : 64; /**< [ 63: 0](R/W) Mask for DQ0\<63:0\>.
+ Before enabling the ECC corrupt generation logic by setting
+ LMC()_ECC_PARITY_TEST[ECC_CORRUPT_ENA], set any of the MASK bits to one to flip the
+ corresponding bits of the lower 64-bit dataword during a write data transfer. */
+#else /* Word 0 - Little Endian */
+ uint64_t mask : 64; /**< [ 63: 0](R/W) Mask for DQ0\<63:0\>.
+ Before enabling the ECC corrupt generation logic by setting
+ LMC()_ECC_PARITY_TEST[ECC_CORRUPT_ENA], set any of the MASK bits to one to flip the
+ corresponding bits of the lower 64-bit dataword during a write data transfer. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_char_mask0_s cn; */
+};
+typedef union bdk_lmcx_char_mask0 bdk_lmcx_char_mask0_t;
+
+static inline uint64_t BDK_LMCX_CHAR_MASK0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_CHAR_MASK0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000228ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000228ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000228ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000228ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_CHAR_MASK0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_CHAR_MASK0(a) bdk_lmcx_char_mask0_t
+#define bustype_BDK_LMCX_CHAR_MASK0(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_CHAR_MASK0(a) "LMCX_CHAR_MASK0"
+#define device_bar_BDK_LMCX_CHAR_MASK0(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_CHAR_MASK0(a) (a)
+#define arguments_BDK_LMCX_CHAR_MASK0(a) (a),-1,-1,-1
+
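+/* Sketch: to flip one write-data bit for ECC testing, set its mask bit
+ * here before enabling LMC()_ECC_PARITY_TEST[ECC_CORRUPT_ENA], as the
+ * field description directs. Bit 3 is an example target; BDK_CSR_WRITE()
+ * is the assumed raw-write helper from bdk-csr.h:
+ *
+ *   BDK_CSR_WRITE(node, BDK_LMCX_CHAR_MASK0(lmc), 1ULL << 3);
+ */
+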
+/**
+ * Register (RSL) lmc#_char_mask1
+ *
+ * INTERNAL: LMC Characterization Mask Register 1
+ *
+ * This register provides an assortment of various control fields needed to characterize the DDR4
+ * interface.
+ */
+union bdk_lmcx_char_mask1
+{
+ uint64_t u;
+ struct bdk_lmcx_char_mask1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t mask : 8; /**< [ 7: 0](R/W) Mask for DQ0\<71:64\>. */
+#else /* Word 0 - Little Endian */
+ uint64_t mask : 8; /**< [ 7: 0](R/W) Mask for DQ0\<71:64\>. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_char_mask1_s cn; */
+};
+typedef union bdk_lmcx_char_mask1 bdk_lmcx_char_mask1_t;
+
+static inline uint64_t BDK_LMCX_CHAR_MASK1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_CHAR_MASK1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000230ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000230ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000230ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000230ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_CHAR_MASK1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_CHAR_MASK1(a) bdk_lmcx_char_mask1_t
+#define bustype_BDK_LMCX_CHAR_MASK1(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_CHAR_MASK1(a) "LMCX_CHAR_MASK1"
+#define device_bar_BDK_LMCX_CHAR_MASK1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_CHAR_MASK1(a) (a)
+#define arguments_BDK_LMCX_CHAR_MASK1(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) lmc#_char_mask2
+ *
+ * LMC Characterization Mask Register 2
+ * This register provides an assortment of various control fields needed to characterize the
+ * DDR4 interface.
+ * It is also used to corrupt the write data bits when the ECC corrupt logic generator is enabled.
+ */
+union bdk_lmcx_char_mask2
+{
+ uint64_t u;
+ struct bdk_lmcx_char_mask2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t mask : 64; /**< [ 63: 0](R/W) Mask for DQ1\<63:0\>.
+ Before enabling the ECC corrupt generation logic by setting
+ LMC()_ECC_PARITY_TEST[ECC_CORRUPT_ENA], set any of the MASK bits to one to flip the
+ corresponding bits of the upper 64-bit dataword during a write data transfer. */
+#else /* Word 0 - Little Endian */
+ uint64_t mask : 64; /**< [ 63: 0](R/W) Mask for DQ1\<63:0\>.
+ Before enabling the ECC corrupt generation logic by setting
+ LMC()_ECC_PARITY_TEST[ECC_CORRUPT_ENA], set any of the MASK bits to one to flip the
+ corresponding bits of the upper 64-bit dataword during a write data transfer. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_char_mask2_s cn; */
+};
+typedef union bdk_lmcx_char_mask2 bdk_lmcx_char_mask2_t;
+
+static inline uint64_t BDK_LMCX_CHAR_MASK2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_CHAR_MASK2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000238ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000238ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000238ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000238ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_CHAR_MASK2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_CHAR_MASK2(a) bdk_lmcx_char_mask2_t
+#define bustype_BDK_LMCX_CHAR_MASK2(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_CHAR_MASK2(a) "LMCX_CHAR_MASK2"
+#define device_bar_BDK_LMCX_CHAR_MASK2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_CHAR_MASK2(a) (a)
+#define arguments_BDK_LMCX_CHAR_MASK2(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) lmc#_char_mask3
+ *
+ * INTERNAL: LMC Characterization Mask Register 3
+ *
+ * This register provides an assortment of various control fields needed to characterize the DDR4
+ * interface.
+ */
+union bdk_lmcx_char_mask3
+{
+ uint64_t u;
+ struct bdk_lmcx_char_mask3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_17_63 : 47;
+ uint64_t dac_on_mask : 9; /**< [ 16: 8](R/W) This mask is applied to the DAC ON signals that go to the PHY, so that each byte lane can
+ selectively turn off or on the signals once the master signals are enabled. Using the
+ symbol D for DAC ON, the mask looks like this:
+ \<pre\>
+ DDDDDDDDD
+ 876543210
+ \</pre\> */
+ uint64_t mask : 8; /**< [ 7: 0](R/W) Mask for DQ1\<71:64\>. */
+#else /* Word 0 - Little Endian */
+ uint64_t mask : 8; /**< [ 7: 0](R/W) Mask for DQ1\<71:64\>. */
+ uint64_t dac_on_mask : 9; /**< [ 16: 8](R/W) This mask is applied to the DAC ON signals that go to the PHY, so that each byte lane can
+ selectively turn off or on the signals once the master signals are enabled. Using the
+ symbol D for DAC ON, the mask looks like this:
+ \<pre\>
+ DDDDDDDDD
+ 876543210
+ \</pre\> */
+ uint64_t reserved_17_63 : 47;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_char_mask3_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t mask : 8; /**< [ 7: 0](R/W) Mask for DQ1\<71:64\>. */
+#else /* Word 0 - Little Endian */
+ uint64_t mask : 8; /**< [ 7: 0](R/W) Mask for DQ1\<71:64\>. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_lmcx_char_mask3_s cn9; */
+};
+typedef union bdk_lmcx_char_mask3 bdk_lmcx_char_mask3_t;
+
+static inline uint64_t BDK_LMCX_CHAR_MASK3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_CHAR_MASK3(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000240ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000240ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000240ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000240ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_CHAR_MASK3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_CHAR_MASK3(a) bdk_lmcx_char_mask3_t
+#define bustype_BDK_LMCX_CHAR_MASK3(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_CHAR_MASK3(a) "LMCX_CHAR_MASK3"
+#define device_bar_BDK_LMCX_CHAR_MASK3(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_CHAR_MASK3(a) (a)
+#define arguments_BDK_LMCX_CHAR_MASK3(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) lmc#_char_mask4
+ *
+ * INTERNAL: LMC Characterization Mask Register 4
+ *
+ * This register provides an assortment of various control fields needed to characterize the DDR4 interface.
+ */
+union bdk_lmcx_char_mask4
+{
+ uint64_t u;
+ struct bdk_lmcx_char_mask4_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ref_pin_on_mask : 9; /**< [ 63: 55](R/W) This mask is applied to the ref_pin_on signals that go to the PHY, so that each byte lane
+ can selectively turn off or on the signals once the master signals are enabled. Using the
+ symbol R, the mask looks like this:
+ \<pre\>
+ RRRRRRRRR
+ 876543210
+ \</pre\> */
+ uint64_t dac_on_mask : 9; /**< [ 54: 46](R/W) This mask is applied to the DAC_ON signals that go to the PHY, so that each byte lane can
+ selectively turn off or on the signals once the master signals are enabled. Using the
+ symbol D for DAC_ON, the mask looks like this:
+ \<pre\>
+ DDDDDDDDD
+ 876543210
+ \</pre\> */
+ uint64_t reserved_36_45 : 10;
+ uint64_t par_mask : 1; /**< [ 35: 35](R/W) Mask for DDR_PAR. */
+ uint64_t act_n_mask : 1; /**< [ 34: 34](R/W) Mask for DDR_ACT_L. */
+ uint64_t a17_mask : 1; /**< [ 33: 33](R/W) Mask for DDR_A17. */
+ uint64_t reset_n_mask : 1; /**< [ 32: 32](R/W) Mask for DDR_RESET_L. */
+ uint64_t a_mask : 16; /**< [ 31: 16](R/W) Mask for DDR_A\<15:0\>. */
+ uint64_t ba_mask : 3; /**< [ 15: 13](R/W) Mask for DDR_BA\<2:0\>. */
+ uint64_t we_n_mask : 1; /**< [ 12: 12](R/W) Mask for DDR_WE_L. */
+ uint64_t cas_n_mask : 1; /**< [ 11: 11](R/W) Mask for DDR_CAS_L. */
+ uint64_t ras_n_mask : 1; /**< [ 10: 10](R/W) Mask for DDR_RAS_L. */
+ uint64_t odt1_mask : 2; /**< [ 9: 8](R/W) Mask for DDR_ODT1. */
+ uint64_t odt0_mask : 2; /**< [ 7: 6](R/W) Mask for DDR_ODT0. */
+ uint64_t cs1_n_mask : 2; /**< [ 5: 4](R/W) Mask for DDR_CS1_L. */
+ uint64_t cs0_n_mask : 2; /**< [ 3: 2](R/W) Mask for DDR_CS0_L. */
+ uint64_t cke_mask : 2; /**< [ 1: 0](R/W) Mask for DDR_CKE*. */
+#else /* Word 0 - Little Endian */
+ uint64_t cke_mask : 2; /**< [ 1: 0](R/W) Mask for DDR_CKE*. */
+ uint64_t cs0_n_mask : 2; /**< [ 3: 2](R/W) Mask for DDR_CS0_L. */
+ uint64_t cs1_n_mask : 2; /**< [ 5: 4](R/W) Mask for DDR_CS1_L. */
+ uint64_t odt0_mask : 2; /**< [ 7: 6](R/W) Mask for DDR_ODT0. */
+ uint64_t odt1_mask : 2; /**< [ 9: 8](R/W) Mask for DDR_ODT1. */
+ uint64_t ras_n_mask : 1; /**< [ 10: 10](R/W) Mask for DDR_RAS_L. */
+ uint64_t cas_n_mask : 1; /**< [ 11: 11](R/W) Mask for DDR_CAS_L. */
+ uint64_t we_n_mask : 1; /**< [ 12: 12](R/W) Mask for DDR_WE_L. */
+ uint64_t ba_mask : 3; /**< [ 15: 13](R/W) Mask for DDR_BA\<2:0\>. */
+ uint64_t a_mask : 16; /**< [ 31: 16](R/W) Mask for DDR_A\<15:0\>. */
+ uint64_t reset_n_mask : 1; /**< [ 32: 32](R/W) Mask for DDR_RESET_L. */
+ uint64_t a17_mask : 1; /**< [ 33: 33](R/W) Mask for DDR_A17. */
+ uint64_t act_n_mask : 1; /**< [ 34: 34](R/W) Mask for DDR_ACT_L. */
+ uint64_t par_mask : 1; /**< [ 35: 35](R/W) Mask for DDR_PAR. */
+ uint64_t reserved_36_45 : 10;
+ uint64_t dac_on_mask : 9; /**< [ 54: 46](R/W) This mask is applied to the DAC_ON signals that go to the PHY, so that each byte lane can
+ selectively turn off or on the signals once the master signals are enabled. Using the
+ symbol D for DAC_ON, the mask looks like this:
+ \<pre\>
+ DDDDDDDDD
+ 876543210
+ \</pre\> */
+ uint64_t ref_pin_on_mask : 9; /**< [ 63: 55](R/W) This mask is applied to the ref_pin_on signals that go to the PHY, so that each byte lane
+ can selectively turn off or on the signals once the master signals are enabled. Using the
+ symbol R, the mask looks like this:
+ \<pre\>
+ RRRRRRRRR
+ 876543210
+ \</pre\> */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_char_mask4_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ref_pin_on_mask : 9; /**< [ 63: 55](R/W) This mask is applied to the ref_pin_on signals that go to the PHY, so that each byte lane
+ can selectively turn off or on the signals once the master signals are enabled. Using the
+ symbol R, the mask looks like this:
+ \<pre\>
+ RRRRRRRRR
+ 876543210
+ \</pre\> */
+ uint64_t dac_on_mask : 9; /**< [ 54: 46](R/W) This mask is applied to the DAC_ON signals that go to the PHY, so that each byte lane can
+ selectively turn off or on the signals once the master signals are enabled. Using the
+ symbol D for DAC_ON, the mask looks like this:
+ \<pre\>
+ DDDDDDDDD
+ 876543210
+ \</pre\> */
+ uint64_t reserved_45 : 1;
+ uint64_t dbi_mask : 9; /**< [ 44: 36](R/W) Mask for DBI/DQS\<1\>. */
+ uint64_t par_mask : 1; /**< [ 35: 35](R/W) Mask for DDR_PAR. */
+ uint64_t act_n_mask : 1; /**< [ 34: 34](R/W) Mask for DDR_ACT_L. */
+ uint64_t a17_mask : 1; /**< [ 33: 33](R/W) Mask for DDR_A17. */
+ uint64_t reset_n_mask : 1; /**< [ 32: 32](R/W) Mask for DDR_RESET_L. */
+ uint64_t a_mask : 16; /**< [ 31: 16](R/W) Mask for DDR_A\<15:0\>. */
+ uint64_t ba_mask : 3; /**< [ 15: 13](R/W) Mask for DDR_BA\<2:0\>. */
+ uint64_t we_n_mask : 1; /**< [ 12: 12](R/W) Mask for DDR_WE_L. */
+ uint64_t cas_n_mask : 1; /**< [ 11: 11](R/W) Mask for DDR_CAS_L. */
+ uint64_t ras_n_mask : 1; /**< [ 10: 10](R/W) Mask for DDR_RAS_L. */
+ uint64_t odt1_mask : 2; /**< [ 9: 8](R/W) Mask for DDR_ODT1. */
+ uint64_t odt0_mask : 2; /**< [ 7: 6](R/W) Mask for DDR_ODT0. */
+ uint64_t cs1_n_mask : 2; /**< [ 5: 4](R/W) Mask for DDR_CS1_L. */
+ uint64_t cs0_n_mask : 2; /**< [ 3: 2](R/W) Mask for DDR_CS0_L. */
+ uint64_t cke_mask : 2; /**< [ 1: 0](R/W) Mask for DDR_CKE*. */
+#else /* Word 0 - Little Endian */
+ uint64_t cke_mask : 2; /**< [ 1: 0](R/W) Mask for DDR_CKE*. */
+ uint64_t cs0_n_mask : 2; /**< [ 3: 2](R/W) Mask for DDR_CS0_L. */
+ uint64_t cs1_n_mask : 2; /**< [ 5: 4](R/W) Mask for DDR_CS1_L. */
+ uint64_t odt0_mask : 2; /**< [ 7: 6](R/W) Mask for DDR_ODT0. */
+ uint64_t odt1_mask : 2; /**< [ 9: 8](R/W) Mask for DDR_ODT1. */
+ uint64_t ras_n_mask : 1; /**< [ 10: 10](R/W) Mask for DDR_RAS_L. */
+ uint64_t cas_n_mask : 1; /**< [ 11: 11](R/W) Mask for DDR_CAS_L. */
+ uint64_t we_n_mask : 1; /**< [ 12: 12](R/W) Mask for DDR_WE_L. */
+ uint64_t ba_mask : 3; /**< [ 15: 13](R/W) Mask for DDR_BA\<2:0\>. */
+ uint64_t a_mask : 16; /**< [ 31: 16](R/W) Mask for DDR_A\<15:0\>. */
+ uint64_t reset_n_mask : 1; /**< [ 32: 32](R/W) Mask for DDR_RESET_L. */
+ uint64_t a17_mask : 1; /**< [ 33: 33](R/W) Mask for DDR_A17. */
+ uint64_t act_n_mask : 1; /**< [ 34: 34](R/W) Mask for DDR_ACT_L. */
+ uint64_t par_mask : 1; /**< [ 35: 35](R/W) Mask for DDR_PAR. */
+ uint64_t dbi_mask : 9; /**< [ 44: 36](R/W) Mask for DBI/DQS\<1\>. */
+ uint64_t reserved_45 : 1;
+ uint64_t dac_on_mask : 9; /**< [ 54: 46](R/W) This mask is applied to the DAC_ON signals that go to the PHY, so that each byte lane can
+ selectively turn off or on the signals once the master signals are enabled. Using the
+ symbol D for DAC_ON, the mask looks like this:
+ \<pre\>
+ DDDDDDDDD
+ 876543210
+ \</pre\> */
+ uint64_t ref_pin_on_mask : 9; /**< [ 63: 55](R/W) This mask is applied to the ref_pin_on signals that go to the PHY, so that each byte lane
+ can selectively turn off or on the signals once the master signals are enabled. Using the
+ symbol R, the mask looks like this:
+ \<pre\>
+ RRRRRRRRR
+ 876543210
+ \</pre\> */
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_lmcx_char_mask4_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ref_pin_on_mask : 9; /**< [ 63: 55](R/W) This mask is applied to the ref_pin_on signals that go to the PHY, so that each byte lane
+ can selectively turn off or on the signals once the master signals are enabled. Using the
+ symbol R, the mask looks like this:
+ \<pre\>
+ RRRRRRRRR
+ 876543210
+ \</pre\> */
+ uint64_t reserved_47_54 : 8;
+ uint64_t dbi_mask : 9; /**< [ 46: 38](R/W) Mask for DBI/DQS\<1\>. */
+ uint64_t c2_mask : 1; /**< [ 37: 37](R/W) Mask for CID C2. */
+ uint64_t c1_mask : 1; /**< [ 36: 36](R/W) Mask for CID C1. */
+ uint64_t par_mask : 1; /**< [ 35: 35](R/W) Mask for DDR_PAR. */
+ uint64_t act_n_mask : 1; /**< [ 34: 34](R/W) Mask for DDR_ACT_L. */
+ uint64_t a17_mask : 1; /**< [ 33: 33](R/W) Mask for DDR_A17. */
+ uint64_t reset_n_mask : 1; /**< [ 32: 32](R/W) Mask for DDR_RESET_L. */
+ uint64_t a_mask : 16; /**< [ 31: 16](R/W) Mask for DDR_A\<15:0\>. */
+ uint64_t ba_mask : 3; /**< [ 15: 13](R/W) Mask for DDR_BA\<2:0\>. */
+ uint64_t we_n_mask : 1; /**< [ 12: 12](R/W) Mask for DDR_WE_L. */
+ uint64_t cas_n_mask : 1; /**< [ 11: 11](R/W) Mask for DDR_CAS_L. */
+ uint64_t ras_n_mask : 1; /**< [ 10: 10](R/W) Mask for DDR_RAS_L. */
+ uint64_t odt1_mask : 2; /**< [ 9: 8](R/W) Mask for DDR_ODT1. */
+ uint64_t odt0_mask : 2; /**< [ 7: 6](R/W) Mask for DDR_ODT0. */
+ uint64_t cs1_n_mask : 2; /**< [ 5: 4](R/W) Mask for DDR_CS1_L. */
+ uint64_t cs0_n_mask : 2; /**< [ 3: 2](R/W) Mask for DDR_CS0_L. */
+ uint64_t cke_mask : 2; /**< [ 1: 0](R/W) Mask for DDR_CKE*. */
+#else /* Word 0 - Little Endian */
+ uint64_t cke_mask : 2; /**< [ 1: 0](R/W) Mask for DDR_CKE*. */
+ uint64_t cs0_n_mask : 2; /**< [ 3: 2](R/W) Mask for DDR_CS0_L. */
+ uint64_t cs1_n_mask : 2; /**< [ 5: 4](R/W) Mask for DDR_CS1_L. */
+ uint64_t odt0_mask : 2; /**< [ 7: 6](R/W) Mask for DDR_ODT0. */
+ uint64_t odt1_mask : 2; /**< [ 9: 8](R/W) Mask for DDR_ODT1. */
+ uint64_t ras_n_mask : 1; /**< [ 10: 10](R/W) Mask for DDR_RAS_L. */
+ uint64_t cas_n_mask : 1; /**< [ 11: 11](R/W) Mask for DDR_CAS_L. */
+ uint64_t we_n_mask : 1; /**< [ 12: 12](R/W) Mask for DDR_WE_L. */
+ uint64_t ba_mask : 3; /**< [ 15: 13](R/W) Mask for DDR_BA\<2:0\>. */
+ uint64_t a_mask : 16; /**< [ 31: 16](R/W) Mask for DDR_A\<15:0\>. */
+ uint64_t reset_n_mask : 1; /**< [ 32: 32](R/W) Mask for DDR_RESET_L. */
+ uint64_t a17_mask : 1; /**< [ 33: 33](R/W) Mask for DDR_A17. */
+ uint64_t act_n_mask : 1; /**< [ 34: 34](R/W) Mask for DDR_ACT_L. */
+ uint64_t par_mask : 1; /**< [ 35: 35](R/W) Mask for DDR_PAR. */
+ uint64_t c1_mask : 1; /**< [ 36: 36](R/W) Mask for CID C1. */
+ uint64_t c2_mask : 1; /**< [ 37: 37](R/W) Mask for CID C2. */
+ uint64_t dbi_mask : 9; /**< [ 46: 38](R/W) Mask for DBI/DQS\<1\>. */
+ uint64_t reserved_47_54 : 8;
+ uint64_t ref_pin_on_mask : 9; /**< [ 63: 55](R/W) This mask is applied to the ref_pin_on signals that go to the PHY, so that each byte lane
+ can selectively turn off or on the signals once the master signals are enabled. Using the
+ symbol R, the mask looks like this:
+ \<pre\>
+ RRRRRRRRR
+ 876543210
+ \</pre\> */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_lmcx_char_mask4 bdk_lmcx_char_mask4_t;
+
+static inline uint64_t BDK_LMCX_CHAR_MASK4(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_CHAR_MASK4(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000318ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000318ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000318ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000318ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_CHAR_MASK4", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_CHAR_MASK4(a) bdk_lmcx_char_mask4_t
+#define bustype_BDK_LMCX_CHAR_MASK4(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_CHAR_MASK4(a) "LMCX_CHAR_MASK4"
+#define device_bar_BDK_LMCX_CHAR_MASK4(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_CHAR_MASK4(a) (a)
+#define arguments_BDK_LMCX_CHAR_MASK4(a) (a),-1,-1,-1
+
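+/* Sketch: CHAR_MASK4 is one of the registers whose layout differs by chip
+ * (e.g. [DBI_MASK] sits at different bit positions in the cn8 and cn9
+ * views), so code selects the matching struct member by model. A hedged
+ * sketch using CAVIUM_IS_MODEL() as in the address function above:
+ *
+ *   BDK_CSR_INIT(m4, node, BDK_LMCX_CHAR_MASK4(lmc));
+ *   uint64_t dbi = CAVIUM_IS_MODEL(CAVIUM_CN9XXX)
+ *                  ? m4.cn9.dbi_mask : m4.cn8.dbi_mask;
+ */
+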
+/**
+ * Register (RSL) lmc#_comp_ctl2
+ *
+ * LMC Compensation Control Register
+ */
+union bdk_lmcx_comp_ctl2
+{
+ uint64_t u;
+ struct bdk_lmcx_comp_ctl2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_51_63 : 13;
+ uint64_t rclk_char_mode : 1; /**< [ 50: 50](R/W) Reserved.
+ Internal:
+ Select core clock characterization mode. */
+ uint64_t ddr__ptune : 5; /**< [ 49: 45](RO/H) DDR PCTL from compensation circuit. The encoded value provides debug information for the
+ compensation impedance on P-pullup. */
+ uint64_t ddr__ntune : 5; /**< [ 44: 40](RO/H) DDR NCTL from compensation circuit. The encoded value provides debug information for the
+ compensation impedance on N-pulldown. */
+ uint64_t ptune_offset : 4; /**< [ 39: 36](R/W) Ptune offset value. This is a signed value where the MSB is a sign bit, with zero
+ indicating addition and one indicating subtraction. */
+ uint64_t ntune_offset : 4; /**< [ 35: 32](R/W) Ntune offset value. This is a signed value where the MSB is a sign bit, with zero
+ indicating addition and one indicating subtraction. */
+ uint64_t m180 : 1; /**< [ 31: 31](R/W) Reserved; must be zero.
+ Internal:
+ Cap impedance at 180 ohm, instead of 240 ohm. */
+ uint64_t byp : 1; /**< [ 30: 30](R/W) Bypass mode. When set, [PTUNE],[NTUNE] are the compensation setting. When clear,
+ [DDR__PTUNE],[DDR__NTUNE] are the compensation setting. */
+ uint64_t ptune : 5; /**< [ 29: 25](R/W) PCTL impedance control in bypass mode. */
+ uint64_t ntune : 5; /**< [ 24: 20](R/W) NCTL impedance control in bypass mode. */
+ uint64_t rodt_ctl : 4; /**< [ 19: 16](R/W) RODT NCTL impedance control bits. This field controls ODT values during a memory read.
+ 0x0 = No ODT.
+ 0x1 = 20 ohm.
+ 0x2 = 30 ohm.
+ 0x3 = 40 ohm.
+ 0x4 = 60 ohm.
+ 0x5 = 120 ohm.
+ _ else = Reserved.
+
+ In DDR4 mode:
+ 0x0 = No ODT.
+ 0x1 = 40 ohm.
+ 0x2 = 60 ohm.
+ 0x3 = 80 ohm.
+ 0x4 = 120 ohm.
+ 0x5 = 240 ohm.
+ 0x6 = 34 ohm.
+ 0x7 = 48 ohm.
+ _ else = Reserved. */
+ uint64_t control_ctl : 4; /**< [ 15: 12](R/W) Drive strength control for DDR_DIMMx_CS*_L/DDR_DIMMx_ODT_* /DDR_DIMMx_CKE* drivers.
+
+ In DDR3 mode:
+ 0x1 = 24 ohm.
+ 0x2 = 26.67 ohm.
+ 0x3 = 30 ohm.
+ 0x4 = 34.3 ohm.
+ 0x5 = 40 ohm.
+ 0x6 = 48 ohm.
+ 0x7 = 60 ohm.
+ _ else = Reserved.
+
+ In DDR4 mode:
+ 0x0 = Reserved.
+ 0x1 = Reserved.
+ 0x2 = 26 ohm.
+ 0x3 = 30 ohm.
+ 0x4 = 34 ohm.
+ 0x5 = 40 ohm.
+ 0x6 = 48 ohm.
+ _ else = Reserved. */
+ uint64_t cmd_ctl : 4; /**< [ 11: 8](R/W) Drive strength control for DDR_RAS_L_A\<16\>/DDR_CAS_L_A\<15\>/DDR_WE_L_A\<14\>/DDR_A\<13:0\>/
+ DDR_A\<15\>_BG1/DDR_A\<14\>_BG0/DDR_BA* /DDR_BA2_TEN/DDR_PAR/DDR_RESET_L drivers.
+
+ In DDR3 mode:
+ 0x1 = 24 ohm.
+ 0x2 = 26.67 ohm.
+ 0x3 = 30 ohm.
+ 0x4 = 34.3 ohm.
+ 0x5 = 40 ohm.
+ 0x6 = 48 ohm.
+ 0x7 = 60 ohm.
+ _ else = Reserved.
+
+ In DDR4 mode:
+ 0x0 = Reserved.
+ 0x1 = Reserved.
+ 0x2 = 26 ohm.
+ 0x3 = 30 ohm.
+ 0x4 = 34 ohm.
+ 0x5 = 40 ohm.
+ 0x6 = 48 ohm.
+ _ else = Reserved. */
+ uint64_t ck_ctl : 4; /**< [ 7: 4](R/W) Drive strength control for DDR_CK_*_P/N drivers.
+
+ In DDR3 mode:
+ 0x1 = 24 ohm.
+ 0x2 = 26.67 ohm.
+ 0x3 = 30 ohm.
+ 0x4 = 34.3 ohm.
+ 0x5 = 40 ohm.
+ 0x6 = 48 ohm.
+ 0x7 = 60 ohm.
+ _ else = Reserved.
+
+ In DDR4 mode:
+ 0x0 = Reserved.
+ 0x1 = Reserved.
+ 0x2 = 26 ohm.
+ 0x3 = 30 ohm.
+ 0x4 = 34 ohm.
+ 0x5 = 40 ohm.
+ 0x6 = 48 ohm.
+ _ else = Reserved. */
+ uint64_t dqx_ctl : 4; /**< [ 3: 0](R/W) Drive strength control for DDR_DQ* /DDR_CB* /DDR_DQS_*_P/N drivers.
+ 0x1 = 24 ohm.
+ 0x2 = 26.67 ohm.
+ 0x3 = 30 ohm.
+ 0x4 = 34.3 ohm.
+ 0x5 = 40 ohm.
+ 0x6 = 48 ohm.
+ 0x7 = 60 ohm.
+ _ else = Reserved. */
+#else /* Word 0 - Little Endian */
+ uint64_t dqx_ctl : 4; /**< [ 3: 0](R/W) Drive strength control for DDR_DQ* /DDR_CB* /DDR_DQS_*_P/N drivers.
+ 0x1 = 24 ohm.
+ 0x2 = 26.67 ohm.
+ 0x3 = 30 ohm.
+ 0x4 = 34.3 ohm.
+ 0x5 = 40 ohm.
+ 0x6 = 48 ohm.
+ 0x7 = 60 ohm.
+ _ else = Reserved. */
+ uint64_t ck_ctl : 4; /**< [ 7: 4](R/W) Drive strength control for DDR_CK_*_P/N drivers.
+
+ In DDR3 mode:
+ 0x1 = 24 ohm.
+ 0x2 = 26.67 ohm.
+ 0x3 = 30 ohm.
+ 0x4 = 34.3 ohm.
+ 0x5 = 40 ohm.
+ 0x6 = 48 ohm.
+ 0x7 = 60 ohm.
+ _ else = Reserved.
+
+ In DDR4 mode:
+ 0x0 = Reserved.
+ 0x1 = Reserved.
+ 0x2 = 26 ohm.
+ 0x3 = 30 ohm.
+ 0x4 = 34 ohm.
+ 0x5 = 40 ohm.
+ 0x6 = 48 ohm.
+ _ else = Reserved. */
+ uint64_t cmd_ctl : 4; /**< [ 11: 8](R/W) Drive strength control for DDR_RAS_L_A\<16\>/DDR_CAS_L_A\<15\>/DDR_WE_L_A\<14\>/DDR_A\<13:0\>/
+ DDR_A\<15\>_BG1/DDR_A\<14\>_BG0/DDR_BA* /DDR_BA2_TEN/DDR_PAR/DDR_RESET_L drivers.
+
+ In DDR3 mode:
+ 0x1 = 24 ohm.
+ 0x2 = 26.67 ohm.
+ 0x3 = 30 ohm.
+ 0x4 = 34.3 ohm.
+ 0x5 = 40 ohm.
+ 0x6 = 48 ohm.
+ 0x7 = 60 ohm.
+ _ else = Reserved.
+
+ In DDR4 mode:
+ 0x0 = Reserved.
+ 0x1 = Reserved.
+ 0x2 = 26 ohm.
+ 0x3 = 30 ohm.
+ 0x4 = 34 ohm.
+ 0x5 = 40 ohm.
+ 0x6 = 48 ohm.
+ _ else = Reserved. */
+ uint64_t control_ctl : 4; /**< [ 15: 12](R/W) Drive strength control for DDR_DIMMx_CS*_L/DDR_DIMMx_ODT_* /DDR_DIMMx_CKE* drivers.
+
+ In DDR3 mode:
+ 0x1 = 24 ohm.
+ 0x2 = 26.67 ohm.
+ 0x3 = 30 ohm.
+ 0x4 = 34.3 ohm.
+ 0x5 = 40 ohm.
+ 0x6 = 48 ohm.
+ 0x7 = 60 ohm.
+ _ else = Reserved.
+
+ In DDR4 mode:
+ 0x0 = Reserved.
+ 0x1 = Reserved.
+ 0x2 = 26 ohm.
+ 0x3 = 30 ohm.
+ 0x4 = 34 ohm.
+ 0x5 = 40 ohm.
+ 0x6 = 48 ohm.
+ _ else = Reserved. */
+ uint64_t rodt_ctl : 4; /**< [ 19: 16](R/W) RODT NCTL impedance control bits. This field controls ODT values during a memory read.
+ 0x0 = No ODT.
+ 0x1 = 20 ohm.
+ 0x2 = 30 ohm.
+ 0x3 = 40 ohm.
+ 0x4 = 60 ohm.
+ 0x5 = 120 ohm.
+ _ else = Reserved.
+
+ In DDR4 mode:
+ 0x0 = No ODT.
+ 0x1 = 40 ohm.
+ 0x2 = 60 ohm.
+ 0x3 = 80 ohm.
+ 0x4 = 120 ohm.
+ 0x5 = 240 ohm.
+ 0x6 = 34 ohm.
+ 0x7 = 48 ohm.
+ _ else = Reserved. */
+ uint64_t ntune : 5; /**< [ 24: 20](R/W) NCTL impedance control in bypass mode. */
+ uint64_t ptune : 5; /**< [ 29: 25](R/W) PCTL impedance control in bypass mode. */
+ uint64_t byp : 1; /**< [ 30: 30](R/W) Bypass mode. When set, [PTUNE],[NTUNE] are the compensation setting. When clear,
+ [DDR__PTUNE],[DDR__NTUNE] are the compensation setting. */
+ uint64_t m180 : 1; /**< [ 31: 31](R/W) Reserved; must be zero.
+ Internal:
+ Cap impedance at 180 ohm, instead of 240 ohm. */
+ uint64_t ntune_offset : 4; /**< [ 35: 32](R/W) Ntune offset value. This is a signed value where the MSB is a sign bit, with zero
+ indicating addition and one indicating subtraction. */
+ uint64_t ptune_offset : 4; /**< [ 39: 36](R/W) Ptune offset value. This is a signed value where the MSB is a sign bit, with zero
+ indicating addition and one indicating subtraction. */
+ uint64_t ddr__ntune : 5; /**< [ 44: 40](RO/H) DDR NCTL from compensation circuit. The encoded value provides debug information for the
+ compensation impedance on N-pulldown. */
+ uint64_t ddr__ptune : 5; /**< [ 49: 45](RO/H) DDR PCTL from compensation circuit. The encoded value provides debug information for the
+ compensation impedance on P-pullup. */
+ uint64_t rclk_char_mode : 1; /**< [ 50: 50](R/W) Reserved.
+ Internal:
+ Select core clock characterization mode. */
+ uint64_t reserved_51_63 : 13;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_comp_ctl2_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_51_63 : 13;
+ uint64_t rclk_char_mode : 1; /**< [ 50: 50](R/W) Reserved.
+ Internal:
+ Select core clock characterization mode. */
+ uint64_t ddr__ptune : 5; /**< [ 49: 45](RO/H) DDR PCTL from compensation circuit. The encoded value provides debug information for the
+ compensation impedance on P-pullup. */
+ uint64_t ddr__ntune : 5; /**< [ 44: 40](RO/H) DDR NCTL from compensation circuit. The encoded value provides debug information for the
+ compensation impedance on N-pulldown. */
+ uint64_t ptune_offset : 4; /**< [ 39: 36](R/W) Ptune offset value. This is a signed value where the MSB is a sign bit, with zero
+ indicating addition and one indicating subtraction. */
+ uint64_t ntune_offset : 4; /**< [ 35: 32](R/W) Ntune offset value. This is a signed value where the MSB is a sign bit, with zero
+ indicating addition and one indicating subtraction. */
+ uint64_t m180 : 1; /**< [ 31: 31](R/W) Reserved; must be zero.
+ Internal:
+ Cap impedance at 180 ohm, instead of 240 ohm. */
+ uint64_t byp : 1; /**< [ 30: 30](R/W) Bypass mode. When set, [PTUNE],[NTUNE] are the compensation setting. When clear,
+ [DDR__PTUNE],[DDR__NTUNE] are the compensation setting. */
+ uint64_t ptune : 5; /**< [ 29: 25](R/W) PCTL impedance control in bypass mode. */
+ uint64_t ntune : 5; /**< [ 24: 20](R/W) NCTL impedance control in bypass mode. */
+ uint64_t rodt_ctl : 4; /**< [ 19: 16](R/W) RODT NCTL impedance control bits. This field controls ODT values during a memory read.
+ 0x0 = No ODT.
+ 0x1 = 34 ohm.
+ 0x2 = 40 ohm.
+ 0x3 = 50 ohm.
+ 0x4 = 67 ohm.
+ 0x5 = 100 ohm.
+ 0x6 = 200 ohm.
+ _ else = Reserved. */
+ uint64_t control_ctl : 4; /**< [ 15: 12](R/W) Drive strength control for DDR_DIMMx_CS*_L/DDR_DIMMx_ODT_* /DDR_DIMMx_CKE* drivers.
+
+ 0x0 = Reserved.
+ 0x1 = 21 ohm.
+ 0x2 = 24 ohm.
+ 0x3 = 27 ohm.
+ 0x4 = 30 ohm.
+ 0x5 = 36 ohm.
+ 0x6 = 44 ohm.
+ _ else = Reserved. */
+ uint64_t cmd_ctl : 4; /**< [ 11: 8](R/W) Drive strength control for DDR_RAS_L_A\<16\>/DDR_CAS_L_A\<15\>/DDR_WE_L_A\<14\>/DDR_A\<13:0\>/
+ DDR_A\<15\>_BG1/DDR_A\<14\>_BG0/DDR_BA* /DDR_BA2_TEN/DDR_PAR/DDR_RESET_L drivers.
+
+ 0x0 = Reserved.
+ 0x1 = 21 ohm.
+ 0x2 = 24 ohm.
+ 0x3 = 27 ohm.
+ 0x4 = 30 ohm.
+ 0x5 = 36 ohm.
+ 0x6 = 44 ohm.
+ _ else = Reserved. */
+ uint64_t ck_ctl : 4; /**< [ 7: 4](R/W) Drive strength control for DDR_CK_*_P/N drivers.
+
+ 0x0 = Reserved.
+ 0x1 = 21 ohm.
+ 0x2 = 24 ohm.
+ 0x3 = 27 ohm.
+ 0x4 = 30 ohm.
+ 0x5 = 36 ohm.
+ 0x6 = 44 ohm.
+ _ else = Reserved. */
+ uint64_t dqx_ctl : 4; /**< [ 3: 0](R/W) Drive strength control for DDR_DQ* /DDR_CB* /DDR_DQS_*_P/N drivers.
+ 0x1 = 20 ohm.
+ 0x2 = 22 ohm.
+ 0x3 = 25 ohm.
+ 0x4 = 29 ohm.
+ 0x5 = 34 ohm.
+ 0x6 = 40 ohm.
+ 0x7 = 50 ohm.
+ _ else = Reserved. */
+#else /* Word 0 - Little Endian */
+ uint64_t dqx_ctl : 4; /**< [ 3: 0](R/W) Drive strength control for DDR_DQ* /DDR_CB* /DDR_DQS_*_P/N drivers.
+ 0x1 = 20 ohm.
+ 0x2 = 22 ohm.
+ 0x3 = 25 ohm.
+ 0x4 = 29 ohm.
+ 0x5 = 34 ohm.
+ 0x6 = 40 ohm.
+ 0x7 = 50 ohm.
+ _ else = Reserved. */
+ uint64_t ck_ctl : 4; /**< [ 7: 4](R/W) Drive strength control for DDR_CK_*_P/N drivers.
+
+ 0x0 = Reserved.
+ 0x1 = 21 ohm.
+ 0x2 = 24 ohm.
+ 0x3 = 27 ohm.
+ 0x4 = 30 ohm.
+ 0x5 = 36 ohm.
+ 0x6 = 44 ohm.
+ _ else = Reserved. */
+ uint64_t cmd_ctl : 4; /**< [ 11: 8](R/W) Drive strength control for DDR_RAS_L_A\<16\>/DDR_CAS_L_A\<15\>/DDR_WE_L_A\<14\>/DDR_A\<13:0\>/
+ DDR_A\<15\>_BG1/DDR_A\<14\>_BG0/DDR_BA* /DDR_BA2_TEN/DDR_PAR/DDR_RESET_L drivers.
+
+ 0x0 = Reserved.
+ 0x1 = 21 ohm.
+ 0x2 = 24 ohm.
+ 0x3 = 27 ohm.
+ 0x4 = 30 ohm.
+ 0x5 = 36 ohm.
+ 0x6 = 44 ohm.
+ _ else = Reserved. */
+ uint64_t control_ctl : 4; /**< [ 15: 12](R/W) Drive strength control for DDR_DIMMx_CS*_L/DDR_DIMMx_ODT_* /DDR_DIMMx_CKE* drivers.
+
+ 0x0 = Reserved.
+ 0x1 = 21 ohm.
+ 0x2 = 24 ohm.
+ 0x3 = 27 ohm.
+ 0x4 = 30 ohm.
+ 0x5 = 36 ohm.
+ 0x6 = 44 ohm.
+ _ else = Reserved. */
+ uint64_t rodt_ctl : 4; /**< [ 19: 16](R/W) RODT NCTL impedance control bits. This field controls ODT values during a memory read.
+ 0x0 = No ODT.
+ 0x1 = 34 ohm.
+ 0x2 = 40 ohm.
+ 0x3 = 50 ohm.
+ 0x4 = 67 ohm.
+ 0x5 = 100 ohm.
+ 0x6 = 200 ohm.
+ _ else = Reserved. */
+ uint64_t ntune : 5; /**< [ 24: 20](R/W) NCTL impedance control in bypass mode. */
+ uint64_t ptune : 5; /**< [ 29: 25](R/W) PCTL impedance control in bypass mode. */
+ uint64_t byp : 1; /**< [ 30: 30](R/W) Bypass mode. When set, [PTUNE],[NTUNE] are the compensation setting. When clear,
+ [DDR__PTUNE],[DDR__NTUNE] are the compensation setting. */
+ uint64_t m180 : 1; /**< [ 31: 31](R/W) Reserved; must be zero.
+ Internal:
+ Cap impedance at 180 ohm, instead of 240 ohm. */
+ uint64_t ntune_offset : 4; /**< [ 35: 32](R/W) Ntune offset value. This is a signed value where the MSB is a sign bit, with zero
+ indicating addition and one indicating subtraction. */
+ uint64_t ptune_offset : 4; /**< [ 39: 36](R/W) Ptune offset value. This is a signed value where the MSB is a sign bit, with zero
+ indicating addition and one indicating subtraction. */
+ uint64_t ddr__ntune : 5; /**< [ 44: 40](RO/H) DDR NCTL from compensation circuit. The encoded value provides debug information for the
+ compensation impedance on N-pulldown. */
+ uint64_t ddr__ptune : 5; /**< [ 49: 45](RO/H) DDR PCTL from compensation circuit. The encoded value provides debug information for the
+ compensation impedance on P-pullup. */
+ uint64_t rclk_char_mode : 1; /**< [ 50: 50](R/W) Reserved.
+ Internal:
+ Select core clock characterization mode. */
+ uint64_t reserved_51_63 : 13;
+#endif /* Word 0 - End */
+ } cn9;
+ /* struct bdk_lmcx_comp_ctl2_s cn81xx; */
+ /* struct bdk_lmcx_comp_ctl2_s cn88xx; */
+ struct bdk_lmcx_comp_ctl2_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_51_63 : 13;
+ uint64_t rclk_char_mode : 1; /**< [ 50: 50](R/W) Reserved.
+ Internal:
+ Select RCLK characterization mode. */
+ uint64_t ddr__ptune : 5; /**< [ 49: 45](RO/H) DDR PCTL from compensation circuit. The encoded value provides debug information for the
+ compensation impedance on P-pullup. */
+ uint64_t ddr__ntune : 5; /**< [ 44: 40](RO/H) DDR NCTL from compensation circuit. The encoded value provides debug information for the
+ compensation impedance on N-pulldown. */
+ uint64_t ptune_offset : 4; /**< [ 39: 36](R/W) Ptune offset value. This is a signed value where the MSB is a sign bit, with zero
+ indicating addition and one indicating subtraction. */
+ uint64_t ntune_offset : 4; /**< [ 35: 32](R/W) Ntune offset value. This is a signed value where the MSB is a sign bit, with zero
+ indicating addition and one indicating subtraction. */
+ uint64_t m180 : 1; /**< [ 31: 31](R/W) Reserved; must be zero.
+ Internal:
+ Cap impedance at 180 ohm, instead of 240 ohm. */
+ uint64_t byp : 1; /**< [ 30: 30](R/W) Bypass mode. When set, [PTUNE],[NTUNE] are the compensation setting. When clear,
+ [DDR__PTUNE],[DDR__NTUNE] are the compensation setting. */
+ uint64_t ptune : 5; /**< [ 29: 25](R/W) PCTL impedance control in bypass mode. */
+ uint64_t ntune : 5; /**< [ 24: 20](R/W) NCTL impedance control in bypass mode. */
+ uint64_t rodt_ctl : 4; /**< [ 19: 16](R/W) RODT NCTL impedance control bits. This field controls ODT values during a memory read.
+ 0x0 = No ODT.
+ 0x1 = 20 ohm.
+ 0x2 = 30 ohm.
+ 0x3 = 40 ohm.
+ 0x4 = 60 ohm.
+ 0x5 = 120 ohm.
+ _ else = Reserved.
+
+ In DDR4 mode:
+ 0x0 = No ODT.
+ 0x1 = 40 ohm.
+ 0x2 = 60 ohm.
+ 0x3 = 80 ohm.
+ 0x4 = 120 ohm.
+ 0x5 = 240 ohm.
+ 0x6 = 34 ohm.
+ 0x7 = 48 ohm.
+ _ else = Reserved. */
+ uint64_t control_ctl : 4; /**< [ 15: 12](R/W) Drive strength control for DDR_DIMMx_CS*_L/DDR_DIMMx_ODT_* /DDR_DIMMx_CKE* drivers.
+
+ In DDR3 mode:
+ 0x1 = 24 ohm.
+ 0x2 = 26.67 ohm.
+ 0x3 = 30 ohm.
+ 0x4 = 34.3 ohm.
+ 0x5 = 40 ohm.
+ 0x6 = 48 ohm.
+ 0x7 = 60 ohm.
+ _ else = Reserved.
+
+ In DDR4 mode:
+ 0x0 = Reserved.
+ 0x1 = Reserved.
+ 0x2 = 26 ohm.
+ 0x3 = 30 ohm.
+ 0x4 = 34 ohm.
+ 0x5 = 40 ohm.
+ 0x6 = 48 ohm.
+ _ else = Reserved. */
+ uint64_t cmd_ctl : 4; /**< [ 11: 8](R/W) Drive strength control for DDR_RAS_L_A\<16\>/DDR_CAS_L_A\<15\>/DDR_WE_L_A\<14\>/DDR_A\<13:0\>/
+ DDR_A\<15\>_BG1/DDR_A\<14\>_BG0/DDR_BA* /DDR_BA2_TEN/DDR_PAR/DDR_RESET_L drivers.
+
+ In DDR3 mode:
+ 0x1 = 24 ohm.
+ 0x2 = 26.67 ohm.
+ 0x3 = 30 ohm.
+ 0x4 = 34.3 ohm.
+ 0x5 = 40 ohm.
+ 0x6 = 48 ohm.
+ 0x7 = 60 ohm.
+ _ else = Reserved.
+
+ In DDR4 mode:
+ 0x0 = Reserved.
+ 0x1 = Reserved.
+ 0x2 = 26 ohm.
+ 0x3 = 30 ohm.
+ 0x4 = 34 ohm.
+ 0x5 = 40 ohm.
+ 0x6 = 48 ohm.
+ _ else = Reserved. */
+ uint64_t ck_ctl : 4; /**< [ 7: 4](R/W) Drive strength control for DDR_CK_*_P/N drivers.
+
+ In DDR3 mode:
+ 0x1 = 24 ohm.
+ 0x2 = 26.67 ohm.
+ 0x3 = 30 ohm.
+ 0x4 = 34.3 ohm.
+ 0x5 = 40 ohm.
+ 0x6 = 48 ohm.
+ 0x7 = 60 ohm.
+ _ else = Reserved.
+
+ In DDR4 mode:
+ 0x0 = Reserved.
+ 0x1 = Reserved.
+ 0x2 = 26 ohm.
+ 0x3 = 30 ohm.
+ 0x4 = 34 ohm.
+ 0x5 = 40 ohm.
+ 0x6 = 48 ohm.
+ _ else = Reserved. */
+ uint64_t dqx_ctl : 4; /**< [ 3: 0](R/W) Drive strength control for DDR_DQ* /DDR_CB* /DDR_DQS_*_P/N drivers.
+ 0x1 = 24 ohm.
+ 0x2 = 26.67 ohm.
+ 0x3 = 30 ohm.
+ 0x4 = 34.3 ohm.
+ 0x5 = 40 ohm.
+ 0x6 = 48 ohm.
+ 0x7 = 60 ohm.
+ _ else = Reserved. */
+#else /* Word 0 - Little Endian */
+ uint64_t dqx_ctl : 4; /**< [ 3: 0](R/W) Drive strength control for DDR_DQ* /DDR_CB* /DDR_DQS_*_P/N drivers.
+ 0x1 = 24 ohm.
+ 0x2 = 26.67 ohm.
+ 0x3 = 30 ohm.
+ 0x4 = 34.3 ohm.
+ 0x5 = 40 ohm.
+ 0x6 = 48 ohm.
+ 0x7 = 60 ohm.
+ _ else = Reserved. */
+ uint64_t ck_ctl : 4; /**< [ 7: 4](R/W) Drive strength control for DDR_CK_*_P/N drivers.
+
+ In DDR3 mode:
+ 0x1 = 24 ohm.
+ 0x2 = 26.67 ohm.
+ 0x3 = 30 ohm.
+ 0x4 = 34.3 ohm.
+ 0x5 = 40 ohm.
+ 0x6 = 48 ohm.
+ 0x7 = 60 ohm.
+ _ else = Reserved.
+
+ In DDR4 mode:
+ 0x0 = Reserved.
+ 0x1 = Reserved.
+ 0x2 = 26 ohm.
+ 0x3 = 30 ohm.
+ 0x4 = 34 ohm.
+ 0x5 = 40 ohm.
+ 0x6 = 48 ohm.
+ _ else = Reserved. */
+ uint64_t cmd_ctl : 4; /**< [ 11: 8](R/W) Drive strength control for DDR_RAS_L_A\<16\>/DDR_CAS_L_A\<15\>/DDR_WE_L_A\<14\>/DDR_A\<13:0\>/
+ DDR_A\<15\>_BG1/DDR_A\<14\>_BG0/DDR_BA* /DDR_BA2_TEN/DDR_PAR/DDR_RESET_L drivers.
+
+ In DDR3 mode:
+ 0x1 = 24 ohm.
+ 0x2 = 26.67 ohm.
+ 0x3 = 30 ohm.
+ 0x4 = 34.3 ohm.
+ 0x5 = 40 ohm.
+ 0x6 = 48 ohm.
+ 0x7 = 60 ohm.
+ _ else = Reserved.
+
+ In DDR4 mode:
+ 0x0 = Reserved.
+ 0x1 = Reserved.
+ 0x2 = 26 ohm.
+ 0x3 = 30 ohm.
+ 0x4 = 34 ohm.
+ 0x5 = 40 ohm.
+ 0x6 = 48 ohm.
+ _ else = Reserved. */
+ uint64_t control_ctl : 4; /**< [ 15: 12](R/W) Drive strength control for DDR_DIMMx_CS*_L/DDR_DIMMx_ODT_* /DDR_DIMMx_CKE* drivers.
+
+ In DDR3 mode:
+ 0x1 = 24 ohm.
+ 0x2 = 26.67 ohm.
+ 0x3 = 30 ohm.
+ 0x4 = 34.3 ohm.
+ 0x5 = 40 ohm.
+ 0x6 = 48 ohm.
+ 0x7 = 60 ohm.
+ _ else = Reserved.
+
+ In DDR4 mode:
+ 0x0 = Reserved.
+ 0x1 = Reserved.
+ 0x2 = 26 ohm.
+ 0x3 = 30 ohm.
+ 0x4 = 34 ohm.
+ 0x5 = 40 ohm.
+ 0x6 = 48 ohm.
+ _ else = Reserved. */
+ uint64_t rodt_ctl : 4; /**< [ 19: 16](R/W) RODT NCTL impedance control bits. This field controls ODT values during a memory read.
+ 0x0 = No ODT.
+ 0x1 = 20 ohm.
+ 0x2 = 30 ohm.
+ 0x3 = 40 ohm.
+ 0x4 = 60 ohm.
+ 0x5 = 120 ohm.
+ _ else = Reserved.
+
+ In DDR4 mode:
+ 0x0 = No ODT.
+ 0x1 = 40 ohm.
+ 0x2 = 60 ohm.
+ 0x3 = 80 ohm.
+ 0x4 = 120 ohm.
+ 0x5 = 240 ohm.
+ 0x6 = 34 ohm.
+ 0x7 = 48 ohm.
+ _ else = Reserved. */
+ uint64_t ntune : 5; /**< [ 24: 20](R/W) NCTL impedance control in bypass mode. */
+ uint64_t ptune : 5; /**< [ 29: 25](R/W) PCTL impedance control in bypass mode. */
+ uint64_t byp : 1; /**< [ 30: 30](R/W) Bypass mode. When set, [PTUNE],[NTUNE] are the compensation setting. When clear,
+ [DDR__PTUNE],[DDR__NTUNE] are the compensation setting. */
+ uint64_t m180 : 1; /**< [ 31: 31](R/W) Reserved; must be zero.
+ Internal:
+ Cap impedance at 180 ohm, instead of 240 ohm. */
+ uint64_t ntune_offset : 4; /**< [ 35: 32](R/W) Ntune offset value. This is a signed value where the MSB is a sign bit, with zero
+ indicating addition and one indicating subtraction. */
+ uint64_t ptune_offset : 4; /**< [ 39: 36](R/W) Ptune offset value. This is a signed value where the MSB is a sign bit, with zero
+ indicating addition and one indicating subtraction. */
+ uint64_t ddr__ntune : 5; /**< [ 44: 40](RO/H) DDR NCTL from compensation circuit. The encoded value provides debug information for the
+ compensation impedance on N-pulldown. */
+ uint64_t ddr__ptune : 5; /**< [ 49: 45](RO/H) DDR PCTL from compensation circuit. The encoded value provides debug information for the
+ compensation impedance on P-pullup. */
+ uint64_t rclk_char_mode : 1; /**< [ 50: 50](R/W) Reserved.
+ Internal:
+ Select RCLK characterization mode. */
+ uint64_t reserved_51_63 : 13;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_lmcx_comp_ctl2 bdk_lmcx_comp_ctl2_t;
+
+static inline uint64_t BDK_LMCX_COMP_CTL2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_COMP_CTL2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0880001b8ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0880001b8ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e0880001b8ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e0880001b8ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_COMP_CTL2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_COMP_CTL2(a) bdk_lmcx_comp_ctl2_t
+#define bustype_BDK_LMCX_COMP_CTL2(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_COMP_CTL2(a) "LMCX_COMP_CTL2"
+#define device_bar_BDK_LMCX_COMP_CTL2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_COMP_CTL2(a) (a)
+#define arguments_BDK_LMCX_COMP_CTL2(a) (a),-1,-1,-1
+
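+/* Sketch: the hardware compensation results can be read back for debug
+ * from the RO/H fields; note the drive-strength and RODT encodings above
+ * are model dependent (the cn9 tables differ from cn8/cn83xx), so values
+ * written to e.g. [RODT_CTL] must be chosen per chip. Assuming
+ * BDK_CSR_INIT() and standard printf():
+ *
+ *   BDK_CSR_INIT(comp, node, BDK_LMCX_COMP_CTL2(lmc));
+ *   printf("LMC%lu comp: ddr__ptune=%u ddr__ntune=%u\n", lmc,
+ *          (unsigned)comp.s.ddr__ptune, (unsigned)comp.s.ddr__ntune);
+ */
+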
+/**
+ * Register (RSL) lmc#_config
+ *
+ * LMC Memory Configuration Register
+ * This register controls certain parameters required for memory configuration. Note the
+ * following:
+ * * Priority order for hardware write operations to
+ * LMC()_CONFIG/LMC()_NXM_FADR/LMC()_ECC_SYND: DED error \> SEC error.
+ * * The self-refresh entry sequence(s) power the DLL up/down (depending on
+ * LMC()_MODEREG_PARAMS0[DLL]) when LMC()_CONFIG[SREF_WITH_DLL] is set.
+ * * Prior to the self-refresh exit sequence, LMC()_MODEREG_PARAMS0 should be reprogrammed
+ * (if needed) to the appropriate values.
+ *
+ * See LMC initialization sequence for the LMC bringup sequence.
+ */
+union bdk_lmcx_config
+{
+ uint64_t u;
+ struct bdk_lmcx_config_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t lrdimm_ena : 1; /**< [ 63: 63](R/W) Reserved.
+ Internal:
+ Load reduced DIMM enable. When set, allows the use of JEDEC DDR4 LRDIMMs. */
+ uint64_t bg2_enable : 1; /**< [ 62: 62](R/W) BG1 enable bit. Only has an effect when LMC()_CONFIG[MODEDDR4] = 1.
+ Set to one when using DDR4 x4 or x8 parts.
+ Clear to zero when using DDR4 x16 parts. */
+ uint64_t mode_x4dev : 1; /**< [ 61: 61](R/W) DDR x4 device mode. */
+ uint64_t mode32b : 1; /**< [ 60: 60](R/W) 32-bit datapath mode. When set, only 32 DQ pins are used. */
+ uint64_t scrz : 1; /**< [ 59: 59](R/W1S/H) Hide LMC()_SCRAMBLE_CFG0 and LMC()_SCRAMBLE_CFG1 when set. */
+ uint64_t early_unload_d1_r1 : 1; /**< [ 58: 58](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle early for Rank 3 reads.
+ The recommended EARLY_UNLOAD_D1_R1 value can be calculated after the final
+ LMC()_RLEVEL_RANK3[BYTE*] values are selected (as part of read leveling initialization).
+ Then, determine the largest read leveling setting for rank 3 (i.e. calculate
+ maxset=MAX(LMC()_RLEVEL_RANK3[BYTEi]) across all i), then set EARLY_UNLOAD_D1_R1 when the
+ low two bits of this largest setting are not 3 (i.e. EARLY_UNLOAD_D1_R1 = (maxset\<1:0\>
+ !=3)). */
+ uint64_t early_unload_d1_r0 : 1; /**< [ 57: 57](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle early for Rank 2 reads.
+ The recommended EARLY_UNLOAD_D1_R0 value can be calculated after the final
+ LMC()_RLEVEL_RANK2[BYTE*] values are selected (as part of read leveling initialization).
+ Then, determine the largest read leveling setting for rank 2 (i.e. calculate
+ maxset=MAX(LMC()_RLEVEL_RANK2[BYTEi]) across all i), then set EARLY_UNLOAD_D1_R0 when the
+ low two bits of this largest setting are not 3 (i.e. EARLY_UNLOAD_D1_R0 = (maxset\<1:0\>
+ !=3)). */
+ uint64_t early_unload_d0_r1 : 1; /**< [ 56: 56](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle early for Rank 1 reads.
+ The recommended EARLY_UNLOAD_D0_R1 value can be calculated after the final
+ LMC()_RLEVEL_RANK1[BYTE*] values are selected (as part of read leveling initialization).
+ Then, determine the largest read leveling setting for rank one (i.e. calculate
+ maxset=MAX(LMC()_RLEVEL_RANK1[BYTEi]) across all i), then set EARLY_UNLOAD_D0_R1 when the
+ low two bits of this largest setting is not 3 (i.e. EARLY_UNLOAD_D0_R1 = (maxset\<1:0\>
+ !=3)). */
+ uint64_t early_unload_d0_r0 : 1; /**< [ 55: 55](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle early for Rank 0 reads.
+ The recommended EARLY_UNLOAD_D0_R0 value can be calculated after the final
+ LMC()_RLEVEL_RANK0[BYTE*] values are selected (as part of read leveling initialization).
+ Then, determine the largest read leveling setting for rank 0 (i.e. calculate
+ maxset=MAX(LMC()_RLEVEL_RANK0[BYTEi]) across all i), then set EARLY_UNLOAD_D0_R0 when the
+ low two bits of this largest setting is not 3 (i.e. EARLY_UNLOAD_D0_R0 = (maxset\<1:0\>
+ !=3)). */
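+ /* Editor's note (illustrative, not from the BDK): the EARLY_UNLOAD_D*_R*
+    recipe above reduces to the following sketch, where rlevel_byte(rank, i)
+    is a hypothetical helper returning LMC()_RLEVEL_RANKn[BYTEi]:
+        int maxset = 0;
+        for (int i = 0; i < 9; i++)
+            maxset = MAX(maxset, rlevel_byte(rank, i));
+        int early_unload = ((maxset & 3) != 3); // i.e. maxset<1:0> != 3
+    */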
+ uint64_t init_status : 4; /**< [ 54: 51](RO/H) Indicates status of initialization. [INIT_STATUS][n] = 1 implies rank n has been
+ initialized.
+ Software must set necessary [RANKMASK] bits before executing the initialization sequence
+ using LMC()_SEQ_CTL. If the rank has been selected for init with the [RANKMASK] bits,
+ the [INIT_STATUS] bits will be set after successful initialization and after self-refresh
+ exit. [INIT_STATUS] determines the chip-selects that assert during refresh, ZQCS,
+ precharge
+ power-down entry/exit, and self-refresh entry SEQ_SELs. */
+ uint64_t mirrmask : 4; /**< [ 50: 47](R/W) "Mask determining which ranks are address-mirrored.
+ [MIRRMASK]\<n\> = 1 means rank n addresses are mirrored for
+ 0 \<= n \<= 3.
+ In DDR3, a mirrored read/write operation has the following differences:
+ * DDR#_BA\<1\> is swapped with DDR#_BA\<0\>.
+ * DDR#_A\<8\> is swapped with DDR#_A\<7\>.
+ * DDR#_A\<6\> is swapped with DDR#_A\<5\>.
+ * DDR#_A\<4\> is swapped with DDR#_A\<3\>.
+
+ When RANK_ENA = 0, MIRRMASK\<1\> MBZ.
+
+ In DDR4, a mirrored read/write operation has the following differences:
+ * DDR#_BG\<1\> is swapped with DDR#_BG\<0\>.
+ * DDR#_BA\<1\> is swapped with DDR#_BA\<0\>.
+ * DDR#_A\<13\> is swapped with DDR#_A\<11\>.
+ * DDR#_A\<8\> is swapped with DDR#_A\<7\>.
+ * DDR#_A\<6\> is swapped with DDR#_A\<5\>.
+ * DDR#_A\<4\> is swapped with DDR#_A\<3\>.
+
+ For CN70XX, MIRRMASK\<3:2\> MBZ.
+ * When RANK_ENA = 0, MIRRMASK\<1\> MBZ." */
+ uint64_t rankmask : 4; /**< [ 46: 43](R/W) Mask to select rank to be leveled/initialized. To write level/read level/initialize rank
+ i, set [RANKMASK]\<i\>:
+
+ \<pre\>
+ [RANK_ENA]=1 [RANK_ENA]=0
+ RANKMASK\<0\> = DIMM0_CS0 DIMM0_CS0
+ RANKMASK\<1\> = DIMM0_CS1 MBZ
+ RANKMASK\<2\> = Reserved Reserved
+ RANKMASK\<3\> = Reserved Reserved
+ \</pre\>
+
+ For read/write leveling, each rank has to be leveled separately, so [RANKMASK] should only
+ have one bit set. [RANKMASK] is not used during self-refresh entry/exit and precharge
+ power down entry/exit instruction sequences. When [RANK_ENA] = 0, [RANKMASK]\<1\> and
+ [RANKMASK]\<3\> MBZ. */
+ uint64_t rank_ena : 1; /**< [ 42: 42](R/W) "RANK enable (for use with dual-rank DIMMs).
+ * For dual-rank DIMMs, the [RANK_ENA] bit will enable the drive of the DDR#_DIMM*_CS*_L
+ and
+ ODT_\<1:0\> pins differently based on the ([PBANK_LSB] - 1) address bit.
+ * Write zero for SINGLE ranked DIMMs." */
+ uint64_t sref_with_dll : 1; /**< [ 41: 41](R/W) Self-refresh entry/exit write mode registers. When set, self-refresh entry sequence writes
+ MR2 and MR1 (in this order, in all ranks), and self-refresh exit sequence writes MR1, MR0,
+ MR2, and MR3 (in this order, for all ranks). The write operations occur before self-
+ refresh entry, and after self-refresh exit. When clear, self-refresh entry and exit
+ instruction sequences do not write any mode registers in the DDR3/4 parts. */
+ uint64_t early_dqx : 1; /**< [ 40: 40](R/W) Set this bit to send DQx signals one CK cycle earlier for the case when the shortest DQx
+ lines have a larger delay than the CK line. */
+ uint64_t ref_zqcs_int : 22; /**< [ 39: 18](R/W) Refresh interval is represented in number of 512 CK cycle increments. To get more precise
+ control of the refresh interval, LMC()_EXT_CONFIG[REF_INT_LSBS] can be set to a
+ nonzero value.
+ ZQCS interval is represented in a number of refresh intervals. A refresh sequence is
+ triggered when bits \<24:18\> are equal to 0x0, and a ZQCS sequence is triggered when
+ \<39:18\>
+ are equal to 0x0.
+
+ The ZQCS timer only decrements when the refresh timer is zero.
+
+ Program \<24:18\> to RND-DN(TREFI/clkPeriod/512).
+
+ A value of zero in bits \<24:18\> will effectively turn off refresh.
+
+ Program \<36:25\> to (RND-DN(ZQCS_Period / Refresh_Period) - 1), where Refresh_Period is the
+ effective period programmed in bits \<24:18\>. Note that this value should always be greater
+ than 32, to account for resistor calibration delays.
+
+ 000_00000000_0000000: Reserved
+
+ Max refresh interval = 127 * 512 = 65024 CK cycles.
+
+ Max ZQCS interval = 32768 * 127 * 512 = 2130706432 CK cycles.
+
+ If refresh interval is programmed to ~8 us, max ZQCS interval is ~262 ms, or ~4 ZQCS
+ operations per second.
+ LMC()_CONFIG[INIT_STATUS] determines which ranks receive the REF / ZQCS. LMC does not
+ send any refreshes / ZQCS's when LMC()_CONFIG[INIT_STATUS]=0. */
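+ /* Editor's note, a worked example with hypothetical numbers (not from the
+    BDK): with an 800 MHz CK (1.25 ns) and tREFI = 7.8 us,
+    <24:18> = RND-DN(7800 / 1.25 / 512) = RND-DN(12.19) = 12 (0xC),
+    for an effective refresh period of 12 * 512 * 1.25 ns = 7.68 us. A
+    16 ms ZQCS period then gives <36:25> = RND-DN(16000000 / 7680) - 1 =
+    2083 - 1 = 2082. */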
+ uint64_t reset : 1; /**< [ 17: 17](R/W) Reset one-shot pulse for LMC()_OPS_CNT, LMC()_IFB_CNT, and LMC()_DCLK_CNT.
+ To cause the reset, software writes this to a one, then rewrites it to a zero. */
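+ /* Editor's note (illustrative, not from the BDK): the one-shot pulse
+    described above, written with the BDK's read-modify-write helper and a
+    hypothetical node/lmc pair:
+        BDK_CSR_MODIFY(c, node, BDK_LMCX_CONFIG(lmc), c.s.reset = 1);
+        BDK_CSR_MODIFY(c, node, BDK_LMCX_CONFIG(lmc), c.s.reset = 0);
+    */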
+ uint64_t ecc_adr : 1; /**< [ 16: 16](R/W) Include memory reference address in the ECC calculation.
+ 0 = disabled, 1 = enabled. */
+ uint64_t forcewrite : 4; /**< [ 15: 12](R/W) Force the oldest outstanding write to complete after having waited for 2^[FORCEWRITE] CK
+ cycles. 0 = disabled. */
+ uint64_t idlepower : 3; /**< [ 11: 9](R/W) Enter precharge power-down mode after the memory controller has been idle for
+ 2^(2+[IDLEPOWER]) CK cycles. 0 = disabled.
+
+ This field should only be programmed after initialization.
+ LMC()_MODEREG_PARAMS0[PPD] determines whether the DRAM DLL is disabled during the
+ precharge power-down. */
+ uint64_t pbank_lsb : 4; /**< [ 8: 5](R/W) DIMM address bit select. Reverting to the explanation for [ROW_LSB], [PBANK_LSB] would be:
+ [ROW_LSB] bit + num_rowbits + num_rankbits
+
+ Values for [PBANK_LSB] are as follows:
+ 0x0: DIMM = mem_adr\<28\>; if [RANK_ENA]=1, rank = mem_adr\<27\>.
+ 0x1: DIMM = mem_adr\<29\>; if [RANK_ENA]=1, rank = mem_adr\<28\>.
+ 0x2: DIMM = mem_adr\<30\>; if [RANK_ENA]=1, rank = mem_adr\<29\>.
+ 0x3: DIMM = mem_adr\<31\>; if [RANK_ENA]=1, rank = mem_adr\<30\>.
+ 0x4: DIMM = mem_adr\<32\>; if [RANK_ENA]=1, rank = mem_adr\<31\>.
+ 0x5: DIMM = mem_adr\<33\>; if [RANK_ENA]=1, rank = mem_adr\<32\>.
+ 0x6: DIMM = mem_adr\<34\>; if [RANK_ENA]=1, rank = mem_adr\<33\>.
+ 0x7: DIMM = mem_adr\<35\>; if [RANK_ENA]=1, rank = mem_adr\<34\>.
+ 0x8: DIMM = mem_adr\<36\>; if [RANK_ENA]=1, rank = mem_adr\<35\>.
+ 0x9: DIMM = mem_adr\<37\>; if [RANK_ENA]=1, rank = mem_adr\<36\>.
+ 0xA: DIMM = 0; if [RANK_ENA]=1, rank = mem_adr\<37\>.
+ 0xB-0xF: Reserved.
+
+ For example, for a DIMM made of Samsung's K4B1G0846C-F7 1Gb (16M * 8 bit * 8 bank)
+ parts, the column address width = 10, so with 10b of col, 3b of bus, 3b of bank, [ROW_LSB] =
+ 16. So, row = mem_adr\<29:16\>.
+
+ With [RANK_ENA] = 0, [PBANK_LSB] = 2.
+ With [RANK_ENA] = 1, [PBANK_LSB] = 3.
+
+ Internal:
+ When interfacing with 8H 3DS, set this 0xA regardless of [RANK_ENA] value. */
+ uint64_t row_lsb : 3; /**< [ 4: 2](R/W) Row address bit select.
+ 0x0 = Address bit 14 is LSB.
+ 0x1 = Address bit 15 is LSB.
+ 0x2 = Address bit 16 is LSB.
+ 0x3 = Address bit 17 is LSB.
+ 0x4 = Address bit 18 is LSB.
+ 0x5 = Address bit 19 is LSB.
+ 0x6 = Address bit 20 is LSB.
+ 0x7 = Reserved.
+
+ Encoding used to determine which memory address bit position represents the low order DDR
+ ROW address. The processor's memory address\<34:7\> needs to be translated to DRAM addresses
+ (bnk,row,col,rank and DIMM) and that is a function of the following:
+ * Datapath width (64).
+ * Number of banks (8).
+ * Number of column bits of the memory part--specified indirectly by this register.
+ * Number of row bits of the memory part--specified indirectly by [PBANK_LSB].
+ * Number of ranks in a DIMM--specified by LMC()_CONFIG[RANK_ENA].
+ * Number of DIMMs in the system by the register below ([PBANK_LSB]).
+
+ Column address starts from mem_addr[3] for 64b (8 bytes) DQ width. [ROW_LSB] is
+ mem_adr[15] for 64b mode. Therefore, the [ROW_LSB] parameter should be set to
+ 0x1 (64b).
+
+ For example, for a DIMM made of Samsung's K4B1G0846C-F7 1Gb (16M * 8 bit * 8 bank)
+ parts, the column address width = 10, so with 10b of col, 3b of bus, 3b of bank, [ROW_LSB] =
+ 16. So, row = mem_adr\<29:16\>.
+
+ Refer to the cache-block read transaction example. */
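+ /* Editor's note, restating the Samsung example above as arithmetic: with
+    10 column bits, the column occupies mem_adr<12:3> (bits <2:0> are the
+    byte offset within the 64-bit bus), the 3 bank bits occupy
+    mem_adr<15:13>, and the 14-bit row starts at mem_adr<16>, so
+    [ROW_LSB] = 0x2. With [RANK_ENA] = 0 the DIMM bit then lands at
+    mem_adr<16 + 14> = mem_adr<30>, i.e. [PBANK_LSB] = 0x2, matching the
+    values quoted in [PBANK_LSB] above. */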
+ uint64_t ecc_ena : 1; /**< [ 1: 1](R/W) ECC enable. When set, enables the 8b ECC check/correct logic. Should be one when used with
+ DIMMs with ECC; zero, otherwise.
+
+ * When this mode is turned on, DQ\<71:64\> on write operations contains the ECC code
+ generated for the 64 bits of data which will be written in the memory. Later on read
+ operations, will be used to check for single-bit error (which will be auto-corrected) and
+ double-bit error (which will be reported).
+
+ * When not turned on, DQ\<71:64\> are driven to zero. Please refer to SEC_ERR, DED_ERR,
+ LMC()_NXM_FADR, and LMC()_ECC_SYND registers for diagnostics information when there is
+ an error. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t ecc_ena : 1; /**< [ 1: 1](R/W) ECC enable. When set, enables the 8b ECC check/correct logic. Should be one when used with
+ DIMMs with ECC; zero, otherwise.
+
+ * When this mode is turned on, DQ\<71:64\> on write operations contains the ECC code
+ generated for the 64 bits of data which will be written in the memory. Later on read
+ operations, will be used to check for single-bit error (which will be auto-corrected) and
+ double-bit error (which will be reported).
+
+ * When not turned on, DQ\<71:64\> are driven to zero. Please refer to SEC_ERR, DED_ERR,
+ LMC()_NXM_FADR, and LMC()_ECC_SYND registers for diagnostics information when there is
+ an error. */
+ uint64_t row_lsb : 3; /**< [ 4: 2](R/W) Row address bit select.
+ 0x0 = Address bit 14 is LSB.
+ 0x1 = Address bit 15 is LSB.
+ 0x2 = Address bit 16 is LSB.
+ 0x3 = Address bit 17 is LSB.
+ 0x4 = Address bit 18 is LSB.
+ 0x5 = Address bit 19 is LSB.
+ 0x6 = Address bit 20 is LSB.
+ 0x7 = Reserved.
+
+ Encoding used to determine which memory address bit position represents the low order DDR
+ ROW address. The processor's memory address\<34:7\> needs to be translated to DRAM addresses
+ (bnk,row,col,rank and DIMM) and that is a function of the following:
+ * Datapath width (64).
+ * Number of banks (8).
+ * Number of column bits of the memory part--specified indirectly by this register.
+ * Number of row bits of the memory part--specified indirectly by [PBANK_LSB].
+ * Number of ranks in a DIMM--specified by LMC()_CONFIG[RANK_ENA].
+ * Number of DIMMs in the system by the register below ([PBANK_LSB]).
+
+ Column address starts from mem_addr[3] for 64b (8 bytes) DQ width. [ROW_LSB] is
+ mem_adr[15] for 64b mode. Therefore, the [ROW_LSB] parameter should be set to
+ 0x1 (64b).
+
+ For example, for a DIMM made of Samsung's K4B1G0846C-F7 1Gb (16M * 8 bit * 8 bank)
+ parts, the column address width = 10, so with 10b of col, 3b of bus, 3b of bank, [ROW_LSB] =
+ 16. So, row = mem_adr\<29:16\>.
+
+ Refer to the cache-block read transaction example. */
+ uint64_t pbank_lsb : 4; /**< [ 8: 5](R/W) DIMM address bit select. Reverting to the explanation for [ROW_LSB], [PBANK_LSB] would be:
+ [ROW_LSB] bit + num_rowbits + num_rankbits
+
+ Values for [PBANK_LSB] are as follows:
+ 0x0: DIMM = mem_adr\<28\>; if [RANK_ENA]=1, rank = mem_adr\<27\>.
+ 0x1: DIMM = mem_adr\<29\>; if [RANK_ENA]=1, rank = mem_adr\<28\>.
+ 0x2: DIMM = mem_adr\<30\>; if [RANK_ENA]=1, rank = mem_adr\<29\>.
+ 0x3: DIMM = mem_adr\<31\>; if [RANK_ENA]=1, rank = mem_adr\<30\>.
+ 0x4: DIMM = mem_adr\<32\>; if [RANK_ENA]=1, rank = mem_adr\<31\>.
+ 0x5: DIMM = mem_adr\<33\>; if [RANK_ENA]=1, rank = mem_adr\<32\>.
+ 0x6: DIMM = mem_adr\<34\>; if [RANK_ENA]=1, rank = mem_adr\<33\>.
+ 0x7: DIMM = mem_adr\<35\>; if [RANK_ENA]=1, rank = mem_adr\<34\>.
+ 0x8: DIMM = mem_adr\<36\>; if [RANK_ENA]=1, rank = mem_adr\<35\>.
+ 0x9: DIMM = mem_adr\<37\>; if [RANK_ENA]=1, rank = mem_adr\<36\>.
+ 0xA: DIMM = 0; if [RANK_ENA]=1, rank = mem_adr\<37\>.
+ 0xB-0xF: Reserved.
+
+ For example, for a DIMM made of Samsung's K4B1G0846C-F7 1Gb (16M * 8 bit * 8 bank)
+ parts, the column address width = 10, so with 10b of col, 3b of bus, 3b of bank, [ROW_LSB] =
+ 16. So, row = mem_adr\<29:16\>.
+
+ With [RANK_ENA] = 0, [PBANK_LSB] = 2.
+ With [RANK_ENA] = 1, [PBANK_LSB] = 3.
+
+ Internal:
+ When interfacing with 8H 3DS, set this 0xA regardless of [RANK_ENA] value. */
+ uint64_t idlepower : 3; /**< [ 11: 9](R/W) Enter precharge power-down mode after the memory controller has been idle for
+ 2^(2+[IDLEPOWER]) CK cycles. 0 = disabled.
+
+ This field should only be programmed after initialization.
+ LMC()_MODEREG_PARAMS0[PPD] determines whether the DRAM DLL is disabled during the
+ precharge power-down. */
+ uint64_t forcewrite : 4; /**< [ 15: 12](R/W) Force the oldest outstanding write to complete after having waited for 2^[FORCEWRITE] CK
+ cycles. 0 = disabled. */
+ uint64_t ecc_adr : 1; /**< [ 16: 16](R/W) Include memory reference address in the ECC calculation.
+ 0 = disabled, 1 = enabled. */
+ uint64_t reset : 1; /**< [ 17: 17](R/W) Reset one-shot pulse for LMC()_OPS_CNT, LMC()_IFB_CNT, and LMC()_DCLK_CNT.
+ To cause the reset, software writes this to a one, then rewrites it to a zero. */
+ uint64_t ref_zqcs_int : 22; /**< [ 39: 18](R/W) Refresh interval is represented in number of 512 CK cycle increments. To get more precise
+ control of the refresh interval, LMC()_EXT_CONFIG[REF_INT_LSBS] can be set to a
+ nonzero value.
+ ZQCS interval is represented in a number of refresh intervals. A refresh sequence is
+ triggered when bits \<24:18\> are equal to 0x0, and a ZQCS sequence is triggered when
+ \<39:18\>
+ are equal to 0x0.
+
+ The ZQCS timer only decrements when the refresh timer is zero.
+
+ Program \<24:18\> to RND-DN(TREFI/clkPeriod/512).
+
+ A value of zero in bits \<24:18\> will effectively turn off refresh.
+
+ Program \<36:25\> to (RND-DN(ZQCS_Period / Refresh_Period) - 1), where Refresh_Period is the
+ effective period programmed in bits \<24:18\>. Note that this value should always be greater
+ than 32, to account for resistor calibration delays.
+
+ 000_00000000_0000000: Reserved
+
+ Max refresh interval = 127 * 512 = 65024 CK cycles.
+
+ Max ZQCS interval = 32768 * 127 * 512 = 2130706432 CK cycles.
+
+ If refresh interval is programmed to ~8 us, max ZQCS interval is ~262 ms, or ~4 ZQCS
+ operations per second.
+ LMC()_CONFIG[INIT_STATUS] determines which ranks receive the REF / ZQCS. LMC does not
+ send any refreshes / ZQCS's when LMC()_CONFIG[INIT_STATUS]=0. */
+ uint64_t early_dqx : 1; /**< [ 40: 40](R/W) Set this bit to send DQx signals one CK cycle earlier for the case when the shortest DQx
+ lines have a larger delay than the CK line. */
+ uint64_t sref_with_dll : 1; /**< [ 41: 41](R/W) Self-refresh entry/exit write mode registers. When set, self-refresh entry sequence writes
+ MR2 and MR1 (in this order, in all ranks), and self-refresh exit sequence writes MR1, MR0,
+ MR2, and MR3 (in this order, for all ranks). The write operations occur before self-
+ refresh entry, and after self-refresh exit. When clear, self-refresh entry and exit
+ instruction sequences do not write any mode registers in the DDR3/4 parts. */
+ uint64_t rank_ena : 1; /**< [ 42: 42](R/W) "RANK enable (for use with dual-rank DIMMs).
+ * For dual-rank DIMMs, the [RANK_ENA] bit will enable the drive of the DDR#_DIMM*_CS*_L
+ and
+ ODT_\<1:0\> pins differently based on the ([PBANK_LSB] - 1) address bit.
+ * Write zero for SINGLE ranked DIMMs." */
+ uint64_t rankmask : 4; /**< [ 46: 43](R/W) Mask to select rank to be leveled/initialized. To write level/read level/initialize rank
+ i, set [RANKMASK]\<i\>:
+
+ \<pre\>
+ [RANK_ENA]=1 [RANK_ENA]=0
+ RANKMASK\<0\> = DIMM0_CS0 DIMM0_CS0
+ RANKMASK\<1\> = DIMM0_CS1 MBZ
+ RANKMASK\<2\> = Reserved Reserved
+ RANKMASK\<3\> = Reserved Reserved
+ \</pre\>
+
+ For read/write leveling, each rank has to be leveled separately, so [RANKMASK] should only
+ have one bit set. [RANKMASK] is not used during self-refresh entry/exit and precharge
+ power down entry/exit instruction sequences. When [RANK_ENA] = 0, [RANKMASK]\<1\> and
+ [RANKMASK]\<3\> MBZ. */
+ uint64_t mirrmask : 4; /**< [ 50: 47](R/W) "Mask determining which ranks are address-mirrored.
+ [MIRRMASK]\<n\> = 1 means rank n addresses are mirrored for
+ 0 \<= n \<= 3.
+ In DDR3, a mirrored read/write operation has the following differences:
+ * DDR#_BA\<1\> is swapped with DDR#_BA\<0\>.
+ * DDR#_A\<8\> is swapped with DDR#_A\<7\>.
+ * DDR#_A\<6\> is swapped with DDR#_A\<5\>.
+ * DDR#_A\<4\> is swapped with DDR#_A\<3\>.
+
+ When RANK_ENA = 0, MIRRMASK\<1\> MBZ.
+
+ In DDR4, a mirrored read/write operation has the following differences:
+ * DDR#_BG\<1\> is swapped with DDR#_BG\<0\>.
+ * DDR#_BA\<1\> is swapped with DDR#_BA\<0\>.
+ * DDR#_A\<13\> is swapped with DDR#_A\<11\>.
+ * DDR#_A\<8\> is swapped with DDR#_A\<7\>.
+ * DDR#_A\<6\> is swapped with DDR#_A\<5\>.
+ * DDR#_A\<4\> is swapped with DDR#_A\<3\>.
+
+ For CN70XX, MIRRMASK\<3:2\> MBZ.
+ * When RANK_ENA = 0, MIRRMASK\<1\> MBZ." */
+ uint64_t init_status : 4; /**< [ 54: 51](RO/H) Indicates status of initialization. [INIT_STATUS][n] = 1 implies rank n has been
+ initialized.
+ Software must set necessary [RANKMASK] bits before executing the initialization sequence
+ using LMC()_SEQ_CTL. If the rank has been selected for init with the [RANKMASK] bits,
+ the [INIT_STATUS] bits will be set after successful initialization and after self-refresh
+ exit. [INIT_STATUS] determines the chip-selects that assert during refresh, ZQCS,
+ precharge
+ power-down entry/exit, and self-refresh entry SEQ_SELs. */
+ uint64_t early_unload_d0_r0 : 1; /**< [ 55: 55](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle early for Rank 0 reads.
+ The recommended EARLY_UNLOAD_D0_R0 value can be calculated after the final
+ LMC()_RLEVEL_RANK0[BYTE*] values are selected (as part of read leveling initialization).
+ Then, determine the largest read leveling setting for rank 0 (i.e. calculate
+ maxset=MAX(LMC()_RLEVEL_RANK0[BYTEi]) across all i), then set EARLY_UNLOAD_D0_R0 when the
+ low two bits of this largest setting is not 3 (i.e. EARLY_UNLOAD_D0_R0 = (maxset\<1:0\>
+ !=3)). */
+ uint64_t early_unload_d0_r1 : 1; /**< [ 56: 56](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle early for Rank 1 reads.
+ The recommended EARLY_UNLOAD_D0_R1 value can be calculated after the final
+ LMC()_RLEVEL_RANK1[BYTE*] values are selected (as part of read leveling initialization).
+ Then, determine the largest read leveling setting for rank one (i.e. calculate
+ maxset=MAX(LMC()_RLEVEL_RANK1[BYTEi]) across all i), then set EARLY_UNLOAD_D0_R1 when the
+ low two bits of this largest setting is not 3 (i.e. EARLY_UNLOAD_D0_R1 = (maxset\<1:0\>
+ !=3)). */
+ uint64_t early_unload_d1_r0 : 1; /**< [ 57: 57](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle early for Rank 2 reads.
+ The recommended EARLY_UNLOAD_D1_R0 value can be calculated after the final
+ LMC()_RLEVEL_RANK2[BYTE*] values are selected (as part of read leveling initialization).
+ Then, determine the largest read leveling setting for rank 2 (i.e. calculate
+ maxset=MAX(LMC()_RLEVEL_RANK2[BYTEi]) across all i), then set EARLY_UNLOAD_D1_R0 when the
+ low two bits of this largest setting is not 3 (i.e. EARLY_UNLOAD_D1_R0 = (maxset\<1:0\>
+ !=3)). */
+ uint64_t early_unload_d1_r1 : 1; /**< [ 58: 58](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle early for Rank 3 reads.
+ The recommended EARLY_UNLOAD_D1_R1 value can be calculated after the final
+ LMC()_RLEVEL_RANK3[BYTE*] values are selected (as part of read leveling initialization).
+ Then, determine the largest read leveling setting for rank 3 (i.e. calculate
+ maxset=MAX(LMC()_RLEVEL_RANK3[BYTEi]) across all i), then set EARLY_UNLOAD_D1_R1 when the
+ low two bits of this largest setting is not 3 (i.e. EARLY_UNLOAD_D1_R1 = (maxset\<1:0\>
+ !=3)). */
+ uint64_t scrz : 1; /**< [ 59: 59](R/W1S/H) Hide LMC()_SCRAMBLE_CFG0 and LMC()_SCRAMBLE_CFG1 when set. */
+ uint64_t mode32b : 1; /**< [ 60: 60](R/W) 32-bit datapath mode. When set, only 32 DQ pins are used. */
+ uint64_t mode_x4dev : 1; /**< [ 61: 61](R/W) DDR x4 device mode. */
+ uint64_t bg2_enable : 1; /**< [ 62: 62](R/W) BG1 enable bit. Only has an effect when LMC()_CONFIG[MODEDDR4] = 1.
+ Set to one when using DDR4 x4 or x8 parts.
+ Clear to zero when using DDR4 x16 parts. */
+ uint64_t lrdimm_ena : 1; /**< [ 63: 63](R/W) Reserved.
+ Internal:
+ Load reduced DIMM enable. When set allows the use of JEDEC DDR4 LRDIMMs. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_config_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t lrdimm_ena : 1; /**< [ 63: 63](R/W) Reserved.
+ Internal:
+ Load reduced DIMM enable. When set allows the use of JEDEC DDR4 LRDIMMs. */
+ uint64_t bg2_enable : 1; /**< [ 62: 62](R/W) BG1 enable bit.
+ Set to one when using DDR4 x4 or x8 parts.
+ Clear to zero when using DDR4 x16 parts. */
+ uint64_t mode_x4dev : 1; /**< [ 61: 61](R/W) DDR x4 device mode. */
+ uint64_t mode32b : 1; /**< [ 60: 60](R/W) 32-bit datapath mode. When set, only 32 DQ pins are used. */
+ uint64_t reserved_59 : 1;
+ uint64_t early_unload_d1_r1 : 1; /**< [ 58: 58](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle early for Rank 3 reads.
+ The recommended [EARLY_UNLOAD_D1_R1] value can be calculated after the final
+ LMC()_RLEVEL_RANK(3)[BYTE*] values are selected (as part of read leveling initialization).
+ Then, determine the largest read leveling setting for rank 3 (i.e. calculate
+ maxset=MAX(LMC()_RLEVEL_RANK(3)[BYTEi]) across all i), then set [EARLY_UNLOAD_D1_R1] when the
+ low three bits of this largest setting is smaller than 4 (i.e.
+ [EARLY_UNLOAD_D1_R1] = (maxset\<2:0\> \< 4)). */
+ uint64_t early_unload_d1_r0 : 1; /**< [ 57: 57](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle early for Rank 2 reads.
+ The recommended [EARLY_UNLOAD_D1_R0] value can be calculated after the final
+ LMC()_RLEVEL_RANK(2)[BYTE*] values are selected (as part of read leveling initialization).
+ Then, determine the largest read leveling setting for rank 2 (i.e. calculate
+ maxset=MAX(LMC()_RLEVEL_RANK(2)[BYTEi]) across all i), then set [EARLY_UNLOAD_D1_R0] when the
+ low three bits of this largest setting is smaller than 4 (i.e.
+ [EARLY_UNLOAD_D1_R0] = (maxset\<2:0\> \< 4)). */
+ uint64_t early_unload_d0_r1 : 1; /**< [ 56: 56](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle early for Rank 1 reads.
+ The recommended [EARLY_UNLOAD_D0_R1] value can be calculated after the final
+ LMC()_RLEVEL_RANK(1)[BYTE*] values are selected (as part of read leveling initialization).
+ Then, determine the largest read leveling setting for rank one (i.e. calculate
+ maxset=MAX(LMC()_RLEVEL_RANK(1)[BYTEi]) across all i), then set [EARLY_UNLOAD_D0_R1] when the
+ low three bits of this largest setting is smaller than 4 (i.e.
+ [EARLY_UNLOAD_D0_R1] = (maxset\<2:0\> \< 4)). */
+ uint64_t early_unload_d0_r0 : 1; /**< [ 55: 55](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle early for Rank 0 reads.
+ The recommended [EARLY_UNLOAD_D0_R0] value can be calculated after the final
+ LMC()_RLEVEL_RANK(0)[BYTE*] values are selected (as part of read leveling initialization).
+ Then, determine the largest read leveling setting for rank 0 (i.e. calculate
+ maxset=MAX(LMC()_RLEVEL_RANK(0)[BYTEi]) across all i), then set [EARLY_UNLOAD_D0_R0] when the
+ low three bits of this largest setting is smaller than 4 (i.e.
+ [EARLY_UNLOAD_D0_R0] = (maxset\<2:0\> \< 4)). */
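+ /* Editor's note (illustrative, not from the BDK): the CN9XXX variant of
+    the sketch given earlier in this file becomes
+        int early_unload = ((maxset & 7) < 4); // i.e. maxset<2:0> < 4
+    with maxset again the largest LMC()_RLEVEL_RANK(n)[BYTE*] setting. */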
+ uint64_t init_status : 4; /**< [ 54: 51](RO/H) Indicates status of initialization. [INIT_STATUS][n] = 1 implies rank n has been
+ initialized.
+ Software must set necessary [RANKMASK] bits before executing the initialization sequence
+ using LMC()_SEQ_CTL. If the rank has been selected for init with the [RANKMASK] bits,
+ the [INIT_STATUS] bits will be set after successful initialization and after self-refresh
+ exit. [INIT_STATUS] determines the chip-selects that assert during refresh, ZQCS,
+ precharge
+ power-down entry/exit, and self-refresh entry SEQ_SELs. */
+ uint64_t mirrmask : 4; /**< [ 50: 47](R/W) "Mask determining which ranks are address-mirrored.
+ [MIRRMASK]\<n\> = 1 means rank n addresses are mirrored for
+ 0 \<= n \<= 3.
+ A mirrored read/write operation has the following differences:
+ * DDR#_BG\<1\> is swapped with DDR#_BG\<0\>.
+ * DDR#_BA\<1\> is swapped with DDR#_BA\<0\>.
+ * DDR#_A\<13\> is swapped with DDR#_A\<11\>.
+ * DDR#_A\<8\> is swapped with DDR#_A\<7\>.
+ * DDR#_A\<6\> is swapped with DDR#_A\<5\>.
+ * DDR#_A\<4\> is swapped with DDR#_A\<3\>." */
+ uint64_t rankmask : 4; /**< [ 46: 43](R/W) Mask to select rank to be leveled/initialized. To write level/read level/initialize rank
+ i, set [RANKMASK]\<i\>:
+
+ \<pre\>
+ [RANK_ENA]=1 [RANK_ENA]=0
+ RANKMASK\<0\> = DIMM0_CS0 DIMM0_CS0
+ RANKMASK\<1\> = DIMM0_CS1 MBZ
+ RANKMASK\<2\> = DIMM1_CS0 DIMM1_CS0
+ RANKMASK\<3\> = DIMM1_CS1 MBZ
+ \</pre\>
+
+ For read/write leveling, each rank has to be leveled separately, so [RANKMASK] should only
+ have one bit set. [RANKMASK] is not used during self-refresh entry/exit and precharge
+ power down entry/exit instruction sequences. When [RANK_ENA] = 0, [RANKMASK]\<1\> and
+ [RANKMASK]\<3\> MBZ. */
+ uint64_t rank_ena : 1; /**< [ 42: 42](R/W) "RANK enable (for use with dual-rank DIMMs).
+ * For dual-rank DIMMs, the [RANK_ENA] bit will enable the drive of the DDR#_DIMM*_CS*_L
+ and
+ ODT_\<1:0\> pins differently based on the ([PBANK_LSB] - 1) address bit.
+ * Write zero for SINGLE ranked DIMMs." */
+ uint64_t sref_with_dll : 1; /**< [ 41: 41](R/W) Self-refresh entry/exit write mode registers. When set, self-refresh entry sequence writes
+ MR2 and MR1 (in this order, in all ranks), and self-refresh exit sequence writes MR1, MR0,
+ MR2, and MR3 (in this order, for all ranks). The write operations occur before self-
+ refresh entry, and after self-refresh exit. When clear, self-refresh entry and exit
+ instruction sequences do not write any mode registers in the DDR4 parts. */
+ uint64_t early_dqx : 1; /**< [ 40: 40](R/W) Set this bit to send DQx signals one CK cycle earlier for the case when the shortest DQx
+ lines have a larger delay than the CK line. */
+ uint64_t ref_zqcs_int : 22; /**< [ 39: 18](R/W) Refresh interval is represented in number of 512 CK cycle increments. To get more precise
+ control of the refresh interval, LMC()_EXT_CONFIG[REF_INT_LSBS] can be set to a
+ nonzero value.
+ ZQCS interval is represented in a number of refresh intervals. A refresh sequence is
+ triggered when bits \<24:18\> are equal to 0x0, and a ZQCS sequence is triggered when
+ \<39:18\>
+ are equal to 0x0.
+
+ The ZQCS timer only decrements when the refresh timer is zero.
+
+ Program \<24:18\> to RND-DN(TREFI/clkPeriod/512).
+
+ A value of zero in bits \<24:18\> will effectively turn off refresh.
+
+ Program \<36:25\> to (RND-DN(ZQCS_Period / Refresh_Period) - 1), where Refresh_Period is the
+ effective period programmed in bits \<24:18\>. Note that this value should always be greater
+ than 32, to account for resistor calibration delays.
+
+ 000_00000000_0000000: Reserved
+
+ Max refresh interval = 127 * 512 = 65024 CK cycles.
+
+ Max ZQCS interval = 32768 * 127 * 512 = 2130706432 CK cycles.
+
+ If refresh interval is programmed to ~8 us, max ZQCS interval is ~262 ms, or ~4 ZQCS
+ operations per second.
+ LMC()_CONFIG[INIT_STATUS] determines which ranks receive the REF / ZQCS. LMC does not
+ send any refreshes / ZQCS's when LMC()_CONFIG[INIT_STATUS]=0. */
+ uint64_t reset : 1; /**< [ 17: 17](R/W) Reset one-shot pulse for LMC()_OPS_CNT, LMC()_IFB_CNT, and LMC()_DCLK_CNT.
+ To cause the reset, software writes this to a one, then rewrites it to a zero. */
+ uint64_t reserved_16 : 1;
+ uint64_t forcewrite : 4; /**< [ 15: 12](R/W) Force the oldest outstanding write to complete after having waited for 2^[FORCEWRITE] CK
+ cycles. 0 = disabled. */
+ uint64_t idlepower : 3; /**< [ 11: 9](R/W) Enter precharge power-down mode after the memory controller has been idle for
+ 2^(2+[IDLEPOWER]) CK cycles. 0 = disabled.
+
+ This field should only be programmed after initialization.
+ LMC()_MODEREG_PARAMS0[PPD] determines whether the DRAM DLL is disabled during the
+ precharge power-down. */
+ uint64_t pbank_lsb : 4; /**< [ 8: 5](R/W) DIMM address bit select. Reverting to the explanation for [ROW_LSB], [PBANK_LSB] would be:
+ [ROW_LSB] bit + num_rowbits + num_rankbits
+
+ Values for [PBANK_LSB] are as follows:
+ 0x0: DIMM = mem_adr\<28\>; if [RANK_ENA]=1, rank = mem_adr\<27\>.
+ 0x1: DIMM = mem_adr\<29\>; if [RANK_ENA]=1, rank = mem_adr\<28\>.
+ 0x2: DIMM = mem_adr\<30\>; if [RANK_ENA]=1, rank = mem_adr\<29\>.
+ 0x3: DIMM = mem_adr\<31\>; if [RANK_ENA]=1, rank = mem_adr\<30\>.
+ 0x4: DIMM = mem_adr\<32\>; if [RANK_ENA]=1, rank = mem_adr\<31\>.
+ 0x5: DIMM = mem_adr\<33\>; if [RANK_ENA]=1, rank = mem_adr\<32\>.
+ 0x6: DIMM = mem_adr\<34\>; if [RANK_ENA]=1, rank = mem_adr\<33\>.
+ 0x7: DIMM = mem_adr\<35\>; if [RANK_ENA]=1, rank = mem_adr\<34\>.
+ 0x8: DIMM = mem_adr\<36\>; if [RANK_ENA]=1, rank = mem_adr\<35\>.
+ 0x9: DIMM = mem_adr\<37\>; if [RANK_ENA]=1, rank = mem_adr\<36\>.
+ 0xA: DIMM = mem_adr\<38\>; if [RANK_ENA]=1, rank = mem_adr\<37\>.
+ 0xB: DIMM = mem_adr\<39\>; if [RANK_ENA]=1, rank = mem_adr\<38\>.
+ 0xC: DIMM = mem_adr\<40\>; if [RANK_ENA]=1, rank = mem_adr\<39\>.
+ 0xD: DIMM = mem_adr\<41\>; if [RANK_ENA]=1, rank = mem_adr\<40\>.
+ 0xE: DIMM = 0; if [RANK_ENA]=1, rank = mem_adr\<41\>.
+ 0xF: Reserved.
+
+ For example, for a DIMM made of Samsung's K4B1G0846C-F7 1Gb (16M * 8 bit * 8 bank)
+ parts, the column address width = 10, so with 10b of col, 3b of bus, 3b of bank, [ROW_LSB] =
+ 16. So, row = mem_adr\<29:16\>.
+
+ With [RANK_ENA] = 0, [PBANK_LSB] = 2.
+ With [RANK_ENA] = 1, [PBANK_LSB] = 3.
+
+ Internal:
+ When interfacing with 8H 3DS, set this 0xA regardless of [RANK_ENA] value. */
+ uint64_t row_lsb : 3; /**< [ 4: 2](R/W) Row address bit select.
+ 0x0 = Address bit 14 is LSB.
+ 0x1 = Address bit 15 is LSB.
+ 0x2 = Address bit 16 is LSB.
+ 0x3 = Address bit 17 is LSB.
+ 0x4 = Address bit 18 is LSB.
+ 0x5 = Address bit 19 is LSB.
+ 0x6 = Address bit 20 is LSB.
+ 0x7 = Reserved.
+
+ Encoding used to determine which memory address bit position represents the low order DDR
+ ROW address. The processor's memory address\<34:7\> needs to be translated to DRAM addresses
+ (bnk,row,col,rank and DIMM) and that is a function of the following:
+ * Datapath width (64).
+ * Number of banks (8).
+ * Number of column bits of the memory part--specified indirectly by this register.
+ * Number of row bits of the memory part--specified indirectly by [PBANK_LSB].
+ * Number of ranks in a DIMM--specified by LMC()_CONFIG[RANK_ENA].
+ * Number of DIMMs in the system by the register below ([PBANK_LSB]).
+
+ Column address starts from mem_addr[3] for 64b (8 bytes) DQ width. [ROW_LSB] is
+ mem_adr[15] for 64b mode. Therefore, the [ROW_LSB] parameter should be set to
+ 0x1 (64b).
+
+ For example, for a DIMM made of Samsung's K4B1G0846C-F7 1Gb (16M * 8 bit * 8 bank)
+ parts, the column address width = 10, so with 10b of col, 3b of bus, 3b of bank, [ROW_LSB] =
+ 16. So, row = mem_adr\<29:16\>.
+
+ Refer to the cache-block read transaction example. */
+ uint64_t ecc_ena : 1; /**< [ 1: 1](R/W) ECC enable. When set, enables the 8b ECC check/correct logic. Should be one when used with
+ DIMMs with ECC; zero, otherwise.
+
+ * When this mode is turned on, DQ\<71:64\> on write operations contains the ECC code
+ generated for the 64 bits of data which will be written in the memory. Later on read
+ operations, will be used to check for single-bit error (which will be auto-corrected) and
+ double-bit error (which will be reported).
+
+ * When not turned on, DQ\<71:64\> are driven to zero. Please refer to SEC_ERR, DED_ERR,
+ LMC()_NXM_FADR, and LMC()_ECC_SYND registers for diagnostics information when there is
+ an error. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t ecc_ena : 1; /**< [ 1: 1](R/W) ECC enable. When set, enables the 8b ECC check/correct logic. Should be one when used with
+ DIMMs with ECC; zero, otherwise.
+
+ * When this mode is turned on, DQ\<71:64\> on write operations contains the ECC code
+ generated for the 64 bits of data which will be written in the memory. Later on read
+ operations, will be used to check for single-bit error (which will be auto-corrected) and
+ double-bit error (which will be reported).
+
+ * When not turned on, DQ\<71:64\> are driven to zero. Please refer to SEC_ERR, DED_ERR,
+ LMC()_NXM_FADR, and LMC()_ECC_SYND registers for diagnostics information when there is
+ an error. */
+ uint64_t row_lsb : 3; /**< [ 4: 2](R/W) Row address bit select.
+ 0x0 = Address bit 14 is LSB.
+ 0x1 = Address bit 15 is LSB.
+ 0x2 = Address bit 16 is LSB.
+ 0x3 = Address bit 17 is LSB.
+ 0x4 = Address bit 18 is LSB.
+ 0x5 = Address bit 19 is LSB.
+ 0x6 = Address bit 20 is LSB.
+ 0x7 = Reserved.
+
+ Encoding used to determine which memory address bit position represents the low order DDR
+ ROW address. The processor's memory address\<34:7\> needs to be translated to DRAM addresses
+ (bnk,row,col,rank and DIMM) and that is a function of the following:
+ * Datapath width (64).
+ * Number of banks (8).
+ * Number of column bits of the memory part--specified indirectly by this register.
+ * Number of row bits of the memory part--specified indirectly by [PBANK_LSB].
+ * Number of ranks in a DIMM--specified by LMC()_CONFIG[RANK_ENA].
+ * Number of DIMMs in the system by the register below ([PBANK_LSB]).
+
+ Column address starts from mem_addr[3] for 64b (8 bytes) DQ width. [ROW_LSB] is
+ mem_adr[15] for 64b mode. Therefore, the [ROW_LSB] parameter should be set to
+ 0x1 (64b).
+
+ For example, for a DIMM made of Samsung's K4B1G0846C-F7 1Gb (16M * 8 bit * 8 bank)
+ parts, the column address width = 10, so with 10b of col, 3b of bus, 3b of bank, [ROW_LSB] =
+ 16. So, row = mem_adr\<29:16\>.
+
+ Refer to the cache-block read transaction example. */
+ uint64_t pbank_lsb : 4; /**< [ 8: 5](R/W) DIMM address bit select. Reverting to the explanation for [ROW_LSB], [PBANK_LSB] would be:
+ [ROW_LSB] bit + num_rowbits + num_rankbits
+
+ Values for [PBANK_LSB] are as follows:
+ 0x0: DIMM = mem_adr\<28\>; if [RANK_ENA]=1, rank = mem_adr\<27\>.
+ 0x1: DIMM = mem_adr\<29\>; if [RANK_ENA]=1, rank = mem_adr\<28\>.
+ 0x2: DIMM = mem_adr\<30\>; if [RANK_ENA]=1, rank = mem_adr\<29\>.
+ 0x3: DIMM = mem_adr\<31\>; if [RANK_ENA]=1, rank = mem_adr\<30\>.
+ 0x4: DIMM = mem_adr\<32\>; if [RANK_ENA]=1, rank = mem_adr\<31\>.
+ 0x5: DIMM = mem_adr\<33\>; if [RANK_ENA]=1, rank = mem_adr\<32\>.
+ 0x6: DIMM = mem_adr\<34\>; if [RANK_ENA]=1, rank = mem_adr\<33\>.
+ 0x7: DIMM = mem_adr\<35\>; if [RANK_ENA]=1, rank = mem_adr\<34\>.
+ 0x8: DIMM = mem_adr\<36\>; if [RANK_ENA]=1, rank = mem_adr\<35\>.
+ 0x9: DIMM = mem_adr\<37\>; if [RANK_ENA]=1, rank = mem_adr\<36\>.
+ 0xA: DIMM = mem_adr\<38\>; if [RANK_ENA]=1, rank = mem_adr\<37\>.
+ 0xB: DIMM = mem_adr\<39\>; if [RANK_ENA]=1, rank = mem_adr\<38\>.
+ 0xC: DIMM = mem_adr\<40\>; if [RANK_ENA]=1, rank = mem_adr\<39\>.
+ 0xD: DIMM = mem_adr\<41\>; if [RANK_ENA]=1, rank = mem_adr\<40\>.
+ 0xE: DIMM = 0; if [RANK_ENA]=1, rank = mem_adr\<41\>.
+ 0xF: Reserved.
+
+ For example, for a DIMM made of Samsung's K4B1G0846C-F7 1Gb (16M * 8 bit * 8 bank)
+ parts, the column address width = 10, so with 10b of col, 3b of bus, 3b of bank, [ROW_LSB] =
+ 16. So, row = mem_adr\<29:16\>.
+
+ With [RANK_ENA] = 0, [PBANK_LSB] = 2.
+ With [RANK_ENA] = 1, [PBANK_LSB] = 3.
+
+ Internal:
+ When interfacing with 8H 3DS, set this 0xA regardless of [RANK_ENA] value. */
+ uint64_t idlepower : 3; /**< [ 11: 9](R/W) Enter precharge power-down mode after the memory controller has been idle for
+ 2^(2+[IDLEPOWER]) CK cycles. 0 = disabled.
+
+ This field should only be programmed after initialization.
+ LMC()_MODEREG_PARAMS0[PPD] determines whether the DRAM DLL is disabled during the
+ precharge power-down. */
+ uint64_t forcewrite : 4; /**< [ 15: 12](R/W) Force the oldest outstanding write to complete after having waited for 2^[FORCEWRITE] CK
+ cycles. 0 = disabled. */
+ uint64_t reserved_16 : 1;
+ uint64_t reset : 1; /**< [ 17: 17](R/W) Reset one-shot pulse for LMC()_OPS_CNT, LMC()_IFB_CNT, and LMC()_DCLK_CNT.
+ To cause the reset, software writes this to a one, then rewrites it to a zero. */
+ uint64_t ref_zqcs_int : 22; /**< [ 39: 18](R/W) Refresh interval is represented in number of 512 CK cycle increments. To get more precise
+ control of the refresh interval, LMC()_EXT_CONFIG[REF_INT_LSBS] can be set to a
+ nonzero value.
+ ZQCS interval is represented in a number of refresh intervals. A refresh sequence is
+ triggered when bits \<24:18\> are equal to 0x0, and a ZQCS sequence is triggered when
+ \<39:18\>
+ are equal to 0x0.
+
+ The ZQCS timer only decrements when the refresh timer is zero.
+
+ Program \<24:18\> to RND-DN(TREFI/clkPeriod/512).
+
+ A value of zero in bits \<24:18\> will effectively turn off refresh.
+
+ Program \<36:25\> to (RND-DN(ZQCS_Period / Refresh_Period) - 1), where Refresh_Period is the
+ effective period programmed in bits \<24:18\>. Note that this value should always be greater
+ than 32, to account for resistor calibration delays.
+
+ 000_00000000_0000000: Reserved
+
+ Max refresh interval = 127 * 512 = 65024 CK cycles.
+
+ Max ZQCS interval = 32768 * 127 * 512 = 2130706432 CK cycles.
+
+ If refresh interval is programmed to ~8 us, max ZQCS interval is ~262 ms, or ~4 ZQCS
+ operations per second.
+ LMC()_CONFIG[INIT_STATUS] determines which ranks receive the REF / ZQCS. LMC does not
+ send any refreshes / ZQCS's when LMC()_CONFIG[INIT_STATUS]=0. */
+ uint64_t early_dqx : 1; /**< [ 40: 40](R/W) Set this bit to send DQx signals one CK cycle earlier for the case when the shortest DQx
+ lines have a larger delay than the CK line. */
+ uint64_t sref_with_dll : 1; /**< [ 41: 41](R/W) Self-refresh entry/exit write mode registers. When set, self-refresh entry sequence writes
+ MR2 and MR1 (in this order, in all ranks), and self-refresh exit sequence writes MR1, MR0,
+ MR2, and MR3 (in this order, for all ranks). The write operations occur before self-
+ refresh entry, and after self-refresh exit. When clear, self-refresh entry and exit
+ instruction sequences do not write any mode registers in the DDR4 parts. */
+ uint64_t rank_ena : 1; /**< [ 42: 42](R/W) "RANK enable (for use with dual-rank DIMMs).
+ * For dual-rank DIMMs, the [RANK_ENA] bit will enable the drive of the DDR#_DIMM*_CS*_L
+ and
+ ODT_\<1:0\> pins differently based on the ([PBANK_LSB] - 1) address bit.
+ * Write zero for SINGLE ranked DIMMs." */
+ uint64_t rankmask : 4; /**< [ 46: 43](R/W) Mask to select rank to be leveled/initialized. To write level/read level/initialize rank
+ i, set [RANKMASK]\<i\>:
+
+ \<pre\>
+ [RANK_ENA]=1 [RANK_ENA]=0
+ RANKMASK\<0\> = DIMM0_CS0 DIMM0_CS0
+ RANKMASK\<1\> = DIMM0_CS1 MBZ
+ RANKMASK\<2\> = DIMM1_CS0 DIMM1_CS0
+ RANKMASK\<3\> = DIMM1_CS1 MBZ
+ \</pre\>
+
+ For read/write leveling, each rank has to be leveled separately, so [RANKMASK] should only
+ have one bit set. [RANKMASK] is not used during self-refresh entry/exit and precharge
+ power down entry/exit instruction sequences. When [RANK_ENA] = 0, [RANKMASK]\<1\> and
+ [RANKMASK]\<3\> MBZ. */
+ uint64_t mirrmask : 4; /**< [ 50: 47](R/W) "Mask determining which ranks are address-mirrored.
+ [MIRRMASK]\<n\> = 1 means rank n addresses are mirrored for
+ 0 \<= n \<= 3.
+ A mirrored read/write operation has the following differences:
+ * DDR#_BG\<1\> is swapped with DDR#_BG\<0\>.
+ * DDR#_BA\<1\> is swapped with DDR#_BA\<0\>.
+ * DDR#_A\<13\> is swapped with DDR#_A\<11\>.
+ * DDR#_A\<8\> is swapped with DDR#_A\<7\>.
+ * DDR#_A\<6\> is swapped with DDR#_A\<5\>.
+ * DDR#_A\<4\> is swapped with DDR#_A\<3\>." */
+ uint64_t init_status : 4; /**< [ 54: 51](RO/H) Indicates status of initialization. [INIT_STATUS][n] = 1 implies rank n has been
+ initialized.
+ Software must set necessary [RANKMASK] bits before executing the initialization sequence
+ using LMC()_SEQ_CTL. If the rank has been selected for init with the [RANKMASK] bits,
+ the [INIT_STATUS] bits will be set after successful initialization and after self-refresh
+ exit. [INIT_STATUS] determines the chip-selects that assert during refresh, ZQCS,
+ precharge
+ power-down entry/exit, and self-refresh entry SEQ_SELs. */
+ uint64_t early_unload_d0_r0 : 1; /**< [ 55: 55](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle early for Rank 0 reads.
+ The recommended [EARLY_UNLOAD_D0_R0] value can be calculated after the final
+ LMC()_RLEVEL_RANK(0)[BYTE*] values are selected (as part of read leveling initialization).
+ Then, determine the largest read leveling setting for rank 0 (i.e. calculate
+ maxset=MAX(LMC()_RLEVEL_RANK(0)[BYTEi]) across all i), then set [EARLY_UNLOAD_D0_R0] when the
+ low three bits of this largest setting is smaller than 4 (i.e.
+ [EARLY_UNLOAD_D0_R0] = (maxset\<2:0\> \< 4)). */
+ uint64_t early_unload_d0_r1 : 1; /**< [ 56: 56](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle early for Rank 1 reads.
+ The recommended [EARLY_UNLOAD_D0_R1] value can be calculated after the final
+ LMC()_RLEVEL_RANK(1)[BYTE*] values are selected (as part of read leveling initialization).
+ Then, determine the largest read leveling setting for rank one (i.e. calculate
+ maxset=MAX(LMC()_RLEVEL_RANK(1)[BYTEi]) across all i), then set [EARLY_UNLOAD_D0_R1] when the
+ low three bits of this largest setting is smaller than 4 (i.e.
+ [EARLY_UNLOAD_D0_R1] = (maxset\<2:0\> \< 4)). */
+ uint64_t early_unload_d1_r0 : 1; /**< [ 57: 57](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle early for Rank 2 reads.
+ The recommended [EARLY_UNLOAD_D1_R0] value can be calculated after the final
+ LMC()_RLEVEL_RANK(2)[BYTE*] values are selected (as part of read leveling initialization).
+ Then, determine the largest read leveling setting for rank 2 (i.e. calculate
+ maxset=MAX(LMC()_RLEVEL_RANK(2)[BYTEi]) across all i), then set [EARLY_UNLOAD_D1_R0] when the
+ low three bits of this largest setting is smaller than 4 (i.e.
+ [EARLY_UNLOAD_D1_R0] = (maxset\<2:0\> \< 4)). */
+ uint64_t early_unload_d1_r1 : 1; /**< [ 58: 58](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle early for Rank 3 reads.
+ The recommended [EARLY_UNLOAD_D1_R1] value can be calculated after the final
+ LMC()_RLEVEL_RANK(3)[BYTE*] values are selected (as part of read leveling initialization).
+ Then, determine the largest read leveling setting for rank 3 (i.e. calculate
+ maxset=MAX(LMC()_RLEVEL_RANK(3)[BYTEi]) across all i), then set [EARLY_UNLOAD_D1_R1] when the
+ low three bits of this largest setting is smaller than 4 (i.e.
+ [EARLY_UNLOAD_D1_R1] = (maxset\<2:0\> \< 4)). */
+ uint64_t reserved_59 : 1;
+ uint64_t mode32b : 1; /**< [ 60: 60](R/W) 32-bit datapath mode. When set, only 32 DQ pins are used. */
+ uint64_t mode_x4dev : 1; /**< [ 61: 61](R/W) DDR x4 device mode. */
+ uint64_t bg2_enable : 1; /**< [ 62: 62](R/W) BG1 enable bit.
+ Set to one when using DDR4 x4 or x8 parts.
+ Clear to zero when using DDR4 x16 parts. */
+ uint64_t lrdimm_ena : 1; /**< [ 63: 63](R/W) Reserved.
+ Internal:
+ Load reduced DIMM enable. When set allows the use of JEDEC DDR4 LRDIMMs. */
+#endif /* Word 0 - End */
+ } cn9;
+ /* struct bdk_lmcx_config_s cn81xx; */
+ struct bdk_lmcx_config_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t lrdimm_ena : 1; /**< [ 63: 63](R/W) Reserved.
+ Internal:
+ Load reduced DIMM enable. When set allows the use of JEDEC DDR4 LRDIMMs. */
+ uint64_t bg2_enable : 1; /**< [ 62: 62](R/W) BG1 enable bit. Only has an effect when LMC()_CONFIG[MODEDDR4] = 1.
+ Set to one when using DDR4 x4 or x8 parts.
+ Clear to zero when using DDR4 x16 parts. */
+ uint64_t mode_x4dev : 1; /**< [ 61: 61](R/W) DDR x4 device mode. */
+ uint64_t mode32b : 1; /**< [ 60: 60](R/W) 32-bit datapath mode. When set, only 32 DQ pins are used. */
+ uint64_t scrz : 1; /**< [ 59: 59](R/W1S/H) Hide LMC()_SCRAMBLE_CFG0 and LMC()_SCRAMBLE_CFG1 when set. */
+ uint64_t early_unload_d1_r1 : 1; /**< [ 58: 58](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle early for Rank 3 reads.
+ The recommended EARLY_UNLOAD_D1_R1 value can be calculated after the final
+ LMC()_RLEVEL_RANK3[BYTE*] values are selected (as part of read leveling initialization).
+ Then, determine the largest read leveling setting for rank 3 (i.e. calculate
+ maxset=MAX(LMC()_RLEVEL_RANK3[BYTEi]) across all i), then set EARLY_UNLOAD_D1_R1 when the
+ low two bits of this largest setting is not 3 (i.e. EARLY_UNLOAD_D1_R1 = (maxset\<1:0\>
+ !=3)). */
+ uint64_t early_unload_d1_r0 : 1; /**< [ 57: 57](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle early for Rank 2 reads.
+ The recommended EARLY_UNLOAD_D1_R0 value can be calculated after the final
+ LMC()_RLEVEL_RANK2[BYTE*] values are selected (as part of read leveling initialization).
+ Then, determine the largest read leveling setting for rank 2 (i.e. calculate
+ maxset=MAX(LMC()_RLEVEL_RANK2[BYTEi]) across all i), then set EARLY_UNLOAD_D1_R0 when the
+ low two bits of this largest setting is not 3 (i.e. EARLY_UNLOAD_D1_R0 = (maxset\<1:0\>
+ !=3)). */
+ uint64_t early_unload_d0_r1 : 1; /**< [ 56: 56](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle early for Rank 1 reads.
+ The recommended EARLY_UNLOAD_D0_R1 value can be calculated after the final
+ LMC()_RLEVEL_RANK1[BYTE*] values are selected (as part of read leveling initialization).
+ Then, determine the largest read leveling setting for rank one (i.e. calculate
+ maxset=MAX(LMC()_RLEVEL_RANK1[BYTEi]) across all i), then set EARLY_UNLOAD_D0_R1 when the
+ low two bits of this largest setting is not 3 (i.e. EARLY_UNLOAD_D0_R1 = (maxset\<1:0\>
+ !=3)). */
+ uint64_t early_unload_d0_r0 : 1; /**< [ 55: 55](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle early for Rank 0 reads.
+ The recommended EARLY_UNLOAD_D0_R0 value can be calculated after the final
+ LMC()_RLEVEL_RANK0[BYTE*] values are selected (as part of read leveling initialization).
+ Then, determine the largest read leveling setting for rank 0 (i.e. calculate
+ maxset=MAX(LMC()_RLEVEL_RANK0[BYTEi]) across all i), then set EARLY_UNLOAD_D0_R0 when the
+ low two bits of this largest setting is not 3 (i.e. EARLY_UNLOAD_D0_R0 = (maxset\<1:0\>
+ !=3)). */
+ uint64_t init_status : 4; /**< [ 54: 51](RO/H) Indicates status of initialization. [INIT_STATUS][n] = 1 implies rank n has been
+ initialized.
+ Software must set necessary [RANKMASK] bits before executing the initialization sequence
+ using LMC()_SEQ_CTL. If the rank has been selected for init with the [RANKMASK] bits,
+ the [INIT_STATUS] bits will be set after successful initialization and after self-refresh
+ exit. [INIT_STATUS] determines the chip-selects that assert during refresh, ZQCS,
+ precharge
+ power-down entry/exit, and self-refresh entry SEQ_SELs. */
+ uint64_t mirrmask : 4; /**< [ 50: 47](R/W) "Mask determining which ranks are address-mirrored.
+ [MIRRMASK]\<n\> = 1 means rank n addresses are mirrored for
+ 0 \<= n \<= 3.
+ In DDR3, a mirrored read/write operation has the following differences:
+ * DDR#_BA\<1\> is swapped with DDR#_BA\<0\>.
+ * DDR#_A\<8\> is swapped with DDR#_A\<7\>.
+ * DDR#_A\<6\> is swapped with DDR#_A\<5\>.
+ * DDR#_A\<4\> is swapped with DDR#_A\<3\>.
+
+ When RANK_ENA = 0, MIRRMASK\<1\> MBZ.
+
+ In DDR4, a mirrored read/write operation has the following differences:
+ * DDR#_BG\<1\> is swapped with DDR#_BG\<0\>.
+ * DDR#_BA\<1\> is swapped with DDR#_BA\<0\>.
+ * DDR#_A\<13\> is swapped with DDR#_A\<11\>.
+ * DDR#_A\<8\> is swapped with DDR#_A\<7\>.
+ * DDR#_A\<6\> is swapped with DDR#_A\<5\>.
+ * DDR#_A\<4\> is swapped with DDR#_A\<3\>.
+
+ For CN70XX, MIRRMASK\<3:2\> MBZ.
+ * When RANK_ENA = 0, MIRRMASK\<1\> MBZ." */
+ uint64_t rankmask : 4; /**< [ 46: 43](R/W) Mask to select rank to be leveled/initialized. To write level/read level/initialize rank
+ i, set [RANKMASK]\<i\>:
+
+ \<pre\>
+ [RANK_ENA]=1 [RANK_ENA]=0
+ RANKMASK\<0\> = DIMM0_CS0 DIMM0_CS0
+ RANKMASK\<1\> = DIMM0_CS1 MBZ
+ RANKMASK\<2\> = DIMM1_CS0 DIMM1_CS0
+ RANKMASK\<3\> = DIMM1_CS1 MBZ
+ \</pre\>
+
+ For read/write leveling, each rank has to be leveled separately, so [RANKMASK] should only
+ have one bit set. [RANKMASK] is not used during self-refresh entry/exit and precharge
+ power down entry/exit instruction sequences. When [RANK_ENA] = 0, [RANKMASK]\<1\> and
+ [RANKMASK]\<3\> MBZ. */
+ uint64_t rank_ena : 1; /**< [ 42: 42](R/W) "RANK enable (for use with dual-rank DIMMs).
+ * For dual-rank DIMMs, the [RANK_ENA] bit will enable the drive of the DDR#_DIMM*_CS*_L
+ and
+ ODT_\<1:0\> pins differently based on the ([PBANK_LSB] - 1) address bit.
+ * Write zero for SINGLE ranked DIMMs." */
+ uint64_t sref_with_dll : 1; /**< [ 41: 41](R/W) Self-refresh entry/exit write mode registers. When set, self-refresh entry sequence writes
+ MR2 and MR1 (in this order, in all ranks), and self-refresh exit sequence writes MR1, MR0,
+ MR2, and MR3 (in this order, for all ranks). The write operations occur before self-
+ refresh entry, and after self-refresh exit. When clear, self-refresh entry and exit
+ instruction sequences do not write any mode registers in the DDR3/4 parts. */
+ uint64_t early_dqx : 1; /**< [ 40: 40](R/W) Set this bit to send DQx signals one CK cycle earlier for the case when the shortest DQx
+ lines have a larger delay than the CK line. */
+ uint64_t ref_zqcs_int : 22; /**< [ 39: 18](R/W) Refresh interval is represented in number of 512 CK cycle increments. To get more precise
+ control of the refresh interval, LMC()_EXT_CONFIG[REF_INT_LSBS] can be set to a
+ nonzero value.
+ ZQCS interval is represented in a number of refresh intervals. A refresh sequence is
+ triggered when bits \<24:18\> are equal to 0x0, and a ZQCS sequence is triggered when
+ \<39:18\>
+ are equal to 0x0.
+
+ The ZQCS timer only decrements when the refresh timer is zero.
+
+ Program \<24:18\> to RND-DN(TREFI/clkPeriod/512).
+
+ A value of zero in bits \<24:18\> will effectively turn off refresh.
+
+ Program \<36:25\> to (RND-DN(ZQCS_Period / Refresh_Period) - 1), where Refresh_Period is the
+ effective period programmed in bits \<24:18\>. Note that this value should always be greater
+ than 32, to account for resistor calibration delays.
+
+ 000_00000000_0000000: Reserved
+
+ Max refresh interval = 127 * 512 = 65024 CK cycles.
+
+ Max ZQCS interval = 32768 * 127 * 512 = 2130706432 CK cycles.
+
+ If refresh interval is programmed to ~8 us, max ZQCS interval is ~262 ms, or ~4 ZQCS
+ operations per second.
+ LMC()_CONFIG[INIT_STATUS] determines which ranks receive the REF / ZQCS. LMC does not
+ send any refreshes / ZQCS's when LMC()_CONFIG[INIT_STATUS]=0. */
+ uint64_t reset : 1; /**< [ 17: 17](R/W) Reset one-shot pulse for LMC()_OPS_CNT, LMC()_IFB_CNT, and LMC()_DCLK_CNT.
+ To cause the reset, software writes this to a one, then rewrites it to a zero. */
+ uint64_t ecc_adr : 1; /**< [ 16: 16](R/W) Include memory reference address in the ECC calculation.
+ 0 = disabled, 1 = enabled. */
+ uint64_t forcewrite : 4; /**< [ 15: 12](R/W) Force the oldest outstanding write to complete after having waited for 2^[FORCEWRITE] CK
+ cycles. 0 = disabled. */
+ uint64_t idlepower : 3; /**< [ 11: 9](R/W) Enter precharge power-down mode after the memory controller has been idle for
+ 2^(2+[IDLEPOWER]) CK cycles. 0 = disabled.
+
+ This field should only be programmed after initialization.
+ LMC()_MODEREG_PARAMS0[PPD] determines whether the DRAM DLL is disabled during the
+ precharge power-down. */
+ uint64_t pbank_lsb : 4; /**< [ 8: 5](R/W) DIMM address bit select. Reverting to the explanation for [ROW_LSB], [PBANK_LSB] would be:
+ [ROW_LSB] bit + num_rowbits + num_rankbits
+
+ Values for [PBANK_LSB] are as follows:
+ 0x0: DIMM = mem_adr\<28\>; if [RANK_ENA]=1, rank = mem_adr\<27\>.
+ 0x1: DIMM = mem_adr\<29\>; if [RANK_ENA]=1, rank = mem_adr\<28\>.
+ 0x2: DIMM = mem_adr\<30\>; if [RANK_ENA]=1, rank = mem_adr\<29\>.
+ 0x3: DIMM = mem_adr\<31\>; if [RANK_ENA]=1, rank = mem_adr\<30\>.
+ 0x4: DIMM = mem_adr\<32\>; if [RANK_ENA]=1, rank = mem_adr\<31\>.
+ 0x5: DIMM = mem_adr\<33\>; if [RANK_ENA]=1, rank = mem_adr\<32\>.
+ 0x6: DIMM = mem_adr\<34\>; if [RANK_ENA]=1, rank = mem_adr\<33\>.
+ 0x7: DIMM = mem_adr\<35\>; if [RANK_ENA]=1, rank = mem_adr\<34\>.
+ 0x8: DIMM = mem_adr\<36\>; if [RANK_ENA]=1, rank = mem_adr\<35\>.
+ 0x9: DIMM = mem_adr\<37\>; if [RANK_ENA]=1, rank = mem_adr\<36\>.
+ 0xA: DIMM = 0; if [RANK_ENA]=1, rank = mem_adr\<37\>.
+ 0xB-0xF: Reserved.
+
+ For example, for a DIMM made of Samsung's K4B1G0846C-F7 1Gb (16M * 8 bit * 8 bank)
+ parts, the column address width = 10, so with 10b of col, 3b of bus, 3b of bank, [ROW_LSB] =
+ 16. So, row = mem_adr\<29:16\>.
+
+ With [RANK_ENA] = 0, [PBANK_LSB] = 2.
+ With [RANK_ENA] = 1, [PBANK_LSB] = 3.
+
+ Internal:
+ When interfacing with 8H 3DS, set this 0xA regardless of [RANK_ENA] value. */
+ uint64_t row_lsb : 3; /**< [ 4: 2](R/W) Row address bit select.
+ 0x0 = Address bit 14 is LSB.
+ 0x1 = Address bit 15 is LSB.
+ 0x2 = Address bit 16 is LSB.
+ 0x3 = Address bit 17 is LSB.
+ 0x4 = Address bit 18 is LSB.
+ 0x5 = Address bit 19 is LSB.
+ 0x6 = Address bit 20 is LSB.
+ 0x7 = Reserved.
+
+ Encoding used to determine which memory address bit position represents the low order DDR
+ ROW address. The processor's memory address\<34:7\> needs to be translated to DRAM addresses
+ (bnk,row,col,rank and DIMM) and that is a function of the following:
+ * Datapath width (64).
+ * Number of banks (8).
+ * Number of column bits of the memory part--specified indirectly by this register.
+ * Number of row bits of the memory part--specified indirectly by [PBANK_LSB].
+ * Number of ranks in a DIMM--specified by LMC()_CONFIG[RANK_ENA].
+ * Number of DIMMs in the system by the register below ([PBANK_LSB]).
+
+ Column address starts from mem_addr[3] for 64b (8 bytes) DQ width. [ROW_LSB] is
+ mem_adr[15] for 64b mode. Therefore, the [ROW_LSB] parameter should be set to
+ 0x1 (64b).
+
+ For example, for a DIMM made of Samsung's K4B1G0846C-F7 1Gb (16M * 8 bit * 8 bank)
+ parts, the column address width = 10, so with 10b of col, 3b of bus, 3b of bank, [ROW_LSB] =
+ 16. So, row = mem_adr\<29:16\>.
+
+ Refer to the cache-block read transaction example. */
+ uint64_t ecc_ena : 1; /**< [ 1: 1](R/W) ECC enable. When set, enables the 8b ECC check/correct logic. Should be one when used with
+ DIMMs with ECC; zero, otherwise.
+
+ * When this mode is turned on, DQ\<71:64\> on write operations contains the ECC code
+ generated for the 64 bits of data which will be written in the memory. Later on read
+ operations, will be used to check for single-bit error (which will be auto-corrected) and
+ double-bit error (which will be reported).
+
+ * When not turned on, DQ\<71:64\> are driven to zero. Please refer to SEC_ERR, DED_ERR,
+ LMC()_NXM_FADR, and LMC()_ECC_SYND registers for diagnostics information when there is
+ an error. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t ecc_ena : 1; /**< [ 1: 1](R/W) ECC enable. When set, enables the 8b ECC check/correct logic. Should be one when used with
+ DIMMs with ECC; zero otherwise.
+
+ * When this mode is turned on, DQ\<71:64\> on write operations carries the ECC code
+ generated for the 64 bits of data being written to memory. On later read operations, it
+ is used to check for single-bit errors (which are auto-corrected) and double-bit errors
+ (which are reported).
+
+ * When not turned on, DQ\<71:64\> are driven to zero. Refer to the SEC_ERR, DED_ERR,
+ LMC()_NXM_FADR, and LMC()_ECC_SYND registers for diagnostic information when there is
+ an error. */
+ uint64_t row_lsb : 3; /**< [ 4: 2](R/W) Row address bit select.
+ 0x0 = Address bit 14 is LSB.
+ 0x1 = Address bit 15 is LSB.
+ 0x2 = Address bit 16 is LSB.
+ 0x3 = Address bit 17 is LSB.
+ 0x4 = Address bit 18 is LSB.
+ 0x5 = Address bit 19 is LSB.
+ 0x6 = Address bit 20 is LSB.
+ 0x7 = Reserved.
+
+ Encoding used to determine which memory address bit position represents the low order DDR
+ ROW address. The processor's memory address\<34:7\> needs to be translated to DRAM addresses
+ (bnk,row,col,rank and DIMM) and that is a function of the following:
+ * Datapath width (64).
+ * Number of banks (8).
+ * Number of column bits of the memory part--specified indirectly by this register.
+ * Number of row bits of the memory part--specified indirectly by [PBANK_LSB].
+ * Number of ranks in a DIMM--specified by LMC()_CONFIG[RANK_ENA].
+ * Number of DIMMs in the system--specified by [PBANK_LSB] below.
+
+ Column address starts from mem_addr[3] for 64b (8 bytes) DQ width. [ROW_LSB] is
+ mem_adr[15] for 64b mode. Therefore, the [ROW_LSB] parameter should be set to
+ 0x1 (64b).
+
+ For example, for a DIMM made of Samsung's K4B1G0846C-F7 1Gb (16M * 8 bit * 8 bank)
+ parts, the column address width = 10, so with 10b of col, 3b of bus, 3b of bank, [ROW_LSB] =
+ 16. So, row = mem_adr\<29:16\>.
+
+ Refer to the cache-block read transaction example. */
+ uint64_t pbank_lsb : 4; /**< [ 8: 5](R/W) DIMM address bit select. Referring back to the explanation for [ROW_LSB], [PBANK_LSB] would be:
+ [ROW_LSB] bit + num_rowbits + num_rankbits
+
+ Values for [PBANK_LSB] are as follows:
+ 0x0: DIMM = mem_adr\<28\>; if [RANK_ENA]=1, rank = mem_adr\<27\>.
+ 0x1: DIMM = mem_adr\<29\>; if [RANK_ENA]=1, rank = mem_adr\<28\>.
+ 0x2: DIMM = mem_adr\<30\>; if [RANK_ENA]=1, rank = mem_adr\<29\>.
+ 0x3: DIMM = mem_adr\<31\>; if [RANK_ENA]=1, rank = mem_adr\<30\>.
+ 0x4: DIMM = mem_adr\<32\>; if [RANK_ENA]=1, rank = mem_adr\<31\>.
+ 0x5: DIMM = mem_adr\<33\>; if [RANK_ENA]=1, rank = mem_adr\<32\>.
+ 0x6: DIMM = mem_adr\<34\>; if [RANK_ENA]=1, rank = mem_adr\<33\>.
+ 0x7: DIMM = mem_adr\<35\>; if [RANK_ENA]=1, rank = mem_adr\<34\>.
+ 0x8: DIMM = mem_adr\<36\>; if [RANK_ENA]=1, rank = mem_adr\<35\>.
+ 0x9: DIMM = mem_adr\<37\>; if [RANK_ENA]=1, rank = mem_adr\<36\>.
+ 0xA: DIMM = 0; if [RANK_ENA]=1, rank = mem_adr\<37\>.
+ 0xB-0xF: Reserved.
+
+ For example, for a DIMM made of Samsung's K4B1G0846C-F7 1Gb (16M * 8 bit * 8 bank)
+ parts, the column address width = 10, so with 10b of col, 3b of bus, 3b of bank, [ROW_LSB] =
+ 16. So, row = mem_adr\<29:16\>.
+
+ With [RANK_ENA] = 0, [PBANK_LSB] = 2.
+ With [RANK_ENA] = 1, [PBANK_LSB] = 3.
+
+ Internal:
+ When interfacing with 8H 3DS, set this to 0xA regardless of the [RANK_ENA] value. */
+ uint64_t idlepower : 3; /**< [ 11: 9](R/W) Enter precharge power-down mode after the memory controller has been idle for
+ 2^(2+[IDLEPOWER]) CK cycles. 0 = disabled.
+
+ This field should only be programmed after initialization.
+ LMC()_MODEREG_PARAMS0[PPD] determines whether the DRAM DLL is disabled during the
+ precharge power-down. */
+ uint64_t forcewrite : 4; /**< [ 15: 12](R/W) Force the oldest outstanding write to complete after having waited for 2^[FORCEWRITE] CK
+ cycles. 0 = disabled. */
+ uint64_t ecc_adr : 1; /**< [ 16: 16](R/W) Include memory reference address in the ECC calculation.
+ 0 = disabled, 1 = enabled. */
+ uint64_t reset : 1; /**< [ 17: 17](R/W) Reset one-shot pulse for LMC()_OPS_CNT, LMC()_IFB_CNT, and LMC()_DCLK_CNT.
+ To cause the reset, software writes this to a one, then rewrites it to a zero. */
+ uint64_t ref_zqcs_int : 22; /**< [ 39: 18](R/W) Refresh interval is represented in number of 512 CK cycle increments. To get more precise
+ control of the refresh interval, LMC()_EXT_CONFIG[REF_INT_LSBS] can be set to a
+ nonzero value.
+ ZQCS interval is represented in a number of refresh intervals. A refresh sequence is
+ triggered when bits \<24:18\> are equal to 0x0, and a ZQCS sequence is triggered when
+ \<39:18\> are equal to 0x0.
+
+ The ZQCS timer only decrements when the refresh timer is zero.
+
+ Program \<24:18\> to RND-DN(TREFI/clkPeriod/512).
+
+ A value of zero in bits \<24:18\> will effectively turn off refresh.
+
+ Program \<36:25\> to (RND-DN(ZQCS_Period / Refresh_Period) - 1), where Refresh_Period is the
+ effective period programmed in bits \<24:18\>. Note that this value should always be greater
+ than 32, to account for resistor calibration delays.
+
+ 000_00000000_0000000: Reserved
+
+ Max refresh interval = 127 * 512 = 65024 CK cycles.
+
+ Max ZQCS interval = 32768 * 127 * 512 = 2130706432 CK cycles.
+
+ If refresh interval is programmed to ~8 us, max ZQCS interval is ~262 ms, or ~4 ZQCS
+ operations per second.
+ LMC()_CONFIG[INIT_STATUS] determines which ranks receive the REF / ZQCS. LMC does not
+ send any refreshes / ZQCS's when LMC()_CONFIG[INIT_STATUS]=0. */
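+ /* Worked example (illustrative arithmetic, not vendor guidance): with a
+  * 1.25 ns CK (800 MHz) and tREFI = 7.8 us, program bits \<24:18\> to
+  * RND-DN(7.8 us / 1.25 ns / 512) = RND-DN(6240 / 512) = 12, for an
+  * effective refresh period of 12 * 512 * 1.25 ns = 7.68 us. A 128 ms ZQCS
+  * period then gives RND-DN(128 ms / 7.68 us) - 1 = 16666 - 1 = 16665 for
+  * the ZQCS field. */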
+ uint64_t early_dqx : 1; /**< [ 40: 40](R/W) Set this bit to send DQx signals one CK cycle earlier for the case when the shortest DQx
+ lines have a larger delay than the CK line. */
+ uint64_t sref_with_dll : 1; /**< [ 41: 41](R/W) Self-refresh entry/exit write mode registers. When set, self-refresh entry sequence writes
+ MR2 and MR1 (in this order, in all ranks), and self-refresh exit sequence writes MR1, MR0,
+ MR2, and MR3 (in this order, for all ranks). The write operations occur before self-
+ refresh entry, and after self-refresh exit. When clear, self-refresh entry and exit
+ instruction sequences do not write any mode registers in the DDR3/4 parts. */
+ uint64_t rank_ena : 1; /**< [ 42: 42](R/W) "RANK enable (for use with dual-rank DIMMs).
+ * For dual-rank DIMMs, the [RANK_ENA] bit will enable the drive of the DDR#_DIMM*_CS*_L
+ and ODT_\<1:0\> pins differently based on the ([PBANK_LSB] - 1) address bit.
+ * Write zero for single-rank DIMMs." */
+ uint64_t rankmask : 4; /**< [ 46: 43](R/W) Mask to select rank to be leveled/initialized. To write level/read level/initialize rank
+ i, set [RANKMASK]\<i\>:
+
+ \<pre\>
+ [RANK_ENA]=1 [RANK_ENA]=0
+ RANKMASK\<0\> = DIMM0_CS0 DIMM0_CS0
+ RANKMASK\<1\> = DIMM0_CS1 MBZ
+ RANKMASK\<2\> = DIMM1_CS0 DIMM1_CS0
+ RANKMASK\<3\> = DIMM1_CS1 MBZ
+ \</pre\>
+
+ For read/write leveling, each rank has to be leveled separately, so [RANKMASK] should only
+ have one bit set. [RANKMASK] is not used during self-refresh entry/exit and precharge
+ power down entry/exit instruction sequences. When [RANK_ENA] = 0, [RANKMASK]\<1\> and
+ [RANKMASK]\<3\> MBZ. */
+ uint64_t mirrmask : 4; /**< [ 50: 47](R/W) "Mask determining which ranks are address-mirrored.
+ [MIRRMASK]\<n\> = 1 means rank n addresses are mirrored for
+ 0 \<= n \<= 3.
+ In DDR3, a mirrored read/write operation has the following differences:
+ * DDR#_BA\<1\> is swapped with DDR#_BA\<0\>.
+ * DDR#_A\<8\> is swapped with DDR#_A\<7\>.
+ * DDR#_A\<6\> is swapped with DDR#_A\<5\>.
+ * DDR#_A\<4\> is swapped with DDR#_A\<3\>.
+
+ When RANK_ENA = 0, MIRRMASK\<1\> MBZ.
+
+ In DDR4, a mirrored read/write operation has the following differences:
+ * DDR#_BG\<1\> is swapped with DDR#_BG\<0\>.
+ * DDR#_BA\<1\> is swapped with DDR#_BA\<0\>.
+ * DDR#_A\<13\> is swapped with DDR#_A\<11\>.
+ * DDR#_A\<8\> is swapped with DDR#_A\<7\>.
+ * DDR#_A\<6\> is swapped with DDR#_A\<5\>.
+ * DDR#_A\<4\> is swapped with DDR#_A\<3\>.
+
+ For CN70XX, MIRRMASK\<3:2\> MBZ.
+ * When RANK_ENA = 0, MIRRMASK\<1\> MBZ." */
+ uint64_t init_status : 4; /**< [ 54: 51](RO/H) Indicates status of initialization. [INIT_STATUS][n] = 1 implies rank n has been
+ initialized.
+ Software must set necessary [RANKMASK] bits before executing the initialization sequence
+ using LMC()_SEQ_CTL. If the rank has been selected for init with the [RANKMASK] bits,
+ the [INIT_STATUS] bits will be set after successful initialization and after self-refresh
+ exit. [INIT_STATUS] determines the chip-selects that assert during refresh, ZQCS,
+ precharge
+ power-down entry/exit, and self-refresh entry SEQ_SELs. */
+ uint64_t early_unload_d0_r0 : 1; /**< [ 55: 55](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle early for Rank 0 reads.
+ The recommended EARLY_UNLOAD_D0_R0 value can be calculated after the final
+ LMC()_RLEVEL_RANK0[BYTE*] values are selected (as part of read leveling initialization).
+ Then, determine the largest read leveling setting for rank 0 (i.e. calculate
+ maxset=MAX(LMC()_RLEVEL_RANK0[BYTEi]) across all i), then set EARLY_UNLOAD_D0_R0 when the
+ low two bits of this largest setting is not 3 (i.e. EARLY_UNLOAD_D0_R0 = (maxset\<1:0\>
+ !=3)). */
+ uint64_t early_unload_d0_r1 : 1; /**< [ 56: 56](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle early for Rank 1 reads.
+ The recommended EARLY_UNLOAD_D0_R1 value can be calculated after the final
+ LMC()_RLEVEL_RANK1[BYTE*] values are selected (as part of read leveling initialization).
+ Then, determine the largest read leveling setting for rank one (i.e. calculate
+ maxset=MAX(LMC()_RLEVEL_RANK1[BYTEi]) across all i), then set EARLY_UNLOAD_D0_R1 when the
+ low two bits of this largest setting is not 3 (i.e. EARLY_UNLOAD_D0_R1 = (maxset\<1:0\>
+ !=3)). */
+ uint64_t early_unload_d1_r0 : 1; /**< [ 57: 57](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle early for Rank 2 reads.
+ The recommended EARLY_UNLOAD_D1_R0 value can be calculated after the final
+ LMC()_RLEVEL_RANK2[BYTE*] values are selected (as part of read leveling initialization).
+ Then, determine the largest read leveling setting for rank 2 (i.e. calculate
+ maxset=MAX(LMC()_RLEVEL_RANK2[BYTEi]) across all i), then set EARLY_UNLOAD_D1_R0 when the
+ low two bits of this largest setting is not 3 (i.e. EARLY_UNLOAD_D1_R0 = (maxset\<1:0\>
+ !=3)). */
+ uint64_t early_unload_d1_r1 : 1; /**< [ 58: 58](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle early for Rank 3 reads.
+ The recommended EARLY_UNLOAD_D1_R1 value can be calculated after the final
+ LMC()_RLEVEL_RANK3[BYTE*] values are selected (as part of read leveling initialization).
+ Then, determine the largest read leveling setting for rank 3 (i.e. calculate
+ maxset=MAX(LMC()_RLEVEL_RANK3[BYTEi]) across all i), then set EARLY_UNLOAD_D1_R1 when the
+ low two bits of this largest setting is not 3 (i.e. EARLY_UNLOAD_D1_R1 = (maxset\<1:0\>
+ !=3)). */
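+ /* Illustrative computation of the EARLY_UNLOAD_* recommendation above
+  * (assumes the rank's LMC()_RLEVEL_RANK*[BYTE*] settings were already read
+  * into byte[0..8]):
+  *
+  *   int maxset = 0;
+  *   for (int i = 0; i < 9; i++)
+  *       if (byte[i] > maxset) maxset = byte[i];
+  *   int early_unload = ((maxset & 3) != 3);
+  */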
+ uint64_t scrz : 1; /**< [ 59: 59](R/W1S/H) Hide LMC()_SCRAMBLE_CFG0 and LMC()_SCRAMBLE_CFG1 when set. */
+ uint64_t mode32b : 1; /**< [ 60: 60](R/W) 32-bit datapath mode. When set, only 32 DQ pins are used. */
+ uint64_t mode_x4dev : 1; /**< [ 61: 61](R/W) DDR x4 device mode. */
+ uint64_t bg2_enable : 1; /**< [ 62: 62](R/W) BG1 enable bit: enables the second DDR4 bank-group address bit (BG1). Only has an effect when LMC()_CONFIG[MODEDDR4] = 1.
+ Set to one when using DDR4 x4 or x8 parts.
+ Clear to zero when using DDR4 x16 parts. */
+ uint64_t lrdimm_ena : 1; /**< [ 63: 63](R/W) Reserved.
+ Internal:
+ Load reduced DIMM enable. When set, allows the use of JEDEC DDR4 LRDIMMs. */
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_lmcx_config_cn88xx cn83xx; */
+};
+typedef union bdk_lmcx_config bdk_lmcx_config_t;
+
+static inline uint64_t BDK_LMCX_CONFIG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_CONFIG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000188ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000188ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000188ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000188ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_CONFIG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_CONFIG(a) bdk_lmcx_config_t
+#define bustype_BDK_LMCX_CONFIG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_CONFIG(a) "LMCX_CONFIG"
+#define device_bar_BDK_LMCX_CONFIG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_CONFIG(a) (a)
+#define arguments_BDK_LMCX_CONFIG(a) (a),-1,-1,-1
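+
+ /* Usage sketch (illustrative only, not part of the BDK API): assuming the
+  * generic CSR accessors from bdk-csr.h, the address-mapping fields can be
+  * decoded as follows ("node" is a valid bdk_node_t; [PBANK_LSB] encodings
+  * 0x0-0x9 are assumed):
+  *
+  *   bdk_lmcx_config_t cfg;
+  *   cfg.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(0));
+  *   int row_lsb_bit  = 14 + cfg.s.row_lsb;    // [ROW_LSB]: 0x0 selects bit 14
+  *   int dimm_sel_bit = 28 + cfg.s.pbank_lsb;  // [PBANK_LSB]: 0x0 selects bit 28
+  */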
+
+/**
+ * Register (RSL) lmc#_control
+ *
+ * LMC Control Register
+ */
+union bdk_lmcx_control
+{
+ uint64_t u;
+ struct bdk_lmcx_control_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t scramble_ena : 1; /**< [ 63: 63](R/W) When set, will enable the scramble/descramble logic. */
+ uint64_t thrcnt : 12; /**< [ 62: 51](RO/H) Fine count. */
+ uint64_t persub : 8; /**< [ 50: 43](R/W) Reserved.
+ Internal:
+ Offset for DFA rate-matching. */
+ uint64_t thrmax : 4; /**< [ 42: 39](R/W) Reserved.
+ Internal:
+ Fine rate matching max bucket size. In conjunction with the coarse rate matching logic,
+ the fine rate matching logic gives software the ability to prioritize DFA reads over L2C
+ writes. Higher [PERSUB] values result in a lower DFA read bandwidth.
+
+ 0x0 = Reserved. */
+ uint64_t crm_cnt : 5; /**< [ 38: 34](RO/H) Coarse count. */
+ uint64_t crm_thr : 5; /**< [ 33: 29](R/W) Coarse rate matching threshold. */
+ uint64_t crm_max : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ Coarse rate matching max bucket size. The coarse rate matching logic is used to control
+ the bandwidth allocated to DFA reads. [CRM_MAX] is subdivided into two regions with DFA
+ reads being preferred over LMC reads/writes when [CRM_CNT] \< [CRM_THR]. [CRM_CNT]
+ increments by one when a DFA read is slotted and by two when an LMC read/write is
+ slotted, and rolls over when [CRM_MAX] is reached.
+
+ 0x0 = Reserved. */
+ uint64_t rodt_bprch : 1; /**< [ 23: 23](R/W) When set, the turn-off time for the ODT pin during a read command is delayed an additional
+ CK cycle. */
+ uint64_t wodt_bprch : 1; /**< [ 22: 22](R/W) When set, the turn-off time for the ODT pin during a write command is delayed an
+ additional CK cycle. */
+ uint64_t bprch : 2; /**< [ 21: 20](R/W) "Back porch enable. When set, the turn-on time for the default DDR#_DQ* /DDR#_DQS_*_P/N
+ drivers is delayed an additional BPRCH CK cycles.
+ 0x0 = 0 CK cycles.
+ 0x1 = 1 CK cycles.
+ 0x2 = 2 CK cycles.
+ 0x3 = 3 CK cycles." */
+ uint64_t ext_zqcs_dis : 1; /**< [ 19: 19](R/W) Disable (external) auto-ZQCS calibration. When clear, LMC runs external ZQ calibration
+ every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t int_zqcs_dis : 1; /**< [ 18: 18](R/W) Disable (internal) auto-ZQCS calibration. When clear, LMC runs internal ZQ calibration
+ every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t auto_dclkdis : 1; /**< [ 17: 17](R/W) When 1, LMC automatically shuts off its internal clock to conserve power when there is no
+ traffic. Note that this has no effect on the DDR3/DDR4 PHY and pads clocks. */
+ uint64_t xor_bank : 1; /**< [ 16: 16](R/W) Enable signal to XOR the bank bits. See LMC()_EXT_CONFIG2 on how LMC selects the L2C-LMC
+ address bits. */
+ uint64_t max_write_batch : 4; /**< [ 15: 12](R/W) Maximum number of consecutive write operations to service before forcing read operations
+ to interrupt. */
+ uint64_t nxm_write_en : 1; /**< [ 11: 11](R/W) NXM write mode. When clear, LMC discards write operations to addresses that don't exist in
+ the DRAM (as defined by LMC()_NXM configuration). When set, LMC completes write
+ operations to addresses that don't exist in the DRAM at an aliased address. */
+ uint64_t reserved_10 : 1;
+ uint64_t inorder_wr : 1; /**< [ 9: 9](R/W) Send write operations in order (regardless of priority). */
+ uint64_t inorder_rd : 1; /**< [ 8: 8](R/W) Send read operations in order (regardless of priority). */
+ uint64_t throttle_wr : 1; /**< [ 7: 7](R/W) When set, use at most one IFB for write operations. */
+ uint64_t throttle_rd : 1; /**< [ 6: 6](R/W) When set, use at most one IFB for read operations. */
+ uint64_t fprch2 : 2; /**< [ 5: 4](R/W) "Front porch enable. When set, the turn-off time for the default DDR#_DQ* /DDR#_DQS_*_P/N
+ drivers is FPRCH2 CKs earlier.
+ 0x0 = 0 CK cycles.
+ 0x1 = 1 CK cycles.
+ 0x2 = 2 CK cycles.
+ 0x3 = Reserved." */
+ uint64_t pocas : 1; /**< [ 3: 3](R/W) Reserved; must be zero.
+ Internal:
+ Enable the posted CAS feature of DDR3. This bit must be
+ set whenever LMC()_MODEREG_PARAMS0[AL]!=0. */
+ uint64_t ddr2t : 1; /**< [ 2: 2](R/W) Turn on the DDR 2T mode. 2 CK-cycle window for CMD and address. This mode helps relieve
+ setup time pressure on the address and command bus which nominally have a very large
+ fanout. Please refer to Micron's tech note tn_47_01 titled DDR2-533 Memory Design Guide
+ for Two Dimm Unbuffered Systems for physical details. */
+ uint64_t bwcnt : 1; /**< [ 1: 1](R/W) Bus utilization counter clear. Clears the LMC()_OPS_CNT, LMC()_IFB_CNT, and
+ LMC()_DCLK_CNT registers. To clear the CSRs, software should first write this field to
+ a one, then write this field to a zero. */
+ uint64_t rdimm_ena : 1; /**< [ 0: 0](R/W) Registered DIMM enable. When set, allows the use of JEDEC Registered DIMMs, which require
+ address and control bits to be registered in the controller. */
+#else /* Word 0 - Little Endian */
+ uint64_t rdimm_ena : 1; /**< [ 0: 0](R/W) Registered DIMM enable. When set, allows the use of JEDEC Registered DIMMs, which require
+ address and control bits to be registered in the controller. */
+ uint64_t bwcnt : 1; /**< [ 1: 1](R/W) Bus utilization counter clear. Clears the LMC()_OPS_CNT, LMC()_IFB_CNT, and
+ LMC()_DCLK_CNT registers. To clear the CSRs, software should first write this field to
+ a one, then write this field to a zero. */
+ uint64_t ddr2t : 1; /**< [ 2: 2](R/W) Turn on the DDR 2T mode. 2 CK-cycle window for CMD and address. This mode helps relieve
+ setup time pressure on the address and command bus which nominally have a very large
+ fanout. Please refer to Micron's tech note tn_47_01 titled DDR2-533 Memory Design Guide
+ for Two Dimm Unbuffered Systems for physical details. */
+ uint64_t pocas : 1; /**< [ 3: 3](R/W) Reserved; must be zero.
+ Internal:
+ Enable the posted CAS feature of DDR3. This bit must be
+ set whenever LMC()_MODEREG_PARAMS0[AL]!=0. */
+ uint64_t fprch2 : 2; /**< [ 5: 4](R/W) "Front porch enable. When set, the turn-off time for the default DDR#_DQ* /DDR#_DQS_*_P/N
+ drivers is FPRCH2 CKs earlier.
+ 0x0 = 0 CK cycles.
+ 0x1 = 1 CK cycles.
+ 0x2 = 2 CK cycles.
+ 0x3 = Reserved." */
+ uint64_t throttle_rd : 1; /**< [ 6: 6](R/W) When set, use at most one IFB for read operations. */
+ uint64_t throttle_wr : 1; /**< [ 7: 7](R/W) When set, use at most one IFB for write operations. */
+ uint64_t inorder_rd : 1; /**< [ 8: 8](R/W) Send read operations in order (regardless of priority). */
+ uint64_t inorder_wr : 1; /**< [ 9: 9](R/W) Send write operations in order (regardless of priority). */
+ uint64_t reserved_10 : 1;
+ uint64_t nxm_write_en : 1; /**< [ 11: 11](R/W) NXM write mode. When clear, LMC discards write operations to addresses that don't exist in
+ the DRAM (as defined by LMC()_NXM configuration). When set, LMC completes write
+ operations to addresses that don't exist in the DRAM at an aliased address. */
+ uint64_t max_write_batch : 4; /**< [ 15: 12](R/W) Maximum number of consecutive write operations to service before forcing read operations
+ to interrupt. */
+ uint64_t xor_bank : 1; /**< [ 16: 16](R/W) Enable signal to XOR the bank bits. See LMC()_EXT_CONFIG2 on how LMC selects the L2C-LMC
+ address bits. */
+ uint64_t auto_dclkdis : 1; /**< [ 17: 17](R/W) When 1, LMC automatically shuts off its internal clock to conserve power when there is no
+ traffic. Note that this has no effect on the DDR3/DDR4 PHY and pads clocks. */
+ uint64_t int_zqcs_dis : 1; /**< [ 18: 18](R/W) Disable (internal) auto-ZQCS calibration. When clear, LMC runs internal ZQ calibration
+ every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t ext_zqcs_dis : 1; /**< [ 19: 19](R/W) Disable (external) auto-ZQCS calibration. When clear, LMC runs external ZQ calibration
+ every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t bprch : 2; /**< [ 21: 20](R/W) "Back porch enable. When set, the turn-on time for the default DDR#_DQ* /DDR#_DQS_*_P/N
+ drivers is delayed an additional BPRCH CK cycles.
+ 0x0 = 0 CK cycles.
+ 0x1 = 1 CK cycles.
+ 0x2 = 2 CK cycles.
+ 0x3 = 3 CK cycles." */
+ uint64_t wodt_bprch : 1; /**< [ 22: 22](R/W) When set, the turn-off time for the ODT pin during a write command is delayed an
+ additional CK cycle. */
+ uint64_t rodt_bprch : 1; /**< [ 23: 23](R/W) When set, the turn-off time for the ODT pin during a read command is delayed an additional
+ CK cycle. */
+ uint64_t crm_max : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ Coarse rate matching max bucket size. The coarse rate matching logic is used to control
+ the bandwidth allocated to DFA reads. [CRM_MAX] is subdivided into two regions with DFA
+ reads being preferred over LMC reads/writes when [CRM_CNT] \< [CRM_THR]. [CRM_CNT]
+ increments by one when a DFA read is slotted and by two when an LMC read/write is
+ slotted, and rolls over when [CRM_MAX] is reached.
+
+ 0x0 = Reserved. */
+ uint64_t crm_thr : 5; /**< [ 33: 29](R/W) Coarse rate matching threshold. */
+ uint64_t crm_cnt : 5; /**< [ 38: 34](RO/H) Coarse count. */
+ uint64_t thrmax : 4; /**< [ 42: 39](R/W) Reserved.
+ Internal:
+ Fine rate matching max bucket size. In conjunction with the coarse rate matching logic,
+ the fine rate matching logic gives software the ability to prioritize DFA reads over L2C
+ writes. Higher [PERSUB] values result in a lower DFA read bandwidth.
+
+ 0x0 = Reserved. */
+ uint64_t persub : 8; /**< [ 50: 43](R/W) Reserved.
+ Internal:
+ Offset for DFA rate-matching. */
+ uint64_t thrcnt : 12; /**< [ 62: 51](RO/H) Fine count. */
+ uint64_t scramble_ena : 1; /**< [ 63: 63](R/W) When set, will enable the scramble/descramble logic. */
+#endif /* Word 0 - End */
+ } s;
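+ /* Sketch (an assumption-laden example, not vendor code): with the
+  * BDK_CSR_MODIFY helper from bdk-csr.h and the BDK_LMCX_CONTROL() address
+  * macro defined after this union, the [BWCNT] counter-clear sequence
+  * described above is the usual one-then-zero write:
+  *
+  *   BDK_CSR_MODIFY(c, node, BDK_LMCX_CONTROL(0), c.s.bwcnt = 1);
+  *   BDK_CSR_MODIFY(c, node, BDK_LMCX_CONTROL(0), c.s.bwcnt = 0);
+  */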
+ struct bdk_lmcx_control_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t scramble_ena : 1; /**< [ 63: 63](R/W) When set, will enable the scramble/descramble logic. */
+ uint64_t thrcnt : 12; /**< [ 62: 51](RO/H) Fine count. */
+ uint64_t persub : 8; /**< [ 50: 43](R/W) Offset for DFA rate-matching. */
+ uint64_t thrmax : 4; /**< [ 42: 39](R/W) Fine rate matching max bucket size. In conjunction with the coarse rate matching logic,
+ the fine rate matching logic gives software the ability to prioritize DFA reads over L2C
+ writes. Higher [PERSUB] values result in a lower DFA read bandwidth.
+
+ 0x0 = Reserved. */
+ uint64_t crm_cnt : 5; /**< [ 38: 34](RO/H) Coarse count. */
+ uint64_t crm_thr : 5; /**< [ 33: 29](R/W) Coarse rate matching threshold. */
+ uint64_t crm_max : 5; /**< [ 28: 24](R/W) Coarse rate matching max bucket size. The coarse rate matching logic is used to control
+ the bandwidth allocated to DFA reads. [CRM_MAX] is subdivided into two regions with DFA
+ reads being preferred over LMC reads/writes when [CRM_CNT] \< [CRM_THR]. [CRM_CNT]
+ increments by one when a DFA read is slotted and by two when an LMC read/write is
+ slotted, and rolls over when [CRM_MAX] is reached.
+
+ 0x0 = Reserved. */
+ uint64_t rodt_bprch : 1; /**< [ 23: 23](R/W) When set, the turn-off time for the ODT pin during a read command is delayed an additional
+ CK cycle. */
+ uint64_t wodt_bprch : 1; /**< [ 22: 22](R/W) When set, the turn-off time for the ODT pin during a write command is delayed an
+ additional CK cycle. */
+ uint64_t bprch : 2; /**< [ 21: 20](R/W) "Back porch enable. When set, the turn-on time for the default DDR#_DQ* /DDR#_DQS_*_P/N
+ drivers is delayed an additional BPRCH CK cycles.
+ 0x0 = 0 CK cycles.
+ 0x1 = 1 CK cycles.
+ 0x2 = 2 CK cycles.
+ 0x3 = 3 CK cycles." */
+ uint64_t ext_zqcs_dis : 1; /**< [ 19: 19](R/W) Disable (external) auto-ZQCS calibration. When clear, LMC runs external ZQ calibration
+ every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t int_zqcs_dis : 1; /**< [ 18: 18](R/W) Disable (internal) auto-ZQCS calibration. When clear, LMC runs internal ZQ calibration
+ every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t auto_dclkdis : 1; /**< [ 17: 17](R/W) When 1, LMC automatically shuts off its internal clock to conserve power when there is no
+ traffic. Note that this has no effect on the DDR3/DDR4 PHY and pads clocks. */
+ uint64_t xor_bank : 1; /**< [ 16: 16](R/W) XOR the bank bits.
+ 0: bank\<2:0\> = address\<9:7\>.
+ 1: bank\<2:0\> = address\<9:7\> ^ address\<14:12\>. */
+ uint64_t max_write_batch : 4; /**< [ 15: 12](R/W) Maximum number of consecutive write operations to service before forcing read operations
+ to interrupt. */
+ uint64_t nxm_write_en : 1; /**< [ 11: 11](R/W) NXM write mode. When clear, LMC discards write operations to addresses that don't exist in
+ the DRAM (as defined by LMC()_NXM configuration). When set, LMC completes write
+ operations to addresses that don't exist in the DRAM at an aliased address. */
+ uint64_t elev_prio_dis : 1; /**< [ 10: 10](R/W) Disable elevate priority logic. When set, write operations are sent in regardless of
+ priority information from L2C. */
+ uint64_t inorder_wr : 1; /**< [ 9: 9](R/W) Send write operations in order (regardless of priority). */
+ uint64_t inorder_rd : 1; /**< [ 8: 8](R/W) Send read operations in order (regardless of priority). */
+ uint64_t throttle_wr : 1; /**< [ 7: 7](R/W) When set, use at most one IFB for write operations. */
+ uint64_t throttle_rd : 1; /**< [ 6: 6](R/W) When set, use at most one IFB for read operations. */
+ uint64_t fprch2 : 2; /**< [ 5: 4](R/W) "Front porch enable. When set, the turn-off time for the default DDR#_DQ* /DDR#_DQS_*_P/N
+ drivers is FPRCH2 CKs earlier.
+ 0x0 = 0 CK cycles.
+ 0x1 = 1 CK cycles.
+ 0x2 = 2 CK cycles.
+ 0x3 = Reserved." */
+ uint64_t pocas : 1; /**< [ 3: 3](R/W) Reserved; must be zero.
+ Internal:
+ Enable the posted CAS feature of DDR3. This bit must be
+ set whenever LMC()_MODEREG_PARAMS0[AL]!=0. */
+ uint64_t ddr2t : 1; /**< [ 2: 2](R/W) Turn on the DDR 2T mode. 2 CK-cycle window for CMD and address. This mode helps relieve
+ setup time pressure on the address and command bus which nominally have a very large
+ fanout. Please refer to Micron's tech note tn_47_01 titled DDR2-533 Memory Design Guide
+ for Two Dimm Unbuffered Systems for physical details. */
+ uint64_t bwcnt : 1; /**< [ 1: 1](R/W) Bus utilization counter clear. Clears the LMC()_OPS_CNT, LMC()_IFB_CNT, and
+ LMC()_DCLK_CNT registers. To clear the CSRs, software should first write this field to
+ a one, then write this field to a zero. */
+ uint64_t rdimm_ena : 1; /**< [ 0: 0](R/W) Registered DIMM enable. When set, allows the use of JEDEC Registered DIMMs, which require
+ address and control bits to be registered in the controller. */
+#else /* Word 0 - Little Endian */
+ uint64_t rdimm_ena : 1; /**< [ 0: 0](R/W) Registered DIMM enable. When set, allows the use of JEDEC Registered DIMMs, which require
+ address and control bits to be registered in the controller. */
+ uint64_t bwcnt : 1; /**< [ 1: 1](R/W) Bus utilization counter clear. Clears the LMC()_OPS_CNT, LMC()_IFB_CNT, and
+ LMC()_DCLK_CNT registers. To clear the CSRs, software should first write this field to
+ a one, then write this field to a zero. */
+ uint64_t ddr2t : 1; /**< [ 2: 2](R/W) Turn on the DDR 2T mode. 2 CK-cycle window for CMD and address. This mode helps relieve
+ setup time pressure on the address and command bus which nominally have a very large
+ fanout. Please refer to Micron's tech note tn_47_01 titled DDR2-533 Memory Design Guide
+ for Two Dimm Unbuffered Systems for physical details. */
+ uint64_t pocas : 1; /**< [ 3: 3](R/W) Reserved; must be zero.
+ Internal:
+ Enable the posted CAS feature of DDR3. This bit must be
+ set whenever LMC()_MODEREG_PARAMS0[AL]!=0. */
+ uint64_t fprch2 : 2; /**< [ 5: 4](R/W) "Front porch enable. When set, the turn-off time for the default DDR#_DQ* /DDR#_DQS_*_P/N
+ drivers is FPRCH2 CKs earlier.
+ 0x0 = 0 CK cycles.
+ 0x1 = 1 CK cycles.
+ 0x2 = 2 CK cycles.
+ 0x3 = Reserved." */
+ uint64_t throttle_rd : 1; /**< [ 6: 6](R/W) When set, use at most one IFB for read operations. */
+ uint64_t throttle_wr : 1; /**< [ 7: 7](R/W) When set, use at most one IFB for write operations. */
+ uint64_t inorder_rd : 1; /**< [ 8: 8](R/W) Send read operations in order (regardless of priority). */
+ uint64_t inorder_wr : 1; /**< [ 9: 9](R/W) Send write operations in order (regardless of priority). */
+ uint64_t elev_prio_dis : 1; /**< [ 10: 10](R/W) Disable elevate priority logic. When set, write operations are sent in regardless of
+ priority information from L2C. */
+ uint64_t nxm_write_en : 1; /**< [ 11: 11](R/W) NXM write mode. When clear, LMC discards write operations to addresses that don't exist in
+ the DRAM (as defined by LMC()_NXM configuration). When set, LMC completes write
+ operations to addresses that don't exist in the DRAM at an aliased address. */
+ uint64_t max_write_batch : 4; /**< [ 15: 12](R/W) Maximum number of consecutive write operations to service before forcing read operations
+ to interrupt. */
+ uint64_t xor_bank : 1; /**< [ 16: 16](R/W) XOR the bank bits.
+ 0: bank\<2:0\> = address\<9:7\>.
+ 1: bank\<2:0\> = address\<9:7\> ^ address\<14:12\>. */
+ uint64_t auto_dclkdis : 1; /**< [ 17: 17](R/W) When 1, LMC automatically shuts off its internal clock to conserve power when there is no
+ traffic. Note that this has no effect on the DDR3/DDR4 PHY and pads clocks. */
+ uint64_t int_zqcs_dis : 1; /**< [ 18: 18](R/W) Disable (internal) auto-ZQCS calibration. When clear, LMC runs internal ZQ calibration
+ every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t ext_zqcs_dis : 1; /**< [ 19: 19](R/W) Disable (external) auto-ZQCS calibration. When clear, LMC runs external ZQ calibration
+ every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t bprch : 2; /**< [ 21: 20](R/W) "Back porch enable. When set, the turn-on time for the default DDR#_DQ* /DDR#_DQS_*_P/N
+ drivers is delayed an additional BPRCH CK cycles.
+ 0x0 = 0 CK cycles.
+ 0x1 = 1 CK cycles.
+ 0x2 = 2 CK cycles.
+ 0x3 = 3 CK cycles." */
+ uint64_t wodt_bprch : 1; /**< [ 22: 22](R/W) When set, the turn-off time for the ODT pin during a write command is delayed an
+ additional CK cycle. */
+ uint64_t rodt_bprch : 1; /**< [ 23: 23](R/W) When set, the turn-off time for the ODT pin during a read command is delayed an additional
+ CK cycle. */
+ uint64_t crm_max : 5; /**< [ 28: 24](R/W) Coarse rate matching max bucket size. The coarse rate matching logic is used to control
+ the bandwidth allocated to DFA reads. [CRM_MAX] is subdivided into two regions with DFA
+ reads being preferred over LMC reads/writes when [CRM_CNT] \< [CRM_THR]. [CRM_CNT]
+ increments by one when a DFA read is slotted and by two when an LMC read/write is
+ slotted, and rolls over when [CRM_MAX] is reached.
+
+ 0x0 = Reserved. */
+ uint64_t crm_thr : 5; /**< [ 33: 29](R/W) Coarse rate matching threshold. */
+ uint64_t crm_cnt : 5; /**< [ 38: 34](RO/H) Coarse count. */
+ uint64_t thrmax : 4; /**< [ 42: 39](R/W) Fine rate matching max bucket size. In conjunction with the coarse rate matching logic,
+ the fine rate matching logic gives software the ability to prioritize DFA reads over L2C
+ writes. Higher [PERSUB] values result in a lower DFA read bandwidth.
+
+ 0x0 = Reserved. */
+ uint64_t persub : 8; /**< [ 50: 43](R/W) Offset for DFA rate-matching. */
+ uint64_t thrcnt : 12; /**< [ 62: 51](RO/H) Fine count. */
+ uint64_t scramble_ena : 1; /**< [ 63: 63](R/W) When set, will enable the scramble/descramble logic. */
+#endif /* Word 0 - End */
+ } cn88xxp1;
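+ /* Worked example (illustrative arithmetic) for the CN88XX pass 1.x
+  * [XOR_BANK] mapping above: for address 0x4280, address\<9:7\> = 0x5 and
+  * address\<14:12\> = 0x4, so bank\<2:0\> = 0x5 when [XOR_BANK]=0 and
+  * 0x5 ^ 0x4 = 0x1 when [XOR_BANK]=1. */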
+ struct bdk_lmcx_control_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_63 : 1;
+ uint64_t thrcnt : 12; /**< [ 62: 51](RO/H) Fine count. */
+ uint64_t persub : 8; /**< [ 50: 43](R/W) Reserved.
+ Internal:
+ Offset for DFA rate-matching. */
+ uint64_t thrmax : 4; /**< [ 42: 39](R/W) Reserved.
+ Internal:
+ Fine rate matching max bucket size. In conjunction with the coarse rate matching logic,
+ the fine rate matching logic gives software the ability to prioritize DFA reads over L2C
+ writes. Higher [PERSUB] values result in a lower DFA read bandwidth.
+
+ 0x0 = Reserved. */
+ uint64_t crm_cnt : 5; /**< [ 38: 34](RO/H) Coarse count. */
+ uint64_t crm_thr : 5; /**< [ 33: 29](R/W) Coarse rate matching threshold. */
+ uint64_t crm_max : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ Coarse rate matching max bucket size. The coarse rate matching logic is used to control
+ the bandwidth allocated to DFA reads. [CRM_MAX] is subdivided into two regions with DFA
+ reads being preferred over LMC reads/writes when [CRM_CNT] \< [CRM_THR]. [CRM_CNT]
+ increments by one when a DFA read is slotted and by two when an LMC read/write is
+ slotted, and rolls over when [CRM_MAX] is reached.
+
+ 0x0 = Reserved. */
+ uint64_t rodt_bprch : 1; /**< [ 23: 23](R/W) When set, the turn-off time for the ODT pin during a read command is delayed an additional
+ CK cycle. */
+ uint64_t wodt_bprch : 1; /**< [ 22: 22](R/W) When set, the turn-off time for the ODT pin during a write command is delayed an
+ additional CK cycle. */
+ uint64_t bprch : 2; /**< [ 21: 20](R/W) "Back porch enable. When set, the turn-on time for the default DDR#_DQ* /DDR#_DQS_*_P/N
+ drivers is delayed an additional BPRCH CK cycles.
+ 0x0 = 0 CK cycles.
+ 0x1 = 1 CK cycles.
+ 0x2 = 2 CK cycles.
+ 0x3 = 3 CK cycles." */
+ uint64_t ext_zqcs_dis : 1; /**< [ 19: 19](R/W) Disable (external) auto-ZQCS calibration. When clear, LMC runs external ZQ calibration
+ every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t int_zqcs_dis : 1; /**< [ 18: 18](R/W) Disable (internal) auto-ZQCS calibration. When clear, LMC runs internal ZQ calibration
+ every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t auto_dclkdis : 1; /**< [ 17: 17](R/W) When 1, LMC automatically shuts off its internal clock to conserve power when there is no
+ traffic. Note that this has no effect on the DDR4 PHY and pads clocks. */
+ uint64_t xor_bank : 1; /**< [ 16: 16](R/W) Enable signal to XOR the bank bits. See LMC()_EXT_CONFIG2 on how LMC selects the L2C-LMC
+ address bits. */
+ uint64_t max_write_batch : 4; /**< [ 15: 12](R/W) Maximum number of consecutive write operations to service before forcing read operations
+ to interrupt. */
+ uint64_t nxm_write_en : 1; /**< [ 11: 11](R/W) NXM write mode. When clear, LMC discards write operations to addresses that don't exist in
+ the DRAM (as defined by LMC()_NXM configuration). When set, LMC completes write
+ operations to addresses that don't exist in the DRAM at an aliased address. */
+ uint64_t wrfl_prio_dis : 1; /**< [ 10: 10](R/W) Disable write flush priority logic. When set, LMC does not prioritize writes, regardless
+ of whether there is a pending write-flush command sent from the TAD. */
+ uint64_t inorder_wr : 1; /**< [ 9: 9](R/W) Send write operations in order (regardless of priority). */
+ uint64_t inorder_rd : 1; /**< [ 8: 8](R/W) Send read operations in order (regardless of priority). */
+ uint64_t reserved_6_7 : 2;
+ uint64_t fprch2 : 2; /**< [ 5: 4](R/W) "Front porch enable. When set, the turn-off time for the default DDR#_DQ* /DDR#_DQS_*_P/N
+ drivers is FPRCH2 CKs earlier.
+ 0x0 = 0 CK cycles.
+ 0x1 = 1 CK cycles.
+ 0x2 = 2 CK cycles.
+ 0x3 = Reserved." */
+ uint64_t pocas : 1; /**< [ 3: 3](R/W) Reserved; must be zero.
+ Internal:
+ Enable the posted CAS feature of DDR4. This bit must be
+ set whenever LMC()_MODEREG_PARAMS0[AL]!=0. */
+ uint64_t ddr2t : 1; /**< [ 2: 2](R/W) Turn on the DDR 2T mode. 2 CK-cycle window for CMD and address. This mode helps relieve
+ setup time pressure on the address and command bus which nominally have a very large
+ fanout.
+ If software wants to enable this feature, it must be set prior to running any
+ initialization code. */
+ uint64_t bwcnt : 1; /**< [ 1: 1](R/W) Bus utilization counter clear. Clears the LMC()_OPS_CNT, LMC()_IFB_CNT, and
+ LMC()_DCLK_CNT registers. To clear the CSRs, software should first write this field to
+ a one, then write this field to a zero. */
+ uint64_t rdimm_ena : 1; /**< [ 0: 0](R/W) Registered DIMM enable. When set, allows the use of JEDEC Registered DIMMs, which require
+ address and control bits to be registered in the controller. */
+#else /* Word 0 - Little Endian */
+ uint64_t rdimm_ena : 1; /**< [ 0: 0](R/W) Registered DIMM enable. When set, allows the use of JEDEC Registered DIMMs, which require
+ address and control bits to be registered in the controller. */
+ uint64_t bwcnt : 1; /**< [ 1: 1](R/W) Bus utilization counter clear. Clears the LMC()_OPS_CNT, LMC()_IFB_CNT, and
+ LMC()_DCLK_CNT registers. To clear the CSRs, software should first write this field to
+ a one, then write this field to a zero. */
+ uint64_t ddr2t : 1; /**< [ 2: 2](R/W) Turn on the DDR 2T mode. 2 CK-cycle window for CMD and address. This mode helps relieve
+ setup time pressure on the address and command bus which nominally have a very large
+ fanout.
+ If software wants to enable this feature, it must be set prior to running any
+ initialization code. */
+ uint64_t pocas : 1; /**< [ 3: 3](R/W) Reserved; must be zero.
+ Internal:
+ Enable the posted CAS feature of DDR4. This bit must be
+ set whenever LMC()_MODEREG_PARAMS0[AL]!=0. */
+ uint64_t fprch2 : 2; /**< [ 5: 4](R/W) "Front porch enable. When set, the turn-off time for the default DDR#_DQ* /DDR#_DQS_*_P/N
+ drivers is FPRCH2 CKs earlier.
+ 0x0 = 0 CK cycles.
+ 0x1 = 1 CK cycles.
+ 0x2 = 2 CK cycles.
+ 0x3 = Reserved." */
+ uint64_t reserved_6_7 : 2;
+ uint64_t inorder_rd : 1; /**< [ 8: 8](R/W) Send read operations in order (regardless of priority). */
+ uint64_t inorder_wr : 1; /**< [ 9: 9](R/W) Send write operations in order (regardless of priority). */
+ uint64_t wrfl_prio_dis : 1; /**< [ 10: 10](R/W) Disable write flush priority logic. When set, LMC does not prioritize writes, regardless
+ of whether there is a pending write-flush command sent from the TAD. */
+ uint64_t nxm_write_en : 1; /**< [ 11: 11](R/W) NXM write mode. When clear, LMC discards write operations to addresses that don't exist in
+ the DRAM (as defined by LMC()_NXM configuration). When set, LMC completes write
+ operations to addresses that don't exist in the DRAM at an aliased address. */
+ uint64_t max_write_batch : 4; /**< [ 15: 12](R/W) Maximum number of consecutive write operations to service before forcing read operations
+ to interrupt. */
+ uint64_t xor_bank : 1; /**< [ 16: 16](R/W) Enable signal to XOR the bank bits. See LMC()_EXT_CONFIG2 on how LMC selects the L2C-LMC
+ address bits. */
+ uint64_t auto_dclkdis : 1; /**< [ 17: 17](R/W) When 1, LMC automatically shuts off its internal clock to conserve power when there is no
+ traffic. Note that this has no effect on the DDR4 PHY and pads clocks. */
+ uint64_t int_zqcs_dis : 1; /**< [ 18: 18](R/W) Disable (internal) auto-ZQCS calibration. When clear, LMC runs internal ZQ calibration
+ every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t ext_zqcs_dis : 1; /**< [ 19: 19](R/W) Disable (external) auto-ZQCS calibration. When clear, LMC runs external ZQ calibration
+ every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t bprch : 2; /**< [ 21: 20](R/W) "Back porch enable. When set, the turn-on time for the default DDR#_DQ* /DDR#_DQS_*_P/N
+ drivers is delayed an additional BPRCH CK cycles.
+ 0x0 = 0 CK cycles.
+ 0x1 = 1 CK cycles.
+ 0x2 = 2 CK cycles.
+ 0x3 = 3 CK cycles." */
+ uint64_t wodt_bprch : 1; /**< [ 22: 22](R/W) When set, the turn-off time for the ODT pin during a write command is delayed an
+ additional CK cycle. */
+ uint64_t rodt_bprch : 1; /**< [ 23: 23](R/W) When set, the turn-off time for the ODT pin during a read command is delayed an additional
+ CK cycle. */
+ uint64_t crm_max : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ Coarse rate matching max bucket size. The coarse rate matching logic is used to control
+ the bandwidth allocated to DFA reads. [CRM_MAX] is subdivided into two regions with DFA
+ reads being preferred over LMC reads/writes when [CRM_CNT] \< [CRM_THR]. [CRM_CNT]
+ increments by one when a DFA read is slotted and by two when an LMC read/write is
+ slotted, and rolls over when [CRM_MAX] is reached.
+
+ 0x0 = Reserved. */
+ uint64_t crm_thr : 5; /**< [ 33: 29](R/W) Coarse rate matching threshold. */
+ uint64_t crm_cnt : 5; /**< [ 38: 34](RO/H) Coarse count. */
+ uint64_t thrmax : 4; /**< [ 42: 39](R/W) Reserved.
+ Internal:
+ Fine rate matching max bucket size. In conjunction with the coarse rate matching logic,
+ the fine rate matching logic gives software the ability to prioritize DFA reads over L2C
+ writes. Higher [PERSUB] values result in a lower DFA read bandwidth.
+
+ 0x0 = Reserved. */
+ uint64_t persub : 8; /**< [ 50: 43](R/W) Reserved.
+ Internal:
+ Offset for DFA rate-matching. */
+ uint64_t thrcnt : 12; /**< [ 62: 51](RO/H) Fine count. */
+ uint64_t reserved_63 : 1;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_lmcx_control_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t scramble_ena : 1; /**< [ 63: 63](R/W) When set, will enable the scramble/descramble logic. */
+ uint64_t thrcnt : 12; /**< [ 62: 51](RO/H) Fine count. */
+ uint64_t persub : 8; /**< [ 50: 43](R/W) Reserved.
+ Internal:
+ Offset for DFA rate-matching. */
+ uint64_t thrmax : 4; /**< [ 42: 39](R/W) Reserved.
+ Internal:
+ Fine rate matching max bucket size. In conjunction with the coarse rate matching logic,
+ the fine rate matching logic gives software the ability to prioritize DFA reads over L2C
+ writes. Higher [PERSUB] values result in a lower DFA read bandwidth.
+
+ 0x0 = Reserved. */
+ uint64_t crm_cnt : 5; /**< [ 38: 34](RO/H) Coarse count. */
+ uint64_t crm_thr : 5; /**< [ 33: 29](R/W) Coarse rate matching threshold. */
+ uint64_t crm_max : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ Coarse rate matching max bucket size. The coarse rate matching logic is used to control
+ the bandwidth allocated to DFA reads. [CRM_MAX] is subdivided into two regions with DFA
+ reads being preferred over LMC reads/writes when [CRM_CNT] \< [CRM_THR]. [CRM_CNT]
+ increments by one when a DFA read is slotted and by two when an LMC read/write is
+ slotted, and rolls over when [CRM_MAX] is reached.
+
+ 0x0 = Reserved. */
+ uint64_t rodt_bprch : 1; /**< [ 23: 23](R/W) When set, the turn-off time for the ODT pin during a read command is delayed an additional
+ CK cycle. */
+ uint64_t wodt_bprch : 1; /**< [ 22: 22](R/W) When set, the turn-off time for the ODT pin during a write command is delayed an
+ additional CK cycle. */
+ uint64_t bprch : 2; /**< [ 21: 20](R/W) "Back porch enable. When set, the turn-on time for the default DDR#_DQ* /DDR#_DQS_*_P/N
+ drivers is delayed an additional BPRCH CK cycles.
+ 0x0 = 0 CK cycles.
+ 0x1 = 1 CK cycles.
+ 0x2 = 2 CK cycles.
+ 0x3 = 3 CK cycles." */
+ uint64_t ext_zqcs_dis : 1; /**< [ 19: 19](R/W) Disable (external) auto-ZQCS calibration. When clear, LMC runs external ZQ calibration
+ every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t int_zqcs_dis : 1; /**< [ 18: 18](R/W) Disable (internal) auto-ZQCS calibration. When clear, LMC runs internal ZQ calibration
+ every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t auto_dclkdis : 1; /**< [ 17: 17](R/W) When 1, LMC automatically shuts off its internal clock to conserve power when there is no
+ traffic. Note that this has no effect on the DDR3/DDR4 PHY and pads clocks. */
+ uint64_t xor_bank : 1; /**< [ 16: 16](R/W) Enable signal to XOR the bank bits. See LMC()_EXT_CONFIG2 on how LMC selects the L2C-LMC
+ address bits. */
+ uint64_t max_write_batch : 4; /**< [ 15: 12](R/W) Maximum number of consecutive write operations to service before forcing read operations
+ to interrupt. */
+ uint64_t nxm_write_en : 1; /**< [ 11: 11](R/W) NXM write mode. When clear, LMC discards write operations to addresses that don't exist in
+ the DRAM (as defined by LMC()_NXM configuration). When set, LMC completes write
+ operations to addresses that don't exist in the DRAM at an aliased address. */
+ uint64_t elev_prio_dis : 1; /**< [ 10: 10](R/W) Disable elevate priority logic. When set, write operations are sent in regardless of
+ priority information from L2C. */
+ uint64_t inorder_wr : 1; /**< [ 9: 9](R/W) Send write operations in order (regardless of priority). */
+ uint64_t inorder_rd : 1; /**< [ 8: 8](R/W) Send read operations in order (regardless of priority). */
+ uint64_t throttle_wr : 1; /**< [ 7: 7](R/W) When set, use at most one IFB for write operations. */
+ uint64_t throttle_rd : 1; /**< [ 6: 6](R/W) When set, use at most one IFB for read operations. */
+ uint64_t fprch2 : 2; /**< [ 5: 4](R/W) "Front porch enable. When set, the turn-off time for the default DDR#_DQ* /DDR#_DQS_*_P/N
+ drivers is FPRCH2 CKs earlier.
+ 0x0 = 0 CK cycles.
+ 0x1 = 1 CK cycles.
+ 0x2 = 2 CK cycles.
+ 0x3 = Reserved." */
+ uint64_t pocas : 1; /**< [ 3: 3](R/W) Reserved; must be zero.
+ Internal:
+ Enable the posted CAS feature of DDR3. This bit must be
+ set whenever LMC()_MODEREG_PARAMS0[AL]!=0. */
+ uint64_t ddr2t : 1; /**< [ 2: 2](R/W) Turn on the DDR 2T mode. 2 CK-cycle window for CMD and address. This mode helps relieve
+ setup time pressure on the address and command bus which nominally have a very large
+ fanout. Please refer to Micron's tech note tn_47_01 titled DDR2-533 Memory Design Guide
+ for Two Dimm Unbuffered Systems for physical details. */
+ uint64_t bwcnt : 1; /**< [ 1: 1](R/W) Bus utilization counter clear. Clears the LMC()_OPS_CNT, LMC()_IFB_CNT, and
+ LMC()_DCLK_CNT registers. To clear the CSRs, software should first write this field to
+ a one, then write this field to a zero. */
+ uint64_t rdimm_ena : 1; /**< [ 0: 0](R/W) Registered DIMM enable. When set, allows the use of JEDEC Registered DIMMs, which require
+ address and control bits to be registered in the controller. */
+#else /* Word 0 - Little Endian */
+ uint64_t rdimm_ena : 1; /**< [ 0: 0](R/W) Registered DIMM enable. When set, allows the use of JEDEC Registered DIMMs, which require
+ address and control bits to be registered in the controller. */
+ uint64_t bwcnt : 1; /**< [ 1: 1](R/W) Bus utilization counter clear. Clears the LMC()_OPS_CNT, LMC()_IFB_CNT, and
+ LMC()_DCLK_CNT registers. To clear the CSRs, software should first write this field to
+ a one, then write this field to a zero. */
+ uint64_t ddr2t : 1; /**< [ 2: 2](R/W) Turn on the DDR 2T mode. 2 CK-cycle window for CMD and address. This mode helps relieve
+ setup time pressure on the address and command bus which nominally have a very large
+ fanout. Please refer to Micron's tech note tn_47_01 titled DDR2-533 Memory Design Guide
+ for Two Dimm Unbuffered Systems for physical details. */
+ uint64_t pocas : 1; /**< [ 3: 3](R/W) Reserved; must be zero.
+ Internal:
+ Enable the posted CAS feature of DDR3. This bit must be
+ set whenever LMC()_MODEREG_PARAMS0[AL]!=0. */
+ uint64_t fprch2 : 2; /**< [ 5: 4](R/W) "Front porch enable. When set, the turn-off time for the default DDR#_DQ* /DDR#_DQS_*_P/N
+ drivers is FPRCH2 CKs earlier.
+ 0x0 = 0 CK cycles.
+ 0x1 = 1 CK cycles.
+ 0x2 = 2 CK cycles.
+ 0x3 = Reserved." */
+ uint64_t throttle_rd : 1; /**< [ 6: 6](R/W) When set, use at most one IFB for read operations. */
+ uint64_t throttle_wr : 1; /**< [ 7: 7](R/W) When set, use at most one IFB for write operations. */
+ uint64_t inorder_rd : 1; /**< [ 8: 8](R/W) Send read operations in order (regardless of priority). */
+ uint64_t inorder_wr : 1; /**< [ 9: 9](R/W) Send write operations in order (regardless of priority). */
+ uint64_t elev_prio_dis : 1; /**< [ 10: 10](R/W) Disable elevate priority logic. When set, write operations are sent in regardless of
+ priority information from L2C. */
+ uint64_t nxm_write_en : 1; /**< [ 11: 11](R/W) NXM write mode. When clear, LMC discards write operations to addresses that don't exist in
+ the DRAM (as defined by LMC()_NXM configuration). When set, LMC completes write
+ operations to addresses that don't exist in the DRAM at an aliased address. */
+ uint64_t max_write_batch : 4; /**< [ 15: 12](R/W) Maximum number of consecutive write operations to service before forcing read operations
+ to interrupt. */
+ uint64_t xor_bank : 1; /**< [ 16: 16](R/W) Enable signal to XOR the bank bits. See LMC()_EXT_CONFIG2 on how LMC selects the L2C-LMC
+ address bits. */
+ uint64_t auto_dclkdis : 1; /**< [ 17: 17](R/W) When 1, LMC automatically shuts off its internal clock to conserve power when there is no
+ traffic. Note that this has no effect on the DDR3/DDR4 PHY and pads clocks. */
+ uint64_t int_zqcs_dis : 1; /**< [ 18: 18](R/W) Disable (internal) auto-ZQCS calibration. When clear, LMC runs internal ZQ calibration
+ every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t ext_zqcs_dis : 1; /**< [ 19: 19](R/W) Disable (external) auto-ZQCS calibration. When clear, LMC runs external ZQ calibration
+ every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t bprch : 2; /**< [ 21: 20](R/W) "Back porch enable. When set, the turn-on time for the default DDR#_DQ* /DDR#_DQS_*_P/N
+ drivers is delayed an additional BPRCH CK cycles.
+ 0x0 = 0 CK cycles.
+ 0x1 = 1 CK cycles.
+ 0x2 = 2 CK cycles.
+ 0x3 = 3 CK cycles." */
+ uint64_t wodt_bprch : 1; /**< [ 22: 22](R/W) When set, the turn-off time for the ODT pin during a write command is delayed an
+ additional CK cycle. */
+ uint64_t rodt_bprch : 1; /**< [ 23: 23](R/W) When set, the turn-off time for the ODT pin during a read command is delayed an additional
+ CK cycle. */
+ uint64_t crm_max : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ Coarse rate matching max bucket size. The coarse rate matching logic is used to control
+ the bandwidth allocated to DFA reads. [CRM_MAX] is subdivided into two regions with DFA
+ reads being preferred over LMC reads/writes when [CRM_CNT] \< [CRM_THR]. [CRM_CNT]
+ increments by one when a DFA read is slotted and by two when an LMC read/write is
+ slotted, and rolls over when [CRM_MAX] is reached.
+
+ 0x0 = Reserved. */
+ uint64_t crm_thr : 5; /**< [ 33: 29](R/W) Coarse rate matching threshold. */
+ uint64_t crm_cnt : 5; /**< [ 38: 34](RO/H) Coarse count. */
+ uint64_t thrmax : 4; /**< [ 42: 39](R/W) Reserved.
+ Internal:
+ Fine rate matching max bucket size. In conjunction with the coarse rate matching logic,
+ the fine rate matching logic gives software the ability to prioritize DFA reads over L2C
+ writes. Higher [PERSUB] values result in a lower DFA read bandwidth.
+
+ 0x0 = Reserved. */
+ uint64_t persub : 8; /**< [ 50: 43](R/W) Reserved.
+ Internal:
+ Offset for DFA rate-matching. */
+ uint64_t thrcnt : 12; /**< [ 62: 51](RO/H) Fine count. */
+ uint64_t scramble_ena : 1; /**< [ 63: 63](R/W) When set, will enable the scramble/descramble logic. */
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_lmcx_control_cn81xx cn83xx; */
+ struct bdk_lmcx_control_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t scramble_ena : 1; /**< [ 63: 63](R/W) When set, will enable the scramble/descramble logic. */
+ uint64_t thrcnt : 12; /**< [ 62: 51](RO/H) Fine count. */
+ uint64_t persub : 8; /**< [ 50: 43](R/W) Offset for DFA rate-matching. */
+ uint64_t thrmax : 4; /**< [ 42: 39](R/W) Fine rate matching max bucket size. In conjunction with the coarse rate matching logic,
+ the fine rate matching logic gives software the ability to prioritize DFA reads over L2C
+ writes. Higher [PERSUB] values result in a lower DFA read bandwidth.
+
+ 0x0 = Reserved. */
+ uint64_t crm_cnt : 5; /**< [ 38: 34](RO/H) Coarse count. */
+ uint64_t crm_thr : 5; /**< [ 33: 29](R/W) Coarse rate matching threshold. */
+ uint64_t crm_max : 5; /**< [ 28: 24](R/W) Coarse rate matching max bucket size. The coarse rate matching logic is used to control
+ the bandwidth allocated to DFA reads. [CRM_MAX] is subdivided into two regions with DFA
+ reads being preferred over LMC reads/writes when [CRM_CNT] \< [CRM_THR]. [CRM_CNT]
+ increments by one when a DFA read is slotted and by two when an LMC read/write is
+ slotted, and rolls over when [CRM_MAX] is reached.
+
+ 0x0 = Reserved. */
+ uint64_t rodt_bprch : 1; /**< [ 23: 23](R/W) When set, the turn-off time for the ODT pin during a read command is delayed an additional
+ CK cycle. */
+ uint64_t wodt_bprch : 1; /**< [ 22: 22](R/W) When set, the turn-off time for the ODT pin during a write command is delayed an
+ additional CK cycle. */
+ uint64_t bprch : 2; /**< [ 21: 20](R/W) "Back porch enable. When set, the turn-on time for the default DDR#_DQ* /DDR#_DQS_*_P/N
+ drivers is delayed an additional BPRCH CK cycles.
+ 0x0 = 0 CK cycles.
+ 0x1 = 1 CK cycles.
+ 0x2 = 2 CK cycles.
+ 0x3 = 3 CK cycles." */
+ uint64_t ext_zqcs_dis : 1; /**< [ 19: 19](R/W) Disable (external) auto-ZQCS calibration. When clear, LMC runs external ZQ calibration
+ every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t int_zqcs_dis : 1; /**< [ 18: 18](R/W) Disable (internal) auto-ZQCS calibration. When clear, LMC runs internal ZQ calibration
+ every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t auto_dclkdis : 1; /**< [ 17: 17](R/W) When 1, LMC automatically shuts off its internal clock to conserve power when there is no
+ traffic. Note that this has no effect on the DDR3/DDR4 PHY and pads clocks. */
+ uint64_t xor_bank : 1; /**< [ 16: 16](R/W) Enable signal to XOR the bank bits. See LMC()_EXT_CONFIG2 on how LMC selects the L2C-LMC
+ address bits. */
+ uint64_t max_write_batch : 4; /**< [ 15: 12](R/W) Maximum number of consecutive write operations to service before forcing read operations
+ to interrupt. */
+ uint64_t nxm_write_en : 1; /**< [ 11: 11](R/W) NXM write mode. When clear, LMC discards write operations to addresses that don't exist in
+ the DRAM (as defined by LMC()_NXM configuration). When set, LMC completes write
+ operations to addresses that don't exist in the DRAM at an aliased address. */
+ uint64_t elev_prio_dis : 1; /**< [ 10: 10](R/W) Disable elevate priority logic. When set, write operations are sent in regardless of
+ priority information from L2C. */
+ uint64_t inorder_wr : 1; /**< [ 9: 9](R/W) Send write operations in order (regardless of priority). */
+ uint64_t inorder_rd : 1; /**< [ 8: 8](R/W) Send read operations in order (regardless of priority). */
+ uint64_t throttle_wr : 1; /**< [ 7: 7](R/W) When set, use at most one IFB for write operations. */
+ uint64_t throttle_rd : 1; /**< [ 6: 6](R/W) When set, use at most one IFB for read operations. */
+ uint64_t fprch2 : 2; /**< [ 5: 4](R/W) "Front porch enable. When set, the turn-off time for the default DDR#_DQ* /DDR#_DQS_*_P/N
+ drivers is FPRCH2 CKs earlier.
+ 0x0 = 0 CK cycles.
+ 0x1 = 1 CK cycles.
+ 0x2 = 2 CK cycles.
+ 0x3 = Reserved." */
+ uint64_t pocas : 1; /**< [ 3: 3](R/W) Reserved; must be zero.
+ Internal:
+ Enable the posted CAS feature of DDR3. This bit must be
+ set whenever LMC()_MODEREG_PARAMS0[AL]!=0. */
+ uint64_t ddr2t : 1; /**< [ 2: 2](R/W) Turn on the DDR 2T mode. 2 CK-cycle window for CMD and address. This mode helps relieve
+ setup time pressure on the address and command bus which nominally have a very large
+ fanout. Please refer to Micron's tech note tn_47_01 titled DDR2-533 Memory Design Guide
+ for Two Dimm Unbuffered Systems for physical details. */
+ uint64_t bwcnt : 1; /**< [ 1: 1](R/W) Bus utilization counter clear. Clears the LMC()_OPS_CNT, LMC()_IFB_CNT, and
+ LMC()_DCLK_CNT registers. To clear the CSRs, software should first write this field to
+ a one, then write this field to a zero. */
+ uint64_t rdimm_ena : 1; /**< [ 0: 0](R/W) Registered DIMM enable. When set, allows the use of JEDEC Registered DIMMs, which require
+ address and control bits to be registered in the controller. */
+#else /* Word 0 - Little Endian */
+ uint64_t rdimm_ena : 1; /**< [ 0: 0](R/W) Registered DIMM enable. When set, allows the use of JEDEC Registered DIMMs, which require
+ address and control bits to be registered in the controller. */
+ uint64_t bwcnt : 1; /**< [ 1: 1](R/W) Bus utilization counter clear. Clears the LMC()_OPS_CNT, LMC()_IFB_CNT, and
+ LMC()_DCLK_CNT registers. To clear the CSRs, software should first write this field to
+ a one, then write this field to a zero. */
+ uint64_t ddr2t : 1; /**< [ 2: 2](R/W) Turn on the DDR 2T mode. 2 CK-cycle window for CMD and address. This mode helps relieve
+ setup time pressure on the address and command bus which nominally have a very large
+ fanout. Please refer to Micron's tech note tn_47_01 titled DDR2-533 Memory Design Guide
+ for Two Dimm Unbuffered Systems for physical details. */
+ uint64_t pocas : 1; /**< [ 3: 3](R/W) Reserved; must be zero.
+ Internal:
+ Enable the posted CAS feature of DDR3. This bit must be
+ set whenever LMC()_MODEREG_PARAMS0[AL]!=0. */
+ uint64_t fprch2 : 2; /**< [ 5: 4](R/W) "Front porch enable. When set, the turn-off time for the default DDR#_DQ* /DDR#_DQS_*_P/N
+ drivers is FPRCH2 CKs earlier.
+ 0x0 = 0 CK cycles.
+ 0x1 = 1 CK cycles.
+ 0x2 = 2 CK cycles.
+ 0x3 = Reserved." */
+ uint64_t throttle_rd : 1; /**< [ 6: 6](R/W) When set, use at most one IFB for read operations. */
+ uint64_t throttle_wr : 1; /**< [ 7: 7](R/W) When set, use at most one IFB for write operations. */
+ uint64_t inorder_rd : 1; /**< [ 8: 8](R/W) Send read operations in order (regardless of priority). */
+ uint64_t inorder_wr : 1; /**< [ 9: 9](R/W) Send write operations in order (regardless of priority). */
+ uint64_t elev_prio_dis : 1; /**< [ 10: 10](R/W) Disable elevate priority logic. When set, write operations are sent in regardless of
+ priority information from L2C. */
+ uint64_t nxm_write_en : 1; /**< [ 11: 11](R/W) NXM write mode. When clear, LMC discards write operations to addresses that don't exist in
+ the DRAM (as defined by LMC()_NXM configuration). When set, LMC completes write
+ operations to addresses that don't exist in the DRAM at an aliased address. */
+ uint64_t max_write_batch : 4; /**< [ 15: 12](R/W) Maximum number of consecutive write operations to service before forcing read operations
+ to interrupt. */
+ uint64_t xor_bank : 1; /**< [ 16: 16](R/W) Enable signal to XOR the bank bits. See LMC()_EXT_CONFIG2 on how LMC selects the L2C-LMC
+ address bits. */
+ uint64_t auto_dclkdis : 1; /**< [ 17: 17](R/W) When 1, LMC automatically shuts off its internal clock to conserve power when there is no
+ traffic. Note that this has no effect on the DDR3/DDR4 PHY and pads clocks. */
+ uint64_t int_zqcs_dis : 1; /**< [ 18: 18](R/W) Disable (internal) auto-ZQCS calibration. When clear, LMC runs internal ZQ calibration
+ every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t ext_zqcs_dis : 1; /**< [ 19: 19](R/W) Disable (external) auto-ZQCS calibration. When clear, LMC runs external ZQ calibration
+ every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t bprch : 2; /**< [ 21: 20](R/W) "Back porch enable. When set, the turn-on time for the default DDR#_DQ* /DDR#_DQS_*_P/N
+ drivers is delayed an additional BPRCH CK cycles.
+ 0x0 = 0 CK cycles.
+ 0x1 = 1 CK cycles.
+ 0x2 = 2 CK cycles.
+ 0x3 = 3 CK cycles." */
+ uint64_t wodt_bprch : 1; /**< [ 22: 22](R/W) When set, the turn-off time for the ODT pin during a write command is delayed an
+ additional CK cycle. */
+ uint64_t rodt_bprch : 1; /**< [ 23: 23](R/W) When set, the turn-off time for the ODT pin during a read command is delayed an additional
+ CK cycle. */
+ uint64_t crm_max : 5; /**< [ 28: 24](R/W) Coarse rate matching max bucket size. The coarse rate matching logic is used to control
+ the bandwidth allocated to DFA reads. [CRM_MAX] is subdivided into two regions with DFA
+ reads being preferred over LMC reads/writes when [CRM_CNT] \< [CRM_THR]. [CRM_CNT]
+ increments by one when a DFA read is slotted and by two when an LMC read/write is
+ slotted, and rolls over when [CRM_MAX] is reached.
+
+ 0x0 = Reserved. */
+ uint64_t crm_thr : 5; /**< [ 33: 29](R/W) Coarse rate matching threshold. */
+ uint64_t crm_cnt : 5; /**< [ 38: 34](RO/H) Coarse count. */
+ uint64_t thrmax : 4; /**< [ 42: 39](R/W) Fine rate matching max bucket size. In conjunction with the coarse rate matching logic,
+ the fine rate matching logic gives software the ability to prioritize DFA reads over L2C
+ writes. Higher [PERSUB] values result in a lower DFA read bandwidth.
+
+ 0x0 = Reserved. */
+ uint64_t persub : 8; /**< [ 50: 43](R/W) Offset for DFA rate-matching. */
+ uint64_t thrcnt : 12; /**< [ 62: 51](RO/H) Fine count. */
+ uint64_t scramble_ena : 1; /**< [ 63: 63](R/W) When set, will enable the scramble/descramble logic. */
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_lmcx_control bdk_lmcx_control_t;
+
+static inline uint64_t BDK_LMCX_CONTROL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_CONTROL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000190ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000190ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000190ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000190ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_CONTROL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_CONTROL(a) bdk_lmcx_control_t
+#define bustype_BDK_LMCX_CONTROL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_CONTROL(a) "LMCX_CONTROL"
+#define device_bar_BDK_LMCX_CONTROL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_CONTROL(a) (a)
+#define arguments_BDK_LMCX_CONTROL(a) (a),-1,-1,-1
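+
+/*
+ * Illustrative sketch (not part of the BDK sources): clearing the LMC
+ * bus-utilization counters through LMC()_CONTROL[BWCNT]. Per the field
+ * description above, software writes the field to one and then to zero.
+ * Assumes the BDK_CSR_MODIFY helper from bdk-csr.h; "node" and "lmc" are
+ * placeholders.
+ *
+ *   BDK_CSR_MODIFY(c, node, BDK_LMCX_CONTROL(lmc), c.s.bwcnt = 1);
+ *   BDK_CSR_MODIFY(c, node, BDK_LMCX_CONTROL(lmc), c.s.bwcnt = 0);
+ */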
+
+/**
+ * Register (RSL) lmc#_ctl
+ *
+ * LMC Control Register
+ */
+union bdk_lmcx_ctl
+{
+ uint64_t u;
+ struct bdk_lmcx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t wr_cmd_delay_sel : 2; /**< [ 13: 12](R/W) Selects the write command delay (in core clock cycles) for incoming write transactions.
+ Depending on the DRAM clock speed, programming this field can be required to ensure proper
+ loading of the write data into LMC's buffer. It is recommended to set this field as
+ follows:
+ _ DDR4-1600 - DDR4-2133 = set to 0 (no delay).
+ _ DDR4-2400 = set to 1 (delay by 1 cycle).
+ _ DDR4-2666 = set to 2 (delay by 2 cycles).
+ _ DDR4-3200 = set to 3 (delay by 3 cycles).
+
+ Internal:
+ CYA bits to cover the case when rclk is at its slowest speed (300MHz), while dclk is
+ greater than 1.2GHz. In general, the condition ((24+CWL) * dclk_period (ns) \> 33.33ns)
+ must be met. */
+ uint64_t reserved_9_11 : 3;
+ uint64_t predictive_start : 1; /**< [ 8: 8](WO) A 0-\>1 transition initiates the predictive fill logic on the LMC response data.
+ For optimal performance, set this field to one along with the correct value of [RDF_CNT]
+ after core clock stabilizes to a new frequency.
+ This field is a one-shot and clears itself each time it is set. */
+ uint64_t rdf_cnt : 8; /**< [ 7: 0](R/W) Defines the sample point of the LMC response data in the DDR-clock/core-clock crossing.
+ For optimal performance set to
+ RNDUP[((10 * DDR-clock period)/core-clock period) - 1].
+ Set to zero to disable predictive mode. */
+#else /* Word 0 - Little Endian */
+ uint64_t rdf_cnt : 8; /**< [ 7: 0](R/W) Defines the sample point of the LMC response data in the DDR-clock/core-clock crossing.
+ For optimal performance set to
+ RNDUP[((10 * DDR-clock period)/core-clock period) - 1].
+ Set to zero to disable predictive mode. */
+ uint64_t predictive_start : 1; /**< [ 8: 8](WO) A 0-\>1 transition initiates the predictive fill logic on the LMC response data.
+ For optimal performance, set this field to one along with the correct value of [RDF_CNT]
+ after core clock stabilizes to a new frequency.
+ This field is a one-shot and clears itself each time it is set. */
+ uint64_t reserved_9_11 : 3;
+ uint64_t wr_cmd_delay_sel : 2; /**< [ 13: 12](R/W) Selects the write command delay (in core clock cycles) for incoming write transactions.
+ Depending on the DRAM clock speed, programming this field can be required to ensure proper
+ loading of the write data into LMC's buffer. It is recommended to set this field as
+ follows:
+ _ DDR4-1600 - DDR4-2133 = set to 0 (no delay).
+ _ DDR4-2400 = set to 1 (delay by 1 cycle).
+ _ DDR4-2666 = set to 2 (delay by 2 cycles).
+ _ DDR4-3200 = set to 3 (delay by 3 cycles).
+
+ Internal:
+ CYA bits to cover the case when rclk is at its slowest speed (300MHz), while dclk is
+ greater than 1.2GHz. In general, the condition ((24+CWL) * dclk_period (ns) \> 33.33ns)
+ must be met. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_ctl_s cn; */
+};
+typedef union bdk_lmcx_ctl bdk_lmcx_ctl_t;
+
+static inline uint64_t BDK_LMCX_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e0880001c0ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_CTL(a) bdk_lmcx_ctl_t
+#define bustype_BDK_LMCX_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_CTL(a) "LMCX_CTL"
+#define device_bar_BDK_LMCX_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_CTL(a) (a)
+#define arguments_BDK_LMCX_CTL(a) (a),-1,-1,-1
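+
+/*
+ * Illustrative sketch (not part of the BDK sources): programming
+ * LMC()_CTL[RDF_CNT] from the formula above,
+ * RNDUP[((10 * DDR-clock period) / core-clock period) - 1], which in
+ * frequency terms is ceil(10 * rclk_hz / dclk_hz) - 1, then arming
+ * [PREDICTIVE_START]. Assumes the BDK_CSR_MODIFY helper from bdk-csr.h;
+ * "node", "lmc", "rclk_hz" and "dclk_hz" are placeholders.
+ *
+ *   uint64_t rdf = (10 * rclk_hz + dclk_hz - 1) / dclk_hz - 1;
+ *   BDK_CSR_MODIFY(c, node, BDK_LMCX_CTL(lmc),
+ *                  c.s.rdf_cnt = rdf;
+ *                  c.s.predictive_start = 1);
+ */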
+
+/**
+ * Register (RSL) lmc#_dbtrain_ctl
+ *
+ * LMC Data Buffer Training Control Register
+ * Reserved.
+ * Internal:
+ * This register contains control bits that are used during the Data Buffer
+ * training sequence in DDR4 LRDIMM mode. When one of the data buffer training
+ * sequences is initiated, it uses the contents of this register to control
+ * its operation.
+ */
+union bdk_lmcx_dbtrain_ctl
+{
+ uint64_t u;
+ struct bdk_lmcx_dbtrain_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_63 : 1;
+ uint64_t lfsr_pattern_sel : 1; /**< [ 62: 62](RO) Reserved. */
+ uint64_t cmd_count_ext : 2; /**< [ 61: 60](RO) Reserved. */
+ uint64_t db_output_impedance : 3; /**< [ 59: 57](R/W) Reserved.
+ Internal:
+ Host interface DQ/DQS output driver impedance control.
+ This is the default value used during host interface write leveling in LRDIMM
+ environment, i.e., LMC()_CONFIG[LRDIMM_ENA] = 1, LMC()_SEQ_CTL[SEQ_SEL] = 0x6.
+ 0x0 = RZQ/6 (40 ohm).
+ 0x1 = RZQ/7 (34 ohm).
+ 0x2 = RZQ/5 (48 ohm).
+ 0x3-0x7 = Reserved. */
+ uint64_t db_sel : 1; /**< [ 56: 56](R/W) Reserved.
+ Internal:
+ Used when running host interface write leveling.
+ 0 = selects DIMM0's data buffer.
+ 1 = selects DIMM1's data buffer. */
+ uint64_t tccd_sel : 1; /**< [ 55: 55](R/W) When set, the sequence uses LMC()_MODEREG_PARAMS3[TCCD_L] to space out
+ back-to-back read commands. Otherwise it will space out back-to-back
+ reads with a default value of 4 cycles.
+
+ While in DRAM MPR mode, reads from page 0 may use tCCD_S or tCCD_L.
+ Reads from pages 1, 2 or 3 however must use tCCD_L, thereby requiring
+ this bit to be set. */
+ uint64_t rw_train : 1; /**< [ 54: 54](R/W) When set, the DBTRAIN sequence will perform a write to the DRAM
+ memory array using burst patterns that are set in
+ LMC()_GENERAL_PURPOSE0[DATA]\<61:0\>, LMC()_GENERAL_PURPOSE1[DATA]\<61:0\> and
+ LMC()_GENERAL_PURPOSE2[DATA]\<15:0\>.
+
+ This burst pattern gets shifted by one byte at every cycle.
+ The sequence will then do the reads to the same location and compare
+ the data coming back with this pattern.
+ The bit-wise comparison result gets stored in
+ LMC()_MPR_DATA0[MPR_DATA]\<63:0\> and LMC()_MPR_DATA1[MPR_DATA]\<7:0\>. */
+ uint64_t read_dq_count : 7; /**< [ 53: 47](R/W) Reserved.
+ Internal:
+ The number of cycles until a pulse is issued to sample the DQ into the
+ MPR register. These bits control the timing of when to sample the data
+ buffer training result. */
+ uint64_t read_cmd_count : 5; /**< [ 46: 42](R/W) The number of read and write commands to be sent during R/W training.
+ Internal:
+ This can be set to zero, in which case the sequence does not send any
+ read commands, to accommodate the DWL training mode. */
+ uint64_t write_ena : 1; /**< [ 41: 41](R/W) Reserved.
+ Internal:
+ Enables the write operation. This is mainly used to accomplish the MWD
+ training sequence of the data buffer.
+ LMC()_DBTRAIN_CTL[ACTIVATE] must be set to one for this to take effect. */
+ uint64_t activate : 1; /**< [ 40: 40](R/W) Reserved.
+ Internal:
+ Enables the activate command during the data buffer training sequence. */
+ uint64_t prank : 2; /**< [ 39: 38](R/W) Physical rank bits for read/write/activate operation. */
+ uint64_t lrank : 3; /**< [ 37: 35](R/W) Reserved.
+ Internal:
+ Logical rank bits for read/write/activate operation during the data buffer
+ training. */
+ uint64_t row_a : 18; /**< [ 34: 17](R/W) The row address for the activate command. */
+ uint64_t bg : 2; /**< [ 16: 15](R/W) The bank group that the R/W commands are directed to. */
+ uint64_t ba : 2; /**< [ 14: 13](R/W) The bank address that the R/W commands are directed to. */
+ uint64_t column_a : 13; /**< [ 12: 0](R/W) Column address for the read/write operation. */
+#else /* Word 0 - Little Endian */
+ uint64_t column_a : 13; /**< [ 12: 0](R/W) Column address for the read/write operation. */
+ uint64_t ba : 2; /**< [ 14: 13](R/W) The bank address that the R/W commands are directed to. */
+ uint64_t bg : 2; /**< [ 16: 15](R/W) The bank group that the R/W commands are directed to. */
+ uint64_t row_a : 18; /**< [ 34: 17](R/W) The row address for the activate command. */
+ uint64_t lrank : 3; /**< [ 37: 35](R/W) Reserved.
+ Internal:
+ Logical rank bits for read/write/activate operation during the data buffer
+ training. */
+ uint64_t prank : 2; /**< [ 39: 38](R/W) Physical rank bits for read/write/activate operation. */
+ uint64_t activate : 1; /**< [ 40: 40](R/W) Reserved.
+ Internal:
+ Enables the activate command during the data buffer training sequence. */
+ uint64_t write_ena : 1; /**< [ 41: 41](R/W) Reserved.
+ Internal:
+ Enables the write operation. This is mainly used to accomplish the MWD
+ training sequence of the data buffer.
+ LMC()_DBTRAIN_CTL[ACTIVATE] must be set to one for this to take effect. */
+ uint64_t read_cmd_count : 5; /**< [ 46: 42](R/W) The number of read and write commands to be sent during R/W training.
+ Internal:
+ This can be set to zero, in which case the sequence does not send any
+ read commands, to accommodate the DWL training mode. */
+ uint64_t read_dq_count : 7; /**< [ 53: 47](R/W) Reserved.
+ Internal:
+ The number of cycles until a pulse is issued to sample the DQ into the
+ MPR register. These bits control the timing of when to sample the data
+ buffer training result. */
+ uint64_t rw_train : 1; /**< [ 54: 54](R/W) When set, the DBTRAIN sequence will perform a write to the DRAM
+ memory array using burst patterns that are set in
+ LMC()_GENERAL_PURPOSE0[DATA]\<61:0\>, LMC()_GENERAL_PURPOSE1[DATA]\<61:0\> and
+ LMC()_GENERAL_PURPOSE2[DATA]\<15:0\>.
+
+ This burst pattern gets shifted by one byte at every cycle.
+ The sequence will then do the reads to the same location and compare
+ the data coming back with this pattern.
+ The bit-wise comparison result gets stored in
+ LMC()_MPR_DATA0[MPR_DATA]\<63:0\> and LMC()_MPR_DATA1[MPR_DATA]\<7:0\>. */
+ uint64_t tccd_sel : 1; /**< [ 55: 55](R/W) When set, the sequence uses LMC()_MODEREG_PARAMS3[TCCD_L] to space out
+ back-to-back read commands. Otherwise it will space out back-to-back
+ reads with a default value of 4 cycles.
+
+ While in DRAM MPR mode, reads from page 0 may use tCCD_S or tCCD_L.
+ Reads from pages 1, 2 or 3 however must use tCCD_L, thereby requiring
+ this bit to be set. */
+ uint64_t db_sel : 1; /**< [ 56: 56](R/W) Reserved.
+ Internal:
+ Used when running host interface write leveling.
+ 0 = selects DIMM0's data buffer.
+ 1 = selects DIMM1's data buffer. */
+ uint64_t db_output_impedance : 3; /**< [ 59: 57](R/W) Reserved.
+ Internal:
+ Host interface DQ/DQS output driver impedance control.
+ This is the default value used during host interface write leveling in LRDIMM
+ environment, i.e., LMC()_CONFIG[LRDIMM_ENA] = 1, LMC()_SEQ_CTL[SEQ_SEL] = 0x6.
+ 0x0 = RZQ/6 (40 ohm).
+ 0x1 = RZQ/7 (34 ohm).
+ 0x2 = RZQ/5 (48 ohm).
+ 0x3-0x7 = Reserved. */
+ uint64_t cmd_count_ext : 2; /**< [ 61: 60](RO) Reserved. */
+ uint64_t lfsr_pattern_sel : 1; /**< [ 62: 62](RO) Reserved. */
+ uint64_t reserved_63 : 1;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_dbtrain_ctl_s cn88xxp1; */
+ struct bdk_lmcx_dbtrain_ctl_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_63 : 1;
+ uint64_t lfsr_pattern_sel : 1; /**< [ 62: 62](R/W) If set high, the sequence uses 32-bit LFSR pattern when generating data sequence
+ during the General R/W training (LMC()_DBTRAIN_CTL[RW_TRAIN] == 1).
+
+ The LFSR polynomials are programmed by LMC()_CHAR_CTL[PRBS]. */
+ uint64_t cmd_count_ext : 2; /**< [ 61: 60](R/W) Extension bits to the field LMC()_DBTRAIN_CTL[READ_CMD_COUNT]. This enables
+ up to 128 read and write commands. */
+ uint64_t db_output_impedance : 3; /**< [ 59: 57](R/W) Reserved.
+ Internal:
+ Host interface DQ/DQS output driver impedance control.
+ This is the default value used during host interface write leveling in LRDIMM
+ environment, i.e., LMC()_CONFIG[LRDIMM_ENA] = 1, LMC()_SEQ_CTL[SEQ_SEL] = 0x6.
+ 0x0 = RZQ/6 (40 ohm).
+ 0x1 = RZQ/7 (34 ohm).
+ 0x2 = RZQ/5 (48 ohm).
+ 0x3-0x7 = Reserved. */
+ uint64_t db_sel : 1; /**< [ 56: 56](R/W) Reserved.
+ Internal:
+ Used when running host interface write leveling.
+ 0 = selects DIMM0's data buffer.
+ 1 = selects DIMM1's data buffer. */
+ uint64_t tccd_sel : 1; /**< [ 55: 55](R/W) When set, the sequence uses LMC()_MODEREG_PARAMS3[TCCD_L] to space out
+ back-to-back read commands. Otherwise it will space out back-to-back
+ reads with a default value of 4 cycles.
+
+ While in DRAM MPR mode, reads from page 0 may use tCCD_S or tCCD_L.
+ Reads from pages 1, 2 or 3 however must use tCCD_L, thereby requiring
+ this bit to be set. */
+ uint64_t rw_train : 1; /**< [ 54: 54](R/W) When set, the DBTRAIN sequence will perform a write to the DRAM
+ memory array using burst patterns that are set in
+ LMC()_GENERAL_PURPOSE0[DATA]\<61:0\>, LMC()_GENERAL_PURPOSE1[DATA]\<61:0\> and
+ LMC()_GENERAL_PURPOSE2[DATA]\<15:0\>.
+
+ This burst pattern gets shifted by one byte at every cycle.
+ The sequence will then do the reads to the same location and compare
+ the data coming back with this pattern.
+ The bit-wise comparison result gets stored in
+ LMC()_MPR_DATA0[MPR_DATA]\<63:0\> and LMC()_MPR_DATA1[MPR_DATA]\<7:0\>. */
+ uint64_t read_dq_count : 7; /**< [ 53: 47](R/W) Reserved.
+ Internal:
+ The number of cycles until a pulse is issued to sample the DQ into the
+ MPR register. These bits control the timing of when to sample the data
+ buffer training result. */
+ uint64_t read_cmd_count : 5; /**< [ 46: 42](R/W) The number of read and write commands to be sent during R/W training.
+ Internal:
+ This can be set to zero, in which case the sequence does not send any
+ read commands, to accommodate the DWL training mode. */
+ uint64_t write_ena : 1; /**< [ 41: 41](R/W) Reserved.
+ Internal:
+ Enables the write operation. This is mainly used to accomplish the MWD
+ training sequence of the data buffer.
+ LMC()_DBTRAIN_CTL[ACTIVATE] must be set to one for this to take effect. */
+ uint64_t activate : 1; /**< [ 40: 40](R/W) Reserved.
+ Internal:
+ Enables the activate command during the data buffer training sequence. */
+ uint64_t prank : 2; /**< [ 39: 38](R/W) Physical rank bits for read/write/activate operation. */
+ uint64_t lrank : 3; /**< [ 37: 35](R/W) Reserved.
+ Internal:
+ Logical rank bits for read/write/activate operation during the data buffer
+ training. */
+ uint64_t row_a : 18; /**< [ 34: 17](R/W) The row address for the activate command. */
+ uint64_t bg : 2; /**< [ 16: 15](R/W) The bank group that the R/W commands are directed to. */
+ uint64_t ba : 2; /**< [ 14: 13](R/W) The bank address that the R/W commands are directed to. */
+ uint64_t column_a : 13; /**< [ 12: 0](R/W) Column address for the read/write operation. */
+#else /* Word 0 - Little Endian */
+ uint64_t column_a : 13; /**< [ 12: 0](R/W) Column address for the read/write operation. */
+ uint64_t ba : 2; /**< [ 14: 13](R/W) The bank address that the R/W commands are directed to. */
+ uint64_t bg : 2; /**< [ 16: 15](R/W) The bank group that the R/W commands are directed to. */
+ uint64_t row_a : 18; /**< [ 34: 17](R/W) The row address for the activate command. */
+ uint64_t lrank : 3; /**< [ 37: 35](R/W) Reserved.
+ Internal:
+ Logical rank bits for read/write/activate operation during the data buffer
+ training. */
+ uint64_t prank : 2; /**< [ 39: 38](R/W) Physical rank bits for read/write/activate operation. */
+ uint64_t activate : 1; /**< [ 40: 40](R/W) Reserved.
+ Internal:
+ Enables the activate command during the data buffer training sequence. */
+ uint64_t write_ena : 1; /**< [ 41: 41](R/W) Reserved.
+ Internal:
+ Enables the write operation. This is mainly used to accomplish the MWD
+ training sequence of the data buffer.
+ LMC()_DBTRAIN_CTL[ACTIVATE] must be set to one for this to take effect. */
+ uint64_t read_cmd_count : 5; /**< [ 46: 42](R/W) The number of read and write commands to be sent during R/W training.
+ Internal:
+ This can be set to zero, in which case the sequence does not send any
+ read commands, to accommodate the DWL training mode. */
+ uint64_t read_dq_count : 7; /**< [ 53: 47](R/W) Reserved.
+ Internal:
+ The number of cycles until a pulse is issued to sample the DQ into the
+ MPR register. These bits control the timing of when to sample the data
+ buffer training result. */
+ uint64_t rw_train : 1; /**< [ 54: 54](R/W) When set, the DBTRAIN sequence will perform a write to the DRAM
+ memory array using burst patterns that are set in
+ LMC()_GENERAL_PURPOSE0[DATA]\<61:0\>, LMC()_GENERAL_PURPOSE1[DATA]\<61:0\> and
+ LMC()_GENERAL_PURPOSE2[DATA]\<15:0\>.
+
+ This burst pattern gets shifted by one byte at every cycle.
+ The sequence will then do the reads to the same location and compare
+ the data coming back with this pattern.
+ The bit-wise comparison result gets stored in
+ LMC()_MPR_DATA0[MPR_DATA]\<63:0\> and LMC()_MPR_DATA1[MPR_DATA]\<7:0\>. */
+ uint64_t tccd_sel : 1; /**< [ 55: 55](R/W) When set, the sequence uses LMC()_MODEREG_PARAMS3[TCCD_L] to space out
+ back-to-back read commands. Otherwise it will space out back-to-back
+ reads with a default value of 4 cycles.
+
+ While in DRAM MPR mode, reads from page 0 may use tCCD_S or tCCD_L.
+ Reads from pages 1, 2 or 3 however must use tCCD_L, thereby requiring
+ this bit to be set. */
+ uint64_t db_sel : 1; /**< [ 56: 56](R/W) Reserved.
+ Internal:
+ Used when running host interface write leveling.
+ 0 = selects DIMM0's data buffer.
+ 1 = selects DIMM1's data buffer. */
+ uint64_t db_output_impedance : 3; /**< [ 59: 57](R/W) Reserved.
+ Internal:
+ Host interface DQ/DQS output driver impedance control.
+ This is the default value used during host interface write leveling in LRDIMM
+ environment, i.e., LMC()_CONFIG[LRDIMM_ENA] = 1, LMC()_SEQ_CTL[SEQ_SEL] = 0x6.
+ 0x0 = RZQ/6 (40 ohm).
+ 0x1 = RZQ/7 (34 ohm).
+ 0x2 = RZQ/5 (48 ohm).
+ 0x3-0x7 = Reserved. */
+ uint64_t cmd_count_ext : 2; /**< [ 61: 60](R/W) Extension bits to the field LMC()_DBTRAIN_CTL[READ_CMD_COUNT]. This enables
+ up to 128 read and write commands. */
+ uint64_t lfsr_pattern_sel : 1; /**< [ 62: 62](R/W) If set high, the sequence uses 32-bit LFSR pattern when generating data sequence
+ during the General R/W training (LMC()_DBTRAIN_CTL[RW_TRAIN] == 1).
+
+ The LFSR polynomials are programmed by LMC()_CHAR_CTL[PRBS]. */
+ uint64_t reserved_63 : 1;
+#endif /* Word 0 - End */
+ } cn9;
+ /* struct bdk_lmcx_dbtrain_ctl_cn9 cn81xx; */
+ struct bdk_lmcx_dbtrain_ctl_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_63 : 1;
+ uint64_t lfsr_pattern_sel : 1; /**< [ 62: 62](R/W) If set high, the sequence uses 32-bit LFSR pattern when generating data sequence
+ during the General R/W training (LMC()_DBTRAIN_CTL[RW_TRAIN] == 1).
+
+ The LFSR polynomials are programmed by LMC()_CHAR_CTL[PRBS]. */
+ uint64_t cmd_count_ext : 2; /**< [ 61: 60](R/W) Extension bits to the field LMC()_DBTRAIN_CTL[READ_CMD_COUNT]. This enables
+ up to 128 read and write commands. */
+ uint64_t db_output_impedance : 3; /**< [ 59: 57](R/W) Reserved.
+ Internal:
+ Host interface DQ/DQS output driver impedance control.
+ This is the default value used during host interface write leveling in LRDIMM
+ environment, i.e., LMC()_CONFIG[LRDIMM_ENA] = 1, LMC()_SEQ_CTL[SEQ_SEL] = 0x6.
+ 0x0 = RZQ/6 (40 ohm).
+ 0x1 = RZQ/7 (34 ohm).
+ 0x2 = RZQ/5 (48 ohm).
+ 0x3-0x7 = Reserved. */
+ uint64_t db_sel : 1; /**< [ 56: 56](R/W) Reserved.
+ Internal:
+ Used when running host interface write leveling.
+ 0 = selects DIMM0's data buffer.
+ 1 = selects DIMM1's data buffer. */
+ uint64_t tccd_sel : 1; /**< [ 55: 55](R/W) When set, the sequence uses LMC()_MODEREG_PARAMS3[TCCD_L] to space out
+ back-to-back read commands. Otherwise it will space out back-to-back
+ reads with a default value of 4 cycles.
+
+ While in DRAM MPR mode, reads from page 0 may use tCCD_S or tCCD_L.
+ Reads from pages 1, 2 or 3 however must use tCCD_L, thereby requiring
+ this bit to be set. */
+ uint64_t rw_train : 1; /**< [ 54: 54](R/W) When set, the DBTRAIN sequence will perform a write to the DRAM
+ memory array using burst patterns that are set in
+ LMC()_GENERAL_PURPOSE0[DATA]\<61:0\>, LMC()_GENERAL_PURPOSE1[DATA]\<61:0\> and
+ LMC()_GENERAL_PURPOSE2[DATA]\<15:0\>.
+
+ This burst pattern gets shifted by one byte at every cycle.
+ The sequence will then do the reads to the same location and compare
+ the data coming back with this pattern.
+ The bit-wise comparison result gets stored in
+ LMC()_MPR_DATA0[MPR_DATA]\<63:0\> and LMC()_MPR_DATA1[MPR_DATA]\<7:0\>. */
+ uint64_t read_dq_count : 7; /**< [ 53: 47](R/W) Reserved.
+ Internal:
+ The number of cycles until a pulse is issued to sample the DQ into the
+ MPR register. These bits control the timing of when to sample the data
+ buffer training result. */
+ uint64_t read_cmd_count : 5; /**< [ 46: 42](R/W) The number of read and write commands to be sent during R/W training.
+ Internal:
+ This can be set to zero, in which case the sequence does not send any
+ read commands, to accommodate the DWL training mode. */
+ uint64_t write_ena : 1; /**< [ 41: 41](R/W) Reserved.
+ Internal:
+ Enables the write operation. This is mainly used to accomplish the MWD
+ training sequence of the data buffer.
+ LMC()_DBTRAIN_CTL[ACTIVATE] must be set to one for this to take effect. */
+ uint64_t activate : 1; /**< [ 40: 40](R/W) Reserved.
+ Internal:
+ Enables the activate command during the data buffer training sequence. */
+ uint64_t prank : 2; /**< [ 39: 38](R/W) Physical rank bits for read/write/activate operation. */
+ uint64_t lrank : 3; /**< [ 37: 35](R/W) Reserved.
+ Internal:
+ Logical rank bits for read/write/activate operation during the data buffer
+ training. */
+ uint64_t row_a : 18; /**< [ 34: 17](R/W) The row address for the activate command. */
+ uint64_t bg : 2; /**< [ 16: 15](R/W) The bank group that the R/W commands are directed to. */
+ uint64_t ba : 2; /**< [ 14: 13](R/W) The bank address that the R/W commands are directed to. */
+ uint64_t column_a : 13; /**< [ 12: 0](R/W) Column address for the read/write operation. */
+#else /* Word 0 - Little Endian */
+ uint64_t column_a : 13; /**< [ 12: 0](R/W) Column address for the read/write operation. */
+ uint64_t ba : 2; /**< [ 14: 13](R/W) The bank address that the R/W commands are directed to. */
+ uint64_t bg : 2; /**< [ 16: 15](R/W) The bank group that the R/W commands are directed to. */
+ uint64_t row_a : 18; /**< [ 34: 17](R/W) The row address for the activate command. */
+ uint64_t lrank : 3; /**< [ 37: 35](R/W) Reserved.
+ Internal:
+ Logical rank bits for read/write/activate operation during the data buffer
+ training. */
+ uint64_t prank : 2; /**< [ 39: 38](R/W) Physical rank bits for read/write/activate operation. */
+ uint64_t activate : 1; /**< [ 40: 40](R/W) Reserved.
+ Internal:
+ Enables the activate command during the data buffer training sequence. */
+ uint64_t write_ena : 1; /**< [ 41: 41](R/W) Reserved.
+ Internal:
+ Enables the write operation. This is mainly used to accomplish the MWD
+ training sequence of the data buffer.
+ LMC()_DBTRAIN_CTL[ACTIVATE] must be set to one for this to take effect. */
+ uint64_t read_cmd_count : 5; /**< [ 46: 42](R/W) The number of read and write commands to be sent during R/W training.
+ Internal:
+ This can be set to zero, in which case the sequence does not send any
+ read commands, to accommodate the DWL training mode. */
+ uint64_t read_dq_count : 7; /**< [ 53: 47](R/W) Reserved.
+ Internal:
+ The number of cycles until a pulse is issued to sample the DQ into the
+ MPR register. These bits control the timing of when to sample the data
+ buffer training result. */
+ uint64_t rw_train : 1; /**< [ 54: 54](R/W) When set, the DBTRAIN sequence will perform a write to the DRAM
+ memory array using burst patterns that are set in
+ LMC()_GENERAL_PURPOSE0[DATA]\<61:0\>, LMC()_GENERAL_PURPOSE1[DATA]\<61:0\> and
+ LMC()_GENERAL_PURPOSE2[DATA]\<15:0\>.
+
+ This burst pattern gets shifted by one byte at every cycle.
+ The sequence will then do the reads to the same location and compare
+ the data coming back with this pattern.
+ The bit-wise comparison result gets stored in
+ LMC()_MPR_DATA0[MPR_DATA]\<63:0\> and LMC()_MPR_DATA1[MPR_DATA]\<7:0\>. */
+ uint64_t tccd_sel : 1; /**< [ 55: 55](R/W) When set, the sequence uses LMC()_MODEREG_PARAMS3[TCCD_L] to space out
+ back-to-back read commands. Otherwise it will space out back-to-back
+ reads with a default value of 4 cycles.
+
+ While in DRAM MPR mode, reads from page 0 may use tCCD_S or tCCD_L.
+ Reads from pages 1, 2 or 3 however must use tCCD_L, thereby requiring
+ this bit to be set. */
+ uint64_t db_sel : 1; /**< [ 56: 56](R/W) Reserved.
+ Internal:
+ Used when running host interface write leveling.
+ 0 = selects DIMM0's data buffer.
+ 1 = selects DIMM1's data buffer. */
+ uint64_t db_output_impedance : 3; /**< [ 59: 57](R/W) Reserved.
+ Internal:
+ Host interface DQ/DQS output driver impedance control.
+ This is the default value used during host interface write leveling in LRDIMM
+ environment, i.e., LMC()_CONFIG[LRDIMM_ENA] = 1, LMC()_SEQ_CTL[SEQ_SEL] = 0x6.
+ 0x0 = RZQ/6 (40 ohm).
+ 0x1 = RZQ/7 (34 ohm).
+ 0x2 = RZQ/5 (48 ohm).
+ 0x3-0x7 = Reserved. */
+ uint64_t cmd_count_ext : 2; /**< [ 61: 60](R/W) Extension bits to the field LMC()_DBTRAIN_CTL[READ_CMD_COUNT]. This enables
+ up to 128 read and write commands. */
+ uint64_t lfsr_pattern_sel : 1; /**< [ 62: 62](R/W) If set high, the sequence uses 32-bit LFSR pattern when generating data sequence
+ during the General R/W training (LMC()_DBTRAIN_CTL[RW_TRAIN] == 1).
+
+ The LFSR polynomials are programmed by LMC()_CHAR_CTL[PRBS]. */
+ uint64_t reserved_63 : 1;
+#endif /* Word 0 - End */
+ } cn83xx;
+ struct bdk_lmcx_dbtrain_ctl_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_63 : 1;
+ uint64_t lfsr_pattern_sel : 1; /**< [ 62: 62](RO) Reserved. */
+ uint64_t cmd_count_ext : 2; /**< [ 61: 60](R/W) Extension bits to the field LMC()_DBTRAIN_CTL[READ_CMD_COUNT]. This enables
+ up to 128 read and write commands. */
+ uint64_t db_output_impedance : 3; /**< [ 59: 57](R/W) Reserved.
+ Internal:
+ Host interface DQ/DQS output driver impedance control.
+ This is the default value used during host interface write leveling in LRDIMM
+ environment, i.e., LMC()_CONFIG[LRDIMM_ENA] = 1, LMC()_SEQ_CTL[SEQ_SEL] = 0x6.
+ 0x0 = RZQ/6 (40 ohm).
+ 0x1 = RZQ/7 (34 ohm).
+ 0x2 = RZQ/5 (48 ohm).
+ 0x3-0x7 = Reserved. */
+ uint64_t db_sel : 1; /**< [ 56: 56](R/W) Reserved.
+ Internal:
+ Used when running host interface write leveling.
+ 0 = selects DIMM0's data buffer.
+ 1 = selects DIMM1's data buffer. */
+ uint64_t tccd_sel : 1; /**< [ 55: 55](R/W) When set, the sequence uses LMC()_MODEREG_PARAMS3[TCCD_L] to space out
+ back-to-back read commands. Otherwise it will space out back-to-back
+ reads with a default value of 4 cycles.
+
+ While in DRAM MPR mode, reads from page 0 may use tCCD_S or tCCD_L.
+ Reads from pages 1, 2 or 3 however must use tCCD_L, thereby requiring
+ this bit to be set. */
+ uint64_t rw_train : 1; /**< [ 54: 54](R/W) When set, the DBTRAIN sequence will perform a write to the DRAM
+ memory array using burst patterns that are set in
+ LMC()_GENERAL_PURPOSE0[DATA]\<61:0\>, LMC()_GENERAL_PURPOSE1[DATA]\<61:0\> and
+ LMC()_GENERAL_PURPOSE2[DATA]\<15:0\>.
+
+ This burst pattern gets shifted by one byte at every cycle.
+ The sequence will then do the reads to the same location and compare
+ the data coming back with this pattern.
+ The bit-wise comparison result gets stored in
+ LMC()_MPR_DATA0[MPR_DATA]\<63:0\> and LMC()_MPR_DATA1[MPR_DATA]\<7:0\>. */
+ uint64_t read_dq_count : 7; /**< [ 53: 47](R/W) Reserved.
+ Internal:
+ The number of cycles until a pulse is issued to sample the DQ into the
+ MPR register. These bits control the timing of when to sample the data
+ buffer training result. */
+ uint64_t read_cmd_count : 5; /**< [ 46: 42](R/W) The number of read and write commands to be sent during R/W training.
+ Internal:
+ This can be set to zero, in which case the sequence does not send any
+ read commands, to accommodate the DWL training mode. */
+ uint64_t write_ena : 1; /**< [ 41: 41](R/W) Reserved.
+ Internal:
+ Enables the write operation. This is mainly used to accomplish the MWD
+ training sequence of the data buffer.
+ LMC()_DBTRAIN_CTL[ACTIVATE] must be set to one for this to take effect. */
+ uint64_t activate : 1; /**< [ 40: 40](R/W) Reserved.
+ Internal:
+ Enables the activate command during the data buffer training sequence. */
+ uint64_t prank : 2; /**< [ 39: 38](R/W) Physical rank bits for read/write/activate operation. */
+ uint64_t lrank : 3; /**< [ 37: 35](R/W) Reserved.
+ Internal:
+ Logical rank bits for read/write/activate operation during the data buffer
+ training. */
+ uint64_t row_a : 18; /**< [ 34: 17](R/W) The row address for the activate command. */
+ uint64_t bg : 2; /**< [ 16: 15](R/W) The bank group that the R/W commands are directed to. */
+ uint64_t ba : 2; /**< [ 14: 13](R/W) The bank address that the R/W commands are directed to. */
+ uint64_t column_a : 13; /**< [ 12: 0](R/W) Column address for the read/write operation. */
+#else /* Word 0 - Little Endian */
+ uint64_t column_a : 13; /**< [ 12: 0](R/W) Column address for the read/write operation. */
+ uint64_t ba : 2; /**< [ 14: 13](R/W) The bank address that the R/W commands are directed to. */
+ uint64_t bg : 2; /**< [ 16: 15](R/W) The bank group that the R/W commands are directed to. */
+ uint64_t row_a : 18; /**< [ 34: 17](R/W) The row address for the activate command. */
+ uint64_t lrank : 3; /**< [ 37: 35](R/W) Reserved.
+ Internal:
+ Logical rank bits for read/write/activate operation during the data buffer
+ training. */
+ uint64_t prank : 2; /**< [ 39: 38](R/W) Physical rank bits for read/write/activate operation. */
+ uint64_t activate : 1; /**< [ 40: 40](R/W) Reserved.
+ Internal:
+ Enables the activate command during the data buffer training sequence. */
+ uint64_t write_ena : 1; /**< [ 41: 41](R/W) Reserved.
+ Internal:
+ Enables the write operation. This is mainly used to accomplish the MWD
+ training sequence of the data buffer.
+ LMC()_DBTRAIN_CTL[ACTIVATE] must be set to one for this to take effect. */
+ uint64_t read_cmd_count : 5; /**< [ 46: 42](R/W) The number of read and write commands to be sent during R/W training.
+ Internal:
+ This can be set to zero, in which case the sequence does not send any
+ read commands, to accommodate the DWL training mode. */
+ uint64_t read_dq_count : 7; /**< [ 53: 47](R/W) Reserved.
+ Internal:
+ The number of cycles until a pulse is issued to sample the DQ into the
+ MPR register. These bits control the timing of when to sample the data
+ buffer training result. */
+ uint64_t rw_train : 1; /**< [ 54: 54](R/W) When set, the DBTRAIN sequence will perform a write to the DRAM
+ memory array using burst patterns that are set in
+ LMC()_GENERAL_PURPOSE0[DATA]\<61:0\>, LMC()_GENERAL_PURPOSE1[DATA]\<61:0\> and
+ LMC()_GENERAL_PURPOSE2[DATA]\<15:0\>.
+
+ This burst pattern gets shifted by one byte at every cycle.
+ The sequence will then do the reads to the same location and compare
+ the data coming back with this pattern.
+ The bit-wise comparison result gets stored in
+ LMC()_MPR_DATA0[MPR_DATA]\<63:0\> and LMC()_MPR_DATA1[MPR_DATA]\<7:0\>. */
+ uint64_t tccd_sel : 1; /**< [ 55: 55](R/W) When set, the sequence uses LMC()_MODEREG_PARAMS3[TCCD_L] to space out
+ back-to-back read commands. Otherwise it will space out back-to-back
+ reads with a default value of 4 cycles.
+
+ While in DRAM MPR mode, reads from page 0 may use tCCD_S or tCCD_L.
+ Reads from pages 1, 2 or 3 however must use tCCD_L, thereby requiring
+ this bit to be set. */
+ uint64_t db_sel : 1; /**< [ 56: 56](R/W) Reserved.
+ Internal:
+ Used when running host interface write leveling.
+ 0 = selects DIMM0's data buffer.
+ 1 = selects DIMM1's data buffer. */
+ uint64_t db_output_impedance : 3; /**< [ 59: 57](R/W) Reserved.
+ Internal:
+ Host interface DQ/DQS output driver impedance control.
+ This is the default value used during host interface write leveling in LRDIMM
+ environment, i.e., LMC()_CONFIG[LRDIMM_ENA] = 1, LMC()_SEQ_CTL[SEQ_SEL] = 0x6.
+ 0x0 = RZQ/6 (40 ohm).
+ 0x1 = RZQ/7 (34 ohm).
+ 0x2 = RZQ/5 (48 ohm).
+ 0x3-0x7 = Reserved. */
+ uint64_t cmd_count_ext : 2; /**< [ 61: 60](R/W) Extension bits to the field LMC()_DBTRAIN_CTL[READ_CMD_COUNT]. This enables
+ up to 128 read and write commands. */
+ uint64_t lfsr_pattern_sel : 1; /**< [ 62: 62](RO) Reserved. */
+ uint64_t reserved_63 : 1;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_lmcx_dbtrain_ctl bdk_lmcx_dbtrain_ctl_t;
+
+static inline uint64_t BDK_LMCX_DBTRAIN_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_DBTRAIN_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0880003f8ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0880003f8ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e0880003f8ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e0880003f8ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_DBTRAIN_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_DBTRAIN_CTL(a) bdk_lmcx_dbtrain_ctl_t
+#define bustype_BDK_LMCX_DBTRAIN_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_DBTRAIN_CTL(a) "LMCX_DBTRAIN_CTL"
+#define device_bar_BDK_LMCX_DBTRAIN_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_DBTRAIN_CTL(a) (a)
+#define arguments_BDK_LMCX_DBTRAIN_CTL(a) (a),-1,-1,-1
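+
+/*
+ * Illustrative sketch (not part of the BDK sources): setting up a general
+ * R/W training pass. The burst patterns come from LMC()_GENERAL_PURPOSE0..2
+ * and the comparison results land in LMC()_MPR_DATA0/1, per the field
+ * descriptions above. Assumes the BDK_CSR_MODIFY helper from bdk-csr.h;
+ * "node", "lmc" and the address values are placeholders.
+ *
+ *   BDK_CSR_MODIFY(c, node, BDK_LMCX_DBTRAIN_CTL(lmc),
+ *                  c.s.rw_train = 1;        // write, then read back and compare
+ *                  c.s.read_cmd_count = 31; // number of R/W commands
+ *                  c.s.prank = 0;           // physical rank 0
+ *                  c.s.row_a = 0;
+ *                  c.s.bg = 0;
+ *                  c.s.ba = 0;
+ *                  c.s.column_a = 0);
+ */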
+
+/**
+ * Register (RSL) lmc#_dclk_cnt
+ *
+ * LMC System-Memory-Clock Counter Register
+ */
+union bdk_lmcx_dclk_cnt
+{
+ uint64_t u;
+ struct bdk_lmcx_dclk_cnt_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t dclkcnt : 64; /**< [ 63: 0](RO/H) Performance counter. A 64-bit counter that increments every CK cycle. */
+#else /* Word 0 - Little Endian */
+ uint64_t dclkcnt : 64; /**< [ 63: 0](RO/H) Performance counter. A 64-bit counter that increments every CK cycle. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_dclk_cnt_s cn; */
+};
+typedef union bdk_lmcx_dclk_cnt bdk_lmcx_dclk_cnt_t;
+
+static inline uint64_t BDK_LMCX_DCLK_CNT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_DCLK_CNT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0880001e0ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0880001e0ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e0880001e0ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e0880001e0ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_DCLK_CNT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_DCLK_CNT(a) bdk_lmcx_dclk_cnt_t
+#define bustype_BDK_LMCX_DCLK_CNT(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_DCLK_CNT(a) "LMCX_DCLK_CNT"
+#define device_bar_BDK_LMCX_DCLK_CNT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_DCLK_CNT(a) (a)
+#define arguments_BDK_LMCX_DCLK_CNT(a) (a),-1,-1,-1
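+
+/*
+ * Illustrative sketch (not part of the BDK sources): since DCLKCNT
+ * increments once per CK cycle, two reads a fixed time apart give the
+ * average DDR clock frequency. Assumes the BDK_CSR_READ helper from
+ * bdk-csr.h and bdk_wait_usec(); "node" and "lmc" are placeholders.
+ *
+ *   uint64_t c0 = BDK_CSR_READ(node, BDK_LMCX_DCLK_CNT(lmc));
+ *   bdk_wait_usec(100000);             // sample over 100 ms
+ *   uint64_t c1 = BDK_CSR_READ(node, BDK_LMCX_DCLK_CNT(lmc));
+ *   uint64_t ck_hz = (c1 - c0) * 10;   // CK cycles per second
+ */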
+
+/**
+ * Register (RSL) lmc#_ddr4_dimm_ctl
+ *
+ * LMC DIMM Control Register
+ * Bits 0-21 of this register are used only when LMC()_CONTROL[RDIMM_ENA] = 1.
+ *
+ * During an RCW initialization sequence, bits 0-21 control LMC's write
+ * operations to the extended DDR4 control words in the JEDEC standard
+ * registering clock driver on an RDIMM.
+ *
+ * Internal:
+ * Bits 22-27 are used only when LMC()_CONFIG[LRDIMM_ENA] = 1 AND
+ * LMC()_MR_MPR_CTL[MR_WR_PBA_ENABLE] = 1.
+ *
+ * During PBA mode of an MRW sequence, bits 22-27 control the Buffer Configuration
+ * Control Word F0BC1x settings during the BCW write.
+ */
+union bdk_lmcx_ddr4_dimm_ctl
+{
+ uint64_t u;
+ struct bdk_lmcx_ddr4_dimm_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_28_63 : 36;
+ uint64_t rank_timing_enable : 1; /**< [ 27: 27](R/W) Reserved.
+ Internal:
+ Package Rank Timing Alignment Enable bit for the DDR4 LRDIMM Buffer Configuration Control
+ Word F0BC1x DA[7]. Used during PBA BCW Write through the MRW sequence. */
+ uint64_t bodt_trans_mode : 1; /**< [ 26: 26](R/W) Reserved.
+ Internal:
+ BODT input handling in Transparent Mode for the DDR4 LRDIMM Buffer Configuration Control
+ Word F0BC1x. Used during PBA BCW Write through the MRW sequence. */
+ uint64_t trans_mode_ena : 1; /**< [ 25: 25](R/W) Reserved.
+ Internal:
+ Transparent Mode Enable bit for DDR4 LRDIMM Buffer Configuration Control Word
+ F0BC1x DA[5]. Used during PBA BCW Write through the MRW sequence. */
+ uint64_t read_preamble_mode : 1; /**< [ 24: 24](R/W) Reserved.
+ Internal:
+ Read Preamble Training Mode Enable bit for DDR4 LRDIMM Buffer Configuration Control Word
+ F0BC1x DA[4]. Used during PBA BCW Write through the MRW sequence. */
+ uint64_t buff_config_da3 : 1; /**< [ 23: 23](R/W) Reserved.
+ Internal:
+ Reserved setting value in F0BC1x DA3. Used during PBA BCW Write through the MRW sequence. */
+ uint64_t mpr_over_ena : 1; /**< [ 22: 22](R/W) Reserved.
+ Internal:
+ MPR Override Mode Enable bit for the DDR4 LRDIMM Buffer Configuration Control Word
+ F0BC1x DA[1]. Used during PBA BCW Write through the MRW sequence. */
+ uint64_t ddr4_dimm1_wmask : 11; /**< [ 21: 11](R/W) Reserved.
+ Internal:
+ DIMM1 write mask. If (DIMM1_WMASK[n] = 1), write DIMM1.RCn. */
+ uint64_t ddr4_dimm0_wmask : 11; /**< [ 10: 0](R/W) DIMM0 write mask. If (DIMM0_WMASK[n] = 1), write DIMM0.RCn. */
+#else /* Word 0 - Little Endian */
+ uint64_t ddr4_dimm0_wmask : 11; /**< [ 10: 0](R/W) DIMM0 write mask. If (DIMM0_WMASK[n] = 1), write DIMM0.RCn. */
+ uint64_t ddr4_dimm1_wmask : 11; /**< [ 21: 11](R/W) Reserved.
+ Internal:
+ DIMM1 write mask. If (DIMM1_WMASK[n] = 1), write DIMM1.RCn. */
+ uint64_t mpr_over_ena : 1; /**< [ 22: 22](R/W) Reserved.
+ Internal:
+ MPR Override Mode Enable bit for the DDR4 LRDIMM Buffer Configuration Control Word
+ F0BC1x DA[1]. Used during PBA BCW Write through the MRW sequence. */
+ uint64_t buff_config_da3 : 1; /**< [ 23: 23](R/W) Reserved.
+ Internal:
+ Reserved setting value in F0BC1x DA3. Used during PBA BCW Write through the MRW sequence. */
+ uint64_t read_preamble_mode : 1; /**< [ 24: 24](R/W) Reserved.
+ Internal:
+ Read Preamble Training Mode Enable bit for DDR4 LRDIMM Buffer Configuration Control Word
+ F0BC1x DA[4]. Used during PBA BCW Write through the MRW sequence. */
+ uint64_t trans_mode_ena : 1; /**< [ 25: 25](R/W) Reserved.
+ Internal:
+ Transparent Mode Enable bit for DDR4 LRDIMM Buffer Configuration Control Word
+ F0BC1x DA[5]. Used during PBA BCW Write through the MRW sequence. */
+ uint64_t bodt_trans_mode : 1; /**< [ 26: 26](R/W) Reserved.
+ Internal:
+ BODT input handling in Transparent Mode for the DDR4 LRDIMM Buffer Configuration Control
+ Word F0BC1x. Used during PBA BCW Write through the MRW sequence. */
+ uint64_t rank_timing_enable : 1; /**< [ 27: 27](R/W) Reserved.
+ Internal:
+ Package Rank Timing Alignment Enable bit for the DDR4 LRDIMM Buffer Configuration Control
+ Word F0BC1x DA[7]. Used during PBA BCW Write through the MRW sequence. */
+ uint64_t reserved_28_63 : 36;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_ddr4_dimm_ctl_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_28_63 : 36;
+ uint64_t rank_timing_enable : 1; /**< [ 27: 27](R/W) Reserved.
+ Internal:
+ Package Rank Timing Alignment Enable bit for the DDR4 LRDIMM Buffer Configuration Control
+ Word F0BC1x DA[7]. Used during PBA BCW Write through the MRW sequence. */
+ uint64_t bodt_trans_mode : 1; /**< [ 26: 26](R/W) Reserved.
+ Internal:
+ BODT input handling in Transparent Mode for the DDR4 LRDIMM Buffer Configuration Control
+ Word F0BC1x. Used during PBA BCW Write through the MRW sequence. */
+ uint64_t trans_mode_ena : 1; /**< [ 25: 25](R/W) Reserved.
+ Internal:
+ Transparent Mode Enable bit for DDR4 LRDIMM Buffer Configuration Control Word
+ F0BC1x DA[5]. Used during PBA BCW Write through the MRW sequence. */
+ uint64_t read_preamble_mode : 1; /**< [ 24: 24](R/W) Reserved.
+ Internal:
+ Read Preamble Training Mode Enable bit for DDR4 LRDIMM Buffer Configuration Control Word
+ F0BC1x DA[4]. Used during PBA BCW Write through the MRW sequence. */
+ uint64_t buff_config_da3 : 1; /**< [ 23: 23](R/W) Reserved.
+ Internal:
+ Reserved setting value in F0BC1x DA3. Used during PBA BCW Write through the MRW sequence. */
+ uint64_t mpr_over_ena : 1; /**< [ 22: 22](R/W) Reserved.
+ Internal:
+ MPR Override Mode Enable bit for the DDR4 LRDIMM Buffer Configuration Control Word
+ F0BC1x DA[1]. Used during PBA BCW Write through the MRW sequence. */
+ uint64_t ddr4_dimm1_wmask : 11; /**< [ 21: 11](R/W) DIMM1 write mask. If (DIMM1_WMASK[n] = 1), write DIMM1.RCn. */
+ uint64_t ddr4_dimm0_wmask : 11; /**< [ 10: 0](R/W) DIMM0 write mask. If (DIMM0_WMASK[n] = 1), write DIMM0.RCn. */
+#else /* Word 0 - Little Endian */
+ uint64_t ddr4_dimm0_wmask : 11; /**< [ 10: 0](R/W) DIMM0 write mask. If (DIMM0_WMASK[n] = 1), write DIMM0.RCn. */
+ uint64_t ddr4_dimm1_wmask : 11; /**< [ 21: 11](R/W) DIMM1 write mask. If (DIMM1_WMASK[n] = 1), write DIMM1.RCn. */
+ uint64_t mpr_over_ena : 1; /**< [ 22: 22](R/W) Reserved.
+ Internal:
+ MPR Override Mode Enable bit for the DDR4 LRDIMM Buffer Configuration Control Word
+ F0BC1x DA[1]. Used during PBA BCW Write through the MRW sequence. */
+ uint64_t buff_config_da3 : 1; /**< [ 23: 23](R/W) Reserved.
+ Internal:
+ Reserved setting value in F0BC1x DA3. Used during PBA BCW Write through the MRW sequence. */
+ uint64_t read_preamble_mode : 1; /**< [ 24: 24](R/W) Reserved.
+ Internal:
+ Read Preamble Training Mode Enable bit for DDR4 LRDIMM Buffer Configuration Control Word
+ F0BC1x DA[4]. Used during PBA BCW Write through the MRW sequence. */
+ uint64_t trans_mode_ena : 1; /**< [ 25: 25](R/W) Reserved.
+ Internal:
+ Transparent Mode Enable bit for DDR4 LRDIMM Buffer Configuration Control Word
+ F0BC1x DA[5]. Used during PBA BCW Write through the MRW sequence. */
+ uint64_t bodt_trans_mode : 1; /**< [ 26: 26](R/W) Reserved.
+ Internal:
+ BODT input handling in Transparent Mode for the DDR4 LRDIMM Buffer Configuration Control
+ Word F0BC1x. Used during PBA BCW Write through the MRW sequence. */
+ uint64_t rank_timing_enable : 1; /**< [ 27: 27](R/W) Reserved.
+ Internal:
+ Package Rank Timing Alignment Enable bit for the DDR4 LRDIMM Buffer Configuration Control
+ Word F0BC1x DA[7]. Used during PBA BCW Write through the MRW sequence. */
+ uint64_t reserved_28_63 : 36;
+#endif /* Word 0 - End */
+ } cn9;
+ /* struct bdk_lmcx_ddr4_dimm_ctl_s cn81xx; */
+ /* struct bdk_lmcx_ddr4_dimm_ctl_cn9 cn88xx; */
+ /* struct bdk_lmcx_ddr4_dimm_ctl_cn9 cn83xx; */
+};
+typedef union bdk_lmcx_ddr4_dimm_ctl bdk_lmcx_ddr4_dimm_ctl_t;
+
+static inline uint64_t BDK_LMCX_DDR4_DIMM_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_DDR4_DIMM_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0880003f0ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0880003f0ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e0880003f0ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e0880003f0ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_DDR4_DIMM_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_DDR4_DIMM_CTL(a) bdk_lmcx_ddr4_dimm_ctl_t
+#define bustype_BDK_LMCX_DDR4_DIMM_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_DDR4_DIMM_CTL(a) "LMCX_DDR4_DIMM_CTL"
+#define device_bar_BDK_LMCX_DDR4_DIMM_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_DDR4_DIMM_CTL(a) (a)
+#define arguments_BDK_LMCX_DDR4_DIMM_CTL(a) (a),-1,-1,-1
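+
+/*
+ * Illustrative sketch (not part of the BDK sources): selecting which RDIMM
+ * control words an RCW sequence writes. Each set bit n in a write mask
+ * selects RCn, and LMC()_CONTROL[RDIMM_ENA] must already be one, per the
+ * description above. Assumes the BDK_CSR_MODIFY helper from bdk-csr.h;
+ * "node" and "lmc" are placeholders.
+ *
+ *   BDK_CSR_MODIFY(c, node, BDK_LMCX_DDR4_DIMM_CTL(lmc),
+ *                  c.s.ddr4_dimm0_wmask = 0x7ff;   // write RC0..RC10 on DIMM0
+ *                  c.s.ddr4_dimm1_wmask = 0x7ff);  // and on DIMM1, if present
+ */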
+
+/**
+ * Register (RSL) lmc#_ddr_pll_ctl
+ *
+ * LMC DDR PLL Control Register
+ * This register controls the DDR_CK frequency. For details, refer to CK speed programming.
+ * See the LMC initialization sequence for details on initialization.
+ * DDR PLL bringup sequence:
+ *
+ * 1. Write CLKF, POSTDIV.
+ *
+ * 2. Wait 1 ref clock cycle (10ns).
+ *
+ * 3. Write 0 to PD, 1 to UPDATE.
+ *
+ * 4. Wait 500 ref clock cycles (5us).
+ *
+ * 5. Write 0 to PLL_RESET.
+ *
+ * 6. Wait 2000 ref clock cycles (20us).
+ *
+ * 7. Write 0x2 to PLL_SEL, 0 to PS_RESET. LMCs not bringing up the PLL
+ * need to write 0x2 to PLL_SEL to receive the phase-shifted PLL output.
+ *
+ * 8. Wait 2 ref clock cycles (20ns).
+ *
+ * 9. Write 1 to PHY_DCOK, wait 20us before bringing up the DDR interface.
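+ *
+ * A minimal sketch of this sequence (assuming the BDK_CSR_MODIFY helper
+ * from bdk-csr.h, bdk_wait_usec(), and the BDK_LMCX_DDR_PLL_CTL accessor
+ * defined with this register; "node", "lmc" and the CN9XXX-style CLKF
+ * value are placeholders):
+ *
+ *   BDK_CSR_MODIFY(c, node, BDK_LMCX_DDR_PLL_CTL(lmc),
+ *                  c.cn9.clkf = 0x80; c.cn9.postdiv = 0x1);   // step 1
+ *   bdk_wait_usec(1);                                         // step 2 (>= 10 ns)
+ *   BDK_CSR_MODIFY(c, node, BDK_LMCX_DDR_PLL_CTL(lmc),
+ *                  c.cn9.pd = 0; c.cn9.update = 1);           // step 3
+ *   bdk_wait_usec(5);                                         // step 4
+ *   BDK_CSR_MODIFY(c, node, BDK_LMCX_DDR_PLL_CTL(lmc),
+ *                  c.cn9.pll_reset = 0);                      // step 5
+ *   bdk_wait_usec(20);                                        // step 6
+ *   BDK_CSR_MODIFY(c, node, BDK_LMCX_DDR_PLL_CTL(lmc),
+ *                  c.cn9.pll_sel = 0x2; c.cn9.ps_reset = 0);  // step 7
+ *   bdk_wait_usec(1);                                         // step 8 (>= 20 ns)
+ *   BDK_CSR_MODIFY(c, node, BDK_LMCX_DDR_PLL_CTL(lmc),
+ *                  c.cn9.phy_dcok = 1);                       // step 9
+ *   bdk_wait_usec(20);                                        // before DDR bringup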
+ */
+union bdk_lmcx_ddr_pll_ctl
+{
+ uint64_t u;
+ struct bdk_lmcx_ddr_pll_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_45_63 : 19;
+ uint64_t dclk_alt_refclk_sel : 1; /**< [ 44: 44](R/W) Select alternate reference clock for DCLK PLL. */
+ uint64_t reserved_18_43 : 26;
+ uint64_t pll_bypass : 1; /**< [ 17: 17](R/W) Bypass PLL output with bypass_clk_n/p. */
+ uint64_t postdiv : 2; /**< [ 16: 15](R/W) DDR postscalar divide ratio. Determines the LMC CK speed. See also [CLKF].
+ 0x0 = 2.
+ 0x1 = 4.
+ 0x2 = 8.
+ 0x3 = 16. */
+ uint64_t pll_sel : 2; /**< [ 14: 13](R/W) PLL output select.
+ 0x0 = Off.
+ 0x1 = Runt.
+ 0x2 = PLL. */
+ uint64_t update : 1; /**< [ 12: 12](WO) PLL programming update. This is a one-shot operation; it automatically returns
+ to 0 after a write to 1. */
+ uint64_t pd : 1; /**< [ 11: 11](R/W) Powerdown PLL. */
+ uint64_t ps_reset : 1; /**< [ 10: 10](R/W) Post scalar reset. */
+ uint64_t pll_reset : 1; /**< [ 9: 9](R/W) PLL reset. */
+ uint64_t reserved_0_8 : 9;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_8 : 9;
+ uint64_t pll_reset : 1; /**< [ 9: 9](R/W) PLL reset. */
+ uint64_t ps_reset : 1; /**< [ 10: 10](R/W) Post scalar reset. */
+ uint64_t pd : 1; /**< [ 11: 11](R/W) Powerdown PLL. */
+ uint64_t update : 1; /**< [ 12: 12](WO) PLL programming update. This is a one-shot operation; it automatically returns
+ to 0 after a write to 1. */
+ uint64_t pll_sel : 2; /**< [ 14: 13](R/W) PLL output select.
+ 0x0 = Off.
+ 0x1 = Runt.
+ 0x2 = PLL. */
+ uint64_t postdiv : 2; /**< [ 16: 15](R/W) DDR postscalar divide ratio. Determines the LMC CK speed. See also [CLKF].
+ 0x0 = 2.
+ 0x1 = 4.
+ 0x2 = 8.
+ 0x3 = 16. */
+ uint64_t pll_bypass : 1; /**< [ 17: 17](R/W) Bypass PLL output with bypass_clk_n/p. */
+ uint64_t reserved_18_43 : 26;
+ uint64_t dclk_alt_refclk_sel : 1; /**< [ 44: 44](R/W) Select alternate reference clock for DCLK PLL. */
+ uint64_t reserved_45_63 : 19;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_ddr_pll_ctl_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_42_63 : 22;
+ uint64_t phy_dcok : 1; /**< [ 41: 41](R/W) Set to power up PHY logic after setting LMC()_DDR_PLL_CTL[DDR4_MODE]. */
+ uint64_t ddr4_mode : 1; /**< [ 40: 40](R/W) Reserved.
+ Internal:
+ FIXME REMOVE
+ DDR4 mode select: 1 = DDR4, 0 = Reserved. */
+ uint64_t pll_phase_sel : 1; /**< [ 39: 39](R/W) Phase select.
+ 0 = Select PLL Output clock phase 0, 120, and 240.
+ 1 = Select PLL Output clock phase 60, 180, and 300.
+
+ Even LMCs should use 0; odd LMCs should use 1. */
+ uint64_t rep : 1; /**< [ 38: 38](R/W) Regamp internal setting. */
+ uint64_t pll_ref_oct : 1; /**< [ 37: 37](R/W) Termination.
+ 0 = Disable 50 ohm on-chip termination.
+ 1 = Enable 50 ohm on-chip termination. */
+ uint64_t pll_ref_hcsl : 1; /**< [ 36: 36](R/W) Reference termination.
+ 0 = Disable HCSL reference clock termination.
+ 1 = Enable HCSL reference clock termination when [PLL_REF_OCT] is 1. */
+ uint64_t pll_ref_bypass : 1; /**< [ 35: 35](R/W) Bypass reference clock with bypass_clk_n/p. */
+ uint64_t pll_diffamp : 4; /**< [ 34: 31](R/W) Diffamp bias current setting. */
+ uint64_t cpamp : 1; /**< [ 30: 30](R/W) Charge pump internal opamp setting. */
+ uint64_t pll_cps : 4; /**< [ 29: 26](R/W) Charge pump current setting for Cs. */
+ uint64_t pll_cpb : 4; /**< [ 25: 22](R/W) Charge pump current setting for Cb. */
+ uint64_t bg_div16_en : 1; /**< [ 21: 21](R/W) Bandgap clock frequency.
+ 0 = Reference clock divided by 4.
+ 1 = Reference clock divided by 16. */
+ uint64_t bg_clk_en : 1; /**< [ 20: 20](R/W) Bandgap chopping enable. */
+ uint64_t prediv : 2; /**< [ 19: 18](R/W) Reference clock divider.
+ 0x0 = reference clock divides down by 1.
+ 0x1 = reference clock divides down by 1.
+ 0x2 = reference clock divides down by 2.
+ 0x3 = reference clock divides down by 3. */
+ uint64_t pll_bypass : 1; /**< [ 17: 17](R/W) Bypass PLL output with bypass_clk_n/p. */
+ uint64_t postdiv : 2; /**< [ 16: 15](R/W) DDR postscalar divide ratio. Determines the LMC CK speed. See also [CLKF].
+ 0x0 = 2.
+ 0x1 = 4.
+ 0x2 = 8.
+ 0x3 = 16. */
+ uint64_t pll_sel : 2; /**< [ 14: 13](R/W) PLL output select.
+ 0x0 = Off.
+ 0x1 = Runt.
+ 0x2 = PLL. */
+ uint64_t update : 1; /**< [ 12: 12](WO) PLL programming update. This is a one-shot operation; it automatically returns
+ to 0 after a write to 1. */
+ uint64_t pd : 1; /**< [ 11: 11](R/W) Powerdown PLL. */
+ uint64_t ps_reset : 1; /**< [ 10: 10](R/W) Post scalar reset. */
+ uint64_t pll_reset : 1; /**< [ 9: 9](R/W) PLL reset. */
+ uint64_t clkf : 9; /**< [ 8: 0](R/W) Multiply reference by [CLKF]. 96 \<= [CLKF] \<= 172. LMC PLL frequency = 33.33 * [CLKF].
+ min = 3.2 GHz, max = 5.7 GHz.
+
+ Typical settings:
+ 800 MHz: CLKF = 0x60 (96), POSTDIV = 0x1 (4).
+ 933 MHz: CLKF = 0x70 (112), POSTDIV = 0x1 (4).
+ 1067 MHz: CLKF = 0x80 (128), POSTDIV = 0x1 (4).
+ 1200 MHz: CLKF = 0x90 (144), POSTDIV = 0x1 (4).
+ 1333 MHz: CLKF = 0xA0 (160), POSTDIV = 0x1 (4). */
+#else /* Word 0 - Little Endian */
+ uint64_t clkf : 9; /**< [ 8: 0](R/W) Multiply reference by [CLKF]. 96 \<= [CLKF] \<= 172. LMC PLL frequency = 33.33 * [CLKF].
+ min = 3.2 GHz, max = 5.7 GHz.
+
+ Typical settings:
+ 800 MHz: CLKF = 0x60 (96), POSTDIV = 0x1 (4).
+ 933 MHz: CLKF = 0x70 (112), POSTDIV = 0x1 (4).
+ 1067 MHz: CLKF = 0x80 (128), POSTDIV = 0x1 (4).
+ 1200 MHz: CLKF = 0x90 (144), POSTDIV = 0x1 (4).
+ 1333 MHz: CLKF = 0xA0 (160), POSTDIV = 0x1 (4). */
+ uint64_t pll_reset : 1; /**< [ 9: 9](R/W) PLL reset. */
+ uint64_t ps_reset : 1; /**< [ 10: 10](R/W) Post scalar reset. */
+ uint64_t pd : 1; /**< [ 11: 11](R/W) Powerdown PLL. */
+ uint64_t update : 1; /**< [ 12: 12](WO) PLL programming update. This is a one-shot operation; it automatically returns
+ to 0 after a write to 1. */
+ uint64_t pll_sel : 2; /**< [ 14: 13](R/W) PLL output select.
+ 0x0 = Off.
+ 0x1 = Runt.
+ 0x2 = PLL. */
+ uint64_t postdiv : 2; /**< [ 16: 15](R/W) DDR postscalar divide ratio. Determines the LMC CK speed. See also [CLKF].
+ 0x0 = 2.
+ 0x1 = 4.
+ 0x2 = 8.
+ 0x3 = 16. */
+ uint64_t pll_bypass : 1; /**< [ 17: 17](R/W) Bypass PLL output with bypass_clk_n/p. */
+ uint64_t prediv : 2; /**< [ 19: 18](R/W) Reference clock divider.
+ 0x0 = reference clock divides down by 1.
+ 0x1 = reference clock divides down by 1.
+ 0x2 = reference clock divides down by 2.
+ 0x3 = reference clock divides down by 3. */
+ uint64_t bg_clk_en : 1; /**< [ 20: 20](R/W) Bandgap chopping enable. */
+ uint64_t bg_div16_en : 1; /**< [ 21: 21](R/W) Bandgap clock frequency.
+ 0 = Reference clock divided by 4.
+ 1 = Reference clock divided by 16. */
+ uint64_t pll_cpb : 4; /**< [ 25: 22](R/W) Charge pump current setting for Cb. */
+ uint64_t pll_cps : 4; /**< [ 29: 26](R/W) Charge pump current setting for Cs. */
+ uint64_t cpamp : 1; /**< [ 30: 30](R/W) Charge pump internal opamp setting. */
+ uint64_t pll_diffamp : 4; /**< [ 34: 31](R/W) Diffamp bias current setting. */
+ uint64_t pll_ref_bypass : 1; /**< [ 35: 35](R/W) Bypass reference clock with bypass_clk_n/p. */
+ uint64_t pll_ref_hcsl : 1; /**< [ 36: 36](R/W) Reference termination.
+ 0 = disable HCSL reference clock termination.
+ 1 = enable HCSL reference clock termination when [PLL_REF_OCT] is 1. */
+ uint64_t pll_ref_oct : 1; /**< [ 37: 37](R/W) Termination.
+ 0 = Disable 50 Ohm on chip termination.
+ 1 = Enable 50 Ohm on chip termination. */
+ uint64_t rep : 1; /**< [ 38: 38](R/W) Regamp internal setting. */
+ uint64_t pll_phase_sel : 1; /**< [ 39: 39](R/W) Phase select.
+ 0 = Select PLL Output clock phase 0, 120, and 240.
+ 1 = Select PLL Output clock phase 60, 180, and 300.
+
+ Even LMCs should use 0; odd LMCs should use 1. */
+ uint64_t ddr4_mode : 1; /**< [ 40: 40](R/W) Reserved.
+ Internal:
+ FIXME REMOVE
+ DDR4 mode select: 1 = DDR4, 0 = Reserved. */
+ uint64_t phy_dcok : 1; /**< [ 41: 41](R/W) Set to power up PHY logic after setting LMC()_DDR_PLL_CTL[DDR4_MODE]. */
+ uint64_t reserved_42_63 : 22;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_lmcx_ddr_pll_ctl_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_45_63 : 19;
+ uint64_t dclk_alt_refclk_sel : 1; /**< [ 44: 44](R/W) Select alternate reference clock for DCLK PLL. */
+ uint64_t bwadj : 12; /**< [ 43: 32](R/W) Bandwidth control for DCLK PLLs. */
+ uint64_t dclk_invert : 1; /**< [ 31: 31](R/W) Invert DCLK that feeds LMC/DDR at the south side of the chip. */
+ uint64_t phy_dcok : 1; /**< [ 30: 30](R/W) Set to power up PHY logic after setting LMC()_DDR_PLL_CTL[DDR4_MODE]. */
+ uint64_t ddr4_mode : 1; /**< [ 29: 29](R/W) DDR4 mode select: 1 = DDR4, 0 = DDR3. */
+ uint64_t pll_fbslip : 1; /**< [ 28: 28](RO/H) PLL FBSLIP indication. */
+ uint64_t pll_lock : 1; /**< [ 27: 27](RO/H) PLL LOCK indication. */
+ uint64_t pll_rfslip : 1; /**< [ 26: 26](RO/H) PLL RFSLIP indication. */
+ uint64_t clkr : 2; /**< [ 25: 24](R/W) PLL post-divider control. */
+ uint64_t jtg_test_mode : 1; /**< [ 23: 23](R/W) Reserved; must be zero.
+ Internal:
+ JTAG test mode. Clock alignment between DCLK & REFCLK as
+ well as FCLK & REFCLK can only be performed after the ddr_pll_divider_reset is deasserted.
+ Software needs to wait at least 10 reference clock cycles after deasserting
+ pll_divider_reset
+ before asserting LMC()_DDR_PLL_CTL[JTG_TEST_MODE]. During alignment (which can take up
+ to 160 microseconds) DCLK and FCLK can exhibit some high-frequency pulses. Therefore, all
+ bring up activities in that clock domain need to be delayed (when the chip operates in
+ jtg_test_mode) by about 160 microseconds to ensure that lock is achieved. */
+ uint64_t ddr_div_reset : 1; /**< [ 22: 22](R/W) DDR postscalar divider reset. */
+ uint64_t ddr_ps_en : 4; /**< [ 21: 18](R/W) DDR postscalar divide ratio. Determines the LMC CK speed.
+ 0x0 = divide LMC PLL by 1.
+ 0x1 = divide LMC PLL by 2.
+ 0x2 = divide LMC PLL by 3.
+ 0x3 = divide LMC PLL by 4.
+ 0x4 = divide LMC PLL by 5.
+ 0x5 = divide LMC PLL by 6.
+ 0x6 = divide LMC PLL by 7.
+ 0x7 = divide LMC PLL by 8.
+ 0x8 = divide LMC PLL by 10.
+ 0x9 = divide LMC PLL by 12.
+ 0xA = Reserved.
+ 0xB = Reserved.
+ 0xC = Reserved.
+ 0xD = Reserved.
+ 0xE = Reserved.
+ 0xF = Reserved.
+
+ [DDR_PS_EN] is not used when [DDR_DIV_RESET] = 1. */
+ uint64_t reserved_9_17 : 9;
+ uint64_t clkf_ext : 1; /**< [ 8: 8](R/W) A 1-bit extension to the [CLKF] register to support DDR4-2666. */
+ uint64_t reset_n : 1; /**< [ 7: 7](R/W) PLL reset. */
+ uint64_t clkf : 7; /**< [ 6: 0](R/W) Multiply reference by [CLKF]. 31 \<= [CLKF] \<= 99. LMC PLL frequency (MHz) = 50 * [CLKF].
+ min = 1.6 GHz, max = 5 GHz. */
+#else /* Word 0 - Little Endian */
+ uint64_t clkf : 7; /**< [ 6: 0](R/W) Multiply reference by [CLKF]. 31 \<= [CLKF] \<= 99. LMC PLL frequency (MHz) = 50 * [CLKF].
+ min = 1.6 GHz, max = 5 GHz. */
+ uint64_t reset_n : 1; /**< [ 7: 7](R/W) PLL reset. */
+ uint64_t clkf_ext : 1; /**< [ 8: 8](R/W) A 1-bit extension to the [CLKF] register to support DDR4-2666. */
+ uint64_t reserved_9_17 : 9;
+ uint64_t ddr_ps_en : 4; /**< [ 21: 18](R/W) DDR postscalar divide ratio. Determines the LMC CK speed.
+ 0x0 = divide LMC PLL by 1.
+ 0x1 = divide LMC PLL by 2.
+ 0x2 = divide LMC PLL by 3.
+ 0x3 = divide LMC PLL by 4.
+ 0x4 = divide LMC PLL by 5.
+ 0x5 = divide LMC PLL by 6.
+ 0x6 = divide LMC PLL by 7.
+ 0x7 = divide LMC PLL by 8.
+ 0x8 = divide LMC PLL by 10.
+ 0x9 = divide LMC PLL by 12.
+ 0xA = Reserved.
+ 0xB = Reserved.
+ 0xC = Reserved.
+ 0xD = Reserved.
+ 0xE = Reserved.
+ 0xF = Reserved.
+
+ [DDR_PS_EN] is not used when [DDR_DIV_RESET] = 1. */
+ uint64_t ddr_div_reset : 1; /**< [ 22: 22](R/W) DDR postscalar divider reset. */
+ uint64_t jtg_test_mode : 1; /**< [ 23: 23](R/W) Reserved; must be zero.
+ Internal:
+ JTAG test mode. Clock alignment between DCLK & REFCLK as
+ well as FCLK & REFCLK can only be performed after the ddr_pll_divider_reset is deasserted.
+ Software needs to wait at least 10 reference clock cycles after deasserting
+ pll_divider_reset
+ before asserting LMC()_DDR_PLL_CTL[JTG_TEST_MODE]. During alignment (which can take up
+ to 160 microseconds) DCLK and FCLK can exhibit some high-frequency pulses. Therefore, all
+ bring up activities in that clock domain need to be delayed (when the chip operates in
+ jtg_test_mode) by about 160 microseconds to ensure that lock is achieved. */
+ uint64_t clkr : 2; /**< [ 25: 24](R/W) PLL post-divider control. */
+ uint64_t pll_rfslip : 1; /**< [ 26: 26](RO/H) PLL RFSLIP indication. */
+ uint64_t pll_lock : 1; /**< [ 27: 27](RO/H) PLL LOCK indication. */
+ uint64_t pll_fbslip : 1; /**< [ 28: 28](RO/H) PLL FBSLIP indication. */
+ uint64_t ddr4_mode : 1; /**< [ 29: 29](R/W) DDR4 mode select: 1 = DDR4, 0 = DDR3. */
+ uint64_t phy_dcok : 1; /**< [ 30: 30](R/W) Set to power up PHY logic after setting LMC()_DDR_PLL_CTL[DDR4_MODE]. */
+ uint64_t dclk_invert : 1; /**< [ 31: 31](R/W) Invert DCLK that feeds LMC/DDR at the south side of the chip. */
+ uint64_t bwadj : 12; /**< [ 43: 32](R/W) Bandwidth control for DCLK PLLs. */
+ uint64_t dclk_alt_refclk_sel : 1; /**< [ 44: 44](R/W) Select alternate reference clock for DCLK PLL. */
+ uint64_t reserved_45_63 : 19;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_lmcx_ddr_pll_ctl_cn81xx cn88xx; */
+ struct bdk_lmcx_ddr_pll_ctl_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_45_63 : 19;
+ uint64_t dclk_alt_refclk_sel : 1; /**< [ 44: 44](R/W) Select alternate reference clock for DCLK PLL. */
+ uint64_t bwadj : 12; /**< [ 43: 32](R/W) Bandwidth control for DCLK PLLs. */
+ uint64_t dclk_invert : 1; /**< [ 31: 31](R/W) Invert dclk that feeds LMC/DDR at the south side of the chip. */
+ uint64_t phy_dcok : 1; /**< [ 30: 30](R/W) Set to power up PHY logic after setting LMC()_DDR_PLL_CTL[DDR4_MODE]. */
+ uint64_t ddr4_mode : 1; /**< [ 29: 29](R/W) DDR4 mode select: 1 = DDR4, 0 = DDR3. */
+ uint64_t pll_fbslip : 1; /**< [ 28: 28](RO/H) PLL FBSLIP indication. */
+ uint64_t pll_lock : 1; /**< [ 27: 27](RO/H) PLL LOCK indication. */
+ uint64_t pll_rfslip : 1; /**< [ 26: 26](RO/H) PLL RFSLIP indication. */
+ uint64_t clkr : 2; /**< [ 25: 24](R/W) PLL post-divider control. */
+ uint64_t jtg_test_mode : 1; /**< [ 23: 23](R/W) Reserved; must be zero.
+ Internal:
+ JTAG test mode. Clock alignment between DCLK & REFCLK as
+ well as FCLK & REFCLK can only be performed after the ddr_pll_divider_reset is deasserted.
+ Software needs to wait at least 10 reference clock cycles after deasserting
+ pll_divider_reset
+ before asserting LMC()_DDR_PLL_CTL[JTG_TEST_MODE]. During alignment (which can take up
+ to 160 microseconds) DCLK and FCLK can exhibit some high-frequency pulses. Therefore, all
+ bring up activities in that clock domain need to be delayed (when the chip operates in
+ jtg_test_mode) by about 160 microseconds to ensure that lock is achieved. */
+ uint64_t ddr_div_reset : 1; /**< [ 22: 22](R/W) DDR postscalar divider reset. */
+ uint64_t ddr_ps_en : 4; /**< [ 21: 18](R/W) DDR postscalar divide ratio. Determines the LMC CK speed.
+ 0x0 = divide LMC PLL by 1.
+ 0x1 = divide LMC PLL by 2.
+ 0x2 = divide LMC PLL by 3.
+ 0x3 = divide LMC PLL by 4.
+ 0x4 = divide LMC PLL by 5.
+ 0x5 = divide LMC PLL by 6.
+ 0x6 = divide LMC PLL by 7.
+ 0x7 = divide LMC PLL by 8.
+ 0x8 = divide LMC PLL by 10.
+ 0x9 = divide LMC PLL by 12.
+ 0xA = Reserved.
+ 0xB = Reserved.
+ 0xC = Reserved.
+ 0xD = Reserved.
+ 0xE = Reserved.
+ 0xF = Reserved.
+
+ [DDR_PS_EN] is not used when [DDR_DIV_RESET] = 1. */
+ uint64_t reserved_9_17 : 9;
+ uint64_t clkf_ext : 1; /**< [ 8: 8](R/W) A 1-bit extension to the [CLKF] register to support DDR4-2666. */
+ uint64_t reset_n : 1; /**< [ 7: 7](R/W) PLL reset. */
+ uint64_t clkf : 7; /**< [ 6: 0](R/W) Multiply reference by [CLKF]. 31 \<= [CLKF] \<= 99. LMC PLL frequency (MHz) = 50 * [CLKF].
+ min = 1.6 GHz, max = 5 GHz. */
+#else /* Word 0 - Little Endian */
+ uint64_t clkf : 7; /**< [ 6: 0](R/W) Multiply reference by [CLKF]. 31 \<= [CLKF] \<= 99. LMC PLL frequency (MHz) = 50 * [CLKF].
+ min = 1.6 GHz, max = 5 GHz. */
+ uint64_t reset_n : 1; /**< [ 7: 7](R/W) PLL reset. */
+ uint64_t clkf_ext : 1; /**< [ 8: 8](R/W) A 1-bit extension to the [CLKF] register to support DDR4-2666. */
+ uint64_t reserved_9_17 : 9;
+ uint64_t ddr_ps_en : 4; /**< [ 21: 18](R/W) DDR postscalar divide ratio. Determines the LMC CK speed.
+ 0x0 = divide LMC PLL by 1.
+ 0x1 = divide LMC PLL by 2.
+ 0x2 = divide LMC PLL by 3.
+ 0x3 = divide LMC PLL by 4.
+ 0x4 = divide LMC PLL by 5.
+ 0x5 = divide LMC PLL by 6.
+ 0x6 = divide LMC PLL by 7.
+ 0x7 = divide LMC PLL by 8.
+ 0x8 = divide LMC PLL by 10.
+ 0x9 = divide LMC PLL by 12.
+ 0xA = Reserved.
+ 0xB = Reserved.
+ 0xC = Reserved.
+ 0xD = Reserved.
+ 0xE = Reserved.
+ 0xF = Reserved.
+
+ [DDR_PS_EN] is not used when [DDR_DIV_RESET] = 1. */
+ uint64_t ddr_div_reset : 1; /**< [ 22: 22](R/W) DDR postscalar divider reset. */
+ uint64_t jtg_test_mode : 1; /**< [ 23: 23](R/W) Reserved; must be zero.
+ Internal:
+ JTAG test mode. Clock alignment between DCLK & REFCLK as
+ well as FCLK & REFCLK can only be performed after the ddr_pll_divider_reset is deasserted.
+ Software needs to wait at least 10 reference clock cycles after deasserting
+ pll_divider_reset
+ before asserting LMC()_DDR_PLL_CTL[JTG_TEST_MODE]. During alignment (which can take up
+ to 160 microseconds) DCLK and FCLK can exhibit some high-frequency pulses. Therefore, all
+ bring up activities in that clock domain need to be delayed (when the chip operates in
+ jtg_test_mode) by about 160 microseconds to ensure that lock is achieved. */
+ uint64_t clkr : 2; /**< [ 25: 24](R/W) PLL post-divider control. */
+ uint64_t pll_rfslip : 1; /**< [ 26: 26](RO/H) PLL RFSLIP indication. */
+ uint64_t pll_lock : 1; /**< [ 27: 27](RO/H) PLL LOCK indication. */
+ uint64_t pll_fbslip : 1; /**< [ 28: 28](RO/H) PLL FBSLIP indication. */
+ uint64_t ddr4_mode : 1; /**< [ 29: 29](R/W) DDR4 mode select: 1 = DDR4, 0 = DDR3. */
+ uint64_t phy_dcok : 1; /**< [ 30: 30](R/W) Set to power up PHY logic after setting LMC()_DDR_PLL_CTL[DDR4_MODE]. */
+ uint64_t dclk_invert : 1; /**< [ 31: 31](R/W) Invert dclk that feeds LMC/DDR at the south side of the chip. */
+ uint64_t bwadj : 12; /**< [ 43: 32](R/W) Bandwidth control for DCLK PLLs. */
+ uint64_t dclk_alt_refclk_sel : 1; /**< [ 44: 44](R/W) Select alternate reference clock for DCLK PLL. */
+ uint64_t reserved_45_63 : 19;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_lmcx_ddr_pll_ctl bdk_lmcx_ddr_pll_ctl_t;
+
+static inline uint64_t BDK_LMCX_DDR_PLL_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_DDR_PLL_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000258ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000258ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000258ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000258ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_DDR_PLL_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_DDR_PLL_CTL(a) bdk_lmcx_ddr_pll_ctl_t
+#define bustype_BDK_LMCX_DDR_PLL_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_DDR_PLL_CTL(a) "LMCX_DDR_PLL_CTL"
+#define device_bar_BDK_LMCX_DDR_PLL_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_DDR_PLL_CTL(a) (a)
+#define arguments_BDK_LMCX_DDR_PLL_CTL(a) (a),-1,-1,-1
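+
+/*
+ * Illustrative sketch (not part of the BDK sources): derive [CLKF] and a
+ * postscalar divide ratio for the CN8XXX-style PLL from the formulas in the
+ * field descriptions above, assuming the 50 MHz reference implied by
+ * "LMC PLL frequency (MHz) = 50 * [CLKF]" and the [DDR_PS_EN] divide table.
+ * All names here are hypothetical; this is arithmetic only, no CSR access.
+ */
+static inline int lmc_pll_pick_example(uint64_t target_ck_mhz,
+                                       uint64_t *clkf, int *ps_en)
+{
+    static const int ps_ratio[10] = { 1, 2, 3, 4, 5, 6, 7, 8, 10, 12 };
+    for (int ps = 0; ps < 10; ps++) {
+        uint64_t f = target_ck_mhz * ps_ratio[ps]; /* candidate VCO, MHz */
+        if (f % 50)
+            continue;                  /* [CLKF] must be an integer      */
+        uint64_t c = f / 50;
+        if (c < 31 || c > 99)
+            continue;                  /* legal [CLKF] window            */
+        if (f < 1600 || f > 5000)
+            continue;                  /* PLL range per the description  */
+        *clkf = c;
+        *ps_en = ps;
+        return 0;                      /* e.g. 800 MHz -> CLKF=32, PS=1  */
+    }
+    return -1;
+}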
+
+/**
+ * Register (RSL) lmc#_dimm#_ddr4_params0
+ *
+ * LMC DIMM Parameters Registers 0
+ * This register contains values to be programmed into the extra DDR4 control words in the
+ * corresponding (registered) DIMM. These are control words RC1x through RC8x.
+ */
+union bdk_lmcx_dimmx_ddr4_params0
+{
+ uint64_t u;
+ struct bdk_lmcx_dimmx_ddr4_params0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t rc8x : 8; /**< [ 63: 56](R/W) RC8x. */
+ uint64_t rc7x : 8; /**< [ 55: 48](R/W) RC7x. */
+ uint64_t rc6x : 8; /**< [ 47: 40](R/W) RC6x. */
+ uint64_t rc5x : 8; /**< [ 39: 32](R/W) RC5x. */
+ uint64_t rc4x : 8; /**< [ 31: 24](R/W) RC4x. */
+ uint64_t rc3x : 8; /**< [ 23: 16](R/W) RC3x. */
+ uint64_t rc2x : 8; /**< [ 15: 8](R/W) RC2x. */
+ uint64_t rc1x : 8; /**< [ 7: 0](R/W) RC1x. */
+#else /* Word 0 - Little Endian */
+ uint64_t rc1x : 8; /**< [ 7: 0](R/W) RC1x. */
+ uint64_t rc2x : 8; /**< [ 15: 8](R/W) RC2x. */
+ uint64_t rc3x : 8; /**< [ 23: 16](R/W) RC3x. */
+ uint64_t rc4x : 8; /**< [ 31: 24](R/W) RC4x. */
+ uint64_t rc5x : 8; /**< [ 39: 32](R/W) RC5x. */
+ uint64_t rc6x : 8; /**< [ 47: 40](R/W) RC6x. */
+ uint64_t rc7x : 8; /**< [ 55: 48](R/W) RC7x. */
+ uint64_t rc8x : 8; /**< [ 63: 56](R/W) RC8x. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_dimmx_ddr4_params0_s cn; */
+};
+typedef union bdk_lmcx_dimmx_ddr4_params0 bdk_lmcx_dimmx_ddr4_params0_t;
+
+static inline uint64_t BDK_LMCX_DIMMX_DDR4_PARAMS0(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_DIMMX_DDR4_PARAMS0(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=1)))
+ return 0x87e0880000d0ll + 0x1000000ll * ((a) & 0x0) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=1)))
+ return 0x87e0880000d0ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b<=1)))
+ return 0x87e0880000d0ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=2) && (b<=1)))
+ return 0x87e0880000d0ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("LMCX_DIMMX_DDR4_PARAMS0", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_LMCX_DIMMX_DDR4_PARAMS0(a,b) bdk_lmcx_dimmx_ddr4_params0_t
+#define bustype_BDK_LMCX_DIMMX_DDR4_PARAMS0(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_DIMMX_DDR4_PARAMS0(a,b) "LMCX_DIMMX_DDR4_PARAMS0"
+#define device_bar_BDK_LMCX_DIMMX_DDR4_PARAMS0(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_DIMMX_DDR4_PARAMS0(a,b) (a)
+#define arguments_BDK_LMCX_DIMMX_DDR4_PARAMS0(a,b) (a),(b),-1,-1
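+
+/*
+ * Illustrative sketch (not part of the BDK sources): the PARAMS0 union gives
+ * byte-wide views of the DDR4 RC1x..RC8x control words. The values below are
+ * placeholders, not recommended RCD settings.
+ */
+static inline uint64_t lmc_params0_pack_example(void)
+{
+    bdk_lmcx_dimmx_ddr4_params0_t p0;
+    p0.u = 0;            /* start with every extended control word cleared */
+    p0.s.rc1x = 0x2c;    /* placeholder RC1x payload, bits <7:0>           */
+    p0.s.rc3x = 0x55;    /* placeholder RC3x payload, bits <23:16>         */
+    return p0.u;         /* 0x000000000055002c on this layout              */
+}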
+
+/**
+ * Register (RSL) lmc#_dimm#_ddr4_params1
+ *
+ * LMC DIMM Parameters Registers 1
+ * This register contains values to be programmed into the extra DDR4 control words in the
+ * corresponding (registered) DIMM. These are control words RC9x through RCBx.
+ */
+union bdk_lmcx_dimmx_ddr4_params1
+{
+ uint64_t u;
+ struct bdk_lmcx_dimmx_ddr4_params1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t rcbx : 8; /**< [ 23: 16](R/W) RCBx. */
+ uint64_t rcax : 8; /**< [ 15: 8](R/W) RCAx. */
+ uint64_t rc9x : 8; /**< [ 7: 0](R/W) RC9x. */
+#else /* Word 0 - Little Endian */
+ uint64_t rc9x : 8; /**< [ 7: 0](R/W) RC9x. */
+ uint64_t rcax : 8; /**< [ 15: 8](R/W) RCAx. */
+ uint64_t rcbx : 8; /**< [ 23: 16](R/W) RCBx. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_dimmx_ddr4_params1_s cn; */
+};
+typedef union bdk_lmcx_dimmx_ddr4_params1 bdk_lmcx_dimmx_ddr4_params1_t;
+
+static inline uint64_t BDK_LMCX_DIMMX_DDR4_PARAMS1(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_DIMMX_DDR4_PARAMS1(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=1)))
+ return 0x87e088000140ll + 0x1000000ll * ((a) & 0x0) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=1)))
+ return 0x87e088000140ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b<=1)))
+ return 0x87e088000140ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=2) && (b<=1)))
+ return 0x87e088000140ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("LMCX_DIMMX_DDR4_PARAMS1", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_LMCX_DIMMX_DDR4_PARAMS1(a,b) bdk_lmcx_dimmx_ddr4_params1_t
+#define bustype_BDK_LMCX_DIMMX_DDR4_PARAMS1(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_DIMMX_DDR4_PARAMS1(a,b) "LMCX_DIMMX_DDR4_PARAMS1"
+#define device_bar_BDK_LMCX_DIMMX_DDR4_PARAMS1(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_DIMMX_DDR4_PARAMS1(a,b) (a)
+#define arguments_BDK_LMCX_DIMMX_DDR4_PARAMS1(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) lmc#_dimm#_params
+ *
+ * LMC DIMM Parameters Register
+ * This register contains values to be programmed into each control word in the corresponding
+ * (registered) DIMM. The control words allow optimization of the device properties for different
+ * raw card designs. Note that LMC only uses this CSR when LMC()_CONTROL[RDIMM_ENA]=1. During
+ * a power-up/init sequence, LMC writes these fields into the control words in the JEDEC standard
+ * DDR4 registering clock driver when the corresponding LMC()_DIMM_CTL[DIMM*_WMASK] bits are set.
+ */
+union bdk_lmcx_dimmx_params
+{
+ uint64_t u;
+ struct bdk_lmcx_dimmx_params_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t rc15 : 4; /**< [ 63: 60](R/W) RC15, Reserved. */
+ uint64_t rc14 : 4; /**< [ 59: 56](R/W) RC14, Reserved. */
+ uint64_t rc13 : 4; /**< [ 55: 52](R/W) RC13, Reserved. */
+ uint64_t rc12 : 4; /**< [ 51: 48](R/W) RC12, Reserved. */
+ uint64_t rc11 : 4; /**< [ 47: 44](R/W) RC11, Encoding for RDIMM operating VDD. */
+ uint64_t rc10 : 4; /**< [ 43: 40](R/W) RC10, Encoding for RDIMM operating speed. */
+ uint64_t rc9 : 4; /**< [ 39: 36](R/W) RC9, Power savings settings control word. */
+ uint64_t rc8 : 4; /**< [ 35: 32](R/W) RC8, Additional IBT settings control word. */
+ uint64_t rc7 : 4; /**< [ 31: 28](R/W) RC7, Reserved. */
+ uint64_t rc6 : 4; /**< [ 27: 24](R/W) RC6, Reserved. */
+ uint64_t rc5 : 4; /**< [ 23: 20](R/W) RC5, CK driver characteristics control word. */
+ uint64_t rc4 : 4; /**< [ 19: 16](R/W) RC4, Control signals driver characteristics control word. */
+ uint64_t rc3 : 4; /**< [ 15: 12](R/W) RC3, CA signals driver characteristics control word. */
+ uint64_t rc2 : 4; /**< [ 11: 8](R/W) RC2, Timing control word. */
+ uint64_t rc1 : 4; /**< [ 7: 4](R/W) RC1, Clock driver enable control word. */
+ uint64_t rc0 : 4; /**< [ 3: 0](R/W) RC0, Global features control word. */
+#else /* Word 0 - Little Endian */
+ uint64_t rc0 : 4; /**< [ 3: 0](R/W) RC0, Global features control word. */
+ uint64_t rc1 : 4; /**< [ 7: 4](R/W) RC1, Clock driver enable control word. */
+ uint64_t rc2 : 4; /**< [ 11: 8](R/W) RC2, Timing control word. */
+ uint64_t rc3 : 4; /**< [ 15: 12](R/W) RC3, CA signals driver characteristics control word. */
+ uint64_t rc4 : 4; /**< [ 19: 16](R/W) RC4, Control signals driver characteristics control word. */
+ uint64_t rc5 : 4; /**< [ 23: 20](R/W) RC5, CK driver characteristics control word. */
+ uint64_t rc6 : 4; /**< [ 27: 24](R/W) RC6, Reserved. */
+ uint64_t rc7 : 4; /**< [ 31: 28](R/W) RC7, Reserved. */
+ uint64_t rc8 : 4; /**< [ 35: 32](R/W) RC8, Additional IBT settings control word. */
+ uint64_t rc9 : 4; /**< [ 39: 36](R/W) RC9, Power savings settings control word. */
+ uint64_t rc10 : 4; /**< [ 43: 40](R/W) RC10, Encoding for RDIMM operating speed. */
+ uint64_t rc11 : 4; /**< [ 47: 44](R/W) RC11, Encoding for RDIMM operating VDD. */
+ uint64_t rc12 : 4; /**< [ 51: 48](R/W) RC12, Reserved. */
+ uint64_t rc13 : 4; /**< [ 55: 52](R/W) RC13, Reserved. */
+ uint64_t rc14 : 4; /**< [ 59: 56](R/W) RC14, Reserved. */
+ uint64_t rc15 : 4; /**< [ 63: 60](R/W) RC15, Reserved. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_dimmx_params_s cn; */
+};
+typedef union bdk_lmcx_dimmx_params bdk_lmcx_dimmx_params_t;
+
+static inline uint64_t BDK_LMCX_DIMMX_PARAMS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_DIMMX_PARAMS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=1)))
+ return 0x87e088000270ll + 0x1000000ll * ((a) & 0x0) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=1)))
+ return 0x87e088000270ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b<=1)))
+ return 0x87e088000270ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=2) && (b<=1)))
+ return 0x87e088000270ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("LMCX_DIMMX_PARAMS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_LMCX_DIMMX_PARAMS(a,b) bdk_lmcx_dimmx_params_t
+#define bustype_BDK_LMCX_DIMMX_PARAMS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_DIMMX_PARAMS(a,b) "LMCX_DIMMX_PARAMS"
+#define device_bar_BDK_LMCX_DIMMX_PARAMS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_DIMMX_PARAMS(a,b) (a)
+#define arguments_BDK_LMCX_DIMMX_PARAMS(a,b) (a),(b),-1,-1
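+
+/*
+ * Illustrative sketch (not part of the BDK sources): stage one 4-bit RC word
+ * and arm the matching LMC()_DIMM_CTL[DIMM0_WMASK] bit so LMC writes it to
+ * the RCD during the next power-up/init sequence. The RC5 value is a
+ * placeholder, not a tuned CK driver setting.
+ */
+static inline void lmc_stage_rc5_example(bdk_lmcx_dimmx_params_t *params,
+                                         uint64_t *dimm0_wmask)
+{
+    params->s.rc5 = 0x2;          /* placeholder CK driver characteristics */
+    *dimm0_wmask |= 1ull << 5;    /* DIMM0_WMASK[5] = 1 -> write DIMM0.RC5 */
+}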
+
+/**
+ * Register (RSL) lmc#_dimm_ctl
+ *
+ * LMC DIMM Control Register
+ * Note that this CSR is only used when LMC()_CONTROL[RDIMM_ENA] = 1 or
+ * LMC()_CONFIG[LRDIMM_ENA] = 1. During a power-up/init sequence, this CSR controls
+ * LMC's write operations to the control words in the JEDEC standard DDR4 registering
+ * clock driver.
+ */
+union bdk_lmcx_dimm_ctl
+{
+ uint64_t u;
+ struct bdk_lmcx_dimm_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_45_63 : 19;
+ uint64_t tcws : 13; /**< [ 44: 32](R/W) LMC waits for this time period before and after an RDIMM control word access during a
+ power-up/init sequence. TCWS is in multiples of 8 CK cycles.
+ Set TCWS (CSR field) = RNDUP[TCWS(ns)/(8 * TCYC(ns))], where TCWS is the desired time
+ (ns) and TCYC(ns) is the DDR clock period (not the data rate).
+ TYP = 0x4E0 (equivalent to 15 us) when changing clock timing (RC2.DBA1, RC6.DA4, RC10.DA3,
+ RC10.DA4, RC11.DA3, and RC11.DA4).
+ TYP = 0x8 otherwise.
+ 0x0 = Reserved. */
+ uint64_t dimm1_wmask : 16; /**< [ 31: 16](R/W) Reserved.
+ Internal:
+ DIMM1 write mask. If (DIMM1_WMASK[n] = 1), write DIMM1.RCn. */
+ uint64_t dimm0_wmask : 16; /**< [ 15: 0](R/W) DIMM0 write mask. If (DIMM0_WMASK[n] = 1), write DIMM0.RCn. */
+#else /* Word 0 - Little Endian */
+ uint64_t dimm0_wmask : 16; /**< [ 15: 0](R/W) DIMM0 write mask. If (DIMM0_WMASK[n] = 1), write DIMM0.RCn. */
+ uint64_t dimm1_wmask : 16; /**< [ 31: 16](R/W) Reserved.
+ Internal:
+ DIMM1 write mask. If (DIMM1_WMASK[n] = 1), write DIMM1.RCn. */
+ uint64_t tcws : 13; /**< [ 44: 32](R/W) LMC waits for this time period before and after an RDIMM control word access during a
+ power-up/init sequence. TCWS is in multiples of 8 CK cycles.
+ Set TCWS (CSR field) = RNDUP[TCWS(ns)/(8 * TCYC(ns))], where TCWS is the desired time
+ (ns) and TCYC(ns) is the DDR clock period (not the data rate).
+ TYP = 0x4E0 (equivalent to 15 us) when changing clock timing (RC2.DBA1, RC6.DA4, RC10.DA3,
+ RC10.DA4, RC11.DA3, and RC11.DA4).
+ TYP = 0x8 otherwise.
+ 0x0 = Reserved. */
+ uint64_t reserved_45_63 : 19;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_dimm_ctl_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_46_63 : 18;
+ uint64_t cke_assert : 1; /**< [ 45: 45](R/W) CKE assertion.
+ 0 = LMC does not change the current state of the CKE pin during
+ RCD_INIT. Note that clearing this field to 0 before running RCD_INIT is
+ necessary when initiating control gear-down mode on the RCD.
+ 1 = LMC will drive CKE output HIGH at the beginning of RCD_INIT sequence. */
+ uint64_t tcws : 13; /**< [ 44: 32](R/W) LMC waits for this time period before and after an RDIMM control word access during a
+ power-up/init sequence. TCWS is in multiples of 8 CK cycles.
+ Set TCWS (CSR field) = RNDUP[TCWS(ns)/(8 * TCYC(ns))], where TCWS is the desired time
+ (ns) and TCYC(ns) is the DDR clock period (not the data rate).
+ TYP = 0x4E0 (equivalent to 15 us) when changing clock timing (RC2.DBA1, RC6.DA4, RC10.DA3,
+ RC10.DA4, RC11.DA3, and RC11.DA4).
+ TYP = 0x8 otherwise.
+ 0x0 = Reserved. */
+ uint64_t dimm1_wmask : 16; /**< [ 31: 16](R/W) DIMM1 write mask. If (DIMM1_WMASK[n] = 1), write DIMM1.RCn. */
+ uint64_t dimm0_wmask : 16; /**< [ 15: 0](R/W) DIMM0 write mask. If (DIMM0_WMASK[n] = 1), write DIMM0.RCn. */
+#else /* Word 0 - Little Endian */
+ uint64_t dimm0_wmask : 16; /**< [ 15: 0](R/W) DIMM0 write mask. If (DIMM0_WMASK[n] = 1), write DIMM0.RCn. */
+ uint64_t dimm1_wmask : 16; /**< [ 31: 16](R/W) DIMM1 write mask. If (DIMM1_WMASK[n] = 1), write DIMM1.RCn. */
+ uint64_t tcws : 13; /**< [ 44: 32](R/W) LMC waits for this time period before and after an RDIMM control word access during a
+ power-up/init sequence. TCWS is in multiples of 8 CK cycles.
+ Set TCWS (CSR field) = RNDUP[TCWS(ns)/(8 * TCYC(ns))], where TCWS is the desired time
+ (ns) and TCYC(ns) is the DDR clock period (not the data rate).
+ TYP = 0x4E0 (equivalent to 15 us) when changing clock timing (RC2.DBA1, RC6.DA4, RC10.DA3,
+ RC10.DA4, RC11.DA3, and RC11.DA4).
+ TYP = 0x8 otherwise.
+ 0x0 = Reserved. */
+ uint64_t cke_assert : 1; /**< [ 45: 45](R/W) CKE assertion.
+ 0 = LMC does not change the current state of the CKE pin during
+ RCD_INIT. Note that clearing this field to 0 before running RCD_INIT is
+ necessary when initiating control gear-down mode on the RCD.
+ 1 = LMC will drive CKE output HIGH at the beginning of RCD_INIT sequence. */
+ uint64_t reserved_46_63 : 18;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_lmcx_dimm_ctl_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_46_63 : 18;
+ uint64_t parity : 1; /**< [ 45: 45](R/W) Parity. The Par_In input of a registered DIMM should be tied off. LMC adjusts the value
+ of the DDR_WE_L (DWE#) pin during DDR3 register part control word writes to ensure the
+ parity is observed correctly by the receiving DDR3 SSTE32882 or DDR4 DDR4RCD01 register
+ part. When Par_In is grounded, PARITY should be cleared to 0. */
+ uint64_t tcws : 13; /**< [ 44: 32](R/W) LMC waits for this time period before and after an RDIMM control word access during a
+ power-up/init sequence. TCWS is in multiples of 8 CK cycles.
+ Set TCWS (CSR field) = RNDUP[TCWS(ns)/(8 * TCYC(ns))], where TCWS is the desired time
+ (ns) and TCYC(ns) is the DDR clock period (not the data rate).
+ TYP = 0x4E0 (equivalent to 15 us) when changing clock timing (RC2.DBA1, RC6.DA4, RC10.DA3,
+ RC10.DA4, RC11.DA3, and RC11.DA4).
+ TYP = 0x8 otherwise.
+ 0x0 = Reserved. */
+ uint64_t dimm1_wmask : 16; /**< [ 31: 16](R/W) Reserved.
+ Internal:
+ DIMM1 write mask. If (DIMM1_WMASK[n] = 1), write DIMM1.RCn. */
+ uint64_t dimm0_wmask : 16; /**< [ 15: 0](R/W) DIMM0 write mask. If (DIMM0_WMASK[n] = 1), write DIMM0.RCn. */
+#else /* Word 0 - Little Endian */
+ uint64_t dimm0_wmask : 16; /**< [ 15: 0](R/W) DIMM0 write mask. If (DIMM0_WMASK[n] = 1), write DIMM0.RCn. */
+ uint64_t dimm1_wmask : 16; /**< [ 31: 16](R/W) Reserved.
+ Internal:
+ DIMM1 write mask. If (DIMM1_WMASK[n] = 1), write DIMM1.RCn. */
+ uint64_t tcws : 13; /**< [ 44: 32](R/W) LMC waits for this time period before and after an RDIMM control word access during a
+ power-up/init sequence. TCWS is in multiples of 8 CK cycles.
+ Set TCWS (CSR field) = RNDUP[TCWS(ns)/(8 * TCYC(ns))], where TCWS is the desired time
+ (ns) and TCYC(ns) is the DDR clock period (not the data rate).
+ TYP = 0x4E0 (equivalent to 15 us) when changing clock timing (RC2.DBA1, RC6.DA4, RC10.DA3,
+ RC10.DA4, RC11.DA3, and RC11.DA4).
+ TYP = 0x8 otherwise.
+ 0x0 = Reserved. */
+ uint64_t parity : 1; /**< [ 45: 45](R/W) Parity. The Par_In input of a registered DIMM should be tied off. LMC adjusts the value
+ of the DDR_WE_L (DWE#) pin during DDR3 register part control word writes to ensure the
+ parity is observed correctly by the receiving DDR3 SSTE32882 or DDR4 DDR4RCD01 register
+ part. When Par_In is grounded, PARITY should be cleared to 0. */
+ uint64_t reserved_46_63 : 18;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_lmcx_dimm_ctl_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_46_63 : 18;
+ uint64_t parity : 1; /**< [ 45: 45](R/W) Parity. The Par_In input of a registered DIMM should be tied off. LMC adjusts the value
+ of the DDR_WE_L (DWE#) pin during DDR3 register part control word writes to ensure the
+ parity is observed correctly by the receiving DDR3 SSTE32882 or DDR4 DDR4RCD01 register
+ part. When Par_In is grounded, PARITY should be cleared to 0. */
+ uint64_t tcws : 13; /**< [ 44: 32](R/W) LMC waits for this time period before and after an RDIMM control word access during a
+ power-up/init sequence. TCWS is in multiples of 8 CK cycles.
+ Set TCWS (CSR field) = RNDUP[TCWS(ns)/(8 * TCYC(ns))], where TCWS is the desired time
+ (ns) and TCYC(ns) is the DDR clock period (not the data rate).
+ TYP = 0x4E0 (equivalent to 15 us) when changing clock timing (RC2.DBA1, RC6.DA4, RC10.DA3,
+ RC10.DA4, RC11.DA3, and RC11.DA4).
+ TYP = 0x8 otherwise.
+ 0x0 = Reserved. */
+ uint64_t dimm1_wmask : 16; /**< [ 31: 16](R/W) DIMM1 write mask. If (DIMM1_WMASK[n] = 1), write DIMM1.RCn. */
+ uint64_t dimm0_wmask : 16; /**< [ 15: 0](R/W) DIMM0 write mask. If (DIMM0_WMASK[n] = 1), write DIMM0.RCn. */
+#else /* Word 0 - Little Endian */
+ uint64_t dimm0_wmask : 16; /**< [ 15: 0](R/W) DIMM0 write mask. If (DIMM0_WMASK[n] = 1), write DIMM0.RCn. */
+ uint64_t dimm1_wmask : 16; /**< [ 31: 16](R/W) DIMM1 write mask. If (DIMM1_WMASK[n] = 1), write DIMM1.RCn. */
+ uint64_t tcws : 13; /**< [ 44: 32](R/W) LMC waits for this time period before and after an RDIMM control word access during a
+ power-up/init sequence. TCWS is in multiples of 8 CK cycles.
+ Set TCWS (CSR field) = RNDUP[TCWS(ns)/(8 * TCYC(ns))], where TCWS is the desired time
+ (ns) and TCYC(ns) is the DDR clock period (not the data rate).
+ TYP = 0x4E0 (equivalent to 15 us) when changing clock timing (RC2.DBA1, RC6.DA4, RC10.DA3,
+ RC10.DA4, RC11.DA3, and RC11.DA4).
+ TYP = 0x8 otherwise.
+ 0x0 = Reserved. */
+ uint64_t parity : 1; /**< [ 45: 45](R/W) Parity. The Par_In input of a registered DIMM should be tied off. LMC adjusts the value
+ of the DDR_WE_L (DWE#) pin during DDR3 register part control word writes to ensure the
+ parity is observed correctly by the receiving DDR3 SSTE32882 or DDR4 DDR4RCD01 register
+ part. When Par_In is grounded, PARITY should be cleared to 0. */
+ uint64_t reserved_46_63 : 18;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_lmcx_dimm_ctl_cn88xx cn83xx; */
+};
+typedef union bdk_lmcx_dimm_ctl bdk_lmcx_dimm_ctl_t;
+
+static inline uint64_t BDK_LMCX_DIMM_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_DIMM_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000310ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000310ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000310ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000310ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_DIMM_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_DIMM_CTL(a) bdk_lmcx_dimm_ctl_t
+#define bustype_BDK_LMCX_DIMM_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_DIMM_CTL(a) "LMCX_DIMM_CTL"
+#define device_bar_BDK_LMCX_DIMM_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_DIMM_CTL(a) (a)
+#define arguments_BDK_LMCX_DIMM_CTL(a) (a),-1,-1,-1
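+
+/*
+ * Worked example (not part of the BDK sources) for the [TCWS] formula above:
+ * TCWS = RNDUP[TCWS(ns)/(8 * TCYC(ns))]. The CK period is taken in
+ * picoseconds so the round-up stays integral. tcws_from_ns(15000, 1500)
+ * returns 1250 (0x4E2) for a 667 MHz CK, in the same ballpark as the 0x4E0
+ * typical value quoted for 15 us.
+ */
+static inline uint64_t tcws_from_ns(uint64_t tcws_ns, uint64_t tcyc_ps)
+{
+    uint64_t tcws_ps = tcws_ns * 1000;
+    return (tcws_ps + 8 * tcyc_ps - 1) / (8 * tcyc_ps); /* round up */
+}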
+
+/**
+ * Register (RSL) lmc#_dll_ctl2
+ *
+ * LMC DLL Control/System-Memory-Clock-Reset Register
+ * See the LMC initialization sequence for details.
+ * Internal:
+ * DLL Bringup sequence:
+ *
+ * 1. If not done already, set LMC()_DLL_CTL2 = 0, except when LMC()_DLL_CTL2[DRESET] = 1.
+ *
+ * 2. Write one to LMC()_DLL_CTL2[DLL_BRINGUP].
+ *
+ * 3. Wait for 10 CK cycles, then write one to LMC()_DLL_CTL2[QUAD_DLL_ENA]. It may not be
+ * feasible
+ * to count 10 CK cycles, but the idea is to configure the delay line into DLL mode by asserting
+ * LMC()_DLL_CTL2[DLL_BRINGUP] earlier than [QUAD_DLL_ENA], even if it is one cycle early.
+ * LMC()_DLL_CTL2[QUAD_DLL_ENA] must not change after this point without restarting the LMC
+ * and/or
+ * DRESET initialization sequence.
+ *
+ * 4. Read L2D_BST0 and wait for the result. (L2D_BST0 is subject to change depending on how it
+ * is called in o63. It is still OK to skip step 4, since step 5 allows enough time.)
+ *
+ * 5. Wait 10 us.
+ *
+ * 6. Write zero to LMC()_DLL_CTL2[DLL_BRINGUP]. LMC()_DLL_CTL2[DLL_BRINGUP] must not change
+ * after this point without restarting the LMC and/or DRESET initialization sequence.
+ *
+ * 7. Read L2D_BST0 and wait for the result. (Same as step 4, but the idea here is to wait some
+ * time before going to step 8; even one cycle is fine.)
+ *
+ * 8. Write zero to LMC()_DLL_CTL2[DRESET]. LMC()_DLL_CTL2[DRESET] must not change after this
+ * point
+ * without restarting the LMC and/or DRESET initialization sequence.
+ *
+ * 9. Wait for the LMC()_DLL_CTL2[DRESET_DLY] delay to ensure clocks have turned on and reset has been deasserted.
+ */
+union bdk_lmcx_dll_ctl2
+{
+ uint64_t u;
+ struct bdk_lmcx_dll_ctl2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t dreset_cclk_dis : 1; /**< [ 23: 23](R/W) Force conditional dclk on during reset. */
+ uint64_t dreset_dly : 6; /**< [ 22: 17](R/W) Wait time for DRESET propagation in dclk cycles. When DRESET is cleared
+ this value is counted down to allow clocks to turn on and capture reset state.
+ Once the counter expires, reset is deasserted. Setting this field to 0 will
+ default to 50 dclk cycles. */
+ uint64_t intf_en : 1; /**< [ 16: 16](R/W) Interface enable. */
+ uint64_t dll_bringup : 1; /**< [ 15: 15](R/W) DLL bring up. */
+ uint64_t dreset : 1; /**< [ 14: 14](R/W) System-memory-clock domain reset. The reset signal that is used by the system-memory-clock
+ domain is
+ (DRESET -OR- core-clock reset). */
+ uint64_t quad_dll_ena : 1; /**< [ 13: 13](R/W) DLL enable. */
+ uint64_t byp_sel : 4; /**< [ 12: 9](R/W) Reserved; must be zero.
+ Internal:
+ Bypass select.
+ 0x0 = no byte.
+ 0x1 = byte 0.
+ ...
+ 0x9 = byte 8.
+ 0xA = all bytes.
+ 0xB-0xF = Reserved. */
+ uint64_t byp_setting : 9; /**< [ 8: 0](R/W) Reserved; must be zero.
+ Internal:
+ Bypass setting.
+ DDR3-1600: 0x22.
+ DDR3-1333: 0x32.
+ DDR3-1066: 0x4B.
+ DDR3-800: 0x75.
+ DDR3-667: 0x96.
+ DDR3-600: 0xAC. */
+#else /* Word 0 - Little Endian */
+ uint64_t byp_setting : 9; /**< [ 8: 0](R/W) Reserved; must be zero.
+ Internal:
+ Bypass setting.
+ DDR3-1600: 0x22.
+ DDR3-1333: 0x32.
+ DDR3-1066: 0x4B.
+ DDR3-800: 0x75.
+ DDR3-667: 0x96.
+ DDR3-600: 0xAC. */
+ uint64_t byp_sel : 4; /**< [ 12: 9](R/W) Reserved; must be zero.
+ Internal:
+ Bypass select.
+ 0x0 = no byte.
+ 0x1 = byte 0.
+ ...
+ 0x9 = byte 8.
+ 0xA = all bytes.
+ 0xB-0xF = Reserved. */
+ uint64_t quad_dll_ena : 1; /**< [ 13: 13](R/W) DLL enable. */
+ uint64_t dreset : 1; /**< [ 14: 14](R/W) System-memory-clock domain reset. The reset signal that is used by the system-memory-clock
+ domain is
+ (DRESET -OR- core-clock reset). */
+ uint64_t dll_bringup : 1; /**< [ 15: 15](R/W) DLL bring up. */
+ uint64_t intf_en : 1; /**< [ 16: 16](R/W) Interface enable. */
+ uint64_t dreset_dly : 6; /**< [ 22: 17](R/W) Wait time for DRESET propagation in dclk cycles. When DRESET is cleared
+ this value is counted down to allow clocks to turn on and capture reset state.
+ Once the counter expires, reset is deasserted. Setting this field to 0 will
+ default to 50 dclk cycles. */
+ uint64_t dreset_cclk_dis : 1; /**< [ 23: 23](R/W) Force conditional dclk on during reset. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_dll_ctl2_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_17_63 : 47;
+ uint64_t intf_en : 1; /**< [ 16: 16](R/W) Interface enable. */
+ uint64_t dll_bringup : 1; /**< [ 15: 15](R/W) DLL bring up. */
+ uint64_t dreset : 1; /**< [ 14: 14](R/W) System-memory-clock domain reset. The reset signal that is used by the system-memory-clock
+ domain is
+ (DRESET -OR- core-clock reset). */
+ uint64_t quad_dll_ena : 1; /**< [ 13: 13](R/W) DLL enable. */
+ uint64_t byp_sel : 4; /**< [ 12: 9](R/W) Reserved; must be zero.
+ Internal:
+ Bypass select.
+ 0x0 = no byte.
+ 0x1 = byte 0.
+ ...
+ 0x9 = byte 8.
+ 0xA = all bytes.
+ 0xB-0xF = Reserved. */
+ uint64_t byp_setting : 9; /**< [ 8: 0](R/W) Reserved; must be zero.
+ Internal:
+ Bypass setting.
+ DDR3-1600: 0x22.
+ DDR3-1333: 0x32.
+ DDR3-1066: 0x4B.
+ DDR3-800: 0x75.
+ DDR3-667: 0x96.
+ DDR3-600: 0xAC. */
+#else /* Word 0 - Little Endian */
+ uint64_t byp_setting : 9; /**< [ 8: 0](R/W) Reserved; must be zero.
+ Internal:
+ Bypass setting.
+ DDR3-1600: 0x22.
+ DDR3-1333: 0x32.
+ DDR3-1066: 0x4B.
+ DDR3-800: 0x75.
+ DDR3-667: 0x96.
+ DDR3-600: 0xAC. */
+ uint64_t byp_sel : 4; /**< [ 12: 9](R/W) Reserved; must be zero.
+ Internal:
+ Bypass select.
+ 0x0 = no byte.
+ 0x1 = byte 0.
+ ...
+ 0x9 = byte 8.
+ 0xA = all bytes.
+ 0xB-0xF = Reserved. */
+ uint64_t quad_dll_ena : 1; /**< [ 13: 13](R/W) DLL enable. */
+ uint64_t dreset : 1; /**< [ 14: 14](R/W) System-memory-clock domain reset. The reset signal that is used by the system-memory-clock
+ domain is
+ (DRESET -OR- core-clock reset). */
+ uint64_t dll_bringup : 1; /**< [ 15: 15](R/W) DLL bring up. */
+ uint64_t intf_en : 1; /**< [ 16: 16](R/W) Interface enable. */
+ uint64_t reserved_17_63 : 47;
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_lmcx_dll_ctl2_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t dreset_cclk_dis : 1; /**< [ 23: 23](R/W) Force conditional dclk on during reset. */
+ uint64_t dreset_dly : 6; /**< [ 22: 17](R/W) Wait time for DRESET propagation in dclk cycles. When DRESET is cleared
+ this value is counted down to allow clocks to turn on and capture reset state.
+ Once the counter expires, reset is deasserted. Setting this field to 0 will
+ default to 50 dclk cycles. */
+ uint64_t intf_en : 1; /**< [ 16: 16](R/W) Interface enable. */
+ uint64_t dll_bringup : 1; /**< [ 15: 15](R/W) DLL bring up. */
+ uint64_t dreset : 1; /**< [ 14: 14](R/W) System memory clock domain reset. The reset signal that is used by the
+ system memory clock domain is (DRESET -OR- core-clock reset). */
+ uint64_t quad_dll_ena : 1; /**< [ 13: 13](R/W) DLL enable. */
+ uint64_t byp_sel : 4; /**< [ 12: 9](R/W) Reserved; must be zero.
+ Internal:
+ Bypass select.
+ 0x0 = no byte.
+ 0x1 = byte 0.
+ ...
+ 0x9 = byte 8.
+ 0xA = all bytes.
+ 0xB-0xF = Reserved. */
+ uint64_t byp_setting : 9; /**< [ 8: 0](R/W) Reserved; must be zero.
+ Internal:
+ Bypass setting.
+ DDR3-1600: 0x22.
+ DDR3-1333: 0x32.
+ DDR3-1066: 0x4B.
+ DDR3-800: 0x75.
+ DDR3-667: 0x96.
+ DDR3-600: 0xAC. */
+#else /* Word 0 - Little Endian */
+ uint64_t byp_setting : 9; /**< [ 8: 0](R/W) Reserved; must be zero.
+ Internal:
+ Bypass setting.
+ DDR3-1600: 0x22.
+ DDR3-1333: 0x32.
+ DDR3-1066: 0x4B.
+ DDR3-800: 0x75.
+ DDR3-667: 0x96.
+ DDR3-600: 0xAC. */
+ uint64_t byp_sel : 4; /**< [ 12: 9](R/W) Reserved; must be zero.
+ Internal:
+ Bypass select.
+ 0x0 = no byte.
+ 0x1 = byte 0.
+ ...
+ 0x9 = byte 8.
+ 0xA = all bytes.
+ 0xB-0xF = Reserved. */
+ uint64_t quad_dll_ena : 1; /**< [ 13: 13](R/W) DLL enable. */
+ uint64_t dreset : 1; /**< [ 14: 14](R/W) System memory clock domain reset. The reset signal that is used by the
+ system memory clock domain is (DRESET -OR- core-clock reset). */
+ uint64_t dll_bringup : 1; /**< [ 15: 15](R/W) DLL bring up. */
+ uint64_t intf_en : 1; /**< [ 16: 16](R/W) Interface enable. */
+ uint64_t dreset_dly : 6; /**< [ 22: 17](R/W) Wait time for DRESET propagation in dclk cycles. When DRESET is cleared
+ this value is counted down to allow clocks to turn on and capture reset state.
+ Once the counter expires, reset is deasserted. Setting this field to 0 will
+ default to 50 dclk cycles. */
+ uint64_t dreset_cclk_dis : 1; /**< [ 23: 23](R/W) Force conditional dclk on during reset. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_lmcx_dll_ctl2 bdk_lmcx_dll_ctl2_t;
+
+static inline uint64_t BDK_LMCX_DLL_CTL2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_DLL_CTL2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0880001c8ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0880001c8ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e0880001c8ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e0880001c8ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_DLL_CTL2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_DLL_CTL2(a) bdk_lmcx_dll_ctl2_t
+#define bustype_BDK_LMCX_DLL_CTL2(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_DLL_CTL2(a) "LMCX_DLL_CTL2"
+#define device_bar_BDK_LMCX_DLL_CTL2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_DLL_CTL2(a) (a)
+#define arguments_BDK_LMCX_DLL_CTL2(a) (a),-1,-1,-1
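+
+/*
+ * Illustrative sketch (not part of the BDK sources) of the Internal DLL
+ * bringup steps documented above. csr_read64()/csr_write64() and wait_usec()
+ * are hypothetical stand-ins for whatever register-access and delay
+ * primitives the surrounding firmware provides; the cycle-accurate waits and
+ * L2D_BST0 reads of steps 3, 4, and 7 are elided.
+ */
+extern uint64_t csr_read64(uint64_t addr);                 /* hypothetical */
+extern void csr_write64(uint64_t addr, uint64_t value);    /* hypothetical */
+extern void wait_usec(unsigned long usec);                 /* hypothetical */
+
+static inline void lmc_dll_bringup_sketch(unsigned long lmc)
+{
+    uint64_t addr = BDK_LMCX_DLL_CTL2(lmc);
+    bdk_lmcx_dll_ctl2_t c;
+
+    c.u = csr_read64(addr);
+    c.s.dll_bringup = 1;            /* step 2: assert DLL_BRINGUP       */
+    csr_write64(addr, c.u);
+
+    c.s.quad_dll_ena = 1;           /* step 3: enable DLL after ~10 CK  */
+    csr_write64(addr, c.u);
+
+    wait_usec(10);                  /* step 5: settle                   */
+
+    c.s.dll_bringup = 0;            /* step 6: deassert DLL_BRINGUP     */
+    csr_write64(addr, c.u);
+
+    c.s.dreset = 0;                 /* step 8: release the dclk reset   */
+    csr_write64(addr, c.u);
+}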
+
+/**
+ * Register (RSL) lmc#_dll_ctl3
+ *
+ * LMC DLL Control/System-Memory-Clock Reset Register
+ */
+union bdk_lmcx_dll_ctl3
+{
+ uint64_t u;
+ struct bdk_lmcx_dll_ctl3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_62_63 : 2;
+ uint64_t rd_deskew_mem_sel_dis : 1; /**< [ 61: 61](R/W) By default, LMC always selects per-rank deskew settings that are stored inside
+ PHY. Set this field to one to manually disable this feature so that the common
+ deskew setting inside the PHY's state machine is selected instead. */
+ uint64_t wr_deskew_mem_sel : 1; /**< [ 60: 60](R/W) Reserved.
+ Internal:
+ Only relevant when [WR_DESKEW_ENA] is set.
+ 0 = Selects the common deskew settings stored in each DQ bit. All writes to any package
+ rank use these common settings to deskew the data bits.
+ 1 = Selects the stored per-package rank deskew settings. A write to a particular
+ package rank uses the corresponding stored setting for that rank. */
+ uint64_t wr_deskew_mem_ld : 1; /**< [ 59: 59](WO) Reserved.
+ Internal:
+ When set, all DQ bit deskew settings in the DDR PHY are loaded into their corresponding
+ rank deskew storage. The rank is chosen by the CSR LMC()_MR_MPR_CTL[MR_WR_RANK]. This is a
+ oneshot operation and clears itself each time it is set. Note this has to be done during
+ bringup, before there is any traffic to DRAM. */
+ uint64_t reserved_50_58 : 9;
+ uint64_t wr_deskew_ena : 1; /**< [ 49: 49](R/W) When set, it enables the write bit deskew feature. */
+ uint64_t wr_deskew_ld : 1; /**< [ 48: 48](WO) When set, the bit deskew setting in LMC()_DLL_CTL3[OFFSET] is loaded into
+ the designated byte LMC()_DLL_CTL3[BYTE_SEL] and bit LMC()_DLL_CTL3[BIT_SELECT]
+ for write bit deskew. This is a oneshot and clears itself each time
+ it is set. */
+ uint64_t bit_select : 4; /**< [ 47: 44](R/W) 0x0-0x7 = Selects bit 0 - bit 7 for write deskew setting assignment.
+ 0x8 = Selects dbi for write deskew setting assignment.
+ 0x9 = No-op.
+ 0xA = Reuse deskew setting on.
+ 0xB = Reuse deskew setting off.
+ 0xC = Vref bypass setting load.
+ 0xD = Vref bypass on.
+ 0xE = Vref bypass off.
+ 0xF = Bit select reset. Clear write deskew settings to default value 0x40 in each DQ bit.
+ Also sets Vref bypass to off and deskew reuse setting to off. */
+ uint64_t dclk90_fwd : 1; /**< [ 43: 43](WO) When set to one, clock-delay information is forwarded to the neighboring LMC. See LMC CK
+ Lock Initialization step for the LMC bring-up sequence.
+
+ Internal:
+ Generate a one cycle pulse to forward setting. This is a oneshot and clears
+ itself each time it is set. */
+ uint64_t ddr_90_dly_byp : 1; /**< [ 42: 42](R/W) Reserved; must be zero.
+ Internal:
+ Bypass DDR90_DLY in clock tree. */
+ uint64_t dclk90_recal_dis : 1; /**< [ 41: 41](R/W) Disable periodic recalibration of the DDR90 delay line. */
+ uint64_t dclk90_byp_sel : 1; /**< [ 40: 40](R/W) Bypass setting select for DDR90 delay line. */
+ uint64_t dclk90_byp_setting : 9; /**< [ 39: 31](R/W) Bypass setting for DDR90 delay line. */
+ uint64_t dll_fast : 1; /**< [ 30: 30](RO/H) Reserved; must be zero.
+ Internal:
+ DLL lock, 0=DLL locked. */
+ uint64_t dll90_setting : 9; /**< [ 29: 21](RO/H) Reserved; must be zero.
+ Internal:
+ Encoded DLL settings. Works in conjunction with [DLL90_BYTE_SEL]. */
+ uint64_t fine_tune_mode : 1; /**< [ 20: 20](R/W) DLL fine tune mode. 0 = disabled; 1 = enable. When enabled, calibrate internal PHY DLL
+ every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t dll_mode : 1; /**< [ 19: 19](R/W) Reserved; must be zero.
+ Internal:
+ DLL mode. */
+ uint64_t dll90_byte_sel : 4; /**< [ 18: 15](R/W) Observe DLL settings for selected byte.
+ 0x0 = byte 0.
+ 0x1 = byte 1.
+ ...
+ 0x8 = byte 8.
+ 0x9-0xF = Reserved. */
+ uint64_t offset_ena : 1; /**< [ 14: 14](R/W) Reserved; must be zero.
+ Internal:
+ Offset enable. 1=enable. */
+ uint64_t load_offset : 1; /**< [ 13: 13](WO) Reserved; must be zero.
+ Internal:
+ Load offset. 0=disable, 1=generate a one cycle pulse to
+ the PHY. This field is a oneshot and clears itself each time it is set. */
+ uint64_t mode_sel : 2; /**< [ 12: 11](R/W) Reserved; must be zero.
+ Internal:
+ Mode select. 0x0 = reset, 0x1 = write, 0x2 = read, 0x3 =
+ write and read. */
+ uint64_t byte_sel : 4; /**< [ 10: 7](R/W) Reserved; must be zero.
+ Internal:
+ Byte select. 0x0 = no byte, 0x1 = byte 0, ..., 0x9 =
+ byte 8, 0xA = all bytes, 0xB-0xF = Reserved. */
+ uint64_t reserved_0_6 : 7;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_6 : 7;
+ uint64_t byte_sel : 4; /**< [ 10: 7](R/W) Reserved; must be zero.
+ Internal:
+ Byte select. 0x0 = no byte, 0x1 = byte 0, ..., 0x9 =
+ byte 8, 0xA = all bytes, 0xB-0xF = Reserved. */
+ uint64_t mode_sel : 2; /**< [ 12: 11](R/W) Reserved; must be zero.
+ Internal:
+ Mode select. 0x0 = reset, 0x1 = write, 0x2 = read, 0x3 =
+ write and read. */
+ uint64_t load_offset : 1; /**< [ 13: 13](WO) Reserved; must be zero.
+ Internal:
+ Load offset. 0=disable, 1=generate a one cycle pulse to
+ the PHY. This field is a oneshot and clears itself each time it is set. */
+ uint64_t offset_ena : 1; /**< [ 14: 14](R/W) Reserved; must be zero.
+ Internal:
+ Offset enable. 1=enable. */
+ uint64_t dll90_byte_sel : 4; /**< [ 18: 15](R/W) Observe DLL settings for selected byte.
+ 0x0 = byte 0.
+ 0x1 = byte 1.
+ ...
+ 0x8 = byte 8.
+ 0x9-0xF = Reserved. */
+ uint64_t dll_mode : 1; /**< [ 19: 19](R/W) Reserved; must be zero.
+ Internal:
+ DLL mode. */
+ uint64_t fine_tune_mode : 1; /**< [ 20: 20](R/W) DLL fine tune mode. 0 = disabled; 1 = enable. When enabled, calibrate internal PHY DLL
+ every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t dll90_setting : 9; /**< [ 29: 21](RO/H) Reserved; must be zero.
+ Internal:
+ Encoded DLL settings. Works in conjunction with [DLL90_BYTE_SEL]. */
+ uint64_t dll_fast : 1; /**< [ 30: 30](RO/H) Reserved; must be zero.
+ Internal:
+ DLL lock, 0=DLL locked. */
+ uint64_t dclk90_byp_setting : 9; /**< [ 39: 31](R/W) Bypass setting for DDR90 delay line. */
+ uint64_t dclk90_byp_sel : 1; /**< [ 40: 40](R/W) Bypass setting select for DDR90 delay line. */
+ uint64_t dclk90_recal_dis : 1; /**< [ 41: 41](R/W) Disable periodic recalibration of the DDR90 delay line. */
+ uint64_t ddr_90_dly_byp : 1; /**< [ 42: 42](R/W) Reserved; must be zero.
+ Internal:
+ Bypass DDR90_DLY in clock tree. */
+ uint64_t dclk90_fwd : 1; /**< [ 43: 43](WO) When set to one, clock-delay information is forwarded to the neighboring LMC. See LMC CK
+ Lock Initialization step for the LMC bring-up sequence.
+
+ Internal:
+ Generate a one cycle pulse to forward setting. This is a oneshot and clears
+ itself each time it is set. */
+ uint64_t bit_select : 4; /**< [ 47: 44](R/W) 0x0-0x7 = Selects bit 0 - bit 7 for write deskew setting assignment.
+ 0x8 = Selects dbi for write deskew setting assignment.
+ 0x9 = No-op.
+ 0xA = Reuse deskew setting on.
+ 0xB = Reuse deskew setting off.
+ 0xC = Vref bypass setting load.
+ 0xD = Vref bypass on.
+ 0xE = Vref bypass off.
+ 0xF = Bit select reset. Clear write deskew settings to default value 0x40 in each DQ bit.
+ Also sets Vref bypass to off and deskew reuse setting to off. */
+ uint64_t wr_deskew_ld : 1; /**< [ 48: 48](WO) When set, the bit deskew setting in LMC()_DLL_CTL3[OFFSET] is loaded into
+ the designated byte LMC()_DLL_CTL3[BYTE_SEL] and bit LMC()_DLL_CTL3[BIT_SELECT]
+ for write bit deskew. This is a oneshot and clears itself each time
+ it is set. */
+ uint64_t wr_deskew_ena : 1; /**< [ 49: 49](R/W) When set, it enables the write bit deskew feature. */
+ uint64_t reserved_50_58 : 9;
+ uint64_t wr_deskew_mem_ld : 1; /**< [ 59: 59](WO) Reserved.
+ Internal:
+ When set, all DQ bit deskew settings in the DDR PHY are loaded into their corresponding
+ rank deskew storage. The rank is chosen by the CSR LMC()_MR_MPR_CTL[MR_WR_RANK]. This is a
+ oneshot operation and clears itself each time it is set. Note this has to be done during
+ bringup, before there is any traffic to DRAM. */
+ uint64_t wr_deskew_mem_sel : 1; /**< [ 60: 60](R/W) Reserved.
+ Internal:
+ Only relevant when [WR_DESKEW_ENA] is set.
+ 0 = Selects the common deskew settings stored in each DQ bit. All writes to any package
+ rank use these common settings to deskew the data bits.
+ 1 = Selects the stored per-package rank deskew settings. A write to a particular
+ package rank uses the corresponding stored setting for that rank. */
+ uint64_t rd_deskew_mem_sel_dis : 1; /**< [ 61: 61](R/W) By default, LMC always selects per-rank deskew settings that are stored inside
+ PHY. Set this field to one to manually disable this feature so that the common
+ deskew setting inside the PHY's state machine is selected instead. */
+ uint64_t reserved_62_63 : 2;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_dll_ctl3_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_50_63 : 14;
+ uint64_t wr_deskew_ena : 1; /**< [ 49: 49](R/W) When set, it enables the write bit deskew feature. */
+ uint64_t wr_deskew_ld : 1; /**< [ 48: 48](WO) When set, the bit deskew setting in LMC()_DLL_CTL3[OFFSET] is loaded into
+ the designated byte LMC()_DLL_CTL3[BYTE_SEL] and bit LMC()_DLL_CTL3[BIT_SELECT]
+ for write bit deskew. This is a oneshot and clears itself each time
+ it is set. */
+ uint64_t bit_select : 4; /**< [ 47: 44](R/W) 0x0-0x7 = Selects bit 0 - bit 7 for write deskew setting assignment.
+ 0x8 = Selects dbi for write deskew setting assignment.
+ 0x9 = No-op.
+ 0xA = Reuse deskew setting on.
+ 0xB = Reuse deskew setting off.
+ 0xC-0xE = Reserved.
+ 0xF = Bit select reset. Clear write deskew settings to default value 0x40 in each DQ bit.
+ Also sets Vref bypass to off and deskew reuse setting to off. */
+ uint64_t dclk90_fwd : 1; /**< [ 43: 43](WO) When set to one, clock-delay information is forwarded to the neighboring LMC. See LMC CK
+ Lock Initialization step for the LMC bring-up sequence.
+
+ Internal:
+ Generate a one cycle pulse to forward setting. This is a oneshot and clears
+ itself each time it is set. */
+ uint64_t ddr_90_dly_byp : 1; /**< [ 42: 42](R/W) Reserved; must be zero.
+ Internal:
+ Bypass DDR90_DLY in clock tree. */
+ uint64_t dclk90_recal_dis : 1; /**< [ 41: 41](R/W) Disable periodic recalibration of the DDR90 delay line. */
+ uint64_t dclk90_byp_sel : 1; /**< [ 40: 40](R/W) Bypass setting select for DDR90 delay line. */
+ uint64_t dclk90_byp_setting : 9; /**< [ 39: 31](R/W) Bypass setting for DDR90 delay line. */
+ uint64_t dll_fast : 1; /**< [ 30: 30](RO/H) Reserved; must be zero.
+ Internal:
+ DLL lock, 0=DLL locked. */
+ uint64_t dll90_setting : 9; /**< [ 29: 21](RO/H) Reserved; must be zero.
+ Internal:
+ Encoded DLL settings. Works in conjunction with [DLL90_BYTE_SEL]. */
+ uint64_t fine_tune_mode : 1; /**< [ 20: 20](R/W) DLL fine tune mode. 0 = disabled; 1 = enable. When enabled, calibrate internal PHY DLL
+ every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t dll_mode : 1; /**< [ 19: 19](R/W) Reserved; must be zero.
+ Internal:
+ DLL mode. */
+ uint64_t dll90_byte_sel : 4; /**< [ 18: 15](R/W) Observe DLL settings for selected byte.
+ 0x0 = byte 0.
+ 0x1 = byte 1.
+ ...
+ 0x8 = byte 8.
+ 0x9-0xF = Reserved. */
+ uint64_t offset_ena : 1; /**< [ 14: 14](R/W) Reserved; must be zero.
+ Internal:
+ Offset enable. 1=enable. */
+ uint64_t load_offset : 1; /**< [ 13: 13](WO) Reserved; must be zero.
+ Internal:
+ Load offset. 0=disable, 1=generate a one cycle pulse to
+ the PHY. This field is a oneshot and clears itself each time it is set. */
+ uint64_t mode_sel : 2; /**< [ 12: 11](R/W) Reserved; must be zero.
+ Internal:
+ Mode select. 0x0 = reset, 0x1 = write, 0x2 = read, 0x3 =
+ write and read. */
+ uint64_t byte_sel : 4; /**< [ 10: 7](R/W) Reserved; must be zero.
+ Internal:
+ Byte select. 0x0 = no byte, 0x1 = byte 0, ..., 0x9 =
+ byte 8, 0xA = all bytes, 0xB-0xF = Reserved. */
+ uint64_t offset : 7; /**< [ 6: 0](R/W) Reserved; must be zero.
+ Internal:
+ Write/read offset setting. \<5:0\>: offset (not
+ two's-complement), \<6\>: 0 = increment, 1 = decrement. */
+#else /* Word 0 - Little Endian */
+ uint64_t offset : 7; /**< [ 6: 0](R/W) Reserved; must be zero.
+ Internal:
+ Write/read offset setting. \<5:0\>: offset (not
+ two's-complement), \<6\>: 0 = increment, 1 = decrement. */
+ uint64_t byte_sel : 4; /**< [ 10: 7](R/W) Reserved; must be zero.
+ Internal:
+ Byte select. 0x0 = no byte, 0x1 = byte 0, ..., 0x9 =
+ byte 8, 0xA = all bytes, 0xB-0xF = Reserved. */
+ uint64_t mode_sel : 2; /**< [ 12: 11](R/W) Reserved; must be zero.
+ Internal:
+ Mode select. 0x0 = reset, 0x1 = write, 0x2 = read, 0x3 =
+ write and read. */
+ uint64_t load_offset : 1; /**< [ 13: 13](WO) Reserved; must be zero.
+ Internal:
+ Load offset. 0=disable, 1=generate a one cycle pulse to
+ the PHY. This field is a oneshot and clears itself each time it is set. */
+ uint64_t offset_ena : 1; /**< [ 14: 14](R/W) Reserved; must be zero.
+ Internal:
+ Offset enable. 1=enable. */
+ uint64_t dll90_byte_sel : 4; /**< [ 18: 15](R/W) Observe DLL settings for selected byte.
+ 0x0 = byte 0.
+ 0x1 = byte 1.
+ ...
+ 0x8 = byte 8.
+ 0x9-0xF = Reserved. */
+ uint64_t dll_mode : 1; /**< [ 19: 19](R/W) Reserved; must be zero.
+ Internal:
+ DLL mode. */
+ uint64_t fine_tune_mode : 1; /**< [ 20: 20](R/W) DLL fine tune mode. 0 = disabled; 1 = enable. When enabled, calibrate internal PHY DLL
+ every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t dll90_setting : 9; /**< [ 29: 21](RO/H) Reserved; must be zero.
+ Internal:
+ Encoded DLL settings. Works in conjunction with [DLL90_BYTE_SEL]. */
+ uint64_t dll_fast : 1; /**< [ 30: 30](RO/H) Reserved; must be zero.
+ Internal:
+ DLL lock, 0=DLL locked. */
+ uint64_t dclk90_byp_setting : 9; /**< [ 39: 31](R/W) Bypass setting for DDR90 delay line. */
+ uint64_t dclk90_byp_sel : 1; /**< [ 40: 40](R/W) Bypass setting select for DDR90 delay line. */
+ uint64_t dclk90_recal_dis : 1; /**< [ 41: 41](R/W) Disable periodic recalibration of the DDR90 delay line. */
+ uint64_t ddr_90_dly_byp : 1; /**< [ 42: 42](R/W) Reserved; must be zero.
+ Internal:
+ Bypass DDR90_DLY in clock tree. */
+ uint64_t dclk90_fwd : 1; /**< [ 43: 43](WO) When set to one, clock-delay information is forwarded to the neighboring LMC. See LMC CK
+ Lock Initialization step for the LMC bring-up sequence.
+
+ Internal:
+ Generate a one cycle pulse to forward setting. This is a oneshot and clears
+ itself each time it is set. */
+ uint64_t bit_select : 4; /**< [ 47: 44](R/W) 0x0-0x7 = Selects bit 0 - bit 7 for write deskew setting assignment.
+ 0x8 = Selects dbi for write deskew setting assignment.
+ 0x9 = No-op.
+ 0xA = Reuse deskew setting on.
+ 0xB = Reuse deskew setting off.
+ 0xC-0xE = Reserved.
+ 0xF = Bit select reset. Clear write deskew settings to default value 0x40 in each DQ bit.
+ Also sets Vref bypass to off and deskew reuse setting to off. */
+ uint64_t wr_deskew_ld : 1; /**< [ 48: 48](WO) When set, the bit deskew settings in LMC()_DLL_CTL3[OFFSET] get loaded to
+ the designated byte LMC()_DLL_CTL3[BYTE_SEL] and bit LMC()_DLL_CTL3[BIT_SELECT]
+ for write bit deskew. This is a oneshot and clears itself each time
+ it is set. */
+ uint64_t wr_deskew_ena : 1; /**< [ 49: 49](R/W) When set, it enables the write bit deskew feature. */
+ uint64_t reserved_50_63 : 14;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_lmcx_dll_ctl3_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_62_63 : 2;
+ uint64_t rd_deskew_mem_sel_dis : 1; /**< [ 61: 61](R/W) By default, LMC always selects per-rank deskew settings that are stored inside
+ PHY. Set this field to one to manually disable this feature so that the common
+ deskew setting inside the PHY's state machine is selected instead. */
+ uint64_t wr_deskew_mem_sel : 1; /**< [ 60: 60](R/W) Reserved.
+ Internal:
+ Only relevant when [WR_DESKEW_ENA] is set.
+ 0 = Selects the common deskew settings stored in each DQ bit. All writes to any package
+ rank use this common setting to deskew the data bits.
+ 1 = Selects the stored per-package rank deskew settings. Writes to a particular
+ package rank use the corresponding stored setting for that rank. */
+ uint64_t wr_deskew_mem_ld : 1; /**< [ 59: 59](WO) Reserved.
+ Internal:
+ When set, all DQ bit deskew settings in the DDR PHY are loaded into their corresponding
+ rank deskew storage. The rank is chosen by the CSR LMC()_MR_MPR_CTL[MR_WR_RANK]. This is a
+ oneshot operation and clears itself each time it is set. Note that this has to be done
+ during bring-up, before there is any traffic to DRAM. */
+ uint64_t offset : 9; /**< [ 58: 50](R/W) Reserved; must be zero.
+ Internal:
+ Write/read offset setting. \<8:0\>: offset (not
+ two's-complement), \<8\>: 0 = increment, 1 = decrement. */
+ uint64_t wr_deskew_ena : 1; /**< [ 49: 49](R/W) When set, it enables the write bit deskew feature. */
+ uint64_t wr_deskew_ld : 1; /**< [ 48: 48](WO) When set, the bit deskew settings in LMC()_DLL_CTL3[OFFSET] get loaded to
+ the designated byte LMC()_DLL_CTL3[BYTE_SEL] and bit LMC()_DLL_CTL3[BIT_SELECT]
+ for write bit deskew. This is a oneshot and clears itself each time
+ it is set. */
+ uint64_t bit_select : 4; /**< [ 47: 44](R/W) 0x0-0x7 = Selects bit 0 - bit 8 for write deskew setting assignment.
+ 0x8 = Selects dbi for write deskew setting assignment.
+ 0x9 = No-op.
+ 0xA = Reuse deskew setting on.
+ 0xB = Reuse deskew setting off.
+ 0xC = Vref bypass setting load.
+ 0xD = Vref bypass on.
+ 0xE = Vref bypass off.
+ 0xF = Bit select reset. Clear write deskew settings to default value 0x40 in each DQ bit.
+ Also sets Vref bypass to off and deskew reuse setting to off. */
+ uint64_t reserved_31_43 : 13;
+ uint64_t dll_fast : 1; /**< [ 30: 30](RO/H) Reserved; must be zero.
+ Internal:
+ DLL lock, 0=DLL locked. */
+ uint64_t dll90_setting : 9; /**< [ 29: 21](RO/H) Reserved; must be zero.
+ Internal:
+ Encoded DLL settings. Works in conjunction with [DLL90_BYTE_SEL]. */
+ uint64_t fine_tune_mode : 1; /**< [ 20: 20](R/W) DLL fine tune mode. 0 = disabled; 1 = enabled. When enabled, calibrate internal PHY DLL
+ every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t dll_mode : 1; /**< [ 19: 19](R/W) Reserved; must be zero.
+ Internal:
+ DLL mode. */
+ uint64_t dll90_byte_sel : 4; /**< [ 18: 15](R/W) Observe DLL settings for selected byte.
+ 0x0 = byte 0.
+ 0x1 = byte 1.
+ ...
+ 0x8 = ECC byte.
+ 0x9-0xF = Reserved. */
+ uint64_t offset_ena : 1; /**< [ 14: 14](R/W) Reserved; must be zero.
+ Internal:
+ Offset enable. 1=enable. */
+ uint64_t load_offset : 1; /**< [ 13: 13](WO) Reserved; must be zero.
+ Internal:
+ Load offset. 0=disable, 1=generate a one cycle pulse to
+ the PHY. This field is a oneshot and clears itself each time it is set. */
+ uint64_t mode_sel : 2; /**< [ 12: 11](R/W) Reserved; must be zero.
+ Internal:
+ Mode select. 0x0 = reset, 0x1 = write, 0x2 = read, 0x3 =
+ write and read. */
+ uint64_t byte_sel : 4; /**< [ 10: 7](R/W) Reserved; must be zero.
+ Internal:
+ Byte select. 0x0 = no byte, 0x1 = byte 0, ..., 0x9 =
+ byte 8, 0xA = all bytes, 0xB-0xF = Reserved. */
+ uint64_t reserved_0_6 : 7;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_6 : 7;
+ uint64_t byte_sel : 4; /**< [ 10: 7](R/W) Reserved; must be zero.
+ Internal:
+ Byte select. 0x0 = no byte, 0x1 = byte 0, ..., 0x9 =
+ byte 8, 0xA = all bytes, 0xB-0xF = Reserved. */
+ uint64_t mode_sel : 2; /**< [ 12: 11](R/W) Reserved; must be zero.
+ Internal:
+ Mode select. 0x0 = reset, 0x1 = write, 0x2 = read, 0x3 =
+ write and read. */
+ uint64_t load_offset : 1; /**< [ 13: 13](WO) Reserved; must be zero.
+ Internal:
+ Load offset. 0=disable, 1=generate a one cycle pulse to
+ the PHY. This field is a oneshot and clears itself each time it is set. */
+ uint64_t offset_ena : 1; /**< [ 14: 14](R/W) Reserved; must be zero.
+ Internal:
+ Offset enable. 1=enable. */
+ uint64_t dll90_byte_sel : 4; /**< [ 18: 15](R/W) Observe DLL settings for selected byte.
+ 0x0 = byte 0.
+ 0x1 = byte 1.
+ ...
+ 0x8 = ECC byte.
+ 0x9-0xF = Reserved. */
+ uint64_t dll_mode : 1; /**< [ 19: 19](R/W) Reserved; must be zero.
+ Internal:
+ DLL mode. */
+ uint64_t fine_tune_mode : 1; /**< [ 20: 20](R/W) DLL fine tune mode. 0 = disabled; 1 = enabled. When enabled, calibrate internal PHY DLL
+ every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t dll90_setting : 9; /**< [ 29: 21](RO/H) Reserved; must be zero.
+ Internal:
+ Encoded DLL settings. Works in conjunction with [DLL90_BYTE_SEL]. */
+ uint64_t dll_fast : 1; /**< [ 30: 30](RO/H) Reserved; must be zero.
+ Internal:
+ DLL lock, 0=DLL locked. */
+ uint64_t reserved_31_43 : 13;
+ uint64_t bit_select : 4; /**< [ 47: 44](R/W) 0x0-0x7 = Selects bit 0 - bit 8 for write deskew setting assignment.
+ 0x8 = Selects dbi for write deskew setting assignment.
+ 0x9 = No-op.
+ 0xA = Reuse deskew setting on.
+ 0xB = Reuse deskew setting off.
+ 0xC = Vref bypass setting load.
+ 0xD = Vref bypass on.
+ 0xE = Vref bypass off.
+ 0xF = Bit select reset. Clear write deskew settings to default value 0x40 in each DQ bit.
+ Also sets Vref bypass to off and deskew reuse setting to off. */
+ uint64_t wr_deskew_ld : 1; /**< [ 48: 48](WO) When set, the bit deskew settings in LMC()_DLL_CTL3[OFFSET] get loaded to
+ the designated byte LMC()_DLL_CTL3[BYTE_SEL] and bit LMC()_DLL_CTL3[BIT_SELECT]
+ for write bit deskew. This is a oneshot and clears itself each time
+ it is set. */
+ uint64_t wr_deskew_ena : 1; /**< [ 49: 49](R/W) When set, it enables the write bit deskew feature. */
+ uint64_t offset : 9; /**< [ 58: 50](R/W) Reserved; must be zero.
+ Internal:
+ Write/read offset setting. \<8:0\>: offset (not
+ two's-complement), \<8\>: 0 = increment, 1 = decrement. */
+ uint64_t wr_deskew_mem_ld : 1; /**< [ 59: 59](WO) Reserved.
+ Internal:
+ When set, all DQ bit deskew settings in the DDR PHY are loaded into their corresponding
+ rank deskew storage. The rank is chosen by the CSR LMC()_MR_MPR_CTL[MR_WR_RANK]. This is a
+ oneshot operation and clears itself each time it is set. Note that this has to be done
+ during bring-up, before there is any traffic to DRAM. */
+ uint64_t wr_deskew_mem_sel : 1; /**< [ 60: 60](R/W) Reserved.
+ Internal:
+ Only relevant when [WR_DESKEW_ENA] is set.
+ 0 = Selects the common deskew settings stored in each DQ bit. All writes to any package
+ rank use this common setting to deskew the data bits.
+ 1 = Selects the stored per-package rank deskew settings. Writes to a particular
+ package rank use the corresponding stored setting for that rank. */
+ uint64_t rd_deskew_mem_sel_dis : 1; /**< [ 61: 61](R/W) By default, LMC always selects per-rank deskew settings that are stored inside
+ PHY. Set this field to one to manually disable this feature so that the common
+ deskew setting inside the PHY's state machine is selected instead. */
+ uint64_t reserved_62_63 : 2;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_lmcx_dll_ctl3_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_50_63 : 14;
+ uint64_t wr_deskew_ena : 1; /**< [ 49: 49](R/W) When set, it enables the write bit deskew feature. */
+ uint64_t wr_deskew_ld : 1; /**< [ 48: 48](WO) When set, the bit deskew settings in LMC()_DLL_CTL3[OFFSET] get loaded to
+ the designated byte LMC()_DLL_CTL3[BYTE_SEL] and bit LMC()_DLL_CTL3[BIT_SELECT]
+ for write bit deskew. This is a oneshot and clears itself each time
+ it is set. */
+ uint64_t bit_select : 4; /**< [ 47: 44](R/W) 0x0-0x7 = Selects bit 0 - bit 8 for write deskew setting assignment.
+ 0x8 = Selects dbi for write deskew setting assignment.
+ 0x9 = No-op.
+ 0xA = Reuse deskew setting on.
+ 0xB = Reuse deskew setting off.
+ 0xC = Vref bypass setting load.
+ 0xD = Vref bypass on.
+ 0xE = Vref bypass off.
+ 0xF = Bit select reset. Clear write deskew settings to default value 0x40 in each DQ bit.
+ Also sets Vref bypass to off and deskew reuse setting to off. */
+ uint64_t dclk90_fwd : 1; /**< [ 43: 43](WO) When set to one, clock-delay information is forwarded to the neighboring LMC. See LMC CK
+ Lock Initialization step for the LMC bring-up sequence.
+
+ Internal:
+ Generate a one cycle pulse to forward setting. This is a oneshot and clears
+ itself each time it is set. */
+ uint64_t ddr_90_dly_byp : 1; /**< [ 42: 42](R/W) Reserved; must be zero.
+ Internal:
+ Bypass DDR90_DLY in clock tree. */
+ uint64_t dclk90_recal_dis : 1; /**< [ 41: 41](R/W) Disable periodic recalibration of DDR90 delay line. */
+ uint64_t dclk90_byp_sel : 1; /**< [ 40: 40](R/W) Bypass setting select for DDR90 delay line. */
+ uint64_t dclk90_byp_setting : 9; /**< [ 39: 31](R/W) Bypass setting for DDR90 delay line. */
+ uint64_t dll_fast : 1; /**< [ 30: 30](RO/H) Reserved; must be zero.
+ Internal:
+ DLL lock, 0=DLL locked. */
+ uint64_t dll90_setting : 9; /**< [ 29: 21](RO/H) Reserved; must be zero.
+ Internal:
+ Encoded DLL settings. Works in conjunction with [DLL90_BYTE_SEL]. */
+ uint64_t fine_tune_mode : 1; /**< [ 20: 20](R/W) DLL fine tune mode. 0 = disabled; 1 = enabled. When enabled, calibrate internal PHY DLL
+ every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t dll_mode : 1; /**< [ 19: 19](R/W) Reserved; must be zero.
+ Internal:
+ DLL mode. */
+ uint64_t dll90_byte_sel : 4; /**< [ 18: 15](R/W) Observe DLL settings for selected byte.
+ 0x0 = byte 0.
+ 0x1 = byte 1.
+ ...
+ 0x8 = byte 8.
+ 0x9-0xF = Reserved. */
+ uint64_t offset_ena : 1; /**< [ 14: 14](R/W) Reserved; must be zero.
+ Internal:
+ Offset enable. 1=enable. */
+ uint64_t load_offset : 1; /**< [ 13: 13](WO) Reserved; must be zero.
+ Internal:
+ Load offset. 0=disable, 1=generate a one cycle pulse to
+ the PHY. This field is a oneshot and clears itself each time it is set. */
+ uint64_t mode_sel : 2; /**< [ 12: 11](R/W) Reserved; must be zero.
+ Internal:
+ Mode select. 0x0 = reset, 0x1 = write, 0x2 = read, 0x3 =
+ write and read. */
+ uint64_t byte_sel : 4; /**< [ 10: 7](R/W) Reserved; must be zero.
+ Internal:
+ Byte select. 0x0 = no byte, 0x1 = byte 0, ..., 0x9 =
+ byte 8, 0xA = all bytes, 0xB-0xF = Reserved. */
+ uint64_t offset : 7; /**< [ 6: 0](R/W) Reserved; must be zero.
+ Internal:
+ Write/read offset setting. \<5:0\>: offset (not
+ two's-complement), \<5\>: 0 = increment, 1 = decrement. */
+#else /* Word 0 - Little Endian */
+ uint64_t offset : 7; /**< [ 6: 0](R/W) Reserved; must be zero.
+ Internal:
+ Write/read offset setting. \<5:0\>: offset (not
+ two's-complement), \<5\>: 0 = increment, 1 = decrement. */
+ uint64_t byte_sel : 4; /**< [ 10: 7](R/W) Reserved; must be zero.
+ Internal:
+ Byte select. 0x0 = no byte, 0x1 = byte 0, ..., 0x9 =
+ byte 8, 0xA = all bytes, 0xB-0xF = Reserved. */
+ uint64_t mode_sel : 2; /**< [ 12: 11](R/W) Reserved; must be zero.
+ Internal:
+ Mode select. 0x0 = reset, 0x1 = write, 0x2 = read, 0x3 =
+ write and read. */
+ uint64_t load_offset : 1; /**< [ 13: 13](WO) Reserved; must be zero.
+ Internal:
+ Load offset. 0=disable, 1=generate a one cycle pulse to
+ the PHY. This field is a oneshot and clears itself each time it is set. */
+ uint64_t offset_ena : 1; /**< [ 14: 14](R/W) Reserved; must be zero.
+ Internal:
+ Offset enable. 1=enable. */
+ uint64_t dll90_byte_sel : 4; /**< [ 18: 15](R/W) Observe DLL settings for selected byte.
+ 0x0 = byte 0.
+ 0x1 = byte 1.
+ ...
+ 0x8 = byte 8.
+ 0x9-0xF = Reserved. */
+ uint64_t dll_mode : 1; /**< [ 19: 19](R/W) Reserved; must be zero.
+ Internal:
+ DLL mode. */
+ uint64_t fine_tune_mode : 1; /**< [ 20: 20](R/W) DLL fine tune mode. 0 = disabled; 1 = enabled. When enabled, calibrate internal PHY DLL
+ every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
+ uint64_t dll90_setting : 9; /**< [ 29: 21](RO/H) Reserved; must be zero.
+ Internal:
+ Encoded DLL settings. Works in conjunction with [DLL90_BYTE_SEL]. */
+ uint64_t dll_fast : 1; /**< [ 30: 30](RO/H) Reserved; must be zero.
+ Internal:
+ DLL lock, 0=DLL locked. */
+ uint64_t dclk90_byp_setting : 9; /**< [ 39: 31](R/W) Bypass setting for DDR90 delay line. */
+ uint64_t dclk90_byp_sel : 1; /**< [ 40: 40](R/W) Bypass setting select for DDR90 delay line. */
+ uint64_t dclk90_recal_dis : 1; /**< [ 41: 41](R/W) Disable periodic recalibration of DDR90 delay line. */
+ uint64_t ddr_90_dly_byp : 1; /**< [ 42: 42](R/W) Reserved; must be zero.
+ Internal:
+ Bypass DDR90_DLY in clock tree. */
+ uint64_t dclk90_fwd : 1; /**< [ 43: 43](WO) When set to one, clock-delay information is forwarded to the neighboring LMC. See LMC CK
+ Lock Initialization step for the LMC bring-up sequence.
+
+ Internal:
+ Generate a one cycle pulse to forward setting. This is a oneshot and clears
+ itself each time it is set. */
+ uint64_t bit_select : 4; /**< [ 47: 44](R/W) 0x0-0x7 = Selects bit 0 - bit 8 for write deskew setting assignment.
+ 0x8 = Selects dbi for write deskew setting assignment.
+ 0x9 = No-op.
+ 0xA = Reuse deskew setting on.
+ 0xB = Reuse deskew setting off.
+ 0xC = Vref bypass setting load.
+ 0xD = Vref bypass on.
+ 0xE = Vref bypass off.
+ 0xF = Bit select reset. Clear write deskew settings to default value 0x40 in each DQ bit.
+ Also sets Vref bypass to off and deskew reuse setting to off. */
+ uint64_t wr_deskew_ld : 1; /**< [ 48: 48](WO) When set, the bit deskew settings in LMC()_DLL_CTL3[OFFSET] get loaded to
+ the designated byte LMC()_DLL_CTL3[BYTE_SEL] and bit LMC()_DLL_CTL3[BIT_SELECT]
+ for write bit deskew. This is a oneshot and clears itself each time
+ it is set. */
+ uint64_t wr_deskew_ena : 1; /**< [ 49: 49](R/W) When set, it enables the write bit deskew feature. */
+ uint64_t reserved_50_63 : 14;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_lmcx_dll_ctl3_cn81xx cn83xx; */
+ /* struct bdk_lmcx_dll_ctl3_cn81xx cn88xxp2; */
+};
+typedef union bdk_lmcx_dll_ctl3 bdk_lmcx_dll_ctl3_t;
+
+static inline uint64_t BDK_LMCX_DLL_CTL3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_DLL_CTL3(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000218ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000218ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000218ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000218ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_DLL_CTL3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_DLL_CTL3(a) bdk_lmcx_dll_ctl3_t
+#define bustype_BDK_LMCX_DLL_CTL3(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_DLL_CTL3(a) "LMCX_DLL_CTL3"
+#define device_bar_BDK_LMCX_DLL_CTL3(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_DLL_CTL3(a) (a)
+#define arguments_BDK_LMCX_DLL_CTL3(a) (a),-1,-1,-1
+
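+/* Usage sketch (not part of the imported BDK sources; the function name is
+ * hypothetical): a read-modify-write of LMC()_DLL_CTL3 using the
+ * BDK_CSR_INIT/BDK_CSR_MODIFY helpers from bdk-csr.h. The field name comes
+ * from the per-model views above; enabling [FINE_TUNE_MODE] makes the PHY
+ * DLL recalibrate every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
+static inline void example_enable_dll_fine_tune(bdk_node_t node, int lmc)
+{
+ BDK_CSR_INIT(ctl, node, BDK_LMCX_DLL_CTL3(lmc)); /* read the current value */
+ if (!ctl.s.fine_tune_mode)
+ BDK_CSR_MODIFY(c, node, BDK_LMCX_DLL_CTL3(lmc),
+ c.s.fine_tune_mode = 1); /* set bit 20 and write back */
+}
+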
+/**
+ * Register (RSL) lmc#_dual_memcfg
+ *
+ * LMC Dual Memory Configuration Register
+ * This register controls certain parameters of dual-memory configuration.
+ *
+ * This register enables the design to have two separate memory configurations, selected
+ * dynamically by the reference address. Note, however, that both configurations share
+ * LMC()_CONTROL[XOR_BANK], LMC()_CONFIG[PBANK_LSB], LMC()_CONFIG[RANK_ENA], and all
+ * timing parameters.
+ *
+ * In this description:
+ * * config0 refers to the normal memory configuration that is defined by the
+ * LMC()_CONFIG[ROW_LSB] parameter
+ * * config1 refers to the dual (or second) memory configuration that is defined by this
+ * register.
+ */
+union bdk_lmcx_dual_memcfg
+{
+ uint64_t u;
+ struct bdk_lmcx_dual_memcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_19_63 : 45;
+ uint64_t row_lsb : 3; /**< [ 18: 16](R/W) Encoding used to determine which memory address bit position represents the low order DDR
+ ROW address. Refer to
+ LMC()_CONFIG[ROW_LSB].
+ Refer to cache block read transaction example. */
+ uint64_t reserved_4_15 : 12;
+ uint64_t cs_mask : 4; /**< [ 3: 0](R/W) Chip select mask. This mask corresponds to the four chip-select signals for a memory
+ configuration. Each reference address asserts one of the chip-select signals. If that
+ chip select signal has its corresponding [CS_MASK] bit set, then the config1 parameters are
+ used, otherwise the config0 parameters are used. */
+#else /* Word 0 - Little Endian */
+ uint64_t cs_mask : 4; /**< [ 3: 0](R/W) Chip select mask. This mask corresponds to the four chip-select signals for a memory
+ configuration. Each reference address asserts one of the chip-select signals. If that
+ chip select signal has its corresponding [CS_MASK] bit set, then the config1 parameters are
+ used, otherwise the config0 parameters are used. */
+ uint64_t reserved_4_15 : 12;
+ uint64_t row_lsb : 3; /**< [ 18: 16](R/W) Encoding used to determine which memory address bit position represents the low order DDR
+ ROW address. Refer to
+ LMC()_CONFIG[ROW_LSB].
+ Refer to cache block read transaction example. */
+ uint64_t reserved_19_63 : 45;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_dual_memcfg_s cn; */
+};
+typedef union bdk_lmcx_dual_memcfg bdk_lmcx_dual_memcfg_t;
+
+static inline uint64_t BDK_LMCX_DUAL_MEMCFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_DUAL_MEMCFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000098ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000098ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000098ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000098ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_DUAL_MEMCFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_DUAL_MEMCFG(a) bdk_lmcx_dual_memcfg_t
+#define bustype_BDK_LMCX_DUAL_MEMCFG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_DUAL_MEMCFG(a) "LMCX_DUAL_MEMCFG"
+#define device_bar_BDK_LMCX_DUAL_MEMCFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_DUAL_MEMCFG(a) (a)
+#define arguments_BDK_LMCX_DUAL_MEMCFG(a) (a),-1,-1,-1
+
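+/* Illustrative sketch (not from the BDK import; the helper name is
+ * hypothetical): deciding whether a given chip select uses the dual (config1)
+ * parameters, assuming the BDK_CSR_INIT helper from bdk-csr.h. */
+static inline int example_cs_uses_config1(bdk_node_t node, int lmc, int cs)
+{
+ BDK_CSR_INIT(memcfg, node, BDK_LMCX_DUAL_MEMCFG(lmc)); /* read the CSR */
+ /* Each of the four chip selects has one [CS_MASK] bit; a set bit means
+ references asserting that chip select use the config1 parameters. */
+ return (memcfg.s.cs_mask >> (cs & 3)) & 1;
+}
+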
+/**
+ * Register (RSL) lmc#_ecc_parity_test
+ *
+ * LMC ECC Parity Test Registers
+ * This register has bits to control the generation of ECC and command address parity errors.
+ * An ECC error is generated by enabling [ECC_CORRUPT_ENA] and selecting, via
+ * [ECC_CORRUPT_IDX], the index of the dataword within the cacheline to be corrupted.
+ * The user selects which bit of the 128-bit dataword to corrupt by asserting any of the
+ * CHAR_MASK0 and CHAR_MASK2 bits. (CHAR_MASK0 and CHAR_MASK2 correspond to the lower and upper
+ * 64-bit signals that can corrupt any individual bit of the data.)
+ *
+ * Command address parity error is generated by enabling [CA_PARITY_CORRUPT_ENA] and
+ * selecting the DDR command that the parity is to be corrupted with through [CA_PARITY_SEL].
+ */
+union bdk_lmcx_ecc_parity_test
+{
+ uint64_t u;
+ struct bdk_lmcx_ecc_parity_test_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t ecc_corrupt_ena : 1; /**< [ 11: 11](R/W/H) Enables the ECC data corruption. */
+ uint64_t ecc_corrupt_idx : 3; /**< [ 10: 8](R/W) Selects the index of the dataword within the cacheline to be corrupted. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t ca_parity_corrupt_ena : 1; /**< [ 5: 5](R/W/H) Enables the CA parity bit corruption. */
+ uint64_t ca_parity_sel : 5; /**< [ 4: 0](R/W) Selects the type of DDR command to corrupt the parity bit.
+ 0x0 = No command selected.
+ 0x1 = NOP.
+ 0x2 = ACT.
+ 0x3 = REF.
+ 0x4 = WRS4.
+ 0x5 = WRS8.
+ 0x6 = WRAS4.
+ 0x7 = WRAS8.
+ 0x8 = RDS4.
+ 0x9 = RDS8.
+ 0xa = RDAS4.
+ 0xb = RDAS8.
+ 0xc = SRE.
+ 0xd = SRX.
+ 0xe = PRE.
+ 0xf = PREA.
+ 0x10 = MRS.
+ 0x11-0x13 = Reserved.
+ 0x14 = ZQCL.
+ 0x15 = ZQCS.
+ 0x16-0x1F = Reserved. */
+#else /* Word 0 - Little Endian */
+ uint64_t ca_parity_sel : 5; /**< [ 4: 0](R/W) Selects the type of DDR command to corrupt the parity bit.
+ 0x0 = No command selected.
+ 0x1 = NOP.
+ 0x2 = ACT.
+ 0x3 = REF.
+ 0x4 = WRS4.
+ 0x5 = WRS8.
+ 0x6 = WRAS4.
+ 0x7 = WRAS8.
+ 0x8 = RDS4.
+ 0x9 = RDS8.
+ 0xa = RDAS4.
+ 0xb = RDAS8.
+ 0xc = SRE.
+ 0xd = SRX.
+ 0xe = PRE.
+ 0xf = PREA.
+ 0x10 = MRS.
+ 0x11-0x13 = Reserved.
+ 0x14 = ZQCL.
+ 0x15 = ZQCS.
+ 0x16-0x1F = Reserved. */
+ uint64_t ca_parity_corrupt_ena : 1; /**< [ 5: 5](R/W/H) Enables the CA parity bit corruption. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t ecc_corrupt_idx : 3; /**< [ 10: 8](R/W) Selects the index of the dataword within the cacheline to be corrupted. */
+ uint64_t ecc_corrupt_ena : 1; /**< [ 11: 11](R/W/H) Enables the ECC data corruption. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_ecc_parity_test_s cn; */
+};
+typedef union bdk_lmcx_ecc_parity_test bdk_lmcx_ecc_parity_test_t;
+
+static inline uint64_t BDK_LMCX_ECC_PARITY_TEST(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_ECC_PARITY_TEST(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000108ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000108ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000108ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000108ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_ECC_PARITY_TEST", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_ECC_PARITY_TEST(a) bdk_lmcx_ecc_parity_test_t
+#define bustype_BDK_LMCX_ECC_PARITY_TEST(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_ECC_PARITY_TEST(a) "LMCX_ECC_PARITY_TEST"
+#define device_bar_BDK_LMCX_ECC_PARITY_TEST(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_ECC_PARITY_TEST(a) (a)
+#define arguments_BDK_LMCX_ECC_PARITY_TEST(a) (a),-1,-1,-1
+
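+/* Illustrative sketch (not from the BDK import; the helper name is
+ * hypothetical): arming a one-shot ECC corruption on dataword 'idx' of the
+ * cacheline, assuming the BDK_CSR_MODIFY helper from bdk-csr.h. Which data
+ * bit actually flips is chosen separately through the CHAR_MASK0/CHAR_MASK2
+ * registers, per the description above. */
+static inline void example_inject_ecc_error(bdk_node_t node, int lmc, int idx)
+{
+ BDK_CSR_MODIFY(c, node, BDK_LMCX_ECC_PARITY_TEST(lmc),
+ c.s.ecc_corrupt_idx = idx & 7; /* dataword index within the cacheline */
+ c.s.ecc_corrupt_ena = 1); /* enable the ECC data corruption */
+}
+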
+/**
+ * Register (RSL) lmc#_ecc_synd
+ *
+ * LMC MRD ECC Syndromes Register
+ */
+union bdk_lmcx_ecc_synd
+{
+ uint64_t u;
+ struct bdk_lmcx_ecc_synd_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t mrdsyn3 : 8; /**< [ 31: 24](RO/H) MRD ECC syndrome quad 3. [MRDSYN3] corresponds to DQ[63:0]_c1_p1, or in 32-bit mode
+ DQ[31:0]_c3_p1/0, where _cC_pP denotes cycle C and phase P. */
+ uint64_t mrdsyn2 : 8; /**< [ 23: 16](RO/H) MRD ECC syndrome quad 2. [MRDSYN2] corresponds to DQ[63:0]_c1_p0, or in 32-bit mode
+ DQ[31:0]_c2_p1/0, where _cC_pP denotes cycle C and phase P. */
+ uint64_t mrdsyn1 : 8; /**< [ 15: 8](RO/H) MRD ECC syndrome quad 1. [MRDSYN1] corresponds to DQ[63:0]_c0_p1, or in 32-bit mode
+ DQ[31:0]_c1_p1/0, where _cC_pP denotes cycle C and phase P. */
+ uint64_t mrdsyn0 : 8; /**< [ 7: 0](RO/H) MRD ECC syndrome quad 0. [MRDSYN0] corresponds to DQ[63:0]_c0_p0, or in 32-bit mode
+ DQ[31:0]_c0_p1/0, where _cC_pP denotes cycle C and phase P. */
+#else /* Word 0 - Little Endian */
+ uint64_t mrdsyn0 : 8; /**< [ 7: 0](RO/H) MRD ECC syndrome quad 0. [MRDSYN0] corresponds to DQ[63:0]_c0_p0, or in 32-bit mode
+ DQ[31:0]_c0_p1/0, where _cC_pP denotes cycle C and phase P. */
+ uint64_t mrdsyn1 : 8; /**< [ 15: 8](RO/H) MRD ECC syndrome quad 1. [MRDSYN1] corresponds to DQ[63:0]_c0_p1, or in 32-bit mode
+ DQ[31:0]_c1_p1/0, where _cC_pP denotes cycle C and phase P. */
+ uint64_t mrdsyn2 : 8; /**< [ 23: 16](RO/H) MRD ECC syndrome quad 2. [MRDSYN2] corresponds to DQ[63:0]_c1_p0, or in 32-bit mode
+ DQ[31:0]_c2_p1/0, where _cC_pP denotes cycle C and phase P. */
+ uint64_t mrdsyn3 : 8; /**< [ 31: 24](RO/H) MRD ECC syndrome quad 3. [MRDSYN3] corresponds to DQ[63:0]_c1_p1, or in 32-bit mode
+ DQ[31:0]_c3_p1/0, where _cC_pP denotes cycle C and phase P. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_ecc_synd_s cn; */
+};
+typedef union bdk_lmcx_ecc_synd bdk_lmcx_ecc_synd_t;
+
+static inline uint64_t BDK_LMCX_ECC_SYND(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_ECC_SYND(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000038ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000038ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000038ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_ECC_SYND", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_ECC_SYND(a) bdk_lmcx_ecc_synd_t
+#define bustype_BDK_LMCX_ECC_SYND(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_ECC_SYND(a) "LMCX_ECC_SYND"
+#define device_bar_BDK_LMCX_ECC_SYND(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_ECC_SYND(a) (a)
+#define arguments_BDK_LMCX_ECC_SYND(a) (a),-1,-1,-1
+
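+/* Illustrative sketch (not from the BDK import; the helper name is
+ * hypothetical): dumping the four MRD ECC syndromes, e.g. from an ECC error
+ * handler, assuming the BDK_CSR_INIT helper from bdk-csr.h and the BDK's
+ * printf. A nonzero syndrome identifies the failing bit lane in that quad. */
+static inline void example_dump_ecc_syndromes(bdk_node_t node, int lmc)
+{
+ BDK_CSR_INIT(synd, node, BDK_LMCX_ECC_SYND(lmc));
+ printf("LMC%d syndromes: 0x%02x 0x%02x 0x%02x 0x%02x\n", lmc,
+ (int)synd.s.mrdsyn0, (int)synd.s.mrdsyn1,
+ (int)synd.s.mrdsyn2, (int)synd.s.mrdsyn3);
+}
+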
+/**
+ * Register (RSL) lmc#_ext_config
+ *
+ * LMC Extended Configuration Register
+ * This register has additional configuration and control bits for the LMC.
+ */
+union bdk_lmcx_ext_config
+{
+ uint64_t u;
+ struct bdk_lmcx_ext_config_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ref_rank_all : 1; /**< [ 63: 63](R/W) Reserved.
+ Internal:
+ For diagnostic use only.
+ When set, cycles through all ranks during the refresh sequence disregarding
+ rank availability status. */
+ uint64_t ref_mode : 2; /**< [ 62: 61](R/W) Selects the refresh mode.
+ 0x0 = All ranks get refreshed together at the end of TREFI and all traffic is halted.
+ 0x1 = Ranks are refreshed in pairs during TREFI window. At TREFI/2, ranks 1 & 3
+ are refreshed while allowing traffic to 0 & 2. At TREFI, ranks 0 & 2 are
+ refreshed while allowing traffic to 1 & 3.
+ 0x2 = Ranks are refreshed in pairs during TREFI window. All traffic is halted
+ whenever each pair is refreshed. */
+ uint64_t reserved_59_60 : 2;
+ uint64_t mrs_side : 1; /**< [ 58: 58](R/W) Specifies the RDIMM side. Only applies when [MRS_ONE_SIDE] is set.
+ 0 = MRS command is sent to the A side of an RDIMM.
+ 1 = MRS command is sent to the B side of an RDIMM. */
+ uint64_t mrs_one_side : 1; /**< [ 57: 57](R/W) Only applies to DDR4 RDIMM.
+ When set, MRS commands are directed to either the A or B
+ side of the RCD.
+
+ PDA operation is NOT allowed when this bit is set. In
+ other words, LMC()_MR_MPR_CTL[MR_WR_PDA_ENABLE]
+ must be cleared before running MRW sequence with this
+ bit turned on. */
+ uint64_t mrs_bside_invert_disable : 1;/**< [ 56: 56](R/W) When set, the command decoder cancels the auto inversion of
+ A3-A9, A11, A13, A17, BA0, BA1 and BG0 during MRS/MRS_PDA
+ command to the B side of the RDIMM.
+ When set, make sure that the RCD's control word
+ RC00 DA[0] = 1 so that the output inversion is disabled in
+ the DDR4 RCD. */
+ uint64_t dimm_sel_invert_off : 1; /**< [ 55: 55](R/W) During coalesce_address_mode, the default logic would be to invert
+ the pbank bit whenever LMC()_NXM[MEM_MSB_D1_R0] \> LMC()_NXM[MEM_MSB_D0_R0].
+ When this bit is set to one, it disables this default behavior.
+ This configuration has lower priority compared to
+ [DIMM_SEL_FORCE_INVERT]. */
+ uint64_t dimm_sel_force_invert : 1; /**< [ 54: 54](R/W) Reserved.
+ Internal:
+ When set to 1, this bit forces the pbank bit to be inverted
+ when in coalesce_address_mode. That is, pbank value of 0 selects
+ DIMM1 instead of DIMM0.
+ Intended to be used for the case of DIMM1 having bigger rank/s
+ than DIMM0. This bit has priority over [DIMM_SEL_INVERT_OFF]. */
+ uint64_t coalesce_address_mode : 1; /**< [ 53: 53](R/W) When set to one, LMC coalesces the L2C+LMC internal address mapping
+ to create a uniform memory space that is free from holes in
+ between ranks. When different size DIMMs are used, the DIMM with
+ the higher capacity is mapped to the lower address space. */
+ uint64_t dimm1_cid : 2; /**< [ 52: 51](R/W) Reserved.
+ Internal:
+ DIMM1 configuration bits that represent the number of the chip
+ ID of the DRAM. This value is used for decoding the address
+ as well as routing Chip IDs to the appropriate output
+ pins.
+ 0x0 = 0 Chip ID (Mono-Die stack).
+ 0x1 = 1 Chip ID (2H 3DS).
+ 0x2 = 2 Chip IDs (4H 3DS).
+ 0x3 = 3 Chip IDs (8H 3DS). */
+ uint64_t dimm0_cid : 2; /**< [ 50: 49](R/W) Reserved.
+ Internal:
+ DIMM0 configuration bits that represent the number of the chip
+ ID of the DRAM. This value is used for decoding the address
+ as well as routing Chip IDs to the appropriate output
+ pins.
+ 0x0 = 0 Chip ID (Mono-Die stack).
+ 0x1 = 1 Chip ID (2H 3DS).
+ 0x2 = 2 Chip IDs (4H 3DS).
+ 0x3 = 3 Chip IDs (8H 3DS). */
+ uint64_t rcd_parity_check : 1; /**< [ 48: 48](R/W) Enables the one cycle delay of the CA parity output. This MUST be set to one
+ when using DDR4 RDIMM AND parity checking in RCD is enabled (RC0E DA0 = 1). Set
+ this to zero otherwise. To enable the parity checking in RCD, set this bit first
+ BEFORE issuing the RCW write RC0E DA0 = 1. */
+ uint64_t reserved_46_47 : 2;
+ uint64_t error_alert_n_sample : 1; /**< [ 45: 45](RO) Read to get a sample of the DDR*_ERROR_ALERT_L signal. */
+ uint64_t ea_int_polarity : 1; /**< [ 44: 44](R/W) Set to invert the DDR*_ERROR_ALERT_L interrupt polarity. When clear, interrupt is
+ signalled on
+ the rising edge of DDR*_ERROR_ALERT_L. When set, interrupt is signalled on the falling
+ edge of DDR*_ERROR_ALERT_L. */
+ uint64_t sref_seq_stop_clock : 1; /**< [ 43: 43](R/W) When enabled, LMC disables all clock output pins to the DIMM at the end of
+ LMC_SEQ_SEL_E::SREF_ENTRY
+ sequence. In RDIMM applications, this implies that the RCD will be programmed into Clock
+ Stop Power Down mode at the end of the LMC_SEQ_SEL_E::SREF_ENTRY sequence.
+ It also automatically enables all clock outputs at the start of LMC_SEQ_SEL_E::SREF_EXIT sequence. */
+ uint64_t par_addr_mask : 3; /**< [ 42: 40](R/W) Mask applied to parity for address bits \<14:12\>. Clear to exclude these address
+ bits from the parity calculation, necessary if the DRAM device does not have these pins. */
+ uint64_t reserved_38_39 : 2;
+ uint64_t mrs_cmd_override : 1; /**< [ 37: 37](R/W) Set to override the behavior of MRS and RCW operations.
+ If this bit is set, the override behavior is governed by the control field
+ [MRS_CMD_SELECT]. See LMC()_EXT_CONFIG[MRS_CMD_SELECT] for detail.
+
+ If this bit is cleared, select operation where signals other than CS are active before
+ and after the CS_N active cycle (except for the case when interfacing with DDR3 RDIMM). */
+ uint64_t mrs_cmd_select : 1; /**< [ 36: 36](R/W) When [MRS_CMD_OVERRIDE] is set, use this bit to select which style of operation for MRS
+ and
+ RCW commands.
+
+ When this bit is clear, select operation where signals other than CS are active before and
+ after the DDR_CS_L active cycle.
+
+ When this bit is set, select the operation where the other command signals (DDR*_RAS_L,
+ DDR*_CAS_L, DDR*_WE_L, DDR*_A\<15:0\>, etc.) all are active only during the cycle where the
+ DDR_CS_L is also active. */
+ uint64_t reserved_33_35 : 3;
+ uint64_t invert_data : 1; /**< [ 32: 32](R/W) Set this bit to cause all data to be inverted before writing or reading to/from DRAM. This
+ effectively uses the scramble logic to instead invert all the data, so this bit must not
+ be set if data scrambling is enabled. May be useful if data inversion will result in lower
+ power. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t cmd_rti : 1; /**< [ 29: 29](R/W) Set this bit to change the behavior of the LMC to return to a completely idle command (no
+ CS active, no command pins active, and address/bank address/bank group all low) on the
+ interface after an active command, rather than only forcing the CS inactive between
+ commands. */
+ uint64_t cal_ena : 1; /**< [ 28: 28](R/W) Set to cause LMC to operate in CAL mode. First set LMC()_MODEREG_PARAMS3[CAL], then
+ set [CAL_ENA]. */
+ uint64_t reserved_27 : 1;
+ uint64_t par_include_a17 : 1; /**< [ 26: 26](R/W) If set, include A17 in parity calculations in DDR4 mode. */
+ uint64_t par_include_bg1 : 1; /**< [ 25: 25](R/W) If set, include BG1 in parity calculations in DDR4 mode. */
+ uint64_t gen_par : 1; /**< [ 24: 24](R/W) Enable parity generation in the DRAM commands; must be set prior to enabling parity in
+ register or DRAM devices. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t vrefint_seq_deskew : 1; /**< [ 20: 20](R/W) Personality bit to change the operation of what is normally the internal Vref training
+ sequence into the deskew training sequence. */
+ uint64_t read_ena_bprch : 1; /**< [ 19: 19](R/W) Enable pad receiver one cycle longer than normal during read operations. */
+ uint64_t read_ena_fprch : 1; /**< [ 18: 18](R/W) Enable pad receiver starting one cycle earlier than normal during read operations. */
+ uint64_t slot_ctl_reset_force : 1; /**< [ 17: 17](WO) Write 1 to reset the slot-control override for all slot-control registers. After writing a
+ 1 to this bit, slot-control registers will update with changes made to other timing-
+ control registers. This is a one-shot operation; it automatically returns to 0 after a
+ write to 1. */
+ uint64_t ref_int_lsbs : 9; /**< [ 16: 8](R/W) The refresh interval value least significant bits. The default is 0x0.
+ Refresh interval is represented in number of 512 CK cycle increments and is controlled by
+ LMC()_CONFIG[REF_ZQCS_INT]. A more precise refresh interval (in units of
+ one CK cycle), however, can be achieved by setting this field to a nonzero value. */
+ uint64_t drive_ena_bprch : 1; /**< [ 7: 7](R/W) Drive DQx for one cycle longer than normal during write operations. */
+ uint64_t drive_ena_fprch : 1; /**< [ 6: 6](R/W) Drive DQx starting one cycle earlier than normal during write operations. */
+ uint64_t dlcram_flip_synd : 2; /**< [ 5: 4](R/W) Reserved.
+ Internal:
+ DLC RAM flip syndrome control bits. */
+ uint64_t dlcram_cor_dis : 1; /**< [ 3: 3](R/W) Reserved.
+ Internal:
+ DLC RAM correction disable control. */
+ uint64_t dlc_nxm_rd : 1; /**< [ 2: 2](R/W) Reserved.
+ Internal:
+ When set, enable NXM events for HFA read operations.
+ Default is disabled, but
+ could be useful for debug of DLC/DFA accesses. */
+ uint64_t l2c_nxm_rd : 1; /**< [ 1: 1](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
+ loaded for L2C NXM read operations. NXM read operations may occur during normal operation
+ (due to prefetches), so [L2C_NXM_RD] should not be set during normal operation to allow
+ LMC()_INT[NXM_WR_ERR] to indicate NXM writes. */
+ uint64_t l2c_nxm_wr : 1; /**< [ 0: 0](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
+ loaded for L2C NXM write operations. NXM writes are generally an indication of
+ failure, so [L2C_NXM_WR] can generally be set. */
+#else /* Word 0 - Little Endian */
+ uint64_t l2c_nxm_wr : 1; /**< [ 0: 0](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
+ loaded for L2C NXM write operations. NXM writes are generally an indication of
+ failure, so [L2C_NXM_WR] can generally be set. */
+ uint64_t l2c_nxm_rd : 1; /**< [ 1: 1](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
+ loaded for L2C NXM read operations. NXM read operations may occur during normal operation
+ (due to prefetches), so [L2C_NXM_RD] should not be set during normal operation to allow
+ LMC()_INT[NXM_WR_ERR] to indicate NXM writes. */
+ uint64_t dlc_nxm_rd : 1; /**< [ 2: 2](R/W) Reserved.
+ Internal:
+ When set, enable NXM events for HFA read operations.
+ Default is disabled, but
+ could be useful for debug of DLC/DFA accesses. */
+ uint64_t dlcram_cor_dis : 1; /**< [ 3: 3](R/W) Reserved.
+ Internal:
+ DLC RAM correction disable control. */
+ uint64_t dlcram_flip_synd : 2; /**< [ 5: 4](R/W) Reserved.
+ Internal:
+ DLC RAM flip syndrome control bits. */
+ uint64_t drive_ena_fprch : 1; /**< [ 6: 6](R/W) Drive DQx starting one cycle earlier than normal during write operations. */
+ uint64_t drive_ena_bprch : 1; /**< [ 7: 7](R/W) Drive DQx for one cycle longer than normal during write operations. */
+ uint64_t ref_int_lsbs : 9; /**< [ 16: 8](R/W) The refresh interval value least significant bits. The default is 0x0.
+ Refresh interval is represented in number of 512 CK cycle increments and is controlled by
+ LMC()_CONFIG[REF_ZQCS_INT]. A more precise refresh interval (in units of
+ one CK cycle), however, can be achieved by setting this field to a nonzero value. */
+ uint64_t slot_ctl_reset_force : 1; /**< [ 17: 17](WO) Write 1 to reset the slot-control override for all slot-control registers. After writing a
+ 1 to this bit, slot-control registers will update with changes made to other timing-
+ control registers. This is a one-shot operation; it automatically returns to 0 after a
+ write to 1. */
+ uint64_t read_ena_fprch : 1; /**< [ 18: 18](R/W) Enable pad receiver starting one cycle earlier than normal during read operations. */
+ uint64_t read_ena_bprch : 1; /**< [ 19: 19](R/W) Enable pad receiver one cycle longer than normal during read operations. */
+ uint64_t vrefint_seq_deskew : 1; /**< [ 20: 20](R/W) Personality bit to change the operation of what is normally the internal Vref training
+ sequence into the deskew training sequence. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t gen_par : 1; /**< [ 24: 24](R/W) Enable parity generation in the DRAM commands; must be set prior to enabling parity in
+ register or DRAM devices. */
+ uint64_t par_include_bg1 : 1; /**< [ 25: 25](R/W) If set, include BG1 in parity calculations in DDR4 mode. */
+ uint64_t par_include_a17 : 1; /**< [ 26: 26](R/W) If set, include A17 in parity calculations in DDR4 mode. */
+ uint64_t reserved_27 : 1;
+ uint64_t cal_ena : 1; /**< [ 28: 28](R/W) Set to cause LMC to operate in CAL mode. First set LMC()_MODEREG_PARAMS3[CAL], then
+ set [CAL_ENA]. */
+ uint64_t cmd_rti : 1; /**< [ 29: 29](R/W) Set this bit to change the behavior of the LMC to return to a completely idle command (no
+ CS active, no command pins active, and address/bank address/bank group all low) on the
+ interface after an active command, rather than only forcing the CS inactive between
+ commands. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t invert_data : 1; /**< [ 32: 32](R/W) Set this bit to cause all data to be inverted before writing or reading to/from DRAM. This
+ effectively uses the scramble logic to instead invert all the data, so this bit must not
+ be set if data scrambling is enabled. May be useful if data inversion will result in lower
+ power. */
+ uint64_t reserved_33_35 : 3;
+ uint64_t mrs_cmd_select : 1; /**< [ 36: 36](R/W) When [MRS_CMD_OVERRIDE] is set, use this bit to select which style of operation for MRS
+ and
+ RCW commands.
+
+ When this bit is clear, select operation where signals other than CS are active before and
+ after the DDR_CS_L active cycle.
+
+ When this bit is set, select the operation where the other command signals (DDR*_RAS_L,
+ DDR*_CAS_L, DDR*_WE_L, DDR*_A\<15:0\>, etc.) all are active only during the cycle where the
+ DDR_CS_L is also active. */
+ uint64_t mrs_cmd_override : 1; /**< [ 37: 37](R/W) Set to override the behavior of MRS and RCW operations.
+ If this bit is set, the override behavior is governed by the control field
+ [MRS_CMD_SELECT]. See LMC()_EXT_CONFIG[MRS_CMD_SELECT] for detail.
+
+ If this bit is cleared, select operation where signals other than CS are active before
+ and after the CS_N active cycle (except for the case when interfacing with DDR3 RDIMM). */
+ uint64_t reserved_38_39 : 2;
+ uint64_t par_addr_mask : 3; /**< [ 42: 40](R/W) Mask applied to parity for address bits \<14:12\>. Clear to exclude these address
+ bits from the parity calculation, necessary if the DRAM device does not have these pins. */
+ uint64_t sref_seq_stop_clock : 1; /**< [ 43: 43](R/W) When enabled, LMC disables all clock output pins to the DIMM at the end of
+ LMC_SEQ_SEL_E::SREF_ENTRY
+ sequence. In RDIMM applications, this implies that the RCD will be programmed into Clock
+ Stop Power Down mode at the end of the LMC_SEQ_SEL_E::SREF_ENTRY sequence.
+ It also automatically enables all clock outputs at the start of LMC_SEQ_SEL_E::SREF_EXIT sequence. */
+ uint64_t ea_int_polarity : 1; /**< [ 44: 44](R/W) Set to invert the DDR*_ERROR_ALERT_L interrupt polarity. When clear, interrupt is
+ signalled on
+ the rising edge of DDR*_ERROR_ALERT_L. When set, interrupt is signalled on the falling
+ edge of DDR*_ERROR_ALERT_L. */
+ uint64_t error_alert_n_sample : 1; /**< [ 45: 45](RO) Read to get a sample of the DDR*_ERROR_ALERT_L signal. */
+ uint64_t reserved_46_47 : 2;
+ uint64_t rcd_parity_check : 1; /**< [ 48: 48](R/W) Enables the one cycle delay of the CA parity output. This MUST be set to one
+ when using DDR4 RDIMM AND parity checking in RCD is enabled (RC0E DA0 = 1). Set
+ this to zero otherwise. To enable the parity checking in RCD, set this bit first
+ BEFORE issuing the RCW write RC0E DA0 = 1. */
+ uint64_t dimm0_cid : 2; /**< [ 50: 49](R/W) Reserved.
+ Internal:
+ DIMM0 configuration bits that represent the number of the chip
+ ID of the DRAM. This value is used for decoding the address
+ as well as routing Chip IDs to the appropriate output
+ pins.
+ 0x0 = 0 Chip ID (Mono-Die stack).
+ 0x1 = 1 Chip ID (2H 3DS).
+ 0x2 = 2 Chip IDs (4H 3DS).
+ 0x3 = 3 Chip IDs (8H 3DS). */
+ uint64_t dimm1_cid : 2; /**< [ 52: 51](R/W) Reserved.
+ Internal:
+ DIMM1 configuration bits that represent the number of the chip
+ ID of the DRAM. This value is used for decoding the address
+ as well as routing Chip IDs to the appropriate output
+ pins.
+ 0x0 = 0 Chip ID (Mono-Die stack).
+ 0x1 = 1 Chip ID (2H 3DS).
+ 0x2 = 2 Chip IDs (4H 3DS).
+ 0x3 = 3 Chip IDs (8H 3DS). */
+ uint64_t coalesce_address_mode : 1; /**< [ 53: 53](R/W) When set to one, LMC coalesces the L2C+LMC internal address mapping
+ to create a uniform memory space that is free from holes in
+ between ranks. When different size DIMMs are used, the DIMM with
+ the higher capacity is mapped to the lower address space. */
+ uint64_t dimm_sel_force_invert : 1; /**< [ 54: 54](R/W) Reserved.
+ Internal:
+ When set to 1, this bit forces the pbank bit to be inverted
+ when in coalesce_address_mode. That is, pbank value of 0 selects
+ DIMM1 instead of DIMM0.
+ Intended to be used for the case of DIMM1 having bigger rank/s
+ than DIMM0. This bit has priority over [DIMM_SEL_INVERT_OFF]. */
+ uint64_t dimm_sel_invert_off : 1; /**< [ 55: 55](R/W) During coalesce_address_mode, the default logic would be to invert
+ the pbank bit whenever LMC()_NXM[MEM_MSB_D1_R0] \> LMC()_NXM[MEM_MSB_D0_R0].
+ When this bit is set to one, it disables this default behavior.
+ This configuration has lower priority compared to
+ [DIMM_SEL_FORCE_INVERT]. */
+ uint64_t mrs_bside_invert_disable : 1;/**< [ 56: 56](R/W) When set, the command decoder cancels the auto inversion of
+ A3-A9, A11, A13, A17, BA0, BA1 and BG0 during MRS/MRS_PDA
+ command to the B side of the RDIMM.
+ When set, make sure that the RCD's control word
+ RC00 DA[0] = 1 so that the output inversion is disabled in
+ the DDR4 RCD. */
+ uint64_t mrs_one_side : 1; /**< [ 57: 57](R/W) Only applies to DDR4 RDIMM.
+ When set, MRS commands are directed to either the A or B
+ side of the RCD.
+
+ PDA operation is NOT allowed when this bit is set. In
+ other words, LMC()_MR_MPR_CTL[MR_WR_PDA_ENABLE]
+ must be cleared before running MRW sequence with this
+ bit turned on. */
+ uint64_t mrs_side : 1; /**< [ 58: 58](R/W) Specifies the RDIMM side. Only applies when [MRS_ONE_SIDE] is set.
+ 0 = MRS command is sent to the A side of an RDIMM.
+ 1 = MRS command is sent to the B side of an RDIMM. */
+ uint64_t reserved_59_60 : 2;
+ uint64_t ref_mode : 2; /**< [ 62: 61](R/W) Selects the refresh mode.
+ 0x0 = All ranks get refreshed together at the end of TREFI and all traffic is halted.
+ 0x1 = Ranks are refreshed in pairs during TREFI window. At TREFI/2, ranks 1 & 3
+ are refreshed while allowing traffic to 0 & 2. At TREFI, ranks 0 & 2 are
+ refreshed while allowing traffic to 1 & 3.
+ 0x2 = Ranks are refreshed in pairs during TREFI window. All traffic is halted
+ whenever each pair is refreshed. */
+ uint64_t ref_rank_all : 1; /**< [ 63: 63](R/W) Reserved.
+ Internal:
+ For diagnostic use only.
+ When set, cycles through all ranks during the refresh sequence disregarding
+ rank availability status. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_ext_config_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ref_rank_all : 1; /**< [ 63: 63](R/W) Reserved.
+ Internal:
+ For diagnostic use only.
+ When set, cycles through all ranks during the refresh sequence disregarding
+ rank availability status. */
+ uint64_t ref_mode : 2; /**< [ 62: 61](R/W) Selects the refresh mode.
+ 0x0 = All ranks get refreshed together at the end of TREFI and all traffic is halted.
+ 0x1 = Ranks are refreshed in pairs during TREFI window. At TREFI/2, ranks 1 & 3
+ are refreshed while allowing traffic to 0 & 2. At TREFI, ranks 0 & 2 are
+ refreshed while allowing traffic to 1 & 3.
+ 0x2 = Ranks are refreshed in pairs during TREFI window. All traffic is halted
+ whenever each pair is refreshed. */
+ uint64_t ref_block : 1; /**< [ 60: 60](R/W) When set, LMC is blocked from initiating any refresh sequence. LMC then only
+ allows refresh sequence to start when LMC()_REF_STATUS[REF_COUNT0] or
+ LMC()_REF_STATUS[REF_COUNT1] has reached the maximum value of 0x7. */
+ uint64_t bc4_dqs_ena : 1; /**< [ 59: 59](R/W) Reserved.
+ Internal:
+ For diagnostic use only.
+ 0 = LMC produces the full bursts of DQS transitions,
+ even for BC4 Write ops.
+ 1 = LMC produces only three cycles of DQS transitions
+ every time it sends out a BC4 Write operation. */
+ uint64_t mrs_side : 1; /**< [ 58: 58](R/W) Specifies the RDIMM side. Only applies when [MRS_ONE_SIDE] is set.
+ 0 = MRS command is sent to the A side of an RDIMM.
+ 1 = MRS command is sent to the B side of an RDIMM. */
+ uint64_t mrs_one_side : 1; /**< [ 57: 57](R/W) Only applies to DDR4 RDIMM.
+ When set, MRS commands are directed to either the A or B
+ side of the RCD.
+
+ PDA operation is NOT allowed when this bit is set. In
+ other words, LMC()_MR_MPR_CTL[MR_WR_PDA_ENABLE]
+ must be cleared before running MRW sequence with this
+ bit turned on. */
+ uint64_t mrs_bside_invert_disable : 1;/**< [ 56: 56](R/W) When set, the command decoder cancels the auto inversion of
+ A3-A9, A11, A13, A17, BA0, BA1 and BG0 during MRS/MRS_PDA
+ command to the B side of the RDIMM.
+ When set, make sure that the RCD's control word
+ RC00 DA[0] = 1 so that the output inversion is disabled in
+ the DDR4 RCD. */
+ uint64_t dimm_sel_invert_off : 1; /**< [ 55: 55](R/W) During coalesce_address_mode, the default logic would be to invert
+ the pbank bit whenever LMC()_NXM[MEM_MSB_D1_R0] \> LMC()_NXM[MEM_MSB_D0_R0].
+ When this bit is set to one, it disables this default behavior.
+ This configuration has lower priority compared to
+ [DIMM_SEL_FORCE_INVERT]. */
+ uint64_t dimm_sel_force_invert : 1; /**< [ 54: 54](R/W) When set to one, this bit forces the pbank bit to be inverted
+ when in coalesce_address_mode. That is, pbank value of zero selects
+ DIMM1 instead of DIMM0.
+ Intended to be used for the case of DIMM1 having bigger rank/s
+ than DIMM0. This bit has priority over [DIMM_SEL_INVERT_OFF]. */
+ uint64_t coalesce_address_mode : 1; /**< [ 53: 53](R/W) When set to one, LMC coalesces the L2C+LMC internal address mapping
+ to create a uniform memory space that is free from holes in
+ between ranks. When different size DIMMs are used, the DIMM with
+ the higher capacity is mapped to the lower address space. */
+ uint64_t dimm1_cid : 2; /**< [ 52: 51](R/W) Reserved.
+ Internal:
+ DIMM1 configuration bits that represent the number of the chip
+ ID of the DRAM. This value is used for decoding the address
+ as well as routing Chip IDs to the appropriate output
+ pins.
+ 0x0 = 0 Chip ID (Mono-Die stack).
+ 0x1 = 1 Chip ID (2H 3DS).
+ 0x2 = 2 Chip IDs (4H 3DS).
+ 0x3 = 3 Chip IDs (8H 3DS). */
+ uint64_t dimm0_cid : 2; /**< [ 50: 49](R/W) Reserved.
+ Internal:
+ DIMM0 configuration bits that represent the number of the chip
+ ID of the DRAM. This value is used for decoding the address
+ as well as routing Chip IDs to the appropriate output
+ pins.
+ 0x0 = 0 Chip ID (Mono-Die stack).
+ 0x1 = 1 Chip ID (2H 3DS).
+ 0x2 = 2 Chip IDs (4H 3DS).
+ 0x3 = 3 Chip IDs (8H 3DS). */
+ uint64_t rcd_parity_check : 1; /**< [ 48: 48](R/W) Enables the one cycle delay of the CA parity output. This MUST be set to one
+ when using DDR4 RDIMM AND parity checking in RCD is enabled (RC0E DA0 = 1). Set
+ this to zero otherwise. To enable the parity checking in RCD, set this bit first
+ BEFORE issuing the RCW write RC0E DA0 = 1. */
+ uint64_t reserved_46_47 : 2;
+ uint64_t error_alert_n_sample : 1; /**< [ 45: 45](RO) Read to get a sample of the DDR*_ERROR_ALERT_L signal. */
+ uint64_t ea_int_polarity : 1; /**< [ 44: 44](R/W) Set to invert the DDR*_ERROR_ALERT_L interrupt polarity. When clear, interrupt is
+ signalled on
+ the rising edge of DDR*_ERROR_ALERT_L. When set, interrupt is signalled on the falling
+ edge of DDR*_ERROR_ALERT_L. */
+ uint64_t sref_seq_stop_clock : 1; /**< [ 43: 43](R/W) When enabled, LMC disables all clock output pins to the DIMM at the end of
+ LMC_SEQ_SEL_E::SREF_ENTRY
+ sequence. In RDIMM applications, this implies that the RCD will be programmed into Clock
+ Stop Power Down mode at the end of the LMC_SEQ_SEL_E::SREF_ENTRY sequence.
+ It also automatically enables all clock outputs at the start of LMC_SEQ_SEL_E::SREF_EXIT sequence. */
+ uint64_t par_addr_mask : 3; /**< [ 42: 40](R/W) Mask applied to parity for address bits \<14:12\>. Clear to exclude these address
+ bits from the parity calculation, necessary if the DRAM device does not have these pins. */
+ uint64_t reserved_38_39 : 2;
+ uint64_t mrs_cmd_override : 1; /**< [ 37: 37](R/W) Set to override the behavior of MRS and RCW operations.
+ If this bit is set, the override behavior is governed by the control field
+ [MRS_CMD_SELECT]. See LMC()_EXT_CONFIG[MRS_CMD_SELECT] for detail.
+
+ If this bit is cleared, select operation where signals other than CS are active before
+ and after the DDR_CS_L active cycle. */
+ uint64_t mrs_cmd_select : 1; /**< [ 36: 36](R/W) When [MRS_CMD_OVERRIDE] is set, use this bit to select which style of operation for MRS
+ and RCW commands.
+
+ When this bit is clear, select operation where signals other than CS are active before and
+ after the DDR_CS_L active cycle.
+
+ When this bit is set, select the operation where the other command signals (DDR*_RAS_L,
+ DDR*_CAS_L, DDR*_WE_L, DDR*_A\<15:0\>, etc.) all are active only during the cycle where the
+ DDR_CS_L is also active. */
+ uint64_t reserved_33_35 : 3;
+ uint64_t invert_data : 1; /**< [ 32: 32](R/W) Set this bit to cause all data to be inverted before writing or reading to/from DRAM. May
+ be useful if data inversion will result in lower power. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t cmd_rti : 1; /**< [ 29: 29](R/W) Set this bit to change the behavior of the LMC to return to a completely idle command (no
+ CS active, no command pins active, and address/bank address/bank group all low) on the
+ interface after an active command, rather than only forcing the CS inactive between
+ commands. */
+ uint64_t cal_ena : 1; /**< [ 28: 28](R/W) Set to cause LMC to operate in CAL mode. First set LMC()_MODEREG_PARAMS3[CAL], then
+ set [CAL_ENA]. */
+ uint64_t reserved_27 : 1;
+ uint64_t par_include_a17 : 1; /**< [ 26: 26](R/W) If set, include A17 in parity calculations in DDR4 mode. */
+ uint64_t par_include_bg1 : 1; /**< [ 25: 25](R/W) If set, include BG1 in parity calculations in DDR4 mode. */
+ uint64_t gen_par : 1; /**< [ 24: 24](R/W) Enable parity generation in the DRAM commands; must be set prior to enabling parity in
+ register or DRAM devices. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t vrefint_seq_deskew : 1; /**< [ 20: 20](R/W) Personality bit to change the operation of what is normally the internal Vref training
+ sequence into the deskew training sequence. */
+ uint64_t read_ena_bprch : 1; /**< [ 19: 19](R/W) Enable pad receiver one cycle longer than normal during read operations. */
+ uint64_t read_ena_fprch : 1; /**< [ 18: 18](R/W) Enable pad receiver starting one cycle earlier than normal during read operations. */
+ uint64_t slot_ctl_reset_force : 1; /**< [ 17: 17](WO) Write 1 to reset the slot-control override for all slot-control registers. After writing a
+ 1 to this bit, slot-control registers will update with changes made to other timing-
+ control registers. This is a one-shot operation; it automatically returns to 0 after a
+ write to 1. */
+ uint64_t ref_int_lsbs : 9; /**< [ 16: 8](R/W) The refresh interval value least significant bits. The default is 0x0.
+ Refresh interval is represented in number of 512 CK cycle increments and is controlled by
+ LMC()_CONFIG[REF_ZQCS_INT]. A more precise refresh interval (in units of
+ one CK cycle), however, can be achieved by setting this field to a nonzero value. */
+ uint64_t drive_ena_bprch : 1; /**< [ 7: 7](R/W) Drive DQx for one cycle longer than normal during write operations. */
+ uint64_t drive_ena_fprch : 1; /**< [ 6: 6](R/W) Drive DQx starting one cycle earlier than normal during write operations. */
+ uint64_t reserved_3_5 : 3;
+ uint64_t dlc_nxm_rd : 1; /**< [ 2: 2](R/W) Reserved.
+ Internal:
+ When set, enable NXM events for HFA read operations.
+ Default is disabled, but
+ could be useful for debug of DLC/DFA accesses. */
+ uint64_t l2c_nxm_rd : 1; /**< [ 1: 1](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
+ loaded for L2C NXM read operations. NXM read operations may occur during normal operation
+ (due to prefetches), so [L2C_NXM_RD] should not be set during normal operation to allow
+ LMC()_INT[NXM_WR_ERR] to indicate NXM writes. */
+ uint64_t l2c_nxm_wr : 1; /**< [ 0: 0](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
+ loaded for L2C NXM write operations. NXM writes are generally an indication of
+ failure, so [L2C_NXM_WR] can generally be set. */
+#else /* Word 0 - Little Endian */
+ uint64_t l2c_nxm_wr : 1; /**< [ 0: 0](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
+ loaded for L2C NXM write operations. NXM writes are generally an indication of
+ failure, so [L2C_NXM_WR] can generally be set. */
+ uint64_t l2c_nxm_rd : 1; /**< [ 1: 1](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
+ loaded for L2C NXM read operations. NXM read operations may occur during normal operation
+ (due to prefetches), so [L2C_NXM_RD] should not be set during normal operation to allow
+ LMC()_INT[NXM_WR_ERR] to indicate NXM writes. */
+ uint64_t dlc_nxm_rd : 1; /**< [ 2: 2](R/W) Reserved.
+ Internal:
+ When set, enable NXM events for HFA read operations.
+ Default is disabled, but
+ could be useful for debug of DLC/DFA accesses. */
+ uint64_t reserved_3_5 : 3;
+ uint64_t drive_ena_fprch : 1; /**< [ 6: 6](R/W) Drive DQx starting one cycle earlier than normal during write operations. */
+ uint64_t drive_ena_bprch : 1; /**< [ 7: 7](R/W) Drive DQx for one cycle longer than normal during write operations. */
+ uint64_t ref_int_lsbs : 9; /**< [ 16: 8](R/W) The refresh interval value least significant bits. The default is 0x0.
+ Refresh interval is represented in number of 512 CK cycle increments and is controlled by
+ LMC()_CONFIG[REF_ZQCS_INT]. A more precise refresh interval (in units of
+ one CK cycle), however, can be achieved by setting this field to a nonzero value. */
+ uint64_t slot_ctl_reset_force : 1; /**< [ 17: 17](WO) Write 1 to reset the slot-control override for all slot-control registers. After writing a
+ 1 to this bit, slot-control registers will update with changes made to other timing-
+ control registers. This is a one-shot operation; it automatically returns to 0 after a
+ write to 1. */
+ uint64_t read_ena_fprch : 1; /**< [ 18: 18](R/W) Enable pad receiver starting one cycle earlier than normal during read operations. */
+ uint64_t read_ena_bprch : 1; /**< [ 19: 19](R/W) Enable pad receiver one cycle longer than normal during read operations. */
+ uint64_t vrefint_seq_deskew : 1; /**< [ 20: 20](R/W) Personality bit to change the operation of what is normally the internal Vref training
+ sequence into the deskew training sequence. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t gen_par : 1; /**< [ 24: 24](R/W) Enable parity generation in the DRAM commands; must be set prior to enabling parity in
+ register or DRAM devices. */
+ uint64_t par_include_bg1 : 1; /**< [ 25: 25](R/W) If set, include BG1 in parity calculations in DDR4 mode. */
+ uint64_t par_include_a17 : 1; /**< [ 26: 26](R/W) If set, include A17 in parity calculations in DDR4 mode. */
+ uint64_t reserved_27 : 1;
+ uint64_t cal_ena : 1; /**< [ 28: 28](R/W) Set to cause LMC to operate in CAL mode. First set LMC()_MODEREG_PARAMS3[CAL], then
+ set [CAL_ENA]. */
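+ /* Usage sketch for the required ordering, assuming the BDK's generic
+ BDK_CSR_MODIFY() accessor from bdk-csr.h; 'node' and 'lmc' are caller-supplied:
+
+ BDK_CSR_MODIFY(c, node, BDK_LMCX_MODEREG_PARAMS3(lmc),
+ c.s.cal = 1); // 1. enable CAL in MODEREG_PARAMS3 first
+ BDK_CSR_MODIFY(c, node, BDK_LMCX_EXT_CONFIG(lmc),
+ c.s.cal_ena = 1); // 2. only then put LMC into CAL mode
+ */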
+ uint64_t cmd_rti : 1; /**< [ 29: 29](R/W) Set this bit to change the behavior of the LMC to return to a completely idle command (no
+ CS active, no command pins active, and address/bank address/bank group all low) on the
+ interface after an active command, rather than only forcing the CS inactive between
+ commands. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t invert_data : 1; /**< [ 32: 32](R/W) Set this bit to cause all data to be inverted before writing or reading to/from DRAM. May
+ be useful if data inversion will result in lower power. */
+ uint64_t reserved_33_35 : 3;
+ uint64_t mrs_cmd_select : 1; /**< [ 36: 36](R/W) When [MRS_CMD_OVERRIDE] is set, this bit selects the style of operation used for MRS
+ and RCW commands.
+
+ When this bit is clear, select operation where signals other than CS are active before and
+ after the DDR_CS_L active cycle.
+
+ When this bit is set, select the operation where the other command signals (DDR*_RAS_L,
+ DDR*_CAS_L, DDR*_WE_L, DDR*_A\<15:0\>, etc.) all are active only during the cycle where the
+ DDR_CS_L is also active. */
+ uint64_t mrs_cmd_override : 1; /**< [ 37: 37](R/W) Set to override the behavior of MRS and RCW operations.
+ If this bit is set, the override behavior is governed by the control field
+ [MRS_CMD_SELECT]. See LMC()_EXT_CONFIG[MRS_CMD_SELECT] for detail.
+
+ If this bit is cleared, select operation where signals other than CS are active before
+ and after the DDR_CS_L active cycle. */
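+ /* Sketch of enabling the CS-only-cycle style described above (same assumed
+ BDK_CSR_MODIFY() accessor as in the [CAL_ENA] note):
+
+ BDK_CSR_MODIFY(c, node, BDK_LMCX_EXT_CONFIG(lmc),
+ c.s.mrs_cmd_select = 1; // command pins active only with DDR_CS_L
+ c.s.mrs_cmd_override = 1); // then take the override path
+ */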
+ uint64_t reserved_38_39 : 2;
+ uint64_t par_addr_mask : 3; /**< [ 42: 40](R/W) Mask applied to parity for address bits \<14:12\>. Clear to exclude these address
+ bits from the parity calculation, necessary if the DRAM device does not have these pins. */
+ uint64_t sref_seq_stop_clock : 1; /**< [ 43: 43](R/W) When enabled, LMC disables all clock output pins to the DIMM at the end of
+ the LMC_SEQ_SEL_E::SREF_ENTRY sequence. In RDIMM applications, this implies that the RCD
+ will be programmed into Clock Stop Power Down mode at the end of the
+ LMC_SEQ_SEL_E::SREF_ENTRY sequence. It also automatically enables all clock
+ outputs at the start of the LMC_SEQ_SEL_E::SREF_EXIT sequence. */
+ uint64_t ea_int_polarity : 1; /**< [ 44: 44](R/W) Set to invert the DDR*_ERROR_ALERT_L interrupt polarity. When clear, interrupt is
+ signalled on
+ the rising edge of DDR*_ERROR_ALERT_L. When set, interrupt is signalled on the falling
+ edge of DDR*_ERROR_ALERT_L. */
+ uint64_t error_alert_n_sample : 1; /**< [ 45: 45](RO) Read to get a sample of the DDR*_ERROR_ALERT_L signal. */
+ uint64_t reserved_46_47 : 2;
+ uint64_t rcd_parity_check : 1; /**< [ 48: 48](R/W) Enables the one cycle delay of the CA parity output. This MUST be set to one
+ when using DDR4 RDIMM AND parity checking in RCD is enabled (RC0E DA0 = 1). Set
+ this to zero otherwise. To enable parity checking in the RCD, set this bit
+ BEFORE issuing the RCW write RC0E DA0 = 1. */
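+ /* Ordering sketch for the constraint above (assumed accessor as before; the
+ RCW write itself is issued by separate LMC sequence code, not shown here):
+
+ BDK_CSR_MODIFY(c, node, BDK_LMCX_EXT_CONFIG(lmc),
+ c.s.rcd_parity_check = 1); // 1. enable the one-cycle CA parity delay
+ // 2. ...then issue the RCW write that sets RC0E DA0 = 1
+ */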
+ uint64_t dimm0_cid : 2; /**< [ 50: 49](R/W) Reserved.
+ Internal:
+ DIMM0 configuration bits that represent the number of chip
+ IDs of the DRAM. This value is used for decoding the address
+ as well as routing Chip IDs to the appropriate output
+ pins.
+ 0x0 = 0 Chip ID (Mono-Die stack).
+ 0x1 = 1 Chip ID (2H 3DS).
+ 0x2 = 2 Chip IDs (4H 3DS).
+ 0x3 = 3 Chip IDs (8H 3DS). */
+ uint64_t dimm1_cid : 2; /**< [ 52: 51](R/W) Reserved.
+ Internal:
+ DIMM1 configuration bits that represent the number of chip
+ IDs of the DRAM. This value is used for decoding the address
+ as well as routing Chip IDs to the appropriate output
+ pins.
+ 0x0 = 0 Chip ID (Mono-Die stack).
+ 0x1 = 1 Chip ID (2H 3DS).
+ 0x2 = 2 Chip IDs (4H 3DS).
+ 0x3 = 3 Chip IDs (8H 3DS). */
+ uint64_t coalesce_address_mode : 1; /**< [ 53: 53](R/W) When set to one, LMC coalesces the L2C+LMC internal address mapping
+ to create a uniform memory space that is free from holes in
+ between ranks. When different size DIMMs are used, the DIMM with
+ the higher capacity is mapped to the lower address space. */
+ uint64_t dimm_sel_force_invert : 1; /**< [ 54: 54](R/W) When set to one, this bit forces the pbank bit to be inverted
+ when in coalesce_address_mode. That is, pbank value of zero selects
+ DIMM1 instead of DIMM0.
+ Intended for the case where DIMM1 has larger rank(s)
+ than DIMM0. This bit has priority over [DIMM_SEL_INVERT_OFF]. */
+ uint64_t dimm_sel_invert_off : 1; /**< [ 55: 55](R/W) During coalesce_address_mode, the default logic would be to invert
+ the pbank bit whenever LMC()_NXM[MEM_MSB_D1_R0] \> LMC()_NXM[MEM_MSB_D0_R0].
+ When this bit is set to one, it disables this default behavior.
+ This configuration has lower priority compared to
+ [DIMM_SEL_FORCE_INVERT]. */
+ uint64_t mrs_bside_invert_disable : 1;/**< [ 56: 56](R/W) When set, the command decoder cancels the auto inversion of
+ A3-A9, A11, A13, A17, BA0, BA1 and BG0 during MRS/MRS_PDA
+ command to the B side of the RDIMM.
+ When set, make sure that the RCD's control word
+ RC00 DA[0] = 1 so that the output inversion is disabled in
+ the DDR4 RCD. */
+ uint64_t mrs_one_side : 1; /**< [ 57: 57](R/W) Only applies to DDR4 RDIMM.
+ When set, MRS commands are directed to either the A or B
+ side of the RCD.
+
+ PDA operation is NOT allowed when this bit is set. In
+ other words, LMC()_MR_MPR_CTL[MR_WR_PDA_ENABLE]
+ must be cleared before running the MRW sequence with this
+ bit turned on. */
+ uint64_t mrs_side : 1; /**< [ 58: 58](R/W) Specifies the RDIMM side. Only applies when [MRS_ONE_SIDE] is set.
+ 0 = MRS command is sent to the A side of an RDIMM.
+ 1 = MRS command is sent to the B side of an RDIMM. */
+ uint64_t bc4_dqs_ena : 1; /**< [ 59: 59](R/W) Reserved.
+ Internal:
+ For diagnostic use only.
+ 0 = LMC produces the full bursts of DQS transitions,
+ even for BC4 Write ops.
+ 1 = LMC produces only three cycles of DQS transitions
+ every time it sends out a BC4 Write operation. */
+ uint64_t ref_block : 1; /**< [ 60: 60](R/W) When set, LMC is blocked from initiating any refresh sequence. LMC then only
+ allows a refresh sequence to start when LMC()_REF_STATUS[REF_COUNT0] or
+ LMC()_REF_STATUS[REF_COUNT1] has reached the maximum value of 0x7. */
+ uint64_t ref_mode : 2; /**< [ 62: 61](R/W) Selects the refresh mode.
+ 0x0 = All ranks get refreshed together at the end of TREFI and all traffic is halted.
+ 0x1 = Ranks are refreshed in pairs during TREFI window. At TREFI/2, ranks 1 & 3
+ are refreshed while allowing traffic to 0 & 2. At TREFI, ranks 0 & 2 are
+ refreshed while allowing traffic to 1 & 3.
+ 0x2 = Ranks are refreshed in pairs during TREFI window. All traffic is halted
+ whenever each pair is refreshed. */
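+ /* Compact timeline for [REF_MODE] = 0x1 (pair-wise refresh with traffic):
+ t = TREFI/2 : refresh ranks 1 & 3, traffic allowed to ranks 0 & 2
+ t = TREFI : refresh ranks 0 & 2, traffic allowed to ranks 1 & 3 */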
+ uint64_t ref_rank_all : 1; /**< [ 63: 63](R/W) Reserved.
+ Internal:
+ For diagnostic use only.
+ When set, cycles through all ranks during the refresh sequence disregarding
+ rank availability status. */
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_lmcx_ext_config_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_61_63 : 3;
+ uint64_t bc4_dqs_ena : 1; /**< [ 60: 60](R/W) Reserved.
+ Internal:
+ For diagnostic use only.
+ 0 = LMC produces the full bursts of DQS transitions,
+ even for BC4 Write ops.
+ 1 = LMC produces only three cycles of DQS transitions
+ every time it sends out a BC4 Write operation. */
+ uint64_t ref_block : 1; /**< [ 59: 59](R/W) When set, LMC is blocked from initiating any refresh sequence. LMC then only
+ allows a refresh sequence to start when LMC()_REF_STATUS[REF_COUNT] has
+ reached the maximum value of 0x7. */
+ uint64_t mrs_side : 1; /**< [ 58: 58](R/W) Specifies the RDIMM side. Only applies when [MRS_ONE_SIDE] is set.
+ 0 = MRS command is sent to the A side of an RDIMM.
+ 1 = MRS command is sent to the B side of an RDIMM. */
+ uint64_t mrs_one_side : 1; /**< [ 57: 57](R/W) Only applies to DDR4 RDIMM.
+ When set, MRS commands are directed to either the A or B
+ side of the RCD.
+
+ PDA operation is NOT allowed when this bit is set. In
+ other words, LMC()_MR_MPR_CTL[MR_WR_PDA_ENABLE]
+ must be cleared before running the MRW sequence with this
+ bit turned on. */
+ uint64_t mrs_bside_invert_disable : 1;/**< [ 56: 56](R/W) When set, the command decoder cancels the auto inversion of
+ A3-A9, A11, A13, A17, BA0, BA1 and BG0 during MRS/MRS_PDA
+ command to the B side of the RDIMM.
+ When set, make sure that the RCD's control word
+ RC00 DA[0] = 1 so that the output inversion is disabled in
+ the DDR4 RCD. */
+ uint64_t dimm_sel_invert_off : 1; /**< [ 55: 55](R/W) During coalesce_address_mode, the default logic would be to invert
+ the pbank bit whenever LMC()_NXM[MEM_MSB_D1_R0] \> LMC()_NXM[MEM_MSB_D0_R0].
+ When this bit is set to one, it disables this default behavior.
+ This configuration has lower priority compared to
+ [DIMM_SEL_FORCE_INVERT]. */
+ uint64_t dimm_sel_force_invert : 1; /**< [ 54: 54](R/W) Reserved.
+ Internal:
+ When set to 1, this bit forces the pbank bit to be inverted
+ when in coalesce_address_mode. That is, pbank value of 0 selects
+ DIMM1 instead of DIMM0.
+ Intended for the case where DIMM1 has larger rank(s)
+ than DIMM0. This bit has priority over [DIMM_SEL_INVERT_OFF]. */
+ uint64_t coalesce_address_mode : 1; /**< [ 53: 53](R/W) When set to one, LMC coalesces the L2C+LMC internal address mapping
+ to create a uniform memory space that is free from holes in
+ between ranks. When different size DIMMs are used, the DIMM with
+ the higher capacity is mapped to the lower address space. */
+ uint64_t dimm1_cid : 2; /**< [ 52: 51](R/W) Reserved.
+ Internal:
+ DIMM1 configuration bits that represent the number of chip
+ IDs of the DRAM. This value is used for decoding the address
+ as well as routing Chip IDs to the appropriate output
+ pins.
+ 0x0 = 0 Chip ID (Mono-Die stack).
+ 0x1 = 1 Chip ID (2H 3DS).
+ 0x2 = 2 Chip IDs (4H 3DS).
+ 0x3 = 3 Chip IDs (8H 3DS). */
+ uint64_t dimm0_cid : 2; /**< [ 50: 49](R/W) Reserved.
+ Internal:
+ DIMM0 configuration bits that represent the number of chip
+ IDs of the DRAM. This value is used for decoding the address
+ as well as routing Chip IDs to the appropriate output
+ pins.
+ 0x0 = 0 Chip ID (Mono-Die stack).
+ 0x1 = 1 Chip ID (2H 3DS).
+ 0x2 = 2 Chip IDs (4H 3DS).
+ 0x3 = 3 Chip IDs (8H 3DS). */
+ uint64_t rcd_parity_check : 1; /**< [ 48: 48](R/W) Enables the one cycle delay of the CA parity output. This MUST be set to one
+ when using DDR4 RDIMM AND parity checking in RCD is enabled (RC0E DA0 = 1). Set
+ this to zero otherwise. To enable parity checking in the RCD, set this bit
+ BEFORE issuing the RCW write RC0E DA0 = 1. */
+ uint64_t reserved_46_47 : 2;
+ uint64_t error_alert_n_sample : 1; /**< [ 45: 45](RO) Read to get a sample of the DDR*_ERROR_ALERT_L signal. */
+ uint64_t ea_int_polarity : 1; /**< [ 44: 44](R/W) Set to invert the DDR*_ERROR_ALERT_L interrupt polarity. When clear, interrupt is
+ signalled on
+ the rising edge of DDR*_ERROR_ALERT_L. When set, interrupt is signalled on the falling
+ edge of DDR*_ERROR_ALERT_L. */
+ uint64_t reserved_43 : 1;
+ uint64_t par_addr_mask : 3; /**< [ 42: 40](R/W) Mask applied to parity for address bits \<14:12\>. Clear to exclude these address
+ bits from the parity calculation, necessary if the DRAM device does not have these pins. */
+ uint64_t reserved_38_39 : 2;
+ uint64_t mrs_cmd_override : 1; /**< [ 37: 37](R/W) Set to override the behavior of MRS and RCW operations.
+ If this bit is set, the override behavior is governed by the control field
+ [MRS_CMD_SELECT]. See LMC()_EXT_CONFIG[MRS_CMD_SELECT] for detail.
+
+ If this bit is cleared, select operation where signals other than CS are active before
+ and after the CS_N active cycle (except for the case when interfacing with DDR3 RDIMM). */
+ uint64_t mrs_cmd_select : 1; /**< [ 36: 36](R/W) When [MRS_CMD_OVERRIDE] is set, this bit selects the style of operation used for MRS
+ and RCW commands.
+
+ When this bit is clear, select operation where signals other than CS are active before and
+ after the DDR_CS_L active cycle.
+
+ When this bit is set, select the operation where the other command signals (DDR*_RAS_L,
+ DDR*_CAS_L, DDR*_WE_L, DDR*_A\<15:0\>, etc.) all are active only during the cycle where the
+ DDR_CS_L is also active. */
+ uint64_t reserved_33_35 : 3;
+ uint64_t invert_data : 1; /**< [ 32: 32](R/W) Set this bit to cause all data to be inverted before writing or reading to/from DRAM. This
+ effectively uses the scramble logic to instead invert all the data, so this bit must not
+ be set if data scrambling is enabled. May be useful if data inversion will result in lower
+ power. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t cmd_rti : 1; /**< [ 29: 29](R/W) Set this bit to change the behavior of the LMC to return to a completely idle command (no
+ CS active, no command pins active, and address/bank address/bank group all low) on the
+ interface after an active command, rather than only forcing the CS inactive between
+ commands. */
+ uint64_t cal_ena : 1; /**< [ 28: 28](R/W) Set to cause LMC to operate in CAL mode. First set LMC()_MODEREG_PARAMS3[CAL], then
+ set [CAL_ENA]. */
+ uint64_t reserved_27 : 1;
+ uint64_t par_include_a17 : 1; /**< [ 26: 26](R/W) If set, include A17 in parity calculations in DDR4 mode. */
+ uint64_t par_include_bg1 : 1; /**< [ 25: 25](R/W) If set, include BG1 in parity calculations in DDR4 mode. */
+ uint64_t gen_par : 1; /**< [ 24: 24](R/W) Enable parity generation in the DRAM commands; must be set prior to enabling parity in
+ register or DRAM devices. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t vrefint_seq_deskew : 1; /**< [ 20: 20](R/W) Personality bit to change the operation of what is normally the internal Vref training
+ sequence into the deskew training sequence. */
+ uint64_t read_ena_bprch : 1; /**< [ 19: 19](R/W) Enable pad receiver one cycle longer than normal during read operations. */
+ uint64_t read_ena_fprch : 1; /**< [ 18: 18](R/W) Enable pad receiver starting one cycle earlier than normal during read operations. */
+ uint64_t slot_ctl_reset_force : 1; /**< [ 17: 17](WO) Write 1 to reset the slot-control override for all slot-control registers. After writing a
+ 1 to this bit, slot-control registers will update with changes made to other timing-
+ control registers. This is a one-shot operation; it automatically returns to 0 after a
+ write to 1. */
+ uint64_t ref_int_lsbs : 9; /**< [ 16: 8](R/W) Least significant bits of the refresh interval value. The default is 0x0.
+ The refresh interval is represented in increments of 512 CK cycles and is controlled by
+ LMC()_CONFIG[REF_ZQCS_INT]. A more precise refresh interval (down to a granularity of
+ one CK cycle) can be achieved by setting this field to a nonzero value. */
+ uint64_t drive_ena_bprch : 1; /**< [ 7: 7](R/W) Drive DQx for one cycle longer than normal during write operations. */
+ uint64_t drive_ena_fprch : 1; /**< [ 6: 6](R/W) Drive DQx starting one cycle earlier than normal during write operations. */
+ uint64_t dlcram_flip_synd : 2; /**< [ 5: 4](R/W) Reserved.
+ Internal:
+ DLC RAM flip syndrome control bits. */
+ uint64_t dlcram_cor_dis : 1; /**< [ 3: 3](R/W) Reserved.
+ Internal:
+ DLC RAM correction disable control. */
+ uint64_t dlc_nxm_rd : 1; /**< [ 2: 2](R/W) Reserved.
+ Internal:
+ When set, enable NXM events for HFA read operations.
+ Default is disabled, but
+ could be useful for debug of DLC/DFA accesses. */
+ uint64_t l2c_nxm_rd : 1; /**< [ 1: 1](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
+ loaded for L2C NXM read operations. NXM read operations may occur during normal operation
+ (due to prefetches), so [L2C_NXM_RD] should not be set during normal operation to allow
+ LMC()_INT[NXM_WR_ERR] to indicate NXM writes. */
+ uint64_t l2c_nxm_wr : 1; /**< [ 0: 0](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
+ loaded for L2C NXM write operations. NXM writes are generally an indication of
+ failure, so [L2C_NXM_WR] can generally be set. */
+#else /* Word 0 - Little Endian */
+ uint64_t l2c_nxm_wr : 1; /**< [ 0: 0](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
+ loaded for L2C NXM write operations. NXM writes are generally an indication of
+ failure, so [L2C_NXM_WR] can generally be set. */
+ uint64_t l2c_nxm_rd : 1; /**< [ 1: 1](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
+ loaded for L2C NXM read operations. NXM read operations may occur during normal operation
+ (due to prefetches), so [L2C_NXM_RD] should not be set during normal operation to allow
+ LMC()_INT[NXM_WR_ERR] to indicate NXM writes. */
+ uint64_t dlc_nxm_rd : 1; /**< [ 2: 2](R/W) Reserved.
+ Internal:
+ When set, enable NXM events for HFA read operations.
+ Default is disabled, but
+ could be useful for debug of DLC/DFA accesses. */
+ uint64_t dlcram_cor_dis : 1; /**< [ 3: 3](R/W) Reserved.
+ Internal:
+ DLC RAM correction disable control. */
+ uint64_t dlcram_flip_synd : 2; /**< [ 5: 4](R/W) Reserved.
+ Internal:
+ DLC RAM flip syndrome control bits. */
+ uint64_t drive_ena_fprch : 1; /**< [ 6: 6](R/W) Drive DQx starting one cycle earlier than normal during write operations. */
+ uint64_t drive_ena_bprch : 1; /**< [ 7: 7](R/W) Drive DQx for one cycle longer than normal during write operations. */
+ uint64_t ref_int_lsbs : 9; /**< [ 16: 8](R/W) Least significant bits of the refresh interval value. The default is 0x0.
+ The refresh interval is represented in increments of 512 CK cycles and is controlled by
+ LMC()_CONFIG[REF_ZQCS_INT]. A more precise refresh interval (down to a granularity of
+ one CK cycle) can be achieved by setting this field to a nonzero value. */
+ uint64_t slot_ctl_reset_force : 1; /**< [ 17: 17](WO) Write 1 to reset the slot-control override for all slot-control registers. After writing a
+ 1 to this bit, slot-control registers will update with changes made to other timing-
+ control registers. This is a one-shot operation; it automatically returns to 0 after a
+ write to 1. */
+ uint64_t read_ena_fprch : 1; /**< [ 18: 18](R/W) Enable pad receiver starting one cycle earlier than normal during read operations. */
+ uint64_t read_ena_bprch : 1; /**< [ 19: 19](R/W) Enable pad receiver one cycle longer than normal during read operations. */
+ uint64_t vrefint_seq_deskew : 1; /**< [ 20: 20](R/W) Personality bit to change the operation of what is normally the internal Vref training
+ sequence into the deskew training sequence. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t gen_par : 1; /**< [ 24: 24](R/W) Enable parity generation in the DRAM commands; must be set prior to enabling parity in
+ register or DRAM devices. */
+ uint64_t par_include_bg1 : 1; /**< [ 25: 25](R/W) If set, include BG1 in parity calculations in DDR4 mode. */
+ uint64_t par_include_a17 : 1; /**< [ 26: 26](R/W) If set, include A17 in parity calculations in DDR4 mode. */
+ uint64_t reserved_27 : 1;
+ uint64_t cal_ena : 1; /**< [ 28: 28](R/W) Set to cause LMC to operate in CAL mode. First set LMC()_MODEREG_PARAMS3[CAL], then
+ set [CAL_ENA]. */
+ uint64_t cmd_rti : 1; /**< [ 29: 29](R/W) Set this bit to change the behavior of the LMC to return to a completely idle command (no
+ CS active, no command pins active, and address/bank address/bank group all low) on the
+ interface after an active command, rather than only forcing the CS inactive between
+ commands. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t invert_data : 1; /**< [ 32: 32](R/W) Set this bit to cause all data to be inverted before writing or reading to/from DRAM. This
+ effectively uses the scramble logic to instead invert all the data, so this bit must not
+ be set if data scrambling is enabled. May be useful if data inversion will result in lower
+ power. */
+ uint64_t reserved_33_35 : 3;
+ uint64_t mrs_cmd_select : 1; /**< [ 36: 36](R/W) When [MRS_CMD_OVERRIDE] is set, this bit selects the style of operation used for MRS
+ and RCW commands.
+
+ When this bit is clear, select operation where signals other than CS are active before and
+ after the DDR_CS_L active cycle.
+
+ When this bit is set, select the operation where the other command signals (DDR*_RAS_L,
+ DDR*_CAS_L, DDR*_WE_L, DDR*_A\<15:0\>, etc.) all are active only during the cycle where the
+ DDR_CS_L is also active. */
+ uint64_t mrs_cmd_override : 1; /**< [ 37: 37](R/W) Set to override the behavior of MRS and RCW operations.
+ If this bit is set, the override behavior is governed by the control field
+ [MRS_CMD_SELECT]. See LMC()_EXT_CONFIG[MRS_CMD_SELECT] for detail.
+
+ If this bit is cleared, select operation where signals other than CS are active before
+ and after the CS_N active cycle (except for the case when interfacing with DDR3 RDIMM). */
+ uint64_t reserved_38_39 : 2;
+ uint64_t par_addr_mask : 3; /**< [ 42: 40](R/W) Mask applied to parity for address bits \<14:12\>. Clear to exclude these address
+ bits from the parity calculation, necessary if the DRAM device does not have these pins. */
+ uint64_t reserved_43 : 1;
+ uint64_t ea_int_polarity : 1; /**< [ 44: 44](R/W) Set to invert the DDR*_ERROR_ALERT_L interrupt polarity. When clear, interrupt is
+ signalled on
+ the rising edge of DDR*_ERROR_ALERT_L. When set, interrupt is signalled on the falling
+ edge of DDR*_ERROR_ALERT_L. */
+ uint64_t error_alert_n_sample : 1; /**< [ 45: 45](RO) Read to get a sample of the DDR*_ERROR_ALERT_L signal. */
+ uint64_t reserved_46_47 : 2;
+ uint64_t rcd_parity_check : 1; /**< [ 48: 48](R/W) Enables the one cycle delay of the CA parity output. This MUST be set to one
+ when using DDR4 RDIMM AND parity checking in RCD is enabled (RC0E DA0 = 1). Set
+ this to zero otherwise. To enable parity checking in the RCD, set this bit
+ BEFORE issuing the RCW write RC0E DA0 = 1. */
+ uint64_t dimm0_cid : 2; /**< [ 50: 49](R/W) Reserved.
+ Internal:
+ DIMM0 configuration bits that represent the number of chip
+ IDs of the DRAM. This value is used for decoding the address
+ as well as routing Chip IDs to the appropriate output
+ pins.
+ 0x0 = 0 Chip ID (Mono-Die stack).
+ 0x1 = 1 Chip ID (2H 3DS).
+ 0x2 = 2 Chip IDs (4H 3DS).
+ 0x3 = 3 Chip IDs (8H 3DS). */
+ uint64_t dimm1_cid : 2; /**< [ 52: 51](R/W) Reserved.
+ Internal:
+ DIMM1 configuration bits that represent the number of chip
+ IDs of the DRAM. This value is used for decoding the address
+ as well as routing Chip IDs to the appropriate output
+ pins.
+ 0x0 = 0 Chip ID (Mono-Die stack).
+ 0x1 = 1 Chip ID (2H 3DS).
+ 0x2 = 2 Chip IDs (4H 3DS).
+ 0x3 = 3 Chip IDs (8H 3DS). */
+ uint64_t coalesce_address_mode : 1; /**< [ 53: 53](R/W) When set to one, LMC coalesces the L2C+LMC internal address mapping
+ to create a uniform memory space that is free from holes in
+ between ranks. When different size DIMMs are used, the DIMM with
+ the higher capacity is mapped to the lower address space. */
+ uint64_t dimm_sel_force_invert : 1; /**< [ 54: 54](R/W) Reserved.
+ Internal:
+ When set to 1, this bit forces the pbank bit to be inverted
+ when in coalesce_address_mode. That is, pbank value of 0 selects
+ DIMM1 instead of DIMM0.
+ Intended for the case where DIMM1 has larger rank(s)
+ than DIMM0. This bit has priority over [DIMM_SEL_INVERT_OFF]. */
+ uint64_t dimm_sel_invert_off : 1; /**< [ 55: 55](R/W) During coalesce_address_mode, the default logic would be to invert
+ the pbank bit whenever LMC()_NXM[MEM_MSB_D1_R0] \> LMC()_NXM[MEM_MSB_D0_R0].
+ When this bit is set to one, it disables this default behavior.
+ This configuration has lower priority compared to
+ [DIMM_SEL_FORCE_INVERT]. */
+ uint64_t mrs_bside_invert_disable : 1;/**< [ 56: 56](R/W) When set, the command decoder cancels the auto inversion of
+ A3-A9, A11, A13, A17, BA0, BA1 and BG0 during MRS/MRS_PDA
+ command to the B side of the RDIMM.
+ When set, make sure that the RCD's control word
+ RC00 DA[0] = 1 so that the output inversion is disabled in
+ the DDR4 RCD. */
+ uint64_t mrs_one_side : 1; /**< [ 57: 57](R/W) Only applies to DDR4 RDIMM.
+ When set, MRS commands are directed to either the A or B
+ side of the RCD.
+
+ PDA operation is NOT allowed when this bit is set. In
+ other words, LMC()_MR_MPR_CTL[MR_WR_PDA_ENABLE]
+ must be cleared before running the MRW sequence with this
+ bit turned on. */
+ uint64_t mrs_side : 1; /**< [ 58: 58](R/W) Specifies the RDIMM side. Only applies when [MRS_ONE_SIDE] is set.
+ 0 = MRS command is sent to the A side of an RDIMM.
+ 1 = MRS command is sent to the B side of an RDIMM. */
+ uint64_t ref_block : 1; /**< [ 59: 59](R/W) When set, LMC is blocked from initiating any refresh sequence. LMC then only
+ allows a refresh sequence to start when LMC()_REF_STATUS[REF_COUNT] has
+ reached the maximum value of 0x7. */
+ uint64_t bc4_dqs_ena : 1; /**< [ 60: 60](R/W) Reserved.
+ Internal:
+ For diagnostic use only.
+ 0 = LMC produces the full bursts of DQS transitions,
+ even for BC4 Write ops.
+ 1 = LMC produces only three cycles of DQS transitions
+ every time it sends out a BC4 Write operation. */
+ uint64_t reserved_61_63 : 3;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_lmcx_ext_config_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_61_63 : 3;
+ uint64_t bc4_dqs_ena : 1; /**< [ 60: 60](RO) Reserved. */
+ uint64_t ref_block : 1; /**< [ 59: 59](R/W) When set, LMC is blocked from initiating any refresh sequence. LMC then only
+ allows a refresh sequence to start when LMC()_REF_STATUS[REF_COUNT] has
+ reached the maximum value of 0x7. */
+ uint64_t mrs_side : 1; /**< [ 58: 58](R/W) Specifies the RDIMM side. Only applies when [MRS_ONE_SIDE] is set.
+ 0 = MRS command is sent to the A side of an RDIMM.
+ 1 = MRS command is sent to the B side of an RDIMM. */
+ uint64_t mrs_one_side : 1; /**< [ 57: 57](R/W) Only applies to DDR4 RDIMM.
+ When set, MRS commands are directed to either the A or B
+ side of the RCD.
+
+ PDA operation is NOT allowed when this bit is set. In
+ other words, LMC()_MR_MPR_CTL[MR_WR_PDA_ENABLE]
+ must be cleared before running the MRW sequence with this
+ bit turned on. */
+ uint64_t mrs_bside_invert_disable : 1;/**< [ 56: 56](R/W) When set, the command decoder cancels the auto inversion of
+ A3-A9, A11, A13, A17, BA0, BA1 and BG0 during MRS/MRS_PDA
+ command to the B side of the RDIMM.
+ When set, make sure that the RCD's control word
+ RC00 DA[0] = 1 so that the output inversion is disabled in
+ the DDR4 RCD. */
+ uint64_t dimm_sel_invert_off : 1; /**< [ 55: 55](R/W) During coalesce_address_mode, the default logic would be to invert
+ the pbank bit whenever LMC()_NXM[MEM_MSB_D1_R0] \> LMC()_NXM[MEM_MSB_D0_R0].
+ When this bit is set to one, it disables this default behavior.
+ This configuration has lower priority compared to
+ [DIMM_SEL_FORCE_INVERT]. */
+ uint64_t dimm_sel_force_invert : 1; /**< [ 54: 54](R/W) When set to one, this bit forces the pbank bit to be inverted
+ when in coalesce_address_mode. That is, pbank value of zero selects
+ DIMM1 instead of DIMM0.
+ Intended for the case where DIMM1 has larger rank(s)
+ than DIMM0. This bit has priority over [DIMM_SEL_INVERT_OFF]. */
+ uint64_t coalesce_address_mode : 1; /**< [ 53: 53](R/W) When set to one, LMC coalesces the L2C+LMC internal address mapping
+ to create a uniform memory space that is free from holes in
+ between ranks. When different size DIMMs are used, the DIMM with
+ the higher capacity is mapped to the lower address space. */
+ uint64_t dimm1_cid : 2; /**< [ 52: 51](R/W) Reserved.
+ Internal:
+ DIMM1 configuration bits that represent the number of chip
+ IDs of the DRAM. This value is used for decoding the address
+ as well as routing Chip IDs to the appropriate output
+ pins.
+ 0x0 = 0 Chip ID (Mono-Die stack).
+ 0x1 = 1 Chip ID (2H 3DS).
+ 0x2 = 2 Chip IDs (4H 3DS).
+ 0x3 = 3 Chip IDs (8H 3DS). */
+ uint64_t dimm0_cid : 2; /**< [ 50: 49](R/W) Reserved.
+ Internal:
+ DIMM0 configuration bits that represent the number of chip
+ IDs of the DRAM. This value is used for decoding the address
+ as well as routing Chip IDs to the appropriate output
+ pins.
+ 0x0 = 0 Chip ID (Mono-Die stack).
+ 0x1 = 1 Chip ID (2H 3DS).
+ 0x2 = 2 Chip IDs (4H 3DS).
+ 0x3 = 3 Chip IDs (8H 3DS). */
+ uint64_t rcd_parity_check : 1; /**< [ 48: 48](R/W) Enables the one cycle delay of the CA parity output. This MUST be set to one
+ when using DDR4 RDIMM AND parity checking in RCD is enabled (RC0E DA0 = 1). Set
+ this to zero otherwise. To enable parity checking in the RCD, set this bit
+ BEFORE issuing the RCW write RC0E DA0 = 1. */
+ uint64_t reserved_46_47 : 2;
+ uint64_t error_alert_n_sample : 1; /**< [ 45: 45](RO) Read to get a sample of the DDR*_ERROR_ALERT_L signal. */
+ uint64_t ea_int_polarity : 1; /**< [ 44: 44](R/W) Set to invert the DDR*_ERROR_ALERT_L interrupt polarity. When clear, interrupt is
+ signalled on
+ the rising edge of DDR*_ERROR_ALERT_L. When set, interrupt is signalled on the falling
+ edge of DDR*_ERROR_ALERT_L. */
+ uint64_t reserved_43 : 1;
+ uint64_t par_addr_mask : 3; /**< [ 42: 40](R/W) Mask applied to parity for address bits \<14:12\>. Clear to exclude these address
+ bits from the parity calculation, necessary if the DRAM device does not have these pins. */
+ uint64_t reserved_38_39 : 2;
+ uint64_t mrs_cmd_override : 1; /**< [ 37: 37](R/W) Set to override the behavior of MRS and RCW operations.
+ If this bit is set, the override behavior is governed by the control field
+ [MRS_CMD_SELECT]. See LMC()_EXT_CONFIG[MRS_CMD_SELECT] for detail.
+
+ If this bit is cleared, select operation where signals other than CS are active before
+ and after the CS_N active cycle (except for the case when interfacing with DDR3 RDIMM). */
+ uint64_t mrs_cmd_select : 1; /**< [ 36: 36](R/W) When [MRS_CMD_OVERRIDE] is set, this bit selects the style of operation used for MRS
+ and RCW commands.
+
+ When this bit is clear, select operation where signals other than CS are active before and
+ after the DDR_CS_L active cycle.
+
+ When this bit is set, select the operation where the other command signals (DDR*_RAS_L,
+ DDR*_CAS_L, DDR*_WE_L, DDR*_A\<15:0\>, etc.) all are active only during the cycle where the
+ DDR_CS_L is also active. */
+ uint64_t reserved_33_35 : 3;
+ uint64_t invert_data : 1; /**< [ 32: 32](R/W) Set this bit to cause all data to be inverted before writing or reading to/from DRAM. This
+ effectively uses the scramble logic to instead invert all the data, so this bit must not
+ be set if data scrambling is enabled. May be useful if data inversion will result in lower
+ power. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t cmd_rti : 1; /**< [ 29: 29](R/W) Set this bit to change the behavior of the LMC to return to a completely idle command (no
+ CS active, no command pins active, and address/bank address/bank group all low) on the
+ interface after an active command, rather than only forcing the CS inactive between
+ commands. */
+ uint64_t cal_ena : 1; /**< [ 28: 28](R/W) Set to cause LMC to operate in CAL mode. First set LMC()_MODEREG_PARAMS3[CAL], then
+ set [CAL_ENA]. */
+ uint64_t reserved_27 : 1;
+ uint64_t par_include_a17 : 1; /**< [ 26: 26](R/W) If set, include A17 in parity calculations in DDR4 mode. */
+ uint64_t par_include_bg1 : 1; /**< [ 25: 25](R/W) If set, include BG1 in parity calculations in DDR4 mode. */
+ uint64_t gen_par : 1; /**< [ 24: 24](R/W) Enable parity generation in the DRAM commands; must be set prior to enabling parity in
+ register or DRAM devices. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t vrefint_seq_deskew : 1; /**< [ 20: 20](R/W) Personality bit to change the operation of what is normally the internal Vref training
+ sequence into the deskew training sequence. */
+ uint64_t read_ena_bprch : 1; /**< [ 19: 19](R/W) Enable pad receiver one cycle longer than normal during read operations. */
+ uint64_t read_ena_fprch : 1; /**< [ 18: 18](R/W) Enable pad receiver starting one cycle earlier than normal during read operations. */
+ uint64_t slot_ctl_reset_force : 1; /**< [ 17: 17](WO) Write 1 to reset the slot-control override for all slot-control registers. After writing a
+ 1 to this bit, slot-control registers will update with changes made to other timing-
+ control registers. This is a one-shot operation; it automatically returns to 0 after a
+ write to 1. */
+ uint64_t ref_int_lsbs : 9; /**< [ 16: 8](R/W) Least significant bits of the refresh interval value. The default is 0x0.
+ The refresh interval is represented in increments of 512 CK cycles and is controlled by
+ LMC()_CONFIG[REF_ZQCS_INT]. A more precise refresh interval (down to a granularity of
+ one CK cycle) can be achieved by setting this field to a nonzero value. */
+ uint64_t drive_ena_bprch : 1; /**< [ 7: 7](R/W) Drive DQx for one cycle longer than normal during write operations. */
+ uint64_t drive_ena_fprch : 1; /**< [ 6: 6](R/W) Drive DQx starting one cycle earlier than normal during write operations. */
+ uint64_t dlcram_flip_synd : 2; /**< [ 5: 4](R/W) Reserved.
+ Internal:
+ DLC RAM flip syndrome control bits. */
+ uint64_t dlcram_cor_dis : 1; /**< [ 3: 3](R/W) Reserved.
+ Internal:
+ DLC RAM correction disable control. */
+ uint64_t dlc_nxm_rd : 1; /**< [ 2: 2](R/W) When set, enable NXM events for HFA read operations.
+ Internal:
+ Default is disabled, but
+ could be useful for debug of DLC/DFA accesses. */
+ uint64_t l2c_nxm_rd : 1; /**< [ 1: 1](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
+ loaded for L2C NXM read operations. NXM read operations may occur during normal operation
+ (due to prefetches), so [L2C_NXM_RD] should not be set during normal operation to allow
+ LMC()_INT[NXM_WR_ERR] to indicate NXM writes. */
+ uint64_t l2c_nxm_wr : 1; /**< [ 0: 0](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
+ loaded for L2C NXM write operations. NXM writes are generally an indication of
+ failure, so [L2C_NXM_WR] can generally be set. */
+#else /* Word 0 - Little Endian */
+ uint64_t l2c_nxm_wr : 1; /**< [ 0: 0](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
+ loaded for L2C NXM write operations. NXM writes are generally an indication of
+ failure, so [L2C_NXM_WR] can generally be set. */
+ uint64_t l2c_nxm_rd : 1; /**< [ 1: 1](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
+ loaded for L2C NXM read operations. NXM read operations may occur during normal operation
+ (due to prefetches), so [L2C_NXM_RD] should not be set during normal operation to allow
+ LMC()_INT[NXM_WR_ERR] to indicate NXM writes. */
+ uint64_t dlc_nxm_rd : 1; /**< [ 2: 2](R/W) When set, enable NXM events for HFA read operations.
+ Internal:
+ Default is disabled, but
+ could be useful for debug of DLC/DFA accesses. */
+ uint64_t dlcram_cor_dis : 1; /**< [ 3: 3](R/W) Reserved.
+ Internal:
+ DLC RAM correction disable control. */
+ uint64_t dlcram_flip_synd : 2; /**< [ 5: 4](R/W) Reserved.
+ Internal:
+ DLC RAM flip syndrome control bits. */
+ uint64_t drive_ena_fprch : 1; /**< [ 6: 6](R/W) Drive DQx starting one cycle earlier than normal during write operations. */
+ uint64_t drive_ena_bprch : 1; /**< [ 7: 7](R/W) Drive DQx for one cycle longer than normal during write operations. */
+ uint64_t ref_int_lsbs : 9; /**< [ 16: 8](R/W) Least significant bits of the refresh interval value. The default is 0x0.
+ The refresh interval is represented in increments of 512 CK cycles and is controlled by
+ LMC()_CONFIG[REF_ZQCS_INT]. A more precise refresh interval (down to a granularity of
+ one CK cycle) can be achieved by setting this field to a nonzero value. */
+ uint64_t slot_ctl_reset_force : 1; /**< [ 17: 17](WO) Write 1 to reset the slot-control override for all slot-control registers. After writing a
+ 1 to this bit, slot-control registers will update with changes made to other timing-
+ control registers. This is a one-shot operation; it automatically returns to 0 after a
+ write to 1. */
+ uint64_t read_ena_fprch : 1; /**< [ 18: 18](R/W) Enable pad receiver starting one cycle earlier than normal during read operations. */
+ uint64_t read_ena_bprch : 1; /**< [ 19: 19](R/W) Enable pad receiver one cycle longer than normal during read operations. */
+ uint64_t vrefint_seq_deskew : 1; /**< [ 20: 20](R/W) Personality bit to change the operation of what is normally the internal Vref training
+ sequence into the deskew training sequence. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t gen_par : 1; /**< [ 24: 24](R/W) Enable parity generation in the DRAM commands; must be set prior to enabling parity in
+ register or DRAM devices. */
+ uint64_t par_include_bg1 : 1; /**< [ 25: 25](R/W) If set, include BG1 in parity calculations in DDR4 mode. */
+ uint64_t par_include_a17 : 1; /**< [ 26: 26](R/W) If set, include A17 in parity calculations in DDR4 mode. */
+ uint64_t reserved_27 : 1;
+ uint64_t cal_ena : 1; /**< [ 28: 28](R/W) Set to cause LMC to operate in CAL mode. First set LMC()_MODEREG_PARAMS3[CAL], then
+ set [CAL_ENA]. */
+ uint64_t cmd_rti : 1; /**< [ 29: 29](R/W) Set this bit to change the behavior of the LMC to return to a completely idle command (no
+ CS active, no command pins active, and address/bank address/bank group all low) on the
+ interface after an active command, rather than only forcing the CS inactive between
+ commands. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t invert_data : 1; /**< [ 32: 32](R/W) Set this bit to cause all data to be inverted before writing or reading to/from DRAM. This
+ effectively uses the scramble logic to instead invert all the data, so this bit must not
+ be set if data scrambling is enabled. May be useful if data inversion will result in lower
+ power. */
+ uint64_t reserved_33_35 : 3;
+ uint64_t mrs_cmd_select : 1; /**< [ 36: 36](R/W) When [MRS_CMD_OVERRIDE] is set, this bit selects the style of operation used for MRS
+ and RCW commands.
+
+ When this bit is clear, select operation where signals other than CS are active before and
+ after the DDR_CS_L active cycle.
+
+ When this bit is set, select the operation where the other command signals (DDR*_RAS_L,
+ DDR*_CAS_L, DDR*_WE_L, DDR*_A\<15:0\>, etc.) all are active only during the cycle where the
+ DDR_CS_L is also active. */
+ uint64_t mrs_cmd_override : 1; /**< [ 37: 37](R/W) Set to override the behavior of MRS and RCW operations.
+ If this bit is set, the override behavior is governed by the control field
+ [MRS_CMD_SELECT]. See LMC()_EXT_CONFIG[MRS_CMD_SELECT] for detail.
+
+ If this bit is cleared, select operation where signals other than CS are active before
+ and after the CS_N active cycle (except for the case when interfacing with DDR3 RDIMM). */
+ uint64_t reserved_38_39 : 2;
+ uint64_t par_addr_mask : 3; /**< [ 42: 40](R/W) Mask applied to parity for address bits \<14:12\>. Clear to exclude these address
+ bits from the parity calculation, necessary if the DRAM device does not have these pins. */
+ uint64_t reserved_43 : 1;
+ uint64_t ea_int_polarity : 1; /**< [ 44: 44](R/W) Set to invert the DDR*_ERROR_ALERT_L interrupt polarity. When clear, interrupt is
+ signalled on
+ the rising edge of DDR*_ERROR_ALERT_L. When set, interrupt is signalled on the falling
+ edge of DDR*_ERROR_ALERT_L. */
+ uint64_t error_alert_n_sample : 1; /**< [ 45: 45](RO) Read to get a sample of the DDR*_ERROR_ALERT_L signal. */
+ uint64_t reserved_46_47 : 2;
+ uint64_t rcd_parity_check : 1; /**< [ 48: 48](R/W) Enables the one cycle delay of the CA parity output. This MUST be set to one
+ when using DDR4 RDIMM AND parity checking in RCD is enabled (RC0E DA0 = 1). Set
+ this to zero otherwise. To enable parity checking in the RCD, set this bit
+ BEFORE issuing the RCW write RC0E DA0 = 1. */
+ uint64_t dimm0_cid : 2; /**< [ 50: 49](R/W) Reserved.
+ Internal:
+ DIMM0 configuration bits that represent the number of chip
+ IDs of the DRAM. This value is used for decoding the address
+ as well as routing Chip IDs to the appropriate output
+ pins.
+ 0x0 = 0 Chip ID (Mono-Die stack).
+ 0x1 = 1 Chip ID (2H 3DS).
+ 0x2 = 2 Chip IDs (4H 3DS).
+ 0x3 = 3 Chip IDs (8H 3DS). */
+ uint64_t dimm1_cid : 2; /**< [ 52: 51](R/W) Reserved.
+ Internal:
+ DIMM1 configuration bits that represent the number of chip
+ IDs of the DRAM. This value is used for decoding the address
+ as well as routing Chip IDs to the appropriate output
+ pins.
+ 0x0 = 0 Chip ID (Mono-Die stack).
+ 0x1 = 1 Chip ID (2H 3DS).
+ 0x2 = 2 Chip IDs (4H 3DS).
+ 0x3 = 3 Chip IDs (8H 3DS). */
+ uint64_t coalesce_address_mode : 1; /**< [ 53: 53](R/W) When set to one, LMC coalesces the L2C+LMC internal address mapping
+ to create a uniform memory space that is free from holes in
+ between ranks. When different size DIMMs are used, the DIMM with
+ the higher capacity is mapped to the lower address space. */
+ uint64_t dimm_sel_force_invert : 1; /**< [ 54: 54](R/W) When set to one, this bit forces the pbank bit to be inverted
+ when in coalesce_address_mode. That is, pbank value of zero selects
+ DIMM1 instead of DIMM0.
+ Intended for the case where DIMM1 has larger rank(s)
+ than DIMM0. This bit has priority over [DIMM_SEL_INVERT_OFF]. */
+ uint64_t dimm_sel_invert_off : 1; /**< [ 55: 55](R/W) During coalesce_address_mode, the default logic would be to invert
+ the pbank bit whenever LMC()_NXM[MEM_MSB_D1_R0] \> LMC()_NXM[MEM_MSB_D0_R0].
+ When this bit is set to one, it disables this default behavior.
+ This configuration has lower priority compared to
+ [DIMM_SEL_FORCE_INVERT]. */
+ uint64_t mrs_bside_invert_disable : 1;/**< [ 56: 56](R/W) When set, the command decoder cancels the auto inversion of
+ A3-A9, A11, A13, A17, BA0, BA1 and BG0 during MRS/MRS_PDA
+ command to the B side of the RDIMM.
+ When set, make sure that the RCD's control word
+ RC00 DA[0] = 1 so that the output inversion is disabled in
+ the DDR4 RCD. */
+ uint64_t mrs_one_side : 1; /**< [ 57: 57](R/W) Only applies to DDR4 RDIMM.
+ When set, MRS commands are directed to either the A or B
+ side of the RCD.
+
+ PDA operation is NOT allowed when this bit is set. In
+ other words, LMC()_MR_MPR_CTL[MR_WR_PDA_ENABLE]
+ must be cleared before running the MRW sequence with this
+ bit turned on. */
+ uint64_t mrs_side : 1; /**< [ 58: 58](R/W) Specifies the RDIMM side. Only applies when [MRS_ONE_SIDE] is set.
+ 0 = MRS command is sent to the A side of an RDIMM.
+ 1 = MRS command is sent to the B side of an RDIMM. */
+ uint64_t ref_block : 1; /**< [ 59: 59](R/W) When set, LMC is blocked from initiating any refresh sequence. LMC then only
+ allows a refresh sequence to start when LMC()_REF_STATUS[REF_COUNT] has
+ reached the maximum value of 0x7. */
+ uint64_t bc4_dqs_ena : 1; /**< [ 60: 60](RO) Reserved. */
+ uint64_t reserved_61_63 : 3;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_lmcx_ext_config_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_61_63 : 3;
+ uint64_t bc4_dqs_ena : 1; /**< [ 60: 60](R/W) Reserved.
+ Internal:
+ For diagnostic use only.
+ 0 = LMC produces the full bursts of DQS transitions,
+ even for BC4 Write ops.
+ 1 = LMC produces only three cycles of DQS transitions
+ every time it sends out a BC4 Write operation. */
+ uint64_t ref_block : 1; /**< [ 59: 59](R/W) When set, LMC is blocked from initiating any refresh sequence. LMC then only
+ allows a refresh sequence to start when LMC()_REF_STATUS[REF_COUNT] has
+ reached the maximum value of 0x7. */
+ uint64_t mrs_side : 1; /**< [ 58: 58](R/W) Specifies the RDIMM side. Only applies when [MRS_ONE_SIDE] is set.
+ 0 = MRS command is sent to the A side of an RDIMM.
+ 1 = MRS command is sent to the B side of an RDIMM. */
+ uint64_t mrs_one_side : 1; /**< [ 57: 57](R/W) Only applies to DDR4 RDIMM.
+ When set, MRS commands are directed to either the A or B
+ side of the RCD.
+
+ PDA operation is NOT allowed when this bit is set. In
+ other words, LMC()_MR_MPR_CTL[MR_WR_PDA_ENABLE]
+ must be cleared before running the MRW sequence with this
+ bit turned on. */
+ uint64_t mrs_bside_invert_disable : 1;/**< [ 56: 56](R/W) When set, the command decoder cancels the auto inversion of
+ A3-A9, A11, A13, A17, BA0, BA1 and BG0 during MRS/MRS_PDA
+ command to the B side of the RDIMM.
+ When set, make sure that the RCD's control word
+ RC00 DA[0] = 1 so that the output inversion is disabled in
+ the DDR4 RCD. */
+ uint64_t dimm_sel_invert_off : 1; /**< [ 55: 55](R/W) During coalesce_address_mode, the default logic would be to invert
+ the pbank bit whenever [MEM_MSB_D1_R0] \> [MEM_MSB_D0_R0].
+ When this bit is set to one, it disables this default behavior.
+ This configuration has lower priority compared to
+ [DIMM_SEL_FORCE_INVERT]. */
+ uint64_t dimm_sel_force_invert : 1; /**< [ 54: 54](R/W) When set to one, this bit forces the pbank bit to be inverted
+ when in coalesce_address_mode. That is, pbank value of zero selects
+ DIMM1 instead of DIMM0.
+ Intended for the case where DIMM1 has larger rank(s)
+ than DIMM0. This bit has priority over [DIMM_SEL_INVERT_OFF]. */
+ uint64_t coalesce_address_mode : 1; /**< [ 53: 53](R/W) When set to one, LMC coalesces the L2C+LMC internal address mapping
+ to create a uniform memory space that is free from holes in
+ between ranks. When different size DIMMs are used, the DIMM with
+ the higher capacity is mapped to the lower address space. */
+ uint64_t dimm1_cid : 2; /**< [ 52: 51](R/W) Reserved.
+ Internal:
+ DIMM1 configuration bits that represent the number of chip
+ IDs of the DRAM. This value is used for decoding the address
+ as well as routing Chip IDs to the appropriate output
+ pins.
+ 0x0 = 0 Chip ID (Mono-Die stack).
+ 0x1 = 1 Chip ID (2H 3DS).
+ 0x2 = 2 Chip IDs (4H 3DS).
+ 0x3 = 3 Chip IDs (8H 3DS). */
+ uint64_t dimm0_cid : 2; /**< [ 50: 49](R/W) Reserved.
+ Internal:
+ DIMM0 configuration bits that represent the number of chip
+ IDs of the DRAM. This value is used for decoding the address
+ as well as routing Chip IDs to the appropriate output
+ pins.
+ 0x0 = 0 Chip ID (Mono-Die stack).
+ 0x1 = 1 Chip ID (2H 3DS).
+ 0x2 = 2 Chip IDs (4H 3DS).
+ 0x3 = 3 Chip IDs (8H 3DS). */
+ uint64_t rcd_parity_check : 1; /**< [ 48: 48](R/W) Enables the one cycle delay of the CA parity output. This MUST be set to one
+ when using DDR4 RDIMM AND parity checking in RCD is enabled (RC0E DA0 = 1). Set
+ this to zero otherwise. To enable parity checking in the RCD, set this bit
+ BEFORE issuing the RCW write RC0E DA0 = 1. */
+ uint64_t reserved_46_47 : 2;
+ uint64_t error_alert_n_sample : 1; /**< [ 45: 45](RO) Read to get a sample of the DDR*_ERROR_ALERT_L signal. */
+ uint64_t ea_int_polarity : 1; /**< [ 44: 44](R/W) Set to invert the DDR*_ERROR_ALERT_L interrupt polarity. When clear, interrupt is
+ signalled on
+ the rising edge of DDR*_ERROR_ALERT_L. When set, interrupt is signalled on the falling
+ edge of DDR*_ERROR_ALERT_L. */
+ uint64_t reserved_43 : 1;
+ uint64_t par_addr_mask : 3; /**< [ 42: 40](R/W) Mask applied to parity for address bits \<14:12\>. Clear to exclude these address
+ bits from the parity calculation, necessary if the DRAM device does not have these pins. */
+ uint64_t reserved_38_39 : 2;
+ uint64_t mrs_cmd_override : 1; /**< [ 37: 37](R/W) Set to override the behavior of MRS and RCW operations.
+ If this bit is set, the override behavior is governed by the control field
+ [MRS_CMD_SELECT]. See LMC()_EXT_CONFIG[MRS_CMD_SELECT] for detail.
+
+ If this bit is cleared, select operation where signals other than CS are active before
+ and after the CS_N active cycle (except for the case when interfacing with DDR3 RDIMM). */
+ uint64_t mrs_cmd_select : 1; /**< [ 36: 36](R/W) When [MRS_CMD_OVERRIDE] is set, this bit selects the style of operation used for MRS
+ and RCW commands.
+
+ When this bit is clear, select operation where signals other than CS are active before and
+ after the DDR_CS_L active cycle.
+
+ When this bit is set, select the operation where the other command signals (DDR*_RAS_L,
+ DDR*_CAS_L, DDR*_WE_L, DDR*_A\<15:0\>, etc.) all are active only during the cycle where the
+ DDR_CS_L is also active. */
+ uint64_t reserved_33_35 : 3;
+ uint64_t invert_data : 1; /**< [ 32: 32](R/W) Set this bit to cause all data to be inverted before writing or reading to/from DRAM. This
+ effectively uses the scramble logic to instead invert all the data, so this bit must not
+ be set if data scrambling is enabled. May be useful if data inversion will result in lower
+ power. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t cmd_rti : 1; /**< [ 29: 29](R/W) Set this bit to change the behavior of the LMC to return to a completely idle command (no
+ CS active, no command pins active, and address/bank address/bank group all low) on the
+ interface after an active command, rather than only forcing the CS inactive between
+ commands. */
+ uint64_t cal_ena : 1; /**< [ 28: 28](R/W) Set to cause LMC to operate in CAL mode. First set LMC()_MODEREG_PARAMS3[CAL], then
+ set [CAL_ENA]. */
+ uint64_t reserved_27 : 1;
+ uint64_t par_include_a17 : 1; /**< [ 26: 26](R/W) If set, include A17 in parity calculations in DDR4 mode. */
+ uint64_t par_include_bg1 : 1; /**< [ 25: 25](R/W) If set, include BG1 in parity calculations in DDR4 mode. */
+ uint64_t gen_par : 1; /**< [ 24: 24](R/W) Enable parity generation in the DRAM commands; must be set prior to enabling parity in
+ register or DRAM devices. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t vrefint_seq_deskew : 1; /**< [ 20: 20](R/W) Personality bit to change the operation of what is normally the internal Vref training
+ sequence into the deskew training sequence. */
+ uint64_t read_ena_bprch : 1; /**< [ 19: 19](R/W) Enable pad receiver one cycle longer than normal during read operations. */
+ uint64_t read_ena_fprch : 1; /**< [ 18: 18](R/W) Enable pad receiver starting one cycle earlier than normal during read operations. */
+ uint64_t slot_ctl_reset_force : 1; /**< [ 17: 17](WO) Write 1 to reset the slot-control override for all slot-control registers. After writing a
+ 1 to this bit, slot-control registers will update with changes made to other timing-
+ control registers. This is a one-shot operation; it automatically returns to 0 after a
+ write to 1. */
+ uint64_t ref_int_lsbs : 9; /**< [ 16: 8](R/W) Least significant bits of the refresh interval value. The default is 0x0.
+ The refresh interval is represented in increments of 512 CK cycles and is controlled by
+ LMC()_CONFIG[REF_ZQCS_INT]. A more precise refresh interval (down to a granularity of
+ one CK cycle) can be achieved by setting this field to a nonzero value. */
+ uint64_t drive_ena_bprch : 1; /**< [ 7: 7](R/W) Drive DQx for one cycle longer than normal during write operations. */
+ uint64_t drive_ena_fprch : 1; /**< [ 6: 6](R/W) Drive DQx starting one cycle earlier than normal during write operations. */
+ uint64_t dlcram_flip_synd : 2; /**< [ 5: 4](R/W) Reserved.
+ Internal:
+ DLC RAM flip syndrome control bits. */
+ uint64_t dlcram_cor_dis : 1; /**< [ 3: 3](R/W) Reserved.
+ Internal:
+ DLC RAM correction disable control. */
+ uint64_t dlc_nxm_rd : 1; /**< [ 2: 2](R/W) Reserved.
+ Internal:
+ When set, enable NXM events for HFA read operations.
+ Default is disabled, but
+ could be useful for debug of DLC/DFA accesses. */
+ uint64_t l2c_nxm_rd : 1; /**< [ 1: 1](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
+ loaded for L2C NXM read operations. NXM read operations may occur during normal operation
+ (due to prefetches), so [L2C_NXM_RD] should not be set during normal operation to allow
+ LMC()_INT[NXM_WR_ERR] to indicate NXM writes. */
+ uint64_t l2c_nxm_wr : 1; /**< [ 0: 0](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
+ loaded for L2C NXM write operations. NXM writes are generally an indication of
+ failure, so [L2C_NXM_WR] can generally be set. */
+#else /* Word 0 - Little Endian */
+ uint64_t l2c_nxm_wr : 1; /**< [ 0: 0](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
+ loaded for L2C NXM write operations. NXM writes are generally an indication of
+ failure, so [L2C_NXM_WR] can generally be set. */
+ uint64_t l2c_nxm_rd : 1; /**< [ 1: 1](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
+ loaded for L2C NXM read operations. NXM read operations may occur during normal operation
+ (due to prefetches), so [L2C_NXM_RD] should not be set during normal operation to allow
+ LMC()_INT[NXM_WR_ERR] to indicate NXM writes. */
+ uint64_t dlc_nxm_rd : 1; /**< [ 2: 2](R/W) Reserved.
+ Internal:
+ When set, enable NXM events for HFA read operations.
+ Default is disabled, but
+ could be useful for debug of DLC/DFA accesses. */
+ uint64_t dlcram_cor_dis : 1; /**< [ 3: 3](R/W) Reserved.
+ Internal:
+ DLC RAM correction disable control. */
+ uint64_t dlcram_flip_synd : 2; /**< [ 5: 4](R/W) Reserved.
+ Internal:
+ DLC RAM flip syndrome control bits. */
+ uint64_t drive_ena_fprch : 1; /**< [ 6: 6](R/W) Drive DQx starting one cycle earlier than normal during write operations. */
+ uint64_t drive_ena_bprch : 1; /**< [ 7: 7](R/W) Drive DQx for one cycle longer than normal during write operations. */
+ uint64_t ref_int_lsbs : 9; /**< [ 16: 8](R/W) Least significant bits of the refresh interval value. The default is 0x0.
+ The refresh interval is represented in increments of 512 CK cycles and is controlled by
+ LMC()_CONFIG[REF_ZQCS_INT]. A more precise refresh interval (down to a granularity of
+ one CK cycle) can be achieved by setting this field to a nonzero value. */
+ uint64_t slot_ctl_reset_force : 1; /**< [ 17: 17](WO) Write 1 to reset the slot-control override for all slot-control registers. After writing a
+ 1 to this bit, slot-control registers will update with changes made to other timing-
+ control registers. This is a one-shot operation; it automatically returns to 0 after a
+ write to 1. */
+ uint64_t read_ena_fprch : 1; /**< [ 18: 18](R/W) Enable pad receiver starting one cycle earlier than normal during read operations. */
+ uint64_t read_ena_bprch : 1; /**< [ 19: 19](R/W) Enable pad receiver one cycle longer than normal during read operations. */
+ uint64_t vrefint_seq_deskew : 1; /**< [ 20: 20](R/W) Personality bit to change the operation of what is normally the internal Vref training
+ sequence into the deskew training sequence. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t gen_par : 1; /**< [ 24: 24](R/W) Enable parity generation in the DRAM commands; must be set prior to enabling parity in
+ register or DRAM devices. */
+ uint64_t par_include_bg1 : 1; /**< [ 25: 25](R/W) If set, include BG1 in parity calculations in DDR4 mode. */
+ uint64_t par_include_a17 : 1; /**< [ 26: 26](R/W) If set, include A17 in parity calculations in DDR4 mode. */
+ uint64_t reserved_27 : 1;
+ uint64_t cal_ena : 1; /**< [ 28: 28](R/W) Set to cause LMC to operate in CAL mode. First set LMC()_MODEREG_PARAMS3[CAL], then
+ set [CAL_ENA]. */
+ uint64_t cmd_rti : 1; /**< [ 29: 29](R/W) Set this bit to change the behavior of the LMC to return to a completely idle command (no
+ CS active, no command pins active, and address/bank address/bank group all low) on the
+ interface after an active command, rather than only forcing the CS inactive between
+ commands. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t invert_data : 1; /**< [ 32: 32](R/W) Set this bit to cause all data to be inverted before writing or reading to/from DRAM. This
+ effectively uses the scramble logic to instead invert all the data, so this bit must not
+ be set if data scrambling is enabled. May be useful if data inversion will result in lower
+ power. */
+ uint64_t reserved_33_35 : 3;
+ uint64_t mrs_cmd_select : 1; /**< [ 36: 36](R/W) When [MRS_CMD_OVERRIDE] is set, use this bit to select which style of
+ operation to use for MRS and RCW commands.
+
+ When this bit is clear, select the operation where signals other than CS are active before and
+ after the DDR_CS_L active cycle.
+
+ When this bit is set, select the operation where the other command signals (DDR*_RAS_L,
+ DDR*_CAS_L, DDR*_WE_L, DDR*_A\<15:0\>, etc.) all are active only during the cycle where the
+ DDR_CS_L is also active. */
+ uint64_t mrs_cmd_override : 1; /**< [ 37: 37](R/W) Set to override the behavior of MRS and RCW operations.
+ If this bit is set, the override behavior is governed by the control field
+ [MRS_CMD_SELECT]. See LMC()_EXT_CONFIG[MRS_CMD_SELECT] for detail.
+
+ If this bit is cleared, select the operation where signals other than CS are active before
+ and after the CS_N active cycle (except for the case when interfacing with DDR3 RDIMM). */
+ uint64_t reserved_38_39 : 2;
+ uint64_t par_addr_mask : 3; /**< [ 42: 40](R/W) Mask applied to parity for address bits \<14:12\>. Clear to exclude these address
+ bits from the parity calculation, necessary if the DRAM device does not have these pins. */
+ uint64_t reserved_43 : 1;
+ uint64_t ea_int_polarity : 1; /**< [ 44: 44](R/W) Set to invert the DDR*_ERROR_ALERT_L interrupt polarity. When clear, interrupt is
+ signalled on
+ the rising edge of DDR*_ERROR_ALERT_L. When set, interrupt is signalled on the falling
+ edge of DDR*_ERROR_ALERT_L. */
+ uint64_t error_alert_n_sample : 1; /**< [ 45: 45](RO) Read to get a sample of the DDR*_ERROR_ALERT_L signal. */
+ uint64_t reserved_46_47 : 2;
+ uint64_t rcd_parity_check : 1; /**< [ 48: 48](R/W) Enables the one-cycle delay of the CA parity output. This MUST be set to one
+ when using a DDR4 RDIMM and parity checking in the RCD is enabled (RC0E DA0 = 1). Set
+ this to zero otherwise. To enable parity checking in the RCD, set this bit
+ BEFORE issuing the RCW write RC0E DA0 = 1. */
+ uint64_t dimm0_cid : 2; /**< [ 50: 49](R/W) Reserved.
+ Internal:
+ DIMM0 configuration bits that represent the number of chip
+ IDs of the DRAM. This value is used for decoding the address
+ as well as routing Chip IDs to the appropriate output
+ pins.
+ 0x0 = 0 Chip IDs (Mono-Die stack).
+ 0x1 = 1 Chip ID (2H 3DS).
+ 0x2 = 2 Chip IDs (4H 3DS).
+ 0x3 = 3 Chip IDs (8H 3DS). */
+ uint64_t dimm1_cid : 2; /**< [ 52: 51](R/W) Reserved.
+ Internal:
+ DIMM1 configuration bits that represent the number of chip
+ IDs of the DRAM. This value is used for decoding the address
+ as well as routing Chip IDs to the appropriate output
+ pins.
+ 0x0 = 0 Chip IDs (Mono-Die stack).
+ 0x1 = 1 Chip ID (2H 3DS).
+ 0x2 = 2 Chip IDs (4H 3DS).
+ 0x3 = 3 Chip IDs (8H 3DS). */
+ uint64_t coalesce_address_mode : 1; /**< [ 53: 53](R/W) When set to one, LMC coalesces the L2C+LMC internal address mapping
+ to create a uniform memory space that is free from holes in
+ between ranks. When different size DIMMs are used, the DIMM with
+ the higher capacity is mapped to the lower address space. */
+ uint64_t dimm_sel_force_invert : 1; /**< [ 54: 54](R/W) When set to one, this bit forces the pbank bit to be inverted
+ when in coalesce_address_mode. That is, pbank value of zero selects
+ DIMM1 instead of DIMM0.
+ Intended for the case where DIMM1 has larger rank(s)
+ than DIMM0. This bit has priority over [DIMM_SEL_INVERT_OFF]. */
+ uint64_t dimm_sel_invert_off : 1; /**< [ 55: 55](R/W) During coalesce_address_mode, the default logic would be to invert
+ the pbank bit whenever [MEM_MSB_D1_R0] \> [MEM_MSB_D0_R0].
+ When this bit is set to 1, it disables this default behavior.
+ This configuration has lower priority compared to
+ [DIMM_SEL_FORCE_INVERT]. */
+ uint64_t mrs_bside_invert_disable : 1;/**< [ 56: 56](R/W) When set, the command decoder cancels the auto inversion of
+ A3-A9, A11, A13, A17, BA0, BA1 and BG0 during MRS/MRS_PDA
+ command to the B side of the RDIMM.
+ When set, make sure that the RCD's control word
+ RC00 DA[0] = 1 so that the output inversion is disabled in
+ the DDR4 RCD. */
+ uint64_t mrs_one_side : 1; /**< [ 57: 57](R/W) Only applies to DDR4 RDIMM.
+ When set, MRS commands are directed to either the A or B
+ side of the RCD.
+
+ PDA operation is NOT allowed when this bit is set. In
+ other words, LMC()_MR_MPR_CTL[MR_WR_PDA_ENABLE]
+ must be cleared before running the MRW sequence with this
+ bit turned on. */
+ uint64_t mrs_side : 1; /**< [ 58: 58](R/W) Specifies the RDIMM side. Only applies when [MRS_ONE_SIDE] is set.
+ 0 = MRS command is sent to the A side of an RDIMM.
+ 1 = MRS command is sent to the B side of an RDIMM. */
+ uint64_t ref_block : 1; /**< [ 59: 59](R/W) When set, LMC is blocked from initiating any refresh sequence. LMC then only
+ allows a refresh sequence to start when LMC()_REF_STATUS[REF_COUNT] has
+ reached the maximum value of 0x7. */
+ uint64_t bc4_dqs_ena : 1; /**< [ 60: 60](R/W) Reserved.
+ Internal:
+ For diagnostic use only.
+ 0 = LMC produces the full bursts of DQS transitions,
+ even for BC4 Write ops.
+ 1 = LMC produces only three cycles of DQS transitions
+ every time it sends out a BC4 Write operation. */
+ uint64_t reserved_61_63 : 3;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_lmcx_ext_config bdk_lmcx_ext_config_t;
+
+static inline uint64_t BDK_LMCX_EXT_CONFIG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_EXT_CONFIG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000030ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000030ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000030ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000030ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_EXT_CONFIG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_EXT_CONFIG(a) bdk_lmcx_ext_config_t
+#define bustype_BDK_LMCX_EXT_CONFIG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_EXT_CONFIG(a) "LMCX_EXT_CONFIG"
+#define device_bar_BDK_LMCX_EXT_CONFIG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_EXT_CONFIG(a) (a)
+#define arguments_BDK_LMCX_EXT_CONFIG(a) (a),-1,-1,-1
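+
+/* A minimal usage sketch (illustrative only; the helper name is hypothetical,
+ * and BDK_CSR_MODIFY/bdk_node_t are assumed to be the usual bdk-csr.h and
+ * bdk-numa.h facilities). Per the [GEN_PAR] note above, command parity
+ * generation must be enabled in LMC before parity checking is enabled in the
+ * RCD or DRAM devices. */
+static inline void lmc_example_enable_cmd_parity(bdk_node_t node, int lmc)
+{
+    /* Read-modify-write LMC(lmc)_EXT_CONFIG, setting only GEN_PAR. */
+    BDK_CSR_MODIFY(c, node, BDK_LMCX_EXT_CONFIG(lmc),
+        c.s.gen_par = 1);
+    /* Only after this point should RC0E DA0 = 1 be written to the RCD. */
+}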
+
+/**
+ * Register (RSL) lmc#_ext_config2
+ *
+ * LMC Extended Configuration Register
+ * This register has additional configuration and control bits for the LMC.
+ */
+union bdk_lmcx_ext_config2
+{
+ uint64_t u;
+ struct bdk_lmcx_ext_config2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_27_63 : 37;
+ uint64_t sref_auto_idle_thres : 5; /**< [ 26: 22](RO) Reserved. */
+ uint64_t sref_auto_enable : 1; /**< [ 21: 21](RO) Reserved. */
+ uint64_t delay_unload_r3 : 1; /**< [ 20: 20](RO) Reserved. */
+ uint64_t delay_unload_r2 : 1; /**< [ 19: 19](RO) Reserved. */
+ uint64_t delay_unload_r1 : 1; /**< [ 18: 18](RO) Reserved. */
+ uint64_t delay_unload_r0 : 1; /**< [ 17: 17](RO) Reserved. */
+ uint64_t early_dqx2 : 1; /**< [ 16: 16](RO) Reserved. */
+ uint64_t xor_bank_sel : 4; /**< [ 15: 12](RO) Reserved. */
+ uint64_t reserved_10_11 : 2;
+ uint64_t row_col_switch : 1; /**< [ 9: 9](R/W) When set, the memory address bit position that represents bit 4 of the COLUMN
+ address (bit 5 in 32-bit mode) becomes the low order DDR ROW address bit.
+ The upper DDR COLUMN address portion is selected using LMC()_CONFIG[ROW_LSB]
+ (and LMC()_DUAL_MEMCFG[ROW_LSB] for dual-memory configuration).
+ It is recommended to set this bit to one when TRR_ON is set. */
+ uint64_t trr_on : 1; /**< [ 8: 8](R/W) When set, this enables counting of DRAM row activates, as used in
+ target row refresh mode. This bit can be safely set after
+ LMC()_EXT_CONFIG2[MACRAM_SCRUB_DONE] has a value of 1. */
+ uint64_t reserved_0_7 : 8;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_7 : 8;
+ uint64_t trr_on : 1; /**< [ 8: 8](R/W) When set, this enables counting of DRAM row activates, as used in
+ target row refresh mode. This bit can be safely set after
+ LMC()_EXT_CONFIG2[MACRAM_SCRUB_DONE] has a value of 1. */
+ uint64_t row_col_switch : 1; /**< [ 9: 9](R/W) When set, the memory address bit position that represents bit 4 of the COLUMN
+ address (bit 5 in 32-bit mode) becomes the low order DDR ROW address bit.
+ The upper DDR COLUMN address portion is selected using LMC()_CONFIG[ROW_LSB]
+ (and LMC()_DUAL_MEMCFG[ROW_LSB] for dual-memory configuration).
+ It is recommended to set this bit to one when TRR_ON is set. */
+ uint64_t reserved_10_11 : 2;
+ uint64_t xor_bank_sel : 4; /**< [ 15: 12](RO) Reserved. */
+ uint64_t early_dqx2 : 1; /**< [ 16: 16](RO) Reserved. */
+ uint64_t delay_unload_r0 : 1; /**< [ 17: 17](RO) Reserved. */
+ uint64_t delay_unload_r1 : 1; /**< [ 18: 18](RO) Reserved. */
+ uint64_t delay_unload_r2 : 1; /**< [ 19: 19](RO) Reserved. */
+ uint64_t delay_unload_r3 : 1; /**< [ 20: 20](RO) Reserved. */
+ uint64_t sref_auto_enable : 1; /**< [ 21: 21](RO) Reserved. */
+ uint64_t sref_auto_idle_thres : 5; /**< [ 26: 22](RO) Reserved. */
+ uint64_t reserved_27_63 : 37;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_ext_config2_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_27_63 : 37;
+ uint64_t sref_auto_idle_thres : 5; /**< [ 26: 22](RO) Reserved. */
+ uint64_t sref_auto_enable : 1; /**< [ 21: 21](RO) Reserved. */
+ uint64_t delay_unload_r3 : 1; /**< [ 20: 20](RO) Reserved. */
+ uint64_t delay_unload_r2 : 1; /**< [ 19: 19](RO) Reserved. */
+ uint64_t delay_unload_r1 : 1; /**< [ 18: 18](RO) Reserved. */
+ uint64_t delay_unload_r0 : 1; /**< [ 17: 17](RO) Reserved. */
+ uint64_t early_dqx2 : 1; /**< [ 16: 16](RO) Reserved. */
+ uint64_t xor_bank_sel : 4; /**< [ 15: 12](RO) Reserved. */
+ uint64_t reserved_10_11 : 2;
+ uint64_t row_col_switch : 1; /**< [ 9: 9](R/W) When set, the memory address bit position that represents bit 4 of the COLUMN
+ address (bit 5 in 32-bit mode) becomes the low order DDR ROW address bit.
+ The upper DDR COLUMN address portion is selected using LMC()_CONFIG[ROW_LSB]
+ (and LMC()_DUAL_MEMCFG[ROW_LSB] for dual-memory configuration).
+ It is recommended to set this bit to one when TRR_ON is set. */
+ uint64_t trr_on : 1; /**< [ 8: 8](R/W) When set, this enables counting of DRAM row activates, as used in
+ target row refresh mode. This bit can be safely set after
+ LMC()_EXT_CONFIG2[MACRAM_SCRUB_DONE] has a value of 1. */
+ uint64_t mac : 3; /**< [ 7: 5](R/W) Sets the maximum number of activates allowed within a tMAW interval.
+ 0x0 = 100K.
+ 0x1 = 400K/2.
+ 0x2 = 500K/2.
+ 0x3 = 600K/2.
+ 0x4 = 700K/2.
+ 0x5 = 800K/2.
+ 0x6 = 900K/2.
+ 0x7 = 1000K/2. */
+ uint64_t macram_scrub_done : 1; /**< [ 4: 4](RO/H) Maximum activate count memory scrub complete indication;
+ 1 means the memory has been scrubbed to all zero. */
+ uint64_t macram_scrub : 1; /**< [ 3: 3](WO) When set, the maximum activate count memory will be scrubbed to all zero values. This
+ should be done before enabling TRR mode by setting LMC()_EXT_CONFIG2[TRR_ON].
+ This is a one-shot operation; it automatically returns to 0 after a write to 1. */
+ uint64_t macram_flip_synd : 2; /**< [ 2: 1](R/W) Reserved.
+ Internal:
+ MAC RAM flip syndrome control bits. */
+ uint64_t macram_cor_dis : 1; /**< [ 0: 0](R/W) Reserved.
+ Internal:
+ MAC RAM correction disable control. */
+#else /* Word 0 - Little Endian */
+ uint64_t macram_cor_dis : 1; /**< [ 0: 0](R/W) Reserved.
+ Internal:
+ MAC RAM correction disable control. */
+ uint64_t macram_flip_synd : 2; /**< [ 2: 1](R/W) Reserved.
+ Internal:
+ MAC RAM flip syndrome control bits. */
+ uint64_t macram_scrub : 1; /**< [ 3: 3](WO) When set, the maximum activate count memory will be scrubbed to all zero values. This
+ should be done before enabling TRR mode by setting LMC()_EXT_CONFIG2[TRR_ON].
+ This is a one-shot operation; it automatically returns to 0 after a write to 1. */
+ uint64_t macram_scrub_done : 1; /**< [ 4: 4](RO/H) Maximum activate count memory scrub complete indication;
+ 1 means the memory has been scrubbed to all zero. */
+ uint64_t mac : 3; /**< [ 7: 5](R/W) Sets the maximum number of activates allowed within a tMAW interval.
+ 0x0 = 100K.
+ 0x1 = 400K/2.
+ 0x2 = 500K/2.
+ 0x3 = 600K/2.
+ 0x4 = 700K/2.
+ 0x5 = 800K/2.
+ 0x6 = 900K/2.
+ 0x7 = 1000K/2. */
+ uint64_t trr_on : 1; /**< [ 8: 8](R/W) When set, this enables counting of DRAM row activates, as used in
+ target row refresh mode. This bit can be safely set after
+ LMC()_EXT_CONFIG2[MACRAM_SCRUB_DONE] has a value of 1. */
+ uint64_t row_col_switch : 1; /**< [ 9: 9](R/W) When set, the memory address bit position that represents bit 4 of the COLUMN
+ address (bit 5 in 32-bit mode) becomes the low order DDR ROW address bit.
+ The upper DDR COLUMN address portion is selected using LMC()_CONFIG[ROW_LSB]
+ (and LMC()_DUAL_MEMCFG[ROW_LSB] for dual-memory configuration).
+ It is recommended to set this bit to one when TRR_ON is set. */
+ uint64_t reserved_10_11 : 2;
+ uint64_t xor_bank_sel : 4; /**< [ 15: 12](RO) Reserved. */
+ uint64_t early_dqx2 : 1; /**< [ 16: 16](RO) Reserved. */
+ uint64_t delay_unload_r0 : 1; /**< [ 17: 17](RO) Reserved. */
+ uint64_t delay_unload_r1 : 1; /**< [ 18: 18](RO) Reserved. */
+ uint64_t delay_unload_r2 : 1; /**< [ 19: 19](RO) Reserved. */
+ uint64_t delay_unload_r3 : 1; /**< [ 20: 20](RO) Reserved. */
+ uint64_t sref_auto_enable : 1; /**< [ 21: 21](RO) Reserved. */
+ uint64_t sref_auto_idle_thres : 5; /**< [ 26: 22](RO) Reserved. */
+ uint64_t reserved_27_63 : 37;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_lmcx_ext_config2_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_27_63 : 37;
+ uint64_t sref_auto_idle_thres : 5; /**< [ 26: 22](R/W) Self-refresh idle threshold.
+ Enter self-refresh mode after the memory controller has been idle for
+ 2^([SREF_AUTO_IDLE_THRES]-1) * TREFI.
+ Where TREFI time is controlled by LMC()_CONFIG[REF_ZQCS_INT]\<6:0\>.
+
+ 0x0 = Automatic self refresh interval is controlled by
+ 2^(2+LMC()_CONFIG[IDLEPOWER]) CK cycles instead. Self refresh has priority
+ over precharge power-down.
+
+ Only valid when LMC()_EXT_CONFIG2[SREF_AUTO_ENABLE] is set. */
+ uint64_t sref_auto_enable : 1; /**< [ 21: 21](R/W) Enable automatic self-refresh mode.
+ This field should only be set after initialization.
+ When set, software must not issue self refresh enter commands (LMC_SEQ_SEL_E::SREF_ENTRY).
+
+ Internal:
+ FIXME The LMC_SEQ_SEL_E::SREF_ENTRY requirement can be removed in 98xx when bug28110 is closed. */
+ uint64_t delay_unload_r3 : 1; /**< [ 20: 20](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle later for Rank 3 reads.
+ Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D1_R1]. */
+ uint64_t delay_unload_r2 : 1; /**< [ 19: 19](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle later for Rank 2 reads.
+ Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D1_R0]. */
+ uint64_t delay_unload_r1 : 1; /**< [ 18: 18](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle later for Rank 1 reads.
+ Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D0_R1]. */
+ uint64_t delay_unload_r0 : 1; /**< [ 17: 17](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle later for Rank 0 reads.
+ Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D0_R0]. */
+ uint64_t early_dqx2 : 1; /**< [ 16: 16](R/W) Similar to LMC()_CONFIG[EARLY_DQX]. This field provides an additional setting to send DQx
+ signals one more CK cycle earlier on top of LMC()_CONFIG[EARLY_DQX]. */
+ uint64_t xor_bank_sel : 4; /**< [ 15: 12](R/W) When LMC()_CONTROL[XOR_BANK] is set to one, this field selects which
+ L2C-LMC address bits are used to XOR the bank bits with.
+ 0x0: bank\<3:0\> = address\<10:7\> ^ address\<15:12\>.
+ 0x1: bank\<3:0\> = address\<10:7\> ^ address\<13:10\>.
+ 0x2: bank\<3:0\> = address\<10:7\> ^ address\<14:11\>.
+ 0x3: bank\<3:0\> = address\<10:7\> ^ address\<16:13\>.
+ 0x4: bank\<3:0\> = address\<10:7\> ^ address\<17:14\>.
+ 0x5: bank\<3:0\> = address\<10:7\> ^ address\<18:15\>.
+ 0x6: bank\<3:0\> = address\<10:7\> ^ address\<22:19\>.
+ 0x7: bank\<3:0\> = address\<10:7\> ^ address\<23:20\>.
+ 0x8: bank\<3:0\> = address\<10:7\> ^ address\<26:23\>.
+ 0x9: bank\<3:0\> = address\<10:7\> ^ address\<27:24\>.
+ 0xA: bank\<3:0\> = address\<10:7\> ^ address\<30:27\>.
+ 0xB: bank\<3:0\> = address\<10:7\> ^ address\<31:28\>.
+ 0xC: bank\<3:0\> = address\<10:7\> ^ address\<32:29\>.
+ 0xD: bank\<3:0\> = address\<10:7\> ^ address\<35:32\>.
+ 0xE: bank\<3:0\> = address\<10:7\> ^ address\<36:33\>.
+ 0xF: Reserved. */
+ uint64_t reserved_10_11 : 2;
+ uint64_t row_col_switch : 1; /**< [ 9: 9](R/W) When set, the memory address bit position that represents bit 4 of the COLUMN
+ address (bit 5 in 32-bit mode) becomes the low order DDR ROW address bit.
+ The upper DDR COLUMN address portion is selected using LMC()_CONFIG[ROW_LSB]
+ (and LMC()_DUAL_MEMCFG[ROW_LSB] for dual-memory configuration).
+ It is recommended to set this bit to one when TRR_ON is set. */
+ uint64_t reserved_8 : 1;
+ uint64_t throttle_wr : 4; /**< [ 7: 4](R/W) When set, throttle 1/16th of available command bandwidth by limiting IFB usage.
+ 0x0 = Full bandwidth, 32 IFBs available.
+ 0x1 = 1/16th bandwidth, 2 IFBs available.
+ 0x2 = 2/16th bandwidth, 4 IFBs available.
+ ...
+ 0xF = 15/16th bandwidth, 30 IFBs available. */
+ uint64_t throttle_rd : 4; /**< [ 3: 0](R/W) When set, throttle 1/16th of available command bandwidth by limiting IFB usage.
+ 0x0 = Full bandwidth, 32 IFBs available.
+ 0x1 = 1/16th bandwidth, 2 IFBs available.
+ 0x2 = 2/16th bandwidth, 4 IFBs available.
+ ...
+ 0xF = 15/16th bandwidth, 30 IFBs available. */
+#else /* Word 0 - Little Endian */
+ uint64_t throttle_rd : 4; /**< [ 3: 0](R/W) When set, throttle 1/16th of available command bandwidth by limiting IFB usage.
+ 0x0 = Full bandwidth, 32 IFBs available.
+ 0x1 = 1/16th bandwidth, 2 IFBs available.
+ 0x2 = 2/16th bandwidth, 4 IFBs available.
+ ...
+ 0xF = 15/16th bandwidth, 30 IFBs available. */
+ uint64_t throttle_wr : 4; /**< [ 7: 4](R/W) When set, throttle 1/16th of available command bandwidth by limiting IFB usage.
+ 0x0 = Full bandwidth, 32 IFBs available.
+ 0x1 = 1/16th bandwidth, 2 IFBs available.
+ 0x2 = 2/16th bandwidth, 4 IFBs available.
+ ...
+ 0xF = 15/16th bandwidth, 30 IFBs available. */
+ uint64_t reserved_8 : 1;
+ uint64_t row_col_switch : 1; /**< [ 9: 9](R/W) When set, the memory address bit position that represents bit 4 of the COLUMN
+ address (bit 5 in 32-bit mode) becomes the low order DDR ROW address bit.
+ The upper DDR COLUMN address portion is selected using LMC()_CONFIG[ROW_LSB]
+ (and LMC()_DUAL_MEMCFG[ROW_LSB] for dual-memory configuration).
+ It is recommended to set this bit to one when TRR_ON is set. */
+ uint64_t reserved_10_11 : 2;
+ uint64_t xor_bank_sel : 4; /**< [ 15: 12](R/W) When LMC()_CONTROL[XOR_BANK] is set to one, this field selects which
+ L2C-LMC address bits are used to XOR the bank bits with.
+ 0x0: bank\<3:0\> = address\<10:7\> ^ address\<15:12\>.
+ 0x1: bank\<3:0\> = address\<10:7\> ^ address\<13:10\>.
+ 0x2: bank\<3:0\> = address\<10:7\> ^ address\<14:11\>.
+ 0x3: bank\<3:0\> = address\<10:7\> ^ address\<16:13\>.
+ 0x4: bank\<3:0\> = address\<10:7\> ^ address\<17:14\>.
+ 0x5: bank\<3:0\> = address\<10:7\> ^ address\<18:15\>.
+ 0x6: bank\<3:0\> = address\<10:7\> ^ address\<22:19\>.
+ 0x7: bank\<3:0\> = address\<10:7\> ^ address\<23:20\>.
+ 0x8: bank\<3:0\> = address\<10:7\> ^ address\<26:23\>.
+ 0x9: bank\<3:0\> = address\<10:7\> ^ address\<27:24\>.
+ 0xA: bank\<3:0\> = address\<10:7\> ^ address\<30:27\>.
+ 0xB: bank\<3:0\> = address\<10:7\> ^ address\<31:28\>.
+ 0xC: bank\<3:0\> = address\<10:7\> ^ address\<32:29\>.
+ 0xD: bank\<3:0\> = address\<10:7\> ^ address\<35:32\>.
+ 0xE: bank\<3:0\> = address\<10:7\> ^ address\<36:33\>.
+ 0xF: Reserved. */
+ uint64_t early_dqx2 : 1; /**< [ 16: 16](R/W) Similar to LMC()_CONFIG[EARLY_DQX]. This field provides an additional setting to send DQx
+ signals one more CK cycle earlier on top of LMC()_CONFIG[EARLY_DQX]. */
+ uint64_t delay_unload_r0 : 1; /**< [ 17: 17](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle later for Rank 0 reads.
+ Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D0_R0]. */
+ uint64_t delay_unload_r1 : 1; /**< [ 18: 18](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle later for Rank 1 reads.
+ Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D0_R1]. */
+ uint64_t delay_unload_r2 : 1; /**< [ 19: 19](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle later for Rank 2 reads.
+ Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D1_R0]. */
+ uint64_t delay_unload_r3 : 1; /**< [ 20: 20](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle later for Rank 3 reads.
+ Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D1_R1]. */
+ uint64_t sref_auto_enable : 1; /**< [ 21: 21](R/W) Enable automatic self-refresh mode.
+ This field should only be set after initialization.
+ When set, software must not issue self refresh enter commands (LMC_SEQ_SEL_E::SREF_ENTRY).
+
+ Internal:
+ FIXME The LMC_SEQ_SEL_E::SREF_ENTRY requirement can be removed in 98xx when bug28110 is closed. */
+ uint64_t sref_auto_idle_thres : 5; /**< [ 26: 22](R/W) Self-refresh idle threshold.
+ Enter self-refresh mode after the memory controller has been idle for
+ 2^([SREF_AUTO_IDLE_THRES]-1) * TREFI.
+ Where TREFI time is controlled by LMC()_CONFIG[REF_ZQCS_INT]\<6:0\>.
+
+ 0x0 = Automatic self refresh interval is controlled by
+ 2^(2+LMC()_CONFIG[IDLEPOWER]) CK cycles instead. Self refresh has priority
+ over precharge power-down.
+
+ Only valid when LMC()_EXT_CONFIG2[SREF_AUTO_ENABLE] is set. */
+ uint64_t reserved_27_63 : 37;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_lmcx_ext_config2_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_27_63 : 37;
+ uint64_t sref_auto_idle_thres : 5; /**< [ 26: 22](R/W) Self-refresh idle threshold.
+ Enter self-refresh mode after the memory controller has been idle for
+ 2^([SREF_AUTO_IDLE_THRES]-1) * TREFI.
+ Where TREFI time is controlled by LMC()_CONFIG[REF_ZQCS_INT]\<6:0\>.
+
+ 0x0 = Automatic self refresh interval is controlled by
+ 2^(2+LMC()_CONFIG[IDLEPOWER]) CK cycles instead. Self refresh has priority
+ over precharge power-down.
+
+ Only valid when LMC()_EXT_CONFIG2[SREF_AUTO_ENABLE] is set. */
+ uint64_t sref_auto_enable : 1; /**< [ 21: 21](R/W) Enable automatic self-refresh mode.
+ This field should only be set after initialization.
+ When set, software must not issue self refresh enter commands (LMC_SEQ_SEL_E::SREF_ENTRY).
+
+ Internal:
+ FIXME The LMC_SEQ_SEL_E::SREF_ENTRY requirement can be removed in 98xx when bug28110 is closed. */
+ uint64_t delay_unload_r3 : 1; /**< [ 20: 20](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle later for Rank 3 reads.
+ Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D1_R1]. */
+ uint64_t delay_unload_r2 : 1; /**< [ 19: 19](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle later for Rank 2 reads.
+ Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D1_R0]. */
+ uint64_t delay_unload_r1 : 1; /**< [ 18: 18](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle later for Rank 1 reads.
+ Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D0_R1]. */
+ uint64_t delay_unload_r0 : 1; /**< [ 17: 17](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle later for Rank 0 reads.
+ Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D0_R0]. */
+ uint64_t early_dqx2 : 1; /**< [ 16: 16](R/W) Similar to LMC()_CONFIG[EARLY_DQX]. This field provides an additional setting to send DQx
+ signals one more CK cycle earlier on top of LMC()_CONFIG[EARLY_DQX]. */
+ uint64_t xor_bank_sel : 4; /**< [ 15: 12](R/W) When LMC()_CONTROL[XOR_BANK] is set to one, this field selects which
+ L2C-LMC address bits are used to XOR the bank bits with.
+ 0x0: bank\<3:0\> = address\<10:7\> ^ address\<15:12\>.
+ 0x1: bank\<3:0\> = address\<10:7\> ^ address\<13:10\>.
+ 0x2: bank\<3:0\> = address\<10:7\> ^ address\<14:11\>.
+ 0x3: bank\<3:0\> = address\<10:7\> ^ address\<16:13\>.
+ 0x4: bank\<3:0\> = address\<10:7\> ^ address\<17:14\>.
+ 0x5: bank\<3:0\> = address\<10:7\> ^ address\<18:15\>.
+ 0x6: bank\<3:0\> = address\<10:7\> ^ address\<22:19\>.
+ 0x7: bank\<3:0\> = address\<10:7\> ^ address\<23:20\>.
+ 0x8: bank\<3:0\> = address\<10:7\> ^ address\<26:23\>.
+ 0x9: bank\<3:0\> = address\<10:7\> ^ address\<27:24\>.
+ 0xA: bank\<3:0\> = address\<10:7\> ^ address\<30:27\>.
+ 0xB: bank\<3:0\> = address\<10:7\> ^ address\<31:28\>.
+ 0xC: bank\<3:0\> = address\<10:7\> ^ address\<32:29\>.
+ 0xD: bank\<3:0\> = address\<10:7\> ^ address\<35:32\>.
+ 0xE: bank\<3:0\> = address\<10:7\> ^ address\<36:33\>.
+ 0xF: Reserved. */
+ uint64_t reserved_10_11 : 2;
+ uint64_t row_col_switch : 1; /**< [ 9: 9](R/W) When set, the memory address bit position that represents bit 4 of the COLUMN
+ address (bit 5 in 32-bit mode) becomes the low order DDR ROW address bit.
+ The upper DDR COLUMN address portion is selected using LMC()_CONFIG[ROW_LSB]
+ (and LMC()_DUAL_MEMCFG[ROW_LSB] for dual-memory configuration).
+ It is recommended to set this bit to one when TRR_ON is set. */
+ uint64_t trr_on : 1; /**< [ 8: 8](R/W) When set, this enables counting of DRAM row activates, as used in
+ target row refresh mode. This bit can be safely set after
+ LMC()_EXT_CONFIG2[MACRAM_SCRUB_DONE] has a value of 1. */
+ uint64_t mac : 3; /**< [ 7: 5](R/W) Sets the maximum number of activates allowed within a tMAW interval.
+ 0x0 = 100K.
+ 0x1 = 400K/2.
+ 0x2 = 500K/2.
+ 0x3 = 600K/2.
+ 0x4 = 700K/2.
+ 0x5 = 800K/2.
+ 0x6 = 900K/2.
+ 0x7 = 1000K/2. */
+ uint64_t macram_scrub_done : 1; /**< [ 4: 4](RO/H) Maximum activate count memory scrub complete indication;
+ 1 means the memory has been scrubbed to all zero. */
+ uint64_t macram_scrub : 1; /**< [ 3: 3](WO) When set, the maximum activate count memory will be scrubbed to all zero values. This
+ should be done before enabling TRR mode by setting LMC()_EXT_CONFIG2[TRR_ON].
+ This is a one-shot operation; it automatically returns to 0 after a write to 1. */
+ uint64_t macram_flip_synd : 2; /**< [ 2: 1](R/W) Reserved.
+ Internal:
+ MAC RAM flip syndrome control bits. */
+ uint64_t macram_cor_dis : 1; /**< [ 0: 0](R/W) Reserved.
+ Internal:
+ MAC RAM correction disable control. */
+#else /* Word 0 - Little Endian */
+ uint64_t macram_cor_dis : 1; /**< [ 0: 0](R/W) Reserved.
+ Internal:
+ MAC RAM correction disable control. */
+ uint64_t macram_flip_synd : 2; /**< [ 2: 1](R/W) Reserved.
+ Internal:
+ MAC RAM flip syndrome control bits. */
+ uint64_t macram_scrub : 1; /**< [ 3: 3](WO) When set, the maximum activate count memory will be scrubbed to all zero values. This
+ should be done before enabling TRR mode by setting LMC()_EXT_CONFIG2[TRR_ON].
+ This is a one-shot operation; it automatically returns to 0 after a write to 1. */
+ uint64_t macram_scrub_done : 1; /**< [ 4: 4](RO/H) Maximum activate count memory scrub complete indication;
+ 1 means the memory has been scrubbed to all zero. */
+ uint64_t mac : 3; /**< [ 7: 5](R/W) Sets the maximum number of activates allowed within a tMAW interval.
+ 0x0 = 100K.
+ 0x1 = 400K/2.
+ 0x2 = 500K/2.
+ 0x3 = 600K/2.
+ 0x4 = 700K/2.
+ 0x5 = 800K/2.
+ 0x6 = 900K/2.
+ 0x7 = 1000K/2. */
+ uint64_t trr_on : 1; /**< [ 8: 8](R/W) When set, this enables counting of DRAM row activates, as used in
+ target row refresh mode. This bit can be safely set after
+ LMC()_EXT_CONFIG2[MACRAM_SCRUB_DONE] has a value of 1. */
+ uint64_t row_col_switch : 1; /**< [ 9: 9](R/W) When set, the memory address bit position that represents bit 4 of the COLUMN
+ address (bit 5 in 32-bit mode) becomes the low order DDR ROW address bit.
+ The upper DDR COLUMN address portion is selected using LMC()_CONFIG[ROW_LSB]
+ (and LMC()_DUAL_MEMCFG[ROW_LSB] for dual-memory configuration).
+ It is recommended to set this bit to one when TRR_ON is set. */
+ uint64_t reserved_10_11 : 2;
+ uint64_t xor_bank_sel : 4; /**< [ 15: 12](R/W) When LMC()_CONTROL[XOR_BANK] is set to one, this field selects which
+ L2C-LMC address bits are used to XOR the bank bits with.
+ 0x0: bank\<3:0\> = address\<10:7\> ^ address\<15:12\>.
+ 0x1: bank\<3:0\> = address\<10:7\> ^ address\<13:10\>.
+ 0x2: bank\<3:0\> = address\<10:7\> ^ address\<14:11\>.
+ 0x3: bank\<3:0\> = address\<10:7\> ^ address\<16:13\>.
+ 0x4: bank\<3:0\> = address\<10:7\> ^ address\<17:14\>.
+ 0x5: bank\<3:0\> = address\<10:7\> ^ address\<18:15\>.
+ 0x6: bank\<3:0\> = address\<10:7\> ^ address\<22:19\>.
+ 0x7: bank\<3:0\> = address\<10:7\> ^ address\<23:20\>.
+ 0x8: bank\<3:0\> = address\<10:7\> ^ address\<26:23\>.
+ 0x9: bank\<3:0\> = address\<10:7\> ^ address\<27:24\>.
+ 0xA: bank\<3:0\> = address\<10:7\> ^ address\<30:27\>.
+ 0xB: bank\<3:0\> = address\<10:7\> ^ address\<31:28\>.
+ 0xC: bank\<3:0\> = address\<10:7\> ^ address\<32:29\>.
+ 0xD: bank\<3:0\> = address\<10:7\> ^ address\<35:32\>.
+ 0xE: bank\<3:0\> = address\<10:7\> ^ address\<36:33\>.
+ 0xF: Reserved. */
+ uint64_t early_dqx2 : 1; /**< [ 16: 16](R/W) Similar to LMC()_CONFIG[EARLY_DQX]. This field provides an additional setting to send DQx
+ signals one more CK cycle earlier on top of LMC()_CONFIG[EARLY_DQX]. */
+ uint64_t delay_unload_r0 : 1; /**< [ 17: 17](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle later for Rank 0 reads.
+ Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D0_R0]. */
+ uint64_t delay_unload_r1 : 1; /**< [ 18: 18](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle later for Rank 1 reads.
+ Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D0_R1]. */
+ uint64_t delay_unload_r2 : 1; /**< [ 19: 19](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle later for Rank 2 reads.
+ Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D1_R0]. */
+ uint64_t delay_unload_r3 : 1; /**< [ 20: 20](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle later for Rank 3 reads.
+ Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D1_R1]. */
+ uint64_t sref_auto_enable : 1; /**< [ 21: 21](R/W) Enable automatic self-refresh mode.
+ This field should only be set after initialization.
+ When set, software must not issue self refresh enter commands (LMC_SEQ_SEL_E::SREF_ENTRY).
+
+ Internal:
+ FIXME The LMC_SEQ_SEL_E::SREF_ENTRY requirement can be removed in 98xx when bug28110 is closed. */
+ uint64_t sref_auto_idle_thres : 5; /**< [ 26: 22](R/W) Self-refresh idle threshold.
+ Enter self-refresh mode after the memory controller has been idle for
+ 2^([SREF_AUTO_IDLE_THRES]-1) * TREFI.
+ Where TREFI time is controlled by LMC()_CONFIG[REF_ZQCS_INT]\<6:0\>.
+
+ 0x0 = Automatic self refresh interval is controlled by
+ 2^(2+LMC()_CONFIG[IDLEPOWER]) CK cycles instead. Self refresh has priority
+ over precharge power-down.
+
+ Only valid when LMC()_EXT_CONFIG2[SREF_AUTO_ENABLE] is set. */
+ uint64_t reserved_27_63 : 37;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_lmcx_ext_config2_cn81xx cn83xx; */
+ struct bdk_lmcx_ext_config2_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_27_63 : 37;
+ uint64_t sref_auto_idle_thres : 5; /**< [ 26: 22](RO) Reserved. */
+ uint64_t sref_auto_enable : 1; /**< [ 21: 21](RO) Reserved. */
+ uint64_t delay_unload_r3 : 1; /**< [ 20: 20](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle later for Rank 3 reads.
+ Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D1_R1]. */
+ uint64_t delay_unload_r2 : 1; /**< [ 19: 19](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle later for Rank 2 reads.
+ Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D1_R0]. */
+ uint64_t delay_unload_r1 : 1; /**< [ 18: 18](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle later for Rank 1 reads.
+ Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D0_R1]. */
+ uint64_t delay_unload_r0 : 1; /**< [ 17: 17](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle later for Rank 0 reads.
+ Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D0_R0]. */
+ uint64_t early_dqx2 : 1; /**< [ 16: 16](R/W) Similar to LMC()_CONFIG[EARLY_DQX]. This field provides an additional setting to send DQx
+ signals one more CK cycle earlier on top of LMC()_CONFIG[EARLY_DQX]. */
+ uint64_t xor_bank_sel : 4; /**< [ 15: 12](R/W) When LMC()_CONTROL[XOR_BANK] is set to one, this field selects which
+ L2C-LMC address bits are used to XOR the bank bits with.
+ 0x0: bank\<3:0\> = address\<10:7\> ^ address\<15:12\>.
+ 0x1: bank\<3:0\> = address\<10:7\> ^ address\<13:10\>.
+ 0x2: bank\<3:0\> = address\<10:7\> ^ address\<14:11\>.
+ 0x3: bank\<3:0\> = address\<10:7\> ^ address\<16:13\>.
+ 0x4: bank\<3:0\> = address\<10:7\> ^ address\<17:14\>.
+ 0x5: bank\<3:0\> = address\<10:7\> ^ address\<18:15\>.
+ 0x6: bank\<3:0\> = address\<10:7\> ^ address\<22:19\>.
+ 0x7: bank\<3:0\> = address\<10:7\> ^ address\<23:20\>.
+ 0x8: bank\<3:0\> = address\<10:7\> ^ address\<26:23\>.
+ 0x9: bank\<3:0\> = address\<10:7\> ^ address\<27:24\>.
+ 0xA: bank\<3:0\> = address\<10:7\> ^ address\<30:27\>.
+ 0xB: bank\<3:0\> = address\<10:7\> ^ address\<31:28\>.
+ 0xC: bank\<3:0\> = address\<10:7\> ^ address\<32:29\>.
+ 0xD: bank\<3:0\> = address\<10:7\> ^ address\<35:32\>.
+ 0xE: bank\<3:0\> = address\<10:7\> ^ address\<36:33\>.
+ 0xF: Reserved. */
+ uint64_t reserved_10_11 : 2;
+ uint64_t row_col_switch : 1; /**< [ 9: 9](R/W) When set, the memory address bit position that represents bit 4 of the COLUMN
+ address (bit 5 in 32-bit mode) becomes the low order DDR ROW address bit.
+ The upper DDR COLUMN address portion is selected using LMC()_CONFIG[ROW_LSB]
+ (and LMC()_DUAL_MEMCFG[ROW_LSB] for dual-memory configuration).
+ It is recommended to set this bit to one when TRR_ON is set. */
+ uint64_t trr_on : 1; /**< [ 8: 8](R/W) When set, this enables counting of DRAM row activates, as used in
+ target row refresh mode. This bit can be safely set after
+ LMC()_EXT_CONFIG2[MACRAM_SCRUB_DONE] has a value of 1. */
+ uint64_t mac : 3; /**< [ 7: 5](R/W) Sets the maximum number of activates allowed within a tMAW interval.
+ 0x0 = 100K.
+ 0x1 = 400K/2.
+ 0x2 = 500K/2.
+ 0x3 = 600K/2.
+ 0x4 = 700K/2.
+ 0x5 = 800K/2.
+ 0x6 = 900K/2.
+ 0x7 = 1000K/2. */
+ uint64_t macram_scrub_done : 1; /**< [ 4: 4](RO/H) Maximum activate count memory scrub complete indication;
+ 1 means the memory has been scrubbed to all zero. */
+ uint64_t macram_scrub : 1; /**< [ 3: 3](WO) When set, the maximum activate count memory will be scrubbed to all zero values. This
+ should be done before enabling TRR mode by setting LMC()_EXT_CONFIG2[TRR_ON].
+ This is a one-shot operation; it automatically returns to 0 after a write to 1. */
+ uint64_t macram_flip_synd : 2; /**< [ 2: 1](R/W) Reserved.
+ Internal:
+ MAC RAM flip syndrome control bits. */
+ uint64_t macram_cor_dis : 1; /**< [ 0: 0](R/W) Reserved.
+ Internal:
+ MAC RAM correction disable control. */
+#else /* Word 0 - Little Endian */
+ uint64_t macram_cor_dis : 1; /**< [ 0: 0](R/W) Reserved.
+ Internal:
+ MAC RAM correction disable control. */
+ uint64_t macram_flip_synd : 2; /**< [ 2: 1](R/W) Reserved.
+ Internal:
+ MAC RAM flip syndrome control bits. */
+ uint64_t macram_scrub : 1; /**< [ 3: 3](WO) When set, the maximum activate count memory will be scrubbed to all zero values. This
+ should be done before enabling TRR mode by setting LMC()_EXT_CONFIG2[TRR_ON].
+ This is a one-shot operation; it automatically returns to 0 after a write to 1. */
+ uint64_t macram_scrub_done : 1; /**< [ 4: 4](RO/H) Maximum activate count memory scrub complete indication;
+ 1 means the memory has been scrubbed to all zero. */
+ uint64_t mac : 3; /**< [ 7: 5](R/W) Sets the maximum number of activates allowed within a tMAW interval.
+ 0x0 = 100K.
+ 0x1 = 400K/2.
+ 0x2 = 500K/2.
+ 0x3 = 600K/2.
+ 0x4 = 700K/2.
+ 0x5 = 800K/2.
+ 0x6 = 900K/2.
+ 0x7 = 1000K/2. */
+ uint64_t trr_on : 1; /**< [ 8: 8](R/W) When set, this enables counting of DRAM row activates, as used in
+ target row refresh mode. This bit can be safely set after
+ LMC()_EXT_CONFIG2[MACRAM_SCRUB_DONE] has a value of 1. */
+ uint64_t row_col_switch : 1; /**< [ 9: 9](R/W) When set, the memory address bit position that represents bit 4 of the COLUMN
+ address (bit 5 in 32-bit mode) becomes the low order DDR ROW address bit.
+ The upper DDR COLUMN address portion is selected using LMC()_CONFIG[ROW_LSB]
+ (and LMC()_DUAL_MEMCFG[ROW_LSB] for dual-memory configuration).
+ It is recommended to set this bit to one when TRR_ON is set. */
+ uint64_t reserved_10_11 : 2;
+ uint64_t xor_bank_sel : 4; /**< [ 15: 12](R/W) When LMC()_CONTROL[XOR_BANK] is set to one, this field selects which
+ L2C-LMC address bits are used to XOR the bank bits with.
+ 0x0: bank\<3:0\> = address\<10:7\> ^ address\<15:12\>.
+ 0x1: bank\<3:0\> = address\<10:7\> ^ address\<13:10\>.
+ 0x2: bank\<3:0\> = address\<10:7\> ^ address\<14:11\>.
+ 0x3: bank\<3:0\> = address\<10:7\> ^ address\<16:13\>.
+ 0x4: bank\<3:0\> = address\<10:7\> ^ address\<17:14\>.
+ 0x5: bank\<3:0\> = address\<10:7\> ^ address\<18:15\>.
+ 0x6: bank\<3:0\> = address\<10:7\> ^ address\<22:19\>.
+ 0x7: bank\<3:0\> = address\<10:7\> ^ address\<23:20\>.
+ 0x8: bank\<3:0\> = address\<10:7\> ^ address\<26:23\>.
+ 0x9: bank\<3:0\> = address\<10:7\> ^ address\<27:24\>.
+ 0xA: bank\<3:0\> = address\<10:7\> ^ address\<30:27\>.
+ 0xB: bank\<3:0\> = address\<10:7\> ^ address\<31:28\>.
+ 0xC: bank\<3:0\> = address\<10:7\> ^ address\<32:29\>.
+ 0xD: bank\<3:0\> = address\<10:7\> ^ address\<35:32\>.
+ 0xE: bank\<3:0\> = address\<10:7\> ^ address\<36:33\>.
+ 0xF: Reserved. */
+ uint64_t early_dqx2 : 1; /**< [ 16: 16](R/W) Similar to LMC()_CONFIG[EARLY_DQX]. This field provides an additional setting to send DQx
+ signals one more CK cycle earlier on top of LMC()_CONFIG[EARLY_DQX]. */
+ uint64_t delay_unload_r0 : 1; /**< [ 17: 17](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle later for Rank 0 reads.
+ Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D0_R0]. */
+ uint64_t delay_unload_r1 : 1; /**< [ 18: 18](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle later for Rank 1 reads.
+ Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D0_R1]. */
+ uint64_t delay_unload_r2 : 1; /**< [ 19: 19](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle later for Rank 2 reads.
+ Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D1_R0]. */
+ uint64_t delay_unload_r3 : 1; /**< [ 20: 20](R/W) Reserved, MBZ.
+ Internal:
+ When set, unload the PHY silo one cycle later for Rank 3 reads.
+ Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D1_R1]. */
+ uint64_t sref_auto_enable : 1; /**< [ 21: 21](RO) Reserved. */
+ uint64_t sref_auto_idle_thres : 5; /**< [ 26: 22](RO) Reserved. */
+ uint64_t reserved_27_63 : 37;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_lmcx_ext_config2 bdk_lmcx_ext_config2_t;
+
+static inline uint64_t BDK_LMCX_EXT_CONFIG2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_EXT_CONFIG2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000090ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000090ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000090ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000090ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_EXT_CONFIG2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_EXT_CONFIG2(a) bdk_lmcx_ext_config2_t
+#define bustype_BDK_LMCX_EXT_CONFIG2(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_EXT_CONFIG2(a) "LMCX_EXT_CONFIG2"
+#define device_bar_BDK_LMCX_EXT_CONFIG2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_EXT_CONFIG2(a) (a)
+#define arguments_BDK_LMCX_EXT_CONFIG2(a) (a),-1,-1,-1
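+
+/* A minimal usage sketch (illustrative only; the helper name is hypothetical,
+ * and BDK_CSR_MODIFY/BDK_CSR_WAIT_FOR_FIELD are assumed to be the usual
+ * bdk-csr.h helpers). It follows the TRR bring-up order documented in the
+ * CN8XXX field notes above: scrub the MAC RAM, wait for the scrub to finish,
+ * then enable TRR with ROW_COL_SWITCH as recommended. */
+static inline int lmc_example_enable_trr(bdk_node_t node, int lmc)
+{
+    /* One-shot scrub of the maximum activate count RAM. */
+    BDK_CSR_MODIFY(c, node, BDK_LMCX_EXT_CONFIG2(lmc),
+        c.cn88xxp1.macram_scrub = 1);
+    /* TRR_ON may only be set safely once MACRAM_SCRUB_DONE reads 1. */
+    if (BDK_CSR_WAIT_FOR_FIELD(node, BDK_LMCX_EXT_CONFIG2(lmc),
+                               cn88xxp1.macram_scrub_done, ==, 1, 10000))
+        return -1; /* scrub timed out */
+    BDK_CSR_MODIFY(c, node, BDK_LMCX_EXT_CONFIG2(lmc),
+        c.cn88xxp1.trr_on = 1;
+        c.cn88xxp1.row_col_switch = 1);
+    return 0;
+}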
+
+/**
+ * Register (RSL) lmc#_fadr
+ *
+ * LMC Failing (SEC/DED/NXM) Address Register
+ * This register captures only the first transaction with ECC errors. A DED error can overwrite
+ * this register with its failing address if the first error was a SEC. Writing
+ * LMC()_INT -\> SEC_ERR/DED_ERR clears the error bits and captures the next failing
+ * address. If FDIMM is 1, the error is in the high DIMM.
+ * LMC()_FADR captures the failing prescrambled address location (split into DIMM, bunk,
+ * bank, etc.). If scrambling is off, then LMC()_FADR also captures the failing physical
+ * location in the DRAM parts. LMC()_SCRAMBLED_FADR captures the actual failing address
+ * location in the physical DRAM parts, i.e.:
+ * * If scrambling is on, LMC()_SCRAMBLED_FADR contains the failing physical location in the
+ * DRAM parts (split into DIMM, bunk, bank, etc.).
+ * * If scrambling is off, the prescramble and postscramble addresses are the same, and so the
+ * contents of LMC()_SCRAMBLED_FADR match the contents of LMC()_FADR.
+ */
+union bdk_lmcx_fadr
+{
+ uint64_t u;
+ struct bdk_lmcx_fadr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_43_63 : 21;
+ uint64_t fcid : 3; /**< [ 42: 40](RO/H) Reserved.
+ Internal:
+ Failing CID number. This field is only valid when interfacing with 3DS DRAMs (i.e., when
+ LMC()_EXT_CONFIG[DIMM0_CID] is nonzero). Returns a value of zero otherwise. */
+ uint64_t fill_order : 2; /**< [ 39: 38](RO/H) Fill order for failing transaction. */
+ uint64_t fdimm : 1; /**< [ 37: 37](RO/H) Failing DIMM number. */
+ uint64_t fbunk : 1; /**< [ 36: 36](RO/H) Failing rank number. */
+ uint64_t fbank : 4; /**< [ 35: 32](RO/H) Failing bank number, bits \<3:0\>. */
+ uint64_t frow : 18; /**< [ 31: 14](RO/H) Failing row address, bits \<17:0\>. */
+ uint64_t fcol : 14; /**< [ 13: 0](RO/H) Failing column address \<13:0\>. Technically, represents the address of the 64b data that
+ had an ECC error, i.e. FCOL[0] is always 0. Can be used in conjunction with
+ LMC()_INT[DED_ERR] to isolate the 64b chunk of data in error. */
+#else /* Word 0 - Little Endian */
+ uint64_t fcol : 14; /**< [ 13: 0](RO/H) Failing column address \<13:0\>. Technically, represents the address of the 64b data that
+ had an ECC error, i.e. FCOL[0] is always 0. Can be used in conjunction with
+ LMC()_INT[DED_ERR] to isolate the 64b chunk of data in error. */
+ uint64_t frow : 18; /**< [ 31: 14](RO/H) Failing row address, bits \<17:0\>. */
+ uint64_t fbank : 4; /**< [ 35: 32](RO/H) Failing bank number, bits \<3:0\>. */
+ uint64_t fbunk : 1; /**< [ 36: 36](RO/H) Failing rank number. */
+ uint64_t fdimm : 1; /**< [ 37: 37](RO/H) Failing DIMM number. */
+ uint64_t fill_order : 2; /**< [ 39: 38](RO/H) Fill order for failing transaction. */
+ uint64_t fcid : 3; /**< [ 42: 40](RO/H) Reserved.
+ Internal:
+ Failing CID number. This field is only valid when interfacing with 3DS DRAMs (i.e., when
+ LMC()_EXT_CONFIG[DIMM0_CID] is nonzero). Returns a value of zero otherwise. */
+ uint64_t reserved_43_63 : 21;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_fadr_s cn81xx; */
+ struct bdk_lmcx_fadr_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_43_63 : 21;
+ uint64_t fcid : 3; /**< [ 42: 40](RO/H) Reserved.
+ Internal:
+ Failing CID number. This field is only valid when interfacing with 3DS DRAMs
+ (i.e. when either LMC()_EXT_CONFIG[DIMM0_CID] or LMC()_EXT_CONFIG[DIMM1_CID] is
+ nonzero). Returns a value of zero otherwise. */
+ uint64_t fill_order : 2; /**< [ 39: 38](RO/H) Fill order for failing transaction. */
+ uint64_t fdimm : 1; /**< [ 37: 37](RO/H) Failing DIMM number. */
+ uint64_t fbunk : 1; /**< [ 36: 36](RO/H) Failing rank number. */
+ uint64_t fbank : 4; /**< [ 35: 32](RO/H) Failing bank number, bits \<3:0\>. */
+ uint64_t frow : 18; /**< [ 31: 14](RO/H) Failing row address, bits \<17:0\>. */
+ uint64_t fcol : 14; /**< [ 13: 0](RO/H) Failing column address \<13:0\>. Technically, represents the address of the 64b data that
+ had an ECC error, i.e. FCOL[0] is always 0. Can be used in conjunction with
+ LMC()_INT[DED_ERR] to isolate the 64b chunk of data in error. */
+#else /* Word 0 - Little Endian */
+ uint64_t fcol : 14; /**< [ 13: 0](RO/H) Failing column address \<13:0\>. Technically, represents the address of the 64b data that
+ had an ECC error, i.e. FCOL[0] is always 0. Can be used in conjunction with
+ LMC()_INT[DED_ERR] to isolate the 64b chunk of data in error. */
+ uint64_t frow : 18; /**< [ 31: 14](RO/H) Failing row address, bits \<17:0\>. */
+ uint64_t fbank : 4; /**< [ 35: 32](RO/H) Failing bank number, bits \<3:0\>. */
+ uint64_t fbunk : 1; /**< [ 36: 36](RO/H) Failing rank number. */
+ uint64_t fdimm : 1; /**< [ 37: 37](RO/H) Failing DIMM number. */
+ uint64_t fill_order : 2; /**< [ 39: 38](RO/H) Fill order for failing transaction. */
+ uint64_t fcid : 3; /**< [ 42: 40](RO/H) Reserved.
+ Internal:
+ Failing CID number. This field is only valid when interfacing with 3DS DRAMs
+ (i.e. when either LMC()_EXT_CONFIG[DIMM0_CID] or LMC()_EXT_CONFIG[DIMM1_CID] is
+ nonzero). Returns a value of zero otherwise. */
+ uint64_t reserved_43_63 : 21;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_lmcx_fadr_cn88xx cn83xx; */
+};
+typedef union bdk_lmcx_fadr bdk_lmcx_fadr_t;
+
+static inline uint64_t BDK_LMCX_FADR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_FADR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000020ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000020ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000020ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_FADR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_FADR(a) bdk_lmcx_fadr_t
+#define bustype_BDK_LMCX_FADR(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_FADR(a) "LMCX_FADR"
+#define device_bar_BDK_LMCX_FADR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_FADR(a) (a)
+#define arguments_BDK_LMCX_FADR(a) (a),-1,-1,-1
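+
+/* A minimal usage sketch (illustrative only; the helper name is hypothetical,
+ * and BDK_CSR_INIT is assumed to be the usual bdk-csr.h helper). It decodes
+ * the failing location captured above after an ECC error interrupt. */
+static inline void lmc_example_report_fadr(bdk_node_t node, int lmc)
+{
+    BDK_CSR_INIT(fadr, node, BDK_LMCX_FADR(lmc));
+    /* FCOL[0] is always 0; the address identifies the failing 64b chunk. */
+    printf("LMC%d ECC failure: DIMM%d rank%d bank%d row 0x%x col 0x%x\n",
+           lmc, (int)fadr.s.fdimm, (int)fadr.s.fbunk, (int)fadr.s.fbank,
+           (unsigned)fadr.s.frow, (unsigned)fadr.s.fcol);
+}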
+
+/**
+ * Register (RSL) lmc#_ffe_ctle_ctl
+ *
+ * LMC FFE & CTLE Control Register
+ */
+union bdk_lmcx_ffe_ctle_ctl
+{
+ uint64_t u;
+ struct bdk_lmcx_ffe_ctle_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_27_63 : 37;
+ uint64_t ctle_mem_ld : 9; /**< [ 26: 18](WO) Reserved.
+ Internal:
+ When set, the CTLE settings are loaded into the chosen rank's byte CTLE
+ storage. Bits 18-25 correspond to bytes 0-7, bit 26 corresponds to ECC. The byte
+ targeted will load its corresponding value from the CSR
+ LMC()_FFE_CTLE_SETTINGS[CTLE*]. The rank is chosen by the CSR
+ LMC()_MR_MPR_CTL[MR_WR_RANK]. This is a one-shot operation and clears itself each
+ time it is set. Note this has to be done during bring-up, before there
+ is any traffic to DRAM. */
+ uint64_t ffe_mem_ld : 9; /**< [ 17: 9](WO) Reserved.
+ Internal:
+ When set, the FFE settings are loaded into the chosen rank's byte FFE
+ storage. Bits 9-16 correspond to bytes 0-7, bit 17 corresponds to ECC. The byte
+ targeted will load its corresponding value from the CSR
+ LMC()_FFE_CTLE_SETTINGS[FFE*]. The rank is chosen by the CSR
+ LMC()_MR_MPR_CTL[MR_WR_RANK]. This is a one-shot operation and clears itself each
+ time it is set. Note this has to be done during bring-up, before there
+ is any traffic to DRAM. */
+ uint64_t ffe_enable : 9; /**< [ 8: 0](R/W) When set, it enables the FFE feature per byte.
+ Bits 0-7 correspond to bytes 0-7, bit 8 corresponds to ECC. */
+#else /* Word 0 - Little Endian */
+ uint64_t ffe_enable : 9; /**< [ 8: 0](R/W) When set, it enables the FFE feature per byte.
+ Bits 0-7 correspond to bytes 0-7, bit 8 corresponds to ECC. */
+ uint64_t ffe_mem_ld : 9; /**< [ 17: 9](WO) Reserved.
+ Internal:
+ When set, the FFE settings are loaded into the chosen rank's byte FFE
+ storage. Bits 9-16 correspond to bytes 0-7, bit 17 corresponds to ECC. The byte
+ targeted will load its corresponding value from the CSR
+ LMC()_FFE_CTLE_SETTINGS[FFE*]. The rank is chosen by the CSR
+ LMC()_MR_MPR_CTL[MR_WR_RANK]. This is a one-shot operation and clears itself each
+ time it is set. Note this has to be done during bring-up, before there
+ is any traffic to DRAM. */
+ uint64_t ctle_mem_ld : 9; /**< [ 26: 18](WO) Reserved.
+ Internal:
+ When set, the CTLE settings are loaded into the chosen rank's byte CTLE
+ storage. Bits 18-25 correspond to bytes 0-7, bit 26 corresponds to ECC. The byte
+ targeted will load its corresponding value from the CSR
+ LMC()_FFE_CTLE_SETTINGS[CTLE*]. The rank is chosen by the CSR
+ LMC()_MR_MPR_CTL[MR_WR_RANK]. This is a one-shot operation and clears itself each
+ time it is set. Note this has to be done during bring-up, before there
+ is any traffic to DRAM. */
+ uint64_t reserved_27_63 : 37;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_ffe_ctle_ctl_s cn; */
+};
+typedef union bdk_lmcx_ffe_ctle_ctl bdk_lmcx_ffe_ctle_ctl_t;
+
+static inline uint64_t BDK_LMCX_FFE_CTLE_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_FFE_CTLE_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e0880002f0ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_FFE_CTLE_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_FFE_CTLE_CTL(a) bdk_lmcx_ffe_ctle_ctl_t
+#define bustype_BDK_LMCX_FFE_CTLE_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_FFE_CTLE_CTL(a) "LMCX_FFE_CTLE_CTL"
+#define device_bar_BDK_LMCX_FFE_CTLE_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_FFE_CTLE_CTL(a) (a)
+#define arguments_BDK_LMCX_FFE_CTLE_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) lmc#_ffe_ctle_settings
+ *
+ * LMC FFE & CTLE Settings Register
+ */
+union bdk_lmcx_ffe_ctle_settings
+{
+ uint64_t u;
+ struct bdk_lmcx_ffe_ctle_settings_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_45_63 : 19;
+ uint64_t ctle8 : 2; /**< [ 44: 43](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
+ uint64_t ctle7 : 2; /**< [ 42: 41](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
+ uint64_t ctle6 : 2; /**< [ 40: 39](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
+ uint64_t ctle5 : 2; /**< [ 38: 37](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
+ uint64_t ctle4 : 2; /**< [ 36: 35](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
+ uint64_t ctle3 : 2; /**< [ 34: 33](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
+ uint64_t ctle2 : 2; /**< [ 32: 31](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
+ uint64_t ctle1 : 2; /**< [ 30: 29](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
+ uint64_t ctle0 : 2; /**< [ 28: 27](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
+ uint64_t ffe8 : 3; /**< [ 26: 24](R/W) Provides FFE TX calibration store per byte per rank. */
+ uint64_t ffe7 : 3; /**< [ 23: 21](R/W) Provides FFE TX calibration store per byte per rank. */
+ uint64_t ffe6 : 3; /**< [ 20: 18](R/W) Provides FFE TX calibration store per byte per rank. */
+ uint64_t ffe5 : 3; /**< [ 17: 15](R/W) Provides FFE TX calibration store per byte per rank. */
+ uint64_t ffe4 : 3; /**< [ 14: 12](R/W) Provides FFE TX calibration store per byte per rank. */
+ uint64_t ffe3 : 3; /**< [ 11: 9](R/W) Provides FFE TX calibration store per byte per rank. */
+ uint64_t ffe2 : 3; /**< [ 8: 6](R/W) Provides FFE TX calibration store per byte per rank. */
+ uint64_t ffe1 : 3; /**< [ 5: 3](R/W) Provides FFE TX calibration store per byte per rank. */
+ uint64_t ffe0 : 3; /**< [ 2: 0](R/W) Provides FFE TX calibration store per byte per rank. */
+#else /* Word 0 - Little Endian */
+ uint64_t ffe0 : 3; /**< [ 2: 0](R/W) Provides FFE TX calibration store per byte per rank. */
+ uint64_t ffe1 : 3; /**< [ 5: 3](R/W) Provides FFE TX calibration store per byte per rank. */
+ uint64_t ffe2 : 3; /**< [ 8: 6](R/W) Provides FFE TX calibration store per byte per rank. */
+ uint64_t ffe3 : 3; /**< [ 11: 9](R/W) Provides FFE TX calibration store per byte per rank. */
+ uint64_t ffe4 : 3; /**< [ 14: 12](R/W) Provides FFE TX calibration store per byte per rank. */
+ uint64_t ffe5 : 3; /**< [ 17: 15](R/W) Provides FFE TX calibration store per byte per rank. */
+ uint64_t ffe6 : 3; /**< [ 20: 18](R/W) Provides FFE TX calibration store per byte per rank. */
+ uint64_t ffe7 : 3; /**< [ 23: 21](R/W) Provides FFE TX calibration store per byte per rank. */
+ uint64_t ffe8 : 3; /**< [ 26: 24](R/W) Provides FFE TX calibration store per byte per rank. */
+ uint64_t ctle0 : 2; /**< [ 28: 27](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
+ uint64_t ctle1 : 2; /**< [ 30: 29](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
+ uint64_t ctle2 : 2; /**< [ 32: 31](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
+ uint64_t ctle3 : 2; /**< [ 34: 33](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
+ uint64_t ctle4 : 2; /**< [ 36: 35](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
+ uint64_t ctle5 : 2; /**< [ 38: 37](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
+ uint64_t ctle6 : 2; /**< [ 40: 39](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
+ uint64_t ctle7 : 2; /**< [ 42: 41](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
+ uint64_t ctle8 : 2; /**< [ 44: 43](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
+ uint64_t reserved_45_63 : 19;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_ffe_ctle_settings_s cn; */
+};
+typedef union bdk_lmcx_ffe_ctle_settings bdk_lmcx_ffe_ctle_settings_t;
+
+static inline uint64_t BDK_LMCX_FFE_CTLE_SETTINGS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_FFE_CTLE_SETTINGS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e0880002e0ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_FFE_CTLE_SETTINGS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_FFE_CTLE_SETTINGS(a) bdk_lmcx_ffe_ctle_settings_t
+#define bustype_BDK_LMCX_FFE_CTLE_SETTINGS(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_FFE_CTLE_SETTINGS(a) "LMCX_FFE_CTLE_SETTINGS"
+#define device_bar_BDK_LMCX_FFE_CTLE_SETTINGS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_FFE_CTLE_SETTINGS(a) (a)
+#define arguments_BDK_LMCX_FFE_CTLE_SETTINGS(a) (a),-1,-1,-1
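+
+/* A minimal usage sketch (illustrative only; the helper name is hypothetical,
+ * and it assumes LMC()_MR_MPR_CTL -- defined elsewhere in this header --
+ * exposes an s.mr_wr_rank field as referenced in the notes above). Per those
+ * notes: pick the rank, program the settings, then pulse the one-shot load
+ * bits, all during bring-up before any DRAM traffic. */
+static inline void lmc_example_load_ffe(bdk_node_t node, int lmc,
+                                        int rank, uint64_t settings)
+{
+    BDK_CSR_MODIFY(c, node, BDK_LMCX_MR_MPR_CTL(lmc),
+        c.s.mr_wr_rank = rank);            /* choose the target rank */
+    BDK_CSR_WRITE(node, BDK_LMCX_FFE_CTLE_SETTINGS(lmc), settings);
+    BDK_CSR_MODIFY(c, node, BDK_LMCX_FFE_CTLE_CTL(lmc),
+        c.s.ffe_mem_ld = 0x1ff);           /* one-shot: all 9 byte lanes */
+}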
+
+/**
+ * Register (RSL) lmc#_general_purpose0
+ *
+ * LMC General Purpose Register
+ */
+union bdk_lmcx_general_purpose0
+{
+ uint64_t u;
+ struct bdk_lmcx_general_purpose0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) General purpose data register. See LMC()_PPR_CTL and LMC()_DBTRAIN_CTL[RW_TRAIN]. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) General purpose data register. See LMC()_PPR_CTL and LMC()_DBTRAIN_CTL[RW_TRAIN]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_general_purpose0_s cn; */
+};
+typedef union bdk_lmcx_general_purpose0 bdk_lmcx_general_purpose0_t;
+
+static inline uint64_t BDK_LMCX_GENERAL_PURPOSE0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_GENERAL_PURPOSE0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000340ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000340ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000340ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000340ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_GENERAL_PURPOSE0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_GENERAL_PURPOSE0(a) bdk_lmcx_general_purpose0_t
+#define bustype_BDK_LMCX_GENERAL_PURPOSE0(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_GENERAL_PURPOSE0(a) "LMCX_GENERAL_PURPOSE0"
+#define device_bar_BDK_LMCX_GENERAL_PURPOSE0(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_GENERAL_PURPOSE0(a) (a)
+#define arguments_BDK_LMCX_GENERAL_PURPOSE0(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) lmc#_general_purpose1
+ *
+ * LMC General Purpose 1 Register
+ */
+union bdk_lmcx_general_purpose1
+{
+ uint64_t u;
+ struct bdk_lmcx_general_purpose1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) General purpose data register. See LMC()_PPR_CTL and LMC()_DBTRAIN_CTL[RW_TRAIN]. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) General purpose data register. See LMC()_PPR_CTL and LMC()_DBTRAIN_CTL[RW_TRAIN]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_general_purpose1_s cn; */
+};
+typedef union bdk_lmcx_general_purpose1 bdk_lmcx_general_purpose1_t;
+
+static inline uint64_t BDK_LMCX_GENERAL_PURPOSE1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_GENERAL_PURPOSE1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000348ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000348ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000348ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000348ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_GENERAL_PURPOSE1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_GENERAL_PURPOSE1(a) bdk_lmcx_general_purpose1_t
+#define bustype_BDK_LMCX_GENERAL_PURPOSE1(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_GENERAL_PURPOSE1(a) "LMCX_GENERAL_PURPOSE1"
+#define device_bar_BDK_LMCX_GENERAL_PURPOSE1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_GENERAL_PURPOSE1(a) (a)
+#define arguments_BDK_LMCX_GENERAL_PURPOSE1(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) lmc#_general_purpose2
+ *
+ * LMC General Purpose 2 Register
+ */
+union bdk_lmcx_general_purpose2
+{
+ uint64_t u;
+ struct bdk_lmcx_general_purpose2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t data : 16; /**< [ 15: 0](R/W) General purpose data register. See LMC()_PPR_CTL and LMC()_DBTRAIN_CTL[RW_TRAIN]. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 16; /**< [ 15: 0](R/W) General purpose data register. See LMC()_PPR_CTL and LMC()_DBTRAIN_CTL[RW_TRAIN]. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_general_purpose2_s cn; */
+};
+typedef union bdk_lmcx_general_purpose2 bdk_lmcx_general_purpose2_t;
+
+static inline uint64_t BDK_LMCX_GENERAL_PURPOSE2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_GENERAL_PURPOSE2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000350ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000350ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000350ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000350ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_GENERAL_PURPOSE2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_GENERAL_PURPOSE2(a) bdk_lmcx_general_purpose2_t
+#define bustype_BDK_LMCX_GENERAL_PURPOSE2(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_GENERAL_PURPOSE2(a) "LMCX_GENERAL_PURPOSE2"
+#define device_bar_BDK_LMCX_GENERAL_PURPOSE2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_GENERAL_PURPOSE2(a) (a)
+#define arguments_BDK_LMCX_GENERAL_PURPOSE2(a) (a),-1,-1,-1
+
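+/* Hedged sketch, not BDK code: the three general-purpose registers together
+ * hold the data pattern consumed by the PPR/DBTRAIN engines per LMC()_PPR_CTL
+ * and LMC()_DBTRAIN_CTL[RW_TRAIN]; plausibly 128 data bits in GP0/GP1 plus 16
+ * more (e.g. ECC) in GP2, though that split is an assumption here. Assumes the
+ * BDK_CSR_WRITE macro and bdk_node_t from bdk.h in this import. */
+static inline void lmc_load_train_pattern(bdk_node_t node, int lmc,
+                                          uint64_t lo, uint64_t hi, uint16_t top)
+{
+    BDK_CSR_WRITE(node, BDK_LMCX_GENERAL_PURPOSE0(lmc), lo);  /* bits 63:0 */
+    BDK_CSR_WRITE(node, BDK_LMCX_GENERAL_PURPOSE1(lmc), hi);  /* bits 127:64 */
+    BDK_CSR_WRITE(node, BDK_LMCX_GENERAL_PURPOSE2(lmc), top); /* bits 143:128 */
+}
+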
+/**
+ * Register (RSL) lmc#_ifb_cnt
+ *
+ * LMC IFB Performance Counter Register
+ */
+union bdk_lmcx_ifb_cnt
+{
+ uint64_t u;
+ struct bdk_lmcx_ifb_cnt_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ifbcnt : 64; /**< [ 63: 0](RO/H) Performance counter. 64-bit counter that increments every CK cycle that there is something
+ in the in-flight buffer. */
+#else /* Word 0 - Little Endian */
+ uint64_t ifbcnt : 64; /**< [ 63: 0](RO/H) Performance counter. 64-bit counter that increments every CK cycle that there is something
+ in the in-flight buffer. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_ifb_cnt_s cn; */
+};
+typedef union bdk_lmcx_ifb_cnt bdk_lmcx_ifb_cnt_t;
+
+static inline uint64_t BDK_LMCX_IFB_CNT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_IFB_CNT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0880001d0ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0880001d0ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e0880001d0ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e0880001d0ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_IFB_CNT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_IFB_CNT(a) bdk_lmcx_ifb_cnt_t
+#define bustype_BDK_LMCX_IFB_CNT(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_IFB_CNT(a) "LMCX_IFB_CNT"
+#define device_bar_BDK_LMCX_IFB_CNT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_IFB_CNT(a) (a)
+#define arguments_BDK_LMCX_IFB_CNT(a) (a),-1,-1,-1
+
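+/* Hedged sketch, not BDK code: sampling the in-flight-buffer counter over an
+ * interval. [IFBCNT] increments once per CK cycle in which the IFB holds
+ * anything, so the delta approximates busy CK cycles; at 64 bits, wraparound
+ * is not a practical concern. Assumes BDK_CSR_READ and bdk_wait_usec() from
+ * this import. */
+static inline uint64_t lmc_ifb_busy_cycles(bdk_node_t node, int lmc, uint64_t usec)
+{
+    uint64_t start = BDK_CSR_READ(node, BDK_LMCX_IFB_CNT(lmc));
+    bdk_wait_usec(usec);
+    return BDK_CSR_READ(node, BDK_LMCX_IFB_CNT(lmc)) - start;
+}
+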
+/**
+ * Register (RSL) lmc#_int
+ *
+ * LMC Interrupt Register
+ * This register contains the interrupt-summary bits of the LMC.
+ */
+union bdk_lmcx_int
+{
+ uint64_t u;
+ struct bdk_lmcx_int_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_15_63 : 49;
+ uint64_t ddr_alert_sat : 1; /**< [ 14: 14](R/W1C/H) Error threshold set by LMC()_RETRY_CONFIG[MAX_ERRORS] is reached. */
+ uint64_t macram_ded_err : 1; /**< [ 13: 13](R/W1C/H) Reserved. */
+ uint64_t macram_sec_err : 1; /**< [ 12: 12](R/W1C/H) Reserved. */
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1C/H) DDR RAM error alert interrupt.
+ Asserts whenever the corresponding DDR*_ERROR_ALERT_L pin (e.g. DDR4 ALERT_n)
+ asserts.
+
+ If LMC is auto-retrying address parity and/or write CRC errors, i.e. if
+ LMC()_RETRY_CONFIG[RETRY_ENABLE,AUTO_ERROR_CONTINUE]=1,1
+ (LMC()_MODEREG_PARAMS3[CA_PAR_PERS] should also be set - the DRAM should
+ be in persistent parity error mode), then the DDR_ERR interrupt routine
+ should:
+ \<pre\>
+ X = LMC()_RETRY_STATUS[ERROR_COUNT]
+ do {
+ Y = X
+ Wait approximately 100ns
+ Write a one to [DDR_ERR] to clear it (if set)
+ X = LMC()_RETRY_STATUS[ERROR_COUNT]
+ } while (X != Y);
+ Write LMC()_RETRY_STATUS[CLEAR_ERROR_COUNT]=1 (to clear
+ LMC()_RETRY_STATUS[ERROR_COUNT])
+ \</pre\>
+
+ If X \< LMC()_RETRY_CONFIG[MAX_ERRORS] after this sequence, assume that
+ the hardware successfully corrected the error - software may
+ choose to count the number of these errors. Else consider the error
+ to be uncorrected and possibly fatal.
+
+ Otherwise, if LMC is not auto-retrying, a [DDR_ERR] error may always be
+ considered fatal. */
+ uint64_t dlcram_ded_err : 1; /**< [ 10: 10](R/W1C/H) Reserved. */
+ uint64_t dlcram_sec_err : 1; /**< [ 9: 9](R/W1C/H) Reserved. */
+ uint64_t ded_err : 4; /**< [ 8: 5](R/W1C/H) Double-bit error detected on a DRAM read. Generally an indication of DRAM
+ corruption and may be considered fatal.
+
+ In 64b mode:
+ _ \<5\> corresponds to DQ[63:0]_c0_p0.
+ _ \<6\> corresponds to DQ[63:0]_c0_p1.
+ _ \<7\> corresponds to DQ[63:0]_c1_p0.
+ _ \<8\> corresponds to DQ[63:0]_c1_p1.
+ _ where _cC_pP denotes cycle C and phase P.
+
+ In 32b mode, each bit corresponds to 2 phases:
+ _ \<5\> corresponds to DQ[31:0]_c0_p1/0.
+ _ \<6\> corresponds to DQ[31:0]_c1_p1/0.
+ _ \<7\> corresponds to DQ[31:0]_c2_p1/0.
+ _ \<8\> corresponds to DQ[31:0]_c3_p1/0. */
+ uint64_t reserved_1_4 : 4;
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1C/H) When set, indicates an access to nonexistent memory. Normally only NXM writes,
+ but LMC()_EXT_CONFIG[L2C_NXM_RD,L2C_NXM_WR] actually determine whether NXM reads and
+ writes (respectively) participate in [NXM_WR_ERR]. NXM writes are generally an indication
+ of failure. When [NXM_WR_ERR] is set, LMC()_NXM_FADR indicates the NXM address. */
+#else /* Word 0 - Little Endian */
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1C/H) When set, indicates an access to nonexistent memory. Normally only NXM writes,
+ but LMC()_EXT_CONFIG[L2C_NXM_RD,L2C_NXM_WR] actually determine whether NXM reads and
+ writes (respectively) participate in [NXM_WR_ERR]. NXM writes are generally an indication
+ of failure. When [NXM_WR_ERR] is set, LMC()_NXM_FADR indicates the NXM address. */
+ uint64_t reserved_1_4 : 4;
+ uint64_t ded_err : 4; /**< [ 8: 5](R/W1C/H) Double-bit error detected on a DRAM read. Generally an indication of DRAM
+ corruption and may be considered fatal.
+
+ In 64b mode:
+ _ \<5\> corresponds to DQ[63:0]_c0_p0.
+ _ \<6\> corresponds to DQ[63:0]_c0_p1.
+ _ \<7\> corresponds to DQ[63:0]_c1_p0.
+ _ \<8\> corresponds to DQ[63:0]_c1_p1.
+ _ where _cC_pP denotes cycle C and phase P.
+
+ In 32b mode, each bit corresponds to 2 phases:
+ _ \<5\> corresponds to DQ[31:0]_c0_p1/0.
+ _ \<6\> corresponds to DQ[31:0]_c1_p1/0.
+ _ \<7\> corresponds to DQ[31:0]_c2_p1/0.
+ _ \<8\> corresponds to DQ[31:0]_c3_p1/0. */
+ uint64_t dlcram_sec_err : 1; /**< [ 9: 9](R/W1C/H) Reserved. */
+ uint64_t dlcram_ded_err : 1; /**< [ 10: 10](R/W1C/H) Reserved. */
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1C/H) DDR RAM error alert interrupt.
+ Asserts whenever the corresponding DDR*_ERROR_ALERT_L pin (e.g. DDR4 ALERT_n)
+ asserts.
+
+ If LMC is auto-retrying address parity and/or write CRC errors, i.e. if
+ LMC()_RETRY_CONFIG[RETRY_ENABLE,AUTO_ERROR_CONTINUE]=1,1
+ (LMC()_MODEREG_PARAMS3[CA_PAR_PERS] should also be set - the DRAM should
+ be in persistent parity error mode), then the DDR_ERR interrupt routine
+ should:
+ \<pre\>
+ X = LMC()_RETRY_STATUS[ERROR_COUNT]
+ do {
+ Y = X
+ Wait approximately 100ns
+ Write a one to [DDR_ERR] to clear it (if set)
+ X = LMC()_RETRY_STATUS[ERROR_COUNT]
+ } while (X != Y);
+ Write LMC()_RETRY_STATUS[CLEAR_ERROR_COUNT]=1 (to clear
+ LMC()_RETRY_STATUS[ERROR_COUNT])
+ \</pre\>
+
+ If X \< LMC()_RETRY_CONFIG[MAX_ERRORS] after this sequence, assume that
+ the hardware successfully corrected the error - software may
+ choose to count the number of these errors. Else consider the error
+ to be uncorrected and possibly fatal.
+
+ Otherwise, if LMC is not auto-retrying, a [DDR_ERR] error may always be
+ considered fatal. */
+ uint64_t macram_sec_err : 1; /**< [ 12: 12](R/W1C/H) Reserved. */
+ uint64_t macram_ded_err : 1; /**< [ 13: 13](R/W1C/H) Reserved. */
+ uint64_t ddr_alert_sat : 1; /**< [ 14: 14](R/W1C/H) Error threshold set by LMC()_RETRY_CONFIG[MAX_ERRORS] is reached. */
+ uint64_t reserved_15_63 : 49;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_int_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t macram_ded_err : 1; /**< [ 13: 13](R/W1C/H) Reserved. */
+ uint64_t macram_sec_err : 1; /**< [ 12: 12](R/W1C/H) Reserved. */
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1C/H) DDR RAM error alert interrupt.
+ Asserts whenever the corresponding DDR*_ERROR_ALERT_L pin (e.g. DDR4 ALERT_n)
+ asserts.
+
+ If LMC is auto-retrying address parity and/or write CRC errors, i.e. if
+ LMC()_RETRY_CONFIG[RETRY_ENABLE,AUTO_ERROR_CONTINUE]=1,1
+ (LMC()_MODEREG_PARAMS3[CA_PAR_PERS] should also be set - the DRAM should
+ be in persistent parity error mode), then the DDR_ERR interrupt routine
+ should:
+ \<pre\>
+ X = LMC()_RETRY_STATUS[ERROR_COUNT]
+ do {
+ Y = X
+ Wait approximately 100ns
+ Write a one to [DDR_ERR] to clear it (if set)
+ X = LMC()_RETRY_STATUS[ERROR_COUNT]
+ } while (X != Y);
+ Write LMC()_RETRY_STATUS[CLEAR_ERROR_COUNT]=1 (to clear
+ LMC()_RETRY_STATUS[ERROR_COUNT])
+ \</pre\>
+
+ If X \< LMC()_RETRY_CONFIG[MAX_ERRORS] after this sequence, assume that
+ the hardware successfully corrected the error - software may
+ choose to count the number of these errors. Else consider the error
+ to be uncorrected and possibly fatal.
+
+ Otherwise, if LMC is not auto-retrying, a [DDR_ERR] error may always be
+ considered fatal. */
+ uint64_t dlcram_ded_err : 1; /**< [ 10: 10](R/W1C/H) Reserved. */
+ uint64_t dlcram_sec_err : 1; /**< [ 9: 9](R/W1C/H) Reserved. */
+ uint64_t ded_err : 4; /**< [ 8: 5](R/W1C/H) Double-bit error detected on a DRAM read. Generally an indication of DRAM
+ corruption and may be considered fatal.
+
+ In 64b mode:
+ _ \<5\> corresponds to DQ[63:0]_c0_p0.
+ _ \<6\> corresponds to DQ[63:0]_c0_p1.
+ _ \<7\> corresponds to DQ[63:0]_c1_p0.
+ _ \<8\> corresponds to DQ[63:0]_c1_p1.
+ _ where _cC_pP denotes cycle C and phase P.
+
+ In 32b mode, each bit corresponds to 2 phases:
+ _ \<5\> corresponds to DQ[31:0]_c0_p1/0.
+ _ \<6\> corresponds to DQ[31:0]_c1_p1/0.
+ _ \<7\> corresponds to DQ[31:0]_c2_p1/0.
+ _ \<8\> corresponds to DQ[31:0]_c3_p1/0. */
+ uint64_t sec_err : 4; /**< [ 4: 1](R/W1C/H) Single-bit error detected on a DRAM read.
+ When any of [SEC_ERR\<3:0\>] are set, hardware corrected the error before using the value,
+ but did not correct any stored value. When any of [SEC_ERR\<3:0\>] are set, software should
+ scrub the memory location whose address is in LMC()_SCRAMBLED_FADR before clearing the
+ [SEC_ERR\<3:0\>] bits. Otherwise, hardware may encounter the error again the next time the
+ same memory location is referenced. We recommend that the entire 128-byte cache block be
+ scrubbed via load-exclusive/store-release instructions, but other methods are possible.
+ Software may also choose to count the number of these single-bit errors.
+
+ In 64b mode:
+ _ \<1\> corresponds to DQ[63:0]_c0_p0.
+ _ \<2\> corresponds to DQ[63:0]_c0_p1.
+ _ \<3\> corresponds to DQ[63:0]_c1_p0.
+ _ \<4\> corresponds to DQ[63:0]_c1_p1.
+ _ where _cC_pP denotes cycle C and phase P.
+
+ In 32b mode, each bit corresponds to 2 phases:
+ _ \<1\> corresponds to DQ[31:0]_c0_p1/0.
+ _ \<2\> corresponds to DQ[31:0]_c1_p1/0.
+ _ \<3\> corresponds to DQ[31:0]_c2_p1/0.
+ _ \<4\> corresponds to DQ[31:0]_c3_p1/0. */
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1C/H) When set, indicates an access to nonexistent memory. Normally only NXM writes,
+ but LMC()_EXT_CONFIG[L2C_NXM_RD,L2C_NXM_WR] actually determine whether NXM reads and
+ writes (respectively) participate in [NXM_WR_ERR]. NXM writes are generally an indication
+ of failure. When [NXM_WR_ERR] is set, LMC()_NXM_FADR indicates the NXM address. */
+#else /* Word 0 - Little Endian */
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1C/H) When set, indicates an access to nonexistent memory. Normally only NXM writes,
+ but LMC()_EXT_CONFIG[L2C_NXM_RD,L2C_NXM_WR] actually determine whether NXM reads and
+ writes (respectively) participate in [NXM_WR_ERR]. NXM writes are generally an indication
+ of failure. When [NXM_WR_ERR] is set, LMC()_NXM_FADR indicates the NXM address. */
+ uint64_t sec_err : 4; /**< [ 4: 1](R/W1C/H) Single-bit error detected on a DRAM read.
+ When any of [SEC_ERR\<3:0\>] are set, hardware corrected the error before using the value,
+ but did not correct any stored value. When any of [SEC_ERR\<3:0\>] are set, software should
+ scrub the memory location whose address is in LMC()_SCRAMBLED_FADR before clearing the
+ [SEC_ERR\<3:0\>] bits. Otherwise, hardware may encounter the error again the next time the
+ same memory location is referenced. We recommend that the entire 128-byte cache block be
+ scrubbed via load-exclusive/store-release instructions, but other methods are possible.
+ Software may also choose to count the number of these single-bit errors.
+
+ In 64b mode:
+ _ \<1\> corresponds to DQ[63:0]_c0_p0.
+ _ \<2\> corresponds to DQ[63:0]_c0_p1.
+ _ \<3\> corresponds to DQ[63:0]_c1_p0.
+ _ \<4\> corresponds to DQ[63:0]_c1_p1.
+ _ where _cC_pP denotes cycle C and phase P.
+
+ In 32b mode, each bit corresponds to 2 phases:
+ _ \<1\> corresponds to DQ[31:0]_c0_p1/0.
+ _ \<2\> corresponds to DQ[31:0]_c1_p1/0.
+ _ \<3\> corresponds to DQ[31:0]_c2_p1/0.
+ _ \<4\> corresponds to DQ[31:0]_c3_p1/0. */
+ uint64_t ded_err : 4; /**< [ 8: 5](R/W1C/H) Double-bit error detected on a DRAM read. Generally an indication of DRAM
+ corruption and may be considered fatal.
+
+ In 64b mode:
+ _ \<5\> corresponds to DQ[63:0]_c0_p0.
+ _ \<6\> corresponds to DQ[63:0]_c0_p1.
+ _ \<7\> corresponds to DQ[63:0]_c1_p0.
+ _ \<8\> corresponds to DQ[63:0]_c1_p1.
+ _ where _cC_pP denotes cycle C and phase P.
+
+ In 32b mode, each bit corresponds to 2 phases:
+ _ \<5\> corresponds to DQ[31:0]_c0_p1/0.
+ _ \<6\> corresponds to DQ[31:0]_c1_p1/0.
+ _ \<7\> corresponds to DQ[31:0]_c2_p1/0.
+ _ \<8\> corresponds to DQ[31:0]_c3_p1/0. */
+ uint64_t dlcram_sec_err : 1; /**< [ 9: 9](R/W1C/H) Reserved. */
+ uint64_t dlcram_ded_err : 1; /**< [ 10: 10](R/W1C/H) Reserved. */
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1C/H) DDR RAM error alert interrupt.
+ Asserts whenever the corresponding DDR*_ERROR_ALERT_L pin (e.g. DDR4 ALERT_n)
+ asserts.
+
+ If LMC is auto-retrying address parity and/or write CRC errors, i.e. if
+ LMC()_RETRY_CONFIG[RETRY_ENABLE,AUTO_ERROR_CONTINUE]=1,1
+ (LMC()_MODEREG_PARAMS3[CA_PAR_PERS] should also be set - the DRAM should
+ be in persistent parity error mode), then the DDR_ERR interrupt routine
+ should:
+ \<pre\>
+ X = LMC()_RETRY_STATUS[ERROR_COUNT]
+ do {
+ Y = X
+ Wait approximately 100ns
+ Write a one to [DDR_ERR] to clear it (if set)
+ X = LMC()_RETRY_STATUS[ERROR_COUNT]
+ } while (X != Y);
+ Write LMC()_RETRY_STATUS[CLEAR_ERROR_COUNT]=1 (to clear
+ LMC()_RETRY_STATUS[ERROR_COUNT])
+ \</pre\>
+
+ If X \< LMC()_RETRY_CONFIG[MAX_ERRORS] after this sequence, assume that
+ the hardware successfully corrected the error - software may
+ choose to count the number of these errors. Else consider the error
+ to be uncorrected and possibly fatal.
+
+ Otherwise, if LMC is not auto-retrying, a [DDR_ERR] error may always be
+ considered fatal. */
+ uint64_t macram_sec_err : 1; /**< [ 12: 12](R/W1C/H) Reserved. */
+ uint64_t macram_ded_err : 1; /**< [ 13: 13](R/W1C/H) Reserved. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_lmcx_int_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_15_63 : 49;
+ uint64_t ddr_alert_sat : 1; /**< [ 14: 14](R/W1C/H) Error threshold set by LMC()_RETRY_CONFIG[MAX_ERRORS] is reached. */
+ uint64_t reserved_12_13 : 2;
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1C/H) DDR RAM error alert interrupt. */
+ uint64_t reserved_3_10 : 8;
+ uint64_t ref_pend_max1 : 1; /**< [ 2: 2](R/W1C/H) Indicates that the number of pending refreshes has reached 7.
+ Software needs to clear the interrupt before a new max can be detected.
+ This is only useful when LMC()_EXT_CONFIG[REF_BLOCK] mode is engaged. */
+ uint64_t ref_pend_max0 : 1; /**< [ 1: 1](R/W1C/H) Indicates that the number of pending refreshes has reached 7.
+ Software needs to clear the interrupt before a new max can be detected.
+ This is only useful when LMC()_EXT_CONFIG[REF_BLOCK] mode is engaged. */
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1C/H) When set, indicates an access to nonexistent memory. Normally only NXM writes,
+ but LMC()_EXT_CONFIG[L2C_NXM_RD], LMC()_EXT_CONFIG[L2C_NXM_WR] actually
+ determine whether NXM reads and writes (respectively) participate in
+ [NXM_WR_ERR]. NXM writes are generally an indication of failure. LMC()_NXM_FADR
+ indicates the NXM address. */
+#else /* Word 0 - Little Endian */
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1C/H) When set, indicates an access to nonexistent memory. Normally only NXM writes,
+ but LMC()_EXT_CONFIG[L2C_NXM_RD], LMC()_EXT_CONFIG[L2C_NXM_WR] actually
+ determine whether NXM reads and writes (respectively) participate in
+ [NXM_WR_ERR]. NXM writes are generally an indication of failure. LMC()_NXM_FADR
+ indicates the NXM address. */
+ uint64_t ref_pend_max0 : 1; /**< [ 1: 1](R/W1C/H) Indicates that the number of pending refreshes has reached 7.
+ Software needs to clear the interrupt before a new max can be detected.
+ This is only useful when LMC()_EXT_CONFIG[REF_BLOCK] mode is engaged. */
+ uint64_t ref_pend_max1 : 1; /**< [ 2: 2](R/W1C/H) Indicates that the number of pending refreshes has reached 7.
+ Software needs to clear the interrupt before a new max can be detected.
+ This is only useful when LMC()_EXT_CONFIG[REF_BLOCK] mode is engaged. */
+ uint64_t reserved_3_10 : 8;
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1C/H) DDR RAM error alert interrupt. */
+ uint64_t reserved_12_13 : 2;
+ uint64_t ddr_alert_sat : 1; /**< [ 14: 14](R/W1C/H) Error threshold set by LMC()_RETRY_CONFIG[MAX_ERRORS] is reached. */
+ uint64_t reserved_15_63 : 49;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_lmcx_int bdk_lmcx_int_t;
+
+static inline uint64_t BDK_LMCX_INT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_INT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0880001f0ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0880001f0ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e0880001f0ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e0880001f0ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_INT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_INT(a) bdk_lmcx_int_t
+#define bustype_BDK_LMCX_INT(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_INT(a) "LMCX_INT"
+#define device_bar_BDK_LMCX_INT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_INT(a) (a)
+#define arguments_BDK_LMCX_INT(a) (a),-1,-1,-1
+
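+/* Hedged C rendering of the [DDR_ERR] drain sequence from the field
+ * description above; illustrative, not the authoritative handler, and shown
+ * inline here only for proximity to the register (real code would live in a C
+ * file that includes the full header, since LMC()_RETRY_STATUS and
+ * LMC()_RETRY_CONFIG are defined further down). Assumes the BDK_CSR_* accessor
+ * macros, bdk_wait_usec(), and the [ERROR_COUNT], [CLEAR_ERROR_COUNT] and
+ * [MAX_ERRORS] fields of those registers. Returns 0 if the errors appear
+ * corrected, -1 if they should be treated as possibly fatal. */
+static inline int lmc_ddr_err_drain(bdk_node_t node, int lmc)
+{
+    BDK_CSR_INIT(rs, node, BDK_LMCX_RETRY_STATUS(lmc));
+    uint64_t x = rs.s.error_count, y;
+    do {
+        y = x;
+        bdk_wait_usec(1); /* comfortably covers the ~100 ns the text asks for */
+        bdk_lmcx_int_t clr = { .u = 0 };
+        clr.s.ddr_err = 1; /* W1C: clear [DDR_ERR] if it is set */
+        BDK_CSR_WRITE(node, BDK_LMCX_INT(lmc), clr.u);
+        rs.u = BDK_CSR_READ(node, BDK_LMCX_RETRY_STATUS(lmc));
+        x = rs.s.error_count;
+    } while (x != y);
+    /* Clear the accumulated count now that it has stopped moving */
+    BDK_CSR_MODIFY(c, node, BDK_LMCX_RETRY_STATUS(lmc),
+                   c.s.clear_error_count = 1);
+    BDK_CSR_INIT(rc, node, BDK_LMCX_RETRY_CONFIG(lmc));
+    return (x < rc.s.max_errors) ? 0 : -1;
+}
+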
+/**
+ * Register (RSL) lmc#_int_en
+ *
+ * INTERNAL: LMC Legacy Interrupt Enable Register
+ *
+ * Internal:
+ * Deprecated and unused CSR.
+ */
+union bdk_lmcx_int_en
+{
+ uint64_t u;
+ struct bdk_lmcx_int_en_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_6_63 : 58;
+ uint64_t ddr_error_alert_ena : 1; /**< [ 5: 5](R/W) DDR4 error alert interrupt enable bit. */
+ uint64_t dlcram_ded_ena : 1; /**< [ 4: 4](R/W) DLC RAM ECC double error detect (DED) interrupt enable bit. */
+ uint64_t dlcram_sec_ena : 1; /**< [ 3: 3](R/W) DLC RAM ECC single error correct (SEC) interrupt enable bit. */
+ uint64_t intr_ded_ena : 1; /**< [ 2: 2](R/W) ECC double error detect (DED) interrupt enable bit. When set, the memory controller raises
+ a processor interrupt on detecting an uncorrectable double-bit ECC error. */
+ uint64_t intr_sec_ena : 1; /**< [ 1: 1](R/W) ECC single error correct (SEC) interrupt enable bit. When set, the memory controller
+ raises a processor interrupt on detecting a correctable single-bit ECC error. */
+ uint64_t intr_nxm_wr_ena : 1; /**< [ 0: 0](R/W) Nonexistent memory (NXM) write error interrupt enable bit. When set, the memory controller raises a processor
+ interrupt on detecting a nonexistent memory write. */
+#else /* Word 0 - Little Endian */
+ uint64_t intr_nxm_wr_ena : 1; /**< [ 0: 0](R/W) Nonexistent memory (NXM) write error interrupt enable bit. When set, the memory controller raises a processor
+ interrupt on detecting a nonexistent memory write. */
+ uint64_t intr_sec_ena : 1; /**< [ 1: 1](R/W) ECC single error correct (SEC) interrupt enable bit. When set, the memory controller
+ raises a processor interrupt on detecting a correctable single-bit ECC error. */
+ uint64_t intr_ded_ena : 1; /**< [ 2: 2](R/W) ECC double error detect (DED) interrupt enable bit. When set, the memory controller raises
+ a processor interrupt on detecting an uncorrectable double-bit ECC error. */
+ uint64_t dlcram_sec_ena : 1; /**< [ 3: 3](R/W) DLC RAM ECC single error correct (SEC) interrupt enable bit. */
+ uint64_t dlcram_ded_ena : 1; /**< [ 4: 4](R/W) DLC RAM ECC double error detect (DED) interrupt enable bit. */
+ uint64_t ddr_error_alert_ena : 1; /**< [ 5: 5](R/W) DDR4 error alert interrupt enable bit. */
+ uint64_t reserved_6_63 : 58;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_int_en_s cn; */
+};
+typedef union bdk_lmcx_int_en bdk_lmcx_int_en_t;
+
+static inline uint64_t BDK_LMCX_INT_EN(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_INT_EN(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e0880001e8ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_INT_EN", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_INT_EN(a) bdk_lmcx_int_en_t
+#define bustype_BDK_LMCX_INT_EN(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_INT_EN(a) "LMCX_INT_EN"
+#define device_bar_BDK_LMCX_INT_EN(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_INT_EN(a) (a)
+#define arguments_BDK_LMCX_INT_EN(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) lmc#_int_ena_w1c
+ *
+ * LMC Interrupt Enable Clear Register
+ * This register clears interrupt enable bits.
+ */
+union bdk_lmcx_int_ena_w1c
+{
+ uint64_t u;
+ struct bdk_lmcx_int_ena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_15_63 : 49;
+ uint64_t ddr_alert_sat : 1; /**< [ 14: 14](R/W1C/H) Reads or clears enable for LMC(0..2)_INT[DDR_ALERT_SAT]. */
+ uint64_t macram_ded_err : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[MACRAM_DED_ERR]. */
+ uint64_t macram_sec_err : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[MACRAM_SEC_ERR]. */
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[DDR_ERR]. */
+ uint64_t dlcram_ded_err : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[DLCRAM_DED_ERR]. */
+ uint64_t dlcram_sec_err : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[DLCRAM_SEC_ERR]. */
+ uint64_t ded_err : 4; /**< [ 8: 5](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[DED_ERR]. */
+ uint64_t reserved_1_4 : 4;
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[NXM_WR_ERR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[NXM_WR_ERR]. */
+ uint64_t reserved_1_4 : 4;
+ uint64_t ded_err : 4; /**< [ 8: 5](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[DED_ERR]. */
+ uint64_t dlcram_sec_err : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[DLCRAM_SEC_ERR]. */
+ uint64_t dlcram_ded_err : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[DLCRAM_DED_ERR]. */
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[DDR_ERR]. */
+ uint64_t macram_sec_err : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[MACRAM_SEC_ERR]. */
+ uint64_t macram_ded_err : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[MACRAM_DED_ERR]. */
+ uint64_t ddr_alert_sat : 1; /**< [ 14: 14](R/W1C/H) Reads or clears enable for LMC(0..2)_INT[DDR_ALERT_SAT]. */
+ uint64_t reserved_15_63 : 49;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_int_ena_w1c_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_15_63 : 49;
+ uint64_t ddr_alert_sat : 1; /**< [ 14: 14](R/W1C/H) Reads or clears enable for LMC(0..2)_INT[DDR_ALERT_SAT]. */
+ uint64_t reserved_12_13 : 2;
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for LMC(0..2)_INT[DDR_ERR]. */
+ uint64_t reserved_3_10 : 8;
+ uint64_t ref_pend_max1 : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for LMC(0..2)_INT[REF_PEND_MAX1]. */
+ uint64_t ref_pend_max0 : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for LMC(0..2)_INT[REF_PEND_MAX0]. */
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for LMC(0..2)_INT[NXM_WR_ERR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for LMC(0..2)_INT[NXM_WR_ERR]. */
+ uint64_t ref_pend_max0 : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for LMC(0..2)_INT[REF_PEND_MAX0]. */
+ uint64_t ref_pend_max1 : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for LMC(0..2)_INT[REF_PEND_MAX1]. */
+ uint64_t reserved_3_10 : 8;
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for LMC(0..2)_INT[DDR_ERR]. */
+ uint64_t reserved_12_13 : 2;
+ uint64_t ddr_alert_sat : 1; /**< [ 14: 14](R/W1C/H) Reads or clears enable for LMC(0..2)_INT[DDR_ALERT_SAT]. */
+ uint64_t reserved_15_63 : 49;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_lmcx_int_ena_w1c_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t macram_ded_err : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for LMC(0)_INT[MACRAM_DED_ERR]. */
+ uint64_t macram_sec_err : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for LMC(0)_INT[MACRAM_SEC_ERR]. */
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for LMC(0)_INT[DDR_ERR]. */
+ uint64_t dlcram_ded_err : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for LMC(0)_INT[DLCRAM_DED_ERR]. */
+ uint64_t dlcram_sec_err : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for LMC(0)_INT[DLCRAM_SEC_ERR]. */
+ uint64_t ded_err : 4; /**< [ 8: 5](R/W1C/H) Reads or clears enable for LMC(0)_INT[DED_ERR]. */
+ uint64_t sec_err : 4; /**< [ 4: 1](R/W1C/H) Reads or clears enable for LMC(0)_INT[SEC_ERR]. */
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for LMC(0)_INT[NXM_WR_ERR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for LMC(0)_INT[NXM_WR_ERR]. */
+ uint64_t sec_err : 4; /**< [ 4: 1](R/W1C/H) Reads or clears enable for LMC(0)_INT[SEC_ERR]. */
+ uint64_t ded_err : 4; /**< [ 8: 5](R/W1C/H) Reads or clears enable for LMC(0)_INT[DED_ERR]. */
+ uint64_t dlcram_sec_err : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for LMC(0)_INT[DLCRAM_SEC_ERR]. */
+ uint64_t dlcram_ded_err : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for LMC(0)_INT[DLCRAM_DED_ERR]. */
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for LMC(0)_INT[DDR_ERR]. */
+ uint64_t macram_sec_err : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for LMC(0)_INT[MACRAM_SEC_ERR]. */
+ uint64_t macram_ded_err : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for LMC(0)_INT[MACRAM_DED_ERR]. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_lmcx_int_ena_w1c_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t macram_ded_err : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[MACRAM_DED_ERR]. */
+ uint64_t macram_sec_err : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[MACRAM_SEC_ERR]. */
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[DDR_ERR]. */
+ uint64_t dlcram_ded_err : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[DLCRAM_DED_ERR]. */
+ uint64_t dlcram_sec_err : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[DLCRAM_SEC_ERR]. */
+ uint64_t ded_err : 4; /**< [ 8: 5](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[DED_ERR]. */
+ uint64_t sec_err : 4; /**< [ 4: 1](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[SEC_ERR]. */
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[NXM_WR_ERR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[NXM_WR_ERR]. */
+ uint64_t sec_err : 4; /**< [ 4: 1](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[SEC_ERR]. */
+ uint64_t ded_err : 4; /**< [ 8: 5](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[DED_ERR]. */
+ uint64_t dlcram_sec_err : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[DLCRAM_SEC_ERR]. */
+ uint64_t dlcram_ded_err : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[DLCRAM_DED_ERR]. */
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[DDR_ERR]. */
+ uint64_t macram_sec_err : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[MACRAM_SEC_ERR]. */
+ uint64_t macram_ded_err : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[MACRAM_DED_ERR]. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_lmcx_int_ena_w1c_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t macram_ded_err : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for LMC(0..1)_INT[MACRAM_DED_ERR]. */
+ uint64_t macram_sec_err : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for LMC(0..1)_INT[MACRAM_SEC_ERR]. */
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for LMC(0..1)_INT[DDR_ERR]. */
+ uint64_t dlcram_ded_err : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for LMC(0..1)_INT[DLCRAM_DED_ERR]. */
+ uint64_t dlcram_sec_err : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for LMC(0..1)_INT[DLCRAM_SEC_ERR]. */
+ uint64_t ded_err : 4; /**< [ 8: 5](R/W1C/H) Reads or clears enable for LMC(0..1)_INT[DED_ERR]. */
+ uint64_t sec_err : 4; /**< [ 4: 1](R/W1C/H) Reads or clears enable for LMC(0..1)_INT[SEC_ERR]. */
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for LMC(0..1)_INT[NXM_WR_ERR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for LMC(0..1)_INT[NXM_WR_ERR]. */
+ uint64_t sec_err : 4; /**< [ 4: 1](R/W1C/H) Reads or clears enable for LMC(0..1)_INT[SEC_ERR]. */
+ uint64_t ded_err : 4; /**< [ 8: 5](R/W1C/H) Reads or clears enable for LMC(0..1)_INT[DED_ERR]. */
+ uint64_t dlcram_sec_err : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for LMC(0..1)_INT[DLCRAM_SEC_ERR]. */
+ uint64_t dlcram_ded_err : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for LMC(0..1)_INT[DLCRAM_DED_ERR]. */
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for LMC(0..1)_INT[DDR_ERR]. */
+ uint64_t macram_sec_err : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for LMC(0..1)_INT[MACRAM_SEC_ERR]. */
+ uint64_t macram_ded_err : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for LMC(0..1)_INT[MACRAM_DED_ERR]. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_lmcx_int_ena_w1c bdk_lmcx_int_ena_w1c_t;
+
+static inline uint64_t BDK_LMCX_INT_ENA_W1C(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_INT_ENA_W1C(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000158ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000158ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000158ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000158ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_INT_ENA_W1C", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_INT_ENA_W1C(a) bdk_lmcx_int_ena_w1c_t
+#define bustype_BDK_LMCX_INT_ENA_W1C(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_INT_ENA_W1C(a) "LMCX_INT_ENA_W1C"
+#define device_bar_BDK_LMCX_INT_ENA_W1C(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_INT_ENA_W1C(a) (a)
+#define arguments_BDK_LMCX_INT_ENA_W1C(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) lmc#_int_ena_w1s
+ *
+ * LMC Interrupt Enable Set Register
+ * This register sets interrupt enable bits.
+ */
+union bdk_lmcx_int_ena_w1s
+{
+ uint64_t u;
+ struct bdk_lmcx_int_ena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_15_63 : 49;
+ uint64_t ddr_alert_sat : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for LMC(0..2)_INT[DDR_ALERT_SAT]. */
+ uint64_t macram_ded_err : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[MACRAM_DED_ERR]. */
+ uint64_t macram_sec_err : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[MACRAM_SEC_ERR]. */
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[DDR_ERR]. */
+ uint64_t dlcram_ded_err : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[DLCRAM_DED_ERR]. */
+ uint64_t dlcram_sec_err : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[DLCRAM_SEC_ERR]. */
+ uint64_t ded_err : 4; /**< [ 8: 5](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[DED_ERR]. */
+ uint64_t reserved_1_4 : 4;
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[NXM_WR_ERR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[NXM_WR_ERR]. */
+ uint64_t reserved_1_4 : 4;
+ uint64_t ded_err : 4; /**< [ 8: 5](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[DED_ERR]. */
+ uint64_t dlcram_sec_err : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[DLCRAM_SEC_ERR]. */
+ uint64_t dlcram_ded_err : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[DLCRAM_DED_ERR]. */
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[DDR_ERR]. */
+ uint64_t macram_sec_err : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[MACRAM_SEC_ERR]. */
+ uint64_t macram_ded_err : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[MACRAM_DED_ERR]. */
+ uint64_t ddr_alert_sat : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for LMC(0..2)_INT[DDR_ALERT_SAT]. */
+ uint64_t reserved_15_63 : 49;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_int_ena_w1s_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_15_63 : 49;
+ uint64_t ddr_alert_sat : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for LMC(0..2)_INT[DDR_ALERT_SAT]. */
+ uint64_t reserved_12_13 : 2;
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for LMC(0..2)_INT[DDR_ERR]. */
+ uint64_t reserved_3_10 : 8;
+ uint64_t ref_pend_max1 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for LMC(0..2)_INT[REF_PEND_MAX1]. */
+ uint64_t ref_pend_max0 : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for LMC(0..2)_INT[REF_PEND_MAX0]. */
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for LMC(0..2)_INT[NXM_WR_ERR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for LMC(0..2)_INT[NXM_WR_ERR]. */
+ uint64_t ref_pend_max0 : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for LMC(0..2)_INT[REF_PEND_MAX0]. */
+ uint64_t ref_pend_max1 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for LMC(0..2)_INT[REF_PEND_MAX1]. */
+ uint64_t reserved_3_10 : 8;
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for LMC(0..2)_INT[DDR_ERR]. */
+ uint64_t reserved_12_13 : 2;
+ uint64_t ddr_alert_sat : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for LMC(0..2)_INT[DDR_ALERT_SAT]. */
+ uint64_t reserved_15_63 : 49;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_lmcx_int_ena_w1s_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t macram_ded_err : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for LMC(0)_INT[MACRAM_DED_ERR]. */
+ uint64_t macram_sec_err : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for LMC(0)_INT[MACRAM_SEC_ERR]. */
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for LMC(0)_INT[DDR_ERR]. */
+ uint64_t dlcram_ded_err : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for LMC(0)_INT[DLCRAM_DED_ERR]. */
+ uint64_t dlcram_sec_err : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for LMC(0)_INT[DLCRAM_SEC_ERR]. */
+ uint64_t ded_err : 4; /**< [ 8: 5](R/W1S/H) Reads or sets enable for LMC(0)_INT[DED_ERR]. */
+ uint64_t sec_err : 4; /**< [ 4: 1](R/W1S/H) Reads or sets enable for LMC(0)_INT[SEC_ERR]. */
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for LMC(0)_INT[NXM_WR_ERR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for LMC(0)_INT[NXM_WR_ERR]. */
+ uint64_t sec_err : 4; /**< [ 4: 1](R/W1S/H) Reads or sets enable for LMC(0)_INT[SEC_ERR]. */
+ uint64_t ded_err : 4; /**< [ 8: 5](R/W1S/H) Reads or sets enable for LMC(0)_INT[DED_ERR]. */
+ uint64_t dlcram_sec_err : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for LMC(0)_INT[DLCRAM_SEC_ERR]. */
+ uint64_t dlcram_ded_err : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for LMC(0)_INT[DLCRAM_DED_ERR]. */
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for LMC(0)_INT[DDR_ERR]. */
+ uint64_t macram_sec_err : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for LMC(0)_INT[MACRAM_SEC_ERR]. */
+ uint64_t macram_ded_err : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for LMC(0)_INT[MACRAM_DED_ERR]. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_lmcx_int_ena_w1s_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t macram_ded_err : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[MACRAM_DED_ERR]. */
+ uint64_t macram_sec_err : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[MACRAM_SEC_ERR]. */
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[DDR_ERR]. */
+ uint64_t dlcram_ded_err : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[DLCRAM_DED_ERR]. */
+ uint64_t dlcram_sec_err : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[DLCRAM_SEC_ERR]. */
+ uint64_t ded_err : 4; /**< [ 8: 5](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[DED_ERR]. */
+ uint64_t sec_err : 4; /**< [ 4: 1](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[SEC_ERR]. */
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[NXM_WR_ERR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[NXM_WR_ERR]. */
+ uint64_t sec_err : 4; /**< [ 4: 1](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[SEC_ERR]. */
+ uint64_t ded_err : 4; /**< [ 8: 5](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[DED_ERR]. */
+ uint64_t dlcram_sec_err : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[DLCRAM_SEC_ERR]. */
+ uint64_t dlcram_ded_err : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[DLCRAM_DED_ERR]. */
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[DDR_ERR]. */
+ uint64_t macram_sec_err : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[MACRAM_SEC_ERR]. */
+ uint64_t macram_ded_err : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[MACRAM_DED_ERR]. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_lmcx_int_ena_w1s_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t macram_ded_err : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for LMC(0..1)_INT[MACRAM_DED_ERR]. */
+ uint64_t macram_sec_err : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for LMC(0..1)_INT[MACRAM_SEC_ERR]. */
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for LMC(0..1)_INT[DDR_ERR]. */
+ uint64_t dlcram_ded_err : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for LMC(0..1)_INT[DLCRAM_DED_ERR]. */
+ uint64_t dlcram_sec_err : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for LMC(0..1)_INT[DLCRAM_SEC_ERR]. */
+ uint64_t ded_err : 4; /**< [ 8: 5](R/W1S/H) Reads or sets enable for LMC(0..1)_INT[DED_ERR]. */
+ uint64_t sec_err : 4; /**< [ 4: 1](R/W1S/H) Reads or sets enable for LMC(0..1)_INT[SEC_ERR]. */
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for LMC(0..1)_INT[NXM_WR_ERR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for LMC(0..1)_INT[NXM_WR_ERR]. */
+ uint64_t sec_err : 4; /**< [ 4: 1](R/W1S/H) Reads or sets enable for LMC(0..1)_INT[SEC_ERR]. */
+ uint64_t ded_err : 4; /**< [ 8: 5](R/W1S/H) Reads or sets enable for LMC(0..1)_INT[DED_ERR]. */
+ uint64_t dlcram_sec_err : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for LMC(0..1)_INT[DLCRAM_SEC_ERR]. */
+ uint64_t dlcram_ded_err : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for LMC(0..1)_INT[DLCRAM_DED_ERR]. */
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for LMC(0..1)_INT[DDR_ERR]. */
+ uint64_t macram_sec_err : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for LMC(0..1)_INT[MACRAM_SEC_ERR]. */
+ uint64_t macram_ded_err : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for LMC(0..1)_INT[MACRAM_DED_ERR]. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_lmcx_int_ena_w1s bdk_lmcx_int_ena_w1s_t;
+
+static inline uint64_t BDK_LMCX_INT_ENA_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_INT_ENA_W1S(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000160ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000160ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000160ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000160ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_INT_ENA_W1S", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_INT_ENA_W1S(a) bdk_lmcx_int_ena_w1s_t
+#define bustype_BDK_LMCX_INT_ENA_W1S(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_INT_ENA_W1S(a) "LMCX_INT_ENA_W1S"
+#define device_bar_BDK_LMCX_INT_ENA_W1S(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_INT_ENA_W1S(a) (a)
+#define arguments_BDK_LMCX_INT_ENA_W1S(a) (a),-1,-1,-1
+
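+/* Hedged sketch, not BDK code: the W1S/W1C alias pair lets software set or
+ * clear a single interrupt enable without a read-modify-write of the other
+ * bits. Shown for [DDR_ERR]; assumes BDK_CSR_WRITE from this import. */
+static inline void lmc_ddr_err_irq_enable(bdk_node_t node, int lmc, int enable)
+{
+    if (enable) {
+        bdk_lmcx_int_ena_w1s_t set = { .u = 0 };
+        set.s.ddr_err = 1;  /* only this enable is set */
+        BDK_CSR_WRITE(node, BDK_LMCX_INT_ENA_W1S(lmc), set.u);
+    } else {
+        bdk_lmcx_int_ena_w1c_t w1c = { .u = 0 };
+        w1c.s.ddr_err = 1;  /* only this enable is cleared */
+        BDK_CSR_WRITE(node, BDK_LMCX_INT_ENA_W1C(lmc), w1c.u);
+    }
+}
+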
+/**
+ * Register (RSL) lmc#_int_w1s
+ *
+ * LMC Interrupt Set Register
+ * This register sets interrupt bits.
+ */
+union bdk_lmcx_int_w1s
+{
+ uint64_t u;
+ struct bdk_lmcx_int_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_15_63 : 49;
+ uint64_t ddr_alert_sat : 1; /**< [ 14: 14](R/W1S/H) Reads or sets LMC(0..2)_INT[DDR_ALERT_SAT]. */
+ uint64_t macram_ded_err : 1; /**< [ 13: 13](R/W1S/H) Reads or sets LMC(0..3)_INT[MACRAM_DED_ERR]. */
+ uint64_t macram_sec_err : 1; /**< [ 12: 12](R/W1S/H) Reads or sets LMC(0..3)_INT[MACRAM_SEC_ERR]. */
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1S/H) Reads or sets LMC(0..3)_INT[DDR_ERR]. */
+ uint64_t dlcram_ded_err : 1; /**< [ 10: 10](R/W1S/H) Reads or sets LMC(0..3)_INT[DLCRAM_DED_ERR]. */
+ uint64_t dlcram_sec_err : 1; /**< [ 9: 9](R/W1S/H) Reads or sets LMC(0..3)_INT[DLCRAM_SEC_ERR]. */
+ uint64_t ded_err : 4; /**< [ 8: 5](R/W1S/H) Reads or sets LMC(0..3)_INT[DED_ERR]. */
+ uint64_t reserved_1_4 : 4;
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1S/H) Reads or sets LMC(0..3)_INT[NXM_WR_ERR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1S/H) Reads or sets LMC(0..3)_INT[NXM_WR_ERR]. */
+ uint64_t reserved_1_4 : 4;
+ uint64_t ded_err : 4; /**< [ 8: 5](R/W1S/H) Reads or sets LMC(0..3)_INT[DED_ERR]. */
+ uint64_t dlcram_sec_err : 1; /**< [ 9: 9](R/W1S/H) Reads or sets LMC(0..3)_INT[DLCRAM_SEC_ERR]. */
+ uint64_t dlcram_ded_err : 1; /**< [ 10: 10](R/W1S/H) Reads or sets LMC(0..3)_INT[DLCRAM_DED_ERR]. */
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1S/H) Reads or sets LMC(0..3)_INT[DDR_ERR]. */
+ uint64_t macram_sec_err : 1; /**< [ 12: 12](R/W1S/H) Reads or sets LMC(0..3)_INT[MACRAM_SEC_ERR]. */
+ uint64_t macram_ded_err : 1; /**< [ 13: 13](R/W1S/H) Reads or sets LMC(0..3)_INT[MACRAM_DED_ERR]. */
+ uint64_t ddr_alert_sat : 1; /**< [ 14: 14](R/W1S/H) Reads or sets LMC(0..2)_INT[DDR_ALERT_SAT]. */
+ uint64_t reserved_15_63 : 49;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_int_w1s_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_15_63 : 49;
+ uint64_t ddr_alert_sat : 1; /**< [ 14: 14](R/W1S/H) Reads or sets LMC(0..2)_INT[DDR_ALERT_SAT]. */
+ uint64_t reserved_12_13 : 2;
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1S/H) Reads or sets LMC(0..2)_INT[DDR_ERR]. */
+ uint64_t reserved_3_10 : 8;
+ uint64_t ref_pend_max1 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets LMC(0..2)_INT[REF_PEND_MAX1]. */
+ uint64_t ref_pend_max0 : 1; /**< [ 1: 1](R/W1S/H) Reads or sets LMC(0..2)_INT[REF_PEND_MAX0]. */
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1S/H) Reads or sets LMC(0..2)_INT[NXM_WR_ERR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1S/H) Reads or sets LMC(0..2)_INT[NXM_WR_ERR]. */
+ uint64_t ref_pend_max0 : 1; /**< [ 1: 1](R/W1S/H) Reads or sets LMC(0..2)_INT[REF_PEND_MAX0]. */
+ uint64_t ref_pend_max1 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets LMC(0..2)_INT[REF_PEND_MAX1]. */
+ uint64_t reserved_3_10 : 8;
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1S/H) Reads or sets LMC(0..2)_INT[DDR_ERR]. */
+ uint64_t reserved_12_13 : 2;
+ uint64_t ddr_alert_sat : 1; /**< [ 14: 14](R/W1S/H) Reads or sets LMC(0..2)_INT[DDR_ALERT_SAT]. */
+ uint64_t reserved_15_63 : 49;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_lmcx_int_w1s_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t macram_ded_err : 1; /**< [ 13: 13](R/W1S/H) Reads or sets LMC(0)_INT[MACRAM_DED_ERR]. */
+ uint64_t macram_sec_err : 1; /**< [ 12: 12](R/W1S/H) Reads or sets LMC(0)_INT[MACRAM_SEC_ERR]. */
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1S/H) Reads or sets LMC(0)_INT[DDR_ERR]. */
+ uint64_t dlcram_ded_err : 1; /**< [ 10: 10](R/W1S/H) Reads or sets LMC(0)_INT[DLCRAM_DED_ERR]. */
+ uint64_t dlcram_sec_err : 1; /**< [ 9: 9](R/W1S/H) Reads or sets LMC(0)_INT[DLCRAM_SEC_ERR]. */
+ uint64_t ded_err : 4; /**< [ 8: 5](R/W1S/H) Reads or sets LMC(0)_INT[DED_ERR]. */
+ uint64_t sec_err : 4; /**< [ 4: 1](R/W1S/H) Reads or sets LMC(0)_INT[SEC_ERR]. */
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1S/H) Reads or sets LMC(0)_INT[NXM_WR_ERR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1S/H) Reads or sets LMC(0)_INT[NXM_WR_ERR]. */
+ uint64_t sec_err : 4; /**< [ 4: 1](R/W1S/H) Reads or sets LMC(0)_INT[SEC_ERR]. */
+ uint64_t ded_err : 4; /**< [ 8: 5](R/W1S/H) Reads or sets LMC(0)_INT[DED_ERR]. */
+ uint64_t dlcram_sec_err : 1; /**< [ 9: 9](R/W1S/H) Reads or sets LMC(0)_INT[DLCRAM_SEC_ERR]. */
+ uint64_t dlcram_ded_err : 1; /**< [ 10: 10](R/W1S/H) Reads or sets LMC(0)_INT[DLCRAM_DED_ERR]. */
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1S/H) Reads or sets LMC(0)_INT[DDR_ERR]. */
+ uint64_t macram_sec_err : 1; /**< [ 12: 12](R/W1S/H) Reads or sets LMC(0)_INT[MACRAM_SEC_ERR]. */
+ uint64_t macram_ded_err : 1; /**< [ 13: 13](R/W1S/H) Reads or sets LMC(0)_INT[MACRAM_DED_ERR]. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_lmcx_int_w1s_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t macram_ded_err : 1; /**< [ 13: 13](R/W1S/H) Reads or sets LMC(0..3)_INT[MACRAM_DED_ERR]. */
+ uint64_t macram_sec_err : 1; /**< [ 12: 12](R/W1S/H) Reads or sets LMC(0..3)_INT[MACRAM_SEC_ERR]. */
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1S/H) Reads or sets LMC(0..3)_INT[DDR_ERR]. */
+ uint64_t dlcram_ded_err : 1; /**< [ 10: 10](R/W1S/H) Reads or sets LMC(0..3)_INT[DLCRAM_DED_ERR]. */
+ uint64_t dlcram_sec_err : 1; /**< [ 9: 9](R/W1S/H) Reads or sets LMC(0..3)_INT[DLCRAM_SEC_ERR]. */
+ uint64_t ded_err : 4; /**< [ 8: 5](R/W1S/H) Reads or sets LMC(0..3)_INT[DED_ERR]. */
+ uint64_t sec_err : 4; /**< [ 4: 1](R/W1S/H) Reads or sets LMC(0..3)_INT[SEC_ERR]. */
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1S/H) Reads or sets LMC(0..3)_INT[NXM_WR_ERR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1S/H) Reads or sets LMC(0..3)_INT[NXM_WR_ERR]. */
+ uint64_t sec_err : 4; /**< [ 4: 1](R/W1S/H) Reads or sets LMC(0..3)_INT[SEC_ERR]. */
+ uint64_t ded_err : 4; /**< [ 8: 5](R/W1S/H) Reads or sets LMC(0..3)_INT[DED_ERR]. */
+ uint64_t dlcram_sec_err : 1; /**< [ 9: 9](R/W1S/H) Reads or sets LMC(0..3)_INT[DLCRAM_SEC_ERR]. */
+ uint64_t dlcram_ded_err : 1; /**< [ 10: 10](R/W1S/H) Reads or sets LMC(0..3)_INT[DLCRAM_DED_ERR]. */
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1S/H) Reads or sets LMC(0..3)_INT[DDR_ERR]. */
+ uint64_t macram_sec_err : 1; /**< [ 12: 12](R/W1S/H) Reads or sets LMC(0..3)_INT[MACRAM_SEC_ERR]. */
+ uint64_t macram_ded_err : 1; /**< [ 13: 13](R/W1S/H) Reads or sets LMC(0..3)_INT[MACRAM_DED_ERR]. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_lmcx_int_w1s_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t macram_ded_err : 1; /**< [ 13: 13](R/W1S/H) Reads or sets LMC(0..1)_INT[MACRAM_DED_ERR]. */
+ uint64_t macram_sec_err : 1; /**< [ 12: 12](R/W1S/H) Reads or sets LMC(0..1)_INT[MACRAM_SEC_ERR]. */
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1S/H) Reads or sets LMC(0..1)_INT[DDR_ERR]. */
+ uint64_t dlcram_ded_err : 1; /**< [ 10: 10](R/W1S/H) Reads or sets LMC(0..1)_INT[DLCRAM_DED_ERR]. */
+ uint64_t dlcram_sec_err : 1; /**< [ 9: 9](R/W1S/H) Reads or sets LMC(0..1)_INT[DLCRAM_SEC_ERR]. */
+ uint64_t ded_err : 4; /**< [ 8: 5](R/W1S/H) Reads or sets LMC(0..1)_INT[DED_ERR]. */
+ uint64_t sec_err : 4; /**< [ 4: 1](R/W1S/H) Reads or sets LMC(0..1)_INT[SEC_ERR]. */
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1S/H) Reads or sets LMC(0..1)_INT[NXM_WR_ERR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t nxm_wr_err : 1; /**< [ 0: 0](R/W1S/H) Reads or sets LMC(0..1)_INT[NXM_WR_ERR]. */
+ uint64_t sec_err : 4; /**< [ 4: 1](R/W1S/H) Reads or sets LMC(0..1)_INT[SEC_ERR]. */
+ uint64_t ded_err : 4; /**< [ 8: 5](R/W1S/H) Reads or sets LMC(0..1)_INT[DED_ERR]. */
+ uint64_t dlcram_sec_err : 1; /**< [ 9: 9](R/W1S/H) Reads or sets LMC(0..1)_INT[DLCRAM_SEC_ERR]. */
+ uint64_t dlcram_ded_err : 1; /**< [ 10: 10](R/W1S/H) Reads or sets LMC(0..1)_INT[DLCRAM_DED_ERR]. */
+ uint64_t ddr_err : 1; /**< [ 11: 11](R/W1S/H) Reads or sets LMC(0..1)_INT[DDR_ERR]. */
+ uint64_t macram_sec_err : 1; /**< [ 12: 12](R/W1S/H) Reads or sets LMC(0..1)_INT[MACRAM_SEC_ERR]. */
+ uint64_t macram_ded_err : 1; /**< [ 13: 13](R/W1S/H) Reads or sets LMC(0..1)_INT[MACRAM_DED_ERR]. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_lmcx_int_w1s bdk_lmcx_int_w1s_t;
+
+static inline uint64_t BDK_LMCX_INT_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_INT_W1S(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000150ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000150ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000150ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000150ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_INT_W1S", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_INT_W1S(a) bdk_lmcx_int_w1s_t
+#define bustype_BDK_LMCX_INT_W1S(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_INT_W1S(a) "LMCX_INT_W1S"
+#define device_bar_BDK_LMCX_INT_W1S(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_INT_W1S(a) (a)
+#define arguments_BDK_LMCX_INT_W1S(a) (a),-1,-1,-1
+
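+/* Hedged sketch, not BDK code: writing a one through the W1S alias latches the
+ * corresponding LMC()_INT bit as if the event had occurred, which is a
+ * convenient way to exercise an interrupt handler. Assumes BDK_CSR_WRITE from
+ * this import. */
+static inline void lmc_int_inject_nxm_wr_err(bdk_node_t node, int lmc)
+{
+    bdk_lmcx_int_w1s_t w1s = { .u = 0 };
+    w1s.s.nxm_wr_err = 1; /* latches LMC()_INT[NXM_WR_ERR] */
+    BDK_CSR_WRITE(node, BDK_LMCX_INT_W1S(lmc), w1s.u);
+}
+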
+/**
+ * Register (RSL) lmc#_lane#_crc_swiz
+ *
+ * LMC Lane CRC Swizzle Register
+ * This register contains the CRC bit swizzle for even and odd ranks.
+ */
+union bdk_lmcx_lanex_crc_swiz
+{
+ uint64_t u;
+ struct bdk_lmcx_lanex_crc_swiz_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_56_63 : 8;
+ uint64_t r1_swiz7 : 3; /**< [ 55: 53](R/W) Bit select for odd rank, bit 7. */
+ uint64_t r1_swiz6 : 3; /**< [ 52: 50](R/W) Bit select for odd rank, bit 6. */
+ uint64_t r1_swiz5 : 3; /**< [ 49: 47](R/W) Bit select for odd rank, bit 5. */
+ uint64_t r1_swiz4 : 3; /**< [ 46: 44](R/W) Bit select for odd rank, bit 4. */
+ uint64_t r1_swiz3 : 3; /**< [ 43: 41](R/W) Bit select for odd rank, bit 3. */
+ uint64_t r1_swiz2 : 3; /**< [ 40: 38](R/W) Bit select for odd rank, bit 2. */
+ uint64_t r1_swiz1 : 3; /**< [ 37: 35](R/W) Bit select for odd rank, bit 1. */
+ uint64_t r1_swiz0 : 3; /**< [ 34: 32](R/W) Bit select for odd rank, bit 0. */
+ uint64_t reserved_24_31 : 8;
+ uint64_t r0_swiz7 : 3; /**< [ 23: 21](R/W) Bit select for even rank, bit 7. */
+ uint64_t r0_swiz6 : 3; /**< [ 20: 18](R/W) Bit select for even rank, bit 6. */
+ uint64_t r0_swiz5 : 3; /**< [ 17: 15](R/W) Bit select for even rank, bit 5. */
+ uint64_t r0_swiz4 : 3; /**< [ 14: 12](R/W) Bit select for even rank, bit 4. */
+ uint64_t r0_swiz3 : 3; /**< [ 11: 9](R/W) Bit select for even rank, bit 3. */
+ uint64_t r0_swiz2 : 3; /**< [ 8: 6](R/W) Bit select for even rank, bit 2. */
+ uint64_t r0_swiz1 : 3; /**< [ 5: 3](R/W) Bit select for even rank, bit 1. */
+ uint64_t r0_swiz0 : 3; /**< [ 2: 0](R/W) Bit select for even rank, bit 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t r0_swiz0 : 3; /**< [ 2: 0](R/W) Bit select for even rank, bit 0. */
+ uint64_t r0_swiz1 : 3; /**< [ 5: 3](R/W) Bit select for even rank, bit 1. */
+ uint64_t r0_swiz2 : 3; /**< [ 8: 6](R/W) Bit select for even rank, bit 2. */
+ uint64_t r0_swiz3 : 3; /**< [ 11: 9](R/W) Bit select for even rank, bit 3. */
+ uint64_t r0_swiz4 : 3; /**< [ 14: 12](R/W) Bit select for even rank, bit 4. */
+ uint64_t r0_swiz5 : 3; /**< [ 17: 15](R/W) Bit select for even rank, bit 5. */
+ uint64_t r0_swiz6 : 3; /**< [ 20: 18](R/W) Bit select for even rank, bit 6. */
+ uint64_t r0_swiz7 : 3; /**< [ 23: 21](R/W) Bit select for even rank, bit 7. */
+ uint64_t reserved_24_31 : 8;
+ uint64_t r1_swiz0 : 3; /**< [ 34: 32](R/W) Bit select for odd rank, bit 0. */
+ uint64_t r1_swiz1 : 3; /**< [ 37: 35](R/W) Bit select for odd rank, bit 1. */
+ uint64_t r1_swiz2 : 3; /**< [ 40: 38](R/W) Bit select for odd rank, bit 2. */
+ uint64_t r1_swiz3 : 3; /**< [ 43: 41](R/W) Bit select for odd rank, bit 3. */
+ uint64_t r1_swiz4 : 3; /**< [ 46: 44](R/W) Bit select for odd rank, bit 4. */
+ uint64_t r1_swiz5 : 3; /**< [ 49: 47](R/W) Bit select for odd rank, bit 5. */
+ uint64_t r1_swiz6 : 3; /**< [ 52: 50](R/W) Bit select for odd rank, bit 6. */
+ uint64_t r1_swiz7 : 3; /**< [ 55: 53](R/W) Bit select for odd rank, bit 7. */
+ uint64_t reserved_56_63 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_lanex_crc_swiz_s cn; */
+};
+typedef union bdk_lmcx_lanex_crc_swiz bdk_lmcx_lanex_crc_swiz_t;
+
+static inline uint64_t BDK_LMCX_LANEX_CRC_SWIZ(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_LANEX_CRC_SWIZ(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=8)))
+ return 0x87e088000380ll + 0x1000000ll * ((a) & 0x0) + 8ll * ((b) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=8)))
+ return 0x87e088000380ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b<=8)))
+ return 0x87e088000380ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=2) && (b<=8)))
+ return 0x87e088000380ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0xf);
+ __bdk_csr_fatal("LMCX_LANEX_CRC_SWIZ", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_LMCX_LANEX_CRC_SWIZ(a,b) bdk_lmcx_lanex_crc_swiz_t
+#define bustype_BDK_LMCX_LANEX_CRC_SWIZ(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_LANEX_CRC_SWIZ(a,b) "LMCX_LANEX_CRC_SWIZ"
+#define device_bar_BDK_LMCX_LANEX_CRC_SWIZ(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_LANEX_CRC_SWIZ(a,b) (a)
+#define arguments_BDK_LMCX_LANEX_CRC_SWIZ(a,b) (a),(b),-1,-1
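+
+/* Editor's illustrative sketch, not vendor code: building an identity CRC swizzle
+ * (lane bit i driven by CRC bit i) for both ranks. Each 3-bit r0_swizN / r1_swizN
+ * field selects the source bit, at positions [3N+2:3N] (even rank) and
+ * [3N+34:3N+32] (odd rank) per the struct above. */
+static inline uint64_t example_crc_swiz_identity(void)
+{
+    uint64_t u = 0;
+    for (int bit = 0; bit < 8; bit++) {
+        u |= (uint64_t)bit << (3 * bit);      /* r0_swiz<bit>, even rank */
+        u |= (uint64_t)bit << (32 + 3 * bit); /* r1_swiz<bit>, odd rank */
+    }
+    return u; /* store to BDK_LMCX_LANEX_CRC_SWIZ(lmc, lane) to apply */
+}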
+
+/**
+ * Register (RSL) lmc#_modereg_params0
+ *
+ * LMC Mode Register Parameters 0 Register
+ * These parameters are written into the DDR3/DDR4 MR0, MR1, MR2 and MR3 registers.
+ */
+union bdk_lmcx_modereg_params0
+{
+ uint64_t u;
+ struct bdk_lmcx_modereg_params0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_28_63 : 36;
+ uint64_t wrp_ext : 1; /**< [ 27: 27](R/W) A 1-bit extension to the [WRP] field. */
+ uint64_t cl_ext : 1; /**< [ 26: 26](R/W) Reserved; must be zero.
+ Internal:
+ The extended bit for the proposed CAS Latency spec change. The new
+ CAS Latency in DDR4 DRAM is defined in MR0(A12,A6,A5,A4,A2). This bit sets
+ the A12 bit.
+
+ See LMC()_MODEREG_PARAMS0[CL]. */
+ uint64_t al_ext : 1; /**< [ 25: 25](R/W) Reserved; must be zero.
+ Internal:
+ The extended bit for the new Additive latency settings for DDR4 3DS.
+ Together with LMC()_MODEREG_PARAMS0[AL], this covers additive latency settings
+ of up to CL-6.
+
+ 0: CL - (LMC()_MODEREG_PARAMS0[AL])
+ 1: CL - (LMC()_MODEREG_PARAMS0[AL] + 4)
+
+ See LMC()_MODEREG_PARAMS0[AL]. */
+ uint64_t ppd : 1; /**< [ 24: 24](R/W) DLL control for precharge powerdown.
+ 0 = Slow exit (DLL off).
+ 1 = Fast exit (DLL on).
+
+ LMC writes this value to MR0[PPD] in the selected DDR3/DDR4 parts during power-up/init
+ and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK]. This value must
+ equal
+ the MR0[PPD] value in all the DDR3/DDR4 parts attached to all ranks during normal
+ operation. */
+ uint64_t wrp : 3; /**< [ 23: 21](R/W) Write recovery for auto precharge. Should be programmed to be equal to or greater than
+ RNDUP[TWR(ns) / Tcyc(ns)].
+
+ DDR3:
+ 0x0 = 16.
+ 0x1 = 5.
+ 0x2 = 6.
+ 0x3 = 7.
+ 0x4 = 8.
+ 0x5 = 10.
+ 0x6 = 12.
+ 0x7 = 14.
+
+ DDR4:
+ 0x0 = 10.
+ 0x1 = 12.
+ 0x2 = 14.
+ 0x3 = 16.
+ 0x4 = 18.
+ 0x5 = 20.
+ 0x6 = 24.
+ 0x7 = 22.
+ 0x8 = 26. (Note that LMC()_MODEREG_PARAMS0[WRP_EXT] = 1).
+ 0x9-0xf = Reserved. (Note that LMC()_MODEREG_PARAMS0[WRP_EXT] = 1).
+
+ LMC writes this value to MR0[WR] in the selected DDR3/DDR4 parts during power-up/init and,
+ if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK]. This value must
+ equal
+ the MR0[WR] value in all the DDR3/DDR4 parts attached to all ranks during normal
+ operation. */
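+ /* Editor's worked example, not vendor text: at DDR4-1600 (Tcyc = 1.25 ns) with
+    TWR = 15 ns, RNDUP[15 / 1.25] = 12 clocks, which the DDR4 table above encodes
+    as [WRP] = 0x1. */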
+ uint64_t dllr : 1; /**< [ 20: 20](R/W) DLL reset. LMC writes this value to MR0[DLL] in the selected DDR3/DDR4 parts during power-
+ up/init and, if LMC()_CONFIG [SREF_WITH_DLL] is set, self-refresh exit instruction
+ sequences. See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK].
+ The MR0[DLL] value must be 0 in all the DDR3/DDR4 parts attached to all ranks during
+ normal operation. */
+ uint64_t tm : 1; /**< [ 19: 19](R/W) Test mode. LMC writes this value to MR0[TM] in the selected DDR3/DDR4 parts during power-
+ up/init and, if LMC()_CONFIG [SREF_WITH_DLL] is set, self-refresh exit instruction
+ sequences. See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK].
+ The MR0[TM] value must be 0 in all the DDR3/DDR4 parts attached to all ranks during normal
+ operation. */
+ uint64_t rbt : 1; /**< [ 18: 18](RO) Read burst type. 1 = interleaved (fixed). LMC writes this value to MR0[RBT] in the
+ selected DDR3/DDR4 parts during power-up/init and, if LMC()_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh exit instruction sequences. See LMC()_CONFIG[SEQ_SEL,INIT_START,
+ RANKMASK]. The MR0[RBT] value must be 1 in all the DDR3/DDR4 parts attached to all ranks
+ during normal operation. */
+ uint64_t cl : 4; /**< [ 17: 14](R/W) CAS latency.
+
+ In DDR3 mode:
+
+ 0x2 = 5. 0x1 = 12.
+ 0x4 = 6. 0x3 = 13.
+ 0x6 = 7. 0x5 = 14.
+ 0x8 = 8. 0x7 = 15.
+ 0xA = 9. 0x9 = 16.
+ 0xC = 10.
+ 0xE = 11.
+ 0x0, 0xB, 0xD, 0xF = Reserved.
+
+ In DDR4 mode:
+
+ 0x0 = 9. 0x1 = 10.
+ 0x2 = 11. 0x3 = 12.
+ 0x4 = 13. 0x5 = 14.
+ 0x6 = 15. 0x7 = 16.
+ 0x8 = 18. 0x9 = 20.
+ 0xA = 22. 0xB = 24.
+ 0xD = 17. 0xE = 19.
+ 0xF = 21. 0xC = Reserved.
+
+ LMC writes this value to MR0[CAS Latency / CL] in the selected DDR3 parts during power-
+ up/init and, if LMC()_CONFIG [SREF_WITH_DLL] is set, self-refresh exit instruction
+ sequences. See LMC()_CONFIG[SEQ_SEL,INIT_START,RANKMASK]. This value must equal the
+ MR0[CAS Latency / CL] value in all the DDR3/4 parts attached to all ranks during normal
+ operation.
+
+ tCL must be programmed to greater than or equal to tCWL for proper LMC operation. */
+ uint64_t bl : 2; /**< [ 13: 12](R/W) Burst length.
+ 0x0 = 8 (fixed).
+ 0x1 = 4 or 8 (on-the-fly).
+
+ LMC writes this value to MR0[BL] in the selected DDR3 parts during power-up/init and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK]. The MR0[BL] value
+ must be 1 in all the DDR3/4 parts attached to all ranks during normal operation. */
+ uint64_t qoff : 1; /**< [ 11: 11](R/W) Qoff enable. 0: enable; 1: disable.
+ LMC writes this value to MR1[Qoff] in the DDR3 parts in the selected ranks during power-
+ up/init, write-leveling, and if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry
+ and exit instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and
+ LMC()_CONFIG[RANKMASK,INIT_STATUS] and LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. The
+ MR1[Qoff] value must be 0 in all the DDR3 parts attached to all ranks during normal
+ operation. */
+ uint64_t tdqs : 1; /**< [ 10: 10](R/W) TDQS enable. 0: disable. LMC writes this value to MR1[TDQS] in the DDR3 parts in the
+ selected ranks during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_CONFIG[SEQ_SEL, INIT_START,RANKMASK,INIT_STATUS] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t wlev : 1; /**< [ 9: 9](RO) Write leveling enable. 0: disable. LMC writes MR1[Level]=0 in the DDR3 parts in the
+ selected ranks during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit sequences. (Write
+ leveling can only be initiated via the write leveling instruction sequence.) See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK,INIT_STATUS] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t al : 2; /**< [ 8: 7](R/W) Reserved; must be zero.
+ Internal:
+ Additive latency:
+ 0x0: 0.
+ 0x1: CL-1.
+ 0x2: CL - 2.
+ 0x3: Reserved.
+ LMC writes this value to MR1[AL] in the selected DDR3 parts during power-up/init, write
+ leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
+ instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and
+ LMC()_CONFIG[RANKMASK]
+ and LMC()_RESET_CTL [DDR3PWARM,DDR3PSOFT]. This value must equal the MR1[AL] value in
+ all
+ the DDR3 parts attached to all ranks during normal operation. See also
+ LMC()_CONTROL[POCAS]. */
+ uint64_t dll : 1; /**< [ 6: 6](R/W) DLL Enable. 0: enable; 1: disable. LMC writes this value to MR1[DLL] in the selected DDR3
+ parts during power-up/init, write-leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is
+ set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START]
+ and LMC()_CONFIG[RANKMASK] and LMC()_RESET_CTL [DDR3PWARM,DDR3PSOFT]. This value
+ must equal the MR1[DLL] value in all the DDR3 parts attached to all ranks during normal
+ operation. In DLL-off mode, CL/CWL must be programmed equal to 6/6, respectively, as per
+ the JEDEC DDR3 specifications. */
+ uint64_t mpr : 1; /**< [ 5: 5](R/W) MPR. LMC writes this value to MR3[MPR] in the selected DDR3 parts during power-up/init,
+ read-leveling, and, if LMC()_CONFIG [SREF_WITH_DLL] is set, self-refresh exit
+ instruction sequences. (LMC also writes MR3[MPR] = 1 at the beginning of the read-leveling
+ instruction sequence. Read-leveling should only be initiated via the read-leveling
+ instruction sequence.) See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and
+ LMC()_CONFIG[RANKMASK].
+ The MR3[MPR] value must be 0 in all the DDR3 parts attached to all ranks during normal
+ operation. */
+ uint64_t mprloc : 2; /**< [ 4: 3](R/W) MPR location. LMC writes this value to MR3[MPRLoc] in the selected DDR3 parts during
+ power-up/init, read-leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh
+ exit instruction sequences. (LMC also writes MR3[MPRLoc] = 0 at the beginning of the read-
+ leveling instruction sequence.) See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and
+ LMC()_CONFIG[RANKMASK]. The MR3[MPRLoc] value must be 0 in all the DDR3 parts attached
+ to all ranks during normal operation. */
+ uint64_t cwl : 3; /**< [ 2: 0](R/W) CAS write latency.
+
+ In DDR3 mode:
+ 0x0 = 5.
+ 0x1 = 6.
+ 0x2 = 7.
+ 0x3 = 8.
+ 0x4 = 9.
+ 0x5 = 10.
+ 0x6 = 11.
+ 0x7 = 12.
+
+ In DDR4 mode:
+ 0x0 = 9.
+ 0x1 = 10.
+ 0x2 = 11.
+ 0x3 = 12.
+ 0x4 = 14.
+ 0x5 = 16.
+ 0x6 = 18.
+ 0x7 = Reserved.
+
+ LMC writes this value to MR2[CWL] in the selected DDR3 parts during power-up/init, write
+ leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
+ instruction sequences. See LMC()_CONFIG[SEQ_SEL, INIT_START,RANKMASK] and
+ LMC()_RESET_CTL [DDR3PWARM, DDR3PSOFT]. This value must equal the MR2[CWL] value in
+ all the DDR3 parts attached to all ranks during normal operation.
+ tCWL must be programmed to less than or equal to tCL for proper LMC operation. */
+#else /* Word 0 - Little Endian */
+ uint64_t cwl : 3; /**< [ 2: 0](R/W) CAS write latency.
+
+ In DDR3 mode:
+ 0x0 = 5.
+ 0x1 = 6.
+ 0x2 = 7.
+ 0x3 = 8.
+ 0x4 = 9.
+ 0x5 = 10.
+ 0x6 = 11.
+ 0x7 = 12.
+
+ In DDR4 mode:
+ 0x0 = 9.
+ 0x1 = 10.
+ 0x2 = 11.
+ 0x3 = 12.
+ 0x4 = 14.
+ 0x5 = 16.
+ 0x6 = 18.
+ 0x7 = Reserved.
+
+ LMC writes this value to MR2[CWL] in the selected DDR3 parts during power-up/init, write
+ leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
+ instruction sequences. See LMC()_CONFIG[SEQ_SEL, INIT_START,RANKMASK] and
+ LMC()_RESET_CTL [DDR3PWARM, DDR3PSOFT]. This value must equal the MR2[CWL] value in
+ all the DDR3 parts attached to all ranks during normal operation.
+ tCWL must be programmed to less than or equal to tCL for proper LMC operation. */
+ uint64_t mprloc : 2; /**< [ 4: 3](R/W) MPR location. LMC writes this value to MR3[MPRLoc] in the selected DDR3 parts during
+ power-up/init, read-leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh
+ exit instruction sequences. (LMC also writes MR3[MPRLoc] = 0 at the beginning of the read-
+ leveling instruction sequence.) See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and
+ LMC()_CONFIG[RANKMASK]. The MR3[MPRLoc] value must be 0 in all the DDR3 parts attached
+ to all ranks during normal operation. */
+ uint64_t mpr : 1; /**< [ 5: 5](R/W) MPR. LMC writes this value to MR3[MPR] in the selected DDR3 parts during power-up/init,
+ read-leveling, and, if LMC()_CONFIG [SREF_WITH_DLL] is set, self-refresh exit
+ instruction sequences. (LMC also writes MR3[MPR] = 1 at the beginning of the read-leveling
+ instruction sequence. Read-leveling should only be initiated via the read-leveling
+ instruction sequence.) See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and
+ LMC()_CONFIG[RANKMASK].
+ The MR3[MPR] value must be 0 in all the DDR3 parts attached to all ranks during normal
+ operation. */
+ uint64_t dll : 1; /**< [ 6: 6](R/W) DLL Enable. 0: enable; 1: disable. LMC writes this value to MR1[DLL] in the selected DDR3
+ parts during power-up/init, write-leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is
+ set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START]
+ and LMC()_CONFIG[RANKMASK] and LMC()_RESET_CTL [DDR3PWARM,DDR3PSOFT]. This value
+ must equal the MR1[DLL] value in all the DDR3 parts attached to all ranks during normal
+ operation. In DLL-off mode, CL/CWL must be programmed equal to 6/6, respectively, as per
+ the JEDEC DDR3 specifications. */
+ uint64_t al : 2; /**< [ 8: 7](R/W) Reserved; must be zero.
+ Internal:
+ Additive latency:
+ 0x0: 0.
+ 0x1: CL-1.
+ 0x2: CL - 2.
+ 0x3: Reserved.
+ LMC writes this value to MR1[AL] in the selected DDR3 parts during power-up/init, write
+ leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
+ instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and
+ LMC()_CONFIG[RANKMASK]
+ and LMC()_RESET_CTL [DDR3PWARM,DDR3PSOFT]. This value must equal the MR1[AL] value in
+ all
+ the DDR3 parts attached to all ranks during normal operation. See also
+ LMC()_CONTROL[POCAS]. */
+ uint64_t wlev : 1; /**< [ 9: 9](RO) Write leveling enable. 0: disable. LMC writes MR1[Level]=0 in the DDR3 parts in the
+ selected ranks during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit sequences. (Write
+ leveling can only be initiated via the write leveling instruction sequence.) See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK,INIT_STATUS] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t tdqs : 1; /**< [ 10: 10](R/W) TDQS enable. 0: disable. LMC writes this value to MR1[TDQS] in the DDR3 parts in the
+ selected ranks during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_CONFIG[SEQ_SEL, INIT_START,RANKMASK,INIT_STATUS] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t qoff : 1; /**< [ 11: 11](R/W) Qoff enable. 0: enable; 1: disable.
+ LMC writes this value to MR1[Qoff] in the DDR3 parts in the selected ranks during power-
+ up/init, write-leveling, and if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry
+ and exit instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and
+ LMC()_CONFIG[RANKMASK,INIT_STATUS] and LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. The
+ MR1[Qoff] value must be 0 in all the DDR3 parts attached to all ranks during normal
+ operation. */
+ uint64_t bl : 2; /**< [ 13: 12](R/W) Burst length.
+ 0x0 = 8 (fixed).
+ 0x1 = 4 or 8 (on-the-fly).
+
+ LMC writes this value to MR0[BL] in the selected DDR3 parts during power-up/init and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK]. The MR0[BL] value
+ must be 1 in all the DDR3/4 parts attached to all ranks during normal operation. */
+ uint64_t cl : 4; /**< [ 17: 14](R/W) CAS latency.
+
+ In DDR3 mode:
+
+ 0x2 = 5. 0x1 = 12.
+ 0x4 = 6. 0x3 = 13.
+ 0x6 = 7. 0x5 = 14.
+ 0x8 = 8. 0x7 = 15.
+ 0xA = 9. 0x9 = 16.
+ 0xC = 10.
+ 0xE = 11.
+ 0x0, 0xB, 0xD, 0xF = Reserved.
+
+ In DDR4 mode:
+
+ 0x0 = 9. 0x1 = 10.
+ 0x2 = 11. 0x3 = 12.
+ 0x4 = 13. 0x5 = 14.
+ 0x6 = 15. 0x7 = 16.
+ 0x8 = 18. 0x9 = 20.
+ 0xA = 22. 0xB = 24.
+ 0xD = 17. 0xE = 19.
+ 0xF = 21. 0xC = Reserved.
+
+ LMC writes this value to MR0[CAS Latency / CL] in the selected DDR3 parts during power-
+ up/init and, if LMC()_CONFIG [SREF_WITH_DLL] is set, self-refresh exit instruction
+ sequences. See LMC()_CONFIG[SEQ_SEL,INIT_START,RANKMASK]. This value must equal the
+ MR0[CAS Latency / CL] value in all the DDR3/4 parts attached to all ranks during normal
+ operation.
+
+ tCL must be programmed to greater than or equal to tCWL for proper LMC operation. */
+ uint64_t rbt : 1; /**< [ 18: 18](RO) Read burst type. 1 = interleaved (fixed). LMC writes this value to MR0[RBT] in the
+ selected DDR3/DDR4 parts during power-up/init and, if LMC()_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh exit instruction sequences. See LMC()_CONFIG[SEQ_SEL,INIT_START,
+ RANKMASK]. The MR0[RBT] value must be 1 in all the DDR3/DDR4 parts attached to all ranks
+ during normal operation. */
+ uint64_t tm : 1; /**< [ 19: 19](R/W) Test mode. LMC writes this value to MR0[TM] in the selected DDR3/DDR4 parts during power-
+ up/init and, if LMC()_CONFIG [SREF_WITH_DLL] is set, self-refresh exit instruction
+ sequences. See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK].
+ The MR0[TM] value must be 0 in all the DDR3/DDR4 parts attached to all ranks during normal
+ operation. */
+ uint64_t dllr : 1; /**< [ 20: 20](R/W) DLL reset. LMC writes this value to MR0[DLL] in the selected DDR3/DDR4 parts during power-
+ up/init and, if LMC()_CONFIG [SREF_WITH_DLL] is set, self-refresh exit instruction
+ sequences. See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK].
+ The MR0[DLL] value must be 0 in all the DDR3/DDR4 parts attached to all ranks during
+ normal operation. */
+ uint64_t wrp : 3; /**< [ 23: 21](R/W) Write recovery for auto precharge. Should be programmed to be equal to or greater than
+ RNDUP[TWR(ns) / Tcyc(ns)].
+
+ DDR3:
+ 0x0 = 16.
+ 0x1 = 5.
+ 0x2 = 6.
+ 0x3 = 7.
+ 0x4 = 8.
+ 0x5 = 10.
+ 0x6 = 12.
+ 0x7 = 14.
+
+ DDR4:
+ 0x0 = 10.
+ 0x1 = 12.
+ 0x2 = 14.
+ 0x3 = 16.
+ 0x4 = 18.
+ 0x5 = 20.
+ 0x6 = 24.
+ 0x7 = 22.
+ 0x8 = 26. (Note that LMC()_MODEREG_PARAMS0[WRP_EXT] = 1).
+ 0x9-0xf = Reserved. (Note that LMC()_MODEREG_PARAMS0[WRP_EXT] = 1).
+
+ LMC writes this value to MR0[WR] in the selected DDR3/DDR4 parts during power-up/init and,
+ if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK]. This value must
+ equal
+ the MR0[WR] value in all the DDR3/DDR4 parts attached to all ranks during normal
+ operation. */
+ uint64_t ppd : 1; /**< [ 24: 24](R/W) DLL control for precharge powerdown.
+ 0 = Slow exit (DLL off).
+ 1 = Fast exit (DLL on).
+
+ LMC writes this value to MR0[PPD] in the selected DDR3/DDR4 parts during power-up/init
+ and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK]. This value must
+ equal
+ the MR0[PPD] value in all the DDR3/DDR4 parts attached to all ranks during normal
+ operation. */
+ uint64_t al_ext : 1; /**< [ 25: 25](R/W) Reserved; must be zero.
+ Internal:
+ The extended bit for the new Additive latency settings for DDR4 3DS.
+ Together with LMC()_MODEREG_PARAMS0[AL], this covers additive latency settings
+ of up to CL-6.
+
+ 0: CL - (LMC()_MODEREG_PARAMS0[AL])
+ 1: CL - (LMC()_MODEREG_PARAMS0[AL] + 4)
+
+ See LMC()_MODEREG_PARAMS0[AL]. */
+ uint64_t cl_ext : 1; /**< [ 26: 26](R/W) Reserved; must be zero.
+ Internal:
+ The extended bit for the proposed CAS Latency spec change. The new
+ CAS Latency in DDR4 DRAM is defined in MR0(A12,A6,A5,A4,A2). This bit sets
+ the A12 bit.
+
+ See LMC()_MODEREG_PARAMS0[CL]. */
+ uint64_t wrp_ext : 1; /**< [ 27: 27](R/W) A 1-bit extension to the [WRP] field. */
+ uint64_t reserved_28_63 : 36;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_modereg_params0_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_28_63 : 36;
+ uint64_t wrp_ext : 1; /**< [ 27: 27](RO) Reserved. */
+ uint64_t cl_ext : 1; /**< [ 26: 26](R/W) Reserved; must be zero.
+ Internal:
+ The extended bit for the proposed CAS Latency spec change. The new
+ CAS Latency in DDR4 DRAM is defined in MR0(A12,A6,A5,A4,A2). This bit sets
+ the A12 bit.
+
+ See LMC()_MODEREG_PARAMS0[CL]. */
+ uint64_t al_ext : 1; /**< [ 25: 25](R/W) Reserved; must be zero.
+ Internal:
+ The extended bit for the new Additive latency settings for DDR4 3DS.
+ Together with LMC()_MODEREG_PARAMS0[AL], this covers additive latency settings
+ of up to CL-6.
+
+ 0: CL - (LMC()_MODEREG_PARAMS0[AL])
+ 1: CL - (LMC()_MODEREG_PARAMS0[AL] + 4)
+
+ See LMC()_MODEREG_PARAMS0[AL]. */
+ uint64_t ppd : 1; /**< [ 24: 24](R/W) DLL control for precharge powerdown.
+ 0 = Slow exit (DLL off).
+ 1 = Fast exit (DLL on).
+
+ LMC writes this value to MR0[PPD] in the selected DDR3/DDR4 parts during power-up/init
+ and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK]. This value must
+ equal
+ the MR0[PPD] value in all the DDR3/DDR4 parts attached to all ranks during normal
+ operation. */
+ uint64_t wrp : 3; /**< [ 23: 21](R/W) Write recovery for auto precharge. Should be programmed to be equal to or greater than
+ RNDUP[TWR(ns) / Tcyc(ns)].
+
+ DDR3:
+ 0x0 = 16.
+ 0x1 = 5.
+ 0x2 = 6.
+ 0x3 = 7.
+ 0x4 = 8.
+ 0x5 = 10.
+ 0x6 = 12.
+ 0x7 = 14.
+
+ DDR4:
+ 0x0 = 10.
+ 0x1 = 12.
+ 0x2 = 14.
+ 0x3 = 16.
+ 0x4 = 18.
+ 0x5 = 20.
+ 0x6 = 24.
+ 0x7 = 22.
+ 0x8 = 26. (Note that LMC()_MODEREG_PARAMS0[WRP_EXT] = 1).
+ 0x9-0xf = Reserved. (Note that LMC()_MODEREG_PARAMS0[WRP_EXT] = 1).
+
+ LMC writes this value to MR0[WR] in the selected DDR3/DDR4 parts during power-up/init and,
+ if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK]. This value must
+ equal
+ the MR0[WR] value in all the DDR3/DDR4 parts attached to all ranks during normal
+ operation. */
+ uint64_t dllr : 1; /**< [ 20: 20](R/W) DLL reset. LMC writes this value to MR0[DLL] in the selected DDR3/DDR4 parts during power-
+ up/init and, if LMC()_CONFIG [SREF_WITH_DLL] is set, self-refresh exit instruction
+ sequences. See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK].
+ The MR0[DLL] value must be 0 in all the DDR3/DDR4 parts attached to all ranks during
+ normal operation. */
+ uint64_t tm : 1; /**< [ 19: 19](R/W) Test mode. LMC writes this value to MR0[TM] in the selected DDR3/DDR4 parts during power-
+ up/init and, if LMC()_CONFIG [SREF_WITH_DLL] is set, self-refresh exit instruction
+ sequences. See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK].
+ The MR0[TM] value must be 0 in all the DDR3/DDR4 parts attached to all ranks during normal
+ operation. */
+ uint64_t rbt : 1; /**< [ 18: 18](RO) Read burst type. 1 = interleaved (fixed). LMC writes this value to MR0[RBT] in the
+ selected DDR3/DDR4 parts during power-up/init and, if LMC()_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh exit instruction sequences. See LMC()_CONFIG[SEQ_SEL,INIT_START,
+ RANKMASK]. The MR0[RBT] value must be 1 in all the DDR3/DDR4 parts attached to all ranks
+ during normal operation. */
+ uint64_t cl : 4; /**< [ 17: 14](R/W) CAS latency.
+
+ In DDR3 mode:
+
+ 0x2 = 5. 0x1 = 12.
+ 0x4 = 6. 0x3 = 13.
+ 0x6 = 7. 0x5 = 14.
+ 0x8 = 8. 0x7 = 15.
+ 0xA = 9. 0x9 = 16.
+ 0xC = 10.
+ 0xE = 11.
+ 0x0, 0xB, 0xD, 0xF = Reserved.
+
+ In DDR4 mode:
+
+ 0x0 = 9. 0x1 = 10.
+ 0x2 = 11. 0x3 = 12.
+ 0x4 = 13. 0x5 = 14.
+ 0x6 = 15. 0x7 = 16.
+ 0x8 = 18. 0x9 = 20.
+ 0xA = 22. 0xB = 24.
+ 0xD = 17. 0xE = 19.
+ 0xF = 21. 0xC = Reserved.
+
+ LMC writes this value to MR0[CAS Latency / CL] in the selected DDR3 parts during power-
+ up/init and, if LMC()_CONFIG [SREF_WITH_DLL] is set, self-refresh exit instruction
+ sequences. See LMC()_CONFIG[SEQ_SEL,INIT_START,RANKMASK]. This value must equal the
+ MR0[CAS Latency / CL] value in all the DDR3/4 parts attached to all ranks during normal
+ operation.
+
+ tCL must be programmed to greater than or equal to tCWL for proper LMC operation. */
+ uint64_t bl : 2; /**< [ 13: 12](R/W) Burst length.
+ 0x0 = 8 (fixed).
+ 0x1 = 4 or 8 (on-the-fly).
+
+ LMC writes this value to MR0[BL] in the selected DDR3 parts during power-up/init and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK]. The MR0[BL] value
+ must be 1 in all the DDR3/4 parts attached to all ranks during normal operation. */
+ uint64_t qoff : 1; /**< [ 11: 11](R/W) Qoff enable. 0: enable; 1: disable.
+ LMC writes this value to MR1[Qoff] in the DDR3 parts in the selected ranks during power-
+ up/init, write-leveling, and if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry
+ and exit instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and
+ LMC()_CONFIG[RANKMASK,INIT_STATUS] and LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. The
+ MR1[Qoff] value must be 0 in all the DDR3 parts attached to all ranks during normal
+ operation. */
+ uint64_t tdqs : 1; /**< [ 10: 10](R/W) TDQS enable. 0: disable. LMC writes this value to MR1[TDQS] in the DDR3 parts in the
+ selected ranks during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_CONFIG[SEQ_SEL, INIT_START,RANKMASK,INIT_STATUS] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t wlev : 1; /**< [ 9: 9](RO) Write leveling enable. 0: disable. LMC writes MR1[Level]=0 in the DDR3 parts in the
+ selected ranks during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit sequences. (Write
+ leveling can only be initiated via the write leveling instruction sequence.) See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK,INIT_STATUS] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t al : 2; /**< [ 8: 7](R/W) Reserved; must be zero.
+ Internal:
+ Additive latency:
+ 0x0: 0.
+ 0x1: CL-1.
+ 0x2: CL - 2.
+ 0x3: Reserved.
+ LMC writes this value to MR1[AL] in the selected DDR3 parts during power-up/init, write
+ leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
+ instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and
+ LMC()_CONFIG[RANKMASK]
+ and LMC()_RESET_CTL [DDR3PWARM,DDR3PSOFT]. This value must equal the MR1[AL] value in
+ all
+ the DDR3 parts attached to all ranks during normal operation. See also
+ LMC()_CONTROL[POCAS]. */
+ uint64_t dll : 1; /**< [ 6: 6](R/W) DLL Enable. 0: enable; 1: disable. LMC writes this value to MR1[DLL] in the selected DDR3
+ parts during power-up/init, write-leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is
+ set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START]
+ and LMC()_CONFIG[RANKMASK] and LMC()_RESET_CTL [DDR3PWARM,DDR3PSOFT]. This value
+ must equal the MR1[DLL] value in all the DDR3 parts attached to all ranks during normal
+ operation. In DLL-off mode, CL/CWL must be programmed equal to 6/6, respectively, as per
+ the JEDEC DDR3 specifications. */
+ uint64_t mpr : 1; /**< [ 5: 5](R/W) MPR. LMC writes this value to MR3[MPR] in the selected DDR3 parts during power-up/init,
+ read-leveling, and, if LMC()_CONFIG [SREF_WITH_DLL] is set, self-refresh exit
+ instruction sequences. (LMC also writes MR3[MPR] = 1 at the beginning of the read-leveling
+ instruction sequence. Read-leveling should only be initiated via the read-leveling
+ instruction sequence.) See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and
+ LMC()_CONFIG[RANKMASK].
+ The MR3[MPR] value must be 0 in all the DDR3 parts attached to all ranks during normal
+ operation. */
+ uint64_t mprloc : 2; /**< [ 4: 3](R/W) MPR location. LMC writes this value to MR3[MPRLoc] in the selected DDR3 parts during
+ power-up/init, read-leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh
+ exit instruction sequences. (LMC also writes MR3[MPRLoc] = 0 at the beginning of the read-
+ leveling instruction sequence.) See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and
+ LMC()_CONFIG[RANKMASK]. The MR3[MPRLoc] value must be 0 in all the DDR3 parts attached
+ to all ranks during normal operation. */
+ uint64_t cwl : 3; /**< [ 2: 0](R/W) CAS write latency.
+
+ In DDR3 mode:
+ 0x0 = 5.
+ 0x1 = 6.
+ 0x2 = 7.
+ 0x3 = 8.
+ 0x4 = 9.
+ 0x5 = 10.
+ 0x6 = 11.
+ 0x7 = 12.
+
+ In DDR4 mode:
+ 0x0 = 9.
+ 0x1 = 10.
+ 0x2 = 11.
+ 0x3 = 12.
+ 0x4 = 14.
+ 0x5 = 16.
+ 0x6 = 18.
+ 0x7 = Reserved.
+
+ LMC writes this value to MR2[CWL] in the selected DDR3 parts during power-up/init, write
+ leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
+ instruction sequences. See LMC()_CONFIG[SEQ_SEL, INIT_START,RANKMASK] and
+ LMC()_RESET_CTL [DDR3PWARM, DDR3PSOFT]. This value must equal the MR2[CWL] value in
+ all the DDR3 parts attached to all ranks during normal operation.
+ tCWL must be programmed to less than or equal to tCL for proper LMC operation. */
+#else /* Word 0 - Little Endian */
+ uint64_t cwl : 3; /**< [ 2: 0](R/W) CAS write latency.
+
+ In DDR3 mode:
+ 0x0 = 5.
+ 0x1 = 6.
+ 0x2 = 7.
+ 0x3 = 8.
+ 0x4 = 9.
+ 0x5 = 10.
+ 0x6 = 11.
+ 0x7 = 12.
+
+ In DDR4 mode:
+ 0x0 = 9.
+ 0x1 = 10.
+ 0x2 = 11.
+ 0x3 = 12.
+ 0x4 = 14.
+ 0x5 = 16.
+ 0x6 = 18.
+ 0x7 = Reserved.
+
+ LMC writes this value to MR2[CWL] in the selected DDR3 parts during power-up/init, write
+ leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
+ instruction sequences. See LMC()_CONFIG[SEQ_SEL, INIT_START,RANKMASK] and
+ LMC()_RESET_CTL [DDR3PWARM, DDR3PSOFT]. This value must equal the MR2[CWL] value in
+ all the DDR3 parts attached to all ranks during normal operation.
+ tCWL must be programmed to less than or equal to tCL for proper LMC operation. */
+ uint64_t mprloc : 2; /**< [ 4: 3](R/W) MPR location. LMC writes this value to MR3[MPRLoc] in the selected DDR3 parts during
+ power-up/init, read-leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh
+ exit instruction sequences. (LMC also writes MR3[MPRLoc] = 0 at the beginning of the read-
+ leveling instruction sequence.) See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and
+ LMC()_CONFIG[RANKMASK]. The MR3[MPRLoc] value must be 0 in all the DDR3 parts attached
+ to all ranks during normal operation. */
+ uint64_t mpr : 1; /**< [ 5: 5](R/W) MPR. LMC writes this value to MR3[MPR] in the selected DDR3 parts during power-up/init,
+ read-leveling, and, if LMC()_CONFIG [SREF_WITH_DLL] is set, self-refresh exit
+ instruction sequences. (LMC also writes MR3[MPR] = 1 at the beginning of the read-leveling
+ instruction sequence. Read-leveling should only be initiated via the read-leveling
+ instruction sequence.) See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and
+ LMC()_CONFIG[RANKMASK].
+ The MR3[MPR] value must be 0 in all the DDR3 parts attached to all ranks during normal
+ operation. */
+ uint64_t dll : 1; /**< [ 6: 6](R/W) DLL Enable. 0: enable; 1: disable. LMC writes this value to MR1[DLL] in the selected DDR3
+ parts during power-up/init, write-leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is
+ set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START]
+ and LMC()_CONFIG[RANKMASK] and LMC()_RESET_CTL [DDR3PWARM,DDR3PSOFT]. This value
+ must equal the MR1[DLL] value in all the DDR3 parts attached to all ranks during normal
+ operation. In DLL-off mode, CL/CWL must be programmed equal to 6/6, respectively, as per
+ the JEDEC DDR3 specifications. */
+ uint64_t al : 2; /**< [ 8: 7](R/W) Reserved; must be zero.
+ Internal:
+ Additive latency:
+ 0x0: 0.
+ 0x1: CL-1.
+ 0x2: CL - 2.
+ 0x3: Reserved.
+ LMC writes this value to MR1[AL] in the selected DDR3 parts during power-up/init, write
+ leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
+ instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and
+ LMC()_CONFIG[RANKMASK]
+ and LMC()_RESET_CTL [DDR3PWARM,DDR3PSOFT]. This value must equal the MR1[AL] value in
+ all
+ the DDR3 parts attached to all ranks during normal operation. See also
+ LMC()_CONTROL[POCAS]. */
+ uint64_t wlev : 1; /**< [ 9: 9](RO) Write leveling enable. 0: disable. LMC writes MR1[Level]=0 in the DDR3 parts in the
+ selected ranks during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit sequences. (Write
+ leveling can only be initiated via the write leveling instruction sequence.) See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK,INIT_STATUS] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t tdqs : 1; /**< [ 10: 10](R/W) TDQS enable. 0: disable. LMC writes this value to MR1[TDQS] in the DDR3 parts in the
+ selected ranks during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_CONFIG[SEQ_SEL, INIT_START,RANKMASK,INIT_STATUS] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t qoff : 1; /**< [ 11: 11](R/W) Qoff enable. 0: enable; 1: disable.
+ LMC writes this value to MR1[Qoff] in the DDR3 parts in the selected ranks during power-
+ up/init, write-leveling, and if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry
+ and exit instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and
+ LMC()_CONFIG[RANKMASK,INIT_STATUS] and LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. The
+ MR1[Qoff] value must be 0 in all the DDR3 parts attached to all ranks during normal
+ operation. */
+ uint64_t bl : 2; /**< [ 13: 12](R/W) Burst length.
+ 0x0 = 8 (fixed).
+ 0x1 = 4 or 8 (on-the-fly).
+
+ LMC writes this value to MR0[BL] in the selected DDR3 parts during power-up/init and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK]. The MR0[BL] value
+ must be 1 in all the DDR3/4 parts attached to all ranks during normal operation. */
+ uint64_t cl : 4; /**< [ 17: 14](R/W) CAS latency.
+
+ In DDR3 mode:
+
+ 0x2 = 5. 0x1 = 12.
+ 0x4 = 6. 0x3 = 13.
+ 0x6 = 7. 0x5 = 14.
+ 0x8 = 8. 0x7 = 15.
+ 0xA = 9. 0x9 = 16.
+ 0xC = 10.
+ 0xE = 11.
+ 0x0, 0xB, 0xD, 0xF = Reserved.
+
+ In DDR4 mode:
+
+ 0x0 = 9. 0x1 = 10.
+ 0x2 = 11. 0x3 = 12.
+ 0x4 = 13. 0x5 = 14.
+ 0x6 = 15. 0x7 = 16.
+ 0x8 = 18. 0x9 = 20.
+ 0xA = 22. 0xB = 24.
+ 0xD = 17. 0xE = 19.
+ 0xF = 21. 0xC = Reserved.
+
+ LMC writes this value to MR0[CAS Latency / CL] in the selected DDR3 parts during power-
+ up/init and, if LMC()_CONFIG [SREF_WITH_DLL] is set, self-refresh exit instruction
+ sequences. See LMC()_CONFIG[SEQ_SEL,INIT_START,RANKMASK]. This value must equal the
+ MR0[CAS Latency / CL] value in all the DDR3/4 parts attached to all ranks during normal
+ operation.
+
+ tCL must be programmed to greater than or equal to tCWL for proper LMC operation. */
+ uint64_t rbt : 1; /**< [ 18: 18](RO) Read burst type. 1 = interleaved (fixed). LMC writes this value to MR0[RBT] in the
+ selected DDR3/DDR4 parts during power-up/init and, if LMC()_CONFIG[SREF_WITH_DLL] is set,
+ self-refresh exit instruction sequences. See LMC()_CONFIG[SEQ_SEL,INIT_START,
+ RANKMASK]. The MR0[RBT] value must be 1 in all the DDR3/DDR4 parts attached to all ranks
+ during normal operation. */
+ uint64_t tm : 1; /**< [ 19: 19](R/W) Test mode. LMC writes this value to MR0[TM] in the selected DDR3/DDR4 parts during power-
+ up/init and, if LMC()_CONFIG [SREF_WITH_DLL] is set, self-refresh exit instruction
+ sequences. See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK].
+ The MR0[TM] value must be 0 in all the DDR3/DDR4 parts attached to all ranks during normal
+ operation. */
+ uint64_t dllr : 1; /**< [ 20: 20](R/W) DLL reset. LMC writes this value to MR0[DLL] in the selected DDR3/DDR4 parts during power-
+ up/init and, if LMC()_CONFIG [SREF_WITH_DLL] is set, self-refresh exit instruction
+ sequences. See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK].
+ The MR0[DLL] value must be 0 in all the DDR3/DDR4 parts attached to all ranks during
+ normal operation. */
+ uint64_t wrp : 3; /**< [ 23: 21](R/W) Write recovery for auto precharge. Should be programmed to be equal to or greater than
+ RNDUP[TWR(ns) / Tcyc(ns)].
+
+ DDR3:
+ 0x0 = 16.
+ 0x1 = 5.
+ 0x2 = 6.
+ 0x3 = 7.
+ 0x4 = 8.
+ 0x5 = 10.
+ 0x6 = 12.
+ 0x7 = 14.
+
+ DDR4:
+ 0x0 = 10.
+ 0x1 = 12.
+ 0x2 = 14.
+ 0x3 = 16.
+ 0x4 = 18.
+ 0x5 = 20.
+ 0x6 = 24.
+ 0x7 = 22.
+ 0x8 = 26. (Note that LMC()_MODEREG_PARAMS0[WRP_EXT] = 1).
+ 0x9-0xf = Reserved. (Note that LMC()_MODEREG_PARAMS0[WRP_EXT] = 1).
+
+ LMC writes this value to MR0[WR] in the selected DDR3/DDR4 parts during power-up/init and,
+ if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK]. This value must
+ equal
+ the MR0[WR] value in all the DDR3/DDR4 parts attached to all ranks during normal
+ operation. */
+ uint64_t ppd : 1; /**< [ 24: 24](R/W) DLL control for precharge powerdown.
+ 0 = Slow exit (DLL off).
+ 1 = Fast exit (DLL on).
+
+ LMC writes this value to MR0[PPD] in the selected DDR3/DDR4 parts during power-up/init
+ and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK]. This value must
+ equal
+ the MR0[PPD] value in all the DDR3/DDR4 parts attached to all ranks during normal
+ operation. */
+ uint64_t al_ext : 1; /**< [ 25: 25](R/W) Reserved; must be zero.
+ Internal:
+ The extended bit for the new Additive latency settings for DDR4 3DS.
+ Together with LMC()_MODEREG_PARAMS0[AL], this covers additive latency settings
+ of up to CL-6.
+
+ 0: CL - (LMC()_MODEREG_PARAMS0[AL])
+ 1: CL - (LMC()_MODEREG_PARAMS0[AL] + 4)
+
+ See LMC()_MODEREG_PARAMS0[AL]. */
+ uint64_t cl_ext : 1; /**< [ 26: 26](R/W) Reserved; must be zero.
+ Internal:
+ The extended bit for the proposed CAS Latency spec change. The new
+ CAS Latency in DDR4 DRAM is defined in MR0(A12,A6,A5,A4,A2). This bit sets
+ the A12 bit.
+
+ See LMC()_MODEREG_PARAMS0[CL]. */
+ uint64_t wrp_ext : 1; /**< [ 27: 27](RO) Reserved. */
+ uint64_t reserved_28_63 : 36;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_lmcx_modereg_params0_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_28_63 : 36;
+ uint64_t wrp_ext : 1; /**< [ 27: 27](R/W) A 1-bit extension to the [WRP] field. */
+ uint64_t cl_ext : 1; /**< [ 26: 26](R/W) Reserved; must be zero.
+ Internal:
+ The extended bit for the proposed CAS Latency spec change. The new
+ CAS Latency in DDR4 DRAM is defined in MR0(A12,A6,A5,A4,A2). This bit sets
+ the A12 bit.
+
+ See LMC()_MODEREG_PARAMS0[CL]. */
+ uint64_t al_ext : 1; /**< [ 25: 25](R/W) Reserved; must be zero.
+ Internal:
+ The extended bit for the new Additive latency settings for DDR4 3DS.
+ Together with LMC()_MODEREG_PARAMS0[AL], this covers additive latency settings
+ of up to CL-6.
+
+ 0 = CL - (LMC()_MODEREG_PARAMS0[AL]).
+ 1 = CL - (LMC()_MODEREG_PARAMS0[AL] + 4).
+
+ See LMC()_MODEREG_PARAMS0[AL]. */
+ uint64_t ppd : 1; /**< [ 24: 24](R/W) DLL control for precharge powerdown.
+ 0 = Slow exit (DLL off).
+ 1 = Fast exit (DLL on).
+
+ LMC writes this value to MR0[PPD] in the selected DDR4 parts during power-up/init and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK]. This value must
+ equal the MR0[PPD] value in all the DDR4 parts attached to all ranks during normal
+ operation. */
+ uint64_t wrp : 3; /**< [ 23: 21](R/W) Write recovery for auto precharge. Should be programmed to be equal to or greater than
+ RNDUP[TWR(ns) / Tcyc(ns)].
+
+ 0x0 = 10.
+ 0x1 = 12.
+ 0x2 = 14.
+ 0x3 = 16.
+ 0x4 = 18.
+ 0x5 = 20.
+ 0x6 = 24.
+ 0x7 = 22.
+ 0x8 = 26. (Note that LMC()_MODEREG_PARAMS0[WRP_EXT] = 1).
+ 0x9-0xf = Reserved. (Note that LMC()_MODEREG_PARAMS0[WRP_EXT] = 1).
+
+ LMC writes this value to MR0[WR] in the selected DDR4 parts during power-up/init and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK].
+ This value must equal the MR0[WR] value in all the DDR4 parts attached to all ranks during
+ normal operation. */
+ uint64_t dllr : 1; /**< [ 20: 20](R/W) DLL reset. LMC writes this value to MR0[DLL Reset] in the selected DDR4 parts during power-
+ up/init and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction
+ sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK].
+ The MR0[DLL Reset] value must be zero in all the DDR4 parts attached to all ranks during normal
+ operation. */
+ uint64_t tm : 1; /**< [ 19: 19](R/W) Test mode. LMC writes this value to MR0[TM] in the selected DDR4 parts during power-
+ up/init and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction
+ sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK].
+ The MR0[TM] value must be zero in all the DDR4 parts attached to all ranks during normal
+ operation. */
+ uint64_t rbt : 1; /**< [ 18: 18](RO) Read burst type. 1 = interleaved (fixed). LMC writes this value to MR0[RBT] in the
+ selected DDR4 parts during power-up/init and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-
+ refresh exit instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START],
+ LMC()_CONFIG[RANKMASK]. The MR0[RBT] value must be one in all the DDR4 parts attached to all ranks
+ during normal operation. */
+ uint64_t cl : 4; /**< [ 17: 14](R/W) CAS latency. Together with [CL_EXT] field, the following shows all possible CAS latency
+ values.
+
+ 0x0 = 9. 0x1 = 10.
+ 0x2 = 11. 0x3 = 12.
+ 0x4 = 13. 0x5 = 14.
+ 0x6 = 15. 0x7 = 16.
+ 0x8 = 18. 0x9 = 20.
+ 0xA = 22. 0xB = 24.
+ 0xC = 23. 0xD = 17.
+ 0xE = 19. 0xF = 21.
+ 0x10 = 25. 0x11 = 26.
+ 0x12 = 27. 0x13 = 28.
+ 0x14 = 29. 0x15 = 30.
+ 0x16 = 31. 0x17 = 32.
+ 0x18-0x1F = Reserved.
+
+ LMC writes this value to MR0[CAS Latency / CL] in the selected DDR4 parts during power-
+ up/init and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction
+ sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START],
+ LMC()_CONFIG[RANKMASK]. This value must equal the
+ MR0[CAS Latency / CL] value in all the DDR4 parts attached to all ranks during normal
+ operation.
+
+ tCL must be programmed to greater than or equal to tCWL for proper LMC operation. */
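+ /* Editor's note, inferred from the table above rather than vendor text: with
+    [CL_EXT] = 0 the encodings 0x0-0xF apply (e.g. CL = 16 is 0x7); [CL_EXT] = 1
+    supplies the fifth bit, making the 0x10-0x17 rows (CL = 25-32) reachable. */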
+ uint64_t bl : 2; /**< [ 13: 12](R/W) Burst length.
+ 0x0 = 8 (fixed).
+ 0x1 = 4 or 8 (on-the-fly).
+
+ LMC writes this value to MR0[BL] in the selected DDR4 parts during power-up/init and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK]. The MR0[BL] value
+ must be one in all the DDR4 parts attached to all ranks during normal operation. */
+ uint64_t qoff : 1; /**< [ 11: 11](R/W) Qoff enable. 0 = enable; 1 = disable.
+ LMC writes this value to MR1[Qoff] in the DDR4 parts in the selected ranks during power
+ up/init, write leveling, and if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry
+ and exit instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
+ LMC()_CONFIG[RANKMASK], LMC()_CONFIG[INIT_STATUS] and LMC()_RESET_CTL[DDR4PDOMAIN]. The
+ MR1[Qoff] value must be zero in all the DDR4 parts attached to all ranks during normal
+ operation. */
+ uint64_t tdqs : 1; /**< [ 10: 10](R/W) TDQS enable. 0 = disable. LMC writes this value to MR1[TDQS] in the DDR4 parts in the
+ selected ranks during power-up/init, write leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START], LMC()_CONFIG[RANKMASK],
+ LMC()_CONFIG[INIT_STATUS] and LMC()_RESET_CTL[DDR4PDOMAIN]. */
+ uint64_t wlev : 1; /**< [ 9: 9](RO) Write leveling enable. 0 = disable. LMC writes MR1[Level]=0 in the DDR4 parts in the
+ selected ranks during power-up/init, write leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit sequences. (Write
+ leveling can only be initiated via the write leveling instruction sequence.) See
+ LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START], LMC()_CONFIG[RANKMASK],
+ LMC()_CONFIG[INIT_STATUS] and LMC()_RESET_CTL[DDR4PDOMAIN]. */
+ uint64_t al : 2; /**< [ 8: 7](R/W) Reserved; must be zero.
+ Internal:
+ Additive latency:
+ 0x0: 0.
+ 0x1: CL-1.
+ 0x2: CL - 2.
+ 0x3: Reserved.
+ LMC writes this value to MR1[AL] in the selected DDR4 parts during power-up/init, write
+ leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
+ instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START], LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR4PDOMAIN]. This value must equal the MR1[AL] value in all the DDR4
+ parts attached to all ranks during normal operation. See also LMC()_CONTROL[POCAS]. */
+ uint64_t dll : 1; /**< [ 6: 6](R/W) DLL Enable. 0 = enable; 1 = disable. LMC writes this value to MR1[DLL] in the selected
+ DDR4
+ parts during power-up/init, write leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is
+ set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START], LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR4PDOMAIN].
+ This value must equal the MR1[DLL] value in all the DDR4 parts attached to all ranks
+ during normal operation. In DLL-off mode, CL/CWL must be programmed equal to 6/6,
+ respectively, as per the JEDEC DDR4 specifications. */
+ uint64_t mpr : 1; /**< [ 5: 5](R/W) MPR. LMC writes this value to MR3[MPR] in the selected DDR4 parts during power-up/init,
+ read leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit
+ instruction sequences. (LMC also writes MR3[MPR] = 1 at the beginning of the read leveling
+ instruction sequence. Read leveling should only be initiated via the read leveling
+ instruction sequence.) See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
+ LMC()_CONFIG[RANKMASK].
+ The MR3[MPR] value must be zero in all the DDR4 parts attached to all ranks during normal
+ operation. */
+ uint64_t mprloc : 2; /**< [ 4: 3](R/W) MPR location. LMC writes this value to MR3[MPRLoc] in the selected DDR4 parts during
+ power-up/init, read leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh
+ exit instruction sequences. (LMC also writes MR3[MPRLoc] = 0 at the beginning of the read-
+ leveling instruction sequence.) See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
+ LMC()_CONFIG[RANKMASK]. The MR3[MPRLoc] value must be zero in all the DDR4 parts attached
+ to all ranks during normal operation. */
+ uint64_t cwl : 3; /**< [ 2: 0](R/W) CAS write latency.
+
+ 0x0 = 9.
+ 0x1 = 10.
+ 0x2 = 11.
+ 0x3 = 12.
+ 0x4 = 14.
+ 0x5 = 16.
+ 0x6 = 18.
+ 0x7 = 20.
+
+ LMC writes this value to MR2[CWL] in the selected DDR4 parts during power-up/init, write
+ leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
+ instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START],
+ LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR4PDOMAIN]. This value must equal the MR2[CWL] value in
+ all the DDR4 parts attached to all ranks during normal operation.
+ tCWL must be programmed to less than or equal to tCL for proper LMC operation. */
+#else /* Word 0 - Little Endian */
+ uint64_t cwl : 3; /**< [ 2: 0](R/W) CAS write latency.
+
+ 0x0 = 9.
+ 0x1 = 10.
+ 0x2 = 11.
+ 0x3 = 12.
+ 0x4 = 14.
+ 0x5 = 16.
+ 0x6 = 18.
+ 0x7 = 20.
+
+ LMC writes this value to MR2[CWL] in the selected DDR4 parts during power-up/init, write
+ leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
+ instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START],
+ LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR4PDOMAIN]. This value must equal the MR2[CWL] value in
+ all the DDR4 parts attached to all ranks during normal operation.
+ tCWL must be programmed to less than or equal to tCL for proper LMC operation. */
+ uint64_t mprloc : 2; /**< [ 4: 3](R/W) MPR location. LMC writes this value to MR3[MPRLoc] in the selected DDR4 parts during
+ power-up/init, read leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh
+ exit instruction sequences. (LMC also writes MR3[MPRLoc] = 0 at the beginning of the read-
+ leveling instruction sequence.) See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
+ LMC()_CONFIG[RANKMASK]. The MR3[MPRLoc] value must be zero in all the DDR4 parts attached
+ to all ranks during normal operation. */
+ uint64_t mpr : 1; /**< [ 5: 5](R/W) MPR. LMC writes this value to MR3[MPR] in the selected DDR4 parts during power-up/init,
+ read leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit
+ instruction sequences. (LMC also writes MR3[MPR] = 1 at the beginning of the read leveling
+ instruction sequence. Read leveling should only be initiated via the read leveling
+ instruction sequence.) See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
+ LMC()_CONFIG[RANKMASK].
+ The MR3[MPR] value must be zero in all the DDR4 parts attached to all ranks during normal
+ operation. */
+ uint64_t dll : 1; /**< [ 6: 6](R/W) DLL Enable. 0 = enable; 1 = disable. LMC writes this value to MR1[DLL] in the selected
+ DDR4
+ parts during power-up/init, write leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is
+ set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START], LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR4PDOMAIN].
+ This value must equal the MR1[DLL] value in all the DDR4 parts attached to all ranks
+ during normal operation. In DLL-off mode, CL/CWL must be programmed equal to 6/6,
+ respectively, as per the JEDEC DDR4 specifications. */
+ uint64_t al : 2; /**< [ 8: 7](R/W) Reserved; must be zero.
+ Internal:
+ Additive latency:
+ 0x0: 0.
+ 0x1: CL-1.
+ 0x2: CL - 2.
+ 0x3: Reserved.
+ LMC writes this value to MR1[AL] in the selected DDR4 parts during power-up/init, write
+ leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
+ instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START], LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR4PDOMAIN]. This value must equal the MR1[AL] value in all the DDR4
+ parts attached to all ranks during normal operation. See also LMC()_CONTROL[POCAS]. */
+ uint64_t wlev : 1; /**< [ 9: 9](RO) Write leveling enable. 0 = disable. LMC writes MR1[Level]=0 in the DDR4 parts in the
+ selected ranks during power-up/init, write leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit sequences. (Write
+ leveling can only be initiated via the write leveling instruction sequence.) See
+ LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START], LMC()_CONFIG[RANKMASK],
+ LMC()_CONFIG[INIT_STATUS] and LMC()_RESET_CTL[DDR4PDOMAIN]. */
+ uint64_t tdqs : 1; /**< [ 10: 10](R/W) TDQS enable. 0 = disable. LMC writes this value to MR1[TDQS] in the DDR4 parts in the
+ selected ranks during power-up/init, write leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START], LMC()_CONFIG[RANKMASK],
+ LMC()_CONFIG[INIT_STATUS] and LMC()_RESET_CTL[DDR4PDOMAIN]. */
+ uint64_t qoff : 1; /**< [ 11: 11](R/W) Qoff enable. 0 = enable; 1 = disable.
+ LMC writes this value to MR1[Qoff] in the DDR4 parts in the selected ranks during power
+ up/init, write leveling, and if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry
+ and exit instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
+ LMC()_CONFIG[RANKMASK], LMC()_CONFIG[INIT_STATUS] and LMC()_RESET_CTL[DDR4PDOMAIN]. The
+ MR1[Qoff] value must be zero in all the DDR4 parts attached to all ranks during normal
+ operation. */
+ uint64_t bl : 2; /**< [ 13: 12](R/W) Burst length.
+ 0x0 = 8 (fixed).
+ 0x1 = 4 or 8 (on-the-fly).
+
+ LMC writes this value to MR0[BL] in the selected DDR4 parts during power-up/init and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK]. The MR0[BL] value
+ must be one in all the DDR4 parts attached to all ranks during normal operation. */
+ uint64_t cl : 4; /**< [ 17: 14](R/W) CAS latency. Together with [CL_EXT] field, the following shows all possible CAS latency
+ values.
+
+ 0x0 = 9. 0x1 = 10.
+ 0x2 = 11. 0x3 = 12.
+ 0x4 = 13. 0x5 = 14.
+ 0x6 = 15. 0x7 = 16.
+ 0x8 = 18. 0x9 = 20.
+ 0xA = 22. 0xB = 24.
+ 0xC = 23. 0xD = 17.
+ 0xE = 19. 0xF = 21.
+ 0x10 = 25. 0x11 = 26.
+ 0x12 = 27. 0x13 = 28.
+ 0x14 = 29. 0x15 = 30.
+ 0x16 = 31. 0x17 = 32.
+ 0x18-0x1F = Reserved.
+
+ LMC writes this value to MR0[CAS Latency / CL] in the selected DDR4 parts during power-
+ up/init and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction
+ sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START],
+ LMC()_CONFIG[RANKMASK]. This value must equal the
+ MR0[CAS Latency / CL] value in all the DDR4 parts attached to all ranks during normal
+ operation.
+
+ tCL must be programmed to greater than or equal to tCWL for proper LMC operation. */
+ uint64_t rbt : 1; /**< [ 18: 18](RO) Read burst type. 1 = interleaved (fixed). LMC writes this value to MR0[RBT] in the
+ selected DDR4 parts during power-up/init and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-
+ refresh exit instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START],
+ LMC()_CONFIG[RANKMASK]. The MR0[RBT] value must be one in all the DDR4 parts attached to all ranks
+ during normal operation. */
+ uint64_t tm : 1; /**< [ 19: 19](R/W) Test mode. LMC writes this value to MR0[TM] in the selected DDR4 parts during power-
+ up/init and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction
+ sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK].
+ The MR0[TM] value must be zero in all the DDR4 parts attached to all ranks during normal
+ operation. */
+ uint64_t dllr : 1; /**< [ 20: 20](R/W) DLL reset. LMC writes this value to MR0[DLL Reset] in the selected DDR4 parts during power-
+ up/init and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction
+ sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK].
+ The MR0[DLL Reset] value must be zero in all the DDR4 parts attached to all ranks during normal
+ operation. */
+ uint64_t wrp : 3; /**< [ 23: 21](R/W) Write recovery for auto precharge. Should be programmed to be equal to or greater than
+ RNDUP[TWR(ns) / Tcyc(ns)].
+
+ 0x0 = 10.
+ 0x1 = 12.
+ 0x2 = 14.
+ 0x3 = 16.
+ 0x4 = 18.
+ 0x5 = 20.
+ 0x6 = 24.
+ 0x7 = 22.
+ 0x8 = 26. (Requires LMC()_MODEREG_PARAMS0[WRP_EXT] = 1.)
+ 0x9-0xF = Reserved. (Encodings with LMC()_MODEREG_PARAMS0[WRP_EXT] = 1.)
+
+ LMC writes this value to MR0[WR] in the selected DDR4 parts during power-up/init and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK].
+ This value must equal the MR0[WR] value in all the DDR4 parts attached to all ranks during
+ normal operation. */
+ uint64_t ppd : 1; /**< [ 24: 24](R/W) DLL control for precharge powerdown.
+ 0 = Slow exit (DLL off).
+ 1 = Fast exit (DLL on).
+
+ LMC writes this value to MR0[PPD] in the selected DDR4 parts during power-up/init and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK]. This value must
+ equal the MR0[PPD] value in all the DDR4 parts attached to all ranks during normal
+ operation. */
+ uint64_t al_ext : 1; /**< [ 25: 25](R/W) Reserved; must be zero.
+ Internal:
+ The extended bit for the new Additive latency settings for DDR4 3DS.
+ Together with LMC()_MODEREG_PARAMS0[AL], this covers additive latency settings
+ of up to CL-6.
+
+ 0 = CL - (LMC()_MODEREG_PARAMS0[AL]).
+ 1 = CL - (LMC()_MODEREG_PARAMS0[AL] + 4).
+
+ See LMC()_MODEREG_PARAMS0[AL]. */
+ uint64_t cl_ext : 1; /**< [ 26: 26](R/W) Reserved; must be zero.
+ Internal:
+ The extended bit for the proposed CAS Latency spec change. The new
+ CAS Latency in DDR4 DRAM is defined in MR0(A12,A6,A5,A4,A2). This bit sets
+ the A12 bit.
+
+ See LMC()_MODEREG_PARAMS0[CL]. */
+ uint64_t wrp_ext : 1; /**< [ 27: 27](R/W) A one-bit extension to the [WRP] field. */
+ uint64_t reserved_28_63 : 36;
+#endif /* Word 0 - End */
+ } cn9;
+ /* struct bdk_lmcx_modereg_params0_s cn81xx; */
+ /* struct bdk_lmcx_modereg_params0_s cn83xx; */
+ /* struct bdk_lmcx_modereg_params0_s cn88xxp2; */
+};
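+
+/* Illustrative sketch, not from the original BDK sources: the combined
+ * {CL_EXT, CL} CAS-latency encoding tabulated above is non-monotonic, so a
+ * table lookup is clearer than arithmetic. Assumes the caller has already
+ * validated 9 <= cl <= 32.
+ *
+ *   static inline int lmc_encode_cas_latency(int cl)
+ *   {
+ *       // enc[cl - 9]: bit 4 of the result goes to [CL_EXT], bits <3:0> to [CL]
+ *       static const unsigned char enc[] = {
+ *           // CL =  9    10   11   12   13   14   15   16   17   18   19   20
+ *                  0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x0D,0x08,0x0E,0x09,
+ *           // CL = 21   22   23   24   25   26   27   28   29   30   31   32
+ *                  0x0F,0x0A,0x0C,0x0B,0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17,
+ *       };
+ *       return enc[cl - 9];
+ *   }
+ */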
+typedef union bdk_lmcx_modereg_params0 bdk_lmcx_modereg_params0_t;
+
+static inline uint64_t BDK_LMCX_MODEREG_PARAMS0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_MODEREG_PARAMS0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0880001a8ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0880001a8ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e0880001a8ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e0880001a8ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_MODEREG_PARAMS0", 1, a, 0, 0, 0);
+}
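+
+/* Note (illustrative, not from the original BDK sources): the generated
+ * accessor above adds a 0x1000000-byte stride per LMC instance to the base
+ * address. For example, on CN88XX (up to four LMCs) the register for LMC2
+ * is at 0x87e0880001a8 + 2 * 0x1000000 = 0x87e08a0001a8. */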
+
+#define typedef_BDK_LMCX_MODEREG_PARAMS0(a) bdk_lmcx_modereg_params0_t
+#define bustype_BDK_LMCX_MODEREG_PARAMS0(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_MODEREG_PARAMS0(a) "LMCX_MODEREG_PARAMS0"
+#define device_bar_BDK_LMCX_MODEREG_PARAMS0(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_MODEREG_PARAMS0(a) (a)
+#define arguments_BDK_LMCX_MODEREG_PARAMS0(a) (a),-1,-1,-1
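+
+/* Usage sketch (illustrative, not from the original BDK sources): a typical
+ * BDK-style read-modify-write of this register, assuming the BDK_CSR_INIT and
+ * BDK_CSR_MODIFY helpers declared in libbdk-arch/bdk-csr.h; the node, LMC
+ * number and [CL] encoding below are placeholders.
+ *
+ *   #include <bdk.h>
+ *
+ *   static void example_set_cas_latency(bdk_node_t node, int lmc, int cl_enc)
+ *   {
+ *       // Read-modify-write, changing only the [CL] field
+ *       BDK_CSR_MODIFY(c, node, BDK_LMCX_MODEREG_PARAMS0(lmc),
+ *           c.s.cl = cl_enc);
+ *
+ *       // Read the whole register back for inspection
+ *       BDK_CSR_INIT(p0, node, BDK_LMCX_MODEREG_PARAMS0(lmc));
+ *       printf("MODEREG_PARAMS0 = 0x%016llx\n", (unsigned long long)p0.u);
+ *   }
+ */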
+
+/**
+ * Register (RSL) lmc#_modereg_params1
+ *
+ * LMC Mode Register Parameters 1 Register
+ * These parameters are written into the DDR3/DDR4 MR1 and MR2 registers.
+ */
+union bdk_lmcx_modereg_params1
+{
+ uint64_t u;
+ struct bdk_lmcx_modereg_params1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_55_63 : 9;
+ uint64_t rtt_wr_11_ext : 1; /**< [ 54: 54](R/W) Reserved.
+ Internal:
+ RTT_WR rank 3 extension bit for DDR4. */
+ uint64_t rtt_wr_10_ext : 1; /**< [ 53: 53](R/W) Reserved.
+ Internal:
+ RTT_WR rank 2 extension bit for DDR4. */
+ uint64_t rtt_wr_01_ext : 1; /**< [ 52: 52](R/W) RTT_WR rank 1 extension bit for DDR4. */
+ uint64_t rtt_wr_00_ext : 1; /**< [ 51: 51](R/W) RTT_WR rank 0 extension bit for DDR4. */
+ uint64_t db_output_impedance : 3; /**< [ 50: 48](R/W) Reserved.
+ Internal:
+ Host Interface DQ/DQS Output Driver Impedance control for DIMM0's Data Buffer.
+ This is the default value used during Host Interface Write Leveling in LRDIMM
+ environment, i.e., LMC()_CONFIG[LRDIMM_ENA] = 1, LMC()_SEQ_CTL[SEQ_SEL] = 0x6.
+ 0x0 = RZQ/6 (40 ohm).
+ 0x1 = RZQ/7 (34 ohm).
+ 0x2 = RZQ/5 (48 ohm).
+ 0x3-0x7 = Reserved. */
+ uint64_t rtt_nom_11 : 3; /**< [ 47: 45](R/W) Reserved.
+ Internal:
+ RTT_NOM rank 3. LMC writes this value to MR1[RTT_NOM] in the rank 3 (i.e. DIMM1_CS1) DDR3
+ parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
+ used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3
+ (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also
+ allowed. */
+ uint64_t dic_11 : 2; /**< [ 44: 43](R/W) Reserved.
+ Internal:
+ Output driver impedance control rank 3. LMC writes this value to MR1[D.I.C.] in the rank 3
+ (i.e. DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t rtt_wr_11 : 2; /**< [ 42: 41](R/W) Reserved.
+ Internal:
+ RTT_WR rank 3. LMC writes this value to MR2[Rtt_WR] in the rank 3 (i.e. DIMM1_CS1) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL [DDR3PWARM,DDR3PSOFT]. */
+ uint64_t srt_11 : 1; /**< [ 40: 40](R/W) Reserved.
+ Internal:
+ Self-refresh temperature range rank 3. LMC writes this value to MR2[SRT] in the rank 3
+ (i.e. DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START], LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t asr_11 : 1; /**< [ 39: 39](R/W) Reserved.
+ Internal:
+ Auto self-refresh rank 3. LMC writes this value to MR2[ASR] in the rank 3 (i.e. DIMM1_CS1)
+ DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START], LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t pasr_11 : 3; /**< [ 38: 36](R/W) Reserved.
+ Internal:
+ Partial array self-refresh rank 3. LMC writes this value to MR2[PASR] in the rank 3 (i.e.
+ DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_nom_10 : 3; /**< [ 35: 33](R/W) Reserved.
+ Internal:
+ RTT_NOM rank 2. LMC writes this value to MR1[Rtt_Nom] in the rank 2 (i.e. DIMM1_CS0) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL [DDR3PWARM, DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM
+ is used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or
+ 3 (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are
+ also allowed. */
+ uint64_t dic_10 : 2; /**< [ 32: 31](R/W) Reserved.
+ Internal:
+ Output driver impedance control rank 2. LMC writes this value to MR1[D.I.C.] in the rank 2
+ (i.e. DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_wr_10 : 2; /**< [ 30: 29](R/W) Reserved.
+ Internal:
+ RTT_WR rank 2. LMC writes this value to MR2[Rtt_WR] in the rank 2 (i.e. DIMM1_CS0) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t srt_10 : 1; /**< [ 28: 28](R/W) Reserved.
+ Internal:
+ Self-refresh temperature range rank 2. LMC writes this value to MR2[SRT] in the rank 2
+ (i.e. DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t asr_10 : 1; /**< [ 27: 27](R/W) Reserved.
+ Internal:
+ Auto self-refresh rank 2. LMC writes this value to MR2[ASR] in the rank 2 (i.e. DIMM1_CS0)
+ DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t pasr_10 : 3; /**< [ 26: 24](R/W) Reserved.
+ Internal:
+ Partial array self-refresh rank 2. LMC writes this value to MR2[PASR] in the rank 2 (i.e.
+ DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_nom_01 : 3; /**< [ 23: 21](R/W) RTT_NOM rank 1. LMC writes this value to MR1[RTT_NOM] in the rank 1 (i.e. DIMM0_CS1) DDR3
+ parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
+ used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3
+ (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also
+ allowed. */
+ uint64_t dic_01 : 2; /**< [ 20: 19](R/W) Output driver impedance control rank 1. LMC writes this value to MR1[D.I.C.] in the rank 1
+ (i.e. DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_wr_01 : 2; /**< [ 18: 17](R/W) RTT_WR rank 1. LMC writes this value to MR2[RTT_WR] in the rank 1 (i.e. DIMM0_CS1) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t srt_01 : 1; /**< [ 16: 16](R/W) Self-refresh temperature range rank 1. LMC writes this value to MR2[SRT] in the rank 1
+ (i.e. DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t asr_01 : 1; /**< [ 15: 15](R/W) Auto self-refresh rank 1. LMC writes this value to MR2[ASR] in the rank 1 (i.e. DIMM0_CS1)
+ DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t pasr_01 : 3; /**< [ 14: 12](R/W) Partial array self-refresh rank 1. LMC writes this value to MR2[PASR] in the rank 1 (i.e.
+ DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_nom_00 : 3; /**< [ 11: 9](R/W) RTT_NOM rank 0. LMC writes this value to MR1[RTT_NOM] in the rank 0 (i.e. DIMM0_CS0) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
+ used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2),
+ or 3 (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8)
+ are also allowed. */
+ uint64_t dic_00 : 2; /**< [ 8: 7](R/W) Output driver impedance control rank 0. LMC writes this value to MR1[D.I.C.] in the rank 0
+ (i.e. DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_wr_00 : 2; /**< [ 6: 5](R/W) RTT_WR rank 0. LMC writes this value to MR2[RTT_WR] in the rank 0 (i.e. DIMM0_CS0) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t srt_00 : 1; /**< [ 4: 4](R/W) Self-refresh temperature range rank 0. LMC writes this value to MR2[SRT] in the rank 0
+ (i.e. DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t asr_00 : 1; /**< [ 3: 3](R/W) Auto self-refresh rank 0. LMC writes this value to MR2[ASR] in the rank 0 (i.e. DIMM0_CS0)
+ DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL [DDR3PWARM,DDR3PSOFT]. */
+ uint64_t pasr_00 : 3; /**< [ 2: 0](R/W) Partial array self-refresh rank 0. LMC writes this value to MR2[PASR] in the rank 0 (i.e.
+ DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+#else /* Word 0 - Little Endian */
+ uint64_t pasr_00 : 3; /**< [ 2: 0](R/W) Partial array self-refresh rank 0. LMC writes this value to MR2[PASR] in the rank 0 (i.e.
+ DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t asr_00 : 1; /**< [ 3: 3](R/W) Auto self-refresh rank 0. LMC writes this value to MR2[ASR] in the rank 0 (i.e. DIMM0_CS0)
+ DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL [DDR3PWARM,DDR3PSOFT]. */
+ uint64_t srt_00 : 1; /**< [ 4: 4](R/W) Self-refresh temperature range rank 0. LMC writes this value to MR2[SRT] in the rank 0
+ (i.e. DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_wr_00 : 2; /**< [ 6: 5](R/W) RTT_WR rank 0. LMC writes this value to MR2[RTT_WR] in the rank 0 (i.e. DIMM0_CS0) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t dic_00 : 2; /**< [ 8: 7](R/W) Output driver impedance control rank 0. LMC writes this value to MR1[D.I.C.] in the rank 0
+ (i.e. DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_nom_00 : 3; /**< [ 11: 9](R/W) RTT_NOM rank 0. LMC writes this value to MR1[RTT_NOM] in the rank 0 (i.e. DIMM0_CS0) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
+ used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2),
+ or 3 (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8)
+ are also allowed. */
+ uint64_t pasr_01 : 3; /**< [ 14: 12](R/W) Partial array self-refresh rank 1. LMC writes this value to MR2[PASR] in the rank 1 (i.e.
+ DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t asr_01 : 1; /**< [ 15: 15](R/W) Auto self-refresh rank 1. LMC writes this value to MR2[ASR] in the rank 1 (i.e. DIMM0_CS1)
+ DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t srt_01 : 1; /**< [ 16: 16](R/W) Self-refresh temperature range rank 1. LMC writes this value to MR2[SRT] in the rank 1
+ (i.e. DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_wr_01 : 2; /**< [ 18: 17](R/W) RTT_WR rank 1. LMC writes this value to MR2[RTT_WR] in the rank 1 (i.e. DIMM0_CS1) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t dic_01 : 2; /**< [ 20: 19](R/W) Output driver impedance control rank 1. LMC writes this value to MR1[D.I.C.] in the rank 1
+ (i.e. DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_nom_01 : 3; /**< [ 23: 21](R/W) RTT_NOM rank 1. LMC writes this value to MR1[RTT_NOM] in the rank 1 (i.e. DIMM0_CS1) DDR3
+ parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
+ used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3
+ (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also
+ allowed. */
+ uint64_t pasr_10 : 3; /**< [ 26: 24](R/W) Reserved.
+ Internal:
+ Partial array self-refresh rank 2. LMC writes this value to MR2[PASR] in the rank 2 (i.e.
+ DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t asr_10 : 1; /**< [ 27: 27](R/W) Reserved.
+ Internal:
+ Auto self-refresh rank 2. LMC writes this value to MR2[ASR] in the rank 2 (i.e. DIMM1_CS0)
+ DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t srt_10 : 1; /**< [ 28: 28](R/W) Reserved.
+ Internal:
+ Self-refresh temperature range rank 2. LMC writes this value to MR2[SRT] in the rank 2
+ (i.e. DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_wr_10 : 2; /**< [ 30: 29](R/W) Reserved.
+ Internal:
+ RTT_WR rank 2. LMC writes this value to MR2[Rtt_WR] in the rank 2 (i.e. DIMM1_CS0) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t dic_10 : 2; /**< [ 32: 31](R/W) Reserved.
+ Internal:
+ Output driver impedance control rank 2. LMC writes this value to MR1[D.I.C.] in the rank 2
+ (i.e. DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_nom_10 : 3; /**< [ 35: 33](R/W) Reserved.
+ Internal:
+ RTT_NOM rank 2. LMC writes this value to MR1[Rtt_Nom] in the rank 2 (i.e. DIMM1_CS0) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL [DDR3PWARM, DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM
+ is used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or
+ 3 (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are
+ also allowed. */
+ uint64_t pasr_11 : 3; /**< [ 38: 36](R/W) Reserved.
+ Internal:
+ Partial array self-refresh rank 3. LMC writes this value to MR2[PASR] in the rank 3 (i.e.
+ DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t asr_11 : 1; /**< [ 39: 39](R/W) Reserved.
+ Internal:
+ Auto self-refresh rank 3. LMC writes this value to MR2[ASR] in the rank 3 (i.e. DIMM1_CS1)
+ DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START], LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t srt_11 : 1; /**< [ 40: 40](R/W) Reserved.
+ Internal:
+ Self-refresh temperature range rank 3. LMC writes this value to MR2[SRT] in the rank 3
+ (i.e. DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START], LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_wr_11 : 2; /**< [ 42: 41](R/W) Reserved.
+ Internal:
+ RTT_WR rank 3. LMC writes this value to MR2[Rtt_WR] in the rank 3 (i.e. DIMM1_CS1) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL [DDR3PWARM,DDR3PSOFT]. */
+ uint64_t dic_11 : 2; /**< [ 44: 43](R/W) Reserved.
+ Internal:
+ Output driver impedance control rank 3. LMC writes this value to MR1[D.I.C.] in the rank 3
+ (i.e. DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t rtt_nom_11 : 3; /**< [ 47: 45](R/W) Reserved.
+ Internal:
+ RTT_NOM rank 3. LMC writes this value to MR1[RTT_NOM] in the rank 3 (i.e. DIMM1_CS1) DDR3
+ parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
+ used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3
+ (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also
+ allowed. */
+ uint64_t db_output_impedance : 3; /**< [ 50: 48](R/W) Reserved.
+ Internal:
+ Host Interface DQ/DQS Output Driver Impedance control for DIMM0's Data Buffer.
+ This is the default value used during Host Interface Write Leveling in LRDIMM
+ environment, i.e., LMC()_CONFIG[LRDIMM_ENA] = 1, LMC()_SEQ_CTL[SEQ_SEL] = 0x6.
+ 0x0 = RZQ/6 (40 ohm).
+ 0x1 = RZQ/7 (34 ohm).
+ 0x2 = RZQ/5 (48 ohm).
+ 0x3-0x7 = Reserved. */
+ uint64_t rtt_wr_00_ext : 1; /**< [ 51: 51](R/W) RTT_WR rank 0 extension bit for DDR4. */
+ uint64_t rtt_wr_01_ext : 1; /**< [ 52: 52](R/W) RTT_WR rank 1 extension bit for DDR4. */
+ uint64_t rtt_wr_10_ext : 1; /**< [ 53: 53](R/W) Reserved.
+ Internal:
+ RTT_WR rank 2 extension bit for DDR4. */
+ uint64_t rtt_wr_11_ext : 1; /**< [ 54: 54](R/W) Reserved.
+ Internal:
+ RTT_WR rank 3 extension bit for DDR4. */
+ uint64_t reserved_55_63 : 9;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_modereg_params1_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_55_63 : 9;
+ uint64_t rtt_wr_11_ext : 1; /**< [ 54: 54](RO) Reserved. */
+ uint64_t rtt_wr_10_ext : 1; /**< [ 53: 53](RO) Reserved. */
+ uint64_t rtt_wr_01_ext : 1; /**< [ 52: 52](RO) Reserved. */
+ uint64_t rtt_wr_00_ext : 1; /**< [ 51: 51](RO) Reserved. */
+ uint64_t db_output_impedance : 3; /**< [ 50: 48](R/W) Reserved.
+ Internal:
+ Host Interface DQ/DQS Output Driver Impedance control for DIMM0's Data Buffer.
+ This is the default value used during Host Interface Write Leveling in LRDIMM
+ environment, i.e., LMC()_CONFIG[LRDIMM_ENA] = 1, LMC()_SEQ_CTL[SEQ_SEL] = 0x6.
+ 0x0 = RZQ/6 (40 ohm).
+ 0x1 = RZQ/7 (34 ohm).
+ 0x2 = RZQ/5 (48 ohm).
+ 0x3-0x7 = Reserved. */
+ uint64_t rtt_nom_11 : 3; /**< [ 47: 45](R/W) RTT_NOM rank 3. LMC writes this value to MR1[RTT_NOM] in the rank 3 (i.e. DIMM1_CS1) DDR3
+ parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
+ used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3
+ (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also
+ allowed. */
+ uint64_t dic_11 : 2; /**< [ 44: 43](R/W) Output driver impedance control rank 3. LMC writes this value to MR1[D.I.C.] in the rank 3
+ (i.e. DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t rtt_wr_11 : 2; /**< [ 42: 41](R/W) RTT_WR rank 3. LMC writes this value to MR2[Rtt_WR] in the rank 3 (i.e. DIMM1_CS1) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL [DDR3PWARM,DDR3PSOFT]. */
+ uint64_t srt_11 : 1; /**< [ 40: 40](R/W) Self-refresh temperature range rank 3. LMC writes this value to MR2[SRT] in the rank 3
+ (i.e. DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START], LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t asr_11 : 1; /**< [ 39: 39](R/W) Auto self-refresh rank 3. LMC writes this value to MR2[ASR] in the rank 3 (i.e. DIMM1_CS1)
+ DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START], LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t pasr_11 : 3; /**< [ 38: 36](R/W) Partial array self-refresh rank 3. LMC writes this value to MR2[PASR] in the rank 3 (i.e.
+ DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_nom_10 : 3; /**< [ 35: 33](R/W) RTT_NOM rank 2. LMC writes this value to MR1[Rtt_Nom] in the rank 2 (i.e. DIMM1_CS0) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL [DDR3PWARM, DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM
+ is used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or
+ 3 (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are
+ also allowed. */
+ uint64_t dic_10 : 2; /**< [ 32: 31](R/W) Output driver impedance control rank 2. LMC writes this value to MR1[D.I.C.] in the rank 2
+ (i.e. DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_wr_10 : 2; /**< [ 30: 29](R/W) RTT_WR rank 2. LMC writes this value to MR2[Rtt_WR] in the rank 2 (i.e. DIMM1_CS0) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t srt_10 : 1; /**< [ 28: 28](R/W) Self-refresh temperature range rank 2. LMC writes this value to MR2[SRT] in the rank 2
+ (i.e. DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t asr_10 : 1; /**< [ 27: 27](R/W) Auto self-refresh rank 2. LMC writes this value to MR2[ASR] in the rank 2 (i.e. DIMM1_CS0)
+ DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t pasr_10 : 3; /**< [ 26: 24](R/W) Partial array self-refresh rank 2. LMC writes this value to MR2[PASR] in the rank 2 (i.e.
+ DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_nom_01 : 3; /**< [ 23: 21](R/W) RTT_NOM rank 1. LMC writes this value to MR1[RTT_NOM] in the rank 1 (i.e. DIMM0_CS1) DDR3
+ parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
+ used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3
+ (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also
+ allowed. */
+ uint64_t dic_01 : 2; /**< [ 20: 19](R/W) Output driver impedance control rank 1. LMC writes this value to MR1[D.I.C.] in the rank 1
+ (i.e. DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_wr_01 : 2; /**< [ 18: 17](R/W) RTT_WR rank 1. LMC writes this value to MR2[RTT_WR] in the rank 1 (i.e. DIMM0_CS1) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t srt_01 : 1; /**< [ 16: 16](R/W) Self-refresh temperature range rank 1. LMC writes this value to MR2[SRT] in the rank 1
+ (i.e. DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t asr_01 : 1; /**< [ 15: 15](R/W) Auto self-refresh rank 1. LMC writes this value to MR2[ASR] in the rank 1 (i.e. DIMM0_CS1)
+ DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t pasr_01 : 3; /**< [ 14: 12](R/W) Partial array self-refresh rank 1. LMC writes this value to MR2[PASR] in the rank 1 (i.e.
+ DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_nom_00 : 3; /**< [ 11: 9](R/W) RTT_NOM rank 0. LMC writes this value to MR1[RTT_NOM] in the rank 0 (i.e. DIMM0_CS0) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
+ used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2),
+ or 3 (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8)
+ are also allowed. */
+ uint64_t dic_00 : 2; /**< [ 8: 7](R/W) Output driver impedance control rank 0. LMC writes this value to MR1[D.I.C.] in the rank 0
+ (i.e. DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_wr_00 : 2; /**< [ 6: 5](R/W) RTT_WR rank 0. LMC writes this value to MR2[RTT_WR] in the rank 0 (i.e. DIMM0_CS0) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t srt_00 : 1; /**< [ 4: 4](R/W) Self-refresh temperature range rank 0. LMC writes this value to MR2[SRT] in the rank 0
+ (i.e. DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t asr_00 : 1; /**< [ 3: 3](R/W) Auto self-refresh rank 0. LMC writes this value to MR2[ASR] in the rank 0 (i.e. DIMM0_CS0)
+ DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL [DDR3PWARM,DDR3PSOFT]. */
+ uint64_t pasr_00 : 3; /**< [ 2: 0](R/W) Partial array self-refresh rank 0. LMC writes this value to MR2[PASR] in the rank 0 (i.e.
+ DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+#else /* Word 0 - Little Endian */
+ uint64_t pasr_00 : 3; /**< [ 2: 0](R/W) Partial array self-refresh rank 0. LMC writes this value to MR2[PASR] in the rank 0 (i.e.
+ DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t asr_00 : 1; /**< [ 3: 3](R/W) Auto self-refresh rank 0. LMC writes this value to MR2[ASR] in the rank 0 (i.e. DIMM0_CS0)
+ DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL [DDR3PWARM,DDR3PSOFT]. */
+ uint64_t srt_00 : 1; /**< [ 4: 4](R/W) Self-refresh temperature range rank 0. LMC writes this value to MR2[SRT] in the rank 0
+ (i.e. DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_wr_00 : 2; /**< [ 6: 5](R/W) RTT_WR rank 0. LMC writes this value to MR2[RTT_WR] in the rank 0 (i.e. DIMM0_CS0) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t dic_00 : 2; /**< [ 8: 7](R/W) Output driver impedance control rank 0. LMC writes this value to MR1[D.I.C.] in the rank 0
+ (i.e. DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_nom_00 : 3; /**< [ 11: 9](R/W) RTT_NOM rank 0. LMC writes this value to MR1[RTT_NOM] in the rank 0 (i.e. DIMM0_CS0) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
+ used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2),
+ or 3 (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8)
+ are also allowed. */
+ uint64_t pasr_01 : 3; /**< [ 14: 12](R/W) Partial array self-refresh rank 1. LMC writes this value to MR2[PASR] in the rank 1 (i.e.
+ DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t asr_01 : 1; /**< [ 15: 15](R/W) Auto self-refresh rank 1. LMC writes this value to MR2[ASR] in the rank 1 (i.e. DIMM0_CS1)
+ DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t srt_01 : 1; /**< [ 16: 16](R/W) Self-refresh temperature range rank 1. LMC writes this value to MR2[SRT] in the rank 1
+ (i.e. DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_wr_01 : 2; /**< [ 18: 17](R/W) RTT_WR rank 1. LMC writes this value to MR2[RTT_WR] in the rank 1 (i.e. DIMM0_CS1) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t dic_01 : 2; /**< [ 20: 19](R/W) Output driver impedance control rank 1. LMC writes this value to MR1[D.I.C.] in the rank 1
+ (i.e. DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_nom_01 : 3; /**< [ 23: 21](R/W) RTT_NOM rank 1. LMC writes this value to MR1[RTT_NOM] in the rank 1 (i.e. DIMM0_CS1) DDR3
+ parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
+ used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3
+ (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also
+ allowed. */
+ uint64_t pasr_10 : 3; /**< [ 26: 24](R/W) Partial array self-refresh rank 2. LMC writes this value to MR2[PASR] in the rank 2 (i.e.
+ DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t asr_10 : 1; /**< [ 27: 27](R/W) Auto self-refresh rank 2. LMC writes this value to MR2[ASR] in the rank 2 (i.e. DIMM1_CS0)
+ DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t srt_10 : 1; /**< [ 28: 28](R/W) Self-refresh temperature range rank 2. LMC writes this value to MR2[SRT] in the rank 2
+ (i.e. DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_wr_10 : 2; /**< [ 30: 29](R/W) RTT_WR rank 2. LMC writes this value to MR2[Rtt_WR] in the rank 2 (i.e. DIMM1_CS0) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t dic_10 : 2; /**< [ 32: 31](R/W) Output driver impedance control rank 2. LMC writes this value to MR1[D.I.C.] in the rank 2
+ (i.e. DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_nom_10 : 3; /**< [ 35: 33](R/W) RTT_NOM rank 2. LMC writes this value to MR1[Rtt_Nom] in the rank 2 (i.e. DIMM1_CS0) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL [DDR3PWARM, DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM
+ is used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or
+ 3 (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are
+ also allowed. */
+ uint64_t pasr_11 : 3; /**< [ 38: 36](R/W) Partial array self-refresh rank 3. LMC writes this value to MR2[PASR] in the rank 3 (i.e.
+ DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t asr_11 : 1; /**< [ 39: 39](R/W) Auto self-refresh rank 3. LMC writes this value to MR2[ASR] in the rank 3 (i.e. DIMM1_CS1)
+ DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START], LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t srt_11 : 1; /**< [ 40: 40](R/W) Self-refresh temperature range rank 3. LMC writes this value to MR2[SRT] in the rank 3
+ (i.e. DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START], LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_wr_11 : 2; /**< [ 42: 41](R/W) RTT_WR rank 3. LMC writes this value to MR2[Rtt_WR] in the rank 3 (i.e. DIMM1_CS1) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL [DDR3PWARM,DDR3PSOFT]. */
+ uint64_t dic_11 : 2; /**< [ 44: 43](R/W) Output driver impedance control rank 3. LMC writes this value to MR1[D.I.C.] in the rank 3
+ (i.e. DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t rtt_nom_11 : 3; /**< [ 47: 45](R/W) RTT_NOM rank 3. LMC writes this value to MR1[RTT_NOM] in the rank 3 (i.e. DIMM1_CS1) DDR3
+ parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
+ used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3
+ (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also
+ allowed. */
+ uint64_t db_output_impedance : 3; /**< [ 50: 48](R/W) Reserved.
+ Internal:
+ Host Interface DQ/DQS Output Driver Impedance control for DIMM0's Data Buffer.
+ This is the default value used during Host Interface Write Leveling in LRDIMM
+ environment, i.e., LMC()_CONFIG[LRDIMM_ENA] = 1, LMC()_SEQ_CTL[SEQ_SEL] = 0x6.
+ 0x0 = RZQ/6 (40 ohm).
+ 0x1 = RZQ/7 (34 ohm).
+ 0x2 = RZQ/5 (48 ohm).
+ 0x3-0x7 = Reserved. */
+ uint64_t rtt_wr_00_ext : 1; /**< [ 51: 51](RO) Reserved. */
+ uint64_t rtt_wr_01_ext : 1; /**< [ 52: 52](RO) Reserved. */
+ uint64_t rtt_wr_10_ext : 1; /**< [ 53: 53](RO) Reserved. */
+ uint64_t rtt_wr_11_ext : 1; /**< [ 54: 54](RO) Reserved. */
+ uint64_t reserved_55_63 : 9;
+#endif /* Word 0 - End */
+ } cn88xxp1;
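+ /* Illustrative sketch, not from the original BDK sources: the MR1[RTT_NOM]
+  * encodings quoted in the field comments above are fractions of RZQ
+  * (240 ohms in DDR3/DDR4), with 0 meaning termination disabled.
+  *
+  *   static inline int rtt_nom_ohms(int enc)
+  *   {
+  *       // encoding 0..5 -> off, RZQ/4, RZQ/2, RZQ/6, RZQ/12, RZQ/8
+  *       static const int ohms[] = { 0, 60, 120, 40, 20, 30 };
+  *       return (enc >= 0 && enc <= 5) ? ohms[enc] : -1;
+  *   }
+  */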
+ struct bdk_lmcx_modereg_params1_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_55_63 : 9;
+ uint64_t rtt_wr_11_ext : 1; /**< [ 54: 54](R/W) RTT_WR rank 3 extension bit for DDR4. */
+ uint64_t rtt_wr_10_ext : 1; /**< [ 53: 53](R/W) RTT_WR rank 2 extension bit for DDR4. */
+ uint64_t rtt_wr_01_ext : 1; /**< [ 52: 52](R/W) RTT_WR rank 1 extension bit for DDR4. */
+ uint64_t rtt_wr_00_ext : 1; /**< [ 51: 51](R/W) RTT_WR rank 0 extension bit for DDR4. */
+ uint64_t db_output_impedance : 3; /**< [ 50: 48](R/W) Reserved.
+ Internal:
+ Host Interface DQ/DQS Output Driver Impedance control for DIMM0's Data Buffer.
+ This is the default value used during Host Interface Write Leveling in LRDIMM
+ environment, i.e., LMC()_CONFIG[LRDIMM_ENA] = 1, LMC()_SEQ_CTL[SEQ_SEL] = 0x6.
+ 0x0 = RZQ/6 (40 ohm).
+ 0x1 = RZQ/7 (34 ohm).
+ 0x2 = RZQ/5 (48 ohm).
+ 0x3-0x7 = Reserved. */
+ uint64_t rtt_nom_11 : 3; /**< [ 47: 45](R/W) RTT_NOM rank 3. LMC writes this value to MR1[RTT_NOM] in the rank 3 (i.e. DIMM1_CS1) DDR4
+ parts when selected during power-up/init, write leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR4PDOMAIN]. Per JEDEC DDR4 specifications, if RTT_NOM is
+ used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3
+ (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also
+ allowed. */
+ uint64_t dic_11 : 2; /**< [ 44: 43](R/W) Output driver impedance control rank 3. LMC writes this value to MR1[D.I.C.] in the rank 3
+ (i.e. DIMM1_CS1) DDR4 parts when selected during power-up/init, write leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR4PDOMAIN]. */
+ uint64_t rtt_wr_11 : 2; /**< [ 42: 41](R/W) RTT_WR rank 3. LMC writes this value to MR2[Rtt_WR] in the rank 3
+ (i.e. DIMM1_CS1) DDR4 parts when selected during power-up/init, write leveling,
+ and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
+ instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
+ LMC()_CONFIG[RANKMASK] and LMC()_RESET_CTL[DDR4PDOMAIN]. */
+ uint64_t srt_11 : 1; /**< [ 40: 40](R/W) Reserved.
+ Internal:
+ FIXME, No longer needed. */
+ uint64_t asr_11 : 1; /**< [ 39: 39](R/W) Reserved.
+ Internal:
+ FIXME, No longer needed. */
+ uint64_t pasr_11 : 3; /**< [ 38: 36](R/W) Reserved.
+ Internal:
+ FIXME, No longer needed. */
+ uint64_t rtt_nom_10 : 3; /**< [ 35: 33](R/W) RTT_NOM rank 2. LMC writes this value to MR1[Rtt_Nom] in the rank 2
+ (i.e. DIMM1_CS0) DDR4 parts when selected during power-up/init, write leveling,
+ and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
+ instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
+ LMC()_CONFIG[RANKMASK] and LMC()_RESET_CTL[DDR4PDOMAIN]. Per JEDEC DDR4
+ specifications, if RTT_NOM is used during write operations, only values
+ MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3 (RZQ/6) are allowed. Otherwise, values
+ MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also allowed. */
+ uint64_t dic_10 : 2; /**< [ 32: 31](R/W) Output driver impedance control rank 2. LMC writes this value to MR1[D.I.C.] in the rank 2
+ (i.e. DIMM1_CS0) DDR4 parts when selected during power-up/init, write leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR4PDOMAIN]. */
+ uint64_t rtt_wr_10 : 2; /**< [ 30: 29](R/W) RTT_WR rank 2. LMC writes this value to MR2[Rtt_WR] in the rank 2
+ (i.e. DIMM1_CS0) DDR4 parts when selected during power-up/init, write leveling,
+ and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
+ instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
+ LMC()_CONFIG[RANKMASK] and LMC()_RESET_CTL[DDR4PDOMAIN]. */
+ uint64_t srt_10 : 1; /**< [ 28: 28](R/W) Reserved.
+ Internal:
+ FIXME, No longer needed. */
+ uint64_t asr_10 : 1; /**< [ 27: 27](R/W) Reserved.
+ Internal:
+ FIXME, no longer needed. */
+ uint64_t pasr_10 : 3; /**< [ 26: 24](R/W) Reserved.
+ Internal:
+ FIXME, no longer needed. */
+ uint64_t rtt_nom_01 : 3; /**< [ 23: 21](R/W) RTT_NOM rank 1. LMC writes this value to MR1[RTT_NOM] in the rank 1 (i.e. DIMM0_CS1) DDR4
+ parts when selected during power-up/init, write leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR4PDOMAIN]. Per JEDEC DDR4 specifications, if RTT_NOM is
+ used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3
+ (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also
+ allowed. */
+ uint64_t dic_01 : 2; /**< [ 20: 19](R/W) Output driver impedance control rank 1. LMC writes this value to MR1[D.I.C.] in the rank 1
+ (i.e. DIMM0_CS1) DDR4 parts when selected during power-up/init, write leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR4PDOMAIN]. */
+ uint64_t rtt_wr_01 : 2; /**< [ 18: 17](R/W) RTT_WR rank 1. LMC writes this value to MR2[RTT_WR] in the rank 1
+ (i.e. DIMM0_CS1) DDR4 parts when selected during power-up/init, write leveling,
+ and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
+ instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
+ LMC()_CONFIG[RANKMASK] and LMC()_RESET_CTL[DDR4PDOMAIN]. */
+ uint64_t srt_01 : 1; /**< [ 16: 16](R/W) Reserved.
+ Internal:
+ FIXME, No longer needed. */
+ uint64_t asr_01 : 1; /**< [ 15: 15](R/W) Reserved.
+ Internal:
+ FIXME, No longer needed. */
+ uint64_t pasr_01 : 3; /**< [ 14: 12](R/W) Reserved.
+ Internal:
+ FIXME, No longer needed. */
+ uint64_t rtt_nom_00 : 3; /**< [ 11: 9](R/W) RTT_NOM rank 0. LMC writes this value to MR1[RTT_NOM] in the rank 0
+ (i.e. DIMM0_CS0) DDR4 parts when selected during power-up/init, write leveling,
+ and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
+ instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
+ LMC()_CONFIG[RANKMASK] and LMC()_RESET_CTL[DDR4PDOMAIN]. Per JEDEC DDR4
+ specifications, if RTT_NOM is used during write operations, only values
+ MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3 (RZQ/6) are allowed. Otherwise, values
+ MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also allowed. */
+ uint64_t dic_00 : 2; /**< [ 8: 7](R/W) Output driver impedance control rank 0. LMC writes this value to MR1[D.I.C.] in the rank 0
+ (i.e. DIMM0_CS0) DDR4 parts when selected during power-up/init, write leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR4PDOMAIN]. */
+ uint64_t rtt_wr_00 : 2; /**< [ 6: 5](R/W) RTT_WR rank 0. LMC writes this value to MR2[RTT_WR] in the rank 0
+ (i.e. DIMM0_CS0) DDR4 parts when selected during power-up/init, write leveling,
+ and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
+ instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
+ LMC()_CONFIG[RANKMASK] and LMC()_RESET_CTL[DDR4PDOMAIN]. */
+ uint64_t srt_00 : 1; /**< [ 4: 4](R/W) Reserved.
+ Internal:
+ FIXME, No longer needed. */
+ uint64_t asr_00 : 1; /**< [ 3: 3](R/W) Reserved.
+ Internal:
+ FIXME, No longer needed. */
+ uint64_t pasr_00 : 3; /**< [ 2: 0](R/W) Reserved.
+ Internal:
+ FIXME, No longer needed. */
+#else /* Word 0 - Little Endian */
+ uint64_t pasr_00 : 3; /**< [ 2: 0](R/W) Reserved.
+ Internal:
+ FIXME, No longer needed. */
+ uint64_t asr_00 : 1; /**< [ 3: 3](R/W) Reserved.
+ Internal:
+ FIXME, No longer needed. */
+ uint64_t srt_00 : 1; /**< [ 4: 4](R/W) Reserved.
+ Internal:
+ FIXME, No longer needed. */
+ uint64_t rtt_wr_00 : 2; /**< [ 6: 5](R/W) RTT_WR rank 0. LMC writes this value to MR2[RTT_WR] in the rank 0
+ (i.e. DIMM0_CS0) DDR4 parts when selected during power-up/init, write leveling,
+ and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
+ instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
+ LMC()_CONFIG[RANKMASK] and LMC()_RESET_CTL[DDR4PDOMAIN]. */
+ uint64_t dic_00 : 2; /**< [ 8: 7](R/W) Output driver impedance control rank 0. LMC writes this value to MR1[D.I.C.] in the rank 0
+ (i.e. DIMM0_CS0) DDR4 parts when selected during power-up/init, write leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR4PDOMAIN]. */
+ uint64_t rtt_nom_00 : 3; /**< [ 11: 9](R/W) RTT_NOM rank 0. LMC writes this value to MR1[RTT_NOM] in the rank 0
+ (i.e. DIMM0_CS0) DDR4 parts when selected during power-up/init, write leveling,
+ and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
+ instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
+ LMC()_CONFIG[RANKMASK] and LMC()_RESET_CTL[DDR4PDOMAIN]. Per JEDEC DDR4
+ specifications, if RTT_NOM is used during write operations, only values
+ MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3 (RZQ/6) are allowed. Otherwise, values
+ MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also allowed. */
+ uint64_t pasr_01 : 3; /**< [ 14: 12](R/W) Reserved.
+ Internal:
+ FIXME, No longer needed. */
+ uint64_t asr_01 : 1; /**< [ 15: 15](R/W) Reserved.
+ Internal:
+ FIXME, No longer needed. */
+ uint64_t srt_01 : 1; /**< [ 16: 16](R/W) Reserved.
+ Internal:
+ FIXME, No longer needed. */
+ uint64_t rtt_wr_01 : 2; /**< [ 18: 17](R/W) RTT_WR rank 1. LMC writes this value to MR2[RTT_WR] in the rank 1
+ (i.e. DIMM0_CS1) DDR4 parts when selected during power-up/init, write leveling,
+ and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
+ instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
+ LMC()_CONFIG[RANKMASK] and LMC()_RESET_CTL[DDR4PDOMAIN]. */
+ uint64_t dic_01 : 2; /**< [ 20: 19](R/W) Output driver impedance control rank 1. LMC writes this value to MR1[D.I.C.] in the rank 1
+ (i.e. DIMM0_CS1) DDR4 parts when selected during power-up/init, write leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR4PDOMAIN]. */
+ uint64_t rtt_nom_01 : 3; /**< [ 23: 21](R/W) RTT_NOM rank 1. LMC writes this value to MR1[RTT_NOM] in the rank 1 (i.e. DIMM0_CS1) DDR4
+ parts when selected during power-up/init, write leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR4PDOMAIN]. Per JEDEC DDR4 specifications, if RTT_NOM is
+ used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3
+ (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also
+ allowed. */
+ uint64_t pasr_10 : 3; /**< [ 26: 24](R/W) Reserved.
+ Internal:
+ FIXME, No longer needed. */
+ uint64_t asr_10 : 1; /**< [ 27: 27](R/W) Reserved.
+ Internal:
+ FIXME, No longer needed. */
+ uint64_t srt_10 : 1; /**< [ 28: 28](R/W) Reserved.
+ Internal:
+ FIXME, No longer needed. */
+ uint64_t rtt_wr_10 : 2; /**< [ 30: 29](R/W) RTT_WR rank 2. LMC writes this value to MR2[RTT_WR] in the rank 2
+ (i.e. DIMM1_CS0) DDR4 parts when selected during power-up/init, write leveling,
+ and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
+ instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
+ LMC()_CONFIG[RANKMASK] and LMC()_RESET_CTL[DDR4PDOMAIN]. */
+ uint64_t dic_10 : 2; /**< [ 32: 31](R/W) Output driver impedance control rank 2. LMC writes this value to MR1[D.I.C.] in the rank 2
+ (i.e. DIMM1_CS0) DDR4 parts when selected during power-up/init, write leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR4PDOMAIN]. */
+ uint64_t rtt_nom_10 : 3; /**< [ 35: 33](R/W) RTT_NOM rank 2. LMC writes this value to MR1[RTT_NOM] in the rank 2
+ (i.e. DIMM1_CS0) DDR4 parts when selected during power-up/init, write leveling,
+ and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
+ instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
+ LMC()_CONFIG[RANKMASK] and LMC()_RESET_CTL[DDR4PDOMAIN]. Per JEDEC DDR4
+ specifications, if RTT_NOM is used during write operations, only values
+ MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3 (RZQ/6) are allowed. Otherwise, values
+ MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also allowed. */
+ uint64_t pasr_11 : 3; /**< [ 38: 36](R/W) Reserved.
+ Internal:
+ FIXME, No longer needed. */
+ uint64_t asr_11 : 1; /**< [ 39: 39](R/W) Reserved.
+ Internal:
+ FIXME, No longer needed. */
+ uint64_t srt_11 : 1; /**< [ 40: 40](R/W) Reserved.
+ Internal:
+ FIXME, No longer needed. */
+ uint64_t rtt_wr_11 : 2; /**< [ 42: 41](R/W) RTT_WR rank 3. LMC writes this value to MR2[RTT_WR] in the rank 3
+ (i.e. DIMM1_CS1) DDR4 parts when selected during power-up/init, write leveling,
+ and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
+ instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
+ LMC()_CONFIG[RANKMASK] and LMC()_RESET_CTL[DDR4PDOMAIN]. */
+ uint64_t dic_11 : 2; /**< [ 44: 43](R/W) Output driver impedance control rank 3. LMC writes this value to MR1[D.I.C.] in the rank 3
+ (i.e. DIMM1_CS1) DDR4 parts when selected during power-up/init, write leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR4PDOMAIN]. */
+ uint64_t rtt_nom_11 : 3; /**< [ 47: 45](R/W) RTT_NOM rank 3. LMC writes this value to MR1[RTT_NOM] in the rank 3 (i.e. DIMM1_CS1) DDR4
+ parts when selected during power-up/init, write leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR4PDOMAIN]. Per JEDEC DDR4 specifications, if RTT_NOM is
+ used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3
+ (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also
+ allowed. */
+ uint64_t db_output_impedance : 3; /**< [ 50: 48](R/W) Reserved.
+ Internal:
+ Host Interface DQ/DQS Output Driver Impedance control for DIMM0's Data Buffer.
+ This is the default value used during Host Interface Write Leveling in an LRDIMM
+ environment, i.e., LMC()_CONFIG[LRDIMM_ENA] = 1, LMC()_SEQ_CTL[SEQ_SEL] = 0x6.
+ 0x0 = RZQ/6 (40 ohm).
+ 0x1 = RZQ/7 (34 ohm).
+ 0x2 = RZQ/5 (48 ohm).
+ 0x3-0x7 = Reserved. */
+ uint64_t rtt_wr_00_ext : 1; /**< [ 51: 51](R/W) RTT_WR rank 0 extension bit for DDR4. */
+ uint64_t rtt_wr_01_ext : 1; /**< [ 52: 52](R/W) RTT_WR rank 1 extension bit for DDR4. */
+ uint64_t rtt_wr_10_ext : 1; /**< [ 53: 53](R/W) RTT_WR rank 2 extension bit for DDR4. */
+ uint64_t rtt_wr_11_ext : 1; /**< [ 54: 54](R/W) RTT_WR rank 3 extension bit for DDR4. */
+ uint64_t reserved_55_63 : 9;
+#endif /* Word 0 - End */
+ } cn9;
+ /* struct bdk_lmcx_modereg_params1_s cn81xx; */
+ struct bdk_lmcx_modereg_params1_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_55_63 : 9;
+ uint64_t rtt_wr_11_ext : 1; /**< [ 54: 54](R/W) RTT_WR rank 3 extension bit for DDR4. */
+ uint64_t rtt_wr_10_ext : 1; /**< [ 53: 53](R/W) RTT_WR rank 2 extension bit for DDR4. */
+ uint64_t rtt_wr_01_ext : 1; /**< [ 52: 52](R/W) RTT_WR rank 1 extension bit for DDR4. */
+ uint64_t rtt_wr_00_ext : 1; /**< [ 51: 51](R/W) RTT_WR rank 0 extension bit for DDR4. */
+ uint64_t db_output_impedance : 3; /**< [ 50: 48](R/W) Reserved.
+ Internal:
+ Host Interface DQ/DQS Output Driver Impedance control for DIMM0's Data Buffer.
+ This is the default value used during Host Interface Write Leveling in an LRDIMM
+ environment, i.e., LMC()_CONFIG[LRDIMM_ENA] = 1, LMC()_SEQ_CTL[SEQ_SEL] = 0x6.
+ 0x0 = RZQ/6 (40 ohm).
+ 0x1 = RZQ/7 (34 ohm).
+ 0x2 = RZQ/5 (48 ohm).
+ 0x3-0x7 = Reserved. */
+ uint64_t rtt_nom_11 : 3; /**< [ 47: 45](R/W) RTT_NOM rank 3. LMC writes this value to MR1[RTT_NOM] in the rank 3 (i.e. DIMM1_CS1) DDR3
+ parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
+ used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3
+ (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also
+ allowed. */
+ uint64_t dic_11 : 2; /**< [ 44: 43](R/W) Output driver impedance control rank 3. LMC writes this value to MR1[D.I.C.] in the rank 3
+ (i.e. DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t rtt_wr_11 : 2; /**< [ 42: 41](R/W) RTT_WR rank 3. LMC writes this value to MR2[RTT_WR] in the rank 3 (i.e. DIMM1_CS1) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL [DDR3PWARM,DDR3PSOFT]. */
+ uint64_t srt_11 : 1; /**< [ 40: 40](R/W) Self-refresh temperature range rank 3. LMC writes this value to MR2[SRT] in the rank 3
+ (i.e. DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START], LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t asr_11 : 1; /**< [ 39: 39](R/W) Auto self-refresh rank 3. LMC writes this value to MR2[ASR] in the rank 3 (i.e. DIMM1_CS1)
+ DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START], LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t pasr_11 : 3; /**< [ 38: 36](R/W) Partial array self-refresh rank 3. LMC writes this value to MR2[PASR] in the rank 3 (i.e.
+ DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_nom_10 : 3; /**< [ 35: 33](R/W) RTT_NOM rank 2. LMC writes this value to MR1[RTT_NOM] in the rank 2 (i.e. DIMM1_CS0) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL [DDR3PWARM, DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM
+ is used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or
+ 3 (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are
+ also allowed. */
+ uint64_t dic_10 : 2; /**< [ 32: 31](R/W) Output driver impedance control rank 2. LMC writes this value to MR1[D.I.C.] in the rank 2
+ (i.e. DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_wr_10 : 2; /**< [ 30: 29](R/W) RTT_WR rank 2. LMC writes this value to MR2[RTT_WR] in the rank 2 (i.e. DIMM1_CS0) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t srt_10 : 1; /**< [ 28: 28](R/W) Self-refresh temperature range rank 2. LMC writes this value to MR2[SRT] in the rank 2
+ (i.e. DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t asr_10 : 1; /**< [ 27: 27](R/W) Auto self-refresh rank 2. LMC writes this value to MR2[ASR] in the rank 2 (i.e. DIMM1_CS0)
+ DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t pasr_10 : 3; /**< [ 26: 24](R/W) Partial array self-refresh rank 2. LMC writes this value to MR2[PASR] in the rank 2 (i.e.
+ DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_nom_01 : 3; /**< [ 23: 21](R/W) RTT_NOM rank 1. LMC writes this value to MR1[RTT_NOM] in the rank 1 (i.e. DIMM0_CS1) DDR3
+ parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
+ used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3
+ (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also
+ allowed. */
+ uint64_t dic_01 : 2; /**< [ 20: 19](R/W) Output driver impedance control rank 1. LMC writes this value to MR1[D.I.C.] in the rank 1
+ (i.e. DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_wr_01 : 2; /**< [ 18: 17](R/W) RTT_WR rank 1. LMC writes this value to MR2[RTT_WR] in the rank 1 (i.e. DIMM0_CS1) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t srt_01 : 1; /**< [ 16: 16](R/W) Self-refresh temperature range rank 1. LMC writes this value to MR2[SRT] in the rank 1
+ (i.e. DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t asr_01 : 1; /**< [ 15: 15](R/W) Auto self-refresh rank 1. LMC writes this value to MR2[ASR] in the rank 1 (i.e. DIMM0_CS1)
+ DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t pasr_01 : 3; /**< [ 14: 12](R/W) Partial array self-refresh rank 1. LMC writes this value to MR2[PASR] in the rank 1 (i.e.
+ DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_nom_00 : 3; /**< [ 11: 9](R/W) RTT_NOM rank 0. LMC writes this value to MR1[RTT_NOM] in the rank 0 (i.e. DIMM0_CS0) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
+ used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2),
+ or 3 (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8)
+ are also allowed. */
+ uint64_t dic_00 : 2; /**< [ 8: 7](R/W) Output driver impedance control rank 0. LMC writes this value to MR1[D.I.C.] in the rank 0
+ (i.e. DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_wr_00 : 2; /**< [ 6: 5](R/W) RTT_WR rank 0. LMC writes this value to MR2[RTT_WR] in the rank 0 (i.e. DIMM0_CS0) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t srt_00 : 1; /**< [ 4: 4](R/W) Self-refresh temperature range rank 0. LMC writes this value to MR2[SRT] in the rank 0
+ (i.e. DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t asr_00 : 1; /**< [ 3: 3](R/W) Auto self-refresh rank 0. LMC writes this value to MR2[ASR] in the rank 0 (i.e. DIMM0_CS0)
+ DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL [DDR3PWARM,DDR3PSOFT]. */
+ uint64_t pasr_00 : 3; /**< [ 2: 0](R/W) Partial array self-refresh rank 0. LMC writes this value to MR2[PASR] in the rank 0 (i.e.
+ DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+#else /* Word 0 - Little Endian */
+ uint64_t pasr_00 : 3; /**< [ 2: 0](R/W) Partial array self-refresh rank 0. LMC writes this value to MR2[PASR] in the rank 0 (i.e.
+ DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t asr_00 : 1; /**< [ 3: 3](R/W) Auto self-refresh rank 0. LMC writes this value to MR2[ASR] in the rank 0 (i.e. DIMM0_CS0)
+ DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL [DDR3PWARM,DDR3PSOFT]. */
+ uint64_t srt_00 : 1; /**< [ 4: 4](R/W) Self-refresh temperature range rank 0. LMC writes this value to MR2[SRT] in the rank 0
+ (i.e. DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_wr_00 : 2; /**< [ 6: 5](R/W) RTT_WR rank 0. LMC writes this value to MR2[RTT_WR] in the rank 0 (i.e. DIMM0_CS0) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t dic_00 : 2; /**< [ 8: 7](R/W) Output driver impedance control rank 0. LMC writes this value to MR1[D.I.C.] in the rank 0
+ (i.e. DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_nom_00 : 3; /**< [ 11: 9](R/W) RTT_NOM rank 0. LMC writes this value to MR1[RTT_NOM] in the rank 0 (i.e. DIMM0_CS0) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
+ used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2),
+ or 3 (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8)
+ are also allowed. */
+ uint64_t pasr_01 : 3; /**< [ 14: 12](R/W) Partial array self-refresh rank 1. LMC writes this value to MR2[PASR] in the rank 1 (i.e.
+ DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t asr_01 : 1; /**< [ 15: 15](R/W) Auto self-refresh rank 1. LMC writes this value to MR2[ASR] in the rank 1 (i.e. DIMM0_CS1)
+ DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t srt_01 : 1; /**< [ 16: 16](R/W) Self-refresh temperature range rank 1. LMC writes this value to MR2[SRT] in the rank 1
+ (i.e. DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_wr_01 : 2; /**< [ 18: 17](R/W) RTT_WR rank 1. LMC writes this value to MR2[RTT_WR] in the rank 1 (i.e. DIMM0_CS1) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t dic_01 : 2; /**< [ 20: 19](R/W) Output driver impedance control rank 1. LMC writes this value to MR1[D.I.C.] in the rank 1
+ (i.e. DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_nom_01 : 3; /**< [ 23: 21](R/W) RTT_NOM rank 1. LMC writes this value to MR1[RTT_NOM] in the rank 1 (i.e. DIMM0_CS1) DDR3
+ parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
+ used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3
+ (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also
+ allowed. */
+ uint64_t pasr_10 : 3; /**< [ 26: 24](R/W) Partial array self-refresh rank 2. LMC writes this value to MR2[PASR] in the rank 2 (i.e.
+ DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t asr_10 : 1; /**< [ 27: 27](R/W) Auto self-refresh rank 2. LMC writes this value to MR2[ASR] in the rank 2 (i.e. DIMM1_CS0)
+ DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t srt_10 : 1; /**< [ 28: 28](R/W) Self-refresh temperature range rank 2. LMC writes this value to MR2[SRT] in the rank 2
+ (i.e. DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_wr_10 : 2; /**< [ 30: 29](R/W) RTT_WR rank 2. LMC writes this value to MR2[RTT_WR] in the rank 2 (i.e. DIMM1_CS0) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t dic_10 : 2; /**< [ 32: 31](R/W) Output driver impedance control rank 2. LMC writes this value to MR1[D.I.C.] in the rank 2
+ (i.e. DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_nom_10 : 3; /**< [ 35: 33](R/W) RTT_NOM rank 2. LMC writes this value to MR1[RTT_NOM] in the rank 2 (i.e. DIMM1_CS0) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL [DDR3PWARM, DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM
+ is used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or
+ 3 (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are
+ also allowed. */
+ uint64_t pasr_11 : 3; /**< [ 38: 36](R/W) Partial array self-refresh rank 3. LMC writes this value to MR2[PASR] in the rank 3 (i.e.
+ DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t asr_11 : 1; /**< [ 39: 39](R/W) Auto self-refresh rank 3. LMC writes this value to MR2[ASR] in the rank 3 (i.e. DIMM1_CS1)
+ DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START], LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t srt_11 : 1; /**< [ 40: 40](R/W) Self-refresh temperature range rank 3. LMC writes this value to MR2[SRT] in the rank 3
+ (i.e. DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START], LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
+ uint64_t rtt_wr_11 : 2; /**< [ 42: 41](R/W) RTT_WR rank 3. LMC writes this value to MR2[RTT_WR] in the rank 3 (i.e. DIMM1_CS1) DDR3
+ parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
+ [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
+ LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL [DDR3PWARM,DDR3PSOFT]. */
+ uint64_t dic_11 : 2; /**< [ 44: 43](R/W) Output driver impedance control rank 3. LMC writes this value to MR1[D.I.C.] in the rank 3
+ (i.e. DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
+ uint64_t rtt_nom_11 : 3; /**< [ 47: 45](R/W) RTT_NOM rank 3. LMC writes this value to MR1[RTT_NOM] in the rank 3 (i.e. DIMM1_CS1) DDR3
+ parts when selected during power-up/init, write-leveling, and, if
+ LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
+ See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
+ LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
+ used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3
+ (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also
+ allowed. */
+ uint64_t db_output_impedance : 3; /**< [ 50: 48](R/W) Reserved.
+ Internal:
+ Host Interface DQ/DQS Output Driver Impedance control for DIMM0's Data Buffer.
+ This is the default value used during Host Interface Write Leveling in an LRDIMM
+ environment, i.e., LMC()_CONFIG[LRDIMM_ENA] = 1, LMC()_SEQ_CTL[SEQ_SEL] = 0x6.
+ 0x0 = RZQ/6 (40 ohm).
+ 0x1 = RZQ/7 (34 ohm).
+ 0x2 = RZQ/5 (48 ohm).
+ 0x3-0x7 = Reserved. */
+ uint64_t rtt_wr_00_ext : 1; /**< [ 51: 51](R/W) RTT_WR rank 0 extension bit for DDR4. */
+ uint64_t rtt_wr_01_ext : 1; /**< [ 52: 52](R/W) RTT_WR rank 1 extension bit for DDR4. */
+ uint64_t rtt_wr_10_ext : 1; /**< [ 53: 53](R/W) RTT_WR rank 2 extension bit for DDR4. */
+ uint64_t rtt_wr_11_ext : 1; /**< [ 54: 54](R/W) RTT_WR rank 3 extension bit for DDR4. */
+ uint64_t reserved_55_63 : 9;
+#endif /* Word 0 - End */
+ } cn83xx;
+ /* struct bdk_lmcx_modereg_params1_cn83xx cn88xxp2; */
+};
+typedef union bdk_lmcx_modereg_params1 bdk_lmcx_modereg_params1_t;
+
+static inline uint64_t BDK_LMCX_MODEREG_PARAMS1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_MODEREG_PARAMS1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000260ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000260ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000260ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000260ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_MODEREG_PARAMS1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_MODEREG_PARAMS1(a) bdk_lmcx_modereg_params1_t
+#define bustype_BDK_LMCX_MODEREG_PARAMS1(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_MODEREG_PARAMS1(a) "LMCX_MODEREG_PARAMS1"
+#define device_bar_BDK_LMCX_MODEREG_PARAMS1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_MODEREG_PARAMS1(a) (a)
+#define arguments_BDK_LMCX_MODEREG_PARAMS1(a) (a),-1,-1,-1
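+
+/* Editor's usage sketch (illustration only, not part of the imported BDK
+ * sources): the union above is the usual way to compose a MODEREG_PARAMS1
+ * value, and the inline helper resolves the per-LMC CSR address. The actual
+ * CSR read/write primitives live elsewhere (bdk-csr.h) and are assumed, not
+ * shown; the field values below are placeholders, not recommended settings. */
+#if 0 /* illustration only, never compiled */
+static void example_params1_rank0(void)
+{
+    bdk_lmcx_modereg_params1_t p1;
+    p1.u = 0;
+    p1.s.rtt_nom_00 = 1; /* MR1[RTT_NOM] = RZQ/4, legal during writes */
+    p1.s.dic_00     = 0; /* MR1[D.I.C.] output driver impedance */
+    p1.s.rtt_wr_00  = 2; /* MR2[RTT_WR] dynamic ODT encoding */
+    uint64_t addr = BDK_LMCX_MODEREG_PARAMS1(0); /* LMC0, valid on all models */
+    (void)addr; /* hand p1.u and addr to the CSR write primitive */
+}
+#endif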
+
+/**
+ * Register (RSL) lmc#_modereg_params2
+ *
+ * LMC Mode Register Parameters Register 2
+ * These parameters are written into the DDR4 mode registers.
+ */
+union bdk_lmcx_modereg_params2
+{
+ uint64_t u;
+ struct bdk_lmcx_modereg_params2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_41_63 : 23;
+ uint64_t vrefdq_train_en : 1; /**< [ 40: 40](R/W) Vref training mode enable, used for all ranks. */
+ uint64_t vref_range_11 : 1; /**< [ 39: 39](R/W) VREF range for rank 3. */
+ uint64_t vref_value_11 : 6; /**< [ 38: 33](R/W) VREF value for rank 3. */
+ uint64_t rtt_park_11 : 3; /**< [ 32: 30](R/W) RTT park value for rank 3. */
+ uint64_t vref_range_10 : 1; /**< [ 29: 29](R/W) VREF range for rank 2. */
+ uint64_t vref_value_10 : 6; /**< [ 28: 23](R/W) VREF value for rank 2. */
+ uint64_t rtt_park_10 : 3; /**< [ 22: 20](R/W) RTT park value for rank 2. */
+ uint64_t vref_range_01 : 1; /**< [ 19: 19](R/W) VREF range for rank 1. */
+ uint64_t vref_value_01 : 6; /**< [ 18: 13](R/W) VREF value for rank 1. */
+ uint64_t rtt_park_01 : 3; /**< [ 12: 10](R/W) RTT park value for rank 1. */
+ uint64_t vref_range_00 : 1; /**< [ 9: 9](R/W) VREF range for rank 0. */
+ uint64_t vref_value_00 : 6; /**< [ 8: 3](R/W) VREF value for rank 0. */
+ uint64_t rtt_park_00 : 3; /**< [ 2: 0](R/W) RTT park value for rank 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t rtt_park_00 : 3; /**< [ 2: 0](R/W) RTT park value for rank 0. */
+ uint64_t vref_value_00 : 6; /**< [ 8: 3](R/W) VREF value for rank 0. */
+ uint64_t vref_range_00 : 1; /**< [ 9: 9](R/W) VREF range for rank 0. */
+ uint64_t rtt_park_01 : 3; /**< [ 12: 10](R/W) RTT park value for rank 1. */
+ uint64_t vref_value_01 : 6; /**< [ 18: 13](R/W) VREF value for rank 1. */
+ uint64_t vref_range_01 : 1; /**< [ 19: 19](R/W) VREF range for rank 1. */
+ uint64_t rtt_park_10 : 3; /**< [ 22: 20](R/W) RTT park value for rank 2. */
+ uint64_t vref_value_10 : 6; /**< [ 28: 23](R/W) VREF value for rank 2. */
+ uint64_t vref_range_10 : 1; /**< [ 29: 29](R/W) VREF range for rank 2. */
+ uint64_t rtt_park_11 : 3; /**< [ 32: 30](R/W) RTT park value for rank 3. */
+ uint64_t vref_value_11 : 6; /**< [ 38: 33](R/W) VREF value for rank 3. */
+ uint64_t vref_range_11 : 1; /**< [ 39: 39](R/W) VREF range for rank 3. */
+ uint64_t vrefdq_train_en : 1; /**< [ 40: 40](R/W) Vref training mode enable, used for all ranks. */
+ uint64_t reserved_41_63 : 23;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_modereg_params2_s cn; */
+};
+typedef union bdk_lmcx_modereg_params2 bdk_lmcx_modereg_params2_t;
+
+static inline uint64_t BDK_LMCX_MODEREG_PARAMS2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_MODEREG_PARAMS2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000050ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000050ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000050ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000050ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_MODEREG_PARAMS2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_MODEREG_PARAMS2(a) bdk_lmcx_modereg_params2_t
+#define bustype_BDK_LMCX_MODEREG_PARAMS2(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_MODEREG_PARAMS2(a) "LMCX_MODEREG_PARAMS2"
+#define device_bar_BDK_LMCX_MODEREG_PARAMS2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_MODEREG_PARAMS2(a) (a)
+#define arguments_BDK_LMCX_MODEREG_PARAMS2(a) (a),-1,-1,-1
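+
+/* Editor's sketch (illustration only, not part of the imported BDK sources):
+ * the per-rank DDR4 MR6 VREF training settings are plain bitfields in the
+ * union above. The numeric values are placeholders standing in for the JEDEC
+ * MR6 encodings (range bit plus 6-bit step), not recommended settings. */
+#if 0 /* illustration only, never compiled */
+static void example_params2_vref_rank0(void)
+{
+    bdk_lmcx_modereg_params2_t p2;
+    p2.u = 0;
+    p2.s.vrefdq_train_en = 1;    /* enable VREF training mode for all ranks */
+    p2.s.vref_range_00   = 0;    /* placeholder MR6 range select for rank 0 */
+    p2.s.vref_value_00   = 0x1a; /* placeholder 6-bit VREF step */
+    p2.s.rtt_park_00     = 4;    /* placeholder RTT_PARK encoding */
+    uint64_t addr = BDK_LMCX_MODEREG_PARAMS2(0);
+    (void)addr; /* hand p2.u and addr to the CSR write primitive */
+}
+#endif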
+
+/**
+ * Register (RSL) lmc#_modereg_params3
+ *
+ * LMC Mode Register Parameters Register 3
+ * These parameters are written into the DDR4 mode registers.
+ */
+union bdk_lmcx_modereg_params3
+{
+ uint64_t u;
+ struct bdk_lmcx_modereg_params3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_41_63 : 23;
+ uint64_t tc_ref_range : 1; /**< [ 40: 40](R/W) Temperature controlled refresh range: 0 = normal, 1 = extended. */
+ uint64_t reserved_39 : 1;
+ uint64_t xrank_add_tccd_l : 3; /**< [ 38: 36](R/W) Reserved.
+ Internal:
+ Add additional cycles on top of the 4 cycles applied to tCCD_L
+ when crossing logical rank (to the same bank group) of a 3DS DRAM. */
+ uint64_t xrank_add_tccd_s : 3; /**< [ 35: 33](R/W) Reserved.
+ Internal:
+ Add additional cycles on top of the 4 cycles applied to tCCD_S
+ when crossing logical rank (to a different bank group) of a 3DS DRAM. */
+ uint64_t mpr_fmt : 2; /**< [ 32: 31](R/W) MPR format. */
+ uint64_t wr_cmd_lat : 2; /**< [ 30: 29](R/W) Write command latency when CRC and DM are both enabled. */
+ uint64_t fgrm : 3; /**< [ 28: 26](R/W) Fine granularity refresh mode. */
+ uint64_t temp_sense : 1; /**< [ 25: 25](R/W) Temperature sensor readout enable. */
+ uint64_t pda : 1; /**< [ 24: 24](R/W) Per DRAM addressability. */
+ uint64_t gd : 1; /**< [ 23: 23](R/W) Gear-down mode. */
+ uint64_t crc : 1; /**< [ 22: 22](R/W) CRC mode. */
+ uint64_t lpasr : 2; /**< [ 21: 20](R/W) LP auto self refresh. */
+ uint64_t tccd_l : 3; /**< [ 19: 17](R/W) tCCD_L timing parameter:
+ 0x0 = 4.
+ 0x1 = 5.
+ 0x2 = 6.
+ 0x3 = 7.
+ 0x4 = 8.
+ 0x5-0x7 = reserved. */
+ uint64_t rd_dbi : 1; /**< [ 16: 16](R/W) Read DBI. */
+ uint64_t wr_dbi : 1; /**< [ 15: 15](R/W) Write DBI. */
+ uint64_t dm : 1; /**< [ 14: 14](R/W) Data mask enable. */
+ uint64_t ca_par_pers : 1; /**< [ 13: 13](R/W) Command/address persistent parity error mode. */
+ uint64_t odt_pd : 1; /**< [ 12: 12](R/W) ODT in PD mode. */
+ uint64_t par_lat_mode : 3; /**< [ 11: 9](R/W) Parity latency mode. */
+ uint64_t wr_preamble : 1; /**< [ 8: 8](R/W) Write preamble, 0 = one nCK, 1 = two nCK. */
+ uint64_t rd_preamble : 1; /**< [ 7: 7](R/W) Read preamble, 0 = one nCK, 1 = two nCK. */
+ uint64_t sre_abort : 1; /**< [ 6: 6](R/W) Self refresh abort. */
+ uint64_t cal : 3; /**< [ 5: 3](R/W) CS-to-CMD/ADDR latency mode (cycles). */
+ uint64_t vref_mon : 1; /**< [ 2: 2](R/W) Internal VREF monitor: 0 = disable, 1 = enable. */
+ uint64_t reserved_1 : 1;
+ uint64_t max_pd : 1; /**< [ 0: 0](R/W) Maximum power-down mode: 0 = disable, 1 = enable. */
+#else /* Word 0 - Little Endian */
+ uint64_t max_pd : 1; /**< [ 0: 0](R/W) Maximum power-down mode: 0 = disable, 1 = enable. */
+ uint64_t reserved_1 : 1;
+ uint64_t vref_mon : 1; /**< [ 2: 2](R/W) Internal VREF monitor: 0 = disable, 1 = enable. */
+ uint64_t cal : 3; /**< [ 5: 3](R/W) CS-to-CMD/ADDR latency mode (cycles). */
+ uint64_t sre_abort : 1; /**< [ 6: 6](R/W) Self refresh abort. */
+ uint64_t rd_preamble : 1; /**< [ 7: 7](R/W) Read preamble, 0 = one nCK, 1 = two nCK. */
+ uint64_t wr_preamble : 1; /**< [ 8: 8](R/W) Write preamble, 0 = one nCK, 1 = two nCK. */
+ uint64_t par_lat_mode : 3; /**< [ 11: 9](R/W) Parity latency mode. */
+ uint64_t odt_pd : 1; /**< [ 12: 12](R/W) ODT in PD mode. */
+ uint64_t ca_par_pers : 1; /**< [ 13: 13](R/W) Command/address persistent parity error mode. */
+ uint64_t dm : 1; /**< [ 14: 14](R/W) Data mask enable. */
+ uint64_t wr_dbi : 1; /**< [ 15: 15](R/W) Write DBI. */
+ uint64_t rd_dbi : 1; /**< [ 16: 16](R/W) Read DBI. */
+ uint64_t tccd_l : 3; /**< [ 19: 17](R/W) tCCD_L timing parameter:
+ 0x0 = 4.
+ 0x1 = 5.
+ 0x2 = 6.
+ 0x3 = 7.
+ 0x4 = 8.
+ 0x5-0x7 = reserved. */
+ uint64_t lpasr : 2; /**< [ 21: 20](R/W) LP auto self refresh. */
+ uint64_t crc : 1; /**< [ 22: 22](R/W) CRC mode. */
+ uint64_t gd : 1; /**< [ 23: 23](R/W) Gear-down mode. */
+ uint64_t pda : 1; /**< [ 24: 24](R/W) Per DRAM addressability. */
+ uint64_t temp_sense : 1; /**< [ 25: 25](R/W) Temperature sensor readout enable. */
+ uint64_t fgrm : 3; /**< [ 28: 26](R/W) Fine granularity refresh mode. */
+ uint64_t wr_cmd_lat : 2; /**< [ 30: 29](R/W) Write command latency when CRC and DM are both enabled. */
+ uint64_t mpr_fmt : 2; /**< [ 32: 31](R/W) MPR format. */
+ uint64_t xrank_add_tccd_s : 3; /**< [ 35: 33](R/W) Reserved.
+ Internal:
+ Add additional cycles on top of the 4 cycles applied to tCCD_S
+ when crossing logical rank (to a different bank group) of a 3DS DRAM. */
+ uint64_t xrank_add_tccd_l : 3; /**< [ 38: 36](R/W) Reserved.
+ Internal:
+ Add additional cycles on top of the 4 cycles applied to tCCD_L
+ when crossing logical rank (to the same bank group) of a 3DS DRAM. */
+ uint64_t reserved_39 : 1;
+ uint64_t tc_ref_range : 1; /**< [ 40: 40](R/W) Temperature controlled refresh range: 0 = normal, 1 = extended. */
+ uint64_t reserved_41_63 : 23;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_modereg_params3_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_39_63 : 25;
+ uint64_t xrank_add_tccd_l : 3; /**< [ 38: 36](R/W) Reserved.
+ Internal:
+ Add additional cycles on top of the 4 cycles applied to tCCD_L
+ when crossing logical rank (to the same bank group) of a 3DS DRAM. */
+ uint64_t xrank_add_tccd_s : 3; /**< [ 35: 33](R/W) Reserved.
+ Internal:
+ Add additional cycles on top of the 4 cycles applied to tCCD_S
+ when crossing logical rank (to a different bank group) of a 3DS DRAM. */
+ uint64_t mpr_fmt : 2; /**< [ 32: 31](R/W) MPR format. */
+ uint64_t wr_cmd_lat : 2; /**< [ 30: 29](R/W) Write command latency when CRC and DM are both enabled. */
+ uint64_t fgrm : 3; /**< [ 28: 26](R/W) Fine granularity refresh mode. */
+ uint64_t temp_sense : 1; /**< [ 25: 25](R/W) Temperature sensor readout enable. */
+ uint64_t pda : 1; /**< [ 24: 24](R/W) Per DRAM addressability. */
+ uint64_t gd : 1; /**< [ 23: 23](R/W) Gear-down mode. */
+ uint64_t crc : 1; /**< [ 22: 22](R/W) CRC mode. */
+ uint64_t lpasr : 2; /**< [ 21: 20](R/W) LP auto self refresh. */
+ uint64_t tccd_l : 3; /**< [ 19: 17](R/W) tCCD_L timing parameter:
+ 0x0 = 4.
+ 0x1 = 5.
+ 0x2 = 6.
+ 0x3 = 7.
+ 0x4 = 8.
+ 0x5-0x7 = reserved. */
+ uint64_t rd_dbi : 1; /**< [ 16: 16](R/W) Read DBI. */
+ uint64_t wr_dbi : 1; /**< [ 15: 15](R/W) Write DBI. */
+ uint64_t dm : 1; /**< [ 14: 14](R/W) Data mask enable. */
+ uint64_t ca_par_pers : 1; /**< [ 13: 13](R/W) Command/address persistent parity error mode. */
+ uint64_t odt_pd : 1; /**< [ 12: 12](R/W) ODT in PD mode. */
+ uint64_t par_lat_mode : 3; /**< [ 11: 9](R/W) Parity latency mode. */
+ uint64_t wr_preamble : 1; /**< [ 8: 8](R/W) Write preamble, 0 = one nCK, 1 = two nCK. */
+ uint64_t rd_preamble : 1; /**< [ 7: 7](R/W) Read preamble, 0 = one nCK, 1 = two nCK. */
+ uint64_t sre_abort : 1; /**< [ 6: 6](R/W) Self refresh abort. */
+ uint64_t cal : 3; /**< [ 5: 3](R/W) CS-to-CMD/ADDR latency mode (cycles). */
+ uint64_t vref_mon : 1; /**< [ 2: 2](R/W) Internal VREF monitor: 0 = disable, 1 = enable. */
+ uint64_t tc_ref : 1; /**< [ 1: 1](R/W) Temperature controlled refresh range: 0 = normal, 1 = extended. */
+ uint64_t max_pd : 1; /**< [ 0: 0](R/W) Maximum power-down mode: 0 = disable, 1 = enable. */
+#else /* Word 0 - Little Endian */
+ uint64_t max_pd : 1; /**< [ 0: 0](R/W) Maximum power-down mode: 0 = disable, 1 = enable. */
+ uint64_t tc_ref : 1; /**< [ 1: 1](R/W) Temperature controlled refresh range: 0 = normal, 1 = extended. */
+ uint64_t vref_mon : 1; /**< [ 2: 2](R/W) Internal VREF monitor: 0 = disable, 1 = enable. */
+ uint64_t cal : 3; /**< [ 5: 3](R/W) CS-to-CMD/ADDR latency mode (cycles). */
+ uint64_t sre_abort : 1; /**< [ 6: 6](R/W) Self refresh abort. */
+ uint64_t rd_preamble : 1; /**< [ 7: 7](R/W) Read preamble, 0 = one nCK, 1 = two nCK. */
+ uint64_t wr_preamble : 1; /**< [ 8: 8](R/W) Write preamble, 0 = one nCK, 1 = two nCK. */
+ uint64_t par_lat_mode : 3; /**< [ 11: 9](R/W) Parity latency mode. */
+ uint64_t odt_pd : 1; /**< [ 12: 12](R/W) ODT in PD mode. */
+ uint64_t ca_par_pers : 1; /**< [ 13: 13](R/W) Command/address persistent parity error mode. */
+ uint64_t dm : 1; /**< [ 14: 14](R/W) Data mask enable. */
+ uint64_t wr_dbi : 1; /**< [ 15: 15](R/W) Write DBI. */
+ uint64_t rd_dbi : 1; /**< [ 16: 16](R/W) Read DBI. */
+ uint64_t tccd_l : 3; /**< [ 19: 17](R/W) tCCD_L timing parameter:
+ 0x0 = 4.
+ 0x1 = 5.
+ 0x2 = 6.
+ 0x3 = 7.
+ 0x4 = 8.
+ 0x5-0x7 = reserved. */
+ uint64_t lpasr : 2; /**< [ 21: 20](R/W) LP auto self refresh. */
+ uint64_t crc : 1; /**< [ 22: 22](R/W) CRC mode. */
+ uint64_t gd : 1; /**< [ 23: 23](R/W) Gear-down mode. */
+ uint64_t pda : 1; /**< [ 24: 24](R/W) Per DRAM addressability. */
+ uint64_t temp_sense : 1; /**< [ 25: 25](R/W) Temperature sensor readout enable. */
+ uint64_t fgrm : 3; /**< [ 28: 26](R/W) Fine granularity refresh mode. */
+ uint64_t wr_cmd_lat : 2; /**< [ 30: 29](R/W) Write command latency when CRC and DM are both enabled. */
+ uint64_t mpr_fmt : 2; /**< [ 32: 31](R/W) MPR format. */
+ uint64_t xrank_add_tccd_s : 3; /**< [ 35: 33](R/W) Reserved.
+ Internal:
+ Add additional cycles on top of the 4 cycles applied to tCCD_S
+ when crossing logical rank (to a different bank group) of a 3DS DRAM. */
+ uint64_t xrank_add_tccd_l : 3; /**< [ 38: 36](R/W) Reserved.
+ Internal:
+ Add additional cycles on top of the 4 cycles applied to tCCD_L
+ when crossing logical rank (to the same bank group) of a 3DS DRAM. */
+ uint64_t reserved_39_63 : 25;
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_lmcx_modereg_params3_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_41_63 : 23;
+ uint64_t tc_ref_range : 1; /**< [ 40: 40](R/W) Temperature controlled refresh range: 0 = normal, 1 = extended. */
+ uint64_t tc_ref : 1; /**< [ 39: 39](R/W) Temperature controlled refresh mode: 0 = disable, 1 = enable. */
+ uint64_t xrank_add_tccd_l : 3; /**< [ 38: 36](R/W) Reserved.
+ Internal:
+ Add additional cycles on top of the 4 cycles applied to tCCD_L
+ when crossing logical rank (to the same bank group) of a 3DS DRAM. */
+ uint64_t xrank_add_tccd_s : 3; /**< [ 35: 33](R/W) Reserved.
+ Internal:
+ Add additional cycles on top of the 4 cycles applied to tCCD_S
+ when crossing logical rank (to a different bank group) of a 3DS DRAM. */
+ uint64_t mpr_fmt : 2; /**< [ 32: 31](R/W) MPR format. */
+ uint64_t wr_cmd_lat : 2; /**< [ 30: 29](R/W) Write command latency when CRC and DM are both enabled. */
+ uint64_t fgrm : 3; /**< [ 28: 26](R/W) Fine granularity refresh mode. */
+ uint64_t temp_sense : 1; /**< [ 25: 25](R/W) Temperature sensor readout enable. */
+ uint64_t pda : 1; /**< [ 24: 24](R/W) Per DRAM addressability. */
+ uint64_t gd : 1; /**< [ 23: 23](R/W) Gear-down mode enable. Software must ensure that LMC()_CONTROL[DDR2T] is
+ cleared before setting this field. This field must be set before running
+ the INIT sequence at the start of DRAM bringup. */
+ uint64_t crc : 1; /**< [ 22: 22](R/W) CRC mode. */
+ uint64_t lpasr : 2; /**< [ 21: 20](R/W) LP auto self refresh. */
+ uint64_t tccd_l : 3; /**< [ 19: 17](R/W) tCCD_L timing parameter:
+ 0x0 = 4.
+ 0x1 = 5.
+ 0x2 = 6.
+ 0x3 = 7.
+ 0x4 = 8.
+ 0x5-0x7 = reserved. */
+ uint64_t rd_dbi : 1; /**< [ 16: 16](R/W) Read DBI. */
+ uint64_t wr_dbi : 1; /**< [ 15: 15](R/W) Write DBI. */
+ uint64_t dm : 1; /**< [ 14: 14](R/W) Data mask enable. */
+ uint64_t ca_par_pers : 1; /**< [ 13: 13](R/W) Command/address persistent parity error mode. */
+ uint64_t odt_pd : 1; /**< [ 12: 12](R/W) ODT in PD mode. */
+ uint64_t par_lat_mode : 3; /**< [ 11: 9](R/W) Parity latency mode. */
+ uint64_t wr_preamble : 1; /**< [ 8: 8](R/W) Write preamble, 0 = one nCK, 1 = two nCK. */
+ uint64_t rd_preamble : 1; /**< [ 7: 7](R/W) Read preamble, 0 = one nCK, 1 = two nCK. */
+ uint64_t sre_abort : 1; /**< [ 6: 6](R/W) Self refresh abort. */
+ uint64_t cal : 3; /**< [ 5: 3](R/W) CS-to-CMD/ADDR latency mode (cycles). */
+ uint64_t vref_mon : 1; /**< [ 2: 2](R/W) Internal VREF monitor: 0 = disable, 1 = enable. */
+ uint64_t reserved_1 : 1;
+ uint64_t max_pd : 1; /**< [ 0: 0](R/W) Maximum power-down mode: 0 = disable, 1 = enable. */
+#else /* Word 0 - Little Endian */
+ uint64_t max_pd : 1; /**< [ 0: 0](R/W) Maximum power-down mode: 0 = disable, 1 = enable. */
+ uint64_t reserved_1 : 1;
+ uint64_t vref_mon : 1; /**< [ 2: 2](R/W) Internal VREF monitor: 0 = disable, 1 = enable. */
+ uint64_t cal : 3; /**< [ 5: 3](R/W) CS-to-CMD/ADDR latency mode (cycles). */
+ uint64_t sre_abort : 1; /**< [ 6: 6](R/W) Self refresh abort. */
+ uint64_t rd_preamble : 1; /**< [ 7: 7](R/W) Read preamble, 0 = one nCK, 1 = two nCK. */
+ uint64_t wr_preamble : 1; /**< [ 8: 8](R/W) Write preamble, 0 = one nCK, 1 = two nCK. */
+ uint64_t par_lat_mode : 3; /**< [ 11: 9](R/W) Parity latency mode. */
+ uint64_t odt_pd : 1; /**< [ 12: 12](R/W) ODT in PD mode. */
+ uint64_t ca_par_pers : 1; /**< [ 13: 13](R/W) Command/address persistent parity error mode. */
+ uint64_t dm : 1; /**< [ 14: 14](R/W) Data mask enable. */
+ uint64_t wr_dbi : 1; /**< [ 15: 15](R/W) Write DBI. */
+ uint64_t rd_dbi : 1; /**< [ 16: 16](R/W) Read DBI. */
+ uint64_t tccd_l : 3; /**< [ 19: 17](R/W) tCCD_L timing parameter:
+ 0x0 = 4.
+ 0x1 = 5.
+ 0x2 = 6.
+ 0x3 = 7.
+ 0x4 = 8.
+ 0x5-0x7 = reserved. */
+ uint64_t lpasr : 2; /**< [ 21: 20](R/W) LP auto self refresh. */
+ uint64_t crc : 1; /**< [ 22: 22](R/W) CRC mode. */
+ uint64_t gd : 1; /**< [ 23: 23](R/W) Gear-down mode enable. Software must ensure that LMC()_CONTROL[DDR2T] is
+ cleared before setting this field. This field must be set before running
+ the INIT sequence at the start of DRAM bringup. */
+ uint64_t pda : 1; /**< [ 24: 24](R/W) Per DRAM addressability. */
+ uint64_t temp_sense : 1; /**< [ 25: 25](R/W) Temperature sensor readout enable. */
+ uint64_t fgrm : 3; /**< [ 28: 26](R/W) Fine granularity refresh mode. */
+ uint64_t wr_cmd_lat : 2; /**< [ 30: 29](R/W) Write command latency when CRC and DM are both enabled. */
+ uint64_t mpr_fmt : 2; /**< [ 32: 31](R/W) MPR format. */
+ uint64_t xrank_add_tccd_s : 3; /**< [ 35: 33](R/W) Reserved.
+ Internal:
+ Add additional cycles on top of the 4 cycles applied to tCCD_S
+ when crossing logical rank (to a different bank group) of a 3DS DRAM. */
+ uint64_t xrank_add_tccd_l : 3; /**< [ 38: 36](R/W) Reserved.
+ Internal:
+ Add additional cycles on top of the 4 cycles applied to tCCD_L
+ when crossing logical rank (to the same bank group) of a 3DS DRAM. */
+ uint64_t tc_ref : 1; /**< [ 39: 39](R/W) Temperature controlled refresh mode: 0 = disable, 1 = enable. */
+ uint64_t tc_ref_range : 1; /**< [ 40: 40](R/W) Temperature controlled refresh range: 0 = normal, 1 = extended. */
+ uint64_t reserved_41_63 : 23;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_lmcx_modereg_params3 bdk_lmcx_modereg_params3_t;
+
+static inline uint64_t BDK_LMCX_MODEREG_PARAMS3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_MODEREG_PARAMS3(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000058ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000058ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000058ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000058ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_MODEREG_PARAMS3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_MODEREG_PARAMS3(a) bdk_lmcx_modereg_params3_t
+#define bustype_BDK_LMCX_MODEREG_PARAMS3(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_MODEREG_PARAMS3(a) "LMCX_MODEREG_PARAMS3"
+#define device_bar_BDK_LMCX_MODEREG_PARAMS3(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_MODEREG_PARAMS3(a) (a)
+#define arguments_BDK_LMCX_MODEREG_PARAMS3(a) (a),-1,-1,-1
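+
+/* Editor's sketch (illustration only, not part of the imported BDK sources):
+ * the TCCD_L field above encodes clock cycles with a bias of four
+ * (0x0 = 4 ... 0x4 = 8, higher encodings reserved), so a caller can derive
+ * the field value from the desired tCCD_L in cycles. The helper name is
+ * invented for illustration. */
+#if 0 /* illustration only, never compiled */
+static int example_tccd_l_encode(int cycles)
+{
+    if (cycles < 4 || cycles > 8)
+        return -1;       /* would fall into the reserved 0x5-0x7 encodings */
+    return cycles - 4;   /* 4 -> 0x0, 5 -> 0x1, ..., 8 -> 0x4 */
+}
+#endif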
+
+/**
+ * Register (RSL) lmc#_mpr_data0
+ *
+ * LMC MR Data Register 0
+ * This register provides bits \<63:0\> of the MPR data register.
+ */
+union bdk_lmcx_mpr_data0
+{
+ uint64_t u;
+ struct bdk_lmcx_mpr_data0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t mpr_data : 64; /**< [ 63: 0](RO/H) MPR data bits\<63:0\>. Bits\<7:0\> represent the MPR data for the lowest-order x4 device (x4
+ device 0); bits\<15:8\> represent x4 device 1; ..., bits\<63:56\> are for x4 device 7.
+
+ This field is also used to store the results after running the general R/W training
+ sequence (LMC()_SEQ_CTL[SEQ_SEL] = 0xE).
+ The format of the stored results is controlled by LMC()_DBTRAIN_CTL[RW_TRAIN].
+ When LMC()_DBTRAIN_CTL[RW_TRAIN] = 1, this field stores the R/W comparison output
+ from all DQ63 - DQ0.
+ When LMC()_DBTRAIN_CTL[RW_TRAIN] = 0, this field stores the positive edge read data
+ on a particular cycle coming from DQ63 - DQ0. */
+#else /* Word 0 - Little Endian */
+ uint64_t mpr_data : 64; /**< [ 63: 0](RO/H) MPR data bits\<63:0\>. Bits\<7:0\> represent the MPR data for the lowest-order x4 device (x4
+ device 0); bits\<15:8\> represent x4 device 1; ..., bits\<63:56\> are for x4 device 7.
+
+ This field is also used to store the results after running the general R/W training
+ sequence (LMC()_SEQ_CTL[SEQ_SEL] = 0xE).
+ The format of the stored results is controlled by LMC()_DBTRAIN_CTL[RW_TRAIN].
+ When LMC()_DBTRAIN_CTL[RW_TRAIN] = 1, this field stores the R/W comparison output
+ from all DQ63 - DQ0.
+ When LMC()_DBTRAIN_CTL[RW_TRAIN] = 0, this field stores the positive edge read data
+ on a particular cycle coming from DQ63 - DQ0. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_mpr_data0_s cn; */
+};
+typedef union bdk_lmcx_mpr_data0 bdk_lmcx_mpr_data0_t;
+
+static inline uint64_t BDK_LMCX_MPR_DATA0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_MPR_DATA0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000070ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000070ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000070ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000070ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_MPR_DATA0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_MPR_DATA0(a) bdk_lmcx_mpr_data0_t
+#define bustype_BDK_LMCX_MPR_DATA0(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_MPR_DATA0(a) "LMCX_MPR_DATA0"
+#define device_bar_BDK_LMCX_MPR_DATA0(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_MPR_DATA0(a) (a)
+#define arguments_BDK_LMCX_MPR_DATA0(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) lmc#_mpr_data1
+ *
+ * LMC MR Data Register 1
+ * This register provides bits \<127:64\> of the MPR data register.
+ */
+union bdk_lmcx_mpr_data1
+{
+ uint64_t u;
+ struct bdk_lmcx_mpr_data1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t mpr_data : 64; /**< [ 63: 0](RO/H) MPR data bits\<127:64\>. Bits\<7:0\> represent the MPR data for x4 device 8; bits\<15:8\>
+ represent x4 device 9; ...; bits\<63:56\> are for x4 device 15.
+
+ This field is also used to store the results after running the general R/W training
+ sequence (LMC()_SEQ_CTL[SEQ_SEL] = 0xE).
+ The format of the stored results is controlled by LMC()_DBTRAIN_CTL[RW_TRAIN].
+ When LMC()_DBTRAIN_CTL[RW_TRAIN] = 1, this field stores the R/W comparison output
+ from the ECC byte (DQ71 - DQ64).
+ When LMC()_DBTRAIN_CTL[RW_TRAIN] = 0, [MPR_DATA]\<7:0\> stores the positive edge read data
+ on a particular cycle coming from the ECC byte (DQ71 - DQ64), while
+ [MPR_DATA]\<64:8\> stores the negative edge read data coming from DQ55 - DQ0. */
+#else /* Word 0 - Little Endian */
+ uint64_t mpr_data : 64; /**< [ 63: 0](RO/H) MPR data bits\<127:64\>. Bits\<7:0\> represent the MPR data for x4 device 8; bits\<15:8\>
+ represent x4 device 9; ...; bits\<63:56\> are for x4 device 15.
+
+ This field is also used to store the results after running the general R/W training
+ sequence (LMC()_SEQ_CTL[SEQ_SEL] = 0xE).
+ The format of the stored results is controlled by LMC()_DBTRAIN_CTL[RW_TRAIN].
+ When LMC()_DBTRAIN_CTL[RW_TRAIN] = 1, this field stores the R/W comparison output
+ from the ECC byte (DQ71 - DQ64).
+ When LMC()_DBTRAIN_CTL[RW_TRAIN] = 0, [MPR_DATA]\<7:0\> stores the positive edge read data
+ on a particular cycle coming from the ECC byte (DQ71 - DQ64), while
+ [MPR_DATA]\<64:8\> stores the negative edge read data coming from DQ55 - DQ0. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_mpr_data1_s cn; */
+};
+typedef union bdk_lmcx_mpr_data1 bdk_lmcx_mpr_data1_t;
+
+static inline uint64_t BDK_LMCX_MPR_DATA1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_MPR_DATA1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000078ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000078ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000078ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000078ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_MPR_DATA1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_MPR_DATA1(a) bdk_lmcx_mpr_data1_t
+#define bustype_BDK_LMCX_MPR_DATA1(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_MPR_DATA1(a) "LMCX_MPR_DATA1"
+#define device_bar_BDK_LMCX_MPR_DATA1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_MPR_DATA1(a) (a)
+#define arguments_BDK_LMCX_MPR_DATA1(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) lmc#_mpr_data2
+ *
+ * LMC MR Data Register 2
+ * This register provides bits \<143:128\> of the MPR data register.
+ */
+union bdk_lmcx_mpr_data2
+{
+ uint64_t u;
+ struct bdk_lmcx_mpr_data2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t mpr_data : 16; /**< [ 15: 0](RO/H) MPR data bits\<143:128\>. Bits\<7:0\> represent the MPR data for x4 device 16; bits\<15:8\>
+ represent x4 device 17.
+
+ This field is also used to store the results after running the general R/W training
+ sequence (LMC()_SEQ_CTL[SEQ_SEL] = 0xE).
+ The format of the stored results is controlled by LMC()_DBTRAIN_CTL[RW_TRAIN].
+ When LMC()_DBTRAIN_CTL[RW_TRAIN] = 1, this field is not used.
+ When LMC()_DBTRAIN_CTL[RW_TRAIN] = 0, [MPR_DATA]\<15:0\> stores the negative edge read data
+ on a particular cycle coming from DQ71 - DQ56. */
+#else /* Word 0 - Little Endian */
+ uint64_t mpr_data : 16; /**< [ 15: 0](RO/H) MPR data bits\<143:128\>. Bits\<7:0\> represent the MPR data for x4 device 16; bits\<15:8\>
+ represent x4 device 17.
+
+ This field is also used to store the results after running the general R/W training
+ sequence (LMC()_SEQ_CTL[SEQ_SEL] = 0xE).
+ The format of the stored results is controlled by LMC()_DBTRAIN_CTL[RW_TRAIN].
+ When LMC()_DBTRAIN_CTL[RW_TRAIN] = 1, this field is not used.
+ When LMC()_DBTRAIN_CTL[RW_TRAIN] = 0, [MPR_DATA]\<15:0\> stores the negative edge read data
+ on a particular cycle coming from DQ71 - DQ56. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_mpr_data2_s cn; */
+};
+typedef union bdk_lmcx_mpr_data2 bdk_lmcx_mpr_data2_t;
+
+static inline uint64_t BDK_LMCX_MPR_DATA2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_MPR_DATA2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000080ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000080ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000080ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000080ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_MPR_DATA2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_MPR_DATA2(a) bdk_lmcx_mpr_data2_t
+#define bustype_BDK_LMCX_MPR_DATA2(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_MPR_DATA2(a) "LMCX_MPR_DATA2"
+#define device_bar_BDK_LMCX_MPR_DATA2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_MPR_DATA2(a) (a)
+#define arguments_BDK_LMCX_MPR_DATA2(a) (a),-1,-1,-1
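+
+/* Usage sketch (editorial illustration, not part of the imported BDK sources):
+ * the full 144-bit MPR image is split across LMC()_MPR_DATA0/1/2, one byte
+ * per x4 device. Assuming the BDK_CSR_READ wrapper, the per-device bytes can
+ * be gathered as:
+ *
+ *     uint8_t mpr[18]; // one byte per x4 device 0..17
+ *     uint64_t d0 = BDK_CSR_READ(node, BDK_LMCX_MPR_DATA0(lmc)); // devices 0-7
+ *     uint64_t d1 = BDK_CSR_READ(node, BDK_LMCX_MPR_DATA1(lmc)); // devices 8-15
+ *     uint64_t d2 = BDK_CSR_READ(node, BDK_LMCX_MPR_DATA2(lmc)); // devices 16-17
+ *     for (int dev = 0; dev < 18; dev++) {
+ *         uint64_t w = (dev < 8) ? d0 : (dev < 16) ? d1 : d2;
+ *         mpr[dev] = (w >> ((dev & 7) * 8)) & 0xff;
+ *     }
+ */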
+
+/**
+ * Register (RSL) lmc#_mr_mpr_ctl
+ *
+ * LMC MR Write and MPR Control Register
+ * This register provides the control functions when programming the MPR of DDR4 DRAMs.
+ */
+union bdk_lmcx_mr_mpr_ctl
+{
+ uint64_t u;
+ struct bdk_lmcx_mr_mpr_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_61_63 : 3;
+ uint64_t mr_wr_secure_key_ena : 1; /**< [ 60: 60](R/W) When set, this enables issuing the security key with the
+ unique address field A[17:0] set by LMC()_MR_MPR_CTL[MR_WR_ADDR]
+ during the MRW sequence.
+ Set this to one when executing DRAM post-package repair manually
+ using the MRW operation. */
+ uint64_t pba_func_space : 3; /**< [ 59: 57](R/W) Set the function space selector during PBA mode of the MRW
+ sequence. */
+ uint64_t mr_wr_bg1 : 1; /**< [ 56: 56](R/W) BG1 part of the address select for MRS in DDR4 mode. */
+ uint64_t mpr_sample_dq_enable : 1; /**< [ 55: 55](R/W) Reserved.
+ Internal:
+ No longer used due to logic change from
+ initial design. */
+ uint64_t pda_early_dqx : 1; /**< [ 54: 54](R/W) When set, it enables lmc_dqx early for PDA/PBA operation. */
+ uint64_t mr_wr_pba_enable : 1; /**< [ 53: 53](R/W) Reserved.
+ Internal:
+ Per buffer addressability write enable. When set, MRW operations use PBA,
+ enabled by [MR_WR_PDA_MASK] per buffer. Only available for DDR4 LRDIMM. */
+ uint64_t mr_wr_use_default_value : 1;/**< [ 52: 52](R/W) When set, write the value to the MR that is computed from the value set in various CSR
+ fields that would be used during initialization, rather than using the value in the
+ LMC()_MR_MPR_CTL[MR_WR_ADDR]. Useful to rewrite the same value or to change single
+ bits without having to compute a whole new value for the MR. */
+ uint64_t mpr_whole_byte_enable : 1; /**< [ 51: 51](R/W) Reserved.
+ Internal:
+ Select a whole byte of DRAM data to read when whole-byte mode enabled. */
+ uint64_t mpr_byte_select : 4; /**< [ 50: 47](R/W) Reserved.
+ Internal:
+ Select a whole byte of DRAM data to read when whole-byte mode enabled. */
+ uint64_t mpr_bit_select : 2; /**< [ 46: 45](R/W) Select which of four bits to read for each nibble of DRAM data. Typically all four bits
+ from a x4 device, or all eight bits from a x8 device, or all 16 bits from a x16 device
+ carry the same data, but this field allows selection of which device bit will be used to
+ read the MPR data. */
+ uint64_t mpr_wr : 1; /**< [ 44: 44](R/W) MPR sequence will perform a write operation when set. */
+ uint64_t mpr_loc : 2; /**< [ 43: 42](R/W) MPR location select for MPR sequence. Only makes a difference for DDR4. */
+ uint64_t mr_wr_pda_enable : 1; /**< [ 41: 41](R/W) PDA write enable. When set, MRW operations use PDA, enabled by [MR_WR_PDA_MASK] per device.
+ Only available for DDR4 devices. */
+ uint64_t mr_wr_pda_mask : 18; /**< [ 40: 23](R/W) PDA mask. If [MR_WR_PDA_ENABLE] = 1 and there is a one in the bit for this mask value, then
+ the corresponding DRAM device is enabled for the PDA MR write operation.
+ Bit\<23\> corresponds to the lowest order, x4 device, and bit\<40\> corresponds to the highest
+ order x4 device, for a total of up to 18 devices. */
+ uint64_t mr_wr_rank : 2; /**< [ 22: 21](R/W) Selects the DRAM rank for either MRW or MPR sequences. */
+ uint64_t mr_wr_sel : 3; /**< [ 20: 18](R/W) Selects which MR to write with the MR write sequence.
+ Which pins to drive and how to drive them is automatically controlled through the DDR3/4
+ mode setting. Bits\<19:18\> are also used to select the MPR page for an MPR sequence.
+ A value of 0x7 selects an RCW write for both DDR4 and DDR3 MRW operations. */
+ uint64_t mr_wr_addr : 18; /**< [ 17: 0](R/W) Sets a value for A\<17:0\> for MR write operations. Note that many of these bits
+ must be zero for various MRs. Bits\<7:0\> are also used for write data on an MPR
+ sequence write operation. */
+#else /* Word 0 - Little Endian */
+ uint64_t mr_wr_addr : 18; /**< [ 17: 0](R/W) Sets a value for A\<17:0\> for MR write operations. Note that many of these bits
+ must be zero for various MRs. Bits\<7:0\> are also used for write data on an MPR
+ sequence write operation. */
+ uint64_t mr_wr_sel : 3; /**< [ 20: 18](R/W) Selects which MR to write with the MR write sequence.
+ Which pins to drive and how to drive them is automatically controlled through the DDR3/4
+ mode setting. Bits\<19:18\> are also used to select the MPR page for an MPR sequence.
+ A value of 0x7 selects an RCW write for both DDR4 and DDR3 MRW operations. */
+ uint64_t mr_wr_rank : 2; /**< [ 22: 21](R/W) Selects the DRAM rank for either MRW or MPR sequences. */
+ uint64_t mr_wr_pda_mask : 18; /**< [ 40: 23](R/W) PDA mask. If [MR_WR_PDA_ENABLE] = 1 and there is a one in the bit for this mask value, then
+ the corresponding DRAM device is enabled for the PDA MR write operation.
+ Bit\<23\> corresponds to the lowest order, x4 device, and bit\<40\> corresponds to the highest
+ order x4 device, for a total of up to 18 devices. */
+ uint64_t mr_wr_pda_enable : 1; /**< [ 41: 41](R/W) PDA write enable. When set, MRW operations use PDA, enabled by [MR_WR_PDA_MASK] per device.
+ Only available for DDR4 devices. */
+ uint64_t mpr_loc : 2; /**< [ 43: 42](R/W) MPR location select for MPR sequence. Only makes a difference for DDR4. */
+ uint64_t mpr_wr : 1; /**< [ 44: 44](R/W) MPR sequence will perform a write operation when set. */
+ uint64_t mpr_bit_select : 2; /**< [ 46: 45](R/W) Select which of four bits to read for each nibble of DRAM data. Typically all four bits
+ from a x4 device, or all eight bits from a x8 device, or all 16 bits from a x16 device
+ carry the same data, but this field allows selection of which device bit will be used to
+ read the MPR data. */
+ uint64_t mpr_byte_select : 4; /**< [ 50: 47](R/W) Reserved.
+ Internal:
+ Select a whole byte of DRAM data to read when whole-byte mode enabled. */
+ uint64_t mpr_whole_byte_enable : 1; /**< [ 51: 51](R/W) Reserved.
+ Internal:
+ Select a whole byte of DRAM data to read when whole-byte mode enabled. */
+ uint64_t mr_wr_use_default_value : 1;/**< [ 52: 52](R/W) When set, write the value to the MR that is computed from the value set in various CSR
+ fields that would be used during initialization, rather than using the value in the
+ LMC()_MR_MPR_CTL[MR_WR_ADDR]. Useful to rewrite the same value or to change single
+ bits without having to compute a whole new value for the MR. */
+ uint64_t mr_wr_pba_enable : 1; /**< [ 53: 53](R/W) Reserved.
+ Internal:
+ Per buffer addressability write enable. When set, MRW operations use PBA,
+ enabled by [MR_WR_PDA_MASK] per buffer. Only available for DDR4 LRDIMM. */
+ uint64_t pda_early_dqx : 1; /**< [ 54: 54](R/W) When set, it enables lmc_dqx early for PDA/PBA operation. */
+ uint64_t mpr_sample_dq_enable : 1; /**< [ 55: 55](R/W) Reserved.
+ Internal:
+ No longer used due to logic change from
+ initial design. */
+ uint64_t mr_wr_bg1 : 1; /**< [ 56: 56](R/W) BG1 part of the address select for MRS in DDR4 mode. */
+ uint64_t pba_func_space : 3; /**< [ 59: 57](R/W) Set the function space selector during PBA mode of the MRW
+ sequence. */
+ uint64_t mr_wr_secure_key_ena : 1; /**< [ 60: 60](R/W) When set, this enables issuing the security key with the
+ unique address field A[17:0] set by LMC()_MR_MPR_CTL[MR_WR_ADDR]
+ during the MRW sequence.
+ Set this to one when executing DRAM post-package repair manually
+ using the MRW operation. */
+ uint64_t reserved_61_63 : 3;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_mr_mpr_ctl_s cn8; */
+ struct bdk_lmcx_mr_mpr_ctl_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_61_63 : 3;
+ uint64_t mr_wr_secure_key_ena : 1; /**< [ 60: 60](R/W) When set, this enables issuing the security key with the
+ unique address field A[17:0] set by LMC()_MR_MPR_CTL[MR_WR_ADDR]
+ during the MRW sequence.
+ Set this to one when executing DRAM post-package repair manually
+ using the MRW operation. */
+ uint64_t pba_func_space : 3; /**< [ 59: 57](R/W) Set the function space selector during PBA mode of the MRW
+ sequence. */
+ uint64_t mr_wr_bg1 : 1; /**< [ 56: 56](R/W) BG1 part of the address select for MRS in DDR4 mode. */
+ uint64_t mpr_sample_dq_enable : 1; /**< [ 55: 55](R/W) Reserved.
+ Internal:
+ No longer used due to logic change from
+ initial design. */
+ uint64_t pda_early_dqx : 1; /**< [ 54: 54](R/W) When set, it enables lmc_dqx early for PDA/PBA operation. */
+ uint64_t mr_wr_pba_enable : 1; /**< [ 53: 53](R/W) Reserved.
+ Internal:
+ Per buffer addressability write enable. When set, MRW operations use PBA,
+ enabled by [MR_WR_PDA_MASK] per buffer. Only available for DDR4 LRDIMM. */
+ uint64_t mr_wr_use_default_value : 1;/**< [ 52: 52](R/W) When set, write the value to the MR that is computed from the value set in various CSR
+ fields that would be used during initialization, rather than using the value in the
+ LMC()_MR_MPR_CTL[MR_WR_ADDR]. Useful to rewrite the same value or to change single
+ bits without having to compute a whole new value for the MR. */
+ uint64_t mpr_whole_byte_enable : 1; /**< [ 51: 51](R/W) Reserved.
+ Internal:
+ Select a whole byte of DRAM data to read when whole-byte mode enabled. */
+ uint64_t mpr_byte_select : 4; /**< [ 50: 47](R/W) Reserved.
+ Internal:
+ Select a whole byte of DRAM data to read when whole-byte mode enabled. */
+ uint64_t mpr_bit_select : 2; /**< [ 46: 45](R/W) Select which of four bits to read for each nibble of DRAM data. Typically all four bits
+ from a x4 device, or all eight bits from a x8 device, or all 16 bits from a x16 device
+ carry the same data, but this field allows selection of which device bit will be used to
+ read the MPR data. */
+ uint64_t mpr_wr : 1; /**< [ 44: 44](R/W) MPR sequence will perform a write operation when set. */
+ uint64_t mpr_loc : 2; /**< [ 43: 42](R/W) MPR location select for MPR sequence. Only makes a difference for DDR4. */
+ uint64_t mr_wr_pda_enable : 1; /**< [ 41: 41](R/W) PDA write enable. When set, MRW operations use PDA, enabled by [MR_WR_PDA_MASK] per device.
+ Only available for DDR4 devices. */
+ uint64_t mr_wr_pda_mask : 18; /**< [ 40: 23](R/W) PDA mask. If [MR_WR_PDA_ENABLE] = 1 and there is a one in the bit for this mask value, then
+ the corresponding DRAM device is enabled for the PDA MR write operation.
+ Bit\<23\> corresponds to the lowest order, x4 device, and bit\<40\> corresponds to the highest
+ order x4 device, for a total of up to 18 devices. */
+ uint64_t mr_wr_rank : 2; /**< [ 22: 21](R/W) Selects the DRAM rank for either MRW or MPR sequences. This field also selects the DRAM
+ rank when running LMC_SEQ_SEL_E::VREF_INT sequence. */
+ uint64_t mr_wr_sel : 3; /**< [ 20: 18](R/W) Selects which Mode Register to write with the MR Write sequence.
+ Which pins to drive and how to drive them is automatically controlled through the DDR4
+ mode setting.
+ Bits\<19:18\> are also used to select the MPR page for an MPR sequence.
+ A value of 0x7 selects a DDR4RCD control word (RCW) write. */
+ uint64_t mr_wr_addr : 18; /**< [ 17: 0](R/W) Sets a value for A\<17:0\> for MR write operations. Note that many of these bits
+ must be zero for various MRs. Bits\<7:0\> are also used for write data on an MPR
+ sequence write operation. */
+#else /* Word 0 - Little Endian */
+ uint64_t mr_wr_addr : 18; /**< [ 17: 0](R/W) Sets a value for A\<17:0\> for MR write operations. Note that many of these bits
+ must be zero for various MRs. Bits\<7:0\> are also used for write data on an MPR
+ sequence write operation. */
+ uint64_t mr_wr_sel : 3; /**< [ 20: 18](R/W) Selects which Mode Register to write with the MR Write sequence.
+ Which pins to drive and how to drive them is automatically controlled through the DDR4
+ mode setting.
+ Bits\<19:18\> are also used to select the MPR page for an MPR sequence.
+ A value of 0x7 selects a DDR4RCD control word (RCW) write. */
+ uint64_t mr_wr_rank : 2; /**< [ 22: 21](R/W) Selects the DRAM rank for either MRW or MPR sequences. This field also selects the DRAM
+ rank when running LMC_SEQ_SEL_E::VREF_INT sequence. */
+ uint64_t mr_wr_pda_mask : 18; /**< [ 40: 23](R/W) PDA mask. If [MR_WR_PDA_ENABLE] = 1 and there is a one in the bit for this mask value, then
+ the corresponding DRAM device is enabled for the PDA MR write operation.
+ Bit\<23\> corresponds to the lowest order, x4 device, and bit\<40\> corresponds to the highest
+ order x4 device, for a total of up to 18 devices. */
+ uint64_t mr_wr_pda_enable : 1; /**< [ 41: 41](R/W) PDA write enable. When set, MRW operations use PDA, enabled by [MR_WR_PDA_MASK] per device.
+ Only available for DDR4 devices. */
+ uint64_t mpr_loc : 2; /**< [ 43: 42](R/W) MPR location select for MPR sequence. Only makes a difference for DDR4. */
+ uint64_t mpr_wr : 1; /**< [ 44: 44](R/W) MPR sequence will perform a write operation when set. */
+ uint64_t mpr_bit_select : 2; /**< [ 46: 45](R/W) Select which of four bits to read for each nibble of DRAM data. Typically all four bits
+ from a x4 device, or all eight bits from a x8 device, or all 16 bits from a x16 device
+ carry the same data, but this field allows selection of which device bit will be used to
+ read the MPR data. */
+ uint64_t mpr_byte_select : 4; /**< [ 50: 47](R/W) Reserved.
+ Internal:
+ Select a whole byte of DRAM data to read when whole-byte mode enabled. */
+ uint64_t mpr_whole_byte_enable : 1; /**< [ 51: 51](R/W) Reserved.
+ Internal:
+ Select a whole byte of DRAM data to read when whole-byte mode enabled. */
+ uint64_t mr_wr_use_default_value : 1;/**< [ 52: 52](R/W) When set, write the value to the MR that is computed from the value set in various CSR
+ fields that would be used during initialization, rather than using the value in the
+ LMC()_MR_MPR_CTL[MR_WR_ADDR]. Useful to rewrite the same value or to change single
+ bits without having to compute a whole new value for the MR. */
+ uint64_t mr_wr_pba_enable : 1; /**< [ 53: 53](R/W) Reserved.
+ Internal:
+ Per buffer addressability write enable. When set, MRW operations use PBA,
+ enabled by [MR_WR_PDA_MASK] per buffer. Only available for DDR4 LRDIMM. */
+ uint64_t pda_early_dqx : 1; /**< [ 54: 54](R/W) When set, it enables lmc_dqx early for PDA/PBA operation. */
+ uint64_t mpr_sample_dq_enable : 1; /**< [ 55: 55](R/W) Reserved.
+ Internal:
+ No longer used due to logic change from
+ initial design. */
+ uint64_t mr_wr_bg1 : 1; /**< [ 56: 56](R/W) BG1 part of the address select for MRS in DDR4 mode. */
+ uint64_t pba_func_space : 3; /**< [ 59: 57](R/W) Set the function space selector during PBA mode of the MRW
+ sequence. */
+ uint64_t mr_wr_secure_key_ena : 1; /**< [ 60: 60](R/W) When set, this enables issuing the security key with the
+ unique address field A[17:0] set by LMC()_MR_MPR_CTL[MR_WR_ADDR]
+ during the MRW sequence.
+ Set this to one when executing DRAM post-package repair manually
+ using the MRW operation. */
+ uint64_t reserved_61_63 : 3;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_lmcx_mr_mpr_ctl bdk_lmcx_mr_mpr_ctl_t;
+
+static inline uint64_t BDK_LMCX_MR_MPR_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_MR_MPR_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000068ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000068ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000068ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000068ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_MR_MPR_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_MR_MPR_CTL(a) bdk_lmcx_mr_mpr_ctl_t
+#define bustype_BDK_LMCX_MR_MPR_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_MR_MPR_CTL(a) "LMCX_MR_MPR_CTL"
+#define device_bar_BDK_LMCX_MR_MPR_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_MR_MPR_CTL(a) (a)
+#define arguments_BDK_LMCX_MR_MPR_CTL(a) (a),-1,-1,-1
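+
+/* Usage sketch (editorial illustration, not part of the imported BDK sources):
+ * an MR write is staged by filling in these fields before the MRW sequence is
+ * kicked off through LMC()_SEQ_CTL (not shown). For example, targeting a mode
+ * register on rank 0, where mr_value supplies A<17:0>:
+ *
+ *     bdk_lmcx_mr_mpr_ctl_t ctl;
+ *     ctl.u = 0;
+ *     ctl.s.mr_wr_sel  = 6;        // MR select; 0x7 would select an RCW write
+ *     ctl.s.mr_wr_rank = 0;        // DRAM rank for the MRW sequence
+ *     ctl.s.mr_wr_addr = mr_value; // A<17:0>; many bits must be zero per MR
+ *     BDK_CSR_WRITE(node, BDK_LMCX_MR_MPR_CTL(lmc), ctl.u);
+ */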
+
+/**
+ * Register (RSL) lmc#_msix_pba#
+ *
+ * LMC MSI-X Pending Bit Array Registers
+ * This register is the LMC-X PBA table; the bit number is indexed by the LMC_INT_VEC_E enumeration.
+ */
+union bdk_lmcx_msix_pbax
+{
+ uint64_t u;
+ struct bdk_lmcx_msix_pbax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pend : 64; /**< [ 63: 0](RO/H) Pending message for the associated LMC()_MSIX_VEC()_CTL, enumerated by LMC_INT_VEC_E. Bits
+ that have no associated LMC_INT_VEC_E are zero. */
+#else /* Word 0 - Little Endian */
+ uint64_t pend : 64; /**< [ 63: 0](RO/H) Pending message for the associated LMC()_MSIX_VEC()_CTL, enumerated by LMC_INT_VEC_E. Bits
+ that have no associated LMC_INT_VEC_E are zero. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_msix_pbax_s cn; */
+};
+typedef union bdk_lmcx_msix_pbax bdk_lmcx_msix_pbax_t;
+
+static inline uint64_t BDK_LMCX_MSIX_PBAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_MSIX_PBAX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b==0)))
+ return 0x87e088ff0000ll + 0x1000000ll * ((a) & 0x0) + 8ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b==0)))
+ return 0x87e088ff0000ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b==0)))
+ return 0x87e088ff0000ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=2) && (b==0)))
+ return 0x87e088ff0000ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x0);
+ __bdk_csr_fatal("LMCX_MSIX_PBAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_LMCX_MSIX_PBAX(a,b) bdk_lmcx_msix_pbax_t
+#define bustype_BDK_LMCX_MSIX_PBAX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_MSIX_PBAX(a,b) "LMCX_MSIX_PBAX"
+#define device_bar_BDK_LMCX_MSIX_PBAX(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_LMCX_MSIX_PBAX(a,b) (a)
+#define arguments_BDK_LMCX_MSIX_PBAX(a,b) (a),(b),-1,-1
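+
+/* Usage sketch (editorial illustration, not part of the imported BDK sources):
+ * checking whether a (possibly masked) interrupt is pending; the bit index
+ * comes from the LMC_INT_VEC_E enumeration (here a hypothetical lmc_int_vec
+ * variable), and PBA index b is 0 for the models listed above:
+ *
+ *     uint64_t pba = BDK_CSR_READ(node, BDK_LMCX_MSIX_PBAX(lmc, 0));
+ *     bool pending = (pba >> lmc_int_vec) & 1;
+ */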
+
+/**
+ * Register (RSL) lmc#_msix_vec#_addr
+ *
+ * LMC MSI-X Vector-Table Address Register
+ * This register is the MSI-X vector table, indexed by the LMC_INT_VEC_E enumeration.
+ */
+union bdk_lmcx_msix_vecx_addr
+{
+ uint64_t u;
+ struct bdk_lmcx_msix_vecx_addr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_53_63 : 11;
+ uint64_t addr : 51; /**< [ 52: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_1 : 1;
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's LMC()_MSIX_VEC()_ADDR, LMC()_MSIX_VEC()_CTL, and corresponding
+ bit of LMC()_MSIX_PBA() are RAZ/WI and do not cause a fault when accessed
+ by the nonsecure world. */
+#else /* Word 0 - Little Endian */
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's LMC()_MSIX_VEC()_ADDR, LMC()_MSIX_VEC()_CTL, and corresponding
+ bit of LMC()_MSIX_PBA() are RAZ/WI and do not cause a fault when accessed
+ by the nonsecure world. */
+ uint64_t reserved_1 : 1;
+ uint64_t addr : 51; /**< [ 52: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_53_63 : 11;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_msix_vecx_addr_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_1 : 1;
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's LMC()_MSIX_VEC()_ADDR, LMC()_MSIX_VEC()_CTL, and corresponding
+ bit of LMC()_MSIX_PBA() are RAZ/WI and do not cause a fault when accessed
+ by the nonsecure world. */
+#else /* Word 0 - Little Endian */
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's LMC()_MSIX_VEC()_ADDR, LMC()_MSIX_VEC()_CTL, and corresponding
+ bit of LMC()_MSIX_PBA() are RAZ/WI and do not cause a fault when accessed
+ by the nonsecure world. */
+ uint64_t reserved_1 : 1;
+ uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_lmcx_msix_vecx_addr_s cn9; */
+};
+typedef union bdk_lmcx_msix_vecx_addr bdk_lmcx_msix_vecx_addr_t;
+
+static inline uint64_t BDK_LMCX_MSIX_VECX_ADDR(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_MSIX_VECX_ADDR(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b==0)))
+ return 0x87e088f00000ll + 0x1000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b==0)))
+ return 0x87e088f00000ll + 0x1000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b==0)))
+ return 0x87e088f00000ll + 0x1000000ll * ((a) & 0x3) + 0x10ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=2) && (b==0)))
+ return 0x87e088f00000ll + 0x1000000ll * ((a) & 0x3) + 0x10ll * ((b) & 0x0);
+ __bdk_csr_fatal("LMCX_MSIX_VECX_ADDR", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_LMCX_MSIX_VECX_ADDR(a,b) bdk_lmcx_msix_vecx_addr_t
+#define bustype_BDK_LMCX_MSIX_VECX_ADDR(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_MSIX_VECX_ADDR(a,b) "LMCX_MSIX_VECX_ADDR"
+#define device_bar_BDK_LMCX_MSIX_VECX_ADDR(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_LMCX_MSIX_VECX_ADDR(a,b) (a)
+#define arguments_BDK_LMCX_MSIX_VECX_ADDR(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) lmc#_msix_vec#_ctl
+ *
+ * LMC MSI-X Vector-Table Control and Data Register
+ * This register is the MSI-X vector table, indexed by the LMC_INT_VEC_E enumeration.
+ */
+union bdk_lmcx_msix_vecx_ctl
+{
+ uint64_t u;
+ struct bdk_lmcx_msix_vecx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
+ uint64_t data : 32; /**< [ 31: 0](R/W) Data to use for MSI-X delivery of this vector. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 32; /**< [ 31: 0](R/W) Data to use for MSI-X delivery of this vector. */
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_msix_vecx_ctl_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t data : 20; /**< [ 19: 0](R/W) Data to use for MSI-X delivery of this vector. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 20; /**< [ 19: 0](R/W) Data to use for MSI-X delivery of this vector. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_lmcx_msix_vecx_ctl_s cn9; */
+};
+typedef union bdk_lmcx_msix_vecx_ctl bdk_lmcx_msix_vecx_ctl_t;
+
+static inline uint64_t BDK_LMCX_MSIX_VECX_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_MSIX_VECX_CTL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b==0)))
+ return 0x87e088f00008ll + 0x1000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b==0)))
+ return 0x87e088f00008ll + 0x1000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b==0)))
+ return 0x87e088f00008ll + 0x1000000ll * ((a) & 0x3) + 0x10ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=2) && (b==0)))
+ return 0x87e088f00008ll + 0x1000000ll * ((a) & 0x3) + 0x10ll * ((b) & 0x0);
+ __bdk_csr_fatal("LMCX_MSIX_VECX_CTL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_LMCX_MSIX_VECX_CTL(a,b) bdk_lmcx_msix_vecx_ctl_t
+#define bustype_BDK_LMCX_MSIX_VECX_CTL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_MSIX_VECX_CTL(a,b) "LMCX_MSIX_VECX_CTL"
+#define device_bar_BDK_LMCX_MSIX_VECX_CTL(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_LMCX_MSIX_VECX_CTL(a,b) (a)
+#define arguments_BDK_LMCX_MSIX_VECX_CTL(a,b) (a),(b),-1,-1
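+
+/* Usage sketch (editorial illustration, not part of the imported BDK sources):
+ * masking a vector suppresses MSI-X delivery while its pending state remains
+ * visible in LMC()_MSIX_PBA(). Assuming the BDK_CSR_* wrappers:
+ *
+ *     bdk_lmcx_msix_vecx_ctl_t vctl;
+ *     vctl.u = BDK_CSR_READ(node, BDK_LMCX_MSIX_VECX_CTL(lmc, vec));
+ *     vctl.s.mask = 1; // no MSI-X interrupts are sent to this vector
+ *     BDK_CSR_WRITE(node, BDK_LMCX_MSIX_VECX_CTL(lmc, vec), vctl.u);
+ */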
+
+/**
+ * Register (RSL) lmc#_ns_ctl
+ *
+ * LMC Non Secure Control Register
+ * This register contains control parameters for handling nonsecure accesses.
+ */
+union bdk_lmcx_ns_ctl
+{
+ uint64_t u;
+ struct bdk_lmcx_ns_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_26_63 : 38;
+ uint64_t ns_scramble_dis : 1; /**< [ 25: 25](R/W) When set, this field disables data scrambling on nonsecure accesses only.
+ When data scrambling is enabled by setting CONTROL[SCRAMBLE_ENA] to one, this
+ field needs to be cleared to zero in order to enable data scrambling in
+ nonsecure mode. */
+ uint64_t ns_dynamic_dis : 1; /**< [ 24: 24](RO) Reserved. */
+ uint64_t reserved_22_23 : 2;
+ uint64_t adr_offset : 22; /**< [ 21: 0](R/W) Sets the offset to the nonsecure region of the DRAM/L2 address space.
+
+ In 4 LMC mode, this specifies the address offset \<39:22\> for nonsecure transactions.
+
+ In 2 LMC mode, this specifies the address offset \<38:21\> for nonsecure transactions. */
+#else /* Word 0 - Little Endian */
+ uint64_t adr_offset : 22; /**< [ 21: 0](R/W) Sets the offset to the nonsecure region of the DRAM/L2 address space.
+
+ In 4 LMC mode, this specifies the address offset \<39:22\> for nonsecure transactions.
+
+ In 2 LMC mode, this specifies the address offset \<38:21\> for nonsecure transactions. */
+ uint64_t reserved_22_23 : 2;
+ uint64_t ns_dynamic_dis : 1; /**< [ 24: 24](RO) Reserved. */
+ uint64_t ns_scramble_dis : 1; /**< [ 25: 25](R/W) When set, this field disables data scrambling on nonsecure accesses only.
+ When data scrambling is enabled by setting CONTROL[SCRAMBLE_ENA] to one, this
+ field needs to be cleared to zero in order to enable data scrambling in
+ nonsecure mode. */
+ uint64_t reserved_26_63 : 38;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_ns_ctl_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_26_63 : 38;
+ uint64_t ns_scramble_dis : 1; /**< [ 25: 25](R/W) When set, this field disables data scrambling on nonsecure accesses only.
+ When data scrambling is enabled by setting CONTROL[SCRAMBLE_ENA] to one, this
+ field needs to be cleared to zero in order to enable data scrambling in
+ nonsecure mode. */
+ uint64_t ns_dynamic_dis : 1; /**< [ 24: 24](RO) Reserved. */
+ uint64_t reserved_18_23 : 6;
+ uint64_t adr_offset : 18; /**< [ 17: 0](R/W) Sets the offset to the nonsecure region of the DRAM/L2 address space.
+
+ In 4 LMC mode, this specifies the address offset \<39:22\> for nonsecure transactions.
+
+ In 2 LMC mode, this specifies the address offset \<38:21\> for nonsecure transactions. */
+#else /* Word 0 - Little Endian */
+ uint64_t adr_offset : 18; /**< [ 17: 0](R/W) Sets the offset to the nonsecure region of the DRAM/L2 address space.
+
+ In 4 LMC mode, this specifies the address offset \<39:22\> for nonsecure transactions.
+
+ In 2 LMC mode, this specifies the address offset \<38:21\> for nonsecure transactions. */
+ uint64_t reserved_18_23 : 6;
+ uint64_t ns_dynamic_dis : 1; /**< [ 24: 24](RO) Reserved. */
+ uint64_t ns_scramble_dis : 1; /**< [ 25: 25](R/W) When set, this field disables data scrambling on nonsecure accesses only.
+ When data scrambling is enabled by setting CONTROL[SCRAMBLE_ENA] to one, this
+ field needs to be cleared to zero in order to enable data scrambling in
+ nonsecure mode. */
+ uint64_t reserved_26_63 : 38;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_lmcx_ns_ctl_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_26_63 : 38;
+ uint64_t ns_scramble_dis : 1; /**< [ 25: 25](R/W) When set, this field disables data scrambling on nonsecure accesses only.
+ When data scrambling is enabled by setting CONTROL[SCRAMBLE_ENA] to one, this
+ field needs to be cleared to zero in order to enable data scrambling in
+ nonsecure mode. */
+ uint64_t ns_dynamic_dis : 1; /**< [ 24: 24](R/W) Disable optimization that dynamically reduces read latency when there are no
+ longer any secure operations in flight. For diagnostic use only. */
+ uint64_t reserved_22_23 : 2;
+ uint64_t adr_offset : 22; /**< [ 21: 0](R/W) Sets the offset to the nonsecure region of the DRAM/L2 address space.
+
+ In 4 LMC mode, this specifies the address offset \<39:22\> for nonsecure transactions.
+
+ In 2 LMC mode, this specifies the address offset \<38:21\> for nonsecure transactions. */
+#else /* Word 0 - Little Endian */
+ uint64_t adr_offset : 22; /**< [ 21: 0](R/W) Sets the offset to the nonsecure region of the DRAM/L2 address space.
+
+ In 4 LMC mode, this specifies the address offset \<39:22\> for nonsecure transactions.
+
+ In 2 LMC mode, this specifies the address offset \<38:21\> for nonsecure transactions. */
+ uint64_t reserved_22_23 : 2;
+ uint64_t ns_dynamic_dis : 1; /**< [ 24: 24](R/W) Disable optimization that dynamically reduces read latency when there are no
+ longer any secure operations in flight. For diagnostic use only. */
+ uint64_t ns_scramble_dis : 1; /**< [ 25: 25](R/W) When set, this field disables data scrambling on nonsecure accesses only.
+ When data scrambling is enabled by setting CONTROL[SCRAMBLE_ENA] to one, this
+ field needs to be cleared to zero in order to enable data scrambling in
+ nonsecure mode. */
+ uint64_t reserved_26_63 : 38;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_lmcx_ns_ctl_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_26_63 : 38;
+ uint64_t ns_scramble_dis : 1; /**< [ 25: 25](R/W) When set, this field disables data scrambling on nonsecure accesses only.
+ When data scrambling is enabled by setting CONTROL[SCRAMBLE_ENA] to one, this
+ field needs to be cleared to zero in order to enable data scrambling in
+ nonsecure mode. */
+ uint64_t ns_dynamic_dis : 1; /**< [ 24: 24](R/W) Disable optimization that dynamically reduces read latency when there are no
+ longer any secure operations in flight. For diagnostic use only. */
+ uint64_t reserved_18_23 : 6;
+ uint64_t adr_offset : 18; /**< [ 17: 0](R/W) Sets the offset to the nonsecure region of the DRAM/L2 address space.
+
+ In 4 LMC mode, this specifies the address offset \<39:22\> for nonsecure transactions.
+
+ In 2 LMC mode, this specifies the address offset \<38:21\> for nonsecure transactions. */
+#else /* Word 0 - Little Endian */
+ uint64_t adr_offset : 18; /**< [ 17: 0](R/W) Sets the offset to the nonsecure region of the DRAM/L2 address space.
+
+ In 4 LMC mode, this specifies the address offset \<39:22\> for nonsecure transactions.
+
+ In 2 LMC mode, this specifies the address offset \<38:21\> for nonsecure transactions. */
+ uint64_t reserved_18_23 : 6;
+ uint64_t ns_dynamic_dis : 1; /**< [ 24: 24](R/W) Disable optimization that dynamically reduces read latency when there are no
+ longer any secure operations in flight. For diagnostic use only. */
+ uint64_t ns_scramble_dis : 1; /**< [ 25: 25](R/W) When set, this field disables data scrambling on nonsecure accesses only.
+ When data scrambling is enabled by setting CONTROL[SCRAMBLE_ENA] to one, this
+ field needs to be cleared to zero in order to enable data scrambling in
+ nonsecure mode. */
+ uint64_t reserved_26_63 : 38;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_lmcx_ns_ctl_cn81xx cn83xx; */
+ /* struct bdk_lmcx_ns_ctl_cn81xx cn88xxp2; */
+};
+typedef union bdk_lmcx_ns_ctl bdk_lmcx_ns_ctl_t;
+
+static inline uint64_t BDK_LMCX_NS_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_NS_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000178ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000178ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000178ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000178ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_NS_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_NS_CTL(a) bdk_lmcx_ns_ctl_t
+#define bustype_BDK_LMCX_NS_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_NS_CTL(a) "LMCX_NS_CTL"
+#define device_bar_BDK_LMCX_NS_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_NS_CTL(a) (a)
+#define arguments_BDK_LMCX_NS_CTL(a) (a),-1,-1,-1
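+
+/* Usage sketch (editorial illustration, not part of the imported BDK sources):
+ * [ADR_OFFSET] holds the upper address bits of the nonsecure region, so the
+ * programmed value is the region's base offset shifted down to the bit range
+ * given above (<39:22> in 4-LMC mode, <38:21> in 2-LMC mode). Assuming a
+ * hypothetical 1 GB base in 4-LMC mode:
+ *
+ *     uint64_t ns_base = 0x40000000ull; // hypothetical nonsecure base offset
+ *     bdk_lmcx_ns_ctl_t ns;
+ *     ns.u = BDK_CSR_READ(node, BDK_LMCX_NS_CTL(lmc));
+ *     ns.s.adr_offset = ns_base >> 22; // 4-LMC mode placement
+ *     BDK_CSR_WRITE(node, BDK_LMCX_NS_CTL(lmc), ns.u);
+ */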
+
+/**
+ * Register (RSL) lmc#_nxm
+ *
+ * LMC Nonexistent Memory Register
+ * Following is the decoding for mem_msb/rank:
+ * 0x0: mem_msb = mem_adr[25].
+ * 0x1: mem_msb = mem_adr[26].
+ * 0x2: mem_msb = mem_adr[27].
+ * 0x3: mem_msb = mem_adr[28].
+ * 0x4: mem_msb = mem_adr[29].
+ * 0x5: mem_msb = mem_adr[30].
+ * 0x6: mem_msb = mem_adr[31].
+ * 0x7: mem_msb = mem_adr[32].
+ * 0x8: mem_msb = mem_adr[33].
+ * 0x9: mem_msb = mem_adr[34].
+ * 0xA: mem_msb = mem_adr[35].
+ * 0xB: mem_msb = mem_adr[36].
+ * 0xC-0xF = Reserved.
+ *
+ * For example, for a DIMM made of Samsung's K4B1G0846C-ZCF7 1Gb (16M * 8 bit * 8 bank)
+ * parts, the column address width = 10; so with 10b of col, 3b of bus, 3b of bank, row_lsb = 16.
+ * Therefore, row = mem_adr[29:16] and mem_msb = 4.
+ *
+ * Note also that addresses greater than the max defined space (pbank_msb) are also treated as
+ * NXM accesses.
+ */
+union bdk_lmcx_nxm
+{
+ uint64_t u;
+ struct bdk_lmcx_nxm_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t mem_msb_d1_r1 : 4; /**< [ 23: 20](R/W) Reserved.
+ Internal:
+ Maximum row MSB for DIMM1, RANK1/DIMM1 in single ranked.
+ If DIMM1 is dual-sided, this should be set to
+ NXM[MEM_MSB_D1_R0]. If LMC()_CONFIG[RANK_ENA] is cleared, this field is ignored. */
+ uint64_t mem_msb_d1_r0 : 4; /**< [ 19: 16](R/W) Reserved.
+ Internal:
+ Maximum row MSB for DIMM1, RANK0.
+ If DIMM1 contains 3DS DRAMs, this would point to
+ the logical rank's most significant bit. */
+ uint64_t mem_msb_d0_r1 : 4; /**< [ 15: 12](R/W) Maximum row MSB for DIMM0, RANK1/DIMM0 in single ranked.
+ If DIMM0 is dual-sided, this should be set to
+ [MEM_MSB_D0_R0]. If LMC()_CONFIG[RANK_ENA] is cleared, this field is ignored. */
+ uint64_t mem_msb_d0_r0 : 4; /**< [ 11: 8](R/W) Maximum row MSB for DIMM0, RANK0.
+ Internal:
+ If DIMM0 contains 3DS DRAMs, this would point to
+ the logical rank's most significant bit. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t cs_mask : 4; /**< [ 3: 0](R/W) Chip select mask. This mask corresponds to the four chip selects for a memory
+ configuration. If LMC()_CONFIG[RANK_ENA]=0 then this mask must be set in pairs because
+ each reference address will assert a pair of chip selects. If the chip select(s) have a
+ corresponding [CS_MASK] bit set, then the reference is to nonexistent memory (NXM). LMC will
+ alias a NXM read reference to use the lowest, legal chip select(s) and return zeros. LMC
+ normally discards NXM write operations, but will also alias them when
+ LMC()_CONTROL[NXM_WRITE_EN]=1. */
+#else /* Word 0 - Little Endian */
+ uint64_t cs_mask : 4; /**< [ 3: 0](R/W) Chip select mask. This mask corresponds to the four chip selects for a memory
+ configuration. If LMC()_CONFIG[RANK_ENA]=0 then this mask must be set in pairs because
+ each reference address will assert a pair of chip selects. If the chip select(s) have a
+ corresponding [CS_MASK] bit set, then the reference is to nonexistent memory (NXM). LMC will
+ alias a NXM read reference to use the lowest, legal chip select(s) and return zeros. LMC
+ normally discards NXM write operations, but will also alias them when
+ LMC()_CONTROL[NXM_WRITE_EN]=1. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t mem_msb_d0_r0 : 4; /**< [ 11: 8](R/W) Maximum row MSB for DIMM0, RANK0.
+ Internal:
+ If DIMM0 contains 3DS DRAMs, this would point to
+ the logical rank's most significant bit. */
+ uint64_t mem_msb_d0_r1 : 4; /**< [ 15: 12](R/W) Maximum row MSB for DIMM0, RANK1/DIMM0 in single ranked.
+ If DIMM0 is dual-sided, this should be set to
+ [MEM_MSB_D0_R0]. If LMC()_CONFIG[RANK_ENA] is cleared, this field is ignored. */
+ uint64_t mem_msb_d1_r0 : 4; /**< [ 19: 16](R/W) Reserved.
+ Internal:
+ Maximum row MSB for DIMM1, RANK0.
+ If DIMM1 contains 3DS DRAMs, this would point to
+ the logical rank's most significant bit. */
+ uint64_t mem_msb_d1_r1 : 4; /**< [ 23: 20](R/W) Reserved.
+ Internal:
+ Maximum row MSB for DIMM1, RANK1/DIMM1 in single ranked.
+ If DIMM1 is dual-sided, this should be set to
+ NXM[MEM_MSB_D1_R0]. If LMC()_CONFIG[RANK_ENA] is cleared, this field is ignored. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_nxm_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t mem_msb_d1_r1 : 4; /**< [ 23: 20](R/W) Maximum row MSB for DIMM1, RANK1/DIMM1 in single ranked.
+ If DIMM1 is dual-sided, this should be set to
+ NXM[MEM_MSB_D1_R0]. If LMC()_CONFIG[RANK_ENA] is cleared, this field is ignored. */
+ uint64_t mem_msb_d1_r0 : 4; /**< [ 19: 16](R/W) Maximum row MSB for DIMM1, RANK0.
+ Internal:
+ If DIMM1 contains 3DS DRAMs, this would point to
+ the logical rank's most significant bit. */
+ uint64_t mem_msb_d0_r1 : 4; /**< [ 15: 12](R/W) Maximum row MSB for DIMM0, RANK1/DIMM0 in single ranked.
+ If DIMM0 is dual-sided, this should be set to
+ [MEM_MSB_D0_R0]. If LMC()_CONFIG[RANK_ENA] is cleared, this field is ignored. */
+ uint64_t mem_msb_d0_r0 : 4; /**< [ 11: 8](R/W) Maximum row MSB for DIMM0, RANK0.
+ Internal:
+ If DIMM0 contains 3DS DRAMs, this would point to
+ the logical rank's most significant bit. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t cs_mask : 4; /**< [ 3: 0](R/W) Chip select mask. This mask corresponds to the four chip selects for a memory
+ configuration. If LMC()_CONFIG[RANK_ENA]=0 then this mask must be set in pairs because
+ each reference address will assert a pair of chip selects. If the chip select(s) have a
+ corresponding [CS_MASK] bit set, then the reference is to nonexistent memory (NXM). LMC will
+ alias a NXM read reference to use the lowest, legal chip select(s) and return zeros. LMC
+ normally discards NXM write operations, but will also alias them when
+ LMC()_CONTROL[NXM_WRITE_EN]=1. */
+#else /* Word 0 - Little Endian */
+ uint64_t cs_mask : 4; /**< [ 3: 0](R/W) Chip select mask. This mask corresponds to the four chip selects for a memory
+ configuration. If LMC()_CONFIG[RANK_ENA]=0 then this mask must be set in pairs because
+ each reference address will assert a pair of chip selects. If the chip select(s) have a
+ corresponding [CS_MASK] bit set, then the reference is to nonexistent memory (NXM). LMC will
+ alias a NXM read reference to use the lowest, legal chip select(s) and return zeros. LMC
+ normally discards NXM write operations, but will also alias them when
+ LMC()_CONTROL[NXM_WRITE_EN]=1. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t mem_msb_d0_r0 : 4; /**< [ 11: 8](R/W) Maximum row MSB for DIMM0, RANK0.
+ Internal:
+ If DIMM0 contains 3DS DRAMs, this would point to
+ the logical rank's most significant bit. */
+ uint64_t mem_msb_d0_r1 : 4; /**< [ 15: 12](R/W) Maximum row MSB for DIMM0, RANK1/DIMM0 in single ranked.
+ If DIMM0 is dual-sided, this should be set to
+ [MEM_MSB_D0_R0]. If LMC()_CONFIG[RANK_ENA] is cleared, this field is ignored. */
+ uint64_t mem_msb_d1_r0 : 4; /**< [ 19: 16](R/W) Maximum row MSB for DIMM1, RANK0.
+ Internal:
+ If DIMM1 contains 3DS DRAMs, this would point to
+ the logical rank's most significant bit. */
+ uint64_t mem_msb_d1_r1 : 4; /**< [ 23: 20](R/W) Maximum row MSB for DIMM1, RANK1/DIMM1 in single ranked.
+ If DIMM1 is dual-sided, this should be set to
+ NXM[MEM_MSB_D1_R0]. If LMC()_CONFIG[RANK_ENA] is cleared, this field is ignored. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } cn9;
+ /* struct bdk_lmcx_nxm_s cn81xx; */
+ /* struct bdk_lmcx_nxm_cn9 cn88xx; */
+ /* struct bdk_lmcx_nxm_cn9 cn83xx; */
+};
+typedef union bdk_lmcx_nxm bdk_lmcx_nxm_t;
+
+static inline uint64_t BDK_LMCX_NXM(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_NXM(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0880000c8ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0880000c8ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e0880000c8ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e0880000c8ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_NXM", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_NXM(a) bdk_lmcx_nxm_t
+#define bustype_BDK_LMCX_NXM(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_NXM(a) "LMCX_NXM"
+#define device_bar_BDK_LMCX_NXM(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_NXM(a) (a)
+#define arguments_BDK_LMCX_NXM(a) (a),-1,-1,-1
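+
+/* Worked example (editorial illustration, not part of the imported BDK
+ * sources): per the decoding above, an encoding of N selects mem_adr[25 + N]
+ * as the row MSB. For the Samsung DIMM in the description, row =
+ * mem_adr[29:16], so the value follows from row_lsb and the row width:
+ *
+ *     int row_lsb   = 16; // 10b col + 3b bus + 3b bank
+ *     int row_width = 14; // row bits 29:16
+ *     int mem_msb   = (row_lsb + row_width - 1) - 25; // 29 - 25 = 4
+ */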
+
+/**
+ * Register (RSL) lmc#_nxm_fadr
+ *
+ * LMC NXM Failing Address Register
+ * This register captures the first transaction with a NXM error; while the
+ * interrupt is pending, no further events are captured, and a subsequent
+ * event is captured only once the interrupt is cleared by writing a one to
+ * LMC()_INT[NXM_ERR]. It captures the actual L2C-LMC address provided to the
+ * LMC that caused the NXM error. A read or write NXM error is captured only
+ * if enabled using the NXM event enables.
+ */
+union bdk_lmcx_nxm_fadr
+{
+ uint64_t u;
+ struct bdk_lmcx_nxm_fadr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_nxm_fadr_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_40_63 : 24;
+ uint64_t nxm_faddr_ext : 1; /**< [ 39: 39](RO/H) Extended bit for the failing L2C-LMC address (bit 37). */
+ uint64_t nxm_src : 1; /**< [ 38: 38](RO/H) Indicates the source of the operation that caused a NXM error:
+ 0 = L2C, 1 = HFA. */
+ uint64_t nxm_type : 1; /**< [ 37: 37](RO/H) Indicates the type of operation that caused NXM error:
+ 0 = Read, 1 = Write. */
+ uint64_t nxm_faddr : 37; /**< [ 36: 0](RO/H) Failing L2C-LMC address. Bits\<3:0\> are always zeros for an HFA access, and bits\<4:0\> are
+ always 0s for an L2C access. Bits\<5:4\> represent the fill order for an L2C read operation,
+ and the start point within a cache line for a write operation. */
+#else /* Word 0 - Little Endian */
+ uint64_t nxm_faddr : 37; /**< [ 36: 0](RO/H) Failing L2C-LMC address. Bits\<3:0\> are always zeros for an HFA access, and bits\<4:0\> are
+ always 0s for an L2C access. Bits\<5:4\> represent the fill order for an L2C read operation,
+ and the start point within a cache line for a write operation. */
+ uint64_t nxm_type : 1; /**< [ 37: 37](RO/H) Indicates the type of operation that caused NXM error:
+ 0 = Read, 1 = Write. */
+ uint64_t nxm_src : 1; /**< [ 38: 38](RO/H) Indicates the source of the operation that caused a NXM error:
+ 0 = L2C, 1 = HFA. */
+ uint64_t nxm_faddr_ext : 1; /**< [ 39: 39](RO/H) Extended bit for the failing L2C-LMC address (bit 37). */
+ uint64_t reserved_40_63 : 24;
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_lmcx_nxm_fadr_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_46_63 : 18;
+ uint64_t nxm_src : 1; /**< [ 45: 45](RO/H) Indicates the source of the operation that caused a NXM error:
+ 0 = L2C, 1 = HFA. */
+ uint64_t nxm_type : 1; /**< [ 44: 44](RO/H) Indicates the type of operation that caused NXM error:
+ 0 = Read, 1 = Write. */
+ uint64_t reserved_42_43 : 2;
+ uint64_t nxm_faddr : 42; /**< [ 41: 0](RO/H) Failing L2C-LMC address. Bits\<3:0\> are always zeros for an HFA access, and bits\<4:0\> are
+ always 0s for an L2C access. Bits\<5:4\> represent the fill order for an L2C read operation,
+ and the start point within a cache line for a write operation. */
+#else /* Word 0 - Little Endian */
+ uint64_t nxm_faddr : 42; /**< [ 41: 0](RO/H) Failing L2C-LMC address. Bits\<3:0\> are always zeros for an HFA access, and bits\<4:0\> are
+ always 0s for an L2C access. Bits\<5:4\> represent the fill order for an L2C read operation,
+ and the start point within a cache line for a write operation. */
+ uint64_t reserved_42_43 : 2;
+ uint64_t nxm_type : 1; /**< [ 44: 44](RO/H) Indicates the type of operation that caused NXM error:
+ 0 = Read, 1 = Write. */
+ uint64_t nxm_src : 1; /**< [ 45: 45](RO/H) Indicates the source of the operation that caused a NXM error:
+ 0 = L2C, 1 = HFA. */
+ uint64_t reserved_46_63 : 18;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_lmcx_nxm_fadr bdk_lmcx_nxm_fadr_t;
+
+static inline uint64_t BDK_LMCX_NXM_FADR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_NXM_FADR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000028ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000028ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000028ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000028ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_NXM_FADR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_NXM_FADR(a) bdk_lmcx_nxm_fadr_t
+#define bustype_BDK_LMCX_NXM_FADR(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_NXM_FADR(a) "LMCX_NXM_FADR"
+#define device_bar_BDK_LMCX_NXM_FADR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_NXM_FADR(a) (a)
+#define arguments_BDK_LMCX_NXM_FADR(a) (a),-1,-1,-1
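+
+/* Usage sketch (editorial illustration, not part of the imported BDK sources):
+ * decoding a captured failure using the CN9XXX field view above:
+ *
+ *     bdk_lmcx_nxm_fadr_t fadr;
+ *     fadr.u = BDK_CSR_READ(node, BDK_LMCX_NXM_FADR(lmc));
+ *     printf("NXM %s from %s, L2C-LMC address 0x%llx\n",
+ *            fadr.cn9.nxm_type ? "write" : "read",
+ *            fadr.cn9.nxm_src ? "HFA" : "L2C",
+ *            (unsigned long long)fadr.cn9.nxm_faddr);
+ */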
+
+/**
+ * Register (RSL) lmc#_ops_cnt
+ *
+ * LMC OPS Performance Counter Register
+ */
+union bdk_lmcx_ops_cnt
+{
+ uint64_t u;
+ struct bdk_lmcx_ops_cnt_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t opscnt : 64; /**< [ 63: 0](RO/H) Performance counter. A 64-bit counter that increments when the DDR3 data bus is being
+ used.
+ DDR bus utilization = OPSCNT / LMC()_DCLK_CNT. */
+#else /* Word 0 - Little Endian */
+ uint64_t opscnt : 64; /**< [ 63: 0](RO/H) Performance counter. A 64-bit counter that increments when the DDR3 data bus is being
+ used.
+ DDR bus utilization = OPSCNT / LMC()_DCLK_CNT. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_ops_cnt_s cn8; */
+ struct bdk_lmcx_ops_cnt_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t opscnt : 64; /**< [ 63: 0](RO/H) Performance counter. A 64-bit counter that increments when the DDR4 data bus is being
+ used.
+ DDR bus utilization = OPSCNT / LMC()_DCLK_CNT. */
+#else /* Word 0 - Little Endian */
+ uint64_t opscnt : 64; /**< [ 63: 0](RO/H) Performance counter. A 64-bit counter that increments when the DDR4 data bus is being
+ used.
+ DDR bus utilization = OPSCNT / LMC()_DCLK_CNT. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_lmcx_ops_cnt bdk_lmcx_ops_cnt_t;
+
+static inline uint64_t BDK_LMCX_OPS_CNT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_OPS_CNT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0880001d8ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0880001d8ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e0880001d8ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e0880001d8ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_OPS_CNT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_OPS_CNT(a) bdk_lmcx_ops_cnt_t
+#define bustype_BDK_LMCX_OPS_CNT(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_OPS_CNT(a) "LMCX_OPS_CNT"
+#define device_bar_BDK_LMCX_OPS_CNT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_OPS_CNT(a) (a)
+#define arguments_BDK_LMCX_OPS_CNT(a) (a),-1,-1,-1
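+
+/* Usage sketch (editorial illustration, not part of the imported BDK sources):
+ * DDR bus utilization is the ratio given in the field description,
+ * OPSCNT / LMC()_DCLK_CNT, assuming the companion LMC()_DCLK_CNT register
+ * accessor:
+ *
+ *     uint64_t ops  = BDK_CSR_READ(node, BDK_LMCX_OPS_CNT(lmc));
+ *     uint64_t dclk = BDK_CSR_READ(node, BDK_LMCX_DCLK_CNT(lmc));
+ *     int pct = dclk ? (int)(ops * 100 / dclk) : 0; // bus utilization in %
+ */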
+
+/**
+ * Register (RSL) lmc#_phy_ctl
+ *
+ * LMC PHY Control Register
+ */
+union bdk_lmcx_phy_ctl
+{
+ uint64_t u;
+ struct bdk_lmcx_phy_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t rx_vref_sel : 1; /**< [ 63: 63](R/W) Run Vref training twice, sampling from DQ3 and DQ4 sequentially, then use the
+ average of the two as the final Vref training result. */
+ uint64_t double_vref_training : 1; /**< [ 62: 62](R/W) Vref longer training.
+ 0 = Normal training period.
+ 1 = Double training period. */
+ uint64_t phy_dsk_lock_en : 1; /**< [ 61: 61](RO) Reserved. */
+ uint64_t dsk_dbg_load_dis : 1; /**< [ 60: 60](RO) Reserved. */
+ uint64_t dsk_dbg_overwrt_ena : 1; /**< [ 59: 59](RO) Reserved. */
+ uint64_t dsk_dbg_wr_mode : 1; /**< [ 58: 58](RO) Reserved. */
+ uint64_t data_rate_loopback : 1; /**< [ 57: 57](R/W) Reserved.
+ Internal:
+ DQ data rate loopback, working in conjunction with LOOPBACK assertion.
+ When asserted, incoming PRBS at even DQ can be set at the data rate, and the data is
+ looped back out through odd DQ at the same rate.
+ When de-asserted, LOOPBACK assertion works along with [LOOPBACK_POS] to check even
+ DQ against each DQS edge separately. This is done at the clock rate. */
+ uint64_t dq_shallow_loopback : 1; /**< [ 56: 56](R/W) Reserved.
+ Internal:
+ DQ shallow loopback, working in conjunction with LOOPBACK assertion.
+ When asserted, even DQ inputs can be looped back out through their adjacent odd DQ outputs
+ without being flopped by DQS. Make sure LMC()_PHY_CTL[PHY_DSK_BYP] is set and
+ LMC()_PHY_CTL[INT_PHY_LOOPBACK_ENA] is unset. */
+ uint64_t dm_disable : 1; /**< [ 55: 55](R/W) Write to 1 to disable the DRAM data mask feature by having LMC drive a constant value on
+ the DDRX_DQS\<17:9\>_P pins of the chip during write operations. LMC drives a constant 0 in
+ DDR3 and drives a constant 1 in DDR4.
+ Note that setting this field high is NOT allowed when LMC has the write DBI feature turned
+ on (MODEREG_PARAMS3[WR_DBI]=1). */
+ uint64_t reserved_51_54 : 4;
+ uint64_t phy_reset : 1; /**< [ 50: 50](WO) Reserved.
+ Internal:
+ Write one to reset the PHY. This is a one-shot operation that automatically
+ clears to zero. */
+ uint64_t dsk_dbg_rd_complete : 1; /**< [ 49: 49](RO/H) Reserved.
+ Internal:
+ Indicates completion of a read operation, will clear to zero when a read
+ operation is started, then set to one when operation is complete. */
+ uint64_t dsk_dbg_rd_data : 10; /**< [ 48: 39](RO/H) Reserved.
+ Internal:
+ Data from a deskew read operation. Only valid when the
+ LMC()_PHY_CTL[DSK_DBG_RD_COMPLETE] bit is set. */
+ uint64_t dsk_dbg_rd_start : 1; /**< [ 38: 38](WO/H) Reserved.
+ Internal:
+ Write one to start deskew data read operation, will automatically clear
+ to zero. Write to one will also clear the complete bit. */
+ uint64_t dsk_dbg_clk_scaler : 2; /**< [ 37: 36](R/W) Reserved.
+ Internal:
+ Adjust clock toggle rate for reading deskew debug information:
+ 0x0 = Deskew read clock toggles every 4 DCLKs.
+ 0x1 = Deskew read clock toggles every 8 DCLKs.
+ 0x2 = Deskew read clock toggles every 12 DCLKs.
+ 0x3 = Deskew read clock toggles every 16 DCLKs. */
+ uint64_t dsk_dbg_offset : 2; /**< [ 35: 34](R/W) Reserved.
+ Internal:
+ Offset to change delay of deskew debug data return time to LMC from
+ DDR PHY. */
+ uint64_t dsk_dbg_num_bits_sel : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Deskew debug, select number of bits per byte lane.
+ 0 = 8 bits per byte lane, no DBI, no DAC debug.
+ 1 = 10 bits per byte lane, including DBI and DAC. CN81XX needs to be set to this value. */
+ uint64_t dsk_dbg_byte_sel : 4; /**< [ 32: 29](R/W) Reserved.
+ Internal:
+ Deskew debug byte select for read operation. Values 0-3 correspond to
+ byte lanes 0-3, 4 is for ECC, 5-8 are byte lanes 4-7. */
+ uint64_t dsk_dbg_bit_sel : 4; /**< [ 28: 25](R/W) Reserved.
+ Internal:
+ Deskew debug bit select for dsk read operation.
+ 0x0 = DQ0.
+ 0x1 = DQ1.
+ 0x2 = DQ2.
+ 0x3 = DQ3.
+ 0x4 = DAC.
+ 0x5 = DBI.
+ 0x6 = DQ4.
+ 0x7 = DQ5.
+ 0x8 = DQ6.
+ 0x9 = DQ7. */
+ uint64_t dbi_mode_ena : 1; /**< [ 24: 24](R/W) Enable DBI mode for PHY. */
+ uint64_t ddr_error_n_ena : 1; /**< [ 23: 23](R/W) Enable error_alert_n signal for PHY. */
+ uint64_t ref_pin_on : 1; /**< [ 22: 22](R/W) Reserved.
+ Internal:
+ Voltage reference pin enabled. */
+ uint64_t dac_on : 1; /**< [ 21: 21](R/W) Reserved.
+ Internal:
+ PHY DAC on. */
+ uint64_t int_pad_loopback_ena : 1; /**< [ 20: 20](R/W) Reserved.
+ Internal:
+ DDR pad loopback enable. Also must set LMC()_PHY_CTL[PHY_DSK_BYP]
+ when loopback is enabled. */
+ uint64_t int_phy_loopback_ena : 1; /**< [ 19: 19](R/W) Reserved.
+ Internal:
+ PHY loopback enable. */
+ uint64_t phy_dsk_reset : 1; /**< [ 18: 18](R/W) PHY deskew reset. When set, the deskew reset signal goes active if the Vrefint/deskew
+ training sequence is in the idle state. */
+ uint64_t phy_dsk_byp : 1; /**< [ 17: 17](R/W) PHY deskew bypass. */
+ uint64_t phy_pwr_save_disable : 1; /**< [ 16: 16](R/W) DDR PHY power save disable. */
+ uint64_t ten : 1; /**< [ 15: 15](R/W) DDR PHY test enable pin. */
+ uint64_t rx_always_on : 1; /**< [ 14: 14](R/W) Reserved; must be zero.
+ Internal:
+ Set to force read_enable to PHY active all the time.
+ This bit MUST not be set when LMC initialization is in progress. Internal VREF and
+ Deskew training requires normal operation on the dqx/s read_enable signals. */
+ uint64_t reserved_13 : 1;
+ uint64_t ck_tune1 : 1; /**< [ 12: 12](R/W) Reserved; must be zero.
+ Internal:
+ Clock tune. */
+ uint64_t ck_dlyout1 : 4; /**< [ 11: 8](R/W) Reserved; must be zero.
+ Internal:
+ Clock delay out. */
+ uint64_t ck_tune0 : 1; /**< [ 7: 7](R/W) Reserved; must be zero.
+ Internal:
+ Clock tune. */
+ uint64_t ck_dlyout0 : 4; /**< [ 6: 3](R/W) Reserved; must be zero.
+ Internal:
+ Clock delay out. */
+ uint64_t loopback : 1; /**< [ 2: 2](R/W) Reserved; must be zero.
+ Internal:
+ External loopback enable. When asserted, Rx is on at DQS0 and data at even DQ bits
+ are loop-backed out through odd DQ bits. For DQS, when LMC()_PHY_CTL[PHY_DSK_BYP] and
+ LMC()_CONFIG[MODE_X4DEV] are asserted along with LOOPBACK, DQS0 input of a given byte
+ can be loop-backed out through DQS1 of the same byte. For DQ, when
+ LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is asserted, DQ bits are loop-backed out without being
+ flop'd by incoming DQS. When LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is deasserted, DQ bits are
+ loop-backed out after being flop'd by incoming DQS. */
+ uint64_t loopback_pos : 1; /**< [ 1: 1](R/W) Reserved; must be zero.
+ Internal:
+ Loopback pos mode. This works in conjunction with
+ LMC()_PHY_CTL[LOOPBACK] mentioned above. */
+ uint64_t ts_stagger : 1; /**< [ 0: 0](R/W) TS stagger mode. This mode configures output drivers with two-stage drive strength to
+ avoid undershoot issues on the bus when strong drivers are suddenly turned on. When this
+ mode is asserted, CNXXXX will configure output drivers to be weak drivers (60 ohm output
+ impedance) at the first CK cycle, and change drivers to the designated drive strengths
+ specified in LMC()_COMP_CTL2[CMD_CTL], LMC()_COMP_CTL2[CK_CTL],
+ LMC()_COMP_CTL2[DQX_CTL] starting at the following cycle. */
+#else /* Word 0 - Little Endian */
+ uint64_t ts_stagger : 1; /**< [ 0: 0](R/W) TS stagger mode. This mode configures output drivers with two-stage drive strength to
+ avoid undershoot issues on the bus when strong drivers are suddenly turned on. When this
+ mode is asserted, CNXXXX will configure output drivers to be weak drivers (60 ohm output
+ impedance) at the first CK cycle, and change drivers to the designated drive strengths
+ specified in LMC()_COMP_CTL2[CMD_CTL], LMC()_COMP_CTL2[CK_CTL],
+ LMC()_COMP_CTL2[DQX_CTL] starting at the following cycle. */
+ uint64_t loopback_pos : 1; /**< [ 1: 1](R/W) Reserved; must be zero.
+ Internal:
+ Loopback pos mode. This works in conjunction with
+ LMC()_PHY_CTL[LOOPBACK] mentioned above. */
+ uint64_t loopback : 1; /**< [ 2: 2](R/W) Reserved; must be zero.
+ Internal:
+ External loopback enable. When asserted, Rx is on at DQS0 and data at even DQ bits
+ are loop-backed out through odd DQ bits. For DQS, when LMC()_PHY_CTL[PHY_DSK_BYP] and
+ LMC()_CONFIG[MODE_X4DEV] are asserted along with LOOPBACK, DQS0 input of a given byte
+ can be loop-backed out through DQS1 of the same byte. For DQ, when
+ LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is asserted, DQ bits are loop-backed out without being
+ flop'd by incoming DQS. When LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is deasserted, DQ bits are
+ loop-backed out after being flop'd by incoming DQS. */
+ uint64_t ck_dlyout0 : 4; /**< [ 6: 3](R/W) Reserved; must be zero.
+ Internal:
+ Clock delay out. */
+ uint64_t ck_tune0 : 1; /**< [ 7: 7](R/W) Reserved; must be zero.
+ Internal:
+ Clock tune. */
+ uint64_t ck_dlyout1 : 4; /**< [ 11: 8](R/W) Reserved; must be zero.
+ Internal:
+ Clock delay out. */
+ uint64_t ck_tune1 : 1; /**< [ 12: 12](R/W) Reserved; must be zero.
+ Internal:
+ Clock tune. */
+ uint64_t reserved_13 : 1;
+ uint64_t rx_always_on : 1; /**< [ 14: 14](R/W) Reserved; must be zero.
+ Internal:
+ Set to force read_enable to PHY active all the time.
+ This bit MUST not be set when LMC initialization is in progress. Internal VREF and
+ Deskew training requires normal operation on the dqx/s read_enable signals. */
+ uint64_t ten : 1; /**< [ 15: 15](R/W) DDR PHY test enable pin. */
+ uint64_t phy_pwr_save_disable : 1; /**< [ 16: 16](R/W) DDR PHY power save disable. */
+ uint64_t phy_dsk_byp : 1; /**< [ 17: 17](R/W) PHY deskew bypass. */
+ uint64_t phy_dsk_reset : 1; /**< [ 18: 18](R/W) PHY deskew reset. When set, the deskew reset signal goes active if the Vrefint/deskew
+ training sequence is in the idle state. */
+ uint64_t int_phy_loopback_ena : 1; /**< [ 19: 19](R/W) Reserved.
+ Internal:
+ PHY loopback enable. */
+ uint64_t int_pad_loopback_ena : 1; /**< [ 20: 20](R/W) Reserved.
+ Internal:
+ DDR pad loopback enable. Also must set LMC()_PHY_CTL[PHY_DSK_BYP]
+ when loopback is enabled. */
+ uint64_t dac_on : 1; /**< [ 21: 21](R/W) Reserved.
+ Internal:
+ PHY DAC on. */
+ uint64_t ref_pin_on : 1; /**< [ 22: 22](R/W) Reserved.
+ Internal:
+ Voltage reference pin enabled. */
+ uint64_t ddr_error_n_ena : 1; /**< [ 23: 23](R/W) Enable error_alert_n signal for PHY. */
+ uint64_t dbi_mode_ena : 1; /**< [ 24: 24](R/W) Enable DBI mode for PHY. */
+ uint64_t dsk_dbg_bit_sel : 4; /**< [ 28: 25](R/W) Reserved.
+ Internal:
+ Deskew debug bit select for dsk read operation.
+ 0x0 = DQ0.
+ 0x1 = DQ1.
+ 0x2 = DQ2.
+ 0x3 = DQ3.
+ 0x4 = DAC.
+ 0x5 = DBI.
+ 0x6 = DQ4.
+ 0x7 = DQ5.
+ 0x8 = DQ6.
+ 0x9 = DQ7. */
+ uint64_t dsk_dbg_byte_sel : 4; /**< [ 32: 29](R/W) Reserved.
+ Internal:
+ Deskew debug byte select for read operation. Values 0-3 correspond to
+ byte lanes 0-3, 4 is for ECC, 5-8 are byte lanes 4-7. */
+ uint64_t dsk_dbg_num_bits_sel : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Deskew debug, select number of bits per byte lane.
+ 0 = 8 bits per byte lane, no DBI, no DAC debug.
+ 1 = 10 bits per byte lane, including DBI and DAC. CN81XX needs to be set to this value. */
+ uint64_t dsk_dbg_offset : 2; /**< [ 35: 34](R/W) Reserved.
+ Internal:
+ Offset to change delay of deskew debug data return time to LMC from
+ DDR PHY. */
+ uint64_t dsk_dbg_clk_scaler : 2; /**< [ 37: 36](R/W) Reserved.
+ Internal:
+ Adjust clock toggle rate for reading deskew debug information:
+ 0x0 = Deskew read clock toggles every 4 DCLKs.
+ 0x1 = Deskew read clock toggles every 8 DCLKs.
+ 0x2 = Deskew read clock toggles every 12 DCLKs.
+ 0x3 = Deskew read clock toggles every 16 DCLKs. */
+ uint64_t dsk_dbg_rd_start : 1; /**< [ 38: 38](WO/H) Reserved.
+ Internal:
+ Write one to start deskew data read operation, will automatically clear
+ to zero. Write to one will also clear the complete bit. */
+ uint64_t dsk_dbg_rd_data : 10; /**< [ 48: 39](RO/H) Reserved.
+ Internal:
+ Data from a deskew read operation. Only valid when the
+ LMC()_PHY_CTL[DSK_DBG_RD_COMPLETE] bit is set. */
+ uint64_t dsk_dbg_rd_complete : 1; /**< [ 49: 49](RO/H) Reserved.
+ Internal:
+ Indicates completion of a read operation, will clear to zero when a read
+ operation is started, then set to one when operation is complete. */
+ uint64_t phy_reset : 1; /**< [ 50: 50](WO) Reserved.
+ Internal:
+ Write to one to reset the PHY, one-shot operation, will automatically
+ clear to value of zero. */
+ uint64_t reserved_51_54 : 4;
+ uint64_t dm_disable : 1; /**< [ 55: 55](R/W) Write to 1 to disable the DRAM data mask feature by having LMC drive a constant value on
+ the DDRX_DQS\<17:9\>_P pins of the chip during write operations. LMC drives a constant 0 in
+ DDR3 and drives a constant 1 in DDR4.
+ Note that setting this field high is NOT allowed when LMC has the write DBI feature turned
+ on (MODEREG_PARAMS3[WR_DBI]=1). */
+ uint64_t dq_shallow_loopback : 1; /**< [ 56: 56](R/W) Reserved.
+ Internal:
+ DQ shallow loopback, working in conjunction with LOOPBACK assertion.
+ When asserted, even DQ inputs can be loop-backed out through their adjacent odd DQ outputs
+ without being flop'd by DQS. Make sure LMC()_PHY_CTL[PHY_DSK_BYP] is set and
+ LMC()_PHY_CTL[INT_PHY_LOOPBACK_ENA] is unset. */
+ uint64_t data_rate_loopback : 1; /**< [ 57: 57](R/W) Reserved.
+ Internal:
+ DQ data rate loopback, working in conjunction with LOOPBACK assertion.
+ When asserted, incoming PRBS at even DQ can be set at data rate, and the data is
+ looped back out through odd DQ at the same rate.
+ When deasserted, LOOPBACK assertion works along with [LOOPBACK_POS] to check even
+ DQ against each DQS edge separately. This is done at the clock rate. */
+ uint64_t dsk_dbg_wr_mode : 1; /**< [ 58: 58](RO) Reserved. */
+ uint64_t dsk_dbg_overwrt_ena : 1; /**< [ 59: 59](RO) Reserved. */
+ uint64_t dsk_dbg_load_dis : 1; /**< [ 60: 60](RO) Reserved. */
+ uint64_t phy_dsk_lock_en : 1; /**< [ 61: 61](RO) Reserved. */
+ uint64_t double_vref_training : 1; /**< [ 62: 62](R/W) Vref longer training.
+ 0 = Normal training period.
+ 1 = Double training period. */
+ uint64_t rx_vref_sel : 1; /**< [ 63: 63](R/W) Run Vref training twice, sampling from DQ3 and DQ4 sequentially, then use the
+ average of two as the final Vref training result. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_phy_ctl_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_62_63 : 2;
+ uint64_t phy_dsk_lock_en : 1; /**< [ 61: 61](RO) Reserved. */
+ uint64_t dsk_dbg_load_dis : 1; /**< [ 60: 60](RO) Reserved. */
+ uint64_t dsk_dbg_overwrt_ena : 1; /**< [ 59: 59](RO) Reserved. */
+ uint64_t dsk_dbg_wr_mode : 1; /**< [ 58: 58](RO) Reserved. */
+ uint64_t data_rate_loopback : 1; /**< [ 57: 57](RO) Reserved. */
+ uint64_t dq_shallow_loopback : 1; /**< [ 56: 56](RO) Reserved. */
+ uint64_t dm_disable : 1; /**< [ 55: 55](RO) Reserved. */
+ uint64_t c1_sel : 2; /**< [ 54: 53](R/W) Reserved.
+ Internal:
+ 0x0 = C1 is not routed to any output pin.
+ 0x1 = C1 is routed to CS3.
+ 0x2 = C1 is routed to A17 address pin.
+ 0x3 = C1 is not routed to any output pin.
+
+ Set to 0x0 if not interfacing with 3DS DRAM. */
+ uint64_t c0_sel : 2; /**< [ 52: 51](R/W) Reserved.
+ Internal:
+ 0x0 = C0 is not routed to any output pin.
+ 0x1 = C0 is routed to CS2.
+ 0x2 = C0 is routed to TEN output pin.
+ 0x3 = C0 is not routed to any output pin.
+
+ Set to 0x0 if not interfacing with 3DS DRAM. */
+ uint64_t phy_reset : 1; /**< [ 50: 50](WO) Reserved.
+ Internal:
+ Write to one to reset the PHY, one-shot operation, will automatically
+ clear to value of zero. */
+ uint64_t dsk_dbg_rd_complete : 1; /**< [ 49: 49](RO/H) Reserved.
+ Internal:
+ Indicates completion of a read operation, will clear to zero when a read
+ operation is started, then set to one when operation is complete. */
+ uint64_t dsk_dbg_rd_data : 10; /**< [ 48: 39](RO/H) Reserved.
+ Internal:
+ Data from a deskew read operation. Only valid when the
+ LMC()_PHY_CTL[DSK_DBG_RD_COMPLETE] bit is set. */
+ uint64_t dsk_dbg_rd_start : 1; /**< [ 38: 38](WO/H) Reserved.
+ Internal:
+ Write one to start deskew data read operation, will automatically clear
+ to zero. Write to one will also clear the complete bit. */
+ uint64_t dsk_dbg_clk_scaler : 2; /**< [ 37: 36](R/W) Reserved.
+ Internal:
+ Adjust clock toggle rate for reading deskew debug information:
+ 0x0 = Deskew read clock toggles every 4 DCLKs.
+ 0x1 = Deskew read clock toggles every 8 DCLKs.
+ 0x2 = Deskew read clock toggles every 12 DCLKs.
+ 0x3 = Deskew read clock toggles every 16 DCLKs. */
+ uint64_t dsk_dbg_offset : 2; /**< [ 35: 34](R/W) Reserved.
+ Internal:
+ Offset to change delay of deskew debug data return time to LMC from
+ DDR PHY. */
+ uint64_t dsk_dbg_num_bits_sel : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Deskew debug, select number of bits per byte lane.
+ 0 = 8 bits per byte lane, no DBI.
+ 1 = 9 bits per byte lane, including DBI. CN88XX needs to be set to this value. */
+ uint64_t dsk_dbg_byte_sel : 4; /**< [ 32: 29](R/W) Reserved.
+ Internal:
+ Deskew debug byte select for read operation. Values 0-3 correspond to
+ byte lanes 0-3, 4 is for ECC, 5-8 are byte lanes 4-7. */
+ uint64_t dsk_dbg_bit_sel : 4; /**< [ 28: 25](R/W) Reserved.
+ Internal:
+ Deskew debug bit select for dsk read operation.
+ 0x0 = DQ0.
+ 0x1 = DQ1.
+ 0x2 = DQ2.
+ 0x3 = DQ3.
+ 0x4 = DBI.
+ 0x5 = DQ4.
+ 0x6 = DQ5.
+ 0x7 = DQ6.
+ 0x8 = DQ7. */
+ uint64_t dbi_mode_ena : 1; /**< [ 24: 24](R/W) Enable DBI mode for PHY. */
+ uint64_t ddr_error_n_ena : 1; /**< [ 23: 23](R/W) Enable error_alert_n signal for PHY. */
+ uint64_t ref_pin_on : 1; /**< [ 22: 22](R/W) Reserved.
+ Internal:
+ Voltage reference pin enabled. */
+ uint64_t dac_on : 1; /**< [ 21: 21](R/W) Reserved.
+ Internal:
+ PHY DAC on. */
+ uint64_t int_pad_loopback_ena : 1; /**< [ 20: 20](R/W) Reserved.
+ Internal:
+ DDR pad loopback enable. Also must set LMC()_PHY_CTL[PHY_DSK_BYP]
+ when loopback is enabled. */
+ uint64_t int_phy_loopback_ena : 1; /**< [ 19: 19](R/W) Reserved.
+ Internal:
+ PHY loopback enable. */
+ uint64_t phy_dsk_reset : 1; /**< [ 18: 18](R/W) PHY deskew reset. When set, the deskew reset signal goes active if the Vrefint/deskew
+ training sequence is in the idle state. */
+ uint64_t phy_dsk_byp : 1; /**< [ 17: 17](R/W) PHY deskew bypass. */
+ uint64_t phy_pwr_save_disable : 1; /**< [ 16: 16](R/W) DDR PHY power save disable. */
+ uint64_t ten : 1; /**< [ 15: 15](R/W) DDR PHY test enable pin. */
+ uint64_t rx_always_on : 1; /**< [ 14: 14](R/W) Reserved; must be zero.
+ Internal:
+ Set to force read_enable to PHY active all the time.
+ This bit MUST not be set when LMC initialization is in progress. Internal VREF and
+ Deskew training requires normal operation on the dqx/s read_enable signals. */
+ uint64_t lv_mode : 1; /**< [ 13: 13](R/W) Reserved; must be zero.
+ Internal:
+ Low Voltage Mode (1.35 V). */
+ uint64_t ck_tune1 : 1; /**< [ 12: 12](R/W) Reserved; must be zero.
+ Internal:
+ Clock tune. */
+ uint64_t ck_dlyout1 : 4; /**< [ 11: 8](R/W) Reserved; must be zero.
+ Internal:
+ Clock delay out. */
+ uint64_t ck_tune0 : 1; /**< [ 7: 7](R/W) Reserved; must be zero.
+ Internal:
+ Clock tune. */
+ uint64_t ck_dlyout0 : 4; /**< [ 6: 3](R/W) Reserved; must be zero.
+ Internal:
+ Clock delay out. */
+ uint64_t loopback : 1; /**< [ 2: 2](R/W) Reserved; must be zero.
+ Internal:
+ External loopback enable. When asserted, Rx is on at DQS0 and data at even DQ bits
+ are loop-backed out through odd DQ bits. For DQS, when LMC()_PHY_CTL[PHY_DSK_BYP] and
+ LMC()_CONFIG[MODE_X4DEV] are asserted along with LOOPBACK, DQS0 input of a given byte
+ can be loop-backed out through DQS1 of the same byte. For DQ, when
+ LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is asserted, DQ bits are loop-backed out without being
+ flop'd by incoming DQS. When LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is deasserted, DQ bits are
+ loop-backed out after being flop'd by incoming DQS. */
+ uint64_t loopback_pos : 1; /**< [ 1: 1](R/W) Reserved; must be zero.
+ Internal:
+ Loopback pos mode. This works in conjunction with
+ LMC()_PHY_CTL[LOOPBACK] mentioned above. */
+ uint64_t ts_stagger : 1; /**< [ 0: 0](R/W) TS stagger mode. This mode configures output drivers with two-stage drive strength to
+ avoid undershoot issues on the bus when strong drivers are suddenly turned on. When this
+ mode is asserted, CNXXXX will configure output drivers to be weak drivers (60 ohm output
+ impedance) at the first CK cycle, and change drivers to the designated drive strengths
+ specified in LMC()_COMP_CTL2[CMD_CTL], LMC()_COMP_CTL2[CK_CTL],
+ LMC()_COMP_CTL2[DQX_CTL] starting at the following cycle. */
+#else /* Word 0 - Little Endian */
+ uint64_t ts_stagger : 1; /**< [ 0: 0](R/W) TS stagger mode. This mode configures output drivers with two-stage drive strength to
+ avoid undershoot issues on the bus when strong drivers are suddenly turned on. When this
+ mode is asserted, CNXXXX will configure output drivers to be weak drivers (60 ohm output
+ impedance) at the first CK cycle, and change drivers to the designated drive strengths
+ specified in LMC()_COMP_CTL2[CMD_CTL], LMC()_COMP_CTL2[CK_CTL],
+ LMC()_COMP_CTL2[DQX_CTL] starting at the following cycle. */
+ uint64_t loopback_pos : 1; /**< [ 1: 1](R/W) Reserved; must be zero.
+ Internal:
+ Loopback pos mode. This works in conjunction with
+ LMC()_PHY_CTL[LOOPBACK] mentioned above. */
+ uint64_t loopback : 1; /**< [ 2: 2](R/W) Reserved; must be zero.
+ Internal:
+ External loopback enable. When asserted, Rx is on at DQS0 and data at even DQ bits
+ are loop-backed out through odd DQ bits. For DQS, when LMC()_PHY_CTL[PHY_DSK_BYP] and
+ LMC()_CONFIG[MODE_X4DEV] are asserted along with LOOPBACK, DQS0 input of a given byte
+ can be loop-backed out through DQS1 of the same byte. For DQ, when
+ LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is asserted, DQ bits are loop-backed out without being
+ flop'd by incoming DQS. When LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is deasserted, DQ bits are
+ loop-backed out after being flop'd by incoming DQS. */
+ uint64_t ck_dlyout0 : 4; /**< [ 6: 3](R/W) Reserved; must be zero.
+ Internal:
+ Clock delay out. */
+ uint64_t ck_tune0 : 1; /**< [ 7: 7](R/W) Reserved; must be zero.
+ Internal:
+ Clock tune. */
+ uint64_t ck_dlyout1 : 4; /**< [ 11: 8](R/W) Reserved; must be zero.
+ Internal:
+ Clock delay out. */
+ uint64_t ck_tune1 : 1; /**< [ 12: 12](R/W) Reserved; must be zero.
+ Internal:
+ Clock tune. */
+ uint64_t lv_mode : 1; /**< [ 13: 13](R/W) Reserved; must be zero.
+ Internal:
+ Low Voltage Mode (1.35 V). */
+ uint64_t rx_always_on : 1; /**< [ 14: 14](R/W) Reserved; must be zero.
+ Internal:
+ Set to force read_enable to PHY active all the time.
+ This bit MUST not be set when LMC initialization is in progress. Internal VREF and
+ Deskew training requires normal operation on the dqx/s read_enable signals. */
+ uint64_t ten : 1; /**< [ 15: 15](R/W) DDR PHY test enable pin. */
+ uint64_t phy_pwr_save_disable : 1; /**< [ 16: 16](R/W) DDR PHY power save disable. */
+ uint64_t phy_dsk_byp : 1; /**< [ 17: 17](R/W) PHY deskew bypass. */
+ uint64_t phy_dsk_reset : 1; /**< [ 18: 18](R/W) PHY deskew reset. When set, the deskew reset signal goes active if the Vrefint/deskew
+ training sequence is in the idle state. */
+ uint64_t int_phy_loopback_ena : 1; /**< [ 19: 19](R/W) Reserved.
+ Internal:
+ PHY loopback enable. */
+ uint64_t int_pad_loopback_ena : 1; /**< [ 20: 20](R/W) Reserved.
+ Internal:
+ DDR pad loopback enable. Also must set LMC()_PHY_CTL[PHY_DSK_BYP]
+ when loopback is enabled. */
+ uint64_t dac_on : 1; /**< [ 21: 21](R/W) Reserved.
+ Internal:
+ PHY DAC on. */
+ uint64_t ref_pin_on : 1; /**< [ 22: 22](R/W) Reserved.
+ Internal:
+ Voltage reference pin enabled. */
+ uint64_t ddr_error_n_ena : 1; /**< [ 23: 23](R/W) Enable error_alert_n signal for PHY. */
+ uint64_t dbi_mode_ena : 1; /**< [ 24: 24](R/W) Enable DBI mode for PHY. */
+ uint64_t dsk_dbg_bit_sel : 4; /**< [ 28: 25](R/W) Reserved.
+ Internal:
+ Deskew debug bit select for dsk read operation.
+ 0x0 = DQ0.
+ 0x1 = DQ1.
+ 0x2 = DQ2.
+ 0x3 = DQ3.
+ 0x4 = DBI.
+ 0x5 = DQ4.
+ 0x6 = DQ5.
+ 0x7 = DQ6.
+ 0x8 = DQ7. */
+ uint64_t dsk_dbg_byte_sel : 4; /**< [ 32: 29](R/W) Reserved.
+ Internal:
+ Deskew debug byte select for read operation. Values 0-3 correspond to
+ byte lanes 0-3, 4 is for ECC, 5-8 are byte lanes 4-7. */
+ uint64_t dsk_dbg_num_bits_sel : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Deskew debug, select number of bits per byte lane.
+ 0 = 8 bits per byte lane, no DBI.
+ 1 = 9 bits per byte lane, including DBI. CN88XX needs to be set to this value. */
+ uint64_t dsk_dbg_offset : 2; /**< [ 35: 34](R/W) Reserved.
+ Internal:
+ Offset to change delay of deskew debug data return time to LMC from
+ DDR PHY. */
+ uint64_t dsk_dbg_clk_scaler : 2; /**< [ 37: 36](R/W) Reserved.
+ Internal:
+ Adjust clock toggle rate for reading deskew debug information:
+ 0x0 = Deskew read clock toggles every 4 DCLKs.
+ 0x1 = Deskew read clock toggles every 8 DCLKs.
+ 0x2 = Deskew read clock toggles every 12 DCLKs.
+ 0x3 = Deskew read clock toggles every 16 DCLKs. */
+ uint64_t dsk_dbg_rd_start : 1; /**< [ 38: 38](WO/H) Reserved.
+ Internal:
+ Write one to start deskew data read operation, will automatically clear
+ to zero. Write to one will also clear the complete bit. */
+ uint64_t dsk_dbg_rd_data : 10; /**< [ 48: 39](RO/H) Reserved.
+ Internal:
+ Data from a deskew read operation. Only valid when the
+ LMC()_PHY_CTL[DSK_DBG_RD_COMPLETE] bit is set. */
+ uint64_t dsk_dbg_rd_complete : 1; /**< [ 49: 49](RO/H) Reserved.
+ Internal:
+ Indicates completion of a read operation, will clear to zero when a read
+ operation is started, then set to one when operation is complete. */
+ uint64_t phy_reset : 1; /**< [ 50: 50](WO) Reserved.
+ Internal:
+ Write to one to reset the PHY, one-shot operation, will automatically
+ clear to value of zero. */
+ uint64_t c0_sel : 2; /**< [ 52: 51](R/W) Reserved.
+ Internal:
+ 0x0 = C0 is not routed to any output pin.
+ 0x1 = C0 is routed to CS2.
+ 0x2 = C0 is routed to TEN output pin.
+ 0x3 = C0 is not routed to any output pin.
+
+ Set to 0x0 if not interfacing with 3DS DRAM. */
+ uint64_t c1_sel : 2; /**< [ 54: 53](R/W) Reserved.
+ Internal:
+ 0x0 = C1 is not routed to any output pin.
+ 0x1 = C1 is routed to CS3.
+ 0x2 = C1 is routed to A17 address pin.
+ 0x3 = C1 is not routed to any output pin.
+
+ Set to 0x0 if not interfacing with 3DS DRAM. */
+ uint64_t dm_disable : 1; /**< [ 55: 55](RO) Reserved. */
+ uint64_t dq_shallow_loopback : 1; /**< [ 56: 56](RO) Reserved. */
+ uint64_t data_rate_loopback : 1; /**< [ 57: 57](RO) Reserved. */
+ uint64_t dsk_dbg_wr_mode : 1; /**< [ 58: 58](RO) Reserved. */
+ uint64_t dsk_dbg_overwrt_ena : 1; /**< [ 59: 59](RO) Reserved. */
+ uint64_t dsk_dbg_load_dis : 1; /**< [ 60: 60](RO) Reserved. */
+ uint64_t phy_dsk_lock_en : 1; /**< [ 61: 61](RO) Reserved. */
+ uint64_t reserved_62_63 : 2;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_lmcx_phy_ctl_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t rx_vref_sel : 1; /**< [ 63: 63](R/W) Run Vref training twice, sampling from DQ3 and DQ4 sequentially, then use the
+ average of two as the final Vref training result. */
+ uint64_t double_vref_training : 1; /**< [ 62: 62](R/W) Vref longer training.
+ 0 = Normal training period.
+ 1 = Double training period. */
+ uint64_t phy_dsk_lock_en : 1; /**< [ 61: 61](R/W) When set, the PHY attempts to lock all DQ/DBI bit deskew settings once alignment is
+ achieved.
+
+ When clear, LMC disengages the PHY bit deskew lock control mechanism. This
+ causes the PHY to continuously perform and/or adjust the read deskew training on
+ all DQ/DBI bits during any read operations. */
+ uint64_t dsk_dbg_load_dis : 1; /**< [ 60: 60](R/W) Reserved.
+ Internal:
+ When set, LMC prevents PHY from loading the deskew shift
+ registers with its internal settings. When the read deskew sequence is kicked off
+ by setting [DSK_DBG_RD_START] = 1 and [DSK_DBG_WR_MODE] = 0, this field determines
+ whether or not to load the shift register with PHY's internal settings before
+ the shifting process. */
+ uint64_t dsk_dbg_overwrt_ena : 1; /**< [ 59: 59](R/W) Reserved.
+ Internal:
+ When set high, PHY selects all of the preloaded data
+ when configuring the read deskew settings. */
+ uint64_t dsk_dbg_wr_mode : 1; /**< [ 58: 58](R/W) Reserved.
+ Internal:
+ When set high along with [DSK_DBG_RD_START], LMC kicks off the deskew
+ overwrite sequence to shift out a 10-bit setting for a single DQ.
+ Note that there are a total of 9 bytes and the chain structure is split into two
+ halves such that the top chain covers bytes 7,6,5,4 and the bottom chain covers bytes
+ ECC,3,2,1,0.
+ Each byte has 10 DQs (DQ7,DQ6,DQ5,DQ4,DBI,DAC,DQ3,DQ2,DQ1,DQ0) and each
+ DQ has a 10-bit deskew setting. */
+ uint64_t data_rate_loopback : 1; /**< [ 57: 57](R/W) Reserved.
+ Internal:
+ DQ data rate loopback, working in conjunction with LOOPBACK assertion.
+ When asserted, incoming PRBS at even DQ can be set at data rate, and the data is
+ looped back out through odd DQ at the same rate.
+ When deasserted, LOOPBACK assertion works along with [LOOPBACK_POS] to check even
+ DQ against each DQS edge separately. This is done at the clock rate. */
+ uint64_t dq_shallow_loopback : 1; /**< [ 56: 56](R/W) Reserved.
+ Internal:
+ DQ shallow loopback, working in conjunction with LOOPBACK assertion.
+ When asserted, even DQ inputs can be loop-backed out through their adjacent odd DQ outputs
+ without being flop'd by DQS. Make sure LMC()_PHY_CTL[PHY_DSK_BYP] is set and
+ LMC()_PHY_CTL[INT_PHY_LOOPBACK_ENA] is unset. */
+ uint64_t dm_disable : 1; /**< [ 55: 55](R/W) Write to one to disable the DRAM data mask feature by having LMC drive a constant value
+ on the DDR_DQS\<17:9\>_P pins of the chip during write operations. LMC drives a constant one.
+ Note that setting this field high is NOT allowed when LMC has the write DBI feature turned
+ on (LMC()_MODEREG_PARAMS3[WR_DBI]=1). */
+ uint64_t reserved_54 : 1;
+ uint64_t c1_cs3_switch : 1; /**< [ 53: 53](R/W) 0 = Routes C1 data to the C1 output pin, and
+ routes CS3 data to the CS3 output pin.
+
+ 1 = Routes C1 data to the CS3 output pin, and
+ routes CS3 data to the C1 output pin. */
+ uint64_t c0_cs2_switch : 1; /**< [ 52: 52](R/W) 0 = Routes C0/TEN data to the C0/TEN output pin, and
+ routes CS2 data to the CS2 output pin.
+
+ 1 = Routes C0/TEN data to the CS2 output pin, and
+ routes CS2 data to the C0/TEN output pin. */
+ uint64_t ten_sel : 1; /**< [ 51: 51](R/W) DDR PHY test enable select signal. When asserted, LMC drives C0/TEN pin with the value set
+ in LMC()_PHY_CTL[TEN] as part of any commands being sent out. */
+ uint64_t phy_reset : 1; /**< [ 50: 50](WO) Reserved.
+ Internal:
+ Write to one to reset the PHY, one-shot operation, will automatically
+ clear to value of zero. */
+ uint64_t dsk_dbg_rd_complete : 1; /**< [ 49: 49](RO/H) Reserved.
+ Internal:
+ Indicates completion of a read operation, will clear to zero when a read
+ operation is started, then set to one when operation is complete. */
+ uint64_t dsk_dbg_rd_data : 10; /**< [ 48: 39](RO/H) Reserved.
+ Internal:
+ Data from a deskew read operation. Only valid when the
+ LMC()_PHY_CTL[DSK_DBG_RD_COMPLETE] bit is set. */
+ uint64_t dsk_dbg_rd_start : 1; /**< [ 38: 38](WO/H) Reserved.
+ Internal:
+ Write one to start deskew data read operation, will automatically clear
+ to zero. Write to one will also clear the complete bit. */
+ uint64_t dsk_dbg_clk_scaler : 2; /**< [ 37: 36](R/W) Reserved.
+ Internal:
+ Adjust clock toggle rate for reading deskew debug information:
+ 0x0 = Deskew read clock toggles every 4 DCLKs.
+ 0x1 = Deskew read clock toggles every 8 DCLKs.
+ 0x2 = Deskew read clock toggles every 12 DCLKs.
+ 0x3 = Deskew read clock toggles every 16 DCLKs. */
+ uint64_t dsk_dbg_offset : 2; /**< [ 35: 34](R/W) Reserved.
+ Internal:
+ Offset to change delay of deskew debug data return time to LMC from
+ DDR PHY. */
+ uint64_t dsk_dbg_num_bits_sel : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Deskew debug, select number of bits per byte lane.
+ 0 = 8 bits per byte lane, no DBI, no DAC debug.
+ 1 = 10 bits per byte lane, including DBI and DAC. CN83XX needs to be set to this value. */
+ uint64_t dsk_dbg_byte_sel : 4; /**< [ 32: 29](R/W) Reserved.
+ Internal:
+ Deskew debug byte select for read operation. Values 0-3 correspond to
+ byte lanes 0-3, 4 is for ECC, 5-8 are byte lanes 4-7. */
+ uint64_t dsk_dbg_bit_sel : 4; /**< [ 28: 25](R/W) Reserved.
+ Internal:
+ Deskew debug bit select for dsk read operation.
+ 0x0 = DQ0.
+ 0x1 = DQ1.
+ 0x2 = DQ2.
+ 0x3 = DQ3.
+ 0x4 = DAC.
+ 0x5 = DBI/DQS1.
+ 0x6 = DQ4.
+ 0x7 = DQ5.
+ 0x8 = DQ6.
+ 0x9 = DQ7. */
+ uint64_t dbi_mode_ena : 1; /**< [ 24: 24](R/W) Enable DBI mode for PHY. */
+ uint64_t ddr_error_n_ena : 1; /**< [ 23: 23](R/W) Enable error_alert_n signal for PHY. */
+ uint64_t ref_pin_on : 1; /**< [ 22: 22](R/W) Reserved.
+ Internal:
+ Voltage reference pin enabled. */
+ uint64_t dac_on : 1; /**< [ 21: 21](R/W) Reserved.
+ Internal:
+ PHY DAC on. */
+ uint64_t int_pad_loopback_ena : 1; /**< [ 20: 20](R/W) Reserved.
+ Internal:
+ DDR pad loopback enable. Also must set LMC()_PHY_CTL[PHY_DSK_BYP]
+ when loopback is enabled. */
+ uint64_t int_phy_loopback_ena : 1; /**< [ 19: 19](R/W) Reserved.
+ Internal:
+ PHY loopback enable. */
+ uint64_t phy_dsk_reset : 1; /**< [ 18: 18](R/W) PHY deskew reset. When set, the deskew reset signal goes active if the Vrefint/deskew
+ training sequence is in the idle state. */
+ uint64_t phy_dsk_byp : 1; /**< [ 17: 17](R/W) PHY deskew bypass. */
+ uint64_t phy_pwr_save_disable : 1; /**< [ 16: 16](R/W) DDR PHY power save disable. */
+ uint64_t ten : 1; /**< [ 15: 15](R/W) DDR PHY test enable pin. */
+ uint64_t rx_always_on : 1; /**< [ 14: 14](R/W) Reserved; must be zero.
+ Internal:
+ Set to force read_enable to PHY active all the time.
+ This bit MUST not be set when LMC initialization is in progress. Internal VREF and
+ Deskew training requires normal operation on the dqx/s read_enable signals. */
+ uint64_t dqs1_loopback : 1; /**< [ 13: 13](R/W) Reserved; must be zero.
+ Internal:
+ The same as [LOOPBACK] except DQS1 is loopbacked through DQS0. */
+ uint64_t ck_tune1 : 1; /**< [ 12: 12](R/W) Reserved; must be zero.
+ Internal:
+ Clock tune. */
+ uint64_t ck_dlyout1 : 4; /**< [ 11: 8](R/W) Reserved; must be zero.
+ Internal:
+ Clock delay out. */
+ uint64_t ck_tune0 : 1; /**< [ 7: 7](R/W) Reserved; must be zero.
+ Internal:
+ Clock tune. */
+ uint64_t ck_dlyout0 : 4; /**< [ 6: 3](R/W) Reserved; must be zero.
+ Internal:
+ Clock delay out. */
+ uint64_t loopback : 1; /**< [ 2: 2](R/W) Reserved; must be zero.
+ Internal:
+ External loopback enable. When asserted, Rx is on at DQS0 and data at even DQ bits
+ are loop-backed out through odd DQ bits. For DQS, when LMC()_PHY_CTL[PHY_DSK_BYP] and
+ LMC()_CONFIG[MODE_X4DEV] are asserted along with LOOPBACK, DQS0 input of a given byte
+ can be loop-backed out through DQS1 of the same byte. For DQ, when
+ LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is asserted, DQ bits are loop-backed out without being
+ flop'd by incoming DQS. When LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is deasserted, DQ bits are
+ loop-backed out after being flop'd by incoming DQS. */
+ uint64_t loopback_pos : 1; /**< [ 1: 1](R/W) Reserved; must be zero.
+ Internal:
+ Loopback pos mode. This works in conjunction with
+ LMC()_PHY_CTL[LOOPBACK] mentioned above. */
+ uint64_t ts_stagger : 1; /**< [ 0: 0](R/W) TS stagger mode. This mode configures output drivers with two-stage drive strength to
+ avoid undershoot issues on the bus when strong drivers are suddenly turned on. When this
+ mode is asserted, CNXXXX will configure output drivers to be weak drivers (60 ohm output
+ impedance) at the first CK cycle, and change drivers to the designated drive strengths
+ specified in LMC()_COMP_CTL2[CMD_CTL], LMC()_COMP_CTL2[CK_CTL],
+ LMC()_COMP_CTL2[DQX_CTL] starting at the following cycle. */
+#else /* Word 0 - Little Endian */
+ uint64_t ts_stagger : 1; /**< [ 0: 0](R/W) TS stagger mode. This mode configures output drivers with two-stage drive strength to
+ avoid undershoot issues on the bus when strong drivers are suddenly turned on. When this
+ mode is asserted, CNXXXX will configure output drivers to be weak drivers (60 ohm output
+ impedance) at the first CK cycle, and change drivers to the designated drive strengths
+ specified in LMC()_COMP_CTL2[CMD_CTL], LMC()_COMP_CTL2[CK_CTL],
+ LMC()_COMP_CTL2[DQX_CTL] starting at the following cycle. */
+ uint64_t loopback_pos : 1; /**< [ 1: 1](R/W) Reserved; must be zero.
+ Internal:
+ Loopback pos mode. This works in conjunction with
+ LMC()_PHY_CTL[LOOPBACK] mentioned above. */
+ uint64_t loopback : 1; /**< [ 2: 2](R/W) Reserved; must be zero.
+ Internal:
+ External loopback enable. When asserted, Rx is on at DQS0 and data at even DQ bits
+ are loop-backed out through odd DQ bits. For DQS, when LMC()_PHY_CTL[PHY_DSK_BYP] and
+ LMC()_CONFIG[MODE_X4DEV] are asserted along with LOOPBACK, DQS0 input of a given byte
+ can be loop-backed out through DQS1 of the same byte. For DQ, when
+ LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is asserted, DQ bits are loop-backed out without being
+ flop'd by incoming DQS. When LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is deasserted, DQ bits are
+ loop-backed out after being flop'd by incoming DQS. */
+ uint64_t ck_dlyout0 : 4; /**< [ 6: 3](R/W) Reserved; must be zero.
+ Internal:
+ Clock delay out. */
+ uint64_t ck_tune0 : 1; /**< [ 7: 7](R/W) Reserved; must be zero.
+ Internal:
+ Clock tune. */
+ uint64_t ck_dlyout1 : 4; /**< [ 11: 8](R/W) Reserved; must be zero.
+ Internal:
+ Clock delay out. */
+ uint64_t ck_tune1 : 1; /**< [ 12: 12](R/W) Reserved; must be zero.
+ Internal:
+ Clock tune. */
+ uint64_t dqs1_loopback : 1; /**< [ 13: 13](R/W) Reserved; must be zero.
+ Internal:
+ The same as [LOOPBACK] except DQS1 is loopbacked through DQS0. */
+ uint64_t rx_always_on : 1; /**< [ 14: 14](R/W) Reserved; must be zero.
+ Internal:
+ Set to force read_enable to PHY active all the time.
+ This bit MUST not be set when LMC initialization is in progress. Internal VREF and
+ Deskew training requires normal operation on the dqx/s read_enable signals. */
+ uint64_t ten : 1; /**< [ 15: 15](R/W) DDR PHY test enable pin. */
+ uint64_t phy_pwr_save_disable : 1; /**< [ 16: 16](R/W) DDR PHY power save disable. */
+ uint64_t phy_dsk_byp : 1; /**< [ 17: 17](R/W) PHY deskew bypass. */
+ uint64_t phy_dsk_reset : 1; /**< [ 18: 18](R/W) PHY deskew reset. When set, the deskew reset signal goes active if the Vrefint/deskew
+ training sequence is in the idle state. */
+ uint64_t int_phy_loopback_ena : 1; /**< [ 19: 19](R/W) Reserved.
+ Internal:
+ PHY loopback enable. */
+ uint64_t int_pad_loopback_ena : 1; /**< [ 20: 20](R/W) Reserved.
+ Internal:
+ DDR pad loopback enable. Also must set LMC()_PHY_CTL[PHY_DSK_BYP]
+ when loopback is enabled. */
+ uint64_t dac_on : 1; /**< [ 21: 21](R/W) Reserved.
+ Internal:
+ PHY DAC on. */
+ uint64_t ref_pin_on : 1; /**< [ 22: 22](R/W) Reserved.
+ Internal:
+ Voltage reference pin enabled. */
+ uint64_t ddr_error_n_ena : 1; /**< [ 23: 23](R/W) Enable error_alert_n signal for PHY. */
+ uint64_t dbi_mode_ena : 1; /**< [ 24: 24](R/W) Enable DBI mode for PHY. */
+ uint64_t dsk_dbg_bit_sel : 4; /**< [ 28: 25](R/W) Reserved.
+ Internal:
+ Deskew debug bit select for dsk read operation.
+ 0x0 = DQ0.
+ 0x1 = DQ1.
+ 0x2 = DQ2.
+ 0x3 = DQ3.
+ 0x4 = DAC.
+ 0x5 = DBI/DQS1.
+ 0x6 = DQ4.
+ 0x7 = DQ5.
+ 0x8 = DQ6.
+ 0x9 = DQ7. */
+ uint64_t dsk_dbg_byte_sel : 4; /**< [ 32: 29](R/W) Reserved.
+ Internal:
+ Deskew debug byte select for read operation. Values 0-3 correspond to
+ byte lanes 0-3, 4 is for ECC, 5-8 are byte lanes 4-7. */
+ uint64_t dsk_dbg_num_bits_sel : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Deskew debug, select number of bits per byte lane.
+ 0 = 8 bits per byte lane, no DBI, no DAC debug.
+ 1 = 10 bits per byte lane, including DBI and DAC. CN83XX needs to be set to this value. */
+ uint64_t dsk_dbg_offset : 2; /**< [ 35: 34](R/W) Reserved.
+ Internal:
+ Offset to change delay of deskew debug data return time to LMC from
+ DDR PHY. */
+ uint64_t dsk_dbg_clk_scaler : 2; /**< [ 37: 36](R/W) Reserved.
+ Internal:
+ Adjust clock toggle rate for reading deskew debug information:
+ 0x0 = Deskew read clock toggles every 4 DCLKs.
+ 0x1 = Deskew read clock toggles every 8 DCLKs.
+ 0x2 = Deskew read clock toggles every 12 DCLKs.
+ 0x3 = Deskew read clock toggles every 16 DCLKs. */
+ uint64_t dsk_dbg_rd_start : 1; /**< [ 38: 38](WO/H) Reserved.
+ Internal:
+ Write one to start deskew data read operation, will automatically clear
+ to zero. Write to one will also clear the complete bit. */
+ uint64_t dsk_dbg_rd_data : 10; /**< [ 48: 39](RO/H) Reserved.
+ Internal:
+ Data from a deskew read operation. Only valid when the
+ LMC()_PHY_CTL[DSK_DBG_RD_COMPLETE] bit is set. */
+ uint64_t dsk_dbg_rd_complete : 1; /**< [ 49: 49](RO/H) Reserved.
+ Internal:
+ Indicates completion of a read operation, will clear to zero when a read
+ operation is started, then set to one when operation is complete. */
+ uint64_t phy_reset : 1; /**< [ 50: 50](WO) Reserved.
+ Internal:
+ Write to one to reset the PHY, one-shot operation, will automatically
+ clear to value of zero. */
+ uint64_t ten_sel : 1; /**< [ 51: 51](R/W) DDR PHY test enable select signal. When asserted, LMC drives C0/TEN pin with the value set
+ in LMC()_PHY_CTL[TEN] as part of any commands being sent out. */
+ uint64_t c0_cs2_switch : 1; /**< [ 52: 52](R/W) 0 = Routes C0/TEN data to the C0/TEN output pin, and
+ routes CS2 data to the CS2 output pin.
+
+ 1 = Routes C0/TEN data to the CS2 output pin, and
+ routes CS2 data to the C0/TEN output pin. */
+ uint64_t c1_cs3_switch : 1; /**< [ 53: 53](R/W) 0 = Routes C1 data to the C1 output pin, and
+ routes CS3 data to the CS3 output pin.
+
+ 1 = Routes C1 data to the CS3 output pin, and
+ routes CS3 data to the C1 output pin. */
+ uint64_t reserved_54 : 1;
+ uint64_t dm_disable : 1; /**< [ 55: 55](R/W) Write to one to disable the DRAM data mask feature by having LMC drive a constant value
+ on the DDR_DQS\<17:9\>_P pins of the chip during write operations. LMC drives a constant one.
+ Note that setting this field high is NOT allowed when LMC has the write DBI feature turned
+ on (LMC()_MODEREG_PARAMS3[WR_DBI]=1). */
+ uint64_t dq_shallow_loopback : 1; /**< [ 56: 56](R/W) Reserved.
+ Internal:
+ DQ shallow loopback, working in conjunction with LOOPBACK assertion.
+ When asserted, even DQ inputs can be loop-backed out through their adjacent odd DQ outputs
+ without being flop'd by DQS. Make sure LMC()_PHY_CTL[PHY_DSK_BYP] is set and
+ LMC()_PHY_CTL[INT_PHY_LOOPBACK_ENA] is unset. */
+ uint64_t data_rate_loopback : 1; /**< [ 57: 57](R/W) Reserved.
+ Internal:
+ DQ data rate loopback, working in conjunction with LOOPBACK assertion.
+ When asserted, incoming PRBS at even DQ can be set at data rate, and the data is
+ looped back out through odd DQ at the same rate.
+ When deasserted, LOOPBACK assertion works along with [LOOPBACK_POS] to check even
+ DQ against each DQS edge separately. This is done at the clock rate. */
+ uint64_t dsk_dbg_wr_mode : 1; /**< [ 58: 58](R/W) Reserved.
+ Internal:
+ When set high along with [DSK_DBG_RD_START], LMC kicks off the deskew
+ overwrite sequence to shift out a 10-bit setting for a single DQ.
+ Note that there are a total of 9 bytes and the chain structure is split into two
+ halves such that the top chain covers bytes 7,6,5,4 and the bottom chain covers bytes
+ ECC,3,2,1,0.
+ Each byte has 10 DQs (DQ7,DQ6,DQ5,DQ4,DBI,DAC,DQ3,DQ2,DQ1,DQ0) and each
+ DQ has a 10-bit deskew setting. */
+ uint64_t dsk_dbg_overwrt_ena : 1; /**< [ 59: 59](R/W) Reserved.
+ Internal:
+ When set high, PHY selects all of the preloaded data
+ when configuring the read deskew settings. */
+ uint64_t dsk_dbg_load_dis : 1; /**< [ 60: 60](R/W) Reserved.
+ Internal:
+ When set, LMC prevents PHY from loading the deskew shift
+ registers with its internal settings. When the read deskew sequence is kicked off
+ by setting [DSK_DBG_RD_START] = 1 and [DSK_DBG_WR_MODE] = 0, this field determines
+ whether or not to load the shift register with PHY's internal settings before
+ the shifting process. */
+ uint64_t phy_dsk_lock_en : 1; /**< [ 61: 61](R/W) When set, the PHY attempts to lock all DQ/DBI bit deskew settings once alignment is
+ achieved.
+
+ When clear, LMC disengages the PHY bit deskew lock control mechanism. This
+ causes the PHY to continuously perform and/or adjust the read deskew training on
+ all DQ/DBI bits during any read operations. */
+ uint64_t double_vref_training : 1; /**< [ 62: 62](R/W) Vref longer training.
+ 0 = Normal training period.
+ 1 = Double training period. */
+ uint64_t rx_vref_sel : 1; /**< [ 63: 63](R/W) Run Vref training twice, sampling from DQ3 and DQ4 sequentially, then use the
+ average of two as the final Vref training result. */
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_lmcx_phy_ctl_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_62_63 : 2;
+ uint64_t phy_dsk_lock_en : 1; /**< [ 61: 61](RO) Reserved. */
+ uint64_t dsk_dbg_load_dis : 1; /**< [ 60: 60](R/W) Reserved.
+ Internal:
+ When set, LMC prevents PHY from loading the deskew shift
+ registers with its internal settings. When the read deskew sequence is kicked off
+ by setting [DSK_DBG_RD_START] = 1 and [DSK_DBG_WR_MODE] = 0, this field determines
+ whether or not to load the shift register with PHY's internal settings before
+ the shifting process. */
+ uint64_t dsk_dbg_overwrt_ena : 1; /**< [ 59: 59](R/W) Reserved.
+ Internal:
+ When set high, PHY selects all of the preloaded data
+ when configuring the read deskew settings. */
+ uint64_t dsk_dbg_wr_mode : 1; /**< [ 58: 58](R/W) Reserved.
+ Internal:
+ When set high along with [DSK_DBG_RD_START], LMC kicks off the deskew
+ overwrite sequence to shift out a 10-bit setting for a single DQ.
+ Note that there are a total of 9 bytes and the chain structure is split into two
+ halves such that the top chain covers bytes 7,6,5,4 and the bottom chain covers bytes
+ ECC,3,2,1,0.
+ Each byte has 10 DQs (DQ7,DQ6,DQ5,DQ4,DBI,DAC,DQ3,DQ2,DQ1,DQ0) and each
+ DQ has a 10-bit deskew setting. */
+ uint64_t data_rate_loopback : 1; /**< [ 57: 57](R/W) Reserved.
+ Internal:
+ DQ data rate loopback, working in conjunction with LOOPBACK assertion.
+ When asserted, incoming PRBS at even DQ can be set at data rate, and the data is
+ looped back out through odd DQ at the same rate.
+ When deasserted, LOOPBACK assertion works along with [LOOPBACK_POS] to check even
+ DQ against each DQS edge separately. This is done at the clock rate. */
+ uint64_t dq_shallow_loopback : 1; /**< [ 56: 56](R/W) Reserved.
+ Internal:
+ DQ shallow loopback, working in conjunction with LOOPBACK assertion.
+ When asserted, even DQ inputs can be loop-backed out through their adjacent odd DQ outputs
+ without being flop'd by DQS. Make sure LMC()_PHY_CTL[PHY_DSK_BYP] is set and
+ LMC()_PHY_CTL[INT_PHY_LOOPBACK_ENA] is unset. */
+ uint64_t dm_disable : 1; /**< [ 55: 55](R/W) Write to 1 to disable the DRAM data mask feature by having LMC drive a constant value on
+ the DDRX_DQS\<17:9\>_P pins of the chip during write operations. LMC drives a constant 0 in
+ DDR3 and drives a constant 1 in DDR4.
+ Note that setting this field high is NOT allowed when LMC has the write DBI feature turned
+ on (MODEREG_PARAMS3[WR_DBI]=1). */
+ uint64_t c1_sel : 2; /**< [ 54: 53](R/W) Reserved.
+ Internal:
+ 0x0 = C1 is not routed to any output pin.
+ 0x1 = C1 is routed to CS3.
+ 0x2 = C1 is routed to A17 address pin.
+ 0x3 = C1 is not routed to any output pin.
+
+ Set to 0x0 if not interfacing with 3DS DRAM. */
+ uint64_t c0_sel : 2; /**< [ 52: 51](R/W) Reserved.
+ Internal:
+ 0x0 = C0 is not routed to any output pin.
+ 0x1 = C0 is routed to CS2.
+ 0x2 = C0 is routed to TEN output pin.
+ 0x3 = C0 is not routed to any output pin.
+
+ Set to 0x0 if not interfacing with 3DS DRAM. */
+ uint64_t phy_reset : 1; /**< [ 50: 50](WO) Reserved.
+ Internal:
+ Write to one to reset the PHY, one-shot operation, will automatically
+ clear to value of zero. */
+ uint64_t dsk_dbg_rd_complete : 1; /**< [ 49: 49](RO/H) Reserved.
+ Internal:
+ Indicates completion of a read operation, will clear to zero when a read
+ operation is started, then set to one when operation is complete. */
+ uint64_t dsk_dbg_rd_data : 10; /**< [ 48: 39](RO/H) Reserved.
+ Internal:
+ Data from a deskew read operation. Only valid when the
+ LMC()_PHY_CTL[DSK_DBG_RD_COMPLETE] bit is set. */
+ uint64_t dsk_dbg_rd_start : 1; /**< [ 38: 38](WO/H) Reserved.
+ Internal:
+ Write one to start deskew data read operation, will automatically clear
+ to zero. Write to one will also clear the complete bit. */
+ uint64_t dsk_dbg_clk_scaler : 2; /**< [ 37: 36](R/W) Reserved.
+ Internal:
+ Adjust clock toggle rate for reading deskew debug information:
+ 0x0 = Deskew read clock toggles every 4 DCLKs.
+ 0x1 = Deskew read clock toggles every 8 DCLKs.
+ 0x2 = Deskew read clock toggles every 12 DCLKs.
+ 0x3 = Deskew read clock toggles every 16 DCLKs. */
+ uint64_t dsk_dbg_offset : 2; /**< [ 35: 34](R/W) Reserved.
+ Internal:
+ Offset to change delay of deskew debug data return time to LMC from
+ DDR PHY. */
+ uint64_t dsk_dbg_num_bits_sel : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Deskew debug, select number of bits per byte lane.
+ 0 = 8 bits per byte lane, no DBI, no DAC debug.
+ 1 = 10 bits per byte lane, including DBI and DAC. CN81XX needs to be set to this value. */
+ uint64_t dsk_dbg_byte_sel : 4; /**< [ 32: 29](R/W) Reserved.
+ Internal:
+ Deskew debug byte select for read operation. Values 0-3 correspond to
+ byte lanes 0-3, 4 is for ECC, 5-8 are byte lanes 4-7. */
+ uint64_t dsk_dbg_bit_sel : 4; /**< [ 28: 25](R/W) Reserved.
+ Internal:
+ Deskew debug bit select for dsk read operation.
+ 0x0 = DQ0.
+ 0x1 = DQ1.
+ 0x2 = DQ2.
+ 0x3 = DQ3.
+ 0x4 = DAC.
+ 0x5 = DBI.
+ 0x6 = DQ4.
+ 0x7 = DQ5.
+ 0x8 = DQ6.
+ 0x9 = DQ7. */
+ uint64_t dbi_mode_ena : 1; /**< [ 24: 24](R/W) Enable DBI mode for PHY. */
+ uint64_t ddr_error_n_ena : 1; /**< [ 23: 23](R/W) Enable error_alert_n signal for PHY. */
+ uint64_t ref_pin_on : 1; /**< [ 22: 22](R/W) Reserved.
+ Internal:
+ Voltage reference pin enabled. */
+ uint64_t dac_on : 1; /**< [ 21: 21](R/W) Reserved.
+ Internal:
+ PHY DAC on. */
+ uint64_t int_pad_loopback_ena : 1; /**< [ 20: 20](R/W) Reserved.
+ Internal:
+ DDR pad loopback enable. Also must set LMC()_PHY_CTL[PHY_DSK_BYP]
+ when loopback is enabled. */
+ uint64_t int_phy_loopback_ena : 1; /**< [ 19: 19](R/W) Reserved.
+ Internal:
+ PHY loopback enable. */
+ uint64_t phy_dsk_reset : 1; /**< [ 18: 18](R/W) PHY deskew reset. When set, the deskew reset signal goes active if the Vrefint/deskew
+ training sequence is in the idle state. */
+ uint64_t phy_dsk_byp : 1; /**< [ 17: 17](R/W) PHY deskew bypass. */
+ uint64_t phy_pwr_save_disable : 1; /**< [ 16: 16](R/W) DDR PHY power save disable. */
+ uint64_t ten : 1; /**< [ 15: 15](R/W) DDR PHY test enable pin. */
+ uint64_t rx_always_on : 1; /**< [ 14: 14](R/W) Reserved; must be zero.
+ Internal:
+ Set to force read_enable to PHY active all the time.
+ This bit MUST not be set when LMC initialization is in progress. Internal VREF and
+ Deskew training requires normal operation on the dqx/s read_enable signals. */
+ uint64_t lv_mode : 1; /**< [ 13: 13](R/W) Reserved; must be zero.
+ Internal:
+ Low Voltage Mode (1.35 V). */
+ uint64_t ck_tune1 : 1; /**< [ 12: 12](R/W) Reserved; must be zero.
+ Internal:
+ Clock tune. */
+ uint64_t ck_dlyout1 : 4; /**< [ 11: 8](R/W) Reserved; must be zero.
+ Internal:
+ Clock delay out. */
+ uint64_t ck_tune0 : 1; /**< [ 7: 7](R/W) Reserved; must be zero.
+ Internal:
+ Clock tune. */
+ uint64_t ck_dlyout0 : 4; /**< [ 6: 3](R/W) Reserved; must be zero.
+ Internal:
+ Clock delay out. */
+ uint64_t loopback : 1; /**< [ 2: 2](R/W) Reserved; must be zero.
+ Internal:
+ External loopback enable. When asserted, Rx is on at DQS0 and data at even DQ bits
+ are loop-backed out through odd DQ bits. For DQS, when LMC()_PHY_CTL[PHY_DSK_BYP] and
+ LMC()_CONFIG[MODE_X4DEV] are asserted along with LOOPBACK, DQS0 input of a given byte
+ can be loop-backed out through DQS1 of the same byte. For DQ, when
+ LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is asserted, DQ bits are loop-backed out without being
+ flop'd by incoming DQS. When LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is deasserted, DQ bits are
+ loop-backed out after being flop'd by incoming DQS. */
+ uint64_t loopback_pos : 1; /**< [ 1: 1](R/W) Reserved; must be zero.
+ Internal:
+ Loopback pos mode. This works in conjunction with
+ LMC()_PHY_CTL[LOOPBACK] mentioned above. */
+ uint64_t ts_stagger : 1; /**< [ 0: 0](R/W) TS stagger mode. This mode configures output drivers with two-stage drive strength to
+ avoid undershoot issues on the bus when strong drivers are suddenly turned on. When this
+ mode is asserted, CNXXXX will configure output drivers to be weak drivers (60 ohm output
+ impedance) at the first CK cycle, and change drivers to the designated drive strengths
+ specified in LMC()_COMP_CTL2[CMD_CTL], LMC()_COMP_CTL2[CK_CTL],
+ LMC()_COMP_CTL2[DQX_CTL] starting at the following cycle. */
+#else /* Word 0 - Little Endian */
+ uint64_t ts_stagger : 1; /**< [ 0: 0](R/W) TS stagger mode. This mode configures output drivers with two-stage drive strength to
+ avoid undershoot issues on the bus when strong drivers are suddenly turned on. When this
+ mode is asserted, CNXXXX will configure output drivers to be weak drivers (60 ohm output
+ impedance) at the first CK cycle, and change drivers to the designated drive strengths
+ specified in LMC()_COMP_CTL2[CMD_CTL], LMC()_COMP_CTL2[CK_CTL],
+ LMC()_COMP_CTL2[DQX_CTL] starting at the following cycle. */
+ uint64_t loopback_pos : 1; /**< [ 1: 1](R/W) Reserved; must be zero.
+ Internal:
+ Loopback pos mode. This works in conjunction with
+ LMC()_PHY_CTL[LOOPBACK] mentioned above. */
+ uint64_t loopback : 1; /**< [ 2: 2](R/W) Reserved; must be zero.
+ Internal:
+ External loopback enable. When asserted, Rx is on at DQS0 and data at even DQ bits
+ are loop-backed out through odd DQ bits. For DQS, when LMC()_PHY_CTL[PHY_DSK_BYP] and
+ LMC()_CONFIG[MODE_X4DEV] are asserted along with LOOPBACK, DQS0 input of a given byte
+ can be loop-backed out through DQS1 of the same byte. For DQ, when
+ LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is asserted, DQ bits are loop-backed out without being
+ flop'd by incoming DQS. When LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is deasserted, DQ bits are
+ loop-backed out after being flop'd by incoming DQS. */
+ uint64_t ck_dlyout0 : 4; /**< [ 6: 3](R/W) Reserved; must be zero.
+ Internal:
+ Clock delay out. */
+ uint64_t ck_tune0 : 1; /**< [ 7: 7](R/W) Reserved; must be zero.
+ Internal:
+ Clock tune. */
+ uint64_t ck_dlyout1 : 4; /**< [ 11: 8](R/W) Reserved; must be zero.
+ Internal:
+ Clock delay out. */
+ uint64_t ck_tune1 : 1; /**< [ 12: 12](R/W) Reserved; must be zero.
+ Internal:
+ Clock tune. */
+ uint64_t lv_mode : 1; /**< [ 13: 13](R/W) Reserved; must be zero.
+ Internal:
+ Low Voltage Mode (1.35 V). */
+ uint64_t rx_always_on : 1; /**< [ 14: 14](R/W) Reserved; must be zero.
+ Internal:
+ Set to force read_enable to PHY active all the time.
+ This bit MUST not be set when LMC initialization is in progress. Internal VREF and
+ Deskew training requires normal operation on the dqx/s read_enable signals. */
+ uint64_t ten : 1; /**< [ 15: 15](R/W) DDR PHY test enable pin. */
+ uint64_t phy_pwr_save_disable : 1; /**< [ 16: 16](R/W) DDR PHY power save disable. */
+ uint64_t phy_dsk_byp : 1; /**< [ 17: 17](R/W) PHY deskew bypass. */
+ uint64_t phy_dsk_reset : 1; /**< [ 18: 18](R/W) PHY deskew reset. When set, the deskew reset signal goes active if the Vrefint/deskew
+ training sequence is in the idle state. */
+ uint64_t int_phy_loopback_ena : 1; /**< [ 19: 19](R/W) Reserved.
+ Internal:
+ PHY loopback enable. */
+ uint64_t int_pad_loopback_ena : 1; /**< [ 20: 20](R/W) Reserved.
+ Internal:
+ DDR pad loopback enable. Also must set LMC()_PHY_CTL[PHY_DSK_BYP]
+ when loopback is enabled. */
+ uint64_t dac_on : 1; /**< [ 21: 21](R/W) Reserved.
+ Internal:
+ PHY DAC on. */
+ uint64_t ref_pin_on : 1; /**< [ 22: 22](R/W) Reserved.
+ Internal:
+ Voltage reference pin enabled. */
+ uint64_t ddr_error_n_ena : 1; /**< [ 23: 23](R/W) Enable error_alert_n signal for PHY. */
+ uint64_t dbi_mode_ena : 1; /**< [ 24: 24](R/W) Enable DBI mode for PHY. */
+ uint64_t dsk_dbg_bit_sel : 4; /**< [ 28: 25](R/W) Reserved.
+ Internal:
+ Deskew debug bit select for dsk read operation.
+ 0x0 = DQ0.
+ 0x1 = DQ1.
+ 0x2 = DQ2.
+ 0x3 = DQ3.
+ 0x4 = DAC.
+ 0x5 = DBI.
+ 0x6 = DQ4.
+ 0x7 = DQ5.
+ 0x8 = DQ6.
+ 0x9 = DQ7. */
+ uint64_t dsk_dbg_byte_sel : 4; /**< [ 32: 29](R/W) Reserved.
+ Internal:
+ Deskew debug byte select for read operation. Values 0-3 correspond to
+ byte lanes 0-3, 4 is for ECC, 5-8 are byte lanes 4-7. */
+ uint64_t dsk_dbg_num_bits_sel : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Deskew debug, select number of bits per byte lane.
+ 0 = 8 bits per byte lane, no DBI, no DAC debug.
+ 1 = 10 bits per byte lane, including DBI and DAC. CN81XX needs to be set to this value. */
+ uint64_t dsk_dbg_offset : 2; /**< [ 35: 34](R/W) Reserved.
+ Internal:
+ Offset to change delay of deskew debug data return time to LMC from
+ DDR PHY. */
+ uint64_t dsk_dbg_clk_scaler : 2; /**< [ 37: 36](R/W) Reserved.
+ Internal:
+ Adjust clock toggle rate for reading deskew debug information:
+ 0x0 = Deskew read clock toggles every 4 DCLKs.
+ 0x1 = Deskew read clock toggles every 8 DCLKs.
+ 0x2 = Deskew read clock toggles every 12 DCLKs.
+ 0x3 = Deskew read clock toggles every 16 DCLKs. */
+ uint64_t dsk_dbg_rd_start : 1; /**< [ 38: 38](WO/H) Reserved.
+ Internal:
+ Write one to start deskew data read operation, will automatically clear
+ to zero. Write to one will also clear the complete bit. */
+ uint64_t dsk_dbg_rd_data : 10; /**< [ 48: 39](RO/H) Reserved.
+ Internal:
+ Data from a deskew read operation. Only valid when the
+ LMC()_PHY_CTL[DSK_DBG_RD_COMPLETE] bit is set. */
+ uint64_t dsk_dbg_rd_complete : 1; /**< [ 49: 49](RO/H) Reserved.
+ Internal:
+ Indicates completion of a read operation, will clear to zero when a read
+ operation is started, then set to one when operation is complete. */
+ uint64_t phy_reset : 1; /**< [ 50: 50](WO) Reserved.
+ Internal:
+ Write to one to reset the PHY, one-shot operation, will automatically
+ clear to value of zero. */
+ uint64_t c0_sel : 2; /**< [ 52: 51](R/W) Reserved.
+ Internal:
+ 0x0 = C0 is not routed to any output pin.
+ 0x1 = C0 is routed to CS2.
+ 0x2 = C0 is routed to TEN output pin.
+ 0x3 = C0 is not routed to any output pin.
+
+ Set to 0x0 if not interfacing with 3DS DRAM. */
+ uint64_t c1_sel : 2; /**< [ 54: 53](R/W) Reserved.
+ Internal:
+ 0x0 = C1 is not routed to any output pin.
+ 0x1 = C1 is routed to CS3.
+ 0x2 = C1 is routed to A17 address pin.
+ 0x3 = C1 is not routed to any output pin.
+
+ Set to 0x0 if not interfacing with 3DS DRAM. */
+ uint64_t dm_disable : 1; /**< [ 55: 55](R/W) Write to 1 to disable the DRAM data mask feature by having LMC drive a constant value on
+ the DDRX_DQS\<17:9\>_P pins of the chip during write operations. LMC drives a constant 0
+ in DDR3 and drives a constant 1 in DDR4.
+ Note that setting this field high is NOT allowed when LMC has the write DBI feature
+ turned on (MODEREG_PARAMS3[WR_DBI]=1). */
+ uint64_t dq_shallow_loopback : 1; /**< [ 56: 56](R/W) Reserved.
+ Internal:
+ DQ shallow loopback, working in conjunction with LOOPBACK assertion.
+ When asserted, even DQ inputs can be looped back out through their adjacent odd DQ outputs
+ without being flop'd by DQS. Ensure that LMC()_PHY_CTL[PHY_DSK_BYP] is set and
+ LMC()_PHY_CTL[INT_PHY_LOOPBACK_ENA] is unset. */
+ uint64_t data_rate_loopback : 1; /**< [ 57: 57](R/W) Reserved.
+ Internal:
+ DQ data rate loopback, working in conjunction with LOOPBACK assertion.
+ When asserted, incoming PRBS at even DQ can be set at data rate, and the data is looped
+ back out through odd DQ at the same rate.
+ When deasserted, LOOPBACK assertion works along with [LOOPBACK_POS] to check even
+ DQ against each DQS edge separately. This is done at the clock rate. */
+ uint64_t dsk_dbg_wr_mode : 1; /**< [ 58: 58](R/W) Reserved.
+ Internal:
+ When set high along with [DSK_DBG_RD_START], LMC kicks off Deskew
+ Overwrite sequence to shift out a 10-bit setting for a single DQ.
+ Note that there are a total of 9 bytes and the chain structure is split into two
+ halves such that the top chain covers bytes 7,6,5,4 and the bottom chain covers bytes
+ ECC,3,2,1,0.
+ Each byte has 10 DQs (DQ7,DQ6,DQ5,DQ4,DBI,DAC,DQ3,DQ2,DQ1,DQ0), and each
+ DQ has a 10-bit deskew setting. */
+ uint64_t dsk_dbg_overwrt_ena : 1; /**< [ 59: 59](R/W) Reserved.
+ Internal:
+ When set high, PHY selects all of the preloaded data
+ when configuring the read deskew settings. */
+ uint64_t dsk_dbg_load_dis : 1; /**< [ 60: 60](R/W) Reserved.
+ Internal:
+ When set, LMC prevents PHY from loading the deskew shift
+ registers with its internal settings. When Read Deskew sequence is kicked off
+ by setting [DSK_DBG_RD_START] = 1 and [DSK_DBG_WR_MODE] = 0, this field determines
+ whether or not to load the shift register with PHY's internal settings before
+ the shifting process. */
+ uint64_t phy_dsk_lock_en : 1; /**< [ 61: 61](RO) Reserved. */
+ uint64_t reserved_62_63 : 2;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_lmcx_phy_ctl_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_62_63 : 2;
+ uint64_t phy_dsk_lock_en : 1; /**< [ 61: 61](R/W) When set, the PHY attempts to lock all DQ/DBI bit deskew settings once alignment is
+ achieved.
+
+ When clear, LMC disengages the PHY bit deskew lock control mechanism. This
+ causes the PHY to continuously perform and/or adjust the read deskew training on
+ all DQ/DBI bits during any read operations. */
+ uint64_t dsk_dbg_load_dis : 1; /**< [ 60: 60](R/W) Reserved.
+ Internal:
+ When set, LMC prevents PHY from loading the deskew shift
+ registers with its internal settings. When Read Deskew sequence is kicked off
+ by setting [DSK_DBG_RD_START] = 1 and [DSK_DBG_WR_MODE] = 0, this field determines
+ whether or not to load the shift register with PHY's internal settings before
+ the shifting process. */
+ uint64_t dsk_dbg_overwrt_ena : 1; /**< [ 59: 59](R/W) Reserved.
+ Internal:
+ When set high, PHY selects all of the preloaded data
+ when configuring the read deskew settings. */
+ uint64_t dsk_dbg_wr_mode : 1; /**< [ 58: 58](R/W) Reserved.
+ Internal:
+ When set high along with [DSK_DBG_RD_START], LMC kicks off Deskew
+ Overwrite sequence to shift out a 10-bit setting for a single DQ.
+ Note that there are a total of 9 bytes and the chain structure is split into two
+ halves such that the top chain covers bytes 7,6,5,4 and the bottom chain covers bytes
+ ECC,3,2,1,0.
+ Each byte has 10 DQs (DQ7,DQ6,DQ5,DQ4,DBI,DAC,DQ3,DQ2,DQ1,DQ0), and each
+ DQ has a 10-bit deskew setting. */
+ uint64_t data_rate_loopback : 1; /**< [ 57: 57](R/W) Reserved.
+ Internal:
+ DQ data rate loopback, working in conjunction with LOOPBACK assertion.
+ When asserted, incoming PRBS at even DQ can be set at data rate, and the data is looped
+ back out through odd DQ at the same rate.
+ When deasserted, LOOPBACK assertion works along with [LOOPBACK_POS] to check even
+ DQ against each DQS edge separately. This is done at the clock rate. */
+ uint64_t dq_shallow_loopback : 1; /**< [ 56: 56](R/W) Reserved.
+ Internal:
+ DQ shallow loopback, working in conjunction with LOOPBACK assertion.
+ When asserted, even DQ inputs can be looped back out through their adjacent odd DQ outputs
+ without being flop'd by DQS. Ensure that LMC()_PHY_CTL[PHY_DSK_BYP] is set and
+ LMC()_PHY_CTL[INT_PHY_LOOPBACK_ENA] is unset. */
+ uint64_t dm_disable : 1; /**< [ 55: 55](R/W) Write to 1 to disable the DRAM data mask feature by having LMC drive a constant value on
+ the DDRX_DQS\<17:9\>_P pins of the chip during write operations. LMC drives a constant 0
+ in DDR3 and drives a constant 1 in DDR4.
+ Note that setting this field high is NOT allowed when LMC has the write DBI feature
+ turned on (MODEREG_PARAMS3[WR_DBI]=1). */
+ uint64_t c1_sel : 2; /**< [ 54: 53](R/W) Reserved.
+ Internal:
+ 0x0 = C1 is not routed to any output pin.
+ 0x1 = C1 is routed to CS3.
+ 0x2 = C1 is routed to A17 address pin.
+ 0x3 = C1 is not routed to any output pin.
+
+ Set to 0x0 if not interfacing with 3DS DRAM. */
+ uint64_t c0_sel : 2; /**< [ 52: 51](R/W) Reserved.
+ Internal:
+ 0x0 = C0 is not routed to any output pin.
+ 0x1 = C0 is routed to CS2.
+ 0x2 = C0 is routed to TEN output pin.
+ 0x3 = C0 is not routed to any output pin.
+
+ Set to 0x0 if not interfacing with 3DS DRAM. */
+ uint64_t phy_reset : 1; /**< [ 50: 50](WO) Reserved.
+ Internal:
+ Write one to reset the PHY. This is a one-shot operation that automatically
+ clears back to zero. */
+ uint64_t dsk_dbg_rd_complete : 1; /**< [ 49: 49](RO/H) Reserved.
+ Internal:
+ Indicates completion of a read operation. Clears to zero when a read
+ operation is started, then sets to one when the operation is complete. */
+ uint64_t dsk_dbg_rd_data : 10; /**< [ 48: 39](RO/H) Reserved.
+ Internal:
+ Data from a deskew read operation. Only valid when the
+ LMC()_PHY_CTL[DSK_DBG_RD_COMPLETE] bit is set. */
+ uint64_t dsk_dbg_rd_start : 1; /**< [ 38: 38](WO/H) Reserved.
+ Internal:
+ Write one to start a deskew data read operation; it automatically clears
+ to zero. Writing one also clears the complete bit. */
+ uint64_t dsk_dbg_clk_scaler : 2; /**< [ 37: 36](R/W) Reserved.
+ Internal:
+ Adjust clock toggle rate for reading deskew debug information:
+ 0x0 = Deskew read clock toggles every 4 DCLKs.
+ 0x1 = Deskew read clock toggles every 8 DCLKs.
+ 0x2 = Deskew read clock toggles every 12 DCLKs.
+ 0x3 = Deskew read clock toggles every 16 DCLKs. */
+ uint64_t dsk_dbg_offset : 2; /**< [ 35: 34](R/W) Reserved.
+ Internal:
+ Offset to change delay of deskew debug data return time to LMC from
+ DDR PHY. */
+ uint64_t dsk_dbg_num_bits_sel : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Deskew debug, select number of bits per byte lane.
+ 0 = 8 bits per byte lane, no DBI, no DAC debug.
+ 1 = 10 bits per byte lane, including DBI and DAC. CN83XX needs to be set to this value. */
+ uint64_t dsk_dbg_byte_sel : 4; /**< [ 32: 29](R/W) Reserved.
+ Internal:
+ Deskew debug byte select for read operation. Values 0-3 correspond to
+ byte lanes 0-3, 4 is for ECC, 5-8 are byte lanes 4-7. */
+ uint64_t dsk_dbg_bit_sel : 4; /**< [ 28: 25](R/W) Reserved.
+ Internal:
+ Deskew debug bit select for dsk read operation.
+ 0x0 = DQ0.
+ 0x1 = DQ1.
+ 0x2 = DQ2.
+ 0x3 = DQ3.
+ 0x4 = DAC.
+ 0x5 = DBI.
+ 0x6 = DQ4.
+ 0x7 = DQ5.
+ 0x8 = DQ6.
+ 0x9 = DQ7. */
+ uint64_t dbi_mode_ena : 1; /**< [ 24: 24](R/W) Enable DBI mode for PHY. */
+ uint64_t ddr_error_n_ena : 1; /**< [ 23: 23](R/W) Enable error_alert_n signal for PHY. */
+ uint64_t ref_pin_on : 1; /**< [ 22: 22](R/W) Reserved.
+ Internal:
+ Voltage reference pin enabled. */
+ uint64_t dac_on : 1; /**< [ 21: 21](R/W) Reserved.
+ Internal:
+ PHY DAC on. */
+ uint64_t int_pad_loopback_ena : 1; /**< [ 20: 20](R/W) Reserved.
+ Internal:
+ DDR pad loopback enable. Also must set LMC()_PHY_CTL[PHY_DSK_BYP]
+ when loopback is enabled. */
+ uint64_t int_phy_loopback_ena : 1; /**< [ 19: 19](R/W) Reserved.
+ Internal:
+ PHY loopback enable. */
+ uint64_t phy_dsk_reset : 1; /**< [ 18: 18](R/W) PHY deskew reset. When set, the deskew reset signal goes active if the Vrefint/deskew
+ training sequence is in the idle state. */
+ uint64_t phy_dsk_byp : 1; /**< [ 17: 17](R/W) PHY deskew bypass. */
+ uint64_t phy_pwr_save_disable : 1; /**< [ 16: 16](R/W) DDR PHY power save disable. */
+ uint64_t ten : 1; /**< [ 15: 15](R/W) DDR PHY test enable pin. */
+ uint64_t rx_always_on : 1; /**< [ 14: 14](R/W) Reserved; must be zero.
+ Internal:
+ Set to force read_enable to PHY active all the time.
+ This bit MUST not be set when LMC initialization is in progress. Internal VREF and
+ Deskew training requires normal operation on the dqx/s read_enable signals. */
+ uint64_t lv_mode : 1; /**< [ 13: 13](R/W) Reserved; must be zero.
+ Internal:
+ Low Voltage Mode (1.35 V). */
+ uint64_t ck_tune1 : 1; /**< [ 12: 12](R/W) Reserved; must be zero.
+ Internal:
+ Clock tune. */
+ uint64_t ck_dlyout1 : 4; /**< [ 11: 8](R/W) Reserved; must be zero.
+ Internal:
+ Clock delay out. */
+ uint64_t ck_tune0 : 1; /**< [ 7: 7](R/W) Reserved; must be zero.
+ Internal:
+ Clock tune. */
+ uint64_t ck_dlyout0 : 4; /**< [ 6: 3](R/W) Reserved; must be zero.
+ Internal:
+ Clock delay out. */
+ uint64_t loopback : 1; /**< [ 2: 2](R/W) Reserved; must be zero.
+ Internal:
+ External loopback enable. When asserted, Rx is on at DQS0 and data at even DQ bits
+ are looped back out through odd DQ bits. For DQS, when LMC()_PHY_CTL[PHY_DSK_BYP] and
+ LMC()_CONFIG[MODE_X4DEV] are asserted along with LOOPBACK, the DQS0 input of a given byte
+ can be looped back out through DQS1 of the same byte. For DQ, when
+ LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is asserted, DQ bits are looped back out without being
+ flop'd by incoming DQS. When LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is deasserted, DQ bits are
+ looped back out after being flop'd by incoming DQS. */
+ uint64_t loopback_pos : 1; /**< [ 1: 1](R/W) Reserved; must be zero.
+ Internal:
+ Loopback pos mode. This works in conjunction with
+ LMC()_PHY_CTL[LOOPBACK] mentioned above. */
+ uint64_t ts_stagger : 1; /**< [ 0: 0](R/W) TS stagger mode. This mode configures output drivers with two-stage drive strength to
+ avoid undershoot issues on the bus when strong drivers are suddenly turned on. When this
+ mode is asserted, CNXXXX will configure output drivers to be weak drivers (60 ohm output
+ impedance) at the first CK cycle, and change drivers to the designated drive strengths
+ specified in LMC()_COMP_CTL2[CMD_CTL], LMC()_COMP_CTL2[CK_CTL],
+ LMC()_COMP_CTL2[DQX_CTL] starting at the following cycle. */
+#else /* Word 0 - Little Endian */
+ uint64_t ts_stagger : 1; /**< [ 0: 0](R/W) TS stagger mode. This mode configures output drivers with two-stage drive strength to
+ avoid undershoot issues on the bus when strong drivers are suddenly turned on. When this
+ mode is asserted, CNXXXX will configure output drivers to be weak drivers (60 ohm output
+ impedance) at the first CK cycle, and change drivers to the designated drive strengths
+ specified in LMC()_COMP_CTL2[CMD_CTL], LMC()_COMP_CTL2[CK_CTL],
+ LMC()_COMP_CTL2[DQX_CTL] starting at the following cycle. */
+ uint64_t loopback_pos : 1; /**< [ 1: 1](R/W) Reserved; must be zero.
+ Internal:
+ Loopback pos mode. This works in conjunction with
+ LMC()_PHY_CTL[LOOPBACK] mentioned above. */
+ uint64_t loopback : 1; /**< [ 2: 2](R/W) Reserved; must be zero.
+ Internal:
+ External loopback enable. When asserted, Rx is on at DQS0 and data at even DQ bits
+ are looped back out through odd DQ bits. For DQS, when LMC()_PHY_CTL[PHY_DSK_BYP] and
+ LMC()_CONFIG[MODE_X4DEV] are asserted along with LOOPBACK, the DQS0 input of a given byte
+ can be looped back out through DQS1 of the same byte. For DQ, when
+ LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is asserted, DQ bits are looped back out without being
+ flop'd by incoming DQS. When LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is deasserted, DQ bits are
+ looped back out after being flop'd by incoming DQS. */
+ uint64_t ck_dlyout0 : 4; /**< [ 6: 3](R/W) Reserved; must be zero.
+ Internal:
+ Clock delay out. */
+ uint64_t ck_tune0 : 1; /**< [ 7: 7](R/W) Reserved; must be zero.
+ Internal:
+ Clock tune. */
+ uint64_t ck_dlyout1 : 4; /**< [ 11: 8](R/W) Reserved; must be zero.
+ Internal:
+ Clock delay out. */
+ uint64_t ck_tune1 : 1; /**< [ 12: 12](R/W) Reserved; must be zero.
+ Internal:
+ Clock tune. */
+ uint64_t lv_mode : 1; /**< [ 13: 13](R/W) Reserved; must be zero.
+ Internal:
+ Low Voltage Mode (1.35 V). */
+ uint64_t rx_always_on : 1; /**< [ 14: 14](R/W) Reserved; must be zero.
+ Internal:
+ Set to force read_enable to PHY active all the time.
+ This bit MUST not be set when LMC initialization is in progress. Internal VREF and
+ Deskew training requires normal operation on the dqx/s read_enable signals. */
+ uint64_t ten : 1; /**< [ 15: 15](R/W) DDR PHY test enable pin. */
+ uint64_t phy_pwr_save_disable : 1; /**< [ 16: 16](R/W) DDR PHY power save disable. */
+ uint64_t phy_dsk_byp : 1; /**< [ 17: 17](R/W) PHY deskew bypass. */
+ uint64_t phy_dsk_reset : 1; /**< [ 18: 18](R/W) PHY deskew reset. When set, the deskew reset signal goes active if the Vrefint/deskew
+ training sequence is in the idle state. */
+ uint64_t int_phy_loopback_ena : 1; /**< [ 19: 19](R/W) Reserved.
+ Internal:
+ PHY loopback enable. */
+ uint64_t int_pad_loopback_ena : 1; /**< [ 20: 20](R/W) Reserved.
+ Internal:
+ DDR pad loopback enable. Also must set LMC()_PHY_CTL[PHY_DSK_BYP]
+ when loopback is enabled. */
+ uint64_t dac_on : 1; /**< [ 21: 21](R/W) Reserved.
+ Internal:
+ PHY DAC on. */
+ uint64_t ref_pin_on : 1; /**< [ 22: 22](R/W) Reserved.
+ Internal:
+ Voltage reference pin enabled. */
+ uint64_t ddr_error_n_ena : 1; /**< [ 23: 23](R/W) Enable error_alert_n signal for PHY. */
+ uint64_t dbi_mode_ena : 1; /**< [ 24: 24](R/W) Enable DBI mode for PHY. */
+ uint64_t dsk_dbg_bit_sel : 4; /**< [ 28: 25](R/W) Reserved.
+ Internal:
+ Deskew debug bit select for dsk read operation.
+ 0x0 = DQ0.
+ 0x1 = DQ1.
+ 0x2 = DQ2.
+ 0x3 = DQ3.
+ 0x4 = DAC.
+ 0x5 = DBI.
+ 0x6 = DQ4.
+ 0x7 = DQ5.
+ 0x8 = DQ6.
+ 0x9 = DQ7. */
+ uint64_t dsk_dbg_byte_sel : 4; /**< [ 32: 29](R/W) Reserved.
+ Internal:
+ Deskew debug byte select for read operation. Values 0-3 correspond to
+ byte lanes 0-3, 4 is for ECC, 5-8 are byte lanes 4-7. */
+ uint64_t dsk_dbg_num_bits_sel : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Deskew debug, select number of bits per byte lane.
+ 0 = 8 bits per byte lane, no DBI, no DAC debug.
+ 1 = 10 bits per byte lane, including DBI and DAC. CN83XX needs to be set to this value. */
+ uint64_t dsk_dbg_offset : 2; /**< [ 35: 34](R/W) Reserved.
+ Internal:
+ Offset to change delay of deskew debug data return time to LMC from
+ DDR PHY. */
+ uint64_t dsk_dbg_clk_scaler : 2; /**< [ 37: 36](R/W) Reserved.
+ Internal:
+ Adjust clock toggle rate for reading deskew debug information:
+ 0x0 = Deskew read clock toggles every 4 DCLKs.
+ 0x1 = Deskew read clock toggles every 8 DCLKs.
+ 0x2 = Deskew read clock toggles every 12 DCLKs.
+ 0x3 = Deskew read clock toggles every 16 DCLKs. */
+ uint64_t dsk_dbg_rd_start : 1; /**< [ 38: 38](WO/H) Reserved.
+ Internal:
+ Write one to start a deskew data read operation; it automatically clears
+ to zero. Writing one also clears the complete bit. */
+ uint64_t dsk_dbg_rd_data : 10; /**< [ 48: 39](RO/H) Reserved.
+ Internal:
+ Data from a deskew read operation. Only valid when the
+ LMC()_PHY_CTL[DSK_DBG_RD_COMPLETE] bit is set. */
+ uint64_t dsk_dbg_rd_complete : 1; /**< [ 49: 49](RO/H) Reserved.
+ Internal:
+ Indicates completion of a read operation. Clears to zero when a read
+ operation is started, then sets to one when the operation is complete. */
+ uint64_t phy_reset : 1; /**< [ 50: 50](WO) Reserved.
+ Internal:
+ Write one to reset the PHY. This is a one-shot operation that automatically
+ clears back to zero. */
+ uint64_t c0_sel : 2; /**< [ 52: 51](R/W) Reserved.
+ Internal:
+ 0x0 = C0 is not routed to any output pin.
+ 0x1 = C0 is routed to CS2.
+ 0x2 = C0 is routed to TEN output pin.
+ 0x3 = C0 is not routed to any output pin.
+
+ Set to 0x0 if not interfacing with 3DS DRAM. */
+ uint64_t c1_sel : 2; /**< [ 54: 53](R/W) Reserved.
+ Internal:
+ 0x0 = C1 is not routed to any output pin.
+ 0x1 = C1 is routed to CS3.
+ 0x2 = C1 is routed to A17 address pin.
+ 0x3 = C1 is not routed to any output pin.
+
+ Set to 0x0 if not interfacing with 3DS DRAM. */
+ uint64_t dm_disable : 1; /**< [ 55: 55](R/W) Write to 1 to disable the DRAM data mask feature by having LMC drive a constant value on
+ the DDRX_DQS\<17:9\>_P pins of the chip during write operations. LMC drives a constant 0
+ in DDR3 and drives a constant 1 in DDR4.
+ Note that setting this field high is NOT allowed when LMC has the write DBI feature
+ turned on (MODEREG_PARAMS3[WR_DBI]=1). */
+ uint64_t dq_shallow_loopback : 1; /**< [ 56: 56](R/W) Reserved.
+ Internal:
+ DQ shallow loopback, working in conjunction with LOOPBACK assertion.
+ When asserted, even DQ inputs can be looped back out through their adjacent odd DQ outputs
+ without being flop'd by DQS. Ensure that LMC()_PHY_CTL[PHY_DSK_BYP] is set and
+ LMC()_PHY_CTL[INT_PHY_LOOPBACK_ENA] is unset. */
+ uint64_t data_rate_loopback : 1; /**< [ 57: 57](R/W) Reserved.
+ Internal:
+ DQ data rate loopback, working in conjunction with LOOPBACK assertion.
+ When asserted, incoming PRBS at even DQ can be set at data rate, and the data is looped
+ back out through odd DQ at the same rate.
+ When deasserted, LOOPBACK assertion works along with [LOOPBACK_POS] to check even
+ DQ against each DQS edge separately. This is done at the clock rate. */
+ uint64_t dsk_dbg_wr_mode : 1; /**< [ 58: 58](R/W) Reserved.
+ Internal:
+ When set high along with [DSK_DBG_RD_START], LMC kicks off Deskew
+ Overwrite sequence to shift out a 10-bit setting for a single DQ.
+ Note that there are a total of 9 bytes and the chain structure is split into two
+ halves such that the top chain covers bytes 7,6,5,4 and the bottom chain covers bytes
+ ECC,3,2,1,0.
+ Each byte has 10 DQs (DQ7,DQ6,DQ5,DQ4,DBI,DAC,DQ3,DQ2,DQ1,DQ0), and each
+ DQ has a 10-bit deskew setting. */
+ uint64_t dsk_dbg_overwrt_ena : 1; /**< [ 59: 59](R/W) Reserved.
+ Internal:
+ When set high, PHY selects all of the preloaded data
+ when configuring the read deskew settings. */
+ uint64_t dsk_dbg_load_dis : 1; /**< [ 60: 60](R/W) Reserved.
+ Internal:
+ When set, LMC prevents PHY from loading the deskew shift
+ registers with its internal settings. When Read Deskew sequence is kicked off
+ by setting [DSK_DBG_RD_START] = 1 and [DSK_DBG_WR_MODE] = 0, this field determines
+ whether or not to load the shift register with PHY's internal settings before
+ the shifting process. */
+ uint64_t phy_dsk_lock_en : 1; /**< [ 61: 61](R/W) When set, the PHY attempts to lock all DQ/DBI bit deskew settings once alignment is
+ achieved.
+
+ When clear, LMC disengages the PHY bit deskew lock control mechanism. This
+ causes the PHY to continuously perform and/or adjust the read deskew training on
+ all DQ/DBI bits during any read operations. */
+ uint64_t reserved_62_63 : 2;
+#endif /* Word 0 - End */
+ } cn83xx;
+ struct bdk_lmcx_phy_ctl_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_62_63 : 2;
+ uint64_t phy_dsk_lock_en : 1; /**< [ 61: 61](RO) Reserved. */
+ uint64_t dsk_dbg_load_dis : 1; /**< [ 60: 60](R/W) Reserved.
+ Internal:
+ When set, LMC prevents PHY from loading the deskew shift
+ registers with its internal settings. When Read Deskew sequence is kicked off
+ by setting [DSK_DBG_RD_START] = 1 and [DSK_DBG_WR_MODE] = 0, this field determines
+ whether or not to load the shift register with PHY's internal settings before
+ the shifting process. */
+ uint64_t dsk_dbg_overwrt_ena : 1; /**< [ 59: 59](R/W) Reserved.
+ Internal:
+ When set high, PHY selects all of the preloaded data
+ when configuring the read deskew settings. */
+ uint64_t dsk_dbg_wr_mode : 1; /**< [ 58: 58](R/W) Reserved.
+ Internal:
+ When set high along with [DSK_DBG_RD_START], LMC kicks off Deskew
+ Overwrite sequence to shift out a 10-bit setting for a single DQ.
+ Note that there are a total of 9 bytes and the chain structure is split into two
+ halves such that the top chain covers bytes 7,6,5,4 and the bottom chain covers bytes
+ ECC,3,2,1,0.
+ Each byte has 10 DQs (DQ7,DQ6,DQ5,DQ4,DBI,DAC,DQ3,DQ2,DQ1,DQ0), and each
+ DQ has a 10-bit deskew setting. */
+ uint64_t data_rate_loopback : 1; /**< [ 57: 57](R/W) Reserved.
+ Internal:
+ DQ data rate loopback, working in conjunction with LOOPBACK assertion.
+ When asserted, incoming PRBS at even DQ can be set at data rate, and the data is looped
+ back out through odd DQ at the same rate.
+ When deasserted, LOOPBACK assertion works along with [LOOPBACK_POS] to check even
+ DQ against each DQS edge separately. This is done at the clock rate. */
+ uint64_t dq_shallow_loopback : 1; /**< [ 56: 56](R/W) Reserved.
+ Internal:
+ DQ shallow loopback, working in conjunction with LOOPBACK assertion.
+ When asserted, even DQ inputs can be looped back out through their adjacent odd DQ outputs
+ without being flop'd by DQS. Ensure that LMC()_PHY_CTL[PHY_DSK_BYP] is set and
+ LMC()_PHY_CTL[INT_PHY_LOOPBACK_ENA] is unset. */
+ uint64_t dm_disable : 1; /**< [ 55: 55](R/W) Write to 1 to disable the DRAM data mask feature by having LMC drive a constant value on
+ the DDRX_DQS\<17:9\>_P pins of the chip during write operations. LMC drives a constant 0
+ in DDR3 and drives a constant 1 in DDR4.
+ Note that setting this field high is NOT allowed when LMC has the write DBI feature
+ turned on (MODEREG_PARAMS3[WR_DBI]=1). */
+ uint64_t c1_sel : 2; /**< [ 54: 53](R/W) Reserved.
+ Internal:
+ 0x0 = C1 is not routed to any output pin.
+ 0x1 = C1 is routed to CS3.
+ 0x2 = C1 is routed to A17 address pin.
+ 0x3 = C1 is not routed to any output pin.
+
+ Set to 0x0 if not interfacing with 3DS DRAM. */
+ uint64_t c0_sel : 2; /**< [ 52: 51](R/W) Reserved.
+ Internal:
+ 0x0 = C0 is not routed to any output pin.
+ 0x1 = C0 is routed to CS2.
+ 0x2 = C0 is routed to TEN output pin.
+ 0x3 = C0 is not routed to any output pin.
+
+ Set to 0x0 if not interfacing with 3DS DRAM. */
+ uint64_t phy_reset : 1; /**< [ 50: 50](WO) Reserved.
+ Internal:
+ Write one to reset the PHY. This is a one-shot operation that automatically
+ clears back to zero. */
+ uint64_t dsk_dbg_rd_complete : 1; /**< [ 49: 49](RO/H) Reserved.
+ Internal:
+ Indicates completion of a read operation. Clears to zero when a read
+ operation is started, then sets to one when the operation is complete. */
+ uint64_t dsk_dbg_rd_data : 10; /**< [ 48: 39](RO/H) Reserved.
+ Internal:
+ Data from a deskew read operation. Only valid when the
+ LMC()_PHY_CTL[DSK_DBG_RD_COMPLETE] bit is set. */
+ uint64_t dsk_dbg_rd_start : 1; /**< [ 38: 38](WO/H) Reserved.
+ Internal:
+ Write one to start a deskew data read operation; it automatically clears
+ to zero. Writing one also clears the complete bit. */
+ uint64_t dsk_dbg_clk_scaler : 2; /**< [ 37: 36](R/W) Reserved.
+ Internal:
+ Adjust clock toggle rate for reading deskew debug information:
+ 0x0 = Deskew read clock toggles every 4 DCLKs.
+ 0x1 = Deskew read clock toggles every 8 DCLKs.
+ 0x2 = Deskew read clock toggles every 12 DCLKs.
+ 0x3 = Deskew read clock toggles every 16 DCLKs. */
+ uint64_t dsk_dbg_offset : 2; /**< [ 35: 34](R/W) Reserved.
+ Internal:
+ Offset to change delay of deskew debug data return time to LMC from
+ DDR PHY. */
+ uint64_t dsk_dbg_num_bits_sel : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Deskew debug, select number of bits per byte lane.
+ 0 = 8 bits per byte lane, no DBI, no DAC debug.
+ 1 = 10 bits per byte lane, including DBI and DAC. CN88XX needs to be set to this value. */
+ uint64_t dsk_dbg_byte_sel : 4; /**< [ 32: 29](R/W) Reserved.
+ Internal:
+ Deskew debug byte select for read operation. Values 0-3 correspond to
+ byte lanes 0-3, 4 is for ECC, 5-8 are byte lanes 4-7. */
+ uint64_t dsk_dbg_bit_sel : 4; /**< [ 28: 25](R/W) Reserved.
+ Internal:
+ Deskew debug bit select for dsk read operation.
+ 0x0 = DQ0.
+ 0x1 = DQ1.
+ 0x2 = DQ2.
+ 0x3 = DQ3.
+ 0x4 = DAC.
+ 0x5 = DBI.
+ 0x6 = DQ4.
+ 0x7 = DQ5.
+ 0x8 = DQ6.
+ 0x9 = DQ7. */
+ uint64_t dbi_mode_ena : 1; /**< [ 24: 24](R/W) Enable DBI mode for PHY. */
+ uint64_t ddr_error_n_ena : 1; /**< [ 23: 23](R/W) Enable error_alert_n signal for PHY. */
+ uint64_t ref_pin_on : 1; /**< [ 22: 22](R/W) Reserved.
+ Internal:
+ Voltage reference pin enabled. */
+ uint64_t dac_on : 1; /**< [ 21: 21](R/W) Reserved.
+ Internal:
+ PHY DAC on. */
+ uint64_t int_pad_loopback_ena : 1; /**< [ 20: 20](R/W) Reserved.
+ Internal:
+ DDR pad loopback enable. Also must set LMC()_PHY_CTL[PHY_DSK_BYP]
+ when loopback is enabled. */
+ uint64_t int_phy_loopback_ena : 1; /**< [ 19: 19](R/W) Reserved.
+ Internal:
+ PHY loopback enable. */
+ uint64_t phy_dsk_reset : 1; /**< [ 18: 18](R/W) PHY deskew reset. When set, the deskew reset signal goes active if the Vrefint/deskew
+ training sequence is in the idle state. */
+ uint64_t phy_dsk_byp : 1; /**< [ 17: 17](R/W) PHY deskew bypass. */
+ uint64_t phy_pwr_save_disable : 1; /**< [ 16: 16](R/W) DDR PHY power save disable. */
+ uint64_t ten : 1; /**< [ 15: 15](R/W) DDR PHY test enable pin. */
+ uint64_t rx_always_on : 1; /**< [ 14: 14](R/W) Reserved; must be zero.
+ Internal:
+ Set to force read_enable to PHY active all the time.
+ This bit MUST not be set when LMC initialization is in progress. Internal VREF and
+ Deskew training requires normal operation on the dqx/s read_enable signals. */
+ uint64_t lv_mode : 1; /**< [ 13: 13](R/W) Reserved; must be zero.
+ Internal:
+ Low Voltage Mode (1.35 V). */
+ uint64_t ck_tune1 : 1; /**< [ 12: 12](R/W) Reserved; must be zero.
+ Internal:
+ Clock tune. */
+ uint64_t ck_dlyout1 : 4; /**< [ 11: 8](R/W) Reserved; must be zero.
+ Internal:
+ Clock delay out. */
+ uint64_t ck_tune0 : 1; /**< [ 7: 7](R/W) Reserved; must be zero.
+ Internal:
+ Clock tune. */
+ uint64_t ck_dlyout0 : 4; /**< [ 6: 3](R/W) Reserved; must be zero.
+ Internal:
+ Clock delay out. */
+ uint64_t loopback : 1; /**< [ 2: 2](R/W) Reserved; must be zero.
+ Internal:
+ External loopback enable. When asserted, Rx is on at DQS0 and data at even DQ bits
+ are looped back out through odd DQ bits. For DQS, when LMC()_PHY_CTL[PHY_DSK_BYP] and
+ LMC()_CONFIG[MODE_X4DEV] are asserted along with LOOPBACK, the DQS0 input of a given byte
+ can be looped back out through DQS1 of the same byte. For DQ, when
+ LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is asserted, DQ bits are looped back out without being
+ flop'd by incoming DQS. When LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is deasserted, DQ bits are
+ looped back out after being flop'd by incoming DQS. */
+ uint64_t loopback_pos : 1; /**< [ 1: 1](R/W) Reserved; must be zero.
+ Internal:
+ Loopback pos mode. This works in conjunction with
+ LMC()_PHY_CTL[LOOPBACK] mentioned above. */
+ uint64_t ts_stagger : 1; /**< [ 0: 0](R/W) TS stagger mode. This mode configures output drivers with two-stage drive strength to
+ avoid undershoot issues on the bus when strong drivers are suddenly turned on. When this
+ mode is asserted, CNXXXX will configure output drivers to be weak drivers (60 ohm output
+ impedance) at the first CK cycle, and change drivers to the designated drive strengths
+ specified in LMC()_COMP_CTL2[CMD_CTL], LMC()_COMP_CTL2[CK_CTL],
+ LMC()_COMP_CTL2[DQX_CTL] starting at the following cycle. */
+#else /* Word 0 - Little Endian */
+ uint64_t ts_stagger : 1; /**< [ 0: 0](R/W) TS stagger mode. This mode configures output drivers with two-stage drive strength to
+ avoid undershoot issues on the bus when strong drivers are suddenly turned on. When this
+ mode is asserted, CNXXXX will configure output drivers to be weak drivers (60 ohm output
+ impedance) at the first CK cycle, and change drivers to the designated drive strengths
+ specified in LMC()_COMP_CTL2[CMD_CTL], LMC()_COMP_CTL2[CK_CTL],
+ LMC()_COMP_CTL2[DQX_CTL] starting at the following cycle. */
+ uint64_t loopback_pos : 1; /**< [ 1: 1](R/W) Reserved; must be zero.
+ Internal:
+ Loopback pos mode. This works in conjunction with
+ LMC()_PHY_CTL[LOOPBACK] mentioned above. */
+ uint64_t loopback : 1; /**< [ 2: 2](R/W) Reserved; must be zero.
+ Internal:
+ External loopback enable. When asserted, Rx is on at DQS0 and data at even DQ bits
+ are looped back out through odd DQ bits. For DQS, when LMC()_PHY_CTL[PHY_DSK_BYP] and
+ LMC()_CONFIG[MODE_X4DEV] are asserted along with LOOPBACK, the DQS0 input of a given byte
+ can be looped back out through DQS1 of the same byte. For DQ, when
+ LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is asserted, DQ bits are looped back out without being
+ flop'd by incoming DQS. When LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is deasserted, DQ bits are
+ looped back out after being flop'd by incoming DQS. */
+ uint64_t ck_dlyout0 : 4; /**< [ 6: 3](R/W) Reserved; must be zero.
+ Internal:
+ Clock delay out. */
+ uint64_t ck_tune0 : 1; /**< [ 7: 7](R/W) Reserved; must be zero.
+ Internal:
+ Clock tune. */
+ uint64_t ck_dlyout1 : 4; /**< [ 11: 8](R/W) Reserved; must be zero.
+ Internal:
+ Clock delay out. */
+ uint64_t ck_tune1 : 1; /**< [ 12: 12](R/W) Reserved; must be zero.
+ Internal:
+ Clock tune. */
+ uint64_t lv_mode : 1; /**< [ 13: 13](R/W) Reserved; must be zero.
+ Internal:
+ Low Voltage Mode (1.35 V). */
+ uint64_t rx_always_on : 1; /**< [ 14: 14](R/W) Reserved; must be zero.
+ Internal:
+ Set to force read_enable to PHY active all the time.
+ This bit MUST not be set when LMC initialization is in progress. Internal VREF and
+ Deskew training requires normal operation on the dqx/s read_enable signals. */
+ uint64_t ten : 1; /**< [ 15: 15](R/W) DDR PHY test enable pin. */
+ uint64_t phy_pwr_save_disable : 1; /**< [ 16: 16](R/W) DDR PHY power save disable. */
+ uint64_t phy_dsk_byp : 1; /**< [ 17: 17](R/W) PHY deskew bypass. */
+ uint64_t phy_dsk_reset : 1; /**< [ 18: 18](R/W) PHY deskew reset. When set, the deskew reset signal goes active if the Vrefint/deskew
+ training sequence is in the idle state. */
+ uint64_t int_phy_loopback_ena : 1; /**< [ 19: 19](R/W) Reserved.
+ Internal:
+ PHY loopback enable. */
+ uint64_t int_pad_loopback_ena : 1; /**< [ 20: 20](R/W) Reserved.
+ Internal:
+ DDR pad loopback enable. Also must set LMC()_PHY_CTL[PHY_DSK_BYP]
+ when loopback is enabled. */
+ uint64_t dac_on : 1; /**< [ 21: 21](R/W) Reserved.
+ Internal:
+ PHY DAC on. */
+ uint64_t ref_pin_on : 1; /**< [ 22: 22](R/W) Reserved.
+ Internal:
+ Voltage reference pin enabled. */
+ uint64_t ddr_error_n_ena : 1; /**< [ 23: 23](R/W) Enable error_alert_n signal for PHY. */
+ uint64_t dbi_mode_ena : 1; /**< [ 24: 24](R/W) Enable DBI mode for PHY. */
+ uint64_t dsk_dbg_bit_sel : 4; /**< [ 28: 25](R/W) Reserved.
+ Internal:
+ Deskew debug bit select for dsk read operation.
+ 0x0 = DQ0.
+ 0x1 = DQ1.
+ 0x2 = DQ2.
+ 0x3 = DQ3.
+ 0x4 = DAC.
+ 0x5 = DBI.
+ 0x6 = DQ4.
+ 0x7 = DQ5.
+ 0x8 = DQ6.
+ 0x9 = DQ7. */
+ uint64_t dsk_dbg_byte_sel : 4; /**< [ 32: 29](R/W) Reserved.
+ Internal:
+ Deskew debug byte select for read operation. Values 0-3 correspond to
+ byte lanes 0-3, 4 is for ECC, 5-8 are byte lanes 4-7. */
+ uint64_t dsk_dbg_num_bits_sel : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Deskew debug, select number of bits per byte lane.
+ 0 = 8 bits per byte lane, no DBI, no DAC debug.
+ 1 = 10 bits per byte lane, including DBI and DAC. CN88XX needs to be set to this value. */
+ uint64_t dsk_dbg_offset : 2; /**< [ 35: 34](R/W) Reserved.
+ Internal:
+ Offset to change delay of deskew debug data return time to LMC from
+ DDR PHY. */
+ uint64_t dsk_dbg_clk_scaler : 2; /**< [ 37: 36](R/W) Reserved.
+ Internal:
+ Adjust clock toggle rate for reading deskew debug information:
+ 0x0 = Deskew read clock toggles every 4 DCLKs.
+ 0x1 = Deskew read clock toggles every 8 DCLKs.
+ 0x2 = Deskew read clock toggles every 12 DCLKs.
+ 0x3 = Deskew read clock toggles every 16 DCLKs. */
+ uint64_t dsk_dbg_rd_start : 1; /**< [ 38: 38](WO/H) Reserved.
+ Internal:
+ Write one to start a deskew data read operation; it automatically clears
+ to zero. Writing one also clears the complete bit. */
+ uint64_t dsk_dbg_rd_data : 10; /**< [ 48: 39](RO/H) Reserved.
+ Internal:
+ Data from a deskew read operation. Only valid when the
+ LMC()_PHY_CTL[DSK_DBG_RD_COMPLETE] bit is set. */
+ uint64_t dsk_dbg_rd_complete : 1; /**< [ 49: 49](RO/H) Reserved.
+ Internal:
+ Indicates completion of a read operation. Clears to zero when a read
+ operation is started, then sets to one when the operation is complete. */
+ uint64_t phy_reset : 1; /**< [ 50: 50](WO) Reserved.
+ Internal:
+ Write one to reset the PHY. This is a one-shot operation that automatically
+ clears back to zero. */
+ uint64_t c0_sel : 2; /**< [ 52: 51](R/W) Reserved.
+ Internal:
+ 0x0 = C0 is not routed to any output pin.
+ 0x1 = C0 is routed to CS2.
+ 0x2 = C0 is routed to TEN output pin.
+ 0x3 = C0 is not routed to any output pin.
+
+ Set to 0x0 if not interfacing with 3DS DRAM. */
+ uint64_t c1_sel : 2; /**< [ 54: 53](R/W) Reserved.
+ Internal:
+ 0x0 = C1 is not routed to any output pin.
+ 0x1 = C1 is routed to CS3.
+ 0x2 = C1 is routed to A17 address pin.
+ 0x3 = C1 is not routed to any output pin.
+
+ Set to 0x0 if not interfacing with 3DS DRAM. */
+ uint64_t dm_disable : 1; /**< [ 55: 55](R/W) Write to 1 to disable the DRAM data mask feature by having LMC drive a constant value on
+ the DDRX_DQS\<17:9\>_P pins of the chip during write operations. LMC drives a constant 0
+ in DDR3 and drives a constant 1 in DDR4.
+ Note that setting this field high is NOT allowed when LMC has the write DBI feature
+ turned on (MODEREG_PARAMS3[WR_DBI]=1). */
+ uint64_t dq_shallow_loopback : 1; /**< [ 56: 56](R/W) Reserved.
+ Internal:
+ DQ shallow loopback, working in conjunction with LOOPBACK assertion.
+ When asserted, even DQ inputs can be looped back out through their adjacent odd DQ outputs
+ without being flop'd by DQS. Ensure that LMC()_PHY_CTL[PHY_DSK_BYP] is set and
+ LMC()_PHY_CTL[INT_PHY_LOOPBACK_ENA] is unset. */
+ uint64_t data_rate_loopback : 1; /**< [ 57: 57](R/W) Reserved.
+ Internal:
+ DQ data rate loopback, working in conjunction with LOOPBACK assertion.
+ When asserted, incoming PRBS at even DQ can be set at data rate, and the data is looped
+ back out through odd DQ at the same rate.
+ When deasserted, LOOPBACK assertion works along with [LOOPBACK_POS] to check even
+ DQ against each DQS edge separately. This is done at the clock rate. */
+ uint64_t dsk_dbg_wr_mode : 1; /**< [ 58: 58](R/W) Reserved.
+ Internal:
+ When set high along with [DSK_DBG_RD_START], LMC kicks off Deskew
+ Overwrite sequence to shift out a 10-bit setting for a single DQ.
+ Note that there are a total of 9 bytes and the chain structure is split into two
+ halves such that the top chain covers bytes 7,6,5,4 and the bottom chain covers bytes
+ ECC,3,2,1,0.
+ Each byte has 10 DQs (DQ7,DQ6,DQ5,DQ4,DBI,DAC,DQ3,DQ2,DQ1,DQ0), and each
+ DQ has a 10-bit deskew setting. */
+ uint64_t dsk_dbg_overwrt_ena : 1; /**< [ 59: 59](R/W) Reserved.
+ Internal:
+ When set high, PHY selects all of the preloaded data
+ when configuring the read deskew settings. */
+ uint64_t dsk_dbg_load_dis : 1; /**< [ 60: 60](R/W) Reserved.
+ Internal:
+ When set, LMC prevents PHY from loading the deskew shift
+ registers with its internal settings. When Read Deskew sequence is kicked off
+ by setting [DSK_DBG_RD_START] = 1 and [DSK_DBG_WR_MODE] = 0, this field determines
+ whether or not to load the shift register with PHY's internal settings before
+ the shifting process. */
+ uint64_t phy_dsk_lock_en : 1; /**< [ 61: 61](RO) Reserved. */
+ uint64_t reserved_62_63 : 2;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_lmcx_phy_ctl bdk_lmcx_phy_ctl_t;
+
+static inline uint64_t BDK_LMCX_PHY_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_PHY_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000210ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000210ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000210ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000210ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_PHY_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_PHY_CTL(a) bdk_lmcx_phy_ctl_t
+#define bustype_BDK_LMCX_PHY_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_PHY_CTL(a) "LMCX_PHY_CTL"
+#define device_bar_BDK_LMCX_PHY_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_PHY_CTL(a) (a)
+#define arguments_BDK_LMCX_PHY_CTL(a) (a),-1,-1,-1
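+
+/* Usage sketch (editorial addition, not part of the imported BDK header):
+ * read back one per-bit deskew setting through the DSK_DBG_* interface
+ * described above. Assumes the BDK_CSR_MODIFY/BDK_CSR_READ helpers from
+ * bdk-csr.h and a valid (node, lmc) pair; the unbounded busy-wait and the
+ * caller-chosen selects are illustrative only. */
+static inline uint64_t bdk_lmcx_phy_ctl_read_deskew(bdk_node_t node, int lmc,
+                                                    int byte_sel, int bit_sel)
+{
+    /* Program the debug selects, then kick off the read (one-shot start bit). */
+    BDK_CSR_MODIFY(c, node, BDK_LMCX_PHY_CTL(lmc),
+        c.s.dsk_dbg_byte_sel = byte_sel;
+        c.s.dsk_dbg_bit_sel = bit_sel;
+        c.s.dsk_dbg_num_bits_sel = 1; /* 10 bits per lane, incl. DBI and DAC */
+        c.s.dsk_dbg_rd_start = 1);
+    /* [DSK_DBG_RD_COMPLETE] sets once the shifted-out data is valid. */
+    bdk_lmcx_phy_ctl_t c;
+    do
+        c.u = BDK_CSR_READ(node, BDK_LMCX_PHY_CTL(lmc));
+    while (!c.s.dsk_dbg_rd_complete);
+    return c.s.dsk_dbg_rd_data; /* 10-bit deskew setting for the selected DQ */
+}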
+
+/**
+ * Register (RSL) lmc#_phy_ctl2
+ *
+ * LMC PHY Control Register
+ */
+union bdk_lmcx_phy_ctl2
+{
+ uint64_t u;
+ struct bdk_lmcx_phy_ctl2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_54_63 : 10;
+ uint64_t dqs1_dsk_adj8 : 3; /**< [ 53: 51](R/W) Provides adjustable deskew settings for DQS1 signal of the ECC byte. */
+ uint64_t dqs1_dsk_adj7 : 3; /**< [ 50: 48](R/W) Provides adjustable deskew settings for DQS1 signal of byte 7. */
+ uint64_t dqs1_dsk_adj6 : 3; /**< [ 47: 45](R/W) Provides adjustable deskew settings for DQS1 signal of byte 6. */
+ uint64_t dqs1_dsk_adj5 : 3; /**< [ 44: 42](R/W) Provides adjustable deskew settings for DQS1 signal of byte 5. */
+ uint64_t dqs1_dsk_adj4 : 3; /**< [ 41: 39](R/W) Provides adjustable deskew settings for DQS1 signal of byte 4. */
+ uint64_t dqs1_dsk_adj3 : 3; /**< [ 38: 36](R/W) Provides adjustable deskew settings for DQS1 signal of byte 3. */
+ uint64_t dqs1_dsk_adj2 : 3; /**< [ 35: 33](R/W) Provides adjustable deskew settings for DQS1 signal of byte 2. */
+ uint64_t dqs1_dsk_adj1 : 3; /**< [ 32: 30](R/W) Provides adjustable deskew settings for DQS1 signal of byte 1. */
+ uint64_t dqs1_dsk_adj0 : 3; /**< [ 29: 27](R/W) Provides adjustable deskew settings for DQS1 signal of byte 0. */
+ uint64_t reserved_0_26 : 27;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_26 : 27;
+ uint64_t dqs1_dsk_adj0 : 3; /**< [ 29: 27](R/W) Provides adjustable deskew settings for DQS1 signal of byte 0. */
+ uint64_t dqs1_dsk_adj1 : 3; /**< [ 32: 30](R/W) Provides adjustable deskew settings for DQS1 signal of byte 1. */
+ uint64_t dqs1_dsk_adj2 : 3; /**< [ 35: 33](R/W) Provides adjustable deskew settings for DQS1 signal of byte 2. */
+ uint64_t dqs1_dsk_adj3 : 3; /**< [ 38: 36](R/W) Provides adjustable deskew settings for DQS1 signal of byte 3. */
+ uint64_t dqs1_dsk_adj4 : 3; /**< [ 41: 39](R/W) Provides adjustable deskew settings for DQS1 signal of byte 4. */
+ uint64_t dqs1_dsk_adj5 : 3; /**< [ 44: 42](R/W) Provides adjustable deskew settings for DQS1 signal of byte 5. */
+ uint64_t dqs1_dsk_adj6 : 3; /**< [ 47: 45](R/W) Provides adjustable deskew settings for DQS1 signal of byte 6. */
+ uint64_t dqs1_dsk_adj7 : 3; /**< [ 50: 48](R/W) Provides adjustable deskew settings for DQS1 signal of byte 7. */
+ uint64_t dqs1_dsk_adj8 : 3; /**< [ 53: 51](R/W) Provides adjustable deskew settings for DQS1 signal of the ECC byte. */
+ uint64_t reserved_54_63 : 10;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_phy_ctl2_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_27_63 : 37;
+ uint64_t dqs8_dsk_adj : 3; /**< [ 26: 24](R/W) Provides adjustable deskew settings for DQS signal of the ECC byte. */
+ uint64_t dqs7_dsk_adj : 3; /**< [ 23: 21](R/W) Provides adjustable deskew settings for DQS signal of byte 7. */
+ uint64_t dqs6_dsk_adj : 3; /**< [ 20: 18](R/W) Provides adjustable deskew settings for DQS signal of byte 6. */
+ uint64_t dqs5_dsk_adj : 3; /**< [ 17: 15](R/W) Provides adjustable deskew settings for DQS signal of byte 5. */
+ uint64_t dqs4_dsk_adj : 3; /**< [ 14: 12](R/W) Provides adjustable deskew settings for DQS signal of byte 4. */
+ uint64_t dqs3_dsk_adj : 3; /**< [ 11: 9](R/W) Provides adjustable deskew settings for DQS signal of byte 3. */
+ uint64_t dqs2_dsk_adj : 3; /**< [ 8: 6](R/W) Provides adjustable deskew settings for DQS signal of byte 2. */
+ uint64_t dqs1_dsk_adj : 3; /**< [ 5: 3](R/W) Provides adjustable deskew settings for DQS signal of byte 1. */
+ uint64_t dqs0_dsk_adj : 3; /**< [ 2: 0](R/W) Provides adjustable deskew settings for DQS signal of byte 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t dqs0_dsk_adj : 3; /**< [ 2: 0](R/W) Provides adjustable deskew settings for DQS signal of byte 0. */
+ uint64_t dqs1_dsk_adj : 3; /**< [ 5: 3](R/W) Provides adjustable deskew settings for DQS signal of byte 1. */
+ uint64_t dqs2_dsk_adj : 3; /**< [ 8: 6](R/W) Provides adjustable deskew settings for DQS signal of byte 2. */
+ uint64_t dqs3_dsk_adj : 3; /**< [ 11: 9](R/W) Provides adjustable deskew settings for DQS signal of byte 3. */
+ uint64_t dqs4_dsk_adj : 3; /**< [ 14: 12](R/W) Provides adjustable deskew settings for DQS signal of byte 4. */
+ uint64_t dqs5_dsk_adj : 3; /**< [ 17: 15](R/W) Provides adjustable deskew settings for DQS signal of byte 5. */
+ uint64_t dqs6_dsk_adj : 3; /**< [ 20: 18](R/W) Provides adjustable deskew settings for DQS signal of byte 6. */
+ uint64_t dqs7_dsk_adj : 3; /**< [ 23: 21](R/W) Provides adjustable deskew settings for DQS signal of byte 7. */
+ uint64_t dqs8_dsk_adj : 3; /**< [ 26: 24](R/W) Provides adjustable deskew settings for DQS signal of the ECC byte. */
+ uint64_t reserved_27_63 : 37;
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_lmcx_phy_ctl2_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_54_63 : 10;
+ uint64_t dqs1_dsk_adj8 : 3; /**< [ 53: 51](R/W) Provides adjustable deskew settings for DQS1 signal of the ECC byte. */
+ uint64_t dqs1_dsk_adj7 : 3; /**< [ 50: 48](R/W) Provides adjustable deskew settings for DQS1 signal of byte 7. */
+ uint64_t dqs1_dsk_adj6 : 3; /**< [ 47: 45](R/W) Provides adjustable deskew settings for DQS1 signal of byte 6. */
+ uint64_t dqs1_dsk_adj5 : 3; /**< [ 44: 42](R/W) Provides adjustable deskew settings for DQS1 signal of byte 5. */
+ uint64_t dqs1_dsk_adj4 : 3; /**< [ 41: 39](R/W) Provides adjustable deskew settings for DQS1 signal of byte 4. */
+ uint64_t dqs1_dsk_adj3 : 3; /**< [ 38: 36](R/W) Provides adjustable deskew settings for DQS1 signal of byte 3. */
+ uint64_t dqs1_dsk_adj2 : 3; /**< [ 35: 33](R/W) Provides adjustable deskew settings for DQS1 signal of byte 2. */
+ uint64_t dqs1_dsk_adj1 : 3; /**< [ 32: 30](R/W) Provides adjustable deskew settings for DQS1 signal of byte 1. */
+ uint64_t dqs1_dsk_adj0 : 3; /**< [ 29: 27](R/W) Provides adjustable deskew settings for DQS1 signal of byte 0. */
+ uint64_t dqs0_dsk_adj8 : 3; /**< [ 26: 24](R/W) Provides adjustable deskew settings for DQS0 signal of the ECC byte. */
+ uint64_t dqs0_dsk_adj7 : 3; /**< [ 23: 21](R/W) Provides adjustable deskew settings for DQS0 signal of byte 7. */
+ uint64_t dqs0_dsk_adj6 : 3; /**< [ 20: 18](R/W) Provides adjustable deskew settings for DQS0 signal of byte 6. */
+ uint64_t dqs0_dsk_adj5 : 3; /**< [ 17: 15](R/W) Provides adjustable deskew settings for DQS0 signal of byte 5. */
+ uint64_t dqs0_dsk_adj4 : 3; /**< [ 14: 12](R/W) Provides adjustable deskew settings for DQS0 signal of byte 4. */
+ uint64_t dqs0_dsk_adj3 : 3; /**< [ 11: 9](R/W) Provides adjustable deskew settings for DQS0 signal of byte 3. */
+ uint64_t dqs0_dsk_adj2 : 3; /**< [ 8: 6](R/W) Provides adjustable deskew settings for DQS0 signal of byte 2. */
+ uint64_t dqs0_dsk_adj1 : 3; /**< [ 5: 3](R/W) Provides adjustable deskew settings for DQS0 signal of byte 1. */
+ uint64_t dqs0_dsk_adj0 : 3; /**< [ 2: 0](R/W) Provides adjustable deskew settings for DQS0 signal of byte 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t dqs0_dsk_adj0 : 3; /**< [ 2: 0](R/W) Provides adjustable deskew settings for DQS0 signal of byte 0. */
+ uint64_t dqs0_dsk_adj1 : 3; /**< [ 5: 3](R/W) Provides adjustable deskew settings for DQS0 signal of byte 1. */
+ uint64_t dqs0_dsk_adj2 : 3; /**< [ 8: 6](R/W) Provides adjustable deskew settings for DQS0 signal of byte 2. */
+ uint64_t dqs0_dsk_adj3 : 3; /**< [ 11: 9](R/W) Provides adjustable deskew settings for DQS0 signal of byte 3. */
+ uint64_t dqs0_dsk_adj4 : 3; /**< [ 14: 12](R/W) Provides adjustable deskew settings for DQS0 signal of byte 4. */
+ uint64_t dqs0_dsk_adj5 : 3; /**< [ 17: 15](R/W) Provides adjustable deskew settings for DQS0 signal of byte 5. */
+ uint64_t dqs0_dsk_adj6 : 3; /**< [ 20: 18](R/W) Provides adjustable deskew settings for DQS0 signal of byte 6. */
+ uint64_t dqs0_dsk_adj7 : 3; /**< [ 23: 21](R/W) Provides adjustable deskew settings for DQS0 signal of byte 7. */
+ uint64_t dqs0_dsk_adj8 : 3; /**< [ 26: 24](R/W) Provides adjustable deskew settings for DQS0 signal of the ECC byte. */
+ uint64_t dqs1_dsk_adj0 : 3; /**< [ 29: 27](R/W) Provides adjustable deskew settings for DQS1 signal of byte 0. */
+ uint64_t dqs1_dsk_adj1 : 3; /**< [ 32: 30](R/W) Provides adjustable deskew settings for DQS1 signal of byte 1. */
+ uint64_t dqs1_dsk_adj2 : 3; /**< [ 35: 33](R/W) Provides adjustable deskew settings for DQS1 signal of byte 2. */
+ uint64_t dqs1_dsk_adj3 : 3; /**< [ 38: 36](R/W) Provides adjustable deskew settings for DQS1 signal of byte 3. */
+ uint64_t dqs1_dsk_adj4 : 3; /**< [ 41: 39](R/W) Provides adjustable deskew settings for DQS1 signal of byte 4. */
+ uint64_t dqs1_dsk_adj5 : 3; /**< [ 44: 42](R/W) Provides adjustable deskew settings for DQS1 signal of byte 5. */
+ uint64_t dqs1_dsk_adj6 : 3; /**< [ 47: 45](R/W) Provides adjustable deskew settings for DQS1 signal of byte 6. */
+ uint64_t dqs1_dsk_adj7 : 3; /**< [ 50: 48](R/W) Provides adjustable deskew settings for DQS1 signal of byte 7. */
+ uint64_t dqs1_dsk_adj8 : 3; /**< [ 53: 51](R/W) Provides adjustable deskew settings for DQS1 signal of the ECC byte. */
+ uint64_t reserved_54_63 : 10;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_lmcx_phy_ctl2 bdk_lmcx_phy_ctl2_t;
+
+static inline uint64_t BDK_LMCX_PHY_CTL2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_PHY_CTL2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000250ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000250ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X) && (a<=3))
+ return 0x87e088000250ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000250ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_PHY_CTL2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_PHY_CTL2(a) bdk_lmcx_phy_ctl2_t
+#define bustype_BDK_LMCX_PHY_CTL2(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_PHY_CTL2(a) "LMCX_PHY_CTL2"
+#define device_bar_BDK_LMCX_PHY_CTL2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_PHY_CTL2(a) (a)
+#define arguments_BDK_LMCX_PHY_CTL2(a) (a),-1,-1,-1
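+
+/* Usage sketch (editorial addition, not part of the imported BDK header):
+ * apply one of the 3-bit adjustable DQS deskew settings described above,
+ * here for byte lane 0 using the CN8XXX field layout. BDK_CSR_MODIFY is
+ * assumed from bdk-csr.h; the adjust value is caller-chosen. */
+static inline void bdk_lmcx_phy_ctl2_adj_dqs0(bdk_node_t node, int lmc, int adj)
+{
+    BDK_CSR_MODIFY(c, node, BDK_LMCX_PHY_CTL2(lmc),
+        c.cn8.dqs0_dsk_adj = adj & 0x7); /* 3-bit deskew adjust for byte 0 */
+}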
+
+/**
+ * Register (RSL) lmc#_phy_ctl3
+ *
+ * LMC PHY Control Register
+ */
+union bdk_lmcx_phy_ctl3
+{
+ uint64_t u;
+ struct bdk_lmcx_phy_ctl3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_18_63 : 46;
+ uint64_t ddr_dimm1_ck1_en_clear : 1; /**< [ 17: 17](R/W1/H) Write one to clear DDR_DIMM1_CK_EN[1]. One shot operation. */
+ uint64_t ddr_dimm1_ck0_en_clear : 1; /**< [ 16: 16](R/W1/H) Write one to clear DDR_DIMM1_CK_EN[0]. One shot operation. */
+ uint64_t ddr_dimm0_ck1_en_clear : 1; /**< [ 15: 15](R/W1/H) Write one to clear DDR_DIMM0_CK_EN[1]. One shot operation. */
+ uint64_t ddr_dimm0_ck0_en_clear : 1; /**< [ 14: 14](R/W1/H) Write one to clear DDR_DIMM0_CK_EN[0]. One shot operation. */
+ uint64_t ddr_dimm1_ck1_en_set : 1; /**< [ 13: 13](R/W1/H) Write one to set DDR_DIMM1_CK_EN[1]. One shot operation. */
+ uint64_t ddr_dimm1_ck0_en_set : 1; /**< [ 12: 12](R/W1/H) Write one to set DDR_DIMM1_CK_EN[0]. One shot operation. */
+ uint64_t ddr_dimm0_ck1_en_set : 1; /**< [ 11: 11](R/W1/H) Write one to set DDR_DIMM0_CK_EN[1]. One shot operation. */
+ uint64_t ddr_dimm0_ck0_en_set : 1; /**< [ 10: 10](R/W1/H) Write one to set DDR_DIMM0_CK_EN[0]. One shot operation. */
+ uint64_t x4_clk_select_overwrite : 1;/**< [ 9: 9](R/W) Overwrite mode for the PHY's x4 clock select.
+ 0 = Hardware automatically asserts the PHY's x4 clk select signal
+ while running deskew training of an x4 DIMM (i.e., running the LMC_SEQ_SEL_E::VREF_INT sequence
+ with both LMC()_EXT_CONFIG[VREFINT_SEQ_DESKEW] and LMC()_CONFIG[MODE_X4DEV] set
+ to 1).
+ 1 = Enable overwrite mode for the PHY's x4 clock select. PHY's x4 clk select
+ signal is determined by the state of [X4_CLK_SELECT]. */
+ uint64_t x4_clk_select : 1; /**< [ 8: 8](R/W/H) Manually enable/disable the PHY's x4 clk select. Only valid when
+ [X4_CLK_SELECT_OVERWRITE] is one, otherwise hardware determines the value. */
+ uint64_t io_dcc_n : 2; /**< [ 7: 6](R/W) Duty cycle trim for IO. */
+ uint64_t io_dcc_p : 2; /**< [ 5: 4](R/W) Duty cycle trim for IO. */
+ uint64_t phy_dcc_n : 2; /**< [ 3: 2](R/W) Duty cycle corrector for PHY. This will be consumed by both DQS and CK bits. */
+ uint64_t phy_dcc_p : 2; /**< [ 1: 0](R/W) Duty cycle corrector for PHY. This will be consumed by both DQS and CK bits. */
+#else /* Word 0 - Little Endian */
+ uint64_t phy_dcc_p : 2; /**< [ 1: 0](R/W) Duty cycle corrector for PHY. This will be consumed by both DQS and CK bits. */
+ uint64_t phy_dcc_n : 2; /**< [ 3: 2](R/W) Duty cycle corrector for PHY. This will be consumed by both DQS and CK bits. */
+ uint64_t io_dcc_p : 2; /**< [ 5: 4](R/W) Duty cycle trim for IO. */
+ uint64_t io_dcc_n : 2; /**< [ 7: 6](R/W) Duty cycle trim for IO. */
+ uint64_t x4_clk_select : 1; /**< [ 8: 8](R/W/H) Manually enable/disable the PHY's x4 clk select. Only valid when
+ [X4_CLK_SELECT_OVERWRITE] is one, otherwise hardware determines the value. */
+ uint64_t x4_clk_select_overwrite : 1;/**< [ 9: 9](R/W) Overwrite mode for the PHY's x4 clock select.
+ 0 = Hardware automatically asserts the PHY's x4 clk select signal
+ while running deskew training of an x4 DIMM (i.e., running the LMC_SEQ_SEL_E::VREF_INT sequence
+ with both LMC()_EXT_CONFIG[VREFINT_SEQ_DESKEW] and LMC()_CONFIG[MODE_X4DEV] set
+ to 1).
+ 1 = Enable overwrite mode for the PHY's x4 clock select. PHY's x4 clk select
+ signal is determined by the state of [X4_CLK_SELECT]. */
+ uint64_t ddr_dimm0_ck0_en_set : 1; /**< [ 10: 10](R/W1/H) Write one to set DDR_DIMM0_CK_EN[0]. One shot operation. */
+ uint64_t ddr_dimm0_ck1_en_set : 1; /**< [ 11: 11](R/W1/H) Write one to set DDR_DIMM0_CK_EN[1]. One shot operation. */
+ uint64_t ddr_dimm1_ck0_en_set : 1; /**< [ 12: 12](R/W1/H) Write one to set DDR_DIMM1_CK_EN[0]. One shot operation. */
+ uint64_t ddr_dimm1_ck1_en_set : 1; /**< [ 13: 13](R/W1/H) Write one to set DDR_DIMM1_CK_EN[1]. One shot operation. */
+ uint64_t ddr_dimm0_ck0_en_clear : 1; /**< [ 14: 14](R/W1/H) Write one to clear DDR_DIMM0_CK_EN[0]. One shot operation. */
+ uint64_t ddr_dimm0_ck1_en_clear : 1; /**< [ 15: 15](R/W1/H) Write one to clear DDR_DIMM0_CK_EN[1]. One shot operation. */
+ uint64_t ddr_dimm1_ck0_en_clear : 1; /**< [ 16: 16](R/W1/H) Write one to clear DDR_DIMM1_CK_EN[0]. One shot operation. */
+ uint64_t ddr_dimm1_ck1_en_clear : 1; /**< [ 17: 17](R/W1/H) Write one to clear DDR_DIMM1_CK_EN[1]. One shot operation. */
+ uint64_t reserved_18_63 : 46;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_phy_ctl3_s cn; */
+};
+typedef union bdk_lmcx_phy_ctl3 bdk_lmcx_phy_ctl3_t;
+
+static inline uint64_t BDK_LMCX_PHY_CTL3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_PHY_CTL3(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e0880002f8ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_PHY_CTL3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_PHY_CTL3(a) bdk_lmcx_phy_ctl3_t
+#define bustype_BDK_LMCX_PHY_CTL3(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_PHY_CTL3(a) "LMCX_PHY_CTL3"
+#define device_bar_BDK_LMCX_PHY_CTL3(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_PHY_CTL3(a) (a)
+#define arguments_BDK_LMCX_PHY_CTL3(a) (a),-1,-1,-1
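+
+/* Usage sketch (editorial addition, not part of the imported BDK header):
+ * the DDR_DIMM*_CK*_EN bits above are one-shot set/clear pairs, so gating a
+ * DIMM clock is a single strobe rather than persistent state. CN9XXX only;
+ * assumes the one-shot bits read back as zero so the read-modify-write does
+ * not re-trigger them. */
+static inline void bdk_lmcx_phy_ctl3_dimm0_ck0(bdk_node_t node, int lmc, int enable)
+{
+    BDK_CSR_MODIFY(c, node, BDK_LMCX_PHY_CTL3(lmc),
+        if (enable)
+            c.s.ddr_dimm0_ck0_en_set = 1;    /* W1: sets DDR_DIMM0_CK_EN[0] */
+        else
+            c.s.ddr_dimm0_ck0_en_clear = 1); /* W1: clears DDR_DIMM0_CK_EN[0] */
+}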
+
+/**
+ * Register (RSL) lmc#_ppr_ctl
+ *
+ * LMC PPR Timing Register
+ * This register contains programmable timing and control parameters used
+ * when running the post package repair sequence. The timing fields
+ * LMC()_PPR_CTL[TPGMPST], LMC()_PPR_CTL[TPGM_EXIT] and LMC()_PPR_CTL[TPGM] need to be set
+ * to satisfy the minimum values given in the JEDEC DDR4 spec before
+ * running the PPR sequence. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] to run
+ * the PPR sequence.
+ *
+ * Running hard PPR may require LMC to issue the security key as four consecutive
+ * MR0 commands, each with a unique address field A[17:0]. Set the security
+ * key in the general purpose CSRs as follows:
+ *
+ * _ Security key 0 = LMC()_GENERAL_PURPOSE0[DATA]\<17:0\>.
+ * _ Security key 1 = LMC()_GENERAL_PURPOSE0[DATA]\<35:18\>.
+ * _ Security key 2 = LMC()_GENERAL_PURPOSE1[DATA]\<17:0\>.
+ * _ Security key 3 = LMC()_GENERAL_PURPOSE1[DATA]\<35:18\>.
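+ *
+ * A worked example with illustrative numbers (not from the JEDEC tables):
+ * at a 1 GHz DDR clock (TCYC = 1.0 ns), a hard PPR programming time of
+ * tPGM = 1000 ms gives [TPGM] = RNDUP[1e9 ns / (1048576 * 1.0 ns)] = 954.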
+ */
+union bdk_lmcx_ppr_ctl
+{
+ uint64_t u;
+ struct bdk_lmcx_ppr_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_27_63 : 37;
+ uint64_t lrank_sel : 3; /**< [ 26: 24](RO) Reserved. */
+ uint64_t skip_issue_security : 1; /**< [ 23: 23](R/W) Personality bit for the PPR sequence. When set, this field forces the sequence to skip
+ issuing four consecutive MR0 commands that supply the security key. */
+ uint64_t sppr : 1; /**< [ 22: 22](R/W) Personality bit for the PPR sequence. When set, this field forces the sequence to run
+ the soft PPR mode. */
+ uint64_t tpgm : 10; /**< [ 21: 12](R/W) Indicates the programming time (tPGM) constraint used when running the PPR sequence.
+
+ For hard PPR (LMC()_PPR_CTL[SPPR] = 0), set this field as follows:
+ RNDUP[TPGM(ns) / (1048576 * TCYC(ns))].
+
+ For soft PPR (LMC()_PPR_CTL[SPPR] = 1), set this field as follows:
+ RNDUP[TPGM(ns) / TCYC(ns)].
+
+ [TPGM] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock period (not data
+ rate). */
+ uint64_t tpgm_exit : 5; /**< [ 11: 7](R/W) Indicates the PPR exit time (tPGM_Exit) constraint used when running the PPR sequence.
+ Set this field as follows:
+ _ RNDUP[TPGM_EXIT(ns) / TCYC(ns)]
+
+ where [TPGM_EXIT] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock period
+ (not data rate). */
+ uint64_t tpgmpst : 7; /**< [ 6: 0](R/W) Indicates the new address setting time (tPGMPST) constraint used when running the PPR sequence.
+ Set this field as follows:
+
+ _ RNDUP[TPGMPST(ns) / (1024 * TCYC(ns))]
+
+ where [TPGMPST] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock period (not
+ data rate). */
+#else /* Word 0 - Little Endian */
+ uint64_t tpgmpst : 7; /**< [ 6: 0](R/W) Indicates the new address setting time (tPGMPST) constraint used when running the PPR sequence.
+ Set this field as follows:
+
+ _ RNDUP[TPGMPST(ns) / (1024 * TCYC(ns))]
+
+ where [TPGMPST] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock period (not
+ data rate). */
+ uint64_t tpgm_exit : 5; /**< [ 11: 7](R/W) Indicates the PPR exit time (tPGM_Exit) constraint used when running the PPR sequence.
+ Set this field as follows:
+ _ RNDUP[TPGM_EXIT(ns) / TCYC(ns)]
+
+ where [TPGM_EXIT] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock period
+ (not data rate). */
+ uint64_t tpgm : 10; /**< [ 21: 12](R/W) Indicates the programming time (tPGM) constraint used when running the PPR sequence.
+
+ For hard PPR (LMC()_PPR_CTL[SPPR] = 0), set this field as follows:
+ RNDUP[TPGM(ns) / (1048576 * TCYC(ns))].
+
+ For soft PPR (LMC()_PPR_CTL[SPPR] = 1), set this field as follows:
+ RNDUP[TPGM(ns) / TCYC(ns)].
+
+ [TPGM] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock period (not data
+ rate). */
+ uint64_t sppr : 1; /**< [ 22: 22](R/W) Personality bit for the PPR sequence. When set, this field forces the sequence to run
+ the soft PPR mode. */
+ uint64_t skip_issue_security : 1; /**< [ 23: 23](R/W) Personality bit for the PPR sequence. When set, this field forces the sequence to skip
+ issuing four consecutive MR0 commands that supply the security key. */
+ uint64_t lrank_sel : 3; /**< [ 26: 24](RO) Reserved. */
+ uint64_t reserved_27_63 : 37;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_ppr_ctl_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_27_63 : 37;
+ uint64_t lrank_sel : 3; /**< [ 26: 24](R/W) Selects the logical rank on which to perform the post-package repair sequence.
+ Package ranks are selected by LMC()_MR_MPR_CTL[MR_WR_RANK]. */
+ uint64_t skip_issue_security : 1; /**< [ 23: 23](R/W) Personality bit for the PPR sequence. When set, this field forces the sequence to skip
+ issuing four consecutive MR0 commands that supply the security key. */
+ uint64_t sppr : 1; /**< [ 22: 22](R/W) Personality bit for the PPR sequence. When set, this field forces the sequence to run
+ the soft PPR mode. */
+ uint64_t tpgm : 10; /**< [ 21: 12](R/W) Indicates the programming time (tPGM) constraint used when running the PPR sequence.
+
+ For hard PPR (LMC()_PPR_CTL[SPPR] = 0), set this field as follows:
+ _ RNDUP[TPGM(ns) / (1048576 * TCYC(ns))]
+
+ For soft PPR (LMC()_PPR_CTL[SPPR] = 1), set this field as follows:
+ _ RNDUP[TPGM(ns) / TCYC(ns)]
+
+ where [TPGM] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock period
+ (based on the clock frequency, not the data rate). */
+ uint64_t tpgm_exit : 5; /**< [ 11: 7](R/W) Indicates the PPR exit time (tPGM_Exit) constraint used when running the PPR sequence.
+ Set this field as follows:
+
+ _ RNDUP[TPGM_EXIT(ns) / TCYC(ns)]
+
+ where [TPGM_EXIT] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock period
+ (based on the clock frequency, not the data rate). */
+ uint64_t tpgmpst : 7; /**< [ 6: 0](R/W) Indicates the new address setting time (tPGMPST) constraint used when running the PPR
+ sequence. Set this field as follows:
+
+ _ RNDUP[TPGMPST(ns) / (1024 * TCYC(ns))]
+
+ where [TPGMPST] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock period
+ (based on the clock frequency, not the data rate). */
+#else /* Word 0 - Little Endian */
+ uint64_t tpgmpst : 7; /**< [ 6: 0](R/W) Indicates the new address setting time (tPGMPST) constraint used when running the PPR
+ sequence. Set this field as follows:
+
+ _ RNDUP[TPGMPST(ns) / (1024 * TCYC(ns))]
+
+ where [TPGMPST] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock period
+ (based on the clock frequency, not the data rate). */
+ uint64_t tpgm_exit : 5; /**< [ 11: 7](R/W) Indicates the PPR exit time (tPGM_Exit) constraint used when running the PPR sequence.
+ Set this field as follows:
+
+ _ RNDUP[TPGM_EXIT(ns) / TCYC(ns)]
+
+ where [TPGM_EXIT] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock period
+ (based on the clock frequency, not the data rate). */
+ uint64_t tpgm : 10; /**< [ 21: 12](R/W) Indicates the programming time (tPGM) constraint used when running the PPR sequence.
+
+ For hard PPR (LMC()_PPR_CTL[SPPR] = 0), set this field as follows:
+ _ RNDUP[TPGM(ns) / (1048576 * TCYC(ns))]
+
+ For soft PPR (LMC()_PPR_CTL[SPPR] = 1), set this field as follows:
+ _ RNDUP[TPGM(ns) / TCYC(ns)]
+
+ where [TPGM] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock period
+ (based on the clock frequency, not the data rate). */
+ uint64_t sppr : 1; /**< [ 22: 22](R/W) Personality bit for the PPR sequence. When set, this field forces the sequence to run
+ the soft PPR mode. */
+ uint64_t skip_issue_security : 1; /**< [ 23: 23](R/W) Personality bit for the PPR sequence. When set, this field forces the sequence to skip
+ issuing four consecutive MR0 commands that supply the security key. */
+ uint64_t lrank_sel : 3; /**< [ 26: 24](R/W) Selects the logical rank on which to perform the post-package repair sequence.
+ Package ranks are selected by LMC()_MR_MPR_CTL[MR_WR_RANK]. */
+ uint64_t reserved_27_63 : 37;
+#endif /* Word 0 - End */
+ } cn9;
+ /* struct bdk_lmcx_ppr_ctl_cn9 cn81xx; */
+ /* struct bdk_lmcx_ppr_ctl_s cn88xx; */
+ struct bdk_lmcx_ppr_ctl_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_27_63 : 37;
+ uint64_t lrank_sel : 3; /**< [ 26: 24](R/W) Selects the logical rank on which to perform the post-package repair sequence.
+ Package ranks are selected by LMC()_MR_MPR_CTL[MR_WR_RANK]. */
+ uint64_t skip_issue_security : 1; /**< [ 23: 23](R/W) Personality bit for the PPR sequence. When set, this field forces the sequence to skip
+ issuing four consecutive MR0 commands that supply the security key. */
+ uint64_t sppr : 1; /**< [ 22: 22](R/W) Personality bit for the PPR sequence. When set, this field forces the sequence to run
+ the soft PPR mode. */
+ uint64_t tpgm : 10; /**< [ 21: 12](R/W) Indicates the programming time (tPGM) constraint used when running the PPR sequence.
+
+ For hard PPR (LMC()_PPR_CTL[SPPR] = 0), set this field as follows:
+ _ RNDUP[TPGM(ns) / (1048576 * TCYC(ns))]
+
+ For soft PPR (LMC()_PPR_CTL[SPPR] = 1), set this field as follows:
+ _ RNDUP[TPGM(ns) / TCYC(ns)]
+
+ where [TPGM] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock period
+ (based on the clock frequency, not the data rate). */
+ uint64_t tpgm_exit : 5; /**< [ 11: 7](R/W) Indicates the PPR exit time (tPGM_Exit) constraint used when running the PPR sequence.
+ Set this field as follows:
+
+ _ RNDUP[TPGM_EXIT(ns) / TCYC(ns)]
+
+ where [TPGM_EXIT] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock period
+ (based on the clock frequency, not the data rate). */
+ uint64_t tpgmpst : 7; /**< [ 6: 0](R/W) Indicates the new address setting time (tPGMPST) constraint used when running the PPR
+ sequence. Set this field as follows:
+
+ _ RNDUP[TPGMPST(ns) / (1024 * TCYC(ns))]
+
+ where [TPGMPST] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock period
+ (based on the clock frequency, not the data rate). */
+#else /* Word 0 - Little Endian */
+ uint64_t tpgmpst : 7; /**< [ 6: 0](R/W) Indicates the new address setting time (tPGMPST) constraint used when running the PPR
+ sequence. Set this field as follows:
+
+ _ RNDUP[TPGMPST(ns) / (1024 * TCYC(ns))]
+
+ where [TPGMPST] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock period
+ (based on the clock frequency, not the data rate). */
+ uint64_t tpgm_exit : 5; /**< [ 11: 7](R/W) Indicates the PPR exit time (tPGM_Exit) constraint used when running the PPR sequence.
+ Set this field as follows:
+
+ _ RNDUP[TPGM_EXIT(ns) / TCYC(ns)]
+
+ where [TPGM_EXIT] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock period
+ (based on the clock frequency, not the data rate). */
+ uint64_t tpgm : 10; /**< [ 21: 12](R/W) Indicates the programming time (tPGM) constraint used when running the PPR sequence.
+
+ For hard PPR (LMC()_PPR_CTL[SPPR] = 0), set this field as follows:
+ _ RNDUP[TPGM(ns) / (1048576 * TCYC(ns))]
+
+ For soft PPR (LMC()_PPR_CTL[SPPR] = 1), set this field as follows:
+ _ RNDUP[TPGM(ns) / TCYC(ns)]
+
+ where [TPGM] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock period
+ (based on the clock frequency, not the data rate). */
+ uint64_t sppr : 1; /**< [ 22: 22](R/W) Personality bit for the PPR sequence. When set, this field forces the sequence to run
+ the soft PPR mode. */
+ uint64_t skip_issue_security : 1; /**< [ 23: 23](R/W) Personality bit for the PPR sequence. When set, this field forces the sequence to skip
+ issuing four consecutive MR0 commands that supply the security key. */
+ uint64_t lrank_sel : 3; /**< [ 26: 24](R/W) Selects the logical rank on which to perform the post-package repair sequence.
+ Package ranks are selected by LMC()_MR_MPR_CTL[MR_WR_RANK]. */
+ uint64_t reserved_27_63 : 37;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_lmcx_ppr_ctl bdk_lmcx_ppr_ctl_t;
+
+static inline uint64_t BDK_LMCX_PPR_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_PPR_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0880003e0ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0880003e0ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e0880003e0ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e0880003e0ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_PPR_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_PPR_CTL(a) bdk_lmcx_ppr_ctl_t
+#define bustype_BDK_LMCX_PPR_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_PPR_CTL(a) "LMCX_PPR_CTL"
+#define device_bar_BDK_LMCX_PPR_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_PPR_CTL(a) (a)
+#define arguments_BDK_LMCX_PPR_CTL(a) (a),-1,-1,-1
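+
+/* A minimal illustrative sketch (not from the imported BDK sources) of how the
+ * PPR timing fields and the hard-PPR security key described above could be
+ * encoded. The helper names and the caller-supplied tcyc_ns clock period are
+ * assumptions for illustration; RNDUP[x/y] is computed as (x + y - 1) / y, and
+ * results are truncated to the bitfield widths. */
+static inline bdk_lmcx_ppr_ctl_t example_ppr_ctl_encode(uint64_t tpgm_ns,
+ uint64_t tpgm_exit_ns, uint64_t tpgmpst_ns, uint64_t tcyc_ns, int soft_ppr)
+{
+ bdk_lmcx_ppr_ctl_t c = { .u = 0 };
+ /* Hard PPR divides tPGM by 1048576 * tCYC; soft PPR divides by tCYC alone. */
+ uint64_t tpgm_div = soft_ppr ? tcyc_ns : 1048576ull * tcyc_ns;
+ c.s.sppr = soft_ppr ? 1 : 0;
+ c.s.tpgm = (tpgm_ns + tpgm_div - 1) / tpgm_div;
+ c.s.tpgm_exit = (tpgm_exit_ns + tcyc_ns - 1) / tcyc_ns;
+ c.s.tpgmpst = (tpgmpst_ns + 1024ull * tcyc_ns - 1) / (1024ull * tcyc_ns);
+ return c;
+}
+
+/* The four 18-bit security keys pack into the general-purpose CSRs exactly as
+ * the register description states: keys 0/1 into GENERAL_PURPOSE0[DATA]
+ * bits <17:0>/<35:18>, and keys 2/3 likewise into GENERAL_PURPOSE1[DATA]. */
+static inline void example_ppr_key_pack(const uint32_t key[4],
+ uint64_t *gp0_data, uint64_t *gp1_data)
+{
+ *gp0_data = ((uint64_t)(key[1] & 0x3ffff) << 18) | (key[0] & 0x3ffff);
+ *gp1_data = ((uint64_t)(key[3] & 0x3ffff) << 18) | (key[2] & 0x3ffff);
+}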
+
+/**
+ * Register (RSL) lmc#_ref_status
+ *
+ * LMC Refresh Pending Status Register
+ * This register contains the status of the refresh pending counters.
+ */
+union bdk_lmcx_ref_status
+{
+ uint64_t u;
+ struct bdk_lmcx_ref_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_ref_status_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t ref_pend_max_clr : 1; /**< [ 3: 3](R/W1C/H) Indicates that the number of pending refreshes has reached 7, requiring
+ software to clear the flag by setting this field to 1.
+ This is only useful when LMC()_EXT_CONFIG[REF_BLOCK] mode is engaged. */
+ uint64_t ref_count : 3; /**< [ 2: 0](RO/H) Reads back the number of pending refreshes that LMC has yet to execute. */
+#else /* Word 0 - Little Endian */
+ uint64_t ref_count : 3; /**< [ 2: 0](RO/H) Reads back the number of pending refreshes that LMC has yet to execute. */
+ uint64_t ref_pend_max_clr : 1; /**< [ 3: 3](R/W1C/H) Indicates that the number of pending refreshes has reached 7, requiring
+ software to clear the flag by setting this field to 1.
+ This is only useful when LMC()_EXT_CONFIG[REF_BLOCK] mode is engaged. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_lmcx_ref_status_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_6_63 : 58;
+ uint64_t ref_count1 : 3; /**< [ 5: 3](RO/H) Reads back the number of pending refreshes that LMC has yet to execute.
+ This counter updates every TREFI window at TREFI/2. Only active if
+ LMC()_EXT_CONFIG[REF_MODE] is using a pair refresh mode. This register
+ is only reset on cold reset. */
+ uint64_t ref_count0 : 3; /**< [ 2: 0](RO/H) Reads back the number of pending refreshes that LMC has yet to execute.
+ This counter updates every TREFI. This register is only reset on cold reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t ref_count0 : 3; /**< [ 2: 0](RO/H) Reads back the number of pending refreshes that LMC has yet to execute.
+ This counter updates every TREFI. This register is only reset on cold reset. */
+ uint64_t ref_count1 : 3; /**< [ 5: 3](RO/H) Reads back the number of pending refreshes that LMC has yet to execute.
+ This counter updates every TREFI window at TREFI/2. Only active if
+ LMC()_EXT_CONFIG[REF_MODE] is using a pair refresh mode. This register
+ is only reset on cold reset. */
+ uint64_t reserved_6_63 : 58;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_lmcx_ref_status bdk_lmcx_ref_status_t;
+
+static inline uint64_t BDK_LMCX_REF_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_REF_STATUS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0880000a0ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0880000a0ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e0880000a0ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e0880000a0ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_REF_STATUS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_REF_STATUS(a) bdk_lmcx_ref_status_t
+#define bustype_BDK_LMCX_REF_STATUS(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_REF_STATUS(a) "LMCX_REF_STATUS"
+#define device_bar_BDK_LMCX_REF_STATUS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_REF_STATUS(a) (a)
+#define arguments_BDK_LMCX_REF_STATUS(a) (a),-1,-1,-1
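+
+/* Illustrative sketch (not part of the BDK sources): summing the pending-refresh
+ * counters on a CN9XXX part. The raw value would normally come from a CSR read
+ * of BDK_LMCX_REF_STATUS(a); the helper name and treating the two counters as
+ * one combined backlog are assumptions for illustration. */
+static inline unsigned example_lmc_refreshes_pending_cn9(uint64_t ref_status)
+{
+ bdk_lmcx_ref_status_t r = { .u = ref_status };
+ /* [REF_COUNT0] updates every tREFI; [REF_COUNT1] is only active when
+ * LMC()_EXT_CONFIG[REF_MODE] selects a pair refresh mode. */
+ return r.cn9.ref_count0 + r.cn9.ref_count1;
+}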
+
+/**
+ * Register (RSL) lmc#_reset_ctl
+ *
+ * LMC Reset Control Register
+ * Specify the RSL base addresses for the block.
+ * Internal:
+ * "DDR4RST DDR4 DRAM parts have a RESET# pin. The DDR4RST CSR field controls the assertion of
+ * the 9xxx pin that attaches to RESET#. When DDR4RST is set, 9xxx asserts RESET#. When DDR4RST
+ * is clear, 9xxx de-asserts RESET#. DDR4RST is set on a cold reset. Domain chip resets do not
+ * affect the DDR4RST value. Outside of cold reset, only software CSR writes change the DDR4RST
+ * value. DDR4PDOMAIN enables preservation mode during a domain reset. When set, the LMC
+ * automatically puts the attached DDR4 DRAM parts into self refresh (see LMC()_SEQ_CTL[SEQ_SEL])
+ * at the beginning of a domain reset sequence, provided that LMC is up. When cleared, LMC does
+ * not put the attached DDR4 DRAM parts into self-refresh during a
+ * domain reset sequence. DDR4PDOMAIN is cleared on a cold reset. Domain chip resets do not
+ * affect the DDR4PDOMAIN value. Outside of cold reset, only software CSR writes change the
+ * DDR4PDOMAIN value. DDR4PSV May be useful for system software to determine when the DDR4
+ * contents have been preserved. Cleared by hardware during a cold reset. Never cleared by
+ * hardware during a domain reset. Set by hardware during a domain reset if the hardware
+ * automatically put the DDR4 DRAM into self-refresh during the reset sequence. Can also be
+ * written by software (to any value).""
+ */
+union bdk_lmcx_reset_ctl
+{
+ uint64_t u;
+ struct bdk_lmcx_reset_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t ddr3psv : 1; /**< [ 3: 3](R/W/H) Memory reset. 1 = DDR contents preserved.
+
+ May be useful for system software to determine when the DDR3/DDR4 contents have been
+ preserved.
+ Cleared by hardware during a cold reset. Never cleared by hardware during a warm/soft
+ reset. Set by hardware during a warm/soft reset if the hardware automatically put the
+ DDR3/DDR4
+ DRAM into self-refresh during the reset sequence.
+ Can also be written by software (to any value). */
+ uint64_t reserved_0_2 : 3;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_2 : 3;
+ uint64_t ddr3psv : 1; /**< [ 3: 3](R/W/H) Memory reset. 1 = DDR contents preserved.
+
+ May be useful for system software to determine when the DDR3/DDR4 contents have been
+ preserved.
+ Cleared by hardware during a cold reset. Never cleared by hardware during a warm/soft
+ reset. Set by hardware during a warm/soft reset if the hardware automatically put the
+ DDR3/DDR4
+ DRAM into self-refresh during the reset sequence.
+ Can also be written by software (to any value). */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_reset_ctl_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t ddr3psv : 1; /**< [ 3: 3](R/W/H) Memory reset. 1 = DDR contents preserved.
+
+ May be useful for system software to determine when the DDR3/DDR4 contents have been
+ preserved.
+ Cleared by hardware during a cold reset. Never cleared by hardware during a warm/soft
+ reset. Set by hardware during a warm/soft reset if the hardware automatically put the
+ DDR3/DDR4
+ DRAM into self-refresh during the reset sequence.
+ Can also be written by software (to any value). */
+ uint64_t ddr3psoft : 1; /**< [ 2: 2](R/W/H) Memory reset. 1 = Enable preserve mode during soft reset.
+
+ Enables preserve mode during a soft reset. When set, the DDR3/DDR4 controller hardware
+ automatically puts the attached DDR3/DDR4 DRAM parts into self-refresh at the beginning of
+ a
+ soft reset sequence (see LMC()_SEQ_CTL[SEQ_SEL]), provided that the DDR3/DDR4 controller
+ is up. When clear, the DDR3/DDR4 controller hardware does not put the attached DDR3/DDR4
+ DRAM
+ parts into self-refresh during a soft reset sequence.
+ DDR3PSOFT is cleared on a cold reset. Warm and soft chip resets do not affect the
+ DDR3PSOFT value. Outside of cold reset, only software CSR write operations change the
+ DDR3PSOFT value. */
+ uint64_t ddr3pwarm : 1; /**< [ 1: 1](R/W/H) Memory reset. 1 = Enable preserve mode during warm reset.
+
+ Enables preserve mode during a warm reset. When set, the DDR3/DDR4 controller hardware
+ automatically puts the attached DDR3/DDR4 DRAM parts into self-refresh at the beginning of
+ a
+ warm reset sequence (see LMC()_SEQ_CTL[SEQ_SEL]), provided that the DDR3/DDR4 controller
+ is up. When clear, the DDR3/DDR4 controller hardware does not put the attached DDR3/DDR4
+ DRAM
+ parts into self-refresh during a warm reset sequence.
+ DDR3PWARM is cleared on a cold reset. Warm and soft chip resets do not affect the
+ DDR3PWARM value. Outside of cold reset, only software CSR write operations change the
+ DDR3PWARM value.
+
+ Note that if a warm reset follows a soft reset, DDR3PWARM has no effect, as the DDR3/DDR4
+ controller is no longer up after any cold/warm/soft reset sequence. */
+ uint64_t ddr3rst : 1; /**< [ 0: 0](R/W/H) "Memory reset. 0 = Reset asserted; 1 = Reset deasserted.
+
+ DDR3/DDR4 DRAM parts have a RESET# pin. The DDR3RST CSR field controls the assertion of
+ the new CNXXXX pin that attaches to RESET#.
+ When DDR3RST is set, CNXXXX deasserts RESET#.
+ When DDR3RST is clear, CNXXXX asserts RESET#.
+ DDR3RST is cleared on a cold reset. Warm and soft chip resets do not affect the DDR3RST
+ value.
+ Outside of cold reset, only software CSR write operations change the DDR3RST value." */
+#else /* Word 0 - Little Endian */
+ uint64_t ddr3rst : 1; /**< [ 0: 0](R/W/H) "Memory reset. 0 = Reset asserted; 1 = Reset deasserted.
+
+ DDR3/DDR4 DRAM parts have a RESET# pin. The DDR3RST CSR field controls the assertion of
+ the new CNXXXX pin that attaches to RESET#.
+ When DDR3RST is set, CNXXXX deasserts RESET#.
+ When DDR3RST is clear, CNXXXX asserts RESET#.
+ DDR3RST is cleared on a cold reset. Warm and soft chip resets do not affect the DDR3RST
+ value.
+ Outside of cold reset, only software CSR write operations change the DDR3RST value." */
+ uint64_t ddr3pwarm : 1; /**< [ 1: 1](R/W/H) Memory reset. 1 = Enable preserve mode during warm reset.
+
+ Enables preserve mode during a warm reset. When set, the DDR3/DDR4 controller hardware
+ automatically puts the attached DDR3/DDR4 DRAM parts into self-refresh at the beginning of
+ a
+ warm reset sequence (see LMC()_SEQ_CTL[SEQ_SEL]), provided that the DDR3/DDR4 controller
+ is up. When clear, the DDR3/DDR4 controller hardware does not put the attached DDR3/DDR4
+ DRAM
+ parts into self-refresh during a warm reset sequence.
+ DDR3PWARM is cleared on a cold reset. Warm and soft chip resets do not affect the
+ DDR3PWARM value. Outside of cold reset, only software CSR write operations change the
+ DDR3PWARM value.
+
+ Note that if a warm reset follows a soft reset, DDR3PWARM has no effect, as the DDR3/DDR4
+ controller is no longer up after any cold/warm/soft reset sequence. */
+ uint64_t ddr3psoft : 1; /**< [ 2: 2](R/W/H) Memory reset. 1 = Enable preserve mode during soft reset.
+
+ Enables preserve mode during a soft reset. When set, the DDR3/DDR4 controller hardware
+ automatically puts the attached DDR3/DDR4 DRAM parts into self-refresh at the beginning of
+ a
+ soft reset sequence (see LMC()_SEQ_CTL[SEQ_SEL]), provided that the DDR3/DDR4 controller
+ is up. When clear, the DDR3/DDR4 controller hardware does not put the attached DDR3/DDR4
+ DRAM
+ parts into self-refresh during a soft reset sequence.
+ DDR3PSOFT is cleared on a cold reset. Warm and soft chip resets do not affect the
+ DDR3PSOFT value. Outside of cold reset, only software CSR write operations change the
+ DDR3PSOFT value. */
+ uint64_t ddr3psv : 1; /**< [ 3: 3](R/W/H) Memory reset. 1 = DDR contents preserved.
+
+ May be useful for system software to determine when the DDR3/DDR4 contents have been
+ preserved.
+ Cleared by hardware during a cold reset. Never cleared by hardware during a warm/soft
+ reset. Set by hardware during a warm/soft reset if the hardware automatically put the
+ DDR3/DDR4
+ DRAM into self-refresh during the reset sequence.
+ Can also be written by software (to any value). */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_lmcx_reset_ctl_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_3_63 : 61;
+ uint64_t ddr4psv : 1; /**< [ 2: 2](R/W/H) Memory reset. 1 = DDR contents preserved.
+
+ May be useful for system software to determine when the DDR4 contents have
+ been preserved. Cleared by hardware during a cold reset. Never cleared by
+ hardware during a core domain reset. Set by hardware during a core domain reset
+ if the hardware automatically put the DDR4 DRAM into self-refresh during the
+ reset sequence. Can also be written by software (to any value). */
+ uint64_t ddr4pdomain : 1; /**< [ 1: 1](R/W/H) Memory reset. 1 = Enable preserve mode during core domain reset.
+
+ Enables preserve mode during a core domain reset. When set, the memory controller hardware
+ automatically puts the attached DDR4 DRAM parts into self-refresh at the beginning of a
+ core domain reset sequence (see LMC()_SEQ_CTL[SEQ_SEL]), provided that the controller is
+ up. When clear, the controller hardware does not put the attached DDR4 DRAM parts into
+ self-refresh during a core domain reset sequence.
+
+ DDR4PDOMAIN is cleared on a cold reset. Core domain resets do not affect the
+ DDR4PDOMAIN value. Outside of cold reset, only software CSR write operations change the
+ DDR4PDOMAIN value. */
+ uint64_t ddr4rst : 1; /**< [ 0: 0](R/W/H) "Memory reset. 0 = Reset asserted; 1 = Reset deasserted.
+
+ DDR4 DRAM parts have a RESET# pin. The DDR4RST CSR field controls the assertion of
+ the new CNXXXX pin that attaches to RESET#.
+ When DDR4RST is set, CNXXXX deasserts RESET#.
+ When DDR4RST is clear, CNXXXX asserts RESET#.
+ DDR4RST is cleared on a cold reset. Core domain resets do not affect the DDR4RST
+ value.
+ Outside of cold reset, only software CSR write operations change the DDR4RST value." */
+#else /* Word 0 - Little Endian */
+ uint64_t ddr4rst : 1; /**< [ 0: 0](R/W/H) "Memory reset. 0 = Reset asserted; 1 = Reset deasserted.
+
+ DDR4 DRAM parts have a RESET# pin. The DDR4RST CSR field controls the assertion of
+ the new CNXXXX pin that attaches to RESET#.
+ When DDR4RST is set, CNXXXX deasserts RESET#.
+ When DDR4RST is clear, CNXXXX asserts RESET#.
+ DDR4RST is cleared on a cold reset. Core domain resets do not affect the DDR4RST
+ value.
+ Outside of cold reset, only software CSR write operations change the DDR4RST value." */
+ uint64_t ddr4pdomain : 1; /**< [ 1: 1](R/W/H) Memory reset. 1 = Enable preserve mode during core domain reset.
+
+ Enables preserve mode during a core domain reset. When set, the memory controller hardware
+ automatically puts the attached DDR4 DRAM parts into self-refresh at the beginning of a
+ core domain reset sequence (see LMC()_SEQ_CTL[SEQ_SEL]), provided that the controller is
+ up. When clear, the controller hardware does not put the attached DDR4 DRAM parts into
+ self-refresh during a core domain reset sequence.
+
+ DDR4PDOMAIN is cleared on a cold reset. Core domain resets do not affect the
+ DDR4PDOMAIN value. Outside of cold reset, only software CSR write operations change the
+ DDR4PDOMAIN value. */
+ uint64_t ddr4psv : 1; /**< [ 2: 2](R/W/H) Memory reset. 1 = DDR contents preserved.
+
+ May be useful for system software to determine when the DDR4 contents have
+ been preserved. Cleared by hardware during a cold reset. Never cleared by
+ hardware during a core domain reset. Set by hardware during a core domain reset
+ if the hardware automatically put the DDR4 DRAM into self-refresh during the
+ reset sequence. Can also be written by software (to any value). */
+ uint64_t reserved_3_63 : 61;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_lmcx_reset_ctl bdk_lmcx_reset_ctl_t;
+
+static inline uint64_t BDK_LMCX_RESET_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_RESET_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000180ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000180ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000180ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000180ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_RESET_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_RESET_CTL(a) bdk_lmcx_reset_ctl_t
+#define bustype_BDK_LMCX_RESET_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_RESET_CTL(a) "LMCX_RESET_CTL"
+#define device_bar_BDK_LMCX_RESET_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_RESET_CTL(a) (a)
+#define arguments_BDK_LMCX_RESET_CTL(a) (a),-1,-1,-1
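+
+/* Illustrative sketch (not part of the BDK sources): enabling DDR-content
+ * preservation and releasing RESET# on a CN8XXX part, per the field
+ * descriptions above. The raw value would come from a CSR read of
+ * BDK_LMCX_RESET_CTL(a) and be written back afterwards; the helper name is
+ * an assumption. */
+static inline uint64_t example_reset_ctl_preserve_and_release(uint64_t reset_ctl)
+{
+ bdk_lmcx_reset_ctl_t c = { .u = reset_ctl };
+ c.cn8.ddr3pwarm = 1; /* self-refresh preservation on warm reset */
+ c.cn8.ddr3psoft = 1; /* ... and on soft reset */
+ c.cn8.ddr3rst = 1; /* 1 = deassert RESET# to the DRAM */
+ return c.u;
+}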
+
+/**
+ * Register (RSL) lmc#_retry_config
+ *
+ * LMC Automatic Retry Configuration Registers
+ * This register configures automatic retry operation.
+ */
+union bdk_lmcx_retry_config
+{
+ uint64_t u;
+ struct bdk_lmcx_retry_config_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_56_63 : 8;
+ uint64_t max_errors : 24; /**< [ 55: 32](R/W) Maximum number of errors before errors are ignored. */
+ uint64_t reserved_13_31 : 19;
+ uint64_t error_continue : 1; /**< [ 12: 12](WO) If LMC()_RETRY_CONFIG[AUTO_ERROR_CONTINUE] is cleared, LMC will wait
+ for a one to be written to LMC()_RETRY_CONFIG[ERROR_CONTINUE] before
+ continuing operations after an error. */
+ uint64_t reserved_9_11 : 3;
+ uint64_t auto_error_continue : 1; /**< [ 8: 8](R/W) When set, LMC will automatically proceed with error handling and normal
+ operation after an error occurs. If clear, LMC will cease all operations
+ except for refresh as soon as possible, and will not continue with error
+ handling or normal operation until LMC()_RETRY_CONFIG[ERROR_CONTINUE]
+ is written with a one. */
+ uint64_t reserved_5_7 : 3;
+ uint64_t pulse_count_auto_clr : 1; /**< [ 4: 4](R/W) When set, LMC()_RETRY_STATUS[ERROR_PULSE_COUNT_VALID] will clear
+ whenever the error interrupt is cleared. */
+ uint64_t reserved_1_3 : 3;
+ uint64_t retry_enable : 1; /**< [ 0: 0](R/W) Enable retry on errors. */
+#else /* Word 0 - Little Endian */
+ uint64_t retry_enable : 1; /**< [ 0: 0](R/W) Enable retry on errors. */
+ uint64_t reserved_1_3 : 3;
+ uint64_t pulse_count_auto_clr : 1; /**< [ 4: 4](R/W) When set, LMC()_RETRY_STATUS[ERROR_PULSE_COUNT_VALID] will clear
+ whenever the error interrupt is cleared. */
+ uint64_t reserved_5_7 : 3;
+ uint64_t auto_error_continue : 1; /**< [ 8: 8](R/W) When set, LMC will automatically proceed with error handling and normal
+ operation after an error occurs. If clear, LMC will cease all operations
+ except for refresh as soon as possible, and will not continue with error
+ handling or normal operation until LMC()_RETRY_CONFIG[ERROR_CONTINUE]
+ is written with a one. */
+ uint64_t reserved_9_11 : 3;
+ uint64_t error_continue : 1; /**< [ 12: 12](WO) If LMC()_RETRY_CONFIG[AUTO_ERROR_CONTINUE] is cleared, LMC will wait
+ for a one to be written to LMC()_RETRY_CONFIG[ERROR_CONTINUE] before
+ continuing operations after an error. */
+ uint64_t reserved_13_31 : 19;
+ uint64_t max_errors : 24; /**< [ 55: 32](R/W) Maximum number of errors before errors are ignored. */
+ uint64_t reserved_56_63 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_retry_config_s cn; */
+};
+typedef union bdk_lmcx_retry_config bdk_lmcx_retry_config_t;
+
+static inline uint64_t BDK_LMCX_RETRY_CONFIG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_RETRY_CONFIG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000110ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000110ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000110ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000110ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_RETRY_CONFIG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_RETRY_CONFIG(a) bdk_lmcx_retry_config_t
+#define bustype_BDK_LMCX_RETRY_CONFIG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_RETRY_CONFIG(a) "LMCX_RETRY_CONFIG"
+#define device_bar_BDK_LMCX_RETRY_CONFIG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_RETRY_CONFIG(a) (a)
+#define arguments_BDK_LMCX_RETRY_CONFIG(a) (a),-1,-1,-1
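+
+/* Illustrative sketch (not part of the BDK sources): composing a retry
+ * configuration that enables retries and lets LMC continue automatically
+ * after an error, per the field descriptions above. The helper name and
+ * chosen policy are assumptions for illustration. */
+static inline uint64_t example_retry_config_value(uint64_t max_errors)
+{
+ bdk_lmcx_retry_config_t c = { .u = 0 };
+ c.s.retry_enable = 1; /* enable retry on errors */
+ c.s.auto_error_continue = 1; /* no manual [ERROR_CONTINUE] write needed */
+ c.s.pulse_count_auto_clr = 1; /* pulse-count valid flag clears with the interrupt */
+ c.s.max_errors = max_errors; /* errors are ignored beyond this count */
+ return c.u;
+}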
+
+/**
+ * Register (RSL) lmc#_retry_status
+ *
+ * LMC Automatic Retry Status Registers
+ * This register provides status on automatic retry operation.
+ */
+union bdk_lmcx_retry_status
+{
+ uint64_t u;
+ struct bdk_lmcx_retry_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t clear_error_count : 1; /**< [ 63: 63](WO) Clear the error count; one-shot operation. */
+ uint64_t clear_error_pulse_count : 1;/**< [ 62: 62](WO) Clear the error pulse count; one-shot operation. */
+ uint64_t reserved_57_61 : 5;
+ uint64_t error_pulse_count_valid : 1;/**< [ 56: 56](RO/H) When set, indicates that [ERROR_PULSE_COUNT] holds a valid count for the
+ last error pulse. */
+ uint64_t error_pulse_count_sat : 1; /**< [ 55: 55](RO/H) When set and the count is valid, indicates that the counter has saturated,
+ which effectively indicates that a command error has occurred and not a CRC
+ error. */
+ uint64_t reserved_52_54 : 3;
+ uint64_t error_pulse_count : 4; /**< [ 51: 48](RO/H) Count of cycles in last error pulse since clear. This count will be cleared
+ either by clearing the interrupt or writing a one to the pulse count clear bit. */
+ uint64_t reserved_45_47 : 3;
+ uint64_t error_sequence : 5; /**< [ 44: 40](RO/H) Sequence number for sequence that was running when error occurred. */
+ uint64_t reserved_33_39 : 7;
+ uint64_t error_type : 1; /**< [ 32: 32](RO/H) Error type:
+ 0 = Error during a sequence run.
+ 1 = Error during normal operation, which means a read or write operation. Effectively this
+ means a command error for a read or write operation, or a CRC error for a write data
+ operation. */
+ uint64_t reserved_24_31 : 8;
+ uint64_t error_count : 24; /**< [ 23: 0](RO/H) Number of errors encountered since last cleared. */
+#else /* Word 0 - Little Endian */
+ uint64_t error_count : 24; /**< [ 23: 0](RO/H) Number of errors encountered since last cleared. */
+ uint64_t reserved_24_31 : 8;
+ uint64_t error_type : 1; /**< [ 32: 32](RO/H) Error type:
+ 0 = Error during a sequence run.
+ 1 = Error during normal operation, which means a read or write operation. Effectively this
+ means a command error for a read or write operation, or a CRC error for a write data
+ operation. */
+ uint64_t reserved_33_39 : 7;
+ uint64_t error_sequence : 5; /**< [ 44: 40](RO/H) Sequence number for sequence that was running when error occurred. */
+ uint64_t reserved_45_47 : 3;
+ uint64_t error_pulse_count : 4; /**< [ 51: 48](RO/H) Count of cycles in last error pulse since clear. This count will be cleared
+ either by clearing the interrupt or writing a one to the pulse count clear bit. */
+ uint64_t reserved_52_54 : 3;
+ uint64_t error_pulse_count_sat : 1; /**< [ 55: 55](RO/H) When set and the count is valid, indicates that the counter has saturated,
+ which effectively indicates that a command error has occurred and not a CRC
+ error. */
+ uint64_t error_pulse_count_valid : 1;/**< [ 56: 56](RO/H) When set, indicates that [ERROR_PULSE_COUNT] holds a valid count for the
+ last error pulse. */
+ uint64_t reserved_57_61 : 5;
+ uint64_t clear_error_pulse_count : 1;/**< [ 62: 62](WO) Clear the error pulse count; one-shot operation. */
+ uint64_t clear_error_count : 1; /**< [ 63: 63](WO) Clear the error count; one-shot operation. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_retry_status_s cn; */
+};
+typedef union bdk_lmcx_retry_status bdk_lmcx_retry_status_t;
+
+static inline uint64_t BDK_LMCX_RETRY_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_RETRY_STATUS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000118ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000118ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000118ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000118ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_RETRY_STATUS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_RETRY_STATUS(a) bdk_lmcx_retry_status_t
+#define bustype_BDK_LMCX_RETRY_STATUS(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_RETRY_STATUS(a) "LMCX_RETRY_STATUS"
+#define device_bar_BDK_LMCX_RETRY_STATUS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_RETRY_STATUS(a) (a)
+#define arguments_BDK_LMCX_RETRY_STATUS(a) (a),-1,-1,-1
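+
+/* Illustrative sketch (not part of the BDK sources): decoding the retry
+ * status and building the write-one-shot value that clears both counters.
+ * The helper name is an assumption for illustration. */
+static inline uint64_t example_retry_status_ack(uint64_t retry_status,
+ unsigned *error_count)
+{
+ bdk_lmcx_retry_status_t s = { .u = retry_status };
+ *error_count = s.s.error_count; /* errors since last cleared */
+ s.u = 0;
+ s.s.clear_error_count = 1; /* WO: clears [ERROR_COUNT] */
+ s.s.clear_error_pulse_count = 1; /* WO: clears [ERROR_PULSE_COUNT] */
+ return s.u; /* value to write back to the CSR */
+}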
+
+/**
+ * Register (RSL) lmc#_rlevel_ctl
+ *
+ * LMC Read Level Control Register
+ */
+union bdk_lmcx_rlevel_ctl
+{
+ uint64_t u;
+ struct bdk_lmcx_rlevel_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t rank3_status : 2; /**< [ 47: 46](RO/H) Indicates the status of rank3 read-leveling and where the BYTEn programming came from:
+ 0x0 = LMC()_RLEVEL_RANK(3) values are their reset value.
+ 0x1 = LMC()_RLEVEL_RANK(3) values were set via a CSR write.
+ 0x2 = Read-leveling sequence currently in progress (LMC()_RLEVEL_RANK(3) values are
+ unpredictable).
+ 0x3 = LMC()_RLEVEL_RANK(3) values came from a complete read-leveling sequence. */
+ uint64_t rank2_status : 2; /**< [ 45: 44](RO/H) Indicates the status of rank2 read-leveling and where the BYTEn programming came from:
+ 0x0 = LMC()_RLEVEL_RANK(2) values are their reset value.
+ 0x1 = LMC()_RLEVEL_RANK(2) values were set via a CSR write.
+ 0x2 = Read-leveling sequence currently in progress (LMC()_RLEVEL_RANK(2) values are
+ unpredictable).
+ 0x3 = LMC()_RLEVEL_RANK(2) values came from a complete read-leveling sequence. */
+ uint64_t rank1_status : 2; /**< [ 43: 42](RO/H) Indicates the status of rank1 read-leveling and where the BYTEn programming came from:
+ 0x0 = LMC()_RLEVEL_RANK(1) values are their reset value.
+ 0x1 = LMC()_RLEVEL_RANK(1) values were set via a CSR write.
+ 0x2 = Read-leveling sequence currently in progress (LMC()_RLEVEL_RANK(1) values are
+ unpredictable).
+ 0x3 = LMC()_RLEVEL_RANK(1) values came from a complete read-leveling sequence. */
+ uint64_t rank0_status : 2; /**< [ 41: 40](RO/H) Indicates the status of rank0 read-leveling and where the BYTEn programming came from:
+ 0x0 = LMC()_RLEVEL_RANK(0) values are their reset value.
+ 0x1 = LMC()_RLEVEL_RANK(0) values were set via a CSR write.
+ 0x2 = Read-leveling sequence currently in progress (LMC()_RLEVEL_RANK(0) values are
+ unpredictable).
+ 0x3 = LMC()_RLEVEL_RANK(0) values came from a complete read-leveling sequence. */
+ uint64_t reserved_33_39 : 7;
+ uint64_t tccd_sel : 1; /**< [ 32: 32](RO) Reserved. */
+ uint64_t pattern : 8; /**< [ 31: 24](R/W) Sets the data pattern used to match in read leveling operations. */
+ uint64_t reserved_22_23 : 2;
+ uint64_t delay_unload_3 : 1; /**< [ 21: 21](R/W) Reserved, must be set.
+ Internal:
+ When set, unload the PHY silo one cycle later during
+ read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 3. [DELAY_UNLOAD_3] should
+ normally be set, particularly at higher speeds. */
+ uint64_t delay_unload_2 : 1; /**< [ 20: 20](R/W) Reserved, must be set.
+ Internal:
+ When set, unload the PHY silo one cycle later during
+ read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 2. [DELAY_UNLOAD_2] should
+ normally be set. */
+ uint64_t delay_unload_1 : 1; /**< [ 19: 19](R/W) Reserved, must be set.
+ Internal:
+ When set, unload the PHY silo one cycle later during
+ read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 1. [DELAY_UNLOAD_1] should
+ normally be set. */
+ uint64_t delay_unload_0 : 1; /**< [ 18: 18](R/W) Reserved, must be set.
+ Internal:
+ When set, unload the PHY silo one cycle later during
+ read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 0. [DELAY_UNLOAD_0] should
+ normally be set. */
+ uint64_t bitmask : 8; /**< [ 17: 10](R/W) Mask to select bit lanes on which read leveling feedback is returned when [OR_DIS] is set to 1. */
+ uint64_t or_dis : 1; /**< [ 9: 9](R/W) Disable ORing of bits in a byte lane when computing the read leveling bitmask. [OR_DIS]
+ should normally not be set. */
+ uint64_t offset_en : 1; /**< [ 8: 8](R/W) When set, LMC attempts to select the read leveling setting that is
+ LMC()_RLEVEL_CTL[OFFSET] settings earlier than the last passing read leveling setting
+ in the largest contiguous sequence of passing settings. When clear, or if the setting
+ selected by LMC()_RLEVEL_CTL[OFFSET] did not pass, LMC selects the middle setting in
+ the largest contiguous sequence of passing settings, rounding earlier when necessary. */
+ uint64_t offset : 4; /**< [ 7: 4](R/W) The offset used when LMC()_RLEVEL_CTL[OFFSET_EN] is set. */
+ uint64_t byte : 4; /**< [ 3: 0](R/W) 0 \<= BYTE \<= 8. Byte index for which bitmask results are saved in LMC()_RLEVEL_DBG. */
+#else /* Word 0 - Little Endian */
+ uint64_t byte : 4; /**< [ 3: 0](R/W) 0 \<= BYTE \<= 8. Byte index for which bitmask results are saved in LMC()_RLEVEL_DBG. */
+ uint64_t offset : 4; /**< [ 7: 4](R/W) The offset used when LMC()_RLEVEL_CTL[OFFSET_EN] is set. */
+ uint64_t offset_en : 1; /**< [ 8: 8](R/W) When set, LMC attempts to select the read leveling setting that is
+ LMC()_RLEVEL_CTL[OFFSET] settings earlier than the last passing read leveling setting
+ in the largest contiguous sequence of passing settings. When clear, or if the setting
+ selected by LMC()_RLEVEL_CTL[OFFSET] did not pass, LMC selects the middle setting in
+ the largest contiguous sequence of passing settings, rounding earlier when necessary. */
+ uint64_t or_dis : 1; /**< [ 9: 9](R/W) Disable ORing of bits in a byte lane when computing the read leveling bitmask. [OR_DIS]
+ should normally not be set. */
+ uint64_t bitmask : 8; /**< [ 17: 10](R/W) Mask to select bit lanes on which read leveling feedback is returned when [OR_DIS] is set to 1. */
+ uint64_t delay_unload_0 : 1; /**< [ 18: 18](R/W) Reserved, must be set.
+ Internal:
+ When set, unload the PHY silo one cycle later during
+ read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 0. [DELAY_UNLOAD_0] should
+ normally be set. */
+ uint64_t delay_unload_1 : 1; /**< [ 19: 19](R/W) Reserved, must be set.
+ Internal:
+ When set, unload the PHY silo one cycle later during
+ read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 1. [DELAY_UNLOAD_1] should
+ normally be set. */
+ uint64_t delay_unload_2 : 1; /**< [ 20: 20](R/W) Reserved, must be set.
+ Internal:
+ When set, unload the PHY silo one cycle later during
+ read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 2. [DELAY_UNLOAD_2] should
+ normally be set. */
+ uint64_t delay_unload_3 : 1; /**< [ 21: 21](R/W) Reserved, must be set.
+ Internal:
+ When set, unload the PHY silo one cycle later during
+ read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 3. [DELAY_UNLOAD_3] should
+ normally be set, particularly at higher speeds. */
+ uint64_t reserved_22_23 : 2;
+ uint64_t pattern : 8; /**< [ 31: 24](R/W) Sets the data pattern used to match in read leveling operations. */
+ uint64_t tccd_sel : 1; /**< [ 32: 32](RO) Reserved. */
+ uint64_t reserved_33_39 : 7;
+ uint64_t rank0_status : 2; /**< [ 41: 40](RO/H) Indicates the status of rank0 read-leveling and where the BYTEn programming came from:
+ 0x0 = LMC()_RLEVEL_RANK(0) values are their reset value.
+ 0x1 = LMC()_RLEVEL_RANK(0) values were set via a CSR write.
+ 0x2 = Read-leveling sequence currently in progress (LMC()_RLEVEL_RANK(0) values are
+ unpredictable).
+ 0x3 = LMC()_RLEVEL_RANK(0) values came from a complete read-leveling sequence. */
+ uint64_t rank1_status : 2; /**< [ 43: 42](RO/H) Indicates the status of rank1 read-leveling and where the BYTEn programming came from:
+ 0x0 = LMC()_RLEVEL_RANK(1) values are their reset value.
+ 0x1 = LMC()_RLEVEL_RANK(1) values were set via a CSR write.
+ 0x2 = Read-leveling sequence currently in progress (LMC()_RLEVEL_RANK(1) values are
+ unpredictable).
+ 0x3 = LMC()_RLEVEL_RANK(1) values came from a complete read-leveling sequence. */
+ uint64_t rank2_status : 2; /**< [ 45: 44](RO/H) Indicates the status of rank2 read-leveling and where the BYTEn programming came from:
+ 0x0 = LMC()_RLEVEL_RANK(2) values are their reset value.
+ 0x1 = LMC()_RLEVEL_RANK(2) values were set via a CSR write.
+ 0x2 = Read-leveling sequence currently in progress (LMC()_RLEVEL_RANK(2) values are
+ unpredictable).
+ 0x3 = LMC()_RLEVEL_RANK(2) values came from a complete read-leveling sequence. */
+ uint64_t rank3_status : 2; /**< [ 47: 46](RO/H) Indicates the status of rank3 read-leveling and where the BYTEn programming came from:
+ 0x0 = LMC()_RLEVEL_RANK(3) values are their reset value.
+ 0x1 = LMC()_RLEVEL_RANK(3) values were set via a CSR write.
+ 0x2 = Read-leveling sequence currently in progress (LMC()_RLEVEL_RANK(3) values are
+ unpredictable).
+ 0x3 = LMC()_RLEVEL_RANK(3) values came from a complete read-leveling sequence. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_rlevel_ctl_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t tccd_sel : 1; /**< [ 32: 32](RO) Reserved. */
+ uint64_t pattern : 8; /**< [ 31: 24](R/W) Sets the data pattern used to match in read leveling operations. */
+ uint64_t reserved_22_23 : 2;
+ uint64_t delay_unload_3 : 1; /**< [ 21: 21](R/W) Reserved, must be set.
+ Internal:
+ When set, unload the PHY silo one cycle later during
+ read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 3. [DELAY_UNLOAD_3] should
+ normally be set, particularly at higher speeds. */
+ uint64_t delay_unload_2 : 1; /**< [ 20: 20](R/W) Reserved, must be set.
+ Internal:
+ When set, unload the PHY silo one cycle later during
+ read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 2. [DELAY_UNLOAD_2] should
+ normally be set. */
+ uint64_t delay_unload_1 : 1; /**< [ 19: 19](R/W) Reserved, must be set.
+ Internal:
+ When set, unload the PHY silo one cycle later during
+ read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 1. [DELAY_UNLOAD_1] should
+ normally be set. */
+ uint64_t delay_unload_0 : 1; /**< [ 18: 18](R/W) Reserved, must be set.
+ Internal:
+ When set, unload the PHY silo one cycle later during
+ read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 0. [DELAY_UNLOAD_0] should
+ normally be set. */
+ uint64_t bitmask : 8; /**< [ 17: 10](R/W) Mask to select bit lanes on which read leveling feedback is returned when [OR_DIS] is set to 1. */
+ uint64_t or_dis : 1; /**< [ 9: 9](R/W) Disable ORing of bits in a byte lane when computing the read leveling bitmask. [OR_DIS]
+ should normally not be set. */
+ uint64_t offset_en : 1; /**< [ 8: 8](R/W) When set, LMC attempts to select the read leveling setting that is
+ LMC()_RLEVEL_CTL[OFFSET] settings earlier than the last passing read leveling setting
+ in the largest contiguous sequence of passing settings. When clear, or if the setting
+ selected by LMC()_RLEVEL_CTL[OFFSET] did not pass, LMC selects the middle setting in
+ the largest contiguous sequence of passing settings, rounding earlier when necessary. */
+ uint64_t offset : 4; /**< [ 7: 4](R/W) The offset used when LMC()_RLEVEL_CTL[OFFSET_EN] is set. */
+ uint64_t byte : 4; /**< [ 3: 0](R/W) 0 \<= BYTE \<= 8. Byte index for which bitmask results are saved in LMC()_RLEVEL_DBG. */
+#else /* Word 0 - Little Endian */
+ uint64_t byte : 4; /**< [ 3: 0](R/W) 0 \<= BYTE \<= 8. Byte index for which bitmask results are saved in LMC()_RLEVEL_DBG. */
+ uint64_t offset : 4; /**< [ 7: 4](R/W) The offset used when LMC()_RLEVEL_CTL[OFFSET_EN] is set. */
+ uint64_t offset_en : 1; /**< [ 8: 8](R/W) When set, LMC attempts to select the read leveling setting that is
+ LMC()_RLEVEL_CTL[OFFSET] settings earlier than the last passing read leveling setting
+ in the largest contiguous sequence of passing settings. When clear, or if the setting
+ selected by LMC()_RLEVEL_CTL[OFFSET] did not pass, LMC selects the middle setting in
+ the largest contiguous sequence of passing settings, rounding earlier when necessary. */
+ uint64_t or_dis : 1; /**< [ 9: 9](R/W) Disable ORing of bits in a byte lane when computing the read leveling bitmask. [OR_DIS]
+ should normally not be set. */
+ uint64_t bitmask : 8; /**< [ 17: 10](R/W) Mask to select bit lanes on which read leveling feedback is returned when [OR_DIS] is set to 1. */
+ uint64_t delay_unload_0 : 1; /**< [ 18: 18](R/W) Reserved, must be set.
+ Internal:
+ When set, unload the PHY silo one cycle later during
+ read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 0. [DELAY_UNLOAD_0] should
+ normally be set. */
+ uint64_t delay_unload_1 : 1; /**< [ 19: 19](R/W) Reserved, must be set.
+ Internal:
+ When set, unload the PHY silo one cycle later during
+ read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 1. [DELAY_UNLOAD_1] should
+ normally be set. */
+ uint64_t delay_unload_2 : 1; /**< [ 20: 20](R/W) Reserved, must be set.
+ Internal:
+ When set, unload the PHY silo one cycle later during
+ read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 2. [DELAY_UNLOAD_2] should
+ normally be set. */
+ uint64_t delay_unload_3 : 1; /**< [ 21: 21](R/W) Reserved, must be set.
+ Internal:
+ When set, unload the PHY silo one cycle later during
+ read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 3. [DELAY_UNLOAD_3] should
+ normally be set, particularly at higher speeds. */
+ uint64_t reserved_22_23 : 2;
+ uint64_t pattern : 8; /**< [ 31: 24](R/W) Sets the data pattern used to match in read leveling operations. */
+ uint64_t tccd_sel : 1; /**< [ 32: 32](RO) Reserved. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_lmcx_rlevel_ctl_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t rank3_status : 2; /**< [ 47: 46](RO/H) Indicates the status of rank3 read-leveling and where the BYTEn programming came from:
+ 0x0 = LMC()_RLEVEL_RANK(3) values are their reset value.
+ 0x1 = LMC()_RLEVEL_RANK(3) values were set via a CSR write.
+ 0x2 = Read-leveling sequence currently in progress (LMC()_RLEVEL_RANK(3) values are
+ unpredictable).
+ 0x3 = LMC()_RLEVEL_RANK(3) values came from a complete read-leveling sequence. */
+ uint64_t rank2_status : 2; /**< [ 45: 44](RO/H) Indicates the status of rank2 read-leveling and where the BYTEn programming came from:
+ 0x0 = LMC()_RLEVEL_RANK(2) values are their reset value.
+ 0x1 = LMC()_RLEVEL_RANK(2) values were set via a CSR write.
+ 0x2 = Read-leveling sequence currently in progress (LMC()_RLEVEL_RANK(2) values are
+ unpredictable).
+ 0x3 = LMC()_RLEVEL_RANK(2) values came from a complete read-leveling sequence. */
+ uint64_t rank1_status : 2; /**< [ 43: 42](RO/H) Indicates the status of rank1 read-leveling and where the BYTEn programming came from:
+ 0x0 = LMC()_RLEVEL_RANK(1) values are their reset value.
+ 0x1 = LMC()_RLEVEL_RANK(1) values were set via a CSR write.
+ 0x2 = Read-leveling sequence currently in progress (LMC()_RLEVEL_RANK(1) values are
+ unpredictable).
+ 0x3 = LMC()_RLEVEL_RANK(1) values came from a complete read-leveling sequence. */
+ uint64_t rank0_status : 2; /**< [ 41: 40](RO/H) Indicates the status of rank0 read-leveling and where the BYTEn programming came from:
+ 0x0 = LMC()_RLEVEL_RANK(0) values are their reset value.
+ 0x1 = LMC()_RLEVEL_RANK(0) values were set via a CSR write.
+ 0x2 = Read-leveling sequence currently in progress (LMC()_RLEVEL_RANK(0) values are
+ unpredictable).
+ 0x3 = LMC()_RLEVEL_RANK(0) values came from a complete read-leveling sequence. */
+ uint64_t reserved_33_39 : 7;
+ uint64_t tccd_sel : 1; /**< [ 32: 32](R/W) When set, the read leveling sequence uses LMC()_MODEREG_PARAMS3[TCCD_L] to
+ space out back-to-back read commands. Otherwise the back-to-back
+ read commands are spaced out by a default of 4 cycles. */
+ uint64_t pattern : 8; /**< [ 31: 24](R/W) Sets the data pattern used to match in read leveling operations. */
+ uint64_t reserved_22_23 : 2;
+ uint64_t delay_unload_3 : 1; /**< [ 21: 21](R/W) Reserved, must be set.
+ Internal:
+ When set, unload the PHY silo one cycle later during
+ read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 3. [DELAY_UNLOAD_3] should
+ normally be set, particularly at higher speeds. */
+ uint64_t delay_unload_2 : 1; /**< [ 20: 20](R/W) Reserved, must be set.
+ Internal:
+ When set, unload the PHY silo one cycle later during
+ read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 2. [DELAY_UNLOAD_2] should
+ normally be set. */
+ uint64_t delay_unload_1 : 1; /**< [ 19: 19](R/W) Reserved, must be set.
+ Internal:
+ When set, unload the PHY silo one cycle later during
+ read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 1. [DELAY_UNLOAD_1] should
+ normally be set. */
+ uint64_t delay_unload_0 : 1; /**< [ 18: 18](R/W) Reserved, must be set.
+ Internal:
+ When set, unload the PHY silo one cycle later during
+ read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 0. [DELAY_UNLOAD_0] should
+ normally be set. */
+ uint64_t bitmask : 8; /**< [ 17: 10](R/W) Mask to select bit lanes on which read leveling feedback is returned when [OR_DIS] is set to 1. */
+ uint64_t or_dis : 1; /**< [ 9: 9](R/W) Disable ORing of bits in a byte lane when computing the read leveling bitmask. [OR_DIS]
+ should normally not be set. */
+ uint64_t offset_en : 1; /**< [ 8: 8](R/W) When set, LMC attempts to select the read leveling setting that is
+ LMC()_RLEVEL_CTL[OFFSET] settings earlier than the last passing read leveling setting
+ in the largest contiguous sequence of passing settings. When clear, or if the setting
+ selected by LMC()_RLEVEL_CTL[OFFSET] did not pass, LMC selects the middle setting in
+ the largest contiguous sequence of passing settings, rounding earlier when necessary. */
+ uint64_t offset : 4; /**< [ 7: 4](R/W) The offset used when LMC()_RLEVEL_CTL[OFFSET_EN] is set. */
+ uint64_t byte : 4; /**< [ 3: 0](R/W) 0 \<= BYTE \<= 8. Byte index for which bitmask results are saved in LMC()_RLEVEL_DBG. */
+#else /* Word 0 - Little Endian */
+ uint64_t byte : 4; /**< [ 3: 0](R/W) 0 \<= BYTE \<= 8. Byte index for which bitmask results are saved in LMC()_RLEVEL_DBG. */
+ uint64_t offset : 4; /**< [ 7: 4](R/W) The offset used when LMC()_RLEVEL_CTL[OFFSET_EN] is set. */
+ uint64_t offset_en : 1; /**< [ 8: 8](R/W) When set, LMC attempts to select the read leveling setting that is
+ LMC()_RLEVEL_CTL[OFFSET] settings earlier than the last passing read leveling setting
+ in the largest contiguous sequence of passing settings. When clear, or if the setting
+ selected by LMC()_RLEVEL_CTL[OFFSET] did not pass, LMC selects the middle setting in
+ the largest contiguous sequence of passing settings, rounding earlier when necessary. */
+ uint64_t or_dis : 1; /**< [ 9: 9](R/W) Disable ORing of bits in a byte lane when computing the read leveling bitmask. [OR_DIS]
+ should normally not be set. */
+ uint64_t bitmask : 8; /**< [ 17: 10](R/W) Mask to select bit lanes on which read leveling feedback is returned when [OR_DIS] is set to 1. */
+ uint64_t delay_unload_0 : 1; /**< [ 18: 18](R/W) Reserved, must be set.
+ Internal:
+ When set, unload the PHY silo one cycle later during
+ read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 0. [DELAY_UNLOAD_0] should
+ normally be set. */
+ uint64_t delay_unload_1 : 1; /**< [ 19: 19](R/W) Reserved, must be set.
+ Internal:
+ When set, unload the PHY silo one cycle later during
+ read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 1. [DELAY_UNLOAD_1] should
+ normally be set. */
+ uint64_t delay_unload_2 : 1; /**< [ 20: 20](R/W) Reserved, must be set.
+ Internal:
+ When set, unload the PHY silo one cycle later during
+ read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 2. [DELAY_UNLOAD_2] should
+ normally be set. */
+ uint64_t delay_unload_3 : 1; /**< [ 21: 21](R/W) Reserved, must be set.
+ Internal:
+ When set, unload the PHY silo one cycle later during
+ read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 3. [DELAY_UNLOAD_3] should
+ normally be set, particularly at higher speeds. */
+ uint64_t reserved_22_23 : 2;
+ uint64_t pattern : 8; /**< [ 31: 24](R/W) Sets the data pattern used to match in read leveling operations. */
+ uint64_t tccd_sel : 1; /**< [ 32: 32](R/W) When set, the read leveling sequence uses LMC()_MODEREG_PARAMS3[TCCD_L] to
+ space out back-to-back read commands. Otherwise the back-to-back
+ read commands are spaced out by a default of 4 cycles. */
+ uint64_t reserved_33_39 : 7;
+ uint64_t rank0_status : 2; /**< [ 41: 40](RO/H) Indicates the status of rank0 read-leveling and where the BYTEn programming came from:
+ 0x0 = LMC()_RLEVEL_RANK(0) values are their reset value.
+ 0x1 = LMC()_RLEVEL_RANK(0) values were set via a CSR write.
+ 0x2 = Read-leveling sequence currently in progress (LMC()_RLEVEL_RANK(0) values are
+ unpredictable).
+ 0x3 = LMC()_RLEVEL_RANK(0) values came from a complete read-leveling sequence. */
+ uint64_t rank1_status : 2; /**< [ 43: 42](RO/H) Indicates the status of rank1 read-leveling and where the BYTEn programming came from:
+ 0x0 = LMC()_RLEVEL_RANK(1) values are their reset value.
+ 0x1 = LMC()_RLEVEL_RANK(1) values were set via a CSR write.
+ 0x2 = Read-leveling sequence currently in progress (LMC()_RLEVEL_RANK(1) values are
+ unpredictable).
+ 0x3 = LMC()_RLEVEL_RANK(1) values came from a complete read-leveling sequence. */
+ uint64_t rank2_status : 2; /**< [ 45: 44](RO/H) Indicates the status of rank2 read-leveling and where the BYTEn programming came from:
+ 0x0 = LMC()_RLEVEL_RANK(2) values are their reset value.
+ 0x1 = LMC()_RLEVEL_RANK(2) values were set via a CSR write.
+ 0x2 = Read-leveling sequence currently in progress (LMC()_RLEVEL_RANK(2) values are
+ unpredictable).
+ 0x3 = LMC()_RLEVEL_RANK(2) values came from a complete read-leveling sequence. */
+ uint64_t rank3_status : 2; /**< [ 47: 46](RO/H) Indicates the status of rank3 read-leveling and where the BYTEn programming came from:
+ 0x0 = LMC()_RLEVEL_RANK(3) values are their reset value.
+ 0x1 = LMC()_RLEVEL_RANK(3) values were set via a CSR write.
+ 0x2 = Read-leveling sequence currently in progress (LMC()_RLEVEL_RANK(3) values are
+ unpredictable).
+ 0x3 = LMC()_RLEVEL_RANK(3) values came from a complete read-leveling sequence. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_lmcx_rlevel_ctl_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t tccd_sel : 1; /**< [ 32: 32](R/W) When set, the read leveling sequence uses LMC()_MODEREG_PARAMS3[TCCD_L] to
+ space out back-to-back read commands. Otherwise the back-to-back
+ read commands are spaced out by a default of 4 cycles. */
+ uint64_t pattern : 8; /**< [ 31: 24](R/W) Sets the data pattern used to match in read leveling operations. */
+ uint64_t reserved_22_23 : 2;
+ uint64_t delay_unload_3 : 1; /**< [ 21: 21](R/W) Reserved, must be set.
+ Internal:
+ When set, unload the PHY silo one cycle later during
+ read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 3. [DELAY_UNLOAD_3] should
+ normally be set, particularly at higher speeds. */
+ uint64_t delay_unload_2 : 1; /**< [ 20: 20](R/W) Reserved, must be set.
+ Internal:
+ When set, unload the PHY silo one cycle later during
+ read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 2. [DELAY_UNLOAD_2] should
+ normally be set. */
+ uint64_t delay_unload_1 : 1; /**< [ 19: 19](R/W) Reserved, must be set.
+ Internal:
+ When set, unload the PHY silo one cycle later during
+ read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 1. [DELAY_UNLOAD_1] should
+ normally be set. */
+ uint64_t delay_unload_0 : 1; /**< [ 18: 18](R/W) Reserved, must be set.
+ Internal:
+ When set, unload the PHY silo one cycle later during
+ read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 0. [DELAY_UNLOAD_0] should
+ normally be set. */
+ uint64_t bitmask : 8; /**< [ 17: 10](R/W) Mask to select bit lanes on which read leveling feedback is returned when [OR_DIS] is set to 1. */
+ uint64_t or_dis : 1; /**< [ 9: 9](R/W) Disable ORing of bits in a byte lane when computing the read leveling bitmask. [OR_DIS]
+ should normally not be set. */
+ uint64_t offset_en : 1; /**< [ 8: 8](R/W) When set, LMC attempts to select the read leveling setting that is
+ LMC()_RLEVEL_CTL[OFFSET] settings earlier than the last passing read leveling setting
+ in the largest contiguous sequence of passing settings. When clear, or if the setting
+ selected by LMC()_RLEVEL_CTL[OFFSET] did not pass, LMC selects the middle setting in
+ the largest contiguous sequence of passing settings, rounding earlier when necessary. */
+    uint64_t offset                : 4;  /**< [  7:  4](R/W) The offset used when LMC()_RLEVEL_CTL[OFFSET_EN] is set. */
+ uint64_t byte : 4; /**< [ 3: 0](R/W) 0 \<= BYTE \<= 8. Byte index for which bitmask results are saved in LMC()_RLEVEL_DBG. */
+#else /* Word 0 - Little Endian */
+ uint64_t byte : 4; /**< [ 3: 0](R/W) 0 \<= BYTE \<= 8. Byte index for which bitmask results are saved in LMC()_RLEVEL_DBG. */
+    uint64_t offset                : 4;  /**< [  7:  4](R/W) The offset used when LMC()_RLEVEL_CTL[OFFSET_EN] is set. */
+ uint64_t offset_en : 1; /**< [ 8: 8](R/W) When set, LMC attempts to select the read leveling setting that is
+ LMC()_RLEVEL_CTL[OFFSET] settings earlier than the last passing read leveling setting
+ in the largest contiguous sequence of passing settings. When clear, or if the setting
+ selected by LMC()_RLEVEL_CTL[OFFSET] did not pass, LMC selects the middle setting in
+ the largest contiguous sequence of passing settings, rounding earlier when necessary. */
+ uint64_t or_dis : 1; /**< [ 9: 9](R/W) Disable ORing of bits in a byte lane when computing the read leveling bitmask. [OR_DIS]
+ should normally not be set. */
+ uint64_t bitmask : 8; /**< [ 17: 10](R/W) Mask to select bit lanes on which read leveling feedback is returned when [OR_DIS] is set to 1. */
+ uint64_t delay_unload_0 : 1; /**< [ 18: 18](R/W) Reserved, must be set.
+ Internal:
+ When set, unload the PHY silo one cycle later during
+ read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 0. [DELAY_UNLOAD_0] should
+ normally be set. */
+ uint64_t delay_unload_1 : 1; /**< [ 19: 19](R/W) Reserved, must be set.
+ Internal:
+ When set, unload the PHY silo one cycle later during
+ read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 1. [DELAY_UNLOAD_1] should
+ normally be set. */
+ uint64_t delay_unload_2 : 1; /**< [ 20: 20](R/W) Reserved, must be set.
+ Internal:
+ When set, unload the PHY silo one cycle later during
+ read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 2. [DELAY_UNLOAD_2] should
+ normally be set. */
+ uint64_t delay_unload_3 : 1; /**< [ 21: 21](R/W) Reserved, must be set.
+ Internal:
+ When set, unload the PHY silo one cycle later during
+ read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 3. [DELAY_UNLOAD_3] should
+ normally be set, particularly at higher speeds. */
+ uint64_t reserved_22_23 : 2;
+ uint64_t pattern : 8; /**< [ 31: 24](R/W) Sets the data pattern used to match in read leveling operations. */
+ uint64_t tccd_sel : 1; /**< [ 32: 32](R/W) When set, the read leveling sequence uses LMC()_MODEREG_PARAMS3[TCCD_L] to
+ space out back-to-back read commands. Otherwise the back-to-back
+                                                                 read commands are spaced out by a default of 4 cycles. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_lmcx_rlevel_ctl_cn81xx cn83xx; */
+ /* struct bdk_lmcx_rlevel_ctl_cn81xx cn88xxp2; */
+};
+typedef union bdk_lmcx_rlevel_ctl bdk_lmcx_rlevel_ctl_t;
+
+static inline uint64_t BDK_LMCX_RLEVEL_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_RLEVEL_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0880002a0ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0880002a0ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e0880002a0ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e0880002a0ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_RLEVEL_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_RLEVEL_CTL(a) bdk_lmcx_rlevel_ctl_t
+#define bustype_BDK_LMCX_RLEVEL_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_RLEVEL_CTL(a) "LMCX_RLEVEL_CTL"
+#define device_bar_BDK_LMCX_RLEVEL_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_RLEVEL_CTL(a) (a)
+#define arguments_BDK_LMCX_RLEVEL_CTL(a) (a),-1,-1,-1
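+
+/* Illustrative sketch (not part of the imported BDK sources): one plausible way
+ * to program LMC()_RLEVEL_CTL before starting hardware read leveling, using the
+ * [OFFSET_EN]/[OFFSET] selection rule described above. It assumes the
+ * BDK_CSR_READ/BDK_CSR_WRITE accessors from bdk-csr.h and that the generic .s
+ * view exposes [OFFSET_EN] and [OFFSET]. */
+static inline void example_setup_rlevel_ctl(bdk_node_t node, int lmc)
+{
+    bdk_lmcx_rlevel_ctl_t ctl;
+    ctl.u = BDK_CSR_READ(node, BDK_LMCX_RLEVEL_CTL(lmc));
+    ctl.s.offset_en = 1; /* prefer a setting earlier than the last passing one */
+    ctl.s.offset = 2;    /* ...two settings earlier, instead of the middle */
+    BDK_CSR_WRITE(node, BDK_LMCX_RLEVEL_CTL(lmc), ctl.u);
+}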
+
+/**
+ * Register (RSL) lmc#_rlevel_dbg
+ *
+ * LMC Read Level Debug Register
+ * A given read of LMC()_RLEVEL_DBG returns the read leveling pass/fail results for all
+ * possible delay settings (i.e. the BITMASK) for only one byte in the last rank that
+ * the hardware ran read leveling on. LMC()_RLEVEL_CTL[BYTE] selects the particular
+ * byte. To get these pass/fail results for a different rank, you must run the hardware
+ * read leveling again. For example, it is possible to get the [BITMASK] results for
+ * every byte of every rank if you run read leveling separately for each rank, probing
+ * LMC()_RLEVEL_DBG between each read-leveling run.
+ */
+union bdk_lmcx_rlevel_dbg
+{
+ uint64_t u;
+ struct bdk_lmcx_rlevel_dbg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t bitmask : 64; /**< [ 63: 0](RO/H) Bitmask generated during read level settings sweep. BITMASK[n] = 0 means read level
+ setting n failed; BITMASK[n] = 1 means read level setting n passed for 0 \<= n \<= 63. */
+#else /* Word 0 - Little Endian */
+ uint64_t bitmask : 64; /**< [ 63: 0](RO/H) Bitmask generated during read level settings sweep. BITMASK[n] = 0 means read level
+ setting n failed; BITMASK[n] = 1 means read level setting n passed for 0 \<= n \<= 63. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_rlevel_dbg_s cn8; */
+ struct bdk_lmcx_rlevel_dbg_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t bitmask : 64; /**< [ 63: 0](RO/H) Bitmask generated during read level settings sweep. BITMASK\<{a}\> = 0 means read level
+ setting {a} failed; BITMASK\<{a}\> = 1 means read level setting {a} passed for 0 \<= {a} \<= 63. */
+#else /* Word 0 - Little Endian */
+ uint64_t bitmask : 64; /**< [ 63: 0](RO/H) Bitmask generated during read level settings sweep. BITMASK\<{a}\> = 0 means read level
+ setting {a} failed; BITMASK\<{a}\> = 1 means read level setting {a} passed for 0 \<= {a} \<= 63. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_lmcx_rlevel_dbg bdk_lmcx_rlevel_dbg_t;
+
+static inline uint64_t BDK_LMCX_RLEVEL_DBG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_RLEVEL_DBG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0880002a8ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0880002a8ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e0880002a8ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e0880002a8ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_RLEVEL_DBG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_RLEVEL_DBG(a) bdk_lmcx_rlevel_dbg_t
+#define bustype_BDK_LMCX_RLEVEL_DBG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_RLEVEL_DBG(a) "LMCX_RLEVEL_DBG"
+#define device_bar_BDK_LMCX_RLEVEL_DBG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_RLEVEL_DBG(a) (a)
+#define arguments_BDK_LMCX_RLEVEL_DBG(a) (a),-1,-1,-1
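+
+/* Illustrative sketch (not part of the imported BDK sources): harvesting the
+ * per-byte pass/fail BITMASK as described above. run_read_leveling() is a
+ * hypothetical helper that triggers the hardware sequence for one rank; the
+ * BDK_CSR_READ/BDK_CSR_WRITE accessors are assumed to come from bdk-csr.h. */
+extern void run_read_leveling(bdk_node_t node, int lmc, int rank); /* hypothetical */
+static inline void example_dump_rlevel_bitmasks(bdk_node_t node, int lmc, int rank)
+{
+    for (int byte = 0; byte <= 8; byte++)
+    {
+        /* Select which byte lane LMC()_RLEVEL_DBG will report on */
+        bdk_lmcx_rlevel_ctl_t ctl;
+        ctl.u = BDK_CSR_READ(node, BDK_LMCX_RLEVEL_CTL(lmc));
+        ctl.s.byte = byte;
+        BDK_CSR_WRITE(node, BDK_LMCX_RLEVEL_CTL(lmc), ctl.u);
+        /* The debug register reflects only the last rank leveled, so the
+         * sequence must be re-run for each (rank, byte) pair probed. */
+        run_read_leveling(node, lmc, rank);
+        uint64_t bitmask = BDK_CSR_READ(node, BDK_LMCX_RLEVEL_DBG(lmc));
+        printf("LMC%d rank%d byte%d bitmask=0x%016llx\n",
+               lmc, rank, byte, (unsigned long long)bitmask);
+    }
+}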
+
+/**
+ * Register (RSL) lmc#_rlevel_dbg2
+ *
+ * LMC Read Level Debug Register
+ * A given read of LMC()_RLEVEL_DBG2 returns the read-leveling pass/fail results for
+ * read-level settings 64-127 (i.e. the upper half of the BITMASK) for only one byte in
+ * the last rank that the hardware ran read-leveling on. LMC()_RLEVEL_CTL[BYTE] selects
+ * the particular byte. To get these pass/fail results for a different rank, you must
+ * run the hardware read-leveling again. For example, it is possible to get the
+ * [BITMASK] results for every byte of every rank if you run read-leveling separately
+ * for each rank, probing LMC()_RLEVEL_DBG2 between each read-leveling run.
+ */
+union bdk_lmcx_rlevel_dbg2
+{
+ uint64_t u;
+ struct bdk_lmcx_rlevel_dbg2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t bitmask : 64; /**< [ 63: 0](RO/H) Bitmask generated during read level settings sweep. BITMASK\<{a}\> = 0 means read level
+ setting {a} failed; BITMASK\<{a}\> = 1 means read level setting {a} passed for 64 \<= {a} \<= 127. */
+#else /* Word 0 - Little Endian */
+ uint64_t bitmask : 64; /**< [ 63: 0](RO/H) Bitmask generated during read level settings sweep. BITMASK\<{a}\> = 0 means read level
+ setting {a} failed; BITMASK\<{a}\> = 1 means read level setting {a} passed for 64 \<= {a} \<= 127. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_rlevel_dbg2_s cn; */
+};
+typedef union bdk_lmcx_rlevel_dbg2 bdk_lmcx_rlevel_dbg2_t;
+
+static inline uint64_t BDK_LMCX_RLEVEL_DBG2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_RLEVEL_DBG2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e0880002b0ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_RLEVEL_DBG2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_RLEVEL_DBG2(a) bdk_lmcx_rlevel_dbg2_t
+#define bustype_BDK_LMCX_RLEVEL_DBG2(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_RLEVEL_DBG2(a) "LMCX_RLEVEL_DBG2"
+#define device_bar_BDK_LMCX_RLEVEL_DBG2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_RLEVEL_DBG2(a) (a)
+#define arguments_BDK_LMCX_RLEVEL_DBG2(a) (a),-1,-1,-1
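+
+/* Illustrative sketch (not part of the imported BDK sources): on CN9XXX the 128
+ * read-level settings are split across LMC()_RLEVEL_DBG (settings 0-63) and
+ * LMC()_RLEVEL_DBG2 (settings 64-127); software can stitch both halves into one
+ * pass/fail view. Assumes the BDK_CSR_READ accessor from bdk-csr.h. */
+static inline void example_read_full_bitmask(bdk_node_t node, int lmc,
+                                             uint64_t *lo, uint64_t *hi)
+{
+    *lo = BDK_CSR_READ(node, BDK_LMCX_RLEVEL_DBG(lmc));  /* settings 0-63 */
+    *hi = BDK_CSR_READ(node, BDK_LMCX_RLEVEL_DBG2(lmc)); /* settings 64-127 */
+}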
+
+/**
+ * Register (RSL) lmc#_rlevel_rank#
+ *
+ * LMC Read Level Rank Register
+ * Four of these CSRs exist per LMC, one for each rank. Read level setting is measured
+ * in units of 1/4 CK, so the BYTEn values can range over 16 CK cycles. Each CSR is
+ * written by hardware during a read leveling sequence for the rank. (Hardware sets
+ * [STATUS] to 3 after hardware read leveling completes for the rank.)
+ *
+ * If hardware is unable to find a match per LMC()_RLEVEL_CTL[OFFSET_EN] and
+ * LMC()_RLEVEL_CTL[OFFSET], then hardware sets LMC()_RLEVEL_RANK()[BYTEn\<5:0\>] to
+ * 0x0.
+ *
+ * Each CSR may also be written by software, but not while a read leveling sequence is
+ * in progress. (Hardware sets [STATUS] to 1 after a CSR write.) Software initiates a
+ * hardware read leveling sequence by programming LMC()_RLEVEL_CTL and writing
+ * LMC()_SEQ_CTL[INIT_START] = 1 with LMC()_SEQ_CTL[SEQ_SEL]=1. See LMC()_RLEVEL_CTL.
+ *
+ * LMC()_RLEVEL_RANKi values for ranks i without attached DRAM should be set such that
+ * they do not increase the range of possible BYTE values for any byte lane. The
+ * easiest way to do this is to set LMC()_RLEVEL_RANK(i) = LMC()_RLEVEL_RANK(j), where j is
+ * some rank with attached DRAM whose LMC()_RLEVEL_RANK(j) is already fully initialized.
+ */
+union bdk_lmcx_rlevel_rankx
+{
+ uint64_t u;
+ struct bdk_lmcx_rlevel_rankx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_56_63 : 8;
+ uint64_t status : 2; /**< [ 55: 54](RO/H) Indicates status of the read leveling and where the BYTEn programmings in \<53:0\> came
+ from:
+ 0x0 = BYTEn values are their reset value.
+ 0x1 = BYTEn values were set via a CSR write to this register.
+ 0x2 = Read leveling sequence currently in progress (BYTEn values are unpredictable).
+ 0x3 = BYTEn values came from a complete read leveling sequence. */
+ uint64_t reserved_0_53 : 54;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_53 : 54;
+ uint64_t status : 2; /**< [ 55: 54](RO/H) Indicates status of the read leveling and where the BYTEn programmings in \<53:0\> came
+ from:
+ 0x0 = BYTEn values are their reset value.
+ 0x1 = BYTEn values were set via a CSR write to this register.
+ 0x2 = Read leveling sequence currently in progress (BYTEn values are unpredictable).
+ 0x3 = BYTEn values came from a complete read leveling sequence. */
+ uint64_t reserved_56_63 : 8;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_rlevel_rankx_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_63 : 1;
+ uint64_t byte8 : 7; /**< [ 62: 56](R/W/H) "Read level setting.
+ When ECC DRAM is not present in 64-bit mode (i.e. when DRAM is not attached to chip
+ signals DDR#_CBS_0_* and DDR#_CB\<7:0\>), software should write BYTE8 to a value that does
+ not increase the range of possible BYTE* values. The easiest way to do this is to set
+ LMC()_RLEVEL_RANK()[BYTE8] = LMC()_RLEVEL_RANK()[BYTE0] when there is no
+ ECC DRAM, using the final BYTE0 value." */
+ uint64_t byte7 : 7; /**< [ 55: 49](R/W/H) Read level setting. */
+ uint64_t byte6 : 7; /**< [ 48: 42](R/W/H) Read level setting. */
+ uint64_t byte5 : 7; /**< [ 41: 35](R/W/H) Read level setting. */
+ uint64_t byte4 : 7; /**< [ 34: 28](R/W/H) Read level setting. */
+ uint64_t byte3 : 7; /**< [ 27: 21](R/W/H) Read level setting. */
+ uint64_t byte2 : 7; /**< [ 20: 14](R/W/H) Read level setting. */
+ uint64_t byte1 : 7; /**< [ 13: 7](R/W/H) Read level setting. */
+ uint64_t byte0 : 7; /**< [ 6: 0](R/W/H) Read level setting. */
+#else /* Word 0 - Little Endian */
+ uint64_t byte0 : 7; /**< [ 6: 0](R/W/H) Read level setting. */
+ uint64_t byte1 : 7; /**< [ 13: 7](R/W/H) Read level setting. */
+ uint64_t byte2 : 7; /**< [ 20: 14](R/W/H) Read level setting. */
+ uint64_t byte3 : 7; /**< [ 27: 21](R/W/H) Read level setting. */
+ uint64_t byte4 : 7; /**< [ 34: 28](R/W/H) Read level setting. */
+ uint64_t byte5 : 7; /**< [ 41: 35](R/W/H) Read level setting. */
+ uint64_t byte6 : 7; /**< [ 48: 42](R/W/H) Read level setting. */
+ uint64_t byte7 : 7; /**< [ 55: 49](R/W/H) Read level setting. */
+ uint64_t byte8 : 7; /**< [ 62: 56](R/W/H) "Read level setting.
+ When ECC DRAM is not present in 64-bit mode (i.e. when DRAM is not attached to chip
+ signals DDR#_CBS_0_* and DDR#_CB\<7:0\>), software should write BYTE8 to a value that does
+ not increase the range of possible BYTE* values. The easiest way to do this is to set
+ LMC()_RLEVEL_RANK()[BYTE8] = LMC()_RLEVEL_RANK()[BYTE0] when there is no
+ ECC DRAM, using the final BYTE0 value." */
+ uint64_t reserved_63 : 1;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_lmcx_rlevel_rankx_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_56_63 : 8;
+ uint64_t status : 2; /**< [ 55: 54](RO/H) Indicates status of the read leveling and where the BYTEn programmings in \<53:0\> came
+ from:
+ 0x0 = BYTEn values are their reset value.
+ 0x1 = BYTEn values were set via a CSR write to this register.
+ 0x2 = Read leveling sequence currently in progress (BYTEn values are unpredictable).
+ 0x3 = BYTEn values came from a complete read leveling sequence. */
+ uint64_t byte8 : 6; /**< [ 53: 48](R/W/H) "Read level setting.
+ When ECC DRAM is not present in 64-bit mode (i.e. when DRAM is not attached to chip
+ signals DDR#_DQS_8_* and DDR#_CB\<7:0\>), software should write BYTE8 to a value that does
+ not increase the range of possible BYTE* values. The easiest way to do this is to set
+ LMC()_RLEVEL_RANK()[BYTE8] = LMC()_RLEVEL_RANK()[BYTE0] when there is no
+ ECC DRAM, using the final BYTE0 value." */
+ uint64_t byte7 : 6; /**< [ 47: 42](R/W/H) Read level setting. */
+ uint64_t byte6 : 6; /**< [ 41: 36](R/W/H) Read level setting. */
+ uint64_t byte5 : 6; /**< [ 35: 30](R/W/H) Read level setting. */
+ uint64_t byte4 : 6; /**< [ 29: 24](R/W/H) Read level setting. */
+ uint64_t byte3 : 6; /**< [ 23: 18](R/W/H) Read level setting. */
+ uint64_t byte2 : 6; /**< [ 17: 12](R/W/H) Read level setting. */
+ uint64_t byte1 : 6; /**< [ 11: 6](R/W/H) Read level setting. */
+ uint64_t byte0 : 6; /**< [ 5: 0](R/W/H) Read level setting. */
+#else /* Word 0 - Little Endian */
+ uint64_t byte0 : 6; /**< [ 5: 0](R/W/H) Read level setting. */
+ uint64_t byte1 : 6; /**< [ 11: 6](R/W/H) Read level setting. */
+ uint64_t byte2 : 6; /**< [ 17: 12](R/W/H) Read level setting. */
+ uint64_t byte3 : 6; /**< [ 23: 18](R/W/H) Read level setting. */
+ uint64_t byte4 : 6; /**< [ 29: 24](R/W/H) Read level setting. */
+ uint64_t byte5 : 6; /**< [ 35: 30](R/W/H) Read level setting. */
+ uint64_t byte6 : 6; /**< [ 41: 36](R/W/H) Read level setting. */
+ uint64_t byte7 : 6; /**< [ 47: 42](R/W/H) Read level setting. */
+ uint64_t byte8 : 6; /**< [ 53: 48](R/W/H) "Read level setting.
+ When ECC DRAM is not present in 64-bit mode (i.e. when DRAM is not attached to chip
+ signals DDR#_DQS_8_* and DDR#_CB\<7:0\>), software should write BYTE8 to a value that does
+ not increase the range of possible BYTE* values. The easiest way to do this is to set
+ LMC()_RLEVEL_RANK()[BYTE8] = LMC()_RLEVEL_RANK()[BYTE0] when there is no
+ ECC DRAM, using the final BYTE0 value." */
+ uint64_t status : 2; /**< [ 55: 54](RO/H) Indicates status of the read leveling and where the BYTEn programmings in \<53:0\> came
+ from:
+ 0x0 = BYTEn values are their reset value.
+ 0x1 = BYTEn values were set via a CSR write to this register.
+ 0x2 = Read leveling sequence currently in progress (BYTEn values are unpredictable).
+ 0x3 = BYTEn values came from a complete read leveling sequence. */
+ uint64_t reserved_56_63 : 8;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_lmcx_rlevel_rankx_cn81xx cn88xx; */
+ struct bdk_lmcx_rlevel_rankx_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_56_63 : 8;
+ uint64_t status : 2; /**< [ 55: 54](RO/H) Indicates status of the read leveling and where the BYTEn programmings in \<53:0\> came
+ from:
+ 0x0 = BYTEn values are their reset value.
+ 0x1 = BYTEn values were set via a CSR write to this register.
+ 0x2 = Read leveling sequence currently in progress (BYTEn values are unpredictable).
+ 0x3 = BYTEn values came from a complete read leveling sequence. */
+ uint64_t byte8 : 6; /**< [ 53: 48](R/W/H) "Read level setting.
+ When ECC DRAM is not present in 64-bit mode (i.e. when DRAM is not attached to chip
+ signals DDR#_CBS_0_* and DDR#_CB\<7:0\>), software should write BYTE8 to a value that does
+ not increase the range of possible BYTE* values. The easiest way to do this is to set
+ LMC()_RLEVEL_RANK()[BYTE8] = LMC()_RLEVEL_RANK()[BYTE0] when there is no
+ ECC DRAM, using the final BYTE0 value." */
+ uint64_t byte7 : 6; /**< [ 47: 42](R/W/H) Read level setting. */
+ uint64_t byte6 : 6; /**< [ 41: 36](R/W/H) Read level setting. */
+ uint64_t byte5 : 6; /**< [ 35: 30](R/W/H) Read level setting. */
+ uint64_t byte4 : 6; /**< [ 29: 24](R/W/H) Read level setting. */
+ uint64_t byte3 : 6; /**< [ 23: 18](R/W/H) Read level setting. */
+ uint64_t byte2 : 6; /**< [ 17: 12](R/W/H) Read level setting. */
+ uint64_t byte1 : 6; /**< [ 11: 6](R/W/H) Read level setting. */
+ uint64_t byte0 : 6; /**< [ 5: 0](R/W/H) Read level setting. */
+#else /* Word 0 - Little Endian */
+ uint64_t byte0 : 6; /**< [ 5: 0](R/W/H) Read level setting. */
+ uint64_t byte1 : 6; /**< [ 11: 6](R/W/H) Read level setting. */
+ uint64_t byte2 : 6; /**< [ 17: 12](R/W/H) Read level setting. */
+ uint64_t byte3 : 6; /**< [ 23: 18](R/W/H) Read level setting. */
+ uint64_t byte4 : 6; /**< [ 29: 24](R/W/H) Read level setting. */
+ uint64_t byte5 : 6; /**< [ 35: 30](R/W/H) Read level setting. */
+ uint64_t byte6 : 6; /**< [ 41: 36](R/W/H) Read level setting. */
+ uint64_t byte7 : 6; /**< [ 47: 42](R/W/H) Read level setting. */
+ uint64_t byte8 : 6; /**< [ 53: 48](R/W/H) "Read level setting.
+ When ECC DRAM is not present in 64-bit mode (i.e. when DRAM is not attached to chip
+ signals DDR#_CBS_0_* and DDR#_CB\<7:0\>), software should write BYTE8 to a value that does
+ not increase the range of possible BYTE* values. The easiest way to do this is to set
+ LMC()_RLEVEL_RANK()[BYTE8] = LMC()_RLEVEL_RANK()[BYTE0] when there is no
+ ECC DRAM, using the final BYTE0 value." */
+ uint64_t status : 2; /**< [ 55: 54](RO/H) Indicates status of the read leveling and where the BYTEn programmings in \<53:0\> came
+ from:
+ 0x0 = BYTEn values are their reset value.
+ 0x1 = BYTEn values were set via a CSR write to this register.
+ 0x2 = Read leveling sequence currently in progress (BYTEn values are unpredictable).
+ 0x3 = BYTEn values came from a complete read leveling sequence. */
+ uint64_t reserved_56_63 : 8;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_lmcx_rlevel_rankx bdk_lmcx_rlevel_rankx_t;
+
+static inline uint64_t BDK_LMCX_RLEVEL_RANKX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_RLEVEL_RANKX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=3)))
+ return 0x87e088000280ll + 0x1000000ll * ((a) & 0x0) + 8ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=3)))
+ return 0x87e088000280ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b<=3)))
+ return 0x87e088000280ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=2) && (b<=3)))
+ return 0x87e088000280ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x3);
+ __bdk_csr_fatal("LMCX_RLEVEL_RANKX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_LMCX_RLEVEL_RANKX(a,b) bdk_lmcx_rlevel_rankx_t
+#define bustype_BDK_LMCX_RLEVEL_RANKX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_RLEVEL_RANKX(a,b) "LMCX_RLEVEL_RANKX"
+#define device_bar_BDK_LMCX_RLEVEL_RANKX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_RLEVEL_RANKX(a,b) (a)
+#define arguments_BDK_LMCX_RLEVEL_RANKX(a,b) (a),(b),-1,-1
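+
+/* Illustrative sketch (not part of the imported BDK sources): per the note
+ * above, ranks without attached DRAM should not widen the possible BYTE*
+ * range; the simplest approach is to mirror a fully trained rank. Assumes the
+ * BDK_CSR_READ/BDK_CSR_WRITE accessors from bdk-csr.h, with trained_rank being
+ * a rank whose [STATUS] already reads 3 (complete read-leveling sequence). */
+static inline void example_mirror_rlevel_rank(bdk_node_t node, int lmc,
+                                              int trained_rank, int empty_rank)
+{
+    uint64_t good = BDK_CSR_READ(node, BDK_LMCX_RLEVEL_RANKX(lmc, trained_rank));
+    /* A software write is legal here because no read-leveling sequence is in
+     * progress; hardware will then report [STATUS]=1 (set via CSR write). */
+    BDK_CSR_WRITE(node, BDK_LMCX_RLEVEL_RANKX(lmc, empty_rank), good);
+}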
+
+/**
+ * Register (RSL) lmc#_rodt_mask
+ *
+ * LMC Read On-Die Termination Mask Register
+ * System designers may desire to terminate DQ/DQS lines for higher frequency DDR operations,
+ * especially on a multirank system. DDR4 DQ/DQS I/Os have built-in termination resistors that
+ * can be turned on or off by the controller, after meeting TAOND and TAOF timing requirements.
+ *
+ * Each rank has its own ODT pin that fans out to all the memory parts in that DIMM. System
+ * designers may prefer different combinations of ODT ONs for read operations into different
+ * ranks. CNXXXX supports full programmability by way of the mask register below. Each rank
+ * position has its own 4-bit programmable field. When the controller does a read to that rank,
+ * it sets the 4 ODT pins to the MASK pins below. For example, when doing a read from Rank0, a
+ * system designer may desire to terminate the lines with the resistor on DIMM0/Rank1. The mask
+ * [RODT_D0_R0] would then be {0010}.
+ *
+ * CNXXXX drives the appropriate mask values on the ODT pins by default. If this feature is not
+ * required, write 0x0 in this register. Note that, as per the JEDEC DDR4 specifications, the ODT
+ * pin for the rank that is being read should always be 0x0.
+ * When a given RANK is selected, the RODT mask for that rank is used, and the resulting
+ * RODT mask is driven to the DIMMs on the per-rank ODT pins.
+ */
+union bdk_lmcx_rodt_mask
+{
+ uint64_t u;
+ struct bdk_lmcx_rodt_mask_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_28_63 : 36;
+ uint64_t rodt_d1_r1 : 4; /**< [ 27: 24](R/W) Reserved.
+ Internal:
+ Read ODT mask DIMM1, RANK1/DIMM1 in SingleRanked. If RANK_ENA=1, [RODT_D1_R1]\<3\> must be
+ zero. Otherwise [RODT_D1_R1]\<3:0\> is not used and must be zero. */
+ uint64_t reserved_20_23 : 4;
+ uint64_t rodt_d1_r0 : 4; /**< [ 19: 16](R/W) Reserved.
+ Internal:
+                                                                 Read ODT mask DIMM1, RANK0. If RANK_ENA=1, [RODT_D1_R0]\<2\> must be zero. Otherwise,
+                                                                 [RODT_D1_R0]\<3:2,1\> must be zero. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t rodt_d0_r1 : 4; /**< [ 11: 8](R/W) Read ODT mask DIMM0, RANK1/DIMM0 in SingleRanked. If LMC()_CONFIG[RANK_ENA]=1,
+ [RODT_D0_R1]\<1\> must be zero. Otherwise, [RODT_D0_R1]\<3:0\> is not used and must
+ be zero. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t rodt_d0_r0 : 4; /**< [ 3: 0](R/W) Read ODT mask DIMM0, RANK0. If LMC()_CONFIG[RANK_ENA]=1, [RODT_D0_R0]\<0\> must be
+ zero. Otherwise, [RODT_D0_R0]\<1:0,3\> must be zero. */
+#else /* Word 0 - Little Endian */
+ uint64_t rodt_d0_r0 : 4; /**< [ 3: 0](R/W) Read ODT mask DIMM0, RANK0. If LMC()_CONFIG[RANK_ENA]=1, [RODT_D0_R0]\<0\> must be
+ zero. Otherwise, [RODT_D0_R0]\<1:0,3\> must be zero. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t rodt_d0_r1 : 4; /**< [ 11: 8](R/W) Read ODT mask DIMM0, RANK1/DIMM0 in SingleRanked. If LMC()_CONFIG[RANK_ENA]=1,
+ [RODT_D0_R1]\<1\> must be zero. Otherwise, [RODT_D0_R1]\<3:0\> is not used and must
+ be zero. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t rodt_d1_r0 : 4; /**< [ 19: 16](R/W) Reserved.
+ Internal:
+                                                                 Read ODT mask DIMM1, RANK0. If RANK_ENA=1, [RODT_D1_R0]\<2\> must be zero. Otherwise,
+                                                                 [RODT_D1_R0]\<3:2,1\> must be zero. */
+ uint64_t reserved_20_23 : 4;
+ uint64_t rodt_d1_r1 : 4; /**< [ 27: 24](R/W) Reserved.
+ Internal:
+ Read ODT mask DIMM1, RANK1/DIMM1 in SingleRanked. If RANK_ENA=1, [RODT_D1_R1]\<3\> must be
+ zero. Otherwise [RODT_D1_R1]\<3:0\> is not used and must be zero. */
+ uint64_t reserved_28_63 : 36;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_rodt_mask_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_28_63 : 36;
+ uint64_t rodt_d1_r1 : 4; /**< [ 27: 24](R/W) Read ODT mask DIMM1, RANK1/DIMM1 in SingleRanked. If LMC()_CONFIG[RANK_ENA]=1,
+ [RODT_D1_R1]\<3\> must be zero. Otherwise [RODT_D1_R1]\<3:0\> is not used and must
+ be zero. */
+ uint64_t reserved_20_23 : 4;
+ uint64_t rodt_d1_r0 : 4; /**< [ 19: 16](R/W) Read ODT mask DIMM1, RANK0. If LMC()_CONFIG[RANK_ENA]=1, [RODT_D1_R0]\<2\> must be
+ zero. Otherwise, [RODT_D1_R0]\<3:2,1\> must be zero. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t rodt_d0_r1 : 4; /**< [ 11: 8](R/W) Read ODT mask DIMM0, RANK1/DIMM0 in SingleRanked. If LMC()_CONFIG[RANK_ENA]=1,
+ [RODT_D0_R1]\<1\> must be zero. Otherwise, [RODT_D0_R1]\<3:0\> is not used and must
+ be zero. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t rodt_d0_r0 : 4; /**< [ 3: 0](R/W) Read ODT mask DIMM0, RANK0. If LMC()_CONFIG[RANK_ENA]=1, [RODT_D0_R0]\<0\> must be
+ zero. Otherwise, [RODT_D0_R0]\<1:0,3\> must be zero. */
+#else /* Word 0 - Little Endian */
+ uint64_t rodt_d0_r0 : 4; /**< [ 3: 0](R/W) Read ODT mask DIMM0, RANK0. If LMC()_CONFIG[RANK_ENA]=1, [RODT_D0_R0]\<0\> must be
+ zero. Otherwise, [RODT_D0_R0]\<1:0,3\> must be zero. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t rodt_d0_r1 : 4; /**< [ 11: 8](R/W) Read ODT mask DIMM0, RANK1/DIMM0 in SingleRanked. If LMC()_CONFIG[RANK_ENA]=1,
+ [RODT_D0_R1]\<1\> must be zero. Otherwise, [RODT_D0_R1]\<3:0\> is not used and must
+ be zero. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t rodt_d1_r0 : 4; /**< [ 19: 16](R/W) Read ODT mask DIMM1, RANK0. If LMC()_CONFIG[RANK_ENA]=1, [RODT_D1_R0]\<2\> must be
+ zero. Otherwise, [RODT_D1_R0]\<3:2,1\> must be zero. */
+ uint64_t reserved_20_23 : 4;
+ uint64_t rodt_d1_r1 : 4; /**< [ 27: 24](R/W) Read ODT mask DIMM1, RANK1/DIMM1 in SingleRanked. If LMC()_CONFIG[RANK_ENA]=1,
+ [RODT_D1_R1]\<3\> must be zero. Otherwise [RODT_D1_R1]\<3:0\> is not used and must
+ be zero. */
+ uint64_t reserved_28_63 : 36;
+#endif /* Word 0 - End */
+ } cn9;
+ /* struct bdk_lmcx_rodt_mask_s cn81xx; */
+ /* struct bdk_lmcx_rodt_mask_cn9 cn88xx; */
+ /* struct bdk_lmcx_rodt_mask_cn9 cn83xx; */
+};
+typedef union bdk_lmcx_rodt_mask bdk_lmcx_rodt_mask_t;
+
+static inline uint64_t BDK_LMCX_RODT_MASK(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_RODT_MASK(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000268ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000268ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000268ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000268ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_RODT_MASK", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_RODT_MASK(a) bdk_lmcx_rodt_mask_t
+#define bustype_BDK_LMCX_RODT_MASK(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_RODT_MASK(a) "LMCX_RODT_MASK"
+#define device_bar_BDK_LMCX_RODT_MASK(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_RODT_MASK(a) (a)
+#define arguments_BDK_LMCX_RODT_MASK(a) (a),-1,-1,-1
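+
+/* Illustrative sketch (not part of the imported BDK sources): encoding the
+ * example from the description above, where a read from DIMM0/Rank0 turns on
+ * the termination resistor of DIMM0/Rank1 (mask {0010}). Assumes the
+ * BDK_CSR_WRITE accessor from bdk-csr.h and a dual-rank single-DIMM setup. */
+static inline void example_program_rodt_mask(bdk_node_t node, int lmc)
+{
+    bdk_lmcx_rodt_mask_t mask;
+    mask.u = 0;
+    mask.s.rodt_d0_r0 = 0x2; /* reads from Rank0 terminate on DIMM0/Rank1 */
+    mask.s.rodt_d0_r1 = 0x1; /* reads from Rank1 terminate on DIMM0/Rank0 */
+    /* Per JEDEC DDR4, the ODT bit for the rank being read stays zero, which
+     * also satisfies the RANK_ENA=1 constraints in the field descriptions. */
+    BDK_CSR_WRITE(node, BDK_LMCX_RODT_MASK(lmc), mask.u);
+}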
+
+/**
+ * Register (RSL) lmc#_scramble_cfg0
+ *
+ * LMC Scramble Configuration 0 Register
+ */
+union bdk_lmcx_scramble_cfg0
+{
+ uint64_t u;
+ struct bdk_lmcx_scramble_cfg0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t key : 64; /**< [ 63: 0](R/W/H) Scramble key for data. Prior to enabling scrambling this key should be generated from a
+ cryptographically-secure random number generator such as RNM_RANDOM. */
+#else /* Word 0 - Little Endian */
+ uint64_t key : 64; /**< [ 63: 0](R/W/H) Scramble key for data. Prior to enabling scrambling this key should be generated from a
+ cryptographically-secure random number generator such as RNM_RANDOM. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_scramble_cfg0_s cn; */
+};
+typedef union bdk_lmcx_scramble_cfg0 bdk_lmcx_scramble_cfg0_t;
+
+static inline uint64_t BDK_LMCX_SCRAMBLE_CFG0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_SCRAMBLE_CFG0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000320ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000320ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000320ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_SCRAMBLE_CFG0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_SCRAMBLE_CFG0(a) bdk_lmcx_scramble_cfg0_t
+#define bustype_BDK_LMCX_SCRAMBLE_CFG0(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_SCRAMBLE_CFG0(a) "LMCX_SCRAMBLE_CFG0"
+#define device_bar_BDK_LMCX_SCRAMBLE_CFG0(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_SCRAMBLE_CFG0(a) (a)
+#define arguments_BDK_LMCX_SCRAMBLE_CFG0(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) lmc#_scramble_cfg1
+ *
+ * LMC Scramble Configuration 1 Register
+ * This register holds the scramble key applied to memory addresses.
+ */
+union bdk_lmcx_scramble_cfg1
+{
+ uint64_t u;
+ struct bdk_lmcx_scramble_cfg1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t key : 64; /**< [ 63: 0](R/W/H) Scramble key for addresses. Prior to enabling scrambling this key should be generated from
+ a cryptographically-secure random number generator such as RNM_RANDOM. */
+#else /* Word 0 - Little Endian */
+ uint64_t key : 64; /**< [ 63: 0](R/W/H) Scramble key for addresses. Prior to enabling scrambling this key should be generated from
+ a cryptographically-secure random number generator such as RNM_RANDOM. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_scramble_cfg1_s cn; */
+};
+typedef union bdk_lmcx_scramble_cfg1 bdk_lmcx_scramble_cfg1_t;
+
+static inline uint64_t BDK_LMCX_SCRAMBLE_CFG1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_SCRAMBLE_CFG1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000328ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000328ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000328ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_SCRAMBLE_CFG1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_SCRAMBLE_CFG1(a) bdk_lmcx_scramble_cfg1_t
+#define bustype_BDK_LMCX_SCRAMBLE_CFG1(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_SCRAMBLE_CFG1(a) "LMCX_SCRAMBLE_CFG1"
+#define device_bar_BDK_LMCX_SCRAMBLE_CFG1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_SCRAMBLE_CFG1(a) (a)
+#define arguments_BDK_LMCX_SCRAMBLE_CFG1(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) lmc#_scramble_cfg2
+ *
+ * LMC Scramble Configuration 2 Register
+ */
+union bdk_lmcx_scramble_cfg2
+{
+ uint64_t u;
+ struct bdk_lmcx_scramble_cfg2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t key : 64; /**< [ 63: 0](R/W/H) Scramble key for data. Prior to enabling scrambling this key should be generated from a
+ cryptographically-secure random number generator such as RNM_RANDOM. */
+#else /* Word 0 - Little Endian */
+ uint64_t key : 64; /**< [ 63: 0](R/W/H) Scramble key for data. Prior to enabling scrambling this key should be generated from a
+ cryptographically-secure random number generator such as RNM_RANDOM. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_scramble_cfg2_s cn; */
+};
+typedef union bdk_lmcx_scramble_cfg2 bdk_lmcx_scramble_cfg2_t;
+
+static inline uint64_t BDK_LMCX_SCRAMBLE_CFG2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_SCRAMBLE_CFG2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000338ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000338ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000338ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_SCRAMBLE_CFG2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_SCRAMBLE_CFG2(a) bdk_lmcx_scramble_cfg2_t
+#define bustype_BDK_LMCX_SCRAMBLE_CFG2(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_SCRAMBLE_CFG2(a) "LMCX_SCRAMBLE_CFG2"
+#define device_bar_BDK_LMCX_SCRAMBLE_CFG2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_SCRAMBLE_CFG2(a) (a)
+#define arguments_BDK_LMCX_SCRAMBLE_CFG2(a) (a),-1,-1,-1
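+
+/* Illustrative sketch (not part of the imported BDK sources): seeding all three
+ * scramble keys before enabling scrambling, as the field descriptions above
+ * require. bdk_rng_get_random64() is assumed to be the RNM_RANDOM accessor
+ * provided by the imported bdk-driver-rnm.c; BDK_CSR_WRITE comes from
+ * bdk-csr.h. */
+static inline void example_seed_scramble_keys(bdk_node_t node, int lmc)
+{
+    /* Each key must come from a cryptographically-secure RNG such as RNM_RANDOM */
+    BDK_CSR_WRITE(node, BDK_LMCX_SCRAMBLE_CFG0(lmc), bdk_rng_get_random64());
+    BDK_CSR_WRITE(node, BDK_LMCX_SCRAMBLE_CFG1(lmc), bdk_rng_get_random64());
+    BDK_CSR_WRITE(node, BDK_LMCX_SCRAMBLE_CFG2(lmc), bdk_rng_get_random64());
+}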
+
+/**
+ * Register (RSL) lmc#_scrambled_fadr
+ *
+ * LMC Scrambled Failing (SEC/DED/NXM) Address Register
+ * LMC()_FADR captures the failing pre-scrambled address location (split into DIMM, bunk,
+ * bank, etc). If scrambling is off, LMC()_FADR also captures the failing physical location
+ * in the DRAM parts. LMC()_SCRAMBLED_FADR captures the actual failing address location in
+ * the physical DRAM parts, i.e.:
+ *
+ * * If scrambling is on, LMC()_SCRAMBLED_FADR contains the failing physical location in the
+ * DRAM parts (split into DIMM, bunk, bank, etc).
+ *
+ * * If scrambling is off, the pre-scramble and post-scramble addresses are the same, and so the
+ * contents of LMC()_SCRAMBLED_FADR match the contents of LMC()_FADR.
+ *
+ * This register only captures the first transaction with ECC errors. A DED error can overwrite
+ * this register with its failing address if the first error was a SEC. Writing
+ * LMC()_CONFIG[SEC_ERR]/[DED_ERR] clears the error bits and captures the next failing
+ * address. If [FDIMM] is 1, the error is in the higher DIMM.
+ */
+union bdk_lmcx_scrambled_fadr
+{
+ uint64_t u;
+ struct bdk_lmcx_scrambled_fadr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_43_63 : 21;
+ uint64_t fcid : 3; /**< [ 42: 40](RO/H) Reserved.
+ Internal:
+ Failing CID number. This field is only valid when interfacing with 3DS DRAMs
+ (i.e., when LMC()_EXT_CONFIG[DIMM0_CID] is nonzero). Returns a value of zero otherwise. */
+ uint64_t fill_order : 2; /**< [ 39: 38](RO/H) Fill order for failing transaction. */
+ uint64_t fdimm : 1; /**< [ 37: 37](RO/H) Failing DIMM number. */
+ uint64_t fbunk : 1; /**< [ 36: 36](RO/H) Failing rank number. */
+ uint64_t fbank : 4; /**< [ 35: 32](RO/H) Failing bank number, bits \<3:0\>. */
+ uint64_t frow : 18; /**< [ 31: 14](RO/H) Failing row address, bits \<17:0\>. */
+ uint64_t fcol : 14; /**< [ 13: 0](RO/H) Failing column address \<13:0\>. Technically, represents the address of the 128b data that
+ had an ECC error, i.e., FCOL\<0\> is always 0. Can be used in conjunction with
+ LMC()_CONFIG[DED_ERR] to isolate the 64b chunk of data in error. */
+#else /* Word 0 - Little Endian */
+ uint64_t fcol : 14; /**< [ 13: 0](RO/H) Failing column address \<13:0\>. Technically, represents the address of the 128b data that
+ had an ECC error, i.e., FCOL\<0\> is always 0. Can be used in conjunction with
+ LMC()_CONFIG[DED_ERR] to isolate the 64b chunk of data in error. */
+ uint64_t frow : 18; /**< [ 31: 14](RO/H) Failing row address, bits \<17:0\>. */
+ uint64_t fbank : 4; /**< [ 35: 32](RO/H) Failing bank number, bits \<3:0\>. */
+ uint64_t fbunk : 1; /**< [ 36: 36](RO/H) Failing rank number. */
+ uint64_t fdimm : 1; /**< [ 37: 37](RO/H) Failing DIMM number. */
+ uint64_t fill_order : 2; /**< [ 39: 38](RO/H) Fill order for failing transaction. */
+ uint64_t fcid : 3; /**< [ 42: 40](RO/H) Reserved.
+ Internal:
+ Failing CID number. This field is only valid when interfacing with 3DS DRAMs
+ (i.e., when LMC()_EXT_CONFIG[DIMM0_CID] is nonzero). Returns a value of zero otherwise. */
+ uint64_t reserved_43_63 : 21;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_scrambled_fadr_s cn81xx; */
+ struct bdk_lmcx_scrambled_fadr_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_43_63 : 21;
+ uint64_t fcid : 3; /**< [ 42: 40](RO/H) Reserved.
+ Internal:
+ Failing CID number. This field is only valid when interfacing with 3DS DRAMs
+ (i.e., when either LMC()_EXT_CONFIG[DIMM0_CID] or LMC()_EXT_CONFIG[DIMM1_CID] is
+ nonzero). Returns a value of zero otherwise. */
+ uint64_t fill_order : 2; /**< [ 39: 38](RO/H) Fill order for failing transaction. */
+ uint64_t fdimm : 1; /**< [ 37: 37](RO/H) Failing DIMM number. */
+ uint64_t fbunk : 1; /**< [ 36: 36](RO/H) Failing rank number. */
+ uint64_t fbank : 4; /**< [ 35: 32](RO/H) Failing bank number, bits \<3:0\>. */
+ uint64_t frow : 18; /**< [ 31: 14](RO/H) Failing row address, bits \<17:0\>. */
+ uint64_t fcol : 14; /**< [ 13: 0](RO/H) Failing column address \<13:0\>. Technically, represents the address of the 128b data that
+ had an ECC error, i.e., FCOL\<0\> is always 0. Can be used in conjunction with
+ LMC()_CONFIG[DED_ERR] to isolate the 64b chunk of data in error. */
+#else /* Word 0 - Little Endian */
+ uint64_t fcol : 14; /**< [ 13: 0](RO/H) Failing column address \<13:0\>. Technically, represents the address of the 128b data that
+ had an ECC error, i.e., FCOL\<0\> is always 0. Can be used in conjunction with
+ LMC()_CONFIG[DED_ERR] to isolate the 64b chunk of data in error. */
+ uint64_t frow : 18; /**< [ 31: 14](RO/H) Failing row address, bits \<17:0\>. */
+ uint64_t fbank : 4; /**< [ 35: 32](RO/H) Failing bank number, bits \<3:0\>. */
+ uint64_t fbunk : 1; /**< [ 36: 36](RO/H) Failing rank number. */
+ uint64_t fdimm : 1; /**< [ 37: 37](RO/H) Failing DIMM number. */
+ uint64_t fill_order : 2; /**< [ 39: 38](RO/H) Fill order for failing transaction. */
+ uint64_t fcid : 3; /**< [ 42: 40](RO/H) Reserved.
+ Internal:
+ Failing CID number. This field is only valid when interfacing with 3DS DRAMs
+ (i.e., when either LMC()_EXT_CONFIG[DIMM0_CID] or LMC()_EXT_CONFIG[DIMM1_CID] is
+ nonzero). Returns a value of zero otherwise. */
+ uint64_t reserved_43_63 : 21;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_lmcx_scrambled_fadr_cn88xx cn83xx; */
+};
+typedef union bdk_lmcx_scrambled_fadr bdk_lmcx_scrambled_fadr_t;
+
+static inline uint64_t BDK_LMCX_SCRAMBLED_FADR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_SCRAMBLED_FADR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000330ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000330ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000330ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_SCRAMBLED_FADR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_SCRAMBLED_FADR(a) bdk_lmcx_scrambled_fadr_t
+#define bustype_BDK_LMCX_SCRAMBLED_FADR(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_SCRAMBLED_FADR(a) "LMCX_SCRAMBLED_FADR"
+#define device_bar_BDK_LMCX_SCRAMBLED_FADR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_SCRAMBLED_FADR(a) (a)
+#define arguments_BDK_LMCX_SCRAMBLED_FADR(a) (a),-1,-1,-1
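+
+/* Illustrative sketch (not part of the imported BDK sources): decoding the
+ * captured post-scramble failing location after an ECC error, using the field
+ * layout above. Assumes the BDK_CSR_READ accessor from bdk-csr.h. */
+static inline void example_report_scrambled_fadr(bdk_node_t node, int lmc)
+{
+    bdk_lmcx_scrambled_fadr_t fadr;
+    fadr.u = BDK_CSR_READ(node, BDK_LMCX_SCRAMBLED_FADR(lmc));
+    printf("LMC%d ECC fail: DIMM%d rank%d bank%d row 0x%x col 0x%x\n",
+           lmc, (int)fadr.s.fdimm, (int)fadr.s.fbunk, (int)fadr.s.fbank,
+           (unsigned)fadr.s.frow, (unsigned)fadr.s.fcol);
+}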
+
+/**
+ * Register (RSL) lmc#_seq_ctl
+ *
+ * LMC Sequence Control Register
+ * This register is used to initiate the various control sequences in the LMC.
+ */
+union bdk_lmcx_seq_ctl
+{
+ uint64_t u;
+ struct bdk_lmcx_seq_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_10_63 : 54;
+ uint64_t lmc_mode : 2; /**< [ 9: 8](RO/H) Readable internal state of LMC.
+ 0x0 = Init state. LMC is fresh out of reset. Only INIT or
+ LMC_SEQ_SEL_E::SREF_EXIT sequence can
+ take LMC out of this state to the normal state.
+ 0x1 = Normal state. LMC is in mission mode.
+ 0x2 = Self-refresh state. LMC and DRAMs are in Self-refresh mode. If software
+ initiated (by running SREF_ENTRY sequence), only LMC_SEQ_SEL_E::SREF_EXIT
+ sequence can take LMC out of this state to the normal state.
+ 0x3 = Power-down state. LMC and DRAMs are in Power-down mode. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t seq_complete : 1; /**< [ 5: 5](RO/H) Sequence complete. This bit is cleared when [INIT_START] is set to a one and
+ then is set to one when the sequence is completed. */
+ uint64_t seq_sel : 4; /**< [ 4: 1](R/W) Selects the sequence that LMC runs after a 0-\>1 transition on [INIT_START], as
+ enumerated by LMC_SEQ_SEL_E.
+
+ LMC writes the LMC()_MODEREG_PARAMS0 and LMC()_MODEREG_PARAMS1 CSR field values
+ to the Mode registers in the DRAM parts (i.e. MR0-MR6) as part of some of
+ these sequences.
+ Refer to the LMC()_MODEREG_PARAMS0 and LMC()_MODEREG_PARAMS1 descriptions for more
+ details. */
+ uint64_t init_start : 1; /**< [ 0: 0](WO) A 0-\>1 transition starts the DDR memory sequence that is selected by
+ LMC()_SEQ_CTL[SEQ_SEL].
+ This register is a one-shot and clears itself each time it is set. */
+#else /* Word 0 - Little Endian */
+ uint64_t init_start : 1; /**< [ 0: 0](WO) A 0-\>1 transition starts the DDR memory sequence that is selected by
+ LMC()_SEQ_CTL[SEQ_SEL].
+ This register is a one-shot and clears itself each time it is set. */
+ uint64_t seq_sel : 4; /**< [ 4: 1](R/W) Selects the sequence that LMC runs after a 0-\>1 transition on [INIT_START], as
+ enumerated by LMC_SEQ_SEL_E.
+
+ LMC writes the LMC()_MODEREG_PARAMS0 and LMC()_MODEREG_PARAMS1 CSR field values
+ to the Mode registers in the DRAM parts (i.e. MR0-MR6) as part of some of
+ these sequences.
+ Refer to the LMC()_MODEREG_PARAMS0 and LMC()_MODEREG_PARAMS1 descriptions for more
+ details. */
+ uint64_t seq_complete : 1; /**< [ 5: 5](RO/H) Sequence complete. This bit is cleared when [INIT_START] is set to a one and
+ then is set to one when the sequence is completed. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t lmc_mode : 2; /**< [ 9: 8](RO/H) Readable internal state of LMC.
+ 0x0 = Init state. LMC is fresh out of reset. Only INIT or
+ LMC_SEQ_SEL_E::SREF_EXIT sequence can
+ take LMC out of this state to the normal state.
+ 0x1 = Normal state. LMC is in mission mode.
+ 0x2 = Self-refresh state. LMC and DRAMs are in Self-refresh mode. If software
+ initiated (by running SREF_ENTRY sequence), only LMC_SEQ_SEL_E::SREF_EXIT
+ sequence can take LMC out of this state to the normal state.
+ 0x3 = Power-down state. LMC and DRAMs are in Power-down mode. */
+ uint64_t reserved_10_63 : 54;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_seq_ctl_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_6_63 : 58;
+ uint64_t seq_complete : 1; /**< [ 5: 5](RO/H) Sequence complete. This bit is cleared when [INIT_START] is set to a one and
+ then is set to one when the sequence is completed. */
+ uint64_t seq_sel : 4; /**< [ 4: 1](R/W) Selects the sequence that LMC runs after a 0-\>1 transition on [INIT_START], as
+ enumerated by LMC_SEQ_SEL_E.
+
+ LMC writes the LMC()_MODEREG_PARAMS0 and LMC()_MODEREG_PARAMS1 CSR field values
+ to the Mode registers in the DRAM parts (i.e. MR0-MR6) as part of some of
+ these sequences.
+ Refer to the LMC()_MODEREG_PARAMS0 and LMC()_MODEREG_PARAMS1 descriptions for more
+ details. */
+ uint64_t init_start : 1; /**< [ 0: 0](WO) A 0-\>1 transition starts the DDR memory sequence that is selected by
+ LMC()_SEQ_CTL[SEQ_SEL].
+ This register is a one-shot and clears itself each time it is set. */
+#else /* Word 0 - Little Endian */
+ uint64_t init_start : 1; /**< [ 0: 0](WO) A 0-\>1 transition starts the DDR memory sequence that is selected by
+ LMC()_SEQ_CTL[SEQ_SEL].
+ This register is a one-shot and clears itself each time it is set. */
+ uint64_t seq_sel : 4; /**< [ 4: 1](R/W) Selects the sequence that LMC runs after a 0-\>1 transition on [INIT_START], as
+ enumerated by LMC_SEQ_SEL_E.
+
+ LMC writes the LMC()_MODEREG_PARAMS0 and LMC()_MODEREG_PARAMS1 CSR field values
+ to the Mode registers in the DRAM parts (i.e. MR0-MR6) as part of some of
+ these sequences.
+ Refer to the LMC()_MODEREG_PARAMS0 and LMC()_MODEREG_PARAMS1 descriptions for more
+ details. */
+ uint64_t seq_complete : 1; /**< [ 5: 5](RO/H) Sequence complete. This bit is cleared when [INIT_START] is set to a one and
+ then is set to one when the sequence is completed. */
+ uint64_t reserved_6_63 : 58;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_lmcx_seq_ctl_s cn9; */
+};
+typedef union bdk_lmcx_seq_ctl bdk_lmcx_seq_ctl_t;
+
+static inline uint64_t BDK_LMCX_SEQ_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_SEQ_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000048ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000048ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000048ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000048ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_SEQ_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_SEQ_CTL(a) bdk_lmcx_seq_ctl_t
+#define bustype_BDK_LMCX_SEQ_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_SEQ_CTL(a) "LMCX_SEQ_CTL"
+#define device_bar_BDK_LMCX_SEQ_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_SEQ_CTL(a) (a)
+#define arguments_BDK_LMCX_SEQ_CTL(a) (a),-1,-1,-1
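+
+/* Illustrative sketch (not part of the imported BDK sources): starting one of
+ * the LMC_SEQ_SEL_E sequences and polling [SEQ_COMPLETE], per the field
+ * descriptions above. Assumes the BDK_CSR_READ/BDK_CSR_WRITE accessors from
+ * bdk-csr.h; a production version would bound the poll with a timeout. */
+static inline void example_run_lmc_sequence(bdk_node_t node, int lmc, int seq_sel)
+{
+    bdk_lmcx_seq_ctl_t seq;
+    seq.u = 0;
+    seq.s.seq_sel = seq_sel;
+    seq.s.init_start = 1; /* one-shot: the 0->1 transition starts the sequence */
+    BDK_CSR_WRITE(node, BDK_LMCX_SEQ_CTL(lmc), seq.u);
+    do
+    {
+        seq.u = BDK_CSR_READ(node, BDK_LMCX_SEQ_CTL(lmc));
+    } while (!seq.s.seq_complete);
+}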
+
+/**
+ * Register (RSL) lmc#_slot_ctl0
+ *
+ * LMC Slot Control0 Register
+ * This register is an assortment of control fields needed by the memory controller. If software
+ * has not previously written to this register (since the last DRESET), hardware updates the
+ * fields in this register to the minimum allowed value when any of LMC()_RLEVEL_RANK(),
+ * LMC()_WLEVEL_RANK(), LMC()_CONTROL, and LMC()_MODEREG_PARAMS0 registers
+ * change. Ideally, only read this register after LMC has been initialized and
+ * LMC()_RLEVEL_RANK(), LMC()_WLEVEL_RANK() have valid data.
+ *
+ * The interpretation of the fields in this register depends on LMC()_CONTROL[DDR2T]:
+ *
+ * * If LMC()_CONTROL[DDR2T]=1, (FieldValue + 4) is the minimum CK cycles between when
+ * the DRAM part registers CAS commands of the first and second types from different cache
+ * blocks.
+ *
+ * * If LMC()_CONTROL[DDR2T]=0, (FieldValue + 3) is the minimum CK cycles between when the DRAM
+ * part registers CAS commands of the first and second types from different cache blocks.
+ * FieldValue = 0 is always illegal in this case.
+ * The hardware-calculated minimums for these fields are shown in LMC()_SLOT_CTL0
+ * Hardware-Calculated Minimums.
+ */
+union bdk_lmcx_slot_ctl0
+{
+ uint64_t u;
+ struct bdk_lmcx_slot_ctl0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_50_63 : 14;
+ uint64_t w2r_l_init_ext : 1; /**< [ 49: 49](R/W/H) A 1-bit extension to the [W2R_L_INIT] register. */
+ uint64_t w2r_init_ext : 1; /**< [ 48: 48](R/W/H) A 1-bit extension to the [W2R_INIT] register. */
+ uint64_t w2w_l_init : 6; /**< [ 47: 42](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
+ accesses to the same rank and DIMM, and same BG for DDR4. */
+ uint64_t w2r_l_init : 6; /**< [ 41: 36](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
+ to the same rank and DIMM, and same BG for DDR4. */
+ uint64_t r2w_l_init : 6; /**< [ 35: 30](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
+ to the same rank and DIMM, and same BG for DDR4. */
+ uint64_t r2r_l_init : 6; /**< [ 29: 24](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
+ to the same rank and DIMM, and same BG for DDR4. */
+ uint64_t w2w_init : 6; /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
+ accesses to the same rank and DIMM. */
+ uint64_t w2r_init : 6; /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
+ to the same rank and DIMM. */
+ uint64_t r2w_init : 6; /**< [ 11: 6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
+ to the same rank and DIMM. */
+ uint64_t r2r_init : 6; /**< [ 5: 0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
+ to the same rank and DIMM. */
+#else /* Word 0 - Little Endian */
+ uint64_t r2r_init : 6; /**< [ 5: 0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
+ to the same rank and DIMM. */
+ uint64_t r2w_init : 6; /**< [ 11: 6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
+ to the same rank and DIMM. */
+ uint64_t w2r_init : 6; /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
+ to the same rank and DIMM. */
+ uint64_t w2w_init : 6; /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
+ accesses to the same rank and DIMM. */
+ uint64_t r2r_l_init : 6; /**< [ 29: 24](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
+ to the same rank and DIMM, and same BG for DDR4. */
+ uint64_t r2w_l_init : 6; /**< [ 35: 30](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
+ to the same rank and DIMM, and same BG for DDR4. */
+ uint64_t w2r_l_init : 6; /**< [ 41: 36](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
+ to the same rank and DIMM, and same BG for DDR4. */
+ uint64_t w2w_l_init : 6; /**< [ 47: 42](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
+ accesses to the same rank and DIMM, and same BG for DDR4. */
+ uint64_t w2r_init_ext : 1; /**< [ 48: 48](R/W/H) A 1-bit extension to the [W2R_INIT] register. */
+ uint64_t w2r_l_init_ext : 1; /**< [ 49: 49](R/W/H) A 1-bit extension to the [W2R_L_INIT] register. */
+ uint64_t reserved_50_63 : 14;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_slot_ctl0_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_50_63 : 14;
+ uint64_t w2r_l_init_ext : 1; /**< [ 49: 49](RO) Reserved. */
+ uint64_t w2r_init_ext : 1; /**< [ 48: 48](RO) Reserved. */
+ uint64_t w2w_l_init : 6; /**< [ 47: 42](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
+ accesses to the same rank and DIMM, and same BG for DDR4. */
+ uint64_t w2r_l_init : 6; /**< [ 41: 36](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
+ to the same rank and DIMM, and same BG for DDR4. */
+ uint64_t r2w_l_init : 6; /**< [ 35: 30](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
+ to the same rank and DIMM, and same BG for DDR4. */
+ uint64_t r2r_l_init : 6; /**< [ 29: 24](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
+ to the same rank and DIMM, and same BG for DDR4. */
+ uint64_t w2w_init : 6; /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
+ accesses to the same rank and DIMM. */
+ uint64_t w2r_init : 6; /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
+ to the same rank and DIMM. */
+ uint64_t r2w_init : 6; /**< [ 11: 6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
+ to the same rank and DIMM. */
+ uint64_t r2r_init : 6; /**< [ 5: 0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
+ to the same rank and DIMM. */
+#else /* Word 0 - Little Endian */
+ uint64_t r2r_init : 6; /**< [ 5: 0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
+ to the same rank and DIMM. */
+ uint64_t r2w_init : 6; /**< [ 11: 6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
+ to the same rank and DIMM. */
+ uint64_t w2r_init : 6; /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
+ to the same rank and DIMM. */
+ uint64_t w2w_init : 6; /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
+ accesses to the same rank and DIMM. */
+ uint64_t r2r_l_init : 6; /**< [ 29: 24](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
+ to the same rank and DIMM, and same BG for DDR4. */
+ uint64_t r2w_l_init : 6; /**< [ 35: 30](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
+ to the same rank and DIMM, and same BG for DDR4. */
+ uint64_t w2r_l_init : 6; /**< [ 41: 36](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
+ to the same rank and DIMM, and same BG for DDR4. */
+ uint64_t w2w_l_init : 6; /**< [ 47: 42](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
+ accesses to the same rank and DIMM, and same BG for DDR4. */
+ uint64_t w2r_init_ext : 1; /**< [ 48: 48](RO) Reserved. */
+ uint64_t w2r_l_init_ext : 1; /**< [ 49: 49](RO) Reserved. */
+ uint64_t reserved_50_63 : 14;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ /* struct bdk_lmcx_slot_ctl0_s cn9; */
+ /* struct bdk_lmcx_slot_ctl0_s cn81xx; */
+ struct bdk_lmcx_slot_ctl0_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_50_63 : 14;
+    uint64_t w2r_l_init_ext        : 1;  /**< [ 49: 49](R/W/H) A 1-bit extension to the [W2R_L_INIT] register. */
+ uint64_t w2r_init_ext : 1; /**< [ 48: 48](R/W/H) A 1-bit extension to the [W2R_INIT] register. */
+ uint64_t w2w_l_init : 6; /**< [ 47: 42](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
+ accesses to the same rank and DIMM, and same BG for DDR4. */
+ uint64_t w2r_l_init : 6; /**< [ 41: 36](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
+ to the same rank and DIMM, and same BG for DDR4. */
+ uint64_t r2w_l_init : 6; /**< [ 35: 30](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
+ to the same rank and DIMM, and same BG for DDR4. */
+ uint64_t r2r_l_init : 6; /**< [ 29: 24](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
+ to the same rank and DIMM, and same BG for DDR4. */
+ uint64_t w2w_init : 6; /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
+ accesses to the same rank and DIMM. */
+ uint64_t w2r_init : 6; /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
+ to the same rank and DIMM. */
+ uint64_t r2w_init : 6; /**< [ 11: 6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
+ to the same rank and DIMM. */
+ uint64_t r2r_init : 6; /**< [ 5: 0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
+ to the same rank and DIMM. */
+#else /* Word 0 - Little Endian */
+ uint64_t r2r_init : 6; /**< [ 5: 0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
+ to the same rank and DIMM. */
+ uint64_t r2w_init : 6; /**< [ 11: 6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
+ to the same rank and DIMM. */
+ uint64_t w2r_init : 6; /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
+ to the same rank and DIMM. */
+ uint64_t w2w_init : 6; /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
+ accesses to the same rank and DIMM. */
+ uint64_t r2r_l_init : 6; /**< [ 29: 24](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
+ to the same rank and DIMM, and same BG for DDR4. */
+ uint64_t r2w_l_init : 6; /**< [ 35: 30](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
+ to the same rank and DIMM, and same BG for DDR4. */
+ uint64_t w2r_l_init : 6; /**< [ 41: 36](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
+ to the same rank and DIMM, and same BG for DDR4. */
+ uint64_t w2w_l_init : 6; /**< [ 47: 42](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
+ accesses to the same rank and DIMM, and same BG for DDR4. */
+ uint64_t w2r_init_ext : 1; /**< [ 48: 48](R/W/H) A 1-bit extension to the [W2R_INIT] register. */
+ uint64_t w2r_l_init_ext : 1; /**< [ 49: 49](R/W/H) A 1-bit extension to the [W2R_L_INIT] register. */
+ uint64_t reserved_50_63 : 14;
+#endif /* Word 0 - End */
+ } cn83xx;
+ struct bdk_lmcx_slot_ctl0_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_50_63 : 14;
+ uint64_t w2r_l_init_ext : 1; /**< [ 49: 49](R/W/H) A 1-bit extension to the W2R_L_INIT register. */
+ uint64_t w2r_init_ext : 1; /**< [ 48: 48](R/W/H) A 1-bit extension to the W2R_INIT register. */
+ uint64_t w2w_l_init : 6; /**< [ 47: 42](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
+ accesses to the same rank and DIMM, and same BG for DDR4. */
+ uint64_t w2r_l_init : 6; /**< [ 41: 36](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
+ to the same rank and DIMM, and same BG for DDR4. */
+ uint64_t r2w_l_init : 6; /**< [ 35: 30](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
+ to the same rank and DIMM, and same BG for DDR4. */
+ uint64_t r2r_l_init : 6; /**< [ 29: 24](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
+ to the same rank and DIMM, and same BG for DDR4. */
+ uint64_t w2w_init : 6; /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
+ accesses to the same rank and DIMM. */
+ uint64_t w2r_init : 6; /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
+ to the same rank and DIMM. */
+ uint64_t r2w_init : 6; /**< [ 11: 6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
+ to the same rank and DIMM. */
+ uint64_t r2r_init : 6; /**< [ 5: 0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
+ to the same rank and DIMM. */
+#else /* Word 0 - Little Endian */
+ uint64_t r2r_init : 6; /**< [ 5: 0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
+ to the same rank and DIMM. */
+ uint64_t r2w_init : 6; /**< [ 11: 6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
+ to the same rank and DIMM. */
+ uint64_t w2r_init : 6; /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
+ to the same rank and DIMM. */
+ uint64_t w2w_init : 6; /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
+ accesses to the same rank and DIMM. */
+ uint64_t r2r_l_init : 6; /**< [ 29: 24](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
+ to the same rank and DIMM, and same BG for DDR4. */
+ uint64_t r2w_l_init : 6; /**< [ 35: 30](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
+ to the same rank and DIMM, and same BG for DDR4. */
+ uint64_t w2r_l_init : 6; /**< [ 41: 36](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
+ to the same rank and DIMM, and same BG for DDR4. */
+ uint64_t w2w_l_init : 6; /**< [ 47: 42](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
+ accesses to the same rank and DIMM, and same BG for DDR4. */
+ uint64_t w2r_init_ext : 1; /**< [ 48: 48](R/W/H) A 1-bit extension to the W2R_INIT register. */
+ uint64_t w2r_l_init_ext : 1; /**< [ 49: 49](R/W/H) A 1-bit extension to the W2R_L_INIT register. */
+ uint64_t reserved_50_63 : 14;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_lmcx_slot_ctl0 bdk_lmcx_slot_ctl0_t;
+
+static inline uint64_t BDK_LMCX_SLOT_CTL0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_SLOT_CTL0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0880001f8ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0880001f8ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e0880001f8ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e0880001f8ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_SLOT_CTL0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_SLOT_CTL0(a) bdk_lmcx_slot_ctl0_t
+#define bustype_BDK_LMCX_SLOT_CTL0(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_SLOT_CTL0(a) "LMCX_SLOT_CTL0"
+#define device_bar_BDK_LMCX_SLOT_CTL0(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_SLOT_CTL0(a) (a)
+#define arguments_BDK_LMCX_SLOT_CTL0(a) (a),-1,-1,-1
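+
+/* Illustrative sketch (not part of the BDK): the generated union above is
+ * normally filled by reading the raw 64-bit CSR into .u and then decoding
+ * fields through the bit-field view. The standalone program below mirrors
+ * the little-endian Word 0 layout of LMC()_SLOT_CTL0 with a local,
+ * hypothetical copy of the low fields and a made-up raw value, purely to
+ * show the decode pattern and the 0x1000000 per-LMC address stride.
+ *
+ * #include <stdint.h>
+ * #include <stdio.h>
+ *
+ * union slot_ctl0_sketch {
+ *     uint64_t u;
+ *     struct {
+ *         uint64_t r2r_init : 6;  // [5:0]   read-to-read spacing
+ *         uint64_t r2w_init : 6;  // [11:6]  read-to-write spacing
+ *         uint64_t w2r_init : 6;  // [17:12] write-to-read spacing
+ *         uint64_t w2w_init : 6;  // [23:18] write-to-write spacing
+ *         uint64_t rest     : 40; // [63:24] remaining fields, not decoded here
+ *     } s;
+ * };
+ *
+ * int main(void)
+ * {
+ *     union slot_ctl0_sketch ctl = { .u = 0x00000000000a30c3ull }; // hypothetical read
+ *     printf("r2r=%u r2w=%u w2r=%u w2w=%u\n",
+ *            (unsigned)ctl.s.r2r_init, (unsigned)ctl.s.r2w_init,
+ *            (unsigned)ctl.s.w2r_init, (unsigned)ctl.s.w2w_init);
+ *     for (unsigned lmc = 0; lmc < 4; lmc++)  // CN88XX has four LMCs
+ *         printf("LMC%u SLOT_CTL0 @ 0x%llx\n", lmc,
+ *                (unsigned long long)(0x87e0880001f8ull + 0x1000000ull * lmc));
+ *     return 0;
+ * }
+ */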
+
+/**
+ * Register (RSL) lmc#_slot_ctl1
+ *
+ * LMC Slot Control1 Register
+ * This register is an assortment of control fields needed by the memory controller. If software
+ * has not previously written to this register (since the last DRESET), hardware updates the
+ * fields in this register to the minimum allowed value when any of LMC()_RLEVEL_RANK(),
+ * LMC()_WLEVEL_RANK(), LMC()_CONTROL and LMC()_MODEREG_PARAMS0 change.
+ * Ideally, only read this register after LMC has been initialized and
+ * LMC()_RLEVEL_RANK(), LMC()_WLEVEL_RANK() have valid data.
+ *
+ * The interpretation of the fields in this CSR depends on LMC()_CONTROL[DDR2T]:
+ *
+ * * If LMC()_CONTROL[DDR2T]=1, (FieldValue + 4) is the minimum CK cycles between when the
+ * DRAM part registers CAS commands of the first and second types from different cache blocks.
+ *
+ * * If LMC()_CONTROL[DDR2T]=0, (FieldValue + 3) is the minimum CK cycles between when the DRAM
+ * part registers CAS commands of the first and second types from different cache blocks.
+ * FieldValue = 0 is always illegal in this case.
+ *
+ * The hardware-calculated minimums for these fields are shown in the LMC()_SLOT_CTL1
+ * hardware-calculated minimums table.
+ */
+union bdk_lmcx_slot_ctl1
+{
+ uint64_t u;
+ struct bdk_lmcx_slot_ctl1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t w2w_xrank_init : 6; /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
+ accesses across ranks of the same DIMM. */
+ uint64_t w2r_xrank_init : 6; /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
+ across ranks of the same DIMM. */
+ uint64_t r2w_xrank_init : 6; /**< [ 11: 6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
+ across ranks of the same DIMM. */
+ uint64_t r2r_xrank_init : 6; /**< [ 5: 0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
+ across ranks of the same DIMM. */
+#else /* Word 0 - Little Endian */
+ uint64_t r2r_xrank_init : 6; /**< [ 5: 0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
+ across ranks of the same DIMM. */
+ uint64_t r2w_xrank_init : 6; /**< [ 11: 6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
+ across ranks of the same DIMM. */
+ uint64_t w2r_xrank_init : 6; /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
+ across ranks of the same DIMM. */
+ uint64_t w2w_xrank_init : 6; /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
+ accesses across ranks of the same DIMM. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_slot_ctl1_s cn; */
+};
+typedef union bdk_lmcx_slot_ctl1 bdk_lmcx_slot_ctl1_t;
+
+static inline uint64_t BDK_LMCX_SLOT_CTL1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_SLOT_CTL1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000200ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000200ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000200ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000200ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_SLOT_CTL1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_SLOT_CTL1(a) bdk_lmcx_slot_ctl1_t
+#define bustype_BDK_LMCX_SLOT_CTL1(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_SLOT_CTL1(a) "LMCX_SLOT_CTL1"
+#define device_bar_BDK_LMCX_SLOT_CTL1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_SLOT_CTL1(a) (a)
+#define arguments_BDK_LMCX_SLOT_CTL1(a) (a),-1,-1,-1
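+
+/* Illustrative sketch (not part of the BDK): per the register description
+ * above, a SLOT_CTL spacing field maps to a minimum CK-cycle gap between
+ * the two CAS commands, offset by LMC()_CONTROL[DDR2T]. A minimal helper
+ * under that reading (name hypothetical):
+ *
+ * // Minimum CK cycles implied by a 6-bit spacing field; ddr2t mirrors
+ * // LMC()_CONTROL[DDR2T]. field_value == 0 is illegal when ddr2t == 0.
+ * static inline unsigned slot_ctl_min_ck(unsigned field_value, int ddr2t)
+ * {
+ *     return field_value + (ddr2t ? 4 : 3);
+ * }
+ *
+ * // Example: w2r_xrank_init = 5 with DDR2T = 0 gives a minimum of 8 CK.
+ */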
+
+/**
+ * Register (RSL) lmc#_slot_ctl2
+ *
+ * LMC Slot Control2 Register
+ * This register is an assortment of control fields needed by the memory controller. If software
+ * has not previously written to this register (since the last DRESET), hardware updates the
+ * fields in this register to the minimum allowed value when any of LMC()_RLEVEL_RANK(),
+ * LMC()_WLEVEL_RANK(), LMC()_CONTROL and LMC()_MODEREG_PARAMS0 change.
+ * Ideally, only read this register after LMC has been initialized and
+ * LMC()_RLEVEL_RANK(), LMC()_WLEVEL_RANK() have valid data.
+ *
+ * The interpretation of the fields in this CSR depends on LMC()_CONTROL[DDR2T]:
+ *
+ * * If LMC()_CONTROL[DDR2T] = 1, (FieldValue + 4) is the minimum CK cycles between when the
+ * DRAM part registers CAS commands of the first and second types from different cache blocks.
+ *
+ * * If LMC()_CONTROL[DDR2T] = 0, (FieldValue + 3) is the minimum CK cycles between when the
+ * DRAM part registers CAS commands of the first and second types from different cache blocks.
+ * FieldValue = 0 is always illegal in this case.
+ *
+ * The hardware-calculated minimums for these fields are shown in LMC Registers.
+ */
+union bdk_lmcx_slot_ctl2
+{
+ uint64_t u;
+ struct bdk_lmcx_slot_ctl2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t w2w_xdimm_init : 6; /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
+ accesses across DIMMs. */
+ uint64_t w2r_xdimm_init : 6; /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
+ across DIMMs. */
+ uint64_t r2w_xdimm_init : 6; /**< [ 11: 6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
+ across DIMMs. */
+ uint64_t r2r_xdimm_init : 6; /**< [ 5: 0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
+ across DIMMs. */
+#else /* Word 0 - Little Endian */
+ uint64_t r2r_xdimm_init : 6; /**< [ 5: 0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
+ across DIMMs. */
+ uint64_t r2w_xdimm_init : 6; /**< [ 11: 6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
+ across DIMMs. */
+ uint64_t w2r_xdimm_init : 6; /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
+ across DIMMs. */
+ uint64_t w2w_xdimm_init : 6; /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
+ accesses across DIMMs. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_slot_ctl2_s cn; */
+};
+typedef union bdk_lmcx_slot_ctl2 bdk_lmcx_slot_ctl2_t;
+
+static inline uint64_t BDK_LMCX_SLOT_CTL2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_SLOT_CTL2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000208ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000208ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000208ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000208ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_SLOT_CTL2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_SLOT_CTL2(a) bdk_lmcx_slot_ctl2_t
+#define bustype_BDK_LMCX_SLOT_CTL2(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_SLOT_CTL2(a) "LMCX_SLOT_CTL2"
+#define device_bar_BDK_LMCX_SLOT_CTL2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_SLOT_CTL2(a) (a)
+#define arguments_BDK_LMCX_SLOT_CTL2(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) lmc#_slot_ctl3
+ *
+ * LMC Slot Control3 Register
+ * This register is an assortment of control fields needed by the memory controller. If software
+ * has not previously written to this register (since the last DRESET), hardware updates the
+ * fields in this register to the minimum allowed value when any of LMC()_RLEVEL_RANK(),
+ * LMC()_WLEVEL_RANK(), LMC()_CONTROL and LMC()_MODEREG_PARAMS0 change.
+ * Ideally, only read this register after LMC has been initialized and
+ * LMC()_RLEVEL_RANK(), LMC()_WLEVEL_RANK() have valid data.
+ *
+ * The interpretation of the fields in this CSR depends on LMC()_CONTROL[DDR2T]:
+ *
+ * * If LMC()_CONTROL[DDR2T] = 1, (FieldValue + 4) is the minimum CK cycles between when the
+ * DRAM part registers CAS commands of the first and second types from different cache blocks.
+ *
+ * * If LMC()_CONTROL[DDR2T] = 0, (FieldValue + 3) is the minimum CK cycles between when the
+ * DRAM part registers CAS commands of the first and second types from different cache blocks.
+ * FieldValue = 0 is always illegal in this case.
+ *
+ * The hardware-calculated minimums for these fields are shown in LMC Registers.
+ */
+union bdk_lmcx_slot_ctl3
+{
+ uint64_t u;
+ struct bdk_lmcx_slot_ctl3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_50_63 : 14;
+ uint64_t w2r_l_xrank_init_ext : 1; /**< [ 49: 49](RO) Reserved. */
+ uint64_t w2r_xrank_init_ext : 1; /**< [ 48: 48](RO) Reserved. */
+ uint64_t w2w_l_xrank_init : 6; /**< [ 47: 42](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
+ accesses to a different logical rank, and same BG for DDR4. */
+ uint64_t w2r_l_xrank_init : 6; /**< [ 41: 36](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
+ to a different logical rank, and same BG for DDR4. */
+ uint64_t r2w_l_xrank_init : 6; /**< [ 35: 30](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
+ to a different logical rank, and same BG for DDR4. */
+ uint64_t r2r_l_xrank_init : 6; /**< [ 29: 24](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
+ to a different logical rank, and same BG for DDR4. */
+ uint64_t w2w_xrank_init : 6; /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
+ accesses to a different logical rank. */
+ uint64_t w2r_xrank_init : 6; /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
+ to a different logical rank. */
+ uint64_t r2w_xrank_init : 6; /**< [ 11: 6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
+ to a different logical rank. */
+ uint64_t r2r_xrank_init : 6; /**< [ 5: 0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
+ to a different logical rank. */
+#else /* Word 0 - Little Endian */
+ uint64_t r2r_xrank_init : 6; /**< [ 5: 0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
+ to a different logical rank. */
+ uint64_t r2w_xrank_init : 6; /**< [ 11: 6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
+ to a different logical rank. */
+ uint64_t w2r_xrank_init : 6; /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
+ to a different logical rank. */
+ uint64_t w2w_xrank_init : 6; /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
+ accesses to a different logical rank. */
+ uint64_t r2r_l_xrank_init : 6; /**< [ 29: 24](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
+ to a different logical rank, and same BG for DDR4. */
+ uint64_t r2w_l_xrank_init : 6; /**< [ 35: 30](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
+ to a different logical rank, and same BG for DDR4. */
+ uint64_t w2r_l_xrank_init : 6; /**< [ 41: 36](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
+ to a different logical rank, and same BG for DDR4. */
+ uint64_t w2w_l_xrank_init : 6; /**< [ 47: 42](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
+ accesses to a different logical rank, and same BG for DDR4. */
+ uint64_t w2r_xrank_init_ext : 1; /**< [ 48: 48](RO) Reserved. */
+ uint64_t w2r_l_xrank_init_ext : 1; /**< [ 49: 49](RO) Reserved. */
+ uint64_t reserved_50_63 : 14;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_slot_ctl3_s cn88xxp1; */
+ struct bdk_lmcx_slot_ctl3_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_50_63 : 14;
+ uint64_t w2r_l_xrank_init_ext : 1; /**< [ 49: 49](R/W/H) A 1-bit extension to the [W2R_L_XRANK_INIT] register. */
+ uint64_t w2r_xrank_init_ext : 1; /**< [ 48: 48](R/W/H) A 1-bit extension to the [W2R_XRANK_INIT] register. */
+ uint64_t w2w_l_xrank_init : 6; /**< [ 47: 42](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
+ accesses to a different logical rank, and same BG for DDR4. */
+ uint64_t w2r_l_xrank_init : 6; /**< [ 41: 36](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
+ to a different logical rank, and same BG for DDR4. */
+ uint64_t r2w_l_xrank_init : 6; /**< [ 35: 30](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
+ to a different logical rank, and same BG for DDR4. */
+ uint64_t r2r_l_xrank_init : 6; /**< [ 29: 24](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
+ to a different logical rank, and same BG for DDR4. */
+ uint64_t w2w_xrank_init : 6; /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
+ accesses to a different logical rank. */
+ uint64_t w2r_xrank_init : 6; /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
+ to a different logical rank. */
+ uint64_t r2w_xrank_init : 6; /**< [ 11: 6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
+ to a different logical rank. */
+ uint64_t r2r_xrank_init : 6; /**< [ 5: 0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
+ to a different logical rank. */
+#else /* Word 0 - Little Endian */
+ uint64_t r2r_xrank_init : 6; /**< [ 5: 0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
+ to a different logical rank. */
+ uint64_t r2w_xrank_init : 6; /**< [ 11: 6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
+ to a different logical rank. */
+ uint64_t w2r_xrank_init : 6; /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
+ to a different logical rank. */
+ uint64_t w2w_xrank_init : 6; /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
+ accesses to a different logical rank. */
+ uint64_t r2r_l_xrank_init : 6; /**< [ 29: 24](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
+ to a different logical rank, and same BG for DDR4. */
+ uint64_t r2w_l_xrank_init : 6; /**< [ 35: 30](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
+ to a different logical rank, and same BG for DDR4. */
+ uint64_t w2r_l_xrank_init : 6; /**< [ 41: 36](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
+ to a different logical rank, and same BG for DDR4. */
+ uint64_t w2w_l_xrank_init : 6; /**< [ 47: 42](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
+ accesses to a different logical rank, and same BG for DDR4. */
+ uint64_t w2r_xrank_init_ext : 1; /**< [ 48: 48](R/W/H) A 1-bit extension to the [W2R_XRANK_INIT] register. */
+ uint64_t w2r_l_xrank_init_ext : 1; /**< [ 49: 49](R/W/H) A 1-bit extension to the [W2R_L_XRANK_INIT] register. */
+ uint64_t reserved_50_63 : 14;
+#endif /* Word 0 - End */
+ } cn9;
+ /* struct bdk_lmcx_slot_ctl3_cn9 cn81xx; */
+ /* struct bdk_lmcx_slot_ctl3_cn9 cn83xx; */
+ struct bdk_lmcx_slot_ctl3_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_50_63 : 14;
+ uint64_t w2r_l_xrank_init_ext : 1; /**< [ 49: 49](R/W/H) A 1-bit extension to the W2R_L_XRANK_INIT register. */
+ uint64_t w2r_xrank_init_ext : 1; /**< [ 48: 48](R/W/H) A 1-bit extension to the W2R_XRANK_INIT register. */
+ uint64_t w2w_l_xrank_init : 6; /**< [ 47: 42](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
+ accesses to a different logical rank, and same BG for DDR4. */
+ uint64_t w2r_l_xrank_init : 6; /**< [ 41: 36](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
+ to a different logical rank, and same BG for DDR4. */
+ uint64_t r2w_l_xrank_init : 6; /**< [ 35: 30](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
+ to a different logical rank, and same BG for DDR4. */
+ uint64_t r2r_l_xrank_init : 6; /**< [ 29: 24](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
+ to a different logical rank, and same BG for DDR4. */
+ uint64_t w2w_xrank_init : 6; /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
+ accesses to a different logical rank. */
+ uint64_t w2r_xrank_init : 6; /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
+ to a different logical rank. */
+ uint64_t r2w_xrank_init : 6; /**< [ 11: 6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
+ to a different logical rank. */
+ uint64_t r2r_xrank_init : 6; /**< [ 5: 0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
+ to a different logical rank. */
+#else /* Word 0 - Little Endian */
+ uint64_t r2r_xrank_init : 6; /**< [ 5: 0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
+ to a different logical rank. */
+ uint64_t r2w_xrank_init : 6; /**< [ 11: 6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
+ to a different logical rank. */
+ uint64_t w2r_xrank_init : 6; /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
+ to a different logical rank. */
+ uint64_t w2w_xrank_init : 6; /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
+ accesses to a different logical rank. */
+ uint64_t r2r_l_xrank_init : 6; /**< [ 29: 24](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
+ to a different logical rank, and same BG for DDR4. */
+ uint64_t r2w_l_xrank_init : 6; /**< [ 35: 30](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
+ to a different logical rank, and same BG for DDR4. */
+ uint64_t w2r_l_xrank_init : 6; /**< [ 41: 36](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
+ to a different logical rank, and same BG for DDR4. */
+ uint64_t w2w_l_xrank_init : 6; /**< [ 47: 42](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
+ accesses to a different logical rank, and same BG for DDR4. */
+ uint64_t w2r_xrank_init_ext : 1; /**< [ 48: 48](R/W/H) A 1-bit extension to the W2R_XRANK_INIT register. */
+ uint64_t w2r_l_xrank_init_ext : 1; /**< [ 49: 49](R/W/H) A 1-bit extension to the W2R_L_XRANK_INIT register. */
+ uint64_t reserved_50_63 : 14;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_lmcx_slot_ctl3 bdk_lmcx_slot_ctl3_t;
+
+static inline uint64_t BDK_LMCX_SLOT_CTL3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_SLOT_CTL3(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000248ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000248ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000248ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000248ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_SLOT_CTL3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_SLOT_CTL3(a) bdk_lmcx_slot_ctl3_t
+#define bustype_BDK_LMCX_SLOT_CTL3(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_SLOT_CTL3(a) "LMCX_SLOT_CTL3"
+#define device_bar_BDK_LMCX_SLOT_CTL3(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_SLOT_CTL3(a) (a)
+#define arguments_BDK_LMCX_SLOT_CTL3(a) (a),-1,-1,-1
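+
+/* Illustrative sketch (not part of the BDK): every BDK_LMCX_* address helper
+ * above has the same shape -- check the LMC index against the model's
+ * controller count, then compute base + 0x1000000 * index. A standalone
+ * stand-in for that dispatch (names and the error path are hypothetical):
+ *
+ * #include <stdint.h>
+ * #include <stdio.h>
+ * #include <stdlib.h>
+ *
+ * static uint64_t lmc_csr_addr(uint64_t base, unsigned lmc, unsigned num_lmc)
+ * {
+ *     if (lmc >= num_lmc) {              // stands in for the __bdk_csr_fatal guard
+ *         fprintf(stderr, "invalid LMC index %u\n", lmc);
+ *         exit(1);
+ *     }
+ *     return base + 0x1000000ull * lmc;  // 16 MB stride between controllers
+ * }
+ *
+ * // e.g. lmc_csr_addr(0x87e088000248ull, 2, 4) selects LMC2 SLOT_CTL3 on a
+ * // four-controller part such as CN88XX.
+ */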
+
+/**
+ * Register (RSL) lmc#_timing_params0
+ *
+ * LMC Timing Parameters Register 0
+ */
+union bdk_lmcx_timing_params0
+{
+ uint64_t u;
+ struct bdk_lmcx_timing_params0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_61_63 : 3;
+ uint64_t tckesr : 4; /**< [ 60: 57](R/W) Indicates TCKESR constraints. Set this field as follows:
+ _ RNDUP[TCKESR(ns) / TCYC(ns)] - 1
+
+ where TCKESR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate). */
+ uint64_t tzqoper : 3; /**< [ 56: 54](R/W) Indicates tZQoper constraints. Set this field as follows:
+
+ _ RNDUP[tZQoper(nCK) / 128]
+
+ where tZQoper is from the JEDEC DDR4 spec.
+
+ TYP = 4. */
+ uint64_t tbcw : 6; /**< [ 53: 48](R/W) Indicates tBCW constraints. Set this field as follows:
+ _ RNDUP[TBCW(ns) / TCYC(ns)] - 1
+
+ where TBCW is from the JEDEC DDR4DB spec, and TCYC(ns) is the DDR clock frequency (not
+ data rate).
+
+ TYP = 16. */
+ uint64_t tcksre : 4; /**< [ 47: 44](R/W) Indicates TCKSRE constraints. Set this field as follows:
+ _ RNDUP[TCKSRE(ns) / TCYC(ns)] - 1
+
+ where TCKSRE is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(5nCK, 10 ns). */
+ uint64_t trp : 5; /**< [ 43: 39](R/W) Indicates TRP constraints. Set TRP as follows:
+
+ _ RNDUP[TRP(ns) / TCYC(ns)] - 1
+
+ where TRP and TRTP are from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency
+ (not data rate).
+
+ TYP TRP = 10-15 ns.
+
+ TYP TRTP = max(4nCK, 7.5 ns). */
+ uint64_t tzqinit : 4; /**< [ 38: 35](R/W) Indicates TZQINIT constraints. Set this field as follows:
+
+ _ RNDUP[TZQINIT(ns) / (256 * TCYC(ns))]
+
+ where TZQINIT is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 2 (equivalent to 512). */
+ uint64_t tdllk : 4; /**< [ 34: 31](R/W) Indicates TDLLK constraints. Set this field as follows:
+
+ _ RNDUP[TDLLK(ns) / (256 * TCYC(ns))]
+
+ where TDLLK is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 3 (equivalent to 768).
+
+ This parameter is used in self-refresh exit and assumed to be greater than TRFC. */
+ uint64_t tmod : 5; /**< [ 30: 26](R/W) Indicates tMOD constraints. Set this field as follows:
+
+ _ RNDUP[TMOD(ns) / TCYC(ns)] - 1
+
+ where TMOD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(24nCK, 15 ns). */
+ uint64_t tmrd : 4; /**< [ 25: 22](R/W) Indicates TMRD constraints. Set this field as follows:
+
+ _ RNDUP[TMRD(ns) / TCYC(ns)] - 1
+
+ where TMRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 8nCK. */
+ uint64_t txpr : 6; /**< [ 21: 16](R/W) Indicates TXPR constraints. Set this field as follows:
+
+ _ RNDUP[TXPR(ns) / (16 * TCYC(ns))]
+
+ where TXPR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(5nCK, TRFC+10 ns). */
+ uint64_t tcke : 4; /**< [ 15: 12](R/W) Indicates TCKE constraints. Set this field as follows:
+
+ _ RNDUP[TCKE(ns) / TCYC(ns)] - 1
+
+ where TCKE is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(3nCK, 7.5/5.625/5.625/5 ns).
+
+ Because a DDR4 register can shorten the pulse width of CKE (it delays the falling edge
+ but does not delay the rising edge), care must be taken to set this parameter larger
+ to account for this effective reduction in the pulse width. */
+ uint64_t tzqcs : 4; /**< [ 11: 8](R/W) Indicates TZQCS constraints. This field is set as follows:
+
+ _ RNDUP[(2 * TZQCS(ns)) / (16 * TCYC(ns))]
+
+ where TZQCS is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP \>= 8 (greater-than-or-equal-to 128), to allow for dclk90 calibration. */
+ uint64_t reserved_0_7 : 8;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_7 : 8;
+ uint64_t tzqcs : 4; /**< [ 11: 8](R/W) Indicates TZQCS constraints. This field is set as follows:
+
+ _ RNDUP[(2 * TZQCS(ns)) / (16 * TCYC(ns))]
+
+ where TZQCS is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP \>= 8 (greater-than-or-equal-to 128), to allow for dclk90 calibration. */
+ uint64_t tcke : 4; /**< [ 15: 12](R/W) Indicates TCKE constraints. Set this field as follows:
+
+ _ RNDUP[TCKE(ns) / TCYC(ns)] - 1
+
+ where TCKE is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(3nCK, 7.5/5.625/5.625/5 ns).
+
+ Because a DDR4 register can shorten the pulse width of CKE (it delays the falling edge
+ but does not delay the rising edge), care must be taken to set this parameter larger
+ to account for this effective reduction in the pulse width. */
+ uint64_t txpr : 6; /**< [ 21: 16](R/W) Indicates TXPR constraints. Set this field as follows:
+
+ _ RNDUP[TXPR(ns) / (16 * TCYC(ns))]
+
+ where TXPR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(5nCK, TRFC+10 ns). */
+ uint64_t tmrd : 4; /**< [ 25: 22](R/W) Indicates TMRD constraints. Set this field as follows:
+
+ _ RNDUP[TMRD(ns) / TCYC(ns)] - 1
+
+ where TMRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 8nCK. */
+ uint64_t tmod : 5; /**< [ 30: 26](R/W) Indicates tMOD constraints. Set this field as follows:
+
+ _ RNDUP[TMOD(ns) / TCYC(ns)] - 1
+
+ where TMOD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(24nCK, 15 ns). */
+ uint64_t tdllk : 4; /**< [ 34: 31](R/W) Indicates TDLLK constraints. Set this field as follows:
+
+ _ RNDUP[TDLLK(ns) / (256 * TCYC(ns))]
+
+ where TDLLK is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 3 (equivalent to 768).
+
+ This parameter is used in self-refresh exit and assumed to be greater than TRFC. */
+ uint64_t tzqinit : 4; /**< [ 38: 35](R/W) Indicates TZQINIT constraints. Set this field as follows:
+
+ _ RNDUP[TZQINIT(ns) / (256 * TCYC(ns))]
+
+ where TZQINIT is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 2 (equivalent to 512). */
+ uint64_t trp : 5; /**< [ 43: 39](R/W) Indicates TRP constraints. Set TRP as follows:
+
+ _ RNDUP[TRP(ns) / TCYC(ns)] - 1
+
+ where TRP and TRTP are from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency
+ (not data rate).
+
+ TYP TRP = 10-15 ns.
+
+ TYP TRTP = max(4nCK, 7.5 ns). */
+ uint64_t tcksre : 4; /**< [ 47: 44](R/W) Indicates TCKSRE constraints. Set this field as follows:
+ _ RNDUP[TCKSRE(ns) / TCYC(ns)] - 1
+
+ where TCKSRE is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(5nCK, 10 ns). */
+ uint64_t tbcw : 6; /**< [ 53: 48](R/W) Indicates tBCW constraints. Set this field as follows:
+ _ RNDUP[TBCW(ns) / TCYC(ns)] - 1
+
+ where TBCW is from the JEDEC DDR4DB spec, and TCYC(ns) is the DDR clock frequency (not
+ data rate).
+
+ TYP = 16. */
+ uint64_t tzqoper : 3; /**< [ 56: 54](R/W) Indicates tZQoper constraints. Set this field as follows:
+
+ _ RNDUP[tZQoper(nCK) / 128]
+
+ where tZQoper is from the JEDEC DDR4 spec.
+
+ TYP = 4. */
+ uint64_t tckesr : 4; /**< [ 60: 57](R/W) Indicates TCKESR constraints. Set this field as follows:
+ _ RNDUP[TCKESR(ns) / TCYC(ns)] - 1
+
+ where TCKESR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate). */
+ uint64_t reserved_61_63 : 3;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_timing_params0_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_53_63 : 11;
+ uint64_t tbcw : 5; /**< [ 52: 48](R/W) Indicates tBCW constraints. Set this field as follows:
+ _ RNDUP[TBCW(ns) / TCYC(ns)] - 1
+
+ where TBCW is from the JEDEC DDR4DB spec, and TCYC(ns) is the DDR clock frequency (not
+ data rate).
+
+ TYP = 16. */
+ uint64_t tcksre : 4; /**< [ 47: 44](R/W) Indicates TCKSRE constraints. Set this field as follows:
+ _ RNDUP[TCKSRE(ns) / TCYC(ns)] - 1
+
+ where TCKSRE is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(5nCK, 10 ns). */
+ uint64_t trp : 5; /**< [ 43: 39](R/W) Indicates TRP constraints. Set TRP as follows:
+
+ _ RNDUP[TRP(ns) / TCYC(ns)] - 1
+
+ where TRP and TRTP are from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency
+ (not data rate).
+
+ TYP TRP = 10-15 ns.
+
+ TYP TRTP = max(4nCK, 7.5 ns). */
+ uint64_t tzqinit : 4; /**< [ 38: 35](R/W) Indicates TZQINIT constraints. Set this field as follows:
+
+ _ RNDUP[TZQINIT(ns) / (256 * TCYC(ns))]
+
+ where TZQINIT is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 2 (equivalent to 512). */
+ uint64_t tdllk : 4; /**< [ 34: 31](R/W) Indicates TDLLK constraints. Set this field as follows:
+
+ _ RNDUP[TDLLK(ns) / (256 * TCYC(ns))]
+
+ where TDLLK is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 2 (equivalent to 512).
+
+ This parameter is used in self-refresh exit and assumed to be greater than TRFC. */
+ uint64_t tmod : 5; /**< [ 30: 26](R/W) Indicates tMOD constraints. Set this field as follows:
+
+ _ RNDUP[TMOD(ns) / TCYC(ns)] - 1
+
+ where TMOD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(12nCK, 15 ns). */
+ uint64_t tmrd : 4; /**< [ 25: 22](R/W) Indicates TMRD constraints. Set this field as follows:
+
+ _ RNDUP[TMRD(ns) / TCYC(ns)] - 1
+
+ where TMRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 4nCK. */
+ uint64_t txpr : 6; /**< [ 21: 16](R/W) Indicates TXPR constraints. Set this field as follows:
+
+ _ RNDUP[TXPR(ns) / (16 * TCYC(ns))]
+
+ where TXPR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(5nCK, TRFC+10 ns). */
+ uint64_t tcke : 4; /**< [ 15: 12](R/W) Indicates TCKE constraints. Set this field as follows:
+
+ _ RNDUP[TCKE(ns) / TCYC(ns)] - 1
+
+ where TCKE is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(3nCK, 7.5/5.625/5.625/5 ns).
+
+ Because a DDR4 register can shorten the pulse width of CKE (it delays the falling edge
+ but does not delay the rising edge), care must be taken to set this parameter larger
+ to account for this effective reduction in the pulse width. */
+ uint64_t tzqcs : 4; /**< [ 11: 8](R/W) Indicates TZQCS constraints. This field is set as follows:
+
+ _ RNDUP[(2 * TZQCS(ns)) / (16 * TCYC(ns))]
+
+ where TZQCS is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP \>= 8 (greater-than-or-equal-to 128), to allow for dclk90 calibration. */
+ uint64_t reserved_0_7 : 8;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_7 : 8;
+ uint64_t tzqcs : 4; /**< [ 11: 8](R/W) Indicates TZQCS constraints. This field is set as follows:
+
+ _ RNDUP[(2 * TZQCS(ns)) / (16 * TCYC(ns))]
+
+ where TZQCS is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP \>= 8 (greater-than-or-equal-to 128), to allow for dclk90 calibration. */
+ uint64_t tcke : 4; /**< [ 15: 12](R/W) Indicates TCKE constraints. Set this field as follows:
+
+ _ RNDUP[TCKE(ns) / TCYC(ns)] - 1
+
+ where TCKE is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(3nCK, 7.5/5.625/5.625/5 ns).
+
+ Because a DDR4 register can shorten the pulse width of CKE (it delays the falling edge
+ but does not delay the rising edge), care must be taken to set this parameter larger
+ to account for this effective reduction in the pulse width. */
+ uint64_t txpr : 6; /**< [ 21: 16](R/W) Indicates TXPR constraints. Set this field as follows:
+
+ _ RNDUP[TXPR(ns) / (16 * TCYC(ns))]
+
+ where TXPR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(5nCK, TRFC+10 ns). */
+ uint64_t tmrd : 4; /**< [ 25: 22](R/W) Indicates TMRD constraints. Set this field as follows:
+
+ _ RNDUP[TMRD(ns) / TCYC(ns)] - 1
+
+ where TMRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 4nCK. */
+ uint64_t tmod : 5; /**< [ 30: 26](R/W) Indicates tMOD constraints. Set this field as follows:
+
+ _ RNDUP[TMOD(ns) / TCYC(ns)] - 1
+
+ where TMOD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(12nCK, 15 ns). */
+ uint64_t tdllk : 4; /**< [ 34: 31](R/W) Indicates TDLLK constraints. Set this field as follows:
+
+ _ RNDUP[TDLLK(ns) / (256 * TCYC(ns))]
+
+ where TDLLK is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 2 (equivalent to 512).
+
+ This parameter is used in self-refresh exit and assumed to be greater than TRFC. */
+ uint64_t tzqinit : 4; /**< [ 38: 35](R/W) Indicates TZQINIT constraints. Set this field as follows:
+
+ _ RNDUP[TZQINIT(ns) / (256 * TCYC(ns))]
+
+ where TZQINIT is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 2 (equivalent to 512). */
+ uint64_t trp : 5; /**< [ 43: 39](R/W) Indicates TRP constraints. Set TRP as follows:
+
+ _ RNDUP[TRP(ns) / TCYC(ns)] - 1
+
+ where TRP and TRTP are from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency
+ (not data rate).
+
+ TYP TRP = 10-15 ns.
+
+ TYP TRTP = max(4nCK, 7.5 ns). */
+ uint64_t tcksre : 4; /**< [ 47: 44](R/W) Indicates TCKSRE constraints. Set this field as follows:
+ _ RNDUP[TCKSRE(ns) / TCYC(ns)] - 1
+
+ where TCKSRE is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(5nCK, 10 ns). */
+ uint64_t tbcw : 5; /**< [ 52: 48](R/W) Indicates tBCW constraints. Set this field as follows:
+ _ RNDUP[TBCW(ns) / TCYC(ns)] - 1
+
+ where TBCW is from the JEDEC DDR4DB spec, and TCYC(ns) is the DDR clock frequency (not
+ data rate).
+
+ TYP = 16. */
+ uint64_t reserved_53_63 : 11;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_lmcx_timing_params0_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_61_63 : 3;
+ uint64_t tckesr : 4; /**< [ 60: 57](R/W) Indicates TCKESR constraints. Set this field as follows:
+ _ RNDUP[TCKESR(ns) / TCYC(ns)] - 1
+
+ where TCKESR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate). */
+ uint64_t tzqoper : 3; /**< [ 56: 54](R/W) Indicates tZQoper constraints. Set this field as follows:
+
+ _ RNDUP[tZQoper(nCK) / 128]
+
+ where tZQoper is from the JEDEC DDR4 spec.
+
+ TYP = 4. */
+ uint64_t tbcw : 6; /**< [ 53: 48](R/W) Indicates tBCW constraints. Set this field as follows:
+ _ RNDUP[TBCW(ns) / TCYC(ns)] - 1
+
+ where TBCW is from the JEDEC DDR4DB spec, and TCYC(ns) is the DDR clock frequency (not
+ data rate).
+
+ TYP = 16. */
+ uint64_t tcksre : 4; /**< [ 47: 44](R/W) Indicates TCKSRE constraints. Set this field as follows:
+ _ RNDUP[max(TCKSRE(ns), TCKOFF(ns)) / TCYC(ns)] - 1
+
+ where TCKSRE is from the JEDEC DDR4 spec, TCKOFF is from RCD spec and TCYC(ns) is the DDR
+ clock frequency (not data rate). */
+ uint64_t trp : 5; /**< [ 43: 39](R/W) Indicates TRP constraints. Set TRP as follows:
+
+ _ RNDUP[TRP(ns) / TCYC(ns)] - 1
+
+ where TRP is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not data
+ rate).
+
+ TYP TRP = 12.5-15 ns. */
+ uint64_t tzqinit : 4; /**< [ 38: 35](R/W) Indicates TZQINIT constraints. Set this field as follows:
+
+ _ RNDUP[TZQINIT(nCK) / 256]
+
+ where TZQINIT is from the JEDEC DDR4 spec.
+ TYP = 4 (equivalent to 1024 cycles). */
+ uint64_t tdllk : 4; /**< [ 34: 31](R/W) Indicates TDLLK constraints. Set this field as follows:
+
+ _ RNDUP[TDLLK(ns) / (256 * TCYC(ns))]
+
+ where TDLLK is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 3 (equivalent to 768).
+
+ This parameter is used in self-refresh exit and assumed to be greater than TRFC. */
+ uint64_t tmod : 5; /**< [ 30: 26](R/W) Indicates tMOD constraints. Set this field as follows:
+
+ _ RNDUP[TMOD(ns) / TCYC(ns)] - 1
+
+ where TMOD is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(24nCK, 15 ns). */
+ uint64_t tmrd : 4; /**< [ 25: 22](R/W) Indicates TMRD constraints. Set this field as follows:
+
+ _ RNDUP[TMRD(ns) / TCYC(ns)] - 1
+
+ where TMRD is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 8nCK. */
+ uint64_t txpr : 6; /**< [ 21: 16](R/W) Indicates TXPR constraints. Set this field as follows:
+
+ _ RNDUP[TXPR(ns) / (16 * TCYC(ns))]
+
+ where TXPR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(5nCK, TRFC+10 ns). */
+ uint64_t tcke : 4; /**< [ 15: 12](R/W) Indicates TCKE constraints. Set this field as follows:
+
+ _ RNDUP[TCKE(ns) / TCYC(ns)] - 1
+
+ where TCKE is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(3nCK, 7.5/5.625/5.625/5 ns).
+
+ Because a DDR4 register can shorten the pulse width of CKE (it delays the falling edge
+ but does not delay the rising edge), care must be taken to set this parameter larger
+ to account for this effective reduction in the pulse width. */
+ uint64_t tzqcs : 4; /**< [ 11: 8](R/W) Indicates TZQCS constraints. This field is set as follows:
+
+ _ RNDUP[(2 * TZQCS(ns)) / (16 * TCYC(ns))]
+
+ where TZQCS is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP \>= 8 (greater-than-or-equal-to 128), to allow for dclk90 calibration. */
+ uint64_t reserved_0_7 : 8;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_7 : 8;
+ uint64_t tzqcs : 4; /**< [ 11: 8](R/W) Indicates TZQCS constraints. This field is set as follows:
+
+ _ RNDUP[(2 * TZQCS(ns)) / (16 * TCYC(ns))]
+
+ where TZQCS is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP \>= 8 (greater-than-or-equal-to 128), to allow for dclk90 calibration. */
+ uint64_t tcke : 4; /**< [ 15: 12](R/W) Indicates TCKE constraints. Set this field as follows:
+
+ _ RNDUP[TCKE(ns) / TCYC(ns)] - 1
+
+ where TCKE is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(3nCK, 7.5/5.625/5.625/5 ns).
+
+ Because a DDR4 register can shorten the pulse width of CKE (it delays the falling edge
+ but does not delay the rising edge), care must be taken to set this parameter larger
+ to account for this effective reduction in the pulse width. */
+ uint64_t txpr : 6; /**< [ 21: 16](R/W) Indicates TXPR constraints. Set this field as follows:
+
+ _ RNDUP[TXPR(ns) / (16 * TCYC(ns))]
+
+ where TXPR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(5nCK, TRFC+10 ns). */
+ uint64_t tmrd : 4; /**< [ 25: 22](R/W) Indicates TMRD constraints. Set this field as follows:
+
+ _ RNDUP[TMRD(ns) / TCYC(ns)] - 1
+
+ where TMRD is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 8nCK. */
+ uint64_t tmod : 5; /**< [ 30: 26](R/W) Indicates tMOD constraints. Set this field as follows:
+
+ _ RNDUP[TMOD(ns) / TCYC(ns)] - 1
+
+ where TMOD is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(24nCK, 15 ns). */
+ uint64_t tdllk : 4; /**< [ 34: 31](R/W) Indicates TDLLK constraints. Set this field as follows:
+
+ _ RNDUP[TDLLK(ns) / (256 * TCYC(ns))]
+
+ where TDLLK is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 3 (equivalent to 768).
+
+ This parameter is used in self-refresh exit and assumed to be greater than TRFC. */
+ uint64_t tzqinit : 4; /**< [ 38: 35](R/W) Indicates TZQINIT constraints. Set this field as follows:
+
+ _ RNDUP[TZQINIT(nCK) / 256]
+
+ where TZQINIT is from the JEDEC DDR4 spec.
+ TYP = 4 (equivalent to 1024 cycles). */
+ uint64_t trp : 5; /**< [ 43: 39](R/W) Indicates TRP constraints. Set TRP as follows:
+
+ _ RNDUP[TRP(ns) / TCYC(ns)] - 1
+
+ where TRP is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not data
+ rate).
+
+ TYP TRP = 12.5-15 ns. */
+ uint64_t tcksre : 4; /**< [ 47: 44](R/W) Indicates TCKSRE constraints. Set this field as follows:
+ _ RNDUP[max(TCKSRE(ns), TCKOFF(ns)) / TCYC(ns)] - 1
+
+ where TCKSRE is from the JEDEC DDR4 spec, TCKOFF is from RCD spec and TCYC(ns) is the DDR
+ clock frequency (not data rate). */
+ uint64_t tbcw : 6; /**< [ 53: 48](R/W) Indicates tBCW constraints. Set this field as follows:
+ _ RNDUP[TBCW(ns) / TCYC(ns)] - 1
+
+ where TBCW is from the JEDEC DDR4DB spec, and TCYC(ns) is the DDR clock frequency (not
+ data rate).
+
+ TYP = 16. */
+ uint64_t tzqoper : 3; /**< [ 56: 54](R/W) Indicates tZQoper constraints. Set this field as follows:
+
+ _ RNDUP[tZQoper(nCK) / 128]
+
+ where tZQoper is from the JEDEC DDR4 spec.
+
+ TYP = 4. */
+ uint64_t tckesr : 4; /**< [ 60: 57](R/W) Indicates TCKESR constraints. Set this field as follows:
+ _ RNDUP[TCKESR(ns) / TCYC(ns)] - 1
+
+ where TCKESR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate). */
+ uint64_t reserved_61_63 : 3;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_lmcx_timing_params0_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_54_63 : 10;
+ uint64_t tbcw : 6; /**< [ 53: 48](R/W) Indicates tBCW constraints. Set this field as follows:
+ _ RNDUP[TBCW(ns) / TCYC(ns)] - 1
+
+ where TBCW is from the JEDEC DDR4DB spec, and TCYC(ns) is the DDR clock frequency (not
+ data rate).
+
+ TYP = 16. */
+ uint64_t tcksre : 4; /**< [ 47: 44](R/W) Indicates TCKSRE constraints. Set this field as follows:
+ _ RNDUP[TCKSRE(ns) / TCYC(ns)] - 1
+
+ where TCKSRE is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(5nCK, 10 ns). */
+ uint64_t trp : 5; /**< [ 43: 39](R/W) Indicates TRP constraints. Set TRP as follows:
+
+ _ RNDUP[TRP(ns) / TCYC(ns)] - 1
+
+ where TRP and TRTP are from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency
+ (not data rate).
+
+ TYP TRP = 10-15 ns.
+
+ TYP TRTP = max(4nCK, 7.5 ns). */
+ uint64_t tzqinit : 4; /**< [ 38: 35](R/W) Indicates TZQINIT constraints. Set this field as follows:
+
+ _ RNDUP[TZQINIT(ns) / (256 * TCYC(ns))]
+
+ where TZQINIT is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 2 (equivalent to 512). */
+ uint64_t tdllk : 4; /**< [ 34: 31](R/W) Indicates TDLLK constraints. Set this field as follows:
+
+ _ RNDUP[TDLLK(ns) / (256 * TCYC(ns))]
+
+ where TDLLK is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 3 (equivalent to 768).
+
+ This parameter is used in self-refresh exit and assumed to be greater than TRFC. */
+ uint64_t tmod : 5; /**< [ 30: 26](R/W) Indicates tMOD constraints. Set this field as follows:
+
+ _ RNDUP[TMOD(ns) / TCYC(ns)] - 1
+
+ where TMOD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(24nCK, 15 ns). */
+ uint64_t tmrd : 4; /**< [ 25: 22](R/W) Indicates TMRD constraints. Set this field as follows:
+
+ _ RNDUP[TMRD(ns) / TCYC(ns)] - 1
+
+ where TMRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 8nCK. */
+ uint64_t txpr : 6; /**< [ 21: 16](R/W) Indicates TXPR constraints. Set this field as follows:
+
+ _ RNDUP[TXPR(ns) / (16 * TCYC(ns))]
+
+ where TXPR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(5nCK, TRFC+10 ns). */
+ uint64_t tcke : 4; /**< [ 15: 12](R/W) Indicates TCKE constraints. Set this field as follows:
+
+ _ RNDUP[TCKE(ns) / TCYC(ns)] - 1
+
+ where TCKE is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(3nCK, 7.5/5.625/5.625/5 ns).
+
+ Because a DDR4 register can shorten the pulse width of CKE (it delays the falling edge
+ but does not delay the rising edge), care must be taken to set this parameter larger
+ to account for this effective reduction in the pulse width. */
+ uint64_t tzqcs : 4; /**< [ 11: 8](R/W) Indicates TZQCS constraints. This field is set as follows:
+
+ _ RNDUP[(2 * TZQCS(ns)) / (16 * TCYC(ns))]
+
+ where TZQCS is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP \>= 8 (greater-than-or-equal-to 128), to allow for dclk90 calibration. */
+ uint64_t reserved_0_7 : 8;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_7 : 8;
+ uint64_t tzqcs : 4; /**< [ 11: 8](R/W) Indicates TZQCS constraints. This field is set as follows:
+
+ _ RNDUP[(2 * TZQCS(ns)) / (16 * TCYC(ns))]
+
+ where TZQCS is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP \>= 8 (greater-than-or-equal-to 128), to allow for dclk90 calibration. */
+ uint64_t tcke : 4; /**< [ 15: 12](R/W) Indicates TCKE constraints. Set this field as follows:
+
+ _ RNDUP[TCKE(ns) / TCYC(ns)] - 1
+
+ where TCKE is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(3nCK, 7.5/5.625/5.625/5 ns).
+
+ Because a DDR4 register can shorten the pulse width of CKE (it delays the falling edge
+ but does not delay the rising edge), care must be taken to set this parameter larger
+ to account for this effective reduction in the pulse width. */
+ uint64_t txpr : 6; /**< [ 21: 16](R/W) Indicates TXPR constraints. Set this field as follows:
+
+ _ RNDUP[TXPR(ns) / (16 * TCYC(ns))]
+
+ where TXPR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(5nCK, TRFC+10 ns). */
+ uint64_t tmrd : 4; /**< [ 25: 22](R/W) Indicates TMRD constraints. Set this field as follows:
+
+ _ RNDUP[TMRD(ns) / TCYC(ns)] - 1
+
+ where TMRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 8nCK. */
+ uint64_t tmod : 5; /**< [ 30: 26](R/W) Indicates tMOD constraints. Set this field as follows:
+
+ _ RNDUP[TMOD(ns) / TCYC(ns)] - 1
+
+ where TMOD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(24nCK, 15 ns). */
+ uint64_t tdllk : 4; /**< [ 34: 31](R/W) Indicates TDLLK constraints. Set this field as follows:
+
+ _ RNDUP[TDLLK(ns) / (256 * TCYC(ns))]
+
+ where TDLLK is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 3 (equivalent to 768).
+
+ This parameter is used in self-refresh exit and assumed to be greater than TRFC. */
+ uint64_t tzqinit : 4; /**< [ 38: 35](R/W) Indicates TZQINIT constraints. Set this field as follows:
+
+ _ RNDUP[TZQINIT(ns) / (256 * TCYC(ns))]
+
+ where TZQINIT is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 2 (equivalent to 512). */
+ uint64_t trp : 5; /**< [ 43: 39](R/W) Indicates TRP constraints. Set TRP as follows:
+
+ _ RNDUP[TRP(ns) / TCYC(ns)] - 1
+
+ where TRP and TRTP are from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency
+ (not data rate).
+
+ TYP TRP = 10-15 ns.
+
+ TYP TRTP = max(4nCK, 7.5 ns). */
+ uint64_t tcksre : 4; /**< [ 47: 44](R/W) Indicates TCKSRE constraints. Set this field as follows:
+ _ RNDUP[TCKSRE(ns) / TCYC(ns)] - 1
+
+ where TCKSRE is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(5nCK, 10 ns). */
+ uint64_t tbcw : 6; /**< [ 53: 48](R/W) Indicates tBCW constraints. Set this field as follows:
+ _ RNDUP[TBCW(ns) / TCYC(ns)] - 1
+
+ where TBCW is from the JEDEC DDR4DB spec, and TCYC(ns) is the DDR clock frequency (not
+ data rate).
+
+ TYP = 16. */
+ uint64_t reserved_54_63 : 10;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_lmcx_timing_params0_cn81xx cn83xx; */
+ /* struct bdk_lmcx_timing_params0_cn81xx cn88xxp2; */
+};
+typedef union bdk_lmcx_timing_params0 bdk_lmcx_timing_params0_t;
+
+static inline uint64_t BDK_LMCX_TIMING_PARAMS0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_TIMING_PARAMS0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000198ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000198ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000198ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000198ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_TIMING_PARAMS0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_TIMING_PARAMS0(a) bdk_lmcx_timing_params0_t
+#define bustype_BDK_LMCX_TIMING_PARAMS0(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_TIMING_PARAMS0(a) "LMCX_TIMING_PARAMS0"
+#define device_bar_BDK_LMCX_TIMING_PARAMS0(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_TIMING_PARAMS0(a) (a)
+#define arguments_BDK_LMCX_TIMING_PARAMS0(a) (a),-1,-1,-1
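+
+/* Illustrative sketch (not part of the BDK): the TIMING_PARAMS0 fields above
+ * share one encoding pattern -- RNDUP[t(ns) / TCYC(ns)] - 1 for per-cycle
+ * fields, or RNDUP[t(ns) / (N * TCYC(ns))] for coarse fields (N = 16 for
+ * TXPR, 256 for TZQINIT and TDLLK). A standalone example using integer
+ * picosecond math (all names and sample timings hypothetical):
+ *
+ * #include <stdint.h>
+ * #include <stdio.h>
+ *
+ * static uint64_t rndup(uint64_t num, uint64_t den)
+ * {
+ *     return (num + den - 1) / den;  // ceiling division, i.e. RNDUP[num/den]
+ * }
+ *
+ * int main(void)
+ * {
+ *     uint64_t tcyc_ps = 833;    // ~1200 MHz CK, i.e. DDR4-2400
+ *     uint64_t tcke_ps = 5000;   // sample TCKE of 5 ns
+ *     uint64_t txpr_ps = 270000; // sample TXPR ~ TRFC + 10 ns
+ *     printf("TCKE field = %llu\n",
+ *            (unsigned long long)(rndup(tcke_ps, tcyc_ps) - 1));
+ *     printf("TXPR field = %llu\n",
+ *            (unsigned long long)(rndup(txpr_ps, 16 * tcyc_ps)));
+ *     return 0;
+ * }
+ */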
+
+/**
+ * Register (RSL) lmc#_timing_params1
+ *
+ * LMC Timing Parameters Register 1
+ */
+union bdk_lmcx_timing_params1
+{
+ uint64_t u;
+ struct bdk_lmcx_timing_params1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t tstab : 5; /**< [ 63: 59](R/W) Indicates tSTAB constraints. Set this field as follows:
+
+ _ RNDUP[tSTAB(ns) / (512 * TCYC(ns))]
+
+ where tSTAB is from the JEDEC DDR4 RCD spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate). */
+ uint64_t txp_ext : 1; /**< [ 58: 58](R/W) A 1-bit extension to the TXP register. */
+ uint64_t trcd_ext : 1; /**< [ 57: 57](R/W) A 1-bit extension to the TRCD register. */
+ uint64_t tpdm_full_cycle_ena : 1; /**< [ 56: 56](R/W) When set, this field enables the addition of a one cycle delay to the
+ write/read latency calculation. This is to compensate the case when
+ tPDM delay in the RCD of an RDIMM is greater than one-cycle.
+ Only valid in RDIMM (LMC()_CONTROL[RDIMM_ENA]=1). */
+ uint64_t trfc_dlr : 7; /**< [ 55: 49](R/W) Indicates tRFC_DLR constraints. Set this field as follows:
+
+ _ RNDUP[tRFC_DLR(ns) / (8 * TCYC(ns))]
+
+ where tRFC_DLR is from the JEDEC 3D stacked SDRAM spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 90-120 ns.
+
+ 0x0 = reserved.
+ 0x1 = 8 TCYC.
+ 0x2 = 16 TCYC.
+ 0x3 = 24 TCYC.
+ 0x4 = 32 TCYC.
+ ...
+ 0x7E = 1008 TCYC.
+ 0x7F = 1016 TCYC. */
+ uint64_t txpdll : 5; /**< [ 48: 44](R/W) Indicates TXPDLL constraints. Set this field as follows:
+
+ _ RNDUP[TXPDLL(ns) / TCYC(ns)] - 1
+
+ where TXPDLL is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP=max(10nCK, 24 ns) */
+ uint64_t tfaw : 5; /**< [ 43: 39](R/W) Indicates TFAW constraints. Set this field as follows:
+
+ _ RNDUP[TFAW(ns) / (4 * TCYC(ns))]
+
+ where TFAW is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 30-40 ns
+
+ Internal:
+ When interfacing with DIMMs that contain 3DS DRAMs, set this field as follows:
+
+ _ RNDUP[tFAW_SLR(ns) / (4 * TCYC(ns))]
+
+ where tFAW_SLR is the Four activate window to the same logical rank from the
+ JEDEC DDR4 3D Stacked spec. */
+ uint64_t twldqsen : 4; /**< [ 38: 35](R/W) Indicates TWLDQSEN constraints. Set this field as follows:
+
+ _ RNDUP[TWLDQSEN(ns) / (4 * TCYC(ns))]
+
+ where TWLDQSEN is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(25nCK) */
+ uint64_t twlmrd : 4; /**< [ 34: 31](R/W) Indicates TWLMRD constraints. Set this field as follows:
+
+ _ RNDUP[TWLMRD(ns) / (4 * TCYC(ns))]
+
+ where TWLMRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(40nCK) */
+ uint64_t txp : 3; /**< [ 30: 28](R/W) Indicates TXP constraints. Set this field as follows:
+
+ _ RNDUP[TXP(ns) / TCYC(ns)] - 1
+
+ where TXP is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP=max(3nCK, 7.5 ns) */
+ uint64_t trrd : 3; /**< [ 27: 25](R/W) Indicates TRRD constraints. Set this field as follows:
+
+ _ RNDUP[TRRD(ns) / TCYC(ns)] - 2,
+
+ where TRRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(4nCK, 10 ns)
+
+ 0x0 = Reserved.
+ 0x1 = 3 TCYC.
+ ...
+ 0x6 = 8 TCYC.
+ 0x7 = 9 TCYC.
+
+ For DDR4, this is the tRRD_S parameter. */
+ uint64_t trfc : 7; /**< [ 24: 18](R/W) Indicates TRFC constraints. Set this field as follows:
+
+ _ RNDUP[TRFC(ns) / (8 * TCYC(ns))]
+
+ where TRFC is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 90-350 ns
+
+ 0x0 = reserved.
+ 0x1 = 8 TCYC.
+ 0x2 = 16 TCYC.
+ 0x3 = 24 TCYC.
+ 0x4 = 32 TCYC.
+ ...
+ 0x7E = 1008 TCYC.
+ 0x7F = 1016 TCYC. */
+ uint64_t twtr : 4; /**< [ 17: 14](R/W) Indicates TWTR constraints. Set this field as follows:
+
+ _ RNDUP[TWTR(ns) / TCYC(ns)] - 1
+
+ where TWTR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(4nCK, 7.5 ns)
+
+ For DDR4, this CSR field represents tWTR_S.
+
+ 0x0 = reserved.
+ 0x1 = 2.
+ ...
+ 0x7 = 8.
+ 0x8-0xF = reserved. */
+ uint64_t trcd : 4; /**< [ 13: 10](R/W) Indicates TRCD constraints. Set this field as follows:
+
+ _ RNDUP[TRCD(ns) / TCYC(ns)]
+
+ where TRCD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 10-15 ns
+
+ 0x0 = reserved.
+ 0x1 = 2 (2 is the smallest value allowed).
+ 0x2 = 2.
+ ...
+ 0xE = 14.
+ 0xF = reserved.
+
+ In 2T mode, make this register TRCD - 1, not going below 2. */
+ uint64_t tras : 6; /**< [ 9: 4](R/W) Indicates TRAS constraints. Set TRAS (CSR field) as follows:
+
+ _ RNDUP[TRAS(ns)/TCYC(ns)] - 1,
+
+ where TRAS is from the DDR3/DDR4 spec, and TCYC(ns) is the DDR clock frequency (not data
+ rate).
+
+ TYP = 35ns - 9 * TREFI
+
+ 0x0 = reserved.
+ 0x1 = 2 TCYC.
+ 0x2 = 3 TCYC.
+ ...
+ 0x3F = 64 TCYC. */
+ uint64_t tmprr : 4; /**< [ 3: 0](R/W) Indicates TMPRR constraints. Set this field as follows:
+
+ _ RNDUP[TMPRR(ns) / TCYC(ns)] - 1
+
+ where TMPRR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 1 nCK */
+#else /* Word 0 - Little Endian */
+ uint64_t tmprr : 4; /**< [ 3: 0](R/W) Indicates TMPRR constraints. Set this field as follows:
+
+ _ RNDUP[TMPRR(ns) / TCYC(ns)] - 1
+
+ where TMPRR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 1 nCK */
+ uint64_t tras : 6; /**< [ 9: 4](R/W) Indicates TRAS constraints. Set TRAS (CSR field) as follows:
+
+ _ RNDUP[TRAS(ns)/TCYC(ns)] - 1,
+
+ where TRAS is from the DDR3/DDR4 spec, and TCYC(ns) is the DDR clock frequency (not data
+ rate).
+
+ TYP = 35ns - 9 * TREFI
+
+ 0x0 = reserved.
+ 0x1 = 2 TCYC.
+ 0x2 = 3 TCYC.
+ ...
+ 0x3F = 64 TCYC. */
+ uint64_t trcd : 4; /**< [ 13: 10](R/W) Indicates TRCD constraints. Set this field as follows:
+
+ _ RNDUP[TRCD(ns) / TCYC(ns)]
+
+ where TRCD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 10-15 ns
+
+ 0x0 = reserved.
+ 0x1 = 2 (2 is the smallest value allowed).
+ 0x2 = 2.
+ ...
+ 0xE = 14.
+ 0xF = reserved.
+
+ In 2T mode, make this register TRCD - 1, not going below 2. */
+ uint64_t twtr : 4; /**< [ 17: 14](R/W) Indicates TWTR constraints. Set this field as follows:
+
+ _ RNDUP[TWTR(ns) / TCYC(ns)] - 1
+
+ where TWTR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(4nCK, 7.5 ns)
+
+ For DDR4, this CSR field represents tWTR_S.
+
+ 0x0 = reserved.
+ 0x1 = 2.
+ ...
+ 0x7 = 8.
+ 0x8-0xF = reserved. */
+ uint64_t trfc : 7; /**< [ 24: 18](R/W) Indicates TRFC constraints. Set this field as follows:
+
+ _ RNDUP[TRFC(ns) / (8 * TCYC(ns))]
+
+ where TRFC is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 90-350 ns
+
+ 0x0 = reserved.
+ 0x1 = 8 TCYC.
+ 0x2 = 16 TCYC.
+ 0x3 = 24 TCYC.
+ 0x4 = 32 TCYC.
+ ...
+ 0x7E = 1008 TCYC.
+ 0x7F = 1016 TCYC. */
+ uint64_t trrd : 3; /**< [ 27: 25](R/W) Indicates TRRD constraints. Set this field as follows:
+
+ _ RNDUP[TRRD(ns) / TCYC(ns)] - 2,
+
+ where TRRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(4nCK, 10 ns)
+
+ 0x0 = Reserved.
+ 0x1 = 3 TCYC.
+ ...
+ 0x6 = 8 TCYC.
+ 0x7 = 9 TCYC.
+
+ For DDR4, this is the tRRD_S parameter. */
+ uint64_t txp : 3; /**< [ 30: 28](R/W) Indicates TXP constraints. Set this field as follows:
+
+ _ RNDUP[TXP(ns) / TCYC(ns)] - 1
+
+ where TXP is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP=max(3nCK, 7.5 ns) */
+ uint64_t twlmrd : 4; /**< [ 34: 31](R/W) Indicates TWLMRD constraints. Set this field as follows:
+
+ _ RNDUP[TWLMRD(ns) / (4 * TCYC(ns))]
+
+ where TWLMRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(40nCK) */
+ uint64_t twldqsen : 4; /**< [ 38: 35](R/W) Indicates TWLDQSEN constraints. Set this field as follows:
+
+ _ RNDUP[TWLDQSEN(ns) / (4 * TCYC(ns))]
+
+ where TWLDQSEN is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(25nCK) */
+ uint64_t tfaw : 5; /**< [ 43: 39](R/W) Indicates TFAW constraints. Set this field as follows:
+
+ _ RNDUP[TFAW(ns) / (4 * TCYC(ns))]
+
+ where TFAW is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 30-40 ns
+
+ Internal:
+ When interfacing with DIMMs that contain 3DS DRAMs, set this field as follows:
+
+ _ RNDUP[tFAW_SLR(ns) / (4 * TCYC(ns))]
+
+ where tFAW_SLR is the Four activate window to the same logical rank from the
+ JEDEC DDR4 3D Stacked spec. */
+ uint64_t txpdll : 5; /**< [ 48: 44](R/W) Indicates TXPDLL constraints. Set this field as follows:
+
+ _ RNDUP[TXPDLL(ns) / TCYC(ns)] - 1
+
+ where TXPDLL is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP=max(10nCK, 24 ns) */
+ uint64_t trfc_dlr : 7; /**< [ 55: 49](R/W) Indicates tRFC_DLR constraints. Set this field as follows:
+
+ _ RNDUP[tRFC_DLR(ns) / (8 * TCYC(ns))]
+
+ where tRFC_DLR is from the JEDEC 3D stacked SDRAM spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 90-120 ns.
+
+ 0x0 = reserved.
+ 0x1 = 8 TCYC.
+ 0x2 = 16 TCYC.
+ 0x3 = 24 TCYC.
+ 0x4 = 32 TCYC.
+ ...
+ 0x7E = 1008 TCYC.
+ 0x7F = 1016 TCYC. */
+ uint64_t tpdm_full_cycle_ena : 1; /**< [ 56: 56](R/W) When set, this field enables the addition of a one cycle delay to the
+ write/read latency calculation. This is to compensate the case when
+ tPDM delay in the RCD of an RDIMM is greater than one-cycle.
+ Only valid in RDIMM (LMC()_CONTROL[RDIMM_ENA]=1). */
+ uint64_t trcd_ext : 1; /**< [ 57: 57](R/W) A 1-bit extension to the TRCD register. */
+ uint64_t txp_ext : 1; /**< [ 58: 58](R/W) A 1-bit extension to the TXP register above. */
+ uint64_t tstab : 5; /**< [ 63: 59](R/W) Indicates tSTAB constraints. Set this field as follows:
+
+ _ RNDUP[tSTAB(ns) / (512 * TCYC(ns))]
+
+ where tSTAB is from the JEDEC DDR4 RCD spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate). */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_timing_params1_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_59_63 : 5;
+ uint64_t txp_ext : 1; /**< [ 58: 58](RO) Reserved. */
+ uint64_t trcd_ext : 1; /**< [ 57: 57](RO) Reserved. */
+ uint64_t tpdm_full_cycle_ena : 1; /**< [ 56: 56](R/W) When set, this field enables the addition of a one cycle delay to the
+ write/read latency calculation. This is to compensate the case when
+ tPDM delay in the RCD of an RDIMM is greater than one-cycle.
+ Only valid in RDIMM (LMC()_CONTROL[RDIMM_ENA]=1). */
+ uint64_t trfc_dlr : 7; /**< [ 55: 49](R/W) Indicates tRFC_DLR constraints. Set this field as follows:
+
+ _ RNDUP[tRFC_DLR(ns) / (8 * TCYC(ns))]
+
+ where tRFC_DLR is from the JEDEC 3D stacked SDRAM spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 90-120 ns.
+
+ 0x0 = reserved.
+ 0x1 = 8 TCYC.
+ 0x2 = 16 TCYC.
+ 0x3 = 24 TCYC.
+ 0x4 = 32 TCYC.
+ ...
+ 0x7E = 1008 TCYC.
+ 0x7F = 1016 TCYC. */
+ uint64_t txpdll : 5; /**< [ 48: 44](R/W) Indicates TXPDLL constraints. Set this field as follows:
+
+ _ RNDUP[TXPDLL(ns) / TCYC(ns)] - 1
+
+ where TXPDLL is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP=max(10nCK, 24 ns) */
+ uint64_t tfaw : 5; /**< [ 43: 39](R/W) Indicates TFAW constraints. Set this field as follows:
+
+ _ RNDUP[TFAW(ns) / (4 * TCYC(ns))]
+
+ where TFAW is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 30-40 ns
+
+ Internal:
+ When interfacing with DIMMs that contain 3DS DRAMs, set this field as follows:
+
+ _ RNDUP[tFAW_SLR(ns) / (4 * TCYC(ns))]
+
+ where tFAW_SLR is the Four activate window to the same logical rank from the
+ JEDEC DDR4 3D Stacked spec. */
+ uint64_t twldqsen : 4; /**< [ 38: 35](R/W) Indicates TWLDQSEN constraints. Set this field as follows:
+
+ _ RNDUP[TWLDQSEN(ns) / (4 * TCYC(ns))]
+
+ where TWLDQSEN is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(25nCK) */
+ uint64_t twlmrd : 4; /**< [ 34: 31](R/W) Indicates TWLMRD constraints. Set this field as follows:
+
+ _ RNDUP[TWLMRD(ns) / (4 * TCYC(ns))]
+
+ where TWLMRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(40nCK) */
+ uint64_t txp : 3; /**< [ 30: 28](R/W) Indicates TXP constraints. Set this field as follows:
+
+ _ RNDUP[TXP(ns) / TCYC(ns)] - 1
+
+ where TXP is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP=max(3nCK, 7.5 ns) */
+ uint64_t trrd : 3; /**< [ 27: 25](R/W) Indicates TRRD constraints. Set this field as follows:
+
+ _ RNDUP[TRRD(ns) / TCYC(ns)] - 2,
+
+ where TRRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(4nCK, 10 ns)
+
+ 0x0 = Reserved.
+ 0x1 = 3 TCYC.
+ ...
+ 0x6 = 8 TCYC.
+ 0x7 = 9 TCYC.
+
+ For DDR4, this is the tRRD_S parameter. */
+ uint64_t trfc : 7; /**< [ 24: 18](R/W) Indicates TRFC constraints. Set this field as follows:
+
+ _ RNDUP[TRFC(ns) / (8 * TCYC(ns))]
+
+ where TRFC is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 90-350 ns
+
+ 0x0 = reserved.
+ 0x1 = 8 TCYC.
+ 0x2 = 16 TCYC.
+ 0x3 = 24 TCYC.
+ 0x4 = 32 TCYC.
+ ...
+ 0x7E = 1008 TCYC.
+ 0x7F = 1016 TCYC. */
+ uint64_t twtr : 4; /**< [ 17: 14](R/W) Indicates TWTR constraints. Set this field as follows:
+
+ _ RNDUP[TWTR(ns) / TCYC(ns)] - 1
+
+ where TWTR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(4nCK, 7.5 ns)
+
+ For DDR4, this CSR field represents tWTR_S.
+
+ 0x0 = reserved.
+ 0x1 = 2.
+ ...
+ 0x7 = 8.
+ 0x8-0xF = reserved. */
+ uint64_t trcd : 4; /**< [ 13: 10](R/W) Indicates TRCD constraints. Set this field as follows:
+
+ _ RNDUP[TRCD(ns) / TCYC(ns)]
+
+ where TRCD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 10-15 ns
+
+ 0x0 = reserved.
+ 0x1 = 2 (2 is the smallest value allowed).
+ 0x2 = 2.
+ ...
+ 0xE = 14.
+ 0xF = reserved.
+
+ In 2T mode, make this register TRCD - 1, not going below 2. */
+ uint64_t tras : 6; /**< [ 9: 4](R/W) Indicates TRAS constraints. Set TRAS (CSR field) as follows:
+
+ _ RNDUP[TRAS(ns)/TCYC(ns)] - 1,
+
+ where TRAS is from the DDR3/DDR4 spec, and TCYC(ns) is the DDR clock frequency (not data
+ rate).
+
+ TYP = 35ns - 9 * TREFI
+
+ 0x0 = reserved.
+ 0x1 = 2 TCYC.
+ 0x2 = 3 TCYC.
+ ...
+ 0x3F = 64 TCYC. */
+ uint64_t tmprr : 4; /**< [ 3: 0](R/W) Indicates TMPRR constraints. Set this field as follows:
+
+ _ RNDUP[TMPRR(ns) / TCYC(ns)] - 1
+
+ where TMPRR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 1 nCK */
+#else /* Word 0 - Little Endian */
+ uint64_t tmprr : 4; /**< [ 3: 0](R/W) Indicates TMPRR constraints. Set this field as follows:
+
+ _ RNDUP[TMPRR(ns) / TCYC(ns)] - 1
+
+ where TMPRR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 1 nCK */
+ uint64_t tras : 6; /**< [ 9: 4](R/W) Indicates TRAS constraints. Set TRAS (CSR field) as follows:
+
+ _ RNDUP[TRAS(ns)/TCYC(ns)] - 1,
+
+ where TRAS is from the DDR3/DDR4 spec, and TCYC(ns) is the DDR clock frequency (not data
+ rate).
+
+ TYP = 35ns - 9 * TREFI
+
+ 0x0 = reserved.
+ 0x1 = 2 TCYC.
+ 0x2 = 3 TCYC.
+ ...
+ 0x3F = 64 TCYC. */
+ uint64_t trcd : 4; /**< [ 13: 10](R/W) Indicates TRCD constraints. Set this field as follows:
+
+ _ RNDUP[TRCD(ns) / TCYC(ns)]
+
+ where TRCD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 10-15 ns
+
+ 0x0 = reserved.
+ 0x1 = 2 (2 is the smallest value allowed).
+ 0x2 = 2.
+ ...
+ 0xE = 14.
+ 0xF = reserved.
+
+ In 2T mode, make this register TRCD - 1, not going below 2. */
+ uint64_t twtr : 4; /**< [ 17: 14](R/W) Indicates TWTR constraints. Set this field as follows:
+
+ _ RNDUP[TWTR(ns) / TCYC(ns)] - 1
+
+ where TWTR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(4nCK, 7.5 ns)
+
+ For DDR4, this CSR field represents tWTR_S.
+
+ 0x0 = reserved.
+ 0x1 = 2.
+ ...
+ 0x7 = 8.
+ 0x8-0xF = reserved. */
+ uint64_t trfc : 7; /**< [ 24: 18](R/W) Indicates TRFC constraints. Set this field as follows:
+
+ _ RNDUP[TRFC(ns) / (8 * TCYC(ns))]
+
+ where TRFC is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 90-350 ns
+
+ 0x0 = reserved.
+ 0x1 = 8 TCYC.
+ 0x2 = 16 TCYC.
+ 0x3 = 24 TCYC.
+ 0x4 = 32 TCYC.
+ ...
+ 0x7E = 1008 TCYC.
+ 0x7F = 1016 TCYC. */
+ uint64_t trrd : 3; /**< [ 27: 25](R/W) Indicates TRRD constraints. Set this field as follows:
+
+ _ RNDUP[TRRD(ns) / TCYC(ns)] - 2,
+
+ where TRRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(4nCK, 10 ns)
+
+ 0x0 = Reserved.
+ 0x1 = 3 TCYC.
+ ...
+ 0x6 = 8 TCYC.
+ 0x7 = 9 TCYC.
+
+ For DDR4, this is the tRRD_S parameter. */
+ uint64_t txp : 3; /**< [ 30: 28](R/W) Indicates TXP constraints. Set this field as follows:
+
+ _ RNDUP[TXP(ns) / TCYC(ns)] - 1
+
+ where TXP is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP=max(3nCK, 7.5 ns) */
+ uint64_t twlmrd : 4; /**< [ 34: 31](R/W) Indicates TWLMRD constraints. Set this field as follows:
+
+ _ RNDUP[TWLMRD(ns) / (4 * TCYC(ns))]
+
+ where TWLMRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(40nCK) */
+ uint64_t twldqsen : 4; /**< [ 38: 35](R/W) Indicates TWLDQSEN constraints. Set this field as follows:
+
+ _ RNDUP[TWLDQSEN(ns) / (4 * TCYC(ns))]
+
+ where TWLDQSEN is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(25nCK) */
+ uint64_t tfaw : 5; /**< [ 43: 39](R/W) Indicates TFAW constraints. Set this field as follows:
+
+ _ RNDUP[TFAW(ns) / (4 * TCYC(ns))]
+
+ where TFAW is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 30-40 ns
+
+ Internal:
+ When interfacing with DIMMs that contain 3DS DRAMs, set this field as follows:
+
+ _ RNDUP[tFAW_SLR(ns) / (4 * TCYC(ns))]
+
+ where tFAW_SLR is the Four activate window to the same logical rank from the
+ JEDEC DDR4 3D Stacked spec. */
+ uint64_t txpdll : 5; /**< [ 48: 44](R/W) Indicates TXPDLL constraints. Set this field as follows:
+
+ _ RNDUP[TXPDLL(ns) / TCYC(ns)] - 1
+
+ where TXPDLL is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP=max(10nCK, 24 ns) */
+ uint64_t trfc_dlr : 7; /**< [ 55: 49](R/W) Indicates tRFC_DLR constraints. Set this field as follows:
+
+ _ RNDUP[tRFC_DLR(ns) / (8 * TCYC(ns))]
+
+ where tRFC_DLR is from the JEDEC 3D stacked SDRAM spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 90-120 ns.
+
+ 0x0 = reserved.
+ 0x1 = 8 TCYC.
+ 0x2 = 16 TCYC.
+ 0x3 = 24 TCYC.
+ 0x4 = 32 TCYC.
+ ...
+ 0x7E = 1008 TCYC.
+ 0x7F = 1016 TCYC. */
+ uint64_t tpdm_full_cycle_ena : 1; /**< [ 56: 56](R/W) When set, this field enables the addition of a one cycle delay to the
+ write/read latency calculation. This is to compensate the case when
+ tPDM delay in the RCD of an RDIMM is greater than one-cycle.
+ Only valid in RDIMM (LMC()_CONTROL[RDIMM_ENA]=1). */
+ uint64_t trcd_ext : 1; /**< [ 57: 57](RO) Reserved. */
+ uint64_t txp_ext : 1; /**< [ 58: 58](RO) Reserved. */
+ uint64_t reserved_59_63 : 5;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_lmcx_timing_params1_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t tstab : 5; /**< [ 63: 59](R/W) Indicates tSTAB constraints. Set this field as follows:
+
+ _ RNDUP[tSTAB(ns) / (512 * TCYC(ns))]
+
+ where tSTAB is from the JEDEC DDR4 RCD spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate). */
+ uint64_t txp_ext : 1; /**< [ 58: 58](R/W) A 1-bit extension to the TXP register above. */
+ uint64_t trcd_ext : 1; /**< [ 57: 57](R/W) A 1-bit extension to the TRCD register. */
+ uint64_t tpdm_full_cycle_ena : 1; /**< [ 56: 56](R/W) When set, this field enables the addition of a one cycle delay to the
+ write/read latency calculation. This is to compensate the case when
+ tPDM delay in the RCD of an RDIMM is greater than one-cycle.
+ Only valid in RDIMM (LMC()_CONTROL[RDIMM_ENA]=1). */
+ uint64_t trfc_dlr : 7; /**< [ 55: 49](R/W) Indicates tRFC_DLR constraints. Set this field as follows:
+
+ _ RNDUP[tRFC_DLR(ns) / (8 * TCYC(ns))]
+
+ where tRFC_DLR is from the JEDEC 3D stacked SDRAM spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 90-120 ns.
+
+ 0x0 = reserved.
+ 0x1 = 8 TCYC.
+ 0x2 = 16 TCYC.
+ 0x3 = 24 TCYC.
+ 0x4 = 32 TCYC.
+ ...
+ 0x7E = 1008 TCYC.
+ 0x7F = 1016 TCYC. */
+ uint64_t txpdll : 5; /**< [ 48: 44](R/W) Indicates TXPDLL constraints. Set this field as follows:
+
+ _ RNDUP[TXPDLL(ns) / TCYC(ns)] - 1
+
+ where TXPDLL is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP=max(10nCK, 24 ns) */
+ uint64_t tfaw : 5; /**< [ 43: 39](R/W) Indicates TFAW constraints. Set this field as follows:
+
+ _ RNDUP[TFAW(ns) / (4 * TCYC(ns))]
+
+ where TFAW is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 30-40 ns
+
+ Internal:
+ When interfacing with DIMMs that contain 3DS DRAMs, set this field as follows:
+
+ _ RNDUP[tFAW_SLR(ns) / (4 * TCYC(ns))]
+
+ where tFAW_SLR is the Four activate window to the same logical rank from the
+ JEDEC DDR4 3D Stacked spec. */
+ uint64_t twldqsen : 4; /**< [ 38: 35](R/W) Indicates TWLDQSEN constraints. Set this field as follows:
+
+ _ RNDUP[TWLDQSEN(ns) / (4 * TCYC(ns))]
+
+ where TWLDQSEN is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(25nCK) */
+ uint64_t twlmrd : 4; /**< [ 34: 31](R/W) Indicates TWLMRD constraints. Set this field as follows:
+
+ _ RNDUP[TWLMRD(ns) / (4 * TCYC(ns))]
+
+ where TWLMRD is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(40nCK) */
+ uint64_t txp : 3; /**< [ 30: 28](R/W) Indicates TXP constraints. Set this field as follows:
+
+ _ RNDUP[TXP(ns) / TCYC(ns)] - 1
+
+ where TXP is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP=max(3nCK, 7.5 ns) */
+ uint64_t trrd : 3; /**< [ 27: 25](R/W) Indicates TRRD constraints. Set this field as follows:
+
+ _ RNDUP[TRRD(ns) / TCYC(ns)] - 2,
+
+ where TRRD is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(4nCK, 10 ns)
+
+ 0x0 = Reserved.
+ 0x1 = 3 TCYC.
+ ...
+ 0x6 = 8 TCYC.
+ 0x7 = 9 TCYC.
+
+ For DDR4, this is the tRRD_S parameter. */
+ uint64_t trfc : 7; /**< [ 24: 18](R/W) Indicates TRFC constraints. Set this field as follows:
+
+ _ RNDUP[TRFC(ns) / (8 * TCYC(ns))]
+
+ where TRFC is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 90-350 ns
+
+ 0x0 = reserved.
+ 0x1 = 8 TCYC.
+ 0x2 = 16 TCYC.
+ 0x3 = 24 TCYC.
+ 0x4 = 32 TCYC.
+ ...
+ 0x7E = 1008 TCYC.
+ 0x7F = 1016 TCYC. */
+ uint64_t twtr : 4; /**< [ 17: 14](R/W) Indicates TWTR constraints. Set this field as follows:
+
+ _ RNDUP[TWTR(ns) / TCYC(ns)] - 1
+
+ where TWTR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(4nCK, 7.5 ns)
+
+ For DDR4, this CSR field represents tWTR_S.
+
+ 0x0 = reserved.
+ 0x1 = 2.
+ ...
+ 0x7 = 8.
+ 0x8-0xF = reserved. */
+ uint64_t trcd : 4; /**< [ 13: 10](R/W) Indicates TRCD constraints. Set this field as follows:
+
+ _ RNDUP[TRCD(ns) / TCYC(ns)]
+
+ where TRCD is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 10-15 ns
+
+ 0x0 = reserved.
+ 0x1 = 2 (2 is the smallest value allowed).
+ 0x2 = 2.
+ ...
+ 0xE = 14.
+ 0xF = reserved.
+
+ In 2T mode, make this register TRCD - 1, not going below 2. */
+ uint64_t tras : 6; /**< [ 9: 4](R/W) Indicates TRAS constraints. Set TRAS (CSR field) as follows:
+
+ _ RNDUP[TRAS(ns)/TCYC(ns)] - 1,
+
+ where TRAS is from the DDR4 spec, and TCYC(ns) is the DDR clock frequency (not data
+ rate).
+
+ TYP = 35ns - 9 * TREFI
+
+ 0x0 = reserved.
+ 0x1 = 2 TCYC.
+ 0x2 = 3 TCYC.
+ ...
+ 0x3F = 64 TCYC. */
+ uint64_t tmprr : 4; /**< [ 3: 0](R/W) Indicates TMPRR constraints. Set this field as follows:
+
+ _ RNDUP[TMPRR(ns) / TCYC(ns)] - 1
+
+ where TMPRR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 1 nCK */
+#else /* Word 0 - Little Endian */
+ uint64_t tmprr : 4; /**< [ 3: 0](R/W) Indicates TMPRR constraints. Set this field as follows:
+
+ _ RNDUP[TMPRR(ns) / TCYC(ns)] - 1
+
+ where TMPRR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 1 nCK */
+ uint64_t tras : 6; /**< [ 9: 4](R/W) Indicates TRAS constraints. Set TRAS (CSR field) as follows:
+
+ _ RNDUP[TRAS(ns)/TCYC(ns)] - 1,
+
+ where TRAS is from the DDR4 spec, and TCYC(ns) is the DDR clock frequency (not data
+ rate).
+
+ TYP = 35ns - 9 * TREFI
+
+ 0x0 = reserved.
+ 0x1 = 2 TCYC.
+ 0x2 = 3 TCYC.
+ ...
+ 0x3F = 64 TCYC. */
+ uint64_t trcd : 4; /**< [ 13: 10](R/W) Indicates TRCD constraints. Set this field as follows:
+
+ _ RNDUP[TRCD(ns) / TCYC(ns)]
+
+ where TRCD is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 10-15 ns
+
+ 0x0 = reserved.
+ 0x1 = 2 (2 is the smallest value allowed).
+ 0x2 = 2.
+ ...
+ 0xE = 14.
+ 0xF = reserved.
+
+ In 2T mode, make this register TRCD - 1, not going below 2. */
+ uint64_t twtr : 4; /**< [ 17: 14](R/W) Indicates TWTR constraints. Set this field as follows:
+
+ _ RNDUP[TWTR(ns) / TCYC(ns)] - 1
+
+ where TWTR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(4nCK, 7.5 ns)
+
+ For DDR4, this CSR field represents tWTR_S.
+
+ 0x0 = reserved.
+ 0x1 = 2.
+ ...
+ 0x7 = 8.
+ 0x8-0xF = reserved. */
+ uint64_t trfc : 7; /**< [ 24: 18](R/W) Indicates TRFC constraints. Set this field as follows:
+
+ _ RNDUP[TRFC(ns) / (8 * TCYC(ns))]
+
+ where TRFC is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 90-350 ns
+
+ 0x0 = reserved.
+ 0x1 = 8 TCYC.
+ 0x2 = 16 TCYC.
+ 0x3 = 24 TCYC.
+ 0x4 = 32 TCYC.
+ ...
+ 0x7E = 1008 TCYC.
+ 0x7F = 1016 TCYC. */
+ uint64_t trrd : 3; /**< [ 27: 25](R/W) Indicates TRRD constraints. Set this field as follows:
+
+ _ RNDUP[TRRD(ns) / TCYC(ns)] - 2,
+
+ where TRRD is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(4nCK, 10 ns)
+
+ 0x0 = Reserved.
+ 0x1 = 3 TCYC.
+ ...
+ 0x6 = 8 TCYC.
+ 0x7 = 9 TCYC.
+
+ For DDR4, this is the tRRD_S parameter. */
+ uint64_t txp : 3; /**< [ 30: 28](R/W) Indicates TXP constraints. Set this field as follows:
+
+ _ RNDUP[TXP(ns) / TCYC(ns)] - 1
+
+ where TXP is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP=max(3nCK, 7.5 ns) */
+ uint64_t twlmrd : 4; /**< [ 34: 31](R/W) Indicates TWLMRD constraints. Set this field as follows:
+
+ _ RNDUP[TWLMRD(ns) / (4 * TCYC(ns))]
+
+ where TWLMRD is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(40nCK) */
+ uint64_t twldqsen : 4; /**< [ 38: 35](R/W) Indicates TWLDQSEN constraints. Set this field as follows:
+
+ _ RNDUP[TWLDQSEN(ns) / (4 * TCYC(ns))]
+
+ where TWLDQSEN is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(25nCK) */
+ uint64_t tfaw : 5; /**< [ 43: 39](R/W) Indicates TFAW constraints. Set this field as follows:
+
+ _ RNDUP[TFAW(ns) / (4 * TCYC(ns))]
+
+ where TFAW is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 30-40 ns
+
+ Internal:
+ When interfacing with DIMMs that contain 3DS DRAMs, set this field as follows:
+
+ _ RNDUP[tFAW_SLR(ns) / (4 * TCYC(ns))]
+
+ where tFAW_SLR is the Four activate window to the same logical rank from the
+ JEDEC DDR4 3D Stacked spec. */
+ uint64_t txpdll : 5; /**< [ 48: 44](R/W) Indicates TXPDLL constraints. Set this field as follows:
+
+ _ RNDUP[TXPDLL(ns) / TCYC(ns)] - 1
+
+ where TXPDLL is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP=max(10nCK, 24 ns) */
+ uint64_t trfc_dlr : 7; /**< [ 55: 49](R/W) Indicates tRFC_DLR constraints. Set this field as follows:
+
+ _ RNDUP[tRFC_DLR(ns) / (8 * TCYC(ns))]
+
+ where tRFC_DLR is from the JEDEC 3D stacked SDRAM spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 90-120 ns.
+
+ 0x0 = reserved.
+ 0x1 = 8 TCYC.
+ 0x2 = 16 TCYC.
+ 0x3 = 24 TCYC.
+ 0x4 = 32 TCYC.
+ ...
+ 0x7E = 1008 TCYC.
+ 0x7F = 1016 TCYC. */
+ uint64_t tpdm_full_cycle_ena : 1; /**< [ 56: 56](R/W) When set, this field enables the addition of a one cycle delay to the
+ write/read latency calculation. This is to compensate the case when
+ tPDM delay in the RCD of an RDIMM is greater than one-cycle.
+ Only valid in RDIMM (LMC()_CONTROL[RDIMM_ENA]=1). */
+ uint64_t trcd_ext : 1; /**< [ 57: 57](R/W) A 1-bit extension to the TRCD register. */
+ uint64_t txp_ext : 1; /**< [ 58: 58](R/W) A 1-bit extension to the TXP register above. */
+ uint64_t tstab : 5; /**< [ 63: 59](R/W) Indicates tSTAB constraints. Set this field as follows:
+
+ _ RNDUP[tSTAB(ns) / (512 * TCYC(ns))]
+
+ where tSTAB is from the JEDEC DDR4 RCD spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate). */
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_lmcx_timing_params1_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_59_63 : 5;
+ uint64_t txp_ext : 1; /**< [ 58: 58](R/W) A 1-bit extension to the TXP register above. */
+ uint64_t trcd_ext : 1; /**< [ 57: 57](R/W) A 1-bit extension to the TRCD register. */
+ uint64_t tpdm_full_cycle_ena : 1; /**< [ 56: 56](R/W) When set, this field enables the addition of a one cycle delay to the
+ write/read latency calculation. This is to compensate the case when
+ tPDM delay in the RCD of an RDIMM is greater than one-cycle.
+ Only valid in RDIMM (LMC()_CONTROL[RDIMM_ENA]=1). */
+ uint64_t trfc_dlr : 7; /**< [ 55: 49](R/W) Indicates tRFC_DLR constraints. Set this field as follows:
+
+ _ RNDUP[tRFC_DLR(ns) / (8 * TCYC(ns))]
+
+ where tRFC_DLR is from the JEDEC 3D stacked SDRAM spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 90-120 ns.
+
+ 0x0 = reserved.
+ 0x1 = 8 TCYC.
+ 0x2 = 16 TCYC.
+ 0x3 = 24 TCYC.
+ 0x4 = 32 TCYC.
+ ...
+ 0x7E = 1008 TCYC.
+ 0x7F = 1016 TCYC. */
+ uint64_t txpdll : 5; /**< [ 48: 44](R/W) Indicates TXPDLL constraints. Set this field as follows:
+
+ _ RNDUP[TXPDLL(ns) / TCYC(ns)] - 1
+
+ where TXPDLL is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP=max(10nCK, 24 ns) */
+ uint64_t tfaw : 5; /**< [ 43: 39](R/W) Indicates TFAW constraints. Set this field as follows:
+
+ _ RNDUP[TFAW(ns) / (4 * TCYC(ns))]
+
+ where TFAW is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 30-40 ns
+
+ Internal:
+ When interfacing with DIMMs that contain 3DS DRAMs, set this field as follows:
+
+ _ RNDUP[tFAW_SLR(ns) / (4 * TCYC(ns))]
+
+ where tFAW_SLR is the Four activate window to the same logical rank from the
+ JEDEC DDR4 3D Stacked spec. */
+ uint64_t twldqsen : 4; /**< [ 38: 35](R/W) Indicates TWLDQSEN constraints. Set this field as follows:
+
+ _ RNDUP[TWLDQSEN(ns) / (4 * TCYC(ns))]
+
+ where TWLDQSEN is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(25nCK) */
+ uint64_t twlmrd : 4; /**< [ 34: 31](R/W) Indicates TWLMRD constraints. Set this field as follows:
+
+ _ RNDUP[TWLMRD(ns) / (4 * TCYC(ns))]
+
+ where TWLMRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(40nCK) */
+ uint64_t txp : 3; /**< [ 30: 28](R/W) Indicates TXP constraints. Set this field as follows:
+
+ _ RNDUP[TXP(ns) / TCYC(ns)] - 1
+
+ where TXP is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP=max(3nCK, 7.5 ns) */
+ uint64_t trrd : 3; /**< [ 27: 25](R/W) Indicates TRRD constraints. Set this field as follows:
+
+ _ RNDUP[TRRD(ns) / TCYC(ns)] - 2,
+
+ where TRRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(4nCK, 10 ns)
+
+ 0x0 = Reserved.
+ 0x1 = 3 TCYC.
+ ...
+ 0x6 = 8 TCYC.
+ 0x7 = 9 TCYC.
+
+ For DDR4, this is the tRRD_S parameter. */
+ uint64_t trfc : 7; /**< [ 24: 18](R/W) Indicates TRFC constraints. Set this field as follows:
+
+ _ RNDUP[TRFC(ns) / (8 * TCYC(ns))]
+
+ where TRFC is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 90-350 ns
+
+ 0x0 = reserved.
+ 0x1 = 8 TCYC.
+ 0x2 = 16 TCYC.
+ 0x3 = 24 TCYC.
+ 0x4 = 32 TCYC.
+ ...
+ 0x7E = 1008 TCYC.
+ 0x7F = 1016 TCYC. */
+ uint64_t twtr : 4; /**< [ 17: 14](R/W) Indicates TWTR constraints. Set this field as follows:
+
+ _ RNDUP[TWTR(ns) / TCYC(ns)] - 1
+
+ where TWTR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(4nCK, 7.5 ns)
+
+ For DDR4, this CSR field represents tWTR_S.
+
+ 0x0 = reserved.
+ 0x1 = 2.
+ ...
+ 0x7 = 8.
+ 0x8-0xF = reserved. */
+ uint64_t trcd : 4; /**< [ 13: 10](R/W) Indicates TRCD constraints. Set this field as follows:
+
+ _ RNDUP[TRCD(ns) / TCYC(ns)]
+
+ where TRCD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 10-15 ns
+
+ 0x0 = reserved.
+ 0x1 = 2 (2 is the smallest value allowed).
+ 0x2 = 2.
+ ...
+ 0xE = 14.
+ 0xF = reserved.
+
+ In 2T mode, make this register TRCD - 1, not going below 2. */
+ uint64_t tras : 6; /**< [ 9: 4](R/W) Indicates TRAS constraints. Set TRAS (CSR field) as follows:
+
+ _ RNDUP[TRAS(ns)/TCYC(ns)] - 1,
+
+ where TRAS is from the DDR3/DDR4 spec, and TCYC(ns) is the DDR clock frequency (not data
+ rate).
+
+ TYP = 35ns - 9 * TREFI
+
+ 0x0 = reserved.
+ 0x1 = 2 TCYC.
+ 0x2 = 3 TCYC.
+ ...
+ 0x3F = 64 TCYC. */
+ uint64_t tmprr : 4; /**< [ 3: 0](R/W) Indicates TMPRR constraints. Set this field as follows:
+
+ _ RNDUP[TMPRR(ns) / TCYC(ns)] - 1
+
+ where TMPRR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 1 nCK */
+#else /* Word 0 - Little Endian */
+ uint64_t tmprr : 4; /**< [ 3: 0](R/W) Indicates TMPRR constraints. Set this field as follows:
+
+ _ RNDUP[TMPRR(ns) / TCYC(ns)] - 1
+
+ where TMPRR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 1 nCK */
+ uint64_t tras : 6; /**< [ 9: 4](R/W) Indicates TRAS constraints. Set TRAS (CSR field) as follows:
+
+ _ RNDUP[TRAS(ns)/TCYC(ns)] - 1,
+
+ where TRAS is from the DDR3/DDR4 spec, and TCYC(ns) is the DDR clock frequency (not data
+ rate).
+
+ TYP = 35ns - 9 * TREFI
+
+ 0x0 = reserved.
+ 0x1 = 2 TCYC.
+ 0x2 = 3 TCYC.
+ ...
+ 0x3F = 64 TCYC. */
+ uint64_t trcd : 4; /**< [ 13: 10](R/W) Indicates TRCD constraints. Set this field as follows:
+
+ _ RNDUP[TRCD(ns) / TCYC(ns)]
+
+ where TRCD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 10-15 ns
+
+ 0x0 = reserved.
+ 0x1 = 2 (2 is the smallest value allowed).
+ 0x2 = 2.
+ ...
+ 0xE = 14.
+ 0xF = reserved.
+
+ In 2T mode, make this register TRCD - 1, not going below 2. */
+ uint64_t twtr : 4; /**< [ 17: 14](R/W) Indicates TWTR constraints. Set this field as follows:
+
+ _ RNDUP[TWTR(ns) / TCYC(ns)] - 1
+
+ where TWTR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(4nCK, 7.5 ns)
+
+ For DDR4, this CSR field represents tWTR_S.
+
+ 0x0 = reserved.
+ 0x1 = 2.
+ ...
+ 0x7 = 8.
+ 0x8-0xF = reserved. */
+ uint64_t trfc : 7; /**< [ 24: 18](R/W) Indicates TRFC constraints. Set this field as follows:
+
+ _ RNDUP[TRFC(ns) / (8 * TCYC(ns))]
+
+ where TRFC is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 90-350 ns
+
+ 0x0 = reserved.
+ 0x1 = 8 TCYC.
+ 0x2 = 16 TCYC.
+ 0x3 = 24 TCYC.
+ 0x4 = 32 TCYC.
+ ...
+ 0x7E = 1008 TCYC.
+ 0x7F = 1016 TCYC. */
+ uint64_t trrd : 3; /**< [ 27: 25](R/W) Indicates TRRD constraints. Set this field as follows:
+
+ _ RNDUP[TRRD(ns) / TCYC(ns)] - 2,
+
+ where TRRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(4nCK, 10 ns)
+
+ 0x0 = Reserved.
+ 0x1 = 3 TCYC.
+ ...
+ 0x6 = 8 TCYC.
+ 0x7 = 9 TCYC.
+
+ For DDR4, this is the tRRD_S parameter. */
+ uint64_t txp : 3; /**< [ 30: 28](R/W) Indicates TXP constraints. Set this field as follows:
+
+ _ RNDUP[TXP(ns) / TCYC(ns)] - 1
+
+ where TXP is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP=max(3nCK, 7.5 ns) */
+ uint64_t twlmrd : 4; /**< [ 34: 31](R/W) Indicates TWLMRD constraints. Set this field as follows:
+
+ _ RNDUP[TWLMRD(ns) / (4 * TCYC(ns))]
+
+ where TWLMRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(40nCK) */
+ uint64_t twldqsen : 4; /**< [ 38: 35](R/W) Indicates TWLDQSEN constraints. Set this field as follows:
+
+ _ RNDUP[TWLDQSEN(ns) / (4 * TCYC(ns))]
+
+ where TWLDQSEN is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = max(25nCK) */
+ uint64_t tfaw : 5; /**< [ 43: 39](R/W) Indicates TFAW constraints. Set this field as follows:
+
+ _ RNDUP[TFAW(ns) / (4 * TCYC(ns))]
+
+ where TFAW is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 30-40 ns
+
+ Internal:
+ When interfacing with DIMMs that contain 3DS DRAMs, set this field as follows:
+
+ _ RNDUP[tFAW_SLR(ns) / (4 * TCYC(ns))]
+
+ where tFAW_SLR is the Four activate window to the same logical rank from the
+ JEDEC DDR4 3D Stacked spec. */
+ uint64_t txpdll : 5; /**< [ 48: 44](R/W) Indicates TXPDLL constraints. Set this field as follows:
+
+ _ RNDUP[TXPDLL(ns) / TCYC(ns)] - 1
+
+ where TXPDLL is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP=max(10nCK, 24 ns) */
+ uint64_t trfc_dlr : 7; /**< [ 55: 49](R/W) Indicates tRFC_DLR constraints. Set this field as follows:
+
+ _ RNDUP[tRFC_DLR(ns) / (8 * TCYC(ns))]
+
+ where tRFC_DLR is from the JEDEC 3D stacked SDRAM spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate).
+
+ TYP = 90-120 ns.
+
+ 0x0 = reserved.
+ 0x1 = 8 TCYC.
+ 0x2 = 16 TCYC.
+ 0x3 = 24 TCYC.
+ 0x4 = 32 TCYC.
+ ...
+ 0x7E = 1008 TCYC.
+ 0x7F = 1016 TCYC. */
+ uint64_t tpdm_full_cycle_ena : 1; /**< [ 56: 56](R/W) When set, this field enables the addition of a one cycle delay to the
+ write/read latency calculation. This is to compensate the case when
+ tPDM delay in the RCD of an RDIMM is greater than one-cycle.
+ Only valid in RDIMM (LMC()_CONTROL[RDIMM_ENA]=1). */
+ uint64_t trcd_ext : 1; /**< [ 57: 57](R/W) A 1-bit extension to the TRCD register. */
+ uint64_t txp_ext : 1; /**< [ 58: 58](R/W) A 1-bit extension to the TXP register above. */
+ uint64_t reserved_59_63 : 5;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_lmcx_timing_params1_cn81xx cn83xx; */
+ /* struct bdk_lmcx_timing_params1_cn81xx cn88xxp2; */
+};
+typedef union bdk_lmcx_timing_params1 bdk_lmcx_timing_params1_t;
+
+static inline uint64_t BDK_LMCX_TIMING_PARAMS1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_TIMING_PARAMS1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0880001a0ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0880001a0ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e0880001a0ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e0880001a0ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_TIMING_PARAMS1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_TIMING_PARAMS1(a) bdk_lmcx_timing_params1_t
+#define bustype_BDK_LMCX_TIMING_PARAMS1(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_TIMING_PARAMS1(a) "LMCX_TIMING_PARAMS1"
+#define device_bar_BDK_LMCX_TIMING_PARAMS1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_TIMING_PARAMS1(a) (a)
+#define arguments_BDK_LMCX_TIMING_PARAMS1(a) (a),-1,-1,-1
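+
+/* Illustrative sketch (not from the BDK import; bdk_example_* is
+ * hypothetical): the unions above let software compose a register image
+ * field by field, then move the raw 64-bit value in a single access. With
+ * TRCD and TRAS already converted to clock cycles, a helper could build
+ * the image as below; the BDK's own CSR accessors (e.g. BDK_CSR_WRITE())
+ * would then write .u to the address from BDK_LMCX_TIMING_PARAMS1(a). */
+#if 0 /* example only */
+static uint64_t bdk_example_timing_params1(uint64_t trcd_clk, uint64_t tras_clk)
+{
+    bdk_lmcx_timing_params1_t tp1;
+    tp1.u = 0;
+    tp1.s.trcd = trcd_clk;      /* RNDUP[TRCD/TCYC]; 2 is the smallest legal value */
+    tp1.s.tras = tras_clk - 1;  /* RNDUP[TRAS/TCYC] - 1, per the field comment */
+    return tp1.u;
+}
+#endif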
+
+/**
+ * Register (RSL) lmc#_timing_params2
+ *
+ * LMC Timing Parameters Register 2
+ * This register sets timing parameters for DDR4.
+ */
+union bdk_lmcx_timing_params2
+{
+ uint64_t u;
+ struct bdk_lmcx_timing_params2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_35_63 : 29;
+ uint64_t tcmd_gear : 6; /**< [ 34: 29](R/W) Indicates tCMD_GEAR constraint. Set this field as follows:
+
+ _ RNDUP[tCMD_GEAR(ns) / (2 * TCYC(ns))]
+
+ where tCMD_GEAR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate). */
+ uint64_t tsync_gear : 6; /**< [ 28: 23](R/W) Indicates tSYNC_GEAR constraint. Set this field as follows:
+
+ _ RNDUP[tSYNC_GEAR(ns) / (2 * TCYC(ns))]
+
+ where tSYNC_GEAR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate). */
+ uint64_t txs : 7; /**< [ 22: 16](R/W) Indicates TXS constraints. Set this field as follows:
+
+ _ RNDUP[tXS(ns) / (8 * TCYC(ns))]
+
+ where tXS is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate). */
+ uint64_t trrd_l_ext : 1; /**< [ 15: 15](R/W) MSB of tRRD_L constraints. Set this field
+ when requiring tRRD_L of more than 9 nCK. Otherwise
+ this bit must be zero. */
+ uint64_t trtp : 4; /**< [ 14: 11](R/W) Specifies the TRTP parameter, in cycles. Set this field as follows:
+ _ RNDUP[TRTP(ns) / TCYC(ns)] - 1,
+
+ For DDR3, typical = max(4 nCK, 7.5ns).
+
+ For DDR4 the TRTP parameter is dictated by the TWR MR bits. */
+ uint64_t t_rw_op_max : 4; /**< [ 10: 7](R/W) Specifies the maximum delay for a read or write operation to complete, used to set the
+ timing of MRW and MPR operations. Set this field as follows:
+
+ _ RNDUP[Maximum operation delay (cycles) / 8]
+
+ Typical = 0x7. */
+ uint64_t twtr_l : 4; /**< [ 6: 3](R/W) Specifies tWTR_L constraints. Set this field as follows:
+
+ _ RNDUP[tWTR_L(ns) / TCYC(ns)] - 1
+
+ where tWTR_L is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not the
+ data rate).
+
+ Typical = MAX(4 nCK, 7.5 ns)
+
+ Internal:
+ Seems the '- 1' is because we add one back into slot timing equation */
+ uint64_t trrd_l : 3; /**< [ 2: 0](R/W) Specifies tRRD_L constraints. Set this field as follows:
+
+ _ RNDUP[tRRD_L(ns) / TCYC(ns)] - 2,
+
+ where tRRD_L is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not the
+ data rate).
+
+ Typical = MAX(4 nCK, 7.5 ns).
+ 0x0 = reserved.
+ 0x1 = three TCYC.
+ 0x2 = four TCYC.
+ 0x3 = five TCYC.
+ 0x4 = six TCYC.
+ 0x5 = seven TCYC.
+ 0x6 = eight TCYC.
+ 0x7 = nine TCYC. */
+#else /* Word 0 - Little Endian */
+ uint64_t trrd_l : 3; /**< [ 2: 0](R/W) Specifies tRRD_L constraints. Set this field as follows:
+
+ _ RNDUP[tRRD_L(ns) / TCYC(ns)] - 2,
+
+ where tRRD_L is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not the
+ data rate).
+
+ Typical = MAX(4 nCK, 7.5 ns).
+ 0x0 = reserved.
+ 0x1 = three TCYC.
+ 0x2 = four TCYC.
+ 0x3 = five TCYC.
+ 0x4 = six TCYC.
+ 0x5 = seven TCYC.
+ 0x6 = eight TCYC.
+ 0x7 = nine TCYC. */
+ uint64_t twtr_l : 4; /**< [ 6: 3](R/W) Specifies tWTR_L constraints. Set this field as follows:
+
+ _ RNDUP[tWTR_L(ns) / TCYC(ns)] - 1
+
+ where tWTR_L is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not the
+ data rate).
+
+ Typical = MAX(4 nCK, 7.5 ns)
+
+ Internal:
+ Seems the '- 1' is because we add one back into slot timing equation */
+ uint64_t t_rw_op_max : 4; /**< [ 10: 7](R/W) Specifies the maximum delay for a read or write operation to complete, used to set the
+ timing of MRW and MPR operations. Set this field as follows:
+
+ _ RNDUP[Maximum operation delay (cycles) / 8]
+
+ Typical = 0x7. */
+ uint64_t trtp : 4; /**< [ 14: 11](R/W) Specifies the TRTP parameter, in cycles. Set this field as follows:
+ _ RNDUP[TRTP(ns) / TCYC(ns)] - 1,
+
+ For DDR3, typical = max(4 nCK, 7.5ns).
+
+ For DDR4 the TRTP parameter is dictated by the TWR MR bits. */
+ uint64_t trrd_l_ext : 1; /**< [ 15: 15](R/W) MSB of tRRD_L constraints. Set this field
+ when requiring tRRD_L of more than 9 nCK. Otherwise
+ this bit must be zero. */
+ uint64_t txs : 7; /**< [ 22: 16](R/W) Indicates TXS constraints. Set this field as follows:
+
+ _ RNDUP[tXS(ns) / (8 * TCYC(ns))]
+
+ where tXS is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate). */
+ uint64_t tsync_gear : 6; /**< [ 28: 23](R/W) Indicates tSYNC_GEAR constraint. Set this field as follows:
+
+ _ RNDUP[tSYNC_GEAR(ns) / (2 * TCYC(ns))]
+
+ where tSYNC_GEAR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate). */
+ uint64_t tcmd_gear : 6; /**< [ 34: 29](R/W) Indicates tCMD_GEAR constraint. Set this field as follows:
+
+ _ RNDUP[tCMD_GEAR(ns) / (2 * TCYC(ns))]
+
+ where tCMD_GEAR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate). */
+ uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_timing_params2_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t trrd_l_ext : 1; /**< [ 15: 15](R/W) MSB of tRRD_L constraints. Set this field
+ when requiring tRRD_L of more than 9 nCK. Otherwise
+ this bit must be zero. */
+ uint64_t trtp : 4; /**< [ 14: 11](R/W) Specifies the TRTP parameter, in cycles. Set this field as follows:
+ _ RNDUP[TRTP(ns) / TCYC(ns)] - 1,
+
+ For DDR3, typical = max(4 nCK, 7.5ns).
+
+ For DDR4 the TRTP parameter is dictated by the TWR MR bits. */
+ uint64_t t_rw_op_max : 4; /**< [ 10: 7](R/W) Specifies the maximum delay for a read or write operation to complete, used to set the
+ timing of MRW and MPR operations. Set this field as follows:
+
+ _ RNDUP[Maximum operation delay (cycles) / 8]
+
+ Typical = 0x7. */
+ uint64_t twtr_l : 4; /**< [ 6: 3](R/W) Specifies tWTR_L constraints. Set this field as follows:
+
+ _ RNDUP[tWTR_L(ns) / TCYC(ns)] - 1
+
+ where tWTR_L is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not the
+ data rate).
+
+ Typical = MAX(4 nCK, 7.5 ns)
+
+ Internal:
+ Seems the '- 1' is because we add one back into slot timing equation */
+ uint64_t trrd_l : 3; /**< [ 2: 0](R/W) Specifies tRRD_L constraints. Set this field as follows:
+
+ _ RNDUP[tRRD_L(ns) / TCYC(ns)] - 2,
+
+ where tRRD_L is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not the
+ data rate).
+
+ Typical = MAX(4 nCK, 7.5 ns).
+ 0x0 = reserved.
+ 0x1 = three TCYC.
+ 0x2 = four TCYC.
+ 0x3 = five TCYC.
+ 0x4 = six TCYC.
+ 0x5 = seven TCYC.
+ 0x6 = eight TCYC.
+ 0x7 = nine TCYC. */
+#else /* Word 0 - Little Endian */
+ uint64_t trrd_l : 3; /**< [ 2: 0](R/W) Specifies tRRD_L constraints. Set this field as follows:
+
+ _ RNDUP[tRRD_L(ns) / TCYC(ns)] - 2,
+
+ where tRRD_L is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not the
+ data rate).
+
+ Typical = MAX(4 nCK, 7.5 ns).
+ 0x0 = reserved.
+ 0x1 = three TCYC.
+ 0x2 = four TCYC.
+ 0x3 = five TCYC.
+ 0x4 = six TCYC.
+ 0x5 = seven TCYC.
+ 0x6 = eight TCYC.
+ 0x7 = nine TCYC. */
+ uint64_t twtr_l : 4; /**< [ 6: 3](R/W) Specifies tWTR_L constraints. Set this field as follows:
+
+ _ RNDUP[tWTR_L(ns) / TCYC(ns)] - 1
+
+ where tWTR_L is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not the
+ data rate).
+
+ Typical = MAX(4 nCK, 7.5 ns)
+
+ Internal:
+ Seems the '- 1' is because we add one back into slot timing equation */
+ uint64_t t_rw_op_max : 4; /**< [ 10: 7](R/W) Specifies the maximum delay for a read or write operation to complete, used to set the
+ timing of MRW and MPR operations. Set this field as follows:
+
+ _ RNDUP[Maximum operation delay (cycles) / 8]
+
+ Typical = 0x7. */
+ uint64_t trtp : 4; /**< [ 14: 11](R/W) Specifies the TRTP parameter, in cycles. Set this field as follows:
+ _ RNDUP[TRTP(ns) / TCYC(ns)] - 1,
+
+ For DDR3, typical = max(4 nCK, 7.5ns).
+
+ For DDR4 the TRTP parameter is dictated by the TWR MR bits. */
+ uint64_t trrd_l_ext : 1; /**< [ 15: 15](R/W) MSB of tRRD_L constraints. Set this field
+ when requiring tRRD_L of more than 9 nCK. Otherwise
+ this bit must be zero. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_lmcx_timing_params2_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_35_63 : 29;
+ uint64_t tcmd_gear : 6; /**< [ 34: 29](R/W) Indicates tCMD_GEAR constraint. Set this field as follows:
+
+ _ RNDUP[tCMD_GEAR(ns) / (2 * TCYC(ns))]
+
+ where tCMD_GEAR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate). */
+ uint64_t tsync_gear : 6; /**< [ 28: 23](R/W) Indicates tSYNC_GEAR constraint. Set this field as follows:
+
+ _ RNDUP[tSYNC_GEAR(ns) / (2 * TCYC(ns))]
+
+ where tSYNC_GEAR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate). */
+ uint64_t txs : 7; /**< [ 22: 16](R/W) Indicates TXS constraints. Set this field as follows:
+
+ _ RNDUP[tXS(ns) / (8 * TCYC(ns))]
+
+ where tXS is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate). */
+ uint64_t trrd_l_ext : 1; /**< [ 15: 15](R/W) MSB of tRRD_L constraints. Set this field
+ when requiring tRRD_L of more than 9 nCK. Otherwise
+ this bit must be zero. */
+ uint64_t trtp : 4; /**< [ 14: 11](R/W) Specifies the TRTP parameter, in cycles. Set this field as follows:
+ _ RNDUP[TRTP(ns) / TCYC(ns)] - 1,
+
+ The TRTP parameter is dictated by the WR and RTP MR0 bits. */
+ uint64_t t_rw_op_max : 4; /**< [ 10: 7](R/W) Specifies the maximum delay for a read or write operation to complete, used to set the
+ timing of MRW and MPR operations. Set this field as follows:
+
+ _ RNDUP[Maximum operation delay (cycles) / 8]
+
+ Typical = 0x7. */
+ uint64_t twtr_l : 4; /**< [ 6: 3](R/W) Specifies tWTR_L constraints. Set this field as follows:
+
+ _ RNDUP[tWTR_L(ns) / TCYC(ns)] - 1
+
+ where tWTR_L is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not the
+ data rate).
+
+ Typical = MAX(4 nCK, 7.5 ns)
+
+ Internal:
+ Seems the '- 1' is because we add one back into slot timing equation */
+ uint64_t trrd_l : 3; /**< [ 2: 0](R/W) Specifies tRRD_L constraints. Set this field as follows:
+
+ _ RNDUP[tRRD_L(ns) / TCYC(ns)] - 2,
+
+ where tRRD_L is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not the
+ data rate).
+
+ Typical = MAX(4 nCK, 7.5 ns).
+ 0x0 = reserved.
+ 0x1 = three TCYC.
+ 0x2 = four TCYC.
+ 0x3 = five TCYC.
+ 0x4 = six TCYC.
+ 0x5 = seven TCYC.
+ 0x6 = eight TCYC.
+ 0x7 = nine TCYC. */
+#else /* Word 0 - Little Endian */
+ uint64_t trrd_l : 3; /**< [ 2: 0](R/W) Specifies tRRD_L constraints. Set this field as follows:
+
+ _ RNDUP[tRRD_L(ns) / TCYC(ns)] - 2,
+
+ where tRRD_L is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not the
+ data rate).
+
+ Typical = MAX(4 nCK, 7.5 ns).
+ 0x0 = reserved.
+ 0x1 = three TCYC.
+ 0x2 = four TCYC.
+ 0x3 = five TCYC.
+ 0x4 = six TCYC.
+ 0x5 = seven TCYC.
+ 0x6 = eight TCYC.
+ 0x7 = nine TCYC. */
+ uint64_t twtr_l : 4; /**< [ 6: 3](R/W) Specifies tWTR_L constraints. Set this field as follows:
+
+ _ RNDUP[tWTR_L(ns) / TCYC(ns)] - 1
+
+ where tWTR_L is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not the
+ data rate).
+
+ Typical = MAX(4 nCK, 7.5 ns)
+
+ Internal:
+ Seems the '- 1' is because we add one back into slot timing equation */
+ uint64_t t_rw_op_max : 4; /**< [ 10: 7](R/W) Specifies the maximum delay for a read or write operation to complete, used to set the
+ timing of MRW and MPR operations. Set this field as follows:
+
+ _ RNDUP[Maximum operation delay (cycles) / 8]
+
+ Typical = 0x7. */
+ uint64_t trtp : 4; /**< [ 14: 11](R/W) Specifies the TRTP parameter, in cycles. Set this field as follows:
+ _ RNDUP[TRTP(ns) / TCYC(ns)] - 1,
+
+ The TRTP parameter is dictated by the WR and RTP MR0 bits. */
+ uint64_t trrd_l_ext : 1; /**< [ 15: 15](R/W) MSB of tRRD_L constraints. Set this field
+ when requiring tRRD_L of more than 9 nCK. Otherwise
+ this bit must be zero. */
+ uint64_t txs : 7; /**< [ 22: 16](R/W) Indicates TXS constraints. Set this field as follows:
+
+ _ RNDUP[tXS(ns) / (8 * TCYC(ns))]
+
+ where tXS is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate). */
+ uint64_t tsync_gear : 6; /**< [ 28: 23](R/W) Indicates tSYNC_GEAR constraint. Set this field as follows:
+
+ _ RNDUP[tSYNC_GEAR(ns) / (2 * TCYC(ns))]
+
+ where tSYNC_GEAR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate). */
+ uint64_t tcmd_gear : 6; /**< [ 34: 29](R/W) Indicates tCMD_GEAR constraint. Set this field as follows:
+
+ _ RNDUP[tCMD_GEAR(ns) / (2 * TCYC(ns))]
+
+ where tCMD_GEAR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
+ frequency (not data rate). */
+ uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_lmcx_timing_params2 bdk_lmcx_timing_params2_t;
+
+static inline uint64_t BDK_LMCX_TIMING_PARAMS2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_TIMING_PARAMS2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000060ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000060ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000060ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000060ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_TIMING_PARAMS2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_TIMING_PARAMS2(a) bdk_lmcx_timing_params2_t
+#define bustype_BDK_LMCX_TIMING_PARAMS2(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_TIMING_PARAMS2(a) "LMCX_TIMING_PARAMS2"
+#define device_bar_BDK_LMCX_TIMING_PARAMS2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_TIMING_PARAMS2(a) (a)
+#define arguments_BDK_LMCX_TIMING_PARAMS2(a) (a),-1,-1,-1
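+
+/* Illustrative sketch (not from the BDK import; bdk_example_* is
+ * hypothetical): [TRRD_L] is encoded as RNDUP[tRRD_L/TCYC] - 2 with code
+ * 0x0 reserved (so 0x1 = 3 TCYC ... 0x7 = 9 TCYC), and [TRRD_L_EXT]
+ * supplies a fourth, most significant bit when more than 9 nCK is needed.
+ * A possible split of the code into the two fields, assuming that
+ * extension reading: */
+#if 0 /* example only */
+static int bdk_example_encode_trrd_l(uint64_t trrd_l_ps, uint64_t tcyc_ps,
+                                     uint64_t *trrd_l, uint64_t *trrd_l_ext)
+{
+    uint64_t clks = (trrd_l_ps + tcyc_ps - 1) / tcyc_ps; /* RNDUP */
+    uint64_t code;
+    if (clks < 3)
+        clks = 3;                  /* 0x0 is reserved; 0x1 means 3 TCYC */
+    code = clks - 2;
+    if (code > 0xf)
+        return -1;                 /* beyond what 3+1 bits can express */
+    *trrd_l = code & 0x7;          /* low three bits -> [TRRD_L] */
+    *trrd_l_ext = (code >> 3) & 1; /* MSB -> [TRRD_L_EXT] */
+    return 0;
+}
+#endif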
+
+/**
+ * Register (RSL) lmc#_wlevel_ctl
+ *
+ * LMC Write Level Control Register
+ */
+union bdk_lmcx_wlevel_ctl
+{
+ uint64_t u;
+ struct bdk_lmcx_wlevel_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_22_63 : 42;
+ uint64_t rtt_nom : 3; /**< [ 21: 19](R/W) LMC writes a decoded value to MR1[Rtt_Nom] of the rank during write leveling. Per JEDEC
+ DDR3 specifications, only values MR1[Rtt_Nom] = 1 (RZQ/4), 2 (RZQ/2), or 3 (RZQ/6) are
+ allowed during write leveling with output buffer enabled.
+ DDR3 Spec:
+ 0x0 = LMC writes 0x1 (RZQ/4) to MR1[Rtt_Nom].
+ 0x1 = LMC writes 0x2 (RZQ/2) to MR1[Rtt_Nom].
+ 0x2 = LMC writes 0x3 (RZQ/6) to MR1[Rtt_Nom].
+ 0x3 = LMC writes 0x4 (RZQ/12) to MR1[Rtt_Nom].
+ 0x4 = LMC writes 0x5 (RZQ/8) to MR1[Rtt_Nom].
+ 0x5 = LMC writes 0x6 (Rsvd) to MR1[Rtt_Nom].
+ 0x6 = LMC writes 0x7 (Rsvd) to MR1[Rtt_Nom].
+ 0x7 = LMC writes 0x0 (Disabled) to MR1[Rtt_Nom].
+
+ Internal:
+ In DDR4 LRDIMM application, this is used to program the Data Buffer Control Word BC00
+ during the Host Interface Write Leveling Mode:
+ 0x0 = LMC writes 0x1 (RZQ/4).
+ 0x1 = LMC writes 0x2 (RZQ/2).
+ 0x2 = LMC writes 0x3 (RZQ/6).
+ 0x3 = LMC writes 0x4 (RZQ/1).
+ 0x4 = LMC writes 0x5 (RZQ/5).
+ 0x5 = LMC writes 0x6 (RZQ/3).
+ 0x6 = LMC writes 0x7 (RZQ/7).
+ 0x7 = LMC writes 0x0 (Disabled). */
+ uint64_t bitmask : 8; /**< [ 18: 11](R/W) Mask to select bit lanes on which write leveling feedback is returned when [OR_DIS] is set to one. */
+ uint64_t or_dis : 1; /**< [ 10: 10](R/W) Disable ORing of bits in a byte lane when computing the write leveling bitmask. */
+ uint64_t sset : 1; /**< [ 9: 9](R/W) Run write leveling on the current setting only. */
+ uint64_t lanemask : 9; /**< [ 8: 0](R/W) One-shot mask to select byte lane to be leveled by the write leveling sequence. Used with
+ x16 parts where the upper and lower byte lanes need to be leveled independently.
+
+ This field is also used for byte lane masking during read leveling sequence. */
+#else /* Word 0 - Little Endian */
+ uint64_t lanemask : 9; /**< [ 8: 0](R/W) One-shot mask to select byte lane to be leveled by the write leveling sequence. Used with
+ x16 parts where the upper and lower byte lanes need to be leveled independently.
+
+ This field is also used for byte lane masking during read leveling sequence. */
+ uint64_t sset : 1; /**< [ 9: 9](R/W) Run write leveling on the current setting only. */
+ uint64_t or_dis : 1; /**< [ 10: 10](R/W) Disable ORing of bits in a byte lane when computing the write leveling bitmask. */
+ uint64_t bitmask : 8; /**< [ 18: 11](R/W) Mask to select bit lanes on which write leveling feedback is returned when [OR_DIS] is set to one. */
+ uint64_t rtt_nom : 3; /**< [ 21: 19](R/W) LMC writes a decoded value to MR1[Rtt_Nom] of the rank during write leveling. Per JEDEC
+ DDR3 specifications, only values MR1[Rtt_Nom] = 1 (RZQ/4), 2 (RZQ/2), or 3 (RZQ/6) are
+ allowed during write leveling with output buffer enabled.
+ DDR3 Spec:
+ 0x0 = LMC writes 0x1 (RZQ/4) to MR1[Rtt_Nom].
+ 0x1 = LMC writes 0x2 (RZQ/2) to MR1[Rtt_Nom].
+ 0x2 = LMC writes 0x3 (RZQ/6) to MR1[Rtt_Nom].
+ 0x3 = LMC writes 0x4 (RZQ/12) to MR1[Rtt_Nom].
+ 0x4 = LMC writes 0x5 (RZQ/8) to MR1[Rtt_Nom].
+ 0x5 = LMC writes 0x6 (Rsvd) to MR1[Rtt_Nom].
+ 0x6 = LMC writes 0x7 (Rsvd) to MR1[Rtt_Nom].
+ 0x7 = LMC writes 0x0 (Disabled) to MR1[Rtt_Nom].
+
+ Internal:
+ In DDR4 LRDIMM application, this is used to program the Data Buffer Control Word BC00
+ during the Host Interface Write Leveling Mode:
+ 0x0 = LMC writes 0x1 (RZQ/4).
+ 0x1 = LMC writes 0x2 (RZQ/2).
+ 0x2 = LMC writes 0x3 (RZQ/6).
+ 0x3 = LMC writes 0x4 (RZQ/1).
+ 0x4 = LMC writes 0x5 (RZQ/5).
+ 0x5 = LMC writes 0x6 (RZQ/3).
+ 0x6 = LMC writes 0x7 (RZQ/7).
+ 0x7 = LMC writes 0x0 (Disabled). */
+ uint64_t reserved_22_63 : 42;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_wlevel_ctl_s cn8; */
+ struct bdk_lmcx_wlevel_ctl_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_19_63 : 45;
+ uint64_t bitmask : 8; /**< [ 18: 11](R/W) Mask to select bit lanes on which write leveling feedback is returned when [OR_DIS] is set to one. */
+ uint64_t or_dis : 1; /**< [ 10: 10](R/W) Disable ORing of bits in a byte lane when computing the write leveling bitmask. */
+ uint64_t sset : 1; /**< [ 9: 9](R/W) Run write leveling on the current setting only. */
+ uint64_t lanemask : 9; /**< [ 8: 0](R/W) One-shot mask to select byte lane to be leveled by the write leveling sequence. Used with
+ x16 parts where the upper and lower byte lanes need to be leveled independently.
+
+ This field is also used for byte lane masking during read leveling sequence. */
+#else /* Word 0 - Little Endian */
+ uint64_t lanemask : 9; /**< [ 8: 0](R/W) One-shot mask to select byte lane to be leveled by the write leveling sequence. Used with
+ x16 parts where the upper and lower byte lanes need to be leveled independently.
+
+ This field is also used for byte lane masking during read leveling sequence. */
+ uint64_t sset : 1; /**< [ 9: 9](R/W) Run write leveling on the current setting only. */
+ uint64_t or_dis : 1; /**< [ 10: 10](R/W) Disable ORing of bits in a byte lane when computing the write leveling bitmask. */
+ uint64_t bitmask : 8; /**< [ 18: 11](R/W) Mask to select bit lanes on which write leveling feedback is returned when [OR_DIS] is set to one. */
+ uint64_t reserved_19_63 : 45;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_lmcx_wlevel_ctl bdk_lmcx_wlevel_ctl_t;
+
+static inline uint64_t BDK_LMCX_WLEVEL_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_WLEVEL_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000300ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000300ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000300ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000300ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_WLEVEL_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_WLEVEL_CTL(a) bdk_lmcx_wlevel_ctl_t
+#define bustype_BDK_LMCX_WLEVEL_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_WLEVEL_CTL(a) "LMCX_WLEVEL_CTL"
+#define device_bar_BDK_LMCX_WLEVEL_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_WLEVEL_CTL(a) (a)
+#define arguments_BDK_LMCX_WLEVEL_CTL(a) (a),-1,-1,-1
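
A minimal usage sketch (not part of the imported file), assuming the BDK_CSR_MODIFY helper from the
imported bdk-csr.h; the function name and the node/lmc/rtt_nom parameters are illustrative:

/* Hypothetical sketch: pick the termination LMC writes to MR1[Rtt_Nom]
 * while write leveling runs; encoding 0x1 requests RZQ/4. */
static void wlevel_set_rtt_nom(bdk_node_t node, int lmc, int rtt_nom)
{
    BDK_CSR_MODIFY(c, node, BDK_LMCX_WLEVEL_CTL(lmc),
                   c.s.rtt_nom = rtt_nom); /* e.g. 0x1 = RZQ/4 */
}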
+
+/**
+ * Register (RSL) lmc#_wlevel_dbg
+ *
+ * LMC Write Level Debug Register
+ * A given read of LMC()_WLEVEL_DBG returns the write leveling pass/fail results for all
+ * possible delay settings (i.e. the BITMASK) for only one byte in the last rank that the
+ * hardware write leveled. LMC()_WLEVEL_DBG[BYTE] selects the particular byte. To get these
+ * pass/fail results for a different rank, you must run the hardware write leveling again. For
+ * example, it is possible to get the [BITMASK] results for every byte of every rank if you run
+ * write leveling separately for each rank, probing LMC()_WLEVEL_DBG between each
+ * write-leveling run.
+ */
+union bdk_lmcx_wlevel_dbg
+{
+ uint64_t u;
+ struct bdk_lmcx_wlevel_dbg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+    uint64_t bitmask               : 8;  /**< [ 11:  4](RO/H) Bitmask generated during write level settings sweep. If LMC()_WLEVEL_CTL[SSET]=0,
+                                                                 [BITMASK]\<n\>=0 means write level setting n failed; [BITMASK]\<n\>=1 means write level
+                                                                 setting n passed, for 0 \<= n \<= 7. [BITMASK] contains the first 8 results of the
+                                                                 total 16 collected by LMC during the write leveling sequence.
+
+                                                                 If LMC()_WLEVEL_CTL[SSET]=1, [BITMASK]\<0\>=0 means curr write level setting failed;
+                                                                 [BITMASK]\<0\>=1 means curr write level setting passed. */
+ uint64_t byte : 4; /**< [ 3: 0](R/W) 0 \<= BYTE \<= 8. */
+#else /* Word 0 - Little Endian */
+ uint64_t byte : 4; /**< [ 3: 0](R/W) 0 \<= BYTE \<= 8. */
+    uint64_t bitmask               : 8;  /**< [ 11:  4](RO/H) Bitmask generated during write level settings sweep. If LMC()_WLEVEL_CTL[SSET]=0,
+                                                                 [BITMASK]\<n\>=0 means write level setting n failed; [BITMASK]\<n\>=1 means write level
+                                                                 setting n passed, for 0 \<= n \<= 7. [BITMASK] contains the first 8 results of the
+                                                                 total 16 collected by LMC during the write leveling sequence.
+
+                                                                 If LMC()_WLEVEL_CTL[SSET]=1, [BITMASK]\<0\>=0 means curr write level setting failed;
+                                                                 [BITMASK]\<0\>=1 means curr write level setting passed. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmcx_wlevel_dbg_s cn; */
+};
+typedef union bdk_lmcx_wlevel_dbg bdk_lmcx_wlevel_dbg_t;
+
+static inline uint64_t BDK_LMCX_WLEVEL_DBG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_WLEVEL_DBG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e088000308ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e088000308ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e088000308ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e088000308ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_WLEVEL_DBG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_WLEVEL_DBG(a) bdk_lmcx_wlevel_dbg_t
+#define bustype_BDK_LMCX_WLEVEL_DBG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_WLEVEL_DBG(a) "LMCX_WLEVEL_DBG"
+#define device_bar_BDK_LMCX_WLEVEL_DBG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_WLEVEL_DBG(a) (a)
+#define arguments_BDK_LMCX_WLEVEL_DBG(a) (a),-1,-1,-1
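
A minimal sketch of the probing flow described above, assuming the BDK_CSR_WRITE/BDK_CSR_READ
helpers from the imported bdk-csr.h and a printf-capable environment; the function name and the
node/lmc parameters are illustrative:

/* Select each byte lane in turn and read back its pass/fail bitmask
 * for the last rank that hardware write leveled. */
static void wlevel_dump_bitmasks(bdk_node_t node, int lmc)
{
    for (int byte = 0; byte <= 8; byte++)
    {
        bdk_lmcx_wlevel_dbg_t dbg;
        dbg.u = 0;
        dbg.s.byte = byte; /* lane to report */
        BDK_CSR_WRITE(node, BDK_LMCX_WLEVEL_DBG(lmc), dbg.u);
        dbg.u = BDK_CSR_READ(node, BDK_LMCX_WLEVEL_DBG(lmc));
        printf("LMC%d byte %d bitmask 0x%02x\n",
               lmc, byte, (unsigned)dbg.s.bitmask);
    }
}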
+
+/**
+ * Register (RSL) lmc#_wlevel_rank#
+ *
+ * LMC Write Level Rank Register
+ * Four of these CSRs exist per LMC, one for each rank. Write level setting is measured in units
+ * of 1/8 CK, so the below BYTEn values can range over 4 CK cycles. Assuming
+ * LMC()_WLEVEL_CTL[SSET]=0, the BYTEn\<2:0\> values are not used during write leveling, and
+ * they are overwritten by the hardware as part of the write leveling sequence. (Hardware sets
+ * [STATUS] to 3 after hardware write leveling completes for the rank). Software needs to set
+ * BYTEn\<4:3\> bits.
+ *
+ * Each CSR may also be written by software, but not while a write leveling sequence is in
+ * progress. (Hardware sets [STATUS] to 1 after a CSR write.) Software initiates a hardware
+ * write-leveling sequence by programming LMC()_WLEVEL_CTL and writing LMC()_CONFIG[RANKMASK]
+ * and LMC()_SEQ_CTL[INIT_START]=1 with LMC()_SEQ_CTL[SEQ_SEL]=6.
+ *
+ * LMC will then step through and accumulate write leveling results for 8 unique delay settings
+ * (twice), starting at a delay of LMC()_WLEVEL_RANK()[BYTEn\<4:3\>] * 8 CK, increasing by
+ * 1/8 CK each setting. Hardware will then set LMC()_WLEVEL_RANK()[BYTEn\<2:0\>] to
+ * indicate the first write leveling result of 1 that followed a result of 0 during the
+ * sequence by searching for a '1100' pattern in the generated bitmask, except that LMC will
+ * always write LMC()_WLEVEL_RANK()[BYTEn\<0\>]=0. If hardware is unable to find a match
+ * for a '1100' pattern, then hardware sets LMC()_WLEVEL_RANK() [BYTEn\<2:0\>] to 0x4. See
+ * LMC()_WLEVEL_CTL.
+ *
+ * LMC()_WLEVEL_RANKi values for ranks i without attached DRAM should be set such that they
+ * do not increase the range of possible BYTE values for any byte lane. The easiest way to do
+ * this is to set LMC()_WLEVEL_RANKi = LMC()_WLEVEL_RANKj, where j is some rank with attached
+ * DRAM whose
+ * LMC()_WLEVEL_RANKj is already fully initialized.
+ */
+union bdk_lmcx_wlevel_rankx
+{
+ uint64_t u;
+ struct bdk_lmcx_wlevel_rankx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_47_63 : 17;
+ uint64_t status : 2; /**< [ 46: 45](RO/H) Indicates status of the write leveling and where the BYTE* programmings in \<44:0\> came
+ from:
+ 0x0 = BYTE* values are their reset value.
+ 0x1 = BYTE* values were set via a CSR write to this register.
+ 0x2 = Write leveling sequence currently in progress (BYTE* values are unpredictable).
+ 0x3 = BYTE* values came from a complete write leveling sequence, irrespective of which
+ lanes are masked via LMC()_WLEVEL_CTL[LANEMASK]. */
+ uint64_t byte8 : 5; /**< [ 44: 40](R/W/H) "Write level setting. Bit 0 of BYTE8 must be zero during normal operation. When ECC DRAM
+ is not present in 64-bit mode (i.e. when DRAM is not attached to chip signals DDR#_DQS_8_*
+ and DDR#_CB\<7:0\>), software should write BYTE8 with a value that does not increase the
+ range of possible BYTE* values. The easiest way to do this is to set
+ LMC()_WLEVEL_RANK()[BYTE8] = LMC()_WLEVEL_RANK()[BYTE0] when there is no
+ ECC DRAM, using the final BYTE0 value." */
+ uint64_t byte7 : 5; /**< [ 39: 35](R/W/H) Write level setting. Bit 0 of [BYTE7] must be zero during normal operation. */
+ uint64_t byte6 : 5; /**< [ 34: 30](R/W/H) Write level setting. Bit 0 of [BYTE6] must be zero during normal operation. */
+ uint64_t byte5 : 5; /**< [ 29: 25](R/W/H) Write level setting. Bit 0 of [BYTE5] must be zero during normal operation. */
+ uint64_t byte4 : 5; /**< [ 24: 20](R/W/H) Write level setting. Bit 0 of [BYTE4] must be zero during normal operation. */
+ uint64_t byte3 : 5; /**< [ 19: 15](R/W/H) Write level setting. Bit 0 of [BYTE3] must be zero during normal operation. */
+ uint64_t byte2 : 5; /**< [ 14: 10](R/W/H) Write level setting. Bit 0 of [BYTE2] must be zero during normal operation. */
+ uint64_t byte1 : 5; /**< [ 9: 5](R/W/H) Write level setting. Bit 0 of [BYTE1] must be zero during normal operation. */
+ uint64_t byte0 : 5; /**< [ 4: 0](R/W/H) Write level setting. Bit 0 of [BYTE0] must be zero during normal operation. */
+#else /* Word 0 - Little Endian */
+ uint64_t byte0 : 5; /**< [ 4: 0](R/W/H) Write level setting. Bit 0 of [BYTE0] must be zero during normal operation. */
+ uint64_t byte1 : 5; /**< [ 9: 5](R/W/H) Write level setting. Bit 0 of [BYTE1] must be zero during normal operation. */
+ uint64_t byte2 : 5; /**< [ 14: 10](R/W/H) Write level setting. Bit 0 of [BYTE2] must be zero during normal operation. */
+ uint64_t byte3 : 5; /**< [ 19: 15](R/W/H) Write level setting. Bit 0 of [BYTE3] must be zero during normal operation. */
+ uint64_t byte4 : 5; /**< [ 24: 20](R/W/H) Write level setting. Bit 0 of [BYTE4] must be zero during normal operation. */
+ uint64_t byte5 : 5; /**< [ 29: 25](R/W/H) Write level setting. Bit 0 of [BYTE5] must be zero during normal operation. */
+ uint64_t byte6 : 5; /**< [ 34: 30](R/W/H) Write level setting. Bit 0 of [BYTE6] must be zero during normal operation. */
+ uint64_t byte7 : 5; /**< [ 39: 35](R/W/H) Write level setting. Bit 0 of [BYTE7] must be zero during normal operation. */
+ uint64_t byte8 : 5; /**< [ 44: 40](R/W/H) "Write level setting. Bit 0 of BYTE8 must be zero during normal operation. When ECC DRAM
+ is not present in 64-bit mode (i.e. when DRAM is not attached to chip signals DDR#_DQS_8_*
+ and DDR#_CB\<7:0\>), software should write BYTE8 with a value that does not increase the
+ range of possible BYTE* values. The easiest way to do this is to set
+ LMC()_WLEVEL_RANK()[BYTE8] = LMC()_WLEVEL_RANK()[BYTE0] when there is no
+ ECC DRAM, using the final BYTE0 value." */
+ uint64_t status : 2; /**< [ 46: 45](RO/H) Indicates status of the write leveling and where the BYTE* programmings in \<44:0\> came
+ from:
+ 0x0 = BYTE* values are their reset value.
+ 0x1 = BYTE* values were set via a CSR write to this register.
+ 0x2 = Write leveling sequence currently in progress (BYTE* values are unpredictable).
+ 0x3 = BYTE* values came from a complete write leveling sequence, irrespective of which
+ lanes are masked via LMC()_WLEVEL_CTL[LANEMASK]. */
+ uint64_t reserved_47_63 : 17;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_wlevel_rankx_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_47_63 : 17;
+ uint64_t status : 2; /**< [ 46: 45](RO/H) Indicates status of the write leveling and where the BYTE* programmings in \<44:0\> came
+ from:
+ 0x0 = BYTE* values are their reset value.
+ 0x1 = BYTE* values were set via a CSR write to this register.
+ 0x2 = Write leveling sequence currently in progress (BYTE* values are unpredictable).
+ 0x3 = BYTE* values came from a complete write leveling sequence, irrespective of which
+ lanes are masked via LMC()_WLEVEL_CTL[LANEMASK]. */
+ uint64_t byte8 : 5; /**< [ 44: 40](R/W/H) "Write level setting. Bit 0 of BYTE8 must be zero during normal operation. When ECC DRAM
+ is not present in 64-bit mode (i.e. when DRAM is not attached to chip signals DDR#_CBS_0_*
+ and DDR#_CB\<7:0\>), software should write BYTE8 with a value that does not increase the
+ range of possible BYTE* values. The easiest way to do this is to set
+ LMC()_WLEVEL_RANK()[BYTE8] = LMC()_WLEVEL_RANK()[BYTE0] when there is no
+ ECC DRAM, using the final BYTE0 value." */
+ uint64_t byte7 : 5; /**< [ 39: 35](R/W/H) Write level setting. Bit 0 of [BYTE7] must be zero during normal operation. */
+ uint64_t byte6 : 5; /**< [ 34: 30](R/W/H) Write level setting. Bit 0 of [BYTE6] must be zero during normal operation. */
+ uint64_t byte5 : 5; /**< [ 29: 25](R/W/H) Write level setting. Bit 0 of [BYTE5] must be zero during normal operation. */
+ uint64_t byte4 : 5; /**< [ 24: 20](R/W/H) Write level setting. Bit 0 of [BYTE4] must be zero during normal operation. */
+ uint64_t byte3 : 5; /**< [ 19: 15](R/W/H) Write level setting. Bit 0 of [BYTE3] must be zero during normal operation. */
+ uint64_t byte2 : 5; /**< [ 14: 10](R/W/H) Write level setting. Bit 0 of [BYTE2] must be zero during normal operation. */
+ uint64_t byte1 : 5; /**< [ 9: 5](R/W/H) Write level setting. Bit 0 of [BYTE1] must be zero during normal operation. */
+ uint64_t byte0 : 5; /**< [ 4: 0](R/W/H) Write level setting. Bit 0 of [BYTE0] must be zero during normal operation. */
+#else /* Word 0 - Little Endian */
+ uint64_t byte0 : 5; /**< [ 4: 0](R/W/H) Write level setting. Bit 0 of [BYTE0] must be zero during normal operation. */
+ uint64_t byte1 : 5; /**< [ 9: 5](R/W/H) Write level setting. Bit 0 of [BYTE1] must be zero during normal operation. */
+ uint64_t byte2 : 5; /**< [ 14: 10](R/W/H) Write level setting. Bit 0 of [BYTE2] must be zero during normal operation. */
+ uint64_t byte3 : 5; /**< [ 19: 15](R/W/H) Write level setting. Bit 0 of [BYTE3] must be zero during normal operation. */
+ uint64_t byte4 : 5; /**< [ 24: 20](R/W/H) Write level setting. Bit 0 of [BYTE4] must be zero during normal operation. */
+ uint64_t byte5 : 5; /**< [ 29: 25](R/W/H) Write level setting. Bit 0 of [BYTE5] must be zero during normal operation. */
+ uint64_t byte6 : 5; /**< [ 34: 30](R/W/H) Write level setting. Bit 0 of [BYTE6] must be zero during normal operation. */
+ uint64_t byte7 : 5; /**< [ 39: 35](R/W/H) Write level setting. Bit 0 of [BYTE7] must be zero during normal operation. */
+ uint64_t byte8 : 5; /**< [ 44: 40](R/W/H) "Write level setting. Bit 0 of BYTE8 must be zero during normal operation. When ECC DRAM
+ is not present in 64-bit mode (i.e. when DRAM is not attached to chip signals DDR#_CBS_0_*
+ and DDR#_CB\<7:0\>), software should write BYTE8 with a value that does not increase the
+ range of possible BYTE* values. The easiest way to do this is to set
+ LMC()_WLEVEL_RANK()[BYTE8] = LMC()_WLEVEL_RANK()[BYTE0] when there is no
+ ECC DRAM, using the final BYTE0 value." */
+ uint64_t status : 2; /**< [ 46: 45](RO/H) Indicates status of the write leveling and where the BYTE* programmings in \<44:0\> came
+ from:
+ 0x0 = BYTE* values are their reset value.
+ 0x1 = BYTE* values were set via a CSR write to this register.
+ 0x2 = Write leveling sequence currently in progress (BYTE* values are unpredictable).
+ 0x3 = BYTE* values came from a complete write leveling sequence, irrespective of which
+ lanes are masked via LMC()_WLEVEL_CTL[LANEMASK]. */
+ uint64_t reserved_47_63 : 17;
+#endif /* Word 0 - End */
+ } cn9;
+ /* struct bdk_lmcx_wlevel_rankx_s cn81xx; */
+ /* struct bdk_lmcx_wlevel_rankx_s cn88xx; */
+ /* struct bdk_lmcx_wlevel_rankx_cn9 cn83xx; */
+};
+typedef union bdk_lmcx_wlevel_rankx bdk_lmcx_wlevel_rankx_t;
+
+static inline uint64_t BDK_LMCX_WLEVEL_RANKX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_WLEVEL_RANKX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=3)))
+ return 0x87e0880002c0ll + 0x1000000ll * ((a) & 0x0) + 8ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=3)))
+ return 0x87e0880002c0ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b<=3)))
+ return 0x87e0880002c0ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=2) && (b<=3)))
+ return 0x87e0880002c0ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x3);
+ __bdk_csr_fatal("LMCX_WLEVEL_RANKX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_LMCX_WLEVEL_RANKX(a,b) bdk_lmcx_wlevel_rankx_t
+#define bustype_BDK_LMCX_WLEVEL_RANKX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_WLEVEL_RANKX(a,b) "LMCX_WLEVEL_RANKX"
+#define device_bar_BDK_LMCX_WLEVEL_RANKX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_WLEVEL_RANKX(a,b) (a)
+#define arguments_BDK_LMCX_WLEVEL_RANKX(a,b) (a),(b),-1,-1
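
A minimal sketch of the launch procedure described above, assuming the BDK_CSR_MODIFY and
BDK_CSR_WAIT_FOR_FIELD helpers and the LMC()_CONFIG/LMC()_SEQ_CTL field names from the imported
BDK sources; the function name and parameters are illustrative:

/* Kick off hardware write leveling (sequence 6) for the ranks in
 * rank_mask, then poll SEQ_COMPLETE with a timeout. */
static void launch_write_leveling(bdk_node_t node, int lmc, int rank_mask)
{
    BDK_CSR_MODIFY(c, node, BDK_LMCX_CONFIG(lmc),
                   c.s.rankmask = rank_mask); /* ranks to level */
    BDK_CSR_MODIFY(c, node, BDK_LMCX_SEQ_CTL(lmc),
                   c.s.seq_sel = 6; /* write leveling */
                   c.s.init_start = 1);
    BDK_CSR_WAIT_FOR_FIELD(node, BDK_LMCX_SEQ_CTL(lmc),
                           seq_complete, ==, 1, 1000000);
}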
+
+/**
+ * Register (RSL) lmc#_wodt_mask
+ *
+ * LMC Write OnDieTermination Mask Register
+ * System designers may desire to terminate DQ/DQS lines for higher-frequency DDR operations,
+ * especially on a multirank system. DDR4 DQ/DQS I/Os have built-in termination resistors that
+ * can be turned on or off by the controller, after meeting TAOND and TAOF timing requirements.
+ * Each rank has its own ODT pin that fans out to all of the memory parts in that DIMM. System
+ * designers may prefer different combinations of ODT ONs for write operations into different
+ * ranks. CNXXXX supports full programmability by way of the mask register below. Each rank
+ * position has its own 8-bit programmable field. When the controller does a write to that rank,
+ * it sets the four ODT pins to the mask pins below. For example, when doing a write into
+ * Rank0, a system designer may desire to terminate the lines with the resistor on
+ * DIMM0/Rank1. The mask [WODT_D0_R0] would then be {00000010}.
+ *
+ * CNXXXX drives the appropriate mask values on the ODT pins by default. If this feature is not
+ * required, write 0x0 in this register. When a given RANK is selected, the WODT mask for that
+ * RANK is used. The resulting WODT mask is driven to the DIMMs in the following manner:
+ */
+union bdk_lmcx_wodt_mask
+{
+ uint64_t u;
+ struct bdk_lmcx_wodt_mask_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_28_63 : 36;
+ uint64_t wodt_d1_r1 : 4; /**< [ 27: 24](R/W) Reserved.
+ Internal:
+ Write ODT mask DIMM1, RANK1/DIMM1 in SingleRanked.
+ If LMC()_CONFIG[RANK_ENA]=0, [WODT_D1_R1]\<3:0\> must be zero. */
+ uint64_t reserved_20_23 : 4;
+ uint64_t wodt_d1_r0 : 4; /**< [ 19: 16](R/W) Reserved.
+ Internal:
+ Write ODT mask DIMM1, RANK0. If [RANK_ENA]=0, [WODT_D1_R0]\<3,1\> must be zero. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t wodt_d0_r1 : 4; /**< [ 11: 8](R/W) Write ODT mask DIMM0, RANK1/DIMM0 in SingleRanked. If LMC()_CONFIG[RANK_ENA]=0,
+ [WODT_D0_R1]\<3:0\> must be zero. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t wodt_d0_r0 : 4; /**< [ 3: 0](R/W) Write ODT mask DIMM0, RANK0. If LMC()_CONFIG[RANK_ENA]=0, [WODT_D0_R0]\<3,1\> must be zero. */
+#else /* Word 0 - Little Endian */
+ uint64_t wodt_d0_r0 : 4; /**< [ 3: 0](R/W) Write ODT mask DIMM0, RANK0. If LMC()_CONFIG[RANK_ENA]=0, [WODT_D0_R0]\<3,1\> must be zero. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t wodt_d0_r1 : 4; /**< [ 11: 8](R/W) Write ODT mask DIMM0, RANK1/DIMM0 in SingleRanked. If LMC()_CONFIG[RANK_ENA]=0,
+ [WODT_D0_R1]\<3:0\> must be zero. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t wodt_d1_r0 : 4; /**< [ 19: 16](R/W) Reserved.
+ Internal:
+ Write ODT mask DIMM1, RANK0. If [RANK_ENA]=0, [WODT_D1_R0]\<3,1\> must be zero. */
+ uint64_t reserved_20_23 : 4;
+ uint64_t wodt_d1_r1 : 4; /**< [ 27: 24](R/W) Reserved.
+ Internal:
+ Write ODT mask DIMM1, RANK1/DIMM1 in SingleRanked.
+ If LMC()_CONFIG[RANK_ENA]=0, [WODT_D1_R1]\<3:0\> must be zero. */
+ uint64_t reserved_28_63 : 36;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_lmcx_wodt_mask_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_28_63 : 36;
+ uint64_t wodt_d1_r1 : 4; /**< [ 27: 24](R/W) Write ODT mask DIMM1, RANK1/DIMM1 in SingleRanked.
+ If LMC()_CONFIG[RANK_ENA]=0, [WODT_D1_R1]\<3:0\> must be zero. */
+ uint64_t reserved_20_23 : 4;
+ uint64_t wodt_d1_r0 : 4; /**< [ 19: 16](R/W) Write ODT mask DIMM1, RANK0. If LMC()_CONFIG[RANK_ENA]=0, [WODT_D1_R0]\<3,1\> must be zero. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t wodt_d0_r1 : 4; /**< [ 11: 8](R/W) Write ODT mask DIMM0, RANK1/DIMM0 in SingleRanked. If LMC()_CONFIG[RANK_ENA]=0,
+ [WODT_D0_R1]\<3:0\> must be zero. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t wodt_d0_r0 : 4; /**< [ 3: 0](R/W) Write ODT mask DIMM0, RANK0. If LMC()_CONFIG[RANK_ENA]=0, [WODT_D0_R0]\<3,1\> must be zero. */
+#else /* Word 0 - Little Endian */
+ uint64_t wodt_d0_r0 : 4; /**< [ 3: 0](R/W) Write ODT mask DIMM0, RANK0. If LMC()_CONFIG[RANK_ENA]=0, [WODT_D0_R0]\<3,1\> must be zero. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t wodt_d0_r1 : 4; /**< [ 11: 8](R/W) Write ODT mask DIMM0, RANK1/DIMM0 in SingleRanked. If LMC()_CONFIG[RANK_ENA]=0,
+ [WODT_D0_R1]\<3:0\> must be zero. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t wodt_d1_r0 : 4; /**< [ 19: 16](R/W) Write ODT mask DIMM1, RANK0. If LMC()_CONFIG[RANK_ENA]=0, [WODT_D1_R0]\<3,1\> must be zero. */
+ uint64_t reserved_20_23 : 4;
+ uint64_t wodt_d1_r1 : 4; /**< [ 27: 24](R/W) Write ODT mask DIMM1, RANK1/DIMM1 in SingleRanked.
+ If LMC()_CONFIG[RANK_ENA]=0, [WODT_D1_R1]\<3:0\> must be zero. */
+ uint64_t reserved_28_63 : 36;
+#endif /* Word 0 - End */
+ } cn9;
+ /* struct bdk_lmcx_wodt_mask_s cn81xx; */
+ /* struct bdk_lmcx_wodt_mask_cn9 cn88xx; */
+ /* struct bdk_lmcx_wodt_mask_cn9 cn83xx; */
+};
+typedef union bdk_lmcx_wodt_mask bdk_lmcx_wodt_mask_t;
+
+static inline uint64_t BDK_LMCX_WODT_MASK(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMCX_WODT_MASK(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0880001b0ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0880001b0ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e0880001b0ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e0880001b0ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("LMCX_WODT_MASK", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMCX_WODT_MASK(a) bdk_lmcx_wodt_mask_t
+#define bustype_BDK_LMCX_WODT_MASK(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_LMCX_WODT_MASK(a) "LMCX_WODT_MASK"
+#define device_bar_BDK_LMCX_WODT_MASK(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_LMCX_WODT_MASK(a) (a)
+#define arguments_BDK_LMCX_WODT_MASK(a) (a),-1,-1,-1
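
A minimal sketch of the Rank0 example above, assuming the BDK_CSR_MODIFY helper; the function
name is illustrative:

/* WODT_D0_R0 = 0b0010: assert the DIMM0/Rank1 ODT pin on writes to
 * DIMM0/Rank0, per the {00000010} example in the description. */
static void wodt_terminate_on_d0r1(bdk_node_t node, int lmc)
{
    BDK_CSR_MODIFY(c, node, BDK_LMCX_WODT_MASK(lmc),
                   c.s.wodt_d0_r0 = 0x2);
}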
+
+#endif /* __BDK_CSRS_LMC_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-lmt.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-lmt.h
new file mode 100644
index 0000000000..fa0efc3d1e
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-lmt.h
@@ -0,0 +1,150 @@
+#ifndef __BDK_CSRS_LMT_H__
+#define __BDK_CSRS_LMT_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium LMT.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Register (RVU_PFVF_BAR2) lmt_lf_lmtcancel
+ *
+ * RVU VF LMT Cancel Register
+ */
+union bdk_lmt_lf_lmtcancel
+{
+ uint64_t u;
+ struct bdk_lmt_lf_lmtcancel_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](WO) This register's address is used to perform an AP LMTCANCEL operation. This is
+ only used by code executing on AP cores and has no function when accessed by
+ other entities.
+
+ Internal:
+ LMTLINE and LMTCANCEL addresses are consumed by APs and never received by
+ RVU. See also RVU_BLOCK_ADDR_E::LMT. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](WO) This register's address is used to perform an AP LMTCANCEL operation. This is
+ only used by code executing on AP cores and has no function when accessed by
+ other entities.
+
+ Internal:
+ LMTLINE and LMTCANCEL addresses are consumed by APs and never received by
+ RVU. See also RVU_BLOCK_ADDR_E::LMT. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmt_lf_lmtcancel_s cn; */
+};
+typedef union bdk_lmt_lf_lmtcancel bdk_lmt_lf_lmtcancel_t;
+
+#define BDK_LMT_LF_LMTCANCEL BDK_LMT_LF_LMTCANCEL_FUNC()
+static inline uint64_t BDK_LMT_LF_LMTCANCEL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMT_LF_LMTCANCEL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850200100400ll;
+ __bdk_csr_fatal("LMT_LF_LMTCANCEL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_LMT_LF_LMTCANCEL bdk_lmt_lf_lmtcancel_t
+#define bustype_BDK_LMT_LF_LMTCANCEL BDK_CSR_TYPE_RVU_PFVF_BAR2
+#define basename_BDK_LMT_LF_LMTCANCEL "LMT_LF_LMTCANCEL"
+#define device_bar_BDK_LMT_LF_LMTCANCEL 0x2 /* BAR2 */
+#define busnum_BDK_LMT_LF_LMTCANCEL 0
+#define arguments_BDK_LMT_LF_LMTCANCEL -1,-1,-1,-1
+
+/**
+ * Register (RVU_PFVF_BAR2) lmt_lf_lmtline#
+ *
+ * RVU VF LMT Line Registers
+ */
+union bdk_lmt_lf_lmtlinex
+{
+ uint64_t u;
+ struct bdk_lmt_lf_lmtlinex_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](WO) This register's address is the 128-byte LMTLINE used to form LMTST
+ operations. This is only used by code executing on AP cores and has no function
+ when accessed by other entities.
+
+ Internal:
+ LMTLINE and LMTCANCEL addresses are consumed by APs and never received by
+ RVU. See also RVU_BLOCK_ADDR_E::LMT. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](WO) This register's address is the 128-byte LMTLINE used to form LMTST
+ operations. This is only used by code executing on AP cores and has no function
+ when accessed by other entities.
+
+ Internal:
+ LMTLINE and LMTCANCEL addresses are consumed by APs and never received by
+ RVU. See also RVU_BLOCK_ADDR_E::LMT. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_lmt_lf_lmtlinex_s cn; */
+};
+typedef union bdk_lmt_lf_lmtlinex bdk_lmt_lf_lmtlinex_t;
+
+static inline uint64_t BDK_LMT_LF_LMTLINEX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_LMT_LF_LMTLINEX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=15))
+ return 0x850200100000ll + 8ll * ((a) & 0xf);
+ __bdk_csr_fatal("LMT_LF_LMTLINEX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_LMT_LF_LMTLINEX(a) bdk_lmt_lf_lmtlinex_t
+#define bustype_BDK_LMT_LF_LMTLINEX(a) BDK_CSR_TYPE_RVU_PFVF_BAR2
+#define basename_BDK_LMT_LF_LMTLINEX(a) "LMT_LF_LMTLINEX"
+#define device_bar_BDK_LMT_LF_LMTLINEX(a) 0x2 /* BAR2 */
+#define busnum_BDK_LMT_LF_LMTLINEX(a) (a)
+#define arguments_BDK_LMT_LF_LMTLINEX(a) (a),-1,-1,-1
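
A minimal sketch, assuming a printf-capable BDK environment: the accessors above yield the
physical addresses an AP core would target for the 128-byte LMTLINE (16 64-bit words) and for
LMTCANCEL; on non-CN9XXX parts they fault via __bdk_csr_fatal().

/* Print the LMTLINE word addresses and the LMTCANCEL address. */
static void lmt_show_addresses(void)
{
    for (unsigned i = 0; i < 16; i++)
        printf("LMTLINE[%u] @ 0x%llx\n", i,
               (unsigned long long)BDK_LMT_LF_LMTLINEX(i));
    printf("LMTCANCEL @ 0x%llx\n",
           (unsigned long long)BDK_LMT_LF_LMTCANCEL);
}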
+
+#endif /* __BDK_CSRS_LMT_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-mio_boot.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-mio_boot.h
new file mode 100644
index 0000000000..2223015a3c
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-mio_boot.h
@@ -0,0 +1,537 @@
+#ifndef __BDK_CSRS_MIO_BOOT_H__
+#define __BDK_CSRS_MIO_BOOT_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium MIO_BOOT.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Enumeration mio_boot_bar_e
+ *
+ * MIO Boot Base Address Register Enumeration
+ * Enumerates the base address registers.
+ */
+#define BDK_MIO_BOOT_BAR_E_MIO_BOOT_PF_BAR0 (0x87e000000000ll)
+#define BDK_MIO_BOOT_BAR_E_MIO_BOOT_PF_BAR0_SIZE 0x800000ull
+
+/**
+ * Register (RSL) mio_boot_ap_jump
+ *
+ * MIO Boot AP Jump Address Register
+ */
+union bdk_mio_boot_ap_jump
+{
+ uint64_t u;
+ struct bdk_mio_boot_ap_jump_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t addr : 64; /**< [ 63: 0](SR/W) Boot address. This register contains the address the internal boot loader
+ will jump to after reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 64; /**< [ 63: 0](SR/W) Boot address. This register contains the address the internal boot loader
+ will jump to after reset. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_mio_boot_ap_jump_s cn; */
+};
+typedef union bdk_mio_boot_ap_jump bdk_mio_boot_ap_jump_t;
+
+#define BDK_MIO_BOOT_AP_JUMP BDK_MIO_BOOT_AP_JUMP_FUNC()
+static inline uint64_t BDK_MIO_BOOT_AP_JUMP_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_BOOT_AP_JUMP_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x87e0000000d0ll;
+ __bdk_csr_fatal("MIO_BOOT_AP_JUMP", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_BOOT_AP_JUMP bdk_mio_boot_ap_jump_t
+#define bustype_BDK_MIO_BOOT_AP_JUMP BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_BOOT_AP_JUMP "MIO_BOOT_AP_JUMP"
+#define device_bar_BDK_MIO_BOOT_AP_JUMP 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_BOOT_AP_JUMP 0
+#define arguments_BDK_MIO_BOOT_AP_JUMP -1,-1,-1,-1
+
+/**
+ * Register (RSL) mio_boot_bist_stat
+ *
+ * MIO Boot BIST Status Register
+ * The boot BIST status register contains the BIST status for the MIO boot memories:
+ * 0 = pass, 1 = fail.
+ */
+union bdk_mio_boot_bist_stat
+{
+ uint64_t u;
+ struct bdk_mio_boot_bist_stat_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t stat : 16; /**< [ 15: 0](RO/H) BIST status.
+ Internal:
+ \<0\> = mio.mio_boot.mio_boot_fifo.mem.
+ \<1\> = mio.mio_boot.mio_boot_rsp.mem.
+ \<2\> = mio.mio_boot.mio_boot_dma.mem.
+ \<3\> = mio.mio_nbt.mio_nbt_fifo.mem.
+ \<4\> = mio.mio_emm.bufs.mem.mem.
+ \<5\> = mio.mio_nbr.ncbo_crd_fif_mem0.
+ \<6\> = mio.csr.csr_fifo.
+ \<7\> = mio_boot_rom/mio_boot_rom1 (internal bootroms).
+ \<8\> = mio.mio_rsl.mio_pcc.gpi_pcc.
+ \<9\> = mio.mio_pbus.mio_pbus_fifo.mem.
+ \<10\> = mio.mio_pbus.mio_pbus_rsp.mem.
+ \<11\> = mio.mio_ndf.mio_ndf_bufs.ndf_buf.
+ \<12\> = mio.mio_ndf.ndf_ncb_rcv_fif.
+ \<13\> = mio.mio_pcm.mio_pcm_ctl.dmamem.
+ \<14\> = mio.mio_pcm.mio_pcm_ctl.maskmem.
+ \<15\> = mio.mio_nbt.mio_nbt_pfifo.mem. */
+#else /* Word 0 - Little Endian */
+ uint64_t stat : 16; /**< [ 15: 0](RO/H) BIST status.
+ Internal:
+ \<0\> = mio.mio_boot.mio_boot_fifo.mem.
+ \<1\> = mio.mio_boot.mio_boot_rsp.mem.
+ \<2\> = mio.mio_boot.mio_boot_dma.mem.
+ \<3\> = mio.mio_nbt.mio_nbt_fifo.mem.
+ \<4\> = mio.mio_emm.bufs.mem.mem.
+ \<5\> = mio.mio_nbr.ncbo_crd_fif_mem0.
+ \<6\> = mio.csr.csr_fifo.
+ \<7\> = mio_boot_rom/mio_boot_rom1 (internal bootroms).
+ \<8\> = mio.mio_rsl.mio_pcc.gpi_pcc.
+ \<9\> = mio.mio_pbus.mio_pbus_fifo.mem.
+ \<10\> = mio.mio_pbus.mio_pbus_rsp.mem.
+ \<11\> = mio.mio_ndf.mio_ndf_bufs.ndf_buf.
+ \<12\> = mio.mio_ndf.ndf_ncb_rcv_fif.
+ \<13\> = mio.mio_pcm.mio_pcm_ctl.dmamem.
+ \<14\> = mio.mio_pcm.mio_pcm_ctl.maskmem.
+ \<15\> = mio.mio_nbt.mio_nbt_pfifo.mem. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_mio_boot_bist_stat_s cn81xx; */
+ struct bdk_mio_boot_bist_stat_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t stat : 9; /**< [ 8: 0](RO/H) BIST status.
+ Internal:
+ \<0\> = mio.mio_boot.mio_boot_fifo.mem.
+ \<1\> = mio.mio_boot.mio_boot_rsp.mem.
+ \<2\> = mio.mio_boot.mio_boot_dma.mem.
+ \<3\> = mio.mio_nbt.mio_nbt_fifo.mem.
+ \<4\> = mio.mio_emm.bufs.mem.mem.
+ \<5\> = mio.mio_nbr.ncbo_crd_fif_mem0.
+ \<6\> = mio.csr.csr_fifo.
+ \<7\> = mio_boot_rom/mio_boot_rom1 (internal bootroms).
+ \<8\> = mio.mio_rsl.mio_pcc.gpi_pcc. */
+#else /* Word 0 - Little Endian */
+ uint64_t stat : 9; /**< [ 8: 0](RO/H) BIST status.
+ Internal:
+ \<0\> = mio.mio_boot.mio_boot_fifo.mem.
+ \<1\> = mio.mio_boot.mio_boot_rsp.mem.
+ \<2\> = mio.mio_boot.mio_boot_dma.mem.
+ \<3\> = mio.mio_nbt.mio_nbt_fifo.mem.
+ \<4\> = mio.mio_emm.bufs.mem.mem.
+ \<5\> = mio.mio_nbr.ncbo_crd_fif_mem0.
+ \<6\> = mio.csr.csr_fifo.
+ \<7\> = mio_boot_rom/mio_boot_rom1 (internal bootroms).
+ \<8\> = mio.mio_rsl.mio_pcc.gpi_pcc. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_mio_boot_bist_stat_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_13_63 : 51;
+ uint64_t stat : 13; /**< [ 12: 0](RO/H) BIST status.
+ Internal:
+ \<0\> = mio.mio_boot.mio_boot_fifo.mem.
+ \<1\> = mio.mio_boot.mio_boot_rsp.mem.
+ \<2\> = mio.mio_boot.mio_boot_dma.mem.
+ \<3\> = mio.mio_nbt.mio_nbt_fifo.mem.
+ \<4\> = mio.mio_emm.bufs.mem.mem.
+ \<5\> = mio.mio_nbr.ncbo_crd_fif_mem0.
+ \<6\> = mio.csr.csr_fifo.
+ \<7\> = mio_boot_rom/mio_boot_rom1 (internal bootroms).
+ \<8\> = mio.mio_rsl.mio_pcc.gpi_pcc.
+ \<9\> = mio.mio_pbus.mio_pbus_fifo.mem.
+ \<10\> = mio.mio_pbus.mio_pbus_rsp.mem.
+ \<11\> = mio.mio_ndf.mio_ndf_bufs.ndf_buf.
+ \<12\> = mio.mio_ndf.ndf_ncb_rcv_fif. */
+#else /* Word 0 - Little Endian */
+ uint64_t stat : 13; /**< [ 12: 0](RO/H) BIST status.
+ Internal:
+ \<0\> = mio.mio_boot.mio_boot_fifo.mem.
+ \<1\> = mio.mio_boot.mio_boot_rsp.mem.
+ \<2\> = mio.mio_boot.mio_boot_dma.mem.
+ \<3\> = mio.mio_nbt.mio_nbt_fifo.mem.
+ \<4\> = mio.mio_emm.bufs.mem.mem.
+ \<5\> = mio.mio_nbr.ncbo_crd_fif_mem0.
+ \<6\> = mio.csr.csr_fifo.
+ \<7\> = mio_boot_rom/mio_boot_rom1 (internal bootroms).
+ \<8\> = mio.mio_rsl.mio_pcc.gpi_pcc.
+ \<9\> = mio.mio_pbus.mio_pbus_fifo.mem.
+ \<10\> = mio.mio_pbus.mio_pbus_rsp.mem.
+ \<11\> = mio.mio_ndf.mio_ndf_bufs.ndf_buf.
+ \<12\> = mio.mio_ndf.ndf_ncb_rcv_fif. */
+ uint64_t reserved_13_63 : 51;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_mio_boot_bist_stat bdk_mio_boot_bist_stat_t;
+
+#define BDK_MIO_BOOT_BIST_STAT BDK_MIO_BOOT_BIST_STAT_FUNC()
+static inline uint64_t BDK_MIO_BOOT_BIST_STAT_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_BOOT_BIST_STAT_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x87e0000000f8ll;
+ __bdk_csr_fatal("MIO_BOOT_BIST_STAT", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_BOOT_BIST_STAT bdk_mio_boot_bist_stat_t
+#define bustype_BDK_MIO_BOOT_BIST_STAT BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_BOOT_BIST_STAT "MIO_BOOT_BIST_STAT"
+#define device_bar_BDK_MIO_BOOT_BIST_STAT 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_BOOT_BIST_STAT 0
+#define arguments_BDK_MIO_BOOT_BIST_STAT -1,-1,-1,-1
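
A minimal sketch of checking the pass/fail bits described above (0 = pass, 1 = fail), assuming
the BDK_CSR_READ helper and a printf-capable environment; the function name is illustrative:

/* Return nonzero if every MIO boot memory passed BIST. */
static int mio_boot_bist_ok(bdk_node_t node)
{
    uint64_t stat = BDK_CSR_READ(node, BDK_MIO_BOOT_BIST_STAT);
    if (stat)
        printf("MIO boot BIST failures: 0x%llx\n",
               (unsigned long long)stat);
    return stat == 0;
}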
+
+/**
+ * Register (RSL) mio_boot_pin_defs
+ *
+ * MIO Boot Pin Defaults Register
+ * This register reflects the value of some of the pins sampled
+ * at the rising edge of PLL_DCOK. The GPIO pins sampled at
+ * the same time are available in the GPIO_STRAP csr.
+ */
+union bdk_mio_boot_pin_defs
+{
+ uint64_t u;
+ struct bdk_mio_boot_pin_defs_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_18_63 : 46;
+ uint64_t uart1_rts : 1; /**< [ 17: 17](RO) State of UART1_RTS_N pin strap sampled when DCOK asserts. */
+ uint64_t uart0_rts : 1; /**< [ 16: 16](RO) State of UART0_RTS_N pin strap sampled when DCOK asserts. */
+ uint64_t reserved_11_15 : 5;
+ uint64_t rgm_supply : 2; /**< [ 10: 9](RO) RGMII power supply setting based on VDD_RGM_SUPPLY_SELECT pin:
+ 0x2 = 2.5V.
+ _ All other values reserved. */
+ uint64_t emm_supply : 3; /**< [ 8: 6](RO) EMMC power supply settings.
+ 0x4 = 3.3V.
+ _ All other values reserved. */
+ uint64_t smi_supply : 3; /**< [ 5: 3](RO) SMI power supply setting based on VDD_SMI_SUPPLY_SELECT pin:
+ 0x2 = 2.5V.
+ 0x4 = 3.3V.
+ _ All other values reserved. */
+ uint64_t io_supply : 3; /**< [ 2: 0](RO) I/O power supply setting based on VDD_IO_SUPPLY_SELECT pin:
+ 0x2 = 2.5V.
+ 0x4 = 3.3V.
+ _ All other values reserved. */
+#else /* Word 0 - Little Endian */
+ uint64_t io_supply : 3; /**< [ 2: 0](RO) I/O power supply setting based on VDD_IO_SUPPLY_SELECT pin:
+ 0x2 = 2.5V.
+ 0x4 = 3.3V.
+ _ All other values reserved. */
+ uint64_t smi_supply : 3; /**< [ 5: 3](RO) SMI power supply setting based on VDD_SMI_SUPPLY_SELECT pin:
+ 0x2 = 2.5V.
+ 0x4 = 3.3V.
+ _ All other values reserved. */
+ uint64_t emm_supply : 3; /**< [ 8: 6](RO) EMMC power supply settings.
+ 0x4 = 3.3V.
+ _ All other values reserved. */
+ uint64_t rgm_supply : 2; /**< [ 10: 9](RO) RGMII power supply setting based on VDD_RGM_SUPPLY_SELECT pin:
+ 0x2 = 2.5V.
+ _ All other values reserved. */
+ uint64_t reserved_11_15 : 5;
+ uint64_t uart0_rts : 1; /**< [ 16: 16](RO) State of UART0_RTS_N pin strap sampled when DCOK asserts. */
+ uint64_t uart1_rts : 1; /**< [ 17: 17](RO) State of UART1_RTS_N pin strap sampled when DCOK asserts. */
+ uint64_t reserved_18_63 : 46;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_mio_boot_pin_defs_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_18_63 : 46;
+ uint64_t uart1_rts : 1; /**< [ 17: 17](RO) State of UART1_RTS_N pin strap sampled when DCOK asserts. */
+ uint64_t uart0_rts : 1; /**< [ 16: 16](RO) Reference clock select. State of UART0_RTS_N pin sampled when DCOK asserts.
+ 0 = Board supplies 100 MHz to DLM_REF_CLK\<1\> (divided by 2 internally).
+ 1 = Board supplies 50 MHz to PLL_REFCLK. */
+ uint64_t reserved_11_15 : 5;
+ uint64_t rgm_supply : 2; /**< [ 10: 9](RO) RGMII power supply setting based on VDD_RGM_SUPPLY_SELECT pin:
+ 0x2 = 2.5V.
+ _ All other values reserved. */
+ uint64_t emm_supply : 3; /**< [ 8: 6](RO) EMMC power supply settings.
+ 0x4 = 3.3V.
+ _ All other values reserved. */
+ uint64_t smi_supply : 3; /**< [ 5: 3](RO) SMI power supply setting based on VDD_SMI_SUPPLY_SELECT pin:
+ 0x2 = 2.5V.
+ 0x4 = 3.3V.
+ _ All other values reserved. */
+ uint64_t io_supply : 3; /**< [ 2: 0](RO) I/O power supply setting based on VDD_IO_SUPPLY_SELECT pin:
+ 0x2 = 2.5V.
+ 0x4 = 3.3V.
+ _ All other values reserved. */
+#else /* Word 0 - Little Endian */
+ uint64_t io_supply : 3; /**< [ 2: 0](RO) I/O power supply setting based on VDD_IO_SUPPLY_SELECT pin:
+ 0x2 = 2.5V.
+ 0x4 = 3.3V.
+ _ All other values reserved. */
+ uint64_t smi_supply : 3; /**< [ 5: 3](RO) SMI power supply setting based on VDD_SMI_SUPPLY_SELECT pin:
+ 0x2 = 2.5V.
+ 0x4 = 3.3V.
+ _ All other values reserved. */
+ uint64_t emm_supply : 3; /**< [ 8: 6](RO) EMMC power supply settings.
+ 0x4 = 3.3V.
+ _ All other values reserved. */
+ uint64_t rgm_supply : 2; /**< [ 10: 9](RO) RGMII power supply setting based on VDD_RGM_SUPPLY_SELECT pin:
+ 0x2 = 2.5V.
+ _ All other values reserved. */
+ uint64_t reserved_11_15 : 5;
+ uint64_t uart0_rts : 1; /**< [ 16: 16](RO) Reference clock select. State of UART0_RTS_N pin sampled when DCOK asserts.
+ 0 = Board supplies 100 MHz to DLM_REF_CLK\<1\> (divided by 2 internally).
+ 1 = Board supplies 50 MHz to PLL_REFCLK. */
+ uint64_t uart1_rts : 1; /**< [ 17: 17](RO) State of UART1_RTS_N pin strap sampled when DCOK asserts. */
+ uint64_t reserved_18_63 : 46;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_mio_boot_pin_defs_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_6_63 : 58;
+ uint64_t smi_supply : 3; /**< [ 5: 3](RO) SMI power supply setting based on VDD_SMI_SUPPLY_SELECT pin:
+ 0x2 = 2.5V.
+ 0x4 = 3.3V.
+ _ All other values reserved. */
+ uint64_t io_supply : 3; /**< [ 2: 0](RO) I/O power supply setting based on VDD_IO_SUPPLY_SELECT pin:
+ 0x2 = 2.5V.
+ 0x4 = 3.3V.
+ _ All other values reserved. */
+#else /* Word 0 - Little Endian */
+ uint64_t io_supply : 3; /**< [ 2: 0](RO) I/O power supply setting based on VDD_IO_SUPPLY_SELECT pin:
+ 0x2 = 2.5V.
+ 0x4 = 3.3V.
+ _ All other values reserved. */
+ uint64_t smi_supply : 3; /**< [ 5: 3](RO) SMI power supply setting based on VDD_SMI_SUPPLY_SELECT pin:
+ 0x2 = 2.5V.
+ 0x4 = 3.3V.
+ _ All other values reserved. */
+ uint64_t reserved_6_63 : 58;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_mio_boot_pin_defs_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_18_63 : 46;
+ uint64_t uart1_rts : 1; /**< [ 17: 17](RO) State of UART1_RTS_N pin strap sampled when DCOK asserts. */
+ uint64_t uart0_rts : 1; /**< [ 16: 16](RO) State of UART0_RTS_N pin strap sampled when DCOK asserts. */
+ uint64_t reserved_9_15 : 7;
+ uint64_t emm_supply : 3; /**< [ 8: 6](RO) EMMC power supply settings.
+ 0x4 = 3.3V.
+ _ All other values reserved. */
+ uint64_t smi_supply : 3; /**< [ 5: 3](RO) SMI power supply setting based on VDD_SMI_SUPPLY_SELECT pin:
+ 0x2 = 2.5V.
+ 0x4 = 3.3V.
+ _ All other values reserved. */
+ uint64_t io_supply : 3; /**< [ 2: 0](RO) I/O power supply setting based on VDD_IO_SUPPLY_SELECT pin:
+ 0x2 = 2.5V.
+ 0x4 = 3.3V.
+ _ All other values reserved. */
+#else /* Word 0 - Little Endian */
+ uint64_t io_supply : 3; /**< [ 2: 0](RO) I/O power supply setting based on VDD_IO_SUPPLY_SELECT pin:
+ 0x2 = 2.5V.
+ 0x4 = 3.3V.
+ _ All other values reserved. */
+ uint64_t smi_supply : 3; /**< [ 5: 3](RO) SMI power supply setting based on VDD_SMI_SUPPLY_SELECT pin:
+ 0x2 = 2.5V.
+ 0x4 = 3.3V.
+ _ All other values reserved. */
+ uint64_t emm_supply : 3; /**< [ 8: 6](RO) EMMC power supply settings.
+ 0x4 = 3.3V.
+ _ All other values reserved. */
+ uint64_t reserved_9_15 : 7;
+ uint64_t uart0_rts : 1; /**< [ 16: 16](RO) State of UART0_RTS_N pin strap sampled when DCOK asserts. */
+ uint64_t uart1_rts : 1; /**< [ 17: 17](RO) State of UART1_RTS_N pin strap sampled when DCOK asserts. */
+ uint64_t reserved_18_63 : 46;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_mio_boot_pin_defs bdk_mio_boot_pin_defs_t;
+
+#define BDK_MIO_BOOT_PIN_DEFS BDK_MIO_BOOT_PIN_DEFS_FUNC()
+static inline uint64_t BDK_MIO_BOOT_PIN_DEFS_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_BOOT_PIN_DEFS_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x87e0000000c0ll;
+ __bdk_csr_fatal("MIO_BOOT_PIN_DEFS", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_BOOT_PIN_DEFS bdk_mio_boot_pin_defs_t
+#define bustype_BDK_MIO_BOOT_PIN_DEFS BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_BOOT_PIN_DEFS "MIO_BOOT_PIN_DEFS"
+#define device_bar_BDK_MIO_BOOT_PIN_DEFS 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_BOOT_PIN_DEFS 0
+#define arguments_BDK_MIO_BOOT_PIN_DEFS -1,-1,-1,-1
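
A minimal sketch of decoding one of the straps described above, assuming the BDK_CSR_READ
helper; only the 0x2 (2.5V) and 0x4 (3.3V) encodings are defined, everything else is reserved:

/* Translate the sampled I/O supply strap into a printable string. */
static const char *io_supply_str(bdk_node_t node)
{
    bdk_mio_boot_pin_defs_t pins;
    pins.u = BDK_CSR_READ(node, BDK_MIO_BOOT_PIN_DEFS);
    switch (pins.s.io_supply)
    {
        case 0x2: return "2.5V";
        case 0x4: return "3.3V";
        default:  return "reserved";
    }
}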
+
+/**
+ * Register (RSL) mio_boot_rom_limit
+ *
+ * MIO Boot ROM Limit Register
+ * This register contains the largest valid address in the internal bootrom.
+ */
+union bdk_mio_boot_rom_limit
+{
+ uint64_t u;
+ struct bdk_mio_boot_rom_limit_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_17_63 : 47;
+ uint64_t addr : 17; /**< [ 16: 0](SR/W/H) End of ROM address. This field specifies the first invalid address in ROM_MEM();
+ access to a ROM_MEM() address at or above this address will return a fault and zeros.
+ Writes to this register which attempt to set an [ADDR] greater than the previous [ADDR]
+ setting are ignored.
+
+ Internal:
+ If trusted boot mode, resets to the size of the internal
+ bootrom (0x4000), ROM boot instructions may then write to decrease the value. If
+ non-trusted boot resets to a value read from ROM_MEM() at address MAX - 12, just
+ before the CRC. */
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 17; /**< [ 16: 0](SR/W/H) End of ROM address. This field specifies the first invalid address in ROM_MEM();
+ access to a ROM_MEM() address at or above this address will return a fault and zeros.
+ Writes to this register which attempt to set an [ADDR] greater than the previous [ADDR]
+ setting are ignored.
+
+ Internal:
+ If trusted boot mode, resets to the size of the internal
+ bootrom (0x4000), ROM boot instructions may then write to decrease the value. If
+ non-trusted boot resets to a value read from ROM_MEM() at address MAX - 12, just
+ before the CRC. */
+ uint64_t reserved_17_63 : 47;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_mio_boot_rom_limit_s cn; */
+};
+typedef union bdk_mio_boot_rom_limit bdk_mio_boot_rom_limit_t;
+
+#define BDK_MIO_BOOT_ROM_LIMIT BDK_MIO_BOOT_ROM_LIMIT_FUNC()
+static inline uint64_t BDK_MIO_BOOT_ROM_LIMIT_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_BOOT_ROM_LIMIT_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x87e0000000d8ll;
+ __bdk_csr_fatal("MIO_BOOT_ROM_LIMIT", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_BOOT_ROM_LIMIT bdk_mio_boot_rom_limit_t
+#define bustype_BDK_MIO_BOOT_ROM_LIMIT BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_BOOT_ROM_LIMIT "MIO_BOOT_ROM_LIMIT"
+#define device_bar_BDK_MIO_BOOT_ROM_LIMIT 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_BOOT_ROM_LIMIT 0
+#define arguments_BDK_MIO_BOOT_ROM_LIMIT -1,-1,-1,-1
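
A minimal sketch, assuming the BDK_CSR_WRITE helper: because hardware ignores writes that would
raise [ADDR], this can only shrink the visible bootrom window, never grow it.

/* Lower the first-invalid-address limit of the internal bootrom. */
static void rom_limit_shrink(bdk_node_t node, unsigned new_limit)
{
    bdk_mio_boot_rom_limit_t lim;
    lim.u = 0;
    lim.s.addr = new_limit;
    BDK_CSR_WRITE(node, BDK_MIO_BOOT_ROM_LIMIT, lim.u);
}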
+
+/**
+ * Register (RSL) mio_boot_thr
+ *
+ * MIO Boot Threshold Register
+ * The boot-threshold register contains MIO boot-threshold values.
+ */
+union bdk_mio_boot_thr
+{
+ uint64_t u;
+ struct bdk_mio_boot_thr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t fif_cnt : 6; /**< [ 13: 8](RO/H) Current FIFO count. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t fif_thr : 6; /**< [ 5: 0](R/W) NCB busy threshold. Should always read 0x19 (the only legal value). */
+#else /* Word 0 - Little Endian */
+ uint64_t fif_thr : 6; /**< [ 5: 0](R/W) NCB busy threshold. Should always read 0x19 (the only legal value). */
+ uint64_t reserved_6_7 : 2;
+ uint64_t fif_cnt : 6; /**< [ 13: 8](RO/H) Current FIFO count. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_mio_boot_thr_s cn81xx; */
+ struct bdk_mio_boot_thr_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t fif_cnt : 6; /**< [ 13: 8](RO/H) Current NCB FIFO count. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t fif_thr : 6; /**< [ 5: 0](R/W) NCB busy threshold. Should always read 0x19 (the only legal value). */
+#else /* Word 0 - Little Endian */
+ uint64_t fif_thr : 6; /**< [ 5: 0](R/W) NCB busy threshold. Should always read 0x19 (the only legal value). */
+ uint64_t reserved_6_7 : 2;
+ uint64_t fif_cnt : 6; /**< [ 13: 8](RO/H) Current NCB FIFO count. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_mio_boot_thr_s cn83xx; */
+};
+typedef union bdk_mio_boot_thr bdk_mio_boot_thr_t;
+
+#define BDK_MIO_BOOT_THR BDK_MIO_BOOT_THR_FUNC()
+static inline uint64_t BDK_MIO_BOOT_THR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_BOOT_THR_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x87e0000000b0ll;
+ __bdk_csr_fatal("MIO_BOOT_THR", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_BOOT_THR bdk_mio_boot_thr_t
+#define bustype_BDK_MIO_BOOT_THR BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_BOOT_THR "MIO_BOOT_THR"
+#define device_bar_BDK_MIO_BOOT_THR 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_BOOT_THR 0
+#define arguments_BDK_MIO_BOOT_THR -1,-1,-1,-1
+
+#endif /* __BDK_CSRS_MIO_BOOT_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-mio_fus.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-mio_fus.h
new file mode 100644
index 0000000000..387a3937dd
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-mio_fus.h
@@ -0,0 +1,2365 @@
+#ifndef __BDK_CSRS_MIO_FUS_H__
+#define __BDK_CSRS_MIO_FUS_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium MIO_FUS.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Enumeration mio_fus_bar_e
+ *
+ * MIO FUS Base Address Register Enumeration
+ * Enumerates the base address registers.
+ */
+#define BDK_MIO_FUS_BAR_E_MIO_FUS_PF_BAR0 (0x87e003000000ll)
+#define BDK_MIO_FUS_BAR_E_MIO_FUS_PF_BAR0_SIZE 0x800000ull
+
+/**
+ * Enumeration mio_fus_fuse_num_e
+ *
+ * INTERNAL: MIO FUS Fuse Number Enumeration
+ *
+ * Enumerates the fuse numbers.
+ */
+#define BDK_MIO_FUS_FUSE_NUM_E_BAR2_SZ_CONF (0x54)
+#define BDK_MIO_FUS_FUSE_NUM_E_BGX2_DIS (0xe5)
+#define BDK_MIO_FUS_FUSE_NUM_E_BGX3_DIS (0xe6)
+#define BDK_MIO_FUS_FUSE_NUM_E_BGX_DISX(a) (0x6c + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_CHIP_IDX(a) (0x40 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_CMB_RCLK_BYP_SELECT (0x266)
+#define BDK_MIO_FUS_FUSE_NUM_E_CMB_RCLK_BYP_SETTINGX(a) (0x25a + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_CORE_PLL_MULX(a) (0x7b + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(a) (0x684 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(a) (0x6b4 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(a) (0x680 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_DDF_DIS (0xe4)
+#define BDK_MIO_FUS_FUSE_NUM_E_DESX(a) (0x3c0 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_DFA_INFO_CLMX(a) (0x5e + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_DFA_INFO_DTEX(a) (0x5b + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_DORM_CRYPTO (0x67)
+#define BDK_MIO_FUS_FUSE_NUM_E_EAST_RCLK_BYP_SELECT (0x273)
+#define BDK_MIO_FUS_FUSE_NUM_E_EAST_RCLK_BYP_SETTINGX(a) (0x267 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_EFUS_IGN (0x53)
+#define BDK_MIO_FUS_FUSE_NUM_E_EFUS_LCK_DES (0x6a)
+#define BDK_MIO_FUS_FUSE_NUM_E_EFUS_LCK_MAN (0x69)
+#define BDK_MIO_FUS_FUSE_NUM_E_EFUS_LCK_PRD (0x68)
+#define BDK_MIO_FUS_FUSE_NUM_E_EMA0X(a) (0x80 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_EMA1X(a) (0x8e + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_GBL_PWR_THROTTLEX(a) (0xaf + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_GLOBAL_RCLK_BYP_SELECT (0xa0)
+#define BDK_MIO_FUS_FUSE_NUM_E_GLOBAL_RCLK_BYP_SETTINGX(a) (0x94 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_GLOBAL_SCLK_BYP_SELECT (0xe2)
+#define BDK_MIO_FUS_FUSE_NUM_E_GLOBAL_SCLK_BYP_SETTINGX(a) (0xd6 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_GSERX(a) (0x400 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_L2C_CRIPX(a) (0x57 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_LMC_DIS (0x76)
+#define BDK_MIO_FUS_FUSE_NUM_E_LMC_HALF (0x4b)
+#define BDK_MIO_FUS_FUSE_NUM_E_MFG0X(a) (0x280 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_MFG1X(a) (0x2c0 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_MFG2X(a) (0x600 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_NOCRYPTO (0x50)
+#define BDK_MIO_FUS_FUSE_NUM_E_NODFA_CP2 (0x5a)
+#define BDK_MIO_FUS_FUSE_NUM_E_NOMUL (0x51)
+#define BDK_MIO_FUS_FUSE_NUM_E_NOZIP (0x52)
+#define BDK_MIO_FUS_FUSE_NUM_E_OCX_DIS (0x6b)
+#define BDK_MIO_FUS_FUSE_NUM_E_PCI_NITROX (0xe7)
+#define BDK_MIO_FUS_FUSE_NUM_E_PDFX(a) (0x340 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_PEM_DISX(a) (0x72 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_PLL_ALT_MATRIX (0x4a)
+#define BDK_MIO_FUS_FUSE_NUM_E_PLL_BWADJ_DENOMX(a) (0x48 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_PLL_HALF_DIS (0x75)
+#define BDK_MIO_FUS_FUSE_NUM_E_PNAMEX(a) (0x580 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_PNR_PLL_MULX(a) (0x77 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_POWER_LIMITX(a) (0x64 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_PP_AVAILABLEX(a) (0 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_PP_RCLK_BYP_SELECT (0x259)
+#define BDK_MIO_FUS_FUSE_NUM_E_PP_RCLK_BYP_SETTINGX(a) (0x24d + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_RAID_DIS (0x66)
+#define BDK_MIO_FUS_FUSE_NUM_E_REDUNDANTX(a) (0x780 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_REFCLK_CHECK_CN81XX (0xc0)
+#define BDK_MIO_FUS_FUSE_NUM_E_REFCLK_CHECK_CN83XX (0xc0)
+#define BDK_MIO_FUS_FUSE_NUM_E_REFCLK_CHECK_CN88XX (0xc2)
+#define BDK_MIO_FUS_FUSE_NUM_E_REPAIRX(a) (0x800 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_ROC_RCLK_BYP_SELECT (0xae)
+#define BDK_MIO_FUS_FUSE_NUM_E_ROC_RCLK_BYP_SETTINGX(a) (0xa2 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_ROC_SCLK_BYP_SELECT (0xd5)
+#define BDK_MIO_FUS_FUSE_NUM_E_ROC_SCLK_BYP_SETTINGX(a) (0xc9 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_ROM_INFOX(a) (0x276 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_RSVDX(a) (0xc6 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_RSVD134X(a) (0x86 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_RSVD1600X(a) (0x640 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_RSVD1737X(a) (0x6c9 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_RSVD1764X(a) (0x6e4 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_RSVD183X(a) (0xb7 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_RSVD189X(a) (0xbd + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_RSVD193X(a) (0xc1 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_RSVD228X(a) (0xe4 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_RSVD231X(a) (0xe7 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_RSVD232X(a) (0xe8 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_RSVD3056X(a) (0xbf0 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_RSVD570X(a) (0x23a + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_RSVD628X(a) (0x274 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_RSVD76X(a) (0x4c + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_RSVD91X(a) (0x5b + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_RSVD94X(a) (0x5e + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_RSVD98 (0x62)
+#define BDK_MIO_FUS_FUSE_NUM_E_RSVD99 (0x63)
+#define BDK_MIO_FUS_FUSE_NUM_E_RUN_PLATFORMX(a) (0xc3 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_SATA_DISX(a) (0x6e + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_SERIALX(a) (0x380 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_TAD_RCLK_BYP_SELECT (0x24c)
+#define BDK_MIO_FUS_FUSE_NUM_E_TAD_RCLK_BYP_SETTINGX(a) (0x240 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_TGGX(a) (0x300 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_TIM_DIS (0xe3)
+#define BDK_MIO_FUS_FUSE_NUM_E_TNS_CRIPPLE (0xa1)
+#define BDK_MIO_FUS_FUSE_NUM_E_TSENSE0X(a) (0x100 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_TSENSE1X(a) (0x680 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_TZ_MODE (0x4f)
+#define BDK_MIO_FUS_FUSE_NUM_E_USE_INT_REFCLK (0x4e)
+#define BDK_MIO_FUS_FUSE_NUM_E_VRMX(a) (0x200 + (a))
+#define BDK_MIO_FUS_FUSE_NUM_E_ZIP_INFOX(a) (0x55 + (a))
+
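+/* A minimal usage sketch for the indexed fuse-number macros above: each
+ * define resolves to a flat fuse index, and the X-suffixed variants add
+ * their argument to a base offset. Reading the eight CHIP_ID fuses one bit
+ * at a time could look like this; bdk_fuse_read(node, fuse), the BDK's
+ * single-fuse reader, is assumed here and is not defined in this file. */
+static inline int example_read_chip_id_fuses(bdk_node_t node)
+{
+    int chip_id = 0;
+    for (int bit = 0; bit < 8; bit++)
+        chip_id |= bdk_fuse_read(node, BDK_MIO_FUS_FUSE_NUM_E_CHIP_IDX(bit)) << bit;
+    return chip_id;
+}
+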
+/**
+ * Register (RSL) mio_fus_bnk_dat#
+ *
+ * MIO Fuse Bank Store Register
+ * The initial state of MIO_FUS_BNK_DAT(0..1) is as if bank6 were just read,
+ * i.e. DAT* = fus[895:768].
+ */
+union bdk_mio_fus_bnk_datx
+{
+ uint64_t u;
+ struct bdk_mio_fus_bnk_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t dat : 64; /**< [ 63: 0](R/W/H) Efuse bank store. For read operations, the DAT returns the fuse bank last read. For write
+ operations, the DAT determines which fuses to blow. */
+#else /* Word 0 - Little Endian */
+ uint64_t dat : 64; /**< [ 63: 0](R/W/H) Efuse bank store. For read operations, the DAT returns the fuse bank last read. For write
+ operations, the DAT determines which fuses to blow. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_mio_fus_bnk_datx_s cn; */
+};
+typedef union bdk_mio_fus_bnk_datx bdk_mio_fus_bnk_datx_t;
+
+static inline uint64_t BDK_MIO_FUS_BNK_DATX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_FUS_BNK_DATX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX) && (a<=1))
+ return 0x87e003001520ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("MIO_FUS_BNK_DATX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_FUS_BNK_DATX(a) bdk_mio_fus_bnk_datx_t
+#define bustype_BDK_MIO_FUS_BNK_DATX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_FUS_BNK_DATX(a) "MIO_FUS_BNK_DATX"
+#define device_bar_BDK_MIO_FUS_BNK_DATX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_FUS_BNK_DATX(a) (a)
+#define arguments_BDK_MIO_FUS_BNK_DATX(a) (a),-1,-1,-1
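+
+/* A short usage sketch: the typedef_/bustype_/busnum_ defines above exist
+ * so the generic accessor macros in bdk-csr.h (BDK_CSR_INIT and friends,
+ * assumed here) can expand into a typed read. Dumping both bank-store
+ * words might look like this. */
+static inline void example_dump_bnk_dat(bdk_node_t node)
+{
+    for (unsigned long i = 0; i <= 1; i++)
+    {
+        BDK_CSR_INIT(bnk, node, BDK_MIO_FUS_BNK_DATX(i));
+        printf("MIO_FUS_BNK_DAT%lu = 0x%016llx\n", i, (unsigned long long)bnk.s.dat);
+    }
+}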
+
+/**
+ * Register (RSL) mio_fus_dat0
+ *
+ * MIO Fuse Data Register 0
+ */
+union bdk_mio_fus_dat0
+{
+ uint64_t u;
+ struct bdk_mio_fus_dat0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t man_info : 32; /**< [ 31: 0](RO) Fuse information - manufacturing info [31:0]. */
+#else /* Word 0 - Little Endian */
+ uint64_t man_info : 32; /**< [ 31: 0](RO) Fuse information - manufacturing info [31:0]. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_mio_fus_dat0_s cn; */
+};
+typedef union bdk_mio_fus_dat0 bdk_mio_fus_dat0_t;
+
+#define BDK_MIO_FUS_DAT0 BDK_MIO_FUS_DAT0_FUNC()
+static inline uint64_t BDK_MIO_FUS_DAT0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_FUS_DAT0_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x87e003001400ll;
+ __bdk_csr_fatal("MIO_FUS_DAT0", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_FUS_DAT0 bdk_mio_fus_dat0_t
+#define bustype_BDK_MIO_FUS_DAT0 BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_FUS_DAT0 "MIO_FUS_DAT0"
+#define device_bar_BDK_MIO_FUS_DAT0 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_FUS_DAT0 0
+#define arguments_BDK_MIO_FUS_DAT0 -1,-1,-1,-1
+
+/**
+ * Register (RSL) mio_fus_dat1
+ *
+ * MIO Fuse Data Register 1
+ */
+union bdk_mio_fus_dat1
+{
+ uint64_t u;
+ struct bdk_mio_fus_dat1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t man_info : 32; /**< [ 31: 0](RO) Fuse information - manufacturing info [63:32]. */
+#else /* Word 0 - Little Endian */
+ uint64_t man_info : 32; /**< [ 31: 0](RO) Fuse information - manufacturing info [63:32]. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_mio_fus_dat1_s cn; */
+};
+typedef union bdk_mio_fus_dat1 bdk_mio_fus_dat1_t;
+
+#define BDK_MIO_FUS_DAT1 BDK_MIO_FUS_DAT1_FUNC()
+static inline uint64_t BDK_MIO_FUS_DAT1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_FUS_DAT1_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x87e003001408ll;
+ __bdk_csr_fatal("MIO_FUS_DAT1", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_FUS_DAT1 bdk_mio_fus_dat1_t
+#define bustype_BDK_MIO_FUS_DAT1 BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_FUS_DAT1 "MIO_FUS_DAT1"
+#define device_bar_BDK_MIO_FUS_DAT1 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_FUS_DAT1 0
+#define arguments_BDK_MIO_FUS_DAT1 -1,-1,-1,-1
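+
+/* MIO_FUS_DAT0 and MIO_FUS_DAT1 each carry half of the 64-bit
+ * manufacturing info ([31:0] and [63:32] respectively). A sketch of
+ * reassembling the full value, again assuming the BDK_CSR_INIT accessor
+ * macro from bdk-csr.h. */
+static inline uint64_t example_man_info(bdk_node_t node)
+{
+    BDK_CSR_INIT(dat0, node, BDK_MIO_FUS_DAT0);
+    BDK_CSR_INIT(dat1, node, BDK_MIO_FUS_DAT1);
+    return ((uint64_t)dat1.s.man_info << 32) | dat0.s.man_info;
+}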
+
+/**
+ * Register (RSL) mio_fus_dat2
+ *
+ * MIO Fuse Data Register 2
+ */
+union bdk_mio_fus_dat2
+{
+ uint64_t u;
+ struct bdk_mio_fus_dat2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_59_63 : 5;
+ uint64_t run_platform : 3; /**< [ 58: 56](RO) Fuses to indicate the run platform. Not to be blown in actual hardware.
+ Provides software a means of determining the platform at run time.
+ 0x0 = Hardware.
+ 0x1 = Emulator.
+ 0x2 = RTL simulator.
+ 0x3 = ASIM.
+ 0x4-0x7 = reserved. */
+ uint64_t gbl_pwr_throttle : 8; /**< [ 55: 48](RO) Controls global power throttling. MSB is a spare, and lower 7 bits indicate
+ N/128 power reduction. Small values have less throttling and higher
+ performance. 0x0 disables throttling. */
+ uint64_t fus118 : 1; /**< [ 47: 47](RO) Reserved.
+ Internal:
+ fuse[99]. Fuse information - Ignore trusted-mode disable. */
+ uint64_t rom_info : 10; /**< [ 46: 37](RO) Fuse information - ROM info. */
+ uint64_t power_limit : 2; /**< [ 36: 35](RO) Reserved.
+ Internal:
+ Fuse information - Power limit. */
+ uint64_t dorm_crypto : 1; /**< [ 34: 34](RO) Fuse information - Dormant encryption enable. See NOCRYPTO. */
+ uint64_t fus318 : 1; /**< [ 33: 33](RO) Reserved.
+ Internal:
+ Tied to 0. */
+ uint64_t raid_en : 1; /**< [ 32: 32](RO) Fuse information - RAID enabled. */
+ uint64_t reserved_31 : 1;
+ uint64_t lmc_mode32 : 1; /**< [ 30: 30](RO) DRAM controller is limited to 32/36 bit wide parts.
+ Internal:
+ 30 = fuse[75]. */
+ uint64_t reserved_29 : 1;
+ uint64_t nodfa_cp2 : 1; /**< [ 28: 28](RO) Fuse information - HFA disable (CP2). */
+ uint64_t nomul : 1; /**< [ 27: 27](RO) Fuse information - VMUL disable. */
+ uint64_t nocrypto : 1; /**< [ 26: 26](RO) Fuse information - [DORM_CRYPTO] and [NOCRYPTO] together select the crypto mode:
+
+ _ [DORM_CRYPTO] = 0, [NOCRYPTO] = 0: AES/SHA/PMULL enabled.
+
+ _ [DORM_CRYPTO] = 0, [NOCRYPTO] = 1: The AES, SHA, and PMULL 1D/2D instructions will
+ cause undefined exceptions, and AP_ID_AA64ISAR0_EL1[AES, SHA1, SHA2] are zero
+ indicating this behavior.
+
+ _ [DORM_CRYPTO] = 1, [NOCRYPTO] = 0: Dormant encryption enable. AES/SHA/PMULL are
+ disabled (as if [NOCRYPTO] = 1) until the appropriate key is written to
+ RNM_EER_KEY, then they are enabled (as if [NOCRYPTO] = 1).
+
+ _ [DORM_CRYPTO] = 1, [NOCRYPTO] = 1: Reserved. */
+ uint64_t trustzone_en : 1; /**< [ 25: 25](RO) Fuse information - TrustZone enable. */
+ uint64_t reserved_24 : 1;
+ uint64_t chip_id : 8; /**< [ 23: 16](RO) Chip revision identifier.
+ \<23:22\> = Alternate package.
+ 0x0 = CN81xx-xxxx-BG676.
+ 0x1 = CN80xx-xxxx-BG555.
+ 0x2 = CN80xx-xxxx-BG676.
+ 0x3 = Reserved.
+
+ \<21:19\> = Major revision.
+
+ \<18:16\> = Minor revision.
+
+ For example:
+ \<pre\>
+ \<21:19\> \<18:16\> Description
+ ------- ------- -----------
+ 0x0 0x0 Pass 1.0.
+ 0x0 0x1 Pass 1.1.
+ 0x0 0x2 Pass 1.2.
+ 0x1 0x0 Pass 2.0.
+ 0x1 0x1 Pass 2.1.
+ 0x1 0x2 Pass 2.2.
+ ... ... ...
+ 0x7 0x7 Pass 8.8.
+ \</pre\> */
+ uint64_t ocx_dis : 1; /**< [ 15: 15](RO) Fuse information - OCX disable. */
+ uint64_t bgx_dis : 2; /**< [ 14: 13](RO) Fuse information - BGX disable:
+ \<13\> = BGX0 disable.
+ \<14\> = BGX1 disable. */
+ uint64_t sata_dis : 4; /**< [ 12: 9](RO) Fuse information - SATA disable:
+ \<9\> = SATA0-3 disable.
+ \<10\> = SATA4-7 disable.
+ \<11\> = SATA8-11 disable.
+ \<12\> = SATA12-15 disable. */
+ uint64_t pem_dis : 3; /**< [ 8: 6](RO) Fuse information - PEM disable:
+ \<6\> = PEM0-1 disable.
+ \<7\> = PEM2-3 disable.
+ \<8\> = PEM4-5 disable. */
+ uint64_t lmc_half : 1; /**< [ 5: 5](RO) Fuse information - LMC uses two channels rather than four. */
+ uint64_t tim_dis : 1; /**< [ 4: 4](RO) Fuse information - TIM disable. */
+ uint64_t bgx3_dis : 1; /**< [ 3: 3](RO) Fuse information - BGX3 disable. */
+ uint64_t bgx2_dis : 1; /**< [ 2: 2](RO) Fuse information - BGX2 disable. */
+ uint64_t ddf_dis : 1; /**< [ 1: 1](RO) Fuse information - DDF disable. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t ddf_dis : 1; /**< [ 1: 1](RO) Fuse information - DDF disable. */
+ uint64_t bgx2_dis : 1; /**< [ 2: 2](RO) Fuse information - BGX2 disable. */
+ uint64_t bgx3_dis : 1; /**< [ 3: 3](RO) Fuse information - BGX3 disable. */
+ uint64_t tim_dis : 1; /**< [ 4: 4](RO) Fuse information - TIM disable. */
+ uint64_t lmc_half : 1; /**< [ 5: 5](RO) Fuse information - LMC uses two channels rather than four. */
+ uint64_t pem_dis : 3; /**< [ 8: 6](RO) Fuse information - PEM disable:
+ \<6\> = PEM0-1 disable.
+ \<7\> = PEM2-3 disable.
+ \<8\> = PEM4-5 disable. */
+ uint64_t sata_dis : 4; /**< [ 12: 9](RO) Fuse information - SATA disable:
+ \<9\> = SATA0-3 disable.
+ \<10\> = SATA4-7 disable.
+ \<11\> = SATA8-11 disable.
+ \<12\> = SATA12-15 disable. */
+ uint64_t bgx_dis : 2; /**< [ 14: 13](RO) Fuse information - BGX disable:
+ \<13\> = BGX0 disable.
+ \<14\> = BGX1 disable. */
+ uint64_t ocx_dis : 1; /**< [ 15: 15](RO) Fuse information - OCX disable. */
+ uint64_t chip_id : 8; /**< [ 23: 16](RO) Chip revision identifier.
+ \<23:22\> = Alternate package.
+ 0x0 = CN81xx-xxxx-BG676.
+ 0x1 = CN80xx-xxxx-BG555.
+ 0x2 = CN80xx-xxxx-BG676.
+ 0x3 = Reserved.
+
+ \<21:19\> = Major revision.
+
+ \<18:16\> = Minor revision.
+
+ For example:
+ \<pre\>
+ \<21:19\> \<18:16\> Description
+ ------- ------- -----------
+ 0x0 0x0 Pass 1.0.
+ 0x0 0x1 Pass 1.1.
+ 0x0 0x2 Pass 1.2.
+ 0x1 0x0 Pass 2.0.
+ 0x1 0x1 Pass 2.1.
+ 0x1 0x2 Pass 2.2.
+ ... ... ...
+ 0x7 0x7 Pass 8.8.
+ \</pre\> */
+ uint64_t reserved_24 : 1;
+ uint64_t trustzone_en : 1; /**< [ 25: 25](RO) Fuse information - TrustZone enable. */
+ uint64_t nocrypto : 1; /**< [ 26: 26](RO) Fuse information - [DORM_CRYPTO] and [NOCRYPTO] together select the crypto mode:
+
+ _ [DORM_CRYPTO] = 0, [NOCRYPTO] = 0: AES/SHA/PMULL enabled.
+
+ _ [DORM_CRYPTO] = 0, [NOCRYPTO] = 1: The AES, SHA, and PMULL 1D/2D instructions will
+ cause undefined exceptions, and AP_ID_AA64ISAR0_EL1[AES, SHA1, SHA2] are zero
+ indicating this behavior.
+
+ _ [DORM_CRYPTO] = 1, [NOCRYPTO] = 0: Dormant encryption enable. AES/SHA/PMULL are
+ disabled (as if [NOCRYPTO] = 1) until the appropriate key is written to
+ RNM_EER_KEY, then they are enabled (as if [NOCRYPTO] = 1).
+
+ _ [DORM_CRYPTO] = 1, [NOCRYPTO] = 1: Reserved. */
+ uint64_t nomul : 1; /**< [ 27: 27](RO) Fuse information - VMUL disable. */
+ uint64_t nodfa_cp2 : 1; /**< [ 28: 28](RO) Fuse information - HFA disable (CP2). */
+ uint64_t reserved_29 : 1;
+ uint64_t lmc_mode32 : 1; /**< [ 30: 30](RO) DRAM controller is limited to 32/36 bit wide parts.
+ Internal:
+ 30 = fuse[75]. */
+ uint64_t reserved_31 : 1;
+ uint64_t raid_en : 1; /**< [ 32: 32](RO) Fuse information - RAID enabled. */
+ uint64_t fus318 : 1; /**< [ 33: 33](RO) Reserved.
+ Internal:
+ Tied to 0. */
+ uint64_t dorm_crypto : 1; /**< [ 34: 34](RO) Fuse information - Dormant encryption enable. See NOCRYPTO. */
+ uint64_t power_limit : 2; /**< [ 36: 35](RO) Reserved.
+ Internal:
+ Fuse information - Power limit. */
+ uint64_t rom_info : 10; /**< [ 46: 37](RO) Fuse information - ROM info. */
+ uint64_t fus118 : 1; /**< [ 47: 47](RO) Reserved.
+ Internal:
+ fuse[99]. Fuse information - Ignore trusted-mode disable. */
+ uint64_t gbl_pwr_throttle : 8; /**< [ 55: 48](RO) Controls global power throttling. MSB is a spare, and lower 7 bits indicate
+ N/128 power reduction. Small values have less throttling and higher
+ performance. 0x0 disables throttling. */
+ uint64_t run_platform : 3; /**< [ 58: 56](RO) Fuses to indicate the run platform. Not to be blown in actual hardware.
+ Provides software a means of determining the platform at run time.
+ 0x0 = Hardware.
+ 0x1 = Emulator.
+ 0x2 = RTL simulator.
+ 0x3 = ASIM.
+ 0x4-0x7 = reserved. */
+ uint64_t reserved_59_63 : 5;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_mio_fus_dat2_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_59_63 : 5;
+ uint64_t run_platform : 3; /**< [ 58: 56](RO) Fuses to indicate the run platform. Not to be blown in actual hardware.
+ Provides software a means of determining the platform at run time.
+ 0x0 = Hardware.
+ 0x1 = Emulator.
+ 0x2 = RTL simulator.
+ 0x3 = ASIM.
+ 0x4-0x7 = reserved. */
+ uint64_t reserved_48_55 : 8;
+ uint64_t fus118 : 1; /**< [ 47: 47](RO) Reserved.
+ Internal:
+ fuse[99]. Fuse information - Ignore trusted-mode disable. */
+ uint64_t rom_info : 10; /**< [ 46: 37](RO) Fuse information - ROM info. */
+ uint64_t power_limit : 2; /**< [ 36: 35](RO) Reserved.
+ Internal:
+ Fuse information - Power limit. */
+ uint64_t dorm_crypto : 1; /**< [ 34: 34](RO) Fuse information - Dormant encryption enable. See NOCRYPTO. */
+ uint64_t fus318 : 1; /**< [ 33: 33](RO) Reserved.
+ Internal:
+ Tied to 0. */
+ uint64_t raid_en : 1; /**< [ 32: 32](RO) Fuse information - RAID enabled. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t nodfa_cp2 : 1; /**< [ 28: 28](RO) Fuse information - HFA disable (CP2). */
+ uint64_t nomul : 1; /**< [ 27: 27](RO) Fuse information - VMUL disable. */
+ uint64_t nocrypto : 1; /**< [ 26: 26](RO) Fuse information - [DORM_CRYPTO] and [NOCRYPTO] together select the crypto mode:
+
+ _ [DORM_CRYPTO] = 0, [NOCRYPTO] = 0: AES/SHA/PMULL enabled.
+
+ _ [DORM_CRYPTO] = 0, [NOCRYPTO] = 1: The AES, SHA, and PMULL 1D/2D instructions will
+ cause undefined exceptions, and AP_ID_AA64ISAR0_EL1[AES, SHA1, SHA2] are zero
+ indicating this behavior.
+
+ _ [DORM_CRYPTO] = 1, [NOCRYPTO] = 0: Dormant encryption enable. AES/SHA/PMULL are
+ disabled (as if [NOCRYPTO] = 1) until the appropriate key is written to
+ RNM_EER_KEY, then they are enabled (as if [NOCRYPTO] = 1).
+
+ _ [DORM_CRYPTO] = 1, [NOCRYPTO] = 1: Reserved. */
+ uint64_t trustzone_en : 1; /**< [ 25: 25](RO) Fuse information - TrustZone enable. */
+ uint64_t reserved_24 : 1;
+ uint64_t chip_id : 8; /**< [ 23: 16](RO) Chip revision identifier.
+ \<23:22\> = Alternate package.
+ \<21:19\> = Major revision.
+ \<18:16\> = Minor revision.
+
+ For example:
+ \<pre\>
+ \<21:19\> \<18:16\> Description
+ ------- ------- -----------
+ 0x0 0x0 Pass 1.0.
+ 0x0 0x1 Pass 1.1.
+ 0x0 0x2 Pass 1.2.
+ 0x1 0x0 Pass 2.0.
+ 0x1 0x1 Pass 2.1.
+ 0x1 0x2 Pass 2.2.
+ ... ... ...
+ 0x7 0x7 Pass 8.8.
+ \</pre\> */
+ uint64_t ocx_dis : 1; /**< [ 15: 15](RO) Fuse information - OCX disable. */
+ uint64_t bgx_dis : 2; /**< [ 14: 13](RO) Fuse information - BGX disable:
+ \<13\> = BGX0 disable.
+ \<14\> = BGX1 disable. */
+ uint64_t sata_dis : 4; /**< [ 12: 9](RO) Fuse information - SATA disable:
+ \<9\> = SATA0-3 disable.
+ \<10\> = SATA4-7 disable.
+ \<11\> = SATA8-11 disable.
+ \<12\> = SATA12-15 disable. */
+ uint64_t pem_dis : 3; /**< [ 8: 6](RO) Fuse information - PEM disable:
+ \<6\> = PEM0-1 disable.
+ \<7\> = PEM2-3 disable.
+ \<8\> = PEM4-5 disable. */
+ uint64_t lmc_half : 1; /**< [ 5: 5](RO) Fuse information - LMC uses two channels rather than four. */
+ uint64_t reserved_0_4 : 5;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_4 : 5;
+ uint64_t lmc_half : 1; /**< [ 5: 5](RO) Fuse information - LMC uses two channels rather than four. */
+ uint64_t pem_dis : 3; /**< [ 8: 6](RO) Fuse information - PEM disable:
+ \<6\> = PEM0-1 disable.
+ \<7\> = PEM2-3 disable.
+ \<8\> = PEM4-5 disable. */
+ uint64_t sata_dis : 4; /**< [ 12: 9](RO) Fuse information - SATA disable:
+ \<9\> = SATA0-3 disable.
+ \<10\> = SATA4-7 disable.
+ \<11\> = SATA8-11 disable.
+ \<12\> = SATA12-15 disable. */
+ uint64_t bgx_dis : 2; /**< [ 14: 13](RO) Fuse information - BGX disable:
+ \<13\> = BGX0 disable.
+ \<14\> = BGX1 disable. */
+ uint64_t ocx_dis : 1; /**< [ 15: 15](RO) Fuse information - OCX disable. */
+ uint64_t chip_id : 8; /**< [ 23: 16](RO) Chip revision identifier.
+ \<23:22\> = Alternate package.
+ \<21:19\> = Major revision.
+ \<18:16\> = Minor revision.
+
+ For example:
+ \<pre\>
+ \<21:19\> \<18:16\> Description
+ ------- ------- -----------
+ 0x0 0x0 Pass 1.0.
+ 0x0 0x1 Pass 1.1.
+ 0x0 0x2 Pass 1.2.
+ 0x1 0x0 Pass 2.0.
+ 0x1 0x1 Pass 2.1.
+ 0x1 0x2 Pass 2.2.
+ ... ... ...
+ 0x7 0x7 Pass 8.8.
+ \</pre\> */
+ uint64_t reserved_24 : 1;
+ uint64_t trustzone_en : 1; /**< [ 25: 25](RO) Fuse information - TrustZone enable. */
+ uint64_t nocrypto : 1; /**< [ 26: 26](RO) Fuse information - [DORM_CRYPTO] and [NOCRYPTO] together select the crypto mode:
+
+ _ [DORM_CRYPTO] = 0, [NOCRYPTO] = 0: AES/SHA/PMULL enabled.
+
+ _ [DORM_CRYPTO] = 0, [NOCRYPTO] = 1: The AES, SHA, and PMULL 1D/2D instructions will
+ cause undefined exceptions, and AP_ID_AA64ISAR0_EL1[AES, SHA1, SHA2] are zero
+ indicating this behavior.
+
+ _ [DORM_CRYPTO] = 1, [NOCRYPTO] = 0: Dormant encryption enable. AES/SHA/PMULL are
+ disabled (as if [NOCRYPTO] = 1) until the appropriate key is written to
+ RNM_EER_KEY, then they are enabled (as if [NOCRYPTO] = 1).
+
+ _ [DORM_CRYPTO] = 1, [NOCRYPTO] = 1: Reserved. */
+ uint64_t nomul : 1; /**< [ 27: 27](RO) Fuse information - VMUL disable. */
+ uint64_t nodfa_cp2 : 1; /**< [ 28: 28](RO) Fuse information - HFA disable (CP2). */
+ uint64_t reserved_29_31 : 3;
+ uint64_t raid_en : 1; /**< [ 32: 32](RO) Fuse information - RAID enabled. */
+ uint64_t fus318 : 1; /**< [ 33: 33](RO) Reserved.
+ Internal:
+ Tied to 0. */
+ uint64_t dorm_crypto : 1; /**< [ 34: 34](RO) Fuse information - Dormant encryption enable. See NOCRYPTO. */
+ uint64_t power_limit : 2; /**< [ 36: 35](RO) Reserved.
+ Internal:
+ Fuse information - Power limit. */
+ uint64_t rom_info : 10; /**< [ 46: 37](RO) Fuse information - ROM info. */
+ uint64_t fus118 : 1; /**< [ 47: 47](RO) Reserved.
+ Internal:
+ fuse[99]. Fuse information - Ignore trusted-mode disable. */
+ uint64_t reserved_48_55 : 8;
+ uint64_t run_platform : 3; /**< [ 58: 56](RO) Fuses to indicate the run platform. Not to be blown in actual hardware.
+ Provides software a means of determining the platform at run time.
+ 0x0 = Hardware.
+ 0x1 = Emulator.
+ 0x2 = RTL simulator.
+ 0x3 = ASIM.
+ 0x4-0x7 = reserved. */
+ uint64_t reserved_59_63 : 5;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_mio_fus_dat2_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_59_63 : 5;
+ uint64_t run_platform : 3; /**< [ 58: 56](RO) Fuses to indicate the run platform. Not to be blown in actual hardware.
+ Provides software a means of determining the platform at run time.
+ 0x0 = Hardware.
+ 0x1 = Emulator.
+ 0x2 = RTL simulator.
+ 0x3 = ASIM.
+ 0x4-0x7 = reserved. */
+ uint64_t gbl_pwr_throttle : 8; /**< [ 55: 48](RO) Reserved.
+ Internal:
+ Controls global power throttling. MSB is a spare, and lower 7 bits indicate
+ N/128 power reduction. Small values have less throttling and higher
+ performance. 0x0 disables throttling. */
+ uint64_t fus118 : 1; /**< [ 47: 47](RO) Reserved.
+ Internal:
+ fuse[99]. Fuse information - Ignore trusted-mode disable. */
+ uint64_t rom_info : 10; /**< [ 46: 37](RO) Fuse information - ROM info. */
+ uint64_t power_limit : 2; /**< [ 36: 35](RO) Reserved.
+ Internal:
+ Fuse information - Power limit. */
+ uint64_t dorm_crypto : 1; /**< [ 34: 34](RO) Fuse information - Dormant encryption enable. See NOCRYPTO. */
+ uint64_t fus318 : 1; /**< [ 33: 33](RO) Reserved.
+ Internal:
+ Tied to 0. */
+ uint64_t raid_en : 1; /**< [ 32: 32](RO) Fuse information - RAID enabled. */
+ uint64_t reserved_31 : 1;
+ uint64_t lmc_mode32 : 1; /**< [ 30: 30](RO) DRAM controller is limited to 32/36 bit wide parts. In CN80XX always set.
+ Internal:
+ 30 = fuse[75]. */
+ uint64_t reserved_29 : 1;
+ uint64_t nodfa_cp2 : 1; /**< [ 28: 28](RO) Fuse information - HFA disable (CP2). */
+ uint64_t nomul : 1; /**< [ 27: 27](RO) Fuse information - VMUL disable. */
+ uint64_t nocrypto : 1; /**< [ 26: 26](RO) Fuse information - [DORM_CRYPTO] and [NOCRYPTO] together select the crypto mode:
+
+ _ [DORM_CRYPTO] = 0, [NOCRYPTO] = 0: AES/SHA/PMULL enabled.
+
+ _ [DORM_CRYPTO] = 0, [NOCRYPTO] = 1: The AES, SHA, and PMULL 1D/2D instructions will
+ cause undefined exceptions, and AP_ID_AA64ISAR0_EL1[AES, SHA1, SHA2] are zero
+ indicating this behavior.
+
+ _ [DORM_CRYPTO] = 1, [NOCRYPTO] = 0: Dormant encryption enable. AES/SHA/PMULL are
+ disabled (as if [NOCRYPTO] = 1) until the appropriate key is written to
+ RNM_EER_KEY, then they are enabled (as if [NOCRYPTO] = 1).
+
+ _ [DORM_CRYPTO] = 1, [NOCRYPTO] = 1: Reserved. */
+ uint64_t trustzone_en : 1; /**< [ 25: 25](RO) Fuse information - TrustZone enable. */
+ uint64_t reserved_24 : 1;
+ uint64_t chip_id : 8; /**< [ 23: 16](RO) Chip revision identifier.
+ \<23:22\> = Alternate package.
+ 0x0 = CN81xx-xxxx-BG676.
+ 0x1 = CN80xx-xxxx-BG555.
+ 0x2 = CN80xx-xxxx-BG676.
+ 0x3 = Reserved.
+
+ \<21:19\> = Major revision.
+
+ \<18:16\> = Minor revision.
+
+ For example:
+ \<pre\>
+ \<21:19\> \<18:16\> Description
+ ------- ------- -----------
+ 0x0 0x0 Pass 1.0.
+ 0x0 0x1 Pass 1.1.
+ 0x0 0x2 Pass 1.2.
+ 0x1 0x0 Pass 2.0.
+ 0x1 0x1 Pass 2.1.
+ 0x1 0x2 Pass 2.2.
+ ... ... ...
+ 0x7 0x7 Pass 8.8.
+ \</pre\> */
+ uint64_t ocx_dis : 1; /**< [ 15: 15](RO) Reserved. */
+ uint64_t bgx_dis : 2; /**< [ 14: 13](RO) Fuse information - BGX disable:
+ \<13\> = BGX0 disable.
+ \<14\> = BGX1 disable. */
+ uint64_t sata_dis : 4; /**< [ 12: 9](RO) Fuse information - SATA disable:
+ \<9\> = SATA0 disable.
+ \<10\> = SATA1 disable.
+ \<11\> = Reserved.
+ \<12\> = Reserved. */
+ uint64_t pem_dis : 3; /**< [ 8: 6](RO) Fuse information - PEM disable:
+ \<6\> = PEM0 disable.
+ \<7\> = PEM1 disable.
+ \<8\> = PEM2 disable. */
+ uint64_t lmc_half : 1; /**< [ 5: 5](RO) Fuse information - LMC1 disabled. LMC1 not present in CN80XX/CN81XX, so clear. */
+ uint64_t reserved_0_4 : 5;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_4 : 5;
+ uint64_t lmc_half : 1; /**< [ 5: 5](RO) Fuse information - LMC1 disabled. LMC1 not present in CN80XX/CN81XX, so clear. */
+ uint64_t pem_dis : 3; /**< [ 8: 6](RO) Fuse information - PEM disable:
+ \<6\> = PEM0 disable.
+ \<7\> = PEM1 disable.
+ \<8\> = PEM2 disable. */
+ uint64_t sata_dis : 4; /**< [ 12: 9](RO) Fuse information - SATA disable:
+ \<9\> = SATA0 disable.
+ \<10\> = SATA1 disable.
+ \<11\> = Reserved.
+ \<12\> = Reserved. */
+ uint64_t bgx_dis : 2; /**< [ 14: 13](RO) Fuse information - BGX disable:
+ \<13\> = BGX0 disable.
+ \<14\> = BGX1 disable. */
+ uint64_t ocx_dis : 1; /**< [ 15: 15](RO) Reserved. */
+ uint64_t chip_id : 8; /**< [ 23: 16](RO) Chip revision identifier.
+ \<23:22\> = Alternate package.
+ 0x0 = CN81xx-xxxx-BG676.
+ 0x1 = CN80xx-xxxx-BG555.
+ 0x2 = CN80xx-xxxx-BG676.
+ 0x3 = Reserved.
+
+ \<21:19\> = Major revision.
+
+ \<18:16\> = Minor revision.
+
+ For example:
+ \<pre\>
+ \<21:19\> \<18:16\> Description
+ ------- ------- -----------
+ 0x0 0x0 Pass 1.0.
+ 0x0 0x1 Pass 1.1.
+ 0x0 0x2 Pass 1.2.
+ 0x1 0x0 Pass 2.0.
+ 0x1 0x1 Pass 2.1.
+ 0x1 0x2 Pass 2.2.
+ ... ... ...
+ 0x7 0x7 Pass 8.8.
+ \</pre\> */
+ uint64_t reserved_24 : 1;
+ uint64_t trustzone_en : 1; /**< [ 25: 25](RO) Fuse information - TrustZone enable. */
+ uint64_t nocrypto : 1; /**< [ 26: 26](RO) Fuse information - [DORM_CRYPTO] and [NOCRYPTO] together select the crypto mode:
+
+ _ [DORM_CRYPTO] = 0, [NOCRYPTO] = 0: AES/SHA/PMULL enabled.
+
+ _ [DORM_CRYPTO] = 0, [NOCRYPTO] = 1: The AES, SHA, and PMULL 1D/2D instructions will
+ cause undefined exceptions, and AP_ID_AA64ISAR0_EL1[AES, SHA1, SHA2] are zero
+ indicating this behavior.
+
+ _ [DORM_CRYPTO] = 1, [NOCRYPTO] = 0: Dormant encryption enable. AES/SHA/PMULL are
+ disabled (as if [NOCRYPTO] = 1) until the appropriate key is written to
+ RNM_EER_KEY, then they are enabled (as if [NOCRYPTO] = 1).
+
+ _ [DORM_CRYPTO] = 1, [NOCRYPTO] = 1: Reserved. */
+ uint64_t nomul : 1; /**< [ 27: 27](RO) Fuse information - VMUL disable. */
+ uint64_t nodfa_cp2 : 1; /**< [ 28: 28](RO) Fuse information - HFA disable (CP2). */
+ uint64_t reserved_29 : 1;
+ uint64_t lmc_mode32 : 1; /**< [ 30: 30](RO) DRAM controller is limited to 32/36 bit wide parts. In CN80XX always set.
+ Internal:
+ 30 = fuse[75]. */
+ uint64_t reserved_31 : 1;
+ uint64_t raid_en : 1; /**< [ 32: 32](RO) Fuse information - RAID enabled. */
+ uint64_t fus318 : 1; /**< [ 33: 33](RO) Reserved.
+ Internal:
+ Tied to 0. */
+ uint64_t dorm_crypto : 1; /**< [ 34: 34](RO) Fuse information - Dormant encryption enable. See NOCRYPTO. */
+ uint64_t power_limit : 2; /**< [ 36: 35](RO) Reserved.
+ Internal:
+ Fuse information - Power limit. */
+ uint64_t rom_info : 10; /**< [ 46: 37](RO) Fuse information - ROM info. */
+ uint64_t fus118 : 1; /**< [ 47: 47](RO) Reserved.
+ Internal:
+ fuse[99]. Fuse information - Ignore trusted-mode disable. */
+ uint64_t gbl_pwr_throttle : 8; /**< [ 55: 48](RO) Reserved.
+ Internal:
+ Controls global power throttling. MSB is a spare, and lower 7 bits indicate
+ N/128 power reduction. Small values have less throttling and higher
+ performance. 0x0 disables throttling. */
+ uint64_t run_platform : 3; /**< [ 58: 56](RO) Fuses to indicate the run platform. Not to be blown in actual hardware.
+ Provides software a means of determining the platform at run time.
+ 0x0 = Hardware.
+ 0x1 = Emulator.
+ 0x2 = RTL simulator.
+ 0x3 = ASIM.
+ 0x4-0x7 = reserved. */
+ uint64_t reserved_59_63 : 5;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_mio_fus_dat2_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_59_63 : 5;
+ uint64_t run_platform : 3; /**< [ 58: 56](RO) Fuses to indicate the run platform. Not to be blown in actual hardware.
+ Provides software a means of determining the platform at run time.
+ 0x0 = Hardware.
+ 0x1 = Emulator.
+ 0x2 = RTL simulator.
+ 0x3 = ASIM.
+ 0x4-0x7 = reserved. */
+ uint64_t gbl_pwr_throttle : 8; /**< [ 55: 48](RO) Reserved.
+ Internal:
+ Controls global power throttling. MSB is a spare, and lower 7 bits indicate
+ N/128 power reduction. Small values have less throttling and higher
+ performance. 0x0 disables throttling. */
+ uint64_t fus118 : 1; /**< [ 47: 47](RO) Reserved.
+ Internal:
+ fuse[99]. Fuse information - Ignore trusted-mode disable. */
+ uint64_t rom_info : 10; /**< [ 46: 37](RO) Fuse information - ROM info. */
+ uint64_t power_limit : 2; /**< [ 36: 35](RO) Reserved.
+ Internal:
+ Fuse information - Power limit. */
+ uint64_t dorm_crypto : 1; /**< [ 34: 34](RO) Fuse information - Dormant encryption enable. See NOCRYPTO. */
+ uint64_t fus318 : 1; /**< [ 33: 33](RO) Reserved.
+ Internal:
+ Tied to 0. */
+ uint64_t raid_en : 1; /**< [ 32: 32](RO) Fuse information - RAID enabled. */
+ uint64_t reserved_31 : 1;
+ uint64_t lmc_mode32 : 1; /**< [ 30: 30](RO) DRAM controller is limited to 32/36 bit wide parts.
+ Internal:
+ 30 = fuse[75]. */
+ uint64_t reserved_29 : 1;
+ uint64_t nodfa_cp2 : 1; /**< [ 28: 28](RO) Fuse information - HFA disable (CP2). */
+ uint64_t nomul : 1; /**< [ 27: 27](RO) Fuse information - VMUL disable. */
+ uint64_t nocrypto : 1; /**< [ 26: 26](RO) Fuse information - [DORM_CRYPTO] and [NOCRYPTO] together select the crypto mode:
+
+ _ [DORM_CRYPTO] = 0, [NOCRYPTO] = 0: AES/SHA/PMULL enabled.
+
+ _ [DORM_CRYPTO] = 0, [NOCRYPTO] = 1: The AES, SHA, and PMULL 1D/2D instructions will
+ cause undefined exceptions, and AP_ID_AA64ISAR0_EL1[AES, SHA1, SHA2] are zero
+ indicating this behavior.
+
+ _ [DORM_CRYPTO] = 1, [NOCRYPTO] = 0: Dormant encryption enable. AES/SHA/PMULL are
+ disabled (as if [NOCRYPTO] = 1) until the appropriate key is written to
+ RNM_EER_KEY, then they are enabled (as if [NOCRYPTO] = 1).
+
+ _ [DORM_CRYPTO] = 1, [NOCRYPTO] = 1: Reserved. */
+ uint64_t trustzone_en : 1; /**< [ 25: 25](RO) Fuse information - TrustZone enable. */
+ uint64_t reserved_24 : 1;
+ uint64_t chip_id : 8; /**< [ 23: 16](RO) Chip revision identifier.
+ \<23:22\> = Alternate package.
+ \<21:19\> = Major revision.
+ \<18:16\> = Minor revision.
+
+ For example:
+ \<pre\>
+ \<21:19\> \<18:16\> Description
+ ------- ------- -----------
+ 0x0 0x0 Pass 1.0.
+ 0x0 0x1 Pass 1.1.
+ 0x0 0x2 Pass 1.2.
+ 0x1 0x0 Pass 2.0.
+ 0x1 0x1 Pass 2.1.
+ 0x1 0x2 Pass 2.2.
+ ... ... ...
+ 0x7 0x7 Pass 8.8.
+ \</pre\> */
+ uint64_t ocx_dis : 1; /**< [ 15: 15](RO) Reserved. */
+ uint64_t bgx_dis : 2; /**< [ 14: 13](RO) Fuse information - BGX disable:
+ \<13\> = BGX0 disable.
+ \<14\> = BGX1 disable. */
+ uint64_t sata_dis : 4; /**< [ 12: 9](RO) Fuse information - SATA disable:
+ \<9\> = SATA0-1 disable.
+ \<10\> = SATA2-3 disable.
+ \<11\> = SATA4-5 disable.
+ \<12\> = Reserved. */
+ uint64_t pem_dis : 3; /**< [ 8: 6](RO) Fuse information - PEM disable:
+ \<6\> = PEM0-1 disable.
+ \<7\> = PEM2 disable.
+ \<8\> = PEM3 disable. */
+ uint64_t lmc_half : 1; /**< [ 5: 5](RO) Fuse information - LMC1 disabled. */
+ uint64_t tim_dis : 1; /**< [ 4: 4](RO) Fuse information - TIM disable. */
+ uint64_t bgx3_dis : 1; /**< [ 3: 3](RO) Fuse information - BGX3 disable. */
+ uint64_t bgx2_dis : 1; /**< [ 2: 2](RO) Fuse information - BGX2 disable. */
+ uint64_t ddf_dis : 1; /**< [ 1: 1](RO) Fuse information - DDF disable. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t ddf_dis : 1; /**< [ 1: 1](RO) Fuse information - DDF disable. */
+ uint64_t bgx2_dis : 1; /**< [ 2: 2](RO) Fuse information - BGX2 disable. */
+ uint64_t bgx3_dis : 1; /**< [ 3: 3](RO) Fuse information - BGX3 disable. */
+ uint64_t tim_dis : 1; /**< [ 4: 4](RO) Fuse information - TIM disable. */
+ uint64_t lmc_half : 1; /**< [ 5: 5](RO) Fuse information - LMC1 disabled. */
+ uint64_t pem_dis : 3; /**< [ 8: 6](RO) Fuse information - PEM disable:
+ \<6\> = PEM0-1 disable.
+ \<7\> = PEM2 disable.
+ \<8\> = PEM3 disable. */
+ uint64_t sata_dis : 4; /**< [ 12: 9](RO) Fuse information - SATA disable:
+ \<9\> = SATA0-1 disable.
+ \<10\> = SATA2-3 disable.
+ \<11\> = SATA4-5 disable.
+ \<12\> = Reserved. */
+ uint64_t bgx_dis : 2; /**< [ 14: 13](RO) Fuse information - BGX disable:
+ \<13\> = BGX0 disable.
+ \<14\> = BGX1 disable. */
+ uint64_t ocx_dis : 1; /**< [ 15: 15](RO) Reserved. */
+ uint64_t chip_id : 8; /**< [ 23: 16](RO) Chip revision identifier.
+ \<23:22\> = Alternate package.
+ \<21:19\> = Major revision.
+ \<18:16\> = Minor revision.
+
+ For example:
+ \<pre\>
+ \<21:19\> \<18:16\> Description
+ ------- ------- -----------
+ 0x0 0x0 Pass 1.0.
+ 0x0 0x1 Pass 1.1.
+ 0x0 0x2 Pass 1.2.
+ 0x1 0x0 Pass 2.0.
+ 0x1 0x1 Pass 2.1.
+ 0x1 0x2 Pass 2.2.
+ ... ... ...
+ 0x7 0x7 Pass 8.8.
+ \</pre\> */
+ uint64_t reserved_24 : 1;
+ uint64_t trustzone_en : 1; /**< [ 25: 25](RO) Fuse information - TrustZone enable. */
+ uint64_t nocrypto : 1; /**< [ 26: 26](RO) Fuse information - [DORM_CRYPTO] and [NOCRYPTO] together select the crypto mode:
+
+ _ [DORM_CRYPTO] = 0, [NOCRYPTO] = 0: AES/SHA/PMULL enabled.
+
+ _ [DORM_CRYPTO] = 0, [NOCRYPTO] = 1: The AES, SHA, and PMULL 1D/2D instructions will
+ cause undefined exceptions, and AP_ID_AA64ISAR0_EL1[AES, SHA1, SHA2] are zero
+ indicating this behavior.
+
+ _ [DORM_CRYPTO] = 1, [NOCRYPTO] = 0: Dormant encryption enable. AES/SHA/PMULL are
+ disabled (as if [NOCRYPTO] = 1) until the appropriate key is written to
+ RNM_EER_KEY, then they are enabled (as if [NOCRYPTO] = 1).
+
+ _ [DORM_CRYPTO] = 1, [NOCRYPTO] = 1: Reserved. */
+ uint64_t nomul : 1; /**< [ 27: 27](RO) Fuse information - VMUL disable. */
+ uint64_t nodfa_cp2 : 1; /**< [ 28: 28](RO) Fuse information - HFA disable (CP2). */
+ uint64_t reserved_29 : 1;
+ uint64_t lmc_mode32 : 1; /**< [ 30: 30](RO) DRAM controller is limited to 32/36 bit wide parts.
+ Internal:
+ 30 = fuse[75]. */
+ uint64_t reserved_31 : 1;
+ uint64_t raid_en : 1; /**< [ 32: 32](RO) Fuse information - RAID enabled. */
+ uint64_t fus318 : 1; /**< [ 33: 33](RO) Reserved.
+ Internal:
+ Tied to 0. */
+ uint64_t dorm_crypto : 1; /**< [ 34: 34](RO) Fuse information - Dormant encryption enable. See NOCRYPTO. */
+ uint64_t power_limit : 2; /**< [ 36: 35](RO) Reserved.
+ Internal:
+ Fuse information - Power limit. */
+ uint64_t rom_info : 10; /**< [ 46: 37](RO) Fuse information - ROM info. */
+ uint64_t fus118 : 1; /**< [ 47: 47](RO) Reserved.
+ Internal:
+ fuse[99]. Fuse information - Ignore trusted-mode disable. */
+ uint64_t gbl_pwr_throttle : 8; /**< [ 55: 48](RO) Reserved.
+ Internal:
+ Controls global power throttling. MSB is a spare, and lower 7 bits indicate
+ N/128 power reduction. Small values have less throttling and higher
+ performance. 0x0 disables throttling. */
+ uint64_t run_platform : 3; /**< [ 58: 56](RO) Fuses to indicate the run platform. Not to be blown in actual hardware.
+ Provides software a means of determining the platform at run time.
+ 0x0 = Hardware.
+ 0x1 = Emulator.
+ 0x2 = RTL simulator.
+ 0x3 = ASIM.
+ 0x4-0x7 = reserved. */
+ uint64_t reserved_59_63 : 5;
+#endif /* Word 0 - End */
+ } cn83xx;
+ struct bdk_mio_fus_dat2_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_59_63 : 5;
+ uint64_t run_platform : 3; /**< [ 58: 56](RO) Fuses to indicate the run platform. Not to be blown in actual hardware.
+ Provides software a means of determining the platform at run time.
+ 0x0 = Hardware.
+ 0x1 = Emulator.
+ 0x2 = RTL simulator.
+ 0x3 = ASIM.
+ 0x4-0x7 = reserved. */
+ uint64_t gbl_pwr_throttle : 8; /**< [ 55: 48](RO) Controls global power throttling. MSB is a spare, and lower 7 bits indicate
+ N/128 power reduction. Small values have less throttling and higher
+ performance. 0x0 disables throttling. */
+ uint64_t fus118 : 1; /**< [ 47: 47](RO) Reserved.
+ Internal:
+ fuse[99]. Fuse information - Ignore trusted-mode disable. */
+ uint64_t rom_info : 10; /**< [ 46: 37](RO) Fuse information - ROM info. */
+ uint64_t power_limit : 2; /**< [ 36: 35](RO) Reserved.
+ Internal:
+ Fuse information - Power limit. */
+ uint64_t dorm_crypto : 1; /**< [ 34: 34](RO) Fuse information - Dormant encryption enable. See NOCRYPTO. */
+ uint64_t fus318 : 1; /**< [ 33: 33](RO) Reserved.
+ Internal:
+ Tied to 0. */
+ uint64_t raid_en : 1; /**< [ 32: 32](RO) Fuse information - RAID enabled. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t nodfa_cp2 : 1; /**< [ 28: 28](RO) Fuse information - HFA disable (CP2). */
+ uint64_t nomul : 1; /**< [ 27: 27](RO) Fuse information - VMUL disable. */
+ uint64_t nocrypto : 1; /**< [ 26: 26](RO) Fuse information - [DORM_CRYPTO] and [NOCRYPTO] together select the crypto mode:
+
+ _ [DORM_CRYPTO] = 0, [NOCRYPTO] = 0: AES/SHA/PMULL enabled.
+
+ _ [DORM_CRYPTO] = 0, [NOCRYPTO] = 1: The AES, SHA, and PMULL 1D/2D instructions will
+ cause undefined exceptions, and AP_ID_AA64ISAR0_EL1[AES, SHA1, SHA2] are zero
+ indicating this behavior.
+
+ _ [DORM_CRYPTO] = 1, [NOCRYPTO] = 0: Dormant encryption enable. AES/SHA/PMULL are
+ disabled (as if [NOCRYPTO] = 1) until the appropriate key is written to
+ RNM_EER_KEY, then they are enabled (as if [NOCRYPTO] = 1).
+
+ _ [DORM_CRYPTO] = 1, [NOCRYPTO] = 1: Reserved. */
+ uint64_t trustzone_en : 1; /**< [ 25: 25](RO) Fuse information - TrustZone enable. */
+ uint64_t reserved_24 : 1;
+ uint64_t chip_id : 8; /**< [ 23: 16](RO) Chip revision identifier.
+ \<23:22\> = Alternate package.
+ \<21:19\> = Major revision.
+ \<18:16\> = Minor revision.
+
+ For example:
+ \<pre\>
+ \<21:19\> \<18:16\> Description
+ ------- ------- -----------
+ 0x0 0x0 Pass 1.0.
+ 0x0 0x1 Pass 1.1.
+ 0x0 0x2 Pass 1.2.
+ 0x1 0x0 Pass 2.0.
+ 0x1 0x1 Pass 2.1.
+ 0x1 0x2 Pass 2.2.
+ ... ... ...
+ 0x7 0x7 Pass 8.8.
+ \</pre\> */
+ uint64_t ocx_dis : 1; /**< [ 15: 15](RO) Fuse information - OCX disable. */
+ uint64_t bgx_dis : 2; /**< [ 14: 13](RO) Fuse information - BGX disable:
+ \<13\> = BGX0 disable.
+ \<14\> = BGX1 disable. */
+ uint64_t sata_dis : 4; /**< [ 12: 9](RO) Fuse information - SATA disable:
+ \<9\> = SATA0-3 disable.
+ \<10\> = SATA4-7 disable.
+ \<11\> = SATA8-11 disable.
+ \<12\> = SATA12-15 disable. */
+ uint64_t pem_dis : 3; /**< [ 8: 6](RO) Fuse information - PEM disable:
+ \<6\> = PEM0-1 disable.
+ \<7\> = PEM2-3 disable.
+ \<8\> = PEM4-5 disable. */
+ uint64_t lmc_half : 1; /**< [ 5: 5](RO) Fuse information - LMC uses two channels rather than four. */
+ uint64_t reserved_0_4 : 5;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_4 : 5;
+ uint64_t lmc_half : 1; /**< [ 5: 5](RO) Fuse information - LMC uses two channels rather than four. */
+ uint64_t pem_dis : 3; /**< [ 8: 6](RO) Fuse information - PEM disable:
+ \<6\> = PEM0-1 disable.
+ \<7\> = PEM2-3 disable.
+ \<8\> = PEM4-5 disable. */
+ uint64_t sata_dis : 4; /**< [ 12: 9](RO) Fuse information - SATA disable:
+ \<9\> = SATA0-3 disable.
+ \<10\> = SATA4-7 disable.
+ \<11\> = SATA8-11 disable.
+ \<12\> = SATA12-15 disable. */
+ uint64_t bgx_dis : 2; /**< [ 14: 13](RO) Fuse information - BGX disable:
+ \<13\> = BGX0 disable.
+ \<14\> = BGX1 disable. */
+ uint64_t ocx_dis : 1; /**< [ 15: 15](RO) Fuse information - OCX disable. */
+ uint64_t chip_id : 8; /**< [ 23: 16](RO) Chip revision identifier.
+ \<23:22\> = Alternate package.
+ \<21:19\> = Major revision.
+ \<18:16\> = Minor revision.
+
+ For example:
+ \<pre\>
+ \<21:19\> \<18:16\> Description
+ ------- ------- -----------
+ 0x0 0x0 Pass 1.0.
+ 0x0 0x1 Pass 1.1.
+ 0x0 0x2 Pass 1.2.
+ 0x1 0x0 Pass 2.0.
+ 0x1 0x1 Pass 2.1.
+ 0x1 0x2 Pass 2.2.
+ ... ... ...
+ 0x7 0x7 Pass 8.8.
+ \</pre\> */
+ uint64_t reserved_24 : 1;
+ uint64_t trustzone_en : 1; /**< [ 25: 25](RO) Fuse information - TrustZone enable. */
+ uint64_t nocrypto : 1; /**< [ 26: 26](RO) Fuse information - [DORM_CRYPTO] and [NOCRYPTO] together select the crypto mode:
+
+ _ [DORM_CRYPTO] = 0, [NOCRYPTO] = 0: AES/SHA/PMULL enabled.
+
+ _ [DORM_CRYPTO] = 0, [NOCRYPTO] = 1: The AES, SHA, and PMULL 1D/2D instructions will
+ cause undefined exceptions, and AP_ID_AA64ISAR0_EL1[AES, SHA1, SHA2] are zero
+ indicating this behavior.
+
+ _ [DORM_CRYPTO] = 1, [NOCRYPTO] = 0: Dormant encryption enable. AES/SHA/PMULL are
+ disabled (as if [NOCRYPTO] = 1) until the appropriate key is written to
+ RNM_EER_KEY, then they are enabled (as if [NOCRYPTO] = 1).
+
+ _ [DORM_CRYPTO] = 1, [NOCRYPTO] = 1: Reserved. */
+ uint64_t nomul : 1; /**< [ 27: 27](RO) Fuse information - VMUL disable. */
+ uint64_t nodfa_cp2 : 1; /**< [ 28: 28](RO) Fuse information - HFA disable (CP2). */
+ uint64_t reserved_29_31 : 3;
+ uint64_t raid_en : 1; /**< [ 32: 32](RO) Fuse information - RAID enabled. */
+ uint64_t fus318 : 1; /**< [ 33: 33](RO) Reserved.
+ Internal:
+ Tied to 0. */
+ uint64_t dorm_crypto : 1; /**< [ 34: 34](RO) Fuse information - Dormant encryption enable. See NOCRYPTO. */
+ uint64_t power_limit : 2; /**< [ 36: 35](RO) Reserved.
+ Internal:
+ Fuse information - Power limit. */
+ uint64_t rom_info : 10; /**< [ 46: 37](RO) Fuse information - ROM info. */
+ uint64_t fus118 : 1; /**< [ 47: 47](RO) Reserved.
+ Internal:
+ fuse[99]. Fuse information - Ignore trusted-mode disable. */
+ uint64_t gbl_pwr_throttle : 8; /**< [ 55: 48](RO) Controls global power throttling. MSB is a spare, and lower 7 bits indicate
+ N/128 power reduction. Small values have less throttling and higher
+ performance. 0x0 disables throttling. */
+ uint64_t run_platform : 3; /**< [ 58: 56](RO) Fuses to indicate the run platform. Not to be blown in actual hardware.
+ Provides software a means of determining the platform at run time.
+ 0x0 = Hardware.
+ 0x1 = Emulator.
+ 0x2 = RTL simulator.
+ 0x3 = ASIM.
+ 0x4-0x7 = reserved. */
+ uint64_t reserved_59_63 : 5;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_mio_fus_dat2 bdk_mio_fus_dat2_t;
+
+#define BDK_MIO_FUS_DAT2 BDK_MIO_FUS_DAT2_FUNC()
+static inline uint64_t BDK_MIO_FUS_DAT2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_FUS_DAT2_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x87e003001410ll;
+ __bdk_csr_fatal("MIO_FUS_DAT2", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_FUS_DAT2 bdk_mio_fus_dat2_t
+#define bustype_BDK_MIO_FUS_DAT2 BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_FUS_DAT2 "MIO_FUS_DAT2"
+#define device_bar_BDK_MIO_FUS_DAT2 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_FUS_DAT2 0
+#define arguments_BDK_MIO_FUS_DAT2 -1,-1,-1,-1
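+
+/* A decoding sketch for two DAT2 fields described above: CHIP_ID packs the
+ * silicon pass as \<21:19\> major and \<18:16\> minor (so "Pass X.Y" is
+ * major + 1 and minor), and DORM_CRYPTO/NOCRYPTO together select the
+ * crypto mode. Assumes BDK_CSR_INIT from bdk-csr.h; printf is purely
+ * illustrative. */
+static inline void example_decode_dat2(bdk_node_t node)
+{
+    BDK_CSR_INIT(dat2, node, BDK_MIO_FUS_DAT2);
+    int major = (dat2.s.chip_id >> 3) & 7; /* register bits <21:19> */
+    int minor = dat2.s.chip_id & 7;        /* register bits <18:16> */
+    printf("Pass %d.%d\n", major + 1, minor);
+    if (dat2.s.dorm_crypto && !dat2.s.nocrypto)
+        printf("Dormant crypto: locked until RNM_EER_KEY is written\n");
+    else if (!dat2.s.dorm_crypto && dat2.s.nocrypto)
+        printf("AES/SHA/PMULL disabled\n");
+}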
+
+/**
+ * Register (RSL) mio_fus_dat3
+ *
+ * MIO Fuse Data Register 3
+ */
+union bdk_mio_fus_dat3
+{
+ uint64_t u;
+ struct bdk_mio_fus_dat3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ema0 : 6; /**< [ 63: 58](RO) Fuse information - EMA0.
+ Internal:
+ Default value is 0x11. Soft or hard blow of these fuses
+ will XOR with this value. */
+ uint64_t pll_ctl : 10; /**< [ 57: 48](RO) Reserved. */
+ uint64_t dfa_info_dte : 3; /**< [ 47: 45](RO) Fuse information - HFA information (HTE). */
+ uint64_t dfa_info_clm : 4; /**< [ 44: 41](RO) Fuse information - HFA information (cluster mask). */
+ uint64_t pll_alt_matrix : 1; /**< [ 40: 40](RO) Fuse information - Select alternate PLL matrix. */
+ uint64_t pll_bwadj_denom : 2; /**< [ 39: 38](RO) Select CLKF denominator for BWADJ value.
+ 0x0 = Selects CLKF/4.
+ 0x1 = Selects CLKF/2.
+ 0x2 = Selects CLKF/8. */
+ uint64_t efus_lck_rsv : 1; /**< [ 37: 37](RO) Fuse information - efuse lockdown. */
+ uint64_t efus_lck_man : 1; /**< [ 36: 36](RO) Fuse information - efuse lockdown. */
+ uint64_t pll_half_dis : 1; /**< [ 35: 35](RO/H) Fuse information - coprocessor-clock PLL control. */
+ uint64_t l2c_crip : 3; /**< [ 34: 32](RO) Fuse information - L2C cripple:
+ 0x0 = Full cache (16-way, 2 MB).
+ 0x1 = 3/4 ways (12-way, 1.5 MB).
+ 0x2 = 1/2 ways (8-way, 1 MB).
+ 0x3 = 1/4 ways (4-way, 512 KB).
+ 0x4-0x7 = Reserved. */
+ uint64_t use_int_refclk : 1; /**< [ 31: 31](RO) If set, use the PLL output as the low-jitter reference clock to the rclk DLLs. Default is
+ to use the internal input reference clock. */
+ uint64_t zip_info : 2; /**< [ 30: 29](RO) Fuse information - ZIP information. */
+ uint64_t bar2_sz_conf : 1; /**< [ 28: 28](RO) Fuse information - When 0, BAR2 size conforms to PCIe specification. */
+ uint64_t efus_lck : 1; /**< [ 27: 27](RO) Fuse information - efuse lockdown. */
+ uint64_t efus_ign : 1; /**< [ 26: 26](RO) Fuse information - efuse ignore. */
+ uint64_t nozip : 1; /**< [ 25: 25](RO) Fuse information - ZIP disable. */
+ uint64_t nodfa_dte : 1; /**< [ 24: 24](RO) Fuse information - HFA disable (HTE). */
+ uint64_t ema1 : 6; /**< [ 23: 18](RO) Fuse information - EMA1.
+ Internal:
+ Default value is 0x02. Soft or hard blow of these fuses
+ will XOR with this value. */
+ uint64_t nohna_dte : 1; /**< [ 17: 17](RO) Fuse information - HNA disable (DTE). */
+ uint64_t hna_info_dte : 3; /**< [ 16: 14](RO) Fuse information - HNA information (DTE). */
+ uint64_t hna_info_clm : 4; /**< [ 13: 10](RO) Fuse information - HNA information (cluster mask). */
+ uint64_t tns_cripple : 1; /**< [ 9: 9](RO) When set to 1, TNS switching functionality is permanently disabled. */
+ uint64_t core_pll_mul : 5; /**< [ 8: 4](RO) Core-clock PLL multiplier hardware limit. Indicates maximum
+ value for PLL_MUL[5:1] straps. Any strap setting above this
+ value will be ignored. A value of 0 indicates no hardware limit. */
+ uint64_t pnr_pll_mul : 4; /**< [ 3: 0](RO) Coprocessor-clock PLL multiplier hardware limit. Indicates maximum
+ value for PNR_MUL[5:1] straps. Any strap setting above this
+ value will be ignored. A value of 0 indicates no hardware limit. */
+#else /* Word 0 - Little Endian */
+ uint64_t pnr_pll_mul : 4; /**< [ 3: 0](RO) Coprocessor-clock PLL multiplier hardware limit. Indicates maximum
+ value for PNR_MUL[5:1] straps. Any strap setting above this
+ value will be ignored. A value of 0 indicates no hardware limit. */
+ uint64_t core_pll_mul : 5; /**< [ 8: 4](RO) Core-clock PLL multiplier hardware limit. Indicates maximum
+ value for PLL_MUL[5:1] straps. Any strap setting above this
+ value will be ignored. A value of 0 indicates no hardware limit. */
+ uint64_t tns_cripple : 1; /**< [ 9: 9](RO) When set to 1, TNS switching functionality is permanently disabled. */
+ uint64_t hna_info_clm : 4; /**< [ 13: 10](RO) Fuse information - HNA information (cluster mask). */
+ uint64_t hna_info_dte : 3; /**< [ 16: 14](RO) Fuse information - HNA information (DTE). */
+ uint64_t nohna_dte : 1; /**< [ 17: 17](RO) Fuse information - HNA disable (DTE). */
+ uint64_t ema1 : 6; /**< [ 23: 18](RO) Fuse information - EMA1.
+ Internal:
+ Default value is 0x02. Soft or hard blow of these fuses
+ will XOR with this value. */
+ uint64_t nodfa_dte : 1; /**< [ 24: 24](RO) Fuse information - HFA disable (HTE). */
+ uint64_t nozip : 1; /**< [ 25: 25](RO) Fuse information - ZIP disable. */
+ uint64_t efus_ign : 1; /**< [ 26: 26](RO) Fuse information - efuse ignore. */
+ uint64_t efus_lck : 1; /**< [ 27: 27](RO) Fuse information - efuse lockdown. */
+ uint64_t bar2_sz_conf : 1; /**< [ 28: 28](RO) Fuse information - When 0, BAR2 size conforms to PCIe specification. */
+ uint64_t zip_info : 2; /**< [ 30: 29](RO) Fuse information - ZIP information. */
+ uint64_t use_int_refclk : 1; /**< [ 31: 31](RO) If set, use the PLL output as the low-jitter reference clock to the rclk DLLs. Default is
+ to use the internal input reference clock. */
+ uint64_t l2c_crip : 3; /**< [ 34: 32](RO) Fuse information - L2C cripple:
+ 0x0 = Full cache (16-way, 2 MB).
+ 0x1 = 3/4 ways (12-way, 1.5 MB).
+ 0x2 = 1/2 ways (8-way, 1 MB).
+ 0x3 = 1/4 ways (4-way, 512 KB).
+ 0x4-0x7 = Reserved. */
+ uint64_t pll_half_dis : 1; /**< [ 35: 35](RO/H) Fuse information - coprocessor-clock PLL control. */
+ uint64_t efus_lck_man : 1; /**< [ 36: 36](RO) Fuse information - efuse lockdown. */
+ uint64_t efus_lck_rsv : 1; /**< [ 37: 37](RO) Fuse information - efuse lockdown. */
+ uint64_t pll_bwadj_denom : 2; /**< [ 39: 38](RO) Select CLKF denominator for BWADJ value.
+ 0x0 = Selects CLKF/4.
+ 0x1 = Selects CLKF/2.
+ 0x2 = Selects CLKF/8. */
+ uint64_t pll_alt_matrix : 1; /**< [ 40: 40](RO) Fuse information - Select alternate PLL matrix. */
+ uint64_t dfa_info_clm : 4; /**< [ 44: 41](RO) Fuse information - HFA information (cluster mask). */
+ uint64_t dfa_info_dte : 3; /**< [ 47: 45](RO) Fuse information - HFA information (HTE). */
+ uint64_t pll_ctl : 10; /**< [ 57: 48](RO) Reserved. */
+ uint64_t ema0 : 6; /**< [ 63: 58](RO) Fuse information - EMA0.
+ Internal:
+ Default value is 0x11. Soft or hard blow of these fuses
+ will XOR with this value. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_mio_fus_dat3_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ema0 : 6; /**< [ 63: 58](RO) Fuse information - EMA0.
+ Internal:
+ Default value is 0x11. Soft or hard blow of these fuses
+ will XOR with this value. */
+ uint64_t pll_ctl : 10; /**< [ 57: 48](RO) Reserved. */
+ uint64_t dfa_info_dte : 3; /**< [ 47: 45](RO) Fuse information - HFA information (HTE). */
+ uint64_t dfa_info_clm : 4; /**< [ 44: 41](RO) Fuse information - HFA information (cluster mask). */
+ uint64_t pll_alt_matrix : 1; /**< [ 40: 40](RO) Fuse information - Select alternate PLL matrix. */
+ uint64_t pll_bwadj_denom : 2; /**< [ 39: 38](RO) Select CLKF denominator for BWADJ value.
+ 0x0 = Selects CLKF/4.
+ 0x1 = Selects CLKF/2.
+ 0x2 = Selects CLKF/8. */
+ uint64_t efus_lck_rsv : 1; /**< [ 37: 37](RO) Fuse information - efuse lockdown. */
+ uint64_t efus_lck_man : 1; /**< [ 36: 36](RO) Fuse information - efuse lockdown. */
+ uint64_t pll_half_dis : 1; /**< [ 35: 35](RO/H) Fuse information - coprocessor-clock PLL control. */
+ uint64_t l2c_crip : 3; /**< [ 34: 32](RO) Fuse information - L2C cripple:
+ 0x0 = Full cache (16-way, 2 MB).
+ 0x1 = 3/4 ways (12-way, 1.5 MB).
+ 0x2 = 1/2 ways (8-way, 1 MB).
+ 0x3 = 1/4 ways (4-way, 512 KB).
+ 0x4-0x7 = Reserved. */
+ uint64_t use_int_refclk : 1; /**< [ 31: 31](RO) If set, use the PLL output as the low-jitter reference clock to the rclk DLLs. Default is
+ to use the internal input reference clock. */
+ uint64_t zip_info : 2; /**< [ 30: 29](RO) Fuse information - ZIP information. */
+ uint64_t bar2_sz_conf : 1; /**< [ 28: 28](RO) Fuse information - When 0, BAR2 size conforms to PCIe specification. */
+ uint64_t efus_lck : 1; /**< [ 27: 27](RO) Fuse information - efuse lockdown. */
+ uint64_t efus_ign : 1; /**< [ 26: 26](RO) Fuse information - efuse ignore. */
+ uint64_t nozip : 1; /**< [ 25: 25](RO) Fuse information - ZIP disable. */
+ uint64_t nodfa_dte : 1; /**< [ 24: 24](RO) Fuse information - HFA disable (HTE). */
+ uint64_t ema1 : 6; /**< [ 23: 18](RO) Fuse information - EMA1.
+ Internal:
+ Default value is 0x02. Soft or hard blow of these fuses
+ will XOR with this value. */
+ uint64_t nohna_dte : 1; /**< [ 17: 17](RO) Fuse information - HNA disable (DTE). */
+ uint64_t hna_info_dte : 3; /**< [ 16: 14](RO) Fuse information - HNA information (DTE). */
+ uint64_t hna_info_clm : 4; /**< [ 13: 10](RO) Fuse information - HNA information (cluster mask). */
+ uint64_t tns_cripple : 1; /**< [ 9: 9](RO) Reserved.
+ Internal:
+ When set to 1, TNS switching functionality is permanently disabled. */
+ uint64_t core_pll_mul : 5; /**< [ 8: 4](RO) Core-clock PLL multiplier hardware limit. Indicates maximum
+ value for PLL_MUL[5:1] straps. Any strap setting above this
+ value will be ignored. A value of 0 indicates no hardware limit. */
+ uint64_t pnr_pll_mul : 4; /**< [ 3: 0](RO) Coprocessor-clock PLL multiplier hardware limit. Indicates maximum
+ value for PNR_MUL[5:1] straps. Any strap setting above this
+ value will be ignored. A value of 0 indicates no hardware limit. */
+#else /* Word 0 - Little Endian */
+ uint64_t pnr_pll_mul : 4; /**< [ 3: 0](RO) Coprocessor-clock PLL multiplier hardware limit. Indicates maximum
+ value for PNR_MUL[5:1] straps. Any strap setting above this
+ value will be ignored. A value of 0 indicates no hardware limit. */
+ uint64_t core_pll_mul : 5; /**< [ 8: 4](RO) Core-clock PLL multiplier hardware limit. Indicates maximum
+ value for PLL_MUL[5:1] straps. Any strap setting above this
+ value will be ignored. A value of 0 indicates no hardware limit. */
+ uint64_t tns_cripple : 1; /**< [ 9: 9](RO) Reserved.
+ Internal:
+ When set to 1, TNS switching functionality is permanently disabled. */
+ uint64_t hna_info_clm : 4; /**< [ 13: 10](RO) Fuse information - HNA information (cluster mask). */
+ uint64_t hna_info_dte : 3; /**< [ 16: 14](RO) Fuse information - HNA information (DTE). */
+ uint64_t nohna_dte : 1; /**< [ 17: 17](RO) Fuse information - HNA disable (DTE). */
+ uint64_t ema1 : 6; /**< [ 23: 18](RO) Fuse information - EMA1.
+ Internal:
+ Default value is 0x02. Soft or hard blow of these fuses
+ will XOR with this value. */
+ uint64_t nodfa_dte : 1; /**< [ 24: 24](RO) Fuse information - HFA disable (HTE). */
+ uint64_t nozip : 1; /**< [ 25: 25](RO) Fuse information - ZIP disable. */
+ uint64_t efus_ign : 1; /**< [ 26: 26](RO) Fuse information - efuse ignore. */
+ uint64_t efus_lck : 1; /**< [ 27: 27](RO) Fuse information - efuse lockdown. */
+ uint64_t bar2_sz_conf : 1; /**< [ 28: 28](RO) Fuse information - When 0, BAR2 size conforms to PCIe specification. */
+ uint64_t zip_info : 2; /**< [ 30: 29](RO) Fuse information - ZIP information. */
+ uint64_t use_int_refclk : 1; /**< [ 31: 31](RO) If set, use the PLL output as the low-jitter reference clock to the rclk DLLs. Default is
+ to use the internal input reference clock. */
+ uint64_t l2c_crip : 3; /**< [ 34: 32](RO) Fuse information - L2C cripple:
+ 0x0 = Full cache (16-way, 2 MB).
+ 0x1 = 3/4 ways (12-way, 1.5 MB).
+ 0x2 = 1/2 ways (8-way, 1 MB).
+ 0x3 = 1/4 ways (4-way, 512 KB).
+ 0x4-0x7 = Reserved. */
+ uint64_t pll_half_dis : 1; /**< [ 35: 35](RO/H) Fuse information - coprocessor-clock PLL control. */
+ uint64_t efus_lck_man : 1; /**< [ 36: 36](RO) Fuse information - efuse lockdown. */
+ uint64_t efus_lck_rsv : 1; /**< [ 37: 37](RO) Fuse information - efuse lockdown. */
+ uint64_t pll_bwadj_denom : 2; /**< [ 39: 38](RO) Select CLKF denominator for BWADJ value.
+ 0x0 = Selects CLKF/4.
+ 0x1 = Selects CLKF/2.
+ 0x2 = Selects CLKF/8. */
+ uint64_t pll_alt_matrix : 1; /**< [ 40: 40](RO) Fuse information - Select alternate PLL matrix. */
+ uint64_t dfa_info_clm : 4; /**< [ 44: 41](RO) Fuse information - HFA information (cluster mask). */
+ uint64_t dfa_info_dte : 3; /**< [ 47: 45](RO) Fuse information - HFA information (HTE). */
+ uint64_t pll_ctl : 10; /**< [ 57: 48](RO) Reserved. */
+ uint64_t ema0 : 6; /**< [ 63: 58](RO) Fuse information - EMA0.
+ Internal:
+ Default value is 0x11. Soft or hard blow of these fuses
+ will XOR with this value. */
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_mio_fus_dat3_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ema0 : 6; /**< [ 63: 58](RO) Fuse information - EMA0.
+ Internal:
+ Default value is 0x11. Soft or hard blow of these fuses
+ will XOR with this value. */
+ uint64_t pll_ctl : 10; /**< [ 57: 48](RO) Reserved. */
+ uint64_t dfa_info_dte : 3; /**< [ 47: 45](RO) Fuse information - HFA information (HTE). */
+ uint64_t dfa_info_clm : 4; /**< [ 44: 41](RO) Fuse information - HFA information (cluster mask). */
+ uint64_t pll_alt_matrix : 1; /**< [ 40: 40](RO) Fuse information - Select alternate PLL matrix. */
+ uint64_t pll_bwadj_denom : 2; /**< [ 39: 38](RO) Select CLKF denominator for BWADJ value.
+ 0x0 = Selects CLKF/4.
+ 0x1 = Selects CLKF/2.
+ 0x2 = Selects CLKF/8. */
+ uint64_t efus_lck_rsv : 1; /**< [ 37: 37](RO) Fuse information - efuse lockdown. */
+ uint64_t efus_lck_man : 1; /**< [ 36: 36](RO) Fuse information - efuse lockdown. */
+ uint64_t pll_half_dis : 1; /**< [ 35: 35](RO/H) Fuse information - coprocessor-clock PLL control. */
+ uint64_t l2c_crip : 3; /**< [ 34: 32](RO) Fuse information - L2C cripple:
+ 0x0 = Full cache (16-way, 16 MB).
+ 0x1 = 3/4 ways (12-way, 12 MB).
+ 0x2 = 1/2 ways (8-way, 8 MB).
+                                                                 0x3 = 1/4 ways (4-way, 4 MB).
+ 0x4-0x7 = Reserved. */
+ uint64_t use_int_refclk : 1; /**< [ 31: 31](RO) If set, use the PLL output as the low-jitter reference clock to the rclk DLLs. Default is
+ to use the internal input reference clock. */
+ uint64_t zip_info : 2; /**< [ 30: 29](RO) Fuse information - ZIP information. */
+ uint64_t bar2_sz_conf : 1; /**< [ 28: 28](RO) Fuse information - When 0, BAR2 size conforms to PCIe specification. */
+ uint64_t efus_lck : 1; /**< [ 27: 27](RO) Fuse information - efuse lockdown. */
+ uint64_t efus_ign : 1; /**< [ 26: 26](RO) Fuse information - efuse ignore. */
+ uint64_t nozip : 1; /**< [ 25: 25](RO) Fuse information - ZIP disable. */
+ uint64_t nodfa_dte : 1; /**< [ 24: 24](RO) Fuse information - HFA disable (HTE). */
+ uint64_t ema1 : 6; /**< [ 23: 18](RO) Fuse information - EMA1.
+ Internal:
+ Default value is 0x02. Soft or hard blow of these fuses
+ will XOR with this value. */
+ uint64_t nohna_dte : 1; /**< [ 17: 17](RO) Fuse information - HNA disable (DTE). */
+ uint64_t hna_info_dte : 3; /**< [ 16: 14](RO) Fuse information - HNA information (DTE). */
+ uint64_t hna_info_clm : 4; /**< [ 13: 10](RO) Fuse information - HNA information (cluster mask). */
+ uint64_t tns_cripple : 1; /**< [ 9: 9](RO) When set to 1, TNS switching functionality is permanently disabled. */
+ uint64_t core_pll_mul : 5; /**< [ 8: 4](RO) Core-clock PLL multiplier hardware limit. Indicates maximum
+ value for PLL_MUL[5:1] straps. Any strap setting above this
+ value will be ignored. A value of 0 indicates no hardware limit. */
+ uint64_t pnr_pll_mul : 4; /**< [ 3: 0](RO) Coprocessor-clock PLL multiplier hardware limit. Indicates maximum
+ value for PNR_MUL[5:1] straps. Any strap setting above this
+ value will be ignored. A value of 0 indicates no hardware limit. */
+#else /* Word 0 - Little Endian */
+ uint64_t pnr_pll_mul : 4; /**< [ 3: 0](RO) Coprocessor-clock PLL multiplier hardware limit. Indicates maximum
+ value for PNR_MUL[5:1] straps. Any strap setting above this
+ value will be ignored. A value of 0 indicates no hardware limit. */
+ uint64_t core_pll_mul : 5; /**< [ 8: 4](RO) Core-clock PLL multiplier hardware limit. Indicates maximum
+ value for PLL_MUL[5:1] straps. Any strap setting above this
+ value will be ignored. A value of 0 indicates no hardware limit. */
+ uint64_t tns_cripple : 1; /**< [ 9: 9](RO) When set to 1, TNS switching functionality is permanently disabled. */
+ uint64_t hna_info_clm : 4; /**< [ 13: 10](RO) Fuse information - HNA information (cluster mask). */
+ uint64_t hna_info_dte : 3; /**< [ 16: 14](RO) Fuse information - HNA information (DTE). */
+ uint64_t nohna_dte : 1; /**< [ 17: 17](RO) Fuse information - HNA disable (DTE). */
+ uint64_t ema1 : 6; /**< [ 23: 18](RO) Fuse information - EMA1.
+ Internal:
+ Default value is 0x02. Soft or hard blow of these fuses
+ will XOR with this value. */
+ uint64_t nodfa_dte : 1; /**< [ 24: 24](RO) Fuse information - HFA disable (HTE). */
+ uint64_t nozip : 1; /**< [ 25: 25](RO) Fuse information - ZIP disable. */
+ uint64_t efus_ign : 1; /**< [ 26: 26](RO) Fuse information - efuse ignore. */
+ uint64_t efus_lck : 1; /**< [ 27: 27](RO) Fuse information - efuse lockdown. */
+ uint64_t bar2_sz_conf : 1; /**< [ 28: 28](RO) Fuse information - When 0, BAR2 size conforms to PCIe specification. */
+ uint64_t zip_info : 2; /**< [ 30: 29](RO) Fuse information - ZIP information. */
+ uint64_t use_int_refclk : 1; /**< [ 31: 31](RO) If set, use the PLL output as the low-jitter reference clock to the rclk DLLs. Default is
+ to use the internal input reference clock. */
+ uint64_t l2c_crip : 3; /**< [ 34: 32](RO) Fuse information - L2C cripple:
+ 0x0 = Full cache (16-way, 16 MB).
+ 0x1 = 3/4 ways (12-way, 12 MB).
+ 0x2 = 1/2 ways (8-way, 8 MB).
+                                                                 0x3 = 1/4 ways (4-way, 4 MB).
+ 0x4-0x7 = Reserved. */
+ uint64_t pll_half_dis : 1; /**< [ 35: 35](RO/H) Fuse information - coprocessor-clock PLL control. */
+ uint64_t efus_lck_man : 1; /**< [ 36: 36](RO) Fuse information - efuse lockdown. */
+ uint64_t efus_lck_rsv : 1; /**< [ 37: 37](RO) Fuse information - efuse lockdown. */
+ uint64_t pll_bwadj_denom : 2; /**< [ 39: 38](RO) Select CLKF denominator for BWADJ value.
+ 0x0 = Selects CLKF/4.
+ 0x1 = Selects CLKF/2.
+ 0x2 = Selects CLKF/8. */
+ uint64_t pll_alt_matrix : 1; /**< [ 40: 40](RO) Fuse information - Select alternate PLL matrix. */
+ uint64_t dfa_info_clm : 4; /**< [ 44: 41](RO) Fuse information - HFA information (cluster mask). */
+ uint64_t dfa_info_dte : 3; /**< [ 47: 45](RO) Fuse information - HFA information (HTE). */
+ uint64_t pll_ctl : 10; /**< [ 57: 48](RO) Reserved. */
+ uint64_t ema0 : 6; /**< [ 63: 58](RO) Fuse information - EMA0.
+ Internal:
+ Default value is 0x11. Soft or hard blow of these fuses
+ will XOR with this value. */
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_mio_fus_dat3_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ema0 : 6; /**< [ 63: 58](RO) Fuse information - EMA0.
+ Internal:
+ Default value is 0x11. Soft or hard blow of these fuses
+ will XOR with this value. */
+ uint64_t pll_ctl : 10; /**< [ 57: 48](RO) Reserved. */
+ uint64_t dfa_info_dte : 3; /**< [ 47: 45](RO) Fuse information - HFA information (HTE). */
+ uint64_t dfa_info_clm : 4; /**< [ 44: 41](RO) Fuse information - HFA information (cluster mask). */
+ uint64_t pll_alt_matrix : 1; /**< [ 40: 40](RO) Fuse information - Select alternate PLL matrix. */
+ uint64_t pll_bwadj_denom : 2; /**< [ 39: 38](RO) Select CLKF denominator for BWADJ value.
+ 0x0 = Selects CLKF/4.
+ 0x1 = Selects CLKF/2.
+ 0x2 = Selects CLKF/8. */
+ uint64_t efus_lck_rsv : 1; /**< [ 37: 37](RO) Fuse information - efuse lockdown. */
+ uint64_t efus_lck_man : 1; /**< [ 36: 36](RO) Fuse information - efuse lockdown. */
+ uint64_t pll_half_dis : 1; /**< [ 35: 35](RO/H) Fuse information - coprocessor-clock PLL control. */
+ uint64_t l2c_crip : 3; /**< [ 34: 32](RO) Fuse information - L2C cripple:
+ 0x0 = Full cache (16-way, 8 MB).
+ 0x1 = 3/4 ways (12-way, 6 MB).
+ 0x2 = 1/2 ways (8-way, 4 MB).
+ 0x3 = 1/4 ways (4-way, 2 MB).
+ 0x4-0x7 = Reserved. */
+ uint64_t use_int_refclk : 1; /**< [ 31: 31](RO) If set, use the PLL output as the low-jitter reference clock to the rclk DLLs. Default is
+ to use the internal input reference clock. */
+ uint64_t zip_info : 2; /**< [ 30: 29](RO) Fuse information - ZIP information. */
+ uint64_t bar2_sz_conf : 1; /**< [ 28: 28](RO) Fuse information - When 0, BAR2 size conforms to PCIe specification. */
+ uint64_t efus_lck : 1; /**< [ 27: 27](RO) Fuse information - efuse lockdown. */
+ uint64_t efus_ign : 1; /**< [ 26: 26](RO) Fuse information - efuse ignore. */
+ uint64_t nozip : 1; /**< [ 25: 25](RO) Fuse information - ZIP disable. */
+ uint64_t nodfa_dte : 1; /**< [ 24: 24](RO) Fuse information - HFA disable (HTE). */
+ uint64_t ema1 : 6; /**< [ 23: 18](RO) Fuse information - EMA1.
+ Internal:
+ Default value is 0x02. Soft or hard blow of these fuses
+ will XOR with this value. */
+ uint64_t nohna_dte : 1; /**< [ 17: 17](RO) Fuse information - HNA disable (DTE). */
+ uint64_t hna_info_dte : 3; /**< [ 16: 14](RO) Fuse information - HNA information (DTE). */
+ uint64_t hna_info_clm : 4; /**< [ 13: 10](RO) Fuse information - HNA information (cluster mask). */
+ uint64_t tns_cripple : 1; /**< [ 9: 9](RO) Reserved.
+ Internal:
+ When set to 1, TNS switching functionality is permanently disabled. */
+ uint64_t core_pll_mul : 5; /**< [ 8: 4](RO) Core-clock PLL multiplier hardware limit. Indicates maximum
+ value for PLL_MUL[5:1] straps. Any strap setting above this
+ value will be ignored. A value of 0 indicates no hardware limit. */
+ uint64_t pnr_pll_mul : 4; /**< [ 3: 0](RO) Coprocessor-clock PLL multiplier hardware limit. Indicates maximum
+ value for PNR_MUL[5:1] straps. Any strap setting above this
+ value will be ignored. A value of 0 indicates no hardware limit. */
+#else /* Word 0 - Little Endian */
+ uint64_t pnr_pll_mul : 4; /**< [ 3: 0](RO) Coprocessor-clock PLL multiplier hardware limit. Indicates maximum
+ value for PNR_MUL[5:1] straps. Any strap setting above this
+ value will be ignored. A value of 0 indicates no hardware limit. */
+ uint64_t core_pll_mul : 5; /**< [ 8: 4](RO) Core-clock PLL multiplier hardware limit. Indicates maximum
+ value for PLL_MUL[5:1] straps. Any strap setting above this
+ value will be ignored. A value of 0 indicates no hardware limit. */
+ uint64_t tns_cripple : 1; /**< [ 9: 9](RO) Reserved.
+ Internal:
+ When set to 1, TNS switching functionality is permanently disabled. */
+ uint64_t hna_info_clm : 4; /**< [ 13: 10](RO) Fuse information - HNA information (cluster mask). */
+ uint64_t hna_info_dte : 3; /**< [ 16: 14](RO) Fuse information - HNA information (DTE). */
+ uint64_t nohna_dte : 1; /**< [ 17: 17](RO) Fuse information - HNA disable (DTE). */
+ uint64_t ema1 : 6; /**< [ 23: 18](RO) Fuse information - EMA1.
+ Internal:
+ Default value is 0x02. Soft or hard blow of these fuses
+ will XOR with this value. */
+ uint64_t nodfa_dte : 1; /**< [ 24: 24](RO) Fuse information - HFA disable (HTE). */
+ uint64_t nozip : 1; /**< [ 25: 25](RO) Fuse information - ZIP disable. */
+ uint64_t efus_ign : 1; /**< [ 26: 26](RO) Fuse information - efuse ignore. */
+ uint64_t efus_lck : 1; /**< [ 27: 27](RO) Fuse information - efuse lockdown. */
+ uint64_t bar2_sz_conf : 1; /**< [ 28: 28](RO) Fuse information - When 0, BAR2 size conforms to PCIe specification. */
+ uint64_t zip_info : 2; /**< [ 30: 29](RO) Fuse information - ZIP information. */
+ uint64_t use_int_refclk : 1; /**< [ 31: 31](RO) If set, use the PLL output as the low-jitter reference clock to the rclk DLLs. Default is
+ to use the internal input reference clock. */
+ uint64_t l2c_crip : 3; /**< [ 34: 32](RO) Fuse information - L2C cripple:
+ 0x0 = Full cache (16-way, 8 MB).
+ 0x1 = 3/4 ways (12-way, 6 MB).
+ 0x2 = 1/2 ways (8-way, 4 MB).
+ 0x3 = 1/4 ways (4-way, 2 MB).
+ 0x4-0x7 = Reserved. */
+ uint64_t pll_half_dis : 1; /**< [ 35: 35](RO/H) Fuse information - coprocessor-clock PLL control. */
+ uint64_t efus_lck_man : 1; /**< [ 36: 36](RO) Fuse information - efuse lockdown. */
+ uint64_t efus_lck_rsv : 1; /**< [ 37: 37](RO) Fuse information - efuse lockdown. */
+ uint64_t pll_bwadj_denom : 2; /**< [ 39: 38](RO) Select CLKF denominator for BWADJ value.
+ 0x0 = Selects CLKF/4.
+ 0x1 = Selects CLKF/2.
+ 0x2 = Selects CLKF/8. */
+ uint64_t pll_alt_matrix : 1; /**< [ 40: 40](RO) Fuse information - Select alternate PLL matrix. */
+ uint64_t dfa_info_clm : 4; /**< [ 44: 41](RO) Fuse information - HFA information (cluster mask). */
+ uint64_t dfa_info_dte : 3; /**< [ 47: 45](RO) Fuse information - HFA information (HTE). */
+ uint64_t pll_ctl : 10; /**< [ 57: 48](RO) Reserved. */
+ uint64_t ema0 : 6; /**< [ 63: 58](RO) Fuse information - EMA0.
+ Internal:
+ Default value is 0x11. Soft or hard blow of these fuses
+ will XOR with this value. */
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_mio_fus_dat3 bdk_mio_fus_dat3_t;
+
+#define BDK_MIO_FUS_DAT3 BDK_MIO_FUS_DAT3_FUNC()
+static inline uint64_t BDK_MIO_FUS_DAT3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_FUS_DAT3_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x87e003001418ll;
+ __bdk_csr_fatal("MIO_FUS_DAT3", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_FUS_DAT3 bdk_mio_fus_dat3_t
+#define bustype_BDK_MIO_FUS_DAT3 BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_FUS_DAT3 "MIO_FUS_DAT3"
+#define device_bar_BDK_MIO_FUS_DAT3 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_FUS_DAT3 0
+#define arguments_BDK_MIO_FUS_DAT3 -1,-1,-1,-1
+
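+/* Illustrative usage sketch (editorial example, not generated register text):
+ * reading MIO_FUS_DAT3 and extracting the L2C cripple fuses. The cache sizes
+ * the values map to are model-specific (see the cn81xx/cn83xx/cn88xx views
+ * above), but the bit position [34:32] is the same in every layout. This
+ * assumes the common "s" view exposes L2C_CRIP as the per-model views do;
+ * mmio_read64() is a hypothetical 64-bit CSR read, substitute the platform's
+ * CSR accessor.
+ *
+ * static unsigned example_l2c_crip(void)
+ * {
+ *     bdk_mio_fus_dat3_t dat3;
+ *     dat3.u = mmio_read64(BDK_MIO_FUS_DAT3);
+ *     return dat3.s.l2c_crip;  // 0x0 = full cache ... 0x3 = 1/4 ways
+ * }
+ */
+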
+/**
+ * Register (RSL) mio_fus_dat4
+ *
+ * MIO Fuse Data4 Register
+ */
+union bdk_mio_fus_dat4
+{
+ uint64_t u;
+ struct bdk_mio_fus_dat4_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t global_rclk_byp_select : 1; /**< [ 63: 63](RO) Reserved. */
+ uint64_t global_rclk_byp_setting : 11;/**< [ 62: 52](RO) Bits\<11:1\>. Reserved. */
+ uint64_t east_rclk_byp_select : 1; /**< [ 51: 51](RO) Reserved. */
+ uint64_t east_rclk_byp_setting : 12; /**< [ 50: 39](RO) Reserved. */
+ uint64_t cmb_rclk_byp_select : 1; /**< [ 38: 38](RO) Reserved. */
+ uint64_t cmb_rclk_byp_setting : 12; /**< [ 37: 26](RO) Reserved. */
+ uint64_t pp_rclk_byp_select : 1; /**< [ 25: 25](RO) Reserved. */
+ uint64_t pp_rclk_byp_setting : 12; /**< [ 24: 13](RO) Reserved. */
+ uint64_t tad_rclk_byp_select : 1; /**< [ 12: 12](RO) Reserved. */
+ uint64_t tad_rclk_byp_setting : 12; /**< [ 11: 0](RO) Reserved. */
+#else /* Word 0 - Little Endian */
+ uint64_t tad_rclk_byp_setting : 12; /**< [ 11: 0](RO) Reserved. */
+ uint64_t tad_rclk_byp_select : 1; /**< [ 12: 12](RO) Reserved. */
+ uint64_t pp_rclk_byp_setting : 12; /**< [ 24: 13](RO) Reserved. */
+ uint64_t pp_rclk_byp_select : 1; /**< [ 25: 25](RO) Reserved. */
+ uint64_t cmb_rclk_byp_setting : 12; /**< [ 37: 26](RO) Reserved. */
+ uint64_t cmb_rclk_byp_select : 1; /**< [ 38: 38](RO) Reserved. */
+ uint64_t east_rclk_byp_setting : 12; /**< [ 50: 39](RO) Reserved. */
+ uint64_t east_rclk_byp_select : 1; /**< [ 51: 51](RO) Reserved. */
+ uint64_t global_rclk_byp_setting : 11;/**< [ 62: 52](RO) Bits\<11:1\>. Reserved. */
+ uint64_t global_rclk_byp_select : 1; /**< [ 63: 63](RO) Reserved. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_mio_fus_dat4_s cn; */
+};
+typedef union bdk_mio_fus_dat4 bdk_mio_fus_dat4_t;
+
+#define BDK_MIO_FUS_DAT4 BDK_MIO_FUS_DAT4_FUNC()
+static inline uint64_t BDK_MIO_FUS_DAT4_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_FUS_DAT4_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x87e003001420ll;
+ __bdk_csr_fatal("MIO_FUS_DAT4", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_FUS_DAT4 bdk_mio_fus_dat4_t
+#define bustype_BDK_MIO_FUS_DAT4 BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_FUS_DAT4 "MIO_FUS_DAT4"
+#define device_bar_BDK_MIO_FUS_DAT4 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_FUS_DAT4 0
+#define arguments_BDK_MIO_FUS_DAT4 -1,-1,-1,-1
+
+/**
+ * Register (RSL) mio_fus_int
+ *
+ * INTERNAL: MIO Fuse Repair Interrupt Register
+ */
+union bdk_mio_fus_int
+{
+ uint64_t u;
+ struct bdk_mio_fus_int_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t rpr_dbe : 1; /**< [ 1: 1](R/W1C/H) Internal:
+ Indicates an uncorrectable double-bit-error occurred to RPR_MEM. */
+ uint64_t rpr_sbe : 1; /**< [ 0: 0](R/W1C/H) Internal:
+ Indicates a corrected single-bit-error occurred to RPR_MEM. */
+#else /* Word 0 - Little Endian */
+ uint64_t rpr_sbe : 1; /**< [ 0: 0](R/W1C/H) Internal:
+ Indicates a corrected single-bit-error occurred to RPR_MEM. */
+ uint64_t rpr_dbe : 1; /**< [ 1: 1](R/W1C/H) Internal:
+ Indicates an uncorrectable double-bit-error occurred to RPR_MEM. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_mio_fus_int_s cn; */
+};
+typedef union bdk_mio_fus_int bdk_mio_fus_int_t;
+
+#define BDK_MIO_FUS_INT BDK_MIO_FUS_INT_FUNC()
+static inline uint64_t BDK_MIO_FUS_INT_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_FUS_INT_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x87e003001548ll;
+ __bdk_csr_fatal("MIO_FUS_INT", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_FUS_INT bdk_mio_fus_int_t
+#define bustype_BDK_MIO_FUS_INT BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_FUS_INT "MIO_FUS_INT"
+#define device_bar_BDK_MIO_FUS_INT 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_FUS_INT 0
+#define arguments_BDK_MIO_FUS_INT -1,-1,-1,-1
+
+/**
+ * Register (RSL) mio_fus_pdf
+ *
+ * MIO Fuse Product Definition Field Register
+ */
+union bdk_mio_fus_pdf
+{
+ uint64_t u;
+ struct bdk_mio_fus_pdf_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pdf : 64; /**< [ 63: 0](RO) Fuse information--product definition field. */
+#else /* Word 0 - Little Endian */
+ uint64_t pdf : 64; /**< [ 63: 0](RO) Fuse information--product definition field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_mio_fus_pdf_s cn; */
+};
+typedef union bdk_mio_fus_pdf bdk_mio_fus_pdf_t;
+
+#define BDK_MIO_FUS_PDF BDK_MIO_FUS_PDF_FUNC()
+static inline uint64_t BDK_MIO_FUS_PDF_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_FUS_PDF_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x87e003001428ll;
+ __bdk_csr_fatal("MIO_FUS_PDF", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_FUS_PDF bdk_mio_fus_pdf_t
+#define bustype_BDK_MIO_FUS_PDF BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_FUS_PDF "MIO_FUS_PDF"
+#define device_bar_BDK_MIO_FUS_PDF 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_FUS_PDF 0
+#define arguments_BDK_MIO_FUS_PDF -1,-1,-1,-1
+
+/**
+ * Register (RSL) mio_fus_pll
+ *
+ * MIO Fuse PLL Register
+ * This register contains PLL status and controls for the MSC_CLKOUT and
+ * MSC_SYS_CLKOUT pins. The fields are reset to zero on a cold reset.
+ * The values are preserved on both a warm and soft reset starting with pass 3.
+ */
+union bdk_mio_fus_pll
+{
+ uint64_t u;
+ struct bdk_mio_fus_pll_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_15_63 : 49;
+ uint64_t core_status : 3; /**< [ 14: 12](RO) Core-clock PLL status information. */
+ uint64_t reserved_11 : 1;
+ uint64_t pnr_status : 3; /**< [ 10: 8](RO) Coprocessor-clock PLL status information. */
+ uint64_t c_cout_rst : 1; /**< [ 7: 7](R/W) Core clockout postscaler reset. The core clockout postscaler
+ should be placed in reset at least 10 reference-clock cycles prior
+ to changing [C_COUT_SEL]. The core clockout postscaler should remain
+ under reset for at least 10 reference-clock cycles after [C_COUT_SEL]
+ changes. This field is reset to zero on a cold reset, it is preserved
+ on both warm and soft resets. */
+ uint64_t c_cout_sel : 2; /**< [ 6: 5](R/W) Core-clock output select:
+ 0x0 = Core clock.
+ 0x1 = PS output.
+ 0x2 = PLL output.
+ 0x3 = Undivided core clock. */
+ uint64_t pnr_cout_rst : 1; /**< [ 4: 4](R/W) SYS clockout postscaler reset. The PNR clockout postscaler
+ should be placed in reset at least 10 reference-clock cycles
+ prior to changing [PNR_COUT_SEL]. The SYS clockout postscaler
+ should remain under reset for at least 10 reference clock cycles
+ after [PNR_COUT_SEL] changes. */
+ uint64_t pnr_cout_sel : 2; /**< [ 3: 2](R/W) Coprocessor-clock output select:
+ 0x0 = Coprocessor clock.
+ 0x1 = PS output.
+ 0x2 = PLL output.
+ 0x3 = Undivided core clock. */
+ uint64_t reserved_0_1 : 2;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_1 : 2;
+ uint64_t pnr_cout_sel : 2; /**< [ 3: 2](R/W) Coprocessor-clock output select:
+ 0x0 = Coprocessor clock.
+ 0x1 = PS output.
+ 0x2 = PLL output.
+ 0x3 = Undivided core clock. */
+ uint64_t pnr_cout_rst : 1; /**< [ 4: 4](R/W) SYS clockout postscaler reset. The PNR clockout postscaler
+ should be placed in reset at least 10 reference-clock cycles
+ prior to changing [PNR_COUT_SEL]. The SYS clockout postscaler
+ should remain under reset for at least 10 reference clock cycles
+ after [PNR_COUT_SEL] changes. */
+ uint64_t c_cout_sel : 2; /**< [ 6: 5](R/W) Core-clock output select:
+ 0x0 = Core clock.
+ 0x1 = PS output.
+ 0x2 = PLL output.
+ 0x3 = Undivided core clock. */
+ uint64_t c_cout_rst : 1; /**< [ 7: 7](R/W) Core clockout postscaler reset. The core clockout postscaler
+ should be placed in reset at least 10 reference-clock cycles prior
+ to changing [C_COUT_SEL]. The core clockout postscaler should remain
+ under reset for at least 10 reference-clock cycles after [C_COUT_SEL]
+ changes. This field is reset to zero on a cold reset, it is preserved
+ on both warm and soft resets. */
+ uint64_t pnr_status : 3; /**< [ 10: 8](RO) Coprocessor-clock PLL status information. */
+ uint64_t reserved_11 : 1;
+ uint64_t core_status : 3; /**< [ 14: 12](RO) Core-clock PLL status information. */
+ uint64_t reserved_15_63 : 49;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_mio_fus_pll_s cn; */
+};
+typedef union bdk_mio_fus_pll bdk_mio_fus_pll_t;
+
+#define BDK_MIO_FUS_PLL BDK_MIO_FUS_PLL_FUNC()
+static inline uint64_t BDK_MIO_FUS_PLL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_FUS_PLL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x87e003001580ll;
+ __bdk_csr_fatal("MIO_FUS_PLL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_FUS_PLL bdk_mio_fus_pll_t
+#define bustype_BDK_MIO_FUS_PLL BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_FUS_PLL "MIO_FUS_PLL"
+#define device_bar_BDK_MIO_FUS_PLL 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_FUS_PLL 0
+#define arguments_BDK_MIO_FUS_PLL -1,-1,-1,-1
+
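+/* Illustrative sketch (editorial example, not generated register text) of the
+ * clockout-select sequence described for [C_COUT_RST]/[C_COUT_SEL] above:
+ * hold the postscaler in reset for at least 10 reference-clock cycles on both
+ * sides of the select change. mmio_read64()/mmio_write64() and
+ * wait_ref_clk_cycles() are hypothetical helpers; substitute the platform's
+ * CSR accessors and delay routine.
+ *
+ * static void example_set_core_clkout(unsigned sel)
+ * {
+ *     bdk_mio_fus_pll_t pll;
+ *     pll.u = mmio_read64(BDK_MIO_FUS_PLL);
+ *     pll.s.c_cout_rst = 1;              // assert postscaler reset
+ *     mmio_write64(BDK_MIO_FUS_PLL, pll.u);
+ *     wait_ref_clk_cycles(10);           // >= 10 cycles before the change
+ *     pll.s.c_cout_sel = sel & 0x3;      // 0=core, 1=PS, 2=PLL, 3=undivided
+ *     mmio_write64(BDK_MIO_FUS_PLL, pll.u);
+ *     wait_ref_clk_cycles(10);           // >= 10 cycles after the change
+ *     pll.s.c_cout_rst = 0;              // release postscaler reset
+ *     mmio_write64(BDK_MIO_FUS_PLL, pll.u);
+ * }
+ */
+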
+/**
+ * Register (RSL) mio_fus_pname#
+ *
+ * MIO Fuse Product Name Register
+ * ""These registers contain a 24-character string representing the part number,
+ * e.g. "CN8800-2000BG2601-CPT-PR".
+ *
+ * The string is represented in a RAD-40-like encoding, padded with trailing spaces
+ * that must be removed. If the resulting string is empty, the product has not been
+ * fused programmed and the name should be constructed from e.g. the core's device
+ * number.
+ *
+ * Pseudocode for the decoding:
+ * \<pre\>
+ * datap = data_from_fuses;
+ * // where bit 0 of byte 0 array is fuse 1408;
+ * // i.e. bit 0 of MIO_FUS_PNAME(0)
+ * void rad50_decode(const uint8_t* datap, char* bufferp) {
+ * // Psudocode only - assumes datap sized to at least 16 bytes,
+ * // and bufferp to at least 26 characters.
+ * const char* CHAR_MAP = " ABCDEFGHIJKLMNOPQRSTUVWXYZ#.-0123456789";
+ * char* cp = bufferp;
+ * for (int i=0; i\<FUSE_BYTES; i+=2) {
+ * // Data is stored little endian
+ * uint16_t data = ((const uint16_t*)datap)[i/2];
+ * ifndef MACHINE_LITTLE_ENDIAN
+ * data = __swab16(data);
+ * endif
+ * *cp++ = CHAR_MAP[(data/40/40) % 40];
+ * *cp++ = CHAR_MAP[(data/40) % 40];
+ * *cp++ = CHAR_MAP[(data) % 40];
+ * }
+ * *cp++ = '\0';
+ * for (cp = bufferp+strlen(bufferp)-1; cp\>=bufferp && isspace(*cp); --cp) *cp='\0';
+ * }
+ * \</pre\>"
+ *
+ * Internal:
+ * Fuse[1535:1408]."
+ */
+union bdk_mio_fus_pnamex
+{
+ uint64_t u;
+ struct bdk_mio_fus_pnamex_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t dat : 64; /**< [ 63: 0](RO/H) Product name information. */
+#else /* Word 0 - Little Endian */
+ uint64_t dat : 64; /**< [ 63: 0](RO/H) Product name information. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_mio_fus_pnamex_s cn; */
+};
+typedef union bdk_mio_fus_pnamex bdk_mio_fus_pnamex_t;
+
+static inline uint64_t BDK_MIO_FUS_PNAMEX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_FUS_PNAMEX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e003001440ll + 8ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e003001440ll + 8ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X) && (a<=1))
+ return 0x87e003001440ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("MIO_FUS_PNAMEX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_FUS_PNAMEX(a) bdk_mio_fus_pnamex_t
+#define bustype_BDK_MIO_FUS_PNAMEX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_FUS_PNAMEX(a) "MIO_FUS_PNAMEX"
+#define device_bar_BDK_MIO_FUS_PNAMEX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_FUS_PNAMEX(a) (a)
+#define arguments_BDK_MIO_FUS_PNAMEX(a) (a),-1,-1,-1
+
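+/* Illustrative sketch (editorial example, not generated register text):
+ * collecting the 16 product-name fuse bytes from MIO_FUS_PNAME(0..1) in the
+ * little-endian order the rad50_decode() pseudocode above expects.
+ * mmio_read64() is a hypothetical 64-bit CSR read.
+ *
+ * static void example_read_pname_bytes(uint8_t bytes[16])
+ * {
+ *     for (int i = 0; i < 2; i++) {
+ *         uint64_t v = mmio_read64(BDK_MIO_FUS_PNAMEX(i));
+ *         for (int b = 0; b < 8; b++)
+ *             bytes[i * 8 + b] = (v >> (8 * b)) & 0xff;  // LSB first
+ *     }
+ * }
+ */
+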
+/**
+ * Register (RSL) mio_fus_prog
+ *
+ * INTERNAL: MIO Fuse Programming Register
+ */
+union bdk_mio_fus_prog
+{
+ uint64_t u;
+ struct bdk_mio_fus_prog_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t sft : 1; /**< [ 1: 1](R/W/H) Internal:
+ When set with [PROG], causes only the local storage to change and will
+ not blow any fuses. A soft blow is still subject to lockdown fuses.
+ Hardware will clear this bit when the program operation is complete.
+                                                                 Soft blown fuses will become active after either a soft or warm
+ reset but will not persist through a cold reset. */
+ uint64_t prog : 1; /**< [ 0: 0](R/W/H) Internal:
+ When written to one by software, blow the fuse bank. Hardware will
+ clear when the program operation is complete. To write a bank of
+ fuses, software must set MIO_FUS_WADR[ADDR] to the bank to be
+                                                                 indicate which fuses to blow. Once ADDR and DAT are set up,
+                                                                 software can write to MIO_FUS_PROG[PROG] to start the bank write
+ Software can write to MIO_FUS_PROG[PROG] to start the bank write
+ and poll on [PROG]. Once [PROG] is clear, the bank write is complete.
+ MIO_FUS_READ_TIMES[WRSTB_WH] determines the time for the operation
+ to complete. New fuses will become active after a reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t prog : 1; /**< [ 0: 0](R/W/H) Internal:
+ When written to one by software, blow the fuse bank. Hardware will
+ clear when the program operation is complete. To write a bank of
+ fuses, software must set MIO_FUS_WADR[ADDR] to the bank to be
+                                                                 indicate which fuses to blow. Once ADDR and DAT are set up,
+                                                                 software can write to MIO_FUS_PROG[PROG] to start the bank write
+ Software can write to MIO_FUS_PROG[PROG] to start the bank write
+ and poll on [PROG]. Once [PROG] is clear, the bank write is complete.
+ MIO_FUS_READ_TIMES[WRSTB_WH] determines the time for the operation
+ to complete. New fuses will become active after a reset. */
+ uint64_t sft : 1; /**< [ 1: 1](R/W/H) Internal:
+ When set with [PROG], causes only the local storage to change and will
+ not blow any fuses. A soft blow is still subject to lockdown fuses.
+ Hardware will clear this bit when the program operation is complete.
+                                                                 Soft blown fuses will become active after either a soft or warm
+ reset but will not persist through a cold reset. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_mio_fus_prog_s cn; */
+};
+typedef union bdk_mio_fus_prog bdk_mio_fus_prog_t;
+
+#define BDK_MIO_FUS_PROG BDK_MIO_FUS_PROG_FUNC()
+static inline uint64_t BDK_MIO_FUS_PROG_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_FUS_PROG_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x87e003001510ll;
+ __bdk_csr_fatal("MIO_FUS_PROG", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_FUS_PROG bdk_mio_fus_prog_t
+#define bustype_BDK_MIO_FUS_PROG BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_FUS_PROG "MIO_FUS_PROG"
+#define device_bar_BDK_MIO_FUS_PROG 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_FUS_PROG 0
+#define arguments_BDK_MIO_FUS_PROG -1,-1,-1,-1
+
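+/* Illustrative sketch (editorial example, not generated register text) of the
+ * bank-programming sequence described for [PROG] above, shown as a soft blow
+ * ([SFT] = 1) so no fuses are physically burned. mmio_read64()/mmio_write64()
+ * are hypothetical 64-bit CSR accessors, and BDK_MIO_FUS_BNK_DATX() is
+ * assumed to be the accessor for the MIO_FUS_BNK_DAT() register referenced
+ * above, following this file's DATX naming convention.
+ *
+ * static void example_soft_blow_bank(unsigned bank, uint64_t lo, uint64_t hi)
+ * {
+ *     mmio_write64(BDK_MIO_FUS_WADR, bank);        // select the 128-fuse bank
+ *     mmio_write64(BDK_MIO_FUS_BNK_DATX(0), lo);   // fuses 63..0 to blow
+ *     mmio_write64(BDK_MIO_FUS_BNK_DATX(1), hi);   // fuses 127..64 to blow
+ *     bdk_mio_fus_prog_t prog;
+ *     prog.u = 0;
+ *     prog.s.sft  = 1;   // soft blow: update local storage only
+ *     prog.s.prog = 1;   // start the bank write; hardware clears it
+ *     mmio_write64(BDK_MIO_FUS_PROG, prog.u);
+ *     do {
+ *         prog.u = mmio_read64(BDK_MIO_FUS_PROG);
+ *     } while (prog.s.prog);  // bank write complete once [PROG] reads 0
+ * }
+ */
+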
+/**
+ * Register (RSL) mio_fus_prog_times
+ *
+ * INTERNAL: MIO Fuse Program Times Register
+ */
+union bdk_mio_fus_prog_times
+{
+ uint64_t u;
+ struct bdk_mio_fus_prog_times_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_35_63 : 29;
+ uint64_t vgate_pin : 1; /**< [ 34: 34](RO) Internal:
+ Efuse vgate pin (L6G). */
+ uint64_t fsrc_pin : 1; /**< [ 33: 33](RO) Internal:
+ Efuse fsource pin (L6G). */
+ uint64_t prog_pin : 1; /**< [ 32: 32](RO) Internal:
+ Efuse program pin (IFB). */
+ uint64_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_31 : 32;
+ uint64_t prog_pin : 1; /**< [ 32: 32](RO) Internal:
+ Efuse program pin (IFB). */
+ uint64_t fsrc_pin : 1; /**< [ 33: 33](RO) Internal:
+ Efuse fsource pin (L6G). */
+ uint64_t vgate_pin : 1; /**< [ 34: 34](RO) Internal:
+ Efuse vgate pin (L6G). */
+ uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_mio_fus_prog_times_s cn; */
+};
+typedef union bdk_mio_fus_prog_times bdk_mio_fus_prog_times_t;
+
+#define BDK_MIO_FUS_PROG_TIMES BDK_MIO_FUS_PROG_TIMES_FUNC()
+static inline uint64_t BDK_MIO_FUS_PROG_TIMES_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_FUS_PROG_TIMES_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x87e003001518ll;
+ __bdk_csr_fatal("MIO_FUS_PROG_TIMES", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_FUS_PROG_TIMES bdk_mio_fus_prog_times_t
+#define bustype_BDK_MIO_FUS_PROG_TIMES BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_FUS_PROG_TIMES "MIO_FUS_PROG_TIMES"
+#define device_bar_BDK_MIO_FUS_PROG_TIMES 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_FUS_PROG_TIMES 0
+#define arguments_BDK_MIO_FUS_PROG_TIMES -1,-1,-1,-1
+
+/**
+ * Register (RSL) mio_fus_rcmd
+ *
+ * MIO Fuse Read Command Register
+ * To read an efuse, software writes [ADDR,PEND] with
+ * the byte address of the fuse in question, then software can poll
+ * [PEND]. When [PEND] = 0, then [DAT] is valid.
+ * In addition, if the efuse read went to the efuse banks, software can
+ * read MIO_FUS_BNK_DAT() which contains all 128 fuses in the bank
+ * associated with ADDR. Fuses 1023..960 are never accessible on pass 1 parts.
+ * In addition, fuses 1023..960 are not accessible if
+ * MIO_FUS_DAT2[DORM_CRYPTO] is enabled.
+ */
+union bdk_mio_fus_rcmd
+{
+ uint64_t u;
+ struct bdk_mio_fus_rcmd_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t dat : 8; /**< [ 23: 16](RO/H) Eight bits of fuse data. */
+ uint64_t reserved_13_15 : 3;
+ uint64_t pend : 1; /**< [ 12: 12](R/W/H) Software sets this bit to 1 on a write operation that starts
+ the fuse read operation. Hardware clears this bit when the read
+ operation is complete and [DAT] is valid. MIO_FUS_READ_TIMES[RDSTB_WH]
+ determines the time for the operation to complete. */
+ uint64_t reserved_11 : 1;
+ uint64_t addr_hi : 2; /**< [ 10: 9](R/W) Upper fuse address bits to extend space beyond 2k fuses.
+ Valid range is 0x0-0x3. Software should not change this
+ field while [PEND] is set. It should wait for
+ the hardware to clear it. */
+ uint64_t efuse : 1; /**< [ 8: 8](R/W) Efuse storage. When set, the return data is from the efuse
+ directly rather than the local storage. */
+ uint64_t addr : 8; /**< [ 7: 0](R/W) Address. Specifies the byte address of the fuse to read.
+ Software should not change this field while [PEND]
+ is set. It must wait for the hardware to clear it. */
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 8; /**< [ 7: 0](R/W) Address. Specifies the byte address of the fuse to read.
+ Software should not change this field while [PEND]
+ is set. It must wait for the hardware to clear it. */
+ uint64_t efuse : 1; /**< [ 8: 8](R/W) Efuse storage. When set, the return data is from the efuse
+ directly rather than the local storage. */
+ uint64_t addr_hi : 2; /**< [ 10: 9](R/W) Upper fuse address bits to extend space beyond 2k fuses.
+ Valid range is 0x0-0x3. Software should not change this
+ field while [PEND] is set. It should wait for
+ the hardware to clear it. */
+ uint64_t reserved_11 : 1;
+ uint64_t pend : 1; /**< [ 12: 12](R/W/H) Software sets this bit to 1 on a write operation that starts
+ the fuse read operation. Hardware clears this bit when the read
+ operation is complete and [DAT] is valid. MIO_FUS_READ_TIMES[RDSTB_WH]
+ determines the time for the operation to complete. */
+ uint64_t reserved_13_15 : 3;
+ uint64_t dat : 8; /**< [ 23: 16](RO/H) Eight bits of fuse data. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_mio_fus_rcmd_s cn; */
+};
+typedef union bdk_mio_fus_rcmd bdk_mio_fus_rcmd_t;
+
+#define BDK_MIO_FUS_RCMD BDK_MIO_FUS_RCMD_FUNC()
+static inline uint64_t BDK_MIO_FUS_RCMD_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_FUS_RCMD_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x87e003001500ll;
+ __bdk_csr_fatal("MIO_FUS_RCMD", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_FUS_RCMD bdk_mio_fus_rcmd_t
+#define bustype_BDK_MIO_FUS_RCMD BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_FUS_RCMD "MIO_FUS_RCMD"
+#define device_bar_BDK_MIO_FUS_RCMD 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_FUS_RCMD 0
+#define arguments_BDK_MIO_FUS_RCMD -1,-1,-1,-1
+
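+/* Illustrative sketch (editorial example, not generated register text) of the
+ * read flow described above: write [ADDR] and [PEND] together, poll until
+ * hardware clears [PEND], then read [DAT]. mmio_read64()/mmio_write64() are
+ * hypothetical 64-bit CSR accessors.
+ *
+ * static uint8_t example_read_fuse_byte(uint8_t byte_addr)
+ * {
+ *     bdk_mio_fus_rcmd_t rcmd;
+ *     rcmd.u = 0;
+ *     rcmd.s.addr = byte_addr;  // byte address of the fuse to read
+ *     rcmd.s.pend = 1;          // starts the read; hardware clears it
+ *     mmio_write64(BDK_MIO_FUS_RCMD, rcmd.u);
+ *     do {
+ *         rcmd.u = mmio_read64(BDK_MIO_FUS_RCMD);
+ *     } while (rcmd.s.pend);    // [DAT] is valid once [PEND] reads 0
+ *     return rcmd.s.dat;
+ * }
+ */
+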
+/**
+ * Register (RSL) mio_fus_read_times
+ *
+ * MIO Fuse Read Times Register
+ * IFB fuses are 0 to 1791. The reset values are for IFB fuses for PLL_REF_CLK up to 100MHz when
+ * the core PLL is engaged. If any of the formulas below result in a value less than 0x0, the
+ * corresponding timing parameter should be set to 0.
+ *
+ * Prior to issuing a read operation to the fuse banks (via MIO_FUS_RCMD), this register should
+ * be written with the timing parameters to be used for the read.
+ * This register should not be written while MIO_FUS_RCMD[PEND] = 1.
+ */
+union bdk_mio_fus_read_times
+{
+ uint64_t u;
+ struct bdk_mio_fus_read_times_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t done : 4; /**< [ 31: 28](R/W) Hold time of CSB, PGENB, and LOAD with respect to falling edge of STROBE for read and
+ write mode in PLL_REF_CLK + 1 cycles. Timing specs are th_CS = 6ns, th_PG = 10ns, th_LD_p
+ = 7ns. Default of 0x0 yields 20ns for a PLL_REF_CLK of 50 MHz, 10ns at 100MHz. */
+ uint64_t ahd : 4; /**< [ 27: 24](R/W) Hold time of A with respect to falling edge of STROBE for read and write modes in
+ PLL_REF_CLK + 2 cycles. Timing spec of tsu_A_r and tsu_A_p is 3ns min. Default of 0x0
+ yields 40ns for a PLL_REF_CLK of 50 MHz, 20ns at 100MHz. */
+ uint64_t wrstb_wh : 12; /**< [ 23: 12](R/W) Pulse width high of STROBE in write mode in PLL_REF_CLK + 1 cycles. Timing spec of
+ twh_SB_p is 9.8us max. Default of 0x1F3 yields 10 us at PLL_REF_CLK of 50 MHz. */
+ uint64_t rdstb_wh : 4; /**< [ 11: 8](R/W) Pulse width high of STROBE in read mode in PLL_REF_CLK + 1 cycles. Timing spec of twh_SB_p
+ is 20ns min. Default of 0x1 yields 40 ns at PLL_REF_CLK of 50 MHz, 20ns at 100MHz. */
+ uint64_t asu : 4; /**< [ 7: 4](R/W) Setup time of A to rising edge of STROBE for read and write modes in PLL_REF_CLK cycles.
+ Timing spec of tsu_A_r and tsu_A_p is 12 ns min. Default of 0x1 yields 40 ns at
+ PLL_REF_CLK of 50 MHz, 20ns at 100MHz. */
+ uint64_t setup : 4; /**< [ 3: 0](R/W) Setup time of CSB, PGENB, LOAD to rising edge of STROBE in read and write modes in
+ PLL_REF_CLK + 1 cycles. tsu_CS = 16ns, tsu_PG = 14ns, tsu_LD_r = 10ns. Default of 0x0
+ yields 20 ns plus ASU cycles at PLL_REF_CLK of 50 MHz, 10ns + ASU at 100MHz. */
+#else /* Word 0 - Little Endian */
+ uint64_t setup : 4; /**< [ 3: 0](R/W) Setup time of CSB, PGENB, LOAD to rising edge of STROBE in read and write modes in
+ PLL_REF_CLK + 1 cycles. tsu_CS = 16ns, tsu_PG = 14ns, tsu_LD_r = 10ns. Default of 0x0
+ yields 20 ns plus ASU cycles at PLL_REF_CLK of 50 MHz, 10ns + ASU at 100MHz. */
+ uint64_t asu : 4; /**< [ 7: 4](R/W) Setup time of A to rising edge of STROBE for read and write modes in PLL_REF_CLK cycles.
+ Timing spec of tsu_A_r and tsu_A_p is 12 ns min. Default of 0x1 yields 40 ns at
+ PLL_REF_CLK of 50 MHz, 20ns at 100MHz. */
+ uint64_t rdstb_wh : 4; /**< [ 11: 8](R/W) Pulse width high of STROBE in read mode in PLL_REF_CLK + 1 cycles. Timing spec of twh_SB_p
+ is 20ns min. Default of 0x1 yields 40 ns at PLL_REF_CLK of 50 MHz, 20ns at 100MHz. */
+ uint64_t wrstb_wh : 12; /**< [ 23: 12](R/W) Pulse width high of STROBE in write mode in PLL_REF_CLK + 1 cycles. Timing spec of
+ twh_SB_p is 9.8us max. Default of 0x1F3 yields 10 us at PLL_REF_CLK of 50 MHz. */
+ uint64_t ahd : 4; /**< [ 27: 24](R/W) Hold time of A with respect to falling edge of STROBE for read and write modes in
+ PLL_REF_CLK + 2 cycles. Timing spec of tsu_A_r and tsu_A_p is 3ns min. Default of 0x0
+ yields 40ns for a PLL_REF_CLK of 50 MHz, 20ns at 100MHz. */
+ uint64_t done : 4; /**< [ 31: 28](R/W) Hold time of CSB, PGENB, and LOAD with respect to falling edge of STROBE for read and
+ write mode in PLL_REF_CLK + 1 cycles. Timing specs are th_CS = 6ns, th_PG = 10ns, th_LD_p
+ = 7ns. Default of 0x0 yields 20ns for a PLL_REF_CLK of 50 MHz, 10ns at 100MHz. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_mio_fus_read_times_s cn; */
+};
+typedef union bdk_mio_fus_read_times bdk_mio_fus_read_times_t;
+
+#define BDK_MIO_FUS_READ_TIMES BDK_MIO_FUS_READ_TIMES_FUNC()
+static inline uint64_t BDK_MIO_FUS_READ_TIMES_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_FUS_READ_TIMES_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x87e003001570ll;
+ __bdk_csr_fatal("MIO_FUS_READ_TIMES", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_FUS_READ_TIMES bdk_mio_fus_read_times_t
+#define bustype_BDK_MIO_FUS_READ_TIMES BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_FUS_READ_TIMES "MIO_FUS_READ_TIMES"
+#define device_bar_BDK_MIO_FUS_READ_TIMES 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_FUS_READ_TIMES 0
+#define arguments_BDK_MIO_FUS_READ_TIMES -1,-1,-1,-1
+
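+/* Worked example (editorial note, not generated text) for the "cycles plus
+ * one" encoding used by the fields above: [WRSTB_WH] = 0x1F3 encodes
+ * 0x1F3 + 1 = 500 PLL_REF_CLK cycles, and 500 cycles * 20 ns/cycle at a
+ * 50 MHz PLL_REF_CLK gives the 10 us strobe width quoted in the [WRSTB_WH]
+ * description; the same setting at 100 MHz would give 5 us. */
+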
+/**
+ * Register (RSL) mio_fus_rpr_dat#
+ *
+ * INTERNAL: MIO Fuse Repair Memory Register
+ */
+union bdk_mio_fus_rpr_datx
+{
+ uint64_t u;
+ struct bdk_mio_fus_rpr_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t dat : 64; /**< [ 63: 0](R/W) Internal:
+ Repair memory store (RPR_MEM). Data for read and write. A write to
+ MIO_FUS_RPR_DAT(1) writes all 128 bits from both registers to RPR_MEM. */
+#else /* Word 0 - Little Endian */
+ uint64_t dat : 64; /**< [ 63: 0](R/W) Internal:
+ Repair memory store (RPR_MEM). Data for read and write. A write to
+ MIO_FUS_RPR_DAT(1) writes all 128 bits from both registers to RPR_MEM. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_mio_fus_rpr_datx_s cn; */
+};
+typedef union bdk_mio_fus_rpr_datx bdk_mio_fus_rpr_datx_t;
+
+static inline uint64_t BDK_MIO_FUS_RPR_DATX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_FUS_RPR_DATX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX) && (a<=1))
+ return 0x87e003001530ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("MIO_FUS_RPR_DATX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_FUS_RPR_DATX(a) bdk_mio_fus_rpr_datx_t
+#define bustype_BDK_MIO_FUS_RPR_DATX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_FUS_RPR_DATX(a) "MIO_FUS_RPR_DATX"
+#define device_bar_BDK_MIO_FUS_RPR_DATX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_FUS_RPR_DATX(a) (a)
+#define arguments_BDK_MIO_FUS_RPR_DATX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) mio_fus_soft_repair
+ *
+ * INTERNAL: MIO Fuse Soft Repair Register
+ *
+ * Internal:
+ * Aka "soft blow". Upon reset, fuse repairs are loaded into REPAIR_MEM as they are
+ * loaded into the memories. Any new defects are loaded in afterwards, leaving END_PTR at the
+ * last defect.
+ */
+union bdk_mio_fus_soft_repair
+{
+ uint64_t u;
+ struct bdk_mio_fus_soft_repair_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_20_63 : 44;
+ uint64_t rpr_flip_synd : 2; /**< [ 19: 18](R/W/H) Internal:
+ Flip syndrome bits on RPR_MEM writes. For diagnostic use only. */
+ uint64_t autoblow : 1; /**< [ 17: 17](R/W/H) Internal:
+ Set to initiate burning of defect fuses to fuse macro. Clears when fuses are
+ blown. */
+ uint64_t too_many : 1; /**< [ 16: 16](RO/H) Internal:
+ Set if the sum of fuse repairs and memory defects exceeds 48. */
+ uint64_t numdefects : 8; /**< [ 15: 8](RO/H) Internal:
+ After reset/BIST indicates the number of memory defects reported. Defects are
+ stored in REPAIR_MEM from bit address NUMREPAIRS*21 to (NUMREPAIRS*21 + NUMDEFECTS*21 -
+ 1). */
+ uint64_t numrepairs : 8; /**< [ 7: 0](R/W) Internal:
+ Indicates the number of soft repairs to load from repair mem to the memories on
+ a soft/warm reset. Indicates the number of repairs loaded from efuses to repair mem on a
+ cold reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t numrepairs : 8; /**< [ 7: 0](R/W) Internal:
+ Indicates the number of soft repairs to load from repair mem to the memories on
+ a soft/warm reset. Indicates the number of repairs loaded from efuses to repair mem on a
+ cold reset. */
+ uint64_t numdefects : 8; /**< [ 15: 8](RO/H) Internal:
+ After reset/BIST indicates the number of memory defects reported. Defects are
+ stored in REPAIR_MEM from bit address NUMREPAIRS*21 to (NUMREPAIRS*21 + NUMDEFECTS*21 -
+ 1). */
+ uint64_t too_many : 1; /**< [ 16: 16](RO/H) Internal:
+ Set if the sum of fuse repairs and memory defects exceeds 48. */
+ uint64_t autoblow : 1; /**< [ 17: 17](R/W/H) Internal:
+ Set to initiate burning of defect fuses to fuse macro. Clears when fuses are
+ blown. */
+ uint64_t rpr_flip_synd : 2; /**< [ 19: 18](R/W/H) Internal:
+ Flip syndrome bits on RPR_MEM writes. For diagnostic use only. */
+ uint64_t reserved_20_63 : 44;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_mio_fus_soft_repair_s cn81xx; */
+ struct bdk_mio_fus_soft_repair_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_20_63 : 44;
+ uint64_t rpr_flip_synd : 2; /**< [ 19: 18](R/W/H) Internal:
+ Flip syndrome bits on RPR_MEM writes. For diagnostic use only. */
+ uint64_t autoblow : 1; /**< [ 17: 17](R/W/H) Internal:
+ Set to initiate burning of defect fuses to fuse macro. Clears when fuses are
+ blown. */
+ uint64_t too_many : 1; /**< [ 16: 16](RO/H) Internal:
+ Set if the sum of fuse repairs and memory defects exceeds 195. */
+ uint64_t numdefects : 8; /**< [ 15: 8](RO/H) Internal:
+ After reset/BIST indicates the number of memory defects reported. Defects are
+ stored in REPAIR_MEM from bit address NUMREPAIRS*21 to (NUMREPAIRS*21 + NUMDEFECTS*21 -
+ 1). */
+ uint64_t numrepairs : 8; /**< [ 7: 0](R/W) Internal:
+ Indicates the number of soft repairs to load from repair mem to the memories on
+ a soft/warm reset. Indicates the number of repairs loaded from efuses to repair mem on a
+ cold reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t numrepairs : 8; /**< [ 7: 0](R/W) Internal:
+ Indicates the number of soft repairs to load from repair mem to the memories on
+ a soft/warm reset. Indicates the number of repairs loaded from efuses to repair mem on a
+ cold reset. */
+ uint64_t numdefects : 8; /**< [ 15: 8](RO/H) Internal:
+ After reset/BIST indicates the number of memory defects reported. Defects are
+ stored in REPAIR_MEM from bit address NUMREPAIRS*21 to (NUMREPAIRS*21 + NUMDEFECTS*21 -
+ 1). */
+ uint64_t too_many : 1; /**< [ 16: 16](RO/H) Internal:
+ Set if the sum of fuse repairs and memory defects exceeds 195. */
+ uint64_t autoblow : 1; /**< [ 17: 17](R/W/H) Internal:
+ Set to initiate burning of defect fuses to fuse macro. Clears when fuses are
+ blown. */
+ uint64_t rpr_flip_synd : 2; /**< [ 19: 18](R/W/H) Internal:
+ Flip syndrome bits on RPR_MEM writes. For diagnostic use only. */
+ uint64_t reserved_20_63 : 44;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_mio_fus_soft_repair_s cn83xx; */
+};
+typedef union bdk_mio_fus_soft_repair bdk_mio_fus_soft_repair_t;
+
+#define BDK_MIO_FUS_SOFT_REPAIR BDK_MIO_FUS_SOFT_REPAIR_FUNC()
+static inline uint64_t BDK_MIO_FUS_SOFT_REPAIR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_FUS_SOFT_REPAIR_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x87e003001540ll;
+ __bdk_csr_fatal("MIO_FUS_SOFT_REPAIR", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_FUS_SOFT_REPAIR bdk_mio_fus_soft_repair_t
+#define bustype_BDK_MIO_FUS_SOFT_REPAIR BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_FUS_SOFT_REPAIR "MIO_FUS_SOFT_REPAIR"
+#define device_bar_BDK_MIO_FUS_SOFT_REPAIR 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_FUS_SOFT_REPAIR 0
+#define arguments_BDK_MIO_FUS_SOFT_REPAIR -1,-1,-1,-1
+
+/**
+ * Register (RSL) mio_fus_tgg
+ *
+ * MIO Fuse TGG Register
+ * This register exists to support Authentik. Authentik code should read this register, then
+ * clear VAL to prevent other software from observing the value of the TGG fuses.
+ *
+ * Internal:
+ * It is never possible to read the TGG fuses via MIO_FUS_RCMD. Whenever the fuse
+ * corresponding to VAL (TGG\<63\>) is blown, it is not possible to blow any of TGG\<62:0\>. The fuse
+ * corresponding to VAL must be the one and only lock down bit for TGG\<62:0\> - no other fuse
+ * lockdown bit can prevent blowing TGG\<62:0\>. It must always be possible to blow the fuse
+ * corresponding to VAL when it is not already blown. If an Authentik part may be converted to a
+ * non-Authentik part (via some JTAG mechanism or any other mechanism), it must not be possible
+ * to read the TGG fuse values from the Authentik part by performing this conversion; the reset
+ * value of VAL should be zero in this converted case.
+ */
+union bdk_mio_fus_tgg
+{
+ uint64_t u;
+ struct bdk_mio_fus_tgg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t val : 1; /**< [ 63: 63](R/W/H) Software can write VAL to 0, but cannot write VAL to a 1. When VAL = 1, DAT reads
+ the corresponding TGG fuses. When VAL = 0, DAT reads as 0s. The reset value of
+ this VAL bit is normally its fuse setting (i.e. TGG\<63\>). */
+ uint64_t dat : 63; /**< [ 62: 0](RO/H) When VAL = 0, DAT always reads as 0x0, regardless of the value of the TGG\<62:0\>
+ fuses. When VAL = 1, DAT returns the value of the TGG\<62:0\> fuses. */
+#else /* Word 0 - Little Endian */
+ uint64_t dat : 63; /**< [ 62: 0](RO/H) When VAL = 0, DAT always reads as 0x0, regardless of the value of the TGG\<62:0\>
+ fuses. When VAL = 1, DAT returns the value of the TGG\<62:0\> fuses. */
+ uint64_t val : 1; /**< [ 63: 63](R/W/H) Software can write VAL to 0, but cannot write VAL to a 1. When VAL = 1, DAT reads
+ the corresponding TGG fuses. When VAL = 0, DAT reads as 0s. The reset value of
+ this VAL bit is normally its fuse setting (i.e. TGG\<63\>). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_mio_fus_tgg_s cn; */
+};
+typedef union bdk_mio_fus_tgg bdk_mio_fus_tgg_t;
+
+#define BDK_MIO_FUS_TGG BDK_MIO_FUS_TGG_FUNC()
+static inline uint64_t BDK_MIO_FUS_TGG_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_FUS_TGG_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x87e003001430ll;
+ __bdk_csr_fatal("MIO_FUS_TGG", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_FUS_TGG bdk_mio_fus_tgg_t
+#define bustype_BDK_MIO_FUS_TGG BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_FUS_TGG "MIO_FUS_TGG"
+#define device_bar_BDK_MIO_FUS_TGG 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_FUS_TGG 0
+#define arguments_BDK_MIO_FUS_TGG -1,-1,-1,-1
+
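+/* Illustrative sketch (editorial example, not generated register text) of the
+ * read-then-hide flow described above: read the TGG fuses once while
+ * [VAL] = 1, then write VAL to 0 so later readers see zeros.
+ * mmio_read64()/mmio_write64() are hypothetical 64-bit CSR accessors.
+ *
+ * static uint64_t example_consume_tgg(void)
+ * {
+ *     bdk_mio_fus_tgg_t tgg;
+ *     tgg.u = mmio_read64(BDK_MIO_FUS_TGG);
+ *     uint64_t dat = tgg.s.dat;          // valid only while [VAL] = 1
+ *     mmio_write64(BDK_MIO_FUS_TGG, 0);  // VAL can be cleared, never set
+ *     return dat;
+ * }
+ */
+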
+/**
+ * Register (RSL) mio_fus_wadr
+ *
+ * MIO Fuse Write Address Register
+ */
+union bdk_mio_fus_wadr
+{
+ uint64_t u;
+ struct bdk_mio_fus_wadr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_6_63 : 58;
+ uint64_t addr : 6; /**< [ 5: 0](R/W) Indicates which of the banks of 128 fuses to blow. Software
+                                                                 should not change this field while the MIO_FUS_PROG[PROG] bit is set.
+ It must wait for the hardware to clear it. */
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 6; /**< [ 5: 0](R/W) Indicates which of the banks of 128 fuses to blow. Software
+                                                                 should not change this field while the MIO_FUS_PROG[PROG] bit is set.
+ It must wait for the hardware to clear it. */
+ uint64_t reserved_6_63 : 58;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_mio_fus_wadr_s cn; */
+};
+typedef union bdk_mio_fus_wadr bdk_mio_fus_wadr_t;
+
+#define BDK_MIO_FUS_WADR BDK_MIO_FUS_WADR_FUNC()
+static inline uint64_t BDK_MIO_FUS_WADR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_FUS_WADR_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x87e003001508ll;
+ __bdk_csr_fatal("MIO_FUS_WADR", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_FUS_WADR bdk_mio_fus_wadr_t
+#define bustype_BDK_MIO_FUS_WADR BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_FUS_WADR "MIO_FUS_WADR"
+#define device_bar_BDK_MIO_FUS_WADR 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_FUS_WADR 0
+#define arguments_BDK_MIO_FUS_WADR -1,-1,-1,-1
+
+#endif /* __BDK_CSRS_MIO_FUS_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-mio_tws.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-mio_tws.h
new file mode 100644
index 0000000000..54eff71fbe
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-mio_tws.h
@@ -0,0 +1,1682 @@
+#ifndef __BDK_CSRS_MIO_TWS_H__
+#define __BDK_CSRS_MIO_TWS_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium MIO_TWS.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Enumeration mio_tws_bar_e
+ *
+ * TWSI Base Address Register Enumeration
+ * Enumerates the base address registers.
+ */
+#define BDK_MIO_TWS_BAR_E_MIO_TWSX_PF_BAR0_CN8(a) (0x87e0d0000000ll + 0x1000000ll * (a))
+#define BDK_MIO_TWS_BAR_E_MIO_TWSX_PF_BAR0_CN8_SIZE 0x800000ull
+#define BDK_MIO_TWS_BAR_E_MIO_TWSX_PF_BAR0_CN9(a) (0x87e0d0000000ll + 0x1000000ll * (a))
+#define BDK_MIO_TWS_BAR_E_MIO_TWSX_PF_BAR0_CN9_SIZE 0x10000ull
+#define BDK_MIO_TWS_BAR_E_MIO_TWSX_PF_BAR4(a) (0x87e0d0f00000ll + 0x1000000ll * (a))
+#define BDK_MIO_TWS_BAR_E_MIO_TWSX_PF_BAR4_SIZE 0x100000ull
+
+/**
+ * Enumeration mio_tws_int_vec_e
+ *
+ * TWSI MSI-X Vector Enumeration
+ * Enumerates the MSI-X interrupt vectors.
+ */
+#define BDK_MIO_TWS_INT_VEC_E_INT_ST (0)
+
+/**
+ * Register (RSL) mio_tws#_access_wdog
+ *
+ * TWSI Watchdog Register
+ * This register contains the watchdog control.
+ */
+union bdk_mio_twsx_access_wdog
+{
+ uint64_t u;
+ struct bdk_mio_twsx_access_wdog_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+    uint64_t wdog_cnt              : 32; /**< [ 31: 0](R/W) Maximum number of TCLK clocks (defined by the TWSI_CLK control THP) that the
+                                                                 TWSI high-level controller (HLC) is allowed to stay in any state other than
+                                                                 the idle state. Hardware halts the operation if the HLC is stuck longer than
+                                                                 this delay; the MIO_TWS()_INT[ST_INT] interrupt is generated and error code
+                                                                 0xF0 is reported by MIO_TWS()_SW_TWSI[DATA]. Setting [WDOG_CNT] to 0x0
+                                                                 disables the watchdog function. */
+#else /* Word 0 - Little Endian */
+    uint64_t wdog_cnt              : 32; /**< [ 31: 0](R/W) Maximum number of TCLK clocks (defined by the TWSI_CLK control THP) that the
+                                                                 TWSI high-level controller (HLC) is allowed to stay in any state other than
+                                                                 the idle state. Hardware halts the operation if the HLC is stuck longer than
+                                                                 this delay; the MIO_TWS()_INT[ST_INT] interrupt is generated and error code
+                                                                 0xF0 is reported by MIO_TWS()_SW_TWSI[DATA]. Setting [WDOG_CNT] to 0x0
+                                                                 disables the watchdog function. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_mio_twsx_access_wdog_s cn; */
+};
+typedef union bdk_mio_twsx_access_wdog bdk_mio_twsx_access_wdog_t;
+
+static inline uint64_t BDK_MIO_TWSX_ACCESS_WDOG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_TWSX_ACCESS_WDOG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=5))
+ return 0x87e0d0001040ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("MIO_TWSX_ACCESS_WDOG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_TWSX_ACCESS_WDOG(a) bdk_mio_twsx_access_wdog_t
+#define bustype_BDK_MIO_TWSX_ACCESS_WDOG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_TWSX_ACCESS_WDOG(a) "MIO_TWSX_ACCESS_WDOG"
+#define device_bar_BDK_MIO_TWSX_ACCESS_WDOG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_TWSX_ACCESS_WDOG(a) (a)
+#define arguments_BDK_MIO_TWSX_ACCESS_WDOG(a) (a),-1,-1,-1
+
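+/* Illustrative sketch (editorial example, not generated register text):
+ * programming the access watchdog described above. mmio_write64() is a
+ * hypothetical 64-bit CSR write; a [WDOG_CNT] of 0x0 disables the watchdog.
+ *
+ * static void example_set_tws_wdog(unsigned long twsi, uint32_t tclk_limit)
+ * {
+ *     bdk_mio_twsx_access_wdog_t wdog;
+ *     wdog.u = 0;
+ *     wdog.s.wdog_cnt = tclk_limit;  // 0 disables the watchdog
+ *     mmio_write64(BDK_MIO_TWSX_ACCESS_WDOG(twsi), wdog.u);
+ * }
+ */
+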
+/**
+ * Register (RSL) mio_tws#_clken
+ *
+ * MIO Clock Enable Register
+ * This register controls conditional clocks.
+ */
+union bdk_mio_twsx_clken
+{
+ uint64_t u;
+ struct bdk_mio_twsx_clken_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t clken : 1; /**< [ 0: 0](R/W) Force the TWSI interface conditional clocking to be always on. For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t clken : 1; /**< [ 0: 0](R/W) Force the TWSI interface conditional clocking to be always on. For diagnostic use only. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_mio_twsx_clken_s cn; */
+};
+typedef union bdk_mio_twsx_clken bdk_mio_twsx_clken_t;
+
+static inline uint64_t BDK_MIO_TWSX_CLKEN(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_TWSX_CLKEN(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=5))
+ return 0x87e0d0001078ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("MIO_TWSX_CLKEN", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_TWSX_CLKEN(a) bdk_mio_twsx_clken_t
+#define bustype_BDK_MIO_TWSX_CLKEN(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_TWSX_CLKEN(a) "MIO_TWSX_CLKEN"
+#define device_bar_BDK_MIO_TWSX_CLKEN(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_TWSX_CLKEN(a) (a)
+#define arguments_BDK_MIO_TWSX_CLKEN(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) mio_tws#_const
+ *
+ * TWSI Constants Register
+ * This register contains constants for software discovery.
+ */
+union bdk_mio_twsx_const
+{
+ uint64_t u;
+ struct bdk_mio_twsx_const_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t ver : 4; /**< [ 3: 0](RO) TWSI version.
+ Internal:
+ FIXME spec values. Make 8 bits? */
+#else /* Word 0 - Little Endian */
+ uint64_t ver : 4; /**< [ 3: 0](RO) TWSI version.
+ Internal:
+ FIXME spec values. Make 8 bits? */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_mio_twsx_const_s cn; */
+};
+typedef union bdk_mio_twsx_const bdk_mio_twsx_const_t;
+
+static inline uint64_t BDK_MIO_TWSX_CONST(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_TWSX_CONST(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=5))
+ return 0x87e0d0000000ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("MIO_TWSX_CONST", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_TWSX_CONST(a) bdk_mio_twsx_const_t
+#define bustype_BDK_MIO_TWSX_CONST(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_TWSX_CONST(a) "MIO_TWSX_CONST"
+#define device_bar_BDK_MIO_TWSX_CONST(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_TWSX_CONST(a) (a)
+#define arguments_BDK_MIO_TWSX_CONST(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) mio_tws#_int
+ *
+ * TWSI Interrupt Register
+ * This register contains the TWSI interrupt-source bits and SDA/SCL override bits.
+ */
+union bdk_mio_twsx_int
+{
+ uint64_t u;
+ struct bdk_mio_twsx_int_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t scl : 1; /**< [ 11: 11](RO/H) SCL signal. */
+ uint64_t sda : 1; /**< [ 10: 10](RO/H) SDA signal. */
+ uint64_t scl_ovr : 1; /**< [ 9: 9](R/W) SCL testing override:
+ 0 = Normal operation, SCL bus controlled by TWSI core.
+ 1 = Pull SCL low. */
+ uint64_t sda_ovr : 1; /**< [ 8: 8](R/W) SDA testing override:
+ 0 = Normal operation, SDA bus controlled by TWSI core.
+ 1 = Pull SDA low. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t block_int : 1; /**< [ 3: 3](R/W1C/H) BLOCK transaction threshold interrupt. Interrupt fires when the remaining
+                                                                 bytes to be sent/received are fewer than the threshold MIO_TWS()_TWSI_BLOCK_CTL[BLOCK_THRESH].
+                                                                 Only valid in HLC BLOCK_MODE. Ignored when BLOCK mode is disabled. */
+ uint64_t core_int : 1; /**< [ 2: 2](RO/H) TWSI core interrupt, whenever IFLG is set. Ignored when the HLC is enabled. */
+ uint64_t ts_int : 1; /**< [ 1: 1](R/W1C/H) MIO_TWS()_TWSI_SW register-update interrupt. Ignored when the HLC is disabled. */
+ uint64_t st_int : 1; /**< [ 0: 0](R/W1C/H) MIO_TWS()_SW_TWSI register-update interrupt. Ignored when the HLC is disabled. */
+#else /* Word 0 - Little Endian */
+ uint64_t st_int : 1; /**< [ 0: 0](R/W1C/H) MIO_TWS()_SW_TWSI register-update interrupt. Ignored when the HLC is disabled. */
+ uint64_t ts_int : 1; /**< [ 1: 1](R/W1C/H) MIO_TWS()_TWSI_SW register-update interrupt. Ignored when the HLC is disabled. */
+ uint64_t core_int : 1; /**< [ 2: 2](RO/H) TWSI core interrupt, whenever IFLG is set. Ignored when the HLC is enabled. */
+ uint64_t block_int : 1; /**< [ 3: 3](R/W1C/H) BLOCK transaction threshold interrupt. The interrupt fires when the number of
+ remaining bytes to be sent/received is less than the threshold MIO_TWS()_TWSI_BLOCK_CTL[BLOCK_THRESH].
+ Only valid in HLC BLOCK_MODE. Ignored when BLOCK mode is disabled. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t sda_ovr : 1; /**< [ 8: 8](R/W) SDA testing override:
+ 0 = Normal operation, SDA bus controlled by TWSI core.
+ 1 = Pull SDA low. */
+ uint64_t scl_ovr : 1; /**< [ 9: 9](R/W) SCL testing override:
+ 0 = Normal operation, SCL bus controlled by TWSI core.
+ 1 = Pull SCL low. */
+ uint64_t sda : 1; /**< [ 10: 10](RO/H) SDA signal. */
+ uint64_t scl : 1; /**< [ 11: 11](RO/H) SCL signal. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_mio_twsx_int_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t scl : 1; /**< [ 11: 11](RO/H) SCL signal. */
+ uint64_t sda : 1; /**< [ 10: 10](RO/H) SDA signal. */
+ uint64_t scl_ovr : 1; /**< [ 9: 9](R/W) SCL testing override:
+ 0 = Normal operation, SCL bus controlled by TWSI core.
+ 1 = Pull SCL low. */
+ uint64_t sda_ovr : 1; /**< [ 8: 8](R/W) SDA testing override:
+ 0 = Normal operation, SDA bus controlled by TWSI core.
+ 1 = Pull SDA low. */
+ uint64_t reserved_3_7 : 5;
+ uint64_t core_int : 1; /**< [ 2: 2](RO/H) TWSI core interrupt, whenever IFLG is set. Ignored when the HLC is enabled. */
+ uint64_t ts_int : 1; /**< [ 1: 1](R/W1C/H) MIO_TWS()_TWSI_SW register-update interrupt. Ignored when the HLC is disabled. */
+ uint64_t st_int : 1; /**< [ 0: 0](R/W1C/H) MIO_TWS()_SW_TWSI register-update interrupt. Ignored when the HLC is disabled. */
+#else /* Word 0 - Little Endian */
+ uint64_t st_int : 1; /**< [ 0: 0](R/W1C/H) MIO_TWS()_SW_TWSI register-update interrupt. Ignored when the HLC is disabled. */
+ uint64_t ts_int : 1; /**< [ 1: 1](R/W1C/H) MIO_TWS()_TWSI_SW register-update interrupt. Ignored when the HLC is disabled. */
+ uint64_t core_int : 1; /**< [ 2: 2](RO/H) TWSI core interrupt, whenever IFLG is set. Ignored when the HLC is enabled. */
+ uint64_t reserved_3_7 : 5;
+ uint64_t sda_ovr : 1; /**< [ 8: 8](R/W) SDA testing override:
+ 0 = Normal operation, SDA bus controlled by TWSI core.
+ 1 = Pull SDA low. */
+ uint64_t scl_ovr : 1; /**< [ 9: 9](R/W) SCL testing override:
+ 0 = Normal operation, SCL bus controlled by TWSI core.
+ 1 = Pull SCL low. */
+ uint64_t sda : 1; /**< [ 10: 10](RO/H) SDA signal. */
+ uint64_t scl : 1; /**< [ 11: 11](RO/H) SCL signal. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_mio_twsx_int_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t scl : 1; /**< [ 11: 11](RO/H) SCL signal. */
+ uint64_t sda : 1; /**< [ 10: 10](RO/H) SDA signal. */
+ uint64_t scl_ovr : 1; /**< [ 9: 9](R/W) SCL testing override:
+ 0 = Normal operation, SCL bus controlled by TWSI core.
+ 1 = Pull SCL low. */
+ uint64_t sda_ovr : 1; /**< [ 8: 8](R/W) SDA testing override:
+ 0 = Normal operation, SDA bus controlled by TWSI core.
+ 1 = Pull SDA low. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t block_int : 1; /**< [ 3: 3](R/W1C/H) BLOCK transaction threshold interrupt. The interrupt fires when the number of
+ remaining bytes to be sent/received is less than the threshold MIO_TWS()_TWSI_BLOCK_CTL[BLOCK_THRESH].
+ Only valid in HLC BLOCK_MODE. Ignored when BLOCK mode is disabled. */
+ uint64_t core_int : 1; /**< [ 2: 2](RO/H) TWSI core interrupt, whenever IFLG is set. Ignored when the HLC is enabled.
+ In order to clear [CORE_INT], software needs to write 0 to TWSI_CTL[IFLG]. */
+ uint64_t ts_int : 1; /**< [ 1: 1](R/W1C/H) MIO_TWS()_TWSI_SW register-update interrupt. Only valid in HLC mode.
+ Ignored when HLC is disabled. */
+ uint64_t st_int : 1; /**< [ 0: 0](R/W1C/H) MIO_TWS()_SW_TWSI register-update interrupt. Only valid in HLC mode.
+ Ignored when HLC is disabled. */
+#else /* Word 0 - Little Endian */
+ uint64_t st_int : 1; /**< [ 0: 0](R/W1C/H) MIO_TWS()_SW_TWSI register-update interrupt. Only valid in HLC mode.
+ Ignored when HLC is disabled. */
+ uint64_t ts_int : 1; /**< [ 1: 1](R/W1C/H) MIO_TWS()_TWSI_SW register-update interrupt. Only valid in HLC mode.
+ Ignored when HLC is disabled. */
+ uint64_t core_int : 1; /**< [ 2: 2](RO/H) TWSI core interrupt, whenever IFLG is set. Ignored when the HLC is enabled.
+ In order to clear [CORE_INT], software needs to write 0 to TWSI_CTL[IFLG]. */
+ uint64_t block_int : 1; /**< [ 3: 3](R/W1C/H) BLOCK transaction threshold interrupt. The interrupt fires when the number of
+ remaining bytes to be sent/received is less than the threshold MIO_TWS()_TWSI_BLOCK_CTL[BLOCK_THRESH].
+ Only valid in HLC BLOCK_MODE. Ignored when BLOCK mode is disabled. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t sda_ovr : 1; /**< [ 8: 8](R/W) SDA testing override:
+ 0 = Normal operation, SDA bus controlled by TWSI core.
+ 1 = Pull SDA low. */
+ uint64_t scl_ovr : 1; /**< [ 9: 9](R/W) SCL testing override:
+ 0 = Normal operation, SCL bus controlled by TWSI core.
+ 1 = Pull SCL low. */
+ uint64_t sda : 1; /**< [ 10: 10](RO/H) SDA signal. */
+ uint64_t scl : 1; /**< [ 11: 11](RO/H) SCL signal. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_mio_twsx_int bdk_mio_twsx_int_t;
+
+static inline uint64_t BDK_MIO_TWSX_INT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_TWSX_INT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0d0001010ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0d0001010ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0d0001010ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=5))
+ return 0x87e0d0001010ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("MIO_TWSX_INT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_TWSX_INT(a) bdk_mio_twsx_int_t
+#define bustype_BDK_MIO_TWSX_INT(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_TWSX_INT(a) "MIO_TWSX_INT"
+#define device_bar_BDK_MIO_TWSX_INT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_TWSX_INT(a) (a)
+#define arguments_BDK_MIO_TWSX_INT(a) (a),-1,-1,-1
+
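+/*
+ * Illustrative sketch, not part of the imported sources: [SDA]/[SCL]
+ * expose the live pin state and [SCL_OVR] can clock the bus by hand,
+ * the usual way to unwedge a slave that is holding SDA low. Writing
+ * zeros to the W1C interrupt bits leaves them untouched, so the
+ * override word is built from zero. Assumes the BDK's bdk_wait_usec()
+ * delay helper:
+ */
+static inline void example_twsx_unwedge(bdk_node_t node, int bus)
+{
+    for (int i = 0; i < 9; i++) /* nine clocks flush any in-flight byte */
+    {
+        bdk_mio_twsx_int_t st;
+        st.u = BDK_CSR_READ(node, BDK_MIO_TWSX_INT(bus));
+        if (st.s.sda) /* SDA released: bus is free again */
+            break;
+        bdk_mio_twsx_int_t ovr;
+        ovr.u = 0;
+        ovr.s.scl_ovr = 1; /* pull SCL low */
+        BDK_CSR_WRITE(node, BDK_MIO_TWSX_INT(bus), ovr.u);
+        bdk_wait_usec(5);
+        ovr.s.scl_ovr = 0; /* release SCL */
+        BDK_CSR_WRITE(node, BDK_MIO_TWSX_INT(bus), ovr.u);
+        bdk_wait_usec(5);
+    }
+}
+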
+/**
+ * Register (RSL) mio_tws#_int_ena_w1c
+ *
+ * TWSI Interrupt Enable Clear Register
+ */
+union bdk_mio_twsx_int_ena_w1c
+{
+ uint64_t u;
+ struct bdk_mio_twsx_int_ena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t block_int : 1; /**< [ 3: 3](R/W1C/H) Reads or clears MIO_TWS()_INT[BLOCK_INT]. */
+ uint64_t core_int : 1; /**< [ 2: 2](R/W1C/H) Reads or clears MIO_TWS()_INT[CORE_INT]. */
+ uint64_t ts_int : 1; /**< [ 1: 1](R/W1C/H) Reads or clears MIO_TWS()_INT[TS_INT]. */
+ uint64_t st_int : 1; /**< [ 0: 0](R/W1C/H) Reads or clears MIO_TWS()_INT[ST_INT]. */
+#else /* Word 0 - Little Endian */
+ uint64_t st_int : 1; /**< [ 0: 0](R/W1C/H) Reads or clears MIO_TWS()_INT[ST_INT]. */
+ uint64_t ts_int : 1; /**< [ 1: 1](R/W1C/H) Reads or clears MIO_TWS()_INT[TS_INT]. */
+ uint64_t core_int : 1; /**< [ 2: 2](R/W1C/H) Reads or clears MIO_TWS()_INT[CORE_INT]. */
+ uint64_t block_int : 1; /**< [ 3: 3](R/W1C/H) Reads or clears MIO_TWS()_INT[BLOCK_INT]. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_mio_twsx_int_ena_w1c_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_3_63 : 61;
+ uint64_t core_int : 1; /**< [ 2: 2](R/W1C/H) Reads or clears MIO_TWS()_INT[CORE_INT]. */
+ uint64_t ts_int : 1; /**< [ 1: 1](R/W1C/H) Reads or clears MIO_TWS()_INT[TS_INT]. */
+ uint64_t st_int : 1; /**< [ 0: 0](R/W1C/H) Reads or clears MIO_TWS()_INT[ST_INT]. */
+#else /* Word 0 - Little Endian */
+ uint64_t st_int : 1; /**< [ 0: 0](R/W1C/H) Reads or clears MIO_TWS()_INT[ST_INT]. */
+ uint64_t ts_int : 1; /**< [ 1: 1](R/W1C/H) Reads or clears MIO_TWS()_INT[TS_INT]. */
+ uint64_t core_int : 1; /**< [ 2: 2](R/W1C/H) Reads or clears MIO_TWS()_INT[CORE_INT]. */
+ uint64_t reserved_3_63 : 61;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_mio_twsx_int_ena_w1c_s cn9; */
+};
+typedef union bdk_mio_twsx_int_ena_w1c bdk_mio_twsx_int_ena_w1c_t;
+
+static inline uint64_t BDK_MIO_TWSX_INT_ENA_W1C(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_TWSX_INT_ENA_W1C(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0d0001028ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0d0001028ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0d0001028ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=5))
+ return 0x87e0d0001028ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("MIO_TWSX_INT_ENA_W1C", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_TWSX_INT_ENA_W1C(a) bdk_mio_twsx_int_ena_w1c_t
+#define bustype_BDK_MIO_TWSX_INT_ENA_W1C(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_TWSX_INT_ENA_W1C(a) "MIO_TWSX_INT_ENA_W1C"
+#define device_bar_BDK_MIO_TWSX_INT_ENA_W1C(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_TWSX_INT_ENA_W1C(a) (a)
+#define arguments_BDK_MIO_TWSX_INT_ENA_W1C(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) mio_tws#_int_ena_w1s
+ *
+ * TWSI Interrupt Enable Set Register
+ */
+union bdk_mio_twsx_int_ena_w1s
+{
+ uint64_t u;
+ struct bdk_mio_twsx_int_ena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t block_int : 1; /**< [ 3: 3](R/W1S/H) Enables reporting of MIO_TWS()_INT[BLOCK_INT]. */
+ uint64_t core_int : 1; /**< [ 2: 2](R/W1S/H) Enables reporting of MIO_TWS()_INT[CORE_INT]. */
+ uint64_t ts_int : 1; /**< [ 1: 1](R/W1S/H) Enables reporting of MIO_TWS()_INT[TS_INT]. */
+ uint64_t st_int : 1; /**< [ 0: 0](R/W1S/H) Enables reporting of MIO_TWS()_INT[ST_INT]. */
+#else /* Word 0 - Little Endian */
+ uint64_t st_int : 1; /**< [ 0: 0](R/W1S/H) Enables reporting of MIO_TWS()_INT[ST_INT]. */
+ uint64_t ts_int : 1; /**< [ 1: 1](R/W1S/H) Enables reporting of MIO_TWS()_INT[TS_INT]. */
+ uint64_t core_int : 1; /**< [ 2: 2](R/W1S/H) Enables reporting of MIO_TWS()_INT[CORE_INT]. */
+ uint64_t block_int : 1; /**< [ 3: 3](R/W1S/H) Enables reporting of MIO_TWS()_INT[BLOCK_INT]. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_mio_twsx_int_ena_w1s_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_3_63 : 61;
+ uint64_t core_int : 1; /**< [ 2: 2](R/W1S/H) Enables reporting of MIO_TWS()_INT[CORE_INT]. */
+ uint64_t ts_int : 1; /**< [ 1: 1](R/W1S/H) Enables reporting of MIO_TWS()_INT[TS_INT]. */
+ uint64_t st_int : 1; /**< [ 0: 0](R/W1S/H) Enables reporting of MIO_TWS()_INT[ST_INT]. */
+#else /* Word 0 - Little Endian */
+ uint64_t st_int : 1; /**< [ 0: 0](R/W1S/H) Enables reporting of MIO_TWS()_INT[ST_INT]. */
+ uint64_t ts_int : 1; /**< [ 1: 1](R/W1S/H) Enables reporting of MIO_TWS()_INT[TS_INT]. */
+ uint64_t core_int : 1; /**< [ 2: 2](R/W1S/H) Enables reporting of MIO_TWS()_INT[CORE_INT]. */
+ uint64_t reserved_3_63 : 61;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_mio_twsx_int_ena_w1s_s cn9; */
+};
+typedef union bdk_mio_twsx_int_ena_w1s bdk_mio_twsx_int_ena_w1s_t;
+
+static inline uint64_t BDK_MIO_TWSX_INT_ENA_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_TWSX_INT_ENA_W1S(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0d0001030ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0d0001030ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0d0001030ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=5))
+ return 0x87e0d0001030ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("MIO_TWSX_INT_ENA_W1S", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_TWSX_INT_ENA_W1S(a) bdk_mio_twsx_int_ena_w1s_t
+#define bustype_BDK_MIO_TWSX_INT_ENA_W1S(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_TWSX_INT_ENA_W1S(a) "MIO_TWSX_INT_ENA_W1S"
+#define device_bar_BDK_MIO_TWSX_INT_ENA_W1S(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_TWSX_INT_ENA_W1S(a) (a)
+#define arguments_BDK_MIO_TWSX_INT_ENA_W1S(a) (a),-1,-1,-1
+
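+/*
+ * Illustrative sketch, not part of the imported sources: the W1S/W1C
+ * pair follows the usual set/clear idiom, so a single interrupt source
+ * can be enabled or disabled without a read-modify-write. The same bit
+ * layout also applies to MIO_TWS()_INT_W1S below, which lets software
+ * inject an interrupt for testing:
+ */
+static inline void example_twsx_core_irq_enable(bdk_node_t node, int bus, int on)
+{
+    bdk_mio_twsx_int_ena_w1s_t ena;
+    ena.u = 0;
+    ena.s.core_int = 1; /* only this source's bit is affected */
+    if (on)
+        BDK_CSR_WRITE(node, BDK_MIO_TWSX_INT_ENA_W1S(bus), ena.u);
+    else /* the W1C register has the same layout, so reuse the mask */
+        BDK_CSR_WRITE(node, BDK_MIO_TWSX_INT_ENA_W1C(bus), ena.u);
+}
+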
+/**
+ * Register (RSL) mio_tws#_int_w1s
+ *
+ * TWSI Interrupt Set Register
+ */
+union bdk_mio_twsx_int_w1s
+{
+ uint64_t u;
+ struct bdk_mio_twsx_int_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t block_int : 1; /**< [ 3: 3](R/W1S/H) Reads or sets MIO_TWS()_INT[BLOCK_INT]. */
+ uint64_t core_int : 1; /**< [ 2: 2](RO/H) Reads MIO_TWS()_INT[CORE_INT]. */
+ uint64_t ts_int : 1; /**< [ 1: 1](R/W1S/H) Reads or sets MIO_TWS()_INT[TS_INT]. */
+ uint64_t st_int : 1; /**< [ 0: 0](R/W1S/H) Reads or sets MIO_TWS()_INT[ST_INT]. */
+#else /* Word 0 - Little Endian */
+ uint64_t st_int : 1; /**< [ 0: 0](R/W1S/H) Reads or sets MIO_TWS()_INT[ST_INT]. */
+ uint64_t ts_int : 1; /**< [ 1: 1](R/W1S/H) Reads or sets MIO_TWS()_INT[TS_INT]. */
+ uint64_t core_int : 1; /**< [ 2: 2](RO/H) Reads MIO_TWS()_INT[CORE_INT]. */
+ uint64_t block_int : 1; /**< [ 3: 3](R/W1S/H) Reads or sets MIO_TWS()_INT[BLOCK_INT]. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_mio_twsx_int_w1s_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_3_63 : 61;
+ uint64_t core_int : 1; /**< [ 2: 2](RO/H) Reads MIO_TWS()_INT[CORE_INT]. */
+ uint64_t ts_int : 1; /**< [ 1: 1](R/W1S/H) Reads or sets MIO_TWS()_INT[TS_INT]. */
+ uint64_t st_int : 1; /**< [ 0: 0](R/W1S/H) Reads or sets MIO_TWS()_INT[ST_INT]. */
+#else /* Word 0 - Little Endian */
+ uint64_t st_int : 1; /**< [ 0: 0](R/W1S/H) Reads or sets MIO_TWS()_INT[ST_INT]. */
+ uint64_t ts_int : 1; /**< [ 1: 1](R/W1S/H) Reads or sets MIO_TWS()_INT[TS_INT]. */
+ uint64_t core_int : 1; /**< [ 2: 2](RO/H) Reads MIO_TWS()_INT[CORE_INT]. */
+ uint64_t reserved_3_63 : 61;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_mio_twsx_int_w1s_s cn9; */
+};
+typedef union bdk_mio_twsx_int_w1s bdk_mio_twsx_int_w1s_t;
+
+static inline uint64_t BDK_MIO_TWSX_INT_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_TWSX_INT_W1S(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0d0001020ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0d0001020ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0d0001020ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=5))
+ return 0x87e0d0001020ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("MIO_TWSX_INT_W1S", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_TWSX_INT_W1S(a) bdk_mio_twsx_int_w1s_t
+#define bustype_BDK_MIO_TWSX_INT_W1S(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_TWSX_INT_W1S(a) "MIO_TWSX_INT_W1S"
+#define device_bar_BDK_MIO_TWSX_INT_W1S(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_TWSX_INT_W1S(a) (a)
+#define arguments_BDK_MIO_TWSX_INT_W1S(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) mio_tws#_io_ctl
+ *
+ * MIO TWSI IO Control Register
+ * This register controls the TWSI IO drive strength and slew rates. Index {a} of zero
+ * (MIO_TWS(0)_IO_CTL) is used to control all TWSI outputs on CNXXXX; other index
+ * values have no effect.
+ */
+union bdk_mio_twsx_io_ctl
+{
+ uint64_t u;
+ struct bdk_mio_twsx_io_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t drive : 2; /**< [ 3: 2](R/W) TWSI bus pin output drive strength.
+ 0x0 = 2 mA.
+ 0x1 = 4 mA.
+ 0x2 = 8 mA.
+ 0x3 = 16 mA. */
+ uint64_t reserved_1 : 1;
+ uint64_t slew : 1; /**< [ 0: 0](R/W) TWSI bus pin output slew rate control.
+ 0 = Low slew rate.
+ 1 = High slew rate. */
+#else /* Word 0 - Little Endian */
+ uint64_t slew : 1; /**< [ 0: 0](R/W) TWSI bus pin output slew rate control.
+ 0 = Low slew rate.
+ 1 = High slew rate. */
+ uint64_t reserved_1 : 1;
+ uint64_t drive : 2; /**< [ 3: 2](R/W) TWSI bus pin output drive strength.
+ 0x0 = 2 mA.
+ 0x1 = 4 mA.
+ 0x2 = 8 mA.
+ 0x3 = 16 mA. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_mio_twsx_io_ctl_s cn; */
+};
+typedef union bdk_mio_twsx_io_ctl bdk_mio_twsx_io_ctl_t;
+
+static inline uint64_t BDK_MIO_TWSX_IO_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_TWSX_IO_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=5))
+ return 0x87e0d0001070ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("MIO_TWSX_IO_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_TWSX_IO_CTL(a) bdk_mio_twsx_io_ctl_t
+#define bustype_BDK_MIO_TWSX_IO_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_TWSX_IO_CTL(a) "MIO_TWSX_IO_CTL"
+#define device_bar_BDK_MIO_TWSX_IO_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_TWSX_IO_CTL(a) (a)
+#define arguments_BDK_MIO_TWSX_IO_CTL(a) (a),-1,-1,-1
+
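+/*
+ * Illustrative sketch, not part of the imported sources: per the
+ * description above only index 0 takes effect, so a board tunes all
+ * TWSI pads in one place, e.g. 8 mA drive with the low slew rate:
+ */
+static inline void example_twsx_pad_setup(bdk_node_t node)
+{
+    bdk_mio_twsx_io_ctl_t io;
+    io.u = 0;
+    io.s.drive = 0x2; /* 0x0=2 mA, 0x1=4 mA, 0x2=8 mA, 0x3=16 mA */
+    io.s.slew = 0;    /* low slew rate */
+    BDK_CSR_WRITE(node, BDK_MIO_TWSX_IO_CTL(0), io.u);
+}
+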
+/**
+ * Register (RSL) mio_tws#_mode
+ *
+ * TWSI Mode and Control Register
+ */
+union bdk_mio_twsx_mode
+{
+ uint64_t u;
+ struct bdk_mio_twsx_mode_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t refclk_src : 1; /**< [ 4: 4](R/W) Reference clock source used to generate TWSI clock. See TWSI_CLK and TWSI_CLKCTL for
+ details.
+ 0 = 100 MHz input reference generates TWSI clock.
+ 1 = Coprocessor clock generates TWSI clock. */
+ uint64_t bus_mon_rst : 1; /**< [ 3: 3](WO/H) Reset the TWSI bus monitor for both HLC and non-HLC mode.
+ When the TWSI bus is in a hang state with a timeout interrupt, it is possible that the TWSI bus
+ monitor is still waiting for STP on the bus before accepting any new commands.
+ Write 1 to send a pulse that resets the interface monitor back to its initial condition. */
+ uint64_t block_mode : 1; /**< [ 2: 2](R/W) Block transfer mode in HLC; only valid in HLC mode.
+ When the device is in block transfer mode, software can access TWSI data through a
+ FIFO interface. Software needs to write MIO_TWS()_TWSI_BLOCK_CTL[BLOCK_SIZE] with
+ the number of bytes to be transferred/received. */
+ uint64_t stretch : 1; /**< [ 1: 1](R/W) Clock stretching enable.
+ When enabled and the device is in master mode, the slave device is allowed
+ to extend the low period of the clock. During the clock extension period, the SCL output from
+ the master device is disabled. */
+ uint64_t hs_mode : 1; /**< [ 0: 0](R/W) I2C bus high-speed mode.
+
+ 0 = Open drain drive on TWS_SCL. TWS_SCL clock signal high-to-low ratio is 1 to 1.
+ OSCL output frequency divisor is 10.
+
+ 1 = Current sourced circuit is used to drive TWS_SCL pin when device is in master mode,
+ but disabled after each repeated start condition
+ and after each ACK or NACK to give a slave a chance to stretch the clock.
+ TWS_SCL clock signal high-to-low ratio is 1 to 2.
+ OSCL output frequency divisor is 15. */
+#else /* Word 0 - Little Endian */
+ uint64_t hs_mode : 1; /**< [ 0: 0](R/W) I2C bus high-speed mode.
+
+ 0 = Open drain drive on TWS_SCL. TWS_SCL clock signal high-to-low ratio is 1 to 1.
+ OSCL output frequency divisor is 10.
+
+ 1 = Current sourced circuit is used to drive TWS_SCL pin when device is in master mode,
+ but disabled after each repeated start condition
+ and after each ACK or NACK to give a slave a chance to stretch the clock.
+ TWS_SCL clock signal high-to-low ratio is 1 to 2.
+ OSCL output frequency divisor is 15. */
+ uint64_t stretch : 1; /**< [ 1: 1](R/W) Clock stretching enable.
+ When enabled and the device is in master mode, the slave device is allowed
+ to extend the low period of the clock. During the clock extension period, the SCL output from
+ the master device is disabled. */
+ uint64_t block_mode : 1; /**< [ 2: 2](R/W) Block transfer mode in HLC; only valid in HLC mode.
+ When the device is in block transfer mode, software can access TWSI data through a
+ FIFO interface. Software needs to write MIO_TWS()_TWSI_BLOCK_CTL[BLOCK_SIZE] with
+ the number of bytes to be transferred/received. */
+ uint64_t bus_mon_rst : 1; /**< [ 3: 3](WO/H) Reset the TWSI bus monitor for both HLC and non-HLC mode.
+ When the TWSI bus is in a hang state with a timeout interrupt, it is possible that the TWSI bus
+ monitor is still waiting for STP on the bus before accepting any new commands.
+ Write 1 to send a pulse that resets the interface monitor back to its initial condition. */
+ uint64_t refclk_src : 1; /**< [ 4: 4](R/W) Reference clock source used to generate TWSI clock. See TWSI_CLK and TWSI_CLKCTL for
+ details.
+ 0 = 100 MHz input reference generates TWSI clock.
+ 1 = Coprocessor clock generates TWSI clock. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_mio_twsx_mode_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t stretch : 1; /**< [ 1: 1](R/W) Clock stretching enable.
+ When enabled and the device is in master mode, the slave device is allowed
+ to extend the low period of the clock. During the clock extension period, the SCL output from
+ the master device is disabled. */
+ uint64_t hs_mode : 1; /**< [ 0: 0](R/W) I2C bus high-speed mode.
+
+ 0 = Open drain drive on TWS_SCL. TWS_SCL clock signal high-to-low ratio is 1 to 1.
+ OSCL output frequency divisor is 10.
+
+ 1 = Current sourced circuit is used to drive TWS_SCL pin when device is in master mode,
+ but disabled after each repeated start condition
+ and after each ACK or NACK to give a slave a chance to stretch the clock.
+ TWS_SCL clock signal high-to-low ratio is 1 to 2.
+ OSCL output frequency divisor is 15. */
+#else /* Word 0 - Little Endian */
+ uint64_t hs_mode : 1; /**< [ 0: 0](R/W) I2C bus high-speed mode.
+
+ 0 = Open drain drive on TWS_SCL. TWS_SCL clock signal high-to-low ratio is 1 to 1.
+ OSCL output frequency divisor is 10.
+
+ 1 = Current sourced circuit is used to drive TWS_SCL pin when device is in master mode,
+ but disabled after each repeated start condition
+ and after each ACK or NACK to give a slave a chance to stretch the clock.
+ TWS_SCL clock signal high-to-low ratio is 1 to 2.
+ OSCL output frequency divisor is 15. */
+ uint64_t stretch : 1; /**< [ 1: 1](R/W) Clock stretching enable.
+ When enabled and the device is in master mode, the slave device is allowed
+ to extend the low period of the clock. During the clock extension period, the SCL output from
+ the master device is disabled. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_mio_twsx_mode_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t refclk_src : 1; /**< [ 4: 4](R/W) Reference clock source used to generate TWSI clock. See TWSI_CLK and TWSI_CLKCTL for
+ details.
+ 0 = 100 MHz input reference generates TWSI clock.
+ 1 = Coprocessor clock generates TWSI clock. */
+ uint64_t bus_mon_rst : 1; /**< [ 3: 3](WO/H) Reset the TWSI bus monitor for both HLC and non-HLC mode.
+ When the TWSI bus is in a hang state with a timeout interrupt, it is possible that the TWSI bus
+ monitor is still waiting for STP on the bus before accepting any new commands.
+ Write 1 to send a pulse that resets the interface monitor back to its initial condition. */
+ uint64_t block_mode : 1; /**< [ 2: 2](R/W) Block transfer mode in HLC; only valid in HLC mode.
+ When the device is in block transfer mode, software can access TWSI data through a
+ FIFO interface. Software needs to write MIO_TWS()_TWSI_BLOCK_CTL[BLOCK_SIZE] with
+ the number of bytes to be transferred/received. */
+ uint64_t stretch : 1; /**< [ 1: 1](R/W) Clock stretching enable.
+ When enabled and the device is in non-HLC master mode, the slave device is allowed
+ to extend the low period of the clock. During the clock extension period, the SCL output from
+ the master device is disabled. */
+ uint64_t hs_mode : 1; /**< [ 0: 0](R/W) I2C bus high-speed mode.
+
+ 0 = Open drain drive on TWS_SCL. TWS_SCL clock signal high-to-low ratio is 1 to 1.
+ OSCL output frequency divisor is 10.
+
+ 1 = Current sourced circuit is used to drive TWS_SCL pin when device is in master mode,
+ but disabled after each repeated start condition
+ and after each ACK or NACK to give a slave a chance to stretch the clock.
+ TWS_SCL clock signal high-to-low ratio is 1 to 2.
+ OSCL output frequency divisor is 15. */
+#else /* Word 0 - Little Endian */
+ uint64_t hs_mode : 1; /**< [ 0: 0](R/W) I2C bus high-speed mode.
+
+ 0 = Open drain drive on TWS_SCL. TWS_SCL clock signal high-to-low ratio is 1 to 1.
+ OSCL output frequency divisor is 10.
+
+ 1 = Current sourced circuit is used to drive TWS_SCL pin when device is in master mode,
+ but disabled after each repeated start condition
+ and after each ACK or NACK to give a slave a chance to stretch the clock.
+ TWS_SCL clock signal high-to-low ratio is 1 to 2.
+ OSCL output frequency divisor is 15. */
+ uint64_t stretch : 1; /**< [ 1: 1](R/W) Clock stretching enable.
+ When enabled and the device is in non-HLC master mode, the slave device is allowed
+ to extend the low period of the clock. During the clock extension period, the SCL output from
+ the master device is disabled. */
+ uint64_t block_mode : 1; /**< [ 2: 2](R/W) Block transfer mode in HLC; only valid in HLC mode.
+ When the device is in block transfer mode, software can access TWSI data through a
+ FIFO interface. Software needs to write MIO_TWS()_TWSI_BLOCK_CTL[BLOCK_SIZE] with
+ the number of bytes to be transferred/received. */
+ uint64_t bus_mon_rst : 1; /**< [ 3: 3](WO/H) Reset the TWSI bus monitor for both HLC and non-HLC mode.
+ When the TWSI bus is in a hang state with a timeout interrupt, it is possible that the TWSI bus
+ monitor is still waiting for STP on the bus before accepting any new commands.
+ Write 1 to send a pulse that resets the interface monitor back to its initial condition. */
+ uint64_t refclk_src : 1; /**< [ 4: 4](R/W) Reference clock source used to generate TWSI clock. See TWSI_CLK and TWSI_CLKCTL for
+ details.
+ 0 = 100 MHz input reference generates TWSI clock.
+ 1 = Coprocessor clock generates TWSI clock. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_mio_twsx_mode bdk_mio_twsx_mode_t;
+
+static inline uint64_t BDK_MIO_TWSX_MODE(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_TWSX_MODE(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0d0001038ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0d0001038ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0d0001038ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=5))
+ return 0x87e0d0001038ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("MIO_TWSX_MODE", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_TWSX_MODE(a) bdk_mio_twsx_mode_t
+#define bustype_BDK_MIO_TWSX_MODE(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_TWSX_MODE(a) "MIO_TWSX_MODE"
+#define device_bar_BDK_MIO_TWSX_MODE(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_TWSX_MODE(a) (a)
+#define arguments_BDK_MIO_TWSX_MODE(a) (a),-1,-1,-1
+
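+/*
+ * Illustrative sketch, not part of the imported sources: enabling
+ * high-speed mode plus clock stretching is a read-modify-write of the
+ * mode register (fields above bit 1 only decode on CN9XXX):
+ */
+static inline void example_twsx_hs_mode(bdk_node_t node, int bus)
+{
+    bdk_mio_twsx_mode_t mode;
+    mode.u = BDK_CSR_READ(node, BDK_MIO_TWSX_MODE(bus));
+    mode.s.hs_mode = 1; /* current-sourced SCL drive, divisor 15 */
+    mode.s.stretch = 1; /* let slaves extend the SCL low period */
+    BDK_CSR_WRITE(node, BDK_MIO_TWSX_MODE(bus), mode.u);
+}
+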
+/**
+ * Register (RSL) mio_tws#_msix_pba#
+ *
+ * TWSI MSI-X Pending Bit Array Registers
+ * This register is the MSI-X PBA table; the bit number is indexed by the MIO_TWS_INT_VEC_E
+ * enumeration.
+ */
+union bdk_mio_twsx_msix_pbax
+{
+ uint64_t u;
+ struct bdk_mio_twsx_msix_pbax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pend : 64; /**< [ 63: 0](RO/H) Pending message for the associated MIO_TWS()_MSIX_VEC()_CTL, enumerated by
+ MIO_TWS_INT_VEC_E. Bits that have no associated MIO_TWS_INT_VEC_E are zero. */
+#else /* Word 0 - Little Endian */
+ uint64_t pend : 64; /**< [ 63: 0](RO/H) Pending message for the associated MIO_TWS()_MSIX_VEC()_CTL, enumerated by
+ MIO_TWS_INT_VEC_E. Bits that have no associated MIO_TWS_INT_VEC_E are zero. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_mio_twsx_msix_pbax_s cn; */
+};
+typedef union bdk_mio_twsx_msix_pbax bdk_mio_twsx_msix_pbax_t;
+
+static inline uint64_t BDK_MIO_TWSX_MSIX_PBAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_TWSX_MSIX_PBAX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b==0)))
+ return 0x87e0d0ff0000ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b==0)))
+ return 0x87e0d0ff0000ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=5) && (b==0)))
+ return 0x87e0d0ff0000ll + 0x1000000ll * ((a) & 0x7) + 8ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=5) && (b==0)))
+ return 0x87e0d0ff0000ll + 0x1000000ll * ((a) & 0x7) + 8ll * ((b) & 0x0);
+ __bdk_csr_fatal("MIO_TWSX_MSIX_PBAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_MIO_TWSX_MSIX_PBAX(a,b) bdk_mio_twsx_msix_pbax_t
+#define bustype_BDK_MIO_TWSX_MSIX_PBAX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_TWSX_MSIX_PBAX(a,b) "MIO_TWSX_MSIX_PBAX"
+#define device_bar_BDK_MIO_TWSX_MSIX_PBAX(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_MIO_TWSX_MSIX_PBAX(a,b) (a)
+#define arguments_BDK_MIO_TWSX_MSIX_PBAX(a,b) (a),(b),-1,-1
+
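+/*
+ * Illustrative sketch, not part of the imported sources: TWSI has few
+ * enough vectors that only PBA word 0 decodes, so a pending check is
+ * a single read and a shift by the MIO_TWS_INT_VEC_E vector number:
+ */
+static inline int example_twsx_msix_pending(bdk_node_t node, int bus, int vec)
+{
+    uint64_t pba = BDK_CSR_READ(node, BDK_MIO_TWSX_MSIX_PBAX(bus, 0));
+    return (pba >> vec) & 1;
+}
+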
+/**
+ * Register (RSL) mio_tws#_msix_vec#_addr
+ *
+ * TWSI MSI-X Vector Table Address Registers
+ * This register is the MSI-X vector table, indexed by the MIO_TWS_INT_VEC_E enumeration.
+ */
+union bdk_mio_twsx_msix_vecx_addr
+{
+ uint64_t u;
+ struct bdk_mio_twsx_msix_vecx_addr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_53_63 : 11;
+ uint64_t addr : 51; /**< [ 52: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_1 : 1;
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's MIO_TWS()_MSIX_VEC()_ADDR, MIO_TWS()_MSIX_VEC()_CTL, and corresponding
+ bit of MIO_TWS()_MSIX_PBA() are RAZ/WI and do not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_MIO_TWS()_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is set, all vectors are secure and function as if [SECVEC]
+ was set. */
+#else /* Word 0 - Little Endian */
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's MIO_TWS()_MSIX_VEC()_ADDR, MIO_TWS()_MSIX_VEC()_CTL, and corresponding
+ bit of MIO_TWS()_MSIX_PBA() are RAZ/WI and do not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_MIO_TWS()_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is set, all vectors are secure and function as if [SECVEC]
+ was set. */
+ uint64_t reserved_1 : 1;
+ uint64_t addr : 51; /**< [ 52: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_53_63 : 11;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_mio_twsx_msix_vecx_addr_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_1 : 1;
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's MIO_TWS()_MSIX_VEC()_ADDR, MIO_TWS()_MSIX_VEC()_CTL, and corresponding
+ bit of MIO_TWS()_MSIX_PBA() are RAZ/WI and do not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_MIO_TWS()_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is set, all vectors are secure and function as if [SECVEC]
+ was set. */
+#else /* Word 0 - Little Endian */
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's MIO_TWS()_MSIX_VEC()_ADDR, MIO_TWS()_MSIX_VEC()_CTL, and corresponding
+ bit of MIO_TWS()_MSIX_PBA() are RAZ/WI and do not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_MIO_TWS()_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is set, all vectors are secure and function as if [SECVEC]
+ was set. */
+ uint64_t reserved_1 : 1;
+ uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_mio_twsx_msix_vecx_addr_s cn9; */
+};
+typedef union bdk_mio_twsx_msix_vecx_addr bdk_mio_twsx_msix_vecx_addr_t;
+
+static inline uint64_t BDK_MIO_TWSX_MSIX_VECX_ADDR(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_TWSX_MSIX_VECX_ADDR(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b==0)))
+ return 0x87e0d0f00000ll + 0x1000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b==0)))
+ return 0x87e0d0f00000ll + 0x1000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=5) && (b==0)))
+ return 0x87e0d0f00000ll + 0x1000000ll * ((a) & 0x7) + 0x10ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=5) && (b==0)))
+ return 0x87e0d0f00000ll + 0x1000000ll * ((a) & 0x7) + 0x10ll * ((b) & 0x0);
+ __bdk_csr_fatal("MIO_TWSX_MSIX_VECX_ADDR", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_MIO_TWSX_MSIX_VECX_ADDR(a,b) bdk_mio_twsx_msix_vecx_addr_t
+#define bustype_BDK_MIO_TWSX_MSIX_VECX_ADDR(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_TWSX_MSIX_VECX_ADDR(a,b) "MIO_TWSX_MSIX_VECX_ADDR"
+#define device_bar_BDK_MIO_TWSX_MSIX_VECX_ADDR(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_MIO_TWSX_MSIX_VECX_ADDR(a,b) (a)
+#define arguments_BDK_MIO_TWSX_MSIX_VECX_ADDR(a,b) (a),(b),-1,-1
+
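+/*
+ * Illustrative sketch, not part of the imported sources: [ADDR] holds
+ * IOVA bits <52:2> (<48:2> on CN8XXX), so the IOVA must be pre-shifted
+ * before it is stored in the bitfield:
+ */
+static inline void example_twsx_msix_set_addr(bdk_node_t node, int bus,
+                                              int vec, uint64_t iova)
+{
+    bdk_mio_twsx_msix_vecx_addr_t va;
+    va.u = 0;
+    va.s.addr = iova >> 2; /* field stores IOVA<52:2> */
+    va.s.secvec = 0;       /* leave the vector nonsecure-accessible */
+    BDK_CSR_WRITE(node, BDK_MIO_TWSX_MSIX_VECX_ADDR(bus, vec), va.u);
+}
+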
+/**
+ * Register (RSL) mio_tws#_msix_vec#_ctl
+ *
+ * TWSI MSI-X Vector Table Control and Data Registers
+ * This register is the MSI-X vector table, indexed by the MIO_TWS_INT_VEC_E enumeration.
+ */
+union bdk_mio_twsx_msix_vecx_ctl
+{
+ uint64_t u;
+ struct bdk_mio_twsx_msix_vecx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts will be sent to this vector. */
+ uint64_t data : 32; /**< [ 31: 0](R/W) Data to use for MSI-X delivery of this vector. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 32; /**< [ 31: 0](R/W) Data to use for MSI-X delivery of this vector. */
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts will be sent to this vector. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_mio_twsx_msix_vecx_ctl_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts will be sent to this vector. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t data : 20; /**< [ 19: 0](R/W) Data to use for MSI-X delivery of this vector. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 20; /**< [ 19: 0](R/W) Data to use for MSI-X delivery of this vector. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts will be sent to this vector. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_mio_twsx_msix_vecx_ctl_s cn9; */
+};
+typedef union bdk_mio_twsx_msix_vecx_ctl bdk_mio_twsx_msix_vecx_ctl_t;
+
+static inline uint64_t BDK_MIO_TWSX_MSIX_VECX_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_TWSX_MSIX_VECX_CTL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b==0)))
+ return 0x87e0d0f00008ll + 0x1000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b==0)))
+ return 0x87e0d0f00008ll + 0x1000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=5) && (b==0)))
+ return 0x87e0d0f00008ll + 0x1000000ll * ((a) & 0x7) + 0x10ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=5) && (b==0)))
+ return 0x87e0d0f00008ll + 0x1000000ll * ((a) & 0x7) + 0x10ll * ((b) & 0x0);
+ __bdk_csr_fatal("MIO_TWSX_MSIX_VECX_CTL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_MIO_TWSX_MSIX_VECX_CTL(a,b) bdk_mio_twsx_msix_vecx_ctl_t
+#define bustype_BDK_MIO_TWSX_MSIX_VECX_CTL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_TWSX_MSIX_VECX_CTL(a,b) "MIO_TWSX_MSIX_VECX_CTL"
+#define device_bar_BDK_MIO_TWSX_MSIX_VECX_CTL(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_MIO_TWSX_MSIX_VECX_CTL(a,b) (a)
+#define arguments_BDK_MIO_TWSX_MSIX_VECX_CTL(a,b) (a),(b),-1,-1
+
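+/*
+ * Illustrative sketch, not part of the imported sources: the companion
+ * control word carries the MSI-X message data and the per-vector mask
+ * (note [DATA] narrows to 20 bits on CN8XXX):
+ */
+static inline void example_twsx_msix_set_ctl(bdk_node_t node, int bus,
+                                             int vec, uint32_t data)
+{
+    bdk_mio_twsx_msix_vecx_ctl_t vc;
+    vc.u = 0;
+    vc.s.data = data; /* message data delivered for this vector */
+    vc.s.mask = 0;    /* 1 would suppress MSI-X delivery */
+    BDK_CSR_WRITE(node, BDK_MIO_TWSX_MSIX_VECX_CTL(bus, vec), vc.u);
+}
+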
+/**
+ * Register (RSL) mio_tws#_sw_twsi
+ *
+ * TWSI Software to TWSI Register
+ * This register allows software to:
+ * * Initiate master-mode operations with a write operation, and read the result with a
+ * read operation.
+ * * Load four bytes for later retrieval (slave mode) with a write operation and check validity
+ * with a read operation.
+ * * Launch a configuration read/write operation with a write operation and read the result with
+ * a read operation.
+ *
+ * This register should be read or written by software, and read by the TWSI device. The TWSI
+ * device can use either two-byte or five-byte read operations to reference this register.
+ * The TWSI device considers this register valid when [V] = 1 and [SLONLY] = 1.
+ */
+union bdk_mio_twsx_sw_twsi
+{
+ uint64_t u;
+ struct bdk_mio_twsx_sw_twsi_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t v : 1; /**< [ 63: 63](RC/W/H) Valid bit. Set on a write operation (should always be written with a 1). Cleared when a
+ TWSI master-mode operation completes, a TWSI configuration register access completes, or
+ when the TWSI device reads the register if [SLONLY] = 1. */
+ uint64_t slonly : 1; /**< [ 62: 62](R/W) Slave only mode.
+
+ When this bit is set, no operations are initiated with a write operation. Only the D field
+ is updated in this case.
+
+ When this bit is clear, a write operation initiates either a master-mode operation or a
+ TWSI configuration register access. */
+ uint64_t eia : 1; /**< [ 61: 61](R/W) Extended internal address. Sends an additional internal address byte (the MSB of [IA] is
+ from MIO_TWS()_SW_TWSI_EXT[IA]). */
+ uint64_t op : 4; /**< [ 60: 57](R/W) Opcode field. When the register is written with [SLONLY] = 0, this field initiates one of
+ the following read or write operations:
+ 0x0 = 7-bit byte master-mode operation.
+ 0x1 = 7-bit byte combined-read master-mode operation, 7-bit byte write-with-IA master-mode
+ operation.
+ 0x2 = 10-bit byte master-mode operation.
+ 0x3 = 10-bit byte combined-read master-mode operation, 10-bit byte write-with-IA master-
+ mode operation.
+ 0x4 = TWSI master-clock register, TWSI_CLK in TWSI Master Clock Register.
+ 0x6 = See [EOP_IA] field.
+ 0x8 = 7-bit 4-byte master-mode operation.
+ 0x9 = 7-bit 4-byte combined-read master-mode operation, 7-bit 4-byte write-with-IA master-
+ mode operation.
+ 0xA = 10-bit 4-byte master-mode operation.
+ 0xB = 10-bit 4-byte combined-read master-mode operation, 10-bit 4-byte write-with-IA
+ master-mode operation. */
+ uint64_t r : 1; /**< [ 56: 56](R/W/H) Read bit or result. If this bit is set on a CSR write when [SLONLY] = 0, the
+ operation is a read operation (if clear, it is a write operation).
+ On a CSR read, this bit returns the result indication for the most recent
+ master-mode operation, 1 = success, 0 = failure. */
+ uint64_t sovr : 1; /**< [ 55: 55](R/W) Size override. If this bit is set, use the [SIZE] field to determine the master-mode
+ operation size rather than what [OP] specifies. For operations greater than four bytes, the
+ additional data is contained in MIO_TWS()_SW_TWSI_EXT[DATA]. */
+ uint64_t size : 3; /**< [ 54: 52](R/W) Size minus one. Specifies the size in bytes of the master-mode operation if
+ [SOVR] = 1. (0 = 1 byte, 1 = 2 bytes, ... 7 = 8 bytes). */
+ uint64_t scr : 2; /**< [ 51: 50](R/W) Scratch. Unused, but retain state. */
+ uint64_t addr : 10; /**< [ 49: 40](R/W) Address field. The address of the remote device for a master-mode operation. ADDR\<9:7\> are
+ only used for 10-bit addressing.
+
+ Note that when mastering a 7-bit OP, ADDR\<6:0\> should not take any of the values 0x78,
+ 0x79, 0x7A, or 0x7B. (These 7-bit addresses are reserved to extend to 10-bit addressing). */
+ uint64_t ia : 5; /**< [ 39: 35](R/W) Internal address. Used when launching a combined master-mode operation. The lower 3
+ address bits are contained in [EOP_IA]. */
+ uint64_t eop_ia : 3; /**< [ 34: 32](R/W) Extra opcode, used when OP\<3:0\> = 0x6 and [SLONLY] = 0.
+ 0x0 = TWSI slave address register (TWSI_SLAVE_ADD).
+ 0x1 = TWSI data register (TWSI_DATA).
+ 0x2 = TWSI control register (TWSI_CTL).
+ 0x3 = (when [R] = 0) TWSI clock control register (TWSI_CLKCTL).
+ 0x3 = (when [R] = 1) TWSI status register (TWSI_STAT).
+ 0x4 = TWSI extended slave register (TWSI_SLAVE_ADD_EXT).
+ 0x7 = TWSI soft reset register (TWSI_RST).
+
+ Also provides the lower three bits of internal address when launching a combined master-mode
+ operation. */
+ uint64_t data : 32; /**< [ 31: 0](R/W/H) Data field.
+ Used on a write operation when:
+ * Initiating a master-mode write operation ([SLONLY] = 0).
+ * Writing a TWSI configuration register ([SLONLY] = 0).
+ * A slave-mode write operation ([SLONLY] = 1).
+
+ The read value is updated by:
+ * A write operation to this register.
+ * Master-mode completion (contains error code).
+ * TWSI configuration-register read (contains result). */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 32; /**< [ 31: 0](R/W/H) Data field.
+ Used on a write operation when:
+ * Initiating a master-mode write operation ([SLONLY] = 0).
+ * Writing a TWSI configuration register ([SLONLY] = 0).
+ * A slave-mode write operation ([SLONLY] = 1).
+
+ The read value is updated by:
+ * A write operation to this register.
+ * Master-mode completion (contains error code).
+ * TWSI configuration-register read (contains result). */
+ uint64_t eop_ia : 3; /**< [ 34: 32](R/W) Extra opcode, used when OP\<3:0\> = 0x6 and [SLONLY] = 0.
+ 0x0 = TWSI slave address register (TWSI_SLAVE_ADD).
+ 0x1 = TWSI data register (TWSI_DATA).
+ 0x2 = TWSI control register (TWSI_CTL).
+ 0x3 = (when [R] = 0) TWSI clock control register (TWSI_CLKCTL).
+ 0x3 = (when [R] = 1) TWSI status register (TWSI_STAT).
+ 0x4 = TWSI extended slave register (TWSI_SLAVE_ADD_EXT).
+ 0x7 = TWSI soft reset register (TWSI_RST).
+
+ Also provides the lower three bits of internal address when launching a combined master-mode
+ operation. */
+ uint64_t ia : 5; /**< [ 39: 35](R/W) Internal address. Used when launching a combined master-mode operation. The lower 3
+ address bits are contained in [EOP_IA]. */
+ uint64_t addr : 10; /**< [ 49: 40](R/W) Address field. The address of the remote device for a master-mode operation. ADDR\<9:7\> are
+ only used for 10-bit addressing.
+
+ Note that when mastering a 7-bit OP, ADDR\<6:0\> should not take any of the values 0x78,
+ 0x79, 0x7A, or 0x7B. (These 7-bit addresses are reserved to extend to 10-bit addressing). */
+ uint64_t scr : 2; /**< [ 51: 50](R/W) Scratch. Unused, but retain state. */
+ uint64_t size : 3; /**< [ 54: 52](R/W) Size minus one. Specifies the size in bytes of the master-mode operation if
+ [SOVR] = 1. (0 = 1 byte, 1 = 2 bytes, ... 7 = 8 bytes). */
+ uint64_t sovr : 1; /**< [ 55: 55](R/W) Size override. If this bit is set, use the [SIZE] field to determine the master-mode
+ operation size rather than what [OP] specifies. For operations greater than four bytes, the
+ additional data is contained in MIO_TWS()_SW_TWSI_EXT[DATA]. */
+ uint64_t r : 1; /**< [ 56: 56](R/W/H) Read bit or result. If this bit is set on a CSR write when [SLONLY] = 0, the
+ operation is a read operation (if clear, it is a write operation).
+ On a CSR read, this bit returns the result indication for the most recent
+ master-mode operation, 1 = success, 0 = failure. */
+ uint64_t op : 4; /**< [ 60: 57](R/W) Opcode field. When the register is written with [SLONLY] = 0, this field initiates one of
+ the following read or write operations:
+ 0x0 = 7-bit byte master-mode operation.
+ 0x1 = 7-bit byte combined-read master-mode operation, 7-bit byte write-with-IA master-mode
+ operation.
+ 0x2 = 10-bit byte master-mode operation.
+ 0x3 = 10-bit byte combined-read master-mode operation, 10-bit byte write-with-IA master-
+ mode operation.
+ 0x4 = TWSI master-clock register, TWSI_CLK in TWSI Master Clock Register.
+ 0x6 = See [EOP_IA] field.
+ 0x8 = 7-bit 4-byte master-mode operation.
+ 0x9 = 7-bit 4-byte combined-read master-mode operation, 7-bit 4-byte write-with-IA master-
+ mode operation.
+ 0xA = 10-bit 4-byte master-mode operation.
+ 0xB = 10-bit 4-byte combined-read master-mode operation, 10-bit 4-byte write-with-IA
+ master-mode operation. */
+ uint64_t eia : 1; /**< [ 61: 61](R/W) Extended internal address. Sends an additional internal address byte (the MSB of [IA] is
+ from MIO_TWS()_SW_TWSI_EXT[IA]). */
+ uint64_t slonly : 1; /**< [ 62: 62](R/W) Slave only mode.
+
+ When this bit is set, no operations are initiated with a write operation. Only the D field
+ is updated in this case.
+
+ When this bit is clear, a write operation initiates either a master-mode operation or a
+ TWSI configuration register access. */
+ uint64_t v : 1; /**< [ 63: 63](RC/W/H) Valid bit. Set on a write operation (should always be written with a 1). Cleared when a
+ TWSI master-mode operation completes, a TWSI configuration register access completes, or
+ when the TWSI device reads the register if [SLONLY] = 1. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_mio_twsx_sw_twsi_s cn8; */
+ struct bdk_mio_twsx_sw_twsi_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t v : 1; /**< [ 63: 63](RC/W/H) Valid bit. Set on a write operation (should always be written with a 1). Cleared when a
+ TWSI master-mode operation completes, a TWSI configuration register access completes, or
+ when the TWSI device reads the register if [SLONLY] = 1. */
+ uint64_t slonly : 1; /**< [ 62: 62](R/W) Slave only mode.
+
+ When this bit is set, no operations are initiated with a write operation. Only the D field
+ is updated in this case.
+
+ When this bit is clear, a write operation initiates either a master-mode operation or a
+ TWSI configuration register access. */
+ uint64_t eia : 1; /**< [ 61: 61](R/W) Extended internal address. Sends an additional internal address byte (the MSB of [IA] is
+ from MIO_TWS()_SW_TWSI_EXT[IA]). */
+ uint64_t op : 4; /**< [ 60: 57](R/W) Opcode field. When the register is written with [SLONLY] = 0, this field initiates one of
+ the following read or write operations:
+ 0x0 = 7-bit byte master-mode operation.
+ 0x1 = 7-bit byte combined-read master-mode operation, 7-bit byte write-with-IA master-mode
+ operation.
+ 0x2 = 10-bit byte master-mode operation.
+ 0x3 = 10-bit byte combined-read master-mode operation, 10-bit byte write-with-IA master-
+ mode operation.
+ 0x4 = TWSI master-clock register, TWSI_CLK in TWSI Master Clock Register.
+ 0x6 = See [EOP_IA] field.
+ 0x8 = 7-bit 4-byte master-mode operation.
+ 0x9 = 7-bit 4-byte combined-read master-mode operation, 7-bit 4-byte write-with-IA master-
+ mode operation.
+ 0xA = 10-bit 4-byte master-mode operation.
+ 0xB = 10-bit 4-byte combined-read master-mode operation, 10-bit 4-byte write-with-IA
+ master-mode operation. */
+ uint64_t r : 1; /**< [ 56: 56](R/W/H) Read bit or result. If this bit is set on a CSR write when [SLONLY] = 0, the
+ operation is a read operation (if clear, it is a write operation).
+ On a CSR read, this bit returns the result indication for the most recent
+ master-mode operation, 1 = success, 0 = failure. */
+ uint64_t sovr : 1; /**< [ 55: 55](R/W) Size override. If this bit is set, use the [SIZE] field to determine the master-mode
+ operation size rather than what [OP] specifies. For operations greater than four bytes, the
+ additional data is contained in MIO_TWS()_SW_TWSI_EXT[DATA]. In block mode,
+ all data is accessible through the FIFO interface MIO_TWS()_TWSI_BLOCK_FIFO. */
+ uint64_t size : 3; /**< [ 54: 52](R/W) Size minus one for HLC non-block mode. Specifies the size in bytes of the master-mode
+ operation if [SOVR] = 1. (0 = 1 byte, 1 = 2 bytes, ... 7 = 8 bytes).
+ Block mode's block size is defined by MIO_TWS()_TWSI_BLOCK_CTL[BLOCK_SIZE]. */
+ uint64_t scr : 2; /**< [ 51: 50](R/W) Scratch. Unused, but retain state. */
+ uint64_t addr : 10; /**< [ 49: 40](R/W) Address field. The address of the remote device for a master-mode operation. ADDR\<9:7\> are
+ only used for 10-bit addressing.
+
+ Note that when mastering a 7-bit OP, ADDR\<6:0\> should not take any of the values 0x78,
+ 0x79, 0x7A, or 0x7B. (These 7-bit addresses are reserved to extend to 10-bit addressing). */
+ uint64_t ia : 5; /**< [ 39: 35](R/W) Internal address. Used when launching a combined master-mode operation. The lower 3
+ address bits are contained in [EOP_IA]. */
+ uint64_t eop_ia : 3; /**< [ 34: 32](R/W) Extra opcode, used when OP\<3:0\> = 0x6 and [SLONLY] = 0.
+ 0x0 = TWSI slave address register (TWSI_SLAVE_ADD).
+ 0x1 = TWSI data register (TWSI_DATA).
+ 0x2 = TWSI control register (TWSI_CTL).
+ 0x3 = (when [R] = 0) TWSI clock control register (TWSI_CLKCTL).
+ 0x3 = (when [R] = 1) TWSI status register (TWSI_STAT).
+ 0x4 = TWSI extended slave register (TWSI_SLAVE_ADD_EXT).
+ 0x7 = TWSI soft reset register (TWSI_RST).
+
+ Also provides the lower three bits of internal address when launching a combined master-mode
+ operation. */
+ uint64_t data : 32; /**< [ 31: 0](R/W/H) Data field, bytes 0-3.
+ Used on a write operation when:
+ * Initiating a master-mode write operation ([SLONLY] = 0).
+ * Writing a TWSI configuration register ([SLONLY] = 0).
+ * A slave-mode write operation ([SLONLY] = 1).
+
+ The read value is updated by:
+ * A write operation to this register.
+ * Master-mode completion (contains error code).
+ * TWSI configuration-register read (contains result). */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 32; /**< [ 31: 0](R/W/H) Data field, bytes 0-3.
+ Used on a write operation when:
+ * Initiating a master-mode write operation ([SLONLY] = 0).
+ * Writing a TWSI configuration register ([SLONLY] = 0).
+ * A slave-mode write operation ([SLONLY] = 1).
+
+ The read value is updated by:
+ * A write operation to this register.
+ * Master-mode completion (contains error code).
+ * TWSI configuration-register read (contains result). */
+ uint64_t eop_ia : 3; /**< [ 34: 32](R/W) Extra opcode, used when OP\<3:0\> = 0x6 and [SLONLY] = 0.
+ 0x0 = TWSI slave address register (TWSI_SLAVE_ADD).
+ 0x1 = TWSI data register (TWSI_DATA).
+ 0x2 = TWSI control register (TWSI_CTL).
+ 0x3 = (when [R] = 0) TWSI clock control register (TWSI_CLKCTL).
+ 0x3 = (when [R] = 1) TWSI status register (TWSI_STAT).
+ 0x4 = TWSI extended slave register (TWSI_SLAVE_ADD_EXT).
+ 0x7 = TWSI soft reset register (TWSI_RST).
+
+ Also provides the lower three bits of internal address when launching a combined master-mode
+ operation. */
+ uint64_t ia : 5; /**< [ 39: 35](R/W) Internal address. Used when launching a combined master-mode operation. The lower 3
+ address bits are contained in [EOP_IA]. */
+ uint64_t addr : 10; /**< [ 49: 40](R/W) Address field. The address of the remote device for a master-mode operation. ADDR\<9:7\> are
+ only used for 10-bit addressing.
+
+ Note that when mastering a 7-bit OP, ADDR\<6:0\> should not take any of the values 0x78,
+ 0x79, 0x7A, or 0x7B. (These 7-bit addresses are reserved to extend to 10-bit addressing). */
+ uint64_t scr : 2; /**< [ 51: 50](R/W) Scratch. Unused, but retain state. */
+ uint64_t size : 3; /**< [ 54: 52](R/W) Size minus one for HLC non-block mode. Specifies the size in bytes of the master-mode
+ operation if [SOVR] = 1. (0 = 1 byte, 1 = 2 bytes, ... 7 = 8 bytes).
+ Block mode's block size is defined by MIO_TWS()_TWSI_BLOCK_CTL[BLOCK_SIZE]. */
+ uint64_t sovr : 1; /**< [ 55: 55](R/W) Size override. If this bit is set, use the [SIZE] field to determine the master-mode
+ operation size rather than what [OP] specifies. For operations greater than four bytes, the
+ additional data is contained in MIO_TWS()_SW_TWSI_EXT[DATA]. In block mode,
+ all data is accessible through the FIFO interface MIO_TWS()_TWSI_BLOCK_FIFO. */
+ uint64_t r : 1; /**< [ 56: 56](R/W/H) Read bit or result. If this bit is set on a CSR write when [SLONLY] = 0, the
+ operation is a read operation (if clear, it is a write operation).
+ On a CSR read, this bit returns the result indication for the most recent
+ master-mode operation, 1 = success, 0 = failure. */
+ uint64_t op : 4; /**< [ 60: 57](R/W) Opcode field. When the register is written with [SLONLY] = 0, this field initiates one of
+ the following read or write operations:
+ 0x0 = 7-bit byte master-mode operation.
+ 0x1 = 7-bit byte combined-read master-mode operation, 7-bit byte write-with-IA master-mode
+ operation.
+ 0x2 = 10-bit byte master-mode operation.
+ 0x3 = 10-bit byte combined-read master-mode operation, 10-bit byte write-with-IA master-
+ mode operation.
+ 0x4 = TWSI master-clock register, TWSI_CLK in TWSI Master Clock Register.
+ 0x6 = See [EOP_IA] field.
+ 0x8 = 7-bit 4-byte master-mode operation.
+ 0x9 = 7-bit 4-byte combined-read master-mode operation, 7-bit 4-byte write-with-IA master-
+ mode operation.
+ 0xA = 10-bit 4-byte master-mode operation.
+ 0xB = 10-bit 4-byte combined-read master-mode operation, 10-bit 4-byte write-with-IA
+ master-mode operation. */
+ uint64_t eia : 1; /**< [ 61: 61](R/W) Extended internal address. Sends an additional internal address byte (the MSB of [IA] is
+ from MIO_TWS()_SW_TWSI_EXT[IA]). */
+ uint64_t slonly : 1; /**< [ 62: 62](R/W) Slave only mode.
+
+ When this bit is set, no operations are initiated with a write operation. Only the D field
+ is updated in this case.
+
+ When this bit is clear, a write operation initiates either a master-mode operation or a
+ TWSI configuration register access. */
+ uint64_t v : 1; /**< [ 63: 63](RC/W/H) Valid bit. Set on a write operation (should always be written with a 1). Cleared when a
+ TWSI master-mode operation completes, a TWSI configuration register access completes, or
+ when the TWSI device reads the register if [SLONLY] = 1. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_mio_twsx_sw_twsi bdk_mio_twsx_sw_twsi_t;
+
+static inline uint64_t BDK_MIO_TWSX_SW_TWSI(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_TWSX_SW_TWSI(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0d0001000ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0d0001000ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0d0001000ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=5))
+ return 0x87e0d0001000ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("MIO_TWSX_SW_TWSI", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_TWSX_SW_TWSI(a) bdk_mio_twsx_sw_twsi_t
+#define bustype_BDK_MIO_TWSX_SW_TWSI(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_TWSX_SW_TWSI(a) "MIO_TWSX_SW_TWSI"
+#define device_bar_BDK_MIO_TWSX_SW_TWSI(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_TWSX_SW_TWSI(a) (a)
+#define arguments_BDK_MIO_TWSX_SW_TWSI(a) (a),-1,-1,-1
+
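+/*
+ * Illustrative sketch, not part of the imported sources, of the
+ * master-mode flow described above (the imported bdk-twsi.c follows
+ * the same shape): build the launch word with [V]=1, write it, poll
+ * until [V] clears, then read [R] back as the success flag. Assumes
+ * bdk_wait_usec(); timeout handling is omitted for brevity:
+ */
+static inline int example_twsx_read_byte(bdk_node_t node, int bus, int dev_addr)
+{
+    bdk_mio_twsx_sw_twsi_t sw;
+    sw.u = 0;
+    sw.s.v = 1;           /* valid bit launches the operation */
+    sw.s.r = 1;           /* read operation */
+    sw.s.op = 0x0;        /* 7-bit single-byte master-mode op */
+    sw.s.addr = dev_addr; /* 7-bit slave address */
+    BDK_CSR_WRITE(node, BDK_MIO_TWSX_SW_TWSI(bus), sw.u);
+    do /* [V] clears when the master-mode operation completes */
+    {
+        bdk_wait_usec(10);
+        sw.u = BDK_CSR_READ(node, BDK_MIO_TWSX_SW_TWSI(bus));
+    } while (sw.s.v);
+    if (!sw.s.r) /* on read-back, [R]=1 means success */
+        return -1;
+    return sw.s.data & 0xff;
+}
+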
+/**
+ * Register (RSL) mio_tws#_sw_twsi_ext
+ *
+ * TWSI Software to TWSI Extension Register
+ * This register contains an additional byte of internal address and four additional bytes of
+ * data to be used with TWSI master-mode operations.
+ *
+ * The IA field is sent as the first byte of internal address when performing master-mode
+ * combined-read/write-with-IA operations and MIO_TWS()_SW_TWSI[EIA] is set. The D field
+ * extends the data field of MIO_TWS()_SW_TWSI for a total of 8 bytes (SOVR must be set to
+ * perform operations greater than four bytes).
+ */
+union bdk_mio_twsx_sw_twsi_ext
+{
+ uint64_t u;
+ struct bdk_mio_twsx_sw_twsi_ext_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_40_63 : 24;
+ uint64_t ia : 8; /**< [ 39: 32](R/W) Extended internal address. Sent as the first byte of internal address when performing
+ master-mode combined-read/write-with-IA operations and MIO_TWS()_SW_TWSI[EIA] is set. */
+ uint64_t data : 32; /**< [ 31: 0](R/W/H) Extended data. Extends the data field of MIO_TWS()_SW_TWSI for a total of eight bytes
+ (MIO_TWS()_SW_TWSI[SOVR] must be set to 1 to perform operations greater than four
+ bytes). */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 32; /**< [ 31: 0](R/W/H) Extended data. Extends the data field of MIO_TWS()_SW_TWSI for a total of eight bytes
+ (MIO_TWS()_SW_TWSI[SOVR] must be set to 1 to perform operations greater than four
+ bytes). */
+ uint64_t ia : 8; /**< [ 39: 32](R/W) Extended internal address. Sent as the first byte of internal address when performing
+ master-mode combined-read/write-with-IA operations and MIO_TWS()_SW_TWSI[EIA] is set. */
+ uint64_t reserved_40_63 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_mio_twsx_sw_twsi_ext_s cn; */
+};
+typedef union bdk_mio_twsx_sw_twsi_ext bdk_mio_twsx_sw_twsi_ext_t;
+
+static inline uint64_t BDK_MIO_TWSX_SW_TWSI_EXT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_TWSX_SW_TWSI_EXT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0d0001018ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0d0001018ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0d0001018ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=5))
+ return 0x87e0d0001018ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("MIO_TWSX_SW_TWSI_EXT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_TWSX_SW_TWSI_EXT(a) bdk_mio_twsx_sw_twsi_ext_t
+#define bustype_BDK_MIO_TWSX_SW_TWSI_EXT(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_TWSX_SW_TWSI_EXT(a) "MIO_TWSX_SW_TWSI_EXT"
+#define device_bar_BDK_MIO_TWSX_SW_TWSI_EXT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_TWSX_SW_TWSI_EXT(a) (a)
+#define arguments_BDK_MIO_TWSX_SW_TWSI_EXT(a) (a),-1,-1,-1
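+
+/* Editor's note: usage sketch, not part of the auto-generated file. For an
+ * 8-byte master-mode write, software would load this register before starting
+ * the operation in MIO_TWS()_SW_TWSI with [SOVR] set, e.g.:
+ *
+ *   bdk_mio_twsx_sw_twsi_ext_t ext;
+ *   ext.u = 0;
+ *   ext.s.ia = msb_ia;                  // Extra IA byte, used when SW_TWSI[EIA] = 1
+ *   ext.s.data = (uint32_t)(val >> 32); // Upper four bytes of the payload
+ *   BDK_CSR_WRITE(node, BDK_MIO_TWSX_SW_TWSI_EXT(twsi_id), ext.u);
+ */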
+
+/**
+ * Register (RSL) mio_tws#_tstamp_rx
+ *
+ * MIO TWSI Receive Timestamp Register
+ * This register contains the timestamp latched when the TWSI device receives the first bit, on
+ * the falling edge of TWSI SCL. This function is only supported in non-block mode.
+ */
+union bdk_mio_twsx_tstamp_rx
+{
+ uint64_t u;
+ struct bdk_mio_twsx_tstamp_rx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t nanosec : 64; /**< [ 63: 0](RO/H) Timestamp in nanoseconds. */
+#else /* Word 0 - Little Endian */
+ uint64_t nanosec : 64; /**< [ 63: 0](RO/H) Timestamp in nanoseconds. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_mio_twsx_tstamp_rx_s cn; */
+};
+typedef union bdk_mio_twsx_tstamp_rx bdk_mio_twsx_tstamp_rx_t;
+
+static inline uint64_t BDK_MIO_TWSX_TSTAMP_RX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_TWSX_TSTAMP_RX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=5))
+ return 0x87e0d0001068ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("MIO_TWSX_TSTAMP_RX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_TWSX_TSTAMP_RX(a) bdk_mio_twsx_tstamp_rx_t
+#define bustype_BDK_MIO_TWSX_TSTAMP_RX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_TWSX_TSTAMP_RX(a) "MIO_TWSX_TSTAMP_RX"
+#define device_bar_BDK_MIO_TWSX_TSTAMP_RX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_TWSX_TSTAMP_RX(a) (a)
+#define arguments_BDK_MIO_TWSX_TSTAMP_RX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) mio_tws#_tstamp_tx
+ *
+ * MIO TWSI Transmit Timestamp Register
+ * This register contains the timestamp latched when the TWSI device drives the first bit, on
+ * the falling edge of TWSI SCL. This function is only supported in non-block mode.
+ */
+union bdk_mio_twsx_tstamp_tx
+{
+ uint64_t u;
+ struct bdk_mio_twsx_tstamp_tx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t nanosec : 64; /**< [ 63: 0](RO/H) Timestamp in nanoseconds. */
+#else /* Word 0 - Little Endian */
+ uint64_t nanosec : 64; /**< [ 63: 0](RO/H) Timestamp in nanoseconds. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_mio_twsx_tstamp_tx_s cn; */
+};
+typedef union bdk_mio_twsx_tstamp_tx bdk_mio_twsx_tstamp_tx_t;
+
+static inline uint64_t BDK_MIO_TWSX_TSTAMP_TX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_TWSX_TSTAMP_TX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=5))
+ return 0x87e0d0001060ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("MIO_TWSX_TSTAMP_TX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_TWSX_TSTAMP_TX(a) bdk_mio_twsx_tstamp_tx_t
+#define bustype_BDK_MIO_TWSX_TSTAMP_TX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_TWSX_TSTAMP_TX(a) "MIO_TWSX_TSTAMP_TX"
+#define device_bar_BDK_MIO_TWSX_TSTAMP_TX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_TWSX_TSTAMP_TX(a) (a)
+#define arguments_BDK_MIO_TWSX_TSTAMP_TX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) mio_tws#_twsi_block_ctl
+ *
+ * TWSI Block Mode Control Register
+ * This register contains the control bits used when the TWSI is in block mode. In block mode,
+ * data can be written to or read from MIO_TWS()_TWSI_BLOCK_FIFO[DATA] sequentially.
+ */
+union bdk_mio_twsx_twsi_block_ctl
+{
+ uint64_t u;
+ struct bdk_mio_twsx_twsi_block_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t slave_vbyte : 1; /**< [ 32: 32](R/W) When set to 1 in block mode, the slave-mode read response includes
+ an extra V byte before the rest of the data transmission. The definition of the V byte is
+ compatible with the one-byte/four-byte slave responses. */
+ uint64_t reserved_27_31 : 5;
+ uint64_t block_thresh : 11; /**< [ 26: 16](R/W) Block mode interrupt threshold, from 0-1024; 0x0 disables the
+ interrupt. The MIO_TWS()_INT[BLOCK_INT] interrupt fires when the number of
+ remaining bytes to be sent/received drops below this threshold. If the total number of
+ bytes to be sent/received is already less than [BLOCK_THRESH], the interrupt
+ fires immediately. This interrupt is enabled only in HLC block mode. */
+ uint64_t reserved_10_15 : 6;
+ uint64_t block_size : 10; /**< [ 9: 0](R/W) Block mode FIFO transmit/receive data size minus one;
+ valid values are 0-1023, corresponding to 1-1024 bytes to be sent/received. */
+#else /* Word 0 - Little Endian */
+ uint64_t block_size : 10; /**< [ 9: 0](R/W) Block mode FIFO transmit/receive data size minus one;
+ valid values are 0-1023, corresponding to 1-1024 bytes to be sent/received. */
+ uint64_t reserved_10_15 : 6;
+ uint64_t block_thresh : 11; /**< [ 26: 16](R/W) Block mode interrupt threshold, from 0-1024; 0x0 disables the
+ interrupt. The MIO_TWS()_INT[BLOCK_INT] interrupt fires when the number of
+ remaining bytes to be sent/received drops below this threshold. If the total number of
+ bytes to be sent/received is already less than [BLOCK_THRESH], the interrupt
+ fires immediately. This interrupt is enabled only in HLC block mode. */
+ uint64_t reserved_27_31 : 5;
+ uint64_t slave_vbyte : 1; /**< [ 32: 32](R/W) When set to 1 in block mode, the slave-mode read response includes
+ an extra V byte before the rest of the data transmission. The definition of the V byte is
+ compatible with the one-byte/four-byte slave responses. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_mio_twsx_twsi_block_ctl_s cn; */
+};
+typedef union bdk_mio_twsx_twsi_block_ctl bdk_mio_twsx_twsi_block_ctl_t;
+
+static inline uint64_t BDK_MIO_TWSX_TWSI_BLOCK_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_TWSX_TWSI_BLOCK_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=5))
+ return 0x87e0d0001048ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("MIO_TWSX_TWSI_BLOCK_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_TWSX_TWSI_BLOCK_CTL(a) bdk_mio_twsx_twsi_block_ctl_t
+#define bustype_BDK_MIO_TWSX_TWSI_BLOCK_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_TWSX_TWSI_BLOCK_CTL(a) "MIO_TWSX_TWSI_BLOCK_CTL"
+#define device_bar_BDK_MIO_TWSX_TWSI_BLOCK_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_TWSX_TWSI_BLOCK_CTL(a) (a)
+#define arguments_BDK_MIO_TWSX_TWSI_BLOCK_CTL(a) (a),-1,-1,-1
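+
+/* Editor's note: usage sketch, not part of the auto-generated file. Assuming
+ * the BDK_CSR_WRITE helper, configuring a 64-byte block transfer with the
+ * threshold interrupt disabled could look like:
+ *
+ *   bdk_mio_twsx_twsi_block_ctl_t ctl;
+ *   ctl.u = 0;
+ *   ctl.s.block_size = 64 - 1;   // Field holds the byte count minus one
+ *   ctl.s.block_thresh = 0;      // 0x0 disables MIO_TWS()_INT[BLOCK_INT]
+ *   BDK_CSR_WRITE(node, BDK_MIO_TWSX_TWSI_BLOCK_CTL(twsi_id), ctl.u);
+ */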
+
+/**
+ * Register (RSL) mio_tws#_twsi_block_fifo
+ *
+ * TWSI Block Mode Data Register
+ * This register is only valid in HLC block mode. It allows software to
+ * push or pop a block of data (up to 1024 bytes) to/from the TWSI device through a FIFO
+ * interface. For TWSI writes, software does a series of writes to fill up the FIFO before
+ * starting the TWSI HLC transaction. For TWSI reads, software does a series of reads
+ * after the TWSI transaction finishes, as indicated by MIO_TWS()_TWSI_BLOCK_STS[BUSY] or an
+ * interrupt.
+ * The order of data transmitted on the TWSI bus is:
+ * _ {D1, D2, D3, D4, D5, ...., D[block_size]}, with the MSB of each byte transmitted first.
+ *
+ * The FIFO pointer must be reset by writing 1 to MIO_TWS()_TWSI_BLOCK_STS[RESET_PTR] before
+ * software accesses the FIFO.
+ * The order of software writes or reads through MIO_TWS()_TWSI_BLOCK_FIFO[DATA] is:
+ * _ 1st push/pop {D1, D2, D3, D4, D5, D6, D7, D8}.
+ * _ 2nd push/pop {D9, D10, D11, D12, D13, D14, D15, D16}.
+ * _ 3rd ....
+ * _ last push/pop {D[block_size-1], D[block_size], 0, 0, 0, 0, 0, 0}.
+ *
+ * Where block_size is MIO_TWS()_TWSI_BLOCK_CTL[BLOCK_SIZE].
+ */
+union bdk_mio_twsx_twsi_block_fifo
+{
+ uint64_t u;
+ struct bdk_mio_twsx_twsi_block_fifo_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W/H) Data field linked to the entries in the FIFO based on the current software pointer.
+ [DATA] contains 8 bytes, corresponding to {Dn, Dn+1, Dn+2, Dn+3, Dn+4, Dn+5, Dn+6,
+ Dn+7}. If fewer than 8 bytes remain, per MIO_TWS()_TWSI_BLOCK_CTL[BLOCK_SIZE],
+ the lower bits are ignored. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W/H) Data field linked to the entries in the FIFO based on the current software pointer.
+ [DATA] contains 8 bytes, corresponding to {Dn, Dn+1, Dn+2, Dn+3, Dn+4, Dn+5, Dn+6,
+ Dn+7}. If fewer than 8 bytes remain, per MIO_TWS()_TWSI_BLOCK_CTL[BLOCK_SIZE],
+ the lower bits are ignored. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_mio_twsx_twsi_block_fifo_s cn; */
+};
+typedef union bdk_mio_twsx_twsi_block_fifo bdk_mio_twsx_twsi_block_fifo_t;
+
+static inline uint64_t BDK_MIO_TWSX_TWSI_BLOCK_FIFO(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_TWSX_TWSI_BLOCK_FIFO(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=5))
+ return 0x87e0d0001058ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("MIO_TWSX_TWSI_BLOCK_FIFO", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_TWSX_TWSI_BLOCK_FIFO(a) bdk_mio_twsx_twsi_block_fifo_t
+#define bustype_BDK_MIO_TWSX_TWSI_BLOCK_FIFO(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_TWSX_TWSI_BLOCK_FIFO(a) "MIO_TWSX_TWSI_BLOCK_FIFO"
+#define device_bar_BDK_MIO_TWSX_TWSI_BLOCK_FIFO(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_TWSX_TWSI_BLOCK_FIFO(a) (a)
+#define arguments_BDK_MIO_TWSX_TWSI_BLOCK_FIFO(a) (a),-1,-1,-1
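+
+/* Editor's note: usage sketch, not part of the auto-generated file. Filling
+ * the FIFO for a block write in the reset/push order described above, with
+ * D1 assumed to pack into the most significant byte of each 64-bit word:
+ *
+ *   BDK_CSR_WRITE(node, BDK_MIO_TWSX_TWSI_BLOCK_STS(twsi_id), 1); // [RESET_PTR]
+ *   for (int i = 0; i < num_bytes; i += 8)
+ *   {
+ *       uint64_t word = 0;
+ *       for (int b = 0; b < 8 && i + b < num_bytes; b++)
+ *           word |= (uint64_t)buf[i + b] << (56 - 8 * b); // Earliest byte in MSB
+ *       BDK_CSR_WRITE(node, BDK_MIO_TWSX_TWSI_BLOCK_FIFO(twsi_id), word);
+ *   }
+ */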
+
+/**
+ * Register (RSL) mio_tws#_twsi_block_sts
+ *
+ * TWSI Block Mode Status Register
+ * This register contains maintenance and status information for block mode.
+ */
+union bdk_mio_twsx_twsi_block_sts
+{
+ uint64_t u;
+ struct bdk_mio_twsx_twsi_block_sts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_20_63 : 44;
+ uint64_t sw_ptr : 7; /**< [ 19: 13](RO/H) Latest software read/write pointer into MIO_TWS()_TWSI_BLOCK_FIFO, valid from 0-127.
+ For debugging purposes. */
+ uint64_t count : 11; /**< [ 12: 2](RO/H) Remaining number of bytes waiting to be sent/received on the TWSI bus in block mode.
+ A value of 0x0 is expected when no TWSI transaction is pending. */
+ uint64_t busy : 1; /**< [ 1: 1](RO/H) Reading back 1 means a TWSI block transaction is still in progress;
+ 0x0 is expected when no TWSI block transaction is pending or in progress. */
+ uint64_t reset_ptr : 1; /**< [ 0: 0](R/W1/H) Reset the software-side FIFO pointer used for accesses through
+ MIO_TWS()_TWSI_BLOCK_FIFO. This bit must be written with 1 before any software accesses
+ to MIO_TWS()_TWSI_BLOCK_FIFO. Resetting the pointer affects neither the data
+ stored in the FIFO nor the hardware-side pointer. Reading back 0 indicates
+ the software write/read pointers have been reset to their initial value of zero. */
+#else /* Word 0 - Little Endian */
+ uint64_t reset_ptr : 1; /**< [ 0: 0](R/W1/H) Reset the software-side FIFO pointer used for accesses through
+ MIO_TWS()_TWSI_BLOCK_FIFO. This bit must be written with 1 before any software accesses
+ to MIO_TWS()_TWSI_BLOCK_FIFO. Resetting the pointer affects neither the data
+ stored in the FIFO nor the hardware-side pointer. Reading back 0 indicates
+ the software write/read pointers have been reset to their initial value of zero. */
+ uint64_t busy : 1; /**< [ 1: 1](RO/H) Reading back 1 means a TWSI block transaction is still in progress;
+ 0x0 is expected when no TWSI block transaction is pending or in progress. */
+ uint64_t count : 11; /**< [ 12: 2](RO/H) Remaining number of bytes waiting to be sent/received on the TWSI bus in block mode.
+ A value of 0x0 is expected when no TWSI transaction is pending. */
+ uint64_t sw_ptr : 7; /**< [ 19: 13](RO/H) Latest software read/write pointer into MIO_TWS()_TWSI_BLOCK_FIFO, valid from 0-127.
+ For debugging purposes. */
+ uint64_t reserved_20_63 : 44;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_mio_twsx_twsi_block_sts_s cn; */
+};
+typedef union bdk_mio_twsx_twsi_block_sts bdk_mio_twsx_twsi_block_sts_t;
+
+static inline uint64_t BDK_MIO_TWSX_TWSI_BLOCK_STS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_TWSX_TWSI_BLOCK_STS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=5))
+ return 0x87e0d0001050ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("MIO_TWSX_TWSI_BLOCK_STS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_TWSX_TWSI_BLOCK_STS(a) bdk_mio_twsx_twsi_block_sts_t
+#define bustype_BDK_MIO_TWSX_TWSI_BLOCK_STS(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_TWSX_TWSI_BLOCK_STS(a) "MIO_TWSX_TWSI_BLOCK_STS"
+#define device_bar_BDK_MIO_TWSX_TWSI_BLOCK_STS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_TWSX_TWSI_BLOCK_STS(a) (a)
+#define arguments_BDK_MIO_TWSX_TWSI_BLOCK_STS(a) (a),-1,-1,-1
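+
+/* Editor's note: usage sketch, not part of the auto-generated file. A simple
+ * completion wait after starting a block transaction, polling [BUSY] instead
+ * of taking the MIO_TWS()_INT[BLOCK_INT] interrupt:
+ *
+ *   bdk_mio_twsx_twsi_block_sts_t sts;
+ *   do
+ *       sts.u = BDK_CSR_READ(node, BDK_MIO_TWSX_TWSI_BLOCK_STS(twsi_id));
+ *   while (sts.s.busy);
+ */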
+
+/**
+ * Register (RSL) mio_tws#_twsi_sw
+ *
+ * TWSI to Software Register
+ * This register allows the TWSI device to transfer data to software and later check that
+ * software has received the information.
+ * This register should be read or written by the TWSI device, and read by software. The TWSI
+ * device can use one-byte or four-byte payload write operations, and two-byte payload read
+ * operations. The TWSI device considers this register valid when [V] = 1.
+ */
+union bdk_mio_twsx_twsi_sw
+{
+ uint64_t u;
+ struct bdk_mio_twsx_twsi_sw_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t v : 2; /**< [ 63: 62](RC/W/H) Valid bits. These bits are not directly writable. They are set to 11 on any write
+ operation by the TWSI device. They are cleared to 00 on any read operation by software. */
+ uint64_t reserved_32_61 : 30;
+ uint64_t data : 32; /**< [ 31: 0](RO/H) Data field. Updated on a write operation by the TWSI device. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 32; /**< [ 31: 0](RO/H) Data field. Updated on a write operation by the TWSI device. */
+ uint64_t reserved_32_61 : 30;
+ uint64_t v : 2; /**< [ 63: 62](RC/W/H) Valid bits. These bits are not directly writable. They are set to 11 on any write
+ operation by the TWSI device. They are cleared to 00 on any read operation by software. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_mio_twsx_twsi_sw_s cn8; */
+ struct bdk_mio_twsx_twsi_sw_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t v : 2; /**< [ 63: 62](RC/W/H) Valid bits. These bits are not directly writable. They are set to 11 on any write
+ operation by the TWSI device. They are cleared to 00 on any read operation by software. */
+ uint64_t reserved_32_61 : 30;
+ uint64_t data : 32; /**< [ 31: 0](RO/H) Data field, bytes 3-7. Updated on a write operation by the TWSI device. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 32; /**< [ 31: 0](RO/H) Data field, bytes 3-7. Updated on a write operation by the TWSI device. */
+ uint64_t reserved_32_61 : 30;
+ uint64_t v : 2; /**< [ 63: 62](RC/W/H) Valid bits. These bits are not directly writable. They are set to 11 on any write
+ operation by the TWSI device. They are cleared to 00 on any read operation by software. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_mio_twsx_twsi_sw bdk_mio_twsx_twsi_sw_t;
+
+static inline uint64_t BDK_MIO_TWSX_TWSI_SW(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_MIO_TWSX_TWSI_SW(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0d0001008ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0d0001008ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0d0001008ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=5))
+ return 0x87e0d0001008ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("MIO_TWSX_TWSI_SW", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_MIO_TWSX_TWSI_SW(a) bdk_mio_twsx_twsi_sw_t
+#define bustype_BDK_MIO_TWSX_TWSI_SW(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_MIO_TWSX_TWSI_SW(a) "MIO_TWSX_TWSI_SW"
+#define device_bar_BDK_MIO_TWSX_TWSI_SW(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_MIO_TWSX_TWSI_SW(a) (a)
+#define arguments_BDK_MIO_TWSX_TWSI_SW(a) (a),-1,-1,-1
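+
+/* Editor's note: usage sketch, not part of the auto-generated file. Software
+ * polling for data handed off by the TWSI device; the CSR read itself clears
+ * [V] back to 00, acknowledging receipt:
+ *
+ *   bdk_mio_twsx_twsi_sw_t tw;
+ *   tw.u = BDK_CSR_READ(node, BDK_MIO_TWSX_TWSI_SW(twsi_id));
+ *   if (tw.s.v == 0x3)      // Both valid bits set by the device's write
+ *       consume(tw.s.data); // consume() is a placeholder for local handling
+ */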
+
+#endif /* __BDK_CSRS_MIO_TWS_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-ocla.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-ocla.h
new file mode 100644
index 0000000000..c936ea3630
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-ocla.h
@@ -0,0 +1,2756 @@
+#ifndef __BDK_CSRS_OCLA_H__
+#define __BDK_CSRS_OCLA_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium OCLA.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Enumeration ocla_bar_e
+ *
+ * OCLA Base Address Register Enumeration
+ * Enumerates the base address registers.
+ */
+#define BDK_OCLA_BAR_E_OCLAX_PF_BAR0_CN8(a) (0x87e0a8000000ll + 0x1000000ll * (a))
+#define BDK_OCLA_BAR_E_OCLAX_PF_BAR0_CN8_SIZE 0x800000ull
+#define BDK_OCLA_BAR_E_OCLAX_PF_BAR0_CN9(a) (0x87e0b0000000ll + 0x1000000ll * (a))
+#define BDK_OCLA_BAR_E_OCLAX_PF_BAR0_CN9_SIZE 0x800000ull
+#define BDK_OCLA_BAR_E_OCLAX_PF_BAR4_CN8(a) (0x87e0a8f00000ll + 0x1000000ll * (a))
+#define BDK_OCLA_BAR_E_OCLAX_PF_BAR4_CN8_SIZE 0x100000ull
+#define BDK_OCLA_BAR_E_OCLAX_PF_BAR4_CN9(a) (0x87e0b0f00000ll + 0x1000000ll * (a))
+#define BDK_OCLA_BAR_E_OCLAX_PF_BAR4_CN9_SIZE 0x100000ull
+
+/**
+ * Enumeration ocla_int_vec_e
+ *
+ * OCLA MSI-X Vector Enumeration
+ * Enumerates the MSI-X interrupt vectors.
+ */
+#define BDK_OCLA_INT_VEC_E_INTS (0)
+
+/**
+ * Structure ocla_cap_ctl_s
+ *
+ * OCLA Capture Control Structure
+ * Control packets are indicated by an entry's bit 37 being set, and described by the
+ * OCLA_CAP_CTL_S structure:
+ */
+union bdk_ocla_cap_ctl_s
+{
+ uint64_t u;
+ struct bdk_ocla_cap_ctl_s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_38_63 : 26;
+ uint64_t ctl : 1; /**< [ 37: 37] Indicates a control word. Always set for control structures. */
+ uint64_t sinfo : 1; /**< [ 36: 36] Indicates OCLA()_FSM()_STATE()[SINFO_SET] was set for the state that led to the
+ capture state. This allows the FSM to optionally communicate its current state
+ to observing software; [SINFO] is otherwise opaque to reassembling the trace
+ information. */
+ uint64_t eot1 : 1; /**< [ 35: 35] End of duplicated capture for high data. Symmetric with EOT0 description; see [EOT0]. */
+ uint64_t eot0 : 1; /**< [ 34: 34] End of duplicated capture for low data. When set, [CYCLE] indicates the cycle at which the
+ previous entry of low data stopped being replicated. This may be set along with SOT0 to
+ indicate a repeat followed by a new sequence. */
+ uint64_t sot1 : 1; /**< [ 33: 33] Start transition from no-capture to capture or duplicated data stopped while capturing for
+ high data. When set, [CYCLE] indicates the cycle number of the next new high data, minus one
+ cycle. */
+ uint64_t sot0 : 1; /**< [ 32: 32] Start transition from no-capture to capture or duplicated data stopped while capturing for
+ low data. When set, [CYCLE] indicates the cycle number of the next new low data, minus one
+ cycle. */
+ uint64_t cycle : 32; /**< [ 31: 0] Cycle at which this control entry was written, from OCLA()_TIME. */
+#else /* Word 0 - Little Endian */
+ uint64_t cycle : 32; /**< [ 31: 0] Cycle at which this control entry was written, from OCLA()_TIME. */
+ uint64_t sot0 : 1; /**< [ 32: 32] Start transition from no-capture to capture or duplicated data stopped while capturing for
+ low data. When set, [CYCLE] indicates the cycle number of the next new low data, minus one
+ cycle. */
+ uint64_t sot1 : 1; /**< [ 33: 33] Start transition from no-capture to capture or duplicated data stopped while capturing for
+ high data. When set, [CYCLE] indicates the cycle number of the next new high data, minus one
+ cycle. */
+ uint64_t eot0 : 1; /**< [ 34: 34] End of duplicated capture for low data. When set, [CYCLE] indicates the cycle at which the
+ previous entry of low data stopped being replicated. This may be set along with SOT0 to
+ indicate a repeat followed by a new sequence. */
+ uint64_t eot1 : 1; /**< [ 35: 35] End of duplicated capture for high data. Symmetric with EOT0 description; see [EOT0]. */
+ uint64_t sinfo : 1; /**< [ 36: 36] Indicates OCLA()_FSM()_STATE()[SINFO_SET] was set for the state that led to the
+ capture state. This allows the FSM to optionally communicate its current state
+ to observing software; [SINFO] is otherwise opaque to reassembling the trace
+ information. */
+ uint64_t ctl : 1; /**< [ 37: 37] Indicates a control word. Always set for control structures. */
+ uint64_t reserved_38_63 : 26;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocla_cap_ctl_s_s cn; */
+};
+
+/**
+ * Structure ocla_cap_dat_s
+ *
+ * OCLA Capture Data Structure
+ * Data packets are indicated by an entry's bit 37 being clear, and described by the
+ * OCLA_CAP_DAT_S structure:
+ */
+union bdk_ocla_cap_dat_s
+{
+ uint64_t u;
+ struct bdk_ocla_cap_dat_s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_38_63 : 26;
+ uint64_t ctl : 1; /**< [ 37: 37] Indicates a control word. Always clear for data structures. */
+ uint64_t hi : 1; /**< [ 36: 36] Set to indicate a sample of high data, clear for a sample of low data. */
+ uint64_t data : 36; /**< [ 35: 0] Captured trace data. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 36; /**< [ 35: 0] Captured trace data. */
+ uint64_t hi : 1; /**< [ 36: 36] Set to indicate a sample of high data, clear for a sample of low data. */
+ uint64_t ctl : 1; /**< [ 37: 37] Indicates a control word. Always clear for data structures. */
+ uint64_t reserved_38_63 : 26;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocla_cap_dat_s_s cn; */
+};
+
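+/* Editor's note: usage sketch, not part of the auto-generated file. Decoding
+ * a raw 38-bit captured entry into the two structures above; bit 37 ([CTL])
+ * selects the control versus data format:
+ *
+ *   union bdk_ocla_cap_ctl_s ctl;
+ *   ctl.u = entry;
+ *   if (ctl.s.ctl)
+ *       handle_ctl(ctl.s.cycle, ctl.s.sot0, ctl.s.sot1, ctl.s.eot0, ctl.s.eot1);
+ *   else
+ *   {
+ *       union bdk_ocla_cap_dat_s dat;
+ *       dat.u = entry;
+ *       handle_dat(dat.s.hi, dat.s.data); // handle_*() are placeholders
+ *   }
+ */
+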
+/**
+ * Register (RSL) ocla#_active_pc
+ *
+ * OCLA Conditional Clock Counter Register
+ * This register counts conditional clocks for power management.
+ *
+ * This register is reset on ocla domain reset.
+ */
+union bdk_oclax_active_pc
+{
+ uint64_t u;
+ struct bdk_oclax_active_pc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W/H) Count of conditional clock cycles since reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W/H) Count of conditional clock cycles since reset. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_oclax_active_pc_s cn; */
+};
+typedef union bdk_oclax_active_pc bdk_oclax_active_pc_t;
+
+static inline uint64_t BDK_OCLAX_ACTIVE_PC(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_ACTIVE_PC(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=4))
+ return 0x87e0b0000620ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("OCLAX_ACTIVE_PC", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCLAX_ACTIVE_PC(a) bdk_oclax_active_pc_t
+#define bustype_BDK_OCLAX_ACTIVE_PC(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_ACTIVE_PC(a) "OCLAX_ACTIVE_PC"
+#define device_bar_BDK_OCLAX_ACTIVE_PC(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_ACTIVE_PC(a) (a)
+#define arguments_BDK_OCLAX_ACTIVE_PC(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocla#_bist_result
+ *
+ * OCLA BIST Result Registers
+ * This register provides access to the internal BIST results. Each bit is the BIST result of an
+ * individual memory (per bit, 0 = pass and 1 = fail).
+ *
+ * Internal:
+ * FIXME remove fields and add deprecated: "RAZ" per MDC common changes.
+ */
+union bdk_oclax_bist_result
+{
+ uint64_t u;
+ struct bdk_oclax_bist_result_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t dat : 1; /**< [ 0: 0](RO) BIST result of the DAT memory. */
+#else /* Word 0 - Little Endian */
+ uint64_t dat : 1; /**< [ 0: 0](RO) BIST result of the DAT memory. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_oclax_bist_result_s cn; */
+};
+typedef union bdk_oclax_bist_result bdk_oclax_bist_result_t;
+
+static inline uint64_t BDK_OCLAX_BIST_RESULT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_BIST_RESULT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0a8000040ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=2))
+ return 0x87e0a8000040ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=4))
+ return 0x87e0a8000040ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=4))
+ return 0x87e0b0000040ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("OCLAX_BIST_RESULT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCLAX_BIST_RESULT(a) bdk_oclax_bist_result_t
+#define bustype_BDK_OCLAX_BIST_RESULT(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_BIST_RESULT(a) "OCLAX_BIST_RESULT"
+#define device_bar_BDK_OCLAX_BIST_RESULT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_BIST_RESULT(a) (a)
+#define arguments_BDK_OCLAX_BIST_RESULT(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocla#_cdh#_ctl
+ *
+ * OCLA Capture Data Half Control Registers
+ */
+union bdk_oclax_cdhx_ctl
+{
+ uint64_t u;
+ struct bdk_oclax_cdhx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_6_63 : 58;
+ uint64_t dup : 1; /**< [ 5: 5](R/W) Retain duplicates in the data stream. */
+ uint64_t dis_stamp : 1; /**< [ 4: 4](R/W) Remove time stamps from data stream. */
+ uint64_t cap_ctl : 4; /**< [ 3: 0](R/W) Minterms that will cause data to be captured. These minterms are the four inputs
+ to a four-to-one mux selected by PLA1 and zero. The output is thus calculated
+ from the equation:
+
+ \<pre\>
+ fsmcap0 = OCLA(0..1)_FSM(0)_STATE[state0][CAP].
+ fsmcap1 = OCLA(0..1)_FSM(1)_STATE[state1][CAP].
+ out = ( (\<3\> & fsmcap1 & fsmcap0)
+ || (\<2\> & fsmcap1 & !fsmcap0)
+ || (\<1\> & !fsmcap1 & fsmcap0)
+ || (\<0\> & !fsmcap1 & !fsmcap0)).
+ \</pre\>
+
+ Common examples:
+ 0x0 = No capture.
+ 0xA = Capture when fsmcap0 requests capture.
+ 0xC = Capture when fsmcap1 requests capture.
+ 0x6 = Capture on fsmcap0 EXOR fsmcap1.
+ 0x8 = Capture on fsmcap0 & fsmcap1.
+ 0xE = Capture on fsmcap0 | fsmcap1.
+ 0xF = Always capture. */
+#else /* Word 0 - Little Endian */
+ uint64_t cap_ctl : 4; /**< [ 3: 0](R/W) Minterms that will cause data to be captured. These minterms are the four inputs
+ to a four-to-one mux selected by PLA1 and zero. The output is thus calculated
+ from the equation:
+
+ \<pre\>
+ fsmcap0 = OCLA(0..1)_FSM(0)_STATE[state0][CAP].
+ fsmcap1 = OCLA(0..1)_FSM(1)_STATE[state1][CAP].
+ out = ( (\<3\> & fsmcap1 & fsmcap0)
+ || (\<2\> & fsmcap1 & !fsmcap0)
+ || (\<1\> & !fsmcap1 & fsmcap0)
+ || (\<0\> & !fsmcap1 & !fsmcap0)).
+ \</pre\>
+
+ Common examples:
+ 0x0 = No capture.
+ 0xA = Capture when fsmcap0 requests capture.
+ 0xC = Capture when fsmcap1 requests capture.
+ 0x6 = Capture on fsmcap0 EXOR fsmcap1.
+ 0x8 = Capture on fsmcap0 & fsmcap1.
+ 0xE = Capture on fsmcap0 | fsmcap1.
+ 0xF = Always capture. */
+ uint64_t dis_stamp : 1; /**< [ 4: 4](R/W) Remove time stamps from data stream. */
+ uint64_t dup : 1; /**< [ 5: 5](R/W) Retain duplicates in the data stream. */
+ uint64_t reserved_6_63 : 58;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_oclax_cdhx_ctl_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_6_63 : 58;
+ uint64_t dup : 1; /**< [ 5: 5](R/W) Retain duplicates in the data stream. */
+ uint64_t dis_stamp : 1; /**< [ 4: 4](R/W) Remove time stamps from data stream. */
+ uint64_t cap_ctl : 4; /**< [ 3: 0](R/W) Minterms that will cause data to be captured. These minterms are the four inputs
+ to a four-to-one mux selected by PLA1 and zero. The output is thus calculated
+ from the equation:
+
+ \<pre\>
+ fsmcap0 = OCLA(0..4)_FSM(0)_STATE[state0][CAP].
+ fsmcap1 = OCLA(0..4)_FSM(1)_STATE[state1][CAP].
+ out = ( (\<3\> & fsmcap1 & fsmcap0)
+ || (\<2\> & fsmcap1 & !fsmcap0)
+ || (\<1\> & !fsmcap1 & fsmcap0)
+ || (\<0\> & !fsmcap1 & !fsmcap0)).
+ \</pre\>
+
+ Common examples:
+ 0x0 = No capture.
+ 0xA = Capture when fsmcap0 requests capture.
+ 0xC = Capture when fsmcap1 requests capture.
+ 0x6 = Capture on fsmcap0 EXOR fsmcap1.
+ 0x8 = Capture on fsmcap0 & fsmcap1.
+ 0xE = Capture on fsmcap0 | fsmcap1.
+ 0xF = Always capture. */
+#else /* Word 0 - Little Endian */
+ uint64_t cap_ctl : 4; /**< [ 3: 0](R/W) Minterms that will cause data to be captured. These minterms are the four inputs
+ to a four-to-one mux selected by PLA1 and zero. The output is thus calculated
+ from the equation:
+
+ \<pre\>
+ fsmcap0 = OCLA(0..4)_FSM(0)_STATE[state0][CAP].
+ fsmcap1 = OCLA(0..4)_FSM(1)_STATE[state1][CAP].
+ out = ( (\<3\> & fsmcap1 & fsmcap0)
+ || (\<2\> & fsmcap1 & !fsmcap0)
+ || (\<1\> & !fsmcap1 & fsmcap0)
+ || (\<0\> & !fsmcap1 & !fsmcap0)).
+ \</pre\>
+
+ Common examples:
+ 0x0 = No capture.
+ 0xA = Capture when fsmcap0 requests capture.
+ 0xC = Capture when fsmcap1 requests capture.
+ 0x6 = Capture on fsmcap0 EXOR fsmcap1.
+ 0x8 = Capture on fsmcap0 & fsmcap1.
+ 0xE = Capture on fsmcap0 | fsmcap1.
+ 0xF = Always capture. */
+ uint64_t dis_stamp : 1; /**< [ 4: 4](R/W) Remove time stamps from data stream. */
+ uint64_t dup : 1; /**< [ 5: 5](R/W) Retain duplicates in the data stream. */
+ uint64_t reserved_6_63 : 58;
+#endif /* Word 0 - End */
+ } cn9;
+ /* struct bdk_oclax_cdhx_ctl_s cn81xx; */
+ /* struct bdk_oclax_cdhx_ctl_cn9 cn88xx; */
+ struct bdk_oclax_cdhx_ctl_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_6_63 : 58;
+ uint64_t dup : 1; /**< [ 5: 5](R/W) Retain duplicates in the data stream. */
+ uint64_t dis_stamp : 1; /**< [ 4: 4](R/W) Remove time stamps from data stream. */
+ uint64_t cap_ctl : 4; /**< [ 3: 0](R/W) Minterms that will cause data to be captured. These minterms are the four inputs
+ to a four-to-one mux selected by PLA1 and zero. The output is thus calculated
+ from the equation:
+
+ \<pre\>
+ fsmcap0 = OCLA(0..2)_FSM(0)_STATE[state0][CAP].
+ fsmcap1 = OCLA(0..2)_FSM(1)_STATE[state1][CAP].
+ out = ( (\<3\> & fsmcap1 & fsmcap0)
+ || (\<2\> & fsmcap1 & !fsmcap0)
+ || (\<1\> & !fsmcap1 & fsmcap0)
+ || (\<0\> & !fsmcap1 & !fsmcap0)).
+ \</pre\>
+
+ Common examples:
+ 0x0 = No capture.
+ 0xA = Capture when fsmcap0 requests capture.
+ 0xC = Capture when fsmcap1 requests capture.
+ 0x6 = Capture on fsmcap0 EXOR fsmcap1.
+ 0x8 = Capture on fsmcap0 & fsmcap1.
+ 0xE = Capture on fsmcap0 | fsmcap1.
+ 0xF = Always capture. */
+#else /* Word 0 - Little Endian */
+ uint64_t cap_ctl : 4; /**< [ 3: 0](R/W) Minterms that will cause data to be captured. These minterms are the four inputs
+ to a four-to-one mux selected by PLA1 and zero. The output is thus calculated
+ from the equation:
+
+ \<pre\>
+ fsmcap0 = OCLA(0..2)_FSM(0)_STATE[state0][CAP].
+ fsmcap1 = OCLA(0..2)_FSM(1)_STATE[state1][CAP].
+ out = ( (\<3\> & fsmcap1 & fsmcap0)
+ || (\<2\> & fsmcap1 & !fsmcap0)
+ || (\<1\> & !fsmcap1 & fsmcap0)
+ || (\<0\> & !fsmcap1 & !fsmcap0)).
+ \</pre\>
+
+ Common examples:
+ 0x0 = No capture.
+ 0xA = Capture when fsmcap0 requests capture.
+ 0xC = Capture when fsmcap1 requests capture.
+ 0x6 = Capture on fsmcap0 EXOR fsmcap1.
+ 0x8 = Capture on fsmcap0 & fsmcap1.
+ 0xE = Capture on fsmcap0 | fsmcap1.
+ 0xF = Always capture. */
+ uint64_t dis_stamp : 1; /**< [ 4: 4](R/W) Remove time stamps from data stream. */
+ uint64_t dup : 1; /**< [ 5: 5](R/W) Retain duplicates in the data stream. */
+ uint64_t reserved_6_63 : 58;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_oclax_cdhx_ctl bdk_oclax_cdhx_ctl_t;
+
+static inline uint64_t BDK_OCLAX_CDHX_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_CDHX_CTL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=1)))
+ return 0x87e0a8000600ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=2) && (b<=1)))
+ return 0x87e0a8000600ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=4) && (b<=1)))
+ return 0x87e0a8000600ll + 0x1000000ll * ((a) & 0x7) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=4) && (b<=1)))
+ return 0x87e0b0000600ll + 0x1000000ll * ((a) & 0x7) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("OCLAX_CDHX_CTL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_OCLAX_CDHX_CTL(a,b) bdk_oclax_cdhx_ctl_t
+#define bustype_BDK_OCLAX_CDHX_CTL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_CDHX_CTL(a,b) "OCLAX_CDHX_CTL"
+#define device_bar_BDK_OCLAX_CDHX_CTL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_CDHX_CTL(a,b) (a)
+#define arguments_BDK_OCLAX_CDHX_CTL(a,b) (a),(b),-1,-1
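+
+/* Editor's note: usage sketch, not part of the auto-generated file. Selecting
+ * the "capture when either FSM requests it" minterm (0xE from the table above)
+ * for the low data half of OCLA instance `ocla`:
+ *
+ *   bdk_oclax_cdhx_ctl_t cdh;
+ *   cdh.u = 0;
+ *   cdh.s.cap_ctl = 0xE;     // out = fsmcap0 | fsmcap1
+ *   cdh.s.dis_stamp = 0;     // Keep time stamps in the stream
+ *   cdh.s.dup = 0;           // Do not retain duplicates
+ *   BDK_CSR_WRITE(node, BDK_OCLAX_CDHX_CTL(ocla, 0), cdh.u);
+ */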
+
+/**
+ * Register (RSL) ocla#_cdh#_inject_state
+ *
+ * OCLA Capture Inject State Register
+ * This register allows various state inputs to be inserted into the captured stream
+ * data, to assist debugging of OCLA FSMs. Each input has two insertion positions
+ * (i.e. [MCD] and [ALT_MCD]), so that some of the normal non-inject capture stream data
+ * may still be observable.
+ */
+union bdk_oclax_cdhx_inject_state
+{
+ uint64_t u;
+ struct bdk_oclax_cdhx_inject_state_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t alt_trig : 1; /**< [ 31: 31](R/W) When set, insert FSM trigger input into captured stream \<31\>. */
+ uint64_t alt_mcd : 3; /**< [ 30: 28](R/W) When set, insert multichip debug (MCD) 0..2 FSM inputs into captured stream \<30:28\>. */
+ uint64_t alt_match : 4; /**< [ 27: 24](R/W) When set, insert matcher FSM inputs into captured stream \<27:24\>. */
+ uint64_t alt_fsm1_state : 4; /**< [ 23: 20](R/W) When set, insert FSM 1 state input into captured stream \<23:20\>. */
+ uint64_t alt_fsm0_state : 4; /**< [ 19: 16](R/W) When set, insert FSM 0 state input into captured stream \<19:16\>. */
+ uint64_t trig : 1; /**< [ 15: 15](R/W) When set, insert FSM trigger input into captured stream \<15\>. */
+ uint64_t mcd : 3; /**< [ 14: 12](R/W) When set, insert multichip debug (MCD) 0..2 FSM inputs into captured stream \<14:12\>. */
+ uint64_t match : 4; /**< [ 11: 8](R/W) When set, insert matcher FSM inputs into captured stream \<11:8\>. */
+ uint64_t fsm1_state : 4; /**< [ 7: 4](R/W) When set, insert FSM 1 state input into captured stream \<7:4\>. */
+ uint64_t fsm0_state : 4; /**< [ 3: 0](R/W) When set, insert FSM 0 state input into captured stream \<3:0\>. */
+#else /* Word 0 - Little Endian */
+ uint64_t fsm0_state : 4; /**< [ 3: 0](R/W) When set, insert FSM 0 state input into captured stream \<3:0\>. */
+ uint64_t fsm1_state : 4; /**< [ 7: 4](R/W) When set, insert FSM 1 state input into captured stream \<7:4\>. */
+ uint64_t match : 4; /**< [ 11: 8](R/W) When set, insert matcher FSM inputs into captured stream \<11:8\>. */
+ uint64_t mcd : 3; /**< [ 14: 12](R/W) When set, insert multichip debug (MCD) 0..2 FSM inputs into captured stream \<14:12\>. */
+ uint64_t trig : 1; /**< [ 15: 15](R/W) When set, insert FSM trigger input into captured stream \<15\>. */
+ uint64_t alt_fsm0_state : 4; /**< [ 19: 16](R/W) When set, insert FSM 0 state input into captured stream \<19:16\>. */
+ uint64_t alt_fsm1_state : 4; /**< [ 23: 20](R/W) When set, insert FSM 1 state input into captured stream \<23:20\>. */
+ uint64_t alt_match : 4; /**< [ 27: 24](R/W) When set, insert matcher FSM inputs into captured stream \<27:24\>. */
+ uint64_t alt_mcd : 3; /**< [ 30: 28](R/W) When set, insert multichip debug (MCD) 0..2 FSM inputs into captured stream \<30:28\>. */
+ uint64_t alt_trig : 1; /**< [ 31: 31](R/W) When set, insert FSM trigger input into captured stream \<31\>. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_oclax_cdhx_inject_state_s cn; */
+};
+typedef union bdk_oclax_cdhx_inject_state bdk_oclax_cdhx_inject_state_t;
+
+static inline uint64_t BDK_OCLAX_CDHX_INJECT_STATE(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_CDHX_INJECT_STATE(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=4) && (b<=1)))
+ return 0x87e0b0000610ll + 0x1000000ll * ((a) & 0x7) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("OCLAX_CDHX_INJECT_STATE", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_OCLAX_CDHX_INJECT_STATE(a,b) bdk_oclax_cdhx_inject_state_t
+#define bustype_BDK_OCLAX_CDHX_INJECT_STATE(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_CDHX_INJECT_STATE(a,b) "OCLAX_CDHX_INJECT_STATE"
+#define device_bar_BDK_OCLAX_CDHX_INJECT_STATE(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_CDHX_INJECT_STATE(a,b) (a)
+#define arguments_BDK_OCLAX_CDHX_INJECT_STATE(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) ocla#_const
+ *
+ * OCLA Constants Registers
+ * Internal:
+ * FIXME add new NO_DDR bit \<16\> which is RO 0, indicating:
+ *
+ * No DDR supported.
+ * 0 = DDR dumping is supported, the OCLA()_STACK* registers exist and function.
+ * 1 = DDR dumping is not supported.
+ */
+union bdk_oclax_const
+{
+ uint64_t u;
+ struct bdk_oclax_const_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t dat_size : 16; /**< [ 15: 0](RO) Size of data RAM in units of 36-bit entries. This value is subject to change between chip
+ passes, and software should thus use this value rather than a hard coded constant.
+ OCLA(0..1) size is 4096, OCLA(2) size is 8192. */
+#else /* Word 0 - Little Endian */
+ uint64_t dat_size : 16; /**< [ 15: 0](RO) Size of data RAM in units of 36-bit entries. This value is subject to change between chip
+ passes, and software should thus use this value rather than a hard coded constant.
+ OCLA(0..1) size is 4096, OCLA(2) size is 8192. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_oclax_const_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t dat_size : 16; /**< [ 15: 0](RO) Size of data RAM in units of 36-bit entries. This value is subject to change between chip
+ passes, and software should thus use this value rather than a hard coded constant.
+ OCLA(0..3) size is 4096, OCLA(4) size is 16384. */
+#else /* Word 0 - Little Endian */
+ uint64_t dat_size : 16; /**< [ 15: 0](RO) Size of data RAM in units of 36-bit entries. This value is subject to change between chip
+ passes, and software should thus use this value rather than a hard coded constant.
+ OCLA(0..3) size is 4096, OCLA(4) size is 16384. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } cn9;
+ /* struct bdk_oclax_const_s cn81xx; */
+ struct bdk_oclax_const_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t dat_size : 16; /**< [ 15: 0](RO) Size of data RAM in units of 36-bit entries. This value is subject to change between chip
+ passes, and software should thus use this value rather than a hard coded constant.
+ OCLA(0..3) size is 4096, OCLA(4) size is 8192. */
+#else /* Word 0 - Little Endian */
+ uint64_t dat_size : 16; /**< [ 15: 0](RO) Size of data RAM in units of 36-bit entries. This value is subject to change between chip
+ passes, and software should thus use this value rather than a hard coded constant.
+ OCLA(0..3) size is 4096, OCLA(4) size is 8192. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_oclax_const_s cn83xx; */
+};
+typedef union bdk_oclax_const bdk_oclax_const_t;
+
+static inline uint64_t BDK_OCLAX_CONST(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_CONST(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0a8000000ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=2))
+ return 0x87e0a8000000ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=4))
+ return 0x87e0a8000000ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=4))
+ return 0x87e0b0000000ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("OCLAX_CONST", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCLAX_CONST(a) bdk_oclax_const_t
+#define bustype_BDK_OCLAX_CONST(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_CONST(a) "OCLAX_CONST"
+#define device_bar_BDK_OCLAX_CONST(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_CONST(a) (a)
+#define arguments_BDK_OCLAX_CONST(a) (a),-1,-1,-1
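+
+/* Editor's note: usage sketch, not part of the auto-generated file. As the
+ * field description asks, sizing buffers from [DAT_SIZE] instead of a
+ * hard coded constant, assuming the BDK_CSR_INIT helper from bdk-csr.h:
+ *
+ *   BDK_CSR_INIT(cnst, node, BDK_OCLAX_CONST(ocla));
+ *   unsigned num_entries = cnst.s.dat_size; // 36-bit entries in the data RAM
+ */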
+
+/**
+ * Register (RSL) ocla#_dat#
+ *
+ * OCLA Data Registers
+ */
+union bdk_oclax_datx
+{
+ uint64_t u;
+ struct bdk_oclax_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_38_63 : 26;
+ uint64_t entry : 38; /**< [ 37: 0](RO/H) Captured entry. Data is in the format described by OCLA_CAP_DAT_S or OCLA_CAP_CTL_S. */
+#else /* Word 0 - Little Endian */
+ uint64_t entry : 38; /**< [ 37: 0](RO/H) Captured entry. Data is in the format described by OCLA_CAP_DAT_S or OCLA_CAP_CTL_S. */
+ uint64_t reserved_38_63 : 26;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_oclax_datx_s cn; */
+};
+typedef union bdk_oclax_datx bdk_oclax_datx_t;
+
+static inline uint64_t BDK_OCLAX_DATX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_DATX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=8191)))
+ return 0x87e0a8400000ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x1fff);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=2) && (b<=8191)))
+ return 0x87e0a8400000ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x1fff);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=4) && (b<=8191)))
+ return 0x87e0a8400000ll + 0x1000000ll * ((a) & 0x7) + 8ll * ((b) & 0x1fff);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=4) && (b<=16383)))
+ return 0x87e0b0400000ll + 0x1000000ll * ((a) & 0x7) + 8ll * ((b) & 0x3fff);
+ __bdk_csr_fatal("OCLAX_DATX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_OCLAX_DATX(a,b) bdk_oclax_datx_t
+#define bustype_BDK_OCLAX_DATX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_DATX(a,b) "OCLAX_DATX"
+#define device_bar_BDK_OCLAX_DATX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_DATX(a,b) (a)
+#define arguments_BDK_OCLAX_DATX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) ocla#_dat_pop
+ *
+ * OCLA Data Pop Registers
+ */
+union bdk_oclax_dat_pop
+{
+ uint64_t u;
+ struct bdk_oclax_dat_pop_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t valid : 1; /**< [ 63: 63](RC/H) Valid entry. Indicates the FIFO contains data, and equivalent to OCLA()_FIFO_DEPTH[DEPTH] != 0. */
+ uint64_t trig : 1; /**< [ 62: 62](RO/H) Internal trigger set. Equivalent to OCLA()_STATE_INT[TRIG]. */
+ uint64_t wmark : 1; /**< [ 61: 61](RO/H) Internal buffer watermark reached. Equivalent to OCLA()_STATE_INT[WMARK]. */
+ uint64_t reserved_38_60 : 23;
+ uint64_t entry : 38; /**< [ 37: 0](RC/H) Captured entry. If [VALID] is set, has read side effect of unloading data by decrementing
+ OCLA()_FIFO_DEPTH[DEPTH]. Data is in the format described by OCLA_CAP_DAT_S or
+ OCLA_CAP_CTL_S.
+
+ Note that unloading data prevents that data from being sent to memory; therefore,
+ OCLA()_DAT_POP should not be read when OCLA()_FIFO_LIMIT[DDR] != all-ones. */
+#else /* Word 0 - Little Endian */
+ uint64_t entry : 38; /**< [ 37: 0](RC/H) Captured entry. If [VALID] is set, has read side effect of unloading data by decrementing
+ OCLA()_FIFO_DEPTH[DEPTH]. Data is in the format described by OCLA_CAP_DAT_S or
+ OCLA_CAP_CTL_S.
+
+ Note that unloading data prevents that data from being sent to memory; therefore,
+ OCLA()_DAT_POP should not be read when OCLA()_FIFO_LIMIT[DDR] != all-ones. */
+ uint64_t reserved_38_60 : 23;
+ uint64_t wmark : 1; /**< [ 61: 61](RO/H) Internal buffer watermark reached. Equivalent to OCLA()_STATE_INT[WMARK]. */
+ uint64_t trig : 1; /**< [ 62: 62](RO/H) Internal trigger set. Equivalent to OCLA()_STATE_INT[TRIG]. */
+ uint64_t valid : 1; /**< [ 63: 63](RC/H) Valid entry. Indicates the FIFO contains data, and equivalent to OCLA()_FIFO_DEPTH[DEPTH] != 0. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_oclax_dat_pop_s cn; */
+};
+typedef union bdk_oclax_dat_pop bdk_oclax_dat_pop_t;
+
+static inline uint64_t BDK_OCLAX_DAT_POP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_DAT_POP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0a8000800ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=2))
+ return 0x87e0a8000800ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=4))
+ return 0x87e0a8000800ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=4))
+ return 0x87e0b0000800ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("OCLAX_DAT_POP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCLAX_DAT_POP(a) bdk_oclax_dat_pop_t
+#define bustype_BDK_OCLAX_DAT_POP(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_DAT_POP(a) "OCLAX_DAT_POP"
+#define device_bar_BDK_OCLAX_DAT_POP(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_DAT_POP(a) (a)
+#define arguments_BDK_OCLAX_DAT_POP(a) (a),-1,-1,-1
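+
+/* Editor's note: usage sketch, not part of the auto-generated file. Draining
+ * captured entries through the pop interface; per the note above, only do
+ * this when DDR overflow is disabled (OCLA()_FIFO_LIMIT[DDR] = all-ones):
+ *
+ *   bdk_oclax_dat_pop_t pop;
+ *   for (;;)
+ *   {
+ *       pop.u = BDK_CSR_READ(node, BDK_OCLAX_DAT_POP(ocla)); // Unloads one entry
+ *       if (!pop.s.valid)
+ *           break;
+ *       handle_entry(pop.s.entry); // OCLA_CAP_DAT_S/OCLA_CAP_CTL_S format; placeholder
+ *   }
+ */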
+
+/**
+ * Register (RSL) ocla#_eco
+ *
+ * INTERNAL: OCLA ECO Register
+ */
+union bdk_oclax_eco
+{
+ uint64_t u;
+ struct bdk_oclax_eco_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t eco_rw : 32; /**< [ 31: 0](R/W) Internal:
+ Reserved for ECO usage. */
+#else /* Word 0 - Little Endian */
+ uint64_t eco_rw : 32; /**< [ 31: 0](R/W) Internal:
+ Reserved for ECO usage. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_oclax_eco_s cn; */
+};
+typedef union bdk_oclax_eco bdk_oclax_eco_t;
+
+static inline uint64_t BDK_OCLAX_ECO(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_ECO(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0a83200d0ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=2))
+ return 0x87e0a83200d0ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X) && (a<=4))
+ return 0x87e0a83200d0ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=4))
+ return 0x87e0b00000d0ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("OCLAX_ECO", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCLAX_ECO(a) bdk_oclax_eco_t
+#define bustype_BDK_OCLAX_ECO(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_ECO(a) "OCLAX_ECO"
+#define device_bar_BDK_OCLAX_ECO(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_ECO(a) (a)
+#define arguments_BDK_OCLAX_ECO(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocla#_fifo_depth
+ *
+ * OCLA Capture FIFO Depth Registers
+ */
+union bdk_oclax_fifo_depth
+{
+ uint64_t u;
+ struct bdk_oclax_fifo_depth_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t depth : 16; /**< [ 15: 0](RO/H) Current capture FIFO depth in 36-bit words. */
+#else /* Word 0 - Little Endian */
+ uint64_t depth : 16; /**< [ 15: 0](RO/H) Current capture FIFO depth in 36-bit words. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_oclax_fifo_depth_s cn; */
+};
+typedef union bdk_oclax_fifo_depth bdk_oclax_fifo_depth_t;
+
+static inline uint64_t BDK_OCLAX_FIFO_DEPTH(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_FIFO_DEPTH(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0a8000200ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=2))
+ return 0x87e0a8000200ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=4))
+ return 0x87e0a8000200ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=4))
+ return 0x87e0b0000200ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("OCLAX_FIFO_DEPTH", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCLAX_FIFO_DEPTH(a) bdk_oclax_fifo_depth_t
+#define bustype_BDK_OCLAX_FIFO_DEPTH(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_FIFO_DEPTH(a) "OCLAX_FIFO_DEPTH"
+#define device_bar_BDK_OCLAX_FIFO_DEPTH(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_FIFO_DEPTH(a) (a)
+#define arguments_BDK_OCLAX_FIFO_DEPTH(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocla#_fifo_limit
+ *
+ * OCLA Capture FIFO Limit Registers
+ */
+union bdk_oclax_fifo_limit
+{
+ uint64_t u;
+ struct bdk_oclax_fifo_limit_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t overfull : 16; /**< [ 63: 48](R/W) Stop level. When OCLA()_FIFO_DEPTH \> [OVERFULL], stop capturing and set
+ OCLA()_STATE_INT[OVERFULL]. This should be set to no more than
+ OCLA()_CONST[DAT_SIZE] minus 26 when using DDR capture to ensure that overflow can be
+ detected. */
+ uint64_t ddr : 16; /**< [ 47: 32](R/W) DDR level. When OCLA()_FIFO_DEPTH \> [DDR], FIFO entries will be removed, packed into a
+ cache line, and overflowed to DDR/L2. All-ones disables overflow to DDR/L2. If nonzero
+ must be at least 52. */
+ uint64_t bp : 16; /**< [ 31: 16](R/W) Backpressure level. When OCLA()_FIFO_DEPTH \> [BP], OCLA will signal backpressure to
+ coprocessors. All-ones disables indicating backpressure. */
+ uint64_t wmark : 16; /**< [ 15: 0](R/W) Interrupt watermark level. When OCLA()_FIFO_DEPTH \> [WMARK], OCLA will set
+ OCLA()_STATE_INT[WMARK] interrupt. All-ones disables setting the interrupt. */
+#else /* Word 0 - Little Endian */
+ uint64_t wmark : 16; /**< [ 15: 0](R/W) Interrupt watermark level. When OCLA()_FIFO_DEPTH \> [WMARK], OCLA will set
+ OCLA()_STATE_INT[WMARK] interrupt. All-ones disables setting the interrupt. */
+ uint64_t bp : 16; /**< [ 31: 16](R/W) Backpressure level. When OCLA()_FIFO_DEPTH \> [BP], OCLA will signal backpressure to
+ coprocessors. All-ones disables indicating backpressure. */
+ uint64_t ddr : 16; /**< [ 47: 32](R/W) DDR level. When OCLA()_FIFO_DEPTH \> [DDR], FIFO entries will be removed, packed into a
+ cache line, and overflowed to DDR/L2. All-ones disables overflow to DDR/L2. If nonzero
+ must be at least 52. */
+ uint64_t overfull : 16; /**< [ 63: 48](R/W) Stop level. When OCLA()_FIFO_DEPTH \> [OVERFULL], stop capturing and set
+ OCLA()_STATE_INT[OVERFULL]. This should be set to no more than
+ OCLA()_CONST[DAT_SIZE] minus 26 when using DDR capture to ensure that overflow can be
+ detected. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_oclax_fifo_limit_s cn8; */
+ struct bdk_oclax_fifo_limit_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t overfull : 16; /**< [ 63: 48](R/W) Stop level. When OCLA()_FIFO_DEPTH \> [OVERFULL], stop capturing and set
+ OCLA()_STATE_INT[OVERFULL]. This should be set to no more than
+ OCLA()_CONST[DAT_SIZE] minus 26 when using DDR capture to ensure that overflow can be
+ detected. */
+ uint64_t ddr : 16; /**< [ 47: 32](R/W) DDR level. When OCLA()_FIFO_DEPTH \> [DDR], FIFO entries will be removed, packed into a
+ cache line, and overflowed to LLC/DRAM. All-ones disables overflow to DDR/L2. If nonzero
+ must be at least 52. */
+ uint64_t bp : 16; /**< [ 31: 16](R/W) Backpressure level. When OCLA()_FIFO_DEPTH \> [BP], OCLA will signal backpressure to
+ coprocessors. All-ones disables indicating backpressure. */
+ uint64_t wmark : 16; /**< [ 15: 0](R/W) Interrupt watermark level. When OCLA()_FIFO_DEPTH \> [WMARK], OCLA will set
+ OCLA()_STATE_INT[WMARK] interrupt. All-ones disables setting the interrupt. */
+#else /* Word 0 - Little Endian */
+ uint64_t wmark : 16; /**< [ 15: 0](R/W) Interrupt watermark level. When OCLA()_FIFO_DEPTH \> [WMARK], OCLA will set
+ OCLA()_STATE_INT[WMARK] interrupt. All-ones disables setting the interrupt. */
+ uint64_t bp : 16; /**< [ 31: 16](R/W) Backpressure level. When OCLA()_FIFO_DEPTH \> [BP], OCLA will signal backpressure to
+ coprocessors. All-ones disables indicating backpressure. */
+ uint64_t ddr : 16; /**< [ 47: 32](R/W) DDR level. When OCLA()_FIFO_DEPTH \> [DDR], FIFO entries will be removed, packed into a
+ cache line, and overflowed to LLC/DRAM. All-ones disables overflow to DDR/L2. If nonzero
+ must be at least 52. */
+ uint64_t overfull : 16; /**< [ 63: 48](R/W) Stop level. When OCLA()_FIFO_DEPTH \> [OVERFULL], stop capturing and set
+ OCLA()_STATE_INT[OVERFULL]. This should be set to no more than
+ OCLA()_CONST[DAT_SIZE] minus 26 when using DDR capture to ensure that overflow can be
+ detected. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_oclax_fifo_limit bdk_oclax_fifo_limit_t;
+
+static inline uint64_t BDK_OCLAX_FIFO_LIMIT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_FIFO_LIMIT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0a8000240ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=2))
+ return 0x87e0a8000240ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=4))
+ return 0x87e0a8000240ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=4))
+ return 0x87e0b0000240ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("OCLAX_FIFO_LIMIT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCLAX_FIFO_LIMIT(a) bdk_oclax_fifo_limit_t
+#define bustype_BDK_OCLAX_FIFO_LIMIT(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_FIFO_LIMIT(a) "OCLAX_FIFO_LIMIT"
+#define device_bar_BDK_OCLAX_FIFO_LIMIT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_FIFO_LIMIT(a) (a)
+#define arguments_BDK_OCLAX_FIFO_LIMIT(a) (a),-1,-1,-1
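+
+/* Illustrative sketch, not part of the generated header: program the four
+ * FIFO levels on one OCLA instance. Assumes the BDK_CSR_MODIFY helper from
+ * bdk-csr.h; the function name and the level values are examples only. */
+static inline void example_oclax_set_fifo_limits(bdk_node_t node, int ocla)
+{
+    BDK_CSR_MODIFY(c, node, BDK_OCLAX_FIFO_LIMIT(ocla),
+        c.s.wmark = 0x100;     /* Interrupt once depth exceeds 0x100 */
+        c.s.bp = 0x200;        /* Backpressure coprocessors above 0x200 */
+        c.s.ddr = 0xffff;      /* All-ones: no overflow to DDR/L2 */
+        c.s.overfull = 0x3c0); /* Stop level; keep within DAT_SIZE - 26 */
+}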
+
+/**
+ * Register (RSL) ocla#_fifo_tail
+ *
+ * OCLA Capture FIFO Tail Registers
+ */
+union bdk_oclax_fifo_tail
+{
+ uint64_t u;
+ struct bdk_oclax_fifo_tail_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t tail : 16; /**< [ 15: 0](RO/H) Address last written into entry FIFO. */
+#else /* Word 0 - Little Endian */
+ uint64_t tail : 16; /**< [ 15: 0](RO/H) Address last written into entry FIFO. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_oclax_fifo_tail_s cn; */
+};
+typedef union bdk_oclax_fifo_tail bdk_oclax_fifo_tail_t;
+
+static inline uint64_t BDK_OCLAX_FIFO_TAIL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_FIFO_TAIL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0a8000260ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=2))
+ return 0x87e0a8000260ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=4))
+ return 0x87e0a8000260ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=4))
+ return 0x87e0b0000260ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("OCLAX_FIFO_TAIL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCLAX_FIFO_TAIL(a) bdk_oclax_fifo_tail_t
+#define bustype_BDK_OCLAX_FIFO_TAIL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_FIFO_TAIL(a) "OCLAX_FIFO_TAIL"
+#define device_bar_BDK_OCLAX_FIFO_TAIL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_FIFO_TAIL(a) (a)
+#define arguments_BDK_OCLAX_FIFO_TAIL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocla#_fifo_trig
+ *
+ * OCLA Capture FIFO Trigger Level Registers
+ */
+union bdk_oclax_fifo_trig
+{
+ uint64_t u;
+ struct bdk_oclax_fifo_trig_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t limit : 16; /**< [ 31: 16](R/W) Post-trigger number of entries to collect before stopping collection. If zero, collection
+ will never stop, which may be desirable when overflowing to DDR/L2. Must be \<
+ OCLA()_CONST[DAT_SIZE] - 5. */
+ uint64_t cnt : 16; /**< [ 15: 0](R/W/H) Number of entries collected since trigger. Cleared when OCLA()_STATE_INT[TRIG] clear. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt : 16; /**< [ 15: 0](R/W/H) Number of entries collected since trigger. Cleared when OCLA()_STATE_INT[TRIG] clear. */
+ uint64_t limit : 16; /**< [ 31: 16](R/W) Post-trigger number of entries to collect before stopping collection. If zero, collection
+ will never stop, which may be desirable when overflowing to DDR/L2. Must be \<
+ OCLA()_CONST[DAT_SIZE] - 5. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_oclax_fifo_trig_s cn8; */
+ struct bdk_oclax_fifo_trig_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t limit : 16; /**< [ 31: 16](R/W) Post-trigger number of entries to collect before stopping collection. If zero, collection
+ will never stop, which may be desirable when overflowing to LLC/DRAM. Must be \<
+ OCLA()_CONST[DAT_SIZE] - 5. */
+ uint64_t cnt : 16; /**< [ 15: 0](R/W/H) Number of entries collected since trigger. Cleared when OCLA()_STATE_INT[TRIG] clear. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt : 16; /**< [ 15: 0](R/W/H) Number of entries collected since trigger. Cleared when OCLA()_STATE_INT[TRIG] clear. */
+ uint64_t limit : 16; /**< [ 31: 16](R/W) Post-trigger number of entries to collect before stopping collection. If zero, collection
+ will never stop, which may be desirable when overflowing to LLC/DRAM. Must be \<
+ OCLA()_CONST[DAT_SIZE] - 5. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_oclax_fifo_trig bdk_oclax_fifo_trig_t;
+
+static inline uint64_t BDK_OCLAX_FIFO_TRIG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_FIFO_TRIG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0a80002a0ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=2))
+ return 0x87e0a80002a0ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=4))
+ return 0x87e0a80002a0ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=4))
+ return 0x87e0b00002a0ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("OCLAX_FIFO_TRIG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCLAX_FIFO_TRIG(a) bdk_oclax_fifo_trig_t
+#define bustype_BDK_OCLAX_FIFO_TRIG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_FIFO_TRIG(a) "OCLAX_FIFO_TRIG"
+#define device_bar_BDK_OCLAX_FIFO_TRIG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_FIFO_TRIG(a) (a)
+#define arguments_BDK_OCLAX_FIFO_TRIG(a) (a),-1,-1,-1
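+
+/* Illustrative sketch, not part of the generated header: restart the
+ * post-trigger count and collect 0x80 more entries after the trigger fires.
+ * Assumes BDK_CSR_MODIFY from bdk-csr.h; the name is an example only. */
+static inline void example_oclax_set_post_trigger(bdk_node_t node, int ocla)
+{
+    BDK_CSR_MODIFY(c, node, BDK_OCLAX_FIFO_TRIG(ocla),
+        c.s.cnt = 0;       /* Clear entries-since-trigger count */
+        c.s.limit = 0x80); /* Must stay below OCLA()_CONST[DAT_SIZE] - 5 */
+}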
+
+/**
+ * Register (RSL) ocla#_fifo_wrap
+ *
+ * OCLA Capture FIFO Wrap Counter Registers
+ */
+union bdk_oclax_fifo_wrap
+{
+ uint64_t u;
+ struct bdk_oclax_fifo_wrap_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t wraps : 32; /**< [ 31: 0](R/W/H) Number of times FIFO has wrapped since trigger.
+ Cleared when OCLA()_STATE_INT[TRIG] is clear.
+ This count observes trigger events with a one-cycle lag. */
+#else /* Word 0 - Little Endian */
+ uint64_t wraps : 32; /**< [ 31: 0](R/W/H) Number of times FIFO has wrapped since trigger.
+ Cleared when OCLA()_STATE_INT[TRIG] is clear.
+ This count observes trigger events with a one-cycle lag. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_oclax_fifo_wrap_s cn; */
+};
+typedef union bdk_oclax_fifo_wrap bdk_oclax_fifo_wrap_t;
+
+static inline uint64_t BDK_OCLAX_FIFO_WRAP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_FIFO_WRAP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0a8000280ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=2))
+ return 0x87e0a8000280ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=4))
+ return 0x87e0a8000280ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=4))
+ return 0x87e0b0000280ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("OCLAX_FIFO_WRAP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCLAX_FIFO_WRAP(a) bdk_oclax_fifo_wrap_t
+#define bustype_BDK_OCLAX_FIFO_WRAP(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_FIFO_WRAP(a) "OCLAX_FIFO_WRAP"
+#define device_bar_BDK_OCLAX_FIFO_WRAP(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_FIFO_WRAP(a) (a)
+#define arguments_BDK_OCLAX_FIFO_WRAP(a) (a),-1,-1,-1
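+
+/* Illustrative sketch, not part of the generated header: a nonzero wrap
+ * count after capture stops means the FIFO overwrote its oldest entries at
+ * least once since the trigger. Assumes BDK_CSR_READ from bdk-csr.h. */
+static inline int example_oclax_fifo_wrapped(bdk_node_t node, int ocla)
+{
+    return BDK_CSR_READ(node, BDK_OCLAX_FIFO_WRAP(ocla)) != 0;
+}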
+
+/**
+ * Register (RSL) ocla#_fsm#_and#_i#
+ *
+ * OCLA FSM PLA AND Tree Registers
+ * Values for the PLA AND plane. AND(0..15) selects one of the 16 allowed AND terms. I(0..1)
+ * selects input polarity: I=0 is the non-inverted term, I=1 the inverted term. Any AND term
+ * may be disabled by setting the same bit in both _I(0) and _I(1), as '((1) & !(1))' is
+ * always false.
+ */
+union bdk_oclax_fsmx_andx_ix
+{
+ uint64_t u;
+ struct bdk_oclax_fsmx_andx_ix_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t trig : 1; /**< [ 15: 15](R/W) AND plane control for trigger FSM input. */
+ uint64_t mcd : 3; /**< [ 14: 12](R/W) AND plane control for multichip debug (MCD) 0..2 FSM inputs. */
+ uint64_t match : 4; /**< [ 11: 8](R/W) AND plane control for matcher 0..3 FSM inputs. */
+ uint64_t fsm1_state : 4; /**< [ 7: 4](R/W) AND plane control for FSM 1 last state input. */
+ uint64_t fsm0_state : 4; /**< [ 3: 0](R/W) AND plane control for FSM 0 last state input. */
+#else /* Word 0 - Little Endian */
+ uint64_t fsm0_state : 4; /**< [ 3: 0](R/W) AND plane control for FSM 0 last state input. */
+ uint64_t fsm1_state : 4; /**< [ 7: 4](R/W) AND plane control for FSM 1 last state input. */
+ uint64_t match : 4; /**< [ 11: 8](R/W) AND plane control for matcher 0..3 FSM inputs. */
+ uint64_t mcd : 3; /**< [ 14: 12](R/W) AND plane control for multichip debug (MCD) 0..2 FSM inputs. */
+ uint64_t trig : 1; /**< [ 15: 15](R/W) AND plane control for trigger FSM input. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_oclax_fsmx_andx_ix_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t trig : 1; /**< [ 15: 15](RAZ) Reserved. */
+ uint64_t mcd : 3; /**< [ 14: 12](R/W) AND plane control for multichip debug (MCD) 0..2 FSM inputs. */
+ uint64_t match : 4; /**< [ 11: 8](R/W) AND plane control for matcher 0..3 FSM inputs. */
+ uint64_t fsm1_state : 4; /**< [ 7: 4](R/W) AND plane control for FSM 1 last state input. */
+ uint64_t fsm0_state : 4; /**< [ 3: 0](R/W) AND plane control for FSM 0 last state input. */
+#else /* Word 0 - Little Endian */
+ uint64_t fsm0_state : 4; /**< [ 3: 0](R/W) AND plane control for FSM 0 last state input. */
+ uint64_t fsm1_state : 4; /**< [ 7: 4](R/W) AND plane control for FSM 1 last state input. */
+ uint64_t match : 4; /**< [ 11: 8](R/W) AND plane control for matcher 0..3 FSM inputs. */
+ uint64_t mcd : 3; /**< [ 14: 12](R/W) AND plane control for multichip debug (MCD) 0..2 FSM inputs. */
+ uint64_t trig : 1; /**< [ 15: 15](RAZ) Reserved. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ /* struct bdk_oclax_fsmx_andx_ix_s cn9; */
+ /* struct bdk_oclax_fsmx_andx_ix_s cn81xx; */
+ /* struct bdk_oclax_fsmx_andx_ix_s cn83xx; */
+ /* struct bdk_oclax_fsmx_andx_ix_s cn88xxp2; */
+};
+typedef union bdk_oclax_fsmx_andx_ix bdk_oclax_fsmx_andx_ix_t;
+
+static inline uint64_t BDK_OCLAX_FSMX_ANDX_IX(unsigned long a, unsigned long b, unsigned long c, unsigned long d) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_FSMX_ANDX_IX(unsigned long a, unsigned long b, unsigned long c, unsigned long d)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=1) && (c<=15) && (d<=1)))
+ return 0x87e0a8300000ll + 0x1000000ll * ((a) & 0x1) + 0x1000ll * ((b) & 0x1) + 0x10ll * ((c) & 0xf) + 8ll * ((d) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=2) && (b<=1) && (c<=15) && (d<=1)))
+ return 0x87e0a8300000ll + 0x1000000ll * ((a) & 0x3) + 0x1000ll * ((b) & 0x1) + 0x10ll * ((c) & 0xf) + 8ll * ((d) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=4) && (b<=1) && (c<=15) && (d<=1)))
+ return 0x87e0a8300000ll + 0x1000000ll * ((a) & 0x7) + 0x1000ll * ((b) & 0x1) + 0x10ll * ((c) & 0xf) + 8ll * ((d) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=4) && (b<=1) && (c<=15) && (d<=1)))
+ return 0x87e0b0300000ll + 0x1000000ll * ((a) & 0x7) + 0x1000ll * ((b) & 0x1) + 0x10ll * ((c) & 0xf) + 8ll * ((d) & 0x1);
+ __bdk_csr_fatal("OCLAX_FSMX_ANDX_IX", 4, a, b, c, d);
+}
+
+#define typedef_BDK_OCLAX_FSMX_ANDX_IX(a,b,c,d) bdk_oclax_fsmx_andx_ix_t
+#define bustype_BDK_OCLAX_FSMX_ANDX_IX(a,b,c,d) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_FSMX_ANDX_IX(a,b,c,d) "OCLAX_FSMX_ANDX_IX"
+#define device_bar_BDK_OCLAX_FSMX_ANDX_IX(a,b,c,d) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_FSMX_ANDX_IX(a,b,c,d) (a)
+#define arguments_BDK_OCLAX_FSMX_ANDX_IX(a,b,c,d) (a),(b),(c),(d)
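+
+/* Illustrative sketch, not part of the generated header: disable one AND
+ * term by setting the same input bit in both the non-inverted (_I(0)) and
+ * inverted (_I(1)) planes, as described above. The [TRIG] input is used as
+ * the example bit (note it is RAZ on CN88XX pass 1). Assumes BDK_CSR_MODIFY
+ * from bdk-csr.h. */
+static inline void example_oclax_disable_and_term(bdk_node_t node, int ocla,
+                                                  int fsm, int term)
+{
+    BDK_CSR_MODIFY(c, node, BDK_OCLAX_FSMX_ANDX_IX(ocla, fsm, term, 0),
+        c.s.trig = 1);
+    BDK_CSR_MODIFY(c, node, BDK_OCLAX_FSMX_ANDX_IX(ocla, fsm, term, 1),
+        c.s.trig = 1);
+}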
+
+/**
+ * Register (RSL) ocla#_fsm#_or#
+ *
+ * OCLA FSM PLA OR Tree Registers
+ */
+union bdk_oclax_fsmx_orx
+{
+ uint64_t u;
+ struct bdk_oclax_fsmx_orx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t or_state : 16; /**< [ 15: 0](R/W) Column to drive on PLA OR-plane. */
+#else /* Word 0 - Little Endian */
+ uint64_t or_state : 16; /**< [ 15: 0](R/W) Column to drive on PLA OR-plane. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_oclax_fsmx_orx_s cn; */
+};
+typedef union bdk_oclax_fsmx_orx bdk_oclax_fsmx_orx_t;
+
+static inline uint64_t BDK_OCLAX_FSMX_ORX(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_FSMX_ORX(unsigned long a, unsigned long b, unsigned long c)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=1) && (c<=15)))
+ return 0x87e0a8310000ll + 0x1000000ll * ((a) & 0x1) + 0x1000ll * ((b) & 0x1) + 8ll * ((c) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=2) && (b<=1) && (c<=15)))
+ return 0x87e0a8310000ll + 0x1000000ll * ((a) & 0x3) + 0x1000ll * ((b) & 0x1) + 8ll * ((c) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=4) && (b<=1) && (c<=15)))
+ return 0x87e0a8310000ll + 0x1000000ll * ((a) & 0x7) + 0x1000ll * ((b) & 0x1) + 8ll * ((c) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=4) && (b<=1) && (c<=15)))
+ return 0x87e0b0310000ll + 0x1000000ll * ((a) & 0x7) + 0x1000ll * ((b) & 0x1) + 8ll * ((c) & 0xf);
+ __bdk_csr_fatal("OCLAX_FSMX_ORX", 3, a, b, c, 0);
+}
+
+#define typedef_BDK_OCLAX_FSMX_ORX(a,b,c) bdk_oclax_fsmx_orx_t
+#define bustype_BDK_OCLAX_FSMX_ORX(a,b,c) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_FSMX_ORX(a,b,c) "OCLAX_FSMX_ORX"
+#define device_bar_BDK_OCLAX_FSMX_ORX(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_FSMX_ORX(a,b,c) (a)
+#define arguments_BDK_OCLAX_FSMX_ORX(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (RSL) ocla#_fsm#_state#
+ *
+ * OCLA FSM State Registers
+ * See the OCLA chapter text for more details on each of these actions.
+ */
+union bdk_oclax_fsmx_statex
+{
+ uint64_t u;
+ struct bdk_oclax_fsmx_statex_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_31_63 : 33;
+ uint64_t sinfo_set : 1; /**< [ 30: 30](R/W) If a control packet is generated in this state (due to capture starting
+ in the next cycle), set OCLA_CAP_CTL_S[SINFO]. */
+ uint64_t set_int : 1; /**< [ 29: 29](R/W) In this state set interrupt. */
+ uint64_t cap : 1; /**< [ 28: 28](R/W) In this state request capture this cycle. */
+ uint64_t set_mcd : 3; /**< [ 27: 25](R/W) In this state set MCD. */
+ uint64_t set_trig : 1; /**< [ 24: 24](R/W) In this state set internal trigger indication. */
+ uint64_t reserved_20_23 : 4;
+ uint64_t set_val : 4; /**< [ 19: 16](R/W) In this state store match value into matcher 0..3. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t clr_cnt : 4; /**< [ 11: 8](R/W) In this state clear match counter. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t inc_cnt : 4; /**< [ 3: 0](R/W) In this state increment match counter. */
+#else /* Word 0 - Little Endian */
+ uint64_t inc_cnt : 4; /**< [ 3: 0](R/W) In this state increment match counter. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t clr_cnt : 4; /**< [ 11: 8](R/W) In this state clear match counter. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t set_val : 4; /**< [ 19: 16](R/W) In this state store match value into matcher 0..3. */
+ uint64_t reserved_20_23 : 4;
+ uint64_t set_trig : 1; /**< [ 24: 24](R/W) In this state set internal trigger indication. */
+ uint64_t set_mcd : 3; /**< [ 27: 25](R/W) In this state set MCD. */
+ uint64_t cap : 1; /**< [ 28: 28](R/W) In this state request capture this cycle. */
+ uint64_t set_int : 1; /**< [ 29: 29](R/W) In this state set interrupt. */
+ uint64_t sinfo_set : 1; /**< [ 30: 30](R/W) If a control packet is generated in this state (due to capture starting
+ in the next cycle), set OCLA_CAP_CTL_S[SINFO]. */
+ uint64_t reserved_31_63 : 33;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_oclax_fsmx_statex_s cn; */
+};
+typedef union bdk_oclax_fsmx_statex bdk_oclax_fsmx_statex_t;
+
+static inline uint64_t BDK_OCLAX_FSMX_STATEX(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_FSMX_STATEX(unsigned long a, unsigned long b, unsigned long c)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=1) && (c<=15)))
+ return 0x87e0a8320000ll + 0x1000000ll * ((a) & 0x1) + 0x1000ll * ((b) & 0x1) + 8ll * ((c) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=2) && (b<=1) && (c<=15)))
+ return 0x87e0a8320000ll + 0x1000000ll * ((a) & 0x3) + 0x1000ll * ((b) & 0x1) + 8ll * ((c) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=4) && (b<=1) && (c<=15)))
+ return 0x87e0a8320000ll + 0x1000000ll * ((a) & 0x7) + 0x1000ll * ((b) & 0x1) + 8ll * ((c) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=4) && (b<=1) && (c<=15)))
+ return 0x87e0b0320000ll + 0x1000000ll * ((a) & 0x7) + 0x1000ll * ((b) & 0x1) + 8ll * ((c) & 0xf);
+ __bdk_csr_fatal("OCLAX_FSMX_STATEX", 3, a, b, c, 0);
+}
+
+#define typedef_BDK_OCLAX_FSMX_STATEX(a,b,c) bdk_oclax_fsmx_statex_t
+#define bustype_BDK_OCLAX_FSMX_STATEX(a,b,c) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_FSMX_STATEX(a,b,c) "OCLAX_FSMX_STATEX"
+#define device_bar_BDK_OCLAX_FSMX_STATEX(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_FSMX_STATEX(a,b,c) (a)
+#define arguments_BDK_OCLAX_FSMX_STATEX(a,b,c) (a),(b),(c),-1
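+
+/* Illustrative sketch, not part of the generated header: make one FSM state
+ * both request capture and latch the internal trigger when entered.
+ * Assumes BDK_CSR_MODIFY from bdk-csr.h; the name is an example only. */
+static inline void example_oclax_state_capture(bdk_node_t node, int ocla,
+                                               int fsm, int state)
+{
+    BDK_CSR_MODIFY(c, node, BDK_OCLAX_FSMX_STATEX(ocla, fsm, state),
+        c.s.cap = 1;       /* Request capture while in this state */
+        c.s.set_trig = 1); /* Set the internal trigger indication */
+}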
+
+/**
+ * Register (RSL) ocla#_gen_ctl
+ *
+ * OCLA General Control Registers
+ */
+union bdk_oclax_gen_ctl
+{
+ uint64_t u;
+ struct bdk_oclax_gen_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_7_63 : 57;
+ uint64_t mcdtrig : 3; /**< [ 6: 4](R/W) Enable MCD triggering. For each bit corresponding to the three MCDs:
+ 0 = MCD does not cause trigger.
+ 1 = When the corresponding MCD is received it will cause
+ triggering and set OCLA()_STATE_SET[TRIG]. */
+ uint64_t exten : 1; /**< [ 3: 3](R/W) Enable external triggering.
+ 0 = External triggering ignored.
+ 1 = When the external trigger pin selected with GPIO_PIN_SEL_E::OCLA_EXT_TRIGGER
+ is high, it causes triggering and sets OCLA()_STATE_SET[TRIG]. The external
+ device must de-assert the signal (it is not edge sensitive). */
+ uint64_t den : 1; /**< [ 2: 2](R/W) Enable data bus and counter clocking. When set, the OCLA inbound data bus may be used and
+ counters may increment. When clear, the bus is always zero and internal flops may be clock
+ gated off to save power. Must be set for normal operation. */
+ uint64_t stt : 1; /**< [ 1: 1](R/W) Store to DDR directly, bypassing L2 cache. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t stt : 1; /**< [ 1: 1](R/W) Store to DDR directly, bypassing L2 cache. */
+ uint64_t den : 1; /**< [ 2: 2](R/W) Enable data bus and counter clocking. When set, the OCLA inbound data bus may be used and
+ counters may increment. When clear, the bus is always zero and internal flops may be clock
+ gated off to save power. Must be set for normal operation. */
+ uint64_t exten : 1; /**< [ 3: 3](R/W) Enable external triggering.
+ 0 = External triggering ignored.
+ 1 = When the external trigger pin selected with GPIO_PIN_SEL_E::OCLA_EXT_TRIGGER
+ is high, it causes triggering and sets OCLA()_STATE_SET[TRIG]. The external
+ device must de-assert the signal (it is not edge sensitive). */
+ uint64_t mcdtrig : 3; /**< [ 6: 4](R/W) Enable MCD triggering. For each bit corresponding to the three MCDs:
+ 0 = MCD does not cause trigger.
+ 1 = When the corresponding MCD is received it will cause
+ triggering and set OCLA()_STATE_SET[TRIG]. */
+ uint64_t reserved_7_63 : 57;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_oclax_gen_ctl_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_7_63 : 57;
+ uint64_t mcdtrig : 3; /**< [ 6: 4](RAZ) Reserved. */
+ uint64_t exten : 1; /**< [ 3: 3](R/W) Enable external triggering.
+ 0 = External triggering ignored.
+ 1 = When the external trigger pin selected with GPIO_PIN_SEL_E::OCLA_EXT_TRIGGER
+ is high, it causes triggering and sets OCLA()_STATE_SET[TRIG]. The external
+ device must de-assert the signal (it is not edge sensitive). */
+ uint64_t den : 1; /**< [ 2: 2](R/W) Enable data bus and counter clocking. When set, the OCLA inbound data bus may be used and
+ counters may increment. When clear, the bus is always zero and internal flops may be clock
+ gated off to save power. Must be set for normal operation. */
+ uint64_t stt : 1; /**< [ 1: 1](R/W) Store to DDR directly, bypassing L2 cache. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t stt : 1; /**< [ 1: 1](R/W) Store to DDR directly, bypassing L2 cache. */
+ uint64_t den : 1; /**< [ 2: 2](R/W) Enable data bus and counter clocking. When set, the OCLA inbound data bus may be used and
+ counters may increment. When clear, the bus is always zero and internal flops may be clock
+ gated off to save power. Must be set for normal operation. */
+ uint64_t exten : 1; /**< [ 3: 3](R/W) Enable external triggering.
+ 0 = External triggering ignored.
+ 1 = When the external trigger pin selected with GPIO_PIN_SEL_E::OCLA_EXT_TRIGGER
+ is high, it causes triggering and sets OCLA()_STATE_SET[TRIG]. The external
+ device must de-assert the signal (it is not edge sensitive). */
+ uint64_t mcdtrig : 3; /**< [ 6: 4](RAZ) Reserved. */
+ uint64_t reserved_7_63 : 57;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_oclax_gen_ctl_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_7_63 : 57;
+ uint64_t mcdtrig : 3; /**< [ 6: 4](R/W) Enable MCD triggering. For each bit corresponding to the three MCDs:
+ 0 = MCD does not cause trigger.
+ 1 = When the corresponding MCD is received it will cause
+ triggering and set OCLA()_STATE_SET[TRIG]. */
+ uint64_t exten : 1; /**< [ 3: 3](R/W) Enable external triggering.
+ 0 = External triggering ignored.
+ 1 = When the external trigger pin selected with GPIO_PIN_SEL_E::OCLA_EXT_TRIGGER
+ is high, it causes triggering and sets OCLA()_STATE_SET[TRIG]. The external
+ device must de-assert the signal (it is not edge sensitive). */
+ uint64_t den : 1; /**< [ 2: 2](R/W) Enable data bus and counter clocking. When set, the OCLA inbound data bus may be used and
+ counters may increment. When clear, the bus is always zero and internal flops may be clock
+ gated off to save power. Must be set for normal operation. */
+ uint64_t stt : 1; /**< [ 1: 1](R/W) Store to DRAM directly, bypassing LLC. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t stt : 1; /**< [ 1: 1](R/W) Store to DRAM directly, bypassing LLC. */
+ uint64_t den : 1; /**< [ 2: 2](R/W) Enable data bus and counter clocking. When set, the OCLA inbound data bus may be used and
+ counters may increment. When clear, the bus is always zero and internal flops may be clock
+ gated off to save power. Must be set for normal operation. */
+ uint64_t exten : 1; /**< [ 3: 3](R/W) Enable external triggering.
+ 0 = External triggering ignored.
+ 1 = When the external trigger pin selected with GPIO_PIN_SEL_E::OCLA_EXT_TRIGGER
+ is high, it causes triggering and sets OCLA()_STATE_SET[TRIG]. The external
+ device must de-assert the signal (it is not edge sensitive). */
+ uint64_t mcdtrig : 3; /**< [ 6: 4](R/W) Enable MCD triggering. For each bit corresponding to the three MCDs:
+ 0 = MCD does not cause trigger.
+ 1 = When the corresponding MCD is received it will cause
+ triggering and set OCLA()_STATE_SET[TRIG]. */
+ uint64_t reserved_7_63 : 57;
+#endif /* Word 0 - End */
+ } cn9;
+ /* struct bdk_oclax_gen_ctl_s cn81xx; */
+ /* struct bdk_oclax_gen_ctl_s cn83xx; */
+ /* struct bdk_oclax_gen_ctl_s cn88xxp2; */
+};
+typedef union bdk_oclax_gen_ctl bdk_oclax_gen_ctl_t;
+
+static inline uint64_t BDK_OCLAX_GEN_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_GEN_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0a8000060ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=2))
+ return 0x87e0a8000060ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=4))
+ return 0x87e0a8000060ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=4))
+ return 0x87e0b0000060ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("OCLAX_GEN_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCLAX_GEN_CTL(a) bdk_oclax_gen_ctl_t
+#define bustype_BDK_OCLAX_GEN_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_GEN_CTL(a) "OCLAX_GEN_CTL"
+#define device_bar_BDK_OCLAX_GEN_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_GEN_CTL(a) (a)
+#define arguments_BDK_OCLAX_GEN_CTL(a) (a),-1,-1,-1
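+
+/* Illustrative sketch, not part of the generated header: [DEN] must be set
+ * before the inbound data bus or counters do anything; external and MCD
+ * triggering are left disabled here. Assumes BDK_CSR_MODIFY from bdk-csr.h. */
+static inline void example_oclax_enable(bdk_node_t node, int ocla)
+{
+    BDK_CSR_MODIFY(c, node, BDK_OCLAX_GEN_CTL(ocla),
+        c.s.den = 1;      /* Required for normal operation */
+        c.s.exten = 0;    /* Ignore the external trigger pin */
+        c.s.mcdtrig = 0); /* No triggering from MCD 0..2 */
+}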
+
+/**
+ * Register (RSL) ocla#_mat#_count
+ *
+ * OCLA Matcher Count Registers
+ */
+union bdk_oclax_matx_count
+{
+ uint64_t u;
+ struct bdk_oclax_matx_count_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t count : 32; /**< [ 31: 0](R/W/H) Current counter value. Note software must reset this to zero (or the appropriate count)
+ before starting capture. */
+#else /* Word 0 - Little Endian */
+ uint64_t count : 32; /**< [ 31: 0](R/W/H) Current counter value. Note software must reset this to zero (or the appropriate count)
+ before starting capture. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_oclax_matx_count_s cn; */
+};
+typedef union bdk_oclax_matx_count bdk_oclax_matx_count_t;
+
+static inline uint64_t BDK_OCLAX_MATX_COUNT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_MATX_COUNT(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0a8230000ll + 0x1000000ll * ((a) & 0x1) + 0x1000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=2) && (b<=3)))
+ return 0x87e0a8230000ll + 0x1000000ll * ((a) & 0x3) + 0x1000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=4) && (b<=3)))
+ return 0x87e0a8230000ll + 0x1000000ll * ((a) & 0x7) + 0x1000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=4) && (b<=3)))
+ return 0x87e0b0230000ll + 0x1000000ll * ((a) & 0x7) + 0x1000ll * ((b) & 0x3);
+ __bdk_csr_fatal("OCLAX_MATX_COUNT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_OCLAX_MATX_COUNT(a,b) bdk_oclax_matx_count_t
+#define bustype_BDK_OCLAX_MATX_COUNT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_MATX_COUNT(a,b) "OCLAX_MATX_COUNT"
+#define device_bar_BDK_OCLAX_MATX_COUNT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_MATX_COUNT(a,b) (a)
+#define arguments_BDK_OCLAX_MATX_COUNT(a,b) (a),(b),-1,-1
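+
+/* Illustrative sketch, not part of the generated header: per the note above,
+ * software zeroes the matcher counter before starting a capture session.
+ * Assumes BDK_CSR_WRITE from bdk-csr.h. */
+static inline void example_oclax_clear_mat_count(bdk_node_t node, int ocla,
+                                                 int mat)
+{
+    BDK_CSR_WRITE(node, BDK_OCLAX_MATX_COUNT(ocla, mat), 0);
+}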
+
+/**
+ * Register (RSL) ocla#_mat#_ctl
+ *
+ * OCLA Matcher Control Registers
+ */
+union bdk_oclax_matx_ctl
+{
+ uint64_t u;
+ struct bdk_oclax_matx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t fsm_ctr : 1; /**< [ 7: 7](R/W) What output the matcher provides to the FSM:
+ 0 = FSM receives raw match signal, asserting only in those cycles with matches.
+ 1 = FSM receives OCLA()_MAT()_COUNT \>= OCLA()_MAT()_THRESH. */
+ uint64_t inc_match : 1; /**< [ 6: 6](R/W) Increment OCLA()_MAT()_COUNT counter automatically on each match. */
+ uint64_t shift : 6; /**< [ 5: 0](R/W) Right rotation amount to apply to data loaded into OCLA()_MAT()_VALUE()
+ register when FSM requests a value load. */
+#else /* Word 0 - Little Endian */
+ uint64_t shift : 6; /**< [ 5: 0](R/W) Right rotation amount to apply to data loaded into OCLA()_MAT()_VALUE()
+ register when FSM requests a value load. */
+ uint64_t inc_match : 1; /**< [ 6: 6](R/W) Increment OCLA()_MAT()_COUNT counter automatically on each match. */
+ uint64_t fsm_ctr : 1; /**< [ 7: 7](R/W) What output the matcher provides to the FSM:
+ 0 = FSM receives raw match signal, asserting only in those cycles with matches.
+ 1 = FSM receives OCLA()_MAT()_COUNT \>= OCLA()_MAT()_THRESH. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_oclax_matx_ctl_s cn; */
+};
+typedef union bdk_oclax_matx_ctl bdk_oclax_matx_ctl_t;
+
+static inline uint64_t BDK_OCLAX_MATX_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_MATX_CTL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0a8200000ll + 0x1000000ll * ((a) & 0x1) + 0x1000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=2) && (b<=3)))
+ return 0x87e0a8200000ll + 0x1000000ll * ((a) & 0x3) + 0x1000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=4) && (b<=3)))
+ return 0x87e0a8200000ll + 0x1000000ll * ((a) & 0x7) + 0x1000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=4) && (b<=3)))
+ return 0x87e0b0200000ll + 0x1000000ll * ((a) & 0x7) + 0x1000ll * ((b) & 0x3);
+ __bdk_csr_fatal("OCLAX_MATX_CTL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_OCLAX_MATX_CTL(a,b) bdk_oclax_matx_ctl_t
+#define bustype_BDK_OCLAX_MATX_CTL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_MATX_CTL(a,b) "OCLAX_MATX_CTL"
+#define device_bar_BDK_OCLAX_MATX_CTL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_MATX_CTL(a,b) (a)
+#define arguments_BDK_OCLAX_MATX_CTL(a,b) (a),(b),-1,-1
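+
+/* Illustrative sketch, not part of the generated header: put a matcher in
+ * counted mode so the FSM sees OCLA()_MAT()_COUNT >= OCLA()_MAT()_THRESH
+ * instead of the raw per-cycle match signal. Assumes BDK_CSR_MODIFY from
+ * bdk-csr.h; the function name is an example only. */
+static inline void example_oclax_mat_counted(bdk_node_t node, int ocla, int mat)
+{
+    BDK_CSR_MODIFY(c, node, BDK_OCLAX_MATX_CTL(ocla, mat),
+        c.s.inc_match = 1; /* Count every match automatically */
+        c.s.fsm_ctr = 1);  /* FSM input asserts at the threshold */
+}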
+
+/**
+ * Register (RSL) ocla#_mat#_mask#
+ *
+ * OCLA Matcher Compare Mask Registers
+ */
+union bdk_oclax_matx_maskx
+{
+ uint64_t u;
+ struct bdk_oclax_matx_maskx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t mask : 36; /**< [ 35: 0](R/W) Bitmask of which bits in OCLA()_MAT()_VALUE() are to be compared.
+
+ Each bit of OCLA()_MAT()_VALUE() and OCLA()_MAT()_MASK() is combined as
+ follows:
+
+ _ If MASK = 1 and VALUE = 0, matches when corresponding bit of data = "0".
+ _ If MASK = 1 and VALUE = 1, matches when corresponding bit of data = "1".
+ _ If MASK = 0, matches regardless of corresponding bit of data. */
+#else /* Word 0 - Little Endian */
+ uint64_t mask : 36; /**< [ 35: 0](R/W) Bitmask of which bits in OCLA()_MAT()_VALUE() are to be compared.
+
+ Each bit of OCLA()_MAT()_VALUE() and OCLA()_MAT()_MASK() is combined as
+ follows:
+
+ _ If MASK = 1 and VALUE = 0, matches when corresponding bit of data = "0".
+ _ If MASK = 1 and VALUE = 1, matches when corresponding bit of data = "1".
+ _ If MASK = 0, matches regardless of corresponding bit of data. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_oclax_matx_maskx_s cn9; */
+ /* struct bdk_oclax_matx_maskx_s cn81xx; */
+ struct bdk_oclax_matx_maskx_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t mask : 36; /**< [ 35: 0](R/W) Bitmask of which bits in OCLA()_MAT()_VALUE() are to be compared.
+
+ Each bit of OCLA()_MAT()_VALUE() and OCLA()_MAT()_MASK() is combined as
+ follows:
+
+ _ If MASK = 1 and VALUE = 0, matches when data = "0".
+ _ If MASK = 1 and VALUE = 1, matches when data = "1".
+ _ If MASK = 0 and VALUE = 0, matches any data.
+ _ If MASK = 0 and VALUE = 1, reserved in pass 1, matches any data pass 2 and later. */
+#else /* Word 0 - Little Endian */
+ uint64_t mask : 36; /**< [ 35: 0](R/W) Bitmask of which bits in OCLA()_MAT()_VALUE() are to be compared.
+
+ Each bit of OCLA()_MAT()_VALUE() and OCLA()_MAT()_MASK() is combined as
+ follows:
+
+ _ If MASK = 1 and VALUE = 0, matches when data = "0".
+ _ If MASK = 1 and VALUE = 1, matches when data = "1".
+ _ If MASK = 0 and VALUE = 0, matches any data.
+ _ If MASK = 0 and VALUE = 1, reserved in pass 1, matches any data pass 2 and later. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_oclax_matx_maskx_s cn83xx; */
+};
+typedef union bdk_oclax_matx_maskx bdk_oclax_matx_maskx_t;
+
+static inline uint64_t BDK_OCLAX_MATX_MASKX(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_MATX_MASKX(unsigned long a, unsigned long b, unsigned long c)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3) && (c<=1)))
+ return 0x87e0a8220000ll + 0x1000000ll * ((a) & 0x1) + 0x1000ll * ((b) & 0x3) + 8ll * ((c) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=2) && (b<=3) && (c<=1)))
+ return 0x87e0a8220000ll + 0x1000000ll * ((a) & 0x3) + 0x1000ll * ((b) & 0x3) + 8ll * ((c) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=4) && (b<=3) && (c<=1)))
+ return 0x87e0a8220000ll + 0x1000000ll * ((a) & 0x7) + 0x1000ll * ((b) & 0x3) + 8ll * ((c) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=4) && (b<=3) && (c<=1)))
+ return 0x87e0b0220000ll + 0x1000000ll * ((a) & 0x7) + 0x1000ll * ((b) & 0x3) + 8ll * ((c) & 0x1);
+ __bdk_csr_fatal("OCLAX_MATX_MASKX", 3, a, b, c, 0);
+}
+
+#define typedef_BDK_OCLAX_MATX_MASKX(a,b,c) bdk_oclax_matx_maskx_t
+#define bustype_BDK_OCLAX_MATX_MASKX(a,b,c) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_MATX_MASKX(a,b,c) "OCLAX_MATX_MASKX"
+#define device_bar_BDK_OCLAX_MATX_MASKX(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_MATX_MASKX(a,b,c) (a)
+#define arguments_BDK_OCLAX_MATX_MASKX(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (RSL) ocla#_mat#_thresh
+ *
+ * OCLA Matcher Count Threshold Registers
+ */
+union bdk_oclax_matx_thresh
+{
+ uint64_t u;
+ struct bdk_oclax_matx_thresh_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t thresh : 32; /**< [ 31: 0](R/W) Counter threshold value. Compared against OCLA()_MAT()_COUNT to assert matcher
+ output, and set OCLA()_STATE_INT[OVFL]. */
+#else /* Word 0 - Little Endian */
+ uint64_t thresh : 32; /**< [ 31: 0](R/W) Counter threshold value. Compared against OCLA()_MAT()_COUNT to assert matcher
+ output, and set OCLA()_STATE_INT[OVFL]. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_oclax_matx_thresh_s cn; */
+};
+typedef union bdk_oclax_matx_thresh bdk_oclax_matx_thresh_t;
+
+static inline uint64_t BDK_OCLAX_MATX_THRESH(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_MATX_THRESH(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0a8240000ll + 0x1000000ll * ((a) & 0x1) + 0x1000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=2) && (b<=3)))
+ return 0x87e0a8240000ll + 0x1000000ll * ((a) & 0x3) + 0x1000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=4) && (b<=3)))
+ return 0x87e0a8240000ll + 0x1000000ll * ((a) & 0x7) + 0x1000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=4) && (b<=3)))
+ return 0x87e0b0240000ll + 0x1000000ll * ((a) & 0x7) + 0x1000ll * ((b) & 0x3);
+ __bdk_csr_fatal("OCLAX_MATX_THRESH", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_OCLAX_MATX_THRESH(a,b) bdk_oclax_matx_thresh_t
+#define bustype_BDK_OCLAX_MATX_THRESH(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_MATX_THRESH(a,b) "OCLAX_MATX_THRESH"
+#define device_bar_BDK_OCLAX_MATX_THRESH(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_MATX_THRESH(a,b) (a)
+#define arguments_BDK_OCLAX_MATX_THRESH(a,b) (a),(b),-1,-1
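+
+/* Illustrative sketch, not part of the generated header: choose the count
+ * at which a counted-mode matcher (see OCLA()_MAT()_CTL[FSM_CTR]) asserts
+ * its FSM output. Assumes BDK_CSR_WRITE from bdk-csr.h. */
+static inline void example_oclax_set_mat_thresh(bdk_node_t node, int ocla,
+                                                int mat, uint32_t thresh)
+{
+    BDK_CSR_WRITE(node, BDK_OCLAX_MATX_THRESH(ocla, mat), thresh);
+}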
+
+/**
+ * Register (RSL) ocla#_mat#_value#
+ *
+ * OCLA Matcher Compare Value Registers
+ */
+union bdk_oclax_matx_valuex
+{
+ uint64_t u;
+ struct bdk_oclax_matx_valuex_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t mask : 36; /**< [ 35: 0](R/W/H) Data value to compare against when corresponding bits of OCLA()_MAT()_MASK()
+ are set. Value may be updated with OCLA()_FSM()_STATE()[SET_VAL]. */
+#else /* Word 0 - Little Endian */
+ uint64_t mask : 36; /**< [ 35: 0](R/W/H) Data value to compare against when corresponding bits of OCLA()_MAT()_MASK()
+ are set. Value may be updated with OCLA()_FSM()_STATE()[SET_VAL]. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_oclax_matx_valuex_s cn; */
+};
+typedef union bdk_oclax_matx_valuex bdk_oclax_matx_valuex_t;
+
+static inline uint64_t BDK_OCLAX_MATX_VALUEX(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_MATX_VALUEX(unsigned long a, unsigned long b, unsigned long c)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3) && (c<=1)))
+ return 0x87e0a8210000ll + 0x1000000ll * ((a) & 0x1) + 0x1000ll * ((b) & 0x3) + 8ll * ((c) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=2) && (b<=3) && (c<=1)))
+ return 0x87e0a8210000ll + 0x1000000ll * ((a) & 0x3) + 0x1000ll * ((b) & 0x3) + 8ll * ((c) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=4) && (b<=3) && (c<=1)))
+ return 0x87e0a8210000ll + 0x1000000ll * ((a) & 0x7) + 0x1000ll * ((b) & 0x3) + 8ll * ((c) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=4) && (b<=3) && (c<=1)))
+ return 0x87e0b0210000ll + 0x1000000ll * ((a) & 0x7) + 0x1000ll * ((b) & 0x3) + 8ll * ((c) & 0x1);
+ __bdk_csr_fatal("OCLAX_MATX_VALUEX", 3, a, b, c, 0);
+}
+
+#define typedef_BDK_OCLAX_MATX_VALUEX(a,b,c) bdk_oclax_matx_valuex_t
+#define bustype_BDK_OCLAX_MATX_VALUEX(a,b,c) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_MATX_VALUEX(a,b,c) "OCLAX_MATX_VALUEX"
+#define device_bar_BDK_OCLAX_MATX_VALUEX(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_MATX_VALUEX(a,b,c) (a)
+#define arguments_BDK_OCLAX_MATX_VALUEX(a,b,c) (a),(b),(c),-1
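+
+/* Illustrative sketch, not part of the generated header: program a matcher
+ * to match whenever debug bus bits [3:0] equal `nibble`, ignoring all other
+ * bits (MASK=0 matches regardless of data). Word index 1 covers the upper
+ * bus bits and is cleared entirely. Assumes BDK_CSR_WRITE from bdk-csr.h. */
+static inline void example_oclax_match_nibble(bdk_node_t node, int ocla,
+                                              int mat, uint64_t nibble)
+{
+    BDK_CSR_WRITE(node, BDK_OCLAX_MATX_VALUEX(ocla, mat, 0), nibble & 0xf);
+    BDK_CSR_WRITE(node, BDK_OCLAX_MATX_MASKX(ocla, mat, 0), 0xf);
+    BDK_CSR_WRITE(node, BDK_OCLAX_MATX_VALUEX(ocla, mat, 1), 0);
+    BDK_CSR_WRITE(node, BDK_OCLAX_MATX_MASKX(ocla, mat, 1), 0);
+}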
+
+/**
+ * Register (RSL) ocla#_mparid
+ *
+ * OCLA Memory Partition ID Register
+ */
+union bdk_oclax_mparid
+{
+ uint64_t u;
+ struct bdk_oclax_mparid_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t mparid : 8; /**< [ 7: 0](R/W) Memory partition ID. Only used for OCLAs inside clusters. */
+#else /* Word 0 - Little Endian */
+ uint64_t mparid : 8; /**< [ 7: 0](R/W) Memory partition ID. Only used for OCLAs inside clusters. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_oclax_mparid_s cn; */
+};
+typedef union bdk_oclax_mparid bdk_oclax_mparid_t;
+
+static inline uint64_t BDK_OCLAX_MPARID(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_MPARID(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=4))
+ return 0x87e0b00000e0ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("OCLAX_MPARID", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCLAX_MPARID(a) bdk_oclax_mparid_t
+#define bustype_BDK_OCLAX_MPARID(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_MPARID(a) "OCLAX_MPARID"
+#define device_bar_BDK_OCLAX_MPARID(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_MPARID(a) (a)
+#define arguments_BDK_OCLAX_MPARID(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocla#_msix_pba#
+ *
+ * OCLA MSI-X Pending Bit Array Registers
+ * This register is the MSI-X PBA table; the bit number is indexed by the OCLA_INT_VEC_E enumeration.
+ */
+union bdk_oclax_msix_pbax
+{
+ uint64_t u;
+ struct bdk_oclax_msix_pbax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pend : 64; /**< [ 63: 0](RO/H) Pending message for the associated OCLA()_MSIX_VEC()_CTL, enumerated by OCLA_INT_VEC_E.
+ Bits that have no associated OCLA_INT_VEC_E are 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t pend : 64; /**< [ 63: 0](RO/H) Pending message for the associated OCLA()_MSIX_VEC()_CTL, enumerated by OCLA_INT_VEC_E.
+ Bits that have no associated OCLA_INT_VEC_E are 0. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_oclax_msix_pbax_s cn; */
+};
+typedef union bdk_oclax_msix_pbax bdk_oclax_msix_pbax_t;
+
+static inline uint64_t BDK_OCLAX_MSIX_PBAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_MSIX_PBAX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b==0)))
+ return 0x87e0a8ff0000ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=2) && (b==0)))
+ return 0x87e0a8ff0000ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=4) && (b==0)))
+ return 0x87e0a8ff0000ll + 0x1000000ll * ((a) & 0x7) + 8ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=4) && (b==0)))
+ return 0x87e0b0ff0000ll + 0x1000000ll * ((a) & 0x7) + 8ll * ((b) & 0x0);
+ __bdk_csr_fatal("OCLAX_MSIX_PBAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_OCLAX_MSIX_PBAX(a,b) bdk_oclax_msix_pbax_t
+#define bustype_BDK_OCLAX_MSIX_PBAX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_MSIX_PBAX(a,b) "OCLAX_MSIX_PBAX"
+#define device_bar_BDK_OCLAX_MSIX_PBAX(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_OCLAX_MSIX_PBAX(a,b) (a)
+#define arguments_BDK_OCLAX_MSIX_PBAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) ocla#_msix_vec#_addr
+ *
+ * OCLA MSI-X Vector-Table Address Register
+ * This register is the MSI-X vector table, indexed by the OCLA_INT_VEC_E enumeration.
+ */
+union bdk_oclax_msix_vecx_addr
+{
+ uint64_t u;
+ struct bdk_oclax_msix_vecx_addr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_53_63 : 11;
+ uint64_t addr : 51; /**< [ 52: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_1 : 1;
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's OCLA()_MSIX_VEC()_ADDR, OCLA()_MSIX_VEC()_CTL, and
+ corresponding bit of OCLA()_MSIX_PBA() are RAZ/WI and do not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_OCLA()_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is set, all vectors are secure and function as if [SECVEC]
+ was set. */
+#else /* Word 0 - Little Endian */
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's OCLA()_MSIX_VEC()_ADDR, OCLA()_MSIX_VEC()_CTL, and
+ corresponding bit of OCLA()_MSIX_PBA() are RAZ/WI and do not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_OCLA()_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is set, all vectors are secure and function as if [SECVEC]
+ was set. */
+ uint64_t reserved_1 : 1;
+ uint64_t addr : 51; /**< [ 52: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_53_63 : 11;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_oclax_msix_vecx_addr_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_1 : 1;
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's OCLA()_MSIX_VEC()_ADDR, OCLA()_MSIX_VEC()_CTL, and
+ corresponding bit of OCLA()_MSIX_PBA() are RAZ/WI and do not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_OCLA()_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is set, all vectors are secure and function as if [SECVEC]
+ was set. */
+#else /* Word 0 - Little Endian */
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's OCLA()_MSIX_VEC()_ADDR, OCLA()_MSIX_VEC()_CTL, and
+ corresponding bit of OCLA()_MSIX_PBA() are RAZ/WI and do not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_OCLA()_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is set, all vectors are secure and function as if [SECVEC]
+ was set. */
+ uint64_t reserved_1 : 1;
+ uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_oclax_msix_vecx_addr_s cn9; */
+};
+typedef union bdk_oclax_msix_vecx_addr bdk_oclax_msix_vecx_addr_t;
+
+static inline uint64_t BDK_OCLAX_MSIX_VECX_ADDR(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_MSIX_VECX_ADDR(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b==0)))
+ return 0x87e0a8f00000ll + 0x1000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=2) && (b==0)))
+ return 0x87e0a8f00000ll + 0x1000000ll * ((a) & 0x3) + 0x10ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=4) && (b==0)))
+ return 0x87e0a8f00000ll + 0x1000000ll * ((a) & 0x7) + 0x10ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=4) && (b==0)))
+ return 0x87e0b0f00000ll + 0x1000000ll * ((a) & 0x7) + 0x10ll * ((b) & 0x0);
+ __bdk_csr_fatal("OCLAX_MSIX_VECX_ADDR", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_OCLAX_MSIX_VECX_ADDR(a,b) bdk_oclax_msix_vecx_addr_t
+#define bustype_BDK_OCLAX_MSIX_VECX_ADDR(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_MSIX_VECX_ADDR(a,b) "OCLAX_MSIX_VECX_ADDR"
+#define device_bar_BDK_OCLAX_MSIX_VECX_ADDR(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_OCLAX_MSIX_VECX_ADDR(a,b) (a)
+#define arguments_BDK_OCLAX_MSIX_VECX_ADDR(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) ocla#_msix_vec#_ctl
+ *
+ * OCLA MSI-X Vector-Table Control and Data Register
+ * This register is the MSI-X vector table, indexed by the OCLA_INT_VEC_E enumeration.
+ */
+union bdk_oclax_msix_vecx_ctl
+{
+ uint64_t u;
+ struct bdk_oclax_msix_vecx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
+ uint64_t data : 32; /**< [ 31: 0](R/W) Data to use for MSI-X delivery of this vector. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 32; /**< [ 31: 0](R/W) Data to use for MSI-X delivery of this vector. */
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_oclax_msix_vecx_ctl_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t data : 20; /**< [ 19: 0](R/W) Data to use for MSI-X delivery of this vector. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 20; /**< [ 19: 0](R/W) Data to use for MSI-X delivery of this vector. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_oclax_msix_vecx_ctl_s cn9; */
+};
+typedef union bdk_oclax_msix_vecx_ctl bdk_oclax_msix_vecx_ctl_t;
+
+static inline uint64_t BDK_OCLAX_MSIX_VECX_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_MSIX_VECX_CTL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b==0)))
+ return 0x87e0a8f00008ll + 0x1000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=2) && (b==0)))
+ return 0x87e0a8f00008ll + 0x1000000ll * ((a) & 0x3) + 0x10ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=4) && (b==0)))
+ return 0x87e0a8f00008ll + 0x1000000ll * ((a) & 0x7) + 0x10ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=4) && (b==0)))
+ return 0x87e0b0f00008ll + 0x1000000ll * ((a) & 0x7) + 0x10ll * ((b) & 0x0);
+ __bdk_csr_fatal("OCLAX_MSIX_VECX_CTL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_OCLAX_MSIX_VECX_CTL(a,b) bdk_oclax_msix_vecx_ctl_t
+#define bustype_BDK_OCLAX_MSIX_VECX_CTL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_MSIX_VECX_CTL(a,b) "OCLAX_MSIX_VECX_CTL"
+#define device_bar_BDK_OCLAX_MSIX_VECX_CTL(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_OCLAX_MSIX_VECX_CTL(a,b) (a)
+#define arguments_BDK_OCLAX_MSIX_VECX_CTL(a,b) (a),(b),-1,-1
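+
+/* Illustrative sketch, not part of the generated header: mask the single
+ * OCLA MSI-X vector (index 0 per OCLA_INT_VEC_E) while reprogramming it.
+ * Assumes BDK_CSR_MODIFY from bdk-csr.h. */
+static inline void example_oclax_msix_mask(bdk_node_t node, int ocla)
+{
+    BDK_CSR_MODIFY(c, node, BDK_OCLAX_MSIX_VECX_CTL(ocla, 0),
+        c.s.mask = 1); /* Suppress MSI-X delivery for this vector */
+}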
+
+/**
+ * Register (RSL) ocla#_raw#
+ *
+ * OCLA Raw Input Registers
+ */
+union bdk_oclax_rawx
+{
+ uint64_t u;
+ struct bdk_oclax_rawx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw value of debug bus input signals into OCLA. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw value of debug bus input signals into OCLA. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_oclax_rawx_s cn; */
+};
+typedef union bdk_oclax_rawx bdk_oclax_rawx_t;
+
+static inline uint64_t BDK_OCLAX_RAWX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_RAWX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=1)))
+ return 0x87e0a8000100ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=2) && (b<=1)))
+ return 0x87e0a8000100ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=4) && (b<=1)))
+ return 0x87e0a8000100ll + 0x1000000ll * ((a) & 0x7) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=4) && (b<=1)))
+ return 0x87e0b0000100ll + 0x1000000ll * ((a) & 0x7) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("OCLAX_RAWX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_OCLAX_RAWX(a,b) bdk_oclax_rawx_t
+#define bustype_BDK_OCLAX_RAWX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_RAWX(a,b) "OCLAX_RAWX"
+#define device_bar_BDK_OCLAX_RAWX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_RAWX(a,b) (a)
+#define arguments_BDK_OCLAX_RAWX(a,b) (a),(b),-1,-1
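+
+/* Illustrative sketch, not part of the generated header: snapshot both
+ * 36-bit halves of the raw debug bus input. Assumes BDK_CSR_READ from
+ * bdk-csr.h. */
+static inline void example_oclax_read_raw(bdk_node_t node, int ocla,
+                                          uint64_t raw[2])
+{
+    raw[0] = BDK_CSR_READ(node, BDK_OCLAX_RAWX(ocla, 0));
+    raw[1] = BDK_CSR_READ(node, BDK_OCLAX_RAWX(ocla, 1));
+}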
+
+/**
+ * Register (RSL) ocla#_sft_rst
+ *
+ * OCLA Reset Registers
+ */
+union bdk_oclax_sft_rst
+{
+ uint64_t u;
+ struct bdk_oclax_sft_rst_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t reset : 1; /**< [ 0: 0](R/W1) Reset. When written with one, reset OCLA excluding the RSL interface. Software
+ must wait at least 1024 coprocessor-clocks after resetting before sending any
+ other CSR read/write operations into OCLA. */
+#else /* Word 0 - Little Endian */
+ uint64_t reset : 1; /**< [ 0: 0](R/W1) Reset. When written with one, reset OCLA excluding the RSL interface. Software
+ must wait at least 1024 coprocessor-clocks after resetting before sending any
+ other CSR read/write operations into OCLA. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_oclax_sft_rst_s cn; */
+};
+typedef union bdk_oclax_sft_rst bdk_oclax_sft_rst_t;
+
+static inline uint64_t BDK_OCLAX_SFT_RST(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_SFT_RST(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0a8000020ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=2))
+ return 0x87e0a8000020ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=4))
+ return 0x87e0a8000020ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=4))
+ return 0x87e0b0000020ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("OCLAX_SFT_RST", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCLAX_SFT_RST(a) bdk_oclax_sft_rst_t
+#define bustype_BDK_OCLAX_SFT_RST(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_SFT_RST(a) "OCLAX_SFT_RST"
+#define device_bar_BDK_OCLAX_SFT_RST(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_SFT_RST(a) (a)
+#define arguments_BDK_OCLAX_SFT_RST(a) (a),-1,-1,-1
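+
+/* Illustrative sketch, not part of the generated header: soft-reset the
+ * OCLA, then pause before issuing further CSR traffic per the 1024
+ * coprocessor-clock rule above. bdk_wait_usec() is assumed from bdk-clock.h;
+ * 10 us comfortably covers 1024 clocks at any supported coprocessor-clock
+ * frequency. */
+static inline void example_oclax_soft_reset(bdk_node_t node, int ocla)
+{
+    BDK_CSR_MODIFY(c, node, BDK_OCLAX_SFT_RST(ocla),
+        c.s.reset = 1);
+    bdk_wait_usec(10);
+}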
+
+/**
+ * Register (RSL) ocla#_stack_base
+ *
+ * OCLA Stack Base Registers
+ */
+union bdk_oclax_stack_base
+{
+ uint64_t u;
+ struct bdk_oclax_stack_base_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_53_63 : 11;
+ uint64_t ptr : 46; /**< [ 52: 7](R/W) Memory address for base of overflow stack. This address must be on the local node in a
+ CCPI system.
+ This may be an IOVA or physical address; see [PA]. */
+ uint64_t reserved_2_6 : 5;
+ uint64_t sec : 1; /**< [ 1: 1](SR/W) If set, and physical addressing is used as described under [PA], the physical address
+ is in the secure world. */
+ uint64_t pa : 1; /**< [ 0: 0](R/W) When set, [PTR] and all DMA addresses are physical addresses and will not be translated by
+ the SMMU. When clear, [PTR] is a virtual address subject to SMMU translation.
+
+ Only used for OCLA(4) in the coprocessor-clock domain; for OCLA(0..3) in the core-clock
+ domains this bit is ignored, addresses are always physical. */
+#else /* Word 0 - Little Endian */
+ uint64_t pa : 1; /**< [ 0: 0](R/W) When set, [PTR] and all DMA addresses are physical addresses and will not be translated by
+ the SMMU. When clear, [PTR] is a virtual address subject to SMMU translation.
+
+ Only used for OCLA(4) in the coprocessor-clock domain; for OCLA(0..3) in the core-clock
+ domains this bit is ignored, addresses are always physical. */
+ uint64_t sec : 1; /**< [ 1: 1](SR/W) If set, and physical addressing is used as described under [PA], the physical address
+ is in the secure world. */
+ uint64_t reserved_2_6 : 5;
+ uint64_t ptr : 46; /**< [ 52: 7](R/W) Memory address for base of overflow stack. This address must be on the local node in a
+ CCPI system.
+ This may be an IOVA or physical address; see [PA]. */
+ uint64_t reserved_53_63 : 11;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_oclax_stack_base_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_53_63 : 11;
+ uint64_t ptr : 46; /**< [ 52: 7](R/W) Memory address for base of overflow stack. This address must be on the local node in a
+ CCPI system.
+ This may be an IOVA or physical address; see [PA]. */
+ uint64_t reserved_2_6 : 5;
+ uint64_t sec : 1; /**< [ 1: 1](SR/W) If set, and physical addressing is used as described under [PA], the physical address
+ is in the secure world. */
+ uint64_t pa : 1; /**< [ 0: 0](R/W) When set, [PTR] and all DMA addresses are physical addresses and will not be translated by
+ the SMMU. When clear, [PTR] is a virtual address subject to SMMU translation.
+
+ Only used for the OCLA in the coprocessor-clock domain; for OCLAs in the core-clock
+ domains this bit is ignored, addresses are always physical. */
+#else /* Word 0 - Little Endian */
+ uint64_t pa : 1; /**< [ 0: 0](R/W) When set, [PTR] and all DMA addresses are physical addresses and will not be translated by
+ the SMMU. When clear, [PTR] is a virtual address subject to SMMU translation.
+
+ Only used for the OCLA in the coprocessor-clock domain; for OCLAs in the core-clock
+ domains this bit is ignored, addresses are always physical. */
+ uint64_t sec : 1; /**< [ 1: 1](SR/W) If set, and physical addressing is used as described under [PA], the physical address
+ is in the secure world. */
+ uint64_t reserved_2_6 : 5;
+ uint64_t ptr : 46; /**< [ 52: 7](R/W) Memory address for base of overflow stack. This address must be on the local node in a
+ CCPI system.
+ This may be an IOVA or physical address; see [PA]. */
+ uint64_t reserved_53_63 : 11;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_oclax_stack_base_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t ptr : 42; /**< [ 48: 7](R/W) Memory address for base of overflow stack. This address must be on the local node in a
+ CCPI system.
+ This may be an IOVA or physical address; see [PA]. */
+ uint64_t reserved_2_6 : 5;
+ uint64_t sec : 1; /**< [ 1: 1](SR/W) If set, and physical addressing is used as described under [PA], the physical address
+ is in the secure world. */
+ uint64_t pa : 1; /**< [ 0: 0](R/W) When set, [PTR] and all DMA addresses are physical addresses and will not be translated by
+ the SMMU. When clear, [PTR] is a virtual address subject to SMMU translation.
+
+ Only used for OCLA(2) in the coprocessor-clock domain; for OCLA(0..1) in the core-clock
+ domains this bit is ignored, addresses are always physical. */
+#else /* Word 0 - Little Endian */
+ uint64_t pa : 1; /**< [ 0: 0](R/W) When set, [PTR] and all DMA addresses are physical addresses and will not be translated by
+ the SMMU. When clear, [PTR] is a virtual address subject to SMMU translation.
+
+ Only used for OCLA(2) in the coprocessor-clock domain; for OCLA(0..1) in the core-clock
+ domains this bit is ignored, addresses are always physical. */
+ uint64_t sec : 1; /**< [ 1: 1](SR/W) If set, and physical addressing is used as described under [PA], the physical address
+ is in the secure world. */
+ uint64_t reserved_2_6 : 5;
+ uint64_t ptr : 42; /**< [ 48: 7](R/W) Memory address for base of overflow stack. This address must be on the local node in a
+ CCPI system.
+ This may be an IOVA or physical address; see [PA]. */
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_oclax_stack_base_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t ptr : 42; /**< [ 48: 7](R/W) Memory address for base of overflow stack. This address must be on the local node in a
+ CCPI system.
+ This may be an IOVA or physical address; see [PA]. */
+ uint64_t reserved_2_6 : 5;
+ uint64_t sec : 1; /**< [ 1: 1](SR/W) If set, and physical addressing is used as described under [PA], the physical address
+ is in the secure world. */
+ uint64_t pa : 1; /**< [ 0: 0](R/W) When set, [PTR] and all DMA addresses are physical addresses and will not be translated by
+ the SMMU. When clear, [PTR] is a virtual address which is subject to SMMU translation.
+
+ Only used for OCLA(4) in the coprocessor-clock domain; for OCLA(0..3) in the core-clock
+ domains this bit is ignored; addresses are always physical. */
+#else /* Word 0 - Little Endian */
+ uint64_t pa : 1; /**< [ 0: 0](R/W) When set, [PTR] and all DMA addresses are physical addresses and will not be translated by
+ the SMMU. When clear, [PTR] is a virtual address which is subject to SMMU translation.
+
+ Only used for OCLA(4) in the coprocessor-clock domain; for OCLA(0..3) in the core-clock
+ domains this bit is ignored; addresses are always physical. */
+ uint64_t sec : 1; /**< [ 1: 1](SR/W) If set, and physical addressing is used as described under [PA], the physical address
+ is in the secure world. */
+ uint64_t reserved_2_6 : 5;
+ uint64_t ptr : 42; /**< [ 48: 7](R/W) Memory address for base of overflow stack. This address must be on the local node in a
+ CCPI system.
+ This may be an IOVA or physical address; see [PA]. */
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_oclax_stack_base_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t ptr : 42; /**< [ 48: 7](R/W) Memory address for base of overflow stack. This address must be on the local node in a
+ CCPI system.
+ This may be an IOVA or physical address; see [PA]. */
+ uint64_t reserved_2_6 : 5;
+ uint64_t sec : 1; /**< [ 1: 1](SR/W) If set, and physical addressing is used as described under [PA], the physical address
+ is in the secure world. */
+ uint64_t pa : 1; /**< [ 0: 0](R/W) When set, [PTR] and all DMA addresses are physical addresses and will not be translated by
+ the SMMU. When clear, [PTR] is a virtual address which is subject to SMMU translation.
+
+ Only used for the OCLA in the coprocessor-clock domain; for OCLAs in the core-clock
+ domains this bit is ignored; addresses are always physical. */
+#else /* Word 0 - Little Endian */
+ uint64_t pa : 1; /**< [ 0: 0](R/W) When set, [PTR] and all DMA addresses are physical addresses and will not be translated by
+ the SMMU. When clear, [PTR] is a virtual address which is subject to SMMU translation.
+
+ Only used for the OCLA in the coprocessor-clock domain; for OCLAs in the core-clock
+ domains this bit is ignored; addresses are always physical. */
+ uint64_t sec : 1; /**< [ 1: 1](SR/W) If set, and physical addressing is used as described under [PA], the physical address
+ is in the secure world. */
+ uint64_t reserved_2_6 : 5;
+ uint64_t ptr : 42; /**< [ 48: 7](R/W) Memory address for base of overflow stack. This address must be on the local node in a
+ CCPI system.
+ This may be an IOVA or physical address; see [PA]. */
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_oclax_stack_base bdk_oclax_stack_base_t;
+
+static inline uint64_t BDK_OCLAX_STACK_BASE(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_STACK_BASE(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0a8000400ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=2))
+ return 0x87e0a8000400ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=4))
+ return 0x87e0a8000400ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=4))
+ return 0x87e0b0000400ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("OCLAX_STACK_BASE", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCLAX_STACK_BASE(a) bdk_oclax_stack_base_t
+#define bustype_BDK_OCLAX_STACK_BASE(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_STACK_BASE(a) "OCLAX_STACK_BASE"
+#define device_bar_BDK_OCLAX_STACK_BASE(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_STACK_BASE(a) (a)
+#define arguments_BDK_OCLAX_STACK_BASE(a) (a),-1,-1,-1
+
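+/* Illustrative usage sketch (not part of the generated definitions): the
+ * typedef'd union above gives field-level access to the raw 64-bit value at
+ * the address computed by BDK_OCLAX_STACK_BASE(). A minimal sketch, assuming
+ * the BDK_CSR_READ()/BDK_CSR_WRITE() helpers declared in bdk-csr.h and a
+ * caller holding a valid node and OCLA index:
+ *
+ *   bdk_oclax_stack_base_t base;
+ *   base.u = BDK_CSR_READ(node, BDK_OCLAX_STACK_BASE(ocla));
+ *   base.s.pa  = 1;               // bypass SMMU translation (physical mode)
+ *   base.s.ptr = buf_addr >> 7;   // [PTR] holds address bits <52:7>
+ *   BDK_CSR_WRITE(node, BDK_OCLAX_STACK_BASE(ocla), base.u);
+ */
+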
+/**
+ * Register (RSL) ocla#_stack_cur
+ *
+ * OCLA Stack Current Registers
+ */
+union bdk_oclax_stack_cur
+{
+ uint64_t u;
+ struct bdk_oclax_stack_cur_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_53_63 : 11;
+ uint64_t ptr : 46; /**< [ 52: 7](R/W/H) Next address to write for overflow stack. This address must be on the local node in a
+ CCPI system. During initialization this must be between OCLA()_STACK_BASE and
+ OCLA()_STACK_TOP.
+ This may be an IOVA or physical address; see OCLA()_STACK_BASE[PA]. */
+ uint64_t reserved_0_6 : 7;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_6 : 7;
+ uint64_t ptr : 46; /**< [ 52: 7](R/W/H) Next address to write for overflow stack. This address must be on the local node in a
+ CCPI system. During initialization this must be between OCLA()_STACK_BASE and
+ OCLA()_STACK_TOP.
+ This may be an IOVA or physical address; see OCLA()_STACK_BASE[PA]. */
+ uint64_t reserved_53_63 : 11;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_oclax_stack_cur_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t ptr : 42; /**< [ 48: 7](R/W/H) Next address to write for overflow stack. This address must be on the local node in a
+ CCPI system. During initialization this must be between OCLA()_STACK_BASE and
+ OCLA()_STACK_TOP.
+ This may be an IOVA or physical address; see OCLA()_STACK_BASE[PA]. */
+ uint64_t reserved_0_6 : 7;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_6 : 7;
+ uint64_t ptr : 42; /**< [ 48: 7](R/W/H) Next address to write for overflow stack. This address must be on the local node in a
+ CCPI system. During initialization this must be between OCLA()_STACK_BASE and
+ OCLA()_STACK_TOP.
+ This may be an IOVA or physical address; see OCLA()_STACK_BASE[PA]. */
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_oclax_stack_cur_s cn9; */
+};
+typedef union bdk_oclax_stack_cur bdk_oclax_stack_cur_t;
+
+static inline uint64_t BDK_OCLAX_STACK_CUR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_STACK_CUR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0a8000480ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=2))
+ return 0x87e0a8000480ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=4))
+ return 0x87e0a8000480ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=4))
+ return 0x87e0b0000480ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("OCLAX_STACK_CUR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCLAX_STACK_CUR(a) bdk_oclax_stack_cur_t
+#define bustype_BDK_OCLAX_STACK_CUR(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_STACK_CUR(a) "OCLAX_STACK_CUR"
+#define device_bar_BDK_OCLAX_STACK_CUR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_STACK_CUR(a) (a)
+#define arguments_BDK_OCLAX_STACK_CUR(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocla#_stack_store_cnt
+ *
+ * OCLA Stack Stores Performance Counter Registers
+ */
+union bdk_oclax_stack_store_cnt
+{
+ uint64_t u;
+ struct bdk_oclax_stack_store_cnt_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t stores : 32; /**< [ 31: 0](R/W/H) Number of cache line stores sent to memory subsystem. Not cleared by hardware. */
+#else /* Word 0 - Little Endian */
+ uint64_t stores : 32; /**< [ 31: 0](R/W/H) Number of cache line stores sent to memory subsystem. Not cleared by hardware. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_oclax_stack_store_cnt_s cn; */
+};
+typedef union bdk_oclax_stack_store_cnt bdk_oclax_stack_store_cnt_t;
+
+static inline uint64_t BDK_OCLAX_STACK_STORE_CNT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_STACK_STORE_CNT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0a8000460ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=2))
+ return 0x87e0a8000460ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=4))
+ return 0x87e0a8000460ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=4))
+ return 0x87e0b0000460ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("OCLAX_STACK_STORE_CNT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCLAX_STACK_STORE_CNT(a) bdk_oclax_stack_store_cnt_t
+#define bustype_BDK_OCLAX_STACK_STORE_CNT(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_STACK_STORE_CNT(a) "OCLAX_STACK_STORE_CNT"
+#define device_bar_BDK_OCLAX_STACK_STORE_CNT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_STACK_STORE_CNT(a) (a)
+#define arguments_BDK_OCLAX_STACK_STORE_CNT(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocla#_stack_top
+ *
+ * OCLA Stack Top Address Registers
+ */
+union bdk_oclax_stack_top
+{
+ uint64_t u;
+ struct bdk_oclax_stack_top_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_53_63 : 11;
+ uint64_t ptr : 46; /**< [ 52: 7](R/W) Memory address for top of overflow stack plus one. This address must be on the local node
+ in a CCPI system.
+ This may be an IOVA or physical address; see OCLA()_STACK_BASE[PA]. */
+ uint64_t reserved_0_6 : 7;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_6 : 7;
+ uint64_t ptr : 46; /**< [ 52: 7](R/W) Memory address for top of overflow stack plus one. This address must be on the local node
+ in a CCPI system.
+ This may be an IOVA or physical address; see OCLA()_STACK_BASE[PA]. */
+ uint64_t reserved_53_63 : 11;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_oclax_stack_top_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t ptr : 42; /**< [ 48: 7](R/W) Memory address for top of overflow stack plus one. This address must be on the local node
+ in a CCPI system.
+ This may be an IOVA or physical address; see OCLA()_STACK_BASE[PA]. */
+ uint64_t reserved_0_6 : 7;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_6 : 7;
+ uint64_t ptr : 42; /**< [ 48: 7](R/W) Memory address for top of overflow stack plus one. This address must be on the local node
+ in a CCPI system.
+ This may be an IOVA or physical address; see OCLA()_STACK_BASE[PA]. */
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_oclax_stack_top_s cn9; */
+};
+typedef union bdk_oclax_stack_top bdk_oclax_stack_top_t;
+
+static inline uint64_t BDK_OCLAX_STACK_TOP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_STACK_TOP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0a8000420ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=2))
+ return 0x87e0a8000420ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=4))
+ return 0x87e0a8000420ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=4))
+ return 0x87e0b0000420ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("OCLAX_STACK_TOP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCLAX_STACK_TOP(a) bdk_oclax_stack_top_t
+#define bustype_BDK_OCLAX_STACK_TOP(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_STACK_TOP(a) "OCLAX_STACK_TOP"
+#define device_bar_BDK_OCLAX_STACK_TOP(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_STACK_TOP(a) (a)
+#define arguments_BDK_OCLAX_STACK_TOP(a) (a),-1,-1,-1
+
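+/* Illustrative sketch (not part of the generated definitions): together,
+ * OCLA()_STACK_BASE, OCLA()_STACK_CUR and OCLA()_STACK_TOP describe the DDR
+ * overflow window, with [CUR] initialized inside [BASE, TOP). Because each
+ * [PTR] field sits at its natural bit position <52:7>, an aligned byte
+ * address can be written directly; the implied-zero bits <6:0> mean 128-byte
+ * alignment. Assuming BDK_CSR_WRITE() from bdk-csr.h and a buffer on the
+ * local node:
+ *
+ *   uint64_t buf = ...;   // 128-byte-aligned base, size a multiple of 128
+ *   uint64_t len = ...;
+ *   BDK_CSR_WRITE(node, BDK_OCLAX_STACK_BASE(ocla), buf);
+ *   BDK_CSR_WRITE(node, BDK_OCLAX_STACK_CUR(ocla), buf);
+ *   BDK_CSR_WRITE(node, BDK_OCLAX_STACK_TOP(ocla), buf + len);
+ */
+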
+/**
+ * Register (RSL) ocla#_stack_wrap
+ *
+ * OCLA Stack Wrap Counter Registers
+ */
+union bdk_oclax_stack_wrap
+{
+ uint64_t u;
+ struct bdk_oclax_stack_wrap_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t wraps : 32; /**< [ 31: 0](R/W/H) Number of times stack has been reset to OCLA()_STACK_BASE since trigger. Cleared when
+ OCLA()_STATE_INT[TRIG] is clear. */
+#else /* Word 0 - Little Endian */
+ uint64_t wraps : 32; /**< [ 31: 0](R/W/H) Number of times stack has been reset to OCLA()_STACK_BASE since trigger. Cleared when
+ OCLA()_STATE_INT[TRIG] is clear. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_oclax_stack_wrap_s cn; */
+};
+typedef union bdk_oclax_stack_wrap bdk_oclax_stack_wrap_t;
+
+static inline uint64_t BDK_OCLAX_STACK_WRAP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_STACK_WRAP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0a8000440ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=2))
+ return 0x87e0a8000440ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=4))
+ return 0x87e0a8000440ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=4))
+ return 0x87e0b0000440ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("OCLAX_STACK_WRAP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCLAX_STACK_WRAP(a) bdk_oclax_stack_wrap_t
+#define bustype_BDK_OCLAX_STACK_WRAP(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_STACK_WRAP(a) "OCLAX_STACK_WRAP"
+#define device_bar_BDK_OCLAX_STACK_WRAP(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_STACK_WRAP(a) (a)
+#define arguments_BDK_OCLAX_STACK_WRAP(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocla#_stage#
+ *
+ * OCLA Input Staging Registers
+ */
+union bdk_oclax_stagex
+{
+ uint64_t u;
+ struct bdk_oclax_stagex_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t dly : 4; /**< [ 3: 0](R/W) Cycles of delay staging to apply to corresponding input bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t dly : 4; /**< [ 3: 0](R/W) Cycles of delay staging to apply to corresponding input bit. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_oclax_stagex_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_3_63 : 61;
+ uint64_t dly : 3; /**< [ 2: 0](R/W) Cycles of delay staging to apply to corresponding input bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t dly : 3; /**< [ 2: 0](R/W) Cycles of delay staging to apply to corresponding input bit. */
+ uint64_t reserved_3_63 : 61;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_oclax_stagex_s cn9; */
+};
+typedef union bdk_oclax_stagex bdk_oclax_stagex_t;
+
+static inline uint64_t BDK_OCLAX_STAGEX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_STAGEX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=71)))
+ return 0x87e0a8100000ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x7f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=2) && (b<=71)))
+ return 0x87e0a8100000ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x7f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=4) && (b<=71)))
+ return 0x87e0a8100000ll + 0x1000000ll * ((a) & 0x7) + 8ll * ((b) & 0x7f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=4) && (b<=71)))
+ return 0x87e0b0100000ll + 0x1000000ll * ((a) & 0x7) + 8ll * ((b) & 0x7f);
+ __bdk_csr_fatal("OCLAX_STAGEX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_OCLAX_STAGEX(a,b) bdk_oclax_stagex_t
+#define bustype_BDK_OCLAX_STAGEX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_STAGEX(a,b) "OCLAX_STAGEX"
+#define device_bar_BDK_OCLAX_STAGEX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_STAGEX(a,b) (a)
+#define arguments_BDK_OCLAX_STAGEX(a,b) (a),(b),-1,-1
+
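+/* Illustrative sketch (not part of the generated definitions): this register
+ * is indexed by both the OCLA instance and the input bit, so per-bit deskew
+ * is one write per input. Assuming BDK_CSR_WRITE() from bdk-csr.h:
+ *
+ *   bdk_oclax_stagex_t stage = { .u = 0 };
+ *   stage.s.dly = 2;   // delay this input bit by two cycles of staging
+ *   BDK_CSR_WRITE(node, BDK_OCLAX_STAGEX(ocla, bit), stage.u);
+ */
+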
+/**
+ * Register (RSL) ocla#_state_ena_w1c
+ *
+ * OCLA State Interrupt Enable Clear Registers
+ * This register clears interrupt enable bits.
+ */
+union bdk_oclax_state_ena_w1c
+{
+ uint64_t u;
+ struct bdk_oclax_state_ena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_19_63 : 45;
+ uint64_t ddrfull : 1; /**< [ 18: 18](R/W1C/H) Reads or clears OCLA()_STATE_ENA_W1S[DDRFULL]. */
+ uint64_t wmark : 1; /**< [ 17: 17](R/W1C/H) Reads or clears OCLA()_STATE_ENA_W1S[WMARK]. */
+ uint64_t overfull : 1; /**< [ 16: 16](R/W1C/H) Reads or clears OCLA()_STATE_ENA_W1S[OVERFULL]. */
+ uint64_t trigfull : 1; /**< [ 15: 15](R/W1C/H) Reads or clears OCLA()_STATE_ENA_W1S[TRIGFULL]. */
+ uint64_t captured : 1; /**< [ 14: 14](R/W1C/H) Reads or clears OCLA()_STATE_ENA_W1S[CAPTURED]. */
+ uint64_t fsm1_int : 1; /**< [ 13: 13](R/W1C/H) Reads or clears OCLA()_STATE_ENA_W1S[FSM1_INT]. */
+ uint64_t fsm0_int : 1; /**< [ 12: 12](R/W1C/H) Reads or clears OCLA()_STATE_ENA_W1S[FSM0_INT]. */
+ uint64_t mcd : 3; /**< [ 11: 9](R/W1C/H) Reads or clears OCLA()_STATE_ENA_W1S[MCD]. */
+ uint64_t trig : 1; /**< [ 8: 8](R/W1C/H) Reads or clears OCLA()_STATE_ENA_W1S[TRIG]. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t ovfl : 4; /**< [ 3: 0](R/W1C/H) Reads or clears OCLA()_STATE_ENA_W1S[OVFL]. */
+#else /* Word 0 - Little Endian */
+ uint64_t ovfl : 4; /**< [ 3: 0](R/W1C/H) Reads or clears OCLA()_STATE_ENA_W1S[OVFL]. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t trig : 1; /**< [ 8: 8](R/W1C/H) Reads or clears OCLA()_STATE_ENA_W1S[TRIG]. */
+ uint64_t mcd : 3; /**< [ 11: 9](R/W1C/H) Reads or clears OCLA()_STATE_ENA_W1S[MCD]. */
+ uint64_t fsm0_int : 1; /**< [ 12: 12](R/W1C/H) Reads or clears OCLA()_STATE_ENA_W1S[FSM0_INT]. */
+ uint64_t fsm1_int : 1; /**< [ 13: 13](R/W1C/H) Reads or clears OCLA()_STATE_ENA_W1S[FSM1_INT]. */
+ uint64_t captured : 1; /**< [ 14: 14](R/W1C/H) Reads or clears OCLA()_STATE_ENA_W1S[CAPTURED]. */
+ uint64_t trigfull : 1; /**< [ 15: 15](R/W1C/H) Reads or clears OCLA()_STATE_ENA_W1S[TRIGFULL]. */
+ uint64_t overfull : 1; /**< [ 16: 16](R/W1C/H) Reads or clears OCLA()_STATE_ENA_W1S[OVERFULL]. */
+ uint64_t wmark : 1; /**< [ 17: 17](R/W1C/H) Reads or clears OCLA()_STATE_ENA_W1S[WMARK]. */
+ uint64_t ddrfull : 1; /**< [ 18: 18](R/W1C/H) Reads or clears OCLA()_STATE_ENA_W1S[DDRFULL]. */
+ uint64_t reserved_19_63 : 45;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_oclax_state_ena_w1c_s cn; */
+};
+typedef union bdk_oclax_state_ena_w1c bdk_oclax_state_ena_w1c_t;
+
+static inline uint64_t BDK_OCLAX_STATE_ENA_W1C(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_STATE_ENA_W1C(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0a80000b8ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=2))
+ return 0x87e0a80000b8ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=4))
+ return 0x87e0a80000b8ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=4))
+ return 0x87e0b00000b8ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("OCLAX_STATE_ENA_W1C", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCLAX_STATE_ENA_W1C(a) bdk_oclax_state_ena_w1c_t
+#define bustype_BDK_OCLAX_STATE_ENA_W1C(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_STATE_ENA_W1C(a) "OCLAX_STATE_ENA_W1C"
+#define device_bar_BDK_OCLAX_STATE_ENA_W1C(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_STATE_ENA_W1C(a) (a)
+#define arguments_BDK_OCLAX_STATE_ENA_W1C(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocla#_state_ena_w1s
+ *
+ * OCLA State Interrupt Enable Set Registers
+ * This register sets interrupt enable bits.
+ */
+union bdk_oclax_state_ena_w1s
+{
+ uint64_t u;
+ struct bdk_oclax_state_ena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_19_63 : 45;
+ uint64_t ddrfull : 1; /**< [ 18: 18](R/W1S/H) Enables reporting of OCLA()_STATE_INT[DDRFULL]. */
+ uint64_t wmark : 1; /**< [ 17: 17](R/W1S/H) Enables reporting of OCLA()_STATE_INT[WMARK]. */
+ uint64_t overfull : 1; /**< [ 16: 16](R/W1S/H) Enables reporting of OCLA()_STATE_INT[OVERFULL]. */
+ uint64_t trigfull : 1; /**< [ 15: 15](R/W1S/H) Enables reporting of OCLA()_STATE_INT[TRIGFULL]. */
+ uint64_t captured : 1; /**< [ 14: 14](R/W1S/H) Enables reporting of OCLA()_STATE_INT[CAPTURED]. */
+ uint64_t fsm1_int : 1; /**< [ 13: 13](R/W1S/H) Enables reporting of OCLA()_STATE_INT[FSM1_INT]. */
+ uint64_t fsm0_int : 1; /**< [ 12: 12](R/W1S/H) Enables reporting of OCLA()_STATE_INT[FSM0_INT]. */
+ uint64_t mcd : 3; /**< [ 11: 9](R/W1S/H) Enables reporting of OCLA()_STATE_INT[MCD]. */
+ uint64_t trig : 1; /**< [ 8: 8](R/W1S/H) Enables reporting of OCLA()_STATE_INT[TRIG]. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t ovfl : 4; /**< [ 3: 0](R/W1S/H) Enables reporting of OCLA()_STATE_INT[OVFL]. */
+#else /* Word 0 - Little Endian */
+ uint64_t ovfl : 4; /**< [ 3: 0](R/W1S/H) Enables reporting of OCLA()_STATE_INT[OVFL]. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t trig : 1; /**< [ 8: 8](R/W1S/H) Enables reporting of OCLA()_STATE_INT[TRIG]. */
+ uint64_t mcd : 3; /**< [ 11: 9](R/W1S/H) Enables reporting of OCLA()_STATE_INT[MCD]. */
+ uint64_t fsm0_int : 1; /**< [ 12: 12](R/W1S/H) Enables reporting of OCLA()_STATE_INT[FSM0_INT]. */
+ uint64_t fsm1_int : 1; /**< [ 13: 13](R/W1S/H) Enables reporting of OCLA()_STATE_INT[FSM1_INT]. */
+ uint64_t captured : 1; /**< [ 14: 14](R/W1S/H) Enables reporting of OCLA()_STATE_INT[CAPTURED]. */
+ uint64_t trigfull : 1; /**< [ 15: 15](R/W1S/H) Enables reporting of OCLA()_STATE_INT[TRIGFULL]. */
+ uint64_t overfull : 1; /**< [ 16: 16](R/W1S/H) Enables reporting of OCLA()_STATE_INT[OVERFULL]. */
+ uint64_t wmark : 1; /**< [ 17: 17](R/W1S/H) Enables reporting of OCLA()_STATE_INT[WMARK]. */
+ uint64_t ddrfull : 1; /**< [ 18: 18](R/W1S/H) Enables reporting of OCLA()_STATE_INT[DDRFULL]. */
+ uint64_t reserved_19_63 : 45;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_oclax_state_ena_w1s_s cn; */
+};
+typedef union bdk_oclax_state_ena_w1s bdk_oclax_state_ena_w1s_t;
+
+static inline uint64_t BDK_OCLAX_STATE_ENA_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_STATE_ENA_W1S(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0a80000b0ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=2))
+ return 0x87e0a80000b0ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=4))
+ return 0x87e0a80000b0ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=4))
+ return 0x87e0b00000b0ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("OCLAX_STATE_ENA_W1S", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCLAX_STATE_ENA_W1S(a) bdk_oclax_state_ena_w1s_t
+#define bustype_BDK_OCLAX_STATE_ENA_W1S(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_STATE_ENA_W1S(a) "OCLAX_STATE_ENA_W1S"
+#define device_bar_BDK_OCLAX_STATE_ENA_W1S(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_STATE_ENA_W1S(a) (a)
+#define arguments_BDK_OCLAX_STATE_ENA_W1S(a) (a),-1,-1,-1
+
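+/* Illustrative sketch (not part of the generated definitions): the
+ * ENA_W1C/ENA_W1S pair implements write-1-to-clear / write-1-to-set enables;
+ * only bits written as 1 change, so no read-modify-write is needed. Assuming
+ * BDK_CSR_WRITE() from bdk-csr.h:
+ *
+ *   bdk_oclax_state_ena_w1s_t ena = { .u = 0 };
+ *   ena.s.trig = 1;
+ *   BDK_CSR_WRITE(node, BDK_OCLAX_STATE_ENA_W1S(ocla), ena.u);  // enable TRIG
+ *   BDK_CSR_WRITE(node, BDK_OCLAX_STATE_ENA_W1C(ocla), ena.u);  // disable TRIG
+ */
+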
+/**
+ * Register (RSL) ocla#_state_int
+ *
+ * OCLA State and Interrupt Registers
+ */
+union bdk_oclax_state_int
+{
+ uint64_t u;
+ struct bdk_oclax_state_int_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t fsm1_state : 4; /**< [ 63: 60](RO/H) FSM1 current state. */
+ uint64_t fsm0_state : 4; /**< [ 59: 56](RO/H) FSM0 current state. */
+ uint64_t reserved_36_55 : 20;
+ uint64_t fsm1_rst : 1; /**< [ 35: 35](R/W1C) FSM1 hold in state zero. Writing one to OCLA()_STATE_SET[FSM1_RST] sets this bit and
+ holds FSM1 in state zero; writing one to OCLA()_STATE_INT[FSM1_RST] removes the hold. */
+ uint64_t fsm0_rst : 1; /**< [ 34: 34](R/W1C) FSM0 hold in state zero. Writing one to OCLA()_STATE_SET[FSM0_RST] sets this bit and
+ holds FSM0 in state zero; writing one to OCLA()_STATE_INT[FSM0_RST] removes the hold. */
+ uint64_t fsm1_ena : 1; /**< [ 33: 33](R/W1C/H) FSM1 sequencing enabled. */
+ uint64_t fsm0_ena : 1; /**< [ 32: 32](R/W1C/H) FSM0 sequencing enabled. */
+ uint64_t reserved_19_31 : 13;
+ uint64_t ddrfull : 1; /**< [ 18: 18](R/W1C/H) DDR buffer wrapped. Asserted when OCLA()_STACK_CUR has wrapped and been re-initialized
+ to OCLA()_STACK_BASE. */
+ uint64_t wmark : 1; /**< [ 17: 17](R/W1C/H) Internal buffer watermark reached. Asserted when OCLA()_FIFO_DEPTH \>
+ OCLA()_FIFO_LIMIT[WMARK]. */
+ uint64_t overfull : 1; /**< [ 16: 16](R/W1C/H) Capture ended due to FIFO overflow. Asserted when OCLA()_FIFO_DEPTH \>
+ OCLA()_FIFO_LIMIT[OVERFULL]. */
+ uint64_t trigfull : 1; /**< [ 15: 15](R/W1C/H) Capture ended due to buffer full. Asserted when OCLA()_FIFO_TRIG[LIMIT] \>=
+ OCLA()_FIFO_TRIG[CNT]. */
+ uint64_t captured : 1; /**< [ 14: 14](R/W1C/H) Capture started. Asserted when the first capture is made. Informational only; often masked. */
+ uint64_t fsm1_int : 1; /**< [ 13: 13](R/W1C/H) FSM1 interrupt requested. */
+ uint64_t fsm0_int : 1; /**< [ 12: 12](R/W1C/H) FSM0 interrupt requested. */
+ uint64_t mcd : 3; /**< [ 11: 9](R/W1C/H) Multichip debug (MCD0..2) set. Asserted on MCD received from another coprocessor or code,
+ or FSM MCD request or W1S to OCLA()_STATE_SET[MCD]. */
+ uint64_t trig : 1; /**< [ 8: 8](R/W1C/H) Internal trigger set. Asserted on FSM internal trigger request or W1S to OCLA()_STATE_SET[TRIG]. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t ovfl : 4; /**< [ 3: 0](R/W1C/H) Match counter has overflowed. Asserted when OCLA()_MAT()_COUNT \>=
+ OCLA()_MAT()_THRESH. Informational only; often masked. Writing 1 clears the
+ counter, not just the interrupt. */
+#else /* Word 0 - Little Endian */
+ uint64_t ovfl : 4; /**< [ 3: 0](R/W1C/H) Match counter has overflowed. Asserted when OCLA()_MAT()_COUNT \>=
+ OCLA()_MAT()_THRESH. Informational only; often masked. Writing 1 clears the
+ counter, not just the interrupt. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t trig : 1; /**< [ 8: 8](R/W1C/H) Internal trigger set. Asserted on FSM internal trigger request or W1S to OCLA()_STATE_SET[TRIG]. */
+ uint64_t mcd : 3; /**< [ 11: 9](R/W1C/H) Multichip debug (MCD0..2) set. Asserted on MCD received from another coprocessor or code,
+ or FSM MCD request or W1S to OCLA()_STATE_SET[MCD]. */
+ uint64_t fsm0_int : 1; /**< [ 12: 12](R/W1C/H) FSM0 interrupt requested. */
+ uint64_t fsm1_int : 1; /**< [ 13: 13](R/W1C/H) FSM1 interrupt requested. */
+ uint64_t captured : 1; /**< [ 14: 14](R/W1C/H) Capture started. Asserted when the first capture is made. Informational only; often masked. */
+ uint64_t trigfull : 1; /**< [ 15: 15](R/W1C/H) Capture ended due to buffer full. Asserted when OCLA()_FIFO_TRIG[LIMIT] \>=
+ OCLA()_FIFO_TRIG[CNT]. */
+ uint64_t overfull : 1; /**< [ 16: 16](R/W1C/H) Capture ended due to FIFO overflow. Asserted when OCLA()_FIFO_DEPTH \>
+ OCLA()_FIFO_LIMIT[OVERFULL]. */
+ uint64_t wmark : 1; /**< [ 17: 17](R/W1C/H) Internal buffer watermark reached. Asserted when OCLA()_FIFO_DEPTH \>
+ OCLA()_FIFO_LIMIT[WMARK]. */
+ uint64_t ddrfull : 1; /**< [ 18: 18](R/W1C/H) DDR buffer wrapped. Asserted when OCLA()_STACK_CUR has wrapped and been re-initialized
+ to OCLA()_STACK_BASE. */
+ uint64_t reserved_19_31 : 13;
+ uint64_t fsm0_ena : 1; /**< [ 32: 32](R/W1C/H) FSM0 sequencing enabled. */
+ uint64_t fsm1_ena : 1; /**< [ 33: 33](R/W1C/H) FSM1 sequencing enabled. */
+ uint64_t fsm0_rst : 1; /**< [ 34: 34](R/W1C) FSM0 hold in state zero. Writing one to OCLA()_STATE_SET[FSM0_RST] sets this bit and
+ holds FSM0 in state zero; writing one to OCLA()_STATE_INT[FSM0_RST] removes the hold. */
+ uint64_t fsm1_rst : 1; /**< [ 35: 35](R/W1C) FSM1 hold in state zero. Writing one to OCLA()_STATE_SET[FSM1_RST] sets this bit and
+ holds FSM1 in state zero; writing one to OCLA()_STATE_INT[FSM1_RST] removes the hold. */
+ uint64_t reserved_36_55 : 20;
+ uint64_t fsm0_state : 4; /**< [ 59: 56](RO/H) FSM0 current state. */
+ uint64_t fsm1_state : 4; /**< [ 63: 60](RO/H) FSM1 current state. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_oclax_state_int_s cn; */
+};
+typedef union bdk_oclax_state_int bdk_oclax_state_int_t;
+
+static inline uint64_t BDK_OCLAX_STATE_INT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_STATE_INT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0a8000080ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=2))
+ return 0x87e0a8000080ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=4))
+ return 0x87e0a8000080ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=4))
+ return 0x87e0b0000080ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("OCLAX_STATE_INT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCLAX_STATE_INT(a) bdk_oclax_state_int_t
+#define bustype_BDK_OCLAX_STATE_INT(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_STATE_INT(a) "OCLAX_STATE_INT"
+#define device_bar_BDK_OCLAX_STATE_INT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_STATE_INT(a) (a)
+#define arguments_BDK_OCLAX_STATE_INT(a) (a),-1,-1,-1
+
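+/* Illustrative sketch (not part of the generated definitions): STATE_INT bits
+ * are R/W1C, so a handler acknowledges only the conditions it observed by
+ * writing those bits back. Assuming BDK_CSR_READ()/BDK_CSR_WRITE() from
+ * bdk-csr.h:
+ *
+ *   bdk_oclax_state_int_t st, clr = { .u = 0 };
+ *   st.u = BDK_CSR_READ(node, BDK_OCLAX_STATE_INT(ocla));
+ *   if (st.s.ddrfull)
+ *       clr.s.ddrfull = 1;   // W1C: clears DDRFULL, leaves other bits alone
+ *   BDK_CSR_WRITE(node, BDK_OCLAX_STATE_INT(ocla), clr.u);
+ */
+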
+/**
+ * Register (RSL) ocla#_state_set
+ *
+ * OCLA State Set Registers
+ * This register reads identically to OCLA()_STATE_INT, but allows R/W1S instead of R/W1C access.
+ */
+union bdk_oclax_state_set
+{
+ uint64_t u;
+ struct bdk_oclax_state_set_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t fsm1_state : 4; /**< [ 63: 60](RO/H) See OCLA()_STATE_INT[FSM1_STATE]. */
+ uint64_t fsm0_state : 4; /**< [ 59: 56](RO/H) See OCLA()_STATE_INT[FSM0_STATE]. */
+ uint64_t reserved_36_55 : 20;
+ uint64_t fsm1_rst : 1; /**< [ 35: 35](R/W1S) See OCLA()_STATE_INT[FSM1_RST]. */
+ uint64_t fsm0_rst : 1; /**< [ 34: 34](R/W1S) See OCLA()_STATE_INT[FSM0_RST]. */
+ uint64_t fsm1_ena : 1; /**< [ 33: 33](R/W1S/H) See OCLA()_STATE_INT[FSM1_ENA]. */
+ uint64_t fsm0_ena : 1; /**< [ 32: 32](R/W1S/H) See OCLA()_STATE_INT[FSM0_ENA]. */
+ uint64_t reserved_19_31 : 13;
+ uint64_t ddrfull : 1; /**< [ 18: 18](R/W1S/H) See OCLA()_STATE_INT[DDRFULL]. */
+ uint64_t wmark : 1; /**< [ 17: 17](R/W1S/H) See OCLA()_STATE_INT[WMARK]. */
+ uint64_t overfull : 1; /**< [ 16: 16](R/W1S/H) See OCLA()_STATE_INT[OVERFULL]. */
+ uint64_t trigfull : 1; /**< [ 15: 15](R/W1S/H) See OCLA()_STATE_INT[TRIGFULL]. */
+ uint64_t captured : 1; /**< [ 14: 14](R/W1S/H) See OCLA()_STATE_INT[CAPTURED]. */
+ uint64_t fsm1_int : 1; /**< [ 13: 13](R/W1S/H) See OCLA()_STATE_INT[FSM1_INT]. */
+ uint64_t fsm0_int : 1; /**< [ 12: 12](R/W1S/H) See OCLA()_STATE_INT[FSM0_INT]. */
+ uint64_t mcd : 3; /**< [ 11: 9](R/W1S/H) See OCLA()_STATE_INT[MCD]. */
+ uint64_t trig : 1; /**< [ 8: 8](R/W1S/H) See OCLA()_STATE_INT[TRIG]. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t ovfl : 4; /**< [ 3: 0](R/W1S/H) See OCLA()_STATE_INT[OVFL]. */
+#else /* Word 0 - Little Endian */
+ uint64_t ovfl : 4; /**< [ 3: 0](R/W1S/H) See OCLA()_STATE_INT[OVFL]. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t trig : 1; /**< [ 8: 8](R/W1S/H) See OCLA()_STATE_INT[TRIG]. */
+ uint64_t mcd : 3; /**< [ 11: 9](R/W1S/H) See OCLA()_STATE_INT[MCD]. */
+ uint64_t fsm0_int : 1; /**< [ 12: 12](R/W1S/H) See OCLA()_STATE_INT[FSM0_INT]. */
+ uint64_t fsm1_int : 1; /**< [ 13: 13](R/W1S/H) See OCLA()_STATE_INT[FSM1_INT]. */
+ uint64_t captured : 1; /**< [ 14: 14](R/W1S/H) See OCLA()_STATE_INT[CAPTURED]. */
+ uint64_t trigfull : 1; /**< [ 15: 15](R/W1S/H) See OCLA()_STATE_INT[TRIGFULL]. */
+ uint64_t overfull : 1; /**< [ 16: 16](R/W1S/H) See OCLA()_STATE_INT[OVERFULL]. */
+ uint64_t wmark : 1; /**< [ 17: 17](R/W1S/H) See OCLA()_STATE_INT[WMARK]. */
+ uint64_t ddrfull : 1; /**< [ 18: 18](R/W1S/H) See OCLA()_STATE_INT[DDRFULL]. */
+ uint64_t reserved_19_31 : 13;
+ uint64_t fsm0_ena : 1; /**< [ 32: 32](R/W1S/H) See OCLA()_STATE_INT[FSM0_ENA]. */
+ uint64_t fsm1_ena : 1; /**< [ 33: 33](R/W1S/H) See OCLA()_STATE_INT[FSM1_ENA]. */
+ uint64_t fsm0_rst : 1; /**< [ 34: 34](R/W1S) See OCLA()_STATE_INT[FSM0_RST]. */
+ uint64_t fsm1_rst : 1; /**< [ 35: 35](R/W1S) See OCLA()_STATE_INT[FSM1_RST]. */
+ uint64_t reserved_36_55 : 20;
+ uint64_t fsm0_state : 4; /**< [ 59: 56](RO/H) See OCLA()_STATE_INT[FSM0_STATE]. */
+ uint64_t fsm1_state : 4; /**< [ 63: 60](RO/H) See OCLA()_STATE_INT[FSM1_STATE]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_oclax_state_set_s cn; */
+};
+typedef union bdk_oclax_state_set bdk_oclax_state_set_t;
+
+static inline uint64_t BDK_OCLAX_STATE_SET(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_STATE_SET(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0a80000a0ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=2))
+ return 0x87e0a80000a0ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=4))
+ return 0x87e0a80000a0ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=4))
+ return 0x87e0b00000a0ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("OCLAX_STATE_SET", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCLAX_STATE_SET(a) bdk_oclax_state_set_t
+#define bustype_BDK_OCLAX_STATE_SET(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_STATE_SET(a) "OCLAX_STATE_SET"
+#define device_bar_BDK_OCLAX_STATE_SET(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_STATE_SET(a) (a)
+#define arguments_BDK_OCLAX_STATE_SET(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocla#_time
+ *
+ * OCLA Current Time Registers
+ */
+union bdk_oclax_time
+{
+ uint64_t u;
+ struct bdk_oclax_time_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t cycle : 64; /**< [ 63: 0](R/W/H) Current time as free running counter. Loaded into captured control packets.
+ Unconditionally clocked, independent of OCLA()_SFT_RST. */
+#else /* Word 0 - Little Endian */
+ uint64_t cycle : 64; /**< [ 63: 0](R/W/H) Current time as free running counter. Loaded into captured control packets.
+ Unconditionally clocked, independent of OCLA()_SFT_RST. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_oclax_time_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t cycle : 32; /**< [ 31: 0](R/W/H) Current time as free running counter. Loaded into captured control packets.
+ Unconditionally clocked, independent of OCLA()_SFT_RST. */
+#else /* Word 0 - Little Endian */
+ uint64_t cycle : 32; /**< [ 31: 0](R/W/H) Current time as free running counter. Loaded into captured control packets.
+ Unconditionally clocked, independent of OCLA()_SFT_RST. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ /* struct bdk_oclax_time_s cn9; */
+ /* struct bdk_oclax_time_s cn81xx; */
+ /* struct bdk_oclax_time_s cn83xx; */
+ /* struct bdk_oclax_time_s cn88xxp2; */
+};
+typedef union bdk_oclax_time bdk_oclax_time_t;
+
+static inline uint64_t BDK_OCLAX_TIME(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCLAX_TIME(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0a80000c0ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=2))
+ return 0x87e0a80000c0ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=4))
+ return 0x87e0a80000c0ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=4))
+ return 0x87e0b00000c0ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("OCLAX_TIME", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCLAX_TIME(a) bdk_oclax_time_t
+#define bustype_BDK_OCLAX_TIME(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCLAX_TIME(a) "OCLAX_TIME"
+#define device_bar_BDK_OCLAX_TIME(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCLAX_TIME(a) (a)
+#define arguments_BDK_OCLAX_TIME(a) (a),-1,-1,-1
+
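+/* Illustrative sketch (not part of the generated definitions): OCLA()_TIME is
+ * a free-running cycle counter, so elapsed time is the difference of two
+ * reads. Assuming BDK_CSR_READ() from bdk-csr.h:
+ *
+ *   uint64_t t0 = BDK_CSR_READ(node, BDK_OCLAX_TIME(ocla));
+ *   // ... capture of interest ...
+ *   uint64_t cycles = BDK_CSR_READ(node, BDK_OCLAX_TIME(ocla)) - t0;
+ */
+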
+#endif /* __BDK_CSRS_OCLA_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-ocx.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-ocx.h
new file mode 100644
index 0000000000..ce1a4c2527
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-ocx.h
@@ -0,0 +1,4951 @@
+#ifndef __BDK_CSRS_OCX_H__
+#define __BDK_CSRS_OCX_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium OCX.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Enumeration ocx_bar_e
+ *
+ * OCX Base Address Register Enumeration
+ * Enumerates the base address registers.
+ */
+#define BDK_OCX_BAR_E_OCX_PF_BAR0 (0x87e011000000ll)
+#define BDK_OCX_BAR_E_OCX_PF_BAR0_SIZE 0x800000ull
+#define BDK_OCX_BAR_E_OCX_PF_BAR4 (0x87e011f00000ll)
+#define BDK_OCX_BAR_E_OCX_PF_BAR4_SIZE 0x100000ull
+
+/**
+ * Enumeration ocx_int_vec_e
+ *
+ * OCX MSI-X Vector Enumeration
+ * Enumerates the MSI-X interrupt vectors.
+ */
+#define BDK_OCX_INT_VEC_E_COM_INTS (3)
+#define BDK_OCX_INT_VEC_E_LNK_INTSX(a) (0 + (a))
+
+/**
+ * Register (RSL) ocx_com_bist_status
+ *
+ * OCX COM Memory BIST Status Register
+ * Contains status from last memory BIST for all RX FIFO memories. BIST status for TX FIFO
+ * memories and REPLAY memories is organized by link and is located in
+ * OCX_TLK()_BIST_STATUS.
+ */
+union bdk_ocx_com_bist_status
+{
+ uint64_t u;
+ struct bdk_ocx_com_bist_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t status : 36; /**< [ 35: 0](RO/H) BIST status.
+ Internal:
+ \<35:34\> = Link 2 VC12 RX FIFOs.
+ \<33:32\> = Link 2 VC4/VC2 RX FIFOs.
+ \<31:30\> = Link 2 VC10/VC8/VC6 RX FIFOs. (Reserved in pass 2)
+ \<29:28\> = Link 1 VC12 RX FIFOs.
+ \<27:26\> = Link 1 VC4/VC2 RX FIFOs.
+ \<25:24\> = Link 1 VC10/VC8/VC6 RX FIFOs. (Reserved in pass 2)
+ \<23:22\> = Link 0 VC12 RX FIFOs.
+ \<21:20\> = Link 0 VC4/VC2 RX FIFOs.
+ \<19:18\> = Link 0 VC10/VC8/VC6 RX FIFOs. (Reserved in pass 2)
+ \<17:16\> = Link 2 VC1/VC0 RX FIFOs.
+ \<15:14\> = Link 2 VC5/VC3 RX FIFOs.
+ \<13:12\> = Link 2 VC11/VC9/VC7 RX FIFOs. (Reserved in pass 2)
+ \<11:10\> = Link 1 VC1/VC0 RX FIFOs.
+ \<9:8\> = Link 1 VC5/VC3 RX FIFOs.
+ \<7:6\> = Link 1 VC11/VC9/VC7 RX FIFOs. (Reserved in pass 2)
+ \<5:4\> = Link 0 VC1/VC0 RX FIFOs.
+ \<3:2\> = Link 0 VC5/VC3 RX FIFOs.
+ \<1:0\> = Link 0 VC11/VC9/VC7 RX FIFOs. (Reserved in pass 2) */
+#else /* Word 0 - Little Endian */
+ uint64_t status : 36; /**< [ 35: 0](RO/H) BIST status.
+ Internal:
+ \<35:34\> = Link 2 VC12 RX FIFOs.
+ \<33:32\> = Link 2 VC4/VC2 RX FIFOs.
+ \<31:30\> = Link 2 VC10/VC8/VC6 RX FIFOs. (Reserved in pass 2)
+ \<29:28\> = Link 1 VC12 RX FIFOs.
+ \<27:26\> = Link 1 VC4/VC2 RX FIFOs.
+ \<25:24\> = Link 1 VC10/VC8/VC6 RX FIFOs. (Reserved in pass 2)
+ \<23:22\> = Link 0 VC12 RX FIFOs.
+ \<21:20\> = Link 0 VC4/VC2 RX FIFOs.
+ \<19:18\> = Link 0 VC10/VC8/VC6 RX FIFOs. (Reserved in pass 2)
+ \<17:16\> = Link 2 VC1/VC0 RX FIFOs.
+ \<15:14\> = Link 2 VC5/VC3 RX FIFOs.
+ \<13:12\> = Link 2 VC11/VC9/VC7 RX FIFOs. (Reserved in pass 2)
+ \<11:10\> = Link 1 VC1/VC0 RX FIFOs.
+ \<9:8\> = Link 1 VC5/VC3 RX FIFOs.
+ \<7:6\> = Link 1 VC11/VC9/VC7 RX FIFOs. (Reserved in pass 2)
+ \<5:4\> = Link 0 VC1/VC0 RX FIFOs.
+ \<3:2\> = Link 0 VC5/VC3 RX FIFOs.
+ \<1:0\> = Link 0 VC11/VC9/VC7 RX FIFOs. (Reserved in pass 2) */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_com_bist_status_s cn; */
+};
+typedef union bdk_ocx_com_bist_status bdk_ocx_com_bist_status_t;
+
+#define BDK_OCX_COM_BIST_STATUS BDK_OCX_COM_BIST_STATUS_FUNC()
+static inline uint64_t BDK_OCX_COM_BIST_STATUS_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_COM_BIST_STATUS_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x87e0110000f0ll;
+ __bdk_csr_fatal("OCX_COM_BIST_STATUS", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_COM_BIST_STATUS bdk_ocx_com_bist_status_t
+#define bustype_BDK_OCX_COM_BIST_STATUS BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_COM_BIST_STATUS "OCX_COM_BIST_STATUS"
+#define device_bar_BDK_OCX_COM_BIST_STATUS 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_COM_BIST_STATUS 0
+#define arguments_BDK_OCX_COM_BIST_STATUS -1,-1,-1,-1
+
+/**
+ * Register (RSL) ocx_com_dual_sort
+ *
+ * OCX COM Dual Sort Register
+ */
+union bdk_ocx_com_dual_sort
+{
+ uint64_t u;
+ struct bdk_ocx_com_dual_sort_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t sort : 2; /**< [ 1: 0](R/W) Sorting procedure for multiple links to the same node:
+ 0x0 = All to lowest link number.
+ 0x1 = Split by top/bottom L2C buses (top to lowest link number).
+ 0x2 = IOC 1st, IOR 2nd, Mem VCs to either based on most room in TX FIFOs.
+ 0x3 = Illegal. */
+#else /* Word 0 - Little Endian */
+ uint64_t sort : 2; /**< [ 1: 0](R/W) Sorting procedure for multiple links to the same node:
+ 0x0 = All to lowest link number.
+ 0x1 = Split by top/bottom L2C buses (top to lowest link number).
+ 0x2 = IOC 1st, IOR 2nd, Mem VCs to either based on most room in TX FIFOs.
+ 0x3 = Illegal. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_com_dual_sort_s cn; */
+};
+typedef union bdk_ocx_com_dual_sort bdk_ocx_com_dual_sort_t;
+
+#define BDK_OCX_COM_DUAL_SORT BDK_OCX_COM_DUAL_SORT_FUNC()
+static inline uint64_t BDK_OCX_COM_DUAL_SORT_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_COM_DUAL_SORT_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x87e011000008ll;
+ __bdk_csr_fatal("OCX_COM_DUAL_SORT", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_COM_DUAL_SORT bdk_ocx_com_dual_sort_t
+#define bustype_BDK_OCX_COM_DUAL_SORT BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_COM_DUAL_SORT "OCX_COM_DUAL_SORT"
+#define device_bar_BDK_OCX_COM_DUAL_SORT 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_COM_DUAL_SORT 0
+#define arguments_BDK_OCX_COM_DUAL_SORT -1,-1,-1,-1
+
+/**
+ * Register (RSL) ocx_com_int
+ *
+ * OCX COM Interrupt Register
+ */
+union bdk_ocx_com_int
+{
+ uint64_t u;
+ struct bdk_ocx_com_int_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_55_63 : 9;
+ uint64_t io_badid : 1; /**< [ 54: 54](R/W1C/H) I/O request or response cannot be sent because a link was not found with a packet node ID
+ matching the OCX_COM_LINK(0..2)_CTL[ID] with OCX_COM_LINK(0..2)_CTL[VALID] bit set.
+ Transaction has been dropped. Should not occur during normal operation. This may indicate
+ a software/configuration failure and may be considered fatal. */
+ uint64_t mem_badid : 1; /**< [ 53: 53](R/W1C/H) Memory request or response cannot be sent because a link was not found with a packet node
+ ID matching the OCX_COM_LINK(0..2)_CTL[ID] with OCX_COM_LINK(0..2)_CTL[VALID] bit set.
+ Transaction has been dropped. Should not occur during normal operation. This may indicate
+ a software/configuration failure and may be considered fatal. */
+ uint64_t copr_badid : 1; /**< [ 52: 52](R/W1C/H) Scheduler add work or buffer pool return cannot be sent because a link was not found with
+ a node ID matching the OCX_COM_LINK(0..2)_CTL[ID] with OCX_COM_LINK(0..2)_CTL[VALID] bit set.
+ Transaction has been dropped. Should not occur during normal operation. This may indicate
+ a software/configuration failure and may be considered fatal. */
+ uint64_t win_req_badid : 1; /**< [ 51: 51](R/W1C/H) Window request specified in SLI_WIN_RD_ADDR, SLI_WIN_WR_ADDR, OCX_WIN_CMD or OCX_PP_CMD
+ cannot be sent because a link was not found with a request node ID matching the
+ OCX_COM_LINK(0..2)_CTL[ID] with OCX_COM_LINK(0..2)_CTL[VALID] bit set. Transaction has been
+ dropped. Should not occur during normal operation. This may indicate a
+ software/configuration failure and may be considered fatal. */
+ uint64_t win_req_tout : 1; /**< [ 50: 50](R/W1C/H) Window or core request was dropped because it could not be sent during the period
+ specified by OCX_WIN_TIMER. Should not occur during normal operation. This may indicate a
+ software/configuration failure and may be considered fatal. */
+ uint64_t win_req_xmit : 1; /**< [ 49: 49](R/W1C/H) Window request specified in SLI_WIN_RD_ADDR, SLI_WIN_WR_ADDR, OCX_WIN_CMD or OCX_PP_CMD
+ has been scheduled for transmission. If the command was not expecting a response, then a
+ new command may be issued. */
+ uint64_t win_rsp : 1; /**< [ 48: 48](R/W1C/H) A response to a previous window request or core request has been received. A new command
+ may be issued. */
+ uint64_t reserved_24_47 : 24;
+ uint64_t rx_lane : 24; /**< [ 23: 0](R/W1C/H) SerDes RX lane interrupt. See OCX_LNE(0..23)_INT for more information. */
+#else /* Word 0 - Little Endian */
+ uint64_t rx_lane : 24; /**< [ 23: 0](R/W1C/H) SerDes RX lane interrupt. See OCX_LNE(0..23)_INT for more information. */
+ uint64_t reserved_24_47 : 24;
+ uint64_t win_rsp : 1; /**< [ 48: 48](R/W1C/H) A response to a previous window request or core request has been received. A new command
+ may be issued. */
+ uint64_t win_req_xmit : 1; /**< [ 49: 49](R/W1C/H) Window request specified in SLI_WIN_RD_ADDR, SLI_WIN_WR_ADDR, OCX_WIN_CMD or OCX_PP_CMD
+ has been scheduled for transmission. If the command was not expecting a response, then a
+ new command may be issued. */
+ uint64_t win_req_tout : 1; /**< [ 50: 50](R/W1C/H) Window or core request was dropped because it could not be sent during the period
+ specified by OCX_WIN_TIMER. Should not occur during normal operation. This may indicate a
+ software/configuration failure and may be considered fatal. */
+ uint64_t win_req_badid : 1; /**< [ 51: 51](R/W1C/H) Window request specified in SLI_WIN_RD_ADDR, SLI_WIN_WR_ADDR, OCX_WIN_CMD or OCX_PP_CMD
+ cannot be sent because a link was not found with a request node ID matching the
+ OCX_COM_LINK(0..2)_CTL[ID] with OCX_COM_LINK(0..2)_CTL[VALID] bit set. Transaction has been
+ dropped. Should not occur during normal operation. This may indicate a
+ software/configuration failure and may be considered fatal. */
+ uint64_t copr_badid : 1; /**< [ 52: 52](R/W1C/H) Scheduler add work or buffer pool return cannot be sent because a link was not found with
+ a node ID matching the OCX_COM_LINK(0..2)_CTL[ID] with OCX_COM_LINK(0..2)_CTL[VALID] bit set.
+ Transaction has been dropped. Should not occur during normal operation. This may indicate
+ a software/configuration failure and may be considered fatal. */
+ uint64_t mem_badid : 1; /**< [ 53: 53](R/W1C/H) Memory request or response cannot be sent because a link was not found with a packet node
+ ID matching the OCX_COM_LINK(0..2)_CTL[ID] with OCX_COM_LINK(0..2)_CTL[VALID] bit set.
+ Transaction has been dropped. Should not occur during normal operation. This may indicate
+ a software/configuration failure and may be considered fatal. */
+ uint64_t io_badid : 1; /**< [ 54: 54](R/W1C/H) I/O request or response cannot be sent because a link was not found with a packet node ID
+ matching the OCX_COM_LINK(0..2)_CTL[ID] with OCX_COM_LINK(0..2)_CTL[VALID] bit set.
+ Transaction has been dropped. Should not occur during normal operation. This may indicate
+ a software/configuration failure and may be considered fatal. */
+ uint64_t reserved_55_63 : 9;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_com_int_s cn; */
+};
+typedef union bdk_ocx_com_int bdk_ocx_com_int_t;
+
+#define BDK_OCX_COM_INT BDK_OCX_COM_INT_FUNC()
+static inline uint64_t BDK_OCX_COM_INT_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_COM_INT_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x87e011000100ll;
+ __bdk_csr_fatal("OCX_COM_INT", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_COM_INT bdk_ocx_com_int_t
+#define bustype_BDK_OCX_COM_INT BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_COM_INT "OCX_COM_INT"
+#define device_bar_BDK_OCX_COM_INT 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_COM_INT 0
+#define arguments_BDK_OCX_COM_INT -1,-1,-1,-1
+
+/**
+ * Register (RSL) ocx_com_int_ena_w1c
+ *
+ * OCX COM Interrupt Enable Clear Register
+ * This register clears interrupt enable bits.
+ */
+union bdk_ocx_com_int_ena_w1c
+{
+ uint64_t u;
+ struct bdk_ocx_com_int_ena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_55_63 : 9;
+ uint64_t io_badid : 1; /**< [ 54: 54](R/W1C/H) Reads or clears enable for OCX_COM_INT[IO_BADID]. */
+ uint64_t mem_badid : 1; /**< [ 53: 53](R/W1C/H) Reads or clears enable for OCX_COM_INT[MEM_BADID]. */
+ uint64_t copr_badid : 1; /**< [ 52: 52](R/W1C/H) Reads or clears enable for OCX_COM_INT[COPR_BADID]. */
+ uint64_t win_req_badid : 1; /**< [ 51: 51](R/W1C/H) Reads or clears enable for OCX_COM_INT[WIN_REQ_BADID]. */
+ uint64_t win_req_tout : 1; /**< [ 50: 50](R/W1C/H) Reads or clears enable for OCX_COM_INT[WIN_REQ_TOUT]. */
+ uint64_t win_req_xmit : 1; /**< [ 49: 49](R/W1C/H) Reads or clears enable for OCX_COM_INT[WIN_REQ_XMIT]. */
+ uint64_t win_rsp : 1; /**< [ 48: 48](R/W1C/H) Reads or clears enable for OCX_COM_INT[WIN_RSP]. */
+ uint64_t reserved_24_47 : 24;
+ uint64_t rx_lane : 24; /**< [ 23: 0](R/W1C/H) Reads or clears enable for OCX_COM_INT[RX_LANE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rx_lane : 24; /**< [ 23: 0](R/W1C/H) Reads or clears enable for OCX_COM_INT[RX_LANE]. */
+ uint64_t reserved_24_47 : 24;
+ uint64_t win_rsp : 1; /**< [ 48: 48](R/W1C/H) Reads or clears enable for OCX_COM_INT[WIN_RSP]. */
+ uint64_t win_req_xmit : 1; /**< [ 49: 49](R/W1C/H) Reads or clears enable for OCX_COM_INT[WIN_REQ_XMIT]. */
+ uint64_t win_req_tout : 1; /**< [ 50: 50](R/W1C/H) Reads or clears enable for OCX_COM_INT[WIN_REQ_TOUT]. */
+ uint64_t win_req_badid : 1; /**< [ 51: 51](R/W1C/H) Reads or clears enable for OCX_COM_INT[WIN_REQ_BADID]. */
+ uint64_t copr_badid : 1; /**< [ 52: 52](R/W1C/H) Reads or clears enable for OCX_COM_INT[COPR_BADID]. */
+ uint64_t mem_badid : 1; /**< [ 53: 53](R/W1C/H) Reads or clears enable for OCX_COM_INT[MEM_BADID]. */
+ uint64_t io_badid : 1; /**< [ 54: 54](R/W1C/H) Reads or clears enable for OCX_COM_INT[IO_BADID]. */
+ uint64_t reserved_55_63 : 9;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_com_int_ena_w1c_s cn; */
+};
+typedef union bdk_ocx_com_int_ena_w1c bdk_ocx_com_int_ena_w1c_t;
+
+#define BDK_OCX_COM_INT_ENA_W1C BDK_OCX_COM_INT_ENA_W1C_FUNC()
+static inline uint64_t BDK_OCX_COM_INT_ENA_W1C_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_COM_INT_ENA_W1C_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x87e011000118ll;
+ __bdk_csr_fatal("OCX_COM_INT_ENA_W1C", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_COM_INT_ENA_W1C bdk_ocx_com_int_ena_w1c_t
+#define bustype_BDK_OCX_COM_INT_ENA_W1C BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_COM_INT_ENA_W1C "OCX_COM_INT_ENA_W1C"
+#define device_bar_BDK_OCX_COM_INT_ENA_W1C 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_COM_INT_ENA_W1C 0
+#define arguments_BDK_OCX_COM_INT_ENA_W1C -1,-1,-1,-1
+
+/**
+ * Register (RSL) ocx_com_int_ena_w1s
+ *
+ * OCX COM Interrupt Enable Set Register
+ * This register sets interrupt enable bits.
+ */
+union bdk_ocx_com_int_ena_w1s
+{
+ uint64_t u;
+ struct bdk_ocx_com_int_ena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_55_63 : 9;
+ uint64_t io_badid : 1; /**< [ 54: 54](R/W1S/H) Reads or sets enable for OCX_COM_INT[IO_BADID]. */
+ uint64_t mem_badid : 1; /**< [ 53: 53](R/W1S/H) Reads or sets enable for OCX_COM_INT[MEM_BADID]. */
+ uint64_t copr_badid : 1; /**< [ 52: 52](R/W1S/H) Reads or sets enable for OCX_COM_INT[COPR_BADID]. */
+ uint64_t win_req_badid : 1; /**< [ 51: 51](R/W1S/H) Reads or sets enable for OCX_COM_INT[WIN_REQ_BADID]. */
+ uint64_t win_req_tout : 1; /**< [ 50: 50](R/W1S/H) Reads or sets enable for OCX_COM_INT[WIN_REQ_TOUT]. */
+ uint64_t win_req_xmit : 1; /**< [ 49: 49](R/W1S/H) Reads or sets enable for OCX_COM_INT[WIN_REQ_XMIT]. */
+ uint64_t win_rsp : 1; /**< [ 48: 48](R/W1S/H) Reads or sets enable for OCX_COM_INT[WIN_RSP]. */
+ uint64_t reserved_24_47 : 24;
+ uint64_t rx_lane : 24; /**< [ 23: 0](R/W1S/H) Reads or sets enable for OCX_COM_INT[RX_LANE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rx_lane : 24; /**< [ 23: 0](R/W1S/H) Reads or sets enable for OCX_COM_INT[RX_LANE]. */
+ uint64_t reserved_24_47 : 24;
+ uint64_t win_rsp : 1; /**< [ 48: 48](R/W1S/H) Reads or sets enable for OCX_COM_INT[WIN_RSP]. */
+ uint64_t win_req_xmit : 1; /**< [ 49: 49](R/W1S/H) Reads or sets enable for OCX_COM_INT[WIN_REQ_XMIT]. */
+ uint64_t win_req_tout : 1; /**< [ 50: 50](R/W1S/H) Reads or sets enable for OCX_COM_INT[WIN_REQ_TOUT]. */
+ uint64_t win_req_badid : 1; /**< [ 51: 51](R/W1S/H) Reads or sets enable for OCX_COM_INT[WIN_REQ_BADID]. */
+ uint64_t copr_badid : 1; /**< [ 52: 52](R/W1S/H) Reads or sets enable for OCX_COM_INT[COPR_BADID]. */
+ uint64_t mem_badid : 1; /**< [ 53: 53](R/W1S/H) Reads or sets enable for OCX_COM_INT[MEM_BADID]. */
+ uint64_t io_badid : 1; /**< [ 54: 54](R/W1S/H) Reads or sets enable for OCX_COM_INT[IO_BADID]. */
+ uint64_t reserved_55_63 : 9;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_com_int_ena_w1s_s cn; */
+};
+typedef union bdk_ocx_com_int_ena_w1s bdk_ocx_com_int_ena_w1s_t;
+
+#define BDK_OCX_COM_INT_ENA_W1S BDK_OCX_COM_INT_ENA_W1S_FUNC()
+static inline uint64_t BDK_OCX_COM_INT_ENA_W1S_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_COM_INT_ENA_W1S_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x87e011000110ll;
+ __bdk_csr_fatal("OCX_COM_INT_ENA_W1S", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_COM_INT_ENA_W1S bdk_ocx_com_int_ena_w1s_t
+#define bustype_BDK_OCX_COM_INT_ENA_W1S BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_COM_INT_ENA_W1S "OCX_COM_INT_ENA_W1S"
+#define device_bar_BDK_OCX_COM_INT_ENA_W1S 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_COM_INT_ENA_W1S 0
+#define arguments_BDK_OCX_COM_INT_ENA_W1S -1,-1,-1,-1
+
+/**
+ * Register (RSL) ocx_com_int_w1s
+ *
+ * OCX COM Interrupt Set Register
+ * This register sets interrupt bits.
+ */
+union bdk_ocx_com_int_w1s
+{
+ uint64_t u;
+ struct bdk_ocx_com_int_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_55_63 : 9;
+ uint64_t io_badid : 1; /**< [ 54: 54](R/W1S/H) Reads or sets OCX_COM_INT[IO_BADID]. */
+ uint64_t mem_badid : 1; /**< [ 53: 53](R/W1S/H) Reads or sets OCX_COM_INT[MEM_BADID]. */
+ uint64_t copr_badid : 1; /**< [ 52: 52](R/W1S/H) Reads or sets OCX_COM_INT[COPR_BADID]. */
+ uint64_t win_req_badid : 1; /**< [ 51: 51](R/W1S/H) Reads or sets OCX_COM_INT[WIN_REQ_BADID]. */
+ uint64_t win_req_tout : 1; /**< [ 50: 50](R/W1S/H) Reads or sets OCX_COM_INT[WIN_REQ_TOUT]. */
+ uint64_t win_req_xmit : 1; /**< [ 49: 49](R/W1S/H) Reads or sets OCX_COM_INT[WIN_REQ_XMIT]. */
+ uint64_t win_rsp : 1; /**< [ 48: 48](R/W1S/H) Reads or sets OCX_COM_INT[WIN_RSP]. */
+ uint64_t reserved_24_47 : 24;
+ uint64_t rx_lane : 24; /**< [ 23: 0](R/W1S/H) Reads or sets OCX_COM_INT[RX_LANE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rx_lane : 24; /**< [ 23: 0](R/W1S/H) Reads or sets OCX_COM_INT[RX_LANE]. */
+ uint64_t reserved_24_47 : 24;
+ uint64_t win_rsp : 1; /**< [ 48: 48](R/W1S/H) Reads or sets OCX_COM_INT[WIN_RSP]. */
+ uint64_t win_req_xmit : 1; /**< [ 49: 49](R/W1S/H) Reads or sets OCX_COM_INT[WIN_REQ_XMIT]. */
+ uint64_t win_req_tout : 1; /**< [ 50: 50](R/W1S/H) Reads or sets OCX_COM_INT[WIN_REQ_TOUT]. */
+ uint64_t win_req_badid : 1; /**< [ 51: 51](R/W1S/H) Reads or sets OCX_COM_INT[WIN_REQ_BADID]. */
+ uint64_t copr_badid : 1; /**< [ 52: 52](R/W1S/H) Reads or sets OCX_COM_INT[COPR_BADID]. */
+ uint64_t mem_badid : 1; /**< [ 53: 53](R/W1S/H) Reads or sets OCX_COM_INT[MEM_BADID]. */
+ uint64_t io_badid : 1; /**< [ 54: 54](R/W1S/H) Reads or sets OCX_COM_INT[IO_BADID]. */
+ uint64_t reserved_55_63 : 9;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_com_int_w1s_s cn; */
+};
+typedef union bdk_ocx_com_int_w1s bdk_ocx_com_int_w1s_t;
+
+#define BDK_OCX_COM_INT_W1S BDK_OCX_COM_INT_W1S_FUNC()
+static inline uint64_t BDK_OCX_COM_INT_W1S_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_COM_INT_W1S_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x87e011000108ll;
+ __bdk_csr_fatal("OCX_COM_INT_W1S", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_COM_INT_W1S bdk_ocx_com_int_w1s_t
+#define bustype_BDK_OCX_COM_INT_W1S BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_COM_INT_W1S "OCX_COM_INT_W1S"
+#define device_bar_BDK_OCX_COM_INT_W1S 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_COM_INT_W1S 0
+#define arguments_BDK_OCX_COM_INT_W1S -1,-1,-1,-1
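+
+/* Usage sketch (illustrative): injecting an interrupt for test purposes via
+ * the W1S register above, assuming BDK_CSR_WRITE from bdk-csr.h. Setting a
+ * bit raises the corresponding OCX_COM_INT bit as if the event had occurred,
+ * which is handy for exercising interrupt handlers during bringup. */
+static inline void example_ocx_inject_win_rsp_irq(bdk_node_t node)
+{
+    bdk_ocx_com_int_w1s_t w1s = { .u = 0 };
+    w1s.s.win_rsp = 1; /* Force OCX_COM_INT[WIN_RSP] to assert */
+    BDK_CSR_WRITE(node, BDK_OCX_COM_INT_W1S, w1s.u);
+}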
+
+/**
+ * Register (RSL) ocx_com_link#_ctl
+ *
+ * OCX COM Link Control Registers
+ * This register controls link operations. In addition, a combination of these
+ * conditions is used to generate the link_down status used by L2C_OCI_CTL[SHTOEN] and
+ * as a reset condition controlled by RST_OCX[RST_LINK]. This link_down status is true when one
+ * of the following occurs:
+ *
+ * * Link is not initialized (see description of [UP]).
+ * * Retry counter expired (see OCX_COM_LINK_TIMER and OCX_COM_LINK()_INT[STOP]).
+ * * A REINIT request is received from the link partner (see description of [REINIT]).
+ * * Detected uncorrectable ECC error while reading the transmit FIFOs (see
+ * OCX_COM_LINK(0..2)_INT[TXFIFO_DBE]).
+ * * Detected uncorrectable ECC error while reading the replay buffer (see
+ * OCX_COM_LINK(0..2)_INT[REPLAY_DBE]).
+ */
+union bdk_ocx_com_linkx_ctl
+{
+ uint64_t u;
+ struct bdk_ocx_com_linkx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_10_63 : 54;
+ uint64_t cclk_dis : 1; /**< [ 9: 9](R/W) Reserved.
+ Internal:
+ Disable conditional clocking. Set to force link clocks on
+ unconditionally. */
+ uint64_t loopback : 1; /**< [ 8: 8](R/W) Reserved.
+ Internal:
+ Diagnostic data loopback. Set to force outgoing link to inbound port.
+ All data and link credits are returned and appear to come from link partner. Typically
+ SerDes should be disabled during this operation. */
+ uint64_t reinit : 1; /**< [ 7: 7](R/W) Reinitialize link. Setting this bit forces link back into init state and sets [DROP].
+ Setting the bit also causes the link to transmit a REINIT request to the link partner.
+ This bit must be cleared for link to operate normally. */
+ uint64_t reserved_6 : 1;
+ uint64_t auto_clr : 1; /**< [ 5: 5](R/W) When set, automatically clears the local DROP bit if link partner forces
+ a reinitialization. Typically disabled once software is running.
+ If clear, software must manage clearing [DROP] after it has verified
+ that any pending transactions have timed out. */
+ uint64_t drop : 1; /**< [ 4: 4](R/W/H) Drop all requests on given link. Typically set by hardware when link has failed or been
+ reinitialized. Cleared by software once pending link traffic is removed. (See
+ OCX_TLK(0..2)_FIFO(0..13)_CNT.) */
+ uint64_t up : 1; /**< [ 3: 3](RO/H) Link is operating normally and exchanging control information. */
+ uint64_t valid : 1; /**< [ 2: 2](RO/H) Link has valid lanes and is exchanging information. This bit will never be set if
+ OCX_LNK(0..2)_CFG[QLM_SELECT] is zero. */
+ uint64_t id : 2; /**< [ 1: 0](R/W) This ID is used to sort traffic by link. If more than one link has the same value, the
+ OCX_COM_DUAL_SORT[SORT] field and traffic VC are used to choose a link. This field is only
+ reset during a cold reset to an arbitrary value to avoid conflicts with the
+ OCX_COM_NODE[ID] field and should be configured by software before memory traffic is
+ generated. */
+#else /* Word 0 - Little Endian */
+ uint64_t id : 2; /**< [ 1: 0](R/W) This ID is used to sort traffic by link. If more than one link has the same value, the
+ OCX_COM_DUAL_SORT[SORT] field and traffic VC are used to choose a link. This field is only
+ reset during a cold reset to an arbitrary value to avoid conflicts with the
+ OCX_COM_NODE[ID] field and should be configured by software before memory traffic is
+ generated. */
+ uint64_t valid : 1; /**< [ 2: 2](RO/H) Link has valid lanes and is exchanging information. This bit will never be set if
+ OCX_LNK(0..2)_CFG[QLM_SELECT] is zero. */
+ uint64_t up : 1; /**< [ 3: 3](RO/H) Link is operating normally and exchanging control information. */
+ uint64_t drop : 1; /**< [ 4: 4](R/W/H) Drop all requests on given link. Typically set by hardware when link has failed or been
+ reinitialized. Cleared by software once pending link traffic is removed. (See
+ OCX_TLK(0..2)_FIFO(0..13)_CNT.) */
+ uint64_t auto_clr : 1; /**< [ 5: 5](R/W) When set, automatically clears the local DROP bit if link partner forces
+ a reinitialization. Typically disabled once software is running.
+ If clear, software must manage clearing [DROP] after it has verified
+ that any pending transactions have timed out. */
+ uint64_t reserved_6 : 1;
+ uint64_t reinit : 1; /**< [ 7: 7](R/W) Reinitialize link. Setting this bit forces link back into init state and sets [DROP].
+ Setting the bit also causes the link to transmit a REINIT request to the link partner.
+ This bit must be cleared for link to operate normally. */
+ uint64_t loopback : 1; /**< [ 8: 8](R/W) Reserved.
+ Internal:
+ Diagnostic data loopback. Set to force outgoing link to inbound port.
+ All data and link credits are returned and appear to come from link partner. Typically
+ SerDes should be disabled during this operation. */
+ uint64_t cclk_dis : 1; /**< [ 9: 9](R/W) Reserved.
+ Internal:
+ Disable conditional clocking. Set to force link clocks on
+ unconditionally. */
+ uint64_t reserved_10_63 : 54;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_com_linkx_ctl_s cn; */
+};
+typedef union bdk_ocx_com_linkx_ctl bdk_ocx_com_linkx_ctl_t;
+
+static inline uint64_t BDK_OCX_COM_LINKX_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_COM_LINKX_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=2))
+ return 0x87e011000020ll + 8ll * ((a) & 0x3);
+ __bdk_csr_fatal("OCX_COM_LINKX_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_COM_LINKX_CTL(a) bdk_ocx_com_linkx_ctl_t
+#define bustype_BDK_OCX_COM_LINKX_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_COM_LINKX_CTL(a) "OCX_COM_LINKX_CTL"
+#define device_bar_BDK_OCX_COM_LINKX_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_COM_LINKX_CTL(a) (a)
+#define arguments_BDK_OCX_COM_LINKX_CTL(a) (a),-1,-1,-1
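+
+/* Usage sketch (illustrative): the reinit/drop sequence described above,
+ * assuming the BDK_CSR_MODIFY and BDK_CSR_WAIT_FOR_FIELD accessors from
+ * bdk-csr.h; the helper name and one-second timeout are arbitrary. Setting
+ * [REINIT] forces the link back to the init state and sets [DROP]; [REINIT]
+ * must be cleared again for the link to train, and software clears [DROP]
+ * once pending traffic has drained (see OCX_TLK(0..2)_FIFO(0..13)_CNT). */
+static inline int example_ocx_link_reinit(bdk_node_t node, int link)
+{
+    BDK_CSR_MODIFY(c, node, BDK_OCX_COM_LINKX_CTL(link), c.s.reinit = 1);
+    BDK_CSR_MODIFY(c, node, BDK_OCX_COM_LINKX_CTL(link), c.s.reinit = 0);
+    /* Wait for [UP]; BDK_CSR_WAIT_FOR_FIELD returns -1 on timeout. */
+    if (BDK_CSR_WAIT_FOR_FIELD(node, BDK_OCX_COM_LINKX_CTL(link), up, ==, 1, 1000000))
+        return -1;
+    /* After verifying pending transactions have drained (elided here for
+       brevity), clear [DROP] so requests flow again. */
+    BDK_CSR_MODIFY(c, node, BDK_OCX_COM_LINKX_CTL(link), c.s.drop = 0);
+    return 0;
+}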
+
+/**
+ * Register (RSL) ocx_com_link#_int
+ *
+ * OCX COM Link Interrupt Register
+ */
+union bdk_ocx_com_linkx_int
+{
+ uint64_t u;
+ struct bdk_ocx_com_linkx_int_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t bad_word : 1; /**< [ 13: 13](R/W1C/H) Illegal word decoded on at least one lane of link. These receive errors may occur during
+ normal operation, and may likely occur during link bringup. Hardware normally will
+ automatically correct the error. Software may choose to count the number of these errors. */
+ uint64_t align_fail : 1; /**< [ 12: 12](R/W1C/H) Link lanes failed to align. These receive errors may occur during normal operation, and
+ may likely occur during link bringup. Hardware normally will automatically correct the
+ error. Software may choose to count the number of these errors. */
+ uint64_t align_done : 1; /**< [ 11: 11](R/W1C/H) Link lane alignment is complete. These may occur during normal operation, and will occur
+ during link bringup. Software should disable reception of these interrupts during normal
+ operation. */
+ uint64_t up : 1; /**< [ 10: 10](R/W1C/H) Link initialization is complete and is ready to pass traffic. Note: This state occurs
+ some time after the link starts exchanging information as indicated in
+ OCX_COM_LINK(0..2)_CTL[UP]. These should not occur during normal operation. */
+ uint64_t stop : 1; /**< [ 9: 9](R/W1C/H) Link has stopped operating. Link retry count has reached threshold specified in
+ OCX_COM_LINK_TIMER; outgoing traffic has been dropped and an initialization request has
+ been reissued. These should not occur during normal operation. This may be considered
+ fatal. */
+ uint64_t blk_err : 1; /**< [ 8: 8](R/W1C/H) Link block error count has reached threshold specified in OCX_RLK(0..2)_BLK_ERR[LIMIT].
+ These receive errors may occur during normal operation. Hardware normally will
+ automatically correct the error. Software may choose to count the number of these errors. */
+    uint64_t reinit                : 1;  /**< [  7:  7](R/W1C/H) Link has received an initialization request from the link partner after the link has been
+                                                                 established. These should not occur during normal operation. */
+    uint64_t lnk_data              : 1;  /**< [  6:  6](R/W1C/H) Set by hardware when a link data block is received in OCX_RLK(0..2)_LNK_DATA. It
+                                                                 is software's responsibility to clear the bit after reading the data. */
+ uint64_t rxfifo_dbe : 1; /**< [ 5: 5](R/W1C/H) Double-bit error detected in FIFO RAMs. This error may be considered fatal. */
+ uint64_t rxfifo_sbe : 1; /**< [ 4: 4](R/W1C/H) Single-bit error detected/corrected in FIFO RAMs. Hardware automatically corrected the
+ error. Software may choose to count the number of these single-bit errors. */
+ uint64_t txfifo_dbe : 1; /**< [ 3: 3](R/W1C/H) Double-bit error detected in TX FIFO RAMs. This error may be considered fatal. */
+ uint64_t txfifo_sbe : 1; /**< [ 2: 2](R/W1C/H) Single-bit error detected/corrected in TX FIFO RAMs. Hardware automatically corrected the
+ error. Software may choose to count the number of these single-bit errors. */
+ uint64_t replay_dbe : 1; /**< [ 1: 1](R/W1C/H) Double-bit error detected in REPLAY BUFFER RAMs. This error may be considered fatal. */
+ uint64_t replay_sbe : 1; /**< [ 0: 0](R/W1C/H) Single-bit error detected/corrected in REPLAY BUFFER RAMs. Hardware automatically
+ corrected the error. Software may choose to count the number of these single-bit errors. */
+#else /* Word 0 - Little Endian */
+ uint64_t replay_sbe : 1; /**< [ 0: 0](R/W1C/H) Single-bit error detected/corrected in REPLAY BUFFER RAMs. Hardware automatically
+ corrected the error. Software may choose to count the number of these single-bit errors. */
+ uint64_t replay_dbe : 1; /**< [ 1: 1](R/W1C/H) Double-bit error detected in REPLAY BUFFER RAMs. This error may be considered fatal. */
+ uint64_t txfifo_sbe : 1; /**< [ 2: 2](R/W1C/H) Single-bit error detected/corrected in TX FIFO RAMs. Hardware automatically corrected the
+ error. Software may choose to count the number of these single-bit errors. */
+ uint64_t txfifo_dbe : 1; /**< [ 3: 3](R/W1C/H) Double-bit error detected in TX FIFO RAMs. This error may be considered fatal. */
+ uint64_t rxfifo_sbe : 1; /**< [ 4: 4](R/W1C/H) Single-bit error detected/corrected in FIFO RAMs. Hardware automatically corrected the
+ error. Software may choose to count the number of these single-bit errors. */
+ uint64_t rxfifo_dbe : 1; /**< [ 5: 5](R/W1C/H) Double-bit error detected in FIFO RAMs. This error may be considered fatal. */
+    uint64_t lnk_data              : 1;  /**< [  6:  6](R/W1C/H) Set by hardware when a link data block is received in OCX_RLK(0..2)_LNK_DATA. It
+                                                                 is software's responsibility to clear the bit after reading the data. */
+    uint64_t reinit                : 1;  /**< [  7:  7](R/W1C/H) Link has received an initialization request from the link partner after the link has been
+                                                                 established. These should not occur during normal operation. */
+ uint64_t blk_err : 1; /**< [ 8: 8](R/W1C/H) Link block error count has reached threshold specified in OCX_RLK(0..2)_BLK_ERR[LIMIT].
+ These receive errors may occur during normal operation. Hardware normally will
+ automatically correct the error. Software may choose to count the number of these errors. */
+ uint64_t stop : 1; /**< [ 9: 9](R/W1C/H) Link has stopped operating. Link retry count has reached threshold specified in
+ OCX_COM_LINK_TIMER; outgoing traffic has been dropped and an initialization request has
+ been reissued. These should not occur during normal operation. This may be considered
+ fatal. */
+ uint64_t up : 1; /**< [ 10: 10](R/W1C/H) Link initialization is complete and is ready to pass traffic. Note: This state occurs
+ some time after the link starts exchanging information as indicated in
+ OCX_COM_LINK(0..2)_CTL[UP]. These should not occur during normal operation. */
+ uint64_t align_done : 1; /**< [ 11: 11](R/W1C/H) Link lane alignment is complete. These may occur during normal operation, and will occur
+ during link bringup. Software should disable reception of these interrupts during normal
+ operation. */
+ uint64_t align_fail : 1; /**< [ 12: 12](R/W1C/H) Link lanes failed to align. These receive errors may occur during normal operation, and
+ may likely occur during link bringup. Hardware normally will automatically correct the
+ error. Software may choose to count the number of these errors. */
+ uint64_t bad_word : 1; /**< [ 13: 13](R/W1C/H) Illegal word decoded on at least one lane of link. These receive errors may occur during
+ normal operation, and may likely occur during link bringup. Hardware normally will
+ automatically correct the error. Software may choose to count the number of these errors. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_com_linkx_int_s cn; */
+};
+typedef union bdk_ocx_com_linkx_int bdk_ocx_com_linkx_int_t;
+
+static inline uint64_t BDK_OCX_COM_LINKX_INT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_COM_LINKX_INT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=2))
+ return 0x87e011000120ll + 8ll * ((a) & 0x3);
+ __bdk_csr_fatal("OCX_COM_LINKX_INT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_COM_LINKX_INT(a) bdk_ocx_com_linkx_int_t
+#define bustype_BDK_OCX_COM_LINKX_INT(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_COM_LINKX_INT(a) "OCX_COM_LINKX_INT"
+#define device_bar_BDK_OCX_COM_LINKX_INT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_COM_LINKX_INT(a) (a)
+#define arguments_BDK_OCX_COM_LINKX_INT(a) (a),-1,-1,-1
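+
+/* Usage sketch (illustrative): servicing the link interrupt register above,
+ * assuming BDK_CSR_READ/BDK_CSR_WRITE from bdk-csr.h; the fatal-error policy
+ * is only an example. All fields are W1C, so writing back the value just read
+ * acknowledges exactly the events that were observed. */
+static inline int example_ocx_link_irq_service(bdk_node_t node, int link)
+{
+    bdk_ocx_com_linkx_int_t isr;
+    isr.u = BDK_CSR_READ(node, BDK_OCX_COM_LINKX_INT(link));
+    /* Per the field descriptions, double-bit ECC errors may be fatal. */
+    int fatal = isr.s.rxfifo_dbe | isr.s.txfifo_dbe | isr.s.replay_dbe;
+    BDK_CSR_WRITE(node, BDK_OCX_COM_LINKX_INT(link), isr.u); /* W1C ack */
+    return fatal;
+}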
+
+/**
+ * Register (RSL) ocx_com_link#_int_ena_w1c
+ *
+ * OCX COM Link Interrupt Enable Clear Register
+ * This register clears interrupt enable bits.
+ */
+union bdk_ocx_com_linkx_int_ena_w1c
+{
+ uint64_t u;
+ struct bdk_ocx_com_linkx_int_ena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t bad_word : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for OCX_COM_LINK(0..2)_INT[BAD_WORD]. */
+ uint64_t align_fail : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for OCX_COM_LINK(0..2)_INT[ALIGN_FAIL]. */
+ uint64_t align_done : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for OCX_COM_LINK(0..2)_INT[ALIGN_DONE]. */
+ uint64_t up : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for OCX_COM_LINK(0..2)_INT[UP]. */
+ uint64_t stop : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for OCX_COM_LINK(0..2)_INT[STOP]. */
+ uint64_t blk_err : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for OCX_COM_LINK(0..2)_INT[BLK_ERR]. */
+ uint64_t reinit : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for OCX_COM_LINK(0..2)_INT[REINIT]. */
+ uint64_t lnk_data : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for OCX_COM_LINK(0..2)_INT[LNK_DATA]. */
+ uint64_t rxfifo_dbe : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for OCX_COM_LINK(0..2)_INT[RXFIFO_DBE]. */
+ uint64_t rxfifo_sbe : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for OCX_COM_LINK(0..2)_INT[RXFIFO_SBE]. */
+ uint64_t txfifo_dbe : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for OCX_COM_LINK(0..2)_INT[TXFIFO_DBE]. */
+ uint64_t txfifo_sbe : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for OCX_COM_LINK(0..2)_INT[TXFIFO_SBE]. */
+ uint64_t replay_dbe : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for OCX_COM_LINK(0..2)_INT[REPLAY_DBE]. */
+ uint64_t replay_sbe : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for OCX_COM_LINK(0..2)_INT[REPLAY_SBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t replay_sbe : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for OCX_COM_LINK(0..2)_INT[REPLAY_SBE]. */
+ uint64_t replay_dbe : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for OCX_COM_LINK(0..2)_INT[REPLAY_DBE]. */
+ uint64_t txfifo_sbe : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for OCX_COM_LINK(0..2)_INT[TXFIFO_SBE]. */
+ uint64_t txfifo_dbe : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for OCX_COM_LINK(0..2)_INT[TXFIFO_DBE]. */
+ uint64_t rxfifo_sbe : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for OCX_COM_LINK(0..2)_INT[RXFIFO_SBE]. */
+ uint64_t rxfifo_dbe : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for OCX_COM_LINK(0..2)_INT[RXFIFO_DBE]. */
+ uint64_t lnk_data : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for OCX_COM_LINK(0..2)_INT[LNK_DATA]. */
+ uint64_t reinit : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for OCX_COM_LINK(0..2)_INT[REINIT]. */
+ uint64_t blk_err : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for OCX_COM_LINK(0..2)_INT[BLK_ERR]. */
+ uint64_t stop : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for OCX_COM_LINK(0..2)_INT[STOP]. */
+ uint64_t up : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for OCX_COM_LINK(0..2)_INT[UP]. */
+ uint64_t align_done : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for OCX_COM_LINK(0..2)_INT[ALIGN_DONE]. */
+ uint64_t align_fail : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for OCX_COM_LINK(0..2)_INT[ALIGN_FAIL]. */
+ uint64_t bad_word : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for OCX_COM_LINK(0..2)_INT[BAD_WORD]. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_com_linkx_int_ena_w1c_s cn; */
+};
+typedef union bdk_ocx_com_linkx_int_ena_w1c bdk_ocx_com_linkx_int_ena_w1c_t;
+
+static inline uint64_t BDK_OCX_COM_LINKX_INT_ENA_W1C(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_COM_LINKX_INT_ENA_W1C(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=2))
+ return 0x87e011000180ll + 8ll * ((a) & 0x3);
+ __bdk_csr_fatal("OCX_COM_LINKX_INT_ENA_W1C", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_COM_LINKX_INT_ENA_W1C(a) bdk_ocx_com_linkx_int_ena_w1c_t
+#define bustype_BDK_OCX_COM_LINKX_INT_ENA_W1C(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_COM_LINKX_INT_ENA_W1C(a) "OCX_COM_LINKX_INT_ENA_W1C"
+#define device_bar_BDK_OCX_COM_LINKX_INT_ENA_W1C(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_COM_LINKX_INT_ENA_W1C(a) (a)
+#define arguments_BDK_OCX_COM_LINKX_INT_ENA_W1C(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_com_link#_int_ena_w1s
+ *
+ * OCX COM Link Interrupt Enable Set Register
+ * This register sets interrupt enable bits.
+ */
+union bdk_ocx_com_linkx_int_ena_w1s
+{
+ uint64_t u;
+ struct bdk_ocx_com_linkx_int_ena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t bad_word : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for OCX_COM_LINK(0..2)_INT[BAD_WORD]. */
+ uint64_t align_fail : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for OCX_COM_LINK(0..2)_INT[ALIGN_FAIL]. */
+ uint64_t align_done : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for OCX_COM_LINK(0..2)_INT[ALIGN_DONE]. */
+ uint64_t up : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for OCX_COM_LINK(0..2)_INT[UP]. */
+ uint64_t stop : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for OCX_COM_LINK(0..2)_INT[STOP]. */
+ uint64_t blk_err : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for OCX_COM_LINK(0..2)_INT[BLK_ERR]. */
+ uint64_t reinit : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for OCX_COM_LINK(0..2)_INT[REINIT]. */
+ uint64_t lnk_data : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for OCX_COM_LINK(0..2)_INT[LNK_DATA]. */
+ uint64_t rxfifo_dbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for OCX_COM_LINK(0..2)_INT[RXFIFO_DBE]. */
+ uint64_t rxfifo_sbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for OCX_COM_LINK(0..2)_INT[RXFIFO_SBE]. */
+ uint64_t txfifo_dbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for OCX_COM_LINK(0..2)_INT[TXFIFO_DBE]. */
+ uint64_t txfifo_sbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for OCX_COM_LINK(0..2)_INT[TXFIFO_SBE]. */
+ uint64_t replay_dbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for OCX_COM_LINK(0..2)_INT[REPLAY_DBE]. */
+ uint64_t replay_sbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for OCX_COM_LINK(0..2)_INT[REPLAY_SBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t replay_sbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for OCX_COM_LINK(0..2)_INT[REPLAY_SBE]. */
+ uint64_t replay_dbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for OCX_COM_LINK(0..2)_INT[REPLAY_DBE]. */
+ uint64_t txfifo_sbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for OCX_COM_LINK(0..2)_INT[TXFIFO_SBE]. */
+ uint64_t txfifo_dbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for OCX_COM_LINK(0..2)_INT[TXFIFO_DBE]. */
+ uint64_t rxfifo_sbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for OCX_COM_LINK(0..2)_INT[RXFIFO_SBE]. */
+ uint64_t rxfifo_dbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for OCX_COM_LINK(0..2)_INT[RXFIFO_DBE]. */
+ uint64_t lnk_data : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for OCX_COM_LINK(0..2)_INT[LNK_DATA]. */
+ uint64_t reinit : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for OCX_COM_LINK(0..2)_INT[REINIT]. */
+ uint64_t blk_err : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for OCX_COM_LINK(0..2)_INT[BLK_ERR]. */
+ uint64_t stop : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for OCX_COM_LINK(0..2)_INT[STOP]. */
+ uint64_t up : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for OCX_COM_LINK(0..2)_INT[UP]. */
+ uint64_t align_done : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for OCX_COM_LINK(0..2)_INT[ALIGN_DONE]. */
+ uint64_t align_fail : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for OCX_COM_LINK(0..2)_INT[ALIGN_FAIL]. */
+ uint64_t bad_word : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for OCX_COM_LINK(0..2)_INT[BAD_WORD]. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_com_linkx_int_ena_w1s_s cn; */
+};
+typedef union bdk_ocx_com_linkx_int_ena_w1s bdk_ocx_com_linkx_int_ena_w1s_t;
+
+static inline uint64_t BDK_OCX_COM_LINKX_INT_ENA_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_COM_LINKX_INT_ENA_W1S(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=2))
+ return 0x87e011000160ll + 8ll * ((a) & 0x3);
+ __bdk_csr_fatal("OCX_COM_LINKX_INT_ENA_W1S", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_COM_LINKX_INT_ENA_W1S(a) bdk_ocx_com_linkx_int_ena_w1s_t
+#define bustype_BDK_OCX_COM_LINKX_INT_ENA_W1S(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_COM_LINKX_INT_ENA_W1S(a) "OCX_COM_LINKX_INT_ENA_W1S"
+#define device_bar_BDK_OCX_COM_LINKX_INT_ENA_W1S(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_COM_LINKX_INT_ENA_W1S(a) (a)
+#define arguments_BDK_OCX_COM_LINKX_INT_ENA_W1S(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_com_link#_int_w1s
+ *
+ * OCX COM Link Interrupt Set Register
+ * This register sets interrupt bits.
+ */
+union bdk_ocx_com_linkx_int_w1s
+{
+ uint64_t u;
+ struct bdk_ocx_com_linkx_int_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t bad_word : 1; /**< [ 13: 13](R/W1S/H) Reads or sets OCX_COM_LINK(0..2)_INT[BAD_WORD]. */
+ uint64_t align_fail : 1; /**< [ 12: 12](R/W1S/H) Reads or sets OCX_COM_LINK(0..2)_INT[ALIGN_FAIL]. */
+ uint64_t align_done : 1; /**< [ 11: 11](R/W1S/H) Reads or sets OCX_COM_LINK(0..2)_INT[ALIGN_DONE]. */
+ uint64_t up : 1; /**< [ 10: 10](R/W1S/H) Reads or sets OCX_COM_LINK(0..2)_INT[UP]. */
+ uint64_t stop : 1; /**< [ 9: 9](R/W1S/H) Reads or sets OCX_COM_LINK(0..2)_INT[STOP]. */
+ uint64_t blk_err : 1; /**< [ 8: 8](R/W1S/H) Reads or sets OCX_COM_LINK(0..2)_INT[BLK_ERR]. */
+ uint64_t reinit : 1; /**< [ 7: 7](R/W1S/H) Reads or sets OCX_COM_LINK(0..2)_INT[REINIT]. */
+ uint64_t lnk_data : 1; /**< [ 6: 6](R/W1S/H) Reads or sets OCX_COM_LINK(0..2)_INT[LNK_DATA]. */
+ uint64_t rxfifo_dbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets OCX_COM_LINK(0..2)_INT[RXFIFO_DBE]. */
+ uint64_t rxfifo_sbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets OCX_COM_LINK(0..2)_INT[RXFIFO_SBE]. */
+ uint64_t txfifo_dbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets OCX_COM_LINK(0..2)_INT[TXFIFO_DBE]. */
+ uint64_t txfifo_sbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets OCX_COM_LINK(0..2)_INT[TXFIFO_SBE]. */
+ uint64_t replay_dbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets OCX_COM_LINK(0..2)_INT[REPLAY_DBE]. */
+ uint64_t replay_sbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets OCX_COM_LINK(0..2)_INT[REPLAY_SBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t replay_sbe : 1; /**< [ 0: 0](R/W1S/H) Reads or sets OCX_COM_LINK(0..2)_INT[REPLAY_SBE]. */
+ uint64_t replay_dbe : 1; /**< [ 1: 1](R/W1S/H) Reads or sets OCX_COM_LINK(0..2)_INT[REPLAY_DBE]. */
+ uint64_t txfifo_sbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets OCX_COM_LINK(0..2)_INT[TXFIFO_SBE]. */
+ uint64_t txfifo_dbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets OCX_COM_LINK(0..2)_INT[TXFIFO_DBE]. */
+ uint64_t rxfifo_sbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets OCX_COM_LINK(0..2)_INT[RXFIFO_SBE]. */
+ uint64_t rxfifo_dbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets OCX_COM_LINK(0..2)_INT[RXFIFO_DBE]. */
+ uint64_t lnk_data : 1; /**< [ 6: 6](R/W1S/H) Reads or sets OCX_COM_LINK(0..2)_INT[LNK_DATA]. */
+ uint64_t reinit : 1; /**< [ 7: 7](R/W1S/H) Reads or sets OCX_COM_LINK(0..2)_INT[REINIT]. */
+ uint64_t blk_err : 1; /**< [ 8: 8](R/W1S/H) Reads or sets OCX_COM_LINK(0..2)_INT[BLK_ERR]. */
+ uint64_t stop : 1; /**< [ 9: 9](R/W1S/H) Reads or sets OCX_COM_LINK(0..2)_INT[STOP]. */
+ uint64_t up : 1; /**< [ 10: 10](R/W1S/H) Reads or sets OCX_COM_LINK(0..2)_INT[UP]. */
+ uint64_t align_done : 1; /**< [ 11: 11](R/W1S/H) Reads or sets OCX_COM_LINK(0..2)_INT[ALIGN_DONE]. */
+ uint64_t align_fail : 1; /**< [ 12: 12](R/W1S/H) Reads or sets OCX_COM_LINK(0..2)_INT[ALIGN_FAIL]. */
+ uint64_t bad_word : 1; /**< [ 13: 13](R/W1S/H) Reads or sets OCX_COM_LINK(0..2)_INT[BAD_WORD]. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_com_linkx_int_w1s_s cn; */
+};
+typedef union bdk_ocx_com_linkx_int_w1s bdk_ocx_com_linkx_int_w1s_t;
+
+static inline uint64_t BDK_OCX_COM_LINKX_INT_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_COM_LINKX_INT_W1S(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=2))
+ return 0x87e011000140ll + 8ll * ((a) & 0x3);
+ __bdk_csr_fatal("OCX_COM_LINKX_INT_W1S", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_COM_LINKX_INT_W1S(a) bdk_ocx_com_linkx_int_w1s_t
+#define bustype_BDK_OCX_COM_LINKX_INT_W1S(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_COM_LINKX_INT_W1S(a) "OCX_COM_LINKX_INT_W1S"
+#define device_bar_BDK_OCX_COM_LINKX_INT_W1S(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_COM_LINKX_INT_W1S(a) (a)
+#define arguments_BDK_OCX_COM_LINKX_INT_W1S(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_com_link_timer
+ *
+ * OCX COM Link Timer Register
+ */
+union bdk_ocx_com_link_timer
+{
+ uint64_t u;
+ struct bdk_ocx_com_link_timer_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t tout : 24; /**< [ 23: 0](R/W) Indicates the number of unacknowledged retry requests issued before link stops
+ operation and OCX_COM_LINK()_INT[STOP] is asserted. */
+#else /* Word 0 - Little Endian */
+ uint64_t tout : 24; /**< [ 23: 0](R/W) Indicates the number of unacknowledged retry requests issued before link stops
+ operation and OCX_COM_LINK()_INT[STOP] is asserted. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_com_link_timer_s cn; */
+};
+typedef union bdk_ocx_com_link_timer bdk_ocx_com_link_timer_t;
+
+#define BDK_OCX_COM_LINK_TIMER BDK_OCX_COM_LINK_TIMER_FUNC()
+static inline uint64_t BDK_OCX_COM_LINK_TIMER_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_COM_LINK_TIMER_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x87e011000010ll;
+ __bdk_csr_fatal("OCX_COM_LINK_TIMER", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_COM_LINK_TIMER bdk_ocx_com_link_timer_t
+#define bustype_BDK_OCX_COM_LINK_TIMER BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_COM_LINK_TIMER "OCX_COM_LINK_TIMER"
+#define device_bar_BDK_OCX_COM_LINK_TIMER 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_COM_LINK_TIMER 0
+#define arguments_BDK_OCX_COM_LINK_TIMER -1,-1,-1,-1
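+
+/* Usage sketch (illustrative): programming the retry threshold above,
+ * assuming BDK_CSR_WRITE from bdk-csr.h; the helper name is not part of the
+ * BDK. Once [TOUT] unacknowledged retry requests have been issued, the link
+ * stops and OCX_COM_LINK()_INT[STOP] asserts. */
+static inline void example_ocx_set_link_timeout(bdk_node_t node, uint32_t tout)
+{
+    bdk_ocx_com_link_timer_t timer = { .u = 0 };
+    timer.s.tout = tout & 0xffffff; /* 24-bit retry-request threshold */
+    BDK_CSR_WRITE(node, BDK_OCX_COM_LINK_TIMER, timer.u);
+}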
+
+/**
+ * Register (RSL) ocx_com_node
+ *
+ * OCX COM Node Register
+ */
+union bdk_ocx_com_node
+{
+ uint64_t u;
+ struct bdk_ocx_com_node_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t fixed_pin : 1; /**< [ 3: 3](RO/H) The current value of the OCI_FIXED_NODE pin. */
+ uint64_t fixed : 1; /**< [ 2: 2](R/W) ID valid associated with the chip. This register is used by the link
+ initialization software to help assign IDs and is transmitted over CCPI. The
+                                                                 [FIXED] field is set during a cold reset to the value of the OCI_FIXED_NODE
+ pin. The value is also readable in the OCX_LNE()_STS_MSG[TX_META_DAT]\<2\> for
+ each lane. The [FIXED] field of the link partner can be examined by locally
+ reading the OCX_LNE()_STS_MSG[RX_META_DAT]\<2\> on each valid lane or remotely
+ reading the OCX_COM_NODE[FIXED] on the link partner. */
+ uint64_t id : 2; /**< [ 1: 0](R/W) Node ID associated with the chip. This register is used by the rest of the chip
+ to determine what traffic is transmitted over CCPI. The value should not match
+ the OCX_COM_LINK()_CTL[ID] of any active link. The ID field is set during a cold
+ reset to the value of the OCI_NODE_ID pins. The value is also readable in the
+ OCX_LNE()_STS_MSG[TX_META_DAT]\<1:0\> for each lane. The ID field of the link
+ partner can be examined by locally reading the
+ OCX_LNE()_STS_MSG[RX_META_DAT]\<1:0\> on each valid lane or remotely reading the
+ OCX_COM_NODE[ID] on the link partner. */
+#else /* Word 0 - Little Endian */
+ uint64_t id : 2; /**< [ 1: 0](R/W) Node ID associated with the chip. This register is used by the rest of the chip
+ to determine what traffic is transmitted over CCPI. The value should not match
+ the OCX_COM_LINK()_CTL[ID] of any active link. The ID field is set during a cold
+ reset to the value of the OCI_NODE_ID pins. The value is also readable in the
+ OCX_LNE()_STS_MSG[TX_META_DAT]\<1:0\> for each lane. The ID field of the link
+ partner can be examined by locally reading the
+ OCX_LNE()_STS_MSG[RX_META_DAT]\<1:0\> on each valid lane or remotely reading the
+ OCX_COM_NODE[ID] on the link partner. */
+ uint64_t fixed : 1; /**< [ 2: 2](R/W) ID valid associated with the chip. This register is used by the link
+ initialization software to help assign IDs and is transmitted over CCPI. The
+                                                                 [FIXED] field is set during a cold reset to the value of the OCI_FIXED_NODE
+ pin. The value is also readable in the OCX_LNE()_STS_MSG[TX_META_DAT]\<2\> for
+ each lane. The [FIXED] field of the link partner can be examined by locally
+ reading the OCX_LNE()_STS_MSG[RX_META_DAT]\<2\> on each valid lane or remotely
+ reading the OCX_COM_NODE[FIXED] on the link partner. */
+ uint64_t fixed_pin : 1; /**< [ 3: 3](RO/H) The current value of the OCI_FIXED_NODE pin. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_com_node_s cn; */
+};
+typedef union bdk_ocx_com_node bdk_ocx_com_node_t;
+
+#define BDK_OCX_COM_NODE BDK_OCX_COM_NODE_FUNC()
+static inline uint64_t BDK_OCX_COM_NODE_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_COM_NODE_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x87e011000000ll;
+ __bdk_csr_fatal("OCX_COM_NODE", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_COM_NODE bdk_ocx_com_node_t
+#define bustype_BDK_OCX_COM_NODE BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_COM_NODE "OCX_COM_NODE"
+#define device_bar_BDK_OCX_COM_NODE 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_COM_NODE 0
+#define arguments_BDK_OCX_COM_NODE -1,-1,-1,-1
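+
+/* Usage sketch (illustrative): node-ID assignment per the description above,
+ * assuming BDK_CSR_MODIFY from bdk-csr.h. The [ID] written here must not
+ * match any active link's OCX_COM_LINK()_CTL[ID] and should be programmed
+ * before any memory traffic is generated over CCPI. */
+static inline void example_ocx_set_node_id(bdk_node_t node, unsigned id)
+{
+    BDK_CSR_MODIFY(c, node, BDK_OCX_COM_NODE,
+                   c.s.id = id & 0x3; /* 2-bit node ID */
+                   c.s.fixed = 1);    /* Mark the assigned ID as valid */
+}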
+
+/**
+ * Register (RSL) ocx_dll#_status
+ *
+ * OCX DLL Observability Registers
+ * These registers provide the parameters for DLL observability. Index 0 is the northeast DLL,
+ * index 1 the southeast DLL.
+ */
+union bdk_ocx_dllx_status
+{
+ uint64_t u;
+ struct bdk_ocx_dllx_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_60_63 : 4;
+ uint64_t max_dll_setting : 12; /**< [ 59: 48](RO/H) Max reported DLL setting. */
+ uint64_t min_dll_setting : 12; /**< [ 47: 36](RO/H) Min reported DLL setting. */
+ uint64_t pd_pos_rclk_refclk : 1; /**< [ 35: 35](RO/H) Phase detector output. */
+ uint64_t pdl_rclk_refclk : 1; /**< [ 34: 34](RO/H) Phase detector output. */
+ uint64_t pdr_rclk_refclk : 1; /**< [ 33: 33](RO/H) Phase detector output. */
+ uint64_t reserved_32 : 1;
+ uint64_t dly_elem_enable : 16; /**< [ 31: 16](RO/H) Delay element enable. */
+ uint64_t dll_setting : 12; /**< [ 15: 4](RO/H) DLL setting. */
+ uint64_t reserved_1_3 : 3;
+ uint64_t dll_lock : 1; /**< [ 0: 0](RO/H) DLL lock: 1 = locked, 0 = unlocked. */
+#else /* Word 0 - Little Endian */
+ uint64_t dll_lock : 1; /**< [ 0: 0](RO/H) DLL lock: 1 = locked, 0 = unlocked. */
+ uint64_t reserved_1_3 : 3;
+ uint64_t dll_setting : 12; /**< [ 15: 4](RO/H) DLL setting. */
+ uint64_t dly_elem_enable : 16; /**< [ 31: 16](RO/H) Delay element enable. */
+ uint64_t reserved_32 : 1;
+ uint64_t pdr_rclk_refclk : 1; /**< [ 33: 33](RO/H) Phase detector output. */
+ uint64_t pdl_rclk_refclk : 1; /**< [ 34: 34](RO/H) Phase detector output. */
+ uint64_t pd_pos_rclk_refclk : 1; /**< [ 35: 35](RO/H) Phase detector output. */
+ uint64_t min_dll_setting : 12; /**< [ 47: 36](RO/H) Min reported DLL setting. */
+ uint64_t max_dll_setting : 12; /**< [ 59: 48](RO/H) Max reported DLL setting. */
+ uint64_t reserved_60_63 : 4;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_dllx_status_s cn; */
+};
+typedef union bdk_ocx_dllx_status bdk_ocx_dllx_status_t;
+
+static inline uint64_t BDK_OCX_DLLX_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_DLLX_STATUS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e011000080ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("OCX_DLLX_STATUS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_DLLX_STATUS(a) bdk_ocx_dllx_status_t
+#define bustype_BDK_OCX_DLLX_STATUS(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_DLLX_STATUS(a) "OCX_DLLX_STATUS"
+#define device_bar_BDK_OCX_DLLX_STATUS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_DLLX_STATUS(a) (a)
+#define arguments_BDK_OCX_DLLX_STATUS(a) (a),-1,-1,-1
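+
+/* Usage sketch (illustrative): dumping the DLL observability fields above,
+ * assuming BDK_CSR_INIT from bdk-csr.h and the BDK's printf. Index 0 reports
+ * the northeast DLL and index 1 the southeast DLL. */
+static inline void example_ocx_dump_dll(bdk_node_t node, int dll)
+{
+    BDK_CSR_INIT(status, node, BDK_OCX_DLLX_STATUS(dll));
+    printf("OCX DLL%d: lock=%d setting=%d (min=%d, max=%d)\n",
+           dll, (int)status.s.dll_lock, (int)status.s.dll_setting,
+           (int)status.s.min_dll_setting, (int)status.s.max_dll_setting);
+}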
+
+/**
+ * Register (RSL) ocx_frc#_stat0
+ *
+ * OCX FRC 0-5 Statistics Registers 0
+ */
+union bdk_ocx_frcx_stat0
+{
+ uint64_t u;
+ struct bdk_ocx_frcx_stat0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_21_63 : 43;
+ uint64_t align_cnt : 21; /**< [ 20: 0](R/W/H) Indicates the number of alignment sequences received (i.e. those that do not violate the
+ current alignment). */
+#else /* Word 0 - Little Endian */
+ uint64_t align_cnt : 21; /**< [ 20: 0](R/W/H) Indicates the number of alignment sequences received (i.e. those that do not violate the
+ current alignment). */
+ uint64_t reserved_21_63 : 43;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_frcx_stat0_s cn; */
+};
+typedef union bdk_ocx_frcx_stat0 bdk_ocx_frcx_stat0_t;
+
+static inline uint64_t BDK_OCX_FRCX_STAT0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_FRCX_STAT0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e01100fa00ll + 8ll * ((a) & 0x7);
+ __bdk_csr_fatal("OCX_FRCX_STAT0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_FRCX_STAT0(a) bdk_ocx_frcx_stat0_t
+#define bustype_BDK_OCX_FRCX_STAT0(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_FRCX_STAT0(a) "OCX_FRCX_STAT0"
+#define device_bar_BDK_OCX_FRCX_STAT0(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_FRCX_STAT0(a) (a)
+#define arguments_BDK_OCX_FRCX_STAT0(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_frc#_stat1
+ *
+ * OCX FRC 0-5 Statistics Registers 1
+ */
+union bdk_ocx_frcx_stat1
+{
+ uint64_t u;
+ struct bdk_ocx_frcx_stat1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_21_63 : 43;
+ uint64_t align_err_cnt : 21; /**< [ 20: 0](R/W/H) Indicates the number of alignment sequences received in error (i.e. those that violate the
+ current alignment). */
+#else /* Word 0 - Little Endian */
+ uint64_t align_err_cnt : 21; /**< [ 20: 0](R/W/H) Indicates the number of alignment sequences received in error (i.e. those that violate the
+ current alignment). */
+ uint64_t reserved_21_63 : 43;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_frcx_stat1_s cn; */
+};
+typedef union bdk_ocx_frcx_stat1 bdk_ocx_frcx_stat1_t;
+
+static inline uint64_t BDK_OCX_FRCX_STAT1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_FRCX_STAT1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e01100fa80ll + 8ll * ((a) & 0x7);
+ __bdk_csr_fatal("OCX_FRCX_STAT1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_FRCX_STAT1(a) bdk_ocx_frcx_stat1_t
+#define bustype_BDK_OCX_FRCX_STAT1(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_FRCX_STAT1(a) "OCX_FRCX_STAT1"
+#define device_bar_BDK_OCX_FRCX_STAT1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_FRCX_STAT1(a) (a)
+#define arguments_BDK_OCX_FRCX_STAT1(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_frc#_stat2
+ *
+ * OCX FRC 0-5 Statistics Registers 2
+ */
+union bdk_ocx_frcx_stat2
+{
+ uint64_t u;
+ struct bdk_ocx_frcx_stat2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_21_63 : 43;
+ uint64_t align_done : 21; /**< [ 20: 0](R/W/H) Indicates the number of attempts at alignment that succeeded. */
+#else /* Word 0 - Little Endian */
+ uint64_t align_done : 21; /**< [ 20: 0](R/W/H) Indicates the number of attempts at alignment that succeeded. */
+ uint64_t reserved_21_63 : 43;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_frcx_stat2_s cn; */
+};
+typedef union bdk_ocx_frcx_stat2 bdk_ocx_frcx_stat2_t;
+
+static inline uint64_t BDK_OCX_FRCX_STAT2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_FRCX_STAT2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e01100fb00ll + 8ll * ((a) & 0x7);
+ __bdk_csr_fatal("OCX_FRCX_STAT2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_FRCX_STAT2(a) bdk_ocx_frcx_stat2_t
+#define bustype_BDK_OCX_FRCX_STAT2(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_FRCX_STAT2(a) "OCX_FRCX_STAT2"
+#define device_bar_BDK_OCX_FRCX_STAT2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_FRCX_STAT2(a) (a)
+#define arguments_BDK_OCX_FRCX_STAT2(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_frc#_stat3
+ *
+ * OCX FRC 0-5 Statistics Registers 3
+ */
+union bdk_ocx_frcx_stat3
+{
+ uint64_t u;
+ struct bdk_ocx_frcx_stat3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_21_63 : 43;
+ uint64_t align_fail : 21; /**< [ 20: 0](R/W/H) Indicates the number of attempts at alignment that failed. */
+#else /* Word 0 - Little Endian */
+ uint64_t align_fail : 21; /**< [ 20: 0](R/W/H) Indicates the number of attempts at alignment that failed. */
+ uint64_t reserved_21_63 : 43;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_frcx_stat3_s cn; */
+};
+typedef union bdk_ocx_frcx_stat3 bdk_ocx_frcx_stat3_t;
+
+static inline uint64_t BDK_OCX_FRCX_STAT3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_FRCX_STAT3(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e01100fb80ll + 8ll * ((a) & 0x7);
+ __bdk_csr_fatal("OCX_FRCX_STAT3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_FRCX_STAT3(a) bdk_ocx_frcx_stat3_t
+#define bustype_BDK_OCX_FRCX_STAT3(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_FRCX_STAT3(a) "OCX_FRCX_STAT3"
+#define device_bar_BDK_OCX_FRCX_STAT3(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_FRCX_STAT3(a) (a)
+#define arguments_BDK_OCX_FRCX_STAT3(a) (a),-1,-1,-1
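+
+/* Usage sketch (illustrative): reading the four FRC alignment statistics
+ * defined above (STAT0..STAT3), assuming BDK_CSR_READ from bdk-csr.h and the
+ * BDK's printf. Together the counters report alignment sequences received,
+ * sequences received in error, and alignment attempts that succeeded or
+ * failed. */
+static inline void example_ocx_dump_frc_stats(bdk_node_t node, int frc)
+{
+    uint64_t ok   = BDK_CSR_READ(node, BDK_OCX_FRCX_STAT0(frc));
+    uint64_t err  = BDK_CSR_READ(node, BDK_OCX_FRCX_STAT1(frc));
+    uint64_t done = BDK_CSR_READ(node, BDK_OCX_FRCX_STAT2(frc));
+    uint64_t fail = BDK_CSR_READ(node, BDK_OCX_FRCX_STAT3(frc));
+    printf("OCX FRC%d: align=%llu err=%llu done=%llu fail=%llu\n", frc,
+           (unsigned long long)ok, (unsigned long long)err,
+           (unsigned long long)done, (unsigned long long)fail);
+}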
+
+/**
+ * Register (RSL) ocx_lne#_bad_cnt
+ *
+ * OCX Lane Bad Count Register
+ */
+union bdk_ocx_lnex_bad_cnt
+{
+ uint64_t u;
+ struct bdk_ocx_lnex_bad_cnt_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t tx_bad_crc32 : 1; /**< [ 11: 11](R/W/H) Send one diagnostic word with bad CRC32 to the selected lane.
+ Injects just once. */
+ uint64_t tx_bad_6467_cnt : 5; /**< [ 10: 6](R/W/H) Send N bad 64B/67B code words on selected lane. */
+ uint64_t tx_bad_sync_cnt : 3; /**< [ 5: 3](R/W/H) Send N bad sync words on selected lane. */
+ uint64_t tx_bad_scram_cnt : 3; /**< [ 2: 0](R/W/H) Send N bad scram state on selected lane. */
+#else /* Word 0 - Little Endian */
+ uint64_t tx_bad_scram_cnt : 3; /**< [ 2: 0](R/W/H) Send N bad scram state on selected lane. */
+ uint64_t tx_bad_sync_cnt : 3; /**< [ 5: 3](R/W/H) Send N bad sync words on selected lane. */
+ uint64_t tx_bad_6467_cnt : 5; /**< [ 10: 6](R/W/H) Send N bad 64B/67B code words on selected lane. */
+ uint64_t tx_bad_crc32 : 1; /**< [ 11: 11](R/W/H) Send one diagnostic word with bad CRC32 to the selected lane.
+ Injects just once. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_lnex_bad_cnt_s cn; */
+};
+typedef union bdk_ocx_lnex_bad_cnt bdk_ocx_lnex_bad_cnt_t;
+
+static inline uint64_t BDK_OCX_LNEX_BAD_CNT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_LNEX_BAD_CNT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=23))
+ return 0x87e011008028ll + 0x100ll * ((a) & 0x1f);
+ __bdk_csr_fatal("OCX_LNEX_BAD_CNT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_LNEX_BAD_CNT(a) bdk_ocx_lnex_bad_cnt_t
+#define bustype_BDK_OCX_LNEX_BAD_CNT(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_LNEX_BAD_CNT(a) "OCX_LNEX_BAD_CNT"
+#define device_bar_BDK_OCX_LNEX_BAD_CNT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_LNEX_BAD_CNT(a) (a)
+#define arguments_BDK_OCX_LNEX_BAD_CNT(a) (a),-1,-1,-1
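+
+/* Usage sketch (illustrative): injecting a transmit error with the register
+ * above, assuming BDK_CSR_WRITE from bdk-csr.h. Hardware corrupts the
+ * requested number of words once, which is useful for exercising the
+ * receive-side error interrupts (e.g. OCX_LNE()_INT[CRC32_ERR]) during
+ * bringup. */
+static inline void example_ocx_lane_inject_crc32(bdk_node_t node, int lane)
+{
+    bdk_ocx_lnex_bad_cnt_t bad = { .u = 0 };
+    bad.s.tx_bad_crc32 = 1; /* One diagnostic word with bad CRC32 */
+    BDK_CSR_WRITE(node, BDK_OCX_LNEX_BAD_CNT(lane), bad.u);
+}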
+
+/**
+ * Register (RSL) ocx_lne#_cfg
+ *
+ * OCX Lane Config Register
+ */
+union bdk_ocx_lnex_cfg
+{
+ uint64_t u;
+ struct bdk_ocx_lnex_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t rx_bdry_lock_dis : 1; /**< [ 8: 8](R/W) Disable word boundary lock. While disabled, received data is tossed. Once enabled,
+ received data is searched for legal two-bit patterns. */
+ uint64_t reserved_3_7 : 5;
+ uint64_t rx_stat_wrap_dis : 1; /**< [ 2: 2](R/W) Upon overflow, a statistics counter should saturate instead of wrapping. */
+ uint64_t rx_stat_rdclr : 1; /**< [ 1: 1](R/W) CSR read to OCX_LNEx_STAT* clears the selected counter after returning its current value. */
+ uint64_t rx_stat_ena : 1; /**< [ 0: 0](R/W) Enable RX lane statistics counters. */
+#else /* Word 0 - Little Endian */
+ uint64_t rx_stat_ena : 1; /**< [ 0: 0](R/W) Enable RX lane statistics counters. */
+ uint64_t rx_stat_rdclr : 1; /**< [ 1: 1](R/W) CSR read to OCX_LNEx_STAT* clears the selected counter after returning its current value. */
+ uint64_t rx_stat_wrap_dis : 1; /**< [ 2: 2](R/W) Upon overflow, a statistics counter should saturate instead of wrapping. */
+ uint64_t reserved_3_7 : 5;
+ uint64_t rx_bdry_lock_dis : 1; /**< [ 8: 8](R/W) Disable word boundary lock. While disabled, received data is tossed. Once enabled,
+ received data is searched for legal two-bit patterns. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_lnex_cfg_s cn; */
+};
+typedef union bdk_ocx_lnex_cfg bdk_ocx_lnex_cfg_t;
+
+static inline uint64_t BDK_OCX_LNEX_CFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_LNEX_CFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=23))
+ return 0x87e011008000ll + 0x100ll * ((a) & 0x1f);
+ __bdk_csr_fatal("OCX_LNEX_CFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_LNEX_CFG(a) bdk_ocx_lnex_cfg_t
+#define bustype_BDK_OCX_LNEX_CFG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_LNEX_CFG(a) "OCX_LNEX_CFG"
+#define device_bar_BDK_OCX_LNEX_CFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_LNEX_CFG(a) (a)
+#define arguments_BDK_OCX_LNEX_CFG(a) (a),-1,-1,-1
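+
+/* Usage sketch (illustrative): enabling the per-lane statistics controlled by
+ * the register above, assuming BDK_CSR_MODIFY from bdk-csr.h. With
+ * [RX_STAT_RDCLR] set, each OCX_LNE()_STAT* read returns the count and then
+ * clears it; [RX_STAT_WRAP_DIS] makes the counters saturate instead of wrap. */
+static inline void example_ocx_lane_stats_enable(bdk_node_t node, int lane)
+{
+    BDK_CSR_MODIFY(c, node, BDK_OCX_LNEX_CFG(lane),
+                   c.s.rx_stat_ena = 1;        /* Turn on RX statistics */
+                   c.s.rx_stat_rdclr = 1;      /* Clear-on-read */
+                   c.s.rx_stat_wrap_dis = 1);  /* Saturate on overflow */
+}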
+
+/**
+ * Register (RSL) ocx_lne#_int
+ *
+ * OCX Lane Interrupt Register
+ */
+union bdk_ocx_lnex_int
+{
+ uint64_t u;
+ struct bdk_ocx_lnex_int_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_10_63 : 54;
+ uint64_t disp_err : 1; /**< [ 9: 9](R/W1C/H) RX disparity error encountered. These receive errors may occur during normal
+ operation, and may likely occur during link bring up. Hardware normally will
+ automatically correct the error. Software may choose to count the number of
+ these errors. */
+ uint64_t bad_64b67b : 1; /**< [ 8: 8](R/W1C/H) Bad 64B/67B codeword encountered. Once the bad word reaches the link, as denoted by
+ OCX_COM_LINK(0..2)_INT[BAD_WORD], a retry handshake is initiated. These receive errors may
+ occur during normal operation, and may likely occur during link bringup. Hardware normally
+ automatically corrects the error. Software may choose to count the number of these
+ errors. */
+ uint64_t stat_cnt_ovfl : 1; /**< [ 7: 7](R/W1C/H) RX lane statistic counter overflow. */
+    uint64_t stat_msg              : 1;  /**< [  6:  6](R/W1C/H) A status bit for the link or a lane transitioned from a 1 (healthy) to a 0 (problem). These
+                                                                 should not occur during normal operation. This may be considered fatal, depending on the
+                                                                 software protocol. */
+ uint64_t dskew_fifo_ovfl : 1; /**< [ 5: 5](R/W1C/H) RX deskew FIFO overflow occurred. These receive errors may occur during normal operation,
+ and may likely occur during link bring up. Hardware normally automatically corrects the
+ error. Software may choose to count the number of these errors. */
+ uint64_t scrm_sync_loss : 1; /**< [ 4: 4](R/W1C/H) Four consecutive bad sync words or three consecutive scramble state
+ mismatches. These receive errors should not occur during normal operation, but
+ may likely occur during link bring up.
+ Hardware normally will automatically correct the error. Software may choose to count the
+ number of these errors. */
+ uint64_t ukwn_cntl_word : 1; /**< [ 3: 3](R/W1C/H) Unknown framing-control word. The block type does not match any of (SYNC, SCRAM, SKIP,
+ DIAG).
+ These receive errors may occur during normal operation. Hardware normally
+ automatically corrects the error. Software may choose to count the number of these errors. */
+ uint64_t crc32_err : 1; /**< [ 2: 2](R/W1C/H) Diagnostic CRC32 errors. These receive errors may occur during normal operation, typically
+ in the presence of other errors, and may likely occur during link bring up. Hardware
+ normally automatically corrects the error. Software may choose to count the number of
+ these errors. */
+ uint64_t bdry_sync_loss : 1; /**< [ 1: 1](R/W1C/H) RX logic lost word boundary sync after 16 tries. Hardware automatically attempts to regain
+ word boundary sync. These receive errors should not occur during normal operation, but may
+ likely occur during link bring up. Hardware normally automatically corrects the error.
+ Software may choose to count the number of these errors. */
+ uint64_t serdes_lock_loss : 1; /**< [ 0: 0](R/W1C/H) RX SerDes loses lock. These receive errors should not occur during normal operation. This
+ may be considered fatal. */
+#else /* Word 0 - Little Endian */
+ uint64_t serdes_lock_loss : 1; /**< [ 0: 0](R/W1C/H) RX SerDes loses lock. These receive errors should not occur during normal operation. This
+ may be considered fatal. */
+ uint64_t bdry_sync_loss : 1; /**< [ 1: 1](R/W1C/H) RX logic lost word boundary sync after 16 tries. Hardware automatically attempts to regain
+ word boundary sync. These receive errors should not occur during normal operation, but may
+ likely occur during link bring up. Hardware normally automatically corrects the error.
+ Software may choose to count the number of these errors. */
+ uint64_t crc32_err : 1; /**< [ 2: 2](R/W1C/H) Diagnostic CRC32 errors. These receive errors may occur during normal operation, typically
+ in the presence of other errors, and may likely occur during link bring up. Hardware
+ normally automatically corrects the error. Software may choose to count the number of
+ these errors. */
+ uint64_t ukwn_cntl_word : 1; /**< [ 3: 3](R/W1C/H) Unknown framing-control word. The block type does not match any of (SYNC, SCRAM, SKIP,
+ DIAG).
+ These receive errors may occur during normal operation. Hardware normally
+ automatically corrects the error. Software may choose to count the number of these errors. */
+ uint64_t scrm_sync_loss : 1; /**< [ 4: 4](R/W1C/H) Four consecutive bad sync words or three consecutive scramble state
+ mismatches. These receive errors should not occur during normal operation, but
+ may likely occur during link bring up.
+ Hardware normally will automatically correct the error. Software may choose to count the
+ number of these errors. */
+ uint64_t dskew_fifo_ovfl : 1; /**< [ 5: 5](R/W1C/H) RX deskew FIFO overflow occurred. These receive errors may occur during normal operation,
+ and may likely occur during link bring up. Hardware normally automatically corrects the
+ error. Software may choose to count the number of these errors. */
+    uint64_t stat_msg              : 1;  /**< [  6:  6](R/W1C/H) A status bit for the link or a lane transitioned from a 1 (healthy) to a 0 (problem). These
+                                                                 should not occur during normal operation. This may be considered fatal, depending on the
+                                                                 software protocol. */
+ uint64_t stat_cnt_ovfl : 1; /**< [ 7: 7](R/W1C/H) RX lane statistic counter overflow. */
+ uint64_t bad_64b67b : 1; /**< [ 8: 8](R/W1C/H) Bad 64B/67B codeword encountered. Once the bad word reaches the link, as denoted by
+ OCX_COM_LINK(0..2)_INT[BAD_WORD], a retry handshake is initiated. These receive errors may
+ occur during normal operation, and may likely occur during link bringup. Hardware normally
+ automatically corrects the error. Software may choose to count the number of these
+ errors. */
+ uint64_t disp_err : 1; /**< [ 9: 9](R/W1C/H) RX disparity error encountered. These receive errors may occur during normal
+ operation, and may likely occur during link bring up. Hardware normally will
+ automatically correct the error. Software may choose to count the number of
+ these errors. */
+ uint64_t reserved_10_63 : 54;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_lnex_int_s cn; */
+};
+typedef union bdk_ocx_lnex_int bdk_ocx_lnex_int_t;
+
+static inline uint64_t BDK_OCX_LNEX_INT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_LNEX_INT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=23))
+ return 0x87e011008018ll + 0x100ll * ((a) & 0x1f);
+ __bdk_csr_fatal("OCX_LNEX_INT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_LNEX_INT(a) bdk_ocx_lnex_int_t
+#define bustype_BDK_OCX_LNEX_INT(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_LNEX_INT(a) "OCX_LNEX_INT"
+#define device_bar_BDK_OCX_LNEX_INT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_LNEX_INT(a) (a)
+#define arguments_BDK_OCX_LNEX_INT(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_lne#_int_en
+ *
+ * OCX Lane Interrupt Enable Register
+ */
+union bdk_ocx_lnex_int_en
+{
+ uint64_t u;
+ struct bdk_ocx_lnex_int_en_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_10_63 : 54;
+ uint64_t disp_err : 1; /**< [ 9: 9](RAZ) Reserved. */
+ uint64_t bad_64b67b : 1; /**< [ 8: 8](R/W) Enable bit for bad 64B/67B codeword encountered. */
+ uint64_t stat_cnt_ovfl : 1; /**< [ 7: 7](R/W) Enable bit for RX lane statistic counter overflow. */
+    uint64_t stat_msg              : 1;  /**< [  6:  6](R/W) Enable bit for a link or lane status bit transitioning from a 1 (healthy) to a 0 (problem). */
+ uint64_t dskew_fifo_ovfl : 1; /**< [ 5: 5](R/W) Enable bit for RX deskew FIFO overflow occurred. */
+ uint64_t scrm_sync_loss : 1; /**< [ 4: 4](R/W) Enable bit for 4 consecutive bad sync words or 3 consecutive scramble state mismatches. */
+ uint64_t ukwn_cntl_word : 1; /**< [ 3: 3](R/W) Enable bit for unknown framing control word. Block type does not match any of (SYNC,
+ SCRAM, SKIP, DIAG). */
+ uint64_t crc32_err : 1; /**< [ 2: 2](R/W) Enable bit for diagnostic CRC32 errors. */
+ uint64_t bdry_sync_loss : 1; /**< [ 1: 1](R/W) Enable bit for RX logic lost word boundary sync after 16 tries. */
+ uint64_t serdes_lock_loss : 1; /**< [ 0: 0](R/W) Enable bit for RX SerDes loses lock. */
+#else /* Word 0 - Little Endian */
+ uint64_t serdes_lock_loss : 1; /**< [ 0: 0](R/W) Enable bit for RX SerDes loses lock. */
+ uint64_t bdry_sync_loss : 1; /**< [ 1: 1](R/W) Enable bit for RX logic lost word boundary sync after 16 tries. */
+ uint64_t crc32_err : 1; /**< [ 2: 2](R/W) Enable bit for diagnostic CRC32 errors. */
+ uint64_t ukwn_cntl_word : 1; /**< [ 3: 3](R/W) Enable bit for unknown framing control word. Block type does not match any of (SYNC,
+ SCRAM, SKIP, DIAG). */
+ uint64_t scrm_sync_loss : 1; /**< [ 4: 4](R/W) Enable bit for 4 consecutive bad sync words or 3 consecutive scramble state mismatches. */
+ uint64_t dskew_fifo_ovfl : 1; /**< [ 5: 5](R/W) Enable bit for RX deskew FIFO overflow occurred. */
+    uint64_t stat_msg              : 1;  /**< [  6:  6](R/W) Enable bit for a link or lane status bit transitioning from a 1 (healthy) to a 0 (problem). */
+ uint64_t stat_cnt_ovfl : 1; /**< [ 7: 7](R/W) Enable bit for RX lane statistic counter overflow. */
+ uint64_t bad_64b67b : 1; /**< [ 8: 8](R/W) Enable bit for bad 64B/67B codeword encountered. */
+ uint64_t disp_err : 1; /**< [ 9: 9](RAZ) Reserved. */
+ uint64_t reserved_10_63 : 54;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_lnex_int_en_s cn88xxp1; */
+ struct bdk_ocx_lnex_int_en_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_10_63 : 54;
+ uint64_t disp_err : 1; /**< [ 9: 9](R/W) Enable bit for RX disparity error encountered. */
+ uint64_t bad_64b67b : 1; /**< [ 8: 8](R/W) Enable bit for bad 64B/67B codeword encountered. */
+ uint64_t stat_cnt_ovfl : 1; /**< [ 7: 7](R/W) Enable bit for RX lane statistic counter overflow. */
+    uint64_t stat_msg              : 1;  /**< [  6:  6](R/W) Enable bit for a link or lane status bit transitioning from a 1 (healthy) to a 0 (problem). */
+ uint64_t dskew_fifo_ovfl : 1; /**< [ 5: 5](R/W) Enable bit for RX deskew FIFO overflow occurred. */
+ uint64_t scrm_sync_loss : 1; /**< [ 4: 4](R/W) Enable bit for 4 consecutive bad sync words or 3 consecutive scramble state mismatches. */
+ uint64_t ukwn_cntl_word : 1; /**< [ 3: 3](R/W) Enable bit for unknown framing control word. Block type does not match any of (SYNC,
+ SCRAM, SKIP, DIAG). */
+ uint64_t crc32_err : 1; /**< [ 2: 2](R/W) Enable bit for diagnostic CRC32 errors. */
+ uint64_t bdry_sync_loss : 1; /**< [ 1: 1](R/W) Enable bit for RX logic lost word boundary sync after 16 tries. */
+ uint64_t serdes_lock_loss : 1; /**< [ 0: 0](R/W) Enable bit for RX SerDes loses lock. */
+#else /* Word 0 - Little Endian */
+ uint64_t serdes_lock_loss : 1; /**< [ 0: 0](R/W) Enable bit for RX SerDes loses lock. */
+ uint64_t bdry_sync_loss : 1; /**< [ 1: 1](R/W) Enable bit for RX logic lost word boundary sync after 16 tries. */
+ uint64_t crc32_err : 1; /**< [ 2: 2](R/W) Enable bit for diagnostic CRC32 errors. */
+ uint64_t ukwn_cntl_word : 1; /**< [ 3: 3](R/W) Enable bit for unknown framing control word. Block type does not match any of (SYNC,
+ SCRAM, SKIP, DIAG). */
+ uint64_t scrm_sync_loss : 1; /**< [ 4: 4](R/W) Enable bit for 4 consecutive bad sync words or 3 consecutive scrambler state mismatches. */
+ uint64_t dskew_fifo_ovfl : 1; /**< [ 5: 5](R/W) Enable bit for RX deskew FIFO overflow. */
+ uint64_t stat_msg : 1; /**< [ 6: 6](R/W) Enable bit for a link or lane status bit transitioning from 1 (healthy) to 0 (problem). */
+ uint64_t stat_cnt_ovfl : 1; /**< [ 7: 7](R/W) Enable bit for RX lane statistic counter overflow. */
+ uint64_t bad_64b67b : 1; /**< [ 8: 8](R/W) Enable bit for bad 64B/67B codeword encountered. */
+ uint64_t disp_err : 1; /**< [ 9: 9](R/W) Enable bit for RX disparity error encountered. */
+ uint64_t reserved_10_63 : 54;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_ocx_lnex_int_en bdk_ocx_lnex_int_en_t;
+
+static inline uint64_t BDK_OCX_LNEX_INT_EN(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_LNEX_INT_EN(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=23))
+ return 0x87e011008020ll + 0x100ll * ((a) & 0x1f);
+ __bdk_csr_fatal("OCX_LNEX_INT_EN", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_LNEX_INT_EN(a) bdk_ocx_lnex_int_en_t
+#define bustype_BDK_OCX_LNEX_INT_EN(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_LNEX_INT_EN(a) "OCX_LNEX_INT_EN"
+#define device_bar_BDK_OCX_LNEX_INT_EN(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_LNEX_INT_EN(a) (a)
+#define arguments_BDK_OCX_LNEX_INT_EN(a) (a),-1,-1,-1
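+
+/*
+ * Usage sketch (editorial illustration, not part of the BDK): program a
+ * lane's interrupt enables through the bit-field union above.
+ * example_csr_write() is a hypothetical helper standing in for the BDK's
+ * CSR write path; it assumes the register is directly memory-mapped at
+ * the address returned by the accessor.
+ */
+static inline void example_csr_write(uint64_t addr, uint64_t value)
+{
+    *(volatile uint64_t *)addr = value; /* assumed direct MMIO mapping */
+}
+
+static inline void example_ocx_lane_int_enable(unsigned long lane)
+{
+    bdk_ocx_lnex_int_en_t en;
+    en.u = 0;
+    en.s.serdes_lock_loss = 1; /* interrupt when the RX SerDes loses lock */
+    en.s.crc32_err = 1;        /* interrupt on diagnostic CRC32 errors */
+    en.s.stat_cnt_ovfl = 1;    /* interrupt when a statistic counter saturates */
+    example_csr_write(BDK_OCX_LNEX_INT_EN(lane), en.u);
+}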
+
+/**
+ * Register (RSL) ocx_lne#_stat00
+ *
+ * OCX Lane Statistic 0 Register
+ */
+union bdk_ocx_lnex_stat00
+{
+ uint64_t u;
+ struct bdk_ocx_lnex_stat00_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_18_63 : 46;
+ uint64_t ser_lock_loss_cnt : 18; /**< [ 17: 0](RO/H) Number of times the lane lost clock-data-recovery. Saturates. Interrupt on saturation if
+ OCX_LNE(0..23)_INT_EN[STAT_CNT_OVFL] = 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t ser_lock_loss_cnt : 18; /**< [ 17: 0](RO/H) Number of times the lane lost clock-data-recovery. Saturates. Interrupt on saturation if
+ OCX_LNE(0..23)_INT_EN[STAT_CNT_OVFL] = 1. */
+ uint64_t reserved_18_63 : 46;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_lnex_stat00_s cn; */
+};
+typedef union bdk_ocx_lnex_stat00 bdk_ocx_lnex_stat00_t;
+
+static inline uint64_t BDK_OCX_LNEX_STAT00(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_LNEX_STAT00(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=23))
+ return 0x87e011008040ll + 0x100ll * ((a) & 0x1f);
+ __bdk_csr_fatal("OCX_LNEX_STAT00", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_LNEX_STAT00(a) bdk_ocx_lnex_stat00_t
+#define bustype_BDK_OCX_LNEX_STAT00(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_LNEX_STAT00(a) "OCX_LNEX_STAT00"
+#define device_bar_BDK_OCX_LNEX_STAT00(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_LNEX_STAT00(a) (a)
+#define arguments_BDK_OCX_LNEX_STAT00(a) (a),-1,-1,-1
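+
+/*
+ * Usage sketch (editorial illustration, not part of the BDK): read one of
+ * the saturating per-lane statistic counters. The same pattern applies to
+ * the OCX_LNE()_STAT01..STAT14 registers that follow. example_csr_read()
+ * is a hypothetical helper standing in for the BDK's CSR read path.
+ */
+static inline uint64_t example_csr_read(uint64_t addr)
+{
+    return *(volatile uint64_t *)addr; /* assumed direct MMIO mapping */
+}
+
+static inline uint64_t example_ocx_lane_lock_loss_count(unsigned long lane)
+{
+    bdk_ocx_lnex_stat00_t stat;
+    stat.u = example_csr_read(BDK_OCX_LNEX_STAT00(lane));
+    /* 18-bit counter; it sticks at 0x3FFFF once saturated. */
+    return stat.s.ser_lock_loss_cnt;
+}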
+
+/**
+ * Register (RSL) ocx_lne#_stat01
+ *
+ * OCX Lane Statistic 1 Register
+ */
+union bdk_ocx_lnex_stat01
+{
+ uint64_t u;
+ struct bdk_ocx_lnex_stat01_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_18_63 : 46;
+ uint64_t bdry_sync_loss_cnt : 18; /**< [ 17: 0](RO/H) Number of times a lane lost word boundary synchronization. Saturates. Interrupt on
+ saturation if OCX_LNE(0..23)_INT_EN[STAT_CNT_OVFL] = 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t bdry_sync_loss_cnt : 18; /**< [ 17: 0](RO/H) Number of times a lane lost word boundary synchronization. Saturates. Interrupt on
+ saturation if OCX_LNE(0..23)_INT_EN[STAT_CNT_OVFL] = 1. */
+ uint64_t reserved_18_63 : 46;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_lnex_stat01_s cn; */
+};
+typedef union bdk_ocx_lnex_stat01 bdk_ocx_lnex_stat01_t;
+
+static inline uint64_t BDK_OCX_LNEX_STAT01(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_LNEX_STAT01(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=23))
+ return 0x87e011008048ll + 0x100ll * ((a) & 0x1f);
+ __bdk_csr_fatal("OCX_LNEX_STAT01", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_LNEX_STAT01(a) bdk_ocx_lnex_stat01_t
+#define bustype_BDK_OCX_LNEX_STAT01(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_LNEX_STAT01(a) "OCX_LNEX_STAT01"
+#define device_bar_BDK_OCX_LNEX_STAT01(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_LNEX_STAT01(a) (a)
+#define arguments_BDK_OCX_LNEX_STAT01(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_lne#_stat02
+ *
+ * OCX Lane Statistic 2 Register
+ */
+union bdk_ocx_lnex_stat02
+{
+ uint64_t u;
+ struct bdk_ocx_lnex_stat02_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_18_63 : 46;
+ uint64_t syncw_bad_cnt : 18; /**< [ 17: 0](RO/H) Number of bad synchronization words. Saturates. Interrupt on saturation if
+ OCX_LNE(0..23)_INT_EN[STAT_CNT_OVFL] = 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t syncw_bad_cnt : 18; /**< [ 17: 0](RO/H) Number of bad synchronization words. Saturates. Interrupt on saturation if
+ OCX_LNE(0..23)_INT_EN[STAT_CNT_OVFL] = 1. */
+ uint64_t reserved_18_63 : 46;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_lnex_stat02_s cn; */
+};
+typedef union bdk_ocx_lnex_stat02 bdk_ocx_lnex_stat02_t;
+
+static inline uint64_t BDK_OCX_LNEX_STAT02(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_LNEX_STAT02(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=23))
+ return 0x87e011008050ll + 0x100ll * ((a) & 0x1f);
+ __bdk_csr_fatal("OCX_LNEX_STAT02", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_LNEX_STAT02(a) bdk_ocx_lnex_stat02_t
+#define bustype_BDK_OCX_LNEX_STAT02(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_LNEX_STAT02(a) "OCX_LNEX_STAT02"
+#define device_bar_BDK_OCX_LNEX_STAT02(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_LNEX_STAT02(a) (a)
+#define arguments_BDK_OCX_LNEX_STAT02(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_lne#_stat03
+ *
+ * OCX Lane Statistic 3 Register
+ */
+union bdk_ocx_lnex_stat03
+{
+ uint64_t u;
+ struct bdk_ocx_lnex_stat03_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_18_63 : 46;
+ uint64_t syncw_good_cnt : 18; /**< [ 17: 0](RO/H) Number of good synchronization words. Saturates. Interrupt on saturation if
+ OCX_LNE(0..23)_INT_EN[STAT_CNT_OVFL] = 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t syncw_good_cnt : 18; /**< [ 17: 0](RO/H) Number of good synchronization words. Saturates. Interrupt on saturation if
+ OCX_LNE(0..23)_INT_EN[STAT_CNT_OVFL] = 1. */
+ uint64_t reserved_18_63 : 46;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_lnex_stat03_s cn; */
+};
+typedef union bdk_ocx_lnex_stat03 bdk_ocx_lnex_stat03_t;
+
+static inline uint64_t BDK_OCX_LNEX_STAT03(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_LNEX_STAT03(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=23))
+ return 0x87e011008058ll + 0x100ll * ((a) & 0x1f);
+ __bdk_csr_fatal("OCX_LNEX_STAT03", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_LNEX_STAT03(a) bdk_ocx_lnex_stat03_t
+#define bustype_BDK_OCX_LNEX_STAT03(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_LNEX_STAT03(a) "OCX_LNEX_STAT03"
+#define device_bar_BDK_OCX_LNEX_STAT03(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_LNEX_STAT03(a) (a)
+#define arguments_BDK_OCX_LNEX_STAT03(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_lne#_stat04
+ *
+ * OCX Lane Statistic 4 Register
+ */
+union bdk_ocx_lnex_stat04
+{
+ uint64_t u;
+ struct bdk_ocx_lnex_stat04_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_18_63 : 46;
+ uint64_t bad_64b67b_cnt : 18; /**< [ 17: 0](RO/H) Number of bad 64B/67B words, meaning bit 65 or 64 has been corrupted. Saturates. Interrupt
+ on saturation if OCX_LNE(0..23)_INT_EN[STAT_CNT_OVFL] = 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t bad_64b67b_cnt : 18; /**< [ 17: 0](RO/H) Number of bad 64B/67B words, meaning bit 65 or 64 has been corrupted. Saturates. Interrupt
+ on saturation if OCX_LNE(0..23)_INT_EN[STAT_CNT_OVFL] = 1. */
+ uint64_t reserved_18_63 : 46;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_lnex_stat04_s cn; */
+};
+typedef union bdk_ocx_lnex_stat04 bdk_ocx_lnex_stat04_t;
+
+static inline uint64_t BDK_OCX_LNEX_STAT04(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_LNEX_STAT04(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=23))
+ return 0x87e011008060ll + 0x100ll * ((a) & 0x1f);
+ __bdk_csr_fatal("OCX_LNEX_STAT04", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_LNEX_STAT04(a) bdk_ocx_lnex_stat04_t
+#define bustype_BDK_OCX_LNEX_STAT04(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_LNEX_STAT04(a) "OCX_LNEX_STAT04"
+#define device_bar_BDK_OCX_LNEX_STAT04(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_LNEX_STAT04(a) (a)
+#define arguments_BDK_OCX_LNEX_STAT04(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_lne#_stat05
+ *
+ * OCX Lane Statistic 5 Register
+ */
+union bdk_ocx_lnex_stat05
+{
+ uint64_t u;
+ struct bdk_ocx_lnex_stat05_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_27_63 : 37;
+ uint64_t data_word_cnt : 27; /**< [ 26: 0](RO/H) Number of data words received. Saturates. Interrupt on saturation if
+ OCX_LNE(0..23)_INT_EN[STAT_CNT_OVFL] = 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t data_word_cnt : 27; /**< [ 26: 0](RO/H) Number of data words received. Saturates. Interrupt on saturation if
+ OCX_LNE(0..23)_INT_EN[STAT_CNT_OVFL] = 1. */
+ uint64_t reserved_27_63 : 37;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_lnex_stat05_s cn; */
+};
+typedef union bdk_ocx_lnex_stat05 bdk_ocx_lnex_stat05_t;
+
+static inline uint64_t BDK_OCX_LNEX_STAT05(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_LNEX_STAT05(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=23))
+ return 0x87e011008068ll + 0x100ll * ((a) & 0x1f);
+ __bdk_csr_fatal("OCX_LNEX_STAT05", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_LNEX_STAT05(a) bdk_ocx_lnex_stat05_t
+#define bustype_BDK_OCX_LNEX_STAT05(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_LNEX_STAT05(a) "OCX_LNEX_STAT05"
+#define device_bar_BDK_OCX_LNEX_STAT05(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_LNEX_STAT05(a) (a)
+#define arguments_BDK_OCX_LNEX_STAT05(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_lne#_stat06
+ *
+ * OCX Lane Statistic 6 Register
+ */
+union bdk_ocx_lnex_stat06
+{
+ uint64_t u;
+ struct bdk_ocx_lnex_stat06_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_27_63 : 37;
+ uint64_t cntl_word_cnt : 27; /**< [ 26: 0](RO/H) Number of control words received. Saturates. Interrupt on saturation if
+ OCX_LNE(0..23)_INT_EN[STAT_CNT_OVFL] = 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t cntl_word_cnt : 27; /**< [ 26: 0](RO/H) Number of control words received. Saturates. Interrupt on saturation if
+ OCX_LNE(0..23)_INT_EN[STAT_CNT_OVFL] = 1. */
+ uint64_t reserved_27_63 : 37;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_lnex_stat06_s cn; */
+};
+typedef union bdk_ocx_lnex_stat06 bdk_ocx_lnex_stat06_t;
+
+static inline uint64_t BDK_OCX_LNEX_STAT06(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_LNEX_STAT06(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=23))
+ return 0x87e011008070ll + 0x100ll * ((a) & 0x1f);
+ __bdk_csr_fatal("OCX_LNEX_STAT06", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_LNEX_STAT06(a) bdk_ocx_lnex_stat06_t
+#define bustype_BDK_OCX_LNEX_STAT06(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_LNEX_STAT06(a) "OCX_LNEX_STAT06"
+#define device_bar_BDK_OCX_LNEX_STAT06(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_LNEX_STAT06(a) (a)
+#define arguments_BDK_OCX_LNEX_STAT06(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_lne#_stat07
+ *
+ * OCX Lane Statistic 7 Register
+ */
+union bdk_ocx_lnex_stat07
+{
+ uint64_t u;
+ struct bdk_ocx_lnex_stat07_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_18_63 : 46;
+ uint64_t unkwn_word_cnt : 18; /**< [ 17: 0](RO/H) Number of unknown control words. Saturates. Interrupt on saturation if
+ OCX_LNE(0..23)_INT_EN[STAT_CNT_OVFL] = 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t unkwn_word_cnt : 18; /**< [ 17: 0](RO/H) Number of unknown control words. Saturates. Interrupt on saturation if
+ OCX_LNE(0..23)_INT_EN[STAT_CNT_OVFL] = 1. */
+ uint64_t reserved_18_63 : 46;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_lnex_stat07_s cn; */
+};
+typedef union bdk_ocx_lnex_stat07 bdk_ocx_lnex_stat07_t;
+
+static inline uint64_t BDK_OCX_LNEX_STAT07(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_LNEX_STAT07(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=23))
+ return 0x87e011008078ll + 0x100ll * ((a) & 0x1f);
+ __bdk_csr_fatal("OCX_LNEX_STAT07", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_LNEX_STAT07(a) bdk_ocx_lnex_stat07_t
+#define bustype_BDK_OCX_LNEX_STAT07(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_LNEX_STAT07(a) "OCX_LNEX_STAT07"
+#define device_bar_BDK_OCX_LNEX_STAT07(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_LNEX_STAT07(a) (a)
+#define arguments_BDK_OCX_LNEX_STAT07(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_lne#_stat08
+ *
+ * OCX Lane Statistic 8 Register
+ */
+union bdk_ocx_lnex_stat08
+{
+ uint64_t u;
+ struct bdk_ocx_lnex_stat08_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_18_63 : 46;
+ uint64_t scrm_sync_loss_cnt : 18; /**< [ 17: 0](RO/H) Number of times scrambler synchronization was lost (due to either four
+ consecutive bad sync words or three consecutive scrambler state
+ mismatches). Saturates. Interrupt on saturation if
+ OCX_LNE()_INT_EN[STAT_CNT_OVFL] = 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t scrm_sync_loss_cnt : 18; /**< [ 17: 0](RO/H) Number of times scrambler synchronization was lost (due to either four
+ consecutive bad sync words or three consecutive scrambler state
+ mismatches). Saturates. Interrupt on saturation if
+ OCX_LNE()_INT_EN[STAT_CNT_OVFL] = 1. */
+ uint64_t reserved_18_63 : 46;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_lnex_stat08_s cn; */
+};
+typedef union bdk_ocx_lnex_stat08 bdk_ocx_lnex_stat08_t;
+
+static inline uint64_t BDK_OCX_LNEX_STAT08(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_LNEX_STAT08(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=23))
+ return 0x87e011008080ll + 0x100ll * ((a) & 0x1f);
+ __bdk_csr_fatal("OCX_LNEX_STAT08", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_LNEX_STAT08(a) bdk_ocx_lnex_stat08_t
+#define bustype_BDK_OCX_LNEX_STAT08(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_LNEX_STAT08(a) "OCX_LNEX_STAT08"
+#define device_bar_BDK_OCX_LNEX_STAT08(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_LNEX_STAT08(a) (a)
+#define arguments_BDK_OCX_LNEX_STAT08(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_lne#_stat09
+ *
+ * OCX Lane Statistic 9 Register
+ */
+union bdk_ocx_lnex_stat09
+{
+ uint64_t u;
+ struct bdk_ocx_lnex_stat09_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_18_63 : 46;
+ uint64_t scrm_match_cnt : 18; /**< [ 17: 0](RO/H) Number of scrambler state matches received. Saturates. Interrupt on saturation if
+ OCX_LNE(0..23)_INT_EN[STAT_CNT_OVFL] = 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t scrm_match_cnt : 18; /**< [ 17: 0](RO/H) Number of scrambler state matches received. Saturates. Interrupt on saturation if
+ OCX_LNE(0..23)_INT_EN[STAT_CNT_OVFL] = 1. */
+ uint64_t reserved_18_63 : 46;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_lnex_stat09_s cn; */
+};
+typedef union bdk_ocx_lnex_stat09 bdk_ocx_lnex_stat09_t;
+
+static inline uint64_t BDK_OCX_LNEX_STAT09(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_LNEX_STAT09(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=23))
+ return 0x87e011008088ll + 0x100ll * ((a) & 0x1f);
+ __bdk_csr_fatal("OCX_LNEX_STAT09", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_LNEX_STAT09(a) bdk_ocx_lnex_stat09_t
+#define bustype_BDK_OCX_LNEX_STAT09(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_LNEX_STAT09(a) "OCX_LNEX_STAT09"
+#define device_bar_BDK_OCX_LNEX_STAT09(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_LNEX_STAT09(a) (a)
+#define arguments_BDK_OCX_LNEX_STAT09(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_lne#_stat10
+ *
+ * OCX Lane Statistic 10 Register
+ */
+union bdk_ocx_lnex_stat10
+{
+ uint64_t u;
+ struct bdk_ocx_lnex_stat10_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_18_63 : 46;
+ uint64_t skipw_good_cnt : 18; /**< [ 17: 0](RO/H) Number of good skip words. Saturates. Interrupt on saturation if
+ OCX_LNE(0..23)_INT_EN[STAT_CNT_OVFL] = 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t skipw_good_cnt : 18; /**< [ 17: 0](RO/H) Number of good skip words. Saturates. Interrupt on saturation if
+ OCX_LNE(0..23)_INT_EN[STAT_CNT_OVFL] = 1. */
+ uint64_t reserved_18_63 : 46;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_lnex_stat10_s cn; */
+};
+typedef union bdk_ocx_lnex_stat10 bdk_ocx_lnex_stat10_t;
+
+static inline uint64_t BDK_OCX_LNEX_STAT10(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_LNEX_STAT10(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=23))
+ return 0x87e011008090ll + 0x100ll * ((a) & 0x1f);
+ __bdk_csr_fatal("OCX_LNEX_STAT10", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_LNEX_STAT10(a) bdk_ocx_lnex_stat10_t
+#define bustype_BDK_OCX_LNEX_STAT10(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_LNEX_STAT10(a) "OCX_LNEX_STAT10"
+#define device_bar_BDK_OCX_LNEX_STAT10(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_LNEX_STAT10(a) (a)
+#define arguments_BDK_OCX_LNEX_STAT10(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_lne#_stat11
+ *
+ * OCX Lane Statistic 11 Register
+ */
+union bdk_ocx_lnex_stat11
+{
+ uint64_t u;
+ struct bdk_ocx_lnex_stat11_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_27_63 : 37;
+ uint64_t crc32_err_cnt : 27; /**< [ 26: 0](RO/H) Number of errors in the lane CRC. Saturates. Interrupt on saturation if
+ OCX_LNE(0..23)_INT_EN[STAT_CNT_OVFL] = 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t crc32_err_cnt : 27; /**< [ 26: 0](RO/H) Number of errors in the lane CRC. Saturates. Interrupt on saturation if
+ OCX_LNE(0..23)_INT_EN[STAT_CNT_OVFL] = 1. */
+ uint64_t reserved_27_63 : 37;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_lnex_stat11_s cn; */
+};
+typedef union bdk_ocx_lnex_stat11 bdk_ocx_lnex_stat11_t;
+
+static inline uint64_t BDK_OCX_LNEX_STAT11(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_LNEX_STAT11(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=23))
+ return 0x87e011008098ll + 0x100ll * ((a) & 0x1f);
+ __bdk_csr_fatal("OCX_LNEX_STAT11", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_LNEX_STAT11(a) bdk_ocx_lnex_stat11_t
+#define bustype_BDK_OCX_LNEX_STAT11(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_LNEX_STAT11(a) "OCX_LNEX_STAT11"
+#define device_bar_BDK_OCX_LNEX_STAT11(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_LNEX_STAT11(a) (a)
+#define arguments_BDK_OCX_LNEX_STAT11(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_lne#_stat12
+ *
+ * OCX Lane Statistic 12 Register
+ */
+union bdk_ocx_lnex_stat12
+{
+ uint64_t u;
+ struct bdk_ocx_lnex_stat12_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_27_63 : 37;
+ uint64_t crc32_match_cnt : 27; /**< [ 26: 0](RO/H) Number of CRC32 matches received. Saturates. Interrupt on saturation if
+ OCX_LNE(0..23)_INT_EN[STAT_CNT_OVFL] = 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t crc32_match_cnt : 27; /**< [ 26: 0](RO/H) Number of CRC32 matches received. Saturates. Interrupt on saturation if
+ OCX_LNE(0..23)_INT_EN[STAT_CNT_OVFL] = 1. */
+ uint64_t reserved_27_63 : 37;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_lnex_stat12_s cn; */
+};
+typedef union bdk_ocx_lnex_stat12 bdk_ocx_lnex_stat12_t;
+
+static inline uint64_t BDK_OCX_LNEX_STAT12(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_LNEX_STAT12(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=23))
+ return 0x87e0110080a0ll + 0x100ll * ((a) & 0x1f);
+ __bdk_csr_fatal("OCX_LNEX_STAT12", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_LNEX_STAT12(a) bdk_ocx_lnex_stat12_t
+#define bustype_BDK_OCX_LNEX_STAT12(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_LNEX_STAT12(a) "OCX_LNEX_STAT12"
+#define device_bar_BDK_OCX_LNEX_STAT12(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_LNEX_STAT12(a) (a)
+#define arguments_BDK_OCX_LNEX_STAT12(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_lne#_stat13
+ *
+ * OCX Lane Statistic 13 Register
+ */
+union bdk_ocx_lnex_stat13
+{
+ uint64_t u;
+ struct bdk_ocx_lnex_stat13_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t trn_bad_cnt : 16; /**< [ 15: 0](RO/H) Number of training frames received with an invalid control channel. Saturates. Interrupt
+ on saturation if OCX_LNE(0..23)_INT_EN[STAT_CNT_OVFL] = 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t trn_bad_cnt : 16; /**< [ 15: 0](RO/H) Number of training frames received with an invalid control channel. Saturates. Interrupt
+ on saturation if OCX_LNE(0..23)_INT_EN[STAT_CNT_OVFL] = 1. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_lnex_stat13_s cn; */
+};
+typedef union bdk_ocx_lnex_stat13 bdk_ocx_lnex_stat13_t;
+
+static inline uint64_t BDK_OCX_LNEX_STAT13(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_LNEX_STAT13(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=23))
+ return 0x87e0110080a8ll + 0x100ll * ((a) & 0x1f);
+ __bdk_csr_fatal("OCX_LNEX_STAT13", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_LNEX_STAT13(a) bdk_ocx_lnex_stat13_t
+#define bustype_BDK_OCX_LNEX_STAT13(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_LNEX_STAT13(a) "OCX_LNEX_STAT13"
+#define device_bar_BDK_OCX_LNEX_STAT13(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_LNEX_STAT13(a) (a)
+#define arguments_BDK_OCX_LNEX_STAT13(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_lne#_stat14
+ *
+ * OCX Lane Statistic 14 Register
+ */
+union bdk_ocx_lnex_stat14
+{
+ uint64_t u;
+ struct bdk_ocx_lnex_stat14_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t trn_prbs_bad_cnt : 16; /**< [ 15: 0](RO/H) Number of training frames received with a bad PRBS pattern. Saturates. Interrupt on
+ saturation if OCX_LNE(0..23)_INT_EN[STAT_CNT_OVFL] = 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t trn_prbs_bad_cnt : 16; /**< [ 15: 0](RO/H) Number of training frames received with a bad PRBS pattern. Saturates. Interrupt on
+ saturation if OCX_LNE(0..23)_INT_EN[STAT_CNT_OVFL] = 1. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_lnex_stat14_s cn; */
+};
+typedef union bdk_ocx_lnex_stat14 bdk_ocx_lnex_stat14_t;
+
+static inline uint64_t BDK_OCX_LNEX_STAT14(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_LNEX_STAT14(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=23))
+ return 0x87e0110080b0ll + 0x100ll * ((a) & 0x1f);
+ __bdk_csr_fatal("OCX_LNEX_STAT14", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_LNEX_STAT14(a) bdk_ocx_lnex_stat14_t
+#define bustype_BDK_OCX_LNEX_STAT14(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_LNEX_STAT14(a) "OCX_LNEX_STAT14"
+#define device_bar_BDK_OCX_LNEX_STAT14(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_LNEX_STAT14(a) (a)
+#define arguments_BDK_OCX_LNEX_STAT14(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_lne#_status
+ *
+ * OCX Lane Status Register
+ */
+union bdk_ocx_lnex_status
+{
+ uint64_t u;
+ struct bdk_ocx_lnex_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_3_63 : 61;
+ uint64_t rx_trn_val : 1; /**< [ 2: 2](R/W/H) The control channel of a link-training frame was received without errors. */
+ uint64_t rx_scrm_sync : 1; /**< [ 1: 1](RO/H) RX scrambler synchronization status. Set to 1 when synchronization achieved. */
+ uint64_t rx_bdry_sync : 1; /**< [ 0: 0](RO/H) RX word boundary sync status. Set to 1 when synchronization achieved. */
+#else /* Word 0 - Little Endian */
+ uint64_t rx_bdry_sync : 1; /**< [ 0: 0](RO/H) RX word boundary sync status. Set to 1 when synchronization achieved. */
+ uint64_t rx_scrm_sync : 1; /**< [ 1: 1](RO/H) RX scrambler synchronization status. Set to 1 when synchronization achieved. */
+ uint64_t rx_trn_val : 1; /**< [ 2: 2](R/W/H) The control channel of a link-training frame was received without errors. */
+ uint64_t reserved_3_63 : 61;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_lnex_status_s cn; */
+};
+typedef union bdk_ocx_lnex_status bdk_ocx_lnex_status_t;
+
+static inline uint64_t BDK_OCX_LNEX_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_LNEX_STATUS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=23))
+ return 0x87e011008008ll + 0x100ll * ((a) & 0x1f);
+ __bdk_csr_fatal("OCX_LNEX_STATUS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_LNEX_STATUS(a) bdk_ocx_lnex_status_t
+#define bustype_BDK_OCX_LNEX_STATUS(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_LNEX_STATUS(a) "OCX_LNEX_STATUS"
+#define device_bar_BDK_OCX_LNEX_STATUS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_LNEX_STATUS(a) (a)
+#define arguments_BDK_OCX_LNEX_STATUS(a) (a),-1,-1,-1
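+
+/*
+ * Usage sketch (editorial illustration, not part of the BDK): check
+ * whether a lane has achieved both word-boundary and scrambler
+ * synchronization, using example_csr_read() from the sketch above.
+ */
+static inline int example_ocx_lane_is_synced(unsigned long lane)
+{
+    bdk_ocx_lnex_status_t status;
+    status.u = example_csr_read(BDK_OCX_LNEX_STATUS(lane));
+    return status.s.rx_bdry_sync && status.s.rx_scrm_sync;
+}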
+
+/**
+ * Register (RSL) ocx_lne#_sts_msg
+ *
+ * OCX Lane Status Message Register
+ */
+union bdk_ocx_lnex_sts_msg
+{
+ uint64_t u;
+ struct bdk_ocx_lnex_sts_msg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t rx_meta_val : 1; /**< [ 63: 63](RO/H) Meta-data received in the diagnostic word (per-lane) is valid. */
+ uint64_t reserved_37_62 : 26;
+ uint64_t rx_meta_dat : 3; /**< [ 36: 34](RO/H) Meta-data received in the diagnostic word (per-lane). */
+ uint64_t rx_lne_stat : 1; /**< [ 33: 33](RO/H) Lane status received in the diagnostic word (per-lane). Set to 1 when healthy
+ (according to the Interlaken spec). */
+ uint64_t rx_lnk_stat : 1; /**< [ 32: 32](RO/H) Link status received in the diagnostic word (per-lane). Set to 1 when healthy
+ (according to the Interlaken spec). */
+ uint64_t reserved_5_31 : 27;
+ uint64_t tx_meta_dat : 3; /**< [ 4: 2](RO/H) Meta-data transmitted in the diagnostic word (per-lane). */
+ uint64_t tx_lne_stat : 1; /**< [ 1: 1](R/W/H) Lane status transmitted in the diagnostic word (per-lane). Set to 1 means
+ healthy (according to the Interlaken spec). */
+ uint64_t tx_lnk_stat : 1; /**< [ 0: 0](R/W/H) Link status transmitted in the diagnostic word (per-lane). Set to 1 means
+ healthy (according to the Interlaken spec). */
+#else /* Word 0 - Little Endian */
+ uint64_t tx_lnk_stat : 1; /**< [ 0: 0](R/W/H) Link status transmitted in the diagnostic word (per-lane). Set to 1 means
+ healthy (according to the Interlaken spec). */
+ uint64_t tx_lne_stat : 1; /**< [ 1: 1](R/W/H) Lane status transmitted in the diagnostic word (per-lane). Set to 1 means
+ healthy (according to the Interlaken spec). */
+ uint64_t tx_meta_dat : 3; /**< [ 4: 2](RO/H) Meta-data transmitted in the diagnostic word (per-lane). */
+ uint64_t reserved_5_31 : 27;
+ uint64_t rx_lnk_stat : 1; /**< [ 32: 32](RO/H) Link status received in the diagnostic word (per-lane). Set to 1 when healthy
+ (according to the Interlaken spec). */
+ uint64_t rx_lne_stat : 1; /**< [ 33: 33](RO/H) Lane status received in the diagnostic word (per-lane). Set to 1 when healthy
+ (according to the Interlaken spec). */
+ uint64_t rx_meta_dat : 3; /**< [ 36: 34](RO/H) Meta-data received in the diagnostic word (per-lane). */
+ uint64_t reserved_37_62 : 26;
+ uint64_t rx_meta_val : 1; /**< [ 63: 63](RO/H) Meta-data received in the diagnostic word (per-lane) is valid. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_lnex_sts_msg_s cn; */
+};
+typedef union bdk_ocx_lnex_sts_msg bdk_ocx_lnex_sts_msg_t;
+
+static inline uint64_t BDK_OCX_LNEX_STS_MSG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_LNEX_STS_MSG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=23))
+ return 0x87e011008010ll + 0x100ll * ((a) & 0x1f);
+ __bdk_csr_fatal("OCX_LNEX_STS_MSG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_LNEX_STS_MSG(a) bdk_ocx_lnex_sts_msg_t
+#define bustype_BDK_OCX_LNEX_STS_MSG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_LNEX_STS_MSG(a) "OCX_LNEX_STS_MSG"
+#define device_bar_BDK_OCX_LNEX_STS_MSG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_LNEX_STS_MSG(a) (a)
+#define arguments_BDK_OCX_LNEX_STS_MSG(a) (a),-1,-1,-1
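+
+/*
+ * Usage sketch (editorial illustration, not part of the BDK): report
+ * whether the received Interlaken diagnostic word marks both the link and
+ * this lane as healthy, using example_csr_read() from the sketch above.
+ */
+static inline int example_ocx_lane_partner_healthy(unsigned long lane)
+{
+    bdk_ocx_lnex_sts_msg_t msg;
+    msg.u = example_csr_read(BDK_OCX_LNEX_STS_MSG(lane));
+    return msg.s.rx_lnk_stat && msg.s.rx_lne_stat;
+}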
+
+/**
+ * Register (RSL) ocx_lne#_trn_ctl
+ *
+ * OCX Lane Training Control Register
+ */
+union bdk_ocx_lnex_trn_ctl
+{
+ uint64_t u;
+ struct bdk_ocx_lnex_trn_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t lock : 1; /**< [ 3: 3](RO/H) Training frame boundary locked. */
+ uint64_t done : 1; /**< [ 2: 2](R/W/H) Training done. For diagnostic use only; may be written to 1 to force training done. */
+ uint64_t ena : 1; /**< [ 1: 1](RO/H) Training enable status. When set, the lane is currently training. This is a status
+ bit used for debug; it reads as zero when training has completed or when the QLM
+ is not ready for training. */
+ uint64_t eie_detect : 1; /**< [ 0: 0](RO/H) Electrical idle exit (EIE) detected. */
+#else /* Word 0 - Little Endian */
+ uint64_t eie_detect : 1; /**< [ 0: 0](RO/H) Electrical idle exit (EIE) detected. */
+ uint64_t ena : 1; /**< [ 1: 1](RO/H) Training enable status. When set, the lane is currently training. This is a status
+ bit used for debug; it reads as zero when training has completed or when the QLM
+ is not ready for training. */
+ uint64_t done : 1; /**< [ 2: 2](R/W/H) Training done. For diagnostic use only; may be written to 1 to force training done. */
+ uint64_t lock : 1; /**< [ 3: 3](RO/H) Training frame boundary locked. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_lnex_trn_ctl_s cn; */
+};
+typedef union bdk_ocx_lnex_trn_ctl bdk_ocx_lnex_trn_ctl_t;
+
+static inline uint64_t BDK_OCX_LNEX_TRN_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_LNEX_TRN_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=23))
+ return 0x87e0110080d0ll + 0x100ll * ((a) & 0x1f);
+ __bdk_csr_fatal("OCX_LNEX_TRN_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_LNEX_TRN_CTL(a) bdk_ocx_lnex_trn_ctl_t
+#define bustype_BDK_OCX_LNEX_TRN_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_LNEX_TRN_CTL(a) "OCX_LNEX_TRN_CTL"
+#define device_bar_BDK_OCX_LNEX_TRN_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_LNEX_TRN_CTL(a) (a)
+#define arguments_BDK_OCX_LNEX_TRN_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_lne#_trn_ld
+ *
+ * OCX Lane Training Local Device Register
+ */
+union bdk_ocx_lnex_trn_ld
+{
+ uint64_t u;
+ struct bdk_ocx_lnex_trn_ld_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t lp_manual : 1; /**< [ 63: 63](R/W) Allow software to manually manipulate the local device CU/SR fields by ignoring hardware updates. */
+ uint64_t reserved_49_62 : 14;
+ uint64_t ld_cu_val : 1; /**< [ 48: 48](RO/H) Local device coefficient update field valid. */
+ uint64_t ld_cu_dat : 16; /**< [ 47: 32](R/W/H) Local device coefficient update field data.
+ The format of this field is BGX_SPU_BR_TRAIN_CUP_S. */
+ uint64_t reserved_17_31 : 15;
+ uint64_t ld_sr_val : 1; /**< [ 16: 16](RO/H) Local device status report field valid. */
+ uint64_t ld_sr_dat : 16; /**< [ 15: 0](R/W/H) Local device status report field data.
+ The format of this field is BGX_SPU_BR_TRAIN_REP_S. */
+#else /* Word 0 - Little Endian */
+ uint64_t ld_sr_dat : 16; /**< [ 15: 0](R/W/H) Local device status report field data.
+ The format of this field is BGX_SPU_BR_TRAIN_REP_S. */
+ uint64_t ld_sr_val : 1; /**< [ 16: 16](RO/H) Local device status report field valid. */
+ uint64_t reserved_17_31 : 15;
+ uint64_t ld_cu_dat : 16; /**< [ 47: 32](R/W/H) Local device coefficient update field data.
+ The format of this field is BGX_SPU_BR_TRAIN_CUP_S. */
+ uint64_t ld_cu_val : 1; /**< [ 48: 48](RO/H) Local device coefficient update field valid. */
+ uint64_t reserved_49_62 : 14;
+ uint64_t lp_manual : 1; /**< [ 63: 63](R/W) Allow software to manually manipulate the local device CU/SR fields by ignoring hardware updates. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_lnex_trn_ld_s cn; */
+};
+typedef union bdk_ocx_lnex_trn_ld bdk_ocx_lnex_trn_ld_t;
+
+static inline uint64_t BDK_OCX_LNEX_TRN_LD(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_LNEX_TRN_LD(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=23))
+ return 0x87e0110080c0ll + 0x100ll * ((a) & 0x1f);
+ __bdk_csr_fatal("OCX_LNEX_TRN_LD", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_LNEX_TRN_LD(a) bdk_ocx_lnex_trn_ld_t
+#define bustype_BDK_OCX_LNEX_TRN_LD(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_LNEX_TRN_LD(a) "OCX_LNEX_TRN_LD"
+#define device_bar_BDK_OCX_LNEX_TRN_LD(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_LNEX_TRN_LD(a) (a)
+#define arguments_BDK_OCX_LNEX_TRN_LD(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_lne#_trn_lp
+ *
+ * OCX Lane Training Link Partner Register
+ */
+union bdk_ocx_lnex_trn_lp
+{
+ uint64_t u;
+ struct bdk_ocx_lnex_trn_lp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t lp_cu_val : 1; /**< [ 48: 48](RO/H) Link partner coefficient update field valid. */
+ uint64_t lp_cu_dat : 16; /**< [ 47: 32](RO/H) Link partner coefficient update field data.
+ The format of this field is BGX_SPU_BR_TRAIN_CUP_S. */
+ uint64_t reserved_17_31 : 15;
+ uint64_t lp_sr_val : 1; /**< [ 16: 16](RO/H) Link partner status report field valid. */
+ uint64_t lp_sr_dat : 16; /**< [ 15: 0](RO/H) Link partner status report field data.
+ The format of this field is BGX_SPU_BR_TRAIN_REP_S. */
+#else /* Word 0 - Little Endian */
+ uint64_t lp_sr_dat : 16; /**< [ 15: 0](RO/H) Link partner status report field data.
+ The format of this field is BGX_SPU_BR_TRAIN_REP_S. */
+ uint64_t lp_sr_val : 1; /**< [ 16: 16](RO/H) Link partner status report field valid. */
+ uint64_t reserved_17_31 : 15;
+ uint64_t lp_cu_dat : 16; /**< [ 47: 32](RO/H) Link partner coefficient update field data.
+ The format of this field is BGX_SPU_BR_TRAIN_CUP_S. */
+ uint64_t lp_cu_val : 1; /**< [ 48: 48](RO/H) Link partner coefficient update field valid. */
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_lnex_trn_lp_s cn; */
+};
+typedef union bdk_ocx_lnex_trn_lp bdk_ocx_lnex_trn_lp_t;
+
+static inline uint64_t BDK_OCX_LNEX_TRN_LP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_LNEX_TRN_LP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=23))
+ return 0x87e0110080c8ll + 0x100ll * ((a) & 0x1f);
+ __bdk_csr_fatal("OCX_LNEX_TRN_LP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_LNEX_TRN_LP(a) bdk_ocx_lnex_trn_lp_t
+#define bustype_BDK_OCX_LNEX_TRN_LP(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_LNEX_TRN_LP(a) "OCX_LNEX_TRN_LP"
+#define device_bar_BDK_OCX_LNEX_TRN_LP(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_LNEX_TRN_LP(a) (a)
+#define arguments_BDK_OCX_LNEX_TRN_LP(a) (a),-1,-1,-1
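+
+/*
+ * Usage sketch (editorial illustration, not part of the BDK): poll a
+ * lane's training state from OCX_LNE()_TRN_CTL, using example_csr_read()
+ * from the sketch above. [ENA] reads as zero once training completes, so
+ * [DONE] is the bit to check.
+ */
+static inline int example_ocx_lane_training_done(unsigned long lane)
+{
+    bdk_ocx_lnex_trn_ctl_t trn;
+    trn.u = example_csr_read(BDK_OCX_LNEX_TRN_CTL(lane));
+    return trn.s.done;
+}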
+
+/**
+ * Register (RSL) ocx_lne_dbg
+ *
+ * OCX Lane Debug Register
+ */
+union bdk_ocx_lne_dbg
+{
+ uint64_t u;
+ struct bdk_ocx_lne_dbg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t timeout : 24; /**< [ 63: 40](R/W/H) Number of core-clock cycles (RCLKs) used by the bad lane timer. If this timer
+ expires before all enabled lanes can be made ready, then any lane that is not
+ ready is disabled via OCX_QLM()_CFG[SER_LANE_BAD]. For diagnostic use only. */
+ uint64_t reserved_38_39 : 2;
+ uint64_t frc_stats_ena : 1; /**< [ 37: 37](R/W) Enable FRC statistic counters. */
+ uint64_t rx_dis_psh_skip : 1; /**< [ 36: 36](R/W/H) When [RX_DIS_PSH_SKIP] = 0, skip words are destriped. When [RX_DIS_PSH_SKIP] =
+ 1, skip words are discarded in the lane logic. If the lane is in internal
+ loopback mode, [RX_DIS_PSH_SKIP] is ignored and skip words are always discarded
+ in the lane logic. */
+ uint64_t rx_mfrm_len : 2; /**< [ 35: 34](R/W/H) The quantity of data received on each lane including one sync word, scrambler state,
+ diagnostic word, zero or more skip words, and the data payload.
+ 0x0 = 2048 words.
+ 0x1 = 1024 words.
+ 0x2 = 512 words.
+ 0x3 = 128 words. */
+ uint64_t rx_dis_ukwn : 1; /**< [ 33: 33](R/W) Disable normal response to unknown words. They are still logged but do not cause an error
+ to all open channels. */
+ uint64_t rx_dis_scram : 1; /**< [ 32: 32](R/W) Disable lane scrambler. */
+ uint64_t reserved_5_31 : 27;
+ uint64_t tx_lane_rev : 1; /**< [ 4: 4](R/W) TX lane reversal. When enabled, lane destriping is performed from the most significant
+ lane enabled to the least significant lane enabled. [QLM_SELECT] must be 0x0 before
+ changing [LANE_REV]. */
+ uint64_t tx_mfrm_len : 2; /**< [ 3: 2](R/W/H) The quantity of data sent on each lane including one sync word, scrambler state,
+ diagnostic word, zero or more skip words, and the data payload.
+ 0x0 = 2048 words.
+ 0x1 = 1024 words.
+ 0x2 = 512 words.
+ 0x3 = 128 words. */
+ uint64_t tx_dis_dispr : 1; /**< [ 1: 1](R/W) Disparity disable. */
+ uint64_t tx_dis_scram : 1; /**< [ 0: 0](R/W) Scrambler disable. */
+#else /* Word 0 - Little Endian */
+ uint64_t tx_dis_scram : 1; /**< [ 0: 0](R/W) Scrambler disable. */
+ uint64_t tx_dis_dispr : 1; /**< [ 1: 1](R/W) Disparity disable. */
+ uint64_t tx_mfrm_len : 2; /**< [ 3: 2](R/W/H) The quantity of data sent on each lane including one sync word, scrambler state,
+ diagnostic word, zero or more skip words, and the data payload.
+ 0x0 = 2048 words.
+ 0x1 = 1024 words.
+ 0x2 = 512 words.
+ 0x3 = 128 words. */
+ uint64_t tx_lane_rev : 1; /**< [ 4: 4](R/W) TX lane reversal. When enabled, lane destriping is performed from the most significant
+ lane enabled to the least significant lane enabled. [QLM_SELECT] must be 0x0 before
+ changing [LANE_REV]. */
+ uint64_t reserved_5_31 : 27;
+ uint64_t rx_dis_scram : 1; /**< [ 32: 32](R/W) Disable lane scrambler. */
+ uint64_t rx_dis_ukwn : 1; /**< [ 33: 33](R/W) Disable normal response to unknown words. They are still logged but do not cause an error
+ to all open channels. */
+ uint64_t rx_mfrm_len : 2; /**< [ 35: 34](R/W/H) The quantity of data received on each lane including one sync word, scrambler state,
+ diagnostic word, zero or more skip words, and the data payload.
+ 0x0 = 2048 words.
+ 0x1 = 1024 words.
+ 0x2 = 512 words.
+ 0x3 = 128 words. */
+ uint64_t rx_dis_psh_skip : 1; /**< [ 36: 36](R/W/H) When [RX_DIS_PSH_SKIP] = 0, skip words are destriped. When [RX_DIS_PSH_SKIP] =
+ 1, skip words are discarded in the lane logic. If the lane is in internal
+ loopback mode, [RX_DIS_PSH_SKIP] is ignored and skip words are always discarded
+ in the lane logic. */
+ uint64_t frc_stats_ena : 1; /**< [ 37: 37](R/W) Enable FRC statistic counters. */
+ uint64_t reserved_38_39 : 2;
+ uint64_t timeout : 24; /**< [ 63: 40](R/W/H) Number of core-clock cycles (RCLKs) used by the bad lane timer. If this timer
+ expires before all enabled lanes can be made ready, then any lane that is not
+ ready is disabled via OCX_QLM()_CFG[SER_LANE_BAD]. For diagnostic use only. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_lne_dbg_s cn; */
+};
+typedef union bdk_ocx_lne_dbg bdk_ocx_lne_dbg_t;
+
+#define BDK_OCX_LNE_DBG BDK_OCX_LNE_DBG_FUNC()
+static inline uint64_t BDK_OCX_LNE_DBG_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_LNE_DBG_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x87e01100ff00ll;
+ __bdk_csr_fatal("OCX_LNE_DBG", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_LNE_DBG bdk_ocx_lne_dbg_t
+#define bustype_BDK_OCX_LNE_DBG BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_LNE_DBG "OCX_LNE_DBG"
+#define device_bar_BDK_OCX_LNE_DBG 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_LNE_DBG 0
+#define arguments_BDK_OCX_LNE_DBG -1,-1,-1,-1
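+
+/*
+ * Usage sketch (editorial illustration, not part of the BDK): decode the
+ * two-bit meta-frame length encoding shared by OCX_LNE_DBG[RX_MFRM_LEN]
+ * and [TX_MFRM_LEN] into a word count.
+ */
+static inline unsigned int example_ocx_mfrm_len_words(unsigned int encoding)
+{
+    switch (encoding & 0x3)
+    {
+        case 0x0: return 2048;
+        case 0x1: return 1024;
+        case 0x2: return 512;
+        default:  return 128; /* 0x3 */
+    }
+}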
+
+/**
+ * Register (RSL) ocx_lnk#_cfg
+ *
+ * OCX Link 0-2 Configuration Registers
+ */
+union bdk_ocx_lnkx_cfg
+{
+ uint64_t u;
+ struct bdk_ocx_lnkx_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_54_63 : 10;
+ uint64_t qlm_manual : 6; /**< [ 53: 48](R/W/H) QLM manual mask, where each bit corresponds to a QLM. A link automatically selects a QLM
+ unless either QLM_MANUAL[QLM] = 1 or a QLM is not eligible for the link.
+
+ _ QLM_MANUAL\<0\> = LNE(0..3) = QLM0.
+ _ QLM_MANUAL\<1\> = LNE(7..4) = QLM1.
+ _ QLM_MANUAL\<2\> = LNE(11..8) = QLM2.
+ _ QLM_MANUAL\<3\> = LNE(15..12) = QLM3.
+ _ QLM_MANUAL\<4\> = LNE(19..16) = QLM4.
+ _ QLM_MANUAL\<5\> = LNE(23..20) = QLM5.
+ _ LINK 0 may not select QLM4, QLM5.
+ _ LINK 1 may not select QLM0, QLM1, QLM4, QLM5.
+ _ LINK 2 may not select QLM0, QLM1.
+
+ During a cold reset, this field is initialized to 0x3F when OCI_SPD\<3:0\> == 0xF.
+
+ During a cold reset, this field is initialized to 0x0 when OCI_SPD\<3:0\> != 0xF.
+
+ This field is not modified by hardware at any other time.
+
+ This field is not affected by soft or warm reset. */
+ uint64_t reserved_38_47 : 10;
+ uint64_t qlm_select : 6; /**< [ 37: 32](R/W/H) QLM select mask, where each bit corresponds to a QLM. A link will transmit/receive data
+ using only the selected QLMs. A link is enabled if any QLM is selected. The same QLM
+ should not be selected for multiple links.
+ [LANE_REV] has no effect on this mapping.
+
+ _ QLM_SELECT\<0\> = LNE(0..3) = QLM0.
+ _ QLM_SELECT\<1\> = LNE(7..4) = QLM1.
+ _ QLM_SELECT\<2\> = LNE(11..8) = QLM2.
+ _ QLM_SELECT\<3\> = LNE(15..12) = QLM3.
+ _ QLM_SELECT\<4\> = LNE(19..16) = QLM4.
+ _ QLM_SELECT\<5\> = LNE(23..20) = QLM5.
+ _ LINK 0 may not select QLM4, QLM5.
+ _ LINK 1 may not select QLM0, QLM1, QLM4, QLM5.
+ _ LINK 2 may not select QLM0, QLM1.
+ _ LINK 2 may not select QLM2 or QLM3 when LINK1 selects any QLM.
+ _ LINK 0 may not select QLM2 or QLM3 when LINK1 selects any QLM.
+ _ LINK 0 automatically selects QLM0 when [QLM_MANUAL]\<0\>=0.
+ _ LINK 0 automatically selects QLM1 when [QLM_MANUAL]\<1\>=0.
+ _ LINK 0 automatically selects QLM2 when [QLM_MANUAL]\<2\>=0 and OCX_QLM2_CFG[SER_LOCAL]=0.
+ _ LINK 1 automatically selects QLM2 when [QLM_MANUAL]\<2\>=0 and OCX_QLM2_CFG[SER_LOCAL]=1.
+ _ LINK 1 automatically selects QLM3 when [QLM_MANUAL]\<3\>=0 and OCX_QLM3_CFG[SER_LOCAL]=1.
+ _ LINK 2 automatically selects QLM3 when [QLM_MANUAL]\<3\>=0 and OCX_QLM3_CFG[SER_LOCAL]=0.
+ _ LINK 2 automatically selects QLM4 when [QLM_MANUAL]\<4\>=0.
+ _ LINK 2 automatically selects QLM5 when [QLM_MANUAL]\<5\>=0.
+
+ A link with [QLM_SELECT] = 0x0 is invalid and will never exchange traffic with the
+ link partner. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t data_rate : 13; /**< [ 28: 16](R/W/H) The number of core-clock cycles (RCLKs) to transmit 32 words, where each word is
+ 67 bits. Hardware automatically calculates a conservative value for this
+ field. Software can override the calculation by writing
+ [DATA_RATE] = roundup((67 * RCLK / GBAUD) * 32). */
+ uint64_t low_delay : 6; /**< [ 15: 10](R/W) The delay before reacting to a lane low data indication, as a multiple of 64 core-clock
+ cycles (RCLKs). */
+ uint64_t lane_align_dis : 1; /**< [ 9: 9](R/W/H) Disable the RX lane alignment. */
+ uint64_t lane_rev : 1; /**< [ 8: 8](R/W/H) RX lane reversal. When enabled, lane destriping is performed from the most significant
+ lane enabled to the least significant lane enabled. [QLM_SELECT] must be 0x0 before
+ changing [LANE_REV]. */
+ uint64_t lane_rev_auto : 1; /**< [ 7: 7](RAZ) Reserved. */
+ uint64_t reserved_0_6 : 7;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_6 : 7;
+ uint64_t lane_rev_auto : 1; /**< [ 7: 7](RAZ) Reserved. */
+ uint64_t lane_rev : 1; /**< [ 8: 8](R/W/H) RX lane reversal. When enabled, lane destriping is performed from the most significant
+ lane enabled to the least significant lane enabled. [QLM_SELECT] must be 0x0 before
+ changing [LANE_REV]. */
+ uint64_t lane_align_dis : 1; /**< [ 9: 9](R/W/H) Disable the RX lane alignment. */
+ uint64_t low_delay : 6; /**< [ 15: 10](R/W) The delay before reacting to a lane low data indication, as a multiple of 64 core-clock
+ cycles (RCLKs). */
+ uint64_t data_rate : 13; /**< [ 28: 16](R/W/H) The number of core-clock cycles (RCLKs) to transmit 32 words, where each word is
+ 67 bits. Hardware automatically calculates a conservative value for this
+ field. Software can override the calculation by writing
+ [DATA_RATE] = roundup((67 * RCLK / GBAUD) * 32). */
+ uint64_t reserved_29_31 : 3;
+ uint64_t qlm_select : 6; /**< [ 37: 32](R/W/H) QLM select mask, where each bit corresponds to a QLM. A link will transmit/receive data
+ using only the selected QLMs. A link is enabled if any QLM is selected. The same QLM
+ should not be selected for multiple links.
+ [LANE_REV] has no effect on this mapping.
+
+ _ QLM_SELECT\<0\> = LNE(0..3) = QLM0.
+ _ QLM_SELECT\<1\> = LNE(7..4) = QLM1.
+ _ QLM_SELECT\<2\> = LNE(11..8) = QLM2.
+ _ QLM_SELECT\<3\> = LNE(15..12) = QLM3.
+ _ QLM_SELECT\<4\> = LNE(19..16) = QLM4.
+ _ QLM_SELECT\<5\> = LNE(23..20) = QLM5.
+ _ LINK 0 may not select QLM4, QLM5.
+ _ LINK 1 may not select QLM0, QLM1, QLM4, QLM5.
+ _ LINK 2 may not select QLM0, QLM1.
+ _ LINK 2 may not select QLM2 or QLM3 when LINK1 selects any QLM.
+ _ LINK 0 may not select QLM2 or QLM3 when LINK1 selects any QLM.
+ _ LINK 0 automatically selects QLM0 when [QLM_MANUAL]\<0\>=0.
+ _ LINK 0 automatically selects QLM1 when [QLM_MANUAL]\<1\>=0.
+ _ LINK 0 automatically selects QLM2 when [QLM_MANUAL]\<2\>=0 and OCX_QLM2_CFG[SER_LOCAL]=0.
+ _ LINK 1 automatically selects QLM2 when [QLM_MANUAL]\<2\>=0 and OCX_QLM2_CFG[SER_LOCAL]=1.
+ _ LINK 1 automatically selects QLM3 when [QLM_MANUAL]\<3\>=0 and OCX_QLM3_CFG[SER_LOCAL]=1.
+ _ LINK 2 automatically selects QLM3 when [QLM_MANUAL]\<3\>=0 and OCX_QLM3_CFG[SER_LOCAL]=0.
+ _ LINK 2 automatically selects QLM4 when [QLM_MANUAL]\<4\>=0.
+ _ LINK 2 automatically selects QLM5 when [QLM_MANUAL]\<5\>=0.
+
+ A link with [QLM_SELECT] = 0x0 is invalid and will never exchange traffic with the
+ link partner. */
+ uint64_t reserved_38_47 : 10;
+ uint64_t qlm_manual : 6; /**< [ 53: 48](R/W/H) QLM manual mask, where each bit corresponds to a QLM. A link automatically selects a QLM
+ unless either QLM_MANUAL[QLM] = 1 or a QLM is not eligible for the link.
+
+ _ QLM_MANUAL\<0\> = LNE(0..3) = QLM0.
+ _ QLM_MANUAL\<1\> = LNE(7..4) = QLM1.
+ _ QLM_MANUAL\<2\> = LNE(11..8) = QLM2.
+ _ QLM_MANUAL\<3\> = LNE(15..12) = QLM3.
+ _ QLM_MANUAL\<4\> = LNE(19..16) = QLM4.
+ _ QLM_MANUAL\<5\> = LNE(23..20) = QLM5.
+ _ LINK 0 may not select QLM4, QLM5.
+ _ LINK 1 may not select QLM0, QLM1, QLM4, QLM5.
+ _ LINK 2 may not select QLM0, QLM1.
+
+ During a cold reset, this field is initialized to 0x3F when OCI_SPD\<3:0\> == 0xF.
+
+ During a cold reset, this field is initialized to 0x0 when OCI_SPD\<3:0\> != 0xF.
+
+ This field is not modified by hardware at any other time.
+
+ This field is not affected by soft or warm reset. */
+ uint64_t reserved_54_63 : 10;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_lnkx_cfg_s cn88xxp1; */
+ struct bdk_ocx_lnkx_cfg_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_54_63 : 10;
+ uint64_t qlm_manual : 6; /**< [ 53: 48](R/W/H) QLM manual mask, where each bit corresponds to a QLM. A link automatically selects a QLM
+ unless either QLM_MANUAL[QLM] = 1 or a QLM is not eligible for the link.
+
+ _ QLM_MANUAL\<0\> = LNE(0..3) = QLM0.
+ _ QLM_MANUAL\<1\> = LNE(7..4) = QLM1.
+ _ QLM_MANUAL\<2\> = LNE(11..8) = QLM2.
+ _ QLM_MANUAL\<3\> = LNE(15..12) = QLM3.
+ _ QLM_MANUAL\<4\> = LNE(19..16) = QLM4.
+ _ QLM_MANUAL\<5\> = LNE(23..20) = QLM5.
+ _ LINK 0 may not select QLM4, QLM5.
+ _ LINK 1 may not select QLM0, QLM1, QLM4, QLM5.
+ _ LINK 2 may not select QLM0, QLM1.
+
+ During a cold reset, this field is initialized to 0x3F when OCI_SPD\<3:0\> == 0xF.
+
+ During a cold reset, this field is initialized to 0x0 when OCI_SPD\<3:0\> != 0xF.
+
+ This field is not modified by hardware at any other time.
+
+ This field is not affected by soft or warm reset. */
+ uint64_t reserved_38_47 : 10;
+ uint64_t qlm_select : 6; /**< [ 37: 32](R/W/H) QLM select mask, where each bit corresponds to a QLM. A link will transmit/receive data
+ using only the selected QLMs. A link is enabled if any QLM is selected. The same QLM
+ should not be selected for multiple links.
+ [LANE_REV] has no effect on this mapping.
+
+ _ QLM_SELECT\<0\> = LNE(0..3) = QLM0.
+ _ QLM_SELECT\<1\> = LNE(7..4) = QLM1.
+ _ QLM_SELECT\<2\> = LNE(11..8) = QLM2.
+ _ QLM_SELECT\<3\> = LNE(15..12) = QLM3.
+ _ QLM_SELECT\<4\> = LNE(19..16) = QLM4.
+ _ QLM_SELECT\<5\> = LNE(23..20) = QLM5.
+ _ LINK 0 may not select QLM4, QLM5.
+ _ LINK 1 may not select QLM0, QLM1, QLM4, QLM5.
+ _ LINK 2 may not select QLM0, QLM1.
+ _ LINK 2 may not select QLM2 or QLM3 when LINK1 selects any QLM.
+ _ LINK 0 may not select QLM2 or QLM3 when LINK1 selects any QLM.
+ _ LINK 0 automatically selects QLM0 when [QLM_MANUAL]\<0\>=0.
+ _ LINK 0 automatically selects QLM1 when [QLM_MANUAL]\<1\>=0.
+ _ LINK 0 automatically selects QLM2 when [QLM_MANUAL]\<2\>=0 and OCX_QLM2_CFG[SER_LOCAL]=0.
+ _ LINK 1 automatically selects QLM2 when [QLM_MANUAL]\<2\>=0 and OCX_QLM2_CFG[SER_LOCAL]=1.
+ _ LINK 1 automatically selects QLM3 when [QLM_MANUAL]\<3\>=0 and OCX_QLM3_CFG[SER_LOCAL]=1.
+ _ LINK 2 automatically selects QLM3 when [QLM_MANUAL]\<3\>=0 and OCX_QLM3_CFG[SER_LOCAL]=0.
+ _ LINK 2 automatically selects QLM4 when [QLM_MANUAL]\<4\>=0.
+ _ LINK 2 automatically selects QLM5 when [QLM_MANUAL]\<5\>=0.
+
+ A link with [QLM_SELECT] = 0x0 is invalid and will never exchange traffic with the
+ link partner. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t data_rate : 13; /**< [ 28: 16](R/W/H) The number of core-clock cycles (RCLKs) to transmit 32 words, where each word is
+ 67 bits. Hardware automatically calculates a conservative value for this
+ field. Software can override the calculation by writing
+ [DATA_RATE] = roundup((67 * RCLK / GBAUD) * 32). */
+ uint64_t low_delay : 6; /**< [ 15: 10](R/W) The delay before reacting to a lane low data indication, as a multiple of 64 core-clock
+ cycles (RCLKs). */
+ uint64_t lane_align_dis : 1; /**< [ 9: 9](R/W/H) Disable the RX lane alignment. */
+ uint64_t lane_rev : 1; /**< [ 8: 8](R/W/H) RX lane reversal. When enabled, lane destriping is performed from the most significant
+ lane enabled to the least significant lane enabled. [QLM_SELECT] must be 0x0 before
+ changing [LANE_REV]. */
+ uint64_t lane_rev_auto : 1; /**< [ 7: 7](R/W) Automatically detect RX lane reversal. When enabled, [LANE_REV] is updated by
+ hardware. */
+ uint64_t reserved_0_6 : 7;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_6 : 7;
+ uint64_t lane_rev_auto : 1; /**< [ 7: 7](R/W) Automatically detect RX lane reversal. When enabled, [LANE_REV] is updated by
+ hardware. */
+ uint64_t lane_rev : 1; /**< [ 8: 8](R/W/H) RX lane reversal. When enabled, lane destriping is performed from the most significant
+ lane enabled to the least significant lane enabled. [QLM_SELECT] must be 0x0 before
+ changing [LANE_REV]. */
+ uint64_t lane_align_dis : 1; /**< [ 9: 9](R/W/H) Disable the RX lane alignment. */
+ uint64_t low_delay : 6; /**< [ 15: 10](R/W) The delay before reacting to a lane low data indication, as a multiple of 64 core-clock
+ cycles (RCLKs). */
+ uint64_t data_rate : 13; /**< [ 28: 16](R/W/H) The number of core-clock cycles (RCLKs) to transmit 32 words, where each word is
+ 67 bits. Hardware automatically calculates a conservative value for this
+ field. Software can override the calculation by writing
+ [DATA_RATE] = roundup((67 * RCLK / GBAUD) * 32). */
+ uint64_t reserved_29_31 : 3;
+ uint64_t qlm_select : 6; /**< [ 37: 32](R/W/H) QLM select mask, where each bit corresponds to a QLM. A link will transmit/receive data
+ using only the selected QLMs. A link is enabled if any QLM is selected. The same QLM
+ should not be selected for multiple links.
+ [LANE_REV] has no effect on this mapping.
+
+ _ QLM_SELECT\<0\> = LNE(0..3) = QLM0.
+ _ QLM_SELECT\<1\> = LNE(7..4) = QLM1.
+ _ QLM_SELECT\<2\> = LNE(11..8) = QLM2.
+ _ QLM_SELECT\<3\> = LNE(15..12) = QLM3.
+ _ QLM_SELECT\<4\> = LNE(19..16) = QLM4.
+ _ QLM_SELECT\<5\> = LNE(23..20) = QLM5.
+ _ LINK 0 may not select QLM4, QLM5.
+ _ LINK 1 may not select QLM0, QLM1, QLM4, QLM5.
+ _ LINK 2 may not select QLM0, QLM1.
+ _ LINK 2 may not select QLM2 or QLM3 when LINK1 selects any QLM.
+ _ LINK 0 may not select QLM2 or QLM3 when LINK1 selects any QLM.
+ _ LINK 0 automatically selects QLM0 when [QLM_MANUAL]\<0\>=0.
+ _ LINK 0 automatically selects QLM1 when [QLM_MANUAL]\<1\>=0.
+ _ LINK 0 automatically selects QLM2 when [QLM_MANUAL]\<2\>=0 and OCX_QLM2_CFG[SER_LOCAL]=0.
+ _ LINK 1 automatically selects QLM2 when [QLM_MANUAL]\<2\>=0 and OCX_QLM2_CFG[SER_LOCAL]=1.
+ _ LINK 1 automatically selects QLM3 when [QLM_MANUAL]\<3\>=0 and OCX_QLM3_CFG[SER_LOCAL]=1.
+ _ LINK 2 automatically selects QLM3 when [QLM_MANUAL]\<3\>=0 and OCX_QLM3_CFG[SER_LOCAL]=0.
+ _ LINK 2 automatically selects QLM4 when [QLM_MANUAL]\<4\>=0.
+ _ LINK 2 automatically selects QLM5 when [QLM_MANUAL]\<5\>=0.
+
+ A link with [QLM_SELECT] = 0x0 is invalid and will never exchange traffic with the
+ link partner. */
+ uint64_t reserved_38_47 : 10;
+ uint64_t qlm_manual : 6; /**< [ 53: 48](R/W/H) QLM manual mask, where each bit corresponds to a QLM. A link automatically selects a QLM
+ unless either QLM_MANUAL[QLM] = 1 or a QLM is not eligible for the link.
+
+ _ QLM_MANUAL\<0\> = LNE(0..3) = QLM0.
+ _ QLM_MANUAL\<1\> = LNE(7..4) = QLM1.
+ _ QLM_MANUAL\<2\> = LNE(11..8) = QLM2.
+ _ QLM_MANUAL\<3\> = LNE(15..12) = QLM3.
+ _ QLM_MANUAL\<4\> = LNE(19..16) = QLM4.
+ _ QLM_MANUAL\<5\> = LNE(23..20) = QLM5.
+ _ LINK 0 may not select QLM4, QLM5.
+ _ LINK 1 may not select QLM0, QLM1, QLM4, QLM5.
+ _ LINK 2 may not select QLM0, QLM1.
+
+ During a cold reset, this field is initialized to 0x3F when OCI_SPD\<3:0\> == 0xF.
+
+ During a cold reset, this field is initialized to 0x0 when OCI_SPD\<3:0\> != 0xF.
+
+ This field is not modified by hardware at any other time.
+
+ This field is not affected by soft or warm reset. */
+ uint64_t reserved_54_63 : 10;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_ocx_lnkx_cfg bdk_ocx_lnkx_cfg_t;
+
+static inline uint64_t BDK_OCX_LNKX_CFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_LNKX_CFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=2))
+ return 0x87e01100f900ll + 8ll * ((a) & 0x3);
+ __bdk_csr_fatal("OCX_LNKX_CFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_LNKX_CFG(a) bdk_ocx_lnkx_cfg_t
+#define bustype_BDK_OCX_LNKX_CFG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_LNKX_CFG(a) "OCX_LNKX_CFG"
+#define device_bar_BDK_OCX_LNKX_CFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_LNKX_CFG(a) (a)
+#define arguments_BDK_OCX_LNKX_CFG(a) (a),-1,-1,-1
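+
+/*
+ * Usage sketch (editorial illustration, not part of the BDK): compute the
+ * [DATA_RATE] override from the formula in the field description above,
+ * roundup((67 * RCLK / GBAUD) * 32). Both arguments are caller-supplied
+ * assumptions (core clock and lane baud rate, in Hz); hardware normally
+ * calculates a conservative value itself.
+ */
+static inline uint64_t example_ocx_data_rate(uint64_t rclk_hz, uint64_t baud_hz)
+{
+    /* Integer round-up of (67 * 32 * rclk_hz) / baud_hz. */
+    return (67ull * 32 * rclk_hz + baud_hz - 1) / baud_hz;
+}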
+
+/**
+ * Register (RSL) ocx_msix_pba#
+ *
+ * OCX MSI-X Pending Bit Array Register
+ * This register is the MSI-X PBA table; the bit number is indexed by the OCX_INT_VEC_E
+ * enumeration.
+ */
+union bdk_ocx_msix_pbax
+{
+ uint64_t u;
+ struct bdk_ocx_msix_pbax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pend : 64; /**< [ 63: 0](RO/H) Pending message for the associated OCX_MSIX_VEC()_CTL, enumerated by
+ OCX_INT_VEC_E. Bits that have no associated OCX_INT_VEC_E are 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t pend : 64; /**< [ 63: 0](RO/H) Pending message for the associated OCX_MSIX_VEC()_CTL, enumerated by
+ OCX_INT_VEC_E. Bits that have no associated OCX_INT_VEC_E are 0. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_msix_pbax_s cn; */
+};
+typedef union bdk_ocx_msix_pbax bdk_ocx_msix_pbax_t;
+
+static inline uint64_t BDK_OCX_MSIX_PBAX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_MSIX_PBAX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a==0))
+ return 0x87e011ff0000ll + 8ll * ((a) & 0x0);
+ __bdk_csr_fatal("OCX_MSIX_PBAX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_MSIX_PBAX(a) bdk_ocx_msix_pbax_t
+#define bustype_BDK_OCX_MSIX_PBAX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_MSIX_PBAX(a) "OCX_MSIX_PBAX"
+#define device_bar_BDK_OCX_MSIX_PBAX(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_OCX_MSIX_PBAX(a) (a)
+#define arguments_BDK_OCX_MSIX_PBAX(a) (a),-1,-1,-1
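+
+/* Usage sketch (illustration only): test whether the vector numbered by an
+ * OCX_INT_VEC_E value 'vec' has a pending message. csr_read64() is a
+ * placeholder raw MMIO accessor; on CN88XX only PBA index 0 exists.
+ *
+ *   bdk_ocx_msix_pbax_t pba;
+ *   pba.u = csr_read64(BDK_OCX_MSIX_PBAX(vec / 64));
+ *   int pending = (pba.s.pend >> (vec % 64)) & 1;   // 1 = message pending
+ */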
+
+/**
+ * Register (RSL) ocx_msix_vec#_addr
+ *
+ * OCX MSI-X Vector-Table Address Registers
+ * This register is the MSI-X vector table, indexed by the OCX_INT_VEC_E enumeration.
+ */
+union bdk_ocx_msix_vecx_addr
+{
+ uint64_t u;
+ struct bdk_ocx_msix_vecx_addr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_1 : 1;
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's OCX_MSIX_VEC()_ADDR, OCX_MSIX_VEC()_CTL, and
+ corresponding bit of OCX_MSIX_PBA() are RAZ/WI and do not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_OCX_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) =
+ 1, all vectors are secure and function as if [SECVEC] was set. */
+#else /* Word 0 - Little Endian */
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's OCX_MSIX_VEC()_ADDR, OCX_MSIX_VEC()_CTL, and
+ corresponding bit of OCX_MSIX_PBA() are RAZ/WI and do not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_OCX_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) =
+ 1, all vectors are secure and function as if [SECVEC] was set. */
+ uint64_t reserved_1 : 1;
+ uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_msix_vecx_addr_s cn; */
+};
+typedef union bdk_ocx_msix_vecx_addr bdk_ocx_msix_vecx_addr_t;
+
+static inline uint64_t BDK_OCX_MSIX_VECX_ADDR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_MSIX_VECX_ADDR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e011f00000ll + 0x10ll * ((a) & 0x3);
+ __bdk_csr_fatal("OCX_MSIX_VECX_ADDR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_MSIX_VECX_ADDR(a) bdk_ocx_msix_vecx_addr_t
+#define bustype_BDK_OCX_MSIX_VECX_ADDR(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_MSIX_VECX_ADDR(a) "OCX_MSIX_VECX_ADDR"
+#define device_bar_BDK_OCX_MSIX_VECX_ADDR(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_OCX_MSIX_VECX_ADDR(a) (a)
+#define arguments_BDK_OCX_MSIX_VECX_ADDR(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_msix_vec#_ctl
+ *
+ * OCX MSI-X Vector-Table Control and Data Registers
+ * This register is the MSI-X vector table, indexed by the OCX_INT_VEC_E enumeration.
+ */
+union bdk_ocx_msix_vecx_ctl
+{
+ uint64_t u;
+ struct bdk_ocx_msix_vecx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t data : 20; /**< [ 19: 0](R/W) Data to use for MSI-X delivery of this vector. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 20; /**< [ 19: 0](R/W) Data to use for MSI-X delivery of this vector. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_msix_vecx_ctl_s cn; */
+};
+typedef union bdk_ocx_msix_vecx_ctl bdk_ocx_msix_vecx_ctl_t;
+
+static inline uint64_t BDK_OCX_MSIX_VECX_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_MSIX_VECX_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e011f00008ll + 0x10ll * ((a) & 0x3);
+ __bdk_csr_fatal("OCX_MSIX_VECX_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_MSIX_VECX_CTL(a) bdk_ocx_msix_vecx_ctl_t
+#define bustype_BDK_OCX_MSIX_VECX_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_MSIX_VECX_CTL(a) "OCX_MSIX_VECX_CTL"
+#define device_bar_BDK_OCX_MSIX_VECX_CTL(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_OCX_MSIX_VECX_CTL(a) (a)
+#define arguments_BDK_OCX_MSIX_VECX_CTL(a) (a),-1,-1,-1
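+
+/* Usage sketch (illustration only): program and unmask one OCX MSI-X vector.
+ * csr_write64() is a placeholder raw MMIO accessor; iova and msg_data are
+ * caller-supplied values from interrupt-controller setup, assumed here.
+ *
+ *   bdk_ocx_msix_vecx_addr_t va = { .u = 0 };
+ *   va.s.addr = iova >> 2;                 // field holds address bits <48:2>
+ *   csr_write64(BDK_OCX_MSIX_VECX_ADDR(vec), va.u);
+ *
+ *   bdk_ocx_msix_vecx_ctl_t vc = { .u = 0 };
+ *   vc.s.data = msg_data;                  // 20-bit message data
+ *   vc.s.mask = 0;                         // allow delivery
+ *   csr_write64(BDK_OCX_MSIX_VECX_CTL(vec), vc.u);
+ */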
+
+/**
+ * Register (RSL) ocx_pp_cmd
+ *
+ * OCX Core Address Register
+ * Contains the address, read size and write mask to be used for the core operation. Write
+ * data should be placed in the OCX_PP_WR_DATA register first. Writing this register starts
+ * the operation. A second write to this register while an operation is in progress will
+ * stall. Read data is placed in the OCX_PP_RD_DATA register.
+ * This register has the same bit fields as OCX_WIN_CMD.
+ */
+union bdk_ocx_pp_cmd
+{
+ uint64_t u;
+ struct bdk_ocx_pp_cmd_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t wr_mask : 8; /**< [ 63: 56](R/W) Mask for the data to be written. When a bit is 1, the corresponding byte will be written.
+ The bits set in this field must be contiguous, correspond to a 1-, 2-, 4-, or 8-byte
+ operation, and be aligned to the operation size. A value of 0 produces unpredictable
+ results. This field is ignored during a read (LD_OP=1). */
+ uint64_t reserved_54_55 : 2;
+ uint64_t el : 2; /**< [ 53: 52](R/W) Execution level. This field is used to supply the execution level of the generated load
+ or store command. */
+ uint64_t nsecure : 1; /**< [ 51: 51](R/W) Nonsecure mode. Setting this bit causes the generated load or store command to be
+ considered nonsecure. */
+ uint64_t ld_cmd : 2; /**< [ 50: 49](R/W) The load command sent with the read:
+ 0x0 = Load 1-bytes.
+ 0x1 = Load 2-bytes.
+ 0x2 = Load 4-bytes.
+ 0x3 = Load 8-bytes. */
+ uint64_t ld_op : 1; /**< [ 48: 48](R/W) Operation type:
+ 0 = Store.
+ 1 = Load operation. */
+ uint64_t addr : 48; /**< [ 47: 0](R/W) The address used in both the load and store operations:
+ \<47:46\> = Reserved.
+ \<45:44\> = CCPI_ID.
+ \<43:36\> = NCB_ID.
+ \<35:0\> = Address.
+
+ When \<43:36\> NCB_ID is RSL (0x7E) address field is defined as:
+ \<47:46\> = Reserved.
+ \<45:44\> = CCPI_ID.
+ \<43:36\> = 0x7E.
+ \<35:32\> = Reserved.
+ \<31:24\> = RSL_ID.
+ \<23:0\> = RSL register offset.
+
+ \<2:0\> are ignored in a store operation. */
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 48; /**< [ 47: 0](R/W) The address used in both the load and store operations:
+ \<47:46\> = Reserved.
+ \<45:44\> = CCPI_ID.
+ \<43:36\> = NCB_ID.
+ \<35:0\> = Address.
+
+ When \<43:36\> NCB_ID is RSL (0x7E) address field is defined as:
+ \<47:46\> = Reserved.
+ \<45:44\> = CCPI_ID.
+ \<43:36\> = 0x7E.
+ \<35:32\> = Reserved.
+ \<31:24\> = RSL_ID.
+ \<23:0\> = RSL register offset.
+
+ \<2:0\> are ignored in a store operation. */
+ uint64_t ld_op : 1; /**< [ 48: 48](R/W) Operation type:
+ 0 = Store.
+ 1 = Load operation. */
+ uint64_t ld_cmd : 2; /**< [ 50: 49](R/W) The load command sent with the read:
+ 0x0 = Load 1-bytes.
+ 0x1 = Load 2-bytes.
+ 0x2 = Load 4-bytes.
+ 0x3 = Load 8-bytes. */
+ uint64_t nsecure : 1; /**< [ 51: 51](R/W) Nonsecure mode. Setting this bit causes the generated load or store command to be
+ considered nonsecure. */
+ uint64_t el : 2; /**< [ 53: 52](R/W) Execution level. This field is used to supply the execution level of the generated load
+ or store command. */
+ uint64_t reserved_54_55 : 2;
+ uint64_t wr_mask : 8; /**< [ 63: 56](R/W) Mask for the data to be written. When a bit is 1, the corresponding byte will be written.
+ The bits set in this field must be contiguous, correspond to a 1-, 2-, 4-, or 8-byte
+ operation, and be aligned to the operation size. A value of 0 produces unpredictable
+ results. This field is ignored during a read (LD_OP=1). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_pp_cmd_s cn; */
+};
+typedef union bdk_ocx_pp_cmd bdk_ocx_pp_cmd_t;
+
+#define BDK_OCX_PP_CMD BDK_OCX_PP_CMD_FUNC()
+static inline uint64_t BDK_OCX_PP_CMD_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_PP_CMD_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x87e0110000c8ll;
+ __bdk_csr_fatal("OCX_PP_CMD", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_PP_CMD bdk_ocx_pp_cmd_t
+#define bustype_BDK_OCX_PP_CMD BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_PP_CMD "OCX_PP_CMD"
+#define device_bar_BDK_OCX_PP_CMD 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_PP_CMD 0
+#define arguments_BDK_OCX_PP_CMD -1,-1,-1,-1
+
+/**
+ * Register (RSL) ocx_pp_rd_data
+ *
+ * OCX Core Read Data Register
+ * This register holds the read response data associated with a core command. It reads
+ * all-ones until the response is received.
+ * This register has the same bit fields as OCX_WIN_RD_DATA.
+ */
+union bdk_ocx_pp_rd_data
+{
+ uint64_t u;
+ struct bdk_ocx_pp_rd_data_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](RO/H) Read response data. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](RO/H) Read response data. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_pp_rd_data_s cn; */
+};
+typedef union bdk_ocx_pp_rd_data bdk_ocx_pp_rd_data_t;
+
+#define BDK_OCX_PP_RD_DATA BDK_OCX_PP_RD_DATA_FUNC()
+static inline uint64_t BDK_OCX_PP_RD_DATA_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_PP_RD_DATA_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x87e0110000d0ll;
+ __bdk_csr_fatal("OCX_PP_RD_DATA", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_PP_RD_DATA bdk_ocx_pp_rd_data_t
+#define bustype_BDK_OCX_PP_RD_DATA BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_PP_RD_DATA "OCX_PP_RD_DATA"
+#define device_bar_BDK_OCX_PP_RD_DATA 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_PP_RD_DATA 0
+#define arguments_BDK_OCX_PP_RD_DATA -1,-1,-1,-1
+
+/**
+ * Register (RSL) ocx_pp_wr_data
+ *
+ * OCX Core Data Register
+ * Contains the data to write to the address located in OCX_PP_CMD. Writing this register will
+ * cause a write operation to take place.
+ * This register has the same bit fields as OCX_WIN_WR_DATA.
+ */
+union bdk_ocx_pp_wr_data
+{
+ uint64_t u;
+ struct bdk_ocx_pp_wr_data_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t wr_data : 64; /**< [ 63: 0](R/W) The data to be written. */
+#else /* Word 0 - Little Endian */
+ uint64_t wr_data : 64; /**< [ 63: 0](R/W) The data to be written. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_pp_wr_data_s cn; */
+};
+typedef union bdk_ocx_pp_wr_data bdk_ocx_pp_wr_data_t;
+
+#define BDK_OCX_PP_WR_DATA BDK_OCX_PP_WR_DATA_FUNC()
+static inline uint64_t BDK_OCX_PP_WR_DATA_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_PP_WR_DATA_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x87e0110000c0ll;
+ __bdk_csr_fatal("OCX_PP_WR_DATA", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_PP_WR_DATA bdk_ocx_pp_wr_data_t
+#define bustype_BDK_OCX_PP_WR_DATA BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_PP_WR_DATA "OCX_PP_WR_DATA"
+#define device_bar_BDK_OCX_PP_WR_DATA 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_PP_WR_DATA 0
+#define arguments_BDK_OCX_PP_WR_DATA -1,-1,-1,-1
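+
+/* Usage sketch (illustration only): an 8-byte remote load through the core
+ * command registers, following the sequence described above: write
+ * OCX_PP_CMD to start the operation, then read OCX_PP_RD_DATA (which reads
+ * all-ones until the response arrives, so a caller may need to poll).
+ * csr_read64()/csr_write64() are placeholder raw MMIO accessors.
+ *
+ *   bdk_ocx_pp_cmd_t cmd = { .u = 0 };
+ *   cmd.s.ld_op  = 1;                      // load (not store)
+ *   cmd.s.ld_cmd = 0x3;                    // 8-byte load
+ *   cmd.s.addr   = remote_addr;            // <45:44>=CCPI_ID, <43:36>=NCB_ID
+ *   csr_write64(BDK_OCX_PP_CMD, cmd.u);    // starts the operation
+ *   uint64_t result = csr_read64(BDK_OCX_PP_RD_DATA);
+ */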
+
+/**
+ * Register (RSL) ocx_qlm#_cfg
+ *
+ * OCX QLM 0-5 Configuration Registers
+ */
+union bdk_ocx_qlmx_cfg
+{
+ uint64_t u;
+ struct bdk_ocx_qlmx_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ser_low : 4; /**< [ 63: 60](R/W/H) Reduces latency by limiting the amount of data in flight for each SerDes. Writing 0
+ causes hardware to determine a typically optimal value. */
+ uint64_t reserved_42_59 : 18;
+ uint64_t ser_limit : 10; /**< [ 41: 32](RAZ) Reserved. */
+ uint64_t crd_dis : 1; /**< [ 31: 31](R/W) For diagnostic use only. */
+ uint64_t reserved_27_30 : 4;
+ uint64_t trn_rxeq_only : 1; /**< [ 26: 26](R/W/H) Shortened training sequence. Initialized to 1 during cold reset when OCI_SPD\<3:0\> pins
+ indicate 5 GBAUD \<= speed \< 8 GBAUD. Otherwise, initialized to 0 during a cold reset. This
+ field is not affected by soft or warm reset. For diagnostic use only. */
+ uint64_t timer_dis : 1; /**< [ 25: 25](R/W/H) Disable bad lane timer. A timer counts core clocks (RCLKs) when any enabled lane is not
+ ready, i.e. not in the scrambler sync state. If this timer expires before all enabled
+ lanes can be made ready, then any lane which is not ready is disabled via
+ OCX_QLM(0..5)_CFG[SER_LANE_BAD]. This field is not affected by soft or warm reset. */
+ uint64_t trn_ena : 1; /**< [ 24: 24](R/W/H) Link training enable. Link training is performed during auto link bring up. Initialized to
+ 1 during cold reset when OCI_SPD\<3:0\> pins indicate speed \>= 5 GBAUD. Otherwise,
+ initialized to 0 during a cold reset. This field is not affected by soft or warm reset. */
+ uint64_t ser_lane_ready : 4; /**< [ 23: 20](R/W/H) SerDes lanes that are ready for bundling into the link. */
+ uint64_t ser_lane_bad : 4; /**< [ 19: 16](R/W/H) SerDes lanes excluded from use. */
+ uint64_t reserved_7_15 : 9;
+ uint64_t ser_lane_rev : 1; /**< [ 6: 6](RO/H) SerDes lane reversal has been detected. */
+ uint64_t ser_rxpol_auto : 1; /**< [ 5: 5](R/W) SerDes lane receive polarity auto detection mode. */
+ uint64_t ser_rxpol : 1; /**< [ 4: 4](R/W) SerDes lane receive polarity:
+ 0 = RX without inversion.
+ 1 = RX with inversion. */
+ uint64_t ser_txpol : 1; /**< [ 3: 3](R/W) SerDes lane transmit polarity:
+ 0 = TX without inversion.
+ 1 = TX with inversion. */
+ uint64_t reserved_1_2 : 2;
+ uint64_t ser_local : 1; /**< [ 0: 0](R/W/H) Auto initialization may set OCX_LNK0_CFG[QLM_SELECT\<2\>] = 1 only if
+ OCX_QLM2_CFG[SER_LOCAL] = 0.
+ Auto initialization may set OCX_LNK1_CFG[QLM_SELECT\<2\>] = 1 only if
+ OCX_QLM2_CFG[SER_LOCAL] = 1.
+ Auto initialization may set OCX_LNK1_CFG[QLM_SELECT\<3\>] = 1 only if
+ OCX_QLM3_CFG[SER_LOCAL] = 1.
+ Auto initialization may set OCX_LNK2_CFG[QLM_SELECT\<3\>] = 1 only if
+ OCX_QLM3_CFG[SER_LOCAL] = 0.
+
+ QLM0/1 can only participate in LNK0; therefore
+ OCX_QLM0/1_CFG[SER_LOCAL] has no effect.
+ QLM4/5 can only participate in LNK2; therefore
+ OCX_QLM4/5_CFG[SER_LOCAL] has no effect.
+
+ During a cold reset, initialized as follows:
+ _ OCX_QLM2_CFG[SER_LOCAL] = pi_oci2_link1.
+ _ OCX_QLM3_CFG[SER_LOCAL] = pi_oci3_link1.
+
+ The combo of pi_oci2_link1=1 and pi_oci3_link1=0 is illegal.
+
+ The combo of OCX_QLM2_CFG[SER_LOCAL]=1 and OCX_QLM3_CFG[SER_LOCAL] = 0 is illegal. */
+#else /* Word 0 - Little Endian */
+ uint64_t ser_local : 1; /**< [ 0: 0](R/W/H) Auto initialization may set OCX_LNK0_CFG[QLM_SELECT\<2\>] = 1 only if
+ OCX_QLM2_CFG[SER_LOCAL] = 0.
+ Auto initialization may set OCX_LNK1_CFG[QLM_SELECT\<2\>] = 1 only if
+ OCX_QLM2_CFG[SER_LOCAL] = 1.
+ Auto initialization may set OCX_LNK1_CFG[QLM_SELECT\<3\>] = 1 only if
+ OCX_QLM3_CFG[SER_LOCAL] = 1.
+ Auto initialization may set OCX_LNK2_CFG[QLM_SELECT\<3\>] = 1 only if
+ OCX_QLM3_CFG[SER_LOCAL] = 0.
+
+ QLM0/1 can only participate in LNK0; therefore
+ OCX_QLM0/1_CFG[SER_LOCAL] has no effect.
+ QLM4/5 can only participate in LNK2; therefore
+ OCX_QLM4/5_CFG[SER_LOCAL] has no effect.
+
+ During a cold reset, initialized as follows:
+ _ OCX_QLM2_CFG[SER_LOCAL] = pi_oci2_link1.
+ _ OCX_QLM3_CFG[SER_LOCAL] = pi_oci3_link1.
+
+ The combo of pi_oci2_link1=1 and pi_oci3_link1=0 is illegal.
+
+ The combo of OCX_QLM2_CFG[SER_LOCAL]=1 and OCX_QLM3_CFG[SER_LOCAL] = 0 is illegal. */
+ uint64_t reserved_1_2 : 2;
+ uint64_t ser_txpol : 1; /**< [ 3: 3](R/W) SerDes lane transmit polarity:
+ 0 = TX without inversion.
+ 1 = TX with inversion. */
+ uint64_t ser_rxpol : 1; /**< [ 4: 4](R/W) SerDes lane receive polarity:
+ 0 = RX without inversion.
+ 1 = RX with inversion. */
+ uint64_t ser_rxpol_auto : 1; /**< [ 5: 5](R/W) SerDes lane receive polarity auto detection mode. */
+ uint64_t ser_lane_rev : 1; /**< [ 6: 6](RO/H) SerDes lane reversal has been detected. */
+ uint64_t reserved_7_15 : 9;
+ uint64_t ser_lane_bad : 4; /**< [ 19: 16](R/W/H) SerDes lanes excluded from use. */
+ uint64_t ser_lane_ready : 4; /**< [ 23: 20](R/W/H) SerDes lanes that are ready for bundling into the link. */
+ uint64_t trn_ena : 1; /**< [ 24: 24](R/W/H) Link training enable. Link training is performed during auto link bring up. Initialized to
+ 1 during cold reset when OCI_SPD\<3:0\> pins indicate speed \>= 5 GBAUD. Otherwise,
+ initialized to 0 during a cold reset. This field is not affected by soft or warm reset. */
+ uint64_t timer_dis : 1; /**< [ 25: 25](R/W/H) Disable bad lane timer. A timer counts core clocks (RCLKs) when any enabled lane is not
+ ready, i.e. not in the scrambler sync state. If this timer expires before all enabled
+ lanes can be made ready, then any lane which is not ready is disabled via
+ OCX_QLM(0..5)_CFG[SER_LANE_BAD]. This field is not affected by soft or warm reset. */
+ uint64_t trn_rxeq_only : 1; /**< [ 26: 26](R/W/H) Shortened training sequence. Initialized to 1 during cold reset when OCI_SPD\<3:0\> pins
+ indicate 5 GBAUD \<= speed \< 8 GBAUD. Otherwise, initialized to 0 during a cold reset. This
+ field is not affected by soft or warm reset. For diagnostic use only. */
+ uint64_t reserved_27_30 : 4;
+ uint64_t crd_dis : 1; /**< [ 31: 31](R/W) For diagnostic use only. */
+ uint64_t ser_limit : 10; /**< [ 41: 32](RAZ) Reserved. */
+ uint64_t reserved_42_59 : 18;
+ uint64_t ser_low : 4; /**< [ 63: 60](R/W/H) Reduces latency by limiting the amount of data in flight for each SerDes. Writing 0
+ causes hardware to determine a typically optimal value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_qlmx_cfg_s cn; */
+};
+typedef union bdk_ocx_qlmx_cfg bdk_ocx_qlmx_cfg_t;
+
+static inline uint64_t BDK_OCX_QLMX_CFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_QLMX_CFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e01100f800ll + 8ll * ((a) & 0x7);
+ __bdk_csr_fatal("OCX_QLMX_CFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_QLMX_CFG(a) bdk_ocx_qlmx_cfg_t
+#define bustype_BDK_OCX_QLMX_CFG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_QLMX_CFG(a) "OCX_QLMX_CFG"
+#define device_bar_BDK_OCX_QLMX_CFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_QLMX_CFG(a) (a)
+#define arguments_BDK_OCX_QLMX_CFG(a) (a),-1,-1,-1
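+
+/* Usage sketch (illustration only): exclude a known-bad SerDes lane of QLM
+ * 'q' from link bundling before bring-up. csr_read64()/csr_write64() are
+ * placeholder raw MMIO accessors; 'lane' (0..3) is the lane within the QLM.
+ *
+ *   bdk_ocx_qlmx_cfg_t qcfg;
+ *   qcfg.u = csr_read64(BDK_OCX_QLMX_CFG(q));
+ *   qcfg.s.ser_lane_bad |= 1 << lane;      // hardware will not use this lane
+ *   csr_write64(BDK_OCX_QLMX_CFG(q), qcfg.u);
+ */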
+
+/**
+ * Register (RSL) ocx_rlk#_align
+ *
+ * OCX Receive Link Align Registers
+ */
+union bdk_ocx_rlkx_align
+{
+ uint64_t u;
+ struct bdk_ocx_rlkx_align_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t bad_cnt : 32; /**< [ 63: 32](R/W/H) Number of alignment sequences received in error (i.e. those that violate the current
+ alignment). Count saturates at max value. */
+ uint64_t good_cnt : 32; /**< [ 31: 0](R/W/H) Number of alignment sequences received (i.e. those that do not violate the current
+ alignment). Count saturates at max value. */
+#else /* Word 0 - Little Endian */
+ uint64_t good_cnt : 32; /**< [ 31: 0](R/W/H) Number of alignment sequences received (i.e. those that do not violate the current
+ alignment). Count saturates at max value. */
+ uint64_t bad_cnt : 32; /**< [ 63: 32](R/W/H) Number of alignment sequences received in error (i.e. those that violate the current
+ alignment). Count saturates at max value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_rlkx_align_s cn; */
+};
+typedef union bdk_ocx_rlkx_align bdk_ocx_rlkx_align_t;
+
+static inline uint64_t BDK_OCX_RLKX_ALIGN(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_RLKX_ALIGN(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=2))
+ return 0x87e011018060ll + 0x2000ll * ((a) & 0x3);
+ __bdk_csr_fatal("OCX_RLKX_ALIGN", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_RLKX_ALIGN(a) bdk_ocx_rlkx_align_t
+#define bustype_BDK_OCX_RLKX_ALIGN(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_RLKX_ALIGN(a) "OCX_RLKX_ALIGN"
+#define device_bar_BDK_OCX_RLKX_ALIGN(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_RLKX_ALIGN(a) (a)
+#define arguments_BDK_OCX_RLKX_ALIGN(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_rlk#_blk_err
+ *
+ * OCX Receive Link Block Error Registers
+ */
+union bdk_ocx_rlkx_blk_err
+{
+ uint64_t u;
+ struct bdk_ocx_rlkx_blk_err_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t limit : 16; /**< [ 31: 16](R/W) Contains the number of blocks received with errors before the
+ OCX_COM_LINK()_INT[BLK_ERR] interrupt is generated. */
+ uint64_t count : 16; /**< [ 15: 0](R/W) Shows the number of blocks received with one or more errors detected. Multiple
+ errors may be detected as the link starts up. */
+#else /* Word 0 - Little Endian */
+ uint64_t count : 16; /**< [ 15: 0](R/W) Shows the number of blocks received with one or more errors detected. Multiple
+ errors may be detected as the link starts up. */
+ uint64_t limit : 16; /**< [ 31: 16](R/W) Contains the number of blocks received with errors before the
+ OCX_COM_LINK()_INT[BLK_ERR] interrupt is generated. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_rlkx_blk_err_s cn; */
+};
+typedef union bdk_ocx_rlkx_blk_err bdk_ocx_rlkx_blk_err_t;
+
+static inline uint64_t BDK_OCX_RLKX_BLK_ERR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_RLKX_BLK_ERR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=2))
+ return 0x87e011018050ll + 0x2000ll * ((a) & 0x3);
+ __bdk_csr_fatal("OCX_RLKX_BLK_ERR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_RLKX_BLK_ERR(a) bdk_ocx_rlkx_blk_err_t
+#define bustype_BDK_OCX_RLKX_BLK_ERR(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_RLKX_BLK_ERR(a) "OCX_RLKX_BLK_ERR"
+#define device_bar_BDK_OCX_RLKX_BLK_ERR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_RLKX_BLK_ERR(a) (a)
+#define arguments_BDK_OCX_RLKX_BLK_ERR(a) (a),-1,-1,-1
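+
+/* Usage sketch (illustration only): arm the block-error interrupt for a link
+ * and restart the running count. csr_write64() is a placeholder raw MMIO
+ * accessor; the threshold of 16 is an arbitrary example value.
+ *
+ *   bdk_ocx_rlkx_blk_err_t be = { .u = 0 };
+ *   be.s.limit = 16;                       // raise BLK_ERR after 16 bad blocks
+ *   be.s.count = 0;                        // clear the current count
+ *   csr_write64(BDK_OCX_RLKX_BLK_ERR(lnk), be.u);
+ */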
+
+/**
+ * Register (RSL) ocx_rlk#_ecc_ctl
+ *
+ * OCX Receive ECC Control Registers
+ */
+union bdk_ocx_rlkx_ecc_ctl
+{
+ uint64_t u;
+ struct bdk_ocx_rlkx_ecc_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t fifo1_flip : 2; /**< [ 35: 34](R/W) Test pattern to cause ECC errors in top RX FIFO syndromes. */
+ uint64_t fifo0_flip : 2; /**< [ 33: 32](R/W) Test pattern to cause ECC errors in bottom RX FIFO syndromes. */
+ uint64_t reserved_2_31 : 30;
+ uint64_t fifo1_cdis : 1; /**< [ 1: 1](R/W) ECC correction disable for top RX FIFO RAM. */
+ uint64_t fifo0_cdis : 1; /**< [ 0: 0](R/W) ECC correction disable for bottom RX FIFO RAM. */
+#else /* Word 0 - Little Endian */
+ uint64_t fifo0_cdis : 1; /**< [ 0: 0](R/W) ECC correction disable for bottom RX FIFO RAM. */
+ uint64_t fifo1_cdis : 1; /**< [ 1: 1](R/W) ECC correction disable for top RX FIFO RAM. */
+ uint64_t reserved_2_31 : 30;
+ uint64_t fifo0_flip : 2; /**< [ 33: 32](R/W) Test pattern to cause ECC errors in bottom RX FIFO syndromes. */
+ uint64_t fifo1_flip : 2; /**< [ 35: 34](R/W) Test pattern to cause ECC errors in top RX FIFO syndromes. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_rlkx_ecc_ctl_s cn; */
+};
+typedef union bdk_ocx_rlkx_ecc_ctl bdk_ocx_rlkx_ecc_ctl_t;
+
+static inline uint64_t BDK_OCX_RLKX_ECC_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_RLKX_ECC_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=2))
+ return 0x87e011018018ll + 0x2000ll * ((a) & 0x3);
+ __bdk_csr_fatal("OCX_RLKX_ECC_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_RLKX_ECC_CTL(a) bdk_ocx_rlkx_ecc_ctl_t
+#define bustype_BDK_OCX_RLKX_ECC_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_RLKX_ECC_CTL(a) "OCX_RLKX_ECC_CTL"
+#define device_bar_BDK_OCX_RLKX_ECC_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_RLKX_ECC_CTL(a) (a)
+#define arguments_BDK_OCX_RLKX_ECC_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_rlk#_enables
+ *
+ * OCX Receive Link Enable Registers
+ */
+union bdk_ocx_rlkx_enables
+{
+ uint64_t u;
+ struct bdk_ocx_rlkx_enables_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t mcd : 1; /**< [ 4: 4](R/W) Master enable for all inbound MCD bits. This bit must be enabled by software once any
+ trusted-mode validation has occurred and before any [MCD] traffic is generated. [MCD]
+ traffic is typically controlled by the OCX_TLK(0..2)_MCD_CTL register. */
+ uint64_t m_req : 1; /**< [ 3: 3](R/W/H) Master enable for all inbound memory requests. This bit is typically set at reset but is
+ cleared when operating in trusted-mode and must be enabled by software. */
+ uint64_t io_req : 1; /**< [ 2: 2](R/W/H) Master enable for all inbound I/O requests. This bit is typically set at reset but is
+ cleared when operating in trusted-mode and must be enabled by software. */
+ uint64_t fwd : 1; /**< [ 1: 1](R/W/H) Master enable for all inbound forward commands. This bit is typically set at reset but is
+ cleared when operating in trusted-mode and must be enabled by software. */
+ uint64_t co_proc : 1; /**< [ 0: 0](R/W/H) Master enable for all inbound coprocessor commands. This bit is typically set at reset but
+ is cleared when operating in trusted-mode and must be enabled by software. */
+#else /* Word 0 - Little Endian */
+ uint64_t co_proc : 1; /**< [ 0: 0](R/W/H) Master enable for all inbound coprocessor commands. This bit is typically set at reset but
+ is cleared when operating in trusted-mode and must be enabled by software. */
+ uint64_t fwd : 1; /**< [ 1: 1](R/W/H) Master enable for all inbound forward commands. This bit is typically set at reset but is
+ cleared when operating in trusted-mode and must be enabled by software. */
+ uint64_t io_req : 1; /**< [ 2: 2](R/W/H) Master enable for all inbound I/O requests. This bit is typically set at reset but is
+ cleared when operating in trusted-mode and must be enabled by software. */
+ uint64_t m_req : 1; /**< [ 3: 3](R/W/H) Master enable for all inbound memory requests. This bit is typically set at reset but is
+ cleared when operating in trusted-mode and must be enabled by software. */
+ uint64_t mcd : 1; /**< [ 4: 4](R/W) Master enable for all inbound MCD bits. This bit must be enabled by software once any
+ trusted-mode validation has occurred and before any [MCD] traffic is generated. [MCD]
+ traffic is typically controlled by the OCX_TLK(0..2)_MCD_CTL register. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_rlkx_enables_s cn; */
+};
+typedef union bdk_ocx_rlkx_enables bdk_ocx_rlkx_enables_t;
+
+static inline uint64_t BDK_OCX_RLKX_ENABLES(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_RLKX_ENABLES(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=2))
+ return 0x87e011018000ll + 0x2000ll * ((a) & 0x3);
+ __bdk_csr_fatal("OCX_RLKX_ENABLES", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_RLKX_ENABLES(a) bdk_ocx_rlkx_enables_t
+#define bustype_BDK_OCX_RLKX_ENABLES(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_RLKX_ENABLES(a) "OCX_RLKX_ENABLES"
+#define device_bar_BDK_OCX_RLKX_ENABLES(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_RLKX_ENABLES(a) (a)
+#define arguments_BDK_OCX_RLKX_ENABLES(a) (a),-1,-1,-1
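+
+/* Usage sketch (illustration only): open all inbound traffic classes on a
+ * link, including the [MCD] enable that software must set after any
+ * trusted-mode validation. csr_read64()/csr_write64() are placeholder raw
+ * MMIO accessors.
+ *
+ *   bdk_ocx_rlkx_enables_t en;
+ *   en.u = csr_read64(BDK_OCX_RLKX_ENABLES(lnk));
+ *   en.s.co_proc = 1;                      // coprocessor commands
+ *   en.s.fwd     = 1;                      // forward commands
+ *   en.s.io_req  = 1;                      // I/O requests
+ *   en.s.m_req   = 1;                      // memory requests
+ *   en.s.mcd     = 1;                      // MCD traffic
+ *   csr_write64(BDK_OCX_RLKX_ENABLES(lnk), en.u);
+ */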
+
+/**
+ * Register (RSL) ocx_rlk#_fifo#_cnt
+ *
+ * OCX Receive Link FIFO Count Registers
+ */
+union bdk_ocx_rlkx_fifox_cnt
+{
+ uint64_t u;
+ struct bdk_ocx_rlkx_fifox_cnt_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t count : 16; /**< [ 15: 0](RO/H) RX FIFO count of 64-bit words to send to core. VC13 traffic is used immediately so the
+ FIFO count is always 0. See OCX_RLK(0..2)_LNK_DATA. */
+#else /* Word 0 - Little Endian */
+ uint64_t count : 16; /**< [ 15: 0](RO/H) RX FIFO count of 64-bit words to send to core. VC13 traffic is used immediately so the
+ FIFO count is always 0. See OCX_RLK(0..2)_LNK_DATA. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_rlkx_fifox_cnt_s cn; */
+};
+typedef union bdk_ocx_rlkx_fifox_cnt bdk_ocx_rlkx_fifox_cnt_t;
+
+static inline uint64_t BDK_OCX_RLKX_FIFOX_CNT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_RLKX_FIFOX_CNT(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=2) && (b<=13)))
+ return 0x87e011018100ll + 0x2000ll * ((a) & 0x3) + 8ll * ((b) & 0xf);
+ __bdk_csr_fatal("OCX_RLKX_FIFOX_CNT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_OCX_RLKX_FIFOX_CNT(a,b) bdk_ocx_rlkx_fifox_cnt_t
+#define bustype_BDK_OCX_RLKX_FIFOX_CNT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_RLKX_FIFOX_CNT(a,b) "OCX_RLKX_FIFOX_CNT"
+#define device_bar_BDK_OCX_RLKX_FIFOX_CNT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_RLKX_FIFOX_CNT(a,b) (a)
+#define arguments_BDK_OCX_RLKX_FIFOX_CNT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) ocx_rlk#_key_high#
+ *
+ * OCX Receive Encryption Key Registers
+ */
+union bdk_ocx_rlkx_key_highx
+{
+ uint64_t u;
+ struct bdk_ocx_rlkx_key_highx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](WO) Receive key data \<127:64\>.
+ Reads as zero if OCX_RLK(0..2)_PROTECT[WO_KEY] = 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](WO) Receive key data \<127:64\>.
+ Reads as zero if OCX_RLK(0..2)_PROTECT[WO_KEY] = 1. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_rlkx_key_highx_s cn; */
+};
+typedef union bdk_ocx_rlkx_key_highx bdk_ocx_rlkx_key_highx_t;
+
+static inline uint64_t BDK_OCX_RLKX_KEY_HIGHX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_RLKX_KEY_HIGHX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=2) && (b<=2)))
+ return 0x87e011018208ll + 0x2000ll * ((a) & 0x3) + 0x10ll * ((b) & 0x3);
+ __bdk_csr_fatal("OCX_RLKX_KEY_HIGHX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_OCX_RLKX_KEY_HIGHX(a,b) bdk_ocx_rlkx_key_highx_t
+#define bustype_BDK_OCX_RLKX_KEY_HIGHX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_RLKX_KEY_HIGHX(a,b) "OCX_RLKX_KEY_HIGHX"
+#define device_bar_BDK_OCX_RLKX_KEY_HIGHX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_RLKX_KEY_HIGHX(a,b) (a)
+#define arguments_BDK_OCX_RLKX_KEY_HIGHX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) ocx_rlk#_key_low#
+ *
+ * OCX Receive Encryption Key Registers
+ */
+union bdk_ocx_rlkx_key_lowx
+{
+ uint64_t u;
+ struct bdk_ocx_rlkx_key_lowx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](WO) Receive key data \<63:0\>.
+ Reads as zero if OCX_RLK(0..2)_PROTECT[WO_KEY] = 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](WO) Receive key data \<63:0\>.
+ Reads as zero if OCX_RLK(0..2)_PROTECT[WO_KEY] = 1. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_rlkx_key_lowx_s cn; */
+};
+typedef union bdk_ocx_rlkx_key_lowx bdk_ocx_rlkx_key_lowx_t;
+
+static inline uint64_t BDK_OCX_RLKX_KEY_LOWX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_RLKX_KEY_LOWX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=2) && (b<=2)))
+ return 0x87e011018200ll + 0x2000ll * ((a) & 0x3) + 0x10ll * ((b) & 0x3);
+ __bdk_csr_fatal("OCX_RLKX_KEY_LOWX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_OCX_RLKX_KEY_LOWX(a,b) bdk_ocx_rlkx_key_lowx_t
+#define bustype_BDK_OCX_RLKX_KEY_LOWX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_RLKX_KEY_LOWX(a,b) "OCX_RLKX_KEY_LOWX"
+#define device_bar_BDK_OCX_RLKX_KEY_LOWX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_RLKX_KEY_LOWX(a,b) (a)
+#define arguments_BDK_OCX_RLKX_KEY_LOWX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) ocx_rlk#_lnk_data
+ *
+ * OCX Receive Link Data Registers
+ */
+union bdk_ocx_rlkx_lnk_data
+{
+ uint64_t u;
+ struct bdk_ocx_rlkx_lnk_data_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t rcvd : 1; /**< [ 63: 63](RO/H) Reads state of OCX_COM_LINK(0..2)_INT[LNK_DATA]; set by hardware when a link data block is
+ received. */
+ uint64_t reserved_56_62 : 7;
+ uint64_t data : 56; /**< [ 55: 0](RO/H) Contents of this register are received from the OCX_TLK(0..2)_LNK_DATA register on the
+ link partner. Each time a new value is received, the RX_LDAT interrupt is generated. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 56; /**< [ 55: 0](RO/H) Contents of this register are received from the OCX_TLK(0..2)_LNK_DATA register on the
+ link partner. Each time a new value is received, the RX_LDAT interrupt is generated. */
+ uint64_t reserved_56_62 : 7;
+ uint64_t rcvd : 1; /**< [ 63: 63](RO/H) Reads state of OCX_COM_LINK(0..2)_INT[LNK_DATA]; set by hardware when a link data block is
+ received. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_rlkx_lnk_data_s cn; */
+};
+typedef union bdk_ocx_rlkx_lnk_data bdk_ocx_rlkx_lnk_data_t;
+
+static inline uint64_t BDK_OCX_RLKX_LNK_DATA(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_RLKX_LNK_DATA(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=2))
+ return 0x87e011018028ll + 0x2000ll * ((a) & 0x3);
+ __bdk_csr_fatal("OCX_RLKX_LNK_DATA", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_RLKX_LNK_DATA(a) bdk_ocx_rlkx_lnk_data_t
+#define bustype_BDK_OCX_RLKX_LNK_DATA(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_RLKX_LNK_DATA(a) "OCX_RLKX_LNK_DATA"
+#define device_bar_BDK_OCX_RLKX_LNK_DATA(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_RLKX_LNK_DATA(a) (a)
+#define arguments_BDK_OCX_RLKX_LNK_DATA(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_rlk#_mcd_ctl
+ *
+ * OCX Receive MCD Control Registers
+ * This debug register captures which new MCD bits have been received from the link partner. The
+ * MCD bits are received when both the OCX_RLK(0..2)_ENABLES[MCD] bit is set and the MCD bit was
+ * not previously transmitted.
+ */
+union bdk_ocx_rlkx_mcd_ctl
+{
+ uint64_t u;
+ struct bdk_ocx_rlkx_mcd_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_3_63 : 61;
+ uint64_t clr : 3; /**< [ 2: 0](R/W1C/H) Shows the inbound MCD value being driven by link(0..2). Set by hardware on
+ receipt of an MCD packet and cleared by writing 1 to the corresponding bit of this register. */
+#else /* Word 0 - Little Endian */
+ uint64_t clr : 3; /**< [ 2: 0](R/W1C/H) Shows the inbound MCD value being driven by link(0..2). Set by hardware on
+ receipt of an MCD packet and cleared by writing 1 to the corresponding bit of this register. */
+ uint64_t reserved_3_63 : 61;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_rlkx_mcd_ctl_s cn; */
+};
+typedef union bdk_ocx_rlkx_mcd_ctl bdk_ocx_rlkx_mcd_ctl_t;
+
+static inline uint64_t BDK_OCX_RLKX_MCD_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_RLKX_MCD_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=2))
+ return 0x87e011018020ll + 0x2000ll * ((a) & 0x3);
+ __bdk_csr_fatal("OCX_RLKX_MCD_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_RLKX_MCD_CTL(a) bdk_ocx_rlkx_mcd_ctl_t
+#define bustype_BDK_OCX_RLKX_MCD_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_RLKX_MCD_CTL(a) "OCX_RLKX_MCD_CTL"
+#define device_bar_BDK_OCX_RLKX_MCD_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_RLKX_MCD_CTL(a) (a)
+#define arguments_BDK_OCX_RLKX_MCD_CTL(a) (a),-1,-1,-1
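+
+/* Usage sketch (illustration only): sample and acknowledge the inbound MCD
+ * bits; [CLR] is W1C, so writing back the value just read clears exactly the
+ * bits observed. csr_read64()/csr_write64() are placeholder raw MMIO
+ * accessors.
+ *
+ *   bdk_ocx_rlkx_mcd_ctl_t mcd;
+ *   mcd.u = csr_read64(BDK_OCX_RLKX_MCD_CTL(lnk));
+ *   uint64_t seen = mcd.s.clr;             // MCD bits received so far
+ *   csr_write64(BDK_OCX_RLKX_MCD_CTL(lnk), mcd.u);  // W1C acknowledge
+ */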
+
+/**
+ * Register (RSL) ocx_rlk#_protect
+ *
+ * OCX Receive Data Protection Control Registers
+ */
+union bdk_ocx_rlkx_protect
+{
+ uint64_t u;
+ struct bdk_ocx_rlkx_protect_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t wo_key : 1; /**< [ 7: 7](R/W1S) Reserved. Setting this bit blocks read access to OCX_RLK()_KEY_LOW,
+ OCX_RLK()_KEY_HIGH, OCX_RLK()_SALT_LOW and OCX_RLK()_SALT_HIGH registers. */
+ uint64_t reserved_1_6 : 6;
+ uint64_t enable : 1; /**< [ 0: 0](RO/H) Data encryption enabled. This bit is set and cleared by the transmitting link
+ partner. */
+#else /* Word 0 - Little Endian */
+ uint64_t enable : 1; /**< [ 0: 0](RO/H) Data encryption enabled. This bit is set and cleared by the transmitting link
+ partner. */
+ uint64_t reserved_1_6 : 6;
+ uint64_t wo_key : 1; /**< [ 7: 7](R/W1S) Reserved. Setting this bit blocks read access to OCX_RLK()_KEY_LOW,
+ OCX_RLK()_KEY_HIGH, OCX_RLK()_SALT_LOW and OCX_RLK()_SALT_HIGH registers. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_rlkx_protect_s cn; */
+};
+typedef union bdk_ocx_rlkx_protect bdk_ocx_rlkx_protect_t;
+
+static inline uint64_t BDK_OCX_RLKX_PROTECT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_RLKX_PROTECT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=2))
+ return 0x87e0110182c0ll + 0x2000ll * ((a) & 0x3);
+ __bdk_csr_fatal("OCX_RLKX_PROTECT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_RLKX_PROTECT(a) bdk_ocx_rlkx_protect_t
+#define bustype_BDK_OCX_RLKX_PROTECT(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_RLKX_PROTECT(a) "OCX_RLKX_PROTECT"
+#define device_bar_BDK_OCX_RLKX_PROTECT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_RLKX_PROTECT(a) (a)
+#define arguments_BDK_OCX_RLKX_PROTECT(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_rlk#_salt_high
+ *
+ * OCX Receive Encryption Salt Registers
+ */
+union bdk_ocx_rlkx_salt_high
+{
+ uint64_t u;
+ struct bdk_ocx_rlkx_salt_high_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](WO) Receive salt data \<127:64\>.
+ Reads as zero if OCX_RLK(0..2)_PROTECT[WO_KEY] = 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](WO) Receive salt data \<127:64\>.
+ Reads as zero if OCX_RLK(0..2)_PROTECT[WO_KEY] = 1. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_rlkx_salt_high_s cn; */
+};
+typedef union bdk_ocx_rlkx_salt_high bdk_ocx_rlkx_salt_high_t;
+
+static inline uint64_t BDK_OCX_RLKX_SALT_HIGH(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_RLKX_SALT_HIGH(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=2))
+ return 0x87e011018288ll + 0x2000ll * ((a) & 0x3);
+ __bdk_csr_fatal("OCX_RLKX_SALT_HIGH", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_RLKX_SALT_HIGH(a) bdk_ocx_rlkx_salt_high_t
+#define bustype_BDK_OCX_RLKX_SALT_HIGH(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_RLKX_SALT_HIGH(a) "OCX_RLKX_SALT_HIGH"
+#define device_bar_BDK_OCX_RLKX_SALT_HIGH(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_RLKX_SALT_HIGH(a) (a)
+#define arguments_BDK_OCX_RLKX_SALT_HIGH(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_rlk#_salt_low
+ *
+ * OCX Receive Encryption Salt Registers
+ */
+union bdk_ocx_rlkx_salt_low
+{
+ uint64_t u;
+ struct bdk_ocx_rlkx_salt_low_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](WO) Receive salt data \<63:0\>.
+ Reads as zero if OCX_RLK(0..2)_PROTECT[WO_KEY] = 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](WO) Receive salt data \<63:0\>.
+ Reads as zero if OCX_RLK(0..2)_PROTECT[WO_KEY] = 1. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_rlkx_salt_low_s cn; */
+};
+typedef union bdk_ocx_rlkx_salt_low bdk_ocx_rlkx_salt_low_t;
+
+static inline uint64_t BDK_OCX_RLKX_SALT_LOW(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_RLKX_SALT_LOW(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=2))
+ return 0x87e011018280ll + 0x2000ll * ((a) & 0x3);
+ __bdk_csr_fatal("OCX_RLKX_SALT_LOW", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_RLKX_SALT_LOW(a) bdk_ocx_rlkx_salt_low_t
+#define bustype_BDK_OCX_RLKX_SALT_LOW(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_RLKX_SALT_LOW(a) "OCX_RLKX_SALT_LOW"
+#define device_bar_BDK_OCX_RLKX_SALT_LOW(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_RLKX_SALT_LOW(a) (a)
+#define arguments_BDK_OCX_RLKX_SALT_LOW(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_strap
+ *
+ * OCX Strap Register
+ * This register provides read-only access to the OCI straps.
+ */
+union bdk_ocx_strap
+{
+ uint64_t u;
+ struct bdk_ocx_strap_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_26_63 : 38;
+ uint64_t oci3_lnk1 : 1; /**< [ 25: 25](RO) OCI3_LNK1 strap. */
+ uint64_t oci2_lnk1 : 1; /**< [ 24: 24](RO) OCI2_LNK1 strap. */
+ uint64_t reserved_17_23 : 7;
+ uint64_t oci_fixed_node : 1; /**< [ 16: 16](RO) OCI_FIXED_NODE strap. */
+ uint64_t reserved_10_15 : 6;
+ uint64_t oci_node_id : 2; /**< [ 9: 8](RO) OCI_NODE_ID\<1:0\> straps. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t oci_spd : 4; /**< [ 3: 0](RO) OCI_SPD\<3:0\> straps. */
+#else /* Word 0 - Little Endian */
+ uint64_t oci_spd : 4; /**< [ 3: 0](RO) OCI_SPD\<3:0\> straps. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t oci_node_id : 2; /**< [ 9: 8](RO) OCI_NODE_ID\<1:0\> straps. */
+ uint64_t reserved_10_15 : 6;
+ uint64_t oci_fixed_node : 1; /**< [ 16: 16](RO) OCI_FIXED_NODE strap. */
+ uint64_t reserved_17_23 : 7;
+ uint64_t oci2_lnk1 : 1; /**< [ 24: 24](RO) OCI2_LNK1 strap. */
+ uint64_t oci3_lnk1 : 1; /**< [ 25: 25](RO) OCI3_LNK1 strap. */
+ uint64_t reserved_26_63 : 38;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_strap_s cn; */
+};
+typedef union bdk_ocx_strap bdk_ocx_strap_t;
+
+#define BDK_OCX_STRAP BDK_OCX_STRAP_FUNC()
+static inline uint64_t BDK_OCX_STRAP_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_STRAP_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X))
+ return 0x87e01100ff08ll;
+ __bdk_csr_fatal("OCX_STRAP", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_STRAP bdk_ocx_strap_t
+#define bustype_BDK_OCX_STRAP BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_STRAP "OCX_STRAP"
+#define device_bar_BDK_OCX_STRAP 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_STRAP 0
+#define arguments_BDK_OCX_STRAP -1,-1,-1,-1
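+
+/* Usage sketch (illustration only): decode the node-ID and CCPI speed straps
+ * (this register exists on CN88XX pass 2 only). csr_read64() is a
+ * placeholder raw MMIO accessor.
+ *
+ *   bdk_ocx_strap_t st;
+ *   st.u = csr_read64(BDK_OCX_STRAP);
+ *   unsigned node = st.s.oci_node_id;      // this chip's OCI node ID
+ *   unsigned spd  = st.s.oci_spd;          // OCI_SPD<3:0> speed straps
+ */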
+
+/**
+ * Register (RSL) ocx_tlk#_bist_status
+ *
+ * OCX Link REPLAY Memories and TX FIFOs BIST Status Register
+ * Contains status from last memory BIST for all TX FIFO memories and REPLAY memories in this
+ * link. RX FIFO status can be found in OCX_COM_BIST_STATUS.
+ */
+union bdk_ocx_tlkx_bist_status
+{
+ uint64_t u;
+ struct bdk_ocx_tlkx_bist_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t status : 16; /**< [ 15: 0](RO/H) \<15:14\> = REPLAY Memories BIST Status \<1:0\>.
+ \<13:12\> = MOC TX_FIFO BIST Status \<1:0\>.
+ \<11:0\> = TX_FIFO\<11:0\> by Link VC number. */
+#else /* Word 0 - Little Endian */
+ uint64_t status : 16; /**< [ 15: 0](RO/H) \<15:14\> = REPLAY Memories BIST Status \<1:0\>.
+ \<13:12\> = MOC TX_FIFO BIST Status \<1:0\>.
+ \<11:0\> = TX_FIFO\<11:0\> by Link VC number. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_tlkx_bist_status_s cn; */
+};
+typedef union bdk_ocx_tlkx_bist_status bdk_ocx_tlkx_bist_status_t;
+
+static inline uint64_t BDK_OCX_TLKX_BIST_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_TLKX_BIST_STATUS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=2))
+ return 0x87e011010008ll + 0x2000ll * ((a) & 0x3);
+ __bdk_csr_fatal("OCX_TLKX_BIST_STATUS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_TLKX_BIST_STATUS(a) bdk_ocx_tlkx_bist_status_t
+#define bustype_BDK_OCX_TLKX_BIST_STATUS(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_TLKX_BIST_STATUS(a) "OCX_TLKX_BIST_STATUS"
+#define device_bar_BDK_OCX_TLKX_BIST_STATUS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_TLKX_BIST_STATUS(a) (a)
+#define arguments_BDK_OCX_TLKX_BIST_STATUS(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_tlk#_byp_ctl
+ *
+ * OCX Transmit FIFO Bypass Control Registers
+ * This register is for diagnostic use.
+ */
+union bdk_ocx_tlkx_byp_ctl
+{
+ uint64_t u;
+ struct bdk_ocx_tlkx_byp_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t vc_dis : 11; /**< [ 11: 1](R/W) VC bypass disable. When set, the corresponding VC is restricted from using
+ the low latency TX FIFO bypass logic. This logic is typically disabled for
+ VC0 only. For diagnostic use only. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t vc_dis : 11; /**< [ 11: 1](R/W) VC bypass disable. When set, the corresponding VC is restricted from using
+ the low latency TX FIFO bypass logic. This logic is typically disabled for
+ VC0 only. For diagnostic use only. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_tlkx_byp_ctl_s cn; */
+};
+typedef union bdk_ocx_tlkx_byp_ctl bdk_ocx_tlkx_byp_ctl_t;
+
+static inline uint64_t BDK_OCX_TLKX_BYP_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_TLKX_BYP_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X) && (a<=2))
+ return 0x87e011010030ll + 0x2000ll * ((a) & 0x3);
+ __bdk_csr_fatal("OCX_TLKX_BYP_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_TLKX_BYP_CTL(a) bdk_ocx_tlkx_byp_ctl_t
+#define bustype_BDK_OCX_TLKX_BYP_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_TLKX_BYP_CTL(a) "OCX_TLKX_BYP_CTL"
+#define device_bar_BDK_OCX_TLKX_BYP_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_TLKX_BYP_CTL(a) (a)
+#define arguments_BDK_OCX_TLKX_BYP_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_tlk#_ecc_ctl
+ *
+ * OCX Transmit Link ECC Control Registers
+ */
+union bdk_ocx_tlkx_ecc_ctl
+{
+ uint64_t u;
+ struct bdk_ocx_tlkx_ecc_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_38_63 : 26;
+ uint64_t rply1_flip : 2; /**< [ 37: 36](R/W) Test pattern to cause ECC errors in RPLY1 RAM. */
+ uint64_t rply0_flip : 2; /**< [ 35: 34](R/W) Test pattern to cause ECC errors in RPLY0 RAM. */
+ uint64_t fifo_flip : 2; /**< [ 33: 32](R/W) Test pattern to cause ECC errors in TX FIFO RAM. */
+ uint64_t reserved_3_31 : 29;
+ uint64_t rply1_cdis : 1; /**< [ 2: 2](R/W) ECC correction disable for replay top memories. */
+ uint64_t rply0_cdis : 1; /**< [ 1: 1](R/W) ECC correction disable for replay bottom memories. */
+ uint64_t fifo_cdis : 1; /**< [ 0: 0](R/W) ECC correction disable for TX FIFO memories. */
+#else /* Word 0 - Little Endian */
+ uint64_t fifo_cdis : 1; /**< [ 0: 0](R/W) ECC correction disable for TX FIFO memories. */
+ uint64_t rply0_cdis : 1; /**< [ 1: 1](R/W) ECC correction disable for replay bottom memories. */
+ uint64_t rply1_cdis : 1; /**< [ 2: 2](R/W) ECC correction disable for replay top memories. */
+ uint64_t reserved_3_31 : 29;
+ uint64_t fifo_flip : 2; /**< [ 33: 32](R/W) Test pattern to cause ECC errors in TX FIFO RAM. */
+ uint64_t rply0_flip : 2; /**< [ 35: 34](R/W) Test pattern to cause ECC errors in RPLY0 RAM. */
+ uint64_t rply1_flip : 2; /**< [ 37: 36](R/W) Test pattern to cause ECC errors in RPLY1 RAM. */
+ uint64_t reserved_38_63 : 26;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_tlkx_ecc_ctl_s cn; */
+};
+typedef union bdk_ocx_tlkx_ecc_ctl bdk_ocx_tlkx_ecc_ctl_t;
+
+static inline uint64_t BDK_OCX_TLKX_ECC_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_TLKX_ECC_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=2))
+ return 0x87e011010018ll + 0x2000ll * ((a) & 0x3);
+ __bdk_csr_fatal("OCX_TLKX_ECC_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_TLKX_ECC_CTL(a) bdk_ocx_tlkx_ecc_ctl_t
+#define bustype_BDK_OCX_TLKX_ECC_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_TLKX_ECC_CTL(a) "OCX_TLKX_ECC_CTL"
+#define device_bar_BDK_OCX_TLKX_ECC_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_TLKX_ECC_CTL(a) (a)
+#define arguments_BDK_OCX_TLKX_ECC_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_tlk#_fifo#_cnt
+ *
+ * OCX Transmit Link FIFO Count Registers
+ */
+union bdk_ocx_tlkx_fifox_cnt
+{
+ uint64_t u;
+ struct bdk_ocx_tlkx_fifox_cnt_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t count : 16; /**< [ 15: 0](RO/H) TX FIFO count of bus cycles to send. */
+#else /* Word 0 - Little Endian */
+ uint64_t count : 16; /**< [ 15: 0](RO/H) TX FIFO count of bus cycles to send. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_tlkx_fifox_cnt_s cn; */
+};
+typedef union bdk_ocx_tlkx_fifox_cnt bdk_ocx_tlkx_fifox_cnt_t;
+
+static inline uint64_t BDK_OCX_TLKX_FIFOX_CNT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_TLKX_FIFOX_CNT(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=2) && (b<=13)))
+ return 0x87e011010100ll + 0x2000ll * ((a) & 0x3) + 8ll * ((b) & 0xf);
+ __bdk_csr_fatal("OCX_TLKX_FIFOX_CNT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_OCX_TLKX_FIFOX_CNT(a,b) bdk_ocx_tlkx_fifox_cnt_t
+#define bustype_BDK_OCX_TLKX_FIFOX_CNT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_TLKX_FIFOX_CNT(a,b) "OCX_TLKX_FIFOX_CNT"
+#define device_bar_BDK_OCX_TLKX_FIFOX_CNT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_TLKX_FIFOX_CNT(a,b) (a)
+#define arguments_BDK_OCX_TLKX_FIFOX_CNT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) ocx_tlk#_key_high#
+ *
+ * OCX Transmit Encryption Key Registers
+ */
+union bdk_ocx_tlkx_key_highx
+{
+ uint64_t u;
+ struct bdk_ocx_tlkx_key_highx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](WO) Transmit encryption key \<127:64\>.
+ Reads as zero if OCX_TLK(0..2)_PROTECT[WO_KEY] = 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](WO) Transmit encryption key \<127:64\>.
+ Reads as zero if OCX_TLK(0..2)_PROTECT[WO_KEY] = 1. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_tlkx_key_highx_s cn; */
+};
+typedef union bdk_ocx_tlkx_key_highx bdk_ocx_tlkx_key_highx_t;
+
+static inline uint64_t BDK_OCX_TLKX_KEY_HIGHX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_TLKX_KEY_HIGHX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=2) && (b<=2)))
+ return 0x87e011010708ll + 0x2000ll * ((a) & 0x3) + 0x10ll * ((b) & 0x3);
+ __bdk_csr_fatal("OCX_TLKX_KEY_HIGHX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_OCX_TLKX_KEY_HIGHX(a,b) bdk_ocx_tlkx_key_highx_t
+#define bustype_BDK_OCX_TLKX_KEY_HIGHX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_TLKX_KEY_HIGHX(a,b) "OCX_TLKX_KEY_HIGHX"
+#define device_bar_BDK_OCX_TLKX_KEY_HIGHX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_TLKX_KEY_HIGHX(a,b) (a)
+#define arguments_BDK_OCX_TLKX_KEY_HIGHX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) ocx_tlk#_key_low#
+ *
+ * OCX Transmit Encryption Key Registers
+ */
+union bdk_ocx_tlkx_key_lowx
+{
+ uint64_t u;
+ struct bdk_ocx_tlkx_key_lowx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](WO) Transmit encryption key \<63:0\>.
+ Reads as zero if OCX_TLK(0..2)_PROTECT[WO_KEY] = 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](WO) Transmit encryption key \<63:0\>.
+ Reads as zero if OCX_TLK(0..2)_PROTECT[WO_KEY] = 1. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_tlkx_key_lowx_s cn; */
+};
+typedef union bdk_ocx_tlkx_key_lowx bdk_ocx_tlkx_key_lowx_t;
+
+static inline uint64_t BDK_OCX_TLKX_KEY_LOWX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_TLKX_KEY_LOWX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=2) && (b<=2)))
+ return 0x87e011010700ll + 0x2000ll * ((a) & 0x3) + 0x10ll * ((b) & 0x3);
+ __bdk_csr_fatal("OCX_TLKX_KEY_LOWX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_OCX_TLKX_KEY_LOWX(a,b) bdk_ocx_tlkx_key_lowx_t
+#define bustype_BDK_OCX_TLKX_KEY_LOWX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_TLKX_KEY_LOWX(a,b) "OCX_TLKX_KEY_LOWX"
+#define device_bar_BDK_OCX_TLKX_KEY_LOWX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_TLKX_KEY_LOWX(a,b) (a)
+#define arguments_BDK_OCX_TLKX_KEY_LOWX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) ocx_tlk#_lnk_data
+ *
+ * OCX Transmit Link Data Registers
+ */
+union bdk_ocx_tlkx_lnk_data
+{
+ uint64_t u;
+ struct bdk_ocx_tlkx_lnk_data_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_56_63 : 8;
+ uint64_t data : 56; /**< [ 55: 0](R/W) Writes to this register transfer the contents to the OCX_RLK(0..2)_LNK_DATA register on
+ the receiving link. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 56; /**< [ 55: 0](R/W) Writes to this register transfer the contents to the OCX_RLK(0..2)_LNK_DATA register on
+ the receiving link. */
+ uint64_t reserved_56_63 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_tlkx_lnk_data_s cn; */
+};
+typedef union bdk_ocx_tlkx_lnk_data bdk_ocx_tlkx_lnk_data_t;
+
+static inline uint64_t BDK_OCX_TLKX_LNK_DATA(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_TLKX_LNK_DATA(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=2))
+ return 0x87e011010028ll + 0x2000ll * ((a) & 0x3);
+ __bdk_csr_fatal("OCX_TLKX_LNK_DATA", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_TLKX_LNK_DATA(a) bdk_ocx_tlkx_lnk_data_t
+#define bustype_BDK_OCX_TLKX_LNK_DATA(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_TLKX_LNK_DATA(a) "OCX_TLKX_LNK_DATA"
+#define device_bar_BDK_OCX_TLKX_LNK_DATA(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_TLKX_LNK_DATA(a) (a)
+#define arguments_BDK_OCX_TLKX_LNK_DATA(a) (a),-1,-1,-1
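+
+/* Usage sketch (illustration only): send a 56-bit message to the link
+ * partner; it arrives in the partner's OCX_RLK()_LNK_DATA register and
+ * raises the RX_LDAT interrupt there. csr_write64() is a placeholder raw
+ * MMIO accessor; 'msg' is an arbitrary payload.
+ *
+ *   bdk_ocx_tlkx_lnk_data_t ld = { .u = 0 };
+ *   ld.s.data = msg & ((1ull << 56) - 1);  // payload is 56 bits wide
+ *   csr_write64(BDK_OCX_TLKX_LNK_DATA(lnk), ld.u);
+ */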
+
+/**
+ * Register (RSL) ocx_tlk#_lnk_vc#_cnt
+ *
+ * OCX Transmit Link VC Credits Registers
+ */
+union bdk_ocx_tlkx_lnk_vcx_cnt
+{
+ uint64_t u;
+ struct bdk_ocx_tlkx_lnk_vcx_cnt_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t count : 16; /**< [ 15: 0](RO/H) Link VC credits available for use. VC13 always reads 1 since credits are not required. */
+#else /* Word 0 - Little Endian */
+ uint64_t count : 16; /**< [ 15: 0](RO/H) Link VC credits available for use. VC13 always reads 1 since credits are not required. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_tlkx_lnk_vcx_cnt_s cn; */
+};
+typedef union bdk_ocx_tlkx_lnk_vcx_cnt bdk_ocx_tlkx_lnk_vcx_cnt_t;
+
+static inline uint64_t BDK_OCX_TLKX_LNK_VCX_CNT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_TLKX_LNK_VCX_CNT(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=2) && (b<=13)))
+ return 0x87e011010200ll + 0x2000ll * ((a) & 0x3) + 8ll * ((b) & 0xf);
+ __bdk_csr_fatal("OCX_TLKX_LNK_VCX_CNT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_OCX_TLKX_LNK_VCX_CNT(a,b) bdk_ocx_tlkx_lnk_vcx_cnt_t
+#define bustype_BDK_OCX_TLKX_LNK_VCX_CNT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_TLKX_LNK_VCX_CNT(a,b) "OCX_TLKX_LNK_VCX_CNT"
+#define device_bar_BDK_OCX_TLKX_LNK_VCX_CNT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_TLKX_LNK_VCX_CNT(a,b) (a)
+#define arguments_BDK_OCX_TLKX_LNK_VCX_CNT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) ocx_tlk#_mcd_ctl
+ *
+ * OCX Transmit Link MCD Control Registers
+ * This register controls which MCD bits are transported via the link. For proper operation,
+ * only one link must be enabled in both directions between each pair of link partners.
+ *
+ * Internal:
+ * If N chips are connected over OCX, N-1 links should have MCD enabled.
+ * A single "central" chip should connect all MCD buses and have a single MCD enabled link
+ * to each of the other chips. No MCD enabled links should connect between chips that don't
+ * include the "central" chip.
+ */
+union bdk_ocx_tlkx_mcd_ctl
+{
+ uint64_t u;
+ struct bdk_ocx_tlkx_mcd_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_3_63 : 61;
+ uint64_t tx_enb : 3; /**< [ 2: 0](R/W) Transmission enable signals for MCD bits \<2:0\>. */
+#else /* Word 0 - Little Endian */
+ uint64_t tx_enb : 3; /**< [ 2: 0](R/W) Transmission enable signals for MCD bits \<2:0\>. */
+ uint64_t reserved_3_63 : 61;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_tlkx_mcd_ctl_s cn; */
+};
+typedef union bdk_ocx_tlkx_mcd_ctl bdk_ocx_tlkx_mcd_ctl_t;
+
+static inline uint64_t BDK_OCX_TLKX_MCD_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_TLKX_MCD_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=2))
+ return 0x87e011010020ll + 0x2000ll * ((a) & 0x3);
+ __bdk_csr_fatal("OCX_TLKX_MCD_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_TLKX_MCD_CTL(a) bdk_ocx_tlkx_mcd_ctl_t
+#define bustype_BDK_OCX_TLKX_MCD_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_TLKX_MCD_CTL(a) "OCX_TLKX_MCD_CTL"
+#define device_bar_BDK_OCX_TLKX_MCD_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_TLKX_MCD_CTL(a) (a)
+#define arguments_BDK_OCX_TLKX_MCD_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_tlk#_protect
+ *
+ * OCX Transmit Data Protection Control Registers
+ */
+union bdk_ocx_tlkx_protect
+{
+ uint64_t u;
+ struct bdk_ocx_tlkx_protect_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t wo_key : 1; /**< [ 7: 7](R/W1S) Setting this bit blocks read access to the OCX_TLK(0..2)_KEY and
+ OCX_TLK(0..2)_SALT registers. Once set this bit cannot be cleared until reset. */
+ uint64_t reserved_3_6 : 4;
+ uint64_t busy : 1; /**< [ 2: 2](RO/H) When set, LOAD and/or BUSY signals are being transmitted to the link
+ partner. Hold off any updates to the OCX_TLK()_KEY_LOW, OCX_TLK()_KEY_HIGH,
+ OCX_TLK()_SALT_LOW, OCX_TLK()_SALT_HIGH and OCX_TLK()_PROTECT registers while
+ this bit is set. */
+ uint64_t load : 1; /**< [ 1: 1](WO) Setting this bit loads the current set of keys written to the
+ OCX_TLK()_KEY_LOW, OCX_TLK()_KEY_HIGH, OCX_TLK()_SALT_LOW, OCX_TLK()_SALT_HIGH
+ registers and forces the receive side of the link partner to do likewise. */
+ uint64_t enable : 1; /**< [ 0: 0](R/W) Enable data encryption. When set this bit enables encryption on the
+ transmitter and the receiving link partner.
+
+ Internal:
+ Encryption is non-functional on pass 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t enable : 1; /**< [ 0: 0](R/W) Enable data encryption. When set this bit enables encryption on the
+ transmitter and the receiving link partner.
+
+ Internal:
+ Encryption is non-functional on pass 1. */
+ uint64_t load : 1; /**< [ 1: 1](WO) Setting this bit loads the current set of keys written to the
+ OCX_TLK()_KEY_LOW, OCX_TLK()_KEY_HIGH, OCX_TLK()_SALT_LOW, OCX_TLK()_SALT_HIGH
+ registers and forces the receive side of the link partner to do likewise. */
+ uint64_t busy : 1; /**< [ 2: 2](RO/H) When set, LOAD and/or BUSY signals are being transmitted to the link
+ partner. Hold off any updates to the OCX_TLK()_KEY_LOW, OCX_TLK()_KEY_HIGH,
+ OCX_TLK()_SALT_LOW, OCX_TLK()_SALT_HIGH and OCX_TLK()_PROTECT registers while
+ this bit is set. */
+ uint64_t reserved_3_6 : 4;
+ uint64_t wo_key : 1; /**< [ 7: 7](R/W1S) Setting this bit blocks read access to the OCX_TLK(0..2)_KEY and
+ OCX_TLK(0..2)_SALT registers. Once set this bit cannot be cleared until reset. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_tlkx_protect_s cn; */
+};
+typedef union bdk_ocx_tlkx_protect bdk_ocx_tlkx_protect_t;
+
+static inline uint64_t BDK_OCX_TLKX_PROTECT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_TLKX_PROTECT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=2))
+ return 0x87e0110107c0ll + 0x2000ll * ((a) & 0x3);
+ __bdk_csr_fatal("OCX_TLKX_PROTECT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_TLKX_PROTECT(a) bdk_ocx_tlkx_protect_t
+#define bustype_BDK_OCX_TLKX_PROTECT(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_TLKX_PROTECT(a) "OCX_TLKX_PROTECT"
+#define device_bar_BDK_OCX_TLKX_PROTECT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_TLKX_PROTECT(a) (a)
+#define arguments_BDK_OCX_TLKX_PROTECT(a) (a),-1,-1,-1
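Taken together, [LOAD], [BUSY] and [WO_KEY] describe a small handshake: stage the key/salt material, pulse [LOAD], and avoid further updates while [BUSY] is set. A hedged sketch of that sequence using only the salt registers defined below (the OCX_TLK()_KEY_* registers live elsewhere in this header and are loaded the same way); BDK_CSR_READ/BDK_CSR_WRITE are assumed from bdk-csr.h:

  #include <bdk.h>

  /* Illustrative sequence: program salt material, latch it with LOAD,
   * then wait for the handshake with the link partner to finish. */
  static void ocx_load_salt(bdk_node_t node, int link, uint64_t lo, uint64_t hi)
  {
      BDK_CSR_WRITE(node, BDK_OCX_TLKX_SALT_LOW(link), lo);
      BDK_CSR_WRITE(node, BDK_OCX_TLKX_SALT_HIGH(link), hi);

      bdk_ocx_tlkx_protect_t prot = { .u = 0 };
      prot.s.load = 1; /* WO bit: latch the staged key/salt set */
      BDK_CSR_WRITE(node, BDK_OCX_TLKX_PROTECT(link), prot.u);

      /* Hold off further key/salt/protect updates while BUSY is set. */
      do {
          prot.u = BDK_CSR_READ(node, BDK_OCX_TLKX_PROTECT(link));
      } while (prot.s.busy);
  }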
+
+/**
+ * Register (RSL) ocx_tlk#_rtn_vc#_cnt
+ *
+ * OCX Transmit Link Return VC Credits Registers
+ */
+union bdk_ocx_tlkx_rtn_vcx_cnt
+{
+ uint64_t u;
+ struct bdk_ocx_tlkx_rtn_vcx_cnt_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t count : 16; /**< [ 15: 0](RO/H) Link VC credits to return. VC13 always reads 0 since credits are never returned. */
+#else /* Word 0 - Little Endian */
+ uint64_t count : 16; /**< [ 15: 0](RO/H) Link VC credits to return. VC13 always reads 0 since credits are never returned. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_tlkx_rtn_vcx_cnt_s cn; */
+};
+typedef union bdk_ocx_tlkx_rtn_vcx_cnt bdk_ocx_tlkx_rtn_vcx_cnt_t;
+
+static inline uint64_t BDK_OCX_TLKX_RTN_VCX_CNT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_TLKX_RTN_VCX_CNT(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=2) && (b<=13)))
+ return 0x87e011010300ll + 0x2000ll * ((a) & 0x3) + 8ll * ((b) & 0xf);
+ __bdk_csr_fatal("OCX_TLKX_RTN_VCX_CNT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_OCX_TLKX_RTN_VCX_CNT(a,b) bdk_ocx_tlkx_rtn_vcx_cnt_t
+#define bustype_BDK_OCX_TLKX_RTN_VCX_CNT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_TLKX_RTN_VCX_CNT(a,b) "OCX_TLKX_RTN_VCX_CNT"
+#define device_bar_BDK_OCX_TLKX_RTN_VCX_CNT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_TLKX_RTN_VCX_CNT(a,b) (a)
+#define arguments_BDK_OCX_TLKX_RTN_VCX_CNT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) ocx_tlk#_salt_high
+ *
+ * OCX Transmit Encryption Salt Registers
+ */
+union bdk_ocx_tlkx_salt_high
+{
+ uint64_t u;
+ struct bdk_ocx_tlkx_salt_high_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](WO) Transmit salt data \<127:64\>.
+ Reads as zero if OCX_TLK(0..2)_PROTECT[WO_KEY] = 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](WO) Transmit salt data \<127:64\>.
+ Reads as zero if OCX_TLK(0..2)_PROTECT[WO_KEY] = 1. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_tlkx_salt_high_s cn; */
+};
+typedef union bdk_ocx_tlkx_salt_high bdk_ocx_tlkx_salt_high_t;
+
+static inline uint64_t BDK_OCX_TLKX_SALT_HIGH(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_TLKX_SALT_HIGH(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=2))
+ return 0x87e011010788ll + 0x2000ll * ((a) & 0x3);
+ __bdk_csr_fatal("OCX_TLKX_SALT_HIGH", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_TLKX_SALT_HIGH(a) bdk_ocx_tlkx_salt_high_t
+#define bustype_BDK_OCX_TLKX_SALT_HIGH(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_TLKX_SALT_HIGH(a) "OCX_TLKX_SALT_HIGH"
+#define device_bar_BDK_OCX_TLKX_SALT_HIGH(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_TLKX_SALT_HIGH(a) (a)
+#define arguments_BDK_OCX_TLKX_SALT_HIGH(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_tlk#_salt_low
+ *
+ * OCX Transmit Encryption Salt Registers
+ */
+union bdk_ocx_tlkx_salt_low
+{
+ uint64_t u;
+ struct bdk_ocx_tlkx_salt_low_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](WO) Transmit salt data \<63:0\>.
+ Reads as zero if OCX_TLK(0..2)_PROTECT[WO_KEY] = 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](WO) Transmit salt data \<63:0\>.
+ Reads as zero if OCX_TLK(0..2)_PROTECT[WO_KEY] = 1. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_tlkx_salt_low_s cn; */
+};
+typedef union bdk_ocx_tlkx_salt_low bdk_ocx_tlkx_salt_low_t;
+
+static inline uint64_t BDK_OCX_TLKX_SALT_LOW(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_TLKX_SALT_LOW(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=2))
+ return 0x87e011010780ll + 0x2000ll * ((a) & 0x3);
+ __bdk_csr_fatal("OCX_TLKX_SALT_LOW", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_TLKX_SALT_LOW(a) bdk_ocx_tlkx_salt_low_t
+#define bustype_BDK_OCX_TLKX_SALT_LOW(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_TLKX_SALT_LOW(a) "OCX_TLKX_SALT_LOW"
+#define device_bar_BDK_OCX_TLKX_SALT_LOW(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_TLKX_SALT_LOW(a) (a)
+#define arguments_BDK_OCX_TLKX_SALT_LOW(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_tlk#_stat_ctl
+ *
+ * OCX Transmit Link Statistics Control Registers
+ */
+union bdk_ocx_tlkx_stat_ctl
+{
+ uint64_t u;
+ struct bdk_ocx_tlkx_stat_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t clear : 1; /**< [ 1: 1](WO) Setting this bit clears all OCX_TLK(a)_STAT_*CNT, OCX_TLK(a)_STAT_*CMD,
+ OCX_TLK(a)_STAT_*PKT and OCX_TLK(0..2)_STAT_*CON registers. */
+ uint64_t enable : 1; /**< [ 0: 0](R/W) This bit controls the capture of statistics to the OCX_TLK(a)_STAT_*CNT,
+ OCX_TLK(a)_STAT_*CMD, OCX_TLK(a)_STAT_*PKT and OCX_TLK(a)_STAT_*CON registers. When set,
+ traffic increments the corresponding registers. When cleared, traffic is ignored. */
+#else /* Word 0 - Little Endian */
+ uint64_t enable : 1; /**< [ 0: 0](R/W) This bit controls the capture of statistics to the OCX_TLK(a)_STAT_*CNT,
+ OCX_TLK(a)_STAT_*CMD, OCX_TLK(a)_STAT_*PKT and OCX_TLK(a)_STAT_*CON registers. When set,
+ traffic increments the corresponding registers. When cleared, traffic is ignored. */
+ uint64_t clear : 1; /**< [ 1: 1](WO) Setting this bit clears all OCX_TLK(a)_STAT_*CNT, OCX_TLK(a)_STAT_*CMD,
+ OCX_TLK(a)_STAT_*PKT and OCX_TLK(0..2)_STAT_*CON registers. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_tlkx_stat_ctl_s cn; */
+};
+typedef union bdk_ocx_tlkx_stat_ctl bdk_ocx_tlkx_stat_ctl_t;
+
+static inline uint64_t BDK_OCX_TLKX_STAT_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_TLKX_STAT_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=2))
+ return 0x87e011010040ll + 0x2000ll * ((a) & 0x3);
+ __bdk_csr_fatal("OCX_TLKX_STAT_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_TLKX_STAT_CTL(a) bdk_ocx_tlkx_stat_ctl_t
+#define bustype_BDK_OCX_TLKX_STAT_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_TLKX_STAT_CTL(a) "OCX_TLKX_STAT_CTL"
+#define device_bar_BDK_OCX_TLKX_STAT_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_TLKX_STAT_CTL(a) (a)
+#define arguments_BDK_OCX_TLKX_STAT_CTL(a) (a),-1,-1,-1
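In practice [CLEAR] and [ENABLE] bracket a measurement window: clear the counters, enable capture, let traffic run, then disable and read the OCX_TLK()_STAT_* registers that follow. A minimal sketch under the same bdk-csr.h assumptions as above:

  #include <bdk.h>

  /* Illustrative: zero the statistics block, then start counting. */
  static void ocx_stats_restart(bdk_node_t node, int link)
  {
      bdk_ocx_tlkx_stat_ctl_t ctl = { .u = 0 };
      ctl.s.clear = 1; /* WO: zero every OCX_TLK(link)_STAT_* register */
      BDK_CSR_WRITE(node, BDK_OCX_TLKX_STAT_CTL(link), ctl.u);

      ctl.u = 0;
      ctl.s.enable = 1; /* R/W: begin capturing traffic */
      BDK_CSR_WRITE(node, BDK_OCX_TLKX_STAT_CTL(link), ctl.u);
  }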
+
+/**
+ * Register (RSL) ocx_tlk#_stat_data_cnt
+ *
+ * OCX Transmit Link Statistics Data Count Registers
+ */
+union bdk_ocx_tlkx_stat_data_cnt
+{
+ uint64_t u;
+ struct bdk_ocx_tlkx_stat_data_cnt_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W) Indicates the number of data blocks transferred over the CCPI link while
+ OCX_TLK()_STAT_CTL[ENABLE] has been set. */
+#else /* Word 0 - Little Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W) Indicates the number of data blocks transferred over the CCPI link while
+ OCX_TLK()_STAT_CTL[ENABLE] has been set. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_tlkx_stat_data_cnt_s cn; */
+};
+typedef union bdk_ocx_tlkx_stat_data_cnt bdk_ocx_tlkx_stat_data_cnt_t;
+
+static inline uint64_t BDK_OCX_TLKX_STAT_DATA_CNT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_TLKX_STAT_DATA_CNT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=2))
+ return 0x87e011010408ll + 0x2000ll * ((a) & 0x3);
+ __bdk_csr_fatal("OCX_TLKX_STAT_DATA_CNT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_TLKX_STAT_DATA_CNT(a) bdk_ocx_tlkx_stat_data_cnt_t
+#define bustype_BDK_OCX_TLKX_STAT_DATA_CNT(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_TLKX_STAT_DATA_CNT(a) "OCX_TLKX_STAT_DATA_CNT"
+#define device_bar_BDK_OCX_TLKX_STAT_DATA_CNT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_TLKX_STAT_DATA_CNT(a) (a)
+#define arguments_BDK_OCX_TLKX_STAT_DATA_CNT(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_tlk#_stat_err_cnt
+ *
+ * OCX Transmit Link Statistics Error Count Registers
+ */
+union bdk_ocx_tlkx_stat_err_cnt
+{
+ uint64_t u;
+ struct bdk_ocx_tlkx_stat_err_cnt_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W) Number of blocks received with an error over the CCPI link while
+ OCX_TLK(0..2)_STAT_CTL[ENABLE] has been set. */
+#else /* Word 0 - Little Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W) Number of blocks received with an error over the CCPI link while
+ OCX_TLK(0..2)_STAT_CTL[ENABLE] has been set. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_tlkx_stat_err_cnt_s cn; */
+};
+typedef union bdk_ocx_tlkx_stat_err_cnt bdk_ocx_tlkx_stat_err_cnt_t;
+
+static inline uint64_t BDK_OCX_TLKX_STAT_ERR_CNT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_TLKX_STAT_ERR_CNT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=2))
+ return 0x87e011010420ll + 0x2000ll * ((a) & 0x3);
+ __bdk_csr_fatal("OCX_TLKX_STAT_ERR_CNT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_TLKX_STAT_ERR_CNT(a) bdk_ocx_tlkx_stat_err_cnt_t
+#define bustype_BDK_OCX_TLKX_STAT_ERR_CNT(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_TLKX_STAT_ERR_CNT(a) "OCX_TLKX_STAT_ERR_CNT"
+#define device_bar_BDK_OCX_TLKX_STAT_ERR_CNT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_TLKX_STAT_ERR_CNT(a) (a)
+#define arguments_BDK_OCX_TLKX_STAT_ERR_CNT(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_tlk#_stat_idle_cnt
+ *
+ * OCX Transmit Link Statistics Idle Count Registers
+ */
+union bdk_ocx_tlkx_stat_idle_cnt
+{
+ uint64_t u;
+ struct bdk_ocx_tlkx_stat_idle_cnt_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W) Number of idle blocks transferred over the CCPI link while OCX_TLK(0..2)_STAT_CTL[ENABLE]
+ has been set. */
+#else /* Word 0 - Little Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W) Number of idle blocks transferred over the CCPI link while OCX_TLK(0..2)_STAT_CTL[ENABLE]
+ has been set. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_tlkx_stat_idle_cnt_s cn; */
+};
+typedef union bdk_ocx_tlkx_stat_idle_cnt bdk_ocx_tlkx_stat_idle_cnt_t;
+
+static inline uint64_t BDK_OCX_TLKX_STAT_IDLE_CNT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_TLKX_STAT_IDLE_CNT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=2))
+ return 0x87e011010400ll + 0x2000ll * ((a) & 0x3);
+ __bdk_csr_fatal("OCX_TLKX_STAT_IDLE_CNT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_TLKX_STAT_IDLE_CNT(a) bdk_ocx_tlkx_stat_idle_cnt_t
+#define bustype_BDK_OCX_TLKX_STAT_IDLE_CNT(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_TLKX_STAT_IDLE_CNT(a) "OCX_TLKX_STAT_IDLE_CNT"
+#define device_bar_BDK_OCX_TLKX_STAT_IDLE_CNT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_TLKX_STAT_IDLE_CNT(a) (a)
+#define arguments_BDK_OCX_TLKX_STAT_IDLE_CNT(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_tlk#_stat_mat#_cnt
+ *
+ * OCX Transmit Link Statistics Match Count Registers
+ */
+union bdk_ocx_tlkx_stat_matx_cnt
+{
+ uint64_t u;
+ struct bdk_ocx_tlkx_stat_matx_cnt_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W) Number of packets that have matched OCX_TLK(a)_STAT_MATCH0 and have been transferred over
+ the CCPI link while OCX_TLK(0..2)_STAT_CTL[ENABLE] has been set. */
+#else /* Word 0 - Little Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W) Number of packets that have matched OCX_TLK(a)_STAT_MATCH0 and have been transferred over
+ the CCPI link while OCX_TLK(0..2)_STAT_CTL[ENABLE] has been set. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_tlkx_stat_matx_cnt_s cn; */
+};
+typedef union bdk_ocx_tlkx_stat_matx_cnt bdk_ocx_tlkx_stat_matx_cnt_t;
+
+static inline uint64_t BDK_OCX_TLKX_STAT_MATX_CNT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_TLKX_STAT_MATX_CNT(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=2) && (b<=3)))
+ return 0x87e011010440ll + 0x2000ll * ((a) & 0x3) + 8ll * ((b) & 0x3);
+ __bdk_csr_fatal("OCX_TLKX_STAT_MATX_CNT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_OCX_TLKX_STAT_MATX_CNT(a,b) bdk_ocx_tlkx_stat_matx_cnt_t
+#define bustype_BDK_OCX_TLKX_STAT_MATX_CNT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_TLKX_STAT_MATX_CNT(a,b) "OCX_TLKX_STAT_MATX_CNT"
+#define device_bar_BDK_OCX_TLKX_STAT_MATX_CNT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_TLKX_STAT_MATX_CNT(a,b) (a)
+#define arguments_BDK_OCX_TLKX_STAT_MATX_CNT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) ocx_tlk#_stat_match#
+ *
+ * OCX Transmit Link Statistics Match Registers
+ */
+union bdk_ocx_tlkx_stat_matchx
+{
+ uint64_t u;
+ struct bdk_ocx_tlkx_stat_matchx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_25_63 : 39;
+ uint64_t mask : 9; /**< [ 24: 16](R/W) Setting one of these bits masks (forces a match on) the corresponding bit comparison for each packet. */
+ uint64_t reserved_9_15 : 7;
+ uint64_t cmd : 5; /**< [ 8: 4](R/W) These bits are compared against the command for each packet sent over the link. If both
+ the unmasked [VC] and [CMD] bits match then OCX_TLK(0..2)_STAT_MAT(0..3)_CNT is
+ incremented. */
+ uint64_t vc : 4; /**< [ 3: 0](R/W) These bits are compared against the link VC number for each packet sent over the link.
+ If both the unmasked [VC] and [CMD] bits match, then OCX_TLK(0..2)_STAT_MAT(0..3)_CNT is
+ incremented. Only memory and I/O traffic are monitored. Matches are limited to
+ VC0 through VC11. */
+#else /* Word 0 - Little Endian */
+ uint64_t vc : 4; /**< [ 3: 0](R/W) These bits are compared against the link VC number for each packet sent over the link.
+ If both the unmasked [VC] and [CMD] bits match, then OCX_TLK(0..2)_STAT_MAT(0..3)_CNT is
+ incremented. Only memory and I/O traffic are monitored. Matches are limited to
+ VC0 through VC11. */
+ uint64_t cmd : 5; /**< [ 8: 4](R/W) These bits are compared against the command for each packet sent over the link. If both
+ the unmasked [VC] and [CMD] bits match then OCX_TLK(0..2)_STAT_MAT(0..3)_CNT is
+ incremented. */
+ uint64_t reserved_9_15 : 7;
+ uint64_t mask : 9; /**< [ 24: 16](R/W) Setting one of these bits masks (forces a match on) the corresponding bit comparison for each packet. */
+ uint64_t reserved_25_63 : 39;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_tlkx_stat_matchx_s cn; */
+};
+typedef union bdk_ocx_tlkx_stat_matchx bdk_ocx_tlkx_stat_matchx_t;
+
+static inline uint64_t BDK_OCX_TLKX_STAT_MATCHX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_TLKX_STAT_MATCHX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=2) && (b<=3)))
+ return 0x87e011010080ll + 0x2000ll * ((a) & 0x3) + 8ll * ((b) & 0x3);
+ __bdk_csr_fatal("OCX_TLKX_STAT_MATCHX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_OCX_TLKX_STAT_MATCHX(a,b) bdk_ocx_tlkx_stat_matchx_t
+#define bustype_BDK_OCX_TLKX_STAT_MATCHX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_TLKX_STAT_MATCHX(a,b) "OCX_TLKX_STAT_MATCHX"
+#define device_bar_BDK_OCX_TLKX_STAT_MATCHX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_TLKX_STAT_MATCHX(a,b) (a)
+#define arguments_BDK_OCX_TLKX_STAT_MATCHX(a,b) (a),(b),-1,-1
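These match registers pair with the OCX_TLK()_STAT_MAT()_CNT counters above: program [VC], [CMD] and [MASK], then read the counter after some traffic. A sketch of counting every packet on one VC regardless of command; it assumes mask bit i gates compare bit i, so bits <8:4> of [MASK] cover [CMD] and bits <3:0> cover [VC]:

  #include <bdk.h>

  /* Illustrative: count all packets on one VC, ignoring the command.
   * Assumes mask<8:4> gates [CMD] and mask<3:0> gates [VC]; a set mask
   * bit forces that bit comparison to match. */
  static void ocx_match_vc_only(bdk_node_t node, int link, int vc)
  {
      bdk_ocx_tlkx_stat_matchx_t m = { .u = 0 };
      m.s.vc = vc;
      m.s.mask = 0x1f0; /* mask out the five [CMD] compare bits */
      BDK_CSR_WRITE(node, BDK_OCX_TLKX_STAT_MATCHX(link, 0), m.u);
  }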
+
+/**
+ * Register (RSL) ocx_tlk#_stat_retry_cnt
+ *
+ * OCX Transmit Link Statistics Retry Count Registers
+ */
+union bdk_ocx_tlkx_stat_retry_cnt
+{
+ uint64_t u;
+ struct bdk_ocx_tlkx_stat_retry_cnt_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W) Specifies the number of data blocks repeated over the CCPI link while
+ OCX_TLK(0..2)_STAT_CTL[ENABLE] has been set. */
+#else /* Word 0 - Little Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W) Specifies the number of data blocks repeated over the CCPI link while
+ OCX_TLK(0..2)_STAT_CTL[ENABLE] has been set. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_tlkx_stat_retry_cnt_s cn; */
+};
+typedef union bdk_ocx_tlkx_stat_retry_cnt bdk_ocx_tlkx_stat_retry_cnt_t;
+
+static inline uint64_t BDK_OCX_TLKX_STAT_RETRY_CNT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_TLKX_STAT_RETRY_CNT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=2))
+ return 0x87e011010418ll + 0x2000ll * ((a) & 0x3);
+ __bdk_csr_fatal("OCX_TLKX_STAT_RETRY_CNT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_TLKX_STAT_RETRY_CNT(a) bdk_ocx_tlkx_stat_retry_cnt_t
+#define bustype_BDK_OCX_TLKX_STAT_RETRY_CNT(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_TLKX_STAT_RETRY_CNT(a) "OCX_TLKX_STAT_RETRY_CNT"
+#define device_bar_BDK_OCX_TLKX_STAT_RETRY_CNT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_TLKX_STAT_RETRY_CNT(a) (a)
+#define arguments_BDK_OCX_TLKX_STAT_RETRY_CNT(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_tlk#_stat_sync_cnt
+ *
+ * OCX Transmit Link Statistics Sync Count Registers
+ */
+union bdk_ocx_tlkx_stat_sync_cnt
+{
+ uint64_t u;
+ struct bdk_ocx_tlkx_stat_sync_cnt_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W) Indicates the number of sync (control) blocks transferred over the CCPI link while
+ OCX_TLK(0..2)_STAT_CTL[ENABLE] has been set. */
+#else /* Word 0 - Little Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W) Indicates the number of sync (control) blocks transferred over the CCPI link while
+ OCX_TLK(0..2)_STAT_CTL[ENABLE] has been set. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_tlkx_stat_sync_cnt_s cn; */
+};
+typedef union bdk_ocx_tlkx_stat_sync_cnt bdk_ocx_tlkx_stat_sync_cnt_t;
+
+static inline uint64_t BDK_OCX_TLKX_STAT_SYNC_CNT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_TLKX_STAT_SYNC_CNT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=2))
+ return 0x87e011010410ll + 0x2000ll * ((a) & 0x3);
+ __bdk_csr_fatal("OCX_TLKX_STAT_SYNC_CNT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_TLKX_STAT_SYNC_CNT(a) bdk_ocx_tlkx_stat_sync_cnt_t
+#define bustype_BDK_OCX_TLKX_STAT_SYNC_CNT(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_TLKX_STAT_SYNC_CNT(a) "OCX_TLKX_STAT_SYNC_CNT"
+#define device_bar_BDK_OCX_TLKX_STAT_SYNC_CNT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_TLKX_STAT_SYNC_CNT(a) (a)
+#define arguments_BDK_OCX_TLKX_STAT_SYNC_CNT(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_tlk#_stat_vc#_cmd
+ *
+ * OCX Transmit Link Statistics VC Commands Count Registers
+ */
+union bdk_ocx_tlkx_stat_vcx_cmd
+{
+ uint64_t u;
+ struct bdk_ocx_tlkx_stat_vcx_cmd_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W) Number of commands on this VC that have been transferred over the CCPI link while
+ OCX_TLK(0..2)_STAT_CTL[ENABLE] has been set. For VCs 6 through 13, the number of commands
+ is equal to the number of packets. */
+#else /* Word 0 - Little Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W) Number of commands on this VC that have been transferred over the CCPI link while
+ OCX_TLK(0..2)_STAT_CTL[ENABLE] has been set. For VCs 6 through 13, the number of commands
+ is equal to the number of packets. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_tlkx_stat_vcx_cmd_s cn; */
+};
+typedef union bdk_ocx_tlkx_stat_vcx_cmd bdk_ocx_tlkx_stat_vcx_cmd_t;
+
+static inline uint64_t BDK_OCX_TLKX_STAT_VCX_CMD(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_TLKX_STAT_VCX_CMD(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=2) && (b<=5)))
+ return 0x87e011010480ll + 0x2000ll * ((a) & 0x3) + 8ll * ((b) & 0x7);
+ __bdk_csr_fatal("OCX_TLKX_STAT_VCX_CMD", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_OCX_TLKX_STAT_VCX_CMD(a,b) bdk_ocx_tlkx_stat_vcx_cmd_t
+#define bustype_BDK_OCX_TLKX_STAT_VCX_CMD(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_TLKX_STAT_VCX_CMD(a,b) "OCX_TLKX_STAT_VCX_CMD"
+#define device_bar_BDK_OCX_TLKX_STAT_VCX_CMD(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_TLKX_STAT_VCX_CMD(a,b) (a)
+#define arguments_BDK_OCX_TLKX_STAT_VCX_CMD(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) ocx_tlk#_stat_vc#_con
+ *
+ * OCX Transmit Link Statistics VC Conflict Count Registers
+ */
+union bdk_ocx_tlkx_stat_vcx_con
+{
+ uint64_t u;
+ struct bdk_ocx_tlkx_stat_vcx_con_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W) Number of conflicts on this VC while OCX_TLK(0..2)_STAT_CTL[ENABLE] has been set. A
+ conflict is indicated when a VC has one or more packets to send and no link credits are
+ available. VC13 does not require credits so no conflicts are ever indicated (i.e. reads
+ 0). */
+#else /* Word 0 - Little Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W) Number of conflicts on this VC while OCX_TLK(0..2)_STAT_CTL[ENABLE] has been set. A
+ conflict is indicated when a VC has one or more packets to send and no link credits are
+ available. VC13 does not require credits so no conflicts are ever indicated (i.e. reads
+ 0). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_tlkx_stat_vcx_con_s cn; */
+};
+typedef union bdk_ocx_tlkx_stat_vcx_con bdk_ocx_tlkx_stat_vcx_con_t;
+
+static inline uint64_t BDK_OCX_TLKX_STAT_VCX_CON(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_TLKX_STAT_VCX_CON(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=2) && (b<=13)))
+ return 0x87e011010580ll + 0x2000ll * ((a) & 0x3) + 8ll * ((b) & 0xf);
+ __bdk_csr_fatal("OCX_TLKX_STAT_VCX_CON", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_OCX_TLKX_STAT_VCX_CON(a,b) bdk_ocx_tlkx_stat_vcx_con_t
+#define bustype_BDK_OCX_TLKX_STAT_VCX_CON(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_TLKX_STAT_VCX_CON(a,b) "OCX_TLKX_STAT_VCX_CON"
+#define device_bar_BDK_OCX_TLKX_STAT_VCX_CON(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_TLKX_STAT_VCX_CON(a,b) (a)
+#define arguments_BDK_OCX_TLKX_STAT_VCX_CON(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) ocx_tlk#_stat_vc#_pkt
+ *
+ * OCX Transmit Link Statistics VC Packet Count Registers
+ */
+union bdk_ocx_tlkx_stat_vcx_pkt
+{
+ uint64_t u;
+ struct bdk_ocx_tlkx_stat_vcx_pkt_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W) Number of packets on this VC that have been transferred over the CCPI link while
+ OCX_TLK(0..2)_STAT_CTL[ENABLE] has been set. */
+#else /* Word 0 - Little Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W) Number of packets on this VC that have been transferred over the CCPI link while
+ OCX_TLK(0..2)_STAT_CTL[ENABLE] has been set. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_tlkx_stat_vcx_pkt_s cn; */
+};
+typedef union bdk_ocx_tlkx_stat_vcx_pkt bdk_ocx_tlkx_stat_vcx_pkt_t;
+
+static inline uint64_t BDK_OCX_TLKX_STAT_VCX_PKT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_TLKX_STAT_VCX_PKT(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=2) && (b<=13)))
+ return 0x87e011010500ll + 0x2000ll * ((a) & 0x3) + 8ll * ((b) & 0xf);
+ __bdk_csr_fatal("OCX_TLKX_STAT_VCX_PKT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_OCX_TLKX_STAT_VCX_PKT(a,b) bdk_ocx_tlkx_stat_vcx_pkt_t
+#define bustype_BDK_OCX_TLKX_STAT_VCX_PKT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_TLKX_STAT_VCX_PKT(a,b) "OCX_TLKX_STAT_VCX_PKT"
+#define device_bar_BDK_OCX_TLKX_STAT_VCX_PKT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_TLKX_STAT_VCX_PKT(a,b) (a)
+#define arguments_BDK_OCX_TLKX_STAT_VCX_PKT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) ocx_tlk#_status
+ *
+ * OCX Transmit Link Status Registers
+ */
+union bdk_ocx_tlkx_status
+{
+ uint64_t u;
+ struct bdk_ocx_tlkx_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_56_63 : 8;
+ uint64_t rply_fptr : 8; /**< [ 55: 48](RO/H) Replay buffer last free pointer. */
+ uint64_t tx_seq : 8; /**< [ 47: 40](RO/H) Last block transmitted. */
+ uint64_t rx_seq : 8; /**< [ 39: 32](RO/H) Last block received. */
+ uint64_t reserved_23_31 : 9;
+ uint64_t ackcnt : 7; /**< [ 22: 16](RO/H) Indicates the number of ACKs waiting to be transmitted. */
+ uint64_t reserved_9_15 : 7;
+ uint64_t drop : 1; /**< [ 8: 8](RO/H) Link is dropping all requests. */
+ uint64_t sm : 6; /**< [ 7: 2](RO/H) Block state machine:
+ Bit\<2\>: Req / Ack (Init or retry only).
+ Bit\<3\>: Init.
+ Bit\<4\>: Run.
+ Bit\<5\>: Retry.
+ Bit\<6\>: Replay.
+ Bit\<7\>: Replay Pending. */
+ uint64_t cnt : 2; /**< [ 1: 0](RO/H) Block subcount. Should always increment 0,1,2,3,0.. except during TX PHY stall. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt : 2; /**< [ 1: 0](RO/H) Block subcount. Should always increment 0,1,2,3,0.. except during TX PHY stall. */
+ uint64_t sm : 6; /**< [ 7: 2](RO/H) Block state machine:
+ Bit\<2\>: Req / Ack (Init or retry only).
+ Bit\<3\>: Init.
+ Bit\<4\>: Run.
+ Bit\<5\>: Retry.
+ Bit\<6\>: Replay.
+ Bit\<7\>: Replay Pending. */
+ uint64_t drop : 1; /**< [ 8: 8](RO/H) Link is dropping all requests. */
+ uint64_t reserved_9_15 : 7;
+ uint64_t ackcnt : 7; /**< [ 22: 16](RO/H) Indicates the number of ACKs waiting to be transmitted. */
+ uint64_t reserved_23_31 : 9;
+ uint64_t rx_seq : 8; /**< [ 39: 32](RO/H) Last block received. */
+ uint64_t tx_seq : 8; /**< [ 47: 40](RO/H) Last block transmitted. */
+ uint64_t rply_fptr : 8; /**< [ 55: 48](RO/H) Replay buffer last free pointer. */
+ uint64_t reserved_56_63 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_tlkx_status_s cn; */
+};
+typedef union bdk_ocx_tlkx_status bdk_ocx_tlkx_status_t;
+
+static inline uint64_t BDK_OCX_TLKX_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_TLKX_STATUS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=2))
+ return 0x87e011010000ll + 0x2000ll * ((a) & 0x3);
+ __bdk_csr_fatal("OCX_TLKX_STATUS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_TLKX_STATUS(a) bdk_ocx_tlkx_status_t
+#define bustype_BDK_OCX_TLKX_STATUS(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_TLKX_STATUS(a) "OCX_TLKX_STATUS"
+#define device_bar_BDK_OCX_TLKX_STATUS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_TLKX_STATUS(a) (a)
+#define arguments_BDK_OCX_TLKX_STATUS(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ocx_win_cmd
+ *
+ * OCX Window Address Register
+ * For diagnostic use only. This register is typically written by hardware after accesses to the
+ * SLI_WIN_* registers. Contains the address, read size and write mask to be used for the window
+ * operation. Write data should be written first and placed in the OCX_WIN_WR_DATA register.
+ * Writing this register starts the operation. A second write to this register while an
+ * operation is in progress will stall.
+ */
+union bdk_ocx_win_cmd
+{
+ uint64_t u;
+ struct bdk_ocx_win_cmd_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t wr_mask : 8; /**< [ 63: 56](R/W) Mask for the data to be written. When a bit is 1, the corresponding byte will be written.
+ The set bits of this field must be contiguous, correspond to a 1-, 2-, 4-, or 8-byte
+ operation, and be aligned to the operation size. A value of 0 will produce unpredictable
+ results. This field is ignored during a read (LD_OP=1). */
+ uint64_t reserved_54_55 : 2;
+ uint64_t el : 2; /**< [ 53: 52](R/W) Execution level. This field is used to supply the execution level of the generated load
+ or store command. */
+ uint64_t nsecure : 1; /**< [ 51: 51](R/W) Nonsecure mode. Setting this bit causes the generated load or store command to be
+ considered nonsecure. */
+ uint64_t ld_cmd : 2; /**< [ 50: 49](R/W) The load command sent with the read:
+ 0x0 = Load 1-bytes.
+ 0x1 = Load 2-bytes.
+ 0x2 = Load 4-bytes.
+ 0x3 = Load 8-bytes. */
+ uint64_t ld_op : 1; /**< [ 48: 48](R/W) Operation type:
+ 0 = Store.
+ 1 = Load operation. */
+ uint64_t addr : 48; /**< [ 47: 0](R/W) The address used in both the load and store operations:
+ \<47:46\> = Reserved.
+ \<45:44\> = CCPI_ID.
+ \<43:36\> = NCB_ID.
+ \<35:0\> = Address.
+
+ When \<43:36\> NCB_ID is RSL (0x7E) address field is defined as:
+ \<47:46\> = Reserved.
+ \<45:44\> = CCPI_ID.
+ \<43:36\> = 0x7E.
+ \<35:32\> = Reserved.
+ \<31:24\> = RSL_ID.
+ \<23:0\> = RSL register offset.
+
+ \<2:0\> are ignored in a store operation. */
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 48; /**< [ 47: 0](R/W) The address used in both the load and store operations:
+ \<47:46\> = Reserved.
+ \<45:44\> = CCPI_ID.
+ \<43:36\> = NCB_ID.
+ \<35:0\> = Address.
+
+ When \<43:36\> NCB_ID is RSL (0x7E) address field is defined as:
+ \<47:46\> = Reserved.
+ \<45:44\> = CCPI_ID.
+ \<43:36\> = 0x7E.
+ \<35:32\> = Reserved.
+ \<31:24\> = RSL_ID.
+ \<23:0\> = RSL register offset.
+
+ \<2:0\> are ignored in a store operation. */
+ uint64_t ld_op : 1; /**< [ 48: 48](R/W) Operation type:
+ 0 = Store.
+ 1 = Load operation. */
+ uint64_t ld_cmd : 2; /**< [ 50: 49](R/W) The load command sent with the read:
+ 0x0 = Load 1-bytes.
+ 0x1 = Load 2-bytes.
+ 0x2 = Load 4-bytes.
+ 0x3 = Load 8-bytes. */
+ uint64_t nsecure : 1; /**< [ 51: 51](R/W) Nonsecure mode. Setting this bit causes the generated load or store command to be
+ considered nonsecure. */
+ uint64_t el : 2; /**< [ 53: 52](R/W) Execution level. This field is used to supply the execution level of the generated load
+ or store command. */
+ uint64_t reserved_54_55 : 2;
+ uint64_t wr_mask : 8; /**< [ 63: 56](R/W) Mask for the data to be written. When a bit is 1, the corresponding byte will be written.
+ The set bits of this field must be contiguous, correspond to a 1-, 2-, 4-, or 8-byte
+ operation, and be aligned to the operation size. A value of 0 will produce unpredictable
+ results. This field is ignored during a read (LD_OP=1). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_win_cmd_s cn; */
+};
+typedef union bdk_ocx_win_cmd bdk_ocx_win_cmd_t;
+
+#define BDK_OCX_WIN_CMD BDK_OCX_WIN_CMD_FUNC()
+static inline uint64_t BDK_OCX_WIN_CMD_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_WIN_CMD_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x87e011000048ll;
+ __bdk_csr_fatal("OCX_WIN_CMD", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_WIN_CMD bdk_ocx_win_cmd_t
+#define bustype_BDK_OCX_WIN_CMD BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_WIN_CMD "OCX_WIN_CMD"
+#define device_bar_BDK_OCX_WIN_CMD 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_WIN_CMD 0
+#define arguments_BDK_OCX_WIN_CMD -1,-1,-1,-1
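The description above implies a three-step protocol: stage store data in OCX_WIN_WR_DATA (defined at the end of this file), write OCX_WIN_CMD to launch the operation, and, for loads, fetch the result from OCX_WIN_RD_DATA, which reads all-ones until the response arrives. A hedged sketch of an 8-byte window load under the same bdk-csr.h assumptions:

  #include <bdk.h>

  /* Illustrative 8-byte diagnostic window load. The caller encodes
   * CCPI_ID/NCB_ID into the 48-bit address as documented in [ADDR]. */
  static uint64_t ocx_win_read8(bdk_node_t node, uint64_t addr)
  {
      bdk_ocx_win_cmd_t cmd = { .u = 0 };
      cmd.s.ld_op = 1;    /* load rather than store */
      cmd.s.ld_cmd = 0x3; /* 8-byte load */
      cmd.s.addr = addr;
      BDK_CSR_WRITE(node, BDK_OCX_WIN_CMD, cmd.u);
      /* OCX_WIN_RD_DATA reads all-ones until the response lands, so a
       * robust caller would bound the wait using OCX_WIN_TIMER. */
      return BDK_CSR_READ(node, BDK_OCX_WIN_RD_DATA);
  }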
+
+/**
+ * Register (RSL) ocx_win_rd_data
+ *
+ * OCX Window Read Data Register
+ * For diagnostic use only. This register holds the read response data associated with a window
+ * command. It reads all-ones until the response is received.
+ */
+union bdk_ocx_win_rd_data
+{
+ uint64_t u;
+ struct bdk_ocx_win_rd_data_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](RO/H) Read response data. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](RO/H) Read response data. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_win_rd_data_s cn; */
+};
+typedef union bdk_ocx_win_rd_data bdk_ocx_win_rd_data_t;
+
+#define BDK_OCX_WIN_RD_DATA BDK_OCX_WIN_RD_DATA_FUNC()
+static inline uint64_t BDK_OCX_WIN_RD_DATA_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_WIN_RD_DATA_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x87e011000050ll;
+ __bdk_csr_fatal("OCX_WIN_RD_DATA", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_WIN_RD_DATA bdk_ocx_win_rd_data_t
+#define bustype_BDK_OCX_WIN_RD_DATA BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_WIN_RD_DATA "OCX_WIN_RD_DATA"
+#define device_bar_BDK_OCX_WIN_RD_DATA 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_WIN_RD_DATA 0
+#define arguments_BDK_OCX_WIN_RD_DATA -1,-1,-1,-1
+
+/**
+ * Register (RSL) ocx_win_timer
+ *
+ * OCX Window Timer Register
+ * Number of core clocks before an untransmitted WIN request is dropped and an interrupt is issued.
+ */
+union bdk_ocx_win_timer
+{
+ uint64_t u;
+ struct bdk_ocx_win_timer_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t tout : 14; /**< [ 15: 2](R/W) Number of core clocks times four. */
+ uint64_t tout1 : 2; /**< [ 1: 0](RO) Reserved as all-ones. */
+#else /* Word 0 - Little Endian */
+ uint64_t tout1 : 2; /**< [ 1: 0](RO) Reserved as all-ones. */
+ uint64_t tout : 14; /**< [ 15: 2](R/W) Number of core clocks times four. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_win_timer_s cn; */
+};
+typedef union bdk_ocx_win_timer bdk_ocx_win_timer_t;
+
+#define BDK_OCX_WIN_TIMER BDK_OCX_WIN_TIMER_FUNC()
+static inline uint64_t BDK_OCX_WIN_TIMER_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_WIN_TIMER_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x87e011000058ll;
+ __bdk_csr_fatal("OCX_WIN_TIMER", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_WIN_TIMER bdk_ocx_win_timer_t
+#define bustype_BDK_OCX_WIN_TIMER BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_WIN_TIMER "OCX_WIN_TIMER"
+#define device_bar_BDK_OCX_WIN_TIMER 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_WIN_TIMER 0
+#define arguments_BDK_OCX_WIN_TIMER -1,-1,-1,-1
+
+/**
+ * Register (RSL) ocx_win_wr_data
+ *
+ * OCX Window Write Data Register
+ * For diagnostic use only. This register is typically written by hardware after accesses to the
+ * SLI_WIN_WR_DATA register. Contains the data to write to the address located in OCX_WIN_CMD.
+ */
+union bdk_ocx_win_wr_data
+{
+ uint64_t u;
+ struct bdk_ocx_win_wr_data_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t wr_data : 64; /**< [ 63: 0](R/W) The data to be written. */
+#else /* Word 0 - Little Endian */
+ uint64_t wr_data : 64; /**< [ 63: 0](R/W) The data to be written. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ocx_win_wr_data_s cn; */
+};
+typedef union bdk_ocx_win_wr_data bdk_ocx_win_wr_data_t;
+
+#define BDK_OCX_WIN_WR_DATA BDK_OCX_WIN_WR_DATA_FUNC()
+static inline uint64_t BDK_OCX_WIN_WR_DATA_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_OCX_WIN_WR_DATA_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x87e011000040ll;
+ __bdk_csr_fatal("OCX_WIN_WR_DATA", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_OCX_WIN_WR_DATA bdk_ocx_win_wr_data_t
+#define bustype_BDK_OCX_WIN_WR_DATA BDK_CSR_TYPE_RSL
+#define basename_BDK_OCX_WIN_WR_DATA "OCX_WIN_WR_DATA"
+#define device_bar_BDK_OCX_WIN_WR_DATA 0x0 /* PF_BAR0 */
+#define busnum_BDK_OCX_WIN_WR_DATA 0
+#define arguments_BDK_OCX_WIN_WR_DATA -1,-1,-1,-1
+
+#endif /* __BDK_CSRS_OCX_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-pccpf.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-pccpf.h
new file mode 100644
index 0000000000..0b3e20ca4b
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-pccpf.h
@@ -0,0 +1,4727 @@
+#ifndef __BDK_CSRS_PCCPF_H__
+#define __BDK_CSRS_PCCPF_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium PCCPF.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Enumeration pcc_dev_con_e
+ *
+ * PCC Device Connection Enumeration
+ * Enumerates where the device is connected in the topology. Software must rely on discovery and
+ * not use this enumeration, as the values vary by product and the mnemonics are a superset
+ * of the devices available. The value of the enumeration is formatted as defined by
+ * PCC_DEV_CON_S.
+ */
+#define BDK_PCC_DEV_CON_E_APX(a) (0x200 + (a))
+#define BDK_PCC_DEV_CON_E_AVS (0xf0)
+#define BDK_PCC_DEV_CON_E_BCH_CN8 (0x300)
+#define BDK_PCC_DEV_CON_E_BCH_CN9 (0x400)
+#define BDK_PCC_DEV_CON_E_BGXX(a) (0x180 + (a))
+#define BDK_PCC_DEV_CON_E_BTS (0x141)
+#define BDK_PCC_DEV_CON_E_CCS (0x109)
+#define BDK_PCC_DEV_CON_E_CCUX(a) (0x120 + (a))
+#define BDK_PCC_DEV_CON_E_CGXX(a) (0x188 + (a))
+#define BDK_PCC_DEV_CON_E_CPC (0xd0)
+#define BDK_PCC_DEV_CON_E_CPT0 (0x400)
+#define BDK_PCC_DEV_CON_E_CPT1 (0x500)
+#define BDK_PCC_DEV_CON_E_DAP (0x102)
+#define BDK_PCC_DEV_CON_E_DDF0 (0x10500)
+#define BDK_PCC_DEV_CON_E_DFA (0x400)
+#define BDK_PCC_DEV_CON_E_DPI0_CN8 (0xb00)
+#define BDK_PCC_DEV_CON_E_DPI0_CN9 (0xc00)
+#define BDK_PCC_DEV_CON_E_FPA_CN8 (0x900)
+#define BDK_PCC_DEV_CON_E_FPA_CN9 (0xa00)
+#define BDK_PCC_DEV_CON_E_FUS (0x103)
+#define BDK_PCC_DEV_CON_E_FUSF (0x104)
+#define BDK_PCC_DEV_CON_E_GIC_CN8 (0x18)
+#define BDK_PCC_DEV_CON_E_GIC_CN9 (0x20)
+#define BDK_PCC_DEV_CON_E_GPIO_CN8 (0x30)
+#define BDK_PCC_DEV_CON_E_GPIO_CN9 (0x78)
+#define BDK_PCC_DEV_CON_E_GSERX(a) (0x1e0 + (a))
+#define BDK_PCC_DEV_CON_E_GSERNX(a) (0x1f0 + (a))
+#define BDK_PCC_DEV_CON_E_GTI_CN8 (0x20)
+#define BDK_PCC_DEV_CON_E_GTI_CN9 (0x28)
+#define BDK_PCC_DEV_CON_E_IOBNX(a) (0x158 + (a))
+#define BDK_PCC_DEV_CON_E_KEY (0x10d)
+#define BDK_PCC_DEV_CON_E_L2C (0x109)
+#define BDK_PCC_DEV_CON_E_L2C_CBCX(a) (0x138 + (a))
+#define BDK_PCC_DEV_CON_E_L2C_MCIX(a) (0x13c + (a))
+#define BDK_PCC_DEV_CON_E_L2C_TADX(a) (0x130 + (a))
+#define BDK_PCC_DEV_CON_E_LBKX(a) (0x168 + (a))
+#define BDK_PCC_DEV_CON_E_LMCX(a) (0x150 + (a))
+#define BDK_PCC_DEV_CON_E_MCCX(a) (0x130 + (a))
+#define BDK_PCC_DEV_CON_E_MDC (0x140)
+#define BDK_PCC_DEV_CON_E_MIO_BOOT (0x10e)
+#define BDK_PCC_DEV_CON_E_MIO_EMM (0x10c)
+#define BDK_PCC_DEV_CON_E_MIO_FUS (0x103)
+#define BDK_PCC_DEV_CON_E_MIO_PTP (0x40)
+#define BDK_PCC_DEV_CON_E_MIO_TWSX(a) (0x148 + (a))
+#define BDK_PCC_DEV_CON_E_MPI (0x38)
+#define BDK_PCC_DEV_CON_E_MPIX(a) (0x30 + 8 * (a))
+#define BDK_PCC_DEV_CON_E_MRML (0x100)
+#define BDK_PCC_DEV_CON_E_NCSI (0x108)
+#define BDK_PCC_DEV_CON_E_NDF (0x58)
+#define BDK_PCC_DEV_CON_E_NIC_CN9 (0x10100)
+#define BDK_PCC_DEV_CON_E_NIC_CN81XX (0x500)
+#define BDK_PCC_DEV_CON_E_NIC_CN88XX (0x20100)
+#define BDK_PCC_DEV_CON_E_NIC_CN83XX (0x10100)
+#define BDK_PCC_DEV_CON_E_NICL (0x10100)
+#define BDK_PCC_DEV_CON_E_NIX0 (0x10600)
+#define BDK_PCC_DEV_CON_E_OCLAX_CN8(a) (0x160 + (a))
+#define BDK_PCC_DEV_CON_E_OCLAX_CN9(a) (0x190 + (a))
+#define BDK_PCC_DEV_CON_E_OCX (0x105)
+#define BDK_PCC_DEV_CON_E_PBUS (0x10f)
+#define BDK_PCC_DEV_CON_E_PCCBR_AP (0x10)
+#define BDK_PCC_DEV_CON_E_PCCBR_BCH (0x50)
+#define BDK_PCC_DEV_CON_E_PCCBR_CPT0 (0x60)
+#define BDK_PCC_DEV_CON_E_PCCBR_CPT1 (0x68)
+#define BDK_PCC_DEV_CON_E_PCCBR_DDF0 (0x100a0)
+#define BDK_PCC_DEV_CON_E_PCCBR_DFA (0xb0)
+#define BDK_PCC_DEV_CON_E_PCCBR_DPI0 (0xa0)
+#define BDK_PCC_DEV_CON_E_PCCBR_FPA (0x90)
+#define BDK_PCC_DEV_CON_E_PCCBR_MRML (8)
+#define BDK_PCC_DEV_CON_E_PCCBR_NIC_CN9 (0x10080)
+#define BDK_PCC_DEV_CON_E_PCCBR_NIC_CN81XX (0x78)
+#define BDK_PCC_DEV_CON_E_PCCBR_NIC_CN88XX (0x20010)
+#define BDK_PCC_DEV_CON_E_PCCBR_NIC_CN83XX (0x10080)
+#define BDK_PCC_DEV_CON_E_PCCBR_NICL (0x10080)
+#define BDK_PCC_DEV_CON_E_PCCBR_PKI (0x10088)
+#define BDK_PCC_DEV_CON_E_PCCBR_PKO (0x10090)
+#define BDK_PCC_DEV_CON_E_PCCBR_RAD_CN9 (0x70)
+#define BDK_PCC_DEV_CON_E_PCCBR_RAD_CN88XX (0xa0)
+#define BDK_PCC_DEV_CON_E_PCCBR_RAD_CN83XX (0x70)
+#define BDK_PCC_DEV_CON_E_PCCBR_RNM (0x48)
+#define BDK_PCC_DEV_CON_E_PCCBR_RVUX(a) (0x20000 + 8 * (a))
+#define BDK_PCC_DEV_CON_E_PCCBR_SSO (0x80)
+#define BDK_PCC_DEV_CON_E_PCCBR_SSOW (0x88)
+#define BDK_PCC_DEV_CON_E_PCCBR_TIM (0x98)
+#define BDK_PCC_DEV_CON_E_PCCBR_ZIP_CN9 (0x10098)
+#define BDK_PCC_DEV_CON_E_PCCBR_ZIP_CN88XX (0xa8)
+#define BDK_PCC_DEV_CON_E_PCCBR_ZIP_CN83XX (0x10098)
+#define BDK_PCC_DEV_CON_E_PCIERC0_CN9 (0x30000)
+#define BDK_PCC_DEV_CON_E_PCIERC0_CN81XX (0xc0)
+#define BDK_PCC_DEV_CON_E_PCIERC0_CN88XX (0x10080)
+#define BDK_PCC_DEV_CON_E_PCIERC0_CN83XX (0xc8)
+#define BDK_PCC_DEV_CON_E_PCIERC1_CN9 (0x40000)
+#define BDK_PCC_DEV_CON_E_PCIERC1_CN81XX (0xc8)
+#define BDK_PCC_DEV_CON_E_PCIERC1_CN88XX (0x10090)
+#define BDK_PCC_DEV_CON_E_PCIERC1_CN83XX (0xd0)
+#define BDK_PCC_DEV_CON_E_PCIERC2_CN9 (0x50000)
+#define BDK_PCC_DEV_CON_E_PCIERC2_CN81XX (0xd0)
+#define BDK_PCC_DEV_CON_E_PCIERC2_CN88XX (0x100a0)
+#define BDK_PCC_DEV_CON_E_PCIERC2_CN83XX (0xd8)
+#define BDK_PCC_DEV_CON_E_PCIERC3_CN9 (0x60000)
+#define BDK_PCC_DEV_CON_E_PCIERC3_CN88XX (0x30080)
+#define BDK_PCC_DEV_CON_E_PCIERC3_CN83XX (0xe0)
+#define BDK_PCC_DEV_CON_E_PCIERC4 (0x30090)
+#define BDK_PCC_DEV_CON_E_PCIERC5 (0x300a0)
+#define BDK_PCC_DEV_CON_E_PCM (0x68)
+#define BDK_PCC_DEV_CON_E_PEMX(a) (0x170 + (a))
+#define BDK_PCC_DEV_CON_E_PEM0 (0x2000c0)
+#define BDK_PCC_DEV_CON_E_PEM1 (0x2000c8)
+#define BDK_PCC_DEV_CON_E_PEM2 (0x2000d0)
+#define BDK_PCC_DEV_CON_E_PEM3 (0x2000d8)
+#define BDK_PCC_DEV_CON_E_PKI (0x10200)
+#define BDK_PCC_DEV_CON_E_PKO (0x10300)
+#define BDK_PCC_DEV_CON_E_PSBM (0x107)
+#define BDK_PCC_DEV_CON_E_RAD_CN9 (0x700)
+#define BDK_PCC_DEV_CON_E_RAD_CN88XX (0x200)
+#define BDK_PCC_DEV_CON_E_RAD_CN83XX (0x600)
+#define BDK_PCC_DEV_CON_E_RGXX(a) (0x190 + (a))
+#define BDK_PCC_DEV_CON_E_RNM_CN9 (0x300)
+#define BDK_PCC_DEV_CON_E_RNM_CN81XX (0x200)
+#define BDK_PCC_DEV_CON_E_RNM_CN88XX (0x48)
+#define BDK_PCC_DEV_CON_E_RNM_CN83XX (0x200)
+#define BDK_PCC_DEV_CON_E_RST (0x101)
+#define BDK_PCC_DEV_CON_E_RVUX(a) (0x20100 + 0x100 * (a))
+#define BDK_PCC_DEV_CON_E_SATA0_CN9 (0x10020)
+#define BDK_PCC_DEV_CON_E_SATA0_CN81XX (0xb0)
+#define BDK_PCC_DEV_CON_E_SATA0_CN88XX (0x10020)
+#define BDK_PCC_DEV_CON_E_SATA0_CN83XX (0x10020)
+#define BDK_PCC_DEV_CON_E_SATA1_CN9 (0x10028)
+#define BDK_PCC_DEV_CON_E_SATA1_CN81XX (0xb8)
+#define BDK_PCC_DEV_CON_E_SATA1_CN88XX (0x10028)
+#define BDK_PCC_DEV_CON_E_SATA1_CN83XX (0x10028)
+#define BDK_PCC_DEV_CON_E_SATA10 (0x30030)
+#define BDK_PCC_DEV_CON_E_SATA11 (0x30038)
+#define BDK_PCC_DEV_CON_E_SATA12 (0x30040)
+#define BDK_PCC_DEV_CON_E_SATA13 (0x30048)
+#define BDK_PCC_DEV_CON_E_SATA14 (0x30050)
+#define BDK_PCC_DEV_CON_E_SATA15 (0x30058)
+#define BDK_PCC_DEV_CON_E_SATA2 (0x10030)
+#define BDK_PCC_DEV_CON_E_SATA3 (0x10038)
+#define BDK_PCC_DEV_CON_E_SATA4 (0x10040)
+#define BDK_PCC_DEV_CON_E_SATA5 (0x10048)
+#define BDK_PCC_DEV_CON_E_SATA6 (0x10050)
+#define BDK_PCC_DEV_CON_E_SATA7 (0x10058)
+#define BDK_PCC_DEV_CON_E_SATA8 (0x30020)
+#define BDK_PCC_DEV_CON_E_SATA9 (0x30028)
+#define BDK_PCC_DEV_CON_E_SGP (0x10a)
+#define BDK_PCC_DEV_CON_E_SLI0_CN81XX (0x70)
+#define BDK_PCC_DEV_CON_E_SLI0_CN88XX (0x10010)
+#define BDK_PCC_DEV_CON_E_SLI1 (0x30010)
+#define BDK_PCC_DEV_CON_E_SLIRE0 (0xc0)
+#define BDK_PCC_DEV_CON_E_SMI (0x10b)
+#define BDK_PCC_DEV_CON_E_SMMU0_CN8 (0x10)
+#define BDK_PCC_DEV_CON_E_SMMU0_CN9 (0x18)
+#define BDK_PCC_DEV_CON_E_SMMU1 (0x10008)
+#define BDK_PCC_DEV_CON_E_SMMU2 (0x20008)
+#define BDK_PCC_DEV_CON_E_SMMU3 (0x30008)
+#define BDK_PCC_DEV_CON_E_SSO_CN8 (0x700)
+#define BDK_PCC_DEV_CON_E_SSO_CN9 (0x800)
+#define BDK_PCC_DEV_CON_E_SSOW_CN8 (0x800)
+#define BDK_PCC_DEV_CON_E_SSOW_CN9 (0x900)
+#define BDK_PCC_DEV_CON_E_TIM_CN8 (0xa00)
+#define BDK_PCC_DEV_CON_E_TIM_CN9 (0xb00)
+#define BDK_PCC_DEV_CON_E_TNS (0x20018)
+#define BDK_PCC_DEV_CON_E_TSNX(a) (0x170 + (a))
+#define BDK_PCC_DEV_CON_E_UAAX_CN8(a) (0x140 + (a))
+#define BDK_PCC_DEV_CON_E_UAAX_CN9(a) (0x160 + (a))
+#define BDK_PCC_DEV_CON_E_USBDRDX_CN81XX(a) (0x80 + 8 * (a))
+#define BDK_PCC_DEV_CON_E_USBDRDX_CN83XX(a) (0x10060 + 8 * (a))
+#define BDK_PCC_DEV_CON_E_USBDRDX_CN9(a) (0x10060 + 8 * (a))
+#define BDK_PCC_DEV_CON_E_USBHX(a) (0x80 + 8 * (a))
+#define BDK_PCC_DEV_CON_E_VRMX(a) (0x144 + (a))
+#define BDK_PCC_DEV_CON_E_XCPX(a) (0xe0 + 8 * (a))
+#define BDK_PCC_DEV_CON_E_XCVX(a) (0x110 + (a))
+#define BDK_PCC_DEV_CON_E_ZIP_CN9 (0x10400)
+#define BDK_PCC_DEV_CON_E_ZIP_CN88XX (0x300)
+#define BDK_PCC_DEV_CON_E_ZIP_CN83XX (0x10400)
+
+/**
+ * Enumeration pcc_dev_idl_e
+ *
+ * PCC Device ID Low Enumeration
+ * Enumerates the values of the PCI configuration header Device ID bits
+ * \<7:0\>.
+ *
+ * Internal:
+ * The class_codes are formatted as defined by PCC_CLASS_CODE_S.
+ */
+#define BDK_PCC_DEV_IDL_E_AP5 (0x76)
+#define BDK_PCC_DEV_IDL_E_AVS (0x6a)
+#define BDK_PCC_DEV_IDL_E_BCH (0x43)
+#define BDK_PCC_DEV_IDL_E_BCH_VF (0x44)
+#define BDK_PCC_DEV_IDL_E_BGX (0x26)
+#define BDK_PCC_DEV_IDL_E_BTS (0x88)
+#define BDK_PCC_DEV_IDL_E_CCS (0x6e)
+#define BDK_PCC_DEV_IDL_E_CCU (0x6f)
+#define BDK_PCC_DEV_IDL_E_CER (0x61)
+#define BDK_PCC_DEV_IDL_E_CGX (0x59)
+#define BDK_PCC_DEV_IDL_E_CHIP (0)
+#define BDK_PCC_DEV_IDL_E_CHIP_VF (3)
+#define BDK_PCC_DEV_IDL_E_CPC (0x68)
+#define BDK_PCC_DEV_IDL_E_CPT (0x40)
+#define BDK_PCC_DEV_IDL_E_CPT_VF (0x41)
+#define BDK_PCC_DEV_IDL_E_DAP (0x2c)
+#define BDK_PCC_DEV_IDL_E_DDF (0x45)
+#define BDK_PCC_DEV_IDL_E_DDF_VF (0x46)
+#define BDK_PCC_DEV_IDL_E_DFA (0x19)
+#define BDK_PCC_DEV_IDL_E_DPI (0x57)
+#define BDK_PCC_DEV_IDL_E_DPI5 (0x80)
+#define BDK_PCC_DEV_IDL_E_DPI5_VF (0x81)
+#define BDK_PCC_DEV_IDL_E_DPI_VF (0x58)
+#define BDK_PCC_DEV_IDL_E_FPA (0x52)
+#define BDK_PCC_DEV_IDL_E_FPA_VF (0x53)
+#define BDK_PCC_DEV_IDL_E_FUS5 (0x74)
+#define BDK_PCC_DEV_IDL_E_FUSF (0x32)
+#define BDK_PCC_DEV_IDL_E_GIC (9)
+#define BDK_PCC_DEV_IDL_E_GPIO (0xa)
+#define BDK_PCC_DEV_IDL_E_GSER (0x25)
+#define BDK_PCC_DEV_IDL_E_GSERN (0x28)
+#define BDK_PCC_DEV_IDL_E_GTI (0x17)
+#define BDK_PCC_DEV_IDL_E_IOBN (0x27)
+#define BDK_PCC_DEV_IDL_E_IOBN5 (0x6b)
+#define BDK_PCC_DEV_IDL_E_KEY (0x16)
+#define BDK_PCC_DEV_IDL_E_L2C (0x21)
+#define BDK_PCC_DEV_IDL_E_L2C_CBC (0x2f)
+#define BDK_PCC_DEV_IDL_E_L2C_MCI (0x30)
+#define BDK_PCC_DEV_IDL_E_L2C_TAD (0x2e)
+#define BDK_PCC_DEV_IDL_E_LBK (0x42)
+#define BDK_PCC_DEV_IDL_E_LMC (0x22)
+#define BDK_PCC_DEV_IDL_E_MCC (0x70)
+#define BDK_PCC_DEV_IDL_E_MDC (0x73)
+#define BDK_PCC_DEV_IDL_E_MIO_BOOT (0x11)
+#define BDK_PCC_DEV_IDL_E_MIO_EMM (0x10)
+#define BDK_PCC_DEV_IDL_E_MIO_FUS (0x31)
+#define BDK_PCC_DEV_IDL_E_MIO_PTP (0xc)
+#define BDK_PCC_DEV_IDL_E_MIO_TWS (0x12)
+#define BDK_PCC_DEV_IDL_E_MIX (0xd)
+#define BDK_PCC_DEV_IDL_E_MPI (0xb)
+#define BDK_PCC_DEV_IDL_E_MRML (1)
+#define BDK_PCC_DEV_IDL_E_MRML5 (0x75)
+#define BDK_PCC_DEV_IDL_E_NCSI (0x29)
+#define BDK_PCC_DEV_IDL_E_NDF (0x4f)
+#define BDK_PCC_DEV_IDL_E_NIC (0x1e)
+#define BDK_PCC_DEV_IDL_E_NICL (0x77)
+#define BDK_PCC_DEV_IDL_E_NICL_VF (0x78)
+#define BDK_PCC_DEV_IDL_E_NIC_VF (0x34)
+#define BDK_PCC_DEV_IDL_E_NPC (0x60)
+#define BDK_PCC_DEV_IDL_E_OCLA (0x23)
+#define BDK_PCC_DEV_IDL_E_OCX (0x13)
+#define BDK_PCC_DEV_IDL_E_OCX5 (0x79)
+#define BDK_PCC_DEV_IDL_E_OSM (0x24)
+#define BDK_PCC_DEV_IDL_E_PBUS (0x35)
+#define BDK_PCC_DEV_IDL_E_PCCBR (2)
+#define BDK_PCC_DEV_IDL_E_PCIERC (0x2d)
+#define BDK_PCC_DEV_IDL_E_PCM (0x4e)
+#define BDK_PCC_DEV_IDL_E_PEM (0x20)
+#define BDK_PCC_DEV_IDL_E_PEM5 (0x6c)
+#define BDK_PCC_DEV_IDL_E_PKI (0x47)
+#define BDK_PCC_DEV_IDL_E_PKO (0x48)
+#define BDK_PCC_DEV_IDL_E_PKO_VF (0x49)
+#define BDK_PCC_DEV_IDL_E_PSBM (0x69)
+#define BDK_PCC_DEV_IDL_E_RAD (0x1d)
+#define BDK_PCC_DEV_IDL_E_RAD_VF (0x36)
+#define BDK_PCC_DEV_IDL_E_RGX (0x54)
+#define BDK_PCC_DEV_IDL_E_RNM (0x18)
+#define BDK_PCC_DEV_IDL_E_RNM_VF (0x33)
+#define BDK_PCC_DEV_IDL_E_RST (0xe)
+#define BDK_PCC_DEV_IDL_E_RST5 (0x85)
+#define BDK_PCC_DEV_IDL_E_RVU (0x63)
+#define BDK_PCC_DEV_IDL_E_RVU_AF (0x65)
+#define BDK_PCC_DEV_IDL_E_RVU_VF (0x64)
+#define BDK_PCC_DEV_IDL_E_SATA (0x1c)
+#define BDK_PCC_DEV_IDL_E_SATA5 (0x84)
+#define BDK_PCC_DEV_IDL_E_SGP (0x2a)
+#define BDK_PCC_DEV_IDL_E_SLI (0x15)
+#define BDK_PCC_DEV_IDL_E_SLIRE (0x38)
+#define BDK_PCC_DEV_IDL_E_SMI (0x2b)
+#define BDK_PCC_DEV_IDL_E_SMMU (8)
+#define BDK_PCC_DEV_IDL_E_SMMU3 (0x62)
+#define BDK_PCC_DEV_IDL_E_SSO (0x4a)
+#define BDK_PCC_DEV_IDL_E_SSOW (0x4c)
+#define BDK_PCC_DEV_IDL_E_SSOW_VF (0x4d)
+#define BDK_PCC_DEV_IDL_E_SSO_VF (0x4b)
+#define BDK_PCC_DEV_IDL_E_TIM (0x50)
+#define BDK_PCC_DEV_IDL_E_TIM_VF (0x51)
+#define BDK_PCC_DEV_IDL_E_TNS (0x1f)
+#define BDK_PCC_DEV_IDL_E_TSN (0x6d)
+#define BDK_PCC_DEV_IDL_E_UAA (0xf)
+#define BDK_PCC_DEV_IDL_E_USBDRD (0x55)
+#define BDK_PCC_DEV_IDL_E_USBH (0x1b)
+#define BDK_PCC_DEV_IDL_E_VRM (0x14)
+#define BDK_PCC_DEV_IDL_E_XCP (0x67)
+#define BDK_PCC_DEV_IDL_E_XCV (0x56)
+#define BDK_PCC_DEV_IDL_E_ZIP (0x1a)
+#define BDK_PCC_DEV_IDL_E_ZIP5 (0x82)
+#define BDK_PCC_DEV_IDL_E_ZIP5_VF (0x83)
+#define BDK_PCC_DEV_IDL_E_ZIP_VF (0x37)
+
+/**
+ * Enumeration pcc_jtag_dev_e
+ *
+ * PCC JTAG Device Enumeration
+ * Enumerates the device number sub-field of Cavium-assigned JTAG ID_Codes. The device number is
+ * mapped to Part_Number[7:4], where Part_Number[15:0] is mapped to ID_Code[27:12].
+ */
+#define BDK_PCC_JTAG_DEV_E_DAP (1)
+#define BDK_PCC_JTAG_DEV_E_MAIN (0)
+#define BDK_PCC_JTAG_DEV_E_MCP (3)
+#define BDK_PCC_JTAG_DEV_E_SCP (2)
+
+/**
+ * Enumeration pcc_pidr_partnum0_e
+ *
+ * PCC PIDR Part Number 0 Enumeration
+ * When *_PIDR1[PARTNUM1] = PCC_PIDR_PARTNUM1_E::COMP, enumerates the values of Cavium-
+ * assigned CoreSight PIDR part number 0 fields.
+ * For example SMMU()_PIDR0[PARTNUM0].
+ */
+#define BDK_PCC_PIDR_PARTNUM0_E_CTI (0xd)
+#define BDK_PCC_PIDR_PARTNUM0_E_DBG (0xe)
+#define BDK_PCC_PIDR_PARTNUM0_E_ETR (0x13)
+#define BDK_PCC_PIDR_PARTNUM0_E_GICD (2)
+#define BDK_PCC_PIDR_PARTNUM0_E_GICR (1)
+#define BDK_PCC_PIDR_PARTNUM0_E_GITS (3)
+#define BDK_PCC_PIDR_PARTNUM0_E_GTI_BZ (4)
+#define BDK_PCC_PIDR_PARTNUM0_E_GTI_CC (5)
+#define BDK_PCC_PIDR_PARTNUM0_E_GTI_CTL (6)
+#define BDK_PCC_PIDR_PARTNUM0_E_GTI_RD (7)
+#define BDK_PCC_PIDR_PARTNUM0_E_GTI_WC (8)
+#define BDK_PCC_PIDR_PARTNUM0_E_GTI_WR (9)
+#define BDK_PCC_PIDR_PARTNUM0_E_NONE (0)
+#define BDK_PCC_PIDR_PARTNUM0_E_PMU (0xa)
+#define BDK_PCC_PIDR_PARTNUM0_E_RAS (0x12)
+#define BDK_PCC_PIDR_PARTNUM0_E_SMMU (0xb)
+#define BDK_PCC_PIDR_PARTNUM0_E_SMMU3 (0x11)
+#define BDK_PCC_PIDR_PARTNUM0_E_SYSCTI (0xf)
+#define BDK_PCC_PIDR_PARTNUM0_E_TRC (0x10)
+#define BDK_PCC_PIDR_PARTNUM0_E_UAA (0xc)
+
+/**
+ * Enumeration pcc_pidr_partnum1_e
+ *
+ * PCC PIDR Part Number 1 Enumeration
+ * Enumerates the values of Cavium-assigned CoreSight PIDR PARTNUM1 fields, for example
+ * SMMU()_PIDR1[PARTNUM1].
+ */
+#define BDK_PCC_PIDR_PARTNUM1_E_COMP (2)
+#define BDK_PCC_PIDR_PARTNUM1_E_PROD (1)
+
+/**
+ * Enumeration pcc_prod_e
+ *
+ * PCC Device ID Product Enumeration
+ * Enumerates the chip identifier.
+ */
+#define BDK_PCC_PROD_E_CN81XX (0xa2)
+#define BDK_PCC_PROD_E_CN83XX (0xa3)
+#define BDK_PCC_PROD_E_CN88XX (0xa1)
+#define BDK_PCC_PROD_E_CN93XX (0xb2)
+#define BDK_PCC_PROD_E_CN98XX (0xb1)
+#define BDK_PCC_PROD_E_GEN (0xa0)
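These product codes appear to supply PCI Device ID bits <15:8>, with PCC_DEV_IDL_E above supplying bits <7:0>; that pairing is an inference from the two descriptions, not stated here. Under that assumption, the generic SMMU would be (0xa0 << 8) | 0x08 = 0xa008, with vendor ID BDK_PCC_VENDOR_E_CAVIUM (0x177d, defined just below):

  #include <stdint.h>

  /* Illustrative: compose a 16-bit PCI device ID from the product and
   * device-ID-low enumerations. The <15:8>/<7:0> split is an assumption
   * based on the field descriptions, e.g.
   * pcc_device_id(BDK_PCC_PROD_E_GEN, BDK_PCC_DEV_IDL_E_SMMU) == 0xa008. */
  static uint16_t pcc_device_id(uint8_t prod, uint8_t idl)
  {
      return (uint16_t)((prod << 8) | idl);
  }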
+
+/**
+ * Enumeration pcc_vendor_e
+ *
+ * PCC Vendor ID Enumeration
+ * Enumerates the values of the PCI configuration header vendor ID.
+ */
+#define BDK_PCC_VENDOR_E_CAVIUM (0x177d)
+
+/**
+ * Enumeration pcc_vsecid_e
+ *
+ * PCC Vendor-Specific Capability ID Enumeration
+ * Enumerates the values of Cavium's vendor-specific PCI capability IDs.
+ * Internal:
+ * See also http://mawiki.caveonetworks.com/wiki/Architecture/PCI_Vendor_Headers
+ */
+#define BDK_PCC_VSECID_E_NONE (0)
+#define BDK_PCC_VSECID_E_SY_RAS_DES (2)
+#define BDK_PCC_VSECID_E_SY_RAS_DP (1)
+#define BDK_PCC_VSECID_E_SY_RSVDX(a) (0 + (a))
+#define BDK_PCC_VSECID_E_TX_BR (0xa1)
+#define BDK_PCC_VSECID_E_TX_PF (0xa0)
+#define BDK_PCC_VSECID_E_TX_VF (0xa2)
+
+/**
+ * Structure pcc_class_code_s
+ *
+ * INTERNAL: PCC Class Code Structure
+ *
+ * Defines the components of the PCC class code.
+ */
+union bdk_pcc_class_code_s
+{
+ uint32_t u;
+ struct bdk_pcc_class_code_s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t bcc : 8; /**< [ 23: 16] Base class code. */
+ uint32_t sc : 8; /**< [ 15: 8] Subclass code. */
+ uint32_t pi : 8; /**< [ 7: 0] Programming interface. */
+#else /* Word 0 - Little Endian */
+ uint32_t pi : 8; /**< [ 7: 0] Programming interface. */
+ uint32_t sc : 8; /**< [ 15: 8] Subclass code. */
+ uint32_t bcc : 8; /**< [ 23: 16] Base class code. */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pcc_class_code_s_s cn; */
+};
+
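+/*
+ * Usage sketch (an assumption, not generated code): decode a raw class code
+ * dword into its components with the union above.
+ */
+static inline void example_decode_class_code(uint32_t raw, unsigned *bcc,
+                                             unsigned *sc, unsigned *pi)
+{
+    union bdk_pcc_class_code_s cc;
+    cc.u = raw;
+    *bcc = cc.s.bcc; /* base class code */
+    *sc  = cc.s.sc;  /* subclass code */
+    *pi  = cc.s.pi;  /* programming interface */
+}
+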
+/**
+ * Structure pcc_dev_con_s
+ *
+ * PCC Device Connection Structure
+ * Defines the components of the PCC device connection values enumerated by PCC_DEV_CON_E,
+ * using ARI format.
+ */
+union bdk_pcc_dev_con_s
+{
+ uint32_t u;
+ struct bdk_pcc_dev_con_s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_16_31 : 16;
+ uint32_t bus : 8; /**< [ 15: 8] PCI requestor bus number. */
+ uint32_t func : 8; /**< [ 7: 0] For ARI devices (when bus is nonzero), an eight-bit RSL function number.
+
+ For non-ARI devices (when bus is zero), \<7:3\> is the device number, \<2:0\> the function
+ number. */
+#else /* Word 0 - Little Endian */
+ uint32_t func : 8; /**< [ 7: 0] For ARI devices (when bus is nonzero), an eight-bit RSL function number.
+
+ For non-ARI devices (when bus is zero), \<7:3\> is the device number, \<2:0\> the function
+ number. */
+ uint32_t bus : 8; /**< [ 15: 8] PCI requestor bus number. */
+ uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pcc_dev_con_s_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_18_31 : 14;
+ uint32_t ecam : 2; /**< [ 17: 16] ECAM number. */
+ uint32_t bus : 8; /**< [ 15: 8] PCI requestor bus number. */
+ uint32_t func : 8; /**< [ 7: 0] For ARI devices (when bus is nonzero), an eight-bit RSL function number.
+
+ For non-ARI devices (when bus is zero), \<7:3\> is the device number, \<2:0\> the function
+ number. */
+#else /* Word 0 - Little Endian */
+ uint32_t func : 8; /**< [ 7: 0] For ARI devices (when bus is nonzero), an eight-bit RSL function number.
+
+ For non-ARI devices (when bus is zero), \<7:3\> is the device number, \<2:0\> the function
+ number. */
+ uint32_t bus : 8; /**< [ 15: 8] PCI requestor bus number. */
+ uint32_t ecam : 2; /**< [ 17: 16] ECAM number. */
+ uint32_t reserved_18_31 : 14;
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_pcc_dev_con_s_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_22_31 : 10;
+ uint32_t dmn : 6; /**< [ 21: 16] Domain number. */
+ uint32_t bus : 8; /**< [ 15: 8] PCI requestor bus number. */
+ uint32_t func : 8; /**< [ 7: 0] For ARI devices (when bus is nonzero), an eight-bit RSL function number.
+
+ For non-ARI devices (when bus is zero), \<7:3\> is the device number, \<2:0\> the function
+ number. */
+#else /* Word 0 - Little Endian */
+ uint32_t func : 8; /**< [ 7: 0] For ARI devices (when bus is nonzero), an eight-bit RSL function number.
+
+ For non-ARI devices (when bus is zero), \<7:3\> is the device number, \<2:0\> the function
+ number. */
+ uint32_t bus : 8; /**< [ 15: 8] PCI requestor bus number. */
+ uint32_t dmn : 6; /**< [ 21: 16] Domain number. */
+ uint32_t reserved_22_31 : 10;
+#endif /* Word 0 - End */
+ } cn9;
+};
+
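+/*
+ * Illustrative helper (not generated code): split a PCC_DEV_CON_E style value
+ * using the common layout above. For non-ARI devices (bus zero), [FUNC]<7:3>
+ * is the device number and [FUNC]<2:0> the function number.
+ */
+static inline void example_split_dev_con(uint32_t dev_con,
+                                         unsigned *bus, unsigned *func)
+{
+    union bdk_pcc_dev_con_s dc;
+    dc.u = dev_con;
+    *bus  = dc.s.bus;
+    *func = dc.s.func;
+}
+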
+/**
+ * Structure pcc_ea_entry_s
+ *
+ * PCC PCI Enhanced Allocation Entry Structure
+ * This structure describes the format of an enhanced allocation entry stored in
+ * PCCPF_XXX_EA_ENTRY(). It describes only what PCC hardware generates; software must
+ * implement a full EA parser, including testing the [ENTRY_SIZE], [BASE64] and
+ * [OFFSET64] fields.
+ *
+ * PCI configuration registers are 32 bits wide; however, due to tool limitations, this
+ * structure is described as a little-endian, 64-bit-wide structure.
+ */
+union bdk_pcc_ea_entry_s
+{
+ uint64_t u[3];
+ struct bdk_pcc_ea_entry_s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t basel : 30; /**< [ 63: 34] Lower bits of the entry 0 base address. */
+ uint64_t base64 : 1; /**< [ 33: 33] 64-bit base, indicates [BASEH] is present. For CNXXXX always set. */
+ uint64_t reserved_32 : 1;
+ uint64_t enable : 1; /**< [ 31: 31] Enable. Always set. */
+ uint64_t w : 1; /**< [ 30: 30] Writable. Always clear. */
+ uint64_t reserved_24_29 : 6;
+ uint64_t sec_prop : 8; /**< [ 23: 16] Secondary properties. For CNXXXX always 0xFF, indicating that the primary properties must
+ be used. */
+ uint64_t pri_prop : 8; /**< [ 15: 8] Primary properties.
+ 0x0 = Memory space, non-prefetchable.
+ 0x4 = Physical function indicating virtual function memory space, non-prefetchable. */
+ uint64_t bei : 4; /**< [ 7: 4] BAR equivalent indicator.
+ 0x0 = Entry is equivalent to BAR 0.
+ 0x2 = Entry is equivalent to BAR 2.
+ 0x4 = Entry is equivalent to BAR 4.
+ 0x7 = Equivalent not indicated.
+ 0x9 = Entry is equivalent to SR-IOV BAR 0.
+ 0xB = Entry is equivalent to SR-IOV BAR 2.
+ 0xD = Entry is equivalent to SR-IOV BAR 4. */
+ uint64_t reserved_3 : 1;
+ uint64_t entry_size : 3; /**< [ 2: 0] Number of 32-bit words following this entry format header, excluding the header
+ itself.
+ 0x4 = Four 32-bit words; header followed by base low, offset low, base high,
+ offset high. */
+#else /* Word 0 - Little Endian */
+ uint64_t entry_size : 3; /**< [ 2: 0] Number of 32-bit words following this entry format header, excluding the header
+ itself.
+ 0x4 = Four 32-bit words; header followed by base low, offset low, base high,
+ offset high. */
+ uint64_t reserved_3 : 1;
+ uint64_t bei : 4; /**< [ 7: 4] BAR equivalent indicator.
+ 0x0 = Entry is equivalent to BAR 0.
+ 0x2 = Entry is equivalent to BAR 2.
+ 0x4 = Entry is equivalent to BAR 4.
+ 0x7 = Equivalent not indicated.
+ 0x9 = Entry is equivalent to SR-IOV BAR 0.
+ 0xB = Entry is equivalent to SR-IOV BAR 2.
+ 0xD = Entry is equivalent to SR-IOV BAR 4. */
+ uint64_t pri_prop : 8; /**< [ 15: 8] Primary properties.
+ 0x0 = Memory space, non-prefetchable.
+ 0x4 = Physical function indicating virtual function memory space, non-prefetchable. */
+ uint64_t sec_prop : 8; /**< [ 23: 16] Secondary properties. For CNXXXX always 0xFF, indicating that the primary properties must
+ be used. */
+ uint64_t reserved_24_29 : 6;
+ uint64_t w : 1; /**< [ 30: 30] Writable. Always clear. */
+ uint64_t enable : 1; /**< [ 31: 31] Enable. Always set. */
+ uint64_t reserved_32 : 1;
+ uint64_t base64 : 1; /**< [ 33: 33] 64-bit base, indicates [BASEH] is present. For CNXXXX always set. */
+ uint64_t basel : 30; /**< [ 63: 34] Lower bits of the entry 0 base address. */
+#endif /* Word 0 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 1 - Big Endian */
+ uint64_t baseh : 32; /**< [127: 96] Upper bits of the entry 0 base address. */
+ uint64_t offsetl : 30; /**< [ 95: 66] Lower bits of the entry 0 offset. Bits \<1:0\> of the offset are not present and
+ must be interpreted as all-ones. */
+ uint64_t offset64 : 1; /**< [ 65: 65] 64-bit offset, indicates [OFFSETH] is present. For CNXXXX always set. */
+ uint64_t reserved_64 : 1;
+#else /* Word 1 - Little Endian */
+ uint64_t reserved_64 : 1;
+ uint64_t offset64 : 1; /**< [ 65: 65] 64-bit offset, indicates [OFFSETH] is present. For CNXXXX always set. */
+ uint64_t offsetl : 30; /**< [ 95: 66] Lower bits of the entry 0 offset. Bits \<1:0\> of the offset are not present and
+ must be interpreted as all-ones. */
+ uint64_t baseh : 32; /**< [127: 96] Upper bits of the entry 0 base address. */
+#endif /* Word 1 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 2 - Big Endian */
+ uint64_t reserved_160_191 : 32;
+ uint64_t offseth : 32; /**< [159:128] Upper bits of the entry 0 offset. */
+#else /* Word 2 - Little Endian */
+ uint64_t offseth : 32; /**< [159:128] Upper bits of the entry 0 offset. */
+ uint64_t reserved_160_191 : 32;
+#endif /* Word 2 - End */
+ } s;
+ /* struct bdk_pcc_ea_entry_s_s cn; */
+};
+
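+/*
+ * Sketch of the EA parsing the note above requires (hypothetical helpers, not
+ * generated code). [BASEL] holds base<31:2> and [OFFSETL] holds offset<31:2>;
+ * offset bits <1:0> are not stored and read as all-ones. On CNXXXX [BASE64]
+ * and [OFFSET64] are always set, so [BASEH]/[OFFSETH] are taken as valid here;
+ * a full parser must test them.
+ */
+static inline uint64_t example_ea_base(const union bdk_pcc_ea_entry_s *ea)
+{
+    return ((uint64_t)ea->s.baseh << 32) | ((uint64_t)ea->s.basel << 2);
+}
+
+static inline uint64_t example_ea_max_offset(const union bdk_pcc_ea_entry_s *ea)
+{
+    /* The enhanced allocation region spans [base, base + offset]. */
+    return ((uint64_t)ea->s.offseth << 32) |
+           ((uint64_t)ea->s.offsetl << 2) | 0x3;
+}
+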
+/**
+ * Register (PCCPF) pccpf_xxx_aer_cap_hdr
+ *
+ * PCC PF AER Capability Header Register
+ * This register is the header of the 44-byte PCI advanced error reporting (AER) capability
+ * structure.
+ */
+union bdk_pccpf_xxx_aer_cap_hdr
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_aer_cap_hdr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t nco : 12; /**< [ 31: 20](RO) Next capability offset. If this device is on a nonzero bus, points to
+ PCCPF_XXX_ARI_CAP_HDR, else 0x0. */
+ uint32_t cv : 4; /**< [ 19: 16](RO) Capability version. */
+ uint32_t aerid : 16; /**< [ 15: 0](RO) PCIE extended capability. Indicates AER capability. */
+#else /* Word 0 - Little Endian */
+ uint32_t aerid : 16; /**< [ 15: 0](RO) PCIE extended capability. Indicates AER capability. */
+ uint32_t cv : 4; /**< [ 19: 16](RO) Capability version. */
+ uint32_t nco : 12; /**< [ 31: 20](RO) Next capability offset. If this device is on a nonzero bus, points to
+ PCCPF_XXX_ARI_CAP_HDR, else 0x0. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_aer_cap_hdr_s cn; */
+};
+typedef union bdk_pccpf_xxx_aer_cap_hdr bdk_pccpf_xxx_aer_cap_hdr_t;
+
+#define BDK_PCCPF_XXX_AER_CAP_HDR BDK_PCCPF_XXX_AER_CAP_HDR_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_AER_CAP_HDR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_AER_CAP_HDR_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x140;
+ __bdk_csr_fatal("PCCPF_XXX_AER_CAP_HDR", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCPF_XXX_AER_CAP_HDR bdk_pccpf_xxx_aer_cap_hdr_t
+#define bustype_BDK_PCCPF_XXX_AER_CAP_HDR BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_AER_CAP_HDR "PCCPF_XXX_AER_CAP_HDR"
+#define busnum_BDK_PCCPF_XXX_AER_CAP_HDR 0
+#define arguments_BDK_PCCPF_XXX_AER_CAP_HDR -1,-1,-1,-1
+
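+/*
+ * Usage sketch (assumed, mirroring the generated accessor pattern): the
+ * FUNC() accessor returns a model-dependent config-space byte offset, and
+ * calling it on a model without the register ends in __bdk_csr_fatal().
+ */
+#if 0
+uint64_t aer_hdr_off = BDK_PCCPF_XXX_AER_CAP_HDR; /* 0x140 on CN9XXX */
+#endif
+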
+/**
+ * Register (PCCPF) pccpf_xxx_aer_cor_mask
+ *
+ * PCC PF AER Correctable Error Mask Register
+ * This register contains a mask bit for each nonreserved bit in PCCPF_XXX_AER_COR_STATUS.
+ * The mask bits are R/W for PCIe and software compatibility but are not used by hardware.
+ *
+ * This register is reset on a chip domain reset.
+ */
+union bdk_pccpf_xxx_aer_cor_mask
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_aer_cor_mask_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_15_31 : 17;
+ uint32_t cor_intn : 1; /**< [ 14: 14](R/W) Corrected internal error. */
+ uint32_t adv_nfat : 1; /**< [ 13: 13](R/W) Advisory nonfatal error. */
+ uint32_t rep_timer : 1; /**< [ 12: 12](R/W) Replay timer timeout. */
+ uint32_t reserved_9_11 : 3;
+ uint32_t rep_roll : 1; /**< [ 8: 8](R/W) Replay number rollover. */
+ uint32_t bad_dllp : 1; /**< [ 7: 7](R/W) Bad DLLP. */
+ uint32_t bad_tlp : 1; /**< [ 6: 6](R/W) Bad TLP. */
+ uint32_t reserved_1_5 : 5;
+ uint32_t rcvr : 1; /**< [ 0: 0](R/W) Receiver error. */
+#else /* Word 0 - Little Endian */
+ uint32_t rcvr : 1; /**< [ 0: 0](R/W) Receiver error. */
+ uint32_t reserved_1_5 : 5;
+ uint32_t bad_tlp : 1; /**< [ 6: 6](R/W) Bad TLP. */
+ uint32_t bad_dllp : 1; /**< [ 7: 7](R/W) Bad DLLP. */
+ uint32_t rep_roll : 1; /**< [ 8: 8](R/W) Replay number rollover. */
+ uint32_t reserved_9_11 : 3;
+ uint32_t rep_timer : 1; /**< [ 12: 12](R/W) Replay timer timeout. */
+ uint32_t adv_nfat : 1; /**< [ 13: 13](R/W) Advisory nonfatal error. */
+ uint32_t cor_intn : 1; /**< [ 14: 14](R/W) Corrected internal error. */
+ uint32_t reserved_15_31 : 17;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_aer_cor_mask_s cn; */
+};
+typedef union bdk_pccpf_xxx_aer_cor_mask bdk_pccpf_xxx_aer_cor_mask_t;
+
+#define BDK_PCCPF_XXX_AER_COR_MASK BDK_PCCPF_XXX_AER_COR_MASK_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_AER_COR_MASK_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_AER_COR_MASK_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x154;
+ __bdk_csr_fatal("PCCPF_XXX_AER_COR_MASK", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCPF_XXX_AER_COR_MASK bdk_pccpf_xxx_aer_cor_mask_t
+#define bustype_BDK_PCCPF_XXX_AER_COR_MASK BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_AER_COR_MASK "PCCPF_XXX_AER_COR_MASK"
+#define busnum_BDK_PCCPF_XXX_AER_COR_MASK 0
+#define arguments_BDK_PCCPF_XXX_AER_COR_MASK -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_aer_cor_status
+ *
+ * PCC PF AER Correctable Error Status Register
+ * This register is reset on a chip domain reset.
+ */
+union bdk_pccpf_xxx_aer_cor_status
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_aer_cor_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_15_31 : 17;
+ uint32_t cor_intn : 1; /**< [ 14: 14](R/W1C/H) Corrected internal error. Set when one is written to PCCPF_XXX_VSEC_CTL[COR_INTN]. */
+ uint32_t adv_nfat : 1; /**< [ 13: 13](R/W1C/H) Advisory non-fatal error. Set when one is written to PCCPF_XXX_VSEC_CTL[ADV_NFAT]. */
+ uint32_t rep_timer : 1; /**< [ 12: 12](RO) Replay timer timeout. Always zero. */
+ uint32_t reserved_9_11 : 3;
+ uint32_t rep_roll : 1; /**< [ 8: 8](RO) Replay number rollover. Always zero. */
+ uint32_t bad_dllp : 1; /**< [ 7: 7](RO) Bad DLLP. Always zero. */
+ uint32_t bad_tlp : 1; /**< [ 6: 6](RO) Bad TLP. Always zero. */
+ uint32_t reserved_1_5 : 5;
+ uint32_t rcvr : 1; /**< [ 0: 0](RO) Receiver error. Always zero. */
+#else /* Word 0 - Little Endian */
+ uint32_t rcvr : 1; /**< [ 0: 0](RO) Receiver error. Always zero. */
+ uint32_t reserved_1_5 : 5;
+ uint32_t bad_tlp : 1; /**< [ 6: 6](RO) Bad TLP. Always zero. */
+ uint32_t bad_dllp : 1; /**< [ 7: 7](RO) Bad DLLP. Always zero. */
+ uint32_t rep_roll : 1; /**< [ 8: 8](RO) Replay number rollover. Always zero. */
+ uint32_t reserved_9_11 : 3;
+ uint32_t rep_timer : 1; /**< [ 12: 12](RO) Replay timer timeout. Always zero. */
+ uint32_t adv_nfat : 1; /**< [ 13: 13](R/W1C/H) Advisory non-fatal error. Set when one is written to PCCPF_XXX_VSEC_CTL[ADV_NFAT]. */
+ uint32_t cor_intn : 1; /**< [ 14: 14](R/W1C/H) Corrected internal error. Set when one is written to PCCPF_XXX_VSEC_CTL[COR_INTN]. */
+ uint32_t reserved_15_31 : 17;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_aer_cor_status_s cn; */
+};
+typedef union bdk_pccpf_xxx_aer_cor_status bdk_pccpf_xxx_aer_cor_status_t;
+
+#define BDK_PCCPF_XXX_AER_COR_STATUS BDK_PCCPF_XXX_AER_COR_STATUS_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_AER_COR_STATUS_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_AER_COR_STATUS_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x150;
+ __bdk_csr_fatal("PCCPF_XXX_AER_COR_STATUS", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCPF_XXX_AER_COR_STATUS bdk_pccpf_xxx_aer_cor_status_t
+#define bustype_BDK_PCCPF_XXX_AER_COR_STATUS BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_AER_COR_STATUS "PCCPF_XXX_AER_COR_STATUS"
+#define busnum_BDK_PCCPF_XXX_AER_COR_STATUS 0
+#define arguments_BDK_PCCPF_XXX_AER_COR_STATUS -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_aer_uncor_mask
+ *
+ * PCC PF AER Uncorrectable Error Mask Register
+ * This register contains a mask bit for each nonreserved bit in PCCPF_XXX_AER_UNCOR_STATUS.
+ * The mask bits are R/W for PCIe and software compatibility but are not used by hardware.
+ *
+ * This register is reset on a chip domain reset.
+ */
+union bdk_pccpf_xxx_aer_uncor_mask
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_aer_uncor_mask_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_23_31 : 9;
+ uint32_t uncor_intn : 1; /**< [ 22: 22](R/W) Uncorrectable internal error. */
+ uint32_t reserved_21 : 1;
+ uint32_t unsup : 1; /**< [ 20: 20](R/W) Unsupported request error. */
+ uint32_t reserved_19 : 1;
+ uint32_t malf_tlp : 1; /**< [ 18: 18](R/W) Malformed TLP. */
+ uint32_t reserved_17 : 1;
+ uint32_t unx_comp : 1; /**< [ 16: 16](R/W) Unexpected completion. */
+ uint32_t reserved_15 : 1;
+ uint32_t comp_time : 1; /**< [ 14: 14](R/W) Completion timeout. */
+ uint32_t reserved_13 : 1;
+ uint32_t poison_tlp : 1; /**< [ 12: 12](R/W) Poisoned TLP received. */
+ uint32_t reserved_5_11 : 7;
+ uint32_t dlp : 1; /**< [ 4: 4](R/W) Data link protocol error. */
+ uint32_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_3 : 4;
+ uint32_t dlp : 1; /**< [ 4: 4](R/W) Data link protocol error. */
+ uint32_t reserved_5_11 : 7;
+ uint32_t poison_tlp : 1; /**< [ 12: 12](R/W) Poisoned TLP received. */
+ uint32_t reserved_13 : 1;
+ uint32_t comp_time : 1; /**< [ 14: 14](R/W) Completion timeout. */
+ uint32_t reserved_15 : 1;
+ uint32_t unx_comp : 1; /**< [ 16: 16](R/W) Unexpected completion. */
+ uint32_t reserved_17 : 1;
+ uint32_t malf_tlp : 1; /**< [ 18: 18](R/W) Malformed TLP. */
+ uint32_t reserved_19 : 1;
+ uint32_t unsup : 1; /**< [ 20: 20](R/W) Unsupported request error. */
+ uint32_t reserved_21 : 1;
+ uint32_t uncor_intn : 1; /**< [ 22: 22](R/W) Uncorrectable internal error. */
+ uint32_t reserved_23_31 : 9;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_aer_uncor_mask_s cn; */
+};
+typedef union bdk_pccpf_xxx_aer_uncor_mask bdk_pccpf_xxx_aer_uncor_mask_t;
+
+#define BDK_PCCPF_XXX_AER_UNCOR_MASK BDK_PCCPF_XXX_AER_UNCOR_MASK_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_AER_UNCOR_MASK_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_AER_UNCOR_MASK_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x148;
+ __bdk_csr_fatal("PCCPF_XXX_AER_UNCOR_MASK", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCPF_XXX_AER_UNCOR_MASK bdk_pccpf_xxx_aer_uncor_mask_t
+#define bustype_BDK_PCCPF_XXX_AER_UNCOR_MASK BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_AER_UNCOR_MASK "PCCPF_XXX_AER_UNCOR_MASK"
+#define busnum_BDK_PCCPF_XXX_AER_UNCOR_MASK 0
+#define arguments_BDK_PCCPF_XXX_AER_UNCOR_MASK -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_aer_uncor_sever
+ *
+ * PCC PF AER Uncorrectable Error Severity Register
+ * This register controls whether an individual error is reported as a nonfatal or
+ * fatal error. An error is reported as fatal when the corresponding severity bit is set, and
+ * nonfatal otherwise.
+ *
+ * This register is reset on a chip domain reset.
+ */
+union bdk_pccpf_xxx_aer_uncor_sever
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_aer_uncor_sever_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_23_31 : 9;
+ uint32_t uncor_intn : 1; /**< [ 22: 22](R/W) Uncorrectable internal error. */
+ uint32_t reserved_21 : 1;
+ uint32_t unsup : 1; /**< [ 20: 20](R/W) Unsupported request error. */
+ uint32_t reserved_19 : 1;
+ uint32_t malf_tlp : 1; /**< [ 18: 18](R/W) Malformed TLP. */
+ uint32_t reserved_17 : 1;
+ uint32_t unx_comp : 1; /**< [ 16: 16](R/W) Unexpected completion. */
+ uint32_t reserved_15 : 1;
+ uint32_t comp_time : 1; /**< [ 14: 14](R/W) Completion timeout. */
+ uint32_t reserved_13 : 1;
+ uint32_t poison_tlp : 1; /**< [ 12: 12](R/W) Poisoned TLP received. */
+ uint32_t reserved_5_11 : 7;
+ uint32_t dlp : 1; /**< [ 4: 4](R/W) Data link protocol error. */
+ uint32_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_3 : 4;
+ uint32_t dlp : 1; /**< [ 4: 4](R/W) Data link protocol error. */
+ uint32_t reserved_5_11 : 7;
+ uint32_t poison_tlp : 1; /**< [ 12: 12](R/W) Poisoned TLP received. */
+ uint32_t reserved_13 : 1;
+ uint32_t comp_time : 1; /**< [ 14: 14](R/W) Completion timeout. */
+ uint32_t reserved_15 : 1;
+ uint32_t unx_comp : 1; /**< [ 16: 16](R/W) Unexpected completion. */
+ uint32_t reserved_17 : 1;
+ uint32_t malf_tlp : 1; /**< [ 18: 18](R/W) Malformed TLP. */
+ uint32_t reserved_19 : 1;
+ uint32_t unsup : 1; /**< [ 20: 20](R/W) Unsupported request error. */
+ uint32_t reserved_21 : 1;
+ uint32_t uncor_intn : 1; /**< [ 22: 22](R/W) Uncorrectable internal error. */
+ uint32_t reserved_23_31 : 9;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_aer_uncor_sever_s cn; */
+};
+typedef union bdk_pccpf_xxx_aer_uncor_sever bdk_pccpf_xxx_aer_uncor_sever_t;
+
+#define BDK_PCCPF_XXX_AER_UNCOR_SEVER BDK_PCCPF_XXX_AER_UNCOR_SEVER_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_AER_UNCOR_SEVER_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_AER_UNCOR_SEVER_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x14c;
+ __bdk_csr_fatal("PCCPF_XXX_AER_UNCOR_SEVER", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCPF_XXX_AER_UNCOR_SEVER bdk_pccpf_xxx_aer_uncor_sever_t
+#define bustype_BDK_PCCPF_XXX_AER_UNCOR_SEVER BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_AER_UNCOR_SEVER "PCCPF_XXX_AER_UNCOR_SEVER"
+#define busnum_BDK_PCCPF_XXX_AER_UNCOR_SEVER 0
+#define arguments_BDK_PCCPF_XXX_AER_UNCOR_SEVER -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_aer_uncor_status
+ *
+ * PCC PF AER Uncorrectable Error Status Register
+ * This register is reset on a chip domain reset.
+ */
+union bdk_pccpf_xxx_aer_uncor_status
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_aer_uncor_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_23_31 : 9;
+ uint32_t uncor_intn : 1; /**< [ 22: 22](R/W1C/H) Uncorrectable internal error. Set when one is written to PCCPF_XXX_VSEC_CTL[UNCOR_INTN]. */
+ uint32_t reserved_21 : 1;
+ uint32_t unsup : 1; /**< [ 20: 20](RO) Unsupported request error. Always zero. */
+ uint32_t reserved_19 : 1;
+ uint32_t malf_tlp : 1; /**< [ 18: 18](RO) Malformed TLP. Always zero. */
+ uint32_t reserved_17 : 1;
+ uint32_t unx_comp : 1; /**< [ 16: 16](RO) Unexpected completion. Always zero. */
+ uint32_t reserved_15 : 1;
+ uint32_t comp_time : 1; /**< [ 14: 14](RO) Completion timeout. Always zero. */
+ uint32_t reserved_13 : 1;
+ uint32_t poison_tlp : 1; /**< [ 12: 12](R/W1C/H) Poisoned TLP received. Set when one is written to PCCPF_XXX_VSEC_CTL[POISON_TLP]. */
+ uint32_t reserved_5_11 : 7;
+ uint32_t dlp : 1; /**< [ 4: 4](RO) Data link protocol error. Always zero. */
+ uint32_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_3 : 4;
+ uint32_t dlp : 1; /**< [ 4: 4](RO) Data link protocol error. Always zero. */
+ uint32_t reserved_5_11 : 7;
+ uint32_t poison_tlp : 1; /**< [ 12: 12](R/W1C/H) Poisoned TLP received. Set when one is written to PCCPF_XXX_VSEC_CTL[POISON_TLP]. */
+ uint32_t reserved_13 : 1;
+ uint32_t comp_time : 1; /**< [ 14: 14](RO) Completion timeout. Always zero. */
+ uint32_t reserved_15 : 1;
+ uint32_t unx_comp : 1; /**< [ 16: 16](RO) Unexpected completion. Always zero. */
+ uint32_t reserved_17 : 1;
+ uint32_t malf_tlp : 1; /**< [ 18: 18](RO) Malformed TLP. Always zero. */
+ uint32_t reserved_19 : 1;
+ uint32_t unsup : 1; /**< [ 20: 20](RO) Unsupported request error. Always zero. */
+ uint32_t reserved_21 : 1;
+ uint32_t uncor_intn : 1; /**< [ 22: 22](R/W1C/H) Uncorrectable internal error. Set when one is written to PCCPF_XXX_VSEC_CTL[UNCOR_INTN]. */
+ uint32_t reserved_23_31 : 9;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_aer_uncor_status_s cn; */
+};
+typedef union bdk_pccpf_xxx_aer_uncor_status bdk_pccpf_xxx_aer_uncor_status_t;
+
+#define BDK_PCCPF_XXX_AER_UNCOR_STATUS BDK_PCCPF_XXX_AER_UNCOR_STATUS_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_AER_UNCOR_STATUS_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_AER_UNCOR_STATUS_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x144;
+ __bdk_csr_fatal("PCCPF_XXX_AER_UNCOR_STATUS", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCPF_XXX_AER_UNCOR_STATUS bdk_pccpf_xxx_aer_uncor_status_t
+#define bustype_BDK_PCCPF_XXX_AER_UNCOR_STATUS BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_AER_UNCOR_STATUS "PCCPF_XXX_AER_UNCOR_STATUS"
+#define busnum_BDK_PCCPF_XXX_AER_UNCOR_STATUS 0
+#define arguments_BDK_PCCPF_XXX_AER_UNCOR_STATUS -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_ari_cap_hdr
+ *
+ * PCC PF ARI Capability Header Register
+ * This register is the header of the eight-byte PCI ARI capability structure.
+ * If this device is on bus 0x0, this ARI header is not present and reads as 0x0.
+ */
+union bdk_pccpf_xxx_ari_cap_hdr
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_ari_cap_hdr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t nco : 12; /**< [ 31: 20](RO) Next capability offset. Points to PCCPF_XXX_VSEC_CAP_HDR. */
+ uint32_t cv : 4; /**< [ 19: 16](RO) Capability version. */
+ uint32_t ariid : 16; /**< [ 15: 0](RO) PCIE extended capability. */
+#else /* Word 0 - Little Endian */
+ uint32_t ariid : 16; /**< [ 15: 0](RO) PCIE extended capability. */
+ uint32_t cv : 4; /**< [ 19: 16](RO) Capability version. */
+ uint32_t nco : 12; /**< [ 31: 20](RO) Next capability offset. Points to PCCPF_XXX_VSEC_CAP_HDR. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pccpf_xxx_ari_cap_hdr_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t nco : 12; /**< [ 31: 20](RO) Next capability offset. If SR-IOV is supported as per PCC_DEV_IDL_E, points to
+ PCCPF_XXX_SRIOV_CAP_HDR, else 0x0. */
+ uint32_t cv : 4; /**< [ 19: 16](RO) Capability version (0x1). */
+ uint32_t ariid : 16; /**< [ 15: 0](RO) PCIE extended capability (0xe). */
+#else /* Word 0 - Little Endian */
+ uint32_t ariid : 16; /**< [ 15: 0](RO) PCIE extended capability (0xe). */
+ uint32_t cv : 4; /**< [ 19: 16](RO) Capability version (0x1). */
+ uint32_t nco : 12; /**< [ 31: 20](RO) Next capability offset. If SR-IOV is supported as per PCC_DEV_IDL_E, points to
+ PCCPF_XXX_SRIOV_CAP_HDR, else 0x0. */
+#endif /* Word 0 - End */
+ } cn9;
+ /* struct bdk_pccpf_xxx_ari_cap_hdr_cn9 cn81xx; */
+ /* struct bdk_pccpf_xxx_ari_cap_hdr_s cn88xx; */
+ /* struct bdk_pccpf_xxx_ari_cap_hdr_cn9 cn83xx; */
+};
+typedef union bdk_pccpf_xxx_ari_cap_hdr bdk_pccpf_xxx_ari_cap_hdr_t;
+
+#define BDK_PCCPF_XXX_ARI_CAP_HDR BDK_PCCPF_XXX_ARI_CAP_HDR_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_ARI_CAP_HDR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_ARI_CAP_HDR_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x140;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x140;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x100;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x170;
+ __bdk_csr_fatal("PCCPF_XXX_ARI_CAP_HDR", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCPF_XXX_ARI_CAP_HDR bdk_pccpf_xxx_ari_cap_hdr_t
+#define bustype_BDK_PCCPF_XXX_ARI_CAP_HDR BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_ARI_CAP_HDR "PCCPF_XXX_ARI_CAP_HDR"
+#define busnum_BDK_PCCPF_XXX_ARI_CAP_HDR 0
+#define arguments_BDK_PCCPF_XXX_ARI_CAP_HDR -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_bar0l
+ *
+ * PCC PF Base Address 0 Lower Register
+ */
+union bdk_pccpf_xxx_bar0l
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_bar0l_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pccpf_xxx_bar0l_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t lbab : 16; /**< [ 31: 16](R/W/H) Lower bits of the BAR 0 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__pfbar0_rbsz and tie__pfbar0_offset. */
+ uint32_t reserved_4_15 : 12;
+ uint32_t pf : 1; /**< [ 3: 3](RO) Prefetchable. */
+ uint32_t typ : 2; /**< [ 2: 1](RO) BAR type. 0x0 if not implemented, else 0x2:
+ 0x0 = 32-bit BAR, or BAR not present.
+ 0x2 = 64-bit BAR. */
+ uint32_t mspc : 1; /**< [ 0: 0](RO) Memory space indicator.
+ 0 = BAR is a memory BAR.
+ 1 = BAR is an I/O BAR. */
+#else /* Word 0 - Little Endian */
+ uint32_t mspc : 1; /**< [ 0: 0](RO) Memory space indicator.
+ 0 = BAR is a memory BAR.
+ 1 = BAR is an I/O BAR. */
+ uint32_t typ : 2; /**< [ 2: 1](RO) BAR type. 0x0 if not implemented, else 0x2:
+ 0x0 = 32-bit BAR, or BAR not present.
+ 0x2 = 64-bit BAR. */
+ uint32_t pf : 1; /**< [ 3: 3](RO) Prefetchable. */
+ uint32_t reserved_4_15 : 12;
+ uint32_t lbab : 16; /**< [ 31: 16](R/W/H) Lower bits of the BAR 0 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__pfbar0_rbsz and tie__pfbar0_offset. */
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_pccpf_xxx_bar0l_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t bar : 32; /**< [ 31: 0](RO) Always zero. Enhanced allocation used instead of BARs. */
+#else /* Word 0 - Little Endian */
+ uint32_t bar : 32; /**< [ 31: 0](RO) Always zero. Enhanced allocation used instead of BARs. */
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_pccpf_xxx_bar0l_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t lbab : 16; /**< [ 31: 16](R/W/H) Lower bits of the BAR 0 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__pfbar0_rbsz and tie__pfbar0_offset. */
+ uint32_t reserved_4_15 : 12;
+ uint32_t pf : 1; /**< [ 3: 3](RO) Prefetchable. */
+ uint32_t typ : 2; /**< [ 2: 1](RO/H) BAR type. 0x0 if not implemented or PCCPF_XXX_VSEC_SCTL[EA] is set, else 0x2:
+ 0x0 = 32-bit BAR, or BAR not present.
+ 0x2 = 64-bit BAR. */
+ uint32_t mspc : 1; /**< [ 0: 0](RO) Memory space indicator.
+ 0 = BAR is a memory BAR.
+ 1 = BAR is an I/O BAR. */
+#else /* Word 0 - Little Endian */
+ uint32_t mspc : 1; /**< [ 0: 0](RO) Memory space indicator.
+ 0 = BAR is a memory BAR.
+ 1 = BAR is an I/O BAR. */
+ uint32_t typ : 2; /**< [ 2: 1](RO/H) BAR type. 0x0 if not implemented or PCCPF_XXX_VSEC_SCTL[EA] is set, else 0x2:
+ 0x0 = 32-bit BAR, or BAR not present.
+ 0x2 = 64-bit BAR. */
+ uint32_t pf : 1; /**< [ 3: 3](RO) Prefetchable. */
+ uint32_t reserved_4_15 : 12;
+ uint32_t lbab : 16; /**< [ 31: 16](R/W/H) Lower bits of the BAR 0 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__pfbar0_rbsz and tie__pfbar0_offset. */
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_pccpf_xxx_bar0l_cn81xx cn83xx; */
+ /* struct bdk_pccpf_xxx_bar0l_cn81xx cn88xxp2; */
+};
+typedef union bdk_pccpf_xxx_bar0l bdk_pccpf_xxx_bar0l_t;
+
+#define BDK_PCCPF_XXX_BAR0L BDK_PCCPF_XXX_BAR0L_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_BAR0L_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_BAR0L_FUNC(void)
+{
+ return 0x10;
+}
+
+#define typedef_BDK_PCCPF_XXX_BAR0L bdk_pccpf_xxx_bar0l_t
+#define bustype_BDK_PCCPF_XXX_BAR0L BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_BAR0L "PCCPF_XXX_BAR0L"
+#define busnum_BDK_PCCPF_XXX_BAR0L 0
+#define arguments_BDK_PCCPF_XXX_BAR0L -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_bar0u
+ *
+ * PCC PF Base Address 0 Upper Register
+ */
+union bdk_pccpf_xxx_bar0u
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_bar0u_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pccpf_xxx_bar0u_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ubab : 32; /**< [ 31: 0](R/W/H) Contains the upper 32 bits of the BAR 0 base address. Bits corresponding to address bits
+ less than the size of the BAR are read-as-zero; other bits are read-write only to allow
+ determining the size of the BAR. Hardware has fixed address decoding and does not use this
+ BAR for address decoding. After sizing, software must restore the register value for
+ proper operation; the proper value may be read from PCCPF_XXX_VSEC_BAR0U, with software
+ writing the node number into the field bits corresponding to address bits \<45:44\>.
+
+ Internal:
+ From PCC's tie__pfbar0_rbsz and tie__pfbar0_offset. */
+#else /* Word 0 - Little Endian */
+ uint32_t ubab : 32; /**< [ 31: 0](R/W/H) Contains the upper 32 bits of the BAR 0 base address. Bits corresponding to address bits
+ less than the size of the BAR are read-as-zero; other bits are read-write only to allow
+ determining the size of the BAR. Hardware has fixed address decoding and does not use this
+ BAR for address decoding. After sizing, software must restore the register value for
+ proper operation; the proper value may be read from PCCPF_XXX_VSEC_BAR0U, with software
+ writing the node number into the field bits corresponding to address bits \<45:44\>.
+
+ Internal:
+ From PCC's tie__pfbar0_rbsz and tie__pfbar0_offset. */
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_pccpf_xxx_bar0u_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t bar : 32; /**< [ 31: 0](RO) Always zero. Enhanced allocation used instead of BARs. */
+#else /* Word 0 - Little Endian */
+ uint32_t bar : 32; /**< [ 31: 0](RO) Always zero. Enhanced allocation used instead of BARs. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_pccpf_xxx_bar0u bdk_pccpf_xxx_bar0u_t;
+
+#define BDK_PCCPF_XXX_BAR0U BDK_PCCPF_XXX_BAR0U_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_BAR0U_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_BAR0U_FUNC(void)
+{
+ return 0x14;
+}
+
+#define typedef_BDK_PCCPF_XXX_BAR0U bdk_pccpf_xxx_bar0u_t
+#define bustype_BDK_PCCPF_XXX_BAR0U BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_BAR0U "PCCPF_XXX_BAR0U"
+#define busnum_BDK_PCCPF_XXX_BAR0U 0
+#define arguments_BDK_PCCPF_XXX_BAR0U -1,-1,-1,-1
+
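+/*
+ * Sketch of the size-then-restore sequence described in [UBAB] above.
+ * cfg_read32()/cfg_write32() are hypothetical config-space accessors, and the
+ * BDK_PCCPF_XXX_VSEC_BAR0U macro name is assumed from the PCCPF_XXX_VSEC_BAR0U
+ * register referenced in the field notes. Address bits <45:44> correspond to
+ * BAR0U bits <13:12>.
+ */
+#if 0
+unsigned node = 0;                                       /* target node number */
+uint32_t proper = cfg_read32(BDK_PCCPF_XXX_VSEC_BAR0U);  /* proper restore value */
+cfg_write32(BDK_PCCPF_XXX_BAR0U, 0xffffffff);            /* size the BAR */
+uint32_t sized = cfg_read32(BDK_PCCPF_XXX_BAR0U);        /* zeros below BAR size */
+cfg_write32(BDK_PCCPF_XXX_BAR0U, proper | (node << 12)); /* node -> addr<45:44> */
+#endif
+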
+/**
+ * Register (PCCPF) pccpf_xxx_bar2l
+ *
+ * PCC PF Base Address 2 Lower Register
+ */
+union bdk_pccpf_xxx_bar2l
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_bar2l_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pccpf_xxx_bar2l_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t lbab : 16; /**< [ 31: 16](R/W/H) Lower bits of the BAR 2 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__pfbar2_rbsz and tie__pfbar2_offset. */
+ uint32_t reserved_4_15 : 12;
+ uint32_t pf : 1; /**< [ 3: 3](RO) Prefetchable. */
+ uint32_t typ : 2; /**< [ 2: 1](RO) BAR type. 0x0 if not implemented, else 0x2:
+ 0x0 = 32-bit BAR, or BAR not present.
+ 0x2 = 64-bit BAR. */
+ uint32_t mspc : 1; /**< [ 0: 0](RO) Memory space indicator.
+ 0 = BAR is a memory BAR.
+ 1 = BAR is an I/O BAR. */
+#else /* Word 0 - Little Endian */
+ uint32_t mspc : 1; /**< [ 0: 0](RO) Memory space indicator.
+ 0 = BAR is a memory BAR.
+ 1 = BAR is an I/O BAR. */
+ uint32_t typ : 2; /**< [ 2: 1](RO) BAR type. 0x0 if not implemented, else 0x2:
+ 0x0 = 32-bit BAR, or BAR not present.
+ 0x2 = 64-bit BAR. */
+ uint32_t pf : 1; /**< [ 3: 3](RO) Prefetchable. */
+ uint32_t reserved_4_15 : 12;
+ uint32_t lbab : 16; /**< [ 31: 16](R/W/H) Lower bits of the BAR 2 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__pfbar2_rbsz and tie__pfbar2_offset. */
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_pccpf_xxx_bar2l_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t bar : 32; /**< [ 31: 0](RO) Always zero. Enhanced allocation used instead of BARs. */
+#else /* Word 0 - Little Endian */
+ uint32_t bar : 32; /**< [ 31: 0](RO) Always zero. Enhanced allocation used instead of BARs. */
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_pccpf_xxx_bar2l_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t lbab : 16; /**< [ 31: 16](R/W/H) Lower bits of the BAR 2 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__pfbar2_rbsz and tie__pfbar2_offset. */
+ uint32_t reserved_4_15 : 12;
+ uint32_t pf : 1; /**< [ 3: 3](RO) Prefetchable. */
+ uint32_t typ : 2; /**< [ 2: 1](RO/H) BAR type. 0x0 if not implemented or PCCPF_XXX_VSEC_SCTL[EA] is set, else 0x2:
+ 0x0 = 32-bit BAR, or BAR not present.
+ 0x2 = 64-bit BAR. */
+ uint32_t mspc : 1; /**< [ 0: 0](RO) Memory space indicator.
+ 0 = BAR is a memory BAR.
+ 1 = BAR is an I/O BAR. */
+#else /* Word 0 - Little Endian */
+ uint32_t mspc : 1; /**< [ 0: 0](RO) Memory space indicator.
+ 0 = BAR is a memory BAR.
+ 1 = BAR is an I/O BAR. */
+ uint32_t typ : 2; /**< [ 2: 1](RO/H) BAR type. 0x0 if not implemented or PCCPF_XXX_VSEC_SCTL[EA] is set, else 0x2:
+ 0x0 = 32-bit BAR, or BAR not present.
+ 0x2 = 64-bit BAR. */
+ uint32_t pf : 1; /**< [ 3: 3](RO) Prefetchable. */
+ uint32_t reserved_4_15 : 12;
+ uint32_t lbab : 16; /**< [ 31: 16](R/W/H) Lower bits of the BAR 2 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__pfbar2_rbsz and tie__pfbar2_offset. */
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_pccpf_xxx_bar2l_cn81xx cn83xx; */
+ /* struct bdk_pccpf_xxx_bar2l_cn81xx cn88xxp2; */
+};
+typedef union bdk_pccpf_xxx_bar2l bdk_pccpf_xxx_bar2l_t;
+
+#define BDK_PCCPF_XXX_BAR2L BDK_PCCPF_XXX_BAR2L_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_BAR2L_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_BAR2L_FUNC(void)
+{
+ return 0x18;
+}
+
+#define typedef_BDK_PCCPF_XXX_BAR2L bdk_pccpf_xxx_bar2l_t
+#define bustype_BDK_PCCPF_XXX_BAR2L BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_BAR2L "PCCPF_XXX_BAR2L"
+#define busnum_BDK_PCCPF_XXX_BAR2L 0
+#define arguments_BDK_PCCPF_XXX_BAR2L -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_bar2u
+ *
+ * PCC PF Base Address 2 Upper Register
+ */
+union bdk_pccpf_xxx_bar2u
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_bar2u_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pccpf_xxx_bar2u_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ubab : 32; /**< [ 31: 0](R/W/H) Contains the upper 32 bits of the BAR 2 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__pfbar2_rbsz and tie__pfbar2_offset. */
+#else /* Word 0 - Little Endian */
+ uint32_t ubab : 32; /**< [ 31: 0](R/W/H) Contains the upper 32 bits of the BAR 2 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__pfbar2_rbsz and tie__pfbar2_offset. */
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_pccpf_xxx_bar2u_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t bar : 32; /**< [ 31: 0](RO) Always zero. Enhanced allocation used instead of BARs. */
+#else /* Word 0 - Little Endian */
+ uint32_t bar : 32; /**< [ 31: 0](RO) Always zero. Enhanced allocation used instead of BARs. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_pccpf_xxx_bar2u bdk_pccpf_xxx_bar2u_t;
+
+#define BDK_PCCPF_XXX_BAR2U BDK_PCCPF_XXX_BAR2U_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_BAR2U_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_BAR2U_FUNC(void)
+{
+ return 0x1c;
+}
+
+#define typedef_BDK_PCCPF_XXX_BAR2U bdk_pccpf_xxx_bar2u_t
+#define bustype_BDK_PCCPF_XXX_BAR2U BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_BAR2U "PCCPF_XXX_BAR2U"
+#define busnum_BDK_PCCPF_XXX_BAR2U 0
+#define arguments_BDK_PCCPF_XXX_BAR2U -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_bar4l
+ *
+ * PCC PF Base Address 4 Lower Register
+ */
+union bdk_pccpf_xxx_bar4l
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_bar4l_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pccpf_xxx_bar4l_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t lbab : 16; /**< [ 31: 16](R/W/H) Lower bits of the BAR 4 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__pfbar4_rbsz and tie__pfbar4_offset. */
+ uint32_t reserved_4_15 : 12;
+ uint32_t pf : 1; /**< [ 3: 3](RO) Prefetchable. */
+ uint32_t typ : 2; /**< [ 2: 1](RO) BAR type. 0x0 if not implemented, else 0x2:
+ 0x0 = 32-bit BAR, or BAR not present.
+ 0x2 = 64-bit BAR. */
+ uint32_t mspc : 1; /**< [ 0: 0](RO) Memory space indicator.
+ 0 = BAR is a memory BAR.
+ 1 = BAR is an I/O BAR. */
+#else /* Word 0 - Little Endian */
+ uint32_t mspc : 1; /**< [ 0: 0](RO) Memory space indicator.
+ 0 = BAR is a memory BAR.
+ 1 = BAR is an I/O BAR. */
+ uint32_t typ : 2; /**< [ 2: 1](RO) BAR type. 0x0 if not implemented, else 0x2:
+ 0x0 = 32-bit BAR, or BAR not present.
+ 0x2 = 64-bit BAR. */
+ uint32_t pf : 1; /**< [ 3: 3](RO) Prefetchable. */
+ uint32_t reserved_4_15 : 12;
+ uint32_t lbab : 16; /**< [ 31: 16](R/W/H) Lower bits of the BAR 4 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__pfbar4_rbsz and tie__pfbar4_offset. */
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_pccpf_xxx_bar4l_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t bar : 32; /**< [ 31: 0](RO) Always zero. Enhanced allocation used instead of BARs. */
+#else /* Word 0 - Little Endian */
+ uint32_t bar : 32; /**< [ 31: 0](RO) Always zero. Enhanced allocation used instead of BARs. */
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_pccpf_xxx_bar4l_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t lbab : 16; /**< [ 31: 16](R/W/H) Lower bits of the BAR 4 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__pfbar4_rbsz and tie__pfbar4_offset. */
+ uint32_t reserved_4_15 : 12;
+ uint32_t pf : 1; /**< [ 3: 3](RO) Prefetchable. */
+ uint32_t typ : 2; /**< [ 2: 1](RO/H) BAR type. 0x0 if not implemented or PCCPF_XXX_VSEC_SCTL[EA] is set, else 0x2:
+ 0x0 = 32-bit BAR, or BAR not present.
+ 0x2 = 64-bit BAR. */
+ uint32_t mspc : 1; /**< [ 0: 0](RO) Memory space indicator.
+ 0 = BAR is a memory BAR.
+ 1 = BAR is an I/O BAR. */
+#else /* Word 0 - Little Endian */
+ uint32_t mspc : 1; /**< [ 0: 0](RO) Memory space indicator.
+ 0 = BAR is a memory BAR.
+ 1 = BAR is an I/O BAR. */
+ uint32_t typ : 2; /**< [ 2: 1](RO/H) BAR type. 0x0 if not implemented or PCCPF_XXX_VSEC_SCTL[EA] is set, else 0x2:
+ 0x0 = 32-bit BAR, or BAR not present.
+ 0x2 = 64-bit BAR. */
+ uint32_t pf : 1; /**< [ 3: 3](RO) Prefetchable. */
+ uint32_t reserved_4_15 : 12;
+ uint32_t lbab : 16; /**< [ 31: 16](R/W/H) Lower bits of the BAR 4 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__pfbar4_rbsz and tie__pfbar4_offset. */
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_pccpf_xxx_bar4l_cn81xx cn83xx; */
+ /* struct bdk_pccpf_xxx_bar4l_cn81xx cn88xxp2; */
+};
+typedef union bdk_pccpf_xxx_bar4l bdk_pccpf_xxx_bar4l_t;
+
+#define BDK_PCCPF_XXX_BAR4L BDK_PCCPF_XXX_BAR4L_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_BAR4L_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_BAR4L_FUNC(void)
+{
+ return 0x20;
+}
+
+#define typedef_BDK_PCCPF_XXX_BAR4L bdk_pccpf_xxx_bar4l_t
+#define bustype_BDK_PCCPF_XXX_BAR4L BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_BAR4L "PCCPF_XXX_BAR4L"
+#define busnum_BDK_PCCPF_XXX_BAR4L 0
+#define arguments_BDK_PCCPF_XXX_BAR4L -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_bar4u
+ *
+ * PCC PF Base Address 4 Upper Register
+ */
+union bdk_pccpf_xxx_bar4u
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_bar4u_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pccpf_xxx_bar4u_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ubab : 32; /**< [ 31: 0](R/W/H) Contains the upper 32 bits of the BAR 4 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__pfbar4_rbsz and tie__pfbar4_offset. */
+#else /* Word 0 - Little Endian */
+ uint32_t ubab : 32; /**< [ 31: 0](R/W/H) Contains the upper 32 bits of the BAR 4 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__pfbar4_rbsz and tie__pfbar4_offset. */
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_pccpf_xxx_bar4u_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t bar : 32; /**< [ 31: 0](RO) Always zero. Enhanced allocation used instead of BARs. */
+#else /* Word 0 - Little Endian */
+ uint32_t bar : 32; /**< [ 31: 0](RO) Always zero. Enhanced allocation used instead of BARs. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_pccpf_xxx_bar4u bdk_pccpf_xxx_bar4u_t;
+
+#define BDK_PCCPF_XXX_BAR4U BDK_PCCPF_XXX_BAR4U_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_BAR4U_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_BAR4U_FUNC(void)
+{
+ return 0x24;
+}
+
+#define typedef_BDK_PCCPF_XXX_BAR4U bdk_pccpf_xxx_bar4u_t
+#define bustype_BDK_PCCPF_XXX_BAR4U BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_BAR4U "PCCPF_XXX_BAR4U"
+#define busnum_BDK_PCCPF_XXX_BAR4U 0
+#define arguments_BDK_PCCPF_XXX_BAR4U -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_cap_ptr
+ *
+ * PCC PF Capability Pointer Register
+ */
+union bdk_pccpf_xxx_cap_ptr
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_cap_ptr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t cp : 8; /**< [ 7: 0](RO) First capability pointer. Points to PCCPF_XXX_E_CAP_HDR. */
+#else /* Word 0 - Little Endian */
+ uint32_t cp : 8; /**< [ 7: 0](RO) First capability pointer. Points to PCCPF_XXX_E_CAP_HDR. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_cap_ptr_s cn; */
+};
+typedef union bdk_pccpf_xxx_cap_ptr bdk_pccpf_xxx_cap_ptr_t;
+
+#define BDK_PCCPF_XXX_CAP_PTR BDK_PCCPF_XXX_CAP_PTR_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_CAP_PTR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_CAP_PTR_FUNC(void)
+{
+ return 0x34;
+}
+
+#define typedef_BDK_PCCPF_XXX_CAP_PTR bdk_pccpf_xxx_cap_ptr_t
+#define bustype_BDK_PCCPF_XXX_CAP_PTR BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_CAP_PTR "PCCPF_XXX_CAP_PTR"
+#define busnum_BDK_PCCPF_XXX_CAP_PTR 0
+#define arguments_BDK_PCCPF_XXX_CAP_PTR -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_clsize
+ *
+ * PCC PF Cache Line Size Register
+ */
+union bdk_pccpf_xxx_clsize
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_clsize_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t bist : 8; /**< [ 31: 24](RO) BIST. */
+ uint32_t hdrtype : 8; /**< [ 23: 16](RO) Header type. For RSL, 0x80 to indicate a multifunction device, else 0x0. Per the SR-IOV
+ specification, VFs are not indicated as multifunction devices. */
+ uint32_t lattim : 8; /**< [ 15: 8](RO) Latency timer. */
+ uint32_t clsize : 8; /**< [ 7: 0](RO) Cacheline size. */
+#else /* Word 0 - Little Endian */
+ uint32_t clsize : 8; /**< [ 7: 0](RO) Cacheline size. */
+ uint32_t lattim : 8; /**< [ 15: 8](RO) Latency timer. */
+ uint32_t hdrtype : 8; /**< [ 23: 16](RO) Header type. For RSL, 0x80 to indicate a multifunction device, else 0x0. Per the SR-IOV
+ specification, VFs are not indicated as multifunction devices. */
+ uint32_t bist : 8; /**< [ 31: 24](RO) BIST. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_clsize_s cn; */
+};
+typedef union bdk_pccpf_xxx_clsize bdk_pccpf_xxx_clsize_t;
+
+#define BDK_PCCPF_XXX_CLSIZE BDK_PCCPF_XXX_CLSIZE_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_CLSIZE_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_CLSIZE_FUNC(void)
+{
+ return 0xc;
+}
+
+#define typedef_BDK_PCCPF_XXX_CLSIZE bdk_pccpf_xxx_clsize_t
+#define bustype_BDK_PCCPF_XXX_CLSIZE BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_CLSIZE "PCCPF_XXX_CLSIZE"
+#define busnum_BDK_PCCPF_XXX_CLSIZE 0
+#define arguments_BDK_PCCPF_XXX_CLSIZE -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_cmd
+ *
+ * PCC PF Command/Status Register
+ * This register is reset on a block domain reset or PF function level reset.
+ */
+union bdk_pccpf_xxx_cmd
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_cmd_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_21_31 : 11;
+ uint32_t cl : 1; /**< [ 20: 20](RO) Capabilities list. Indicates presence of an extended capability item. */
+ uint32_t reserved_3_19 : 17;
+ uint32_t me : 1; /**< [ 2: 2](RO) Master enable.
+ Internal:
+ For simplicity always one; we do not disable NCB transactions. */
+ uint32_t msae : 1; /**< [ 1: 1](RO) Memory space access enable.
+ Internal:
+ NCB/RSL always decoded; have hardcoded BARs. */
+ uint32_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0 : 1;
+ uint32_t msae : 1; /**< [ 1: 1](RO) Memory space access enable.
+ Internal:
+ NCB/RSL always decoded; have hardcoded BARs. */
+ uint32_t me : 1; /**< [ 2: 2](RO) Master enable.
+ Internal:
+ For simplicity always one; we do not disable NCB transactions. */
+ uint32_t reserved_3_19 : 17;
+ uint32_t cl : 1; /**< [ 20: 20](RO) Capabilities list. Indicates presence of an extended capability item. */
+ uint32_t reserved_21_31 : 11;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_cmd_s cn8; */
+ struct bdk_pccpf_xxx_cmd_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_21_31 : 11;
+ uint32_t cl : 1; /**< [ 20: 20](RO) Capabilities list. Indicates presence of an extended capability item. */
+ uint32_t reserved_3_19 : 17;
+ uint32_t me : 1; /**< [ 2: 2](R/W) Bus master enable. If set, function may initiate upstream DMA or MSI-X
+ transactions.
+
+ If PCCPF_XXX_E_DEV_CAP[FLR] is read-only zero, this bit is always set and writes have
+ no effect; otherwise it resets to zero and is writable.
+
+ Internal:
+ Drives pcc__blk_masterena if block's CSR file has pcc_flr="True"
+ attribute. Function must not initiate NCBI DMA requests when
+ pcc__blk_masterena=0. In addition, PCC will not generate GIB (MSI-X)
+ transactions when this bit is clear. */
+ uint32_t msae : 1; /**< [ 1: 1](RO) Memory space access enable.
+ Internal:
+ NCB/RSL always decoded; have hardcoded BARs. */
+ uint32_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0 : 1;
+ uint32_t msae : 1; /**< [ 1: 1](RO) Memory space access enable.
+ Internal:
+ NCB/RSL always decoded; have hardcoded BARs. */
+ uint32_t me : 1; /**< [ 2: 2](R/W) Bus master enable. If set, function may initiate upstream DMA or MSI-X
+ transactions.
+
+ If PCCPF_XXX_E_DEV_CAP[FLR] is read-only zero, this bit is always set and writes have
+ no effect; otherwise it resets to zero and is writable.
+
+ Internal:
+ Drives pcc__blk_masterena if block's CSR file has pcc_flr="True"
+ attribute. Function must not initiate NCBI DMA requests when
+ pcc__blk_masterena=0. In addition, PCC will not generate GIB (MSI-X)
+ transactions when this bit is clear. */
+ uint32_t reserved_3_19 : 17;
+ uint32_t cl : 1; /**< [ 20: 20](RO) Capabilities list. Indicates presence of an extended capability item. */
+ uint32_t reserved_21_31 : 11;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_pccpf_xxx_cmd bdk_pccpf_xxx_cmd_t;
+
+#define BDK_PCCPF_XXX_CMD BDK_PCCPF_XXX_CMD_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_CMD_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_CMD_FUNC(void)
+{
+ return 4;
+}
+
+#define typedef_BDK_PCCPF_XXX_CMD bdk_pccpf_xxx_cmd_t
+#define bustype_BDK_PCCPF_XXX_CMD BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_CMD "PCCPF_XXX_CMD"
+#define busnum_BDK_PCCPF_XXX_CMD 0
+#define arguments_BDK_PCCPF_XXX_CMD -1,-1,-1,-1
+
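+/*
+ * Sketch (assumed usage): on CN9XXX parts with FLR support, [ME] resets to
+ * zero, so software must enable bus mastering before the function may issue
+ * upstream DMA or MSI-X writes. cfg_read32()/cfg_write32() are hypothetical
+ * config-space accessors.
+ */
+#if 0
+bdk_pccpf_xxx_cmd_t cmd;
+cmd.u = cfg_read32(BDK_PCCPF_XXX_CMD);
+cmd.cn9.me = 1;                          /* bus master enable */
+cfg_write32(BDK_PCCPF_XXX_CMD, cmd.u);
+#endif
+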
+/**
+ * Register (PCCPF) pccpf_xxx_e_cap_hdr
+ *
+ * PCC PF PCI Express Capabilities Register
+ * This register is the header of the 64-byte PCIe capability structure.
+ */
+union bdk_pccpf_xxx_e_cap_hdr
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_e_cap_hdr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t dpt : 4; /**< [ 23: 20](RO) Device/port type. Indicates PCIe endpoint (0x0) for ARI devices (when bus is nonzero) and
+ integrated endpoint (0x9) otherwise. */
+ uint32_t pciecv : 4; /**< [ 19: 16](RO) PCIe capability version. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO) Next capability pointer. If MSI-X is supported, points to
+ PCCPF_XXX_MSIX_CAP_HDR, else 0x0. */
+ uint32_t pcieid : 8; /**< [ 7: 0](RO) PCIe capability ID. */
+#else /* Word 0 - Little Endian */
+ uint32_t pcieid : 8; /**< [ 7: 0](RO) PCIe capability ID. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO) Next capability pointer. If MSI-X is supported, points to
+ PCCPF_XXX_MSIX_CAP_HDR, else 0x0. */
+ uint32_t pciecv : 4; /**< [ 19: 16](RO) PCIe capability version. */
+ uint32_t dpt : 4; /**< [ 23: 20](RO) Device/port type. Indicates PCIe endpoint (0x0) for ARI devices (when bus is nonzero) and
+ integrated endpoint (0x9) otherwise. */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pccpf_xxx_e_cap_hdr_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_20_31 : 12;
+ uint32_t pciecv : 4; /**< [ 19: 16](RO) PCIe capability version. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO) Next capability pointer. If MSI-X is supported, points to
+ PCCPF_XXX_MSIX_CAP_HDR, else 0x0. */
+ uint32_t pcieid : 8; /**< [ 7: 0](RO) PCIe capability ID. */
+#else /* Word 0 - Little Endian */
+ uint32_t pcieid : 8; /**< [ 7: 0](RO) PCIe capability ID. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO) Next capability pointer. If MSI-X is supported, points to
+ PCCPF_XXX_MSIX_CAP_HDR, else 0x0. */
+ uint32_t pciecv : 4; /**< [ 19: 16](RO) PCIe capability version. */
+ uint32_t reserved_20_31 : 12;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_pccpf_xxx_e_cap_hdr_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t dpt : 4; /**< [ 23: 20](RO) Device/port type. Indicates PCIe endpoint (0x0) for ARI devices (when bus is nonzero) and
+ integrated endpoint (0x9) otherwise. */
+ uint32_t pciecv : 4; /**< [ 19: 16](RO) PCIe capability version. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO/H) Next capability pointer. If MSI-X is supported, points to
+ PCCPF_XXX_MSIX_CAP_HDR, else points to PCCPF_XXX_EA_CAP_HDR. */
+ uint32_t pcieid : 8; /**< [ 7: 0](RO) PCIe capability ID. */
+#else /* Word 0 - Little Endian */
+ uint32_t pcieid : 8; /**< [ 7: 0](RO) PCIe capability ID. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO/H) Next capability pointer. If MSI-X is supported, points to
+ PCCPF_XXX_MSIX_CAP_HDR, else points to PCCPF_XXX_EA_CAP_HDR. */
+ uint32_t pciecv : 4; /**< [ 19: 16](RO) PCIe capability version. */
+ uint32_t dpt : 4; /**< [ 23: 20](RO) Device/port type. Indicates PCIe endpoint (0x0) for ARI devices (when bus is nonzero) and
+ integrated endpoint (0x9) otherwise. */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_pccpf_xxx_e_cap_hdr_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t dpt : 4; /**< [ 23: 20](RO) Device/port type. Indicates PCIe endpoint (0x0) for ARI devices (when bus is nonzero) and
+ integrated endpoint (0x9) otherwise. */
+ uint32_t pciecv : 4; /**< [ 19: 16](RO) PCIe capability version. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO/H) Next capability pointer. If MSI-X is supported, points to
+ PCCPF_XXX_MSIX_CAP_HDR, else if PCCPF_XXX_VSEC_SCTL[EA] is set points to
+ PCCPF_XXX_EA_CAP_HDR, else 0x0. */
+ uint32_t pcieid : 8; /**< [ 7: 0](RO) PCIe capability ID. */
+#else /* Word 0 - Little Endian */
+ uint32_t pcieid : 8; /**< [ 7: 0](RO) PCIe capability ID. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO/H) Next capability pointer. If MSI-X is supported, points to
+ PCCPF_XXX_MSIX_CAP_HDR, else if PCCPF_XXX_VSEC_SCTL[EA] is set points to
+ PCCPF_XXX_EA_CAP_HDR, else 0x0. */
+ uint32_t pciecv : 4; /**< [ 19: 16](RO) PCIe capability version. */
+ uint32_t dpt : 4; /**< [ 23: 20](RO) Device/port type. Indicates PCIe endpoint (0x0) for ARI devices (when bus is nonzero) and
+ integrated endpoint (0x9) otherwise. */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_pccpf_xxx_e_cap_hdr_cn81xx cn83xx; */
+ struct bdk_pccpf_xxx_e_cap_hdr_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t dpt : 4; /**< [ 23: 20](RO) Reserved. */
+ uint32_t pciecv : 4; /**< [ 19: 16](RO) PCIe capability version. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO/H) Next capability pointer. If MSI-X is supported, points to
+ PCCPF_XXX_MSIX_CAP_HDR, else if PCCPF_XXX_VSEC_SCTL[EA] is set points to
+ PCCPF_XXX_EA_CAP_HDR, else 0x0. */
+ uint32_t pcieid : 8; /**< [ 7: 0](RO) PCIe capability ID. */
+#else /* Word 0 - Little Endian */
+ uint32_t pcieid : 8; /**< [ 7: 0](RO) PCIe capability ID. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO/H) Next capability pointer. If MSI-X is supported, points to
+ PCCPF_XXX_MSIX_CAP_HDR, else if PCCPF_XXX_VSEC_SCTL[EA] is set points to
+ PCCPF_XXX_EA_CAP_HDR, else 0x0. */
+ uint32_t pciecv : 4; /**< [ 19: 16](RO) PCIe capability version. */
+ uint32_t dpt : 4; /**< [ 23: 20](RO) Reserved. */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_pccpf_xxx_e_cap_hdr bdk_pccpf_xxx_e_cap_hdr_t;
+
+#define BDK_PCCPF_XXX_E_CAP_HDR BDK_PCCPF_XXX_E_CAP_HDR_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_E_CAP_HDR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_E_CAP_HDR_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x40;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x40;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X))
+ return 0x70;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X))
+ return 0x40;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x40;
+ __bdk_csr_fatal("PCCPF_XXX_E_CAP_HDR", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCPF_XXX_E_CAP_HDR bdk_pccpf_xxx_e_cap_hdr_t
+#define bustype_BDK_PCCPF_XXX_E_CAP_HDR BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_E_CAP_HDR "PCCPF_XXX_E_CAP_HDR"
+#define busnum_BDK_PCCPF_XXX_E_CAP_HDR 0
+#define arguments_BDK_PCCPF_XXX_E_CAP_HDR -1,-1,-1,-1
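+
+/* Illustrative sketch, not part of the BDK import: given a raw dword read
+   from the config space offset returned by BDK_PCCPF_XXX_E_CAP_HDR_FUNC()
+   (note the offset is model-dependent: 0x70 on CN88XX pass 1.x, 0x40
+   elsewhere), the union above decodes the fields; [NCP] is the link used
+   to walk the PCI capability list. How the dword is fetched is
+   platform-specific and left abstract here. */
+static inline uint8_t example_e_cap_hdr_next_cap(uint32_t raw)
+{
+    bdk_pccpf_xxx_e_cap_hdr_t hdr;
+    hdr.u = raw;
+    return hdr.s.ncp; /* 0x0 terminates the capability list */
+}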
+
+/**
+ * Register (PCCPF) pccpf_xxx_e_dev_cap
+ *
+ * PCC PF PCI Express Device Capabilities Register
+ */
+union bdk_pccpf_xxx_e_dev_cap
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_e_dev_cap_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_29_31 : 3;
+ uint32_t flr : 1; /**< [ 28: 28](RO) Function level reset capability. If set, PCCPF_XXX_E_DEV_CTL[BCR_FLR] is
+ implemented.
+
+ In CNXXX:
+ 0 = PCCPF_XXX_E_DEV_CTL[BCR_FLR] is ignored, PCCPF_XXX_E_DEV_CTL[TRPEND] is
+ always zero, PCCPF_XXX_CMD[ME] is always set, and PCCPF_XXX_SRIOV_CTL[VFE] is
+ always set (for SR-IOV functions).
+
+ 1 = PCCPF_XXX_E_DEV_CTL[BCR_FLR], PCCPF_XXX_E_DEV_CTL[TRPEND],
+ PCCPF_XXX_CMD[ME], and PCCPF_XXX_SRIOV_CTL[VFE] (if applicable) are
+ functional.
+
+ Internal:
+ Returns 1 if block's CSR file has pcc_flr="True" attribute. */
+ uint32_t reserved_16_27 : 12;
+ uint32_t rber : 1; /**< [ 15: 15](RO) Role-based error reporting. Required to be set by PCIe 3.1. */
+ uint32_t reserved_0_14 : 15;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_14 : 15;
+ uint32_t rber : 1; /**< [ 15: 15](RO) Role-based error reporting. Required to be set by PCIe 3.1. */
+ uint32_t reserved_16_27 : 12;
+ uint32_t flr : 1; /**< [ 28: 28](RO) Function level reset capability. If set, PCCPF_XXX_E_DEV_CTL[BCR_FLR] is
+ implemented.
+
+ In CNXXX:
+ 0 = PCCPF_XXX_E_DEV_CTL[BCR_FLR] is ignored, PCCPF_XXX_E_DEV_CTL[TRPEND] is
+ always zero, PCCPF_XXX_CMD[ME] is always set, and PCCPF_XXX_SRIOV_CTL[VFE] is
+ always set (for SR-IOV functions).
+
+ 1 = PCCPF_XXX_E_DEV_CTL[BCR_FLR], PCCPF_XXX_E_DEV_CTL[TRPEND],
+ PCCPF_XXX_CMD[ME], and PCCPF_XXX_SRIOV_CTL[VFE] (if applicable) are
+ functional.
+
+ Internal:
+ Returns 1 if block's CSR file has pcc_flr="True" attribute. */
+ uint32_t reserved_29_31 : 3;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pccpf_xxx_e_dev_cap_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_16_31 : 16;
+ uint32_t rber : 1; /**< [ 15: 15](RO) Role-based error reporting. Required to be set by PCIe 3.1. */
+ uint32_t reserved_0_14 : 15;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_14 : 15;
+ uint32_t rber : 1; /**< [ 15: 15](RO) Role-based error reporting. Required to be set by PCIe 3.1. */
+ uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_pccpf_xxx_e_dev_cap_s cn9; */
+};
+typedef union bdk_pccpf_xxx_e_dev_cap bdk_pccpf_xxx_e_dev_cap_t;
+
+#define BDK_PCCPF_XXX_E_DEV_CAP BDK_PCCPF_XXX_E_DEV_CAP_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_E_DEV_CAP_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_E_DEV_CAP_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x44;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x44;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x44;
+ __bdk_csr_fatal("PCCPF_XXX_E_DEV_CAP", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCPF_XXX_E_DEV_CAP bdk_pccpf_xxx_e_dev_cap_t
+#define bustype_BDK_PCCPF_XXX_E_DEV_CAP BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_E_DEV_CAP "PCCPF_XXX_E_DEV_CAP"
+#define busnum_BDK_PCCPF_XXX_E_DEV_CAP 0
+#define arguments_BDK_PCCPF_XXX_E_DEV_CAP -1,-1,-1,-1
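+
+/* Illustrative sketch, not part of the BDK import: software should test
+   [FLR] before attempting a function level reset, since the field notes
+   above say PCCPF_XXX_E_DEV_CTL[BCR_FLR] is ignored when [FLR] is clear. */
+static inline int example_e_dev_cap_has_flr(uint32_t raw)
+{
+    bdk_pccpf_xxx_e_dev_cap_t cap;
+    cap.u = raw;
+    return cap.s.flr != 0;
+}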
+
+/**
+ * Register (PCCPF) pccpf_xxx_e_dev_ctl
+ *
+ * PCC PF PCI Express Device Control and Status Register
+ * This register is reset on a block domain reset or PF function level reset.
+ */
+union bdk_pccpf_xxx_e_dev_ctl
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_e_dev_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_22_31 : 10;
+ uint32_t trpend : 1; /**< [ 21: 21](RO/H) Transactions pending. If PCCPF_XXX_E_DEV_CAP[FLR] is clear, always 0. */
+ uint32_t reserved_20 : 1;
+ uint32_t urd : 1; /**< [ 19: 19](RO) Unsupported request detected. Always zero. */
+ uint32_t fed : 1; /**< [ 18: 18](R/W1C/H) Fatal error detected. Set when any bit in PCCPF_XXX_AER_UNCOR_STATUS transitions
+ to one while the corresponding bit in PCCPF_XXX_AER_UNCOR_SEVER is set. */
+ uint32_t nfed : 1; /**< [ 17: 17](R/W1C/H) Non-fatal error detected. Set when any bit in PCCPF_XXX_AER_UNCOR_STATUS
+ transitions to one while the corresponding bit in PCCPF_XXX_AER_UNCOR_SEVER is
+ clear. */
+ uint32_t ced : 1; /**< [ 16: 16](R/W1C/H) Correctable error detected. Set when any bit in PCCPF_XXX_AER_COR_STATUS transitions to one. */
+ uint32_t bcr_flr : 1; /**< [ 15: 15](R/W1S/H) Initiate function level reset. Writing a one to this bit initiates function level
+ reset if PCCPF_XXX_E_DEV_CAP[FLR] is set, else writes have no effect. This is a
+ self-clearing bit and always reads as zero. */
+ uint32_t reserved_4_14 : 11;
+ uint32_t urre : 1; /**< [ 3: 3](R/W) Unsupported request reporting enable. R/W for PCIe and software compatibility, not
+ used by hardware. */
+ uint32_t fere : 1; /**< [ 2: 2](R/W) Fatal error reporting enable. R/W for PCIe and software compatibility, not
+ used by hardware. */
+ uint32_t nfere : 1; /**< [ 1: 1](R/W) Nonfatal error reporting enable. R/W for PCIe and software compatibility, not
+ used by hardware. */
+ uint32_t cere : 1; /**< [ 0: 0](R/W) Correctable error reporting enable. R/W for PCIe and software compatibility, not
+ used by hardware. */
+#else /* Word 0 - Little Endian */
+ uint32_t cere : 1; /**< [ 0: 0](R/W) Correctable error reporting enable. R/W for PCIe and software compatibility, not
+ used by hardware. */
+ uint32_t nfere : 1; /**< [ 1: 1](R/W) Nonfatal error reporting enable. R/W for PCIe and software compatibility, not
+ used by hardware. */
+ uint32_t fere : 1; /**< [ 2: 2](R/W) Fatal error reporting enable. R/W for PCIe and software compatibility, not
+ used by hardware. */
+ uint32_t urre : 1; /**< [ 3: 3](R/W) Unsupported request reporting enable. R/W for PCIe and software compatibility, not
+ used by hardware. */
+ uint32_t reserved_4_14 : 11;
+ uint32_t bcr_flr : 1; /**< [ 15: 15](R/W1S/H) Initiate function level reset. Writing a one to this bit initiates function level
+ reset if PCCPF_XXX_E_DEV_CAP[FLR] is set, else writes have no effect. This is a
+ self-clearing bit and always reads as zero. */
+ uint32_t ced : 1; /**< [ 16: 16](R/W1C/H) Correctable error detected. Set when any bit in PCCPF_XXX_AER_COR_STATUS transitions to one. */
+ uint32_t nfed : 1; /**< [ 17: 17](R/W1C/H) Non-fatal error detected. Set when any bit in PCCPF_XXX_AER_UNCOR_STATUS
+ transitions to one while the corresponding bit in PCCPF_XXX_AER_UNCOR_SEVER is
+ clear. */
+ uint32_t fed : 1; /**< [ 18: 18](R/W1C/H) Fatal error detected. Set when any bit in PCCPF_XXX_AER_UNCOR_STATUS transitions
+ to one while the corresponding bit in PCCPF_XXX_AER_UNCOR_SEVER is set. */
+ uint32_t urd : 1; /**< [ 19: 19](RO) Unsupported request detected. Always zero. */
+ uint32_t reserved_20 : 1;
+ uint32_t trpend : 1; /**< [ 21: 21](RO/H) Transactions pending. If PCCPF_XXX_E_DEV_CAP[FLR] is clear, always 0. */
+ uint32_t reserved_22_31 : 10;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_e_dev_ctl_s cn; */
+};
+typedef union bdk_pccpf_xxx_e_dev_ctl bdk_pccpf_xxx_e_dev_ctl_t;
+
+#define BDK_PCCPF_XXX_E_DEV_CTL BDK_PCCPF_XXX_E_DEV_CTL_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_E_DEV_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_E_DEV_CTL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x48;
+ __bdk_csr_fatal("PCCPF_XXX_E_DEV_CTL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCPF_XXX_E_DEV_CTL bdk_pccpf_xxx_e_dev_ctl_t
+#define bustype_BDK_PCCPF_XXX_E_DEV_CTL BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_E_DEV_CTL "PCCPF_XXX_E_DEV_CTL"
+#define busnum_BDK_PCCPF_XXX_E_DEV_CTL 0
+#define arguments_BDK_PCCPF_XXX_E_DEV_CTL -1,-1,-1,-1
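+
+/* Illustrative sketch, not part of the BDK import: [BCR_FLR] is
+   write-one-to-set and self-clearing, so requesting a function level reset
+   is a single write of that bit, and completion is observed by re-reading
+   the register until [TRPEND] clears. The config space read/write
+   primitives are platform-specific and left abstract here. */
+static inline uint32_t example_e_dev_ctl_flr_request(void)
+{
+    bdk_pccpf_xxx_e_dev_ctl_t ctl = { .u = 0 };
+    ctl.s.bcr_flr = 1; /* value to write to BDK_PCCPF_XXX_E_DEV_CTL */
+    return ctl.u;
+}
+static inline int example_e_dev_ctl_flr_done(uint32_t raw)
+{
+    bdk_pccpf_xxx_e_dev_ctl_t ctl;
+    ctl.u = raw; /* raw is a fresh read of the register */
+    return ctl.s.trpend == 0; /* no transactions pending */
+}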
+
+/**
+ * Register (PCCPF) pccpf_xxx_ea_cap_hdr
+ *
+ * PCC PF PCI Enhanced Allocation Capabilities Register
+ * This register is the header of the variable-sized PCI enhanced allocation capability
+ * structure for type 0 devices.
+ */
+union bdk_pccpf_xxx_ea_cap_hdr
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_ea_cap_hdr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_22_31 : 10;
+ uint32_t num_entries : 6; /**< [ 21: 16](RO/H) Number of enhanced entries:
+ 0x0 = No nonzero BARs.
+ 0x1 = 1 nonzero normal or SR-IOV BAR.
+ 0x2 = 2 nonzero normal or SR-IOV BARs.
+ 0x3 = 3 nonzero normal or SR-IOV BARs.
+ 0x4 = 4 nonzero normal or SR-IOV BARs.
+
+ CNXXXX never has more than four normal or SR-IOV BARs. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO) Next capability pointer. No next capability. */
+ uint32_t pcieid : 8; /**< [ 7: 0](RO/H) Enhanced allocation capability ID. */
+#else /* Word 0 - Little Endian */
+ uint32_t pcieid : 8; /**< [ 7: 0](RO/H) Enhanced allocation capability ID. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO) Next capability pointer. No next capability. */
+ uint32_t num_entries : 6; /**< [ 21: 16](RO/H) Number of enhanced entries:
+ 0x0 = No nonzero BARs.
+ 0x1 = 1 nonzero normal or SR-IOV BAR.
+ 0x2 = 2 nonzero normal or SR-IOV BARs.
+ 0x3 = 3 nonzero normal or SR-IOV BARs.
+ 0x4 = 4 nonzero normal or SR-IOV BARs.
+
+ CNXXXX never has more than four normal or SR-IOV BARs. */
+ uint32_t reserved_22_31 : 10;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_ea_cap_hdr_s cn8; */
+ struct bdk_pccpf_xxx_ea_cap_hdr_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_22_31 : 10;
+ uint32_t num_entries : 6; /**< [ 21: 16](RO/H) Number of enhanced entries:
+ 0x0 = No nonzero BARs.
+ 0x1 = 1 nonzero normal or SR-IOV BARs.
+ 0x2 = 2 nonzero normal or SR-IOV BARs.
+ 0x3 = 3 nonzero normal or SR-IOV BARs.
+ 0x4 = 4 nonzero normal or SR-IOV BARs.
+
+ CNXXXX never has more than four normal or SR-IOV BARs. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO) Next capability pointer. No next capability. */
+ uint32_t pcieid : 8; /**< [ 7: 0](RO) Enhanced allocation capability ID. */
+#else /* Word 0 - Little Endian */
+ uint32_t pcieid : 8; /**< [ 7: 0](RO) Enhanced allocation capability ID. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO) Next capability pointer. No next capability. */
+ uint32_t num_entries : 6; /**< [ 21: 16](RO/H) Number of enhanced entries:
+ 0x0 = No nonzero BARs.
+ 0x1 = 1 nonzero normal or SR-IOV BARs.
+ 0x2 = 2 nonzero normal or SR-IOV BARs.
+ 0x3 = 3 nonzero normal or SR-IOV BARs.
+ 0x4 = 4 nonzero normal or SR-IOV BARs.
+
+ CNXXXX never has more than four normal or SR-IOV BARs. */
+ uint32_t reserved_22_31 : 10;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_pccpf_xxx_ea_cap_hdr bdk_pccpf_xxx_ea_cap_hdr_t;
+
+#define BDK_PCCPF_XXX_EA_CAP_HDR BDK_PCCPF_XXX_EA_CAP_HDR_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_EA_CAP_HDR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_EA_CAP_HDR_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x98;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x98;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X))
+ return 0x98;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x98;
+ __bdk_csr_fatal("PCCPF_XXX_EA_CAP_HDR", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCPF_XXX_EA_CAP_HDR bdk_pccpf_xxx_ea_cap_hdr_t
+#define bustype_BDK_PCCPF_XXX_EA_CAP_HDR BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_EA_CAP_HDR "PCCPF_XXX_EA_CAP_HDR"
+#define busnum_BDK_PCCPF_XXX_EA_CAP_HDR 0
+#define arguments_BDK_PCCPF_XXX_EA_CAP_HDR -1,-1,-1,-1
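+
+/* Illustrative sketch, not part of the BDK import: [NUM_ENTRIES] bounds
+   the walk of the PCCPF_XXX_EA_ENTRY() words that follow; per the notes
+   above, CNXXXX never reports more than four normal or SR-IOV BARs. */
+static inline unsigned example_ea_cap_hdr_num_entries(uint32_t raw)
+{
+    bdk_pccpf_xxx_ea_cap_hdr_t hdr;
+    hdr.u = raw;
+    return hdr.s.num_entries;
+}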
+
+/**
+ * Register (PCCPF) pccpf_xxx_ea_entry#
+ *
+ * PCC PF PCI Enhanced Allocation Entry Registers
+ * These registers contain up to four sequential enhanced allocation entries. Each
+ * entry consists of five sequential 32-bit words described by PCC_EA_ENTRY_S.
+ */
+union bdk_pccpf_xxx_ea_entryx
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_ea_entryx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t data : 32; /**< [ 31: 0](RO/H) Entry data. See PCC_EA_ENTRY_S. */
+#else /* Word 0 - Little Endian */
+ uint32_t data : 32; /**< [ 31: 0](RO/H) Entry data. See PCC_EA_ENTRY_S. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_ea_entryx_s cn; */
+};
+typedef union bdk_pccpf_xxx_ea_entryx bdk_pccpf_xxx_ea_entryx_t;
+
+static inline uint64_t BDK_PCCPF_XXX_EA_ENTRYX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_EA_ENTRYX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=19))
+ return 0x9c + 4 * ((a) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=24))
+ return 0x9c + 4 * ((a) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X) && (a<=19))
+ return 0x9c + 4 * ((a) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=24))
+ return 0x9c + 4 * ((a) & 0x1f);
+ __bdk_csr_fatal("PCCPF_XXX_EA_ENTRYX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCPF_XXX_EA_ENTRYX(a) bdk_pccpf_xxx_ea_entryx_t
+#define bustype_BDK_PCCPF_XXX_EA_ENTRYX(a) BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_EA_ENTRYX(a) "PCCPF_XXX_EA_ENTRYX"
+#define busnum_BDK_PCCPF_XXX_EA_ENTRYX(a) (a)
+#define arguments_BDK_PCCPF_XXX_EA_ENTRYX(a) (a),-1,-1,-1
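+
+/* Illustrative sketch, not part of the BDK import: the description above
+   says each enhanced allocation entry is five sequential 32-bit words
+   (PCC_EA_ENTRY_S), so under that assumption word w of entry e lives at
+   the config offset for index e * 5 + w. Out-of-range indexes fault via
+   __bdk_csr_fatal() as coded above. */
+static inline uint64_t example_ea_entry_word_offset(unsigned e, unsigned w)
+{
+    return BDK_PCCPF_XXX_EA_ENTRYX(e * 5 + w);
+}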
+
+/**
+ * Register (PCCPF) pccpf_xxx_id
+ *
+ * PCC PF Vendor and Device ID Register
+ * This register is the header of the 64-byte PCI type 0 configuration structure.
+ */
+union bdk_pccpf_xxx_id
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_id_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t devid : 16; /**< [ 31: 16](RO) Device ID. \<15:8\> is PCC_PROD_E::GEN. \<7:0\> enumerated by PCC_DEV_IDL_E.
+
+ Internal:
+ Unit from PCC's tie__pfunitid. */
+ uint32_t vendid : 16; /**< [ 15: 0](RO) Cavium's vendor ID. Enumerated by PCC_VENDOR_E::CAVIUM. */
+#else /* Word 0 - Little Endian */
+ uint32_t vendid : 16; /**< [ 15: 0](RO) Cavium's vendor ID. Enumerated by PCC_VENDOR_E::CAVIUM. */
+ uint32_t devid : 16; /**< [ 31: 16](RO) Device ID. \<15:8\> is PCC_PROD_E::GEN. \<7:0\> enumerated by PCC_DEV_IDL_E.
+
+ Internal:
+ Unit from PCC's tie__pfunitid. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_id_s cn8; */
+ struct bdk_pccpf_xxx_id_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t devid : 16; /**< [ 31: 16](RO/H) Device ID. \<15:8\> is PCC_PROD_E::GEN. \<7:0\> enumerated by PCC_DEV_IDL_E.
+
+ Internal:
+ Unit from PCC's tie__pfunitid. */
+ uint32_t vendid : 16; /**< [ 15: 0](RO) Cavium's vendor ID. Enumerated by PCC_VENDOR_E::CAVIUM. */
+#else /* Word 0 - Little Endian */
+ uint32_t vendid : 16; /**< [ 15: 0](RO) Cavium's vendor ID. Enumerated by PCC_VENDOR_E::CAVIUM. */
+ uint32_t devid : 16; /**< [ 31: 16](RO/H) Device ID. \<15:8\> is PCC_PROD_E::GEN. \<7:0\> enumerated by PCC_DEV_IDL_E.
+
+ Internal:
+ Unit from PCC's tie__pfunitid. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_pccpf_xxx_id bdk_pccpf_xxx_id_t;
+
+#define BDK_PCCPF_XXX_ID BDK_PCCPF_XXX_ID_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_ID_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_ID_FUNC(void)
+{
+ return 0;
+}
+
+#define typedef_BDK_PCCPF_XXX_ID bdk_pccpf_xxx_id_t
+#define bustype_BDK_PCCPF_XXX_ID BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_ID "PCCPF_XXX_ID"
+#define busnum_BDK_PCCPF_XXX_ID 0
+#define arguments_BDK_PCCPF_XXX_ID -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_msix_cap_hdr
+ *
+ * PCC PF MSI-X Capability Header Register
+ * This register is the header of the 36-byte PCI MSI-X capability structure.
+ *
+ * This register is reset on a block domain reset or PF function level reset.
+ */
+union bdk_pccpf_xxx_msix_cap_hdr
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_msix_cap_hdr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t msixen : 1; /**< [ 31: 31](R/W) MSI-X enable.
+ 0 = The MSI-X PBAs corresponding to this function are cleared. Interrupt messages
+ will not be issued.
+ 1 = Normal PBA and MSI-X delivery. */
+ uint32_t funm : 1; /**< [ 30: 30](R/W) Function mask.
+ 0 = Each vector's mask bit determines whether the vector is masked.
+ 1 = All vectors associated with the function are masked, regardless of their respective
+ per-vector mask bits.
+
+ Setting or clearing FUNM has no effect on the state of the per-vector mask bits. */
+ uint32_t reserved_27_29 : 3;
+ uint32_t msixts : 11; /**< [ 26: 16](RO) MSI-X table size encoded as (table size - 1).
+ Internal:
+ From PCC's MSIX_PF_VECS parameter. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO) Next capability pointer. */
+ uint32_t msixcid : 8; /**< [ 7: 0](RO) MSI-X capability ID. */
+#else /* Word 0 - Little Endian */
+ uint32_t msixcid : 8; /**< [ 7: 0](RO) MSI-X capability ID. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO) Next capability pointer. */
+ uint32_t msixts : 11; /**< [ 26: 16](RO) MSI-X table size encoded as (table size - 1).
+ Internal:
+ From PCC's MSIX_PF_VECS parameter. */
+ uint32_t reserved_27_29 : 3;
+ uint32_t funm : 1; /**< [ 30: 30](R/W) Function mask.
+ 0 = Each vector's mask bit determines whether the vector is masked.
+ 1 = All vectors associated with the function are masked, regardless of their respective
+ per-vector mask bits.
+
+ Setting or clearing FUNM has no effect on the state of the per-vector mask bits. */
+ uint32_t msixen : 1; /**< [ 31: 31](R/W) MSI-X enable.
+ 0 = The MSI-X PBAs corresponding to this function are cleared. Interrupt messages
+ will not be issued.
+ 1 = Normal PBA and MSI-X delivery. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_msix_cap_hdr_s cn88xxp1; */
+ struct bdk_pccpf_xxx_msix_cap_hdr_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t msixen : 1; /**< [ 31: 31](R/W) MSI-X enable.
+ 0 = The MSI-X PBAs corresponding to this function are cleared. Interrupt messages
+ will not be issued.
+ 1 = Normal PBA and MSI-X delivery. See also PCCPF_XXX_CMD[ME]. */
+ uint32_t funm : 1; /**< [ 30: 30](R/W) Function mask.
+ 0 = Each vector's mask bit determines whether the vector is masked.
+ 1 = All vectors associated with the function are masked, regardless of their respective
+ per-vector mask bits.
+
+ Setting or clearing [FUNM] has no effect on the state of the per-vector mask bits. */
+ uint32_t reserved_27_29 : 3;
+ uint32_t msixts : 11; /**< [ 26: 16](RO/H) MSI-X table size encoded as (table size - 1).
+ Internal:
+ From PCC generated parameter. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO) Next capability pointer. Points to PCCPF_XXX_EA_CAP_HDR. */
+ uint32_t msixcid : 8; /**< [ 7: 0](RO) MSI-X Capability ID. */
+#else /* Word 0 - Little Endian */
+ uint32_t msixcid : 8; /**< [ 7: 0](RO) MSI-X Capability ID. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO) Next capability pointer. Points to PCCPF_XXX_EA_CAP_HDR. */
+ uint32_t msixts : 11; /**< [ 26: 16](RO/H) MSI-X table size encoded as (table size - 1).
+ Internal:
+ From PCC generated parameter. */
+ uint32_t reserved_27_29 : 3;
+ uint32_t funm : 1; /**< [ 30: 30](R/W) Function mask.
+ 0 = Each vector's mask bit determines whether the vector is masked.
+ 1 = All vectors associated with the function are masked, regardless of their respective
+ per-vector mask bits.
+
+ Setting or clearing [FUNM] has no effect on the state of the per-vector mask bits. */
+ uint32_t msixen : 1; /**< [ 31: 31](R/W) MSI-X enable.
+ 0 = The MSI-X PBAs corresponding to this function are cleared. Interrupt messages
+ will not be issued.
+ 1 = Normal PBA and MSI-X delivery. See also PCCPF_XXX_CMD[ME]. */
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_pccpf_xxx_msix_cap_hdr_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t msixen : 1; /**< [ 31: 31](R/W) MSI-X enable.
+ 0 = The MSI-X PBAs corresponding to this function are cleared. Interrupt messages
+ will not be issued.
+ 1 = Normal PBA and MSI-X delivery. */
+ uint32_t funm : 1; /**< [ 30: 30](R/W) Function mask.
+ 0 = Each vector's mask bit determines whether the vector is masked.
+ 1 = All vectors associated with the function are masked, regardless of their respective
+ per-vector mask bits.
+
+ Setting or clearing FUNM has no effect on the state of the per-vector mask bits. */
+ uint32_t reserved_27_29 : 3;
+ uint32_t msixts : 11; /**< [ 26: 16](RO) MSI-X table size encoded as (table size - 1).
+ Internal:
+ From PCC's MSIX_PF_VECS parameter. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO/H) Next capability pointer. If PCCPF_XXX_VSEC_SCTL[EA] is set points to
+ PCCPF_XXX_EA_CAP_HDR, else 0x0. */
+ uint32_t msixcid : 8; /**< [ 7: 0](RO) MSI-X Capability ID. */
+#else /* Word 0 - Little Endian */
+ uint32_t msixcid : 8; /**< [ 7: 0](RO) MSI-X Capability ID. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO/H) Next capability pointer. If PCCPF_XXX_VSEC_SCTL[EA] is set points to
+ PCCPF_XXX_EA_CAP_HDR, else 0x0. */
+ uint32_t msixts : 11; /**< [ 26: 16](RO) MSI-X table size encoded as (table size - 1).
+ Internal:
+ From PCC's MSIX_PF_VECS parameter. */
+ uint32_t reserved_27_29 : 3;
+ uint32_t funm : 1; /**< [ 30: 30](R/W) Function mask.
+ 0 = Each vector's mask bit determines whether the vector is masked.
+ 1 = All vectors associated with the function are masked, regardless of their respective
+ per-vector mask bits.
+
+ Setting or clearing FUNM has no effect on the state of the per-vector mask bits. */
+ uint32_t msixen : 1; /**< [ 31: 31](R/W) MSI-X enable.
+ 0 = The MSI-X PBAs corresponding to this function are cleared. Interrupt messages
+ will not be issued.
+ 1 = Normal PBA and MSI-X delivery. */
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_pccpf_xxx_msix_cap_hdr_cn81xx cn83xx; */
+ /* struct bdk_pccpf_xxx_msix_cap_hdr_cn81xx cn88xxp2; */
+};
+typedef union bdk_pccpf_xxx_msix_cap_hdr bdk_pccpf_xxx_msix_cap_hdr_t;
+
+#define BDK_PCCPF_XXX_MSIX_CAP_HDR BDK_PCCPF_XXX_MSIX_CAP_HDR_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_MSIX_CAP_HDR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_MSIX_CAP_HDR_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x80;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x80;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X))
+ return 0xb0;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X))
+ return 0x80;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x80;
+ __bdk_csr_fatal("PCCPF_XXX_MSIX_CAP_HDR", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCPF_XXX_MSIX_CAP_HDR bdk_pccpf_xxx_msix_cap_hdr_t
+#define bustype_BDK_PCCPF_XXX_MSIX_CAP_HDR BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_MSIX_CAP_HDR "PCCPF_XXX_MSIX_CAP_HDR"
+#define busnum_BDK_PCCPF_XXX_MSIX_CAP_HDR 0
+#define arguments_BDK_PCCPF_XXX_MSIX_CAP_HDR -1,-1,-1,-1
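+
+/* Illustrative sketch, not part of the BDK import: [MSIXTS] encodes the
+   MSI-X table size minus one, so the usable vector count is the field
+   plus one. */
+static inline unsigned example_msix_vector_count(uint32_t raw)
+{
+    bdk_pccpf_xxx_msix_cap_hdr_t hdr;
+    hdr.u = raw;
+    return (unsigned)hdr.s.msixts + 1;
+}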
+
+/**
+ * Register (PCCPF) pccpf_xxx_msix_pba
+ *
+ * PCC PF MSI-X PBA Offset and BIR Register
+ */
+union bdk_pccpf_xxx_msix_pba
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_msix_pba_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t msixpoffs : 29; /**< [ 31: 3](RO) MSI-X PBA offset register. Offset of the MSI-X PBA, as a number of eight-byte words from
+ the base address of the BAR; e.g. 0x1E000 corresponds to a byte offset of 0xF0000. */
+ uint32_t msixpbir : 3; /**< [ 2: 0](RO) MSI-X PBA BAR indicator register (BIR). Indicates which BAR is used to map the MSI-X
+ pending bit array into memory space. As BARs are 64 bits, 0x4 indicates BAR4H/L. */
+#else /* Word 0 - Little Endian */
+ uint32_t msixpbir : 3; /**< [ 2: 0](RO) MSI-X PBA BAR indicator register (BIR). Indicates which BAR is used to map the MSI-X
+ pending bit array into memory space. As BARs are 64 bits, 0x4 indicates BAR4H/L. */
+ uint32_t msixpoffs : 29; /**< [ 31: 3](RO) MSI-X PBA offset register. Offset of the MSI-X PBA, as a number of eight-byte words from
+ the base address of the BAR; e.g. 0x1E000 corresponds to a byte offset of 0xF0000. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_msix_pba_s cn8; */
+ struct bdk_pccpf_xxx_msix_pba_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t msixpoffs : 29; /**< [ 31: 3](RO) MSI-X PBA offset register. Offset of the MSI-X PBA, as a number of eight-byte words from
+ the base address of the BAR; e.g. 0x1E000 corresponds to a byte offset of 0xF0000. */
+ uint32_t msixpbir : 3; /**< [ 2: 0](RO) MSI-X PBA BAR indicator register (BIR). Indicates which BAR is used to map the MSI-X
+ pending bit array into memory space. Typically 0x4, indicating BAR4H/L. */
+#else /* Word 0 - Little Endian */
+ uint32_t msixpbir : 3; /**< [ 2: 0](RO) MSI-X PBA BAR indicator register (BIR). Indicates which BAR is used to map the MSI-X
+ pending bit array into memory space. Typically 0x4, indicating BAR4H/L. */
+ uint32_t msixpoffs : 29; /**< [ 31: 3](RO) MSI-X PBA offset register. Offset of the MSI-X PBA, as a number of eight-byte words from
+ the base address of the BAR; e.g. 0x1E000 corresponds to a byte offset of 0xF0000. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_pccpf_xxx_msix_pba bdk_pccpf_xxx_msix_pba_t;
+
+#define BDK_PCCPF_XXX_MSIX_PBA BDK_PCCPF_XXX_MSIX_PBA_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_MSIX_PBA_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_MSIX_PBA_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x88;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x88;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X))
+ return 0xb8;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X))
+ return 0x88;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x88;
+ __bdk_csr_fatal("PCCPF_XXX_MSIX_PBA", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCPF_XXX_MSIX_PBA bdk_pccpf_xxx_msix_pba_t
+#define bustype_BDK_PCCPF_XXX_MSIX_PBA BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_MSIX_PBA "PCCPF_XXX_MSIX_PBA"
+#define busnum_BDK_PCCPF_XXX_MSIX_PBA 0
+#define arguments_BDK_PCCPF_XXX_MSIX_PBA -1,-1,-1,-1
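+
+/* Illustrative sketch, not part of the BDK import: [MSIXPOFFS] counts
+   eight-byte words, so the byte offset within the BAR selected by
+   [MSIXPBIR] is the field shifted left by three; 0x1E000 << 3 == 0xF0000,
+   matching the example in the field description. */
+static inline uint64_t example_msix_pba_byte_offset(uint32_t raw)
+{
+    bdk_pccpf_xxx_msix_pba_t pba;
+    pba.u = raw;
+    return (uint64_t)pba.s.msixpoffs << 3;
+}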
+
+/**
+ * Register (PCCPF) pccpf_xxx_msix_table
+ *
+ * PCC PF MSI-X Table Offset and BIR Register
+ */
+union bdk_pccpf_xxx_msix_table
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_msix_table_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t msixtoffs : 29; /**< [ 31: 3](RO) MSI-X table offset register. Offset of the MSI-X table, as a number of eight-byte
+ words from the base address of the BAR. */
+ uint32_t msixtbir : 3; /**< [ 2: 0](RO) MSI-X table BAR indicator register (BIR). Indicates which BAR is used to map the MSI-X
+ table into memory space. As BARs are 64 bits, 0x4 indicates BAR4H/L. */
+#else /* Word 0 - Little Endian */
+ uint32_t msixtbir : 3; /**< [ 2: 0](RO) MSI-X table BAR indicator register (BIR). Indicates which BAR is used to map the MSI-X
+ table into memory space. As BARs are 64 bits, 0x4 indicates BAR4H/L. */
+ uint32_t msixtoffs : 29; /**< [ 31: 3](RO) MSI-X table offset register. Offset of the MSI-X table, as a number of eight-byte
+ words from the base address of the BAR. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_msix_table_s cn8; */
+ struct bdk_pccpf_xxx_msix_table_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t msixtoffs : 29; /**< [ 31: 3](RO) MSI-X table offset register. Offset of the MSI-X table, as a number of eight-byte
+ words from the base address of the BAR. */
+ uint32_t msixtbir : 3; /**< [ 2: 0](RO) MSI-X table BAR indicator register (BIR). Indicates which BAR is used to map the MSI-X
+ table into memory space. Typically 0x4, indicating BAR4H/L.
+
+ Internal:
+ From PCC generated parameter. */
+#else /* Word 0 - Little Endian */
+ uint32_t msixtbir : 3; /**< [ 2: 0](RO) MSI-X table BAR indicator register (BIR). Indicates which BAR is used to map the MSI-X
+ table into memory space. Typically 0x4, indicating BAR4H/L.
+
+ Internal:
+ From PCC generated parameter. */
+ uint32_t msixtoffs : 29; /**< [ 31: 3](RO) MSI-X table offset register. Offset of the MSI-X table, as a number of eight-byte
+ words from the base address of the BAR. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_pccpf_xxx_msix_table bdk_pccpf_xxx_msix_table_t;
+
+#define BDK_PCCPF_XXX_MSIX_TABLE BDK_PCCPF_XXX_MSIX_TABLE_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_MSIX_TABLE_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_MSIX_TABLE_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x84;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x84;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X))
+ return 0xb4;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X))
+ return 0x84;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x84;
+ __bdk_csr_fatal("PCCPF_XXX_MSIX_TABLE", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCPF_XXX_MSIX_TABLE bdk_pccpf_xxx_msix_table_t
+#define bustype_BDK_PCCPF_XXX_MSIX_TABLE BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_MSIX_TABLE "PCCPF_XXX_MSIX_TABLE"
+#define busnum_BDK_PCCPF_XXX_MSIX_TABLE 0
+#define arguments_BDK_PCCPF_XXX_MSIX_TABLE -1,-1,-1,-1
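+
+/* Illustrative sketch, not part of the BDK import: computing the CPU
+   address of MSI-X table entry v. [MSIXTOFFS] counts eight-byte words and
+   a standard MSI-X table entry is 16 bytes; bar_base is assumed to be the
+   already-mapped base of the BAR selected by [MSIXTBIR] (typically
+   BAR4H/L). */
+static inline uint64_t example_msix_table_entry_addr(uint64_t bar_base,
+                                                     uint32_t raw, unsigned v)
+{
+    bdk_pccpf_xxx_msix_table_t tbl;
+    tbl.u = raw;
+    return bar_base + ((uint64_t)tbl.s.msixtoffs << 3) + (uint64_t)v * 16;
+}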
+
+/**
+ * Register (PCCPF) pccpf_xxx_rev
+ *
+ * PCC PF Class Code/Revision ID Register
+ */
+union bdk_pccpf_xxx_rev
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_rev_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t bcc : 8; /**< [ 31: 24](RO) Base class code. See PCC_DEV_IDL_E.
+ Internal:
+ From PCC's tie__class_code[23:16]. */
+ uint32_t sc : 8; /**< [ 23: 16](RO) Subclass code. See PCC_DEV_IDL_E.
+ Internal:
+ From PCC's tie__class_code[15:8]. */
+ uint32_t pi : 8; /**< [ 15: 8](RO) Programming interface. See PCC_DEV_IDL_E.
+ Internal:
+ From PCC's tie__class_code[7:0]. */
+ uint32_t rid : 8; /**< [ 7: 0](RO/H) Revision ID. Read only version of PCCPF_XXX_VSEC_SCTL[RID]. */
+#else /* Word 0 - Little Endian */
+ uint32_t rid : 8; /**< [ 7: 0](RO/H) Revision ID. Read only version of PCCPF_XXX_VSEC_SCTL[RID]. */
+ uint32_t pi : 8; /**< [ 15: 8](RO) Programming interface. See PCC_DEV_IDL_E.
+ Internal:
+ From PCC's tie__class_code[7:0]. */
+ uint32_t sc : 8; /**< [ 23: 16](RO) Subclass code. See PCC_DEV_IDL_E.
+ Internal:
+ From PCC's tie__class_code[15:8]. */
+ uint32_t bcc : 8; /**< [ 31: 24](RO) Base class code. See PCC_DEV_IDL_E.
+ Internal:
+ From PCC's tie__class_code[23:16]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_rev_s cn8; */
+ struct bdk_pccpf_xxx_rev_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t bcc : 8; /**< [ 31: 24](RO/H) Base class code. See PCC_DEV_IDL_E.
+ Internal:
+ From PCC's tie__class_code[23:16]. */
+ uint32_t sc : 8; /**< [ 23: 16](RO/H) Subclass code. See PCC_DEV_IDL_E.
+ Internal:
+ From PCC's tie__class_code[15:8]. */
+ uint32_t pi : 8; /**< [ 15: 8](RO/H) Programming interface. See PCC_DEV_IDL_E.
+ Internal:
+ From PCC's tie__class_code[7:0]. */
+ uint32_t rid : 8; /**< [ 7: 0](RO/H) Revision ID. Read only version of PCCPF_XXX_VSEC_SCTL[RID]. */
+#else /* Word 0 - Little Endian */
+ uint32_t rid : 8; /**< [ 7: 0](RO/H) Revision ID. Read only version of PCCPF_XXX_VSEC_SCTL[RID]. */
+ uint32_t pi : 8; /**< [ 15: 8](RO/H) Programming interface. See PCC_DEV_IDL_E.
+ Internal:
+ From PCC's tie__class_code[7:0]. */
+ uint32_t sc : 8; /**< [ 23: 16](RO/H) Subclass code. See PCC_DEV_IDL_E.
+ Internal:
+ From PCC's tie__class_code[15:8]. */
+ uint32_t bcc : 8; /**< [ 31: 24](RO/H) Base class code. See PCC_DEV_IDL_E.
+ Internal:
+ From PCC's tie__class_code[23:16]. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_pccpf_xxx_rev bdk_pccpf_xxx_rev_t;
+
+#define BDK_PCCPF_XXX_REV BDK_PCCPF_XXX_REV_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_REV_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_REV_FUNC(void)
+{
+ return 8;
+}
+
+#define typedef_BDK_PCCPF_XXX_REV bdk_pccpf_xxx_rev_t
+#define bustype_BDK_PCCPF_XXX_REV BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_REV "PCCPF_XXX_REV"
+#define busnum_BDK_PCCPF_XXX_REV 0
+#define arguments_BDK_PCCPF_XXX_REV -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_sari_nxt
+ *
+ * PCC PF ARI Capability Register
+ * If this device is on bus 0x0, this ARI header is not present and reads as 0x0.
+ */
+union bdk_pccpf_xxx_sari_nxt
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_sari_nxt_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_16_31 : 16;
+ uint32_t nxtfn : 8; /**< [ 15: 8](RO/H) Next function number. 0x0 except for PCC_DEV_IDL_E::MRML, where it points to the next MFD
+ in the linked list of MFDs underneath the RSL; the value comes from
+ PCCPF_XXX_VSEC_CTL[NXTFN_NS] or PCCPF_XXX_VSEC_SCTL[NXTFN_S] for nonsecure or secure
+ accesses, respectively. */
+ uint32_t reserved_0_7 : 8;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_7 : 8;
+ uint32_t nxtfn : 8; /**< [ 15: 8](RO/H) Next function number. 0x0 except for PCC_DEV_IDL_E::MRML, where it points to the next MFD
+ in the linked list of MFDs underneath the RSL; the value comes from
+ PCCPF_XXX_VSEC_CTL[NXTFN_NS] or PCCPF_XXX_VSEC_SCTL[NXTFN_S] for nonsecure or secure
+ accesses, respectively. */
+ uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_sari_nxt_s cn; */
+};
+typedef union bdk_pccpf_xxx_sari_nxt bdk_pccpf_xxx_sari_nxt_t;
+
+#define BDK_PCCPF_XXX_SARI_NXT BDK_PCCPF_XXX_SARI_NXT_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_SARI_NXT_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_SARI_NXT_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x144;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x144;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x104;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x174;
+ __bdk_csr_fatal("PCCPF_XXX_SARI_NXT", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCPF_XXX_SARI_NXT bdk_pccpf_xxx_sari_nxt_t
+#define bustype_BDK_PCCPF_XXX_SARI_NXT BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_SARI_NXT "PCCPF_XXX_SARI_NXT"
+#define busnum_BDK_PCCPF_XXX_SARI_NXT 0
+#define arguments_BDK_PCCPF_XXX_SARI_NXT -1,-1,-1,-1
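+
+/* Illustrative sketch, not part of the BDK import: on the MRML device
+   [NXTFN] chains the MFDs underneath the RSL, so enumeration follows
+   nonzero next-function numbers from one function's SARI_NXT to the
+   next. */
+static inline uint8_t example_sari_next_function(uint32_t raw)
+{
+    bdk_pccpf_xxx_sari_nxt_t nxt;
+    nxt.u = raw;
+    return nxt.s.nxtfn; /* 0x0 ends the chain */
+}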
+
+/**
+ * Register (PCCPF) pccpf_xxx_sriov_bar0l
+ *
+ * PCC PF SR-IOV BAR 0 Lower Register
+ */
+union bdk_pccpf_xxx_sriov_bar0l
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_sriov_bar0l_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pccpf_xxx_sriov_bar0l_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t lbab : 16; /**< [ 31: 16](R/W/H) Lower bits of the VF BAR 0 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__vfbar0_rbsz and
+ tie__vfbar0_offset. */
+ uint32_t reserved_4_15 : 12;
+ uint32_t pf : 1; /**< [ 3: 3](RO) Prefetchable. */
+ uint32_t typ : 2; /**< [ 2: 1](RO) BAR type. 0x0 if not implemented, else 0x2:
+ 0x0 = 32-bit BAR, or BAR not present.
+ 0x2 = 64-bit BAR. */
+ uint32_t mspc : 1; /**< [ 0: 0](RO) Memory space indicator.
+ 0 = BAR is a memory BAR.
+ 1 = BAR is an I/O BAR. */
+#else /* Word 0 - Little Endian */
+ uint32_t mspc : 1; /**< [ 0: 0](RO) Memory space indicator.
+ 0 = BAR is a memory BAR.
+ 1 = BAR is an I/O BAR. */
+ uint32_t typ : 2; /**< [ 2: 1](RO) BAR type. 0x0 if not implemented, else 0x2:
+ 0x0 = 32-bit BAR, or BAR not present.
+ 0x2 = 64-bit BAR. */
+ uint32_t pf : 1; /**< [ 3: 3](RO) Prefetchable. */
+ uint32_t reserved_4_15 : 12;
+ uint32_t lbab : 16; /**< [ 31: 16](R/W/H) Lower bits of the VF BAR 0 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__vfbar0_rbsz and
+ tie__vfbar0_offset. */
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_pccpf_xxx_sriov_bar0l_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t bar : 32; /**< [ 31: 0](RO) Always zero. Enhanced allocation used instead of BARs. */
+#else /* Word 0 - Little Endian */
+ uint32_t bar : 32; /**< [ 31: 0](RO) Always zero. Enhanced allocation used instead of BARs. */
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_pccpf_xxx_sriov_bar0l_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t lbab : 16; /**< [ 31: 16](R/W/H) Lower bits of the VF BAR 0 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__vfbar0_rbsz and
+ tie__vfbar0_offset. */
+ uint32_t reserved_4_15 : 12;
+ uint32_t pf : 1; /**< [ 3: 3](RO) Prefetchable. */
+ uint32_t typ : 2; /**< [ 2: 1](RO/H) BAR type. 0x0 if not implemented or PCCPF_XXX_VSEC_SCTL[EA] is set, else 0x2:
+ 0x0 = 32-bit BAR, or BAR not present.
+ 0x2 = 64-bit BAR. */
+ uint32_t mspc : 1; /**< [ 0: 0](RO) Memory Space Indicator.
+ 0 = BAR is a memory BAR.
+ 1 = BAR is an I/O BAR. */
+#else /* Word 0 - Little Endian */
+ uint32_t mspc : 1; /**< [ 0: 0](RO) Memory Space Indicator.
+ 0 = BAR is a memory BAR.
+ 1 = BAR is an I/O BAR. */
+ uint32_t typ : 2; /**< [ 2: 1](RO/H) BAR type. 0x0 if not implemented or PCCPF_XXX_VSEC_SCTL[EA] is set, else 0x2:
+ 0x0 = 32-bit BAR, or BAR not present.
+ 0x2 = 64-bit BAR. */
+ uint32_t pf : 1; /**< [ 3: 3](RO) Prefetchable. */
+ uint32_t reserved_4_15 : 12;
+ uint32_t lbab : 16; /**< [ 31: 16](R/W/H) Lower bits of the VF BAR 0 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__vfbar0_rbsz and
+ tie__vfbar0_offset. */
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_pccpf_xxx_sriov_bar0l_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t lbab : 16; /**< [ 31: 16](R/W/H) Lower bits of the VF BAR 0 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__vfbar0_rbsz and
+ tie__vfbar0_offset. */
+ uint32_t reserved_4_15 : 12;
+ uint32_t pf : 1; /**< [ 3: 3](RO) Prefetchable. */
+ uint32_t typ : 2; /**< [ 2: 1](RO/H) BAR type. 0x0 if not implemented or PCCPF_XXX_VSEC_SCTL[EA] is set, else 0x2:
+ 0x0 = 32-bit BAR, or BAR not present.
+ 0x2 = 64-bit BAR. */
+ uint32_t mspc : 1; /**< [ 0: 0](RO) Memory space indicator.
+ 0 = BAR is a memory BAR.
+ 1 = BAR is an I/O BAR. */
+#else /* Word 0 - Little Endian */
+ uint32_t mspc : 1; /**< [ 0: 0](RO) Memory space indicator.
+ 0 = BAR is a memory BAR.
+ 1 = BAR is an I/O BAR. */
+ uint32_t typ : 2; /**< [ 2: 1](RO/H) BAR type. 0x0 if not implemented or PCCPF_XXX_VSEC_SCTL[EA] is set, else 0x2:
+ 0x0 = 32-bit BAR, or BAR not present.
+ 0x2 = 64-bit BAR. */
+ uint32_t pf : 1; /**< [ 3: 3](RO) Prefetchable. */
+ uint32_t reserved_4_15 : 12;
+ uint32_t lbab : 16; /**< [ 31: 16](R/W/H) Lower bits of the VF BAR 0 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__vfbar0_rbsz and
+ tie__vfbar0_offset. */
+#endif /* Word 0 - End */
+ } cn83xx;
+ /* struct bdk_pccpf_xxx_sriov_bar0l_cn83xx cn88xxp2; */
+};
+typedef union bdk_pccpf_xxx_sriov_bar0l bdk_pccpf_xxx_sriov_bar0l_t;
+
+#define BDK_PCCPF_XXX_SRIOV_BAR0L BDK_PCCPF_XXX_SRIOV_BAR0L_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_SRIOV_BAR0L_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_SRIOV_BAR0L_FUNC(void)
+{
+ return 0x1a4;
+}
+
+#define typedef_BDK_PCCPF_XXX_SRIOV_BAR0L bdk_pccpf_xxx_sriov_bar0l_t
+#define bustype_BDK_PCCPF_XXX_SRIOV_BAR0L BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_SRIOV_BAR0L "PCCPF_XXX_SRIOV_BAR0L"
+#define busnum_BDK_PCCPF_XXX_SRIOV_BAR0L 0
+#define arguments_BDK_PCCPF_XXX_SRIOV_BAR0L -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_sriov_bar0u
+ *
+ * PCC PF SR-IOV BAR 0 Upper Register
+ */
+union bdk_pccpf_xxx_sriov_bar0u
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_sriov_bar0u_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pccpf_xxx_sriov_bar0u_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ubab : 32; /**< [ 31: 0](R/W/H) Upper bits of the VF BAR 0 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__vfbar0_rbsz and tie__vfbar0_offset. */
+#else /* Word 0 - Little Endian */
+ uint32_t ubab : 32; /**< [ 31: 0](R/W/H) Upper bits of the VF BAR 0 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__vfbar0_rbsz and tie__vfbar0_offset. */
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_pccpf_xxx_sriov_bar0u_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t bar : 32; /**< [ 31: 0](RO) Always zero. Enhanced allocation used instead of BARs. */
+#else /* Word 0 - Little Endian */
+ uint32_t bar : 32; /**< [ 31: 0](RO) Always zero. Enhanced allocation used instead of BARs. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_pccpf_xxx_sriov_bar0u bdk_pccpf_xxx_sriov_bar0u_t;
+
+#define BDK_PCCPF_XXX_SRIOV_BAR0U BDK_PCCPF_XXX_SRIOV_BAR0U_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_SRIOV_BAR0U_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_SRIOV_BAR0U_FUNC(void)
+{
+ return 0x1a8;
+}
+
+#define typedef_BDK_PCCPF_XXX_SRIOV_BAR0U bdk_pccpf_xxx_sriov_bar0u_t
+#define bustype_BDK_PCCPF_XXX_SRIOV_BAR0U BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_SRIOV_BAR0U "PCCPF_XXX_SRIOV_BAR0U"
+#define busnum_BDK_PCCPF_XXX_SRIOV_BAR0U 0
+#define arguments_BDK_PCCPF_XXX_SRIOV_BAR0U -1,-1,-1,-1
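+
+/* Illustrative sketch, not part of the BDK import: on CN8XXX the VF BAR 0
+   base address is split across the two registers above ([LBAB] carries
+   address bits <31:16> and [UBAB] bits <63:32>), so the 64-bit base
+   reassembles as below. On CN9XXX both registers read zero and enhanced
+   allocation is used instead. */
+static inline uint64_t example_sriov_bar0_base(uint32_t lo_raw, uint32_t hi_raw)
+{
+    bdk_pccpf_xxx_sriov_bar0l_t lo;
+    bdk_pccpf_xxx_sriov_bar0u_t hi;
+    lo.u = lo_raw;
+    hi.u = hi_raw;
+    return ((uint64_t)hi.cn8.ubab << 32) | ((uint64_t)lo.cn81xx.lbab << 16);
+}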
+
+/**
+ * Register (PCCPF) pccpf_xxx_sriov_bar2l
+ *
+ * PCC PF SR-IOV BAR 2 Lower Register
+ */
+union bdk_pccpf_xxx_sriov_bar2l
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_sriov_bar2l_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pccpf_xxx_sriov_bar2l_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t lbab : 16; /**< [ 31: 16](R/W/H) Lower bits of the VF BAR 2 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__vfbar2_rbsz and tie__vfbar2_offset. */
+ uint32_t reserved_4_15 : 12;
+ uint32_t pf : 1; /**< [ 3: 3](RO) Prefetchable. */
+ uint32_t typ : 2; /**< [ 2: 1](RO) BAR type. 0x0 if not implemented, else 0x2:
+ 0x0 = 32-bit BAR, or BAR not present.
+ 0x2 = 64-bit BAR. */
+ uint32_t mspc : 1; /**< [ 0: 0](RO) Memory space indicator.
+ 0 = BAR is a memory BAR.
+ 1 = BAR is an I/O BAR. */
+#else /* Word 0 - Little Endian */
+ uint32_t mspc : 1; /**< [ 0: 0](RO) Memory space indicator.
+ 0 = BAR is a memory BAR.
+ 1 = BAR is an I/O BAR. */
+ uint32_t typ : 2; /**< [ 2: 1](RO) BAR type. 0x0 if not implemented, else 0x2:
+ 0x0 = 32-bit BAR, or BAR not present.
+ 0x2 = 64-bit BAR. */
+ uint32_t pf : 1; /**< [ 3: 3](RO) Prefetchable. */
+ uint32_t reserved_4_15 : 12;
+ uint32_t lbab : 16; /**< [ 31: 16](R/W/H) Lower bits of the VF BAR 2 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__vfbar2_rbsz and tie__vfbar2_offset. */
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_pccpf_xxx_sriov_bar2l_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t bar : 32; /**< [ 31: 0](RO) Always zero. Enhanced allocation used instead of BARs. */
+#else /* Word 0 - Little Endian */
+ uint32_t bar : 32; /**< [ 31: 0](RO) Always zero. Enhanced allocation used instead of BARs. */
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_pccpf_xxx_sriov_bar2l_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t lbab : 16; /**< [ 31: 16](R/W/H) Lower bits of the VF BAR 2 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__vfbar2_rbsz and tie__vfbar2_offset. */
+ uint32_t reserved_4_15 : 12;
+ uint32_t pf : 1; /**< [ 3: 3](RO) Prefetchable. */
+ uint32_t typ : 2; /**< [ 2: 1](RO/H) BAR type. 0x0 if not implemented or PCCPF_XXX_VSEC_SCTL[EA] is set, else 0x2:
+ 0x0 = 32-bit BAR, or BAR not present.
+ 0x2 = 64-bit BAR. */
+ uint32_t mspc : 1; /**< [ 0: 0](RO) Memory space indicator.
+ 0 = BAR is a memory BAR.
+ 1 = BAR is an I/O BAR. */
+#else /* Word 0 - Little Endian */
+ uint32_t mspc : 1; /**< [ 0: 0](RO) Memory space indicator.
+ 0 = BAR is a memory BAR.
+ 1 = BAR is an I/O BAR. */
+ uint32_t typ : 2; /**< [ 2: 1](RO/H) BAR type. 0x0 if not implemented or PCCPF_XXX_VSEC_SCTL[EA] is set, else 0x2:
+ 0x0 = 32-bit BAR, or BAR not present.
+ 0x2 = 64-bit BAR. */
+ uint32_t pf : 1; /**< [ 3: 3](RO) Prefetchable. */
+ uint32_t reserved_4_15 : 12;
+ uint32_t lbab : 16; /**< [ 31: 16](R/W/H) Lower bits of the VF BAR 2 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__vfbar2_rbsz and tie__vfbar2_offset. */
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_pccpf_xxx_sriov_bar2l_cn81xx cn83xx; */
+ /* struct bdk_pccpf_xxx_sriov_bar2l_cn81xx cn88xxp2; */
+};
+typedef union bdk_pccpf_xxx_sriov_bar2l bdk_pccpf_xxx_sriov_bar2l_t;
+
+#define BDK_PCCPF_XXX_SRIOV_BAR2L BDK_PCCPF_XXX_SRIOV_BAR2L_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_SRIOV_BAR2L_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_SRIOV_BAR2L_FUNC(void)
+{
+ return 0x1ac;
+}
+
+#define typedef_BDK_PCCPF_XXX_SRIOV_BAR2L bdk_pccpf_xxx_sriov_bar2l_t
+#define bustype_BDK_PCCPF_XXX_SRIOV_BAR2L BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_SRIOV_BAR2L "PCCPF_XXX_SRIOV_BAR2L"
+#define busnum_BDK_PCCPF_XXX_SRIOV_BAR2L 0
+#define arguments_BDK_PCCPF_XXX_SRIOV_BAR2L -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_sriov_bar2u
+ *
+ * PCC PF SR-IOV BAR 2 Upper Register
+ */
+union bdk_pccpf_xxx_sriov_bar2u
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_sriov_bar2u_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pccpf_xxx_sriov_bar2u_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ubab : 32; /**< [ 31: 0](R/W/H) Upper bits of the VF BAR 2 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__vfbar2_rbsz and tie__vfbar2_offset. */
+#else /* Word 0 - Little Endian */
+ uint32_t ubab : 32; /**< [ 31: 0](R/W/H) Upper bits of the VF BAR 2 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__vfbar2_rbsz and tie__vfbar2_offset. */
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_pccpf_xxx_sriov_bar2u_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t bar : 32; /**< [ 31: 0](RO) Always zero. Enhanced allocation used instead of BARs. */
+#else /* Word 0 - Little Endian */
+ uint32_t bar : 32; /**< [ 31: 0](RO) Always zero. Enhanced allocation used instead of BARs. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_pccpf_xxx_sriov_bar2u bdk_pccpf_xxx_sriov_bar2u_t;
+
+#define BDK_PCCPF_XXX_SRIOV_BAR2U BDK_PCCPF_XXX_SRIOV_BAR2U_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_SRIOV_BAR2U_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_SRIOV_BAR2U_FUNC(void)
+{
+ return 0x1b0;
+}
+
+#define typedef_BDK_PCCPF_XXX_SRIOV_BAR2U bdk_pccpf_xxx_sriov_bar2u_t
+#define bustype_BDK_PCCPF_XXX_SRIOV_BAR2U BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_SRIOV_BAR2U "PCCPF_XXX_SRIOV_BAR2U"
+#define busnum_BDK_PCCPF_XXX_SRIOV_BAR2U 0
+#define arguments_BDK_PCCPF_XXX_SRIOV_BAR2U -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_sriov_bar4l
+ *
+ * PCC PF SR-IOV BAR 4 Lower Register
+ */
+union bdk_pccpf_xxx_sriov_bar4l
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_sriov_bar4l_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pccpf_xxx_sriov_bar4l_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t lbab : 16; /**< [ 31: 16](R/W/H) Lower bits of the VF BAR 4 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__vfbar4_rbsz and tie__vfbar4_offset. */
+ uint32_t reserved_4_15 : 12;
+ uint32_t pf : 1; /**< [ 3: 3](RO) Prefetchable. */
+ uint32_t typ : 2; /**< [ 2: 1](RO) BAR type. 0x0 if not implemented, else 0x2:
+ 0x0 = 32-bit BAR, or BAR not present.
+ 0x2 = 64-bit BAR. */
+ uint32_t mspc : 1; /**< [ 0: 0](RO) Memory space indicator.
+ 0 = BAR is a memory BAR.
+ 1 = BAR is an I/O BAR. */
+#else /* Word 0 - Little Endian */
+ uint32_t mspc : 1; /**< [ 0: 0](RO) Memory space indicator.
+ 0 = BAR is a memory BAR.
+ 1 = BAR is an I/O BAR. */
+ uint32_t typ : 2; /**< [ 2: 1](RO) BAR type. 0x0 if not implemented, else 0x2:
+ 0x0 = 32-bit BAR, or BAR not present.
+ 0x2 = 64-bit BAR. */
+ uint32_t pf : 1; /**< [ 3: 3](RO) Prefetchable. */
+ uint32_t reserved_4_15 : 12;
+ uint32_t lbab : 16; /**< [ 31: 16](R/W/H) Lower bits of the VF BAR 4 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__vfbar4_rbsz and tie__vfbar4_offset. */
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_pccpf_xxx_sriov_bar4l_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t bar : 32; /**< [ 31: 0](RO) Always zero. Enhanced allocation used instead of BARs. */
+#else /* Word 0 - Little Endian */
+ uint32_t bar : 32; /**< [ 31: 0](RO) Always zero. Enhanced allocation used instead of BARs. */
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_pccpf_xxx_sriov_bar4l_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t lbab : 16; /**< [ 31: 16](R/W/H) Lower bits of the VF BAR 4 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__vfbar4_rbsz and tie__vfbar4_offset. */
+ uint32_t reserved_4_15 : 12;
+ uint32_t pf : 1; /**< [ 3: 3](RO) Prefetchable. */
+ uint32_t typ : 2; /**< [ 2: 1](RO/H) BAR type. 0x0 if not implemented or PCCPF_XXX_VSEC_SCTL[EA] is set, else 0x2:
+ 0x0 = 32-bit BAR, or BAR not present.
+ 0x2 = 64-bit BAR. */
+ uint32_t mspc : 1; /**< [ 0: 0](RO) Memory space indicator.
+ 0 = BAR is a memory BAR.
+ 1 = BAR is an I/O BAR. */
+#else /* Word 0 - Little Endian */
+ uint32_t mspc : 1; /**< [ 0: 0](RO) Memory space indicator.
+ 0 = BAR is a memory BAR.
+ 1 = BAR is an I/O BAR. */
+ uint32_t typ : 2; /**< [ 2: 1](RO/H) BAR type. 0x0 if not implemented or PCCPF_XXX_VSEC_SCTL[EA] is set, else 0x2:
+ 0x0 = 32-bit BAR, or BAR not present.
+ 0x2 = 64-bit BAR. */
+ uint32_t pf : 1; /**< [ 3: 3](RO) Prefetchable. */
+ uint32_t reserved_4_15 : 12;
+ uint32_t lbab : 16; /**< [ 31: 16](R/W/H) Lower bits of the VF BAR 4 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__vfbar4_rbsz and tie__vfbar4_offset. */
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_pccpf_xxx_sriov_bar4l_cn81xx cn83xx; */
+ /* struct bdk_pccpf_xxx_sriov_bar4l_cn81xx cn88xxp2; */
+};
+typedef union bdk_pccpf_xxx_sriov_bar4l bdk_pccpf_xxx_sriov_bar4l_t;
+
+#define BDK_PCCPF_XXX_SRIOV_BAR4L BDK_PCCPF_XXX_SRIOV_BAR4L_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_SRIOV_BAR4L_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_SRIOV_BAR4L_FUNC(void)
+{
+ return 0x1b4;
+}
+
+#define typedef_BDK_PCCPF_XXX_SRIOV_BAR4L bdk_pccpf_xxx_sriov_bar4l_t
+#define bustype_BDK_PCCPF_XXX_SRIOV_BAR4L BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_SRIOV_BAR4L "PCCPF_XXX_SRIOV_BAR4L"
+#define busnum_BDK_PCCPF_XXX_SRIOV_BAR4L 0
+#define arguments_BDK_PCCPF_XXX_SRIOV_BAR4L -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_sriov_bar4u
+ *
+ * PCC PF SR-IOV BAR 4 Upper Register
+ */
+union bdk_pccpf_xxx_sriov_bar4u
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_sriov_bar4u_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pccpf_xxx_sriov_bar4u_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ubab : 32; /**< [ 31: 0](R/W/H) Upper bits of the VF BAR 4 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__vfbar4_rbsz and tie__vfbar4_offset. */
+#else /* Word 0 - Little Endian */
+ uint32_t ubab : 32; /**< [ 31: 0](R/W/H) Upper bits of the VF BAR 4 base address. See additional BAR related notes in
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__vfbar4_rbsz and tie__vfbar4_offset. */
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_pccpf_xxx_sriov_bar4u_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t bar : 32; /**< [ 31: 0](RO) Always zero. Enhanced allocation used instead of BARs. */
+#else /* Word 0 - Little Endian */
+ uint32_t bar : 32; /**< [ 31: 0](RO) Always zero. Enhanced allocation used instead of BARs. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_pccpf_xxx_sriov_bar4u bdk_pccpf_xxx_sriov_bar4u_t;
+
+#define BDK_PCCPF_XXX_SRIOV_BAR4U BDK_PCCPF_XXX_SRIOV_BAR4U_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_SRIOV_BAR4U_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_SRIOV_BAR4U_FUNC(void)
+{
+ return 0x1b8;
+}
+
+#define typedef_BDK_PCCPF_XXX_SRIOV_BAR4U bdk_pccpf_xxx_sriov_bar4u_t
+#define bustype_BDK_PCCPF_XXX_SRIOV_BAR4U BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_SRIOV_BAR4U "PCCPF_XXX_SRIOV_BAR4U"
+#define busnum_BDK_PCCPF_XXX_SRIOV_BAR4U 0
+#define arguments_BDK_PCCPF_XXX_SRIOV_BAR4U -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_sriov_cap
+ *
+ * PCC PF SR-IOV Capability Register
+ */
+union bdk_pccpf_xxx_sriov_cap
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_sriov_cap_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t vfmimn : 11; /**< [ 31: 21](RO) VF migration interrupt message number. */
+ uint32_t reserved_2_20 : 19;
+ uint32_t arichp : 1; /**< [ 1: 1](RO) ARI capable hierarchy preserved. */
+ uint32_t vfmc : 1; /**< [ 0: 0](RO) VF migration capable. */
+#else /* Word 0 - Little Endian */
+ uint32_t vfmc : 1; /**< [ 0: 0](RO) VF migration capable. */
+ uint32_t arichp : 1; /**< [ 1: 1](RO) ARI capable hierarchy preserved. */
+ uint32_t reserved_2_20 : 19;
+ uint32_t vfmimn : 11; /**< [ 31: 21](RO) VF migration interrupt message number. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_sriov_cap_s cn; */
+};
+typedef union bdk_pccpf_xxx_sriov_cap bdk_pccpf_xxx_sriov_cap_t;
+
+#define BDK_PCCPF_XXX_SRIOV_CAP BDK_PCCPF_XXX_SRIOV_CAP_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_SRIOV_CAP_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_SRIOV_CAP_FUNC(void)
+{
+ return 0x184;
+}
+
+#define typedef_BDK_PCCPF_XXX_SRIOV_CAP bdk_pccpf_xxx_sriov_cap_t
+#define bustype_BDK_PCCPF_XXX_SRIOV_CAP BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_SRIOV_CAP "PCCPF_XXX_SRIOV_CAP"
+#define busnum_BDK_PCCPF_XXX_SRIOV_CAP 0
+#define arguments_BDK_PCCPF_XXX_SRIOV_CAP -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_sriov_cap_hdr
+ *
+ * PCC PF SR-IOV Capability Header Register
+ * This register is the header of the 64-byte PCI SR-IOV capability structure.
+ */
+union bdk_pccpf_xxx_sriov_cap_hdr
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_sriov_cap_hdr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t nco : 12; /**< [ 31: 20](RO) Next capability offset. None. */
+ uint32_t cv : 4; /**< [ 19: 16](RO) Capability version. */
+ uint32_t pcieec : 16; /**< [ 15: 0](RO) PCIE extended capability. */
+#else /* Word 0 - Little Endian */
+ uint32_t pcieec : 16; /**< [ 15: 0](RO) PCIE extended capability. */
+ uint32_t cv : 4; /**< [ 19: 16](RO) Capability version. */
+ uint32_t nco : 12; /**< [ 31: 20](RO) Next capability offset. None. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_sriov_cap_hdr_s cn; */
+};
+typedef union bdk_pccpf_xxx_sriov_cap_hdr bdk_pccpf_xxx_sriov_cap_hdr_t;
+
+#define BDK_PCCPF_XXX_SRIOV_CAP_HDR BDK_PCCPF_XXX_SRIOV_CAP_HDR_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_SRIOV_CAP_HDR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_SRIOV_CAP_HDR_FUNC(void)
+{
+ return 0x180;
+}
+
+#define typedef_BDK_PCCPF_XXX_SRIOV_CAP_HDR bdk_pccpf_xxx_sriov_cap_hdr_t
+#define bustype_BDK_PCCPF_XXX_SRIOV_CAP_HDR BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_SRIOV_CAP_HDR "PCCPF_XXX_SRIOV_CAP_HDR"
+#define busnum_BDK_PCCPF_XXX_SRIOV_CAP_HDR 0
+#define arguments_BDK_PCCPF_XXX_SRIOV_CAP_HDR -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_sriov_ctl
+ *
+ * PCC PF SR-IOV Control/Status Register
+ * This register is reset on a chip domain reset.
+ */
+union bdk_pccpf_xxx_sriov_ctl
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_sriov_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_17_31 : 15;
+ uint32_t ms : 1; /**< [ 16: 16](RO) VF migration status. */
+ uint32_t reserved_5_15 : 11;
+ uint32_t ach : 1; /**< [ 4: 4](RO) ARI capable hierarchy. */
+ uint32_t mse : 1; /**< [ 3: 3](RO) VF MSE. Memory space enable; always on. */
+ uint32_t mie : 1; /**< [ 2: 2](RO) VF migration interrupt enable. */
+ uint32_t me : 1; /**< [ 1: 1](RO) VF migration enable. */
+ uint32_t vfe : 1; /**< [ 0: 0](RO) VF enable. */
+#else /* Word 0 - Little Endian */
+ uint32_t vfe : 1; /**< [ 0: 0](RO) VF enable. */
+ uint32_t me : 1; /**< [ 1: 1](RO) VF migration enable. */
+ uint32_t mie : 1; /**< [ 2: 2](RO) VF migration interrupt enable. */
+ uint32_t mse : 1; /**< [ 3: 3](RO) VF MSE. Memory space enable; always on. */
+ uint32_t ach : 1; /**< [ 4: 4](RO) ARI capable hierarchy. */
+ uint32_t reserved_5_15 : 11;
+ uint32_t ms : 1; /**< [ 16: 16](RO) VF migration status. */
+ uint32_t reserved_17_31 : 15;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_sriov_ctl_s cn8; */
+ struct bdk_pccpf_xxx_sriov_ctl_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_17_31 : 15;
+ uint32_t ms : 1; /**< [ 16: 16](RO) VF migration status. */
+ uint32_t reserved_5_15 : 11;
+ uint32_t ach : 1; /**< [ 4: 4](RO) ARI capable hierarchy. */
+ uint32_t mse : 1; /**< [ 3: 3](RO) VF MSE. Memory space enable; always on. */
+ uint32_t mie : 1; /**< [ 2: 2](RO) VF migration interrupt enable. */
+ uint32_t me : 1; /**< [ 1: 1](RO) VF migration enable. */
+ uint32_t vfe : 1; /**< [ 0: 0](R/W) VF enable. If PCCPF_XXX_E_DEV_CAP[FLR] is clear, this bit is always set and
+ writes have no effect; otherwise it resets to zero and is writable.
+
+ When clear, PCCVF_XXX_* CSRs are reset and reads and writes to them are RAO/WI.
+
+ Internal:
+ When clear, forces PCCVF_XXX_CMD[ME] = pcc__blk_masterena = 0. */
+#else /* Word 0 - Little Endian */
+ uint32_t vfe : 1; /**< [ 0: 0](R/W) VF enable. If PCCPF_XXX_E_DEV_CAP[FLR] is clear, this bit is always set and
+ writes have no effect; otherwise it resets to zero and is writable.
+
+ When clear, PCCVF_XXX_* CSRs are reset and reads and writes to them are RAO/WI.
+
+ Internal:
+ When clear, forces PCCVF_XXX_CMD[ME] = pcc__blk_masterena = 0. */
+ uint32_t me : 1; /**< [ 1: 1](RO) VF migration enable. */
+ uint32_t mie : 1; /**< [ 2: 2](RO) VF migration interrupt enable. */
+ uint32_t mse : 1; /**< [ 3: 3](RO) VF MSE. Memory space enable; always on. */
+ uint32_t ach : 1; /**< [ 4: 4](RO) ARI capable hierarchy. */
+ uint32_t reserved_5_15 : 11;
+ uint32_t ms : 1; /**< [ 16: 16](RO) VF migration status. */
+ uint32_t reserved_17_31 : 15;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_pccpf_xxx_sriov_ctl bdk_pccpf_xxx_sriov_ctl_t;
+
+#define BDK_PCCPF_XXX_SRIOV_CTL BDK_PCCPF_XXX_SRIOV_CTL_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_SRIOV_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_SRIOV_CTL_FUNC(void)
+{
+ return 0x188;
+}
+
+#define typedef_BDK_PCCPF_XXX_SRIOV_CTL bdk_pccpf_xxx_sriov_ctl_t
+#define bustype_BDK_PCCPF_XXX_SRIOV_CTL BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_SRIOV_CTL "PCCPF_XXX_SRIOV_CTL"
+#define busnum_BDK_PCCPF_XXX_SRIOV_CTL 0
+#define arguments_BDK_PCCPF_XXX_SRIOV_CTL -1,-1,-1,-1
+
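+/* Usage sketch (editorial, hypothetical accessors as in the earlier sketch):
+ * on CN9XXX parts with PCCPF_XXX_E_DEV_CAP[FLR] set, [VFE] is writable and
+ * gates the whole PCCVF_XXX_* register space, so enabling VFs is a plain
+ * read-modify-write (the remaining fields are read-only): */
+#if 0
+#include <stdint.h>
+
+extern uint32_t read_pccpf_csr(uint64_t offset);          /* hypothetical */
+extern void write_pccpf_csr(uint64_t offset, uint32_t v); /* hypothetical */
+
+static void enable_vfs(void)
+{
+    bdk_pccpf_xxx_sriov_ctl_t ctl;
+    ctl.u = read_pccpf_csr(BDK_PCCPF_XXX_SRIOV_CTL);
+    ctl.cn9.vfe = 1; /* read-only (always set) on parts without FLR support */
+    write_pccpf_csr(BDK_PCCPF_XXX_SRIOV_CTL, ctl.u);
+}
+#endif
+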
+/**
+ * Register (PCCPF) pccpf_xxx_sriov_dev
+ *
+ * PCC PF SR-IOV VF Device ID Register
+ */
+union bdk_pccpf_xxx_sriov_dev
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_sriov_dev_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t vfdev : 16; /**< [ 31: 16](RO) VF device ID. \<15:8\> is PCC_PROD_E::GEN. \<7:0\> enumerated by PCC_DEV_IDL_E.
+ e.g. 0xA033 for RNM_VF.
+
+ Internal:
+ Unit from PCC's tie__vfunitid. */
+ uint32_t reserved_0_15 : 16;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_15 : 16;
+ uint32_t vfdev : 16; /**< [ 31: 16](RO) VF device ID. \<15:8\> is PCC_PROD_E::GEN. \<7:0\> enumerated by PCC_DEV_IDL_E.
+ e.g. 0xA033 for RNM_VF.
+
+ Internal:
+ Unit from PCC's tie__vfunitid. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_sriov_dev_s cn8; */
+ struct bdk_pccpf_xxx_sriov_dev_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t vfdev : 16; /**< [ 31: 16](RO/H) VF device ID. \<15:8\> is PCC_PROD_E::GEN. \<7:0\> enumerated by PCC_DEV_IDL_E.
+ e.g. 0xA033 for RNM's VF (PCC_DEV_IDL_E::RNM_VF).
+
+ Internal:
+ Unit from PCC's tie__vfunitid. */
+ uint32_t reserved_0_15 : 16;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_15 : 16;
+ uint32_t vfdev : 16; /**< [ 31: 16](RO/H) VF device ID. \<15:8\> is PCC_PROD_E::GEN. \<7:0\> enumerated by PCC_DEV_IDL_E.
+ e.g. 0xA033 for RNM's VF (PCC_DEV_IDL_E::RNM_VF).
+
+ Internal:
+ Unit from PCC's tie__vfunitid. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_pccpf_xxx_sriov_dev bdk_pccpf_xxx_sriov_dev_t;
+
+#define BDK_PCCPF_XXX_SRIOV_DEV BDK_PCCPF_XXX_SRIOV_DEV_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_SRIOV_DEV_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_SRIOV_DEV_FUNC(void)
+{
+ return 0x198;
+}
+
+#define typedef_BDK_PCCPF_XXX_SRIOV_DEV bdk_pccpf_xxx_sriov_dev_t
+#define bustype_BDK_PCCPF_XXX_SRIOV_DEV BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_SRIOV_DEV "PCCPF_XXX_SRIOV_DEV"
+#define busnum_BDK_PCCPF_XXX_SRIOV_DEV 0
+#define arguments_BDK_PCCPF_XXX_SRIOV_DEV -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_sriov_fo
+ *
+ * PCC PF SR-IOV First VF Offset/VF Stride Register
+ */
+union bdk_pccpf_xxx_sriov_fo
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_sriov_fo_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t vfs : 16; /**< [ 31: 16](RO) VF stride. */
+ uint32_t fo : 16; /**< [ 15: 0](RO) First VF offset. */
+#else /* Word 0 - Little Endian */
+ uint32_t fo : 16; /**< [ 15: 0](RO) First VF offset. */
+ uint32_t vfs : 16; /**< [ 31: 16](RO) VF stride. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_sriov_fo_s cn; */
+};
+typedef union bdk_pccpf_xxx_sriov_fo bdk_pccpf_xxx_sriov_fo_t;
+
+#define BDK_PCCPF_XXX_SRIOV_FO BDK_PCCPF_XXX_SRIOV_FO_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_SRIOV_FO_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_SRIOV_FO_FUNC(void)
+{
+ return 0x194;
+}
+
+#define typedef_BDK_PCCPF_XXX_SRIOV_FO bdk_pccpf_xxx_sriov_fo_t
+#define bustype_BDK_PCCPF_XXX_SRIOV_FO BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_SRIOV_FO "PCCPF_XXX_SRIOV_FO"
+#define busnum_BDK_PCCPF_XXX_SRIOV_FO 0
+#define arguments_BDK_PCCPF_XXX_SRIOV_FO -1,-1,-1,-1
+
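+/* Usage sketch (editorial): per the PCIe SR-IOV specification, [FO] and [VFS]
+ * locate the VFs in routing-ID space; VF n (1-based) of a PF with routing ID
+ * pf_rid lives at pf_rid + FO + (n-1) * VFS. Illustrated: */
+#if 0
+#include <stdint.h>
+
+static uint16_t vf_routing_id(uint16_t pf_rid, bdk_pccpf_xxx_sriov_fo_t fo,
+                              unsigned int vf_num /* 1-based */)
+{
+    return (uint16_t)(pf_rid + fo.s.fo + (vf_num - 1) * fo.s.vfs);
+}
+#endif
+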
+/**
+ * Register (PCCPF) pccpf_xxx_sriov_nvf
+ *
+ * PCC PF SR-IOV Number of VFs/Function Dependency Link Register
+ */
+union bdk_pccpf_xxx_sriov_nvf
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_sriov_nvf_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t fdl : 8; /**< [ 23: 16](RO) Function dependency link. Only a single PF 0 exists. */
+ uint32_t nvf : 16; /**< [ 15: 0](RO) Number of VFs that are visible.
+ Internal:
+ From PCC's MSIX_VFS parameter. */
+#else /* Word 0 - Little Endian */
+ uint32_t nvf : 16; /**< [ 15: 0](RO) Number of VFs that are visible.
+ Internal:
+ From PCC's MSIX_VFS parameter. */
+ uint32_t fdl : 8; /**< [ 23: 16](RO) Function dependency link. Only a single PF 0 exists. */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_sriov_nvf_s cn8; */
+ struct bdk_pccpf_xxx_sriov_nvf_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t fdl : 8; /**< [ 23: 16](RO) Function dependency link. Only a single PF 0 exists. */
+ uint32_t nvf : 16; /**< [ 15: 0](RO/H) Number of VFs that are visible.
+ Internal:
+ From PCC generated parameter. For RVU, from RVU_PRIV_PF()_CFG[NVF]. */
+#else /* Word 0 - Little Endian */
+ uint32_t nvf : 16; /**< [ 15: 0](RO/H) Number of VFs that are visible.
+ Internal:
+ From PCC generated parameter. For RVU, from RVU_PRIV_PF()_CFG[NVF]. */
+ uint32_t fdl : 8; /**< [ 23: 16](RO) Function dependency link. Only a single PF 0 exists. */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_pccpf_xxx_sriov_nvf bdk_pccpf_xxx_sriov_nvf_t;
+
+#define BDK_PCCPF_XXX_SRIOV_NVF BDK_PCCPF_XXX_SRIOV_NVF_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_SRIOV_NVF_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_SRIOV_NVF_FUNC(void)
+{
+ return 0x190;
+}
+
+#define typedef_BDK_PCCPF_XXX_SRIOV_NVF bdk_pccpf_xxx_sriov_nvf_t
+#define bustype_BDK_PCCPF_XXX_SRIOV_NVF BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_SRIOV_NVF "PCCPF_XXX_SRIOV_NVF"
+#define busnum_BDK_PCCPF_XXX_SRIOV_NVF 0
+#define arguments_BDK_PCCPF_XXX_SRIOV_NVF -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_sriov_ps
+ *
+ * PCC PF SR-IOV System Page Sizes Register
+ */
+union bdk_pccpf_xxx_sriov_ps
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_sriov_ps_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ps : 32; /**< [ 31: 0](RO) System page size. 1MB, as that is minimum stride between VFs. */
+#else /* Word 0 - Little Endian */
+ uint32_t ps : 32; /**< [ 31: 0](RO) System page size. 1MB, as that is minimum stride between VFs. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_sriov_ps_s cn; */
+};
+typedef union bdk_pccpf_xxx_sriov_ps bdk_pccpf_xxx_sriov_ps_t;
+
+#define BDK_PCCPF_XXX_SRIOV_PS BDK_PCCPF_XXX_SRIOV_PS_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_SRIOV_PS_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_SRIOV_PS_FUNC(void)
+{
+ return 0x1a0;
+}
+
+#define typedef_BDK_PCCPF_XXX_SRIOV_PS bdk_pccpf_xxx_sriov_ps_t
+#define bustype_BDK_PCCPF_XXX_SRIOV_PS BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_SRIOV_PS "PCCPF_XXX_SRIOV_PS"
+#define busnum_BDK_PCCPF_XXX_SRIOV_PS 0
+#define arguments_BDK_PCCPF_XXX_SRIOV_PS -1,-1,-1,-1
+
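+/* Usage sketch (editorial): in the SR-IOV System Page Size register, bit n
+ * selects a page size of 2^(n+12) bytes, so the 1MB size noted above
+ * corresponds to [PS] = 0x100 (bit 8, 2^20 bytes). Decoding with a
+ * GCC/Clang builtin: */
+#if 0
+#include <stdint.h>
+
+static uint64_t sriov_page_size_bytes(bdk_pccpf_xxx_sriov_ps_t ps)
+{
+    if (!ps.s.ps)
+        return 0; /* no page size selected */
+    return 1ULL << (__builtin_ctz(ps.s.ps) + 12); /* lowest set bit n -> 2^(n+12) */
+}
+#endif
+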
+/**
+ * Register (PCCPF) pccpf_xxx_sriov_supps
+ *
+ * PCC PF SR-IOV Supported Page Sizes Register
+ */
+union bdk_pccpf_xxx_sriov_supps
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_sriov_supps_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t supps : 32; /**< [ 31: 0](RO) Supported page sizes. Indicates support for the required 4K, 8K, 64K, 256K, 1M,
+ and 4M page sizes. The fixed BAR assignment makes this field not useful. */
+#else /* Word 0 - Little Endian */
+ uint32_t supps : 32; /**< [ 31: 0](RO) Supported page sizes. Indicates support for the required 4K, 8K, 64K, 256K, 1M,
+ and 4M page sizes. The fixed BAR assignment makes this field not useful. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_sriov_supps_s cn; */
+};
+typedef union bdk_pccpf_xxx_sriov_supps bdk_pccpf_xxx_sriov_supps_t;
+
+#define BDK_PCCPF_XXX_SRIOV_SUPPS BDK_PCCPF_XXX_SRIOV_SUPPS_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_SRIOV_SUPPS_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_SRIOV_SUPPS_FUNC(void)
+{
+ return 0x19c;
+}
+
+#define typedef_BDK_PCCPF_XXX_SRIOV_SUPPS bdk_pccpf_xxx_sriov_supps_t
+#define bustype_BDK_PCCPF_XXX_SRIOV_SUPPS BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_SRIOV_SUPPS "PCCPF_XXX_SRIOV_SUPPS"
+#define busnum_BDK_PCCPF_XXX_SRIOV_SUPPS 0
+#define arguments_BDK_PCCPF_XXX_SRIOV_SUPPS -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_sriov_vfs
+ *
+ * PCC PF SR-IOV Initial VFs/Total VFs Register
+ */
+union bdk_pccpf_xxx_sriov_vfs
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_sriov_vfs_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t tvf : 16; /**< [ 31: 16](RO) Total VFs.
+ Internal:
+ From PCC's MSIX_VFS parameter. */
+ uint32_t ivf : 16; /**< [ 15: 0](RO) Initial VFs.
+ Internal:
+ From PCC's MSIX_VFS parameter. */
+#else /* Word 0 - Little Endian */
+ uint32_t ivf : 16; /**< [ 15: 0](RO) Initial VFs.
+ Internal:
+ From PCC's MSIX_VFS parameter. */
+ uint32_t tvf : 16; /**< [ 31: 16](RO) Total VFs.
+ Internal:
+ From PCC's MSIX_VFS parameter. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_sriov_vfs_s cn8; */
+ struct bdk_pccpf_xxx_sriov_vfs_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t tvf : 16; /**< [ 31: 16](RO) Total VFs.
+ Internal:
+ From PCC generated parameter. For RVU, from RVU_PRIV_CONST[MAX_VFS_PER_PF]. */
+ uint32_t ivf : 16; /**< [ 15: 0](RO/H) Initial VFs.
+ Internal:
+ From PCC generated parameter. For RVU, from RVU_PRIV_PF()_CFG[NVF]. */
+#else /* Word 0 - Little Endian */
+ uint32_t ivf : 16; /**< [ 15: 0](RO/H) Initial VFs.
+ Internal:
+ From PCC generated parameter. For RVU, from RVU_PRIV_PF()_CFG[NVF]. */
+ uint32_t tvf : 16; /**< [ 31: 16](RO) Total VFs.
+ Internal:
+ From PCC generated parameter. For RVU, from RVU_PRIV_CONST[MAX_VFS_PER_PF]. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_pccpf_xxx_sriov_vfs bdk_pccpf_xxx_sriov_vfs_t;
+
+#define BDK_PCCPF_XXX_SRIOV_VFS BDK_PCCPF_XXX_SRIOV_VFS_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_SRIOV_VFS_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_SRIOV_VFS_FUNC(void)
+{
+ return 0x18c;
+}
+
+#define typedef_BDK_PCCPF_XXX_SRIOV_VFS bdk_pccpf_xxx_sriov_vfs_t
+#define bustype_BDK_PCCPF_XXX_SRIOV_VFS BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_SRIOV_VFS "PCCPF_XXX_SRIOV_VFS"
+#define busnum_BDK_PCCPF_XXX_SRIOV_VFS 0
+#define arguments_BDK_PCCPF_XXX_SRIOV_VFS -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_subid
+ *
+ * PCC PF Subsystem ID/Subsystem Vendor ID Register
+ */
+union bdk_pccpf_xxx_subid
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_subid_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ssid : 16; /**< [ 31: 16](RO) Subsystem ID. \<15:8\> enumerated by PCC_PROD_E. \<7:0\> enumerated by PCC_DEV_IDL_E.
+
+ Internal:
+ Unit from PCC's tie__prod and tie__pfunitid. */
+ uint32_t ssvid : 16; /**< [ 15: 0](RO) Subsystem vendor ID. Cavium = 0x177D. */
+#else /* Word 0 - Little Endian */
+ uint32_t ssvid : 16; /**< [ 15: 0](RO) Subsystem vendor ID. Cavium = 0x177D. */
+ uint32_t ssid : 16; /**< [ 31: 16](RO) Subsystem ID. \<15:8\> enumerated by PCC_PROD_E. \<7:0\> enumerated by PCC_DEV_IDL_E.
+
+ Internal:
+ Unit from PCC's tie__prod and tie__pfunitid. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_subid_s cn8; */
+ struct bdk_pccpf_xxx_subid_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ssid : 16; /**< [ 31: 16](RO) Subsystem ID. \<15:8\> enumerated by PCC_PROD_E. \<7:0\> = 0x0.
+
+ Internal:
+ \<15:8\> from PCC's tie__prod. */
+ uint32_t ssvid : 16; /**< [ 15: 0](RO) Subsystem vendor ID. Cavium = 0x177D. */
+#else /* Word 0 - Little Endian */
+ uint32_t ssvid : 16; /**< [ 15: 0](RO) Subsystem vendor ID. Cavium = 0x177D. */
+ uint32_t ssid : 16; /**< [ 31: 16](RO) Subsystem ID. \<15:8\> enumerated by PCC_PROD_E. \<7:0\> = 0x0.
+
+ Internal:
+ \<15:8\> from PCC's tie__prod. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_pccpf_xxx_subid bdk_pccpf_xxx_subid_t;
+
+#define BDK_PCCPF_XXX_SUBID BDK_PCCPF_XXX_SUBID_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_SUBID_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_SUBID_FUNC(void)
+{
+ return 0x2c;
+}
+
+#define typedef_BDK_PCCPF_XXX_SUBID bdk_pccpf_xxx_subid_t
+#define bustype_BDK_PCCPF_XXX_SUBID BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_SUBID "PCCPF_XXX_SUBID"
+#define busnum_BDK_PCCPF_XXX_SUBID 0
+#define arguments_BDK_PCCPF_XXX_SUBID -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_vsec_bar0l
+ *
+ * PCC PF Vendor-Specific Address 0 Lower Register
+ */
+union bdk_pccpf_xxx_vsec_bar0l
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_vsec_bar0l_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t lbab : 16; /**< [ 31: 16](RO) Lower bits of the hard-coded BAR 0 base address; the reset value for
+ PCCPF_XXX_BAR0L[LBAB].
+
+ Internal:
+ From PCC's tie__pfbar0_offset. */
+ uint32_t reserved_0_15 : 16;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_15 : 16;
+ uint32_t lbab : 16; /**< [ 31: 16](RO) Lower bits of the hard-coded BAR 0 base address; the reset value for
+ PCCPF_XXX_BAR0L[LBAB].
+
+ Internal:
+ From PCC's tie__pfbar0_offset. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_vsec_bar0l_s cn; */
+};
+typedef union bdk_pccpf_xxx_vsec_bar0l bdk_pccpf_xxx_vsec_bar0l_t;
+
+#define BDK_PCCPF_XXX_VSEC_BAR0L BDK_PCCPF_XXX_VSEC_BAR0L_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_VSEC_BAR0L_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_VSEC_BAR0L_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x110;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x110;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x118;
+ __bdk_csr_fatal("PCCPF_XXX_VSEC_BAR0L", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCPF_XXX_VSEC_BAR0L bdk_pccpf_xxx_vsec_bar0l_t
+#define bustype_BDK_PCCPF_XXX_VSEC_BAR0L BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_VSEC_BAR0L "PCCPF_XXX_VSEC_BAR0L"
+#define busnum_BDK_PCCPF_XXX_VSEC_BAR0L 0
+#define arguments_BDK_PCCPF_XXX_VSEC_BAR0L -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_vsec_bar0u
+ *
+ * PCC PF Vendor-Specific Address 0 Upper Register
+ */
+union bdk_pccpf_xxx_vsec_bar0u
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_vsec_bar0u_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ubab : 32; /**< [ 31: 0](RO) Upper bits of the hard-coded BAR 0 base address; the reset value for
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__pfbar0_offset. */
+#else /* Word 0 - Little Endian */
+ uint32_t ubab : 32; /**< [ 31: 0](RO) Upper bits of the hard-coded BAR 0 base address; the reset value for
+ PCCPF_XXX_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__pfbar0_offset. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_vsec_bar0u_s cn; */
+};
+typedef union bdk_pccpf_xxx_vsec_bar0u bdk_pccpf_xxx_vsec_bar0u_t;
+
+#define BDK_PCCPF_XXX_VSEC_BAR0U BDK_PCCPF_XXX_VSEC_BAR0U_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_VSEC_BAR0U_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_VSEC_BAR0U_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x114;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x114;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x11c;
+ __bdk_csr_fatal("PCCPF_XXX_VSEC_BAR0U", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCPF_XXX_VSEC_BAR0U bdk_pccpf_xxx_vsec_bar0u_t
+#define bustype_BDK_PCCPF_XXX_VSEC_BAR0U BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_VSEC_BAR0U "PCCPF_XXX_VSEC_BAR0U"
+#define busnum_BDK_PCCPF_XXX_VSEC_BAR0U 0
+#define arguments_BDK_PCCPF_XXX_VSEC_BAR0U -1,-1,-1,-1
+
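+/* Usage sketch (editorial): the VSEC BAR0 lower/upper pair encodes one
+ * hard-coded 64-bit base address, [LBAB] supplying address bits <31:16> and
+ * [UBAB] bits <63:32>. Reassembly, with the hypothetical accessor from the
+ * earlier sketches: */
+#if 0
+#include <stdint.h>
+
+extern uint32_t read_pccpf_csr(uint64_t offset); /* hypothetical */
+
+static uint64_t vsec_bar0_base(void)
+{
+    bdk_pccpf_xxx_vsec_bar0l_t lo;
+    bdk_pccpf_xxx_vsec_bar0u_t hi;
+    lo.u = read_pccpf_csr(BDK_PCCPF_XXX_VSEC_BAR0L);
+    hi.u = read_pccpf_csr(BDK_PCCPF_XXX_VSEC_BAR0U);
+    return ((uint64_t)hi.s.ubab << 32) | ((uint64_t)lo.s.lbab << 16);
+}
+#endif
+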
+/**
+ * Register (PCCPF) pccpf_xxx_vsec_bar2l
+ *
+ * PCC PF Vendor-Specific Address 2 Lower Register
+ */
+union bdk_pccpf_xxx_vsec_bar2l
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_vsec_bar2l_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t lbab : 16; /**< [ 31: 16](RO) Lower bits of the hard-coded BAR 2 base address; the reset value for
+ PCCPF_XXX_BAR2L[LBAB].
+
+ Internal:
+ From PCC's tie__pfbar2_offset. */
+ uint32_t reserved_0_15 : 16;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_15 : 16;
+ uint32_t lbab : 16; /**< [ 31: 16](RO) Lower bits of the hard-coded BAR 2 base address; the reset value for
+ PCCPF_XXX_BAR2L[LBAB].
+
+ Internal:
+ From PCC's tie__pfbar2_offset. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_vsec_bar2l_s cn; */
+};
+typedef union bdk_pccpf_xxx_vsec_bar2l bdk_pccpf_xxx_vsec_bar2l_t;
+
+#define BDK_PCCPF_XXX_VSEC_BAR2L BDK_PCCPF_XXX_VSEC_BAR2L_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_VSEC_BAR2L_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_VSEC_BAR2L_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x118;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x118;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x120;
+ __bdk_csr_fatal("PCCPF_XXX_VSEC_BAR2L", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCPF_XXX_VSEC_BAR2L bdk_pccpf_xxx_vsec_bar2l_t
+#define bustype_BDK_PCCPF_XXX_VSEC_BAR2L BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_VSEC_BAR2L "PCCPF_XXX_VSEC_BAR2L"
+#define busnum_BDK_PCCPF_XXX_VSEC_BAR2L 0
+#define arguments_BDK_PCCPF_XXX_VSEC_BAR2L -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_vsec_bar2u
+ *
+ * PCC PF Vendor-Specific Address 2 Upper Register
+ */
+union bdk_pccpf_xxx_vsec_bar2u
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_vsec_bar2u_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ubab : 32; /**< [ 31: 0](RO) Upper bits of the hard-coded BAR 2 base address; the reset value for
+ PCCPF_XXX_BAR2U[UBAB].
+
+ Internal:
+ From PCC's tie__pfbar2_offset. */
+#else /* Word 0 - Little Endian */
+ uint32_t ubab : 32; /**< [ 31: 0](RO) Upper bits of the hard-coded BAR 2 base address; the reset value for
+ PCCPF_XXX_BAR2U[UBAB].
+
+ Internal:
+ From PCC's tie__pfbar2_offset. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_vsec_bar2u_s cn; */
+};
+typedef union bdk_pccpf_xxx_vsec_bar2u bdk_pccpf_xxx_vsec_bar2u_t;
+
+#define BDK_PCCPF_XXX_VSEC_BAR2U BDK_PCCPF_XXX_VSEC_BAR2U_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_VSEC_BAR2U_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_VSEC_BAR2U_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x11c;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x11c;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x124;
+ __bdk_csr_fatal("PCCPF_XXX_VSEC_BAR2U", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCPF_XXX_VSEC_BAR2U bdk_pccpf_xxx_vsec_bar2u_t
+#define bustype_BDK_PCCPF_XXX_VSEC_BAR2U BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_VSEC_BAR2U "PCCPF_XXX_VSEC_BAR2U"
+#define busnum_BDK_PCCPF_XXX_VSEC_BAR2U 0
+#define arguments_BDK_PCCPF_XXX_VSEC_BAR2U -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_vsec_bar4l
+ *
+ * PCC PF Vendor-Specific Address 4 Lower Register
+ */
+union bdk_pccpf_xxx_vsec_bar4l
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_vsec_bar4l_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t lbab : 16; /**< [ 31: 16](RO) Lower bits of the hard-coded BAR 4 base address; the reset value for
+ PCCPF_XXX_BAR4L[LBAB].
+
+ Internal:
+ From PCC's tie__pfbar4_offset. */
+ uint32_t reserved_0_15 : 16;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_15 : 16;
+ uint32_t lbab : 16; /**< [ 31: 16](RO) Lower bits of the hard-coded BAR 4 base address; the reset value for
+ PCCPF_XXX_BAR4L[LBAB].
+
+ Internal:
+ From PCC's tie__pfbar4_offset. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_vsec_bar4l_s cn; */
+};
+typedef union bdk_pccpf_xxx_vsec_bar4l bdk_pccpf_xxx_vsec_bar4l_t;
+
+#define BDK_PCCPF_XXX_VSEC_BAR4L BDK_PCCPF_XXX_VSEC_BAR4L_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_VSEC_BAR4L_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_VSEC_BAR4L_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x120;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x120;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x128;
+ __bdk_csr_fatal("PCCPF_XXX_VSEC_BAR4L", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCPF_XXX_VSEC_BAR4L bdk_pccpf_xxx_vsec_bar4l_t
+#define bustype_BDK_PCCPF_XXX_VSEC_BAR4L BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_VSEC_BAR4L "PCCPF_XXX_VSEC_BAR4L"
+#define busnum_BDK_PCCPF_XXX_VSEC_BAR4L 0
+#define arguments_BDK_PCCPF_XXX_VSEC_BAR4L -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_vsec_bar4u
+ *
+ * PCC PF Vendor-Specific Address 4 Upper Register
+ */
+union bdk_pccpf_xxx_vsec_bar4u
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_vsec_bar4u_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ubab : 32; /**< [ 31: 0](RO) Upper bits of the hard-coded BAR 4 base address; the reset value for
+ PCCPF_XXX_BAR4U[UBAB].
+
+ Internal:
+ From PCC's tie__pfbar4_offset. */
+#else /* Word 0 - Little Endian */
+ uint32_t ubab : 32; /**< [ 31: 0](RO) Upper bits of the hard-coded BAR 4 base address; the reset value for
+ PCCPF_XXX_BAR4U[UBAB].
+
+ Internal:
+ From PCC's tie__pfbar4_offset. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_vsec_bar4u_s cn; */
+};
+typedef union bdk_pccpf_xxx_vsec_bar4u bdk_pccpf_xxx_vsec_bar4u_t;
+
+#define BDK_PCCPF_XXX_VSEC_BAR4U BDK_PCCPF_XXX_VSEC_BAR4U_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_VSEC_BAR4U_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_VSEC_BAR4U_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x124;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x124;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x12c;
+ __bdk_csr_fatal("PCCPF_XXX_VSEC_BAR4U", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCPF_XXX_VSEC_BAR4U bdk_pccpf_xxx_vsec_bar4u_t
+#define bustype_BDK_PCCPF_XXX_VSEC_BAR4U BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_VSEC_BAR4U "PCCPF_XXX_VSEC_BAR4U"
+#define busnum_BDK_PCCPF_XXX_VSEC_BAR4U 0
+#define arguments_BDK_PCCPF_XXX_VSEC_BAR4U -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_vsec_cap_hdr
+ *
+ * PCC PF Vendor-Specific Capability Header Register
+ * This register is the header of the 64-byte {ProductLine} family PF capability
+ * structure.
+ */
+union bdk_pccpf_xxx_vsec_cap_hdr
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_vsec_cap_hdr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t nco : 12; /**< [ 31: 20](RO) Next capability offset. If SR-IOV is supported as per PCC_DEV_IDL_E, points to the
+ PCCPF_XXX_SRIOV_CAP_HDR, else 0x0. */
+ uint32_t cv : 4; /**< [ 19: 16](RO) Capability version. */
+ uint32_t vsecid : 16; /**< [ 15: 0](RO) PCIE extended capability. Indicates vendor-specific capability. */
+#else /* Word 0 - Little Endian */
+ uint32_t vsecid : 16; /**< [ 15: 0](RO) PCIE extended capability. Indicates vendor-specific capability. */
+ uint32_t cv : 4; /**< [ 19: 16](RO) Capability version. */
+ uint32_t nco : 12; /**< [ 31: 20](RO) Next capability offset. If SR-IOV is supported as per PCC_DEV_IDL_E, points to the
+ PCCPF_XXX_SRIOV_CAP_HDR, else 0x0. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pccpf_xxx_vsec_cap_hdr_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t nco : 12; /**< [ 31: 20](RO) Next capability offset. Points to PCCPF_XXX_AER_CAP_HDR. */
+ uint32_t cv : 4; /**< [ 19: 16](RO) Capability version. */
+ uint32_t vsecid : 16; /**< [ 15: 0](RO) PCIE extended capability. Indicates vendor-specific capability. */
+#else /* Word 0 - Little Endian */
+ uint32_t vsecid : 16; /**< [ 15: 0](RO) PCIE extended capability. Indicates vendor-specific capability. */
+ uint32_t cv : 4; /**< [ 19: 16](RO) Capability version. */
+ uint32_t nco : 12; /**< [ 31: 20](RO) Next capability offset. Points to PCCPF_XXX_AER_CAP_HDR. */
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_pccpf_xxx_vsec_cap_hdr_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t nco : 12; /**< [ 31: 20](RO) Next capability offset. If this device is on a nonzero bus, points to
+ PCCPF_XXX_ARI_CAP_HDR, else 0x0. */
+ uint32_t cv : 4; /**< [ 19: 16](RO) Capability version. */
+ uint32_t vsecid : 16; /**< [ 15: 0](RO) PCIE extended capability. Indicates vendor-specific capability. */
+#else /* Word 0 - Little Endian */
+ uint32_t vsecid : 16; /**< [ 15: 0](RO) PCIE extended capability. Indicates vendor-specific capability. */
+ uint32_t cv : 4; /**< [ 19: 16](RO) Capability version. */
+ uint32_t nco : 12; /**< [ 31: 20](RO) Next capability offset. If this device is on a nonzero bus, points to
+ PCCPF_XXX_ARI_CAP_HDR, else 0x0. */
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_pccpf_xxx_vsec_cap_hdr_s cn88xx; */
+ /* struct bdk_pccpf_xxx_vsec_cap_hdr_cn81xx cn83xx; */
+};
+typedef union bdk_pccpf_xxx_vsec_cap_hdr bdk_pccpf_xxx_vsec_cap_hdr_t;
+
+#define BDK_PCCPF_XXX_VSEC_CAP_HDR BDK_PCCPF_XXX_VSEC_CAP_HDR_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_VSEC_CAP_HDR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_VSEC_CAP_HDR_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x100;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x100;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x108;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x100;
+ __bdk_csr_fatal("PCCPF_XXX_VSEC_CAP_HDR", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCPF_XXX_VSEC_CAP_HDR bdk_pccpf_xxx_vsec_cap_hdr_t
+#define bustype_BDK_PCCPF_XXX_VSEC_CAP_HDR BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_VSEC_CAP_HDR "PCCPF_XXX_VSEC_CAP_HDR"
+#define busnum_BDK_PCCPF_XXX_VSEC_CAP_HDR 0
+#define arguments_BDK_PCCPF_XXX_VSEC_CAP_HDR -1,-1,-1,-1
+
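+/* Usage sketch (editorial): [NCO] chains this VSEC into the standard PCIe
+ * extended capability list rooted at config offset 0x100. A generic list
+ * walk, using a hypothetical 32-bit config-space reader and the usual PCIe
+ * termination rule (offset zero or an all-ones header ends the chain): */
+#if 0
+#include <stdint.h>
+
+extern uint32_t read_cfg32(uint16_t offset); /* hypothetical */
+
+static uint16_t find_ext_cap(uint16_t cap_id)
+{
+    uint16_t off = 0x100;
+    while (off) {
+        uint32_t hdr = read_cfg32(off);
+        if (hdr == 0 || hdr == 0xffffffff)
+            break;
+        if ((hdr & 0xffff) == cap_id) /* bits <15:0>: capability ID */
+            return off;
+        off = (hdr >> 20) & 0xfff;    /* bits <31:20>: next cap offset */
+    }
+    return 0; /* not found */
+}
+#endif
+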
+/**
+ * Register (PCCPF) pccpf_xxx_vsec_ctl
+ *
+ * PCC PF Vendor-Specific Control Register
+ * This register is reset on a chip domain reset.
+ */
+union bdk_pccpf_xxx_vsec_ctl
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_vsec_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t nxtfn_ns : 8; /**< [ 31: 24](R/W) For nonsecure accesses, the value to be presented in PCCPF_XXX_(S)ARI_NXT[NXTFN]
+ indicating the next valid function number for this device.
+ Must be kept as 0x0 for non-MRML PCC devices. */
+ uint32_t reserved_12_23 : 12;
+ uint32_t cor_intn : 1; /**< [ 11: 11](R/W1S/H) Corrected internal error. Writing a one to this bit sets
+ PCCPF_XXX_AER_COR_STATUS[COR_INTN].
+ This is a self-clearing bit and always reads as zero. */
+ uint32_t adv_nfat : 1; /**< [ 10: 10](R/W1S/H) Advisory non-fatal error. Writing a one to this bit sets
+ PCCPF_XXX_AER_COR_STATUS[ADV_NFAT].
+ This is a self-clearing bit and always reads as zero. */
+ uint32_t uncor_intn : 1; /**< [ 9: 9](R/W1S/H) Uncorrected internal error. Writing a one to this bit sets
+ PCCPF_XXX_AER_UNCOR_STATUS[UNCOR_INTN]. This is a self-clearing bit and always
+ reads as zero. */
+ uint32_t poison_tlp : 1; /**< [ 8: 8](R/W1S/H) Poisoned TLP received. Writing a one to this bit sets
+ PCCPF_XXX_AER_UNCOR_STATUS[POISON_TLP]. This is a self-clearing bit and always
+ reads as zero. */
+ uint32_t inst_num : 8; /**< [ 7: 0](RO) Instance number. For blocks with multiple instances, indicates which instance number,
+ otherwise 0x0; may be used to form Linux device numbers. For example, for UART(1) it is 0x1. */
+#else /* Word 0 - Little Endian */
+ uint32_t inst_num : 8; /**< [ 7: 0](RO) Instance number. For blocks with multiple instances, indicates which instance number,
+ otherwise 0x0; may be used to form Linux device numbers. For example, for UART(1) it is 0x1. */
+ uint32_t poison_tlp : 1; /**< [ 8: 8](R/W1S/H) Poisoned TLP received. Writing a one to this bit sets
+ PCCPF_XXX_AER_UNCOR_STATUS[POISON_TLP]. This is a self-clearing bit and always
+ reads as zero. */
+ uint32_t uncor_intn : 1; /**< [ 9: 9](R/W1S/H) Uncorrected internal error. Writing a one to this bit sets
+ PCCPF_XXX_AER_UNCOR_STATUS[UNCOR_INTN]. This is a self-clearing bit and always
+ reads as zero. */
+ uint32_t adv_nfat : 1; /**< [ 10: 10](R/W1S/H) Advisory non-fatal error. Writing a one to this bit sets
+ PCCPF_XXX_AER_COR_STATUS[ADV_NFAT].
+ This is a self-clearing bit and always reads as zero. */
+ uint32_t cor_intn : 1; /**< [ 11: 11](R/W1S/H) Corrected internal error. Writing a one to this bit sets
+ PCCPF_XXX_AER_COR_STATUS[COR_INTN].
+ This is a self-clearing bit and always reads as zero. */
+ uint32_t reserved_12_23 : 12;
+ uint32_t nxtfn_ns : 8; /**< [ 31: 24](R/W) For nonsecure accesses, the value to be presented in PCCPF_XXX_(S)ARI_NXT[NXTFN]
+ indicating the next valid function number for this device.
+ Must be kept as 0x0 for non-MRML PCC devices. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pccpf_xxx_vsec_ctl_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t nxtfn_ns : 8; /**< [ 31: 24](R/W) For nonsecure accesses, the value to be presented in PCCPF_XXX_(S)ARI_NXT[NXTFN]
+ indicating the next valid function number for this device.
+ Must be kept as 0x0 for non-MRML PCC devices. */
+ uint32_t reserved_8_23 : 16;
+ uint32_t inst_num : 8; /**< [ 7: 0](RO) Instance number. For blocks with multiple instances, indicates which instance number,
+ otherwise 0x0; may be used to form Linux device numbers. For example, for UART(1) it is 0x1. */
+#else /* Word 0 - Little Endian */
+ uint32_t inst_num : 8; /**< [ 7: 0](RO) Instance number. For blocks with multiple instances, indicates which instance number,
+ otherwise 0x0; may be used to form Linux device numbers. For example, for UART(1) it is 0x1. */
+ uint32_t reserved_8_23 : 16;
+ uint32_t nxtfn_ns : 8; /**< [ 31: 24](R/W) For nonsecure accesses, the value to be presented in PCCPF_XXX_(S)ARI_NXT[NXTFN]
+ indicating the next valid function number for this device.
+ Must be kept as 0x0 for non-MRML PCC devices. */
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_pccpf_xxx_vsec_ctl_s cn9; */
+};
+typedef union bdk_pccpf_xxx_vsec_ctl bdk_pccpf_xxx_vsec_ctl_t;
+
+#define BDK_PCCPF_XXX_VSEC_CTL BDK_PCCPF_XXX_VSEC_CTL_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_VSEC_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_VSEC_CTL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x108;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x108;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x110;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x108;
+ __bdk_csr_fatal("PCCPF_XXX_VSEC_CTL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCPF_XXX_VSEC_CTL bdk_pccpf_xxx_vsec_ctl_t
+#define bustype_BDK_PCCPF_XXX_VSEC_CTL BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_VSEC_CTL "PCCPF_XXX_VSEC_CTL"
+#define busnum_BDK_PCCPF_XXX_VSEC_CTL 0
+#define arguments_BDK_PCCPF_XXX_VSEC_CTL -1,-1,-1,-1
+
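+/* Usage sketch (editorial): on CN9XXX parts the R/W1S/H bits above
+ * ([POISON_TLP], [UNCOR_INTN], [ADV_NFAT], [COR_INTN]) are self-clearing
+ * triggers that set the corresponding AER status bits, which is useful for
+ * exercising error handlers. Injecting a poisoned-TLP error, with the
+ * hypothetical write accessor from earlier sketches (writing zeros to the
+ * other fields is benign here, since [NXTFN_NS] must be kept 0x0 on
+ * non-MRML devices and [INST_NUM] is read-only): */
+#if 0
+#include <stdint.h>
+
+extern void write_pccpf_csr(uint64_t offset, uint32_t v); /* hypothetical */
+
+static void inject_poison_tlp(void)
+{
+    bdk_pccpf_xxx_vsec_ctl_t ctl = { .u = 0 };
+    ctl.s.poison_tlp = 1; /* W1S pulse; reads back as zero */
+    write_pccpf_csr(BDK_PCCPF_XXX_VSEC_CTL, ctl.u);
+}
+#endif
+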
+/**
+ * Register (PCCPF) pccpf_xxx_vsec_id
+ *
+ * PCC PF Vendor-Specific Identification Register
+ */
+union bdk_pccpf_xxx_vsec_id
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_vsec_id_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t len : 12; /**< [ 31: 20](RO) Number of bytes in the entire VSEC structure including PCCPF_XXX_VSEC_CAP_HDR. */
+ uint32_t rev : 4; /**< [ 19: 16](RO) Vendor-specific revision. */
+ uint32_t id : 16; /**< [ 15: 0](RO) Vendor-specific ID. Indicates the {ProductLine} family VSEC ID. */
+#else /* Word 0 - Little Endian */
+ uint32_t id : 16; /**< [ 15: 0](RO) Vendor-specific ID. Indicates the {ProductLine} family VSEC ID. */
+ uint32_t rev : 4; /**< [ 19: 16](RO) Vendor-specific revision. */
+ uint32_t len : 12; /**< [ 31: 20](RO) Number of bytes in the entire VSEC structure including PCCPF_XXX_VSEC_CAP_HDR. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_vsec_id_s cn8; */
+ struct bdk_pccpf_xxx_vsec_id_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t len : 12; /**< [ 31: 20](RO) Number of bytes in the entire VSEC structure including PCCPF_XXX_VSEC_CAP_HDR. */
+ uint32_t rev : 4; /**< [ 19: 16](RO) Vendor-specific revision. */
+ uint32_t id : 16; /**< [ 15: 0](RO) Vendor-specific ID. Indicates the {ProductLine} family PF VSEC ID.
+ Enumerated by PCC_VSECID_E. */
+#else /* Word 0 - Little Endian */
+ uint32_t id : 16; /**< [ 15: 0](RO) Vendor-specific ID. Indicates the {ProductLine} family PF VSEC ID.
+ Enumerated by PCC_VSECID_E. */
+ uint32_t rev : 4; /**< [ 19: 16](RO) Vendor-specific revision. */
+ uint32_t len : 12; /**< [ 31: 20](RO) Number of bytes in the entire VSEC structure including PCCPF_XXX_VSEC_CAP_HDR. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_pccpf_xxx_vsec_id bdk_pccpf_xxx_vsec_id_t;
+
+#define BDK_PCCPF_XXX_VSEC_ID BDK_PCCPF_XXX_VSEC_ID_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_VSEC_ID_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_VSEC_ID_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x104;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x104;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x10c;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x104;
+ __bdk_csr_fatal("PCCPF_XXX_VSEC_ID", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCPF_XXX_VSEC_ID bdk_pccpf_xxx_vsec_id_t
+#define bustype_BDK_PCCPF_XXX_VSEC_ID BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_VSEC_ID "PCCPF_XXX_VSEC_ID"
+#define busnum_BDK_PCCPF_XXX_VSEC_ID 0
+#define arguments_BDK_PCCPF_XXX_VSEC_ID -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_vsec_sctl
+ *
+ * PCC PF Vendor-Specific Secure Control Register
+ * This register is reset on a chip domain reset.
+ */
+union bdk_pccpf_xxx_vsec_sctl
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_vsec_sctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t nxtfn_s : 8; /**< [ 31: 24](SR/W) For secure accesses, the value to be presented in PCCPF_XXX_(S)ARI_NXT[NXTFN] indicating
+ the next valid function number for this device. Must be 0x0 for non-MRML PCC
+ devices. */
+ uint32_t rid : 8; /**< [ 23: 16](SR/W) Revision ID. R/W version of the value to be presented in PCCPF_XXX_REV[RID]. */
+ uint32_t msix_sec_phys : 1; /**< [ 15: 15](SR/W) MSI-X secure physical:
+ _ 0 = Any MSI-X vectors with SECVEC = 1 use the same physical setting as
+ nonsecure vectors, i.e. [MSIX_PHYS].
+ _ 1 = Any MSI-X vectors with SECVEC = 1 are considered physical, regardless
+ of [MSIX_PHYS]. */
+ uint32_t reserved_12_14 : 3;
+ uint32_t gia_timeout : 6; /**< [ 11: 6](SR/W) GIA timeout (2^[GIA_TIMEOUT] clock cycles). Timeout for MSI-X commits. When zero, the
+ wait for commits is disabled. */
+ uint32_t node : 2; /**< [ 5: 4](SRO) Reserved. */
+ uint32_t ea : 1; /**< [ 3: 3](SRO) Reserved. */
+ uint32_t reserved_2 : 1;
+ uint32_t msix_sec : 1; /**< [ 1: 1](SR/W) All MSI-X interrupts are secure. This is equivalent to setting the per-vector secure bit
+ (e.g. GTI_MSIX_VEC()_ADDR[SECVEC]) for all vectors in the block. */
+ uint32_t msix_phys : 1; /**< [ 0: 0](SR/W) MSI-X interrupts are physical.
+ 0 = MSI-X interrupt vector addresses are standard virtual addresses and subject to SMMU
+ address translation.
+ 1 = MSI-X interrupt vector addresses are considered physical addresses and PCC MSI-X
+ interrupt delivery will bypass the SMMU. */
+#else /* Word 0 - Little Endian */
+ uint32_t msix_phys : 1; /**< [ 0: 0](SR/W) MSI-X interrupts are physical.
+ 0 = MSI-X interrupt vector addresses are standard virtual addresses and subject to SMMU
+ address translation.
+ 1 = MSI-X interrupt vector addresses are considered physical addresses and PCC MSI-X
+ interrupt delivery will bypass the SMMU. */
+ uint32_t msix_sec : 1; /**< [ 1: 1](SR/W) All MSI-X interrupts are secure. This is equivalent to setting the per-vector secure bit
+ (e.g. GTI_MSIX_VEC()_ADDR[SECVEC]) for all vectors in the block. */
+ uint32_t reserved_2 : 1;
+ uint32_t ea : 1; /**< [ 3: 3](SRO) Reserved. */
+ uint32_t node : 2; /**< [ 5: 4](SRO) Reserved. */
+ uint32_t gia_timeout : 6; /**< [ 11: 6](SR/W) GIA timeout (2^[GIA_TIMEOUT] clock cycles). Timeout for MSI-X commits. When zero, the
+ wait for commits is disabled. */
+ uint32_t reserved_12_14 : 3;
+ uint32_t msix_sec_phys : 1; /**< [ 15: 15](SR/W) MSI-X secure physical:
+ _ 0 = Any MSI-X vectors with SECVEC = 1 use the same physical setting as
+ nonsecure vectors, i.e. [MSIX_PHYS].
+ _ 1 = Any MSI-X vectors with SECVEC = 1 are considered physical, regardless
+ of [MSIX_PHYS]. */
+ uint32_t rid : 8; /**< [ 23: 16](SR/W) Revision ID. R/W version of the value to be presented in PCCPF_XXX_REV[RID]. */
+ uint32_t nxtfn_s : 8; /**< [ 31: 24](SR/W) For secure accesses, the value to be presented in PCCPF_XXX_(S)ARI_NXT[NXTFN] indicating
+ the next valid function number for this device. Must be 0x0 for non-MRML PCC
+ devices. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pccpf_xxx_vsec_sctl_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t nxtfn_s : 8; /**< [ 31: 24](SR/W) For secure accesses, the value to be presented in PCCPF_XXX_(S)ARI_NXT[NXTFN] indicating
+ the next valid function number for this device. Must be 0x0 for non-MRML PCC
+ devices. */
+ uint32_t rid : 8; /**< [ 23: 16](SR/W) Revision ID. R/W version of the value to be presented in PCCPF_XXX_REV[RID]. */
+ uint32_t reserved_6_15 : 10;
+ uint32_t node : 2; /**< [ 5: 4](SRO) Reserved. */
+ uint32_t ea : 1; /**< [ 3: 3](SRO) Reserved. */
+ uint32_t bcst_rsp : 1; /**< [ 2: 2](SR/W) Reserved, must be 0.
+ Internal:
+ Reserved for future use - Enable this PCC
+ instance as the responder to PCC broadcast reads/writes. */
+ uint32_t msix_sec : 1; /**< [ 1: 1](SR/W) All MSI-X interrupts are secure. This is equivalent to setting the per-vector secure bit
+ (e.g. GTI_MSIX_VEC()_ADDR[SECVEC]) for all vectors in the block. */
+ uint32_t msix_phys : 1; /**< [ 0: 0](SR/W) MSI-X interrupts are physical.
+ 0 = MSI-X interrupt vector addresses are standard virtual addresses and subject to SMMU
+ address translation.
+ 1 = MSI-X interrupt vector addresses are considered physical addresses and PCC MSI-X
+ interrupt delivery will bypass the SMMU. */
+#else /* Word 0 - Little Endian */
+ uint32_t msix_phys : 1; /**< [ 0: 0](SR/W) MSI-X interrupts are physical.
+ 0 = MSI-X interrupt vector addresses are standard virtual addresses and subject to SMMU
+ address translation.
+ 1 = MSI-X interrupt vector addresses are considered physical addresses and PCC MSI-X
+ interrupt delivery will bypass the SMMU. */
+ uint32_t msix_sec : 1; /**< [ 1: 1](SR/W) All MSI-X interrupts are secure. This is equivalent to setting the per-vector secure bit
+ (e.g. GTI_MSIX_VEC()_ADDR[SECVEC]) for all vectors in the block. */
+ uint32_t bcst_rsp : 1; /**< [ 2: 2](SR/W) Reserved, must be 0.
+ Internal:
+ Reserved for future use - Enable this PCC
+ instance as the responder to PCC broadcast reads/writes. */
+ uint32_t ea : 1; /**< [ 3: 3](SRO) Reserved. */
+ uint32_t node : 2; /**< [ 5: 4](SRO) Reserved. */
+ uint32_t reserved_6_15 : 10;
+ uint32_t rid : 8; /**< [ 23: 16](SR/W) Revision ID. R/W version of the value to be presented in PCCPF_XXX_REV[RID]. */
+ uint32_t nxtfn_s : 8; /**< [ 31: 24](SR/W) For secure accesses, the value to be presented in PCCPF_XXX_(S)ARI_NXT[NXTFN] indicating
+ the next valid function number for this device. Must be 0x0 for non-MRML PCC
+ devices. */
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_pccpf_xxx_vsec_sctl_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t nxtfn_s : 8; /**< [ 31: 24](SR/W) For secure accesses, the value to be presented in PCCPF_XXX_(S)ARI_NXT[NXTFN] indicating
+ the next valid function number for this device. Must be 0x0 for non-MRML PCC
+ devices. */
+ uint32_t rid : 8; /**< [ 23: 16](SR/W) Revision ID. R/W version of the value to be presented in PCCPF_XXX_REV[RID]. */
+ uint32_t msix_sec_phys : 1; /**< [ 15: 15](SR/W) MSI-X secure physical:
+ _ 0 = Any MSI-X vectors with SECVEC = 1 use the same physical setting as
+ nonsecure vectors, i.e. [MSIX_PHYS].
+ _ 1 = Any MSI-X vectors with SECVEC = 1 are considered physical, regardless
+ of [MSIX_PHYS]. */
+ uint32_t reserved_12_14 : 3;
+ uint32_t gia_timeout : 6; /**< [ 11: 6](SR/W) GIA timeout (2^[GIA_TIMEOUT] clock cycles). Timeout for MSI-X commits. When zero, the
+ wait for commits is disabled. */
+ uint32_t node : 2; /**< [ 5: 4](SR/W) Node number. */
+ uint32_t ea : 1; /**< [ 3: 3](SRO) Enable PCI enhanced allocation. Always set.
+
+ Addresses are discovered using enhanced allocation and PCCPF_XXX_EA_ENTRY().
+ Standard BARs are read-only zero (PCCPF_XXX_BAR0L, PCCPF_XXX_BAR0U,
+ PCCPF_XXX_BAR2L, PCCPF_XXX_BAR2U, PCCPF_XXX_BAR4L, PCCPF_XXX_BAR4U,
+ PCCPF_XXX_SRIOV_BAR0L, PCCPF_XXX_SRIOV_BAR0U, PCCPF_XXX_SRIOV_BAR2L,
+ PCCPF_XXX_SRIOV_BAR2U, PCCPF_XXX_SRIOV_BAR4L, PCCPF_XXX_SRIOV_BAR4U). */
+ uint32_t msix_sec_en : 1; /**< [ 2: 2](SR/W) MSI-X secure enable:
+ _ 0 = Any MSI-X vectors with SECVEC = 1, or all vectors if [MSIX_SEC], use
+ the same enable settings as nonsecure vectors based on normal PCIe
+ rules, i.e. are enabled when PCCPF_XXX_MSIX_CAP_HDR[MSIXEN]=1 and
+ unmasked when PCCPF_XXX_MSIX_CAP_HDR[FUNM]=0 and PCCPF_XXX_CMD[ME]=1.
+ _ 1 = Any MSI-X vectors with SECVEC = 1, or all vectors if [MSIX_SEC], will
+ act as if PCCPF_XXX_MSIX_CAP_HDR[MSIXEN]=1, PCCPF_XXX_MSIX_CAP_HDR[FUNM]=0
+ and PCCPF_XXX_CMD[ME]=1,
+ regardless of the true setting of those bits. Nonsecure vectors are
+ unaffected. Blocks that have both secure and nonsecure vectors in use
+ simultaneously may want to use this setting to prevent the nonsecure world
+ from globally disabling secure interrupts. */
+ uint32_t msix_sec : 1; /**< [ 1: 1](SR/W) All MSI-X interrupts are secure. This is equivalent to setting the per-vector secure bit
+ (e.g. GTI_MSIX_VEC()_ADDR[SECVEC]) for all vectors in the block. */
+ uint32_t msix_phys : 1; /**< [ 0: 0](SR/W) MSI-X interrupts are physical.
+ 0 = MSI-X interrupt vector addresses are standard virtual addresses and subject to SMMU
+ address translation.
+ 1 = MSI-X interrupt vector addresses are considered physical addresses and PCC MSI-X
+ interrupt delivery will bypass the SMMU. */
+#else /* Word 0 - Little Endian */
+ uint32_t msix_phys : 1; /**< [ 0: 0](SR/W) MSI-X interrupts are physical.
+ 0 = MSI-X interrupt vector addresses are standard virtual addresses and subject to SMMU
+ address translation.
+ 1 = MSI-X interrupt vector addresses are considered physical addresses and PCC MSI-X
+ interrupt delivery will bypass the SMMU. */
+ uint32_t msix_sec : 1; /**< [ 1: 1](SR/W) All MSI-X interrupts are secure. This is equivalent to setting the per-vector secure bit
+ (e.g. GTI_MSIX_VEC()_ADDR[SECVEC]) for all vectors in the block. */
+ uint32_t msix_sec_en : 1; /**< [ 2: 2](SR/W) MSI-X secure enable:
+ _ 0 = Any MSI-X vectors with SECVEC = 1, or all vectors if [MSIX_SEC], use
+ the same enable settings as nonsecure vectors based on normal PCIe
+ rules, i.e. are enabled when PCCPF_XXX_MSIX_CAP_HDR[MSIXEN]=1 and
+ unmasked when PCCPF_XXX_MSIX_CAP_HDR[FUNM]=0 and PCCPF_XXX_CMD[ME]=1.
+ _ 1 = Any MSI-X vectors with SECVEC = 1, or all vectors if [MSIX_SEC], will
+ act as if PCCPF_XXX_MSIX_CAP_HDR[MSIXEN]=1, PCCPF_XXX_MSIX_CAP_HDR[FUNM]=0
+ and PCCPF_XXX_CMD[ME]=1,
+ regardless of the true setting of those bits. Nonsecure vectors are
+ unaffected. Blocks that have both secure and nonsecure vectors in use
+ simultaneously may want to use this setting to prevent the nonsecure world
+ from globally disabling secure interrupts. */
+ uint32_t ea : 1; /**< [ 3: 3](SRO) Enable PCI enhanced allocation. Always set.
+
+ Addresses are discovered using enhanced allocation and PCCPF_XXX_EA_ENTRY().
+ Standard BARs are read-only zero (PCCPF_XXX_BAR0L, PCCPF_XXX_BAR0U,
+ PCCPF_XXX_BAR2L, PCCPF_XXX_BAR2U, PCCPF_XXX_BAR4L, PCCPF_XXX_BAR4U,
+ PCCPF_XXX_SRIOV_BAR0L, PCCPF_XXX_SRIOV_BAR0U, PCCPF_XXX_SRIOV_BAR2L,
+ PCCPF_XXX_SRIOV_BAR2U, PCCPF_XXX_SRIOV_BAR4L, PCCPF_XXX_SRIOV_BAR4U). */
+ uint32_t node : 2; /**< [ 5: 4](SR/W) Node number. */
+ uint32_t gia_timeout : 6; /**< [ 11: 6](SR/W) GIA timeout (2^[GIA_TIMEOUT] clock cycles). Timeout for MSI-X commits. When zero, the
+ wait for commits is disabled. */
+ uint32_t reserved_12_14 : 3;
+ uint32_t msix_sec_phys : 1; /**< [ 15: 15](SR/W) MSI-X secure physical:
+ _ 0 = Any MSI-X vectors with SECVEC = 1 use the same physical setting as
+ nonsecure vectors, i.e. [MSIX_PHYS].
+ _ 1 = Any MSI-X vectors with SECVEC = 1 are considered physical, regardless
+ of [MSIX_PHYS]. */
+ uint32_t rid : 8; /**< [ 23: 16](SR/W) Revision ID. R/W version of the value to be presented in PCCPF_XXX_REV[RID]. */
+ uint32_t nxtfn_s : 8; /**< [ 31: 24](SR/W) For secure accesses, the value to be presented in PCCPF_XXX_(S)ARI_NXT[NXTFN] indicating
+ the next valid function number for this device. Must be 0x0 for non-MRML PCC
+ devices. */
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_pccpf_xxx_vsec_sctl_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t nxtfn_s : 8; /**< [ 31: 24](SR/W) For secure accesses, the value to be presented in PCCPF_XXX_(S)ARI_NXT[NXTFN] indicating
+ the next valid function number for this device. Must be 0x0 for non-MRML PCC
+ devices. */
+ uint32_t rid : 8; /**< [ 23: 16](SR/W) Revision ID. R/W version of the value to be presented in PCCPF_XXX_REV[RID]. */
+ uint32_t reserved_6_15 : 10;
+ uint32_t node : 2; /**< [ 5: 4](SR/W/H) Node number. */
+ uint32_t ea : 1; /**< [ 3: 3](SR/W) Enable PCI enhanced allocation.
+
+ 0 = Addresses are discovered using standard BARs; however, while the BARs are
+ writable, the written value is ignored. PCCPF_XXX_EA_ENTRY() still indicates the BARs,
+ but software will not read them, as PCCPF_XXX_EA_CAP_HDR is not linked into the
+ capabilities list (see PCCPF_XXX_E_CAP_HDR[NCP], PCCPF_XXX_MSIX_CAP_HDR[NCP]).
+
+ 1 = Addresses are discovered using enhanced allocation and PCCPF_XXX_EA_ENTRY().
+ Standard BARs are read-only zero (PCCPF_XXX_BAR0L, PCCPF_XXX_BAR0U,
+ PCCPF_XXX_BAR2L, PCCPF_XXX_BAR2U, PCCPF_XXX_BAR4L, PCCPF_XXX_BAR4U,
+ PCCPF_XXX_SRIOV_BAR0L, PCCPF_XXX_SRIOV_BAR0U, PCCPF_XXX_SRIOV_BAR2L,
+ PCCPF_XXX_SRIOV_BAR2U, PCCPF_XXX_SRIOV_BAR4L, PCCPF_XXX_SRIOV_BAR4U). */
+ uint32_t bcst_rsp : 1; /**< [ 2: 2](SR/W) Reserved, must be 0.
+ Internal:
+ Reserved for future use - Enable this PCC
+ instance as the responder to PCC broadcast reads/writes. */
+ uint32_t msix_sec : 1; /**< [ 1: 1](SR/W) All MSI-X interrupts are secure. This is equivalent to setting the per-vector secure bit
+ (e.g. GTI_MSIX_VEC()_ADDR[SECVEC]) for all vectors in the block. */
+ uint32_t msix_phys : 1; /**< [ 0: 0](SR/W) MSI-X interrupts are physical.
+ 0 = MSI-X interrupt vector addresses are standard virtual addresses and subject to SMMU
+ address translation.
+ 1 = MSI-X interrupt vector addresses are considered physical addresses and PCC MSI-X
+ interrupt delivery will bypass the SMMU. */
+#else /* Word 0 - Little Endian */
+ uint32_t msix_phys : 1; /**< [ 0: 0](SR/W) MSI-X interrupts are physical.
+ 0 = MSI-X interrupt vector addresses are standard virtual addresses and subject to SMMU
+ address translation.
+ 1 = MSI-X interrupt vector addresses are considered physical addresses and PCC MSI-X
+ interrupt delivery will bypass the SMMU. */
+ uint32_t msix_sec : 1; /**< [ 1: 1](SR/W) All MSI-X interrupts are secure. This is equivalent to setting the per-vector secure bit
+ (e.g. GTI_MSIX_VEC()_ADDR[SECVEC]) for all vectors in the block. */
+ uint32_t bcst_rsp : 1; /**< [ 2: 2](SR/W) Reserved, must be 0.
+ Internal:
+ Reserved for future use - Enable this PCC
+ instance as the responder to PCC broadcast reads/writes. */
+ uint32_t ea : 1; /**< [ 3: 3](SR/W) Enable PCI enhanced allocation.
+
+ 0 = Addresses are discovered using standard BARs; however, while the BARs are
+ writable, the written value is ignored. PCCPF_XXX_EA_ENTRY() still indicates the BARs,
+ but software will not read them, as PCCPF_XXX_EA_CAP_HDR is not linked into the
+ capabilities list (see PCCPF_XXX_E_CAP_HDR[NCP], PCCPF_XXX_MSIX_CAP_HDR[NCP]).
+
+ 1 = Addresses are discovered using enhanced allocation and PCCPF_XXX_EA_ENTRY().
+ Standard BARs are read-only zero (PCCPF_XXX_BAR0L, PCCPF_XXX_BAR0U,
+ PCCPF_XXX_BAR2L, PCCPF_XXX_BAR2U, PCCPF_XXX_BAR4L, PCCPF_XXX_BAR4U,
+ PCCPF_XXX_SRIOV_BAR0L, PCCPF_XXX_SRIOV_BAR0U, PCCPF_XXX_SRIOV_BAR2L,
+ PCCPF_XXX_SRIOV_BAR2U, PCCPF_XXX_SRIOV_BAR4L, PCCPF_XXX_SRIOV_BAR4U). */
+ uint32_t node : 2; /**< [ 5: 4](SR/W/H) Node number. */
+ uint32_t reserved_6_15 : 10;
+ uint32_t rid : 8; /**< [ 23: 16](SR/W) Revision ID. R/W version of the value to be presented in PCCPF_XXX_REV[RID]. */
+ uint32_t nxtfn_s : 8; /**< [ 31: 24](SR/W) For secure accesses, the value to be presented in PCCPF_XXX_(S)ARI_NXT[NXTFN] indicating
+ the next valid function number for this device. Must be 0x0 for non-MRML PCC
+ devices. */
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_pccpf_xxx_vsec_sctl_cn81xx cn83xx; */
+ /* struct bdk_pccpf_xxx_vsec_sctl_cn81xx cn88xxp2; */
+};
+typedef union bdk_pccpf_xxx_vsec_sctl bdk_pccpf_xxx_vsec_sctl_t;
+
+#define BDK_PCCPF_XXX_VSEC_SCTL BDK_PCCPF_XXX_VSEC_SCTL_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_VSEC_SCTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_VSEC_SCTL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x10c;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x10c;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x114;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x10c;
+ __bdk_csr_fatal("PCCPF_XXX_VSEC_SCTL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCPF_XXX_VSEC_SCTL bdk_pccpf_xxx_vsec_sctl_t
+#define bustype_BDK_PCCPF_XXX_VSEC_SCTL BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_VSEC_SCTL "PCCPF_XXX_VSEC_SCTL"
+#define busnum_BDK_PCCPF_XXX_VSEC_SCTL 0
+#define arguments_BDK_PCCPF_XXX_VSEC_SCTL -1,-1,-1,-1
+
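+/* Usage sketch (editorial): [MSIX_PHYS] and [MSIX_SEC] let secure firmware
+ * force physically addressed and/or secure MSI-X delivery for an entire
+ * block. A minimal secure-world setup, again with the hypothetical
+ * accessors from the earlier sketches: */
+#if 0
+#include <stdint.h>
+
+extern uint32_t read_pccpf_csr(uint64_t offset);          /* hypothetical */
+extern void write_pccpf_csr(uint64_t offset, uint32_t v); /* hypothetical */
+
+static void secure_msix_setup(void)
+{
+    bdk_pccpf_xxx_vsec_sctl_t sctl;
+    sctl.u = read_pccpf_csr(BDK_PCCPF_XXX_VSEC_SCTL);
+    sctl.s.msix_phys = 1; /* vector addresses bypass the SMMU */
+    sctl.s.msix_sec  = 1; /* treat every vector in the block as secure */
+    write_pccpf_csr(BDK_PCCPF_XXX_VSEC_SCTL, sctl.u);
+}
+#endif
+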
+/**
+ * Register (PCCPF) pccpf_xxx_vsec_sriov_bar0l
+ *
+ * PCC PF Vendor-Specific SR-IOV Address 0 Lower Register
+ */
+union bdk_pccpf_xxx_vsec_sriov_bar0l
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_vsec_sriov_bar0l_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t lbab : 16; /**< [ 31: 16](RO) Lower bits of the hard-coded SR-IOV BAR 0 base address; the reset value for
+ PCCPF_XXX_SRIOV_BAR0L[LBAB].
+
+ Internal:
+ From PCC's tie__vfbar0_offset. */
+ uint32_t reserved_0_15 : 16;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_15 : 16;
+ uint32_t lbab : 16; /**< [ 31: 16](RO) Lower bits of the hard-coded SR-IOV BAR 0 base address; the reset value for
+ PCCPF_XXX_SRIOV_BAR0L[LBAB].
+
+ Internal:
+ From PCC's tie__vfbar0_offset. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_vsec_sriov_bar0l_s cn; */
+};
+typedef union bdk_pccpf_xxx_vsec_sriov_bar0l bdk_pccpf_xxx_vsec_sriov_bar0l_t;
+
+#define BDK_PCCPF_XXX_VSEC_SRIOV_BAR0L BDK_PCCPF_XXX_VSEC_SRIOV_BAR0L_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_VSEC_SRIOV_BAR0L_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_VSEC_SRIOV_BAR0L_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x128;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x128;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x130;
+ __bdk_csr_fatal("PCCPF_XXX_VSEC_SRIOV_BAR0L", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCPF_XXX_VSEC_SRIOV_BAR0L bdk_pccpf_xxx_vsec_sriov_bar0l_t
+#define bustype_BDK_PCCPF_XXX_VSEC_SRIOV_BAR0L BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_VSEC_SRIOV_BAR0L "PCCPF_XXX_VSEC_SRIOV_BAR0L"
+#define busnum_BDK_PCCPF_XXX_VSEC_SRIOV_BAR0L 0
+#define arguments_BDK_PCCPF_XXX_VSEC_SRIOV_BAR0L -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_vsec_sriov_bar0u
+ *
+ * PCC PF Vendor-Specific SR-IOV Address 0 Upper Register
+ */
+union bdk_pccpf_xxx_vsec_sriov_bar0u
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_vsec_sriov_bar0u_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ubab : 32; /**< [ 31: 0](RO) Upper bits of the hard-coded SR-IOV BAR 0 base address; the reset value for
+ PCCPF_XXX_SRIOV_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__vfbar0_offset. */
+#else /* Word 0 - Little Endian */
+ uint32_t ubab : 32; /**< [ 31: 0](RO) Upper bits of the hard-coded SR-IOV BAR 0 base address; the reset value for
+ PCCPF_XXX_SRIOV_BAR0U[UBAB].
+
+ Internal:
+ From PCC's tie__vfbar0_offset. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_vsec_sriov_bar0u_s cn; */
+};
+typedef union bdk_pccpf_xxx_vsec_sriov_bar0u bdk_pccpf_xxx_vsec_sriov_bar0u_t;
+
+#define BDK_PCCPF_XXX_VSEC_SRIOV_BAR0U BDK_PCCPF_XXX_VSEC_SRIOV_BAR0U_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_VSEC_SRIOV_BAR0U_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_VSEC_SRIOV_BAR0U_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x12c;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x12c;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x134;
+ __bdk_csr_fatal("PCCPF_XXX_VSEC_SRIOV_BAR0U", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCPF_XXX_VSEC_SRIOV_BAR0U bdk_pccpf_xxx_vsec_sriov_bar0u_t
+#define bustype_BDK_PCCPF_XXX_VSEC_SRIOV_BAR0U BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_VSEC_SRIOV_BAR0U "PCCPF_XXX_VSEC_SRIOV_BAR0U"
+#define busnum_BDK_PCCPF_XXX_VSEC_SRIOV_BAR0U 0
+#define arguments_BDK_PCCPF_XXX_VSEC_SRIOV_BAR0U -1,-1,-1,-1
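+
+/*
+ * Editorial sketch, not part of the auto-generated register list: the two
+ * VSEC registers above together describe the hard-coded 64-bit SR-IOV BAR 0
+ * base. Assuming [LBAB] supplies address bits <31:16> and [UBAB] bits
+ * <63:32> (the usual lower/upper BAR split), the base reconstructs as:
+ */
+static inline uint64_t pccpf_vsec_sriov_bar0_base(bdk_pccpf_xxx_vsec_sriov_bar0l_t lo,
+                                                  bdk_pccpf_xxx_vsec_sriov_bar0u_t hi)
+{
+    return ((uint64_t)hi.s.ubab << 32) | ((uint64_t)lo.s.lbab << 16);
+}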
+
+/**
+ * Register (PCCPF) pccpf_xxx_vsec_sriov_bar2l
+ *
+ * PCC PF Vendor-Specific SR-IOV Address 2 Lower Register
+ */
+union bdk_pccpf_xxx_vsec_sriov_bar2l
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_vsec_sriov_bar2l_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t lbab : 16; /**< [ 31: 16](RO) Lower bits of the hard-coded SR-IOV BAR 2 base address; the reset value for
+ PCCPF_XXX_SRIOV_BAR2L[LBAB].
+
+ Internal:
+ From PCC's tie__vfbar2_offset. */
+ uint32_t reserved_0_15 : 16;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_15 : 16;
+ uint32_t lbab : 16; /**< [ 31: 16](RO) Lower bits of the hard-coded SR-IOV BAR 2 base address; the reset value for
+ PCCPF_XXX_SRIOV_BAR2L[LBAB].
+
+ Internal:
+ From PCC's tie__vfbar2_offset. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_vsec_sriov_bar2l_s cn; */
+};
+typedef union bdk_pccpf_xxx_vsec_sriov_bar2l bdk_pccpf_xxx_vsec_sriov_bar2l_t;
+
+#define BDK_PCCPF_XXX_VSEC_SRIOV_BAR2L BDK_PCCPF_XXX_VSEC_SRIOV_BAR2L_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_VSEC_SRIOV_BAR2L_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_VSEC_SRIOV_BAR2L_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x130;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x130;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x138;
+ __bdk_csr_fatal("PCCPF_XXX_VSEC_SRIOV_BAR2L", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCPF_XXX_VSEC_SRIOV_BAR2L bdk_pccpf_xxx_vsec_sriov_bar2l_t
+#define bustype_BDK_PCCPF_XXX_VSEC_SRIOV_BAR2L BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_VSEC_SRIOV_BAR2L "PCCPF_XXX_VSEC_SRIOV_BAR2L"
+#define busnum_BDK_PCCPF_XXX_VSEC_SRIOV_BAR2L 0
+#define arguments_BDK_PCCPF_XXX_VSEC_SRIOV_BAR2L -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_vsec_sriov_bar2u
+ *
+ * PCC PF Vendor-Specific SR-IOV Address 2 Upper Register
+ */
+union bdk_pccpf_xxx_vsec_sriov_bar2u
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_vsec_sriov_bar2u_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ubab : 32; /**< [ 31: 0](RO) Upper bits of the hard-coded SR-IOV BAR 2 base address; the reset value for
+ PCCPF_XXX_SRIOV_BAR2U[UBAB].
+
+ Internal:
+ From PCC's tie__vfbar2_offset. */
+#else /* Word 0 - Little Endian */
+ uint32_t ubab : 32; /**< [ 31: 0](RO) Upper bits of the hard-coded SR-IOV BAR 2 base address; the reset value for
+ PCCPF_XXX_SRIOV_BAR2U[UBAB].
+
+ Internal:
+ From PCC's tie__vfbar2_offset. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_vsec_sriov_bar2u_s cn; */
+};
+typedef union bdk_pccpf_xxx_vsec_sriov_bar2u bdk_pccpf_xxx_vsec_sriov_bar2u_t;
+
+#define BDK_PCCPF_XXX_VSEC_SRIOV_BAR2U BDK_PCCPF_XXX_VSEC_SRIOV_BAR2U_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_VSEC_SRIOV_BAR2U_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_VSEC_SRIOV_BAR2U_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x134;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x134;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x13c;
+ __bdk_csr_fatal("PCCPF_XXX_VSEC_SRIOV_BAR2U", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCPF_XXX_VSEC_SRIOV_BAR2U bdk_pccpf_xxx_vsec_sriov_bar2u_t
+#define bustype_BDK_PCCPF_XXX_VSEC_SRIOV_BAR2U BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_VSEC_SRIOV_BAR2U "PCCPF_XXX_VSEC_SRIOV_BAR2U"
+#define busnum_BDK_PCCPF_XXX_VSEC_SRIOV_BAR2U 0
+#define arguments_BDK_PCCPF_XXX_VSEC_SRIOV_BAR2U -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_vsec_sriov_bar4l
+ *
+ * PCC PF Vendor-Specific SR-IOV Address 4 Lower Register
+ */
+union bdk_pccpf_xxx_vsec_sriov_bar4l
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_vsec_sriov_bar4l_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t lbab : 16; /**< [ 31: 16](RO) Lower bits of the hard-coded SR-IOV BAR 4 base address; the reset value for
+ PCCPF_XXX_SRIOV_BAR4L[LBAB].
+
+ Internal:
+ From PCC's tie__vfbar4_offset. */
+ uint32_t reserved_0_15 : 16;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_15 : 16;
+ uint32_t lbab : 16; /**< [ 31: 16](RO) Lower bits of the hard-coded SR-IOV BAR 4 base address; the reset value for
+ PCCPF_XXX_SRIOV_BAR4L[LBAB].
+
+ Internal:
+ From PCC's tie__vfbar4_offset. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_vsec_sriov_bar4l_s cn; */
+};
+typedef union bdk_pccpf_xxx_vsec_sriov_bar4l bdk_pccpf_xxx_vsec_sriov_bar4l_t;
+
+#define BDK_PCCPF_XXX_VSEC_SRIOV_BAR4L BDK_PCCPF_XXX_VSEC_SRIOV_BAR4L_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_VSEC_SRIOV_BAR4L_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_VSEC_SRIOV_BAR4L_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x138;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x138;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x140;
+ __bdk_csr_fatal("PCCPF_XXX_VSEC_SRIOV_BAR4L", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCPF_XXX_VSEC_SRIOV_BAR4L bdk_pccpf_xxx_vsec_sriov_bar4l_t
+#define bustype_BDK_PCCPF_XXX_VSEC_SRIOV_BAR4L BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_VSEC_SRIOV_BAR4L "PCCPF_XXX_VSEC_SRIOV_BAR4L"
+#define busnum_BDK_PCCPF_XXX_VSEC_SRIOV_BAR4L 0
+#define arguments_BDK_PCCPF_XXX_VSEC_SRIOV_BAR4L -1,-1,-1,-1
+
+/**
+ * Register (PCCPF) pccpf_xxx_vsec_sriov_bar4u
+ *
+ * PCC PF Vendor-Specific SR-IOV Address 4 Upper Register
+ */
+union bdk_pccpf_xxx_vsec_sriov_bar4u
+{
+ uint32_t u;
+ struct bdk_pccpf_xxx_vsec_sriov_bar4u_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ubab : 32; /**< [ 31: 0](RO) Upper bits of the hard-coded SR-IOV BAR 4 base address; the reset value for
+ PCCPF_XXX_SRIOV_BAR4U[UBAB].
+
+ Internal:
+ From PCC's tie__vfbar4_offset. */
+#else /* Word 0 - Little Endian */
+ uint32_t ubab : 32; /**< [ 31: 0](RO) Upper bits of the hard-coded SR-IOV BAR 4 base address; the reset value for
+ PCCPF_XXX_SRIOV_BAR4U[UBAB].
+
+ Internal:
+ From PCC's tie__vfbar4_offset. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccpf_xxx_vsec_sriov_bar4u_s cn; */
+};
+typedef union bdk_pccpf_xxx_vsec_sriov_bar4u bdk_pccpf_xxx_vsec_sriov_bar4u_t;
+
+#define BDK_PCCPF_XXX_VSEC_SRIOV_BAR4U BDK_PCCPF_XXX_VSEC_SRIOV_BAR4U_FUNC()
+static inline uint64_t BDK_PCCPF_XXX_VSEC_SRIOV_BAR4U_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCPF_XXX_VSEC_SRIOV_BAR4U_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x13c;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x13c;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x144;
+ __bdk_csr_fatal("PCCPF_XXX_VSEC_SRIOV_BAR4U", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCPF_XXX_VSEC_SRIOV_BAR4U bdk_pccpf_xxx_vsec_sriov_bar4u_t
+#define bustype_BDK_PCCPF_XXX_VSEC_SRIOV_BAR4U BDK_CSR_TYPE_PCCPF
+#define basename_BDK_PCCPF_XXX_VSEC_SRIOV_BAR4U "PCCPF_XXX_VSEC_SRIOV_BAR4U"
+#define busnum_BDK_PCCPF_XXX_VSEC_SRIOV_BAR4U 0
+#define arguments_BDK_PCCPF_XXX_VSEC_SRIOV_BAR4U -1,-1,-1,-1
+
+#endif /* __BDK_CSRS_PCCPF_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-pem.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-pem.h
new file mode 100644
index 0000000000..e493f0d3d7
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-pem.h
@@ -0,0 +1,14736 @@
+#ifndef __BDK_CSRS_PEM_H__
+#define __BDK_CSRS_PEM_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium PEM.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Enumeration pem_bar_e
+ *
+ * PEM Base Address Register Enumeration
+ * Enumerates the base address registers.
+ */
+#define BDK_PEM_BAR_E_PEMX_PF_BAR0_CN8(a) (0x87e0c0000000ll + 0x1000000ll * (a))
+#define BDK_PEM_BAR_E_PEMX_PF_BAR0_CN8_SIZE 0x800000ull
+#define BDK_PEM_BAR_E_PEMX_PF_BAR0_CN9(a) (0x8e0000000000ll + 0x1000000000ll * (a))
+#define BDK_PEM_BAR_E_PEMX_PF_BAR0_CN9_SIZE 0x40000000ull
+#define BDK_PEM_BAR_E_PEMX_PF_BAR4_CN8(a) (0x87e0c0f00000ll + 0x1000000ll * (a))
+#define BDK_PEM_BAR_E_PEMX_PF_BAR4_CN8_SIZE 0x100000ull
+#define BDK_PEM_BAR_E_PEMX_PF_BAR4_CN9(a) (0x8e0f00000000ll + 0x1000000000ll * (a))
+#define BDK_PEM_BAR_E_PEMX_PF_BAR4_CN9_SIZE 0x100000ull
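+
+/*
+ * Editorial sketch (helper name illustrative): testing whether a physical
+ * address falls inside a given PEM's PF BAR0 window on CN8XXX parts, using
+ * the base/size pair enumerated above.
+ */
+static inline int pem_addr_in_pf_bar0_cn8(uint64_t addr, unsigned long pem)
+{
+    uint64_t base = BDK_PEM_BAR_E_PEMX_PF_BAR0_CN8(pem);
+    return addr >= base &&
+           addr < base + BDK_PEM_BAR_E_PEMX_PF_BAR0_CN8_SIZE;
+}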
+
+/**
+ * Enumeration pem_ep_functions_e
+ *
+ * PEM EP Mode Function Number Enumeration
+ * Enumerates the function numbers that an EP PEM masters.
+ */
+#define BDK_PEM_EP_FUNCTIONS_E_PF0 (0)
+#define BDK_PEM_EP_FUNCTIONS_E_PF0_VFX(a) (0 + (a))
+
+/**
+ * Enumeration pem_int_vec_e
+ *
+ * PEM MSI-X Vector Enumeration
+ * Enumerates the MSI-X interrupt vectors.
+ */
+#define BDK_PEM_INT_VEC_E_DBG_INFO_CN81XX (0xb)
+#define BDK_PEM_INT_VEC_E_DBG_INFO_CN88XX (0xd)
+#define BDK_PEM_INT_VEC_E_DBG_INFO_CN83XX (0xb)
+#define BDK_PEM_INT_VEC_E_ERROR_AERI (0)
+#define BDK_PEM_INT_VEC_E_ERROR_AERI_CLEAR (1)
+#define BDK_PEM_INT_VEC_E_ERROR_PMEI (2)
+#define BDK_PEM_INT_VEC_E_ERROR_PMEI_CLEAR (3)
+#define BDK_PEM_INT_VEC_E_HP_INT (0xe)
+#define BDK_PEM_INT_VEC_E_HP_INT_CLEAR (0xf)
+#define BDK_PEM_INT_VEC_E_HP_PMEI (1)
+#define BDK_PEM_INT_VEC_E_INTA_CN9 (0)
+#define BDK_PEM_INT_VEC_E_INTA_CN81XX (2)
+#define BDK_PEM_INT_VEC_E_INTA_CN88XX (4)
+#define BDK_PEM_INT_VEC_E_INTA_CN83XX (2)
+#define BDK_PEM_INT_VEC_E_INTA_CLEAR_CN9 (1)
+#define BDK_PEM_INT_VEC_E_INTA_CLEAR_CN81XX (3)
+#define BDK_PEM_INT_VEC_E_INTA_CLEAR_CN88XX (5)
+#define BDK_PEM_INT_VEC_E_INTA_CLEAR_CN83XX (3)
+#define BDK_PEM_INT_VEC_E_INTB_CN9 (2)
+#define BDK_PEM_INT_VEC_E_INTB_CN81XX (4)
+#define BDK_PEM_INT_VEC_E_INTB_CN88XX (6)
+#define BDK_PEM_INT_VEC_E_INTB_CN83XX (4)
+#define BDK_PEM_INT_VEC_E_INTB_CLEAR_CN9 (3)
+#define BDK_PEM_INT_VEC_E_INTB_CLEAR_CN81XX (5)
+#define BDK_PEM_INT_VEC_E_INTB_CLEAR_CN88XX (7)
+#define BDK_PEM_INT_VEC_E_INTB_CLEAR_CN83XX (5)
+#define BDK_PEM_INT_VEC_E_INTC_CN9 (4)
+#define BDK_PEM_INT_VEC_E_INTC_CN81XX (6)
+#define BDK_PEM_INT_VEC_E_INTC_CN88XX (8)
+#define BDK_PEM_INT_VEC_E_INTC_CN83XX (6)
+#define BDK_PEM_INT_VEC_E_INTC_CLEAR_CN9 (5)
+#define BDK_PEM_INT_VEC_E_INTC_CLEAR_CN81XX (7)
+#define BDK_PEM_INT_VEC_E_INTC_CLEAR_CN88XX (9)
+#define BDK_PEM_INT_VEC_E_INTC_CLEAR_CN83XX (7)
+#define BDK_PEM_INT_VEC_E_INTD_CN9 (6)
+#define BDK_PEM_INT_VEC_E_INTD_CN81XX (8)
+#define BDK_PEM_INT_VEC_E_INTD_CN88XX (0xa)
+#define BDK_PEM_INT_VEC_E_INTD_CN83XX (8)
+#define BDK_PEM_INT_VEC_E_INTD_CLEAR_CN9 (7)
+#define BDK_PEM_INT_VEC_E_INTD_CLEAR_CN81XX (9)
+#define BDK_PEM_INT_VEC_E_INTD_CLEAR_CN88XX (0xb)
+#define BDK_PEM_INT_VEC_E_INTD_CLEAR_CN83XX (9)
+#define BDK_PEM_INT_VEC_E_INT_SUM_CN9 (8)
+#define BDK_PEM_INT_VEC_E_INT_SUM_CN81XX (0xa)
+#define BDK_PEM_INT_VEC_E_INT_SUM_CN88XX (0xc)
+#define BDK_PEM_INT_VEC_E_INT_SUM_CN83XX (0xa)
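+
+/*
+ * Editorial sketch: the vector numbers above vary by model; runtime code can
+ * dispatch with CAVIUM_IS_MODEL() in the same style as the accessor
+ * functions in this file. Example for INTA (CN9 fallback assumed):
+ */
+static inline int pem_int_vec_inta(void)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+        return BDK_PEM_INT_VEC_E_INTA_CN81XX;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+        return BDK_PEM_INT_VEC_E_INTA_CN83XX;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+        return BDK_PEM_INT_VEC_E_INTA_CN88XX;
+    return BDK_PEM_INT_VEC_E_INTA_CN9;
+}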
+
+/**
+ * Register (RSL) pem#_bar1_index#
+ *
+ * PEM BAR1 Index 0-15 Register
+ * This register contains the address index and control bits for access to memory ranges of BAR1.
+ * The index is selected from the PCIe address depending on the programmed BAR1 size.
+ */
+union bdk_pemx_bar1_indexx
+{
+ uint64_t u;
+ struct bdk_pemx_bar1_indexx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_31_63 : 33;
+ uint64_t addr_idx : 27; /**< [ 30: 4](R/W) Address index. Address bits [48:22] sent to L2C. */
+ uint64_t ca : 1; /**< [ 3: 3](R/W) Cached. Set to 1 when access is not to be cached in L2. */
+ uint64_t end_swp : 2; /**< [ 2: 1](R/W) Endian-swap mode. */
+ uint64_t addr_v : 1; /**< [ 0: 0](R/W) Address valid. Set to 1 when the selected address range is valid. */
+#else /* Word 0 - Little Endian */
+ uint64_t addr_v : 1; /**< [ 0: 0](R/W) Address valid. Set to 1 when the selected address range is valid. */
+ uint64_t end_swp : 2; /**< [ 2: 1](R/W) Endian-swap mode. */
+ uint64_t ca : 1; /**< [ 3: 3](R/W) Cached. Set to 1 when access is not to be cached in L2. */
+ uint64_t addr_idx : 27; /**< [ 30: 4](R/W) Address index. Address bits [48:22] sent to L2C. */
+ uint64_t reserved_31_63 : 33;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pemx_bar1_indexx_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_31_63 : 33;
+ uint64_t addr_idx : 27; /**< [ 30: 4](R/W) Address index. Address bits [48:22] sent to L2C. */
+ uint64_t ca : 1; /**< [ 3: 3](R/W) Cached. Set to 1 when access is not to be cached in L2. */
+ uint64_t reserved_1_2 : 2;
+ uint64_t addr_v : 1; /**< [ 0: 0](R/W) Address valid. Set to 1 when the selected address range is valid. */
+#else /* Word 0 - Little Endian */
+ uint64_t addr_v : 1; /**< [ 0: 0](R/W) Address valid. Set to 1 when the selected address range is valid. */
+ uint64_t reserved_1_2 : 2;
+ uint64_t ca : 1; /**< [ 3: 3](R/W) Cached. Set to 1 when access is not to be cached in L2. */
+ uint64_t addr_idx : 27; /**< [ 30: 4](R/W) Address index. Address bits [48:22] sent to L2C. */
+ uint64_t reserved_31_63 : 33;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_pemx_bar1_indexx_cn81xx cn88xx; */
+ /* struct bdk_pemx_bar1_indexx_s cn83xx; */
+};
+typedef union bdk_pemx_bar1_indexx bdk_pemx_bar1_indexx_t;
+
+static inline uint64_t BDK_PEMX_BAR1_INDEXX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_BAR1_INDEXX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=2) && (b<=15)))
+ return 0x87e0c0000100ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=15)))
+ return 0x87e0c0000100ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=5) && (b<=15)))
+ return 0x87e0c0000100ll + 0x1000000ll * ((a) & 0x7) + 8ll * ((b) & 0xf);
+ __bdk_csr_fatal("PEMX_BAR1_INDEXX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_PEMX_BAR1_INDEXX(a,b) bdk_pemx_bar1_indexx_t
+#define bustype_BDK_PEMX_BAR1_INDEXX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_PEMX_BAR1_INDEXX(a,b) "PEMX_BAR1_INDEXX"
+#define device_bar_BDK_PEMX_BAR1_INDEXX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_BAR1_INDEXX(a,b) (a)
+#define arguments_BDK_PEMX_BAR1_INDEXX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) pem#_bar2_mask
+ *
+ * PEM BAR2 Mask Register
+ * This register contains the mask pattern that is ANDed with the address from the PCIe core for
+ * inbound PF BAR2 hits in either RC or EP mode. This mask is only applied if
+ * PEM()_EBUS_CTL[PF_BAR2_SEL] is clear and the address hits in the PCIEEP_BAR2L / PCIEEP_BAR2U
+ * registers (EP mode) or PEM()_P2N_BAR2_START / PEM()_BAR_CTL[BAR2_SIZ] registers (RC mode).
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_bar2_mask
+{
+ uint64_t u;
+ struct bdk_pemx_bar2_mask_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_53_63 : 11;
+ uint64_t mask : 49; /**< [ 52: 4](R/W) The value to be ANDed with the address sent to memory. */
+ uint64_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_3 : 4;
+ uint64_t mask : 49; /**< [ 52: 4](R/W) The value to be ANDed with the address sent to memory. */
+ uint64_t reserved_53_63 : 11;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pemx_bar2_mask_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_53_63 : 11;
+ uint64_t mask : 49; /**< [ 52: 4](R/W) The value to be ANDed with the address sent to memory (to IOB). */
+ uint64_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_3 : 4;
+ uint64_t mask : 49; /**< [ 52: 4](R/W) The value to be ANDed with the address sent to memory (to IOB). */
+ uint64_t reserved_53_63 : 11;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_pemx_bar2_mask_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t mask : 45; /**< [ 48: 4](R/W) The value to be ANDed with the address sent to memory. */
+ uint64_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_3 : 4;
+ uint64_t mask : 45; /**< [ 48: 4](R/W) The value to be ANDed with the address sent to memory. */
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_pemx_bar2_mask_cn81xx cn88xx; */
+ struct bdk_pemx_bar2_mask_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t mask : 45; /**< [ 48: 4](R/W) The value to be ANDed with the address sent to memory (to IOB). */
+ uint64_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_3 : 4;
+ uint64_t mask : 45; /**< [ 48: 4](R/W) The value to be ANDed with the address sent to memory (to IOB). */
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pemx_bar2_mask bdk_pemx_bar2_mask_t;
+
+static inline uint64_t BDK_PEMX_BAR2_MASK(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_BAR2_MASK(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x87e0c00000b0ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c00000b0ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0c00000b0ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000040ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_BAR2_MASK", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_BAR2_MASK(a) bdk_pemx_bar2_mask_t
+#define bustype_BDK_PEMX_BAR2_MASK(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_BAR2_MASK(a) "PEMX_BAR2_MASK"
+#define device_bar_BDK_PEMX_BAR2_MASK(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_BAR2_MASK(a) (a)
+#define arguments_BDK_PEMX_BAR2_MASK(a) (a),-1,-1,-1
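+
+/*
+ * Editorial sketch of the masking behavior described above (the low-order
+ * bit handling is an assumption): the hardware ANDs the inbound PCIe
+ * address with [MASK], which covers address bits <52:4> on CN9XXX.
+ */
+static inline uint64_t pemx_bar2_effective_addr(uint64_t pcie_addr,
+                                                bdk_pemx_bar2_mask_t mask)
+{
+    uint64_t m = (uint64_t)mask.s.mask << 4;    /* re-align [MASK] to bits <52:4> */
+    return (pcie_addr & m) | (pcie_addr & 0xf); /* assume bits <3:0> pass through */
+}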
+
+/**
+ * Register (NCB) pem#_bar4_index#
+ *
+ * PEM BAR4 Index 0-15 Register
+ * This register contains the address index and control bits for access to memory ranges of BAR4.
+ * The index is built from the PCI inbound address \<25:22\>. The bits in this register only apply to
+ * inbound accesses targeting the NCB bus in both RC and EP modes; this register is ignored
+ * when PEM()_EBUS_CTL[PF_BAR4_SEL] is set.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_bar4_indexx
+{
+ uint64_t u;
+ struct bdk_pemx_bar4_indexx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_35_63 : 29;
+ uint64_t addr_idx : 31; /**< [ 34: 4](R/W) Address index. IOVA \<52:22\> sent to NCB. */
+ uint64_t ca : 1; /**< [ 3: 3](R/W) Cached. Set to 1 when access is not to be cached in LLC. */
+ uint64_t reserved_1_2 : 2;
+ uint64_t addr_v : 1; /**< [ 0: 0](R/W) Address valid. Set to 1 when the selected address range is valid. */
+#else /* Word 0 - Little Endian */
+ uint64_t addr_v : 1; /**< [ 0: 0](R/W) Address valid. Set to 1 when the selected address range is valid. */
+ uint64_t reserved_1_2 : 2;
+ uint64_t ca : 1; /**< [ 3: 3](R/W) Cached. Set to 1 when access is not to be cached in LLC. */
+ uint64_t addr_idx : 31; /**< [ 34: 4](R/W) Address index. IOVA \<52:22\> sent to NCB. */
+ uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_bar4_indexx_s cn; */
+};
+typedef union bdk_pemx_bar4_indexx bdk_pemx_bar4_indexx_t;
+
+static inline uint64_t BDK_PEMX_BAR4_INDEXX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_BAR4_INDEXX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=3) && (b<=15)))
+ return 0x8e0000000700ll + 0x1000000000ll * ((a) & 0x3) + 8ll * ((b) & 0xf);
+ __bdk_csr_fatal("PEMX_BAR4_INDEXX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_PEMX_BAR4_INDEXX(a,b) bdk_pemx_bar4_indexx_t
+#define bustype_BDK_PEMX_BAR4_INDEXX(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_BAR4_INDEXX(a,b) "PEMX_BAR4_INDEXX"
+#define device_bar_BDK_PEMX_BAR4_INDEXX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_BAR4_INDEXX(a,b) (a)
+#define arguments_BDK_PEMX_BAR4_INDEXX(a,b) (a),(b),-1,-1
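+
+/*
+ * Editorial sketch: per the description above, the BAR4 index entry
+ * consulted for an inbound hit comes from PCI address bits <25:22>
+ * (one of 16 entries).
+ */
+static inline unsigned int pemx_bar4_index_of(uint64_t pci_addr)
+{
+    return (pci_addr >> 22) & 0xf;
+}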
+
+/**
+ * Register (NCB) pem#_bar_ctl
+ *
+ * PEM BAR Control Register
+ * This register contains control for BAR accesses. This control always
+ * applies to memory accesses targeting the NCBI bus. Some of the fields also
+ * apply to accesses targeting EBUS in RC mode only; see the individual field
+ * descriptions for more detail.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_bar_ctl
+{
+ uint64_t u;
+ struct bdk_pemx_bar_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_38_63 : 26;
+ uint64_t vf_bar4_enb : 1; /**< [ 37: 37](R/W) This bit controls whether BAR4 for all virtual functions is enabled.
+
+ In RC mode:
+ * VF BAR4 does not exist. This bit has no effect.
+
+ In EP mode:
+
+ * VF BAR4 hits are based on a combination of this bit and
+ config registers PCIEEP_SRIOV_BAR4U, PCIEEP_SRIOV_BAR4L, and
+ PCIEEP_SRIOV_CTL[VFE]. Both PCIEEP_SRIOV_CTL[VFE] and this
+ bit must be set to enable a VF BAR4 hit to the PCI address
+ specified by PCIEEP_SRIOV_BAR4U / PCIEEP_SRIOV_BAR4L. */
+ uint64_t vf_bar2_enb : 1; /**< [ 36: 36](R/W) This bit controls whether BAR2 for all virtual functions is enabled.
+
+ In RC mode:
+ * VF BAR2 does not exist. This bit has no effect.
+
+ In EP mode:
+
+ * VF BAR2 hits are based on a combination of this bit and
+ config registers PCIEEP_SRIOV_BAR2U, PCIEEP_SRIOV_BAR2L, and
+ PCIEEP_SRIOV_CTL[VFE]. Both PCIEEP_SRIOV_CTL[VFE] and this
+ bit must be set to enable a VF BAR2 hit to the PCI address
+ specified by PCIEEP_SRIOV_BAR2U / PCIEEP_SRIOV_BAR2L. */
+ uint64_t vf_bar0_enb : 1; /**< [ 35: 35](R/W) This bit controls whether BAR0 for all virtual functions is enabled.
+
+ In RC mode:
+ * VF BAR0 does not exist. This bit has no effect.
+
+ In EP mode:
+
+ * VF BAR0 hits are based on a combination of this bit and
+ config registers PCIEEP_SRIOV_BAR0U, PCIEEP_SRIOV_BAR0L, and
+ PCIEEP_SRIOV_CTL[VFE]. Both PCIEEP_SRIOV_CTL[VFE] and this
+ bit must be set to enable a VF BAR0 hit to the PCI address
+ specified by PCIEEP_SRIOV_BAR0U / PCIEEP_SRIOV_BAR0L. */
+ uint64_t erom_siz : 3; /**< [ 34: 32](R/W) PCIe EROM BAR size. Used to mask off upper bits of address
+ when sending to NCBI or via private EROM interface to MIO.
+
+ 0x0 = Reserved.
+ 0x1 = 64 KB; 2^16.
+ 0x2 = 128 KB; 2^17.
+ 0x3 = 256 KB; 2^18.
+ 0x4 = 512 KB; 2^19.
+ 0x5 = 1 MB; 2^20.
+ 0x6 = 2 MB; 2^21.
+ 0x7 = 4 MB; 2^22. */
+ uint64_t bar4_enb : 1; /**< [ 31: 31](R/W) In RC mode:
+ 0 = BAR4 access will cause UR responses. This applies no
+ matter the value of PEM()_EBUS_CTL[PF_BAR4_SEL].
+ 1 = BAR4 is enabled and will respond if the corresponding
+ bits in PEM()_BAR4_INDEX() are set and the address matches
+ an enabled indexed address range.
+
+ In EP mode:
+
+ * If PEM()_EBUS_CTL[PF_BAR4_SEL] is set, BAR4 hits are based on
+ a combination of this bit and config registers PCIEEP_BAR4U / PCIEEP_BAR4L.
+ Both enable bits must be set to enable a BAR4 hit.
+ * If PEM()_EBUS_CTL[PF_BAR4_SEL] is clear, BAR4 hits are based
+ on a combination of this bit, the config registers PCIEEP_BAR4U /
+ PCIEEP_BAR4L, and the PEM()_BAR4_INDEX() registers.
+ Both enable bits must be set along with the appropriate bits in
+ PEM()_BAR4_INDEX() in order for a BAR4 access to respond. */
+ uint64_t bar0_siz : 5; /**< [ 30: 26](R/W) PCIe BAR0 size.
+ 0x0 = Reserved.
+ 0x1 = 64 KB; 2^16.
+ 0x2 = 128 KB; 2^17.
+ 0x3 = 256 KB; 2^18.
+ 0x4 = 512 KB; 2^19.
+ 0x5 = 1 MB; 2^20.
+ 0x6 = 2 MB; 2^21.
+ 0x7 = 4 MB; 2^22.
+ 0x8 = 8 MB; 2^23.
+ 0x9 = 16 MB; 2^24.
+ 0xA = 32 MB; 2^25.
+ 0xB = 64 MB; 2^26.
+ 0xC - 0x1F = Reserved. */
+ uint64_t bar0_enb : 1; /**< [ 25: 25](R/W) In RC mode:
+ 0 = BAR0 access will cause UR responses. This applies no
+ matter the value of PEM()_EBUS_CTL[PF_BAR0_SEL].
+ 1 = BAR0 is enabled and will respond.
+
+ In EP mode:
+
+ * BAR0 hits are based on a combination of this bit and
+ config registers PCIEEP_BAR0U / PCIEEP_BAR0L. Both enable
+ bits must be set to enable a BAR0 hit. */
+ uint64_t bar2_ebit : 6; /**< [ 24: 19](R/W) Address bits for ESX\<1:0\> in a PCIe BAR2 address.
+
+ When [BAR2_EBIT] is zero, a PCIe BAR2 address does not contain an ESX\<1:0\> field,
+ and [BAR2_ESX] is the endian-swap used for all BAR2 requests.
+
+ When [BAR2_EBIT] is non-zero, it places ESX\<1:0\> (ESX\<0\> is at PCIe BAR2 address bit
+ [BAR2_EBIT], and ESX\<1\> is at PCIe BAR2 address bit [BAR2_EBIT]+1). [BAR2_EBIT] must
+ be in the range 16 .. [BAR2_SIZ]+18 and must not conflict with a non-zero
+ [BAR2_CBIT] in this case. [BAR2_ESX] XOR ESX\<1:0\> is the endian-swap
+ used for BAR2 requests in this case. If [BAR2_EBIT] \<= 48 in this case, then
+ one or two PCIe BAR2 address ESX field bit(s) are in the address forwarded to
+ SLI and the SMMU, in the same position. */
+ uint64_t bar2_cbit : 6; /**< [ 18: 13](R/W) Address bit for CAX in a PCIe BAR2 address.
+
+ When [BAR2_CBIT] is zero, a PCIe BAR2 address does not contain a CAX bit,
+ and [BAR2_CAX] is the cache allocation for all BAR2 requests.
+
+ When [BAR2_CBIT] is non-zero, the CAX bit is at bit [BAR2_CBIT] in the PCIe
+ BAR2 address. [BAR2_CBIT] must be in the range 16 .. [BAR2_SIZ]+19 and must
+ not conflict with a non-zero [BAR2_EBIT] in this case. [BAR2_CBIT] XOR CAX is
+ the cache allocation for BAR2 requests. If [BAR2_CBIT] \<= 48 in this
+ case, then the PCIe BAR2 address CAX bit is in the address forwarded to
+ SLI and the SMMU, in the same position. */
+ uint64_t bar2_siz : 6; /**< [ 12: 7](R/W) BAR2 size. Encoded identically to PCIEEP()_CFG190[RBARS]. Resets to 0x1D (512 TB).
+
+ In EP mode, [BAR2_SIZ] must equal the corresponding PCIEEP()_CFG190[RBARS].
+
+ In RC mode, [BAR2_SIZ] determines the PEM()_P2N_BAR2_START[ADDR] bits used/compared
+ to an incoming PCIe address.
+
+ On a BAR2 match, PEM zeroes the PCIe address bits outside [BAR2_SIZ], applies
+ [BAR2_EBIT,BAR2_CBIT], and forwards the request to SLI and the SMMU. */
+ uint64_t reserved_4_6 : 3;
+ uint64_t bar2_enb : 1; /**< [ 3: 3](R/W) When set to 1, BAR2 is enabled and will respond; when clear, BAR2 access will cause UR responses. */
+ uint64_t bar2_esx : 2; /**< [ 2: 1](R/W) Value is XORed with PCIe addresses as defined by [BAR2_EBIT] to determine the
+ endian swap mode. */
+ uint64_t bar2_cax : 1; /**< [ 0: 0](R/W) Value is XORed with PCIe address \<49\> to determine the L2 cache attribute. Not cached in
+ L2 if XOR result is 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t bar2_cax : 1; /**< [ 0: 0](R/W) Value is XORed with PCIe address \<49\> to determine the L2 cache attribute. Not cached in
+ L2 if XOR result is 1. */
+ uint64_t bar2_esx : 2; /**< [ 2: 1](R/W) Value is XORed with PCIe addresses as defined by [BAR2_EBIT] to determine the
+ endian swap mode. */
+ uint64_t bar2_enb : 1; /**< [ 3: 3](R/W) When set to 1, BAR2 is enabled and will respond; when clear, BAR2 access will cause UR responses. */
+ uint64_t reserved_4_6 : 3;
+ uint64_t bar2_siz : 6; /**< [ 12: 7](R/W) BAR2 size. Encoded identically to PCIEEP()_CFG190[RBARS]. Resets to 0x1D (512 TB).
+
+ In EP mode, [BAR2_SIZ] must equal the corresponding PCIEEP()_CFG190[RBARS].
+
+ In RC mode, [BAR2_SIZ] determines the PEM()_P2N_BAR2_START[ADDR] bits used/compared
+ to an incoming PCIe address.
+
+ On a BAR2 match, PEM zeroes the PCIe address bits outside [BAR2_SIZ], applies
+ [BAR2_EBIT,BAR2_CBIT], and forwards the request to SLI and the SMMU. */
+ uint64_t bar2_cbit : 6; /**< [ 18: 13](R/W) Address bit for CAX in a PCIe BAR2 address.
+
+ When [BAR2_CBIT] is zero, a PCIe BAR2 address does not contain a CAX bit,
+ and [BAR2_CAX] is the cache allocation for all BAR2 requests.
+
+ When [BAR2_CBIT] is non-zero, the CAX bit is at bit [BAR2_CBIT] in the PCIe
+ BAR2 address. [BAR2_CBIT] must be in the range 16 .. [BAR2_SIZ]+19 and must
+ not conflict with a non-zero [BAR2_EBIT] in this case. [BAR2_CBIT] XOR CAX is
+ the cache allocation for BAR2 requests. If [BAR2_CBIT] \<= 48 in this
+ case, then the PCIe BAR2 address CAX bit is in the address forwarded to
+ SLI and the SMMU, in the same position. */
+ uint64_t bar2_ebit : 6; /**< [ 24: 19](R/W) Address bits for ESX\<1:0\> in a PCIe BAR2 address.
+
+ When [BAR2_EBIT] is zero, a PCIe BAR2 address does not contain an ESX\<1:0\> field,
+ and [BAR2_ESX] is the endian-swap used for all BAR2 requests.
+
+ When [BAR2_EBIT] is non-zero, it places ESX\<1:0\> (ESX\<0\> is at PCIe BAR2 address bit
+ [BAR2_EBIT], and ESX\<1\> is at PCIe BAR2 address bit [BAR2_EBIT]+1). [BAR2_EBIT] must
+ be in the range 16 .. [BAR2_SIZ]+18 and must not conflict with a non-zero
+ [BAR2_CBIT] in this case. [BAR2_ESX] XOR ESX\<1:0\> is the endian-swap
+ used for BAR2 requests in this case. If [BAR2_EBIT] \<= 48 in this case, then
+ one or two PCIe BAR2 address ESX field bit(s) are in the address forwarded to
+ SLI and the SMMU, in the same position. */
+ uint64_t bar0_enb : 1; /**< [ 25: 25](R/W) In RC mode:
+ 0 = BAR0 access will cause UR responses. This applies no
+ matter the value of PEM()_EBUS_CTL[PF_BAR0_SEL].
+ 1 = BAR0 is enabled and will respond.
+
+ In EP mode:
+
+ * BAR0 hits are based on a combination of this bit and
+ config registers PCIEEP_BAR0U / PCIEEP_BAR0L. Both enable
+ bits must be set to enable a BAR0 hit. */
+ uint64_t bar0_siz : 5; /**< [ 30: 26](R/W) PCIe BAR0 size.
+ 0x0 = Reserved.
+ 0x1 = 64 KB; 2^16.
+ 0x2 = 128 KB; 2^17.
+ 0x3 = 256 KB; 2^18.
+ 0x4 = 512 KB; 2^19.
+ 0x5 = 1 MB; 2^20.
+ 0x6 = 2 MB; 2^21.
+ 0x7 = 4 MB; 2^22.
+ 0x8 = 8 MB; 2^23.
+ 0x9 = 16 MB; 2^24.
+ 0xA = 32 MB; 2^25.
+ 0xB = 64 MB; 2^26.
+ 0xC - 0x1F = Reserved. */
+ uint64_t bar4_enb : 1; /**< [ 31: 31](R/W) In RC mode:
+ 0 = BAR4 access will cause UR responses. This applies no
+ matter the value of PEM()_EBUS_CTL[PF_BAR4_SEL].
+ 1 = BAR4 is enabled and will respond if the corresponding
+ bits in PEM()_BAR4_INDEX() are set and the address matches
+ an enabled indexed address range.
+
+ In EP mode:
+
+ * If PEM()_EBUS_CTL[PF_BAR4_SEL] is set, BAR4 hits are based on
+ a combination of this bit and config registers PCIEEP_BAR4U / PCIEEP_BAR4L.
+ Both enable bits must be set to enable a BAR4 hit.
+ * If PEM()_EBUS_CTL[PF_BAR4_SEL] is clear, BAR4 hits are based
+ on a combination of this bit, the config registers PCIEEP_BAR4U /
+ PCIEEP_BAR4L, and the PEM()_BAR4_INDEX() registers.
+ Both enable bits must be set along with the appropriate bits in
+ PEM()_BAR4_INDEX() in order for a BAR4 access to respond. */
+ uint64_t erom_siz : 3; /**< [ 34: 32](R/W) PCIe EROM BAR size. Used to mask off upper bits of address
+ when sending to NCBI or via private EROM interface to MIO.
+
+ 0x0 = Reserved.
+ 0x1 = 64 KB; 2^16.
+ 0x2 = 128 KB; 2^17.
+ 0x3 = 256 KB; 2^18.
+ 0x4 = 512 KB; 2^19.
+ 0x5 = 1 MB; 2^20.
+ 0x6 = 2 MB; 2^21.
+ 0x7 = 4 MB; 2^22. */
+ uint64_t vf_bar0_enb : 1; /**< [ 35: 35](R/W) This bit controls whether BAR0 for all virtual functions is enabled.
+
+ In RC mode:
+ * VF BAR0 does not exist. This bit has no effect.
+
+ In EP mode:
+
+ * VF BAR0 hits are based on a combination of this bit and
+ config registers PCIEEP_SRIOV_BAR0U, PCIEEP_SRIOV_BAR0L, and
+ PCIEEP_SRIOV_CTL[VFE]. Both PCIEEP_SRIOV_CTL[VFE] and this
+ bit must be set to enable a VF BAR0 hit to the PCI address
+ specified by PCIEEP_SRIOV_BAR0U / PCIEEP_SRIOV_BAR0L. */
+ uint64_t vf_bar2_enb : 1; /**< [ 36: 36](R/W) This bit controls whether BAR2 for all virtual functions is enabled.
+
+ In RC mode:
+ * VF BAR2 does not exist. This bit has no effect.
+
+ In EP mode:
+
+ * VF BAR2 hits are based on a combination of this bit and
+ config registers PCIEEP_SRIOV_BAR2U, PCIEEP_SRIOV_BAR2L, and
+ PCIEEP_SRIOV_CTL[VFE]. Both PCIEEP_SRIOV_CTL[VFE] and this
+ bit must be set to enable a VF BAR2 hit to the PCI address
+ specified by PCIEEP_SRIOV_BAR2U / PCIEEP_SRIOV_BAR2L. */
+ uint64_t vf_bar4_enb : 1; /**< [ 37: 37](R/W) This bit controls whether BAR4 for all virtual functions is enabled.
+
+ In RC mode:
+ * VF BAR4 does not exist. This bit has no effect.
+
+ In EP mode:
+
+ * VF BAR4 hits are based on a combination of this bit and
+ config registers PCIEEP_SRIOV_BAR4U, PCIEEP_SRIOV_BAR4L, and
+ PCIEEP_SRIOV_CTL[VFE]. Both PCIEEP_SRIOV_CTL[VFE] and this
+ bit must be set to enable a VF BAR4 hit to the PCI address
+ specified by PCIEEP_SRIOV_BAR4U / PCIEEP_SRIOV_BAR4L. */
+ uint64_t reserved_38_63 : 26;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pemx_bar_ctl_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_38_63 : 26;
+ uint64_t vf_bar4_enb : 1; /**< [ 37: 37](R/W) This bit controls whether BAR4 for all virtual functions is enabled.
+
+ In RC mode:
+ * VF BAR4 does not exist. This bit has no effect.
+
+ In EP mode:
+
+ * VF BAR4 hits are based on a combination of this bit and
+ config registers PCIEEP_SRIOV_BAR4U, PCIEEP_SRIOV_BAR4L, and
+ PCIEEP_SRIOV_CTL[VFE]. Both PCIEEP_SRIOV_CTL[VFE] and this
+ bit must be set to enable a VF BAR4 hit to the PCI address
+ specified by PCIEEP_SRIOV_BAR4U / PCIEEP_SRIOV_BAR4L. */
+ uint64_t vf_bar2_enb : 1; /**< [ 36: 36](R/W) This bit controls whether BAR2 for all virtual functions is enabled.
+
+ In RC mode:
+ * VF BAR2 does not exist. This bit has no effect.
+
+ In EP mode:
+
+ * VF BAR2 hits are based on a combination of this bit and
+ config registers PCIEEP_SRIOV_BAR2U, PCIEEP_SRIOV_BAR2L, and
+ PCIEEP_SRIOV_CTL[VFE]. Both PCIEEP_SRIOV_CTL[VFE] and this
+ bit must be set to enable a VF BAR2 hit to the PCI address
+ specified by PCIEEP_SRIOV_BAR2U / PCIEEP_SRIOV_BAR2L. */
+ uint64_t vf_bar0_enb : 1; /**< [ 35: 35](R/W) This bit controls whether BAR0 for all virtual functions is enabled.
+
+ In RC mode:
+ * VF BAR0 does not exist. This bit has no effect.
+
+ In EP mode:
+
+ * VF BAR0 hits are based on a combination of this bit and
+ config registers PCIEEP_SRIOV_BAR0U, PCIEEP_SRIOV_BAR0L, and
+ PCIEEP_SRIOV_CTL[VFE]. Both PCIEEP_SRIOV_CTL[VFE] and this
+ bit must be set to enable a VF BAR0 hit to the PCI address
+ specified by PCIEEP_SRIOV_BAR0U / PCIEEP_SRIOV_BAR0L. */
+ uint64_t erom_siz : 3; /**< [ 34: 32](R/W) PCIe EROM BAR size. Used to mask off upper bits of address
+ when sending to NCBI or via private EROM interface to MIO.
+
+ 0x0 = Reserved.
+ 0x1 = 64 KB; 2^16.
+ 0x2 = 128 KB; 2^17.
+ 0x3 = 256 KB; 2^18.
+ 0x4 = 512 KB; 2^19.
+ 0x5 = 1 MB; 2^20.
+ 0x6 = 2 MB; 2^21.
+ 0x7 = 4 MB; 2^22. */
+ uint64_t bar4_enb : 1; /**< [ 31: 31](R/W) In RC mode:
+ 0 = BAR4 access will cause UR responses. This applies no
+ matter the value of PEM()_EBUS_CTL[PF_BAR4_SEL].
+ 1 = BAR4 is enabled and will respond if the corresponding
+ bits in PEM()_BAR4_INDEX() are set and the address matches
+ an enabled indexed address range.
+
+ In EP mode:
+
+ * If PEM()_EBUS_CTL[PF_BAR4_SEL] is set, BAR4 hits are based on
+ a combination of this bit and config registers PCIEEP_BAR4U / PCIEEP_BAR4L.
+ Both enable bits must be set to enable a BAR4 hit.
+ * If PEM()_EBUS_CTL[PF_BAR4_SEL] is clear, BAR4 hits are based
+ on a combination of this bit, the config registers PCIEEP_BAR4U /
+ PCIEEP_BAR4L, and the PEM()_BAR4_INDEX() registers.
+ Both enable bits must be set along with the appropriate bits in
+ PEM()_BAR4_INDEX() in order for a BAR4 access to respond. */
+ uint64_t bar0_siz : 5; /**< [ 30: 26](R/W) PCIe BAR0 size.
+ 0x0 = Reserved.
+ 0x1 = 64 KB; 2^16.
+ 0x2 = 128 KB; 2^17.
+ 0x3 = 256 KB; 2^18.
+ 0x4 = 512 KB; 2^19.
+ 0x5 = 1 MB; 2^20.
+ 0x6 = 2 MB; 2^21.
+ 0x7 = 4 MB; 2^22.
+ 0x8 = 8 MB; 2^23.
+ 0x9 = 16 MB; 2^24.
+ 0xA = 32 MB; 2^25.
+ 0xB = 64 MB; 2^26.
+ 0xC - 0x1F = Reserved. */
+ uint64_t bar0_enb : 1; /**< [ 25: 25](R/W) In RC mode:
+ 0 = BAR0 access will cause UR responses. This applies no
+ matter the value of PEM()_EBUS_CTL[PF_BAR0_SEL].
+ 1 = BAR0 is enabled and will respond.
+
+ In EP mode:
+
+ * BAR0 hits are based on a combination of this bit and
+ config registers PCIEEP_BAR0U / PCIEEP_BAR0L. Both enable
+ bits must be set to enable a BAR0 hit. */
+ uint64_t reserved_19_24 : 6;
+ uint64_t bar2_cbit : 6; /**< [ 18: 13](R/W) Address bit to be mapped to BAR2's CAX. When 0x0, BAR2's CAX is disabled;
+ otherwise must be 16 to 63 inclusive. Not used if PEM()_EBUS_CTL[PF_BAR2_SEL]
+ is set. */
+ uint64_t bar2_siz : 6; /**< [ 12: 7](R/W) BAR2 size. Encoded similarly to PCIEEP_RBAR_CTL[RBARS]. Used in RC mode to create
+ a mask that is ANDed with the address prior to applying
+ [BAR2_CAX]. Defaults to 0x21 (8192 TB). */
+ uint64_t bar4_siz : 3; /**< [ 6: 4](R/W) PCIe Port 0 BAR4 size.
+ 0x0 = Reserved.
+ 0x1 = 64 MB; 2^26.
+ 0x2 = 128 MB; 2^27.
+ 0x3 = 256 MB; 2^28.
+ 0x4 = 512 MB; 2^29.
+ 0x5 = 1024 MB; 2^30.
+ 0x6 = 2048 MB; 2^31.
+ 0x7 = Reserved. */
+ uint64_t bar2_enb : 1; /**< [ 3: 3](R/W) In RC mode:
+ 0 = BAR2 access will cause UR responses. This applies no
+ matter the value of PEM()_EBUS_CTL[PF_BAR2_SEL].
+ 1 = BAR2 is enabled and will respond.
+
+ In EP mode:
+
+ * BAR2 hits are based on a combination of this bit and
+ config registers PCIEEP_BAR2U / PCIEEP_BAR2L. Both enable
+ bits must be set to enable a BAR2 hit. */
+ uint64_t reserved_1_2 : 2;
+ uint64_t bar2_cax : 1; /**< [ 0: 0](R/W) Value is XORed with PCIe address as defined by [BAR2_CBIT] to determine the LLC
+ cache attribute. Not cached in LLC if XOR result is 1. Not used if PEM()_EBUS_CTL[PF_BAR2_SEL]
+ is set. */
+#else /* Word 0 - Little Endian */
+ uint64_t bar2_cax : 1; /**< [ 0: 0](R/W) Value is XORed with PCIe address as defined by [BAR2_CBIT] to determine the LLC
+ cache attribute. Not cached in LLC if XOR result is 1. Not used if PEM()_EBUS_CTL[PF_BAR2_SEL]
+ is set. */
+ uint64_t reserved_1_2 : 2;
+ uint64_t bar2_enb : 1; /**< [ 3: 3](R/W) In RC mode:
+ 0 = BAR2 access will cause UR responses. This applies no
+ matter the value of PEM()_EBUS_CTL[PF_BAR2_SEL].
+ 1 = BAR2 is enabled and will respond.
+
+ In EP mode:
+
+ * BAR2 hits are based on a combination of this bit and
+ config registers PCIEEP_BAR2U / PCIEEP_BAR2L. Both enable
+ bits must be set to enable a BAR2 hit. */
+ uint64_t bar4_siz : 3; /**< [ 6: 4](R/W) PCIe Port 0 BAR4 size.
+ 0x0 = Reserved.
+ 0x1 = 64 MB; 2^26.
+ 0x2 = 128 MB; 2^27.
+ 0x3 = 256 MB; 2^28.
+ 0x4 = 512 MB; 2^29.
+ 0x5 = 1024 MB; 2^30.
+ 0x6 = 2048 MB; 2^31.
+ 0x7 = Reserved. */
+ uint64_t bar2_siz : 6; /**< [ 12: 7](R/W) BAR2 size. Encoded similarly to PCIEEP_RBAR_CTL[RBARS]. Used in RC mode to create
+ a mask that is ANDed with the address prior to applying
+ [BAR2_CAX]. Defaults to 0x21 (8192 TB). */
+ uint64_t bar2_cbit : 6; /**< [ 18: 13](R/W) Address bit to be mapped to BAR2's CAX. When 0x0, BAR2's CAX is disabled;
+ otherwise must be 16 to 63 inclusive. Not used if PEM()_EBUS_CTL[PF_BAR2_SEL]
+ is set. */
+ uint64_t reserved_19_24 : 6;
+ uint64_t bar0_enb : 1; /**< [ 25: 25](R/W) In RC mode:
+ 0 = BAR0 access will cause UR responses. This applies no
+ matter the value of PEM()_EBUS_CTL[PF_BAR0_SEL].
+ 1 = BAR0 is enabled and will respond.
+
+ In EP mode:
+
+ * BAR0 hits are based on a combination of this bit and
+ config registers PCIEEP_BAR0U / PCIEEP_BAR0L. Both enable
+ bits must be set to enable a BAR0 hit. */
+ uint64_t bar0_siz : 5; /**< [ 30: 26](R/W) PCIe BAR0 size.
+ 0x0 = Reserved.
+ 0x1 = 64 KB; 2^16.
+ 0x2 = 128 KB; 2^17.
+ 0x3 = 256 KB; 2^18.
+ 0x4 = 512 KB; 2^19.
+ 0x5 = 1 MB; 2^20.
+ 0x6 = 2 MB; 2^21.
+ 0x7 = 4 MB; 2^22.
+ 0x8 = 8 MB; 2^23.
+ 0x9 = 16 MB; 2^24.
+ 0xA = 32 MB; 2^25.
+ 0xB = 64 MB; 2^26.
+ 0xC - 0x1F = Reserved. */
+ uint64_t bar4_enb : 1; /**< [ 31: 31](R/W) In RC mode:
+ 0 = BAR4 access will cause UR responses. This applies no
+ matter the value of PEM()_EBUS_CTL[PF_BAR4_SEL].
+ 1 = BAR4 is enabled and will respond if the corresponding
+ bits in PEM()_BAR4_INDEX() are set and the address matches
+ an enabled indexed address range.
+
+ In EP mode:
+
+ * If PEM()_EBUS_CTL[PF_BAR4_SEL] is set, BAR4 hits are based on
+ a combination of this bit and config registers PCIEEP_BAR4U / PCIEEP_BAR4L.
+ Both enable bits must be set to enable a BAR4 hit.
+ * If PEM()_EBUS_CTL[PF_BAR4_SEL] is clear, BAR4 hits are based
+ on a combination of this bit, the config registers PCIEEP_BAR4U /
+ PCIEEP_BAR4L, and the PEM()_BAR4_INDEX() registers.
+ Both enable bits must be set along with the appropriate bits in
+ PEM()_BAR4_INDEX() in order for a BAR4 access to respond. */
+ uint64_t erom_siz : 3; /**< [ 34: 32](R/W) PCIe EROM BAR size. Used to mask off upper bits of address
+ when sending to NCBI or via private EROM interface to MIO.
+
+ 0x0 = Reserved.
+ 0x1 = 64 KB; 2^16.
+ 0x2 = 128 KB; 2^17.
+ 0x3 = 256 KB; 2^18.
+ 0x4 = 512 KB; 2^19.
+ 0x5 = 1 MB; 2^20.
+ 0x6 = 2 MB; 2^21.
+ 0x7 = 4 MB; 2^22. */
+ uint64_t vf_bar0_enb : 1; /**< [ 35: 35](R/W) This bit controls whether BAR0 for all virtual functions is enabled.
+
+ In RC mode:
+ * VF BAR0 does not exist. This bit has no effect.
+
+ In EP mode:
+
+ * VF BAR0 hits are based on a combination of this bit and
+ config registers PCIEEP_SRIOV_BAR0U, PCIEEP_SRIOV_BAR0L, and
+ PCIEEP_SRIOV_CTL[VFE]. Both PCIEEP_SRIOV_CTL[VFE] and this
+ bit must be set to enable a VF BAR0 hit to the PCI address
+ specified by PCIEEP_SRIOV_BAR0U / PCIEEP_SRIOV_BAR0L. */
+ uint64_t vf_bar2_enb : 1; /**< [ 36: 36](R/W) This bit controls whether BAR2 for all virtual functions is enabled.
+
+ In RC mode:
+ * VF BAR2 does not exist. This bit has no effect.
+
+ In EP mode:
+
+ * VF BAR2 hits are based on a combination of this bit and
+ config registers PCIEEP_SRIOV_BAR2U, PCIEEP_SRIOV_BAR2L, and
+ PCIEEP_SRIOV_CTL[VFE]. Both PCIEEP_SRIOV_CTL[VFE] and this
+ bit must be set to enable a VF BAR2 hit to the PCI address
+ specified by PCIEEP_SRIOV_BAR2U / PCIEEP_SRIOV_BAR2L. */
+ uint64_t vf_bar4_enb : 1; /**< [ 37: 37](R/W) This bit controls whether BAR4 for all virtual functions is enabled.
+
+ In RC mode:
+ * VF BAR4 does not exist. This bit has no effect.
+
+ In EP mode:
+
+ * VF BAR4 hits are based on a combination of this bit and
+ config registers PCIEEP_SRIOV_BAR4U, PCIEEP_SRIOV_BAR4L, and
+ PCIEEP_SRIOV_CTL[VFE]. Both PCIEEP_SRIOV_CTL[VFE] and this
+ bit must be set to enable a VF BAR4 hit to the PCI address
+ specified by PCIEEP_SRIOV_BAR4U / PCIEEP_SRIOV_BAR4L. */
+ uint64_t reserved_38_63 : 26;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_pemx_bar_ctl_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_7_63 : 57;
+ uint64_t bar1_siz : 3; /**< [ 6: 4](R/W) PCIe Port 0 BAR1 size.
+ 0x0 = Reserved.
+ 0x1 = 64 MB; 2^26.
+ 0x2 = 128 MB; 2^27.
+ 0x3 = 256 MB; 2^28.
+ 0x4 = 512 MB; 2^29.
+ 0x5 = 1024 MB; 2^30.
+ 0x6 = 2048 MB; 2^31.
+ 0x7 = Reserved. */
+ uint64_t bar2_enb : 1; /**< [ 3: 3](R/W) When set to 1, BAR2 is enabled and will respond; when clear, BAR2 access will cause UR responses. */
+ uint64_t reserved_1_2 : 2;
+ uint64_t bar2_cax : 1; /**< [ 0: 0](R/W) Value is XORed with PCIe address \<49\> to determine the L2 cache attribute. Not cached in
+ L2 if XOR result is 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t bar2_cax : 1; /**< [ 0: 0](R/W) Value is XORed with PCIe address \<49\> to determine the L2 cache attribute. Not cached in
+ L2 if XOR result is 1. */
+ uint64_t reserved_1_2 : 2;
+ uint64_t bar2_enb : 1; /**< [ 3: 3](R/W) When set to 1, BAR2 is enabled and will respond; when clear, BAR2 access will cause UR responses. */
+ uint64_t bar1_siz : 3; /**< [ 6: 4](R/W) PCIe Port 0 BAR1 size.
+ 0x0 = Reserved.
+ 0x1 = 64 MB; 2^26.
+ 0x2 = 128 MB; 2^27.
+ 0x3 = 256 MB; 2^28.
+ 0x4 = 512 MB; 2^29.
+ 0x5 = 1024 MB; 2^30.
+ 0x6 = 2048 MB; 2^31.
+ 0x7 = Reserved. */
+ uint64_t reserved_7_63 : 57;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_pemx_bar_ctl_cn81xx cn88xx; */
+ struct bdk_pemx_bar_ctl_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_25_63 : 39;
+ uint64_t bar2_ebit : 6; /**< [ 24: 19](R/W) Address bits for ESX\<1:0\> in a PCIe BAR2 address.
+
+ When [BAR2_EBIT] is zero, a PCIe BAR2 address does not contain an ESX\<1:0\> field,
+ and [BAR2_ESX] is the endian-swap used for all BAR2 requests.
+
+ When [BAR2_EBIT] is non-zero, it places ESX\<1:0\> (ESX\<0\> is at PCIe BAR2 address bit
+ [BAR2_EBIT], and ESX\<1\> is at PCIe BAR2 address bit [BAR2_EBIT]+1). [BAR2_EBIT] must
+ be in the range 16 .. [BAR2_SIZ]+18 and must not conflict with a non-zero
+ [BAR2_CBIT] in this case. [BAR2_ESX] XOR ESX\<1:0\> is the endian-swap
+ used for BAR2 requests in this case. If [BAR2_EBIT] \<= 48 in this case, then
+ one or two PCIe BAR2 address ESX field bit(s) are in the address forwarded to
+ SLI and the SMMU, in the same position. */
+ uint64_t bar2_cbit : 6; /**< [ 18: 13](R/W) Address bit for CAX in a PCIe BAR2 address.
+
+ When [BAR2_CBIT] is zero, a PCIe BAR2 address does not contain a CAX bit,
+ and [BAR2_CAX] is the cache allocation for all BAR2 requests.
+
+ When [BAR2_CBIT] is non-zero, the CAX bit is at bit [BAR2_CBIT] in the PCIe
+ BAR2 address. [BAR2_CBIT] must be in the range 16 .. [BAR2_SIZ]+19 and must
+ not conflict with a non-zero [BAR2_EBIT] in this case. [BAR2_CBIT] XOR CAX is
+ the cache allocation for BAR2 requests. If [BAR2_CBIT] \<= 48 in this
+ case, then the PCIe BAR2 address CAX bit is in the address forwarded to
+ SLI and the SMMU, in the same position. */
+ uint64_t bar2_siz : 6; /**< [ 12: 7](R/W) BAR2 size. Encoded identically to PCIEEP()_CFG190[RBARS]. Resets to 0x1D (512 TB).
+
+ In EP mode, [BAR2_SIZ] must equal the corresponding PCIEEP()_CFG190[RBARS].
+
+ In RC mode, [BAR2_SIZ] determines the PEM()_P2N_BAR2_START[ADDR] bits used/compared
+ to an incoming PCIe address.
+
+ On a BAR2 match, PEM zeroes the PCIe address bits outside [BAR2_SIZ], applies
+ [BAR2_EBIT,BAR2_CBIT], and forwards the request to SLI and the SMMU. */
+ uint64_t bar1_siz : 3; /**< [ 6: 4](R/W) PCIe Port 0 BAR1 size.
+ 0x0 = Reserved.
+ 0x1 = 64 MB; 2^26.
+ 0x2 = 128 MB; 2^27.
+ 0x3 = 256 MB; 2^28.
+ 0x4 = 512 MB; 2^29.
+ 0x5 = 1024 MB; 2^30.
+ 0x6 = 2048 MB; 2^31.
+ 0x7 = Reserved. */
+ uint64_t bar2_enb : 1; /**< [ 3: 3](R/W) When set to 1, BAR2 is enabled and will respond; when clear, BAR2 access will cause UR responses. */
+ uint64_t bar2_esx : 2; /**< [ 2: 1](R/W) Value is XORed with PCIe addresses as defined by [BAR2_EBIT] to determine the
+ endian swap mode. */
+ uint64_t bar2_cax : 1; /**< [ 0: 0](R/W) Value is XORed with PCIe address as defined by [BAR2_CBIT] to determine the L2
+ cache attribute. Not cached in L2 if XOR result is 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t bar2_cax : 1; /**< [ 0: 0](R/W) Value is XORed with PCIe address as defined by [BAR2_CBIT] to determine the L2
+ cache attribute. Not cached in L2 if XOR result is 1. */
+ uint64_t bar2_esx : 2; /**< [ 2: 1](R/W) Value is XORed with PCIe addresses as defined by [BAR2_EBIT] to determine the
+ endian swap mode. */
+ uint64_t bar2_enb : 1; /**< [ 3: 3](R/W) When set to 1, BAR2 is enabled and will respond; when clear, BAR2 access will cause UR responses. */
+ uint64_t bar1_siz : 3; /**< [ 6: 4](R/W) PCIe Port 0 BAR1 size.
+ 0x0 = Reserved.
+ 0x1 = 64 MB; 2^26.
+ 0x2 = 128 MB; 2^27.
+ 0x3 = 256 MB; 2^28.
+ 0x4 = 512 MB; 2^29.
+ 0x5 = 1024 MB; 2^30.
+ 0x6 = 2048 MB; 2^31.
+ 0x7 = Reserved. */
+ uint64_t bar2_siz : 6; /**< [ 12: 7](R/W) BAR2 size. Encoded identically to PCIEEP()_CFG190[RBARS]. Resets to 0x1D (512 TB).
+
+ In EP mode, [BAR2_SIZ] must equal the corresponding PCIEEP()_CFG190[RBARS].
+
+ In RC mode, [BAR2_SIZ] determines the PEM()_P2N_BAR2_START[ADDR] bits used/compared
+ to an incoming PCIe address.
+
+ On a BAR2 match, PEM zeroes the PCIe address bits outside [BAR2_SIZ], applies
+ [BAR2_EBIT,BAR2_CBIT], and forwards the request to SLI and the SMMU. */
+ uint64_t bar2_cbit : 6; /**< [ 18: 13](R/W) Address bit for CAX in a PCIe BAR2 address.
+
+ When [BAR2_CBIT] is zero, a PCIe BAR2 address does not contain a CAX bit,
+ and [BAR2_CAX] is the cache allocation for all BAR2 requests.
+
+ When [BAR2_CBIT] is non-zero, the CAX bit is at bit [BAR2_CBIT] in the PCIe
+ BAR2 address. [BAR2_CBIT] must be in the range 16 .. [BAR2_SIZ]+19 and must
+ not conflict with a non-zero [BAR2_EBIT] in this case. [BAR2_CBIT] XOR CAX is
+ the cache allocation for BAR2 requests. If [BAR2_CBIT] \<= 48 in this
+ case, then the PCIe BAR2 address CAX bit is in the address forwarded to
+ SLI and the SMMU, in the same position. */
+ uint64_t bar2_ebit : 6; /**< [ 24: 19](R/W) Address bits for ESX\<1:0\> in a PCIe BAR2 address.
+
+ When [BAR2_EBIT] is zero, a PCIe BAR2 address does not contain an ESX\<1:0\> field,
+ and [BAR2_ESX] is the endian-swap used for all BAR2 requests.
+
+ When [BAR2_EBIT] is non-zero, it places ESX\<1:0\> (ESX\<0\> is at PCIe BAR2 address bit
+ [BAR2_EBIT], and ESX\<1\> is at PCIe BAR2 address bit [BAR2_EBIT]+1). [BAR2_EBIT] must
+ be in the range 16 .. [BAR2_SIZ]+18 and must not conflict with a non-zero
+ [BAR2_CBIT] in this case. [BAR2_ESX] XOR ESX\<1:0\> is the endian-swap
+ used for BAR2 requests in this case. If [BAR2_EBIT] \<= 48 in this case, then
+ one or two PCIe BAR2 address ESX field bit(s) are in the address forwarded to
+ SLI and the SMMU, in the same position. */
+ uint64_t reserved_25_63 : 39;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pemx_bar_ctl bdk_pemx_bar_ctl_t;
+
+static inline uint64_t BDK_PEMX_BAR_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_BAR_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x87e0c00000a8ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c00000a8ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0c00000a8ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000158ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_BAR_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_BAR_CTL(a) bdk_pemx_bar_ctl_t
+#define bustype_BDK_PEMX_BAR_CTL(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_BAR_CTL(a) "PEMX_BAR_CTL"
+#define device_bar_BDK_PEMX_BAR_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_BAR_CTL(a) (a)
+#define arguments_BDK_PEMX_BAR_CTL(a) (a),-1,-1,-1
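+
+/*
+ * Editorial sketch (CN83XX semantics, helper name illustrative): recovering
+ * the endian-swap mode for a BAR2 request from [BAR2_EBIT]/[BAR2_ESX] as
+ * described above: ESX<0> sits at PCIe address bit [BAR2_EBIT], ESX<1> at
+ * [BAR2_EBIT]+1, and the swap applied is [BAR2_ESX] XOR ESX<1:0>.
+ */
+static inline unsigned int pemx_bar2_endian_swap(uint64_t pcie_addr,
+                                                 bdk_pemx_bar_ctl_t ctl)
+{
+    unsigned int ebit = ctl.cn83xx.bar2_ebit;
+    unsigned int esx;
+
+    if (ebit == 0)
+        return ctl.cn83xx.bar2_esx; /* fixed swap for all BAR2 requests */
+    esx = (unsigned int)(pcie_addr >> ebit) & 0x3;
+    return ctl.cn83xx.bar2_esx ^ esx;
+}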
+
+/**
+ * Register (RSL) pem#_bist_status
+ *
+ * PEM BIST Status Register
+ * This register contains results from BIST runs of PEM's memories.
+ */
+union bdk_pemx_bist_status
+{
+ uint64_t u;
+ struct bdk_pemx_bist_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_10_63 : 54;
+ uint64_t tlpn_d0 : 1; /**< [ 9: 9](RO) BIST status for tlp_n_fifo_data0. */
+ uint64_t tlpn_d1 : 1; /**< [ 8: 8](RO) BIST status for tlp_n_fifo_data1. */
+ uint64_t tlpn_ctl : 1; /**< [ 7: 7](RO) BIST status for tlp_n_fifo_ctl. */
+ uint64_t tlpp_d0 : 1; /**< [ 6: 6](RO) BIST status for tlp_p_fifo_data0. */
+ uint64_t tlpp_d1 : 1; /**< [ 5: 5](RO) BIST status for tlp_p_fifo_data1. */
+ uint64_t tlpp_ctl : 1; /**< [ 4: 4](RO) BIST status for tlp_p_fifo_ctl. */
+ uint64_t tlpc_d0 : 1; /**< [ 3: 3](RO) BIST status for tlp_c_fifo_data0. */
+ uint64_t tlpc_d1 : 1; /**< [ 2: 2](RO) BIST status for tlp_c_fifo_data1. */
+ uint64_t tlpc_ctl : 1; /**< [ 1: 1](RO) BIST status for tlp_c_fifo_ctl. */
+ uint64_t m2s : 1; /**< [ 0: 0](RO) BIST status for m2s_fifo. */
+#else /* Word 0 - Little Endian */
+ uint64_t m2s : 1; /**< [ 0: 0](RO) BIST status for m2s_fifo. */
+ uint64_t tlpc_ctl : 1; /**< [ 1: 1](RO) BIST status for tlp_c_fifo_ctl. */
+ uint64_t tlpc_d1 : 1; /**< [ 2: 2](RO) BIST status for tlp_c_fifo_data1. */
+ uint64_t tlpc_d0 : 1; /**< [ 3: 3](RO) BIST status for tlp_c_fifo_data0. */
+ uint64_t tlpp_ctl : 1; /**< [ 4: 4](RO) BIST status for tlp_p_fifo_ctl. */
+ uint64_t tlpp_d1 : 1; /**< [ 5: 5](RO) BIST status for tlp_p_fifo_data1. */
+ uint64_t tlpp_d0 : 1; /**< [ 6: 6](RO) BIST status for tlp_p_fifo_data0. */
+ uint64_t tlpn_ctl : 1; /**< [ 7: 7](RO) BIST status for tlp_n_fifo_ctl. */
+ uint64_t tlpn_d1 : 1; /**< [ 8: 8](RO) BIST status for tlp_n_fifo_data1. */
+ uint64_t tlpn_d0 : 1; /**< [ 9: 9](RO) BIST status for tlp_n_fifo_data0. */
+ uint64_t reserved_10_63 : 54;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pemx_bist_status_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_26_63 : 38;
+ uint64_t retryc : 1; /**< [ 25: 25](RO) Retry buffer memory C. */
+ uint64_t sot : 1; /**< [ 24: 24](RO) Start of transfer memory. */
+ uint64_t rqhdrb0 : 1; /**< [ 23: 23](RO) RX queue header memory buffer 0. */
+ uint64_t rqhdrb1 : 1; /**< [ 22: 22](RO) RX queue header memory buffer 1. */
+ uint64_t rqdatab0 : 1; /**< [ 21: 21](RO) RX queue data buffer 0. */
+ uint64_t rqdatab1 : 1; /**< [ 20: 20](RO) RX queue data buffer 1. */
+ uint64_t tlpan_d0 : 1; /**< [ 19: 19](RO) BIST Status for the tlp_n_afifo_data0. */
+ uint64_t tlpan_d1 : 1; /**< [ 18: 18](RO) BIST Status for the tlp_n_afifo_data1. */
+ uint64_t tlpan_ctl : 1; /**< [ 17: 17](RO) BIST Status for the tlp_n_afifo_ctl. */
+ uint64_t tlpap_d0 : 1; /**< [ 16: 16](RO) BIST Status for the tlp_p_afifo_data0. */
+ uint64_t tlpap_d1 : 1; /**< [ 15: 15](RO) BIST Status for the tlp_p_afifo_data1. */
+ uint64_t tlpap_ctl : 1; /**< [ 14: 14](RO) BIST Status for the tlp_p_afifo_ctl. */
+ uint64_t tlpac_d0 : 1; /**< [ 13: 13](RO) BIST Status for the tlp_c_afifo_data0. */
+ uint64_t tlpac_d1 : 1; /**< [ 12: 12](RO) BIST Status for the tlp_c_afifo_data1. */
+ uint64_t tlpac_ctl : 1; /**< [ 11: 11](RO) BIST Status for the tlp_c_afifo_ctl. */
+ uint64_t peai_p2e : 1; /**< [ 10: 10](RO) BIST Status for the peai__pesc_fifo. */
+ uint64_t tlpn_d0 : 1; /**< [ 9: 9](RO) BIST status for tlp_n_fifo_data0. */
+ uint64_t tlpn_d1 : 1; /**< [ 8: 8](RO) BIST status for tlp_n_fifo_data1. */
+ uint64_t tlpn_ctl : 1; /**< [ 7: 7](RO) BIST status for tlp_n_fifo_ctl. */
+ uint64_t tlpp_d0 : 1; /**< [ 6: 6](RO) BIST status for tlp_p_fifo_data0. */
+ uint64_t tlpp_d1 : 1; /**< [ 5: 5](RO) BIST status for tlp_p_fifo_data1. */
+ uint64_t tlpp_ctl : 1; /**< [ 4: 4](RO) BIST status for tlp_p_fifo_ctl. */
+ uint64_t tlpc_d0 : 1; /**< [ 3: 3](RO) BIST status for tlp_c_fifo_data0. */
+ uint64_t tlpc_d1 : 1; /**< [ 2: 2](RO) BIST status for tlp_c_fifo_data1. */
+ uint64_t tlpc_ctl : 1; /**< [ 1: 1](RO) BIST status for tlp_c_fifo_ctl. */
+ uint64_t m2s : 1; /**< [ 0: 0](RO) BIST status for m2s_fifo. */
+#else /* Word 0 - Little Endian */
+ uint64_t m2s : 1; /**< [ 0: 0](RO) BIST status for m2s_fifo. */
+ uint64_t tlpc_ctl : 1; /**< [ 1: 1](RO) BIST status for tlp_c_fifo_ctl. */
+ uint64_t tlpc_d1 : 1; /**< [ 2: 2](RO) BIST status for tlp_c_fifo_data1. */
+ uint64_t tlpc_d0 : 1; /**< [ 3: 3](RO) BIST status for tlp_c_fifo_data0. */
+ uint64_t tlpp_ctl : 1; /**< [ 4: 4](RO) BIST status for tlp_p_fifo_ctl. */
+ uint64_t tlpp_d1 : 1; /**< [ 5: 5](RO) BIST status for tlp_p_fifo_data1. */
+ uint64_t tlpp_d0 : 1; /**< [ 6: 6](RO) BIST status for tlp_p_fifo_data0. */
+ uint64_t tlpn_ctl : 1; /**< [ 7: 7](RO) BIST status for tlp_n_fifo_ctl. */
+ uint64_t tlpn_d1 : 1; /**< [ 8: 8](RO) BIST status for tlp_n_fifo_data1. */
+ uint64_t tlpn_d0 : 1; /**< [ 9: 9](RO) BIST status for tlp_n_fifo_data0. */
+ uint64_t peai_p2e : 1; /**< [ 10: 10](RO) BIST Status for the peai__pesc_fifo. */
+ uint64_t tlpac_ctl : 1; /**< [ 11: 11](RO) BIST Status for the tlp_c_afifo_ctl. */
+ uint64_t tlpac_d1 : 1; /**< [ 12: 12](RO) BIST Status for the tlp_c_afifo_data1. */
+ uint64_t tlpac_d0 : 1; /**< [ 13: 13](RO) BIST Status for the tlp_c_afifo_data0. */
+ uint64_t tlpap_ctl : 1; /**< [ 14: 14](RO) BIST Status for the tlp_p_afifo_ctl. */
+ uint64_t tlpap_d1 : 1; /**< [ 15: 15](RO) BIST Status for the tlp_p_afifo_data1. */
+ uint64_t tlpap_d0 : 1; /**< [ 16: 16](RO) BIST Status for the tlp_p_afifo_data0. */
+ uint64_t tlpan_ctl : 1; /**< [ 17: 17](RO) BIST Status for the tlp_n_afifo_ctl. */
+ uint64_t tlpan_d1 : 1; /**< [ 18: 18](RO) BIST Status for the tlp_n_afifo_data1. */
+ uint64_t tlpan_d0 : 1; /**< [ 19: 19](RO) BIST Status for the tlp_n_afifo_data0. */
+ uint64_t rqdatab1 : 1; /**< [ 20: 20](RO) RX queue data buffer 1. */
+ uint64_t rqdatab0 : 1; /**< [ 21: 21](RO) RX queue data buffer 0. */
+ uint64_t rqhdrb1 : 1; /**< [ 22: 22](RO) RX queue header memory buffer 1. */
+ uint64_t rqhdrb0 : 1; /**< [ 23: 23](RO) RX queue header memory buffer 0. */
+ uint64_t sot : 1; /**< [ 24: 24](RO) Start of transfer memory. */
+ uint64_t retryc : 1; /**< [ 25: 25](RO) Retry buffer memory C. */
+ uint64_t reserved_26_63 : 38;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_pemx_bist_status_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_26_63 : 38;
+ uint64_t retryc : 1; /**< [ 25: 25](RO) Retry buffer memory C. */
+ uint64_t sot : 1; /**< [ 24: 24](RO) Start of transfer memory. */
+ uint64_t rqhdrb0 : 1; /**< [ 23: 23](RO) RX queue header memory buffer 0. */
+ uint64_t rqhdrb1 : 1; /**< [ 22: 22](RO) RX queue header memory buffer 1. */
+ uint64_t rqdatab0 : 1; /**< [ 21: 21](RO) RX queue data buffer 0. */
+ uint64_t rqdatab1 : 1; /**< [ 20: 20](RO) RX queue data buffer 1. */
+ uint64_t tlpan_d0 : 1; /**< [ 19: 19](RO) Reserved. */
+ uint64_t tlpan_d1 : 1; /**< [ 18: 18](RO) Reserved. */
+ uint64_t tlpan_ctl : 1; /**< [ 17: 17](RO) Reserved. */
+ uint64_t tlpap_d0 : 1; /**< [ 16: 16](RO) Reserved. */
+ uint64_t tlpap_d1 : 1; /**< [ 15: 15](RO) Reserved. */
+ uint64_t tlpap_ctl : 1; /**< [ 14: 14](RO) Reserved. */
+ uint64_t tlpac_d0 : 1; /**< [ 13: 13](RO) Reserved. */
+ uint64_t tlpac_d1 : 1; /**< [ 12: 12](RO) Reserved. */
+ uint64_t tlpac_ctl : 1; /**< [ 11: 11](RO) Reserved. */
+ uint64_t peai_p2e : 1; /**< [ 10: 10](RO) Reserved. */
+ uint64_t tlpn_d0 : 1; /**< [ 9: 9](RO) BIST status for tlp_n_fifo_data0. */
+ uint64_t tlpn_d1 : 1; /**< [ 8: 8](RO) BIST status for tlp_n_fifo_data1. */
+ uint64_t tlpn_ctl : 1; /**< [ 7: 7](RO) BIST status for tlp_n_fifo_ctl. */
+ uint64_t tlpp_d0 : 1; /**< [ 6: 6](RO) BIST status for tlp_p_fifo_data0. */
+ uint64_t tlpp_d1 : 1; /**< [ 5: 5](RO) BIST status for tlp_p_fifo_data1. */
+ uint64_t tlpp_ctl : 1; /**< [ 4: 4](RO) BIST status for tlp_p_fifo_ctl. */
+ uint64_t tlpc_d0 : 1; /**< [ 3: 3](RO) BIST status for tlp_c_fifo_data0. */
+ uint64_t tlpc_d1 : 1; /**< [ 2: 2](RO) BIST status for tlp_c_fifo_data1. */
+ uint64_t tlpc_ctl : 1; /**< [ 1: 1](RO) BIST status for tlp_c_fifo_ctl. */
+ uint64_t m2s : 1; /**< [ 0: 0](RO) BIST status for m2s_fifo. */
+#else /* Word 0 - Little Endian */
+ uint64_t m2s : 1; /**< [ 0: 0](RO) BIST status for m2s_fifo. */
+ uint64_t tlpc_ctl : 1; /**< [ 1: 1](RO) BIST status for tlp_c_fifo_ctl. */
+ uint64_t tlpc_d1 : 1; /**< [ 2: 2](RO) BIST status for tlp_c_fifo_data1. */
+ uint64_t tlpc_d0 : 1; /**< [ 3: 3](RO) BIST status for tlp_c_fifo_data0. */
+ uint64_t tlpp_ctl : 1; /**< [ 4: 4](RO) BIST status for tlp_p_fifo_ctl. */
+ uint64_t tlpp_d1 : 1; /**< [ 5: 5](RO) BIST status for tlp_p_fifo_data1. */
+ uint64_t tlpp_d0 : 1; /**< [ 6: 6](RO) BIST status for tlp_p_fifo_data0. */
+ uint64_t tlpn_ctl : 1; /**< [ 7: 7](RO) BIST status for tlp_n_fifo_ctl. */
+ uint64_t tlpn_d1 : 1; /**< [ 8: 8](RO) BIST status for tlp_n_fifo_data1. */
+ uint64_t tlpn_d0 : 1; /**< [ 9: 9](RO) BIST status for tlp_n_fifo_data0. */
+ uint64_t peai_p2e : 1; /**< [ 10: 10](RO) Reserved. */
+ uint64_t tlpac_ctl : 1; /**< [ 11: 11](RO) Reserved. */
+ uint64_t tlpac_d1 : 1; /**< [ 12: 12](RO) Reserved. */
+ uint64_t tlpac_d0 : 1; /**< [ 13: 13](RO) Reserved. */
+ uint64_t tlpap_ctl : 1; /**< [ 14: 14](RO) Reserved. */
+ uint64_t tlpap_d1 : 1; /**< [ 15: 15](RO) Reserved. */
+ uint64_t tlpap_d0 : 1; /**< [ 16: 16](RO) Reserved. */
+ uint64_t tlpan_ctl : 1; /**< [ 17: 17](RO) Reserved. */
+ uint64_t tlpan_d1 : 1; /**< [ 18: 18](RO) Reserved. */
+ uint64_t tlpan_d0 : 1; /**< [ 19: 19](RO) Reserved. */
+ uint64_t rqdatab1 : 1; /**< [ 20: 20](RO) RX queue data buffer 1. */
+ uint64_t rqdatab0 : 1; /**< [ 21: 21](RO) RX queue data buffer 0. */
+ uint64_t rqhdrb1 : 1; /**< [ 22: 22](RO) RX queue header memory buffer 1. */
+ uint64_t rqhdrb0 : 1; /**< [ 23: 23](RO) RX queue header memory buffer 0. */
+ uint64_t sot : 1; /**< [ 24: 24](RO) Start of transfer memory. */
+ uint64_t retryc : 1; /**< [ 25: 25](RO) Retry buffer memory C. */
+ uint64_t reserved_26_63 : 38;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_pemx_bist_status_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t retryc : 1; /**< [ 15: 15](RO) Retry buffer memory C. */
+ uint64_t sot : 1; /**< [ 14: 14](RO) Start of transfer memory. */
+ uint64_t rqhdrb0 : 1; /**< [ 13: 13](RO) RX queue header memory buffer 0. */
+ uint64_t rqhdrb1 : 1; /**< [ 12: 12](RO) RX queue header memory buffer 1. */
+ uint64_t rqdatab0 : 1; /**< [ 11: 11](RO) RX queue data buffer 0. */
+ uint64_t rqdatab1 : 1; /**< [ 10: 10](RO) RX queue data buffer 1. */
+ uint64_t tlpn_d0 : 1; /**< [ 9: 9](RO) BIST status for tlp_n_fifo_data0. */
+ uint64_t tlpn_d1 : 1; /**< [ 8: 8](RO) BIST status for tlp_n_fifo_data1. */
+ uint64_t tlpn_ctl : 1; /**< [ 7: 7](RO) BIST status for tlp_n_fifo_ctl. */
+ uint64_t tlpp_d0 : 1; /**< [ 6: 6](RO) BIST status for tlp_p_fifo_data0. */
+ uint64_t tlpp_d1 : 1; /**< [ 5: 5](RO) BIST status for tlp_p_fifo_data1. */
+ uint64_t tlpp_ctl : 1; /**< [ 4: 4](RO) BIST status for tlp_p_fifo_ctl. */
+ uint64_t tlpc_d0 : 1; /**< [ 3: 3](RO) BIST status for tlp_c_fifo_data0. */
+ uint64_t tlpc_d1 : 1; /**< [ 2: 2](RO) BIST status for tlp_c_fifo_data1. */
+ uint64_t tlpc_ctl : 1; /**< [ 1: 1](RO) BIST status for tlp_c_fifo_ctl. */
+ uint64_t m2s : 1; /**< [ 0: 0](RO) BIST status for m2s_fifo. */
+#else /* Word 0 - Little Endian */
+ uint64_t m2s : 1; /**< [ 0: 0](RO) BIST status for m2s_fifo. */
+ uint64_t tlpc_ctl : 1; /**< [ 1: 1](RO) BIST status for tlp_c_fifo_ctl. */
+ uint64_t tlpc_d1 : 1; /**< [ 2: 2](RO) BIST status for tlp_c_fifo_data1. */
+ uint64_t tlpc_d0 : 1; /**< [ 3: 3](RO) BIST status for tlp_c_fifo_data0. */
+ uint64_t tlpp_ctl : 1; /**< [ 4: 4](RO) BIST status for tlp_p_fifo_ctl. */
+ uint64_t tlpp_d1 : 1; /**< [ 5: 5](RO) BIST status for tlp_p_fifo_data1. */
+ uint64_t tlpp_d0 : 1; /**< [ 6: 6](RO) BIST status for tlp_p_fifo_data0. */
+ uint64_t tlpn_ctl : 1; /**< [ 7: 7](RO) BIST status for tlp_n_fifo_ctl. */
+ uint64_t tlpn_d1 : 1; /**< [ 8: 8](RO) BIST status for tlp_n_fifo_data1. */
+ uint64_t tlpn_d0 : 1; /**< [ 9: 9](RO) BIST status for tlp_n_fifo_data0. */
+ uint64_t rqdatab1 : 1; /**< [ 10: 10](RO) RX queue data buffer 1. */
+ uint64_t rqdatab0 : 1; /**< [ 11: 11](RO) RX queue data buffer 0. */
+ uint64_t rqhdrb1 : 1; /**< [ 12: 12](RO) RX queue header memory buffer 1. */
+ uint64_t rqhdrb0 : 1; /**< [ 13: 13](RO) RX queue header memory buffer 0. */
+ uint64_t sot : 1; /**< [ 14: 14](RO) Start of transfer memory. */
+ uint64_t retryc : 1; /**< [ 15: 15](RO) Retry buffer memory C. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } cn83xx;
+ /* struct bdk_pemx_bist_status_cn81xx cn88xxp2; */
+};
+typedef union bdk_pemx_bist_status bdk_pemx_bist_status_t;
+
+static inline uint64_t BDK_PEMX_BIST_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_BIST_STATUS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x87e0c0000468ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000468ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0c0000468ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PEMX_BIST_STATUS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_BIST_STATUS(a) bdk_pemx_bist_status_t
+#define bustype_BDK_PEMX_BIST_STATUS(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_PEMX_BIST_STATUS(a) "PEMX_BIST_STATUS"
+#define device_bar_BDK_PEMX_BIST_STATUS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_BIST_STATUS(a) (a)
+#define arguments_BDK_PEMX_BIST_STATUS(a) (a),-1,-1,-1
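+
+/*
+ * Usage sketch (illustrative, not part of the original BDK sources): any set
+ * bit in PEM()_BIST_STATUS reports a failed memory, so a nonzero value is a
+ * pass/fail summary. Assumes the BDK_CSR_INIT() helper from bdk-csr.h and
+ * bdk_numa_local() from bdk-numa.h.
+ */
+static inline int bdk_pemx_bist_failed_sketch(unsigned long pem)
+{
+    BDK_CSR_INIT(bist, bdk_numa_local(), BDK_PEMX_BIST_STATUS(pem));
+    if (bist.s.m2s) /* Example of checking one specific memory. */
+        bdk_error("PEM%lu: m2s_fifo BIST failure\n", pem);
+    return bist.u != 0; /* Nonzero means at least one memory failed BIST. */
+}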
+
+/**
+ * Register (NCB) pem#_bp_test0
+ *
+ * INTERNAL: PEM Backpressure Test Register 0
+ *
+ * This register is for diagnostic use only.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_bp_test0
+{
+ uint64_t u;
+ struct bdk_pemx_bp_test0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t enable : 4; /**< [ 63: 60](R/W) Enable test mode. For diagnostic use only.
+ Internal:
+ Once a bit is set, random backpressure is generated
+ at the corresponding point to allow for more frequent backpressure.
+ \<63\> = Limit the NCBI posted FIFO, backpressure doing posted requests to ncb_gnt.
+ \<62\> = Limit the NCBI nonposted FIFO, backpressure doing nonposted requests to ncb_gnt.
+ \<61\> = Limit the NCBI completion FIFO, backpressure doing completion requests to ncb_gnt.
+ \<60\> = Limit the NCBI CSR completion FIFO, backpressure doing requests for CSR responses
+ to ncb_gnt. */
+ uint64_t reserved_24_59 : 36;
+ uint64_t bp_cfg : 8; /**< [ 23: 16](R/W) Backpressure weight. For diagnostic use only.
+ Internal:
+ There are 2 backpressure configuration bits per enable, with the two bits
+ defined as 0x0=100% of the time, 0x1=75% of the time, 0x2=50% of the time,
+ 0x3=25% of the time.
+ \<23:22\> = Config 3.
+ \<21:20\> = Config 2.
+ \<19:18\> = Config 1.
+ \<17:16\> = Config 0. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t lfsr_freq : 12; /**< [ 11: 0](R/W) Test LFSR update frequency in coprocessor-clocks minus one. */
+#else /* Word 0 - Little Endian */
+ uint64_t lfsr_freq : 12; /**< [ 11: 0](R/W) Test LFSR update frequency in coprocessor-clocks minus one. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t bp_cfg : 8; /**< [ 23: 16](R/W) Backpressure weight. For diagnostic use only.
+ Internal:
+ There are 2 backpressure configuration bits per enable, with the two bits
+ defined as 0x0=100% of the time, 0x1=75% of the time, 0x2=50% of the time,
+ 0x3=25% of the time.
+ \<23:22\> = Config 3.
+ \<21:20\> = Config 2.
+ \<19:18\> = Config 1.
+ \<17:16\> = Config 0. */
+ uint64_t reserved_24_59 : 36;
+ uint64_t enable : 4; /**< [ 63: 60](R/W) Enable test mode. For diagnostic use only.
+ Internal:
+ Once a bit is set, random backpressure is generated
+ at the corresponding point to allow for more frequent backpressure.
+ \<63\> = Limit the NCBI posted FIFO, backpressure doing posted requests to ncb_gnt.
+ \<62\> = Limit the NCBI nonposted FIFO, backpressure doing nonposted requests to ncb_gnt.
+ \<61\> = Limit the NCBI completion FIFO, backpressure doing completion requests to ncb_gnt.
+ \<60\> = Limit the NCBI CSR completion FIFO, backpressure doing requests for CSR responses
+ to ncb_gnt. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_bp_test0_s cn; */
+};
+typedef union bdk_pemx_bp_test0 bdk_pemx_bp_test0_t;
+
+static inline uint64_t BDK_PEMX_BP_TEST0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_BP_TEST0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e00000001d0ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_BP_TEST0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_BP_TEST0(a) bdk_pemx_bp_test0_t
+#define bustype_BDK_PEMX_BP_TEST0(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_BP_TEST0(a) "PEMX_BP_TEST0"
+#define device_bar_BDK_PEMX_BP_TEST0(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_BP_TEST0(a) (a)
+#define arguments_BDK_PEMX_BP_TEST0(a) (a),-1,-1,-1
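+
+/*
+ * Usage sketch (illustrative, not part of the original BDK sources): program
+ * PEM()_BP_TEST0 so the NCBI posted FIFO (enable bit <63>, weight config 3)
+ * is backpressured 25% of the time, with the test LFSR updating every 16
+ * coprocessor clocks. The same ENABLE/BP_CFG/LFSR_FREQ layout applies to all
+ * of the PEM()_BP_TEST* registers below. Assumes BDK_CSR_INIT() and
+ * BDK_CSR_WRITE() from bdk-csr.h.
+ */
+static inline void bdk_pemx_bp_test0_sketch(unsigned long pem)
+{
+    BDK_CSR_INIT(bp, bdk_numa_local(), BDK_PEMX_BP_TEST0(pem));
+    bp.s.enable = 0x8;       /* <63>: limit the NCBI posted FIFO. */
+    bp.s.bp_cfg = 0x3 << 6;  /* <23:22> = config 3: 0x3 = 25% of the time. */
+    bp.s.lfsr_freq = 16 - 1; /* Update frequency is in clocks minus one. */
+    BDK_CSR_WRITE(bdk_numa_local(), BDK_PEMX_BP_TEST0(pem), bp.u);
+}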
+
+/**
+ * Register (NCB) pem#_bp_test1
+ *
+ * INTERNAL: PEM Backpressure Test Register 1
+ *
+ * This register is for diagnostic use only.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_bp_test1
+{
+ uint64_t u;
+ struct bdk_pemx_bp_test1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t enable : 4; /**< [ 63: 60](R/W) Enable test mode. For diagnostic use only.
+ Internal:
+ Once a bit is set, random backpressure is generated
+ at the corresponding point to allow for more frequent backpressure.
+ \<63\> = Reserved.
+ \<62\> = Reserved.
+ \<61\> = Reserved.
+ \<60\> = Limit the MAC core incoming TLP FIFO; backpressure taking data from this FIFO. */
+ uint64_t reserved_24_59 : 36;
+ uint64_t bp_cfg : 8; /**< [ 23: 16](R/W) Backpressure weight. For diagnostic use only.
+ Internal:
+ There are 2 backpressure configuration bits per enable, with the two bits
+ defined as 0x0=100% of the time, 0x1=75% of the time, 0x2=50% of the time,
+ 0x3=25% of the time.
+ \<23:22\> = Config 3.
+ \<21:20\> = Config 2.
+ \<19:18\> = Config 1.
+ \<17:16\> = Config 0. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t lfsr_freq : 12; /**< [ 11: 0](R/W) Test LFSR update frequency in coprocessor-clocks minus one. */
+#else /* Word 0 - Little Endian */
+ uint64_t lfsr_freq : 12; /**< [ 11: 0](R/W) Test LFSR update frequency in coprocessor-clocks minus one. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t bp_cfg : 8; /**< [ 23: 16](R/W) Backpressure weight. For diagnostic use only.
+ Internal:
+ There are 2 backpressure configuration bits per enable, with the two bits
+ defined as 0x0=100% of the time, 0x1=75% of the time, 0x2=50% of the time,
+ 0x3=25% of the time.
+ \<23:22\> = Config 3.
+ \<21:20\> = Config 2.
+ \<19:18\> = Config 1.
+ \<17:16\> = Config 0. */
+ uint64_t reserved_24_59 : 36;
+ uint64_t enable : 4; /**< [ 63: 60](R/W) Enable test mode. For diagnostic use only.
+ Internal:
+ Once a bit is set, random backpressure is generated
+ at the corresponding point to allow for more frequent backpressure.
+ \<63\> = Reserved.
+ \<62\> = Reserved.
+ \<61\> = Reserved.
+ \<60\> = Limit the MAC core incoming TLP FIFO; backpressure taking data from this FIFO. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_bp_test1_s cn; */
+};
+typedef union bdk_pemx_bp_test1 bdk_pemx_bp_test1_t;
+
+static inline uint64_t BDK_PEMX_BP_TEST1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_BP_TEST1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e00000001d8ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_BP_TEST1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_BP_TEST1(a) bdk_pemx_bp_test1_t
+#define bustype_BDK_PEMX_BP_TEST1(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_BP_TEST1(a) "PEMX_BP_TEST1"
+#define device_bar_BDK_PEMX_BP_TEST1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_BP_TEST1(a) (a)
+#define arguments_BDK_PEMX_BP_TEST1(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) pem#_bp_test2
+ *
+ * INTERNAL: PEM Backpressure Test Register 2
+ *
+ * This register is for diagnostic use only.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_bp_test2
+{
+ uint64_t u;
+ struct bdk_pemx_bp_test2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t enable : 4; /**< [ 63: 60](R/W) Enable test mode. For diagnostic use only.
+ Internal:
+ Once a bit is set, random backpressure is generated
+ at the corresponding point to allow for more frequent backpressure.
+ NOTE: Test backpressure will only be applied at an NCBO transaction boundary.
+ \<63\> = Limit the draining of NCBO CSR offloading FIFO.
+                                                                 \<62\> = Reserved.
+ \<61\> = Limit the draining of NCBO Non-posted offloading FIFO.
+ \<60\> = Limit the draining of NCBO Posted offloading FIFO. */
+ uint64_t reserved_24_59 : 36;
+ uint64_t bp_cfg : 8; /**< [ 23: 16](R/W) Backpressure weight. For diagnostic use only.
+ Internal:
+ There are 2 backpressure configuration bits per enable, with the two bits
+ defined as 0x0=100% of the time, 0x1=75% of the time, 0x2=50% of the time,
+ 0x3=25% of the time.
+ \<23:22\> = Config 3.
+ \<21:20\> = Config 2.
+ \<19:18\> = Config 1.
+ \<17:16\> = Config 0. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t lfsr_freq : 12; /**< [ 11: 0](R/W) Test LFSR update frequency in coprocessor-clocks minus one. */
+#else /* Word 0 - Little Endian */
+ uint64_t lfsr_freq : 12; /**< [ 11: 0](R/W) Test LFSR update frequency in coprocessor-clocks minus one. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t bp_cfg : 8; /**< [ 23: 16](R/W) Backpressure weight. For diagnostic use only.
+ Internal:
+ There are 2 backpressure configuration bits per enable, with the two bits
+ defined as 0x0=100% of the time, 0x1=75% of the time, 0x2=50% of the time,
+ 0x3=25% of the time.
+ \<23:22\> = Config 3.
+ \<21:20\> = Config 2.
+ \<19:18\> = Config 1.
+ \<17:16\> = Config 0. */
+ uint64_t reserved_24_59 : 36;
+ uint64_t enable : 4; /**< [ 63: 60](R/W) Enable test mode. For diagnostic use only.
+ Internal:
+ Once a bit is set, random backpressure is generated
+ at the corresponding point to allow for more frequent backpressure.
+ NOTE: Test backpressure will only be applied at an NCBO transaction boundary.
+ \<63\> = Limit the draining of NCBO CSR offloading FIFO.
+                                                                 \<62\> = Reserved.
+ \<61\> = Limit the draining of NCBO Non-posted offloading FIFO.
+ \<60\> = Limit the draining of NCBO Posted offloading FIFO. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_bp_test2_s cn; */
+};
+typedef union bdk_pemx_bp_test2 bdk_pemx_bp_test2_t;
+
+static inline uint64_t BDK_PEMX_BP_TEST2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_BP_TEST2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e00000001e8ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_BP_TEST2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_BP_TEST2(a) bdk_pemx_bp_test2_t
+#define bustype_BDK_PEMX_BP_TEST2(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_BP_TEST2(a) "PEMX_BP_TEST2"
+#define device_bar_BDK_PEMX_BP_TEST2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_BP_TEST2(a) (a)
+#define arguments_BDK_PEMX_BP_TEST2(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) pem#_bp_test3
+ *
+ * INTERNAL: PEM Backpressure Test Register 3
+ *
+ * This register is for diagnostic use only.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_bp_test3
+{
+ uint64_t u;
+ struct bdk_pemx_bp_test3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t enable : 4; /**< [ 63: 60](R/W) Enable test mode. For diagnostic use only.
+ Internal:
+ Once a bit is set, random backpressure is generated
+ at the corresponding point to allow for more frequent backpressure.
+ NOTE: Test backpressure will only be applied at a TLP boundary.
+ \<63\> = Reserved.
+ \<62\> = Limit the transfers of Completion TLPs from pemm to pemc.
+ \<61\> = Limit the transfers of Non-posted TLPs from pemm to pemc.
+ \<60\> = Limit the transfers of Posted TLPs from pemm to pemc. */
+ uint64_t reserved_24_59 : 36;
+ uint64_t bp_cfg : 8; /**< [ 23: 16](R/W) Backpressure weight. For diagnostic use only.
+ Internal:
+ There are 2 backpressure configuration bits per enable, with the two bits
+ defined as 0x0=100% of the time, 0x1=75% of the time, 0x2=50% of the time,
+ 0x3=25% of the time.
+ \<23:22\> = Config 3.
+ \<21:20\> = Config 2.
+ \<19:18\> = Config 1.
+ \<17:16\> = Config 0. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t lfsr_freq : 12; /**< [ 11: 0](R/W) Test LFSR update frequency in coprocessor-clocks minus one. */
+#else /* Word 0 - Little Endian */
+ uint64_t lfsr_freq : 12; /**< [ 11: 0](R/W) Test LFSR update frequency in coprocessor-clocks minus one. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t bp_cfg : 8; /**< [ 23: 16](R/W) Backpressure weight. For diagnostic use only.
+ Internal:
+ There are 2 backpressure configuration bits per enable, with the two bits
+ defined as 0x0=100% of the time, 0x1=75% of the time, 0x2=50% of the time,
+ 0x3=25% of the time.
+ \<23:22\> = Config 3.
+ \<21:20\> = Config 2.
+ \<19:18\> = Config 1.
+ \<17:16\> = Config 0. */
+ uint64_t reserved_24_59 : 36;
+ uint64_t enable : 4; /**< [ 63: 60](R/W) Enable test mode. For diagnostic use only.
+ Internal:
+ Once a bit is set, random backpressure is generated
+ at the corresponding point to allow for more frequent backpressure.
+ NOTE: Test backpressure will only be applied at a TLP boundary.
+ \<63\> = Reserved.
+ \<62\> = Limit the transfers of Completion TLPs from pemm to pemc.
+ \<61\> = Limit the transfers of Non-posted TLPs from pemm to pemc.
+ \<60\> = Limit the transfers of Posted TLPs from pemm to pemc. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_bp_test3_s cn; */
+};
+typedef union bdk_pemx_bp_test3 bdk_pemx_bp_test3_t;
+
+static inline uint64_t BDK_PEMX_BP_TEST3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_BP_TEST3(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e00000001f0ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_BP_TEST3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_BP_TEST3(a) bdk_pemx_bp_test3_t
+#define bustype_BDK_PEMX_BP_TEST3(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_BP_TEST3(a) "PEMX_BP_TEST3"
+#define device_bar_BDK_PEMX_BP_TEST3(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_BP_TEST3(a) (a)
+#define arguments_BDK_PEMX_BP_TEST3(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) pem#_bp_test4
+ *
+ * INTERNAL: PEM Backpressure Test Register 4
+ *
+ * This register is for diagnostic use only.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_bp_test4
+{
+ uint64_t u;
+ struct bdk_pemx_bp_test4_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t enable : 4; /**< [ 63: 60](R/W) Enable test mode. For diagnostic use only.
+ Internal:
+ Once a bit is set, random backpressure is generated
+ at the corresponding point to allow for more frequent backpressure.
+ \<63\> = Limit the EBI posted FIFO.
+ \<62\> = Limit the EBI nonposted FIFO.
+ \<61\> = Limit the EBI completion FIFO.
+ \<60\> = Limit the EBI completion fault FIFO. */
+ uint64_t reserved_24_59 : 36;
+ uint64_t bp_cfg : 8; /**< [ 23: 16](R/W) Backpressure weight. For diagnostic use only.
+ Internal:
+ There are 2 backpressure configuration bits per enable, with the two bits
+ defined as 0x0=100% of the time, 0x1=75% of the time, 0x2=50% of the time,
+ 0x3=25% of the time.
+ \<23:22\> = Config 3.
+ \<21:20\> = Config 2.
+ \<19:18\> = Config 1.
+ \<17:16\> = Config 0. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t lfsr_freq : 12; /**< [ 11: 0](R/W) Test LFSR update frequency in coprocessor-clocks minus one. */
+#else /* Word 0 - Little Endian */
+ uint64_t lfsr_freq : 12; /**< [ 11: 0](R/W) Test LFSR update frequency in coprocessor-clocks minus one. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t bp_cfg : 8; /**< [ 23: 16](R/W) Backpressure weight. For diagnostic use only.
+ Internal:
+ There are 2 backpressure configuration bits per enable, with the two bits
+ defined as 0x0=100% of the time, 0x1=75% of the time, 0x2=50% of the time,
+ 0x3=25% of the time.
+ \<23:22\> = Config 3.
+ \<21:20\> = Config 2.
+ \<19:18\> = Config 1.
+ \<17:16\> = Config 0. */
+ uint64_t reserved_24_59 : 36;
+ uint64_t enable : 4; /**< [ 63: 60](R/W) Enable test mode. For diagnostic use only.
+ Internal:
+ Once a bit is set, random backpressure is generated
+ at the corresponding point to allow for more frequent backpressure.
+ \<63\> = Limit the EBI posted FIFO.
+ \<62\> = Limit the EBI nonposted FIFO.
+ \<61\> = Limit the EBI completion FIFO.
+ \<60\> = Limit the EBI completion fault FIFO. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_bp_test4_s cn; */
+};
+typedef union bdk_pemx_bp_test4 bdk_pemx_bp_test4_t;
+
+static inline uint64_t BDK_PEMX_BP_TEST4(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_BP_TEST4(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e00000001f8ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_BP_TEST4", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_BP_TEST4(a) bdk_pemx_bp_test4_t
+#define bustype_BDK_PEMX_BP_TEST4(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_BP_TEST4(a) "PEMX_BP_TEST4"
+#define device_bar_BDK_PEMX_BP_TEST4(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_BP_TEST4(a) (a)
+#define arguments_BDK_PEMX_BP_TEST4(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) pem#_bp_test5
+ *
+ * INTERNAL: PEM Backpressure Test Register 5
+ *
+ * This register is for diagnostic use only.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_bp_test5
+{
+ uint64_t u;
+ struct bdk_pemx_bp_test5_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t enable : 4; /**< [ 63: 60](R/W) Enable test mode. For diagnostic use only.
+ Internal:
+ Once a bit is set, random backpressure is generated
+ at the corresponding point to allow for more frequent backpressure.
+                                                                 Note that backpressure will only be applied at an EBO transaction boundary.
+ \<63\> = Reserved.
+ \<62\> = Limit the draining of EBO Completion offloading buffer.
+ \<61\> = Limit the draining of EBO Non-posted offloading FIFO.
+ \<60\> = Limit the draining of EBO Posted offloading FIFO. */
+ uint64_t reserved_24_59 : 36;
+ uint64_t bp_cfg : 8; /**< [ 23: 16](R/W) Backpressure weight. For diagnostic use only.
+ Internal:
+ There are 2 backpressure configuration bits per enable, with the two bits
+ defined as 0x0=100% of the time, 0x1=75% of the time, 0x2=50% of the time,
+ 0x3=25% of the time.
+ \<23:22\> = Config 3.
+ \<21:20\> = Config 2.
+ \<19:18\> = Config 1.
+ \<17:16\> = Config 0. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t lfsr_freq : 12; /**< [ 11: 0](R/W) Test LFSR update frequency in coprocessor-clocks minus one. */
+#else /* Word 0 - Little Endian */
+ uint64_t lfsr_freq : 12; /**< [ 11: 0](R/W) Test LFSR update frequency in coprocessor-clocks minus one. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t bp_cfg : 8; /**< [ 23: 16](R/W) Backpressure weight. For diagnostic use only.
+ Internal:
+ There are 2 backpressure configuration bits per enable, with the two bits
+ defined as 0x0=100% of the time, 0x1=75% of the time, 0x2=50% of the time,
+ 0x3=25% of the time.
+ \<23:22\> = Config 3.
+ \<21:20\> = Config 2.
+ \<19:18\> = Config 1.
+ \<17:16\> = Config 0. */
+ uint64_t reserved_24_59 : 36;
+ uint64_t enable : 4; /**< [ 63: 60](R/W) Enable test mode. For diagnostic use only.
+ Internal:
+ Once a bit is set, random backpressure is generated
+ at the corresponding point to allow for more frequent backpressure.
+                                                                 Note that backpressure will only be applied at an EBO transaction boundary.
+ \<63\> = Reserved.
+ \<62\> = Limit the draining of EBO Completion offloading buffer.
+ \<61\> = Limit the draining of EBO Non-posted offloading FIFO.
+ \<60\> = Limit the draining of EBO Posted offloading FIFO. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_bp_test5_s cn; */
+};
+typedef union bdk_pemx_bp_test5 bdk_pemx_bp_test5_t;
+
+static inline uint64_t BDK_PEMX_BP_TEST5(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_BP_TEST5(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000200ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_BP_TEST5", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_BP_TEST5(a) bdk_pemx_bp_test5_t
+#define bustype_BDK_PEMX_BP_TEST5(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_BP_TEST5(a) "PEMX_BP_TEST5"
+#define device_bar_BDK_PEMX_BP_TEST5(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_BP_TEST5(a) (a)
+#define arguments_BDK_PEMX_BP_TEST5(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) pem#_bp_test6
+ *
+ * INTERNAL: PEM Backpressure Test Register 6
+ *
+ * This register is for diagnostic use only.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_bp_test6
+{
+ uint64_t u;
+ struct bdk_pemx_bp_test6_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t enable : 4; /**< [ 63: 60](R/W) Enable test mode. For diagnostic use only.
+ Internal:
+ Once a bit is set, random backpressure is generated
+ at the corresponding point to allow for more frequent backpressure.
+ \<63\> = Reserved.
+ \<62\> = Limit the PSPI nonposted FIFO.
+ \<61\> = Reserved.
+ \<60\> = Reserved. */
+ uint64_t reserved_24_59 : 36;
+ uint64_t bp_cfg : 8; /**< [ 23: 16](R/W) Backpressure weight. For diagnostic use only.
+ Internal:
+ There are 2 backpressure configuration bits per enable, with the two bits
+ defined as 0x0=100% of the time, 0x1=75% of the time, 0x2=50% of the time,
+ 0x3=25% of the time.
+ \<23:22\> = Config 3.
+ \<21:20\> = Config 2.
+ \<19:18\> = Config 1.
+ \<17:16\> = Config 0. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t lfsr_freq : 12; /**< [ 11: 0](R/W) Test LFSR update frequency in coprocessor-clocks minus one. */
+#else /* Word 0 - Little Endian */
+ uint64_t lfsr_freq : 12; /**< [ 11: 0](R/W) Test LFSR update frequency in coprocessor-clocks minus one. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t bp_cfg : 8; /**< [ 23: 16](R/W) Backpressure weight. For diagnostic use only.
+ Internal:
+ There are 2 backpressure configuration bits per enable, with the two bits
+ defined as 0x0=100% of the time, 0x1=75% of the time, 0x2=50% of the time,
+ 0x3=25% of the time.
+ \<23:22\> = Config 3.
+ \<21:20\> = Config 2.
+ \<19:18\> = Config 1.
+ \<17:16\> = Config 0. */
+ uint64_t reserved_24_59 : 36;
+ uint64_t enable : 4; /**< [ 63: 60](R/W) Enable test mode. For diagnostic use only.
+ Internal:
+ Once a bit is set, random backpressure is generated
+ at the corresponding point to allow for more frequent backpressure.
+ \<63\> = Reserved.
+ \<62\> = Limit the PSPI nonposted FIFO.
+ \<61\> = Reserved.
+ \<60\> = Reserved. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_bp_test6_s cn; */
+};
+typedef union bdk_pemx_bp_test6 bdk_pemx_bp_test6_t;
+
+static inline uint64_t BDK_PEMX_BP_TEST6(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_BP_TEST6(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000208ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_BP_TEST6", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_BP_TEST6(a) bdk_pemx_bp_test6_t
+#define bustype_BDK_PEMX_BP_TEST6(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_BP_TEST6(a) "PEMX_BP_TEST6"
+#define device_bar_BDK_PEMX_BP_TEST6(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_BP_TEST6(a) (a)
+#define arguments_BDK_PEMX_BP_TEST6(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) pem#_cfg
+ *
+ * PEM Application Configuration Register
+ * This register configures the PCIe application.
+ *
+ * This register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on cold reset.
+ */
+union bdk_pemx_cfg
+{
+ uint64_t u;
+ struct bdk_pemx_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pemx_cfg_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t pipe_grp_ptr : 3; /**< [ 7: 5](R/W/H) Each PEM brings in 24 lanes of RX Pipe.
+ Configures the PEM to point to the RX Pipe quad containing
+ Lane 0.
+ 0x0 = grp0 (lane 0).
+ 0x1 = grp1 (lane 4).
+ 0x2 = grp2 (lane 8).
+ 0x3 = grp3 (lane 12).
+ 0x4 = grp4 (lane 16).
+ 0x5 = grp5 (lane 20).
+ 0x6 - 0x7 = Reserved.
+
+ CN93XX configuration:
+ \<pre\>
+ PEM0: Configure to grp0 (QLM0/1/2/3).
+ PEM1: Configure to grp0 (QLM1).
+ Configure to grp1 (DLM4/5).
+ PEM2: Configure to grp0 (QLM3).
+ Configure to grp1 (DLM5).
+ Configure to grp2 (QLM6/7).
+ PEM3: Configure to grp0 (QLM2/3).
+ \</pre\> */
+ uint64_t pipe : 2; /**< [ 4: 3](R/W/H) Configures the PEM pipe sources.
+ 0x0 = Pipe 0.
+ 0x1 = Pipe 1.
+ 0x2 = Pipe 2.
+ 0x3 = Reserved.
+
+ CN93XX configuration:
+ \<pre\>
+ PEM0: Configure to Pipe 0 (QLM0/1/2/3).
+ PEM1: Configure to Pipe 0 (QLM1).
+ Configure to Pipe 1 (DLM4/5).
+ PEM2: Configure to Pipe 0 (QLM3).
+ Configure to Pipe 1 (DLM5).
+ Configure to Pipe 2 (QLM6/7).
+ PEM3: Configure to Pipe 0 (QLM2/3).
+ \</pre\> */
+ uint64_t lanes : 2; /**< [ 2: 1](R/W/H) Ties off RX Pipe for unused lanes.
+ 0x0 = 2 lanes.
+ 0x1 = 4 lanes.
+ 0x2 = 8 lanes.
+ 0x3 = 16 lanes.
+
+ CN93XX configuration:
+ \<pre\>
+ PEM0: Configure to 16 Lanes (QLM0/1/2/3).
+ Configure to 8 Lanes (QLM0/1).
+ Configure to 4 Lanes (QLM0).
+ PEM1: Configure to 4 Lanes (QLM1 or DLM4/5).
+ Configure to 2 Lanes (DLM4).
+ PEM2: Configure to 4 Lanes (QLM3).
+ Configure to 2 Lanes (DLM5).
+ Configure to 8 Lanes (QLM6/7).
+ Configure to 4 Lanes (QLM6).
+ PEM3: Configure to 8 Lanes (QLM2/3 or QLM6/7).
+                                                                       Configure to 4 Lanes (QLM2 or QLM6).
+ \</pre\> */
+ uint64_t hostmd : 1; /**< [ 0: 0](R/W/H) Host mode.
+ 0 = PEM is configured to be an end point (EP mode).
+ 1 = PEM is configured to be a root complex (RC mode). */
+#else /* Word 0 - Little Endian */
+ uint64_t hostmd : 1; /**< [ 0: 0](R/W/H) Host mode.
+ 0 = PEM is configured to be an end point (EP mode).
+ 1 = PEM is configured to be a root complex (RC mode). */
+ uint64_t lanes : 2; /**< [ 2: 1](R/W/H) Ties off RX Pipe for unused lanes.
+ 0x0 = 2 lanes.
+ 0x1 = 4 lanes.
+ 0x2 = 8 lanes.
+ 0x3 = 16 lanes.
+
+ CN93XX configuration:
+ \<pre\>
+ PEM0: Configure to 16 Lanes (QLM0/1/2/3).
+ Configure to 8 Lanes (QLM0/1).
+ Configure to 4 Lanes (QLM0).
+ PEM1: Configure to 4 Lanes (QLM1 or DLM4/5).
+ Configure to 2 Lanes (DLM4).
+ PEM2: Configure to 4 Lanes (QLM3).
+ Configure to 2 Lanes (DLM5).
+ Configure to 8 Lanes (QLM6/7).
+ Configure to 4 Lanes (QLM6).
+ PEM3: Configure to 8 Lanes (QLM2/3 or QLM6/7).
+                                                                       Configure to 4 Lanes (QLM2 or QLM6).
+ \</pre\> */
+ uint64_t pipe : 2; /**< [ 4: 3](R/W/H) Configures the PEM pipe sources.
+ 0x0 = Pipe 0.
+ 0x1 = Pipe 1.
+ 0x2 = Pipe 2.
+ 0x3 = Reserved.
+
+ CN93XX configuration:
+ \<pre\>
+ PEM0: Configure to Pipe 0 (QLM0/1/2/3).
+ PEM1: Configure to Pipe 0 (QLM1).
+ Configure to Pipe 1 (DLM4/5).
+ PEM2: Configure to Pipe 0 (QLM3).
+ Configure to Pipe 1 (DLM5).
+ Configure to Pipe 2 (QLM6/7).
+ PEM3: Configure to Pipe 0 (QLM2/3).
+ \</pre\> */
+ uint64_t pipe_grp_ptr : 3; /**< [ 7: 5](R/W/H) Each PEM brings in 24 lanes of RX Pipe.
+ Configures the PEM to point to the RX Pipe quad containing
+ Lane 0.
+ 0x0 = grp0 (lane 0).
+ 0x1 = grp1 (lane 4).
+ 0x2 = grp2 (lane 8).
+ 0x3 = grp3 (lane 12).
+ 0x4 = grp4 (lane 16).
+ 0x5 = grp5 (lane 20).
+ 0x6 - 0x7 = Reserved.
+
+ CN93XX configuration:
+ \<pre\>
+ PEM0: Configure to grp0 (QLM0/1/2/3).
+ PEM1: Configure to grp0 (QLM1).
+ Configure to grp1 (DLM4/5).
+ PEM2: Configure to grp0 (QLM3).
+ Configure to grp1 (DLM5).
+ Configure to grp2 (QLM6/7).
+ PEM3: Configure to grp0 (QLM2/3).
+ \</pre\> */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_pemx_cfg_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_6_63 : 58;
+ uint64_t lanes4 : 1; /**< [ 5: 5](R/W) Determines the number of lanes.
+ When set, the PEM is configured for a maximum of 4 lanes. When clear, the PEM is
+ configured for a maximum of 2 lanes. This value is used to set the maximum link width
+ field in the core's link capabilities register (CFG031) to indicate the maximum number of
+                                                                 lanes supported. Note that fewer lanes than the specified maximum can be configured for use
+ via the core's link control register (CFG032) negotiated link width field. */
+ uint64_t laneswap : 1; /**< [ 4: 4](R/W) Determines lane swapping. When set, lane swapping is
+ performed to/from the SerDes. When clear, no lane swapping is performed. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t md : 2; /**< [ 1: 0](R/W) Determines the speed.
+ 0x0 = Gen1 speed.
+ 0x1 = Gen2 speed.
+ 0x2 = Gen3 speed.
+ 0x3 = Gen3 speed. */
+#else /* Word 0 - Little Endian */
+ uint64_t md : 2; /**< [ 1: 0](R/W) Determines the speed.
+ 0x0 = Gen1 speed.
+ 0x1 = Gen2 speed.
+ 0x2 = Gen3 speed.
+ 0x3 = Gen3 speed. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t laneswap : 1; /**< [ 4: 4](R/W) Determines lane swapping. When set, lane swapping is
+ performed to/from the SerDes. When clear, no lane swapping is performed. */
+ uint64_t lanes4 : 1; /**< [ 5: 5](R/W) Determines the number of lanes.
+ When set, the PEM is configured for a maximum of 4 lanes. When clear, the PEM is
+ configured for a maximum of 2 lanes. This value is used to set the maximum link width
+ field in the core's link capabilities register (CFG031) to indicate the maximum number of
+                                                                 lanes supported. Note that fewer lanes than the specified maximum can be configured for use
+ via the core's link control register (CFG032) negotiated link width field. */
+ uint64_t reserved_6_63 : 58;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_pemx_cfg_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t laneswap : 1; /**< [ 4: 4](R/W/H) Determines lane swapping. When set, lane swapping is
+ performed to/from the SerDes. When clear, no lane swapping is performed. */
+ uint64_t lanes8 : 1; /**< [ 3: 3](R/W/H) Determines the number of lanes.
+ When set, the PEM is configured for a maximum of 8 lanes. When clear, the PEM is
+ configured for a maximum of 4 lanes. This value is used to set the maximum link width
+ field in the core's link capabilities register (CFG031) to indicate the maximum number of
+                                                                 lanes supported. Note that fewer lanes than the specified maximum can be configured for use
+ via the core's link control register (CFG032) negotiated link width field. */
+ uint64_t reserved_2 : 1;
+ uint64_t md : 2; /**< [ 1: 0](R/W/H) Determines the speed.
+ 0x0 = Gen1 speed.
+ 0x1 = Gen2 speed.
+ 0x2 = Gen3 speed.
+ 0x3 = Gen3 speed. */
+#else /* Word 0 - Little Endian */
+ uint64_t md : 2; /**< [ 1: 0](R/W/H) Determines the speed.
+ 0x0 = Gen1 speed.
+ 0x1 = Gen2 speed.
+ 0x2 = Gen3 speed.
+ 0x3 = Gen3 speed. */
+ uint64_t reserved_2 : 1;
+ uint64_t lanes8 : 1; /**< [ 3: 3](R/W/H) Determines the number of lanes.
+ When set, the PEM is configured for a maximum of 8 lanes. When clear, the PEM is
+ configured for a maximum of 4 lanes. This value is used to set the maximum link width
+ field in the core's link capabilities register (CFG031) to indicate the maximum number of
+                                                                 lanes supported. Note that fewer lanes than the specified maximum can be configured for use
+ via the core's link control register (CFG032) negotiated link width field. */
+ uint64_t laneswap : 1; /**< [ 4: 4](R/W/H) Determines lane swapping. When set, lane swapping is
+ performed to/from the SerDes. When clear, no lane swapping is performed. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_pemx_cfg_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t laneswap : 1; /**< [ 4: 4](R/W/H) Enables overwriting the value for lane swapping. The reset value is captured on
+ cold reset by the pin straps (see PEM()_STRAP[PILANESWAP]). When set, lane swapping is
+ performed to/from the SerDes. When clear, no lane swapping is performed. */
+ uint64_t lanes8 : 1; /**< [ 3: 3](R/W/H) Enables overwriting the value for the maximum number of lanes. The reset value
+ is captured on cold reset by the pin straps (see PEM()_STRAP[PILANES8]). When set, the
+ PEM is configured for a maximum of 8 lanes. When clear, the PEM is configured for a
+ maximum of 4 or 2 lanes. This value is used to set the maximum link width field in the
+ core's
+ link capabilities register (CFG031) to indicate the maximum number of lanes
+                                                                 supported. Note that fewer lanes than the specified maximum can be configured for use via
+ the core's link control register (CFG032) negotiated link width field. */
+ uint64_t hostmd : 1; /**< [ 2: 2](R/W/H) Enables overwriting the value for host mode. The reset value is captured on
+                                                                 cold reset by the pin straps (see PEM()_STRAP[PIMODE]). The HOSTMD reset value is the
+ bit-wise AND of the PIMODE straps. When set, the PEM is configured to be a root complex.
+ When clear, the PEM is configured to be an end point. */
+ uint64_t md : 2; /**< [ 1: 0](R/W/H) Enables overwriting the value for speed. The reset value is captured on cold
+ reset by the pin straps (see PEM()_STRAP[PIMODE]). For a root complex configuration
+ that is not running at Gen3 speed, the HOSTMD bit of this register must be set when this
+ field is changed.
+ 0x0 = Gen1 speed.
+ 0x1 = Gen2 speed.
+ 0x2 = Gen3 speed.
+ 0x3 = Reserved. */
+#else /* Word 0 - Little Endian */
+ uint64_t md : 2; /**< [ 1: 0](R/W/H) Enables overwriting the value for speed. The reset value is captured on cold
+ reset by the pin straps (see PEM()_STRAP[PIMODE]). For a root complex configuration
+ that is not running at Gen3 speed, the HOSTMD bit of this register must be set when this
+ field is changed.
+ 0x0 = Gen1 speed.
+ 0x1 = Gen2 speed.
+ 0x2 = Gen3 speed.
+ 0x3 = Reserved. */
+ uint64_t hostmd : 1; /**< [ 2: 2](R/W/H) Enables overwriting the value for host mode. The reset value is captured on
+                                                                 cold reset by the pin straps (see PEM()_STRAP[PIMODE]). The HOSTMD reset value is the
+ bit-wise AND of the PIMODE straps. When set, the PEM is configured to be a root complex.
+ When clear, the PEM is configured to be an end point. */
+ uint64_t lanes8 : 1; /**< [ 3: 3](R/W/H) Enables overwriting the value for the maximum number of lanes. The reset value
+ is captured on cold reset by the pin straps (see PEM()_STRAP[PILANES8]). When set, the
+ PEM is configured for a maximum of 8 lanes. When clear, the PEM is configured for a
+ maximum of 4 or 2 lanes. This value is used to set the maximum link width field in the
+ core's
+ link capabilities register (CFG031) to indicate the maximum number of lanes
+                                                                 supported. Note that fewer lanes than the specified maximum can be configured for use via
+ the core's link control register (CFG032) negotiated link width field. */
+ uint64_t laneswap : 1; /**< [ 4: 4](R/W/H) Enables overwriting the value for lane swapping. The reset value is captured on
+ cold reset by the pin straps (see PEM()_STRAP[PILANESWAP]). When set, lane swapping is
+ performed to/from the SerDes. When clear, no lane swapping is performed. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pemx_cfg bdk_pemx_cfg_t;
+
+static inline uint64_t BDK_PEMX_CFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_CFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x87e0c0000410ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000410ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0c0000410ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e00000000c8ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_CFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_CFG(a) bdk_pemx_cfg_t
+#define bustype_BDK_PEMX_CFG(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_CFG(a) "PEMX_CFG"
+#define device_bar_BDK_PEMX_CFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_CFG(a) (a)
+#define arguments_BDK_PEMX_CFG(a) (a),-1,-1,-1
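+
+/*
+ * Usage sketch (illustrative, not part of the original BDK sources): decode
+ * the CN9XXX PEM()_CFG[LANES] encoding (0x0=2 ... 0x3=16) into a lane count.
+ * The cn81xx/cn88xx/cn83xx layouts use LANES4/LANES8 bits instead, so real
+ * code would dispatch on CAVIUM_IS_MODEL() first.
+ */
+static inline int bdk_pemx_cfg_lanes_sketch(unsigned long pem)
+{
+    BDK_CSR_INIT(cfg, bdk_numa_local(), BDK_PEMX_CFG(pem));
+    return 2 << cfg.cn9.lanes; /* 0x0->2, 0x1->4, 0x2->8, 0x3->16 lanes. */
+}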
+
+/**
+ * Register (NCB) pem#_cfg_rd
+ *
+ * PEM Configuration Read Register
+ * This register allows read access to the configuration in the PCIe core, but is for
+ * legacy application use. PEM()_PF()_CS()_PFCFG() and PEM()_PF()_VF()_VFCFG() should
+ * typically be used instead.
+ *
+ * This register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on MAC reset.
+ */
+union bdk_pemx_cfg_rd
+{
+ uint64_t u;
+ struct bdk_pemx_cfg_rd_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 32; /**< [ 63: 32](R/W/H) Data. */
+ uint64_t addr : 32; /**< [ 31: 0](R/W/H) Address to read. A write to this register starts a read operation.
+ Following are the sub-fields of the ADDR field.
+
+ \<11:0\> The offset of the PCIe core CFG register being accessed. */
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 32; /**< [ 31: 0](R/W/H) Address to read. A write to this register starts a read operation.
+ Following are the sub-fields of the ADDR field.
+
+ \<11:0\> The offset of the PCIe core CFG register being accessed. */
+ uint64_t data : 32; /**< [ 63: 32](R/W/H) Data. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pemx_cfg_rd_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 32; /**< [ 63: 32](R/W/H) Data. */
+ uint64_t addr : 32; /**< [ 31: 0](R/W) Address to read. A write to this register starts a read operation.
+ Following are the subfields of [ADDR].
+
+ \<31:30\> = Reserved. Must be zero.
+
+ \<29:22\> = The selected virtual function. Must be zero when \<17\> is
+ clear. Must be zero in RC mode.
+
+ \<21:18\> = The selected physical function. Must be zero in RC mode.
+
+ \<17\> = When clear, the write accesses the physical function. When set,
+ the write accesses the virtual function selected by \<29:22\>.
+ Must be zero when SR-IOV is not used in the physical function.
+ Must be zero in RC mode.
+
+ \<16\> = When clear, the write is the same as a config space write received
+ from external. When set, the write can modify more fields than
+ an external write could (i.e. configuration mask register).
+
+ Corresponds to the CS2 field in Byte2 of the EEPROM.
+
+ \<15\> = Must be 1.
+
+ \<14:12\> = Reserved. Must be zero.
+
+ \<11:0\> = Selects the PCIe config space register being written in the
+ function.
+
+ Internal:
+ \<16\> = asserts dbi_cs2 at PCIe core.
+ \<17\> = dbi_vfunc_active to the core.
+ \<29:22\> = dbi_vfunc_num to the core. */
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 32; /**< [ 31: 0](R/W) Address to read. A write to this register starts a read operation.
+ Following are the subfields of [ADDR].
+
+ \<31:30\> = Reserved. Must be zero.
+
+ \<29:22\> = The selected virtual function. Must be zero when \<17\> is
+ clear. Must be zero in RC mode.
+
+ \<21:18\> = The selected physical function. Must be zero in RC mode.
+
+ \<17\> = When clear, the write accesses the physical function. When set,
+ the write accesses the virtual function selected by \<29:22\>.
+ Must be zero when SR-IOV is not used in the physical function.
+ Must be zero in RC mode.
+
+ \<16\> = When clear, the write is the same as a config space write received
+ from external. When set, the write can modify more fields than
+ an external write could (i.e. configuration mask register).
+
+ Corresponds to the CS2 field in Byte2 of the EEPROM.
+
+ \<15\> = Must be 1.
+
+ \<14:12\> = Reserved. Must be zero.
+
+ \<11:0\> = Selects the PCIe config space register being written in the
+ function.
+
+ Internal:
+ \<16\> = asserts dbi_cs2 at PCIe core.
+ \<17\> = dbi_vfunc_active to the core.
+ \<29:22\> = dbi_vfunc_num to the core. */
+ uint64_t data : 32; /**< [ 63: 32](R/W/H) Data. */
+#endif /* Word 0 - End */
+ } cn9;
+ /* struct bdk_pemx_cfg_rd_s cn81xx; */
+ /* struct bdk_pemx_cfg_rd_s cn88xx; */
+ struct bdk_pemx_cfg_rd_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 32; /**< [ 63: 32](R/W/H) Data. */
+ uint64_t addr : 32; /**< [ 31: 0](R/W) Address to read. A write to this register starts a read operation.
+ Following are the subfields of the ADDR field.
+
+ \<31:24\> = Reserved. Must be zero.
+
+ \<23\> = When clear, the read accesses the physical function. When set,
+ the read accesses the virtual function selected by \<22:12\>.
+ Must be zero when SR-IOV is not used in the physical function.
+ Must be zero in RC mode.
+
+ \<22:18\> = Reserved. Must be zero.
+
+ \<17:12\> = The selected virtual function. Must be zero when \<23\> is
+ clear. Must be zero in RC mode.
+
+ \<11:0\> = Selects the PCIe config space register being read in the
+ function.
+
+ Internal:
+ \<31\> = asserts dbi_cs2 at PCIe core.
+ \<23\> = dbi_vfunc_active to the core.
+ \<22:12\> = dbi_vfunc_num to the core. */
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 32; /**< [ 31: 0](R/W) Address to read. A write to this register starts a read operation.
+ Following are the subfields of the ADDR field.
+
+ \<31:24\> = Reserved. Must be zero.
+
+ \<23\> = When clear, the read accesses the physical function. When set,
+ the read accesses the virtual function selected by \<22:12\>.
+ Must be zero when SR-IOV is not used in the physical function.
+ Must be zero in RC mode.
+
+ \<22:18\> = Reserved. Must be zero.
+
+ \<17:12\> = The selected virtual function. Must be zero when \<23\> is
+ clear. Must be zero in RC mode.
+
+ \<11:0\> = Selects the PCIe config space register being read in the
+ function.
+
+ Internal:
+ \<31\> = asserts dbi_cs2 at PCIe core.
+ \<23\> = dbi_vfunc_active to the core.
+ \<22:12\> = dbi_vfunc_num to the core. */
+ uint64_t data : 32; /**< [ 63: 32](R/W/H) Data. */
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pemx_cfg_rd bdk_pemx_cfg_rd_t;
+
+static inline uint64_t BDK_PEMX_CFG_RD(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_CFG_RD(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x87e0c0000030ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000030ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0c0000030ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000020ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_CFG_RD", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_CFG_RD(a) bdk_pemx_cfg_rd_t
+#define bustype_BDK_PEMX_CFG_RD(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_CFG_RD(a) "PEMX_CFG_RD"
+#define device_bar_BDK_PEMX_CFG_RD(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_CFG_RD(a) (a)
+#define arguments_BDK_PEMX_CFG_RD(a) (a),-1,-1,-1
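+
+/*
+ * Usage sketch (illustrative, not part of the original BDK sources): a
+ * legacy indirect config read. Writing [ADDR] launches the read and the
+ * result is then sampled from [DATA]. Only the CN8XXX-style <11:0> register
+ * offset is filled in here; a CN9 caller would also need to set <15>.
+ */
+static inline uint32_t bdk_pemx_cfg_read_sketch(unsigned long pem, unsigned int offset)
+{
+    BDK_CSR_WRITE(bdk_numa_local(), BDK_PEMX_CFG_RD(pem), offset & 0xfff);
+    BDK_CSR_INIT(rd, bdk_numa_local(), BDK_PEMX_CFG_RD(pem));
+    return rd.s.data;
+}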
+
+/**
+ * Register (NCB) pem#_cfg_tbl#
+ *
+ * PEM Configuration Table Registers
+ * Software-managed table with a list of config registers to update when
+ * PEM()_CTL_STATUS[LNK_ENB] is written with a 1. Typically the last
+ * table action should be to set PEM()_CTL_STATUS[SCR_DONE].
+ *
+ * This register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on cold reset.
+ */
+union bdk_pemx_cfg_tblx
+{
+ uint64_t u;
+ struct bdk_pemx_cfg_tblx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 32; /**< [ 63: 32](R/W) Data to write. */
+        uint64_t broadcast             : 1;  /**< [ 31: 31](R/W) When set, the [PF] field is ignored and the write will occur to every Physical Function.
+                                                                 When set and [VF_ACTIVE] is set, the write will occur to VF0 within every Physical Function. */
+ uint64_t reserved_30 : 1;
+ uint64_t vf : 8; /**< [ 29: 22](R/W) The selected virtual function. Must be zero when [VF_ACTIVE] is clear, or when configured
+ for RC mode. */
+ uint64_t pf : 4; /**< [ 21: 18](R/W) Physical function number associated with this access. In RC mode, this
+ field must be zero. */
+ uint64_t vf_active : 1; /**< [ 17: 17](R/W) VF active.
+ 0 = Write accesses the physical function.
+ 1 = Write accesses the virtual function selected by [VF] belonging to [PF].
+
+ Must be zero when SR-IOV is not used in the physical function.
+ Must be zero in RC mode. */
+ uint64_t shadow : 1; /**< [ 16: 16](R/W) Shadow space.
+ 0 = The destination CSR is the standard PCI configuration write register.
+ This may write WRSL fields.
+ 1 = The destination is the shadow CSR space, e.g. PCIEEP_BAR0_MASKL. */
+ uint64_t wmask : 4; /**< [ 15: 12](R/W) Byte mask to apply when writing data. If set, the corresponding byte will be written. */
+ uint64_t offset : 12; /**< [ 11: 0](R/W) Selects the PCIe config space register being written in the function. */
+#else /* Word 0 - Little Endian */
+ uint64_t offset : 12; /**< [ 11: 0](R/W) Selects the PCIe config space register being written in the function. */
+ uint64_t wmask : 4; /**< [ 15: 12](R/W) Byte mask to apply when writing data. If set, the corresponding byte will be written. */
+ uint64_t shadow : 1; /**< [ 16: 16](R/W) Shadow space.
+ 0 = The destination CSR is the standard PCI configuration write register.
+ This may write WRSL fields.
+ 1 = The destination is the shadow CSR space, e.g. PCIEEP_BAR0_MASKL. */
+ uint64_t vf_active : 1; /**< [ 17: 17](R/W) VF active.
+ 0 = Write accesses the physical function.
+ 1 = Write accesses the virtual function selected by [VF] belonging to [PF].
+
+ Must be zero when SR-IOV is not used in the physical function.
+ Must be zero in RC mode. */
+ uint64_t pf : 4; /**< [ 21: 18](R/W) Physical function number associated with this access. In RC mode, this
+ field must be zero. */
+ uint64_t vf : 8; /**< [ 29: 22](R/W) The selected virtual function. Must be zero when [VF_ACTIVE] is clear, or when configured
+ for RC mode. */
+ uint64_t reserved_30 : 1;
+        uint64_t broadcast             : 1;  /**< [ 31: 31](R/W) When set, the [PF] field is ignored and the write will occur to every Physical Function.
+                                                                 When set and [VF_ACTIVE] is set, the write will occur to VF0 within every Physical Function. */
+ uint64_t data : 32; /**< [ 63: 32](R/W) Data to write. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_cfg_tblx_s cn; */
+};
+typedef union bdk_pemx_cfg_tblx bdk_pemx_cfg_tblx_t;
+
+static inline uint64_t BDK_PEMX_CFG_TBLX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_CFG_TBLX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=3) && (b<=1023)))
+ return 0x8e0000002000ll + 0x1000000000ll * ((a) & 0x3) + 8ll * ((b) & 0x3ff);
+ __bdk_csr_fatal("PEMX_CFG_TBLX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_PEMX_CFG_TBLX(a,b) bdk_pemx_cfg_tblx_t
+#define bustype_BDK_PEMX_CFG_TBLX(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_CFG_TBLX(a,b) "PEMX_CFG_TBLX"
+#define device_bar_BDK_PEMX_CFG_TBLX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_CFG_TBLX(a,b) (a)
+#define arguments_BDK_PEMX_CFG_TBLX(a,b) (a),(b),-1,-1
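+
+/*
+ * Usage sketch (illustrative, not part of the original BDK sources): queue a
+ * full-width PF0 config-space write in script table slot [idx]. Hardware only
+ * plays the entry out later, when PEM()_CTL_STATUS[LNK_ENB] is written.
+ */
+static inline void bdk_pemx_cfg_tbl_entry_sketch(unsigned long pem, unsigned long idx,
+                                                 unsigned int offset, uint32_t value)
+{
+    bdk_pemx_cfg_tblx_t tbl = { .u = 0 };
+    tbl.s.offset = offset & 0xfff; /* Target PCIe config register. */
+    tbl.s.wmask = 0xf;             /* Write all four bytes of [DATA]. */
+    tbl.s.data = value;
+    BDK_CSR_WRITE(bdk_numa_local(), BDK_PEMX_CFG_TBLX(pem, idx), tbl.u);
+}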
+
+/**
+ * Register (NCB) pem#_cfg_tbl_size
+ *
+ * PEM Configuration Table Size Register
+ * This register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on cold reset.
+ */
+union bdk_pemx_cfg_tbl_size
+{
+ uint64_t u;
+ struct bdk_pemx_cfg_tbl_size_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_11_63 : 53;
+ uint64_t size : 11; /**< [ 10: 0](R/W) The number of valid entries in PEM()_CFG_TBL(). When hardware plays out the
+ PEM()_CFG_TBL() table, it will read PEM()_CFG_TBL() entries 0x0 through
+ [SIZE]-1, or take no action if [SIZE] is 0x0.
+
+ Software, before rewriting PEM()_CFG_TBL(), should clear [SIZE], write all of
+ the desired entries, then write the [SIZE] with the number of written entries. */
+#else /* Word 0 - Little Endian */
+ uint64_t size : 11; /**< [ 10: 0](R/W) The number of valid entries in PEM()_CFG_TBL(). When hardware plays out the
+ PEM()_CFG_TBL() table, it will read PEM()_CFG_TBL() entries 0x0 through
+ [SIZE]-1, or take no action if [SIZE] is 0x0.
+
+ Software, before rewriting PEM()_CFG_TBL(), should clear [SIZE], write all of
+ the desired entries, then write the [SIZE] with the number of written entries. */
+ uint64_t reserved_11_63 : 53;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_cfg_tbl_size_s cn; */
+};
+typedef union bdk_pemx_cfg_tbl_size bdk_pemx_cfg_tbl_size_t;
+
+static inline uint64_t BDK_PEMX_CFG_TBL_SIZE(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_CFG_TBL_SIZE(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000218ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_CFG_TBL_SIZE", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_CFG_TBL_SIZE(a) bdk_pemx_cfg_tbl_size_t
+#define bustype_BDK_PEMX_CFG_TBL_SIZE(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_CFG_TBL_SIZE(a) "PEMX_CFG_TBL_SIZE"
+#define device_bar_BDK_PEMX_CFG_TBL_SIZE(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_CFG_TBL_SIZE(a) (a)
+#define arguments_BDK_PEMX_CFG_TBL_SIZE(a) (a),-1,-1,-1
+
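+/* Illustrative sketch (an addition, not part of the original BDK sources):
+ * rewriting the PEM()_CFG_TBL() list using the sequence the [SIZE]
+ * description above prescribes: clear [SIZE], write the entries, then
+ * publish the count. Assumes the BDK_CSR_WRITE() helper and the bdk_node_t
+ * type from the surrounding BDK headers are visible at this point.
+ */
+static inline void bdk_pemx_cfg_tbl_rewrite_sketch(bdk_node_t node, int pem,
+                                                   const uint64_t *entries,
+                                                   unsigned count)
+{
+    BDK_CSR_WRITE(node, BDK_PEMX_CFG_TBL_SIZE(pem), 0); /* invalidate table */
+    for (unsigned i = 0; i < count; i++)
+        BDK_CSR_WRITE(node, BDK_PEMX_CFG_TBLX(pem, i), entries[i]);
+    BDK_CSR_WRITE(node, BDK_PEMX_CFG_TBL_SIZE(pem), count); /* publish count */
+}
+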
+/**
+ * Register (NCB) pem#_cfg_wr
+ *
+ * PEM Configuration Write Register
+ * This register allows write access to the configuration in the PCIe core, but is for
+ * legacy application use. PEM()_PF()_CS()_PFCFG() and PEM()_PF()_VF()_VFCFG() should
+ * typically be used instead.
+ *
+ * This register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on MAC reset.
+ */
+union bdk_pemx_cfg_wr
+{
+ uint64_t u;
+ struct bdk_pemx_cfg_wr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 32; /**< [ 63: 32](R/W/H) Data to write. A write to this register starts a write operation. */
+ uint64_t addr : 32; /**< [ 31: 0](R/W/H) Address to write. A write to this register starts a write operation.
+ Following are the sub-fields of the ADDR field.
+
+ \<31\> When set, asserts dbi_cs2 at PCIe core.
+ \<11:0\> The offset of the PCIe core CFG register being accessed. */
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 32; /**< [ 31: 0](R/W/H) Address to write. A write to this register starts a write operation.
+ Following are the sub-fields of the ADDR field.
+
+ \<31\> When set, asserts dbi_cs2 at PCIe core.
+ \<11:0\> The offset of the PCIe core CFG register being accessed. */
+ uint64_t data : 32; /**< [ 63: 32](R/W/H) Data to write. A write to this register starts a write operation. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pemx_cfg_wr_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 32; /**< [ 63: 32](R/W) Data to write. A write to this register starts a write operation. */
+ uint64_t addr : 32; /**< [ 31: 0](R/W) Address to write. A write to this register starts a write operation.
+ Following are the subfields of [ADDR].
+
+ \<31:30\> = Reserved. Must be zero.
+
+ \<29:22\> = The selected virtual function. Must be zero when \<17\> is
+ clear. Must be zero in RC mode.
+
+ \<21:18\> = The selected physical function. Must be zero in RC mode.
+
+ \<17\> = When clear, the write accesses the physical function. When set,
+ the write accesses the virtual function selected by \<29:22\>.
+ Must be zero when SR-IOV is not used in the physical function.
+ Must be zero in RC mode.
+
+ \<16\> = When clear, the write is the same as a config space write received
+ from external. When set, the write can modify more fields than
+ an external write could (i.e. configuration mask register).
+
+ Corresponds to the CS2 field in Byte2 of the EEPROM.
+
+ \<15\> = Must be 1.
+
+ \<14:12\> = Reserved. Must be zero.
+
+ \<11:0\> = Selects the PCIe config space register being written in the
+ function.
+
+ Internal:
+ \<16\> = asserts dbi_cs2 at PCIe core.
+ \<17\> = dbi_vfunc_active to the core.
+ \<29:22\> = dbi_vfunc_num to the core. */
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 32; /**< [ 31: 0](R/W) Address to write. A write to this register starts a write operation.
+ Following are the subfields of [ADDR].
+
+ \<31:30\> = Reserved. Must be zero.
+
+ \<29:22\> = The selected virtual function. Must be zero when \<17\> is
+ clear. Must be zero in RC mode.
+
+ \<21:18\> = The selected physical function. Must be zero in RC mode.
+
+ \<17\> = When clear, the write accesses the physical function. When set,
+ the write accesses the virtual function selected by \<29:22\>.
+ Must be zero when SR-IOV is not used in the physical function.
+ Must be zero in RC mode.
+
+ \<16\> = When clear, the write is the same as a config space write received
+ from external. When set, the write can modify more fields than
+ an external write could (i.e. configuration mask register).
+
+ Corresponds to the CS2 field in Byte2 of the EEPROM.
+
+ \<15\> = Must be 1.
+
+ \<14:12\> = Reserved. Must be zero.
+
+ \<11:0\> = Selects the PCIe config space register being written in the
+ function.
+
+ Internal:
+ \<16\> = asserts dbi_cs2 at PCIe core.
+ \<17\> = dbi_vfunc_active to the core.
+ \<29:22\> = dbi_vfunc_num to the core. */
+ uint64_t data : 32; /**< [ 63: 32](R/W) Data to write. A write to this register starts a write operation. */
+#endif /* Word 0 - End */
+ } cn9;
+ /* struct bdk_pemx_cfg_wr_s cn81xx; */
+ /* struct bdk_pemx_cfg_wr_s cn88xx; */
+ struct bdk_pemx_cfg_wr_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 32; /**< [ 63: 32](R/W) Data to write. A write to this register starts a write operation. */
+ uint64_t addr : 32; /**< [ 31: 0](R/W) Address to write. A write to this register starts a write operation.
+ Following are the subfields of the ADDR field.
+
+ \<31\> = When clear, the write is the same as a config space write received
+ from external. When set, the write can modify more fields than
+ an external write could (i.e. configuration mask register).
+
+ Corresponds to the CS2 field in Byte2 of the EEPROM.
+
+ \<30:24\> = Reserved. Must be zero.
+
+ \<23\> = When clear, the write accesses the physical function. When set,
+ the write accesses the virtual function selected by \<22:12\>.
+ Must be zero when SR-IOV is not used in the physical function.
+ Must be zero in RC mode.
+
+ \<22:18\> = Reserved. Must be zero.
+
+ \<17:12\> = The selected virtual function. Must be zero when \<23\> is
+ clear. Must be zero in RC mode.
+
+ \<11:0\> = Selects the PCIe config space register being written in the
+ function.
+
+ Internal:
+ \<31\> = asserts dbi_cs2 at PCIe core.
+ \<23\> = dbi_vfunc_active to the core.
+ \<22:12\> = dbi_vfunc_num to the core. */
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 32; /**< [ 31: 0](R/W) Address to write. A write to this register starts a write operation.
+ Following are the subfields of the ADDR field.
+
+ \<31\> = When clear, the write is the same as a config space write received
+ from external. When set, the write can modify more fields than
+ an external write could (i.e. configuration mask register).
+
+ Corresponds to the CS2 field in Byte2 of the EEPROM.
+
+ \<30:24\> = Reserved. Must be zero.
+
+ \<23\> = When clear, the write accesses the physical function. When set,
+ the write accesses the virtual function selected by \<22:12\>.
+ Must be zero when SR-IOV is not used in the physical function.
+ Must be zero in RC mode.
+
+ \<22:18\> = Reserved. Must be zero.
+
+ \<17:12\> = The selected virtual function. Must be zero when \<23\> is
+ clear. Must be zero in RC mode.
+
+ \<11:0\> = Selects the PCIe config space register being written in the
+ function.
+
+ Internal:
+ \<31\> = asserts dbi_cs2 at PCIe core.
+ \<23\> = dbi_vfunc_active to the core.
+ \<22:12\> = dbi_vfunc_num to the core. */
+ uint64_t data : 32; /**< [ 63: 32](R/W) Data to write. A write to this register starts a write operation. */
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pemx_cfg_wr bdk_pemx_cfg_wr_t;
+
+static inline uint64_t BDK_PEMX_CFG_WR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_CFG_WR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x87e0c0000028ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000028ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0c0000028ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000018ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_CFG_WR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_CFG_WR(a) bdk_pemx_cfg_wr_t
+#define bustype_BDK_PEMX_CFG_WR(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_CFG_WR(a) "PEMX_CFG_WR"
+#define device_bar_BDK_PEMX_CFG_WR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_CFG_WR(a) (a)
+#define arguments_BDK_PEMX_CFG_WR(a) (a),-1,-1,-1
+
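+/* Illustrative sketch (an addition, not part of the original BDK sources):
+ * composing the CN9 [ADDR] subfields documented above for a legacy
+ * PEM()_CFG_WR write to PF0 at config offset 0x10, with <15> set as
+ * required. Values are arbitrary examples; assumes the BDK_CSR_WRITE()
+ * helper from the surrounding BDK headers.
+ */
+static inline void bdk_pemx_cfg_wr_sketch_cn9(bdk_node_t node, int pem)
+{
+    bdk_pemx_cfg_wr_t wr = { .u = 0 };
+    uint32_t addr = 0;
+    addr |= 0x10 & 0xfff; /* <11:0>  config space register offset */
+    addr |= 1u << 15;     /* <15>    must be 1 */
+    /* <16> CS2, <17> VF active, <21:18> PF and <29:22> VF all left zero */
+    wr.cn9.addr = addr;
+    wr.cn9.data = 0x12345678; /* 32-bit write data */
+    BDK_CSR_WRITE(node, BDK_PEMX_CFG_WR(pem), wr.u);
+}
+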
+/**
+ * Register (NCB) pem#_clk_en
+ *
+ * PEM Clock Enable Register
+ * This register contains the clock enable for CSCLK and PCE_CLK.
+ *
+ * This register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on cold reset.
+ */
+union bdk_pemx_clk_en
+{
+ uint64_t u;
+ struct bdk_pemx_clk_en_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t pemc_pceclkx_force : 1; /**< [ 4: 4](R/W) When set, the pclk is forced on at all times to the PEM core MAC memories.
+ When clear, the pclk to the PEM core MAC memories can be gated in hardware. */
+ uint64_t pemc_macclk_force : 1; /**< [ 3: 3](R/W) When set, aux_clk & radm_clk are forced on at all times to the PEM core MAC.
+ When clear, aux_clk & radm_clk can be gated by the PEM core MAC. */
+ uint64_t reserved_0_2 : 3;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_2 : 3;
+ uint64_t pemc_macclk_force : 1; /**< [ 3: 3](R/W) When set, aux_clk & radm_clk are forced on at all times to the PEM core MAC.
+ When clear, aux_clk & radm_clk can be gated by the PEM core MAC. */
+ uint64_t pemc_pceclkx_force : 1; /**< [ 4: 4](R/W) When set, the pclk is forced on at all times to the PEM core MAC memories.
+ When clear, the pclk to the PEM core MAC memories can be gated in hardware. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pemx_clk_en_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t pemc_pceclkx_force : 1; /**< [ 4: 4](R/W) When set, the pclk is forced on at all times to the PEM core MAC memories.
+ When clear, the pclk to the PEM core MAC memories can be gated in hardware. */
+ uint64_t pemc_macclk_force : 1; /**< [ 3: 3](R/W) When set, aux_clk & radm_clk are forced on at all times to the PEM core MAC.
+ When clear, aux_clk & radm_clk can be gated by the PEM core MAC. */
+ uint64_t pceclk_gate : 1; /**< [ 2: 2](R/W) When set, PCE_CLK is gated off in PEM core.
+ When clear, PCE_CLK is enabled in PEM core. */
+ uint64_t pemc_csclk_gate : 1; /**< [ 1: 1](R/W) When set, SCLK is gated off in PEM core.
+ When clear, SCLK is enabled in PEM core. */
+ uint64_t pemm_csclk_force : 1; /**< [ 0: 0](R/W) When set, CSCLK is forced on at all times in PEM main.
+ When clear, CSCLK gating in PEM main is controlled by hardware. */
+#else /* Word 0 - Little Endian */
+ uint64_t pemm_csclk_force : 1; /**< [ 0: 0](R/W) When set, CSCLK is forced on at all times in PEM main.
+ When clear, CSCLK gating in PEM main is controlled by hardware. */
+ uint64_t pemc_csclk_gate : 1; /**< [ 1: 1](R/W) When set, SCLK is gated off in PEM core.
+ When clear, SCLK is enabled in PEM core. */
+ uint64_t pceclk_gate : 1; /**< [ 2: 2](R/W) When set, PCE_CLK is gated off in PEM core.
+ When clear, PCE_CLK is enabled in PEM core. */
+ uint64_t pemc_macclk_force : 1; /**< [ 3: 3](R/W) When set, aux_clk & radm_clk are forced on at all times to the PEM core MAC.
+ When clear, aux_clk & radm_clk can be gated by the PEM core MAC. */
+ uint64_t pemc_pceclkx_force : 1; /**< [ 4: 4](R/W) When set, the pclk is forced on at all times to the PEM core MAC memories.
+ When clear, the pclk to the PEM core MAC memories can be gated in hardware. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_pemx_clk_en_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t pceclk_gate : 1; /**< [ 1: 1](R/W/H) When set, PCE_CLK is gated off. When clear, PCE_CLK is enabled.
+ Software should set this bit when the PEM is in reset or otherwise not
+ being used in order to reduce power. */
+ uint64_t csclk_gate : 1; /**< [ 0: 0](R/W/H) When set, CSCLK is gated off. When clear, CSCLK is enabled.
+ Software should set this bit when the PEM is in reset or otherwise not
+ being used in order to reduce power. */
+#else /* Word 0 - Little Endian */
+ uint64_t csclk_gate : 1; /**< [ 0: 0](R/W/H) When set, CSCLK is gated off. When clear, CSCLK is enabled.
+ Software should set this bit when the PEM is in reset or otherwise not
+ being used in order to reduce power. */
+ uint64_t pceclk_gate : 1; /**< [ 1: 1](R/W/H) When set, PCE_CLK is gated off. When clear, PCE_CLK is enabled.
+ Software should set this bit when the PEM is in reset or otherwise not
+ being used in order to reduce power. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_pemx_clk_en_cn81xx cn88xx; */
+ struct bdk_pemx_clk_en_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t pceclk_gate : 1; /**< [ 1: 1](R/W/H) When set, PCE_CLK is gated off. When clear, PCE_CLK is enabled.
+ PEM0 & PEM2 will come up with clocks disabled when configured as
+ an RC, i.e., PEM()_STRAP[PIMODE] set to 0x3.
+ PEM1 & PEM3 always come up with clocks disabled. */
+ uint64_t csclk_gate : 1; /**< [ 0: 0](R/W/H) When set, ECLK is gated off. When clear, ECLK is enabled.
+ PEM0 & PEM2 will come up with clocks disabled when configured as
+ an RC, i.e., PEM()_STRAP[PIMODE] set to 0x3.
+ PEM1 & PEM3 always come up with clocks disabled. */
+#else /* Word 0 - Little Endian */
+ uint64_t csclk_gate : 1; /**< [ 0: 0](R/W/H) When set, ECLK is gated off. When clear, ECLK is enabled.
+ PEM0 & PEM2 will come up with clocks disabled when configured as
+ an RC, i.e., PEM()_STRAP[PIMODE] set to 0x3.
+ PEM1 & PEM3 always come up with clocks disabled. */
+ uint64_t pceclk_gate : 1; /**< [ 1: 1](R/W/H) When set, PCE_CLK is gated off. When clear, PCE_CLK is enabled.
+ PEM0 & PEM2 will come up with clocks disabled when configured as
+ an RC, i.e., PEM()_STRAP[PIMODE] set to 0x3.
+ PEM1 & PEM3 always come up with clocks disabled. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pemx_clk_en bdk_pemx_clk_en_t;
+
+static inline uint64_t BDK_PEMX_CLK_EN(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_CLK_EN(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x87e0c0000400ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000400ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0c0000400ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e00000000b8ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_CLK_EN", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_CLK_EN(a) bdk_pemx_clk_en_t
+#define bustype_BDK_PEMX_CLK_EN(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_CLK_EN(a) "PEMX_CLK_EN"
+#define device_bar_BDK_PEMX_CLK_EN(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_CLK_EN(a) (a)
+#define arguments_BDK_PEMX_CLK_EN(a) (a),-1,-1,-1
+
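+/* Illustrative sketch (an addition, not part of the original BDK sources):
+ * gating both clocks on an unused CN81XX/CN88XX PEM to reduce power, as the
+ * CSCLK_GATE/PCECLK_GATE descriptions above suggest. Assumes the
+ * BDK_CSR_MODIFY() helper from the surrounding BDK headers.
+ */
+static inline void bdk_pemx_clk_gate_sketch_cn8(bdk_node_t node, int pem)
+{
+    BDK_CSR_MODIFY(c, node, BDK_PEMX_CLK_EN(pem),
+                   c.cn81xx.csclk_gate = 1;   /* gate CSCLK */
+                   c.cn81xx.pceclk_gate = 1); /* gate PCE_CLK */
+}
+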
+/**
+ * Register (NCB) pem#_cmerge_merged_pc
+ *
+ * PEM Merge Completions Merged Performance Counter Register
+ * This register is a performance counter of how many completions merged within the
+ * outbound completion merge units.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_cmerge_merged_pc
+{
+ uint64_t u;
+ struct bdk_pemx_cmerge_merged_pc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t cmerge_merged : 64; /**< [ 63: 0](R/W/H) Each NCBO completion operation that merges with a previous
+ read will increment this count. */
+#else /* Word 0 - Little Endian */
+ uint64_t cmerge_merged : 64; /**< [ 63: 0](R/W/H) Each NCBO completion operation that merges with a previous
+ read will increment this count. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_cmerge_merged_pc_s cn; */
+};
+typedef union bdk_pemx_cmerge_merged_pc bdk_pemx_cmerge_merged_pc_t;
+
+static inline uint64_t BDK_PEMX_CMERGE_MERGED_PC(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_CMERGE_MERGED_PC(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e00000001a8ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_CMERGE_MERGED_PC", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_CMERGE_MERGED_PC(a) bdk_pemx_cmerge_merged_pc_t
+#define bustype_BDK_PEMX_CMERGE_MERGED_PC(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_CMERGE_MERGED_PC(a) "PEMX_CMERGE_MERGED_PC"
+#define device_bar_BDK_PEMX_CMERGE_MERGED_PC(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_CMERGE_MERGED_PC(a) (a)
+#define arguments_BDK_PEMX_CMERGE_MERGED_PC(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) pem#_cmerge_received_pc
+ *
+ * PEM Merge Completions Received Performance Counter Register
+ * This register reports the number of completions that enter the outbound completion merge unit.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_cmerge_received_pc
+{
+ uint64_t u;
+ struct bdk_pemx_cmerge_received_pc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t cmerge_reads : 64; /**< [ 63: 0](R/W/H) Each NCBO completion operation increments this count. */
+#else /* Word 0 - Little Endian */
+ uint64_t cmerge_reads : 64; /**< [ 63: 0](R/W/H) Each NCBO completion operation increments this count. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_cmerge_received_pc_s cn; */
+};
+typedef union bdk_pemx_cmerge_received_pc bdk_pemx_cmerge_received_pc_t;
+
+static inline uint64_t BDK_PEMX_CMERGE_RECEIVED_PC(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_CMERGE_RECEIVED_PC(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e00000001a0ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_CMERGE_RECEIVED_PC", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_CMERGE_RECEIVED_PC(a) bdk_pemx_cmerge_received_pc_t
+#define bustype_BDK_PEMX_CMERGE_RECEIVED_PC(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_CMERGE_RECEIVED_PC(a) "PEMX_CMERGE_RECEIVED_PC"
+#define device_bar_BDK_PEMX_CMERGE_RECEIVED_PC(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_CMERGE_RECEIVED_PC(a) (a)
+#define arguments_BDK_PEMX_CMERGE_RECEIVED_PC(a) (a),-1,-1,-1
+
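+/* Illustrative sketch (an addition, not part of the original BDK sources):
+ * deriving a completion-merge percentage from the two performance counters
+ * above. Assumes the BDK_CSR_READ() helper from the surrounding BDK headers.
+ */
+static inline int bdk_pemx_cmerge_percent_sketch(bdk_node_t node, int pem)
+{
+    uint64_t merged   = BDK_CSR_READ(node, BDK_PEMX_CMERGE_MERGED_PC(pem));
+    uint64_t received = BDK_CSR_READ(node, BDK_PEMX_CMERGE_RECEIVED_PC(pem));
+    return received ? (int)(merged * 100 / received) : 0;
+}
+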
+/**
+ * Register (NCB) pem#_const_acc
+ *
+ * PEM Constant ACC Register
+ * Contains constant attributes related to the PEM ACC tables.
+ */
+union bdk_pemx_const_acc
+{
+ uint64_t u;
+ struct bdk_pemx_const_acc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t bits_huge : 16; /**< [ 63: 48](RO) Number of bits in the address aperture for a huge-sized ACC table entry. See
+ PEM()_REG_HUGE()_ACC. */
+ uint64_t num_huge : 16; /**< [ 47: 32](RO) Number of huge-sized ACC table entries. See PEM()_REG_HUGE()_ACC. */
+ uint64_t bits_norm : 16; /**< [ 31: 16](RO) Number of bits in the address aperture for a normal-sized ACC table entry. See
+ PEM()_REG_NORM()_ACC. */
+ uint64_t num_norm : 16; /**< [ 15: 0](RO) Number of normal-sized ACC table entries. See PEM()_REG_NORM()_ACC. */
+#else /* Word 0 - Little Endian */
+ uint64_t num_norm : 16; /**< [ 15: 0](RO) Number of normal-sized ACC table entries. See PEM()_REG_NORM()_ACC. */
+ uint64_t bits_norm : 16; /**< [ 31: 16](RO) Number of bits in the address aperture for a normal-sized ACC table entry. See
+ PEM()_REG_NORM()_ACC. */
+ uint64_t num_huge : 16; /**< [ 47: 32](RO) Number of huge-sized ACC table entries. See PEM()_REG_HUGE()_ACC. */
+ uint64_t bits_huge : 16; /**< [ 63: 48](RO) Number of bits in the address aperture for a huge-sized ACC table entry. See
+ PEM()_REG_HUGE()_ACC. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_const_acc_s cn; */
+};
+typedef union bdk_pemx_const_acc bdk_pemx_const_acc_t;
+
+static inline uint64_t BDK_PEMX_CONST_ACC(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_CONST_ACC(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000210ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_CONST_ACC", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_CONST_ACC(a) bdk_pemx_const_acc_t
+#define bustype_BDK_PEMX_CONST_ACC(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_CONST_ACC(a) "PEMX_CONST_ACC"
+#define device_bar_BDK_PEMX_CONST_ACC(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_CONST_ACC(a) (a)
+#define arguments_BDK_PEMX_CONST_ACC(a) (a),-1,-1,-1
+
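+/* Illustrative sketch (an addition, not part of the original BDK sources):
+ * decoding the ACC table geometry above. Each normal-sized entry covers an
+ * address aperture of 2^[BITS_NORM]; the aperture units are not stated here,
+ * so treat the result as illustrative. Assumes the BDK_CSR_INIT() helper
+ * from the surrounding BDK headers.
+ */
+static inline uint64_t bdk_pemx_norm_aperture_sketch(bdk_node_t node, int pem)
+{
+    BDK_CSR_INIT(acc, node, BDK_PEMX_CONST_ACC(pem));
+    return 1ull << acc.s.bits_norm; /* aperture spanned by one normal entry */
+}
+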
+/**
+ * Register (NCB) pem#_cpl_lut_valid
+ *
+ * PEM Completion Lookup Table Valid Register
+ * This register specifies how many tags are outstanding for reads.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on MAC reset.
+ */
+union bdk_pemx_cpl_lut_valid
+{
+ uint64_t u;
+ struct bdk_pemx_cpl_lut_valid_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t tag : 64; /**< [ 63: 0](RO/H) Bit vector; each set bit corresponds to an outstanding tag. */
+#else /* Word 0 - Little Endian */
+ uint64_t tag : 64; /**< [ 63: 0](RO/H) Bit vector; each set bit corresponds to an outstanding tag. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_cpl_lut_valid_s cn8; */
+ struct bdk_pemx_cpl_lut_valid_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_10_63 : 54;
+ uint64_t tag : 10; /**< [ 9: 0](RO/H) Number of read tags outstanding for outbound reads on PCIe. */
+#else /* Word 0 - Little Endian */
+ uint64_t tag : 10; /**< [ 9: 0](RO/H) Number of read tags outstanding for outbound reads on PCIe. */
+ uint64_t reserved_10_63 : 54;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_pemx_cpl_lut_valid bdk_pemx_cpl_lut_valid_t;
+
+static inline uint64_t BDK_PEMX_CPL_LUT_VALID(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_CPL_LUT_VALID(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x87e0c0000098ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000098ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0c0000098ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000038ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_CPL_LUT_VALID", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_CPL_LUT_VALID(a) bdk_pemx_cpl_lut_valid_t
+#define bustype_BDK_PEMX_CPL_LUT_VALID(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_CPL_LUT_VALID(a) "PEMX_CPL_LUT_VALID"
+#define device_bar_BDK_PEMX_CPL_LUT_VALID(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_CPL_LUT_VALID(a) (a)
+#define arguments_BDK_PEMX_CPL_LUT_VALID(a) (a),-1,-1,-1
+
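+/* Illustrative sketch (an addition, not part of the original BDK sources):
+ * counting outstanding read tags. On CN8XXX [TAG] is a bit vector, so set
+ * bits are counted; on CN9XXX it is already a count. Assumes the
+ * BDK_CSR_READ() helper from the surrounding BDK headers.
+ */
+static inline int bdk_pemx_reads_outstanding_sketch(bdk_node_t node, int pem)
+{
+    uint64_t tag = BDK_CSR_READ(node, BDK_PEMX_CPL_LUT_VALID(pem));
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+        return (int)(tag & 0x3ff);    /* cn9: direct count of tags */
+    return __builtin_popcountll(tag); /* cn8: one set bit per tag */
+}
+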
+/**
+ * Register (NCB) pem#_csclk_active_pc
+ *
+ * PEM Conditional Coprocessor Clock Counter Register
+ * This register counts conditional clocks for power management.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_csclk_active_pc
+{
+ uint64_t u;
+ struct bdk_pemx_csclk_active_pc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W/H) Count of conditional coprocessor-clock cycles since reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W/H) Count of conditional coprocessor-clock cycles since reset. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_csclk_active_pc_s cn; */
+};
+typedef union bdk_pemx_csclk_active_pc bdk_pemx_csclk_active_pc_t;
+
+static inline uint64_t BDK_PEMX_CSCLK_ACTIVE_PC(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_CSCLK_ACTIVE_PC(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000050ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_CSCLK_ACTIVE_PC", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_CSCLK_ACTIVE_PC(a) bdk_pemx_csclk_active_pc_t
+#define bustype_BDK_PEMX_CSCLK_ACTIVE_PC(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_CSCLK_ACTIVE_PC(a) "PEMX_CSCLK_ACTIVE_PC"
+#define device_bar_BDK_PEMX_CSCLK_ACTIVE_PC(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_CSCLK_ACTIVE_PC(a) (a)
+#define arguments_BDK_PEMX_CSCLK_ACTIVE_PC(a) (a),-1,-1,-1
+
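+/* Illustrative sketch (an addition, not part of the original BDK sources):
+ * sampling the conditional-clock counter twice to see how many CSCLK cycles
+ * were active across a caller-supplied delay. Assumes the BDK_CSR_READ()
+ * helper from the surrounding BDK headers.
+ */
+static inline uint64_t bdk_pemx_csclk_delta_sketch(bdk_node_t node, int pem,
+                                                   void (*delay_fn)(void))
+{
+    uint64_t start = BDK_CSR_READ(node, BDK_PEMX_CSCLK_ACTIVE_PC(pem));
+    delay_fn(); /* caller-chosen measurement interval */
+    return BDK_CSR_READ(node, BDK_PEMX_CSCLK_ACTIVE_PC(pem)) - start;
+}
+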
+/**
+ * Register (NCB) pem#_ctl_status
+ *
+ * PEM Control Status Register
+ * This is a general control and status register of the PEM.
+ *
+ * This register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on MAC cold reset. Note this differs from PEM()_CTL_STATUS2's reset.
+ */
+union bdk_pemx_ctl_status
+{
+ uint64_t u;
+ struct bdk_pemx_ctl_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_55_63 : 9;
+ uint64_t inb_grant_limit : 3; /**< [ 54: 52](R/W) The number of inbound TLPs allowed in flight in PEM. */
+ uint64_t rd_flt : 1; /**< [ 51: 51](RO) Read fault.
+
+ 0 = A PCIe non-config read which is terminated by PCIe with an error (UR, etc) will
+ return to the NCB/cores all-ones and non-fault.
+ This is compatible with CN88XX pass 1.0.
+ 1 = A PCIe non-config read which is terminated by PCIe with an error (UR, etc) will
+ return to the NCB/cores all-ones and fault. In the case of a read by a core,
+ this fault will cause a synchronous external abort in the core.
+
+ Config reads which are terminated by PCIe with an error (UR, etc), or config reads
+ when the PEM is disabled or link is down, will return to the NCB/cores all-ones and
+ non-fault regardless of this bit. */
+ uint64_t inv_dpar : 1; /**< [ 50: 50](R/W) Invert the generated parity to be written into the most significant data queue buffer RAM
+ block to force a parity error when it is later read. */
+ uint64_t reserved_32_49 : 18;
+ uint64_t cfg_rtry : 16; /**< [ 31: 16](R/W) The time in units of 0x10000 in coprocessor clocks to wait for a CPL to a
+ configuration read that does not carry a retry status. Until such time that the
+ timeout occurs and retry status is received for a configuration read, the read
+ will be resent. A value of 0 disables retries and treats a CPL retry as a CPL
+ UR.
+
+ To use, it is recommended that [CFG_RTRY] be set to a value corresponding to 200ms or
+ less, although the PCI Express Base Specification allows up to 900ms for a
+ device to send a successful completion. When enabled, only one CFG RD may be
+ issued until either successful completion or CPL UR. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t pm_xtoff : 1; /**< [ 11: 11](R/W/H) When written with one, a single cycle pulse is sent to the PCIe core pm_xmt_turnoff port. RC mode. */
+ uint64_t pm_xpme : 1; /**< [ 10: 10](R/W/H) When written with one, a single cycle pulse is sent to the PCIe core pm_xmt_pme port. EP mode. */
+ uint64_t ob_p_cmd : 1; /**< [ 9: 9](R/W/H) When written with one, a single cycle pulse is sent to the PCIe core outband_pwrup_cmd
+ port. EP mode. */
+ uint64_t clk_req_n : 1; /**< [ 8: 8](R/W) Indicates that the application logic is ready to have the reference clock
+ removed. The application should set this signal when it is ready to
+ have the reference clock removed through either L1 PM Sub-states or L1 CPM.
+ If the application does not want the reference clock removed, it should
+ clear this bit.
+
+ Internal:
+ Controls app_clk_req_n input to the DW core. */
+ uint64_t rdy_entr_l23 : 1; /**< [ 7: 7](R/W) Application ready to enter L23. Indication that the application is
+ ready to enter the L23 state. This provides control of the L23 entry
+ (in case certain tasks must be performed before going into L23).
+ The MAC delays sending PM_Enter_L23 (in response to PM_Turn_Off)
+ until this signal becomes active. When this signal has been asserted
+ by the application, it must be kept asserted until L2 entry has completed.
+
+ Internal:
+ Controls app_ready_entr_l23 input to the DW core. */
+ uint64_t reserved_5_6 : 2;
+ uint64_t lnk_enb : 1; /**< [ 4: 4](R/W) When set, the link is enabled; when clear, the link is disabled. This bit is
+ only active in RC mode. */
+ uint64_t reserved_3 : 1;
+ uint64_t fast_lm : 1; /**< [ 2: 2](R/W) When set, forces fast link mode. */
+ uint64_t inv_ecrc : 1; /**< [ 1: 1](R/W) When set, causes the LSB of the ECRC to be inverted. */
+ uint64_t inv_lcrc : 1; /**< [ 0: 0](R/W) When set, causes the LSB of the LCRC to be inverted. */
+#else /* Word 0 - Little Endian */
+ uint64_t inv_lcrc : 1; /**< [ 0: 0](R/W) When set, causes the LSB of the LCRC to be inverted. */
+ uint64_t inv_ecrc : 1; /**< [ 1: 1](R/W) When set, causes the LSB of the ECRC to be inverted. */
+ uint64_t fast_lm : 1; /**< [ 2: 2](R/W) When set, forces fast link mode. */
+ uint64_t reserved_3 : 1;
+ uint64_t lnk_enb : 1; /**< [ 4: 4](R/W) When set, the link is enabled; when clear, the link is disabled. This bit is
+ only active in RC mode. */
+ uint64_t reserved_5_6 : 2;
+ uint64_t rdy_entr_l23 : 1; /**< [ 7: 7](R/W) Application ready to enter L23. Indication that the application is
+ ready to enter the L23 state. This provides control of the L23 entry
+ (in case certain tasks must be performed before going into L23).
+ The MAC delays sending PM_Enter_L23 (in response to PM_Turn_Off)
+ until this signal becomes active. When this signal has been asserted
+ by the application, it must be kept asserted until L2 entry has completed.
+
+ Internal:
+ Controls app_ready_entr_l23 input to the DW core. */
+ uint64_t clk_req_n : 1; /**< [ 8: 8](R/W) Indicates that the application logic is ready to have the reference clock
+ removed. The application should set this signal when it is ready to
+ have the reference clock removed through either L1 PM Sub-states or L1 CPM.
+ If the application does not want the reference clock removed, it should
+ clear this bit.
+
+ Internal:
+ Controls app_clk_req_n input to the DW core. */
+ uint64_t ob_p_cmd : 1; /**< [ 9: 9](R/W/H) When written with one, a single cycle pulse is sent to the PCIe core outband_pwrup_cmd
+ port. EP mode. */
+ uint64_t pm_xpme : 1; /**< [ 10: 10](R/W/H) When written with one, a single cycle pulse is sent to the PCIe core pm_xmt_pme port. EP mode. */
+ uint64_t pm_xtoff : 1; /**< [ 11: 11](R/W/H) When written with one, a single cycle pulse is sent to the PCIe core pm_xmt_turnoff port. RC mode. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t cfg_rtry : 16; /**< [ 31: 16](R/W) The time in units of 0x10000 in coprocessor clocks to wait for a CPL to a
+ configuration read that does not carry a retry status. Until such time that the
+ timeout occurs and retry status is received for a configuration read, the read
+ will be resent. A value of 0 disables retries and treats a CPL retry as a CPL
+ UR.
+
+ To use, it is recommended that [CFG_RTRY] be set to a value corresponding to 200ms or
+ less, although the PCI Express Base Specification allows up to 900ms for a
+ device to send a successful completion. When enabled, only one CFG RD may be
+ issued until either successful completion or CPL UR. */
+ uint64_t reserved_32_49 : 18;
+ uint64_t inv_dpar : 1; /**< [ 50: 50](R/W) Invert the generated parity to be written into the most significant data queue buffer RAM
+ block to force a parity error when it is later read. */
+ uint64_t rd_flt : 1; /**< [ 51: 51](RO) Read fault.
+
+ 0 = A PCIe non-config read which is terminated by PCIe with an error (UR, etc) will
+ return to the NCB/cores all-ones and non-fault.
+ This is compatible with CN88XX pass 1.0.
+ 1 = A PCIe non-config read which is terminated by PCIe with an error (UR, etc) will
+ return to the NCB/cores all-ones and fault. In the case of a read by a core,
+ this fault will cause a synchronous external abort in the core.
+
+ Config reads which are terminated by PCIe with an error (UR, etc), or config reads
+ when the PEM is disabled or link is down, will return to the NCB/cores all-ones and
+ non-fault regardless of this bit. */
+ uint64_t inb_grant_limit : 3; /**< [ 54: 52](R/W) The number of inbound TLPs allowed in flight in PEM. */
+ uint64_t reserved_55_63 : 9;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pemx_ctl_status_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_52_63 : 12;
+ uint64_t rd_flt : 1; /**< [ 51: 51](RO) Read fault.
+
+ 0 = A PCIe non-config read which is terminated by PCIe with an error (UR, etc) will
+ return to the NCB/cores all-ones and non-fault.
+ This is compatible with CN88XX pass 1.0.
+ 1 = A PCIe non-config read which is terminated by PCIe with an error (UR, etc) will
+ return to the NCB/cores all-ones and fault. In the case of a read by a core,
+ this fault will cause a synchronous external abort in the core.
+
+ Config reads which are terminated by PCIe with an error (UR, etc), or config reads
+ when the PEM is disabled or link is down, will return to the NCB/cores all-ones and
+ non-fault regardless of this bit. */
+ uint64_t inv_dpar : 1; /**< [ 50: 50](R/W) Invert the generated parity to be written into the most significant data queue buffer RAM
+ block to force a parity error when it is later read. */
+ uint64_t reserved_48_49 : 2;
+ uint64_t auto_sd : 1; /**< [ 47: 47](RO/H) Link hardware autonomous speed disable. */
+ uint64_t dnum : 5; /**< [ 46: 42](RO/H) Primary bus device number. */
+ uint64_t pbus : 8; /**< [ 41: 34](RO/H) Primary bus number. */
+ uint64_t reserved_32_33 : 2;
+ uint64_t cfg_rtry : 16; /**< [ 31: 16](R/W) The time in units of 0x10000 in coprocessor clocks to wait for a CPL to a
+ configuration read that does not carry a retry status. Until such time that the
+ timeout occurs and retry status is received for a configuration read, the read
+ will be resent. A value of 0 disables retries and treats a CPL retry as a CPL
+ UR.
+
+ To use, it is recommended that [CFG_RTRY] be set to a value corresponding to 200ms or
+ less, although the PCI Express Base Specification allows up to 900ms for a
+ device to send a successful completion. When enabled, only one CFG RD may be
+ issued until either successful completion or CPL UR. */
+ uint64_t spares : 4; /**< [ 15: 12](R/W) Spare flops. */
+ uint64_t pm_xtoff : 1; /**< [ 11: 11](R/W/H) When written with one, a single cycle pulse is sent to the PCIe core pm_xmt_turnoff port. RC mode. */
+ uint64_t reserved_6_10 : 5;
+ uint64_t dly_one : 1; /**< [ 5: 5](R/W/H) When set, the output client state machines will wait one cycle before starting a new TLP out. */
+ uint64_t lnk_enb : 1; /**< [ 4: 4](R/W) When set, the link is enabled; when clear, the link is disabled. This bit is
+ only active in RC mode. */
+ uint64_t ro_ctlp : 1; /**< [ 3: 3](R/W) When set, C-TLPs that have the RO bit set will not wait for P-TLPs that are normally sent first. */
+ uint64_t fast_lm : 1; /**< [ 2: 2](R/W) When set, forces fast link mode. */
+ uint64_t inv_ecrc : 1; /**< [ 1: 1](R/W) When set, causes the LSB of the ECRC to be inverted. */
+ uint64_t inv_lcrc : 1; /**< [ 0: 0](R/W) When set, causes the LSB of the LCRC to be inverted. */
+#else /* Word 0 - Little Endian */
+ uint64_t inv_lcrc : 1; /**< [ 0: 0](R/W) When set, causes the LSB of the LCRC to be inverted. */
+ uint64_t inv_ecrc : 1; /**< [ 1: 1](R/W) When set, causes the LSB of the ECRC to be inverted. */
+ uint64_t fast_lm : 1; /**< [ 2: 2](R/W) When set, forces fast link mode. */
+ uint64_t ro_ctlp : 1; /**< [ 3: 3](R/W) When set, C-TLPs that have the RO bit set will not wait for P-TLPs that are normally sent first. */
+ uint64_t lnk_enb : 1; /**< [ 4: 4](R/W) When set, the link is enabled; when clear, the link is disabled. This bit is
+ only active in RC mode. */
+ uint64_t dly_one : 1; /**< [ 5: 5](R/W/H) When set, the output client state machines will wait one cycle before starting a new TLP out. */
+ uint64_t reserved_6_10 : 5;
+ uint64_t pm_xtoff : 1; /**< [ 11: 11](R/W/H) When written with one, a single cycle pulse is sent to the PCIe core pm_xmt_turnoff port. RC mode. */
+ uint64_t spares : 4; /**< [ 15: 12](R/W) Spare flops. */
+ uint64_t cfg_rtry : 16; /**< [ 31: 16](R/W) The time in units of 0x10000 in coprocessor clocks to wait for a CPL to a
+ configuration read that does not carry a retry status. Until such time that the
+ timeout occurs and retry status is received for a configuration read, the read
+ will be resent. A value of 0 disables retries and treats a CPL retry as a CPL
+ UR.
+
+ To use, it is recommended that [CFG_RTRY] be set to a value corresponding to 200ms or
+ less, although the PCI Express Base Specification allows up to 900ms for a
+ device to send a successful completion. When enabled, only one CFG RD may be
+ issued until either successful completion or CPL UR. */
+ uint64_t reserved_32_33 : 2;
+ uint64_t pbus : 8; /**< [ 41: 34](RO/H) Primary bus number. */
+ uint64_t dnum : 5; /**< [ 46: 42](RO/H) Primary bus device number. */
+ uint64_t auto_sd : 1; /**< [ 47: 47](RO/H) Link hardware autonomous speed disable. */
+ uint64_t reserved_48_49 : 2;
+ uint64_t inv_dpar : 1; /**< [ 50: 50](R/W) Invert the generated parity to be written into the most significant data queue buffer RAM
+ block to force a parity error when it is later read. */
+ uint64_t rd_flt : 1; /**< [ 51: 51](RO) Read fault.
+
+ 0 = A PCIe non-config read which is terminated by PCIe with an error (UR, etc) will
+ return to the NCB/cores all-ones and non-fault.
+ This is compatible with CN88XX pass 1.0.
+ 1 = A PCIe non-config read which is terminated by PCIe with an error (UR, etc) will
+ return to the NCB/cores all-ones and fault. In the case of a read by a core,
+ this fault will cause a synchronous external abort in the core.
+
+ Config reads which are terminated by PCIe with an error (UR, etc), or config reads
+ when the PEM is disabled or link is down, will return to the NCB/cores all-ones and
+ non-fault regardless of this bit. */
+ uint64_t reserved_52_63 : 12;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_pemx_ctl_status_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_46_63 : 18;
+ uint64_t auto_sd : 1; /**< [ 45: 45](RO/H) Link hardware autonomous speed disable. */
+ uint64_t dnum : 5; /**< [ 44: 40](RO/H) Primary bus device number. */
+ uint64_t pbus : 8; /**< [ 39: 32](RO/H) Primary bus number. */
+ uint64_t reserved_16_31 : 16;
+ uint64_t spares : 3; /**< [ 15: 13](R/W) Spare flops. */
+ uint64_t scr_done : 1; /**< [ 12: 12](R/W) The ROM script (if present) can test this bit to see if the ROM script has
+ already run. Typical usage is for the ROM script to test [SCR_DONE] and exit if
+ set; otherwise the script sets this bit at the end of its run. */
+ uint64_t pm_xtoff : 1; /**< [ 11: 11](WO) When written with one, a single cycle pulse is sent from the application
+ to request generation of a PM_Turn_Off message. RC mode.
+
+ Internal:
+ Controls apps_pm_xmt_turnoff input to the DW core. */
+ uint64_t reserved_9_10 : 2;
+ uint64_t clk_req_n : 1; /**< [ 8: 8](R/W) Indicates that the application logic is ready to have the reference clock
+ removed. The application should set this signal when it is ready to
+ have the reference clock removed through either L1 PM Sub-states or L1 CPM.
+ If the application does not want the reference clock removed, it should
+ clear this bit.
+
+ Internal:
+ Controls app_clk_req_n input to the DW core. */
+ uint64_t rdy_entr_l23 : 1; /**< [ 7: 7](R/W) Application ready to enter L23. Indication that the application is
+ ready to enter the L23 state. This provides control of the L23 entry
+ (in case certain tasks must be performed before going into L23).
+ The MAC delays sending PM_Enter_L23 (in response to PM_Turn_Off)
+ until this signal becomes active. When this signal has been asserted
+ by the application, it must be kept asserted until L2 entry has completed.
+
+ Internal:
+ Controls app_ready_entr_l23 input to the DW core. */
+ uint64_t margin_rdy : 1; /**< [ 6: 6](R/W) Margining ready. Indicates when the PHY is ready to accept margining commands. This
+ signal is reflected in PCIEEP_MRG_PORT_CAP_STAT[M_RDY] /
+ PCIERC_MRG_PORT_CAP_STAT[M_RDY].
+
+ Internal:
+ Controls app_margining_ready input to the DW core. */
+ uint64_t frc_retry : 1; /**< [ 5: 5](R/W) When set, forces CRS status to be returned for any config access.
+ Internal:
+ Controls app_req_retry_en input to the DW core. */
+ uint64_t lnk_enb : 1; /**< [ 4: 4](R/W) When set, the link is enabled; when clear, the link is disabled.
+ Once set, can only be cleared by a MAC power reset.
+
+ Internal:
+ Controls app_ltssm_en input to the DW core. */
+ uint64_t l1_exit : 1; /**< [ 3: 3](R/W) L1 exit control.
+ 0 = Hardware is allowed to enter the L1 power state and will only exit when woken
+ up by the remote link partner or when traffic arrives on the NCBO or EBO buses.
+ 1 = Entry into the L1 state is disabled; if already in L1 state, an exit is
+ forced.
+
+ Internal:
+ Controls app_req_exit_l1 input high to the DW core. */
+ uint64_t fast_lm : 1; /**< [ 2: 2](R/W) When set, forces fast link mode.
+ Internal:
+ Controls diag_ctrl_bus[2] input to the DW core. */
+ uint64_t inv_ecrc : 1; /**< [ 1: 1](R/W) When set, causes the LSB of the ECRC to be inverted.
+ Internal:
+ Controls diag_ctrl_bus[1] input to the DW core. */
+ uint64_t inv_lcrc : 1; /**< [ 0: 0](R/W) When set, causes the LSB of the LCRC to be inverted.
+ Internal:
+ Controls diag_ctrl_bus[0] input to the DW core. */
+#else /* Word 0 - Little Endian */
+ uint64_t inv_lcrc : 1; /**< [ 0: 0](R/W) When set, causes the LSB of the LCRC to be inverted.
+ Internal:
+ Controls diag_ctrl_bus[0] input to the DW core. */
+ uint64_t inv_ecrc : 1; /**< [ 1: 1](R/W) When set, causes the LSB of the ECRC to be inverted.
+ Internal:
+ Controls diag_ctrl_bus[1] input to the DW core. */
+ uint64_t fast_lm : 1; /**< [ 2: 2](R/W) When set, forces fast link mode.
+ Internal:
+ Controls diag_ctrl_bus[2] input to the DW core. */
+ uint64_t l1_exit : 1; /**< [ 3: 3](R/W) L1 exit control.
+ 0 = Hardware is allowed to enter the L1 power state and will only exit when woken
+ up by the remote link partner or when traffic arrives on the NCBO or EBO buses.
+ 1 = Entry into the L1 state is disabled; if already in L1 state, an exit is
+ forced.
+
+ Internal:
+ Controls app_req_exit_l1 input high to the DW core. */
+ uint64_t lnk_enb : 1; /**< [ 4: 4](R/W) When set, the link is enabled; when clear, the link is disabled.
+ Once set, can only be cleared by a MAC power reset.
+
+ Internal:
+ Controls app_ltssm_en input to the DW core. */
+ uint64_t frc_retry : 1; /**< [ 5: 5](R/W) When set, forces CRS status to be returned for any config access.
+ Internal:
+ Controls app_req_retry_en input to the DW core. */
+ uint64_t margin_rdy : 1; /**< [ 6: 6](R/W) Margining ready. Indicates when the PHY is ready to accept margining commands. This
+ signal is reflected in PCIEEP_MRG_PORT_CAP_STAT[M_RDY] /
+ PCIERC_MRG_PORT_CAP_STAT[M_RDY].
+
+ Internal:
+ Controls app_margining_ready input to the DW core. */
+ uint64_t rdy_entr_l23 : 1; /**< [ 7: 7](R/W) Application ready to enter L23. Indication that the application is
+ ready to enter the L23 state. This provides control of the L23 entry
+ (in case certain tasks must be performed before going into L23).
+ The MAC delays sending PM_Enter_L23 (in response to PM_Turn_Off)
+ until this signal becomes active. When this signal has been asserted
+ by the application, it must be kept asserted until L2 entry has completed.
+
+ Internal:
+ Controls app_ready_entr_l23 input to the DW core. */
+ uint64_t clk_req_n : 1; /**< [ 8: 8](R/W) Indicates that the application logic is ready to have the reference clock
+ removed. The application should set this signal when it is ready to
+ have the reference clock removed through either L1 PM Sub-states or L1 CPM.
+ If the application does not want the reference clock removed, it should
+ clear this bit.
+
+ Internal:
+ Controls app_clk_req_n input to the DW core. */
+ uint64_t reserved_9_10 : 2;
+ uint64_t pm_xtoff : 1; /**< [ 11: 11](WO) When written with one, a single cycle pulse is sent from the application
+ to request generation of a PM_Turn_Off message. RC mode.
+
+ Internal:
+ Controls apps_pm_xmt_turnoff input to the DW core. */
+ uint64_t scr_done : 1; /**< [ 12: 12](R/W) The ROM script (if present) can test this bit to see if the ROM script has
+ already run. Typical usage is for the ROM script to test [SCR_DONE] and exit if
+ set; otherwise the script sets this bit at the end of its run. */
+ uint64_t spares : 3; /**< [ 15: 13](R/W) Spare flops. */
+ uint64_t reserved_16_31 : 16;
+ uint64_t pbus : 8; /**< [ 39: 32](RO/H) Primary bus number. */
+ uint64_t dnum : 5; /**< [ 44: 40](RO/H) Primary bus device number. */
+ uint64_t auto_sd : 1; /**< [ 45: 45](RO/H) Link hardware autonomous speed disable. */
+ uint64_t reserved_46_63 : 18;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_pemx_ctl_status_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_55_63 : 9;
+ uint64_t inb_grant_limit : 3; /**< [ 54: 52](R/W) The number of inbound TLPs allowed in flight in PEM to improve
+ small TLP performance. */
+ uint64_t reserved_51 : 1;
+ uint64_t inv_dpar : 1; /**< [ 50: 50](R/W) Invert the generated parity to be written into the most significant data queue buffer RAM
+ block to force a parity error when it is later read. */
+ uint64_t reserved_48_49 : 2;
+ uint64_t auto_sd : 1; /**< [ 47: 47](RO/H) Link hardware autonomous speed disable. */
+ uint64_t dnum : 5; /**< [ 46: 42](RO/H) Primary bus device number. */
+ uint64_t pbus : 8; /**< [ 41: 34](RO/H) Primary bus number. */
+ uint64_t reserved_32_33 : 2;
+ uint64_t cfg_rtry : 16; /**< [ 31: 16](R/W) The time in units of 0x10000 in coprocessor clocks to wait for a CPL to a
+ configuration read that does not carry a retry status. Until such time that the
+ timeout occurs and retry status is received for a configuration read, the read
+ will be resent. A value of 0 disables retries and treats a CPL retry as a CPL
+ UR.
+
+ To use, it is recommended that [CFG_RTRY] be set to a value corresponding to 200ms or
+ less, although the PCI Express Base Specification allows up to 900ms for a
+ device to send a successful completion. When enabled, only one CFG RD may be
+ issued until either successful completion or CPL UR. */
+ uint64_t spares : 4; /**< [ 15: 12](R/W) Spare flops. */
+ uint64_t pm_xtoff : 1; /**< [ 11: 11](R/W/H) When written with one, a single cycle pulse is sent to the PCIe core pm_xmt_turnoff port. RC mode. */
+ uint64_t reserved_6_10 : 5;
+ uint64_t dly_one : 1; /**< [ 5: 5](R/W/H) When set, the output client state machines will wait one cycle before starting a new TLP out. */
+ uint64_t lnk_enb : 1; /**< [ 4: 4](R/W) When set, the link is enabled; when clear, the link is disabled. This bit is
+ only active in RC mode. */
+ uint64_t ro_ctlp : 1; /**< [ 3: 3](R/W) When set, C-TLPs that have the RO bit set will not wait for P-TLPs that are normally sent first. */
+ uint64_t fast_lm : 1; /**< [ 2: 2](R/W) When set, forces fast link mode. */
+ uint64_t inv_ecrc : 1; /**< [ 1: 1](R/W) When set, causes the LSB of the ECRC to be inverted. */
+ uint64_t inv_lcrc : 1; /**< [ 0: 0](R/W) When set, causes the LSB of the LCRC to be inverted. */
+#else /* Word 0 - Little Endian */
+ uint64_t inv_lcrc : 1; /**< [ 0: 0](R/W) When set, causes the LSB of the LCRC to be inverted. */
+ uint64_t inv_ecrc : 1; /**< [ 1: 1](R/W) When set, causes the LSB of the ECRC to be inverted. */
+ uint64_t fast_lm : 1; /**< [ 2: 2](R/W) When set, forces fast link mode. */
+ uint64_t ro_ctlp : 1; /**< [ 3: 3](R/W) When set, C-TLPs that have the RO bit set will not wait for P-TLPs that are normally sent first. */
+ uint64_t lnk_enb : 1; /**< [ 4: 4](R/W) When set, the link is enabled; when clear, the link is disabled. This bit is
+ only active in RC mode. */
+ uint64_t dly_one : 1; /**< [ 5: 5](R/W/H) When set, the output client state machines will wait one cycle before starting a new TLP out. */
+ uint64_t reserved_6_10 : 5;
+ uint64_t pm_xtoff : 1; /**< [ 11: 11](R/W/H) When written with one, a single cycle pulse is sent to the PCIe core pm_xmt_turnoff port. RC mode. */
+ uint64_t spares : 4; /**< [ 15: 12](R/W) Spare flops. */
+ uint64_t cfg_rtry : 16; /**< [ 31: 16](R/W) The time in units of 0x10000 in coprocessor clocks to wait for a CPL to a
+ configuration read that does not carry a retry status. Until such time that the
+ timeout occurs and retry status is received for a configuration read, the read
+ will be resent. A value of 0 disables retries and treats a CPL retry as a CPL
+ UR.
+
+ To use, it is recommended that [CFG_RTRY] be set to a value corresponding to 200ms or
+ less, although the PCI Express Base Specification allows up to 900ms for a
+ device to send a successful completion. When enabled, only one CFG RD may be
+ issued until either successful completion or CPL UR. */
+ uint64_t reserved_32_33 : 2;
+ uint64_t pbus : 8; /**< [ 41: 34](RO/H) Primary bus number. */
+ uint64_t dnum : 5; /**< [ 46: 42](RO/H) Primary bus device number. */
+ uint64_t auto_sd : 1; /**< [ 47: 47](RO/H) Link hardware autonomous speed disable. */
+ uint64_t reserved_48_49 : 2;
+ uint64_t inv_dpar : 1; /**< [ 50: 50](R/W) Invert the generated parity to be written into the most significant data queue buffer RAM
+ block to force a parity error when it is later read. */
+ uint64_t reserved_51 : 1;
+ uint64_t inb_grant_limit : 3; /**< [ 54: 52](R/W) The number of inbound TLPs allowed in flight in PEM to improve
+ small TLP performance. */
+ uint64_t reserved_55_63 : 9;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_pemx_ctl_status_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t auto_sd : 1; /**< [ 47: 47](RO/H) Link hardware autonomous speed disable. */
+ uint64_t dnum : 5; /**< [ 46: 42](RO/H) Primary bus device number. */
+ uint64_t pbus : 8; /**< [ 41: 34](RO/H) Primary bus number. */
+ uint64_t reserved_32_33 : 2;
+ uint64_t cfg_rtry : 16; /**< [ 31: 16](R/W) The time in units of 0x10000 in coprocessor clocks to wait for a CPL to a
+ configuration read that does not carry a retry status. Until such time that the
+ timeout occurs and retry status is received for a configuration read, the read
+ will be resent. A value of 0 disables retries and treats a CPL retry as a CPL
+ UR.
+
+ To use, it is recommended that [CFG_RTRY] be set to a value corresponding to 200ms or
+ less, although the PCI Express Base Specification allows up to 900ms for a
+ device to send a successful completion. When enabled, only one CFG RD may be
+ issued until either successful completion or CPL UR. */
+ uint64_t spares : 4; /**< [ 15: 12](R/W) Spare flops. */
+ uint64_t pm_xtoff : 1; /**< [ 11: 11](R/W/H) When written with one, a single cycle pulse is sent to the PCIe core pm_xmt_turnoff port. RC mode. */
+ uint64_t pm_xpme : 1; /**< [ 10: 10](R/W/H) When written with one, a single cycle pulse is sent to the PCIe core pm_xmt_pme port. EP mode. */
+ uint64_t ob_p_cmd : 1; /**< [ 9: 9](R/W/H) When written with one, a single cycle pulse is sent to the PCIe core outband_pwrup_cmd
+ port. EP mode. */
+ uint64_t reserved_8 : 1;
+ uint64_t reserved_7 : 1;
+ uint64_t nf_ecrc : 1; /**< [ 6: 6](R/W) Do not forward peer-to-peer ECRC TLPs. */
+ uint64_t reserved_5 : 1;
+ uint64_t lnk_enb : 1; /**< [ 4: 4](R/W) When set, the link is enabled; when clear, the link is disabled. This bit is
+ only active in RC mode. */
+ uint64_t ro_ctlp : 1; /**< [ 3: 3](R/W) When set, C-TLPs that have the RO bit set will not wait for P-TLPs that are normally sent first. */
+ uint64_t fast_lm : 1; /**< [ 2: 2](R/W) When set, forces fast link mode. */
+ uint64_t inv_ecrc : 1; /**< [ 1: 1](R/W) When set, causes the LSB of the ECRC to be inverted. */
+ uint64_t inv_lcrc : 1; /**< [ 0: 0](R/W) When set, causes the LSB of the LCRC to be inverted. */
+#else /* Word 0 - Little Endian */
+ uint64_t inv_lcrc : 1; /**< [ 0: 0](R/W) When set, causes the LSB of the LCRC to be inverted. */
+ uint64_t inv_ecrc : 1; /**< [ 1: 1](R/W) When set, causes the LSB of the ECRC to be inverted. */
+ uint64_t fast_lm : 1; /**< [ 2: 2](R/W) When set, forces fast link mode. */
+ uint64_t ro_ctlp : 1; /**< [ 3: 3](R/W) When set, C-TLPs that have the RO bit set will not wait for P-TLPs that are normally sent first. */
+ uint64_t lnk_enb : 1; /**< [ 4: 4](R/W) When set, the link is enabled; when clear, the link is disabled. This bit is
+ only active in RC mode. */
+ uint64_t reserved_5 : 1;
+ uint64_t nf_ecrc : 1; /**< [ 6: 6](R/W) Do not forward peer-to-peer ECRC TLPs. */
+ uint64_t reserved_7 : 1;
+ uint64_t reserved_8 : 1;
+ uint64_t ob_p_cmd : 1; /**< [ 9: 9](R/W/H) When written with one, a single cycle pulse is sent to the PCIe core outband_pwrup_cmd
+ port. EP mode. */
+ uint64_t pm_xpme : 1; /**< [ 10: 10](R/W/H) When written with one, a single cycle pulse is sent to the PCIe core pm_xmt_pme port. EP mode. */
+ uint64_t pm_xtoff : 1; /**< [ 11: 11](R/W/H) When written with one, a single cycle pulse is sent to the PCIe core pm_xmt_turnoff port. RC mode. */
+ uint64_t spares : 4; /**< [ 15: 12](R/W) Spare flops. */
+ uint64_t cfg_rtry : 16; /**< [ 31: 16](R/W) The time in units of 0x10000 in coprocessor clocks to wait for a CPL to a
+ configuration read that does not carry a retry status. Until such time that the
+ timeout occurs and retry status is received for a configuration read, the read
+ will be resent. A value of 0 disables retries and treats a CPL retry as a CPL
+ UR.
+
+ To use, it is recommended that [CFG_RTRY] be set to a value corresponding to 200ms or
+ less, although the PCI Express Base Specification allows up to 900ms for a
+ device to send a successful completion. When enabled, only one CFG RD may be
+ issued until either successful completion or CPL UR. */
+ uint64_t reserved_32_33 : 2;
+ uint64_t pbus : 8; /**< [ 41: 34](RO/H) Primary bus number. */
+ uint64_t dnum : 5; /**< [ 46: 42](RO/H) Primary bus device number. */
+ uint64_t auto_sd : 1; /**< [ 47: 47](RO/H) Link hardware autonomous speed disable. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } cn83xx;
+ struct bdk_pemx_ctl_status_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_55_63 : 9;
+ uint64_t inb_grant_limit : 3; /**< [ 54: 52](R/W) The number of inbound TLPs allowed in flight in PEM. */
+ uint64_t reserved_51 : 1;
+ uint64_t inv_dpar : 1; /**< [ 50: 50](R/W) Invert the generated parity to be written into the most significant data queue buffer RAM
+ block to force a parity error when it is later read. */
+ uint64_t reserved_48_49 : 2;
+ uint64_t auto_sd : 1; /**< [ 47: 47](RO/H) Link hardware autonomous speed disable. */
+ uint64_t dnum : 5; /**< [ 46: 42](RO/H) Primary bus device number. */
+ uint64_t pbus : 8; /**< [ 41: 34](RO/H) Primary bus number. */
+ uint64_t reserved_32_33 : 2;
+ uint64_t cfg_rtry : 16; /**< [ 31: 16](R/W) The time in units of 0x10000 in coprocessor clocks to wait for a CPL to a
+ configuration read that does not carry a retry status. Until such time that the
+ timeout occurs and retry status is received for a configuration read, the read
+ will be resent. A value of 0 disables retries and treats a CPL retry as a CPL
+ UR.
+
+ To use, it is recommended that [CFG_RTRY] be set to a value corresponding to 200ms or
+ less, although the PCI Express Base Specification allows up to 900ms for a
+ device to send a successful completion. When enabled, only one CFG RD may be
+ issued until either successful completion or CPL UR. */
+ uint64_t spares : 4; /**< [ 15: 12](R/W) Spare flops. */
+ uint64_t pm_xtoff : 1; /**< [ 11: 11](R/W/H) When written with one, a single cycle pulse is sent to the PCIe core pm_xmt_turnoff port. RC mode. */
+ uint64_t reserved_6_10 : 5;
+ uint64_t dly_one : 1; /**< [ 5: 5](R/W/H) When set, the output client state machines will wait one cycle before starting a new TLP out. */
+ uint64_t lnk_enb : 1; /**< [ 4: 4](R/W) When set, the link is enabled; when clear, the link is disabled. This bit is
+ only active in RC mode. */
+ uint64_t ro_ctlp : 1; /**< [ 3: 3](R/W) When set, C-TLPs that have the RO bit set will not wait for P-TLPs that are normally sent first. */
+ uint64_t fast_lm : 1; /**< [ 2: 2](R/W) When set, forces fast link mode. */
+ uint64_t inv_ecrc : 1; /**< [ 1: 1](R/W) When set, causes the LSB of the ECRC to be inverted. */
+ uint64_t inv_lcrc : 1; /**< [ 0: 0](R/W) When set, causes the LSB of the LCRC to be inverted. */
+#else /* Word 0 - Little Endian */
+ uint64_t inv_lcrc : 1; /**< [ 0: 0](R/W) When set, causes the LSB of the LCRC to be inverted. */
+ uint64_t inv_ecrc : 1; /**< [ 1: 1](R/W) When set, causes the LSB of the ECRC to be inverted. */
+ uint64_t fast_lm : 1; /**< [ 2: 2](R/W) When set, forces fast link mode. */
+ uint64_t ro_ctlp : 1; /**< [ 3: 3](R/W) When set, C-TLPs that have the RO bit set will not wait for P-TLPs that are normally sent first. */
+ uint64_t lnk_enb : 1; /**< [ 4: 4](R/W) When set, the link is enabled; when clear, the link is disabled. This bit
+ is only active in RC mode. */
+ uint64_t dly_one : 1; /**< [ 5: 5](R/W/H) When set the output client state machines will wait one cycle before starting a new TLP out. */
+ uint64_t reserved_6_10 : 5;
+ uint64_t pm_xtoff : 1; /**< [ 11: 11](R/W/H) When written with one, a single cycle pulse is sent to the PCIe core pm_xmt_turnoff port. RC mode. */
+ uint64_t spares : 4; /**< [ 15: 12](R/W) Spare flops. */
+ uint64_t cfg_rtry : 16; /**< [ 31: 16](R/W) The time in units of 0x10000 coprocessor clocks to wait for a CPL to a
+ configuration read that does not carry a retry status. Until the timeout
+ occurs, a configuration read whose completion carries a retry status is
+ resent. A value of 0 disables retries and treats a CPL retry as a CPL
+ UR.
+
+ To use, it is recommended that [CFG_RTRY] be set to a value corresponding
+ to 200 ms or less, although the PCI Express Base Specification allows up
+ to 900 ms for a device to send a successful completion. When enabled,
+ only one CFG RD may be issued until either successful completion or CPL UR. */
+ uint64_t reserved_32_33 : 2;
+ uint64_t pbus : 8; /**< [ 41: 34](RO/H) Primary bus number. */
+ uint64_t dnum : 5; /**< [ 46: 42](RO/H) Primary bus device number. */
+ uint64_t auto_sd : 1; /**< [ 47: 47](RO/H) Link hardware autonomous speed disable. */
+ uint64_t reserved_48_49 : 2;
+ uint64_t inv_dpar : 1; /**< [ 50: 50](R/W) Invert the generated parity to be written into the most significant data queue buffer RAM
+ block to force a parity error when it is later read. */
+ uint64_t reserved_51 : 1;
+ uint64_t inb_grant_limit : 3; /**< [ 54: 52](R/W) The number of inbound TLPs allowed in flight in PEM. */
+ uint64_t reserved_55_63 : 9;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_pemx_ctl_status bdk_pemx_ctl_status_t;
+
+static inline uint64_t BDK_PEMX_CTL_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_CTL_STATUS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x87e0c0000000ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000000ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0c0000000ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000000ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_CTL_STATUS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_CTL_STATUS(a) bdk_pemx_ctl_status_t
+#define bustype_BDK_PEMX_CTL_STATUS(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_CTL_STATUS(a) "PEMX_CTL_STATUS"
+#define device_bar_BDK_PEMX_CTL_STATUS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_CTL_STATUS(a) (a)
+#define arguments_BDK_PEMX_CTL_STATUS(a) (a),-1,-1,-1
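+
+/*
+ * Editorial sketch, not part of the imported BDK sources: programming
+ * PEM()_CTL_STATUS[CFG_RTRY] for a roughly 200 ms retry window. One
+ * [CFG_RTRY] unit is 0x10000 coprocessor clocks, so at an assumed 800 MHz
+ * coprocessor clock one unit is 65536 / 800e6 s ~= 81.9 us and ~200 ms is
+ * about 2441 units. The BDK_CSR_READ()/BDK_CSR_WRITE() helpers and the
+ * coprocessor-clock argument are assumptions based on the common BDK CSR
+ * idiom (bdk-csr.h); treat this as illustration only.
+ */
+static inline void bdk_pemx_cfg_rtry_sketch(bdk_node_t node, int pem, uint64_t sclk_hz)
+{
+ /* Nanoseconds per [CFG_RTRY] unit at the given coprocessor clock. */
+ uint64_t unit_ns = (0x10000ull * 1000000000ull) / sclk_hz;
+ uint64_t units = 200000000ull / unit_ns; /* ~200 ms worth of units */
+ if (units > 0xffff)
+ units = 0xffff; /* field is 16 bits wide */
+ bdk_pemx_ctl_status_t ctl;
+ ctl.u = BDK_CSR_READ(node, BDK_PEMX_CTL_STATUS(pem));
+ ctl.s.cfg_rtry = units;
+ BDK_CSR_WRITE(node, BDK_PEMX_CTL_STATUS(pem), ctl.u);
+}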
+
+/**
+ * Register (NCB) pem#_ctl_status2
+ *
+ * PEM Control Status 2 Register
+ * This register contains additional general control and status of the PEM.
+ *
+ * This register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset. Note this differs from PEM()_CTL_STATUS's reset.
+ */
+union bdk_pemx_ctl_status2
+{
+ uint64_t u;
+ struct bdk_pemx_ctl_status2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t trgt1_ecc_cor_dis : 1; /**< [ 32: 32](R/W) Disable correction of single bit ECC errors on TRGT1 data from PEMC to PEMM. */
+ uint64_t cfg_rtry : 16; /**< [ 31: 16](R/W) The time in units of 655,360 ns to wait for a CPL to an
+ outbound configuration read that does not carry a retry status. Until the
+ timeout occurs, a configuration read whose completion carries a retry
+ status is resent. A value of zero disables retries and treats a CPL retry
+ as a CPL UR.
+
+ To use, it is recommended that [CFG_RTRY] be set to a value corresponding
+ to 200 ms or less, although the PCI Express Base Specification allows up
+ to 900 ms for a device to send a successful completion. When enabled,
+ only one CFG RD may be issued until either successful completion or CPL UR. */
+ uint64_t no_fwd_prg : 16; /**< [ 15: 0](R/W) The time in units of 0x10000 core clocks to wait for the TLP FIFOs to be able
+ to unload an entry. If there is no forward progress and the timeout occurs,
+ credits are returned to the SLI and an interrupt (if enabled) is asserted. Any
+ further TLPs received are dropped on the floor and their associated credits are
+ returned as well. Note that 0xFFFF is a reserved value that immediately puts the
+ PEM in the 'forward progress stopped' state; this state holds until a MAC reset is received. */
+#else /* Word 0 - Little Endian */
+ uint64_t no_fwd_prg : 16; /**< [ 15: 0](R/W) The time in units of 0x10000 core clocks to wait for the TLP FIFOs to be able
+ to unload an entry. If there is no forward progress and the timeout occurs,
+ credits are returned to the SLI and an interrupt (if enabled) is asserted. Any
+ further TLPs received are dropped on the floor and their associated credits are
+ returned as well. Note that 0xFFFF is a reserved value that immediately puts the
+ PEM in the 'forward progress stopped' state; this state holds until a MAC reset is received. */
+ uint64_t cfg_rtry : 16; /**< [ 31: 16](R/W) The time in units of 655,360 ns to wait for a CPL to an
+ outbound configuration read that does not carry a retry status. Until the
+ timeout occurs, a configuration read whose completion carries a retry
+ status is resent. A value of zero disables retries and treats a CPL retry
+ as a CPL UR.
+
+ To use, it is recommended that [CFG_RTRY] be set to a value corresponding
+ to 200 ms or less, although the PCI Express Base Specification allows up
+ to 900 ms for a device to send a successful completion. When enabled,
+ only one CFG RD may be issued until either successful completion or CPL UR. */
+ uint64_t trgt1_ecc_cor_dis : 1; /**< [ 32: 32](R/W) Disable correction of single bit ECC errors on TRGT1 data from PEMC to PEMM. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pemx_ctl_status2_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t no_fwd_prg : 16; /**< [ 15: 0](R/W) The time in units of 0x10000 core clocks to wait for the TLP FIFOs to be able
+ to unload an entry. If there is no forward progress and the timeout occurs,
+ credits are returned to the SLI and an interrupt (if enabled) is asserted. Any
+ further TLPs received are dropped on the floor and their associated credits are
+ returned as well. Note that 0xFFFF is a reserved value that immediately puts the
+ PEM in the 'forward progress stopped' state; this state holds until a MAC reset is received. */
+#else /* Word 0 - Little Endian */
+ uint64_t no_fwd_prg : 16; /**< [ 15: 0](R/W) The time in units of 0x10000 core clocks to wait for the TLP FIFOs to be able
+ to unload an entry. If there is no forward progress and the timeout occurs,
+ credits are returned to the SLI and an interrupt (if enabled) is asserted. Any
+ further TLPs received are dropped on the floor and their associated credits are
+ returned as well. Note that 0xFFFF is a reserved value that immediately puts the
+ PEM in the 'forward progress stopped' state; this state holds until a MAC reset is received. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_pemx_ctl_status2_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t trgt1_ecc_cor_dis : 1; /**< [ 32: 32](R/W) Disable correction of single bit ECC errors on TRGT1 data from PEMC to PEMM. */
+ uint64_t cfg_rtry : 16; /**< [ 31: 16](R/W) The time in units of 655,360 ns to wait for a CPL to an
+ outbound configuration read that does not carry a retry status. Until the
+ timeout occurs, a configuration read whose completion carries a retry
+ status is resent. A value of zero disables retries and treats a CPL retry
+ as a CPL UR.
+
+ To use, it is recommended that [CFG_RTRY] be set to a value corresponding
+ to 200 ms or less, although the PCI Express Base Specification allows up
+ to 900 ms for a device to send a successful completion. When enabled,
+ only one CFG RD may be issued until either successful completion or CPL UR. */
+ uint64_t no_fwd_prg : 16; /**< [ 15: 0](R/W) The time in units of 655,360 ns to wait for the TLP FIFOs to be able to
+ unload an outbound entry. If there is no forward progress and the timeout
+ occurs, credits are returned to the originating bus and an interrupt (if
+ enabled) is asserted. Further TLPs received are dropped on the floor and
+ their associated credits are returned as well. Non-posted TLPs are dropped
+ with a completion returned (all ones for configuration requests, otherwise a
+ completion with fault). Note that 0x0000 blocks detection of no forward
+ progress, and 0xFFFF is a reserved value that immediately places the PEM
+ into the 'forward progress stopped' state; this state holds until a MAC reset is received. */
+#else /* Word 0 - Little Endian */
+ uint64_t no_fwd_prg : 16; /**< [ 15: 0](R/W) The time in units of 655,360 ns to wait for the TLP FIFOs to be able to
+ unload an outbound entry. If there is no forward progress and the timeout
+ occurs, credits are returned to the originating bus and an interrupt (if
+ enabled) is asserted. Further TLPs received are dropped on the floor and
+ their associated credits are returned as well. Non-posted TLPs are dropped
+ with a completion returned (all ones for configuration requests, otherwise a
+ completion with fault). Note that 0x0000 blocks detection of no forward
+ progress, and 0xFFFF is a reserved value that immediately places the PEM
+ into the 'forward progress stopped' state; this state holds until a MAC reset is received. */
+ uint64_t cfg_rtry : 16; /**< [ 31: 16](R/W) The time in units of 655,360 ns to wait for a CPL to an
+ outbound configuration read that does not carry a retry status. Until the
+ timeout occurs, a configuration read whose completion carries a retry
+ status is resent. A value of zero disables retries and treats a CPL retry
+ as a CPL UR.
+
+ To use, it is recommended that [CFG_RTRY] be set to a value corresponding
+ to 200 ms or less, although the PCI Express Base Specification allows up
+ to 900 ms for a device to send a successful completion. When enabled,
+ only one CFG RD may be issued until either successful completion or CPL UR. */
+ uint64_t trgt1_ecc_cor_dis : 1; /**< [ 32: 32](R/W) Disable correction of single bit ECC errors on TRGT1 data from PEMC to PEMM. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_pemx_ctl_status2 bdk_pemx_ctl_status2_t;
+
+static inline uint64_t BDK_PEMX_CTL_STATUS2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_CTL_STATUS2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x87e0c0000008ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000008ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0c0000008ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000120ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_CTL_STATUS2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_CTL_STATUS2(a) bdk_pemx_ctl_status2_t
+#define bustype_BDK_PEMX_CTL_STATUS2(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_CTL_STATUS2(a) "PEMX_CTL_STATUS2"
+#define device_bar_BDK_PEMX_CTL_STATUS2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_CTL_STATUS2(a) (a)
+#define arguments_BDK_PEMX_CTL_STATUS2(a) (a),-1,-1,-1
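+
+/*
+ * Editorial sketch, not part of the imported BDK sources: for this register
+ * one [CFG_RTRY] unit is 655,360 ns, so a ~200 ms retry window is
+ * 200,000,000 / 655,360 ~= 305 units. BDK_CSR_READ()/BDK_CSR_WRITE() usage
+ * is an assumption based on the common BDK CSR idiom (bdk-csr.h).
+ */
+static inline void bdk_pemx_cfg_rtry2_sketch(bdk_node_t node, int pem)
+{
+ bdk_pemx_ctl_status2_t c2;
+ c2.u = BDK_CSR_READ(node, BDK_PEMX_CTL_STATUS2(pem));
+ c2.s.cfg_rtry = 305; /* ~200 ms expressed in 655,360 ns units */
+ BDK_CSR_WRITE(node, BDK_PEMX_CTL_STATUS2(pem), c2.u);
+}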
+
+/**
+ * Register (RSL) pem#_ctl_stream
+ *
+ * PEM EP Mode Stream Register
+ * This register is used to generate the SMMU stream ID when in endpoint mode.
+ */
+union bdk_pemx_ctl_stream
+{
+ uint64_t u;
+ struct bdk_pemx_ctl_stream_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t epsbmax : 16; /**< [ 31: 16](R/W) The maximum SMMU stream ID that will be generated by inbound endpoint
+ transactions. See [EPSBBASE]. Resets to PCC_DEV_CON_E::PCIERC({a}) where {a} is
+ the PEM number. */
+ uint64_t epsbbase : 16; /**< [ 15: 0](R/W) The base SMMU stream ID that will be generated by inbound endpoint
+ transactions. Resets to PCC_DEV_CON_E::PCIERC({a}) where {a} is the PEM number.
+
+ When in EP mode:
+ _ stream id = min( (PCI_stream_id\<15:0\> + [EPSBBASE]), [EPSBMAX]).
+
+ When [EPSBBASE]/[EPSBMAX] are changed from the reset values then:
+
+ * Different endpoint requestors will map to different SMMU streams, enabling the
+ possibility of having different SMMU translations for each endpoint requestor.
+
+ * Software must ensure that [EPSBBASE]...[EPSBMAX] are non-overlapping between
+ all endpoint PEMs and non-overlapping with existing PCC devices.
+
+ * IOBN()_SLITAG()_CONTROL[BITS_DIS] must be set. */
+#else /* Word 0 - Little Endian */
+ uint64_t epsbbase : 16; /**< [ 15: 0](R/W) The base SMMU stream ID that will be generated by inbound endpoint
+ transactions. Resets to PCC_DEV_CON_E::PCIERC({a}) where {a} is the PEM number.
+
+ When in EP mode:
+ _ stream id = min( (PCI_stream_id\<15:0\> + [EPSBBASE]), [EPSBMAX]).
+
+ When [EPSBBASE]/[EPSBMAX] are changed from the reset values then:
+
+ * Different endpoint requestors will map to different SMMU streams, enabling the
+ possibility of having different SMMU translations for each endpoint requestor.
+
+ * Software must ensure that [EPSBBASE]...[EPSBMAX] are non-overlapping between
+ all endpoint PEMs and non-overlapping with existing PCC devices.
+
+ * IOBN()_SLITAG()_CONTROL[BITS_DIS] must be set. */
+ uint64_t epsbmax : 16; /**< [ 31: 16](R/W) The maximum SMMU stream ID that will be generated by inbound endpoint
+ transactions. See [EPSBBASE]. Resets to PCC_DEV_CON_E::PCIERC({a}) where {a} is
+ the PEM number. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_ctl_stream_s cn; */
+};
+typedef union bdk_pemx_ctl_stream bdk_pemx_ctl_stream_t;
+
+static inline uint64_t BDK_PEMX_CTL_STREAM(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_CTL_STREAM(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c00004d0ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_CTL_STREAM", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_CTL_STREAM(a) bdk_pemx_ctl_stream_t
+#define bustype_BDK_PEMX_CTL_STREAM(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_PEMX_CTL_STREAM(a) "PEMX_CTL_STREAM"
+#define device_bar_BDK_PEMX_CTL_STREAM(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_CTL_STREAM(a) (a)
+#define arguments_BDK_PEMX_CTL_STREAM(a) (a),-1,-1,-1
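+
+/*
+ * Editorial sketch, not part of the imported BDK sources: the EP-mode SMMU
+ * stream ID mapping documented above,
+ * stream_id = min(PCI_stream_id<15:0> + [EPSBBASE], [EPSBMAX]),
+ * written out directly for reference.
+ */
+static inline uint16_t bdk_pemx_ep_stream_id_sketch(uint16_t pci_stream_id, uint16_t epsbbase, uint16_t epsbmax)
+{
+ uint32_t id = (uint32_t)pci_stream_id + epsbbase; /* sum may exceed 16 bits */
+ return (id > epsbmax) ? epsbmax : (uint16_t)id; /* clamp to [EPSBMAX] */
+}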
+
+/**
+ * Register (RSL) pem#_dbg_ena_w1c
+ *
+ * PEM Debug Information Enable Clear Register
+ * This register clears interrupt enable bits.
+ */
+union bdk_pemx_dbg_ena_w1c
+{
+ uint64_t u;
+ struct bdk_pemx_dbg_ena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_58_63 : 6;
+ uint64_t m2s_pe : 1; /**< [ 57: 57](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[M2S_PE]. */
+ uint64_t qhdr_b1_dbe : 1; /**< [ 56: 56](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[QHDR_B1_DBE]. */
+ uint64_t reserved_51_55 : 5;
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_C_DBE]. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_C_SBE]. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_D1_DBE]. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_D1_SBE]. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_D0_DBE]. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_D0_SBE]. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_C_DBE]. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_C_SBE]. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_D1_DBE]. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_D1_SBE]. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_D0_DBE]. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_D0_SBE]. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_C_DBE]. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_C_SBE]. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_D1_DBE]. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_D1_SBE]. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_D0_DBE]. */
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_D0_SBE]. */
+ uint64_t reserved_32 : 1;
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[LOFP]. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[ECRC_E]. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RAWWPP].
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RACPP].
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RAMTLP].
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t rarwdns : 1; /**< [ 26: 26](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RARWDNS].
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[CAAR]. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RACCA].
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RACUR].
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RAUC].
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RQO].
+ Internal:
+ radm_qoverflow. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[FCUV].
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rpe : 1; /**< [ 19: 19](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RPE].
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[FCPVWT].
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[DPEOOSD].
+ Internal:
+ rdlh_prot_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RTWDLE].
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RDWDLE].
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[MRE].
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RTE].
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[ACTO].
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RVDM].
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t rumep : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RUMEP].
+ Internal:
+ pedc_radm_msg_unlock. */
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RPTAMRC].
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RPMERC].
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RFEMRC].
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RNFEMRC].
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RCEMRC].
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RPOISON].
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RECRCE].
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RTLPLLE].
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RTLPMAL].
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[SPOISON].
+ Internal:
+ peai__client0_tlp_ep & peai__client0_tlp_hv or
+ peai__client1_tlp_ep & peai__client1_tlp_hv (atomic_op). */
+#else /* Word 0 - Little Endian */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[SPOISON].
+ Internal:
+ peai__client0_tlp_ep & peai__client0_tlp_hv or
+ peai__client1_tlp_ep & peai__client1_tlp_hv (atomic_op). */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RTLPMAL].
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RTLPLLE].
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RECRCE].
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RPOISON].
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RCEMRC].
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RNFEMRC].
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RFEMRC].
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RPMERC].
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RPTAMRC].
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t rumep : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RUMEP].
+ Internal:
+ pedc_radm_msg_unlock. */
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RVDM].
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[ACTO].
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RTE].
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[MRE].
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RDWDLE].
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RTWDLE].
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[DPEOOSD].
+ Internal:
+ rdlh_prot_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[FCPVWT].
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t rpe : 1; /**< [ 19: 19](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RPE].
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[FCUV].
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RQO].
+ Internal:
+ radm_qoverflow. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RAUC].
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RACUR].
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RACCA].
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[CAAR]. */
+ uint64_t rarwdns : 1; /**< [ 26: 26](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RARWDNS].
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RAMTLP].
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RACPP].
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RAWWPP].
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[ECRC_E]. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[LOFP]. */
+ uint64_t reserved_32 : 1;
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_D0_SBE]. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_D0_DBE]. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_D1_SBE]. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_D1_DBE]. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_C_SBE]. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_C_DBE]. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_D0_SBE]. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_D0_DBE]. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_D1_SBE]. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_D1_DBE]. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_C_SBE]. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_C_DBE]. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_D0_SBE]. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_D0_DBE]. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_D1_SBE]. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_D1_DBE]. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_C_SBE]. */
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_C_DBE]. */
+ uint64_t reserved_51_55 : 5;
+ uint64_t qhdr_b1_dbe : 1; /**< [ 56: 56](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[QHDR_B1_DBE]. */
+ uint64_t m2s_pe : 1; /**< [ 57: 57](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[M2S_PE]. */
+ uint64_t reserved_58_63 : 6;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pemx_dbg_ena_w1c_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_57_63 : 7;
+ uint64_t qhdr_b1_dbe : 1; /**< [ 56: 56](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[QHDR_B1_DBE]. */
+ uint64_t qhdr_b1_sbe : 1; /**< [ 55: 55](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[QHDR_B1_SBE]. */
+ uint64_t qhdr_b0_dbe : 1; /**< [ 54: 54](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[QHDR_B0_DBE]. */
+ uint64_t qhdr_b0_sbe : 1; /**< [ 53: 53](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[QHDR_B0_SBE]. */
+ uint64_t rtry_dbe : 1; /**< [ 52: 52](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RTRY_DBE]. */
+ uint64_t rtry_sbe : 1; /**< [ 51: 51](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RTRY_SBE]. */
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_C_DBE]. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_C_SBE]. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_D1_DBE]. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_D1_SBE]. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_D0_DBE]. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_D0_SBE]. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_C_DBE]. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_C_SBE]. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_D1_DBE]. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_D1_SBE]. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_D0_DBE]. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_D0_SBE]. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_C_DBE]. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_C_SBE]. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_D1_DBE]. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_D1_SBE]. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_D0_DBE]. */
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_D0_SBE]. */
+ uint64_t datq_pe : 1; /**< [ 32: 32](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[DATQ_PE]. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[LOFP]. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[ECRC_E]. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RAWWPP].
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RACPP].
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RAMTLP].
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t rarwdns : 1; /**< [ 26: 26](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RARWDNS].
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[CAAR]. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RACCA].
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RACUR].
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RAUC].
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RQO].
+ Internal:
+ radm_qoverflow. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[FCUV].
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rpe : 1; /**< [ 19: 19](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RPE].
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[FCPVWT].
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[DPEOOSD].
+ Internal:
+ rdlh_prot_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RTWDLE].
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RDWDLE].
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[MRE].
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RTE].
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[ACTO].
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RVDM].
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t reserved_10 : 1;
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RPTAMRC].
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RPMERC].
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RFEMRC].
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RNFEMRC].
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RCEMRC].
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RPOISON].
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RECRCE].
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RTLPLLE].
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RTLPMAL].
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[SPOISON].
+ Internal:
+ peai__client0_tlp_ep & peai__client0_tlp_hv or
+ peai__client1_tlp_ep & peai__client1_tlp_hv (atomic_op). */
+#else /* Word 0 - Little Endian */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[SPOISON].
+ Internal:
+ peai__client0_tlp_ep & peai__client0_tlp_hv or
+ peai__client1_tlp_ep & peai__client1_tlp_hv (atomic_op). */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RTLPMAL].
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RTLPLLE].
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RECRCE].
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RPOISON].
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RCEMRC].
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RNFEMRC].
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RFEMRC].
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RPMERC].
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RPTAMRC].
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t reserved_10 : 1;
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RVDM].
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[ACTO].
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RTE].
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[MRE].
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RDWDLE].
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RTWDLE].
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[DPEOOSD].
+ Internal:
+ rdlh_prot_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[FCPVWT].
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t rpe : 1; /**< [ 19: 19](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RPE].
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[FCUV].
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RQO].
+ Internal:
+ radm_qoverflow. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RAUC].
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RACUR].
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RACCA].
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[CAAR]. */
+ uint64_t rarwdns : 1; /**< [ 26: 26](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RARWDNS].
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RAMTLP].
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RACPP].
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RAWWPP].
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[ECRC_E]. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[LOFP]. */
+ uint64_t datq_pe : 1; /**< [ 32: 32](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[DATQ_PE]. */
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_D0_SBE]. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_D0_DBE]. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_D1_SBE]. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_D1_DBE]. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_C_SBE]. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_C_DBE]. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_D0_SBE]. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_D0_DBE]. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_D1_SBE]. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_D1_DBE]. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_C_SBE]. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_C_DBE]. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_D0_SBE]. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_D0_DBE]. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_D1_SBE]. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_D1_DBE]. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_C_SBE]. */
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_C_DBE]. */
+ uint64_t rtry_sbe : 1; /**< [ 51: 51](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RTRY_SBE]. */
+ uint64_t rtry_dbe : 1; /**< [ 52: 52](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RTRY_DBE]. */
+ uint64_t qhdr_b0_sbe : 1; /**< [ 53: 53](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[QHDR_B0_SBE]. */
+ uint64_t qhdr_b0_dbe : 1; /**< [ 54: 54](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[QHDR_B0_DBE]. */
+ uint64_t qhdr_b1_sbe : 1; /**< [ 55: 55](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[QHDR_B1_SBE]. */
+ uint64_t qhdr_b1_dbe : 1; /**< [ 56: 56](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[QHDR_B1_DBE]. */
+ uint64_t reserved_57_63 : 7;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_pemx_dbg_ena_w1c_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_58_63 : 6;
+ uint64_t m2s_pe : 1; /**< [ 57: 57](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[M2S_PE]. */
+ uint64_t qhdr_b1_dbe : 1; /**< [ 56: 56](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[QHDR_B1_DBE]. */
+ uint64_t qhdr_b1_sbe : 1; /**< [ 55: 55](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[QHDR_B1_SBE]. */
+ uint64_t qhdr_b0_dbe : 1; /**< [ 54: 54](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[QHDR_B0_DBE]. */
+ uint64_t qhdr_b0_sbe : 1; /**< [ 53: 53](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[QHDR_B0_SBE]. */
+ uint64_t rtry_dbe : 1; /**< [ 52: 52](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RTRY_DBE]. */
+ uint64_t rtry_sbe : 1; /**< [ 51: 51](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RTRY_SBE]. */
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[C_C_DBE]. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[C_C_SBE]. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[C_D1_DBE]. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[C_D1_SBE]. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[C_D0_DBE]. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[C_D0_SBE]. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[N_C_DBE]. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[N_C_SBE]. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[N_D1_DBE]. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[N_D1_SBE]. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[N_D0_DBE]. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[N_D0_SBE]. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[P_C_DBE]. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[P_C_SBE]. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[P_D1_DBE]. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[P_D1_SBE]. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[P_D0_DBE]. */
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[P_D0_SBE]. */
+ uint64_t datq_pe : 1; /**< [ 32: 32](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[DATQ_PE]. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[LOFP]. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[ECRC_E]. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RAWWPP].
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RACPP].
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RAMTLP].
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t rarwdns : 1; /**< [ 26: 26](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RARWDNS].
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[CAAR]. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RACCA].
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RACUR].
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RAUC].
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RQO].
+ Internal:
+ radm_qoverflow. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[FCUV].
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rpe : 1; /**< [ 19: 19](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RPE].
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[FCPVWT].
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[DPEOOSD].
+ Internal:
+ rdlh_prot_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RTWDLE].
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RDWDLE].
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[MRE].
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RTE].
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[ACTO].
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RVDM].
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t reserved_10 : 1;
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RPTAMRC].
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RPMERC].
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RFEMRC].
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RNFEMRC].
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RCEMRC].
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RPOISON].
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RECRCE].
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RTLPLLE].
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RTLPMAL].
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[SPOISON].
+ Internal:
+ peai__client0_tlp_ep & peai__client0_tlp_hv or
+ peai__client1_tlp_ep & peai__client1_tlp_hv (atomic_op). */
+#else /* Word 0 - Little Endian */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[SPOISON].
+ Internal:
+ peai__client0_tlp_ep & peai__client0_tlp_hv or
+ peai__client1_tlp_ep & peai__client1_tlp_hv (atomic_op). */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RTLPMAL].
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RTLPLLE].
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RECRCE].
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RPOISON].
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RCEMRC].
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RNFEMRC].
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RFEMRC].
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RPMERC].
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RPTAMRC].
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t reserved_10 : 1;
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RVDM].
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[ACTO].
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RTE].
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[MRE].
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RDWDLE].
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RTWDLE].
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[DPEOOSD].
+ Internal:
+ rdlh_prot_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[FCPVWT].
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t rpe : 1; /**< [ 19: 19](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RPE].
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[FCUV].
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RQO].
+ Internal:
+ radm_qoverflow. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RAUC].
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RACUR].
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RACCA].
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[CAAR]. */
+ uint64_t rarwdns : 1; /**< [ 26: 26](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RARWDNS].
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RAMTLP].
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RACPP].
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RAWWPP].
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[ECRC_E]. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[LOFP]. */
+ uint64_t datq_pe : 1; /**< [ 32: 32](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[DATQ_PE]. */
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[P_D0_SBE]. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[P_D0_DBE]. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[P_D1_SBE]. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[P_D1_DBE]. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[P_C_SBE]. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[P_C_DBE]. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[N_D0_SBE]. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[N_D0_DBE]. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[N_D1_SBE]. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[N_D1_DBE]. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[N_C_SBE]. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[N_C_DBE]. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[C_D0_SBE]. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[C_D0_DBE]. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[C_D1_SBE]. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[C_D1_DBE]. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[C_C_SBE]. */
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[C_C_DBE]. */
+ uint64_t rtry_sbe : 1; /**< [ 51: 51](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RTRY_SBE]. */
+ uint64_t rtry_dbe : 1; /**< [ 52: 52](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[RTRY_DBE]. */
+ uint64_t qhdr_b0_sbe : 1; /**< [ 53: 53](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[QHDR_B0_SBE]. */
+ uint64_t qhdr_b0_dbe : 1; /**< [ 54: 54](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[QHDR_B0_DBE]. */
+ uint64_t qhdr_b1_sbe : 1; /**< [ 55: 55](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[QHDR_B1_SBE]. */
+ uint64_t qhdr_b1_dbe : 1; /**< [ 56: 56](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[QHDR_B1_DBE]. */
+ uint64_t m2s_pe : 1; /**< [ 57: 57](R/W1C/H) Reads or clears enable for PEM(0..2)_DBG_INFO[M2S_PE]. */
+ uint64_t reserved_58_63 : 6;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_pemx_dbg_ena_w1c_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_56_63 : 8;
+ uint64_t rasdp : 1; /**< [ 55: 55](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RASDP]. */
+ uint64_t m2s_d_dbe : 1; /**< [ 54: 54](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[M2S_D_DBE]. */
+ uint64_t m2s_d_sbe : 1; /**< [ 53: 53](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[M2S_D_SBE]. */
+ uint64_t m2s_c_dbe : 1; /**< [ 52: 52](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[M2S_C_DBE]. */
+ uint64_t m2s_c_sbe : 1; /**< [ 51: 51](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[M2S_C_SBE]. */
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[C_C_DBE]. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[C_C_SBE]. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[C_D1_DBE]. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[C_D1_SBE]. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[C_D0_DBE]. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[C_D0_SBE]. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[N_C_DBE]. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[N_C_SBE]. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[N_D1_DBE]. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[N_D1_SBE]. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[N_D0_DBE]. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[N_D0_SBE]. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[P_C_DBE]. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[P_C_SBE]. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[P_D1_DBE]. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[P_D1_SBE]. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[P_D0_DBE]. */
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[P_D0_SBE]. */
+ uint64_t bmd_e : 1; /**< [ 32: 32](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[BMD_E]. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[LOFP]. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[ECRC_E]. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RAWWPP].
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RACPP].
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RAMTLP].
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t rarwdns : 1; /**< [ 26: 26](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RARWDNS].
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[CAAR]. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RACCA].
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RACUR].
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RAUC].
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RQO].
+ Internal:
+ radm_qoverflow. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[FCUV].
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rpe : 1; /**< [ 19: 19](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RPE].
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[FCPVWT].
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[DPEOOSD].
+ Internal:
+ rdlh_prot_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RTWDLE].
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RDWDLE].
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[MRE].
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RTE].
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[ACTO].
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RVDM].
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t rumep : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RUMEP].
+ Internal:
+ pedc_radm_msg_unlock. */
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RPTAMRC].
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RPMERC].
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RFEMRC].
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RNFEMRC].
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RCEMRC].
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RPOISON].
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RECRCE].
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RTLPLLE].
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RTLPMAL].
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[SPOISON]. */
+#else /* Word 0 - Little Endian */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[SPOISON]. */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RTLPMAL].
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RTLPLLE].
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RECRCE].
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RPOISON].
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RCEMRC].
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RNFEMRC].
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RFEMRC].
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RPMERC].
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RPTAMRC].
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t rumep : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RUMEP].
+ Internal:
+ pedc_radm_msg_unlock. */
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RVDM].
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[ACTO].
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RTE].
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[MRE].
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RDWDLE].
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RTWDLE].
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[DPEOOSD].
+ Internal:
+ rdlh_prot_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[FCPVWT].
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t rpe : 1; /**< [ 19: 19](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RPE].
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[FCUV].
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RQO].
+ Internal:
+ radm_qoverflow. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RAUC].
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RACUR].
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RACCA].
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[CAAR]. */
+ uint64_t rarwdns : 1; /**< [ 26: 26](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RARWDNS].
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RAMTLP].
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RACPP].
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RAWWPP].
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[ECRC_E]. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[LOFP]. */
+ uint64_t bmd_e : 1; /**< [ 32: 32](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[BMD_E]. */
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[P_D0_SBE]. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[P_D0_DBE]. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[P_D1_SBE]. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[P_D1_DBE]. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[P_C_SBE]. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[P_C_DBE]. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[N_D0_SBE]. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[N_D0_DBE]. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[N_D1_SBE]. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[N_D1_DBE]. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[N_C_SBE]. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[N_C_DBE]. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[C_D0_SBE]. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[C_D0_DBE]. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[C_D1_SBE]. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[C_D1_DBE]. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[C_C_SBE]. */
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[C_C_DBE]. */
+ uint64_t m2s_c_sbe : 1; /**< [ 51: 51](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[M2S_C_SBE]. */
+ uint64_t m2s_c_dbe : 1; /**< [ 52: 52](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[M2S_C_DBE]. */
+ uint64_t m2s_d_sbe : 1; /**< [ 53: 53](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[M2S_D_SBE]. */
+ uint64_t m2s_d_dbe : 1; /**< [ 54: 54](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[M2S_D_DBE]. */
+ uint64_t rasdp : 1; /**< [ 55: 55](R/W1C/H) Reads or clears enable for PEM(0..3)_DBG_INFO[RASDP]. */
+ uint64_t reserved_56_63 : 8;
+#endif /* Word 0 - End */
+ } cn83xx;
+ struct bdk_pemx_dbg_ena_w1c_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_58_63 : 6;
+ uint64_t m2s_pe : 1; /**< [ 57: 57](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[M2S_PE]. */
+ uint64_t qhdr_b1_dbe : 1; /**< [ 56: 56](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[QHDR_B1_DBE]. */
+ uint64_t qhdr_b1_sbe : 1; /**< [ 55: 55](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[QHDR_B1_SBE]. */
+ uint64_t qhdr_b0_dbe : 1; /**< [ 54: 54](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[QHDR_B0_DBE]. */
+ uint64_t qhdr_b0_sbe : 1; /**< [ 53: 53](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[QHDR_B0_SBE]. */
+ uint64_t rtry_dbe : 1; /**< [ 52: 52](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RTRY_DBE]. */
+ uint64_t rtry_sbe : 1; /**< [ 51: 51](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RTRY_SBE]. */
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_C_DBE]. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_C_SBE]. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_D1_DBE]. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_D1_SBE]. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_D0_DBE]. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_D0_SBE]. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_C_DBE]. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_C_SBE]. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_D1_DBE]. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_D1_SBE]. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_D0_DBE]. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_D0_SBE]. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_C_DBE]. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_C_SBE]. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_D1_DBE]. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_D1_SBE]. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_D0_DBE]. */
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_D0_SBE]. */
+ uint64_t datq_pe : 1; /**< [ 32: 32](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[DATQ_PE]. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[LOFP]. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[ECRC_E]. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RAWWPP].
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RACPP].
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RAMTLP].
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t rarwdns : 1; /**< [ 26: 26](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RARWDNS].
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[CAAR]. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RACCA].
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RACUR].
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RAUC].
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RQO].
+ Internal:
+ radm_qoverflow. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[FCUV].
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rpe : 1; /**< [ 19: 19](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RPE].
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[FCPVWT].
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[DPEOOSD].
+ Internal:
+ rdlh_prot_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RTWDLE].
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RDWDLE].
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[MRE].
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RTE].
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[ACTO].
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RVDM].
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t reserved_10 : 1;
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RPTAMRC].
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RPMERC].
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RFEMRC].
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RNFEMRC].
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RCEMRC].
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RPOISON].
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RECRCE].
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RTLPLLE].
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RTLPMAL].
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[SPOISON].
+ Internal:
+ peai__client0_tlp_ep & peai__client0_tlp_hv or
+ peai__client1_tlp_ep & peai__client1_tlp_hv (atomic_op). */
+#else /* Word 0 - Little Endian */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[SPOISON].
+ Internal:
+ peai__client0_tlp_ep & peai__client0_tlp_hv or
+ peai__client1_tlp_ep & peai__client1_tlp_hv (atomic_op). */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RTLPMAL].
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RTLPLLE].
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RECRCE].
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RPOISON].
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RCEMRC].
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RNFEMRC].
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RFEMRC].
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RPMERC].
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RPTAMRC].
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t reserved_10 : 1;
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RVDM].
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[ACTO].
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RTE].
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[MRE].
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RDWDLE].
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RTWDLE].
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[DPEOOSD].
+ Internal:
+ rdlh_prot_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[FCPVWT].
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t rpe : 1; /**< [ 19: 19](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RPE].
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[FCUV].
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RQO].
+ Internal:
+ radm_qoverflow. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RAUC].
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RACUR].
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RACCA].
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[CAAR]. */
+ uint64_t rarwdns : 1; /**< [ 26: 26](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RARWDNS].
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RAMTLP].
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RACPP].
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RAWWPP].
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[ECRC_E]. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[LOFP]. */
+ uint64_t datq_pe : 1; /**< [ 32: 32](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[DATQ_PE]. */
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_D0_SBE]. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_D0_DBE]. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_D1_SBE]. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_D1_DBE]. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_C_SBE]. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[P_C_DBE]. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_D0_SBE]. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_D0_DBE]. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_D1_SBE]. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_D1_DBE]. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_C_SBE]. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[N_C_DBE]. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_D0_SBE]. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_D0_DBE]. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_D1_SBE]. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_D1_DBE]. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_C_SBE]. */
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[C_C_DBE]. */
+ uint64_t rtry_sbe : 1; /**< [ 51: 51](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RTRY_SBE]. */
+ uint64_t rtry_dbe : 1; /**< [ 52: 52](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[RTRY_DBE]. */
+ uint64_t qhdr_b0_sbe : 1; /**< [ 53: 53](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[QHDR_B0_SBE]. */
+ uint64_t qhdr_b0_dbe : 1; /**< [ 54: 54](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[QHDR_B0_DBE]. */
+ uint64_t qhdr_b1_sbe : 1; /**< [ 55: 55](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[QHDR_B1_SBE]. */
+ uint64_t qhdr_b1_dbe : 1; /**< [ 56: 56](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[QHDR_B1_DBE]. */
+ uint64_t m2s_pe : 1; /**< [ 57: 57](R/W1C/H) Reads or clears enable for PEM(0..5)_DBG_INFO[M2S_PE]. */
+ uint64_t reserved_58_63 : 6;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_pemx_dbg_ena_w1c bdk_pemx_dbg_ena_w1c_t;
+
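+/* Editor's hedged sketch (not part of the imported BDK source): decoding a
+ * raw CSR value through the union above. __BYTE_ORDER selects the matching
+ * bitfield layout at compile time, so the same field names work on either
+ * endianness. Assumes the common "s" view of this union exposes RQO at bit
+ * [21], as every per-chip view shown above does; the helper name is
+ * illustrative only.
+ */
+static inline int pemx_dbg_rqo_enabled(uint64_t raw)
+{
+    bdk_pemx_dbg_ena_w1c_t dbg;
+    dbg.u = raw;        /* load the raw 64-bit register value */
+    return dbg.s.rqo;   /* receive-queue-overflow enable, bit [21] */
+}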
+static inline uint64_t BDK_PEMX_DBG_ENA_W1C(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_DBG_ENA_W1C(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x87e0c0000458ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000458ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0c0000458ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PEMX_DBG_ENA_W1C", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_DBG_ENA_W1C(a) bdk_pemx_dbg_ena_w1c_t
+#define bustype_BDK_PEMX_DBG_ENA_W1C(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_PEMX_DBG_ENA_W1C(a) "PEMX_DBG_ENA_W1C"
+#define device_bar_BDK_PEMX_DBG_ENA_W1C(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_DBG_ENA_W1C(a) (a)
+#define arguments_BDK_PEMX_DBG_ENA_W1C(a) (a),-1,-1,-1
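+/* Editor's hedged sketch (not part of the imported BDK source): disabling
+ * all currently enabled PEM0 debug-info interrupt sources on a node. Uses
+ * the BDK's generic BDK_CSR_READ/BDK_CSR_WRITE accessors (bdk-csr.h), which
+ * take the register token so the bustype_/basename_ defines above resolve
+ * the access. Because this is a W1C register, writing back the value just
+ * read clears exactly the enable bits that were set. The helper name is
+ * illustrative only.
+ */
+static inline void pemx_dbg_disable_all(bdk_node_t node)
+{
+    uint64_t ena = BDK_CSR_READ(node, BDK_PEMX_DBG_ENA_W1C(0));
+    BDK_CSR_WRITE(node, BDK_PEMX_DBG_ENA_W1C(0), ena); /* 1s clear enables */
+}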
+
+/**
+ * Register (RSL) pem#_dbg_ena_w1s
+ *
+ * PEM Debug Information Enable Set Register
+ * This register reads or sets interrupt enable bits: writing a one to a bit
+ * sets the corresponding enable; writing a zero has no effect.
+ */
+union bdk_pemx_dbg_ena_w1s
+{
+ uint64_t u;
+ struct bdk_pemx_dbg_ena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_58_63 : 6;
+ uint64_t m2s_pe : 1; /**< [ 57: 57](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[M2S_PE]. */
+ uint64_t qhdr_b1_dbe : 1; /**< [ 56: 56](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[QHDR_B1_DBE]. */
+ uint64_t reserved_51_55 : 5;
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_C_DBE]. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_C_SBE]. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_D1_DBE]. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_D1_SBE]. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_D0_DBE]. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_D0_SBE]. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_C_DBE]. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_C_SBE]. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_D1_DBE]. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_D1_SBE]. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_D0_DBE]. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_D0_SBE]. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_C_DBE]. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_C_SBE]. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_D1_DBE]. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_D1_SBE]. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_D0_DBE]. */
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_D0_SBE]. */
+ uint64_t reserved_32 : 1;
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[LOFP]. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[ECRC_E]. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RAWWPP].
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RACPP].
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RAMTLP].
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t rarwdns : 1; /**< [ 26: 26](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RARWDNS].
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[CAAR]. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RACCA].
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RACUR].
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RAUC].
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RQO].
+ Internal:
+ radm_qoverflow. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[FCUV].
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rpe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RPE].
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[FCPVWT].
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[DPEOOSD].
+ Internal:
+ rdlh_prot_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RTWDLE].
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RDWDLE].
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[MRE].
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RTE].
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[ACTO].
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RVDM].
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t rumep : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RUMEP].
+ Internal:
+ pedc_radm_msg_unlock. */
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RPTAMRC].
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RPMERC].
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RFEMRC].
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RNFEMRC].
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RCEMRC].
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RPOISON].
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RECRCE].
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RTLPLLE].
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RTLPMAL].
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[SPOISON].
+ Internal:
+ peai__client0_tlp_ep & peai__client0_tlp_hv or
+ peai__client1_tlp_ep & peai__client1_tlp_hv (atomic_op). */
+#else /* Word 0 - Little Endian */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[SPOISON].
+ Internal:
+ peai__client0_tlp_ep & peai__client0_tlp_hv or
+ peai__client1_tlp_ep & peai__client1_tlp_hv (atomic_op). */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RTLPMAL].
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RTLPLLE].
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RECRCE].
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RPOISON].
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RCEMRC].
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RNFEMRC].
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RFEMRC].
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RPMERC].
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RPTAMRC].
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t rumep : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RUMEP].
+ Internal:
+ pedc_radm_msg_unlock. */
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RVDM].
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[ACTO].
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RTE].
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[MRE].
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RDWDLE].
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RTWDLE].
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[DPEOOSD].
+ Internal:
+ rdlh_prot_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[FCPVWT].
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t rpe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RPE].
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[FCUV].
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RQO].
+ Internal:
+ radm_qoverflow. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RAUC].
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RACUR].
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RACCA].
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[CAAR]. */
+ uint64_t rarwdns : 1; /**< [ 26: 26](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RARWDNS].
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RAMTLP].
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RACPP].
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RAWWPP].
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[ECRC_E]. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[LOFP]. */
+ uint64_t reserved_32 : 1;
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_D0_SBE]. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_D0_DBE]. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_D1_SBE]. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_D1_DBE]. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_C_SBE]. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_C_DBE]. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_D0_SBE]. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_D0_DBE]. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_D1_SBE]. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_D1_DBE]. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_C_SBE]. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_C_DBE]. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_D0_SBE]. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_D0_DBE]. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_D1_SBE]. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_D1_DBE]. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_C_SBE]. */
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_C_DBE]. */
+ uint64_t reserved_51_55 : 5;
+ uint64_t qhdr_b1_dbe : 1; /**< [ 56: 56](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[QHDR_B1_DBE]. */
+ uint64_t m2s_pe : 1; /**< [ 57: 57](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[M2S_PE]. */
+ uint64_t reserved_58_63 : 6;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pemx_dbg_ena_w1s_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_57_63 : 7;
+ uint64_t qhdr_b1_dbe : 1; /**< [ 56: 56](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[QHDR_B1_DBE]. */
+ uint64_t qhdr_b1_sbe : 1; /**< [ 55: 55](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[QHDR_B1_SBE]. */
+ uint64_t qhdr_b0_dbe : 1; /**< [ 54: 54](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[QHDR_B0_DBE]. */
+ uint64_t qhdr_b0_sbe : 1; /**< [ 53: 53](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[QHDR_B0_SBE]. */
+ uint64_t rtry_dbe : 1; /**< [ 52: 52](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RTRY_DBE]. */
+ uint64_t rtry_sbe : 1; /**< [ 51: 51](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RTRY_SBE]. */
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_C_DBE]. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_C_SBE]. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_D1_DBE]. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_D1_SBE]. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_D0_DBE]. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_D0_SBE]. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_C_DBE]. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_C_SBE]. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_D1_DBE]. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_D1_SBE]. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_D0_DBE]. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_D0_SBE]. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_C_DBE]. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_C_SBE]. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_D1_DBE]. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_D1_SBE]. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_D0_DBE]. */
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_D0_SBE]. */
+ uint64_t datq_pe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[DATQ_PE]. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[LOFP]. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[ECRC_E]. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RAWWPP].
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RACPP].
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RAMTLP].
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t rarwdns : 1; /**< [ 26: 26](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RARWDNS].
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[CAAR]. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RACCA].
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RACUR].
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RAUC].
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RQO].
+ Internal:
+ radm_qoverflow. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[FCUV].
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rpe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RPE].
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[FCPVWT].
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[DPEOOSD].
+ Internal:
+ rdlh_prot_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RTWDLE].
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RDWDLE].
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[MRE].
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RTE].
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[ACTO].
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RVDM].
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t reserved_10 : 1;
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RPTAMRC].
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RPMERC].
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RFEMRC].
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RNFEMRC].
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RCEMRC].
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RPOISON].
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RECRCE].
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RTLPLLE].
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RTLPMAL].
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[SPOISON].
+ Internal:
+ peai__client0_tlp_ep & peai__client0_tlp_hv or
+ peai__client1_tlp_ep & peai__client1_tlp_hv (atomic_op). */
+#else /* Word 0 - Little Endian */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[SPOISON].
+ Internal:
+ peai__client0_tlp_ep & peai__client0_tlp_hv or
+ peai__client1_tlp_ep & peai__client1_tlp_hv (atomic_op). */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RTLPMAL].
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RTLPLLE].
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RECRCE].
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RPOISON].
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RCEMRC].
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RNFEMRC].
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RFEMRC].
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RPMERC].
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RPTAMRC].
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t reserved_10 : 1;
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RVDM].
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[ACTO].
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RTE].
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[MRE].
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RDWDLE].
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RTWDLE].
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[DPEOOSD].
+ Internal:
+ rdlh_prot_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[FCPVWT].
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t rpe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RPE].
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[FCUV].
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RQO].
+ Internal:
+ radm_qoverflow. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RAUC].
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RACUR].
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RACCA].
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[CAAR]. */
+ uint64_t rarwdns : 1; /**< [ 26: 26](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RARWDNS].
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RAMTLP].
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RACPP].
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RAWWPP].
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[ECRC_E]. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[LOFP]. */
+ uint64_t datq_pe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[DATQ_PE]. */
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_D0_SBE]. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_D0_DBE]. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_D1_SBE]. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_D1_DBE]. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_C_SBE]. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_C_DBE]. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_D0_SBE]. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_D0_DBE]. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_D1_SBE]. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_D1_DBE]. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_C_SBE]. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_C_DBE]. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_D0_SBE]. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_D0_DBE]. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_D1_SBE]. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_D1_DBE]. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_C_SBE]. */
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_C_DBE]. */
+ uint64_t rtry_sbe : 1; /**< [ 51: 51](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RTRY_SBE]. */
+ uint64_t rtry_dbe : 1; /**< [ 52: 52](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RTRY_DBE]. */
+ uint64_t qhdr_b0_sbe : 1; /**< [ 53: 53](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[QHDR_B0_SBE]. */
+ uint64_t qhdr_b0_dbe : 1; /**< [ 54: 54](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[QHDR_B0_DBE]. */
+ uint64_t qhdr_b1_sbe : 1; /**< [ 55: 55](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[QHDR_B1_SBE]. */
+ uint64_t qhdr_b1_dbe : 1; /**< [ 56: 56](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[QHDR_B1_DBE]. */
+ uint64_t reserved_57_63 : 7;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_pemx_dbg_ena_w1s_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_58_63 : 6;
+ uint64_t m2s_pe : 1; /**< [ 57: 57](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[M2S_PE]. */
+ uint64_t qhdr_b1_dbe : 1; /**< [ 56: 56](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[QHDR_B1_DBE]. */
+ uint64_t qhdr_b1_sbe : 1; /**< [ 55: 55](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[QHDR_B1_SBE]. */
+ uint64_t qhdr_b0_dbe : 1; /**< [ 54: 54](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[QHDR_B0_DBE]. */
+ uint64_t qhdr_b0_sbe : 1; /**< [ 53: 53](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[QHDR_B0_SBE]. */
+ uint64_t rtry_dbe : 1; /**< [ 52: 52](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RTRY_DBE]. */
+ uint64_t rtry_sbe : 1; /**< [ 51: 51](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RTRY_SBE]. */
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[C_C_DBE]. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[C_C_SBE]. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[C_D1_DBE]. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[C_D1_SBE]. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[C_D0_DBE]. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[C_D0_SBE]. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[N_C_DBE]. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[N_C_SBE]. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[N_D1_DBE]. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[N_D1_SBE]. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[N_D0_DBE]. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[N_D0_SBE]. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[P_C_DBE]. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[P_C_SBE]. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[P_D1_DBE]. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[P_D1_SBE]. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[P_D0_DBE]. */
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[P_D0_SBE]. */
+ uint64_t datq_pe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[DATQ_PE]. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[LOFP]. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[ECRC_E]. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RAWWPP].
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RACPP].
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RAMTLP].
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t rarwdns : 1; /**< [ 26: 26](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RARWDNS].
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[CAAR]. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RACCA].
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RACUR].
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RAUC].
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RQO].
+ Internal:
+ radm_qoverflow. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[FCUV].
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rpe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RPE].
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[FCPVWT].
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[DPEOOSD].
+ Internal:
+ rdlh_prot_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RTWDLE].
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RDWDLE].
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[MRE].
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RTE].
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[ACTO].
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RVDM].
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t reserved_10 : 1;
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RPTAMRC].
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RPMERC].
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RFEMRC].
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RNFEMRC].
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RCEMRC].
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RPOISON].
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RECRCE].
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RTLPLLE].
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RTLPMAL].
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[SPOISON].
+ Internal:
+ peai__client0_tlp_ep & peai__client0_tlp_hv or
+ peai__client1_tlp_ep & peai__client1_tlp_hv (atomic_op). */
+#else /* Word 0 - Little Endian */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[SPOISON].
+ Internal:
+ peai__client0_tlp_ep & peai__client0_tlp_hv or
+ peai__client1_tlp_ep & peai__client1_tlp_hv (atomic_op). */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RTLPMAL].
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RTLPLLE].
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RECRCE].
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RPOISON].
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RCEMRC].
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RNFEMRC].
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RFEMRC].
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RPMERC].
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RPTAMRC].
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t reserved_10 : 1;
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RVDM].
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[ACTO].
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RTE].
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[MRE].
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RDWDLE].
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RTWDLE].
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[DPEOOSD].
+ Internal:
+ rdlh_prot_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[FCPVWT].
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t rpe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RPE].
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[FCUV].
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RQO].
+ Internal:
+ radm_qoverflow. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RAUC].
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RACUR].
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RACCA].
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[CAAR]. */
+ uint64_t rarwdns : 1; /**< [ 26: 26](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RARWDNS].
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RAMTLP].
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RACPP].
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RAWWPP].
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[ECRC_E]. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[LOFP]. */
+ uint64_t datq_pe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[DATQ_PE]. */
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[P_D0_SBE]. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[P_D0_DBE]. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[P_D1_SBE]. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[P_D1_DBE]. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[P_C_SBE]. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[P_C_DBE]. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[N_D0_SBE]. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[N_D0_DBE]. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[N_D1_SBE]. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[N_D1_DBE]. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[N_C_SBE]. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[N_C_DBE]. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[C_D0_SBE]. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[C_D0_DBE]. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[C_D1_SBE]. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[C_D1_DBE]. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[C_C_SBE]. */
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[C_C_DBE]. */
+ uint64_t rtry_sbe : 1; /**< [ 51: 51](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RTRY_SBE]. */
+ uint64_t rtry_dbe : 1; /**< [ 52: 52](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[RTRY_DBE]. */
+ uint64_t qhdr_b0_sbe : 1; /**< [ 53: 53](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[QHDR_B0_SBE]. */
+ uint64_t qhdr_b0_dbe : 1; /**< [ 54: 54](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[QHDR_B0_DBE]. */
+ uint64_t qhdr_b1_sbe : 1; /**< [ 55: 55](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[QHDR_B1_SBE]. */
+ uint64_t qhdr_b1_dbe : 1; /**< [ 56: 56](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[QHDR_B1_DBE]. */
+ uint64_t m2s_pe : 1; /**< [ 57: 57](R/W1S/H) Reads or sets enable for PEM(0..2)_DBG_INFO[M2S_PE]. */
+ uint64_t reserved_58_63 : 6;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_pemx_dbg_ena_w1s_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_56_63 : 8;
+ uint64_t rasdp : 1; /**< [ 55: 55](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RASDP]. */
+ uint64_t m2s_d_dbe : 1; /**< [ 54: 54](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[M2S_D_DBE]. */
+ uint64_t m2s_d_sbe : 1; /**< [ 53: 53](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[M2S_D_SBE]. */
+ uint64_t m2s_c_dbe : 1; /**< [ 52: 52](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[M2S_C_DBE]. */
+ uint64_t m2s_c_sbe : 1; /**< [ 51: 51](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[M2S_C_SBE]. */
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[C_C_DBE]. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[C_C_SBE]. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[C_D1_DBE]. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[C_D1_SBE]. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[C_D0_DBE]. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[C_D0_SBE]. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[N_C_DBE]. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[N_C_SBE]. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[N_D1_DBE]. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[N_D1_SBE]. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[N_D0_DBE]. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[N_D0_SBE]. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[P_C_DBE]. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[P_C_SBE]. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[P_D1_DBE]. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[P_D1_SBE]. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[P_D0_DBE]. */
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[P_D0_SBE]. */
+ uint64_t bmd_e : 1; /**< [ 32: 32](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[BMD_E]. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[LOFP]. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[ECRC_E]. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RAWWPP].
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RACPP].
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RAMTLP].
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t rarwdns : 1; /**< [ 26: 26](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RARWDNS].
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[CAAR]. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RACCA].
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RACUR].
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RAUC].
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RQO].
+ Internal:
+ radm_qoverflow. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[FCUV].
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rpe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RPE].
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[FCPVWT].
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[DPEOOSD].
+ Internal:
+ rdlh_prot_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RTWDLE].
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RDWDLE].
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[MRE].
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RTE].
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[ACTO].
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RVDM].
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t rumep : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RUMEP].
+ Internal:
+ pedc_radm_msg_unlock. */
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RPTAMRC].
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RPMERC].
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RFEMRC].
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RNFEMRC].
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RCEMRC].
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RPOISON].
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RECRCE].
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RTLPLLE].
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RTLPMAL].
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[SPOISON]. */
+#else /* Word 0 - Little Endian */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[SPOISON]. */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RTLPMAL].
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RTLPLLE].
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RECRCE].
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RPOISON].
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RCEMRC].
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RNFEMRC].
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RFEMRC].
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RPMERC].
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RPTAMRC].
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t rumep : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RUMEP].
+ Internal:
+ pedc_radm_msg_unlock. */
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RVDM].
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[ACTO].
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RTE].
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[MRE].
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RDWDLE].
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RTWDLE].
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[DPEOOSD].
+ Internal:
+ rdlh_prot_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[FCPVWT].
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t rpe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RPE].
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[FCUV].
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RQO].
+ Internal:
+ radm_qoverflow. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RAUC].
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RACUR].
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RACCA].
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[CAAR]. */
+ uint64_t rarwdns : 1; /**< [ 26: 26](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RARWDNS].
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RAMTLP].
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RACPP].
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RAWWPP].
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[ECRC_E]. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[LOFP]. */
+ uint64_t bmd_e : 1; /**< [ 32: 32](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[BMD_E]. */
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[P_D0_SBE]. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[P_D0_DBE]. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[P_D1_SBE]. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[P_D1_DBE]. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[P_C_SBE]. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[P_C_DBE]. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[N_D0_SBE]. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[N_D0_DBE]. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[N_D1_SBE]. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[N_D1_DBE]. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[N_C_SBE]. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[N_C_DBE]. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[C_D0_SBE]. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[C_D0_DBE]. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[C_D1_SBE]. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[C_D1_DBE]. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[C_C_SBE]. */
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[C_C_DBE]. */
+ uint64_t m2s_c_sbe : 1; /**< [ 51: 51](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[M2S_C_SBE]. */
+ uint64_t m2s_c_dbe : 1; /**< [ 52: 52](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[M2S_C_DBE]. */
+ uint64_t m2s_d_sbe : 1; /**< [ 53: 53](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[M2S_D_SBE]. */
+ uint64_t m2s_d_dbe : 1; /**< [ 54: 54](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[M2S_D_DBE]. */
+ uint64_t rasdp : 1; /**< [ 55: 55](R/W1S/H) Reads or sets enable for PEM(0..3)_DBG_INFO[RASDP]. */
+ uint64_t reserved_56_63 : 8;
+#endif /* Word 0 - End */
+ } cn83xx;
+ struct bdk_pemx_dbg_ena_w1s_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_58_63 : 6;
+ uint64_t m2s_pe : 1; /**< [ 57: 57](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[M2S_PE]. */
+ uint64_t qhdr_b1_dbe : 1; /**< [ 56: 56](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[QHDR_B1_DBE]. */
+ uint64_t qhdr_b1_sbe : 1; /**< [ 55: 55](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[QHDR_B1_SBE]. */
+ uint64_t qhdr_b0_dbe : 1; /**< [ 54: 54](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[QHDR_B0_DBE]. */
+ uint64_t qhdr_b0_sbe : 1; /**< [ 53: 53](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[QHDR_B0_SBE]. */
+ uint64_t rtry_dbe : 1; /**< [ 52: 52](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RTRY_DBE]. */
+ uint64_t rtry_sbe : 1; /**< [ 51: 51](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RTRY_SBE]. */
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_C_DBE]. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_C_SBE]. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_D1_DBE]. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_D1_SBE]. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_D0_DBE]. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_D0_SBE]. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_C_DBE]. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_C_SBE]. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_D1_DBE]. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_D1_SBE]. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_D0_DBE]. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_D0_SBE]. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_C_DBE]. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_C_SBE]. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_D1_DBE]. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_D1_SBE]. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_D0_DBE]. */
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_D0_SBE]. */
+ uint64_t datq_pe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[DATQ_PE]. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[LOFP]. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[ECRC_E]. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RAWWPP].
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RACPP].
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RAMTLP].
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t rarwdns : 1; /**< [ 26: 26](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RARWDNS].
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[CAAR]. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RACCA].
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RACUR].
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RAUC].
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RQO].
+ Internal:
+ radm_qoverflow. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[FCUV].
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rpe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RPE].
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[FCPVWT].
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[DPEOOSD].
+ Internal:
+ rdlh_prot_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RTWDLE].
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RDWDLE].
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[MRE].
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RTE].
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[ACTO].
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RVDM].
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t reserved_10 : 1;
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RPTAMRC].
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RPMERC].
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RFEMRC].
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RNFEMRC].
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RCEMRC].
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RPOISON].
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RECRCE].
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RTLPLLE].
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RTLPMAL].
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[SPOISON].
+ Internal:
+ peai__client0_tlp_ep & peai__client0_tlp_hv or
+ peai__client1_tlp_ep & peai__client1_tlp_hv (atomic_op). */
+#else /* Word 0 - Little Endian */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[SPOISON].
+ Internal:
+ peai__client0_tlp_ep & peai__client0_tlp_hv or
+ peai__client1_tlp_ep & peai__client1_tlp_hv (atomic_op). */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RTLPMAL].
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RTLPLLE].
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RECRCE].
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RPOISON].
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RCEMRC].
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RNFEMRC].
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RFEMRC].
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RPMERC].
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RPTAMRC].
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t reserved_10 : 1;
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RVDM].
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[ACTO].
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RTE].
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[MRE].
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RDWDLE].
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RTWDLE].
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[DPEOOSD].
+ Internal:
+ rdlh_prot_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[FCPVWT].
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t rpe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RPE].
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[FCUV].
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RQO].
+ Internal:
+ radm_qoverflow. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RAUC].
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RACUR].
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RACCA].
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[CAAR]. */
+ uint64_t rarwdns : 1; /**< [ 26: 26](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RARWDNS].
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RAMTLP].
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RACPP].
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RAWWPP].
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[ECRC_E]. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[LOFP]. */
+ uint64_t datq_pe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[DATQ_PE]. */
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_D0_SBE]. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_D0_DBE]. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_D1_SBE]. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_D1_DBE]. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_C_SBE]. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[P_C_DBE]. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_D0_SBE]. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_D0_DBE]. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_D1_SBE]. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_D1_DBE]. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_C_SBE]. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[N_C_DBE]. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_D0_SBE]. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_D0_DBE]. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_D1_SBE]. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_D1_DBE]. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_C_SBE]. */
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[C_C_DBE]. */
+ uint64_t rtry_sbe : 1; /**< [ 51: 51](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RTRY_SBE]. */
+ uint64_t rtry_dbe : 1; /**< [ 52: 52](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[RTRY_DBE]. */
+ uint64_t qhdr_b0_sbe : 1; /**< [ 53: 53](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[QHDR_B0_SBE]. */
+ uint64_t qhdr_b0_dbe : 1; /**< [ 54: 54](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[QHDR_B0_DBE]. */
+ uint64_t qhdr_b1_sbe : 1; /**< [ 55: 55](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[QHDR_B1_SBE]. */
+ uint64_t qhdr_b1_dbe : 1; /**< [ 56: 56](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[QHDR_B1_DBE]. */
+ uint64_t m2s_pe : 1; /**< [ 57: 57](R/W1S/H) Reads or sets enable for PEM(0..5)_DBG_INFO[M2S_PE]. */
+ uint64_t reserved_58_63 : 6;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_pemx_dbg_ena_w1s bdk_pemx_dbg_ena_w1s_t;
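+
+/*
+ * Editorial sketch, not part of the imported BDK sources: the union above
+ * overlays one raw 64-bit value [u] with a superset view [s] and per-chip
+ * views (cn81xx, cn83xx, cn88xxp1, cn88xxp2), so a CSR value can be decoded
+ * field by field without manual shifting and masking. A minimal,
+ * hypothetical decode helper (assuming the usual BDK superset member [s]):
+ */
+static inline int bdk_pemx_dbg_ena_w1s_rte_is_set(uint64_t raw)
+{
+    bdk_pemx_dbg_ena_w1s_t val;
+    val.u = raw;       /* overlay the raw CSR value */
+    return val.s.rte;  /* bit 13: replay-timer-expired enable */
+}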
+
+static inline uint64_t BDK_PEMX_DBG_ENA_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_DBG_ENA_W1S(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x87e0c0000460ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000460ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0c0000460ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PEMX_DBG_ENA_W1S", 1, a, 0, 0, 0);
+}
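+
+/*
+ * Editorial note, not part of the imported sources: each PEM's CSR block is
+ * strided by 0x1000000 bytes, so PEM2 on CN81XX resolves to
+ * 0x87e0c0000460 + 2 * 0x1000000 = 0x87e0c2000460; an out-of-range index
+ * falls through to __bdk_csr_fatal().
+ */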
+
+#define typedef_BDK_PEMX_DBG_ENA_W1S(a) bdk_pemx_dbg_ena_w1s_t
+#define bustype_BDK_PEMX_DBG_ENA_W1S(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_PEMX_DBG_ENA_W1S(a) "PEMX_DBG_ENA_W1S"
+#define device_bar_BDK_PEMX_DBG_ENA_W1S(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_DBG_ENA_W1S(a) (a)
+#define arguments_BDK_PEMX_DBG_ENA_W1S(a) (a),-1,-1,-1
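+
+/*
+ * Editorial sketch, not part of the imported sources: the typedef_/bustype_/
+ * basename_/device_bar_/busnum_/arguments_ macros above are consumed by the
+ * BDK's generic CSR accessors through token pasting. Assuming a flat,
+ * identity-mapped environment (the real BDK goes through its CSR read/write
+ * helpers instead), a single enable bit could be set like this, relying on
+ * the register's W1S semantics (ones set enables, zeros are ignored):
+ */
+static inline void bdk_pemx_dbg_ena_w1s_set_rte(unsigned long pem)
+{
+    bdk_pemx_dbg_ena_w1s_t w1s;
+    w1s.u = 0;      /* zeros leave the other enables untouched */
+    w1s.s.rte = 1;  /* bit 13: enable reporting of replay-timer expiry */
+    *(volatile uint64_t *)BDK_PEMX_DBG_ENA_W1S(pem) = w1s.u;
+}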
+
+/**
+ * Register (NCB) pem#_dbg_info
+ *
+ * PEM Debug Information Register
+ * This is a debug information register of the PEM.
+ *
+ * This register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_dbg_info
+{
+ uint64_t u;
+ struct bdk_pemx_dbg_info_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_58_63 : 6;
+ uint64_t m2s_pe : 1; /**< [ 57: 57](R/W1C/H) Detected an M2S FIFO parity error. */
+ uint64_t qhdr_b1_dbe : 1; /**< [ 56: 56](R/W1C/H) Detected a core header queue bank1 double-bit error. */
+ uint64_t reserved_32_55 : 24;
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1C/H) A lack-of-forward-progress timeout occurred at the TLP FIFOs. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1C/H) Received an ECRC error. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1C/H) Received a write with poisoned payload.
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1C/H) Received a completion with poisoned payload.
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1C/H) Received a malformed TLP.
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t rarwdns : 1; /**< [ 26: 26](R/W1C/H) Received a request which the device does not support.
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1C/H) Completer aborted a request. This bit is never set because CNXXXX does not generate
+ completer aborts. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1C/H) Received a completion with CA status.
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1C/H) Received a completion with UR status.
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1C/H) Received an unexpected completion.
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1C/H) Receive queue overflow. Normally happens only when flow control advertisements are
+ ignored.
+
+ Internal:
+ radm_qoverflow. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1C/H) Flow control update violation.
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rpe : 1; /**< [ 19: 19](R/W1C/H) PHY reported an 8 B/10 B decode error (RxStatus = 0x4) or disparity error (RxStatus =
+ 0x7).
+
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1C/H) Flow control protocol violation (watchdog timer).
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1C/H) DLLP protocol error (out of sequence DLLP).
+ Internal:
+ rdlh_prot_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1C/H) Received TLP with datalink layer error.
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1C/H) Received DLLP with datalink layer error.
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1C/H) Maximum number of retries exceeded.
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1C/H) Replay timer expired. This bit is set when the REPLAY_TIMER expires in the PCIe core. The
+ probability of this bit being set increases with the traffic load.
+
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1C/H) A completion timeout occurred.
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1C/H) Received vendor-defined message.
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t rumep : 1; /**< [ 10: 10](R/W1C/H) Received unlock message (EP mode only).
+ Internal:
+ pedc_radm_msg_unlock. */
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1C/H) Received PME turnoff acknowledge message (RC mode only).
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1C/H) Received PME message (RC mode only).
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1C/H) Received fatal-error message (RC mode only). This bit is set when a message with ERR_FATAL
+ is received.
+
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1C/H) Received nonfatal error message (RC mode only).
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1C/H) Received correctable error message (RC mode only).
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1C/H) Received poisoned TLP.
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1C/H) Received ECRC error.
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1C/H) Received TLP has link layer error.
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1C/H) Received TLP is malformed or a message. If the core receives a MSG (or Vendor Message) or
+ if a received AtomicOp violates address/length rules, this bit is set as well.
+
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1C/H) Poisoned TLP sent.
+ Internal:
+ peai__client0_tlp_ep & peai__client0_tlp_hv or
+ peai__client1_tlp_ep & peai__client1_tlp_hv (atomic_op). */
+#else /* Word 0 - Little Endian */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1C/H) Poisoned TLP sent.
+ Internal:
+ peai__client0_tlp_ep & peai__client0_tlp_hv or
+ peai__client1_tlp_ep & peai__client1_tlp_hv (atomic_op). */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1C/H) Received TLP is malformed or a message. If the core receives a MSG (or Vendor Message) or
+ if a received AtomicOp violates address/length rules, this bit is set as well.
+
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1C/H) Received TLP has link layer error.
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1C/H) Received ECRC error.
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1C/H) Received poisoned TLP.
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1C/H) Received correctable error message (RC mode only).
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1C/H) Received nonfatal error message (RC mode only).
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1C/H) Received fatal-error message (RC mode only). This bit is set when a message with ERR_FATAL
+ is received.
+
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1C/H) Received PME message (RC mode only).
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1C/H) Received PME turnoff acknowledge message (RC mode only).
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t rumep : 1; /**< [ 10: 10](R/W1C/H) Received unlock message (EP mode only).
+ Internal:
+ pedc_radm_msg_unlock. */
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1C/H) Received vendor-defined message.
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1C/H) A completion timeout occurred.
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1C/H) Replay timer expired. This bit is set when the REPLAY_TIMER expires in the PCIe core. The
+ probability of this bit being set increases with the traffic load.
+
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1C/H) Maximum number of retries exceeded.
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1C/H) Received DLLP with datalink layer error.
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1C/H) Received TLP with datalink layer error.
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1C/H) DLLP protocol error (out of sequence DLLP).
+ Internal:
+ rdlh_prot_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1C/H) Flow control protocol violation (watchdog timer).
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t rpe : 1; /**< [ 19: 19](R/W1C/H) PHY reported an 8 B/10 B decode error (RxStatus = 0x4) or disparity error (RxStatus =
+ 0x7).
+
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1C/H) Flow control update violation.
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1C/H) Receive queue overflow. Normally happens only when flow control advertisements are
+ ignored.
+
+ Internal:
+ radm_qoverflow. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1C/H) Received an unexpected completion.
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1C/H) Received a completion with UR status.
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1C/H) Received a completion with CA status.
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1C/H) Completer aborted a request. This bit is never set because CNXXXX does not generate
+ completer aborts. */
+ uint64_t rarwdns : 1; /**< [ 26: 26](R/W1C/H) Received a request which the device does not support.
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1C/H) Received a malformed TLP.
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1C/H) Received a completion with poisoned payload.
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1C/H) Received a write with poisoned payload.
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1C/H) Received an ECRC error. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1C/H) A lack-of-forward-progress timeout occurred at the TLP FIFOs. */
+ uint64_t reserved_32_55 : 24;
+ uint64_t qhdr_b1_dbe : 1; /**< [ 56: 56](R/W1C/H) Detected a core header queue bank1 double-bit error. */
+ uint64_t m2s_pe : 1; /**< [ 57: 57](R/W1C/H) Detected an M2S FIFO parity error. */
+ uint64_t reserved_58_63 : 6;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pemx_dbg_info_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_57_63 : 7;
+ uint64_t qhdr_b1_dbe : 1; /**< [ 56: 56](R/W1C/H) Detected a core header queue bank1 double-bit error. */
+ uint64_t qhdr_b1_sbe : 1; /**< [ 55: 55](R/W1C/H) Detected a core header queue bank1 single-bit error. */
+ uint64_t qhdr_b0_dbe : 1; /**< [ 54: 54](R/W1C/H) Detected a core header queue bank0 double-bit error. */
+ uint64_t qhdr_b0_sbe : 1; /**< [ 53: 53](R/W1C/H) Detected a core header queue bank0 single-bit error. */
+ uint64_t rtry_dbe : 1; /**< [ 52: 52](R/W1C/H) Detected a core retry RAM double-bit error. */
+ uint64_t rtry_sbe : 1; /**< [ 51: 51](R/W1C/H) Detected a core retry RAM single-bit error. */
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1C/H) Detected a TLP CPL FIFO control double-bit error. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1C/H) Detected a TLP CPL FIFO control single-bit error. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1C/H) Detected a TLP CPL FIFO data1 double-bit error. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1C/H) Detected a TLP CPL FIFO data1 single-bit error. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1C/H) Detected a TLP CPL FIFO data0 double-bit error. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1C/H) Detected a TLP CPL FIFO data0 single-bit error. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1C/H) Detected a TLP NP FIFO control double-bit error. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1C/H) Detected a TLP NP FIFO control single-bit error. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1C/H) Detected a TLP NP FIFO data1 double-bit error. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1C/H) Detected a TLP NP FIFO data1 single-bit error. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1C/H) Detected a TLP NP FIFO data0 double-bit error. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1C/H) Detected a TLP NP FIFO data0 single-bit error. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1C/H) Detected a TLP posted FIFO control double-bit error. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1C/H) Detected a TLP posted FIFO control single-bit error. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1C/H) Detected a TLP posted FIFO data1 double-bit error. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1C/H) Detected a TLP posted FIFO data1 single-bit error. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1C/H) Detected a TLP posted FIFO data0 double-bit error. */
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1C/H) Detected a TLP posted FIFO data0 single-bit error. */
+ uint64_t datq_pe : 1; /**< [ 32: 32](R/W1C/H) Detected a data queue RAM parity error. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1C/H) Lack of forward progress at TLP FIFOs timeout occurred. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1C/H) Received an ECRC error. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1C/H) Received a write with poisoned payload.
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1C/H) Received a completion with poisoned payload.
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1C/H) Received a malformed TLP.
+ Internal:
+ radm_mlf_tlp_err. */
+        uint64_t rarwdns               : 1;  /**< [ 26: 26](R/W1C/H) Received a request which the device does not support.
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1C/H) Completer aborted a request. This bit is never set because CNXXXX does not generate
+ completer aborts. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1C/H) Received a completion with CA status.
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1C/H) Received a completion with UR status.
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1C/H) Received an unexpected completion.
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1C/H) Receive queue overflow. Normally happens only when flow control advertisements are
+ ignored.
+
+ Internal:
+ radm_qoverflow. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1C/H) Flow control update violation.
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+        uint64_t rpe                   : 1;  /**< [ 19: 19](R/W1C/H) PHY reported an 8b/10b decode error (RxStatus = 0x4) or disparity error (RxStatus =
+ 0x7).
+
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1C/H) Flow control protocol violation (watchdog timer).
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1C/H) DLLP protocol error (out of sequence DLLP).
+ Internal:
+ rdlh_prot_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1C/H) Received TLP with datalink layer error.
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1C/H) Received DLLP with datalink layer error.
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1C/H) Maximum number of retries exceeded.
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1C/H) Replay timer expired. This bit is set when the REPLAY_TIMER expires in the PCIe core. The
+ probability of this bit being set increases with the traffic load.
+
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1C/H) A completion timeout occurred.
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1C/H) Received vendor-defined message.
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t reserved_10 : 1;
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1C/H) Received PME turnoff acknowledge message (RC mode only).
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1C/H) Received PME message (RC mode only).
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1C/H) Received fatal-error message (RC mode only). This bit is set when a message with ERR_FATAL
+                                                                 is received.
+
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1C/H) Received nonfatal error message (RC mode only).
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1C/H) Received correctable error message (RC mode only).
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1C/H) Received poisoned TLP.
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1C/H) Received ECRC error.
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1C/H) Received TLP has link layer error.
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1C/H) Received TLP is malformed or a message. If the core receives a MSG (or Vendor Message) or
+ if a received AtomicOp violates address/length rules, this bit is set as well.
+
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1C/H) Poisoned TLP sent.
+ Internal:
+ peai__client0_tlp_ep & peai__client0_tlp_hv or
+ peai__client1_tlp_ep & peai__client1_tlp_hv (atomic_op). */
+#else /* Word 0 - Little Endian */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1C/H) Poisoned TLP sent.
+ Internal:
+ peai__client0_tlp_ep & peai__client0_tlp_hv or
+ peai__client1_tlp_ep & peai__client1_tlp_hv (atomic_op). */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1C/H) Received TLP is malformed or a message. If the core receives a MSG (or Vendor Message) or
+ if a received AtomicOp violates address/length rules, this bit is set as well.
+
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1C/H) Received TLP has link layer error.
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1C/H) Received ECRC error.
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1C/H) Received poisoned TLP.
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1C/H) Received correctable error message (RC mode only).
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1C/H) Received nonfatal error message (RC mode only).
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1C/H) Received fatal-error message (RC mode only). This bit is set when a message with ERR_FATAL
+                                                                 is received.
+
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1C/H) Received PME message (RC mode only).
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1C/H) Received PME turnoff acknowledge message (RC mode only).
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t reserved_10 : 1;
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1C/H) Received vendor-defined message.
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1C/H) A completion timeout occurred.
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1C/H) Replay timer expired. This bit is set when the REPLAY_TIMER expires in the PCIe core. The
+ probability of this bit being set increases with the traffic load.
+
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1C/H) Maximum number of retries exceeded.
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1C/H) Received DLLP with datalink layer error.
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1C/H) Received TLP with datalink layer error.
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1C/H) DLLP protocol error (out of sequence DLLP).
+ Internal:
+ rdlh_prot_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1C/H) Flow control protocol violation (watchdog timer).
+ Internal:
+ rtlh_fc_prot_err. */
+        uint64_t rpe                   : 1;  /**< [ 19: 19](R/W1C/H) PHY reported an 8b/10b decode error (RxStatus = 0x4) or disparity error (RxStatus =
+ 0x7).
+
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1C/H) Flow control update violation.
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1C/H) Receive queue overflow. Normally happens only when flow control advertisements are
+ ignored.
+
+ Internal:
+ radm_qoverflow. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1C/H) Received an unexpected completion.
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1C/H) Received a completion with UR status.
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1C/H) Received a completion with CA status.
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1C/H) Completer aborted a request. This bit is never set because CNXXXX does not generate
+ completer aborts. */
+        uint64_t rarwdns               : 1;  /**< [ 26: 26](R/W1C/H) Received a request which the device does not support.
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1C/H) Received a malformed TLP.
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1C/H) Received a completion with poisoned payload.
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1C/H) Received a write with poisoned payload.
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1C/H) Received an ECRC error. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1C/H) Lack of forward progress at TLP FIFOs timeout occurred. */
+ uint64_t datq_pe : 1; /**< [ 32: 32](R/W1C/H) Detected a data queue RAM parity error. */
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1C/H) Detected a TLP posted FIFO data0 single-bit error. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1C/H) Detected a TLP posted FIFO data0 double-bit error. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1C/H) Detected a TLP posted FIFO data1 single-bit error. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1C/H) Detected a TLP posted FIFO data1 double-bit error. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1C/H) Detected a TLP posted FIFO control single-bit error. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1C/H) Detected a TLP posted FIFO control double-bit error. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1C/H) Detected a TLP NP FIFO data0 single-bit error. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1C/H) Detected a TLP NP FIFO data0 double-bit error. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1C/H) Detected a TLP NP FIFO data1 single-bit error. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1C/H) Detected a TLP NP FIFO data1 double-bit error. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1C/H) Detected a TLP NP FIFO control single-bit error. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1C/H) Detected a TLP NP FIFO control double-bit error. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1C/H) Detected a TLP CPL FIFO data0 single-bit error. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1C/H) Detected a TLP CPL FIFO data0 double-bit error. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1C/H) Detected a TLP CPL FIFO data1 single-bit error. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1C/H) Detected a TLP CPL FIFO data1 double-bit error. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1C/H) Detected a TLP CPL FIFO control single-bit error. */
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1C/H) Detected a TLP CPL FIFO control double-bit error. */
+ uint64_t rtry_sbe : 1; /**< [ 51: 51](R/W1C/H) Detected a core retry RAM single-bit error. */
+ uint64_t rtry_dbe : 1; /**< [ 52: 52](R/W1C/H) Detected a core retry RAM double-bit error. */
+ uint64_t qhdr_b0_sbe : 1; /**< [ 53: 53](R/W1C/H) Detected a core header queue bank0 single-bit error. */
+ uint64_t qhdr_b0_dbe : 1; /**< [ 54: 54](R/W1C/H) Detected a core header queue bank0 double-bit error. */
+ uint64_t qhdr_b1_sbe : 1; /**< [ 55: 55](R/W1C/H) Detected a core header queue bank1 single-bit error. */
+ uint64_t qhdr_b1_dbe : 1; /**< [ 56: 56](R/W1C/H) Detected a core header queue bank1 double-bit error. */
+ uint64_t reserved_57_63 : 7;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_pemx_dbg_info_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_34_63 : 30;
+ uint64_t rasdp : 1; /**< [ 33: 33](R/W1C/H) Core entered RAS data protection error mode. */
+        uint64_t bmd_e                 : 1;  /**< [ 32: 32](R/W1C/H) An NP or P TLP was seen in the outbound path, but it was not allowed to master the bus:
+                                                                 for a PF TLP, PCIEEP_CMD[ME] was not set;
+                                                                 for a VF TLP, PCIEEP_CMD[ME] and/or PCIEEPVF_CMD[ME] were not set. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1C/H) Lack of forward progress at TLP FIFOs timeout occurred. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1C/H) Received an ECRC error. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1C/H) Received a write with poisoned payload.
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1C/H) Received a completion with poisoned payload.
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1C/H) Received a malformed TLP.
+ Internal:
+ radm_mlf_tlp_err. */
+        uint64_t rarwdns               : 1;  /**< [ 26: 26](R/W1C/H) Received a request which the device does not support.
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1C/H) Completer aborted a request. This bit is never set because CNXXXX does not generate
+ completer aborts. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1C/H) Received a completion with CA status.
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1C/H) Received a completion with UR status.
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1C/H) Received an unexpected completion.
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1C/H) Receive queue overflow. Normally happens only when flow control advertisements are
+ ignored.
+
+ Internal:
+ radm_qoverflow. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1C/H) Flow control update violation.
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+        uint64_t rpe                   : 1;  /**< [ 19: 19](R/W1C/H) PHY reported an 8b/10b decode error (RxStatus = 0x4) or disparity error (RxStatus =
+ 0x7).
+
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1C/H) Flow control protocol violation (watchdog timer).
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1C/H) DLLP protocol error (out of sequence DLLP).
+ Internal:
+ rdlh_prot_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1C/H) Received TLP with datalink layer error.
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1C/H) Received DLLP with datalink layer error.
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1C/H) Maximum number of retries exceeded.
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1C/H) Replay timer expired. This bit is set when the replay timer expires in the PCIe core. The
+ probability of this bit being set increases with the traffic load.
+
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1C/H) A completion timeout occurred.
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1C/H) Received vendor-defined message.
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t rumep : 1; /**< [ 10: 10](R/W1C/H) Received unlock message (EP mode only).
+ Internal:
+ pedc_radm_msg_unlock. */
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1C/H) Received PME turnoff acknowledge message (RC mode only).
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1C/H) Received PME message (RC mode only).
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1C/H) Received fatal-error message. This bit is set when a message with ERR_FATAL
+ is received.
+
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1C/H) Received nonfatal error message.
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1C/H) Received correctable error message.
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1C/H) Received poisoned TLP not to be forwarded to the peer.
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1C/H) Received ECRC error.
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1C/H) Received TLP has link layer error.
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1C/H) Received TLP is malformed or a message. If the core receives a MSG (or vendor message) or
+ if a received AtomicOp violates address/length rules, this bit is set as well.
+
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1C/H) Poisoned TLP sent. This legacy interrupt is deprecated and is never set. */
+#else /* Word 0 - Little Endian */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1C/H) Poisoned TLP sent. This legacy interrupt is deprecated and is never set. */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1C/H) Received TLP is malformed or a message. If the core receives a MSG (or vendor message) or
+ if a received AtomicOp violates address/length rules, this bit is set as well.
+
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1C/H) Received TLP has link layer error.
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1C/H) Received ECRC error.
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1C/H) Received poisoned TLP not to be forwarded to the peer.
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1C/H) Received correctable error message.
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1C/H) Received nonfatal error message.
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1C/H) Received fatal-error message. This bit is set when a message with ERR_FATAL
+ is received.
+
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1C/H) Received PME message (RC mode only).
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1C/H) Received PME turnoff acknowledge message (RC mode only).
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t rumep : 1; /**< [ 10: 10](R/W1C/H) Received unlock message (EP mode only).
+ Internal:
+ pedc_radm_msg_unlock. */
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1C/H) Received vendor-defined message.
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1C/H) A completion timeout occurred.
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1C/H) Replay timer expired. This bit is set when the replay timer expires in the PCIe core. The
+ probability of this bit being set increases with the traffic load.
+
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1C/H) Maximum number of retries exceeded.
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1C/H) Received DLLP with datalink layer error.
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1C/H) Received TLP with datalink layer error.
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1C/H) DLLP protocol error (out of sequence DLLP).
+ Internal:
+ rdlh_prot_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1C/H) Flow control protocol violation (watchdog timer).
+ Internal:
+ rtlh_fc_prot_err. */
+        uint64_t rpe                   : 1;  /**< [ 19: 19](R/W1C/H) PHY reported an 8b/10b decode error (RxStatus = 0x4) or disparity error (RxStatus =
+ 0x7).
+
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1C/H) Flow control update violation.
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1C/H) Receive queue overflow. Normally happens only when flow control advertisements are
+ ignored.
+
+ Internal:
+ radm_qoverflow. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1C/H) Received an unexpected completion.
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1C/H) Received a completion with UR status.
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1C/H) Received a completion with CA status.
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1C/H) Completer aborted a request. This bit is never set because CNXXXX does not generate
+ completer aborts. */
+        uint64_t rarwdns               : 1;  /**< [ 26: 26](R/W1C/H) Received a request which the device does not support.
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1C/H) Received a malformed TLP.
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1C/H) Received a completion with poisoned payload.
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1C/H) Received a write with poisoned payload.
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1C/H) Received an ECRC error. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1C/H) Lack of forward progress at TLP FIFOs timeout occurred. */
+        uint64_t bmd_e                 : 1;  /**< [ 32: 32](R/W1C/H) An NP or P TLP was seen in the outbound path, but it was not allowed to master the bus:
+                                                                 for a PF TLP, PCIEEP_CMD[ME] was not set;
+                                                                 for a VF TLP, PCIEEP_CMD[ME] and/or PCIEEPVF_CMD[ME] were not set. */
+ uint64_t rasdp : 1; /**< [ 33: 33](R/W1C/H) Core entered RAS data protection error mode. */
+ uint64_t reserved_34_63 : 30;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_pemx_dbg_info_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_58_63 : 6;
+        uint64_t m2s_pe                : 1;  /**< [ 57: 57](R/W1C/H) Detected an M2S FIFO parity error. */
+ uint64_t qhdr_b1_dbe : 1; /**< [ 56: 56](R/W1C/H) Detected a core header queue bank1 double-bit error. */
+ uint64_t qhdr_b1_sbe : 1; /**< [ 55: 55](R/W1C/H) Detected a core header queue bank1 single-bit error. */
+ uint64_t qhdr_b0_dbe : 1; /**< [ 54: 54](R/W1C/H) Detected a core header queue bank0 double-bit error. */
+ uint64_t qhdr_b0_sbe : 1; /**< [ 53: 53](R/W1C/H) Detected a core header queue bank0 single-bit error. */
+ uint64_t rtry_dbe : 1; /**< [ 52: 52](R/W1C/H) Detected a core retry RAM double-bit error. */
+ uint64_t rtry_sbe : 1; /**< [ 51: 51](R/W1C/H) Detected a core retry RAM single-bit error. */
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1C/H) Detected a TLP CPL FIFO control double-bit error. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1C/H) Detected a TLP CPL FIFO control single-bit error. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1C/H) Detected a TLP CPL FIFO data1 double-bit error. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1C/H) Detected a TLP CPL FIFO data1 single-bit error. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1C/H) Detected a TLP CPL FIFO data0 double-bit error. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1C/H) Detected a TLP CPL FIFO data0 single-bit error. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1C/H) Detected a TLP NP FIFO control double-bit error. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1C/H) Detected a TLP NP FIFO control single-bit error. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1C/H) Detected a TLP NP FIFO data1 double-bit error. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1C/H) Detected a TLP NP FIFO data1 single-bit error. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1C/H) Detected a TLP NP FIFO data0 double-bit error. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1C/H) Detected a TLP NP FIFO data0 single-bit error. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1C/H) Detected a TLP posted FIFO control double-bit error. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1C/H) Detected a TLP posted FIFO control single-bit error. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1C/H) Detected a TLP posted FIFO data1 double-bit error. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1C/H) Detected a TLP posted FIFO data1 single-bit error. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1C/H) Detected a TLP posted FIFO data0 double-bit error. */
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1C/H) Detected a TLP posted FIFO data0 single-bit error. */
+ uint64_t datq_pe : 1; /**< [ 32: 32](R/W1C/H) Detected a data queue RAM parity error. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1C/H) Lack of forward progress at TLP FIFOs timeout occurred. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1C/H) Received an ECRC error. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1C/H) Received a write with poisoned payload.
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1C/H) Received a completion with poisoned payload.
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1C/H) Received a malformed TLP.
+ Internal:
+ radm_mlf_tlp_err. */
+        uint64_t rarwdns               : 1;  /**< [ 26: 26](R/W1C/H) Received a request which the device does not support.
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1C/H) Completer aborted a request. This bit is never set because CNXXXX does not generate
+ completer aborts. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1C/H) Received a completion with CA status.
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1C/H) Received a completion with UR status.
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1C/H) Received an unexpected completion.
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1C/H) Receive queue overflow. Normally happens only when flow control advertisements are
+ ignored.
+
+ Internal:
+ radm_qoverflow. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1C/H) Flow control update violation.
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+        uint64_t rpe                   : 1;  /**< [ 19: 19](R/W1C/H) PHY reported an 8b/10b decode error (RxStatus = 0x4) or disparity error (RxStatus =
+ 0x7).
+
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1C/H) Flow control protocol violation (watchdog timer).
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1C/H) DLLP protocol error (out of sequence DLLP).
+ Internal:
+ rdlh_prot_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1C/H) Received TLP with datalink layer error.
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1C/H) Received DLLP with datalink layer error.
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1C/H) Maximum number of retries exceeded.
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1C/H) Replay timer expired. This bit is set when the REPLAY_TIMER expires in the PCIe core. The
+ probability of this bit being set increases with the traffic load.
+
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1C/H) A completion timeout occurred.
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1C/H) Received vendor-defined message.
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t reserved_10 : 1;
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1C/H) Received PME turnoff acknowledge message (RC mode only).
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1C/H) Received PME message (RC mode only).
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1C/H) Received fatal-error message (RC mode only). This bit is set when a message with ERR_FATAL
+                                                                 is received.
+
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1C/H) Received nonfatal error message (RC mode only).
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1C/H) Received correctable error message (RC mode only).
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1C/H) Received poisoned TLP.
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1C/H) Received ECRC error.
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1C/H) Received TLP has link layer error.
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1C/H) Received TLP is malformed or a message. If the core receives a MSG (or Vendor Message) or
+ if a received AtomicOp violates address/length rules, this bit is set as well.
+
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1C/H) Poisoned TLP sent.
+ Internal:
+ peai__client0_tlp_ep & peai__client0_tlp_hv or
+ peai__client1_tlp_ep & peai__client1_tlp_hv (atomic_op). */
+#else /* Word 0 - Little Endian */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1C/H) Poisoned TLP sent.
+ Internal:
+ peai__client0_tlp_ep & peai__client0_tlp_hv or
+ peai__client1_tlp_ep & peai__client1_tlp_hv (atomic_op). */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1C/H) Received TLP is malformed or a message. If the core receives a MSG (or Vendor Message) or
+ if a received AtomicOp violates address/length rules, this bit is set as well.
+
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1C/H) Received TLP has link layer error.
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1C/H) Received ECRC error.
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1C/H) Received poisoned TLP.
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1C/H) Received correctable error message (RC mode only).
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1C/H) Received nonfatal error message (RC mode only).
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1C/H) Received fatal-error message (RC mode only). This bit is set when a message with ERR_FATAL
+                                                                 is received.
+
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1C/H) Received PME message (RC mode only).
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1C/H) Received PME turnoff acknowledge message (RC mode only).
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t reserved_10 : 1;
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1C/H) Received vendor-defined message.
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1C/H) A completion timeout occurred.
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1C/H) Replay timer expired. This bit is set when the REPLAY_TIMER expires in the PCIe core. The
+ probability of this bit being set increases with the traffic load.
+
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1C/H) Maximum number of retries exceeded.
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1C/H) Received DLLP with datalink layer error.
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1C/H) Received TLP with datalink layer error.
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1C/H) DLLP protocol error (out of sequence DLLP).
+ Internal:
+ rdlh_prot_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1C/H) Flow control protocol violation (watchdog timer).
+ Internal:
+ rtlh_fc_prot_err. */
+        uint64_t rpe                   : 1;  /**< [ 19: 19](R/W1C/H) PHY reported an 8b/10b decode error (RxStatus = 0x4) or disparity error (RxStatus =
+ 0x7).
+
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1C/H) Flow control update violation.
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1C/H) Receive queue overflow. Normally happens only when flow control advertisements are
+ ignored.
+
+ Internal:
+ radm_qoverflow. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1C/H) Received an unexpected completion.
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1C/H) Received a completion with UR status.
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1C/H) Received a completion with CA status.
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1C/H) Completer aborted a request. This bit is never set because CNXXXX does not generate
+ completer aborts. */
+        uint64_t rarwdns               : 1;  /**< [ 26: 26](R/W1C/H) Received a request which the device does not support.
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1C/H) Received a malformed TLP.
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1C/H) Received a completion with poisoned payload.
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1C/H) Received a write with poisoned payload.
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1C/H) Received an ECRC error. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1C/H) Lack of forward progress at TLP FIFOs timeout occurred. */
+ uint64_t datq_pe : 1; /**< [ 32: 32](R/W1C/H) Detected a data queue RAM parity error. */
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1C/H) Detected a TLP posted FIFO data0 single-bit error. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1C/H) Detected a TLP posted FIFO data0 double-bit error. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1C/H) Detected a TLP posted FIFO data1 single-bit error. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1C/H) Detected a TLP posted FIFO data1 double-bit error. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1C/H) Detected a TLP posted FIFO control single-bit error. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1C/H) Detected a TLP posted FIFO control double-bit error. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1C/H) Detected a TLP NP FIFO data0 single-bit error. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1C/H) Detected a TLP NP FIFO data0 double-bit error. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1C/H) Detected a TLP NP FIFO data1 single-bit error. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1C/H) Detected a TLP NP FIFO data1 double-bit error. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1C/H) Detected a TLP NP FIFO control single-bit error. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1C/H) Detected a TLP NP FIFO control double-bit error. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1C/H) Detected a TLP CPL FIFO data0 single-bit error. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1C/H) Detected a TLP CPL FIFO data0 double-bit error. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1C/H) Detected a TLP CPL FIFO data1 single-bit error. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1C/H) Detected a TLP CPL FIFO data1 double-bit error. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1C/H) Detected a TLP CPL FIFO control single-bit error. */
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1C/H) Detected a TLP CPL FIFO control double-bit error. */
+ uint64_t rtry_sbe : 1; /**< [ 51: 51](R/W1C/H) Detected a core retry RAM single-bit error. */
+ uint64_t rtry_dbe : 1; /**< [ 52: 52](R/W1C/H) Detected a core retry RAM double-bit error. */
+ uint64_t qhdr_b0_sbe : 1; /**< [ 53: 53](R/W1C/H) Detected a core header queue bank0 single-bit error. */
+ uint64_t qhdr_b0_dbe : 1; /**< [ 54: 54](R/W1C/H) Detected a core header queue bank0 double-bit error. */
+ uint64_t qhdr_b1_sbe : 1; /**< [ 55: 55](R/W1C/H) Detected a core header queue bank1 single-bit error. */
+ uint64_t qhdr_b1_dbe : 1; /**< [ 56: 56](R/W1C/H) Detected a core header queue bank1 double-bit error. */
+        uint64_t m2s_pe                : 1;  /**< [ 57: 57](R/W1C/H) Detected an M2S FIFO parity error. */
+ uint64_t reserved_58_63 : 6;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_pemx_dbg_info_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_56_63 : 8;
+ uint64_t rasdp : 1; /**< [ 55: 55](R/W1C/H) Core entered RAS data protection error mode. */
+        uint64_t m2s_d_dbe             : 1;  /**< [ 54: 54](R/W1C/H) Detected an M2S data FIFO double-bit error. */
+        uint64_t m2s_d_sbe             : 1;  /**< [ 53: 53](R/W1C/H) Detected an M2S data FIFO single-bit error. */
+        uint64_t m2s_c_dbe             : 1;  /**< [ 52: 52](R/W1C/H) Detected an M2S control FIFO double-bit error. */
+        uint64_t m2s_c_sbe             : 1;  /**< [ 51: 51](R/W1C/H) Detected an M2S control FIFO single-bit error. */
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1C/H) Detected a TLP CPL FIFO control double-bit error. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1C/H) Detected a TLP CPL FIFO control single-bit error. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1C/H) Detected a TLP CPL FIFO data1 double-bit error. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1C/H) Detected a TLP CPL FIFO data1 single-bit error. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1C/H) Detected a TLP CPL FIFO data0 double-bit error. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1C/H) Detected a TLP CPL FIFO data0 single-bit error. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1C/H) Detected a TLP NP FIFO control double-bit error. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1C/H) Detected a TLP NP FIFO control single-bit error. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1C/H) Detected a TLP NP FIFO data1 double-bit error. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1C/H) Detected a TLP NP FIFO data1 single-bit error. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1C/H) Detected a TLP NP FIFO data0 double-bit error. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1C/H) Detected a TLP NP FIFO data0 single-bit error. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1C/H) Detected a TLP posted FIFO control double-bit error. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1C/H) Detected a TLP posted FIFO control single-bit error. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1C/H) Detected a TLP posted FIFO data1 double-bit error. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1C/H) Detected a TLP posted FIFO data1 single-bit error. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1C/H) Detected a TLP posted FIFO data0 double-bit error. */
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1C/H) Detected a TLP posted FIFO data0 single-bit error. */
+        uint64_t bmd_e                 : 1;  /**< [ 32: 32](R/W1C/H) An NP or P TLP was seen in the outbound path, but it was not allowed to master the bus:
+                                                                 for a PF TLP, PCIEEP()_CFG001[ME] was not set;
+                                                                 for a VF TLP, PCIEEP()_CFG001[ME] and/or PCIEEPVF()_CFG001[ME] were not set. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1C/H) Lack of forward progress at TLP FIFOs timeout occurred. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1C/H) Received an ECRC error. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1C/H) Received a write with poisoned payload.
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1C/H) Received a completion with poisoned payload.
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1C/H) Received a malformed TLP.
+ Internal:
+ radm_mlf_tlp_err. */
+        uint64_t rarwdns               : 1;  /**< [ 26: 26](R/W1C/H) Received a request which the device does not support.
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1C/H) Completer aborted a request. This bit is never set because CNXXXX does not generate
+ completer aborts. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1C/H) Received a completion with CA status.
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1C/H) Received a completion with UR status.
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1C/H) Received an unexpected completion.
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1C/H) Receive queue overflow. Normally happens only when flow control advertisements are
+ ignored.
+
+ Internal:
+ radm_qoverflow. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1C/H) Flow control update violation.
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+        uint64_t rpe                   : 1;  /**< [ 19: 19](R/W1C/H) PHY reported an 8b/10b decode error (RxStatus = 0x4) or disparity error (RxStatus =
+ 0x7).
+
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1C/H) Flow control protocol violation (watchdog timer).
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1C/H) DLLP protocol error (out of sequence DLLP).
+ Internal:
+ rdlh_prot_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1C/H) Received TLP with datalink layer error.
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1C/H) Received DLLP with datalink layer error.
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1C/H) Maximum number of retries exceeded.
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1C/H) Replay timer expired. This bit is set when the REPLAY_TIMER expires in the PCIe core. The
+ probability of this bit being set increases with the traffic load.
+
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1C/H) A completion timeout occurred.
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1C/H) Received vendor-defined message.
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t rumep : 1; /**< [ 10: 10](R/W1C/H) Received unlock message (EP mode only).
+ Internal:
+ pedc_radm_msg_unlock. */
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1C/H) Received PME turnoff acknowledge message (RC mode only).
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1C/H) Received PME message (RC mode only).
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1C/H) Received fatal-error message. This bit is set when a message with ERR_FATAL
+                                                                 is received.
+
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1C/H) Received nonfatal error message.
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1C/H) Received correctable error message.
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1C/H) Received poisoned TLP not to be forwarded to the peer.
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1C/H) Received ECRC error.
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1C/H) Received TLP has link layer error.
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1C/H) Received TLP is malformed or a message. If the core receives a MSG (or Vendor Message) or
+ if a received AtomicOp violates address/length rules, this bit is set as well.
+
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1C/H) Poisoned TLP sent. This legacy interrupt is deprecated and is never set. */
+#else /* Word 0 - Little Endian */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1C/H) Poisoned TLP sent. This legacy interrupt is deprecated and is never set. */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1C/H) Received TLP is malformed or a message. If the core receives a MSG (or Vendor Message) or
+ if a received AtomicOp violates address/length rules, this bit is set as well.
+
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1C/H) Received TLP has link layer error.
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1C/H) Received ECRC error.
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1C/H) Received poisoned TLP not to be forwarded to the peer.
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1C/H) Received correctable error message.
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1C/H) Received nonfatal error message.
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1C/H) Received fatal-error message. This bit is set when a message with ERR_FATAL
+                                                                 is received.
+
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1C/H) Received PME message (RC mode only).
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1C/H) Received PME turnoff acknowledge message (RC mode only).
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t rumep : 1; /**< [ 10: 10](R/W1C/H) Received unlock message (EP mode only).
+ Internal:
+ pedc_radm_msg_unlock. */
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1C/H) Received vendor-defined message.
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1C/H) A completion timeout occurred.
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1C/H) Replay timer expired. This bit is set when the REPLAY_TIMER expires in the PCIe core. The
+ probability of this bit being set increases with the traffic load.
+
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1C/H) Maximum number of retries exceeded.
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1C/H) Received DLLP with datalink layer error.
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1C/H) Received TLP with datalink layer error.
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1C/H) DLLP protocol error (out of sequence DLLP).
+ Internal:
+ rdlh_prot_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1C/H) Flow control protocol violation (watchdog timer).
+ Internal:
+ rtlh_fc_prot_err. */
+        uint64_t rpe                   : 1;  /**< [ 19: 19](R/W1C/H) PHY reported an 8b/10b decode error (RxStatus = 0x4) or disparity error (RxStatus =
+ 0x7).
+
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1C/H) Flow control update violation.
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1C/H) Receive queue overflow. Normally happens only when flow control advertisements are
+ ignored.
+
+ Internal:
+ radm_qoverflow. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1C/H) Received an unexpected completion.
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1C/H) Received a completion with UR status.
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1C/H) Received a completion with CA status.
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1C/H) Completer aborted a request. This bit is never set because CNXXXX does not generate
+ completer aborts. */
+        uint64_t rarwdns               : 1;  /**< [ 26: 26](R/W1C/H) Received a request which the device does not support.
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1C/H) Received a malformed TLP.
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1C/H) Received a completion with poisoned payload.
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1C/H) Received a write with poisoned payload.
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1C/H) Received an ECRC error. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1C/H) Lack of forward progress at TLP FIFOs timeout occurred. */
+        uint64_t bmd_e                 : 1;  /**< [ 32: 32](R/W1C/H) An NP or P TLP was seen in the outbound path, but it was not allowed to master the bus:
+                                                                 for a PF TLP, PCIEEP()_CFG001[ME] was not set;
+                                                                 for a VF TLP, PCIEEP()_CFG001[ME] and/or PCIEEPVF()_CFG001[ME] were not set. */
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1C/H) Detected a TLP posted FIFO data0 single-bit error. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1C/H) Detected a TLP posted FIFO data0 double-bit error. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1C/H) Detected a TLP posted FIFO data1 single-bit error. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1C/H) Detected a TLP posted FIFO data1 double-bit error. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1C/H) Detected a TLP posted FIFO control single-bit error. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1C/H) Detected a TLP posted FIFO control double-bit error. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1C/H) Detected a TLP NP FIFO data0 single-bit error. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1C/H) Detected a TLP NP FIFO data0 double-bit error. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1C/H) Detected a TLP NP FIFO data1 single-bit error. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1C/H) Detected a TLP NP FIFO data1 double-bit error. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1C/H) Detected a TLP NP FIFO control single-bit error. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1C/H) Detected a TLP NP FIFO control double-bit error. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1C/H) Detected a TLP CPL FIFO data0 single-bit error. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1C/H) Detected a TLP CPL FIFO data0 double-bit error. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1C/H) Detected a TLP CPL FIFO data1 single-bit error. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1C/H) Detected a TLP CPL FIFO data1 double-bit error. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1C/H) Detected a TLP CPL FIFO control single-bit error. */
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1C/H) Detected a TLP CPL FIFO control double-bit error. */
+        uint64_t m2s_c_sbe             : 1;  /**< [ 51: 51](R/W1C/H) Detected an M2S control FIFO single-bit error. */
+        uint64_t m2s_c_dbe             : 1;  /**< [ 52: 52](R/W1C/H) Detected an M2S control FIFO double-bit error. */
+        uint64_t m2s_d_sbe             : 1;  /**< [ 53: 53](R/W1C/H) Detected an M2S data FIFO single-bit error. */
+        uint64_t m2s_d_dbe             : 1;  /**< [ 54: 54](R/W1C/H) Detected an M2S data FIFO double-bit error. */
+ uint64_t rasdp : 1; /**< [ 55: 55](R/W1C/H) Core entered RAS data protection error mode. */
+ uint64_t reserved_56_63 : 8;
+#endif /* Word 0 - End */
+ } cn83xx;
+ /* struct bdk_pemx_dbg_info_cn81xx cn88xxp2; */
+};
+typedef union bdk_pemx_dbg_info bdk_pemx_dbg_info_t;
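+
+/* Illustrative sketch (added during review; not part of the imported BDK
+ * sources): decoding a raw PEM()_DBG_INFO value through this union. The [.s]
+ * view is the superset layout; fields that exist only on some silicon (for
+ * example [RASDP]) live in the per-chip views and should be guarded with
+ * CAVIUM_IS_MODEL(). The helper name is hypothetical.
+ */
+static inline void example_decode_pem_dbg_info(uint64_t raw)
+{
+    bdk_pemx_dbg_info_t dbg;
+    dbg.u = raw;
+    if (dbg.s.ramtlp)               /* Present in every model's layout. */
+        printf("PEM: received a malformed TLP\n");
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && dbg.cn83xx.rasdp)
+        printf("PEM: core entered RAS data protection mode\n");
+}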
+
+static inline uint64_t BDK_PEMX_DBG_INFO(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_DBG_INFO(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x87e0c0000448ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000448ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0c0000448ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e00000000f8ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_DBG_INFO", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_DBG_INFO(a) bdk_pemx_dbg_info_t
+#define bustype_BDK_PEMX_DBG_INFO(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_DBG_INFO(a) "PEMX_DBG_INFO"
+#define device_bar_BDK_PEMX_DBG_INFO(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_DBG_INFO(a) (a)
+#define arguments_BDK_PEMX_DBG_INFO(a) (a),-1,-1,-1
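+
+/* Illustrative sketch (added during review; not part of the imported BDK
+ * sources): the usual poll-and-acknowledge pattern for this register. Every
+ * bit is R/W1C, so writing back the value just read clears exactly the events
+ * that were observed; the companion PEM()_DBG_INFO_W1S register below sets
+ * the same bits, which is useful for interrupt test injection. BDK_CSR_INIT()
+ * and BDK_CSR_WRITE() are the BDK's own CSR accessors; the helper name is
+ * hypothetical.
+ */
+static inline uint64_t example_ack_pem_dbg_info(bdk_node_t node, int pem)
+{
+    BDK_CSR_INIT(dbg, node, BDK_PEMX_DBG_INFO(pem));
+    if (dbg.u)      /* W1C: acknowledge only what was actually observed. */
+        BDK_CSR_WRITE(node, BDK_PEMX_DBG_INFO(pem), dbg.u);
+    return dbg.u;   /* Caller can decode the snapshot via the union above. */
+}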
+
+/**
+ * Register (RSL) pem#_dbg_info_w1s
+ *
+ * PEM Debug Information Set Register
+ * This register sets interrupt bits.
+ */
+union bdk_pemx_dbg_info_w1s
+{
+ uint64_t u;
+ struct bdk_pemx_dbg_info_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_58_63 : 6;
+ uint64_t m2s_pe : 1; /**< [ 57: 57](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[M2S_PE]. */
+ uint64_t qhdr_b1_dbe : 1; /**< [ 56: 56](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[QHDR_B1_DBE]. */
+ uint64_t reserved_51_55 : 5;
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_C_DBE]. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_C_SBE]. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_D1_DBE]. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_D1_SBE]. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_D0_DBE]. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_D0_SBE]. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_C_DBE]. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_C_SBE]. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_D1_DBE]. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_D1_SBE]. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_D0_DBE]. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_D0_SBE]. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_C_DBE]. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_C_SBE]. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_D1_DBE]. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_D1_SBE]. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_D0_DBE]. */
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_D0_SBE]. */
+ uint64_t reserved_32 : 1;
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[LOFP]. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[ECRC_E]. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RAWWPP].
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RACPP].
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RAMTLP].
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t rarwdns : 1; /**< [ 26: 26](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RARWDNS].
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[CAAR]. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RACCA].
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RACUR].
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RAUC].
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RQO].
+ Internal:
+ radm_qoverflow. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[FCUV].
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rpe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RPE].
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[FCPVWT].
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[DPEOOSD].
+ Internal:
+ rdlh_prot_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RTWDLE].
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RDWDLE].
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[MRE].
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RTE].
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[ACTO].
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RVDM].
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t rumep : 1; /**< [ 10: 10](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RUMEP].
+ Internal:
+ pedc_radm_msg_unlock. */
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RPTAMRC].
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RPMERC].
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RFEMRC].
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RNFEMRC].
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RCEMRC].
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RPOISON].
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RECRCE].
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RTLPLLE].
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RTLPMAL].
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[SPOISON].
+ Internal:
+ peai__client0_tlp_ep & peai__client0_tlp_hv or
+ peai__client1_tlp_ep & peai__client1_tlp_hv (atomic_op). */
+#else /* Word 0 - Little Endian */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[SPOISON].
+ Internal:
+ peai__client0_tlp_ep & peai__client0_tlp_hv or
+ peai__client1_tlp_ep & peai__client1_tlp_hv (atomic_op). */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RTLPMAL].
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RTLPLLE].
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RECRCE].
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RPOISON].
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RCEMRC].
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RNFEMRC].
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RFEMRC].
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RPMERC].
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RPTAMRC].
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t rumep : 1; /**< [ 10: 10](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RUMEP].
+ Internal:
+ pedc_radm_msg_unlock. */
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RVDM].
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[ACTO].
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RTE].
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[MRE].
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RDWDLE].
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RTWDLE].
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[DPEOOSD].
+ Internal:
+ rdlh_prot_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[FCPVWT].
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t rpe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RPE].
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[FCUV].
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RQO].
+ Internal:
+ radm_qoverflow. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RAUC].
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RACUR].
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RACCA].
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[CAAR]. */
+ uint64_t rarwdns : 1; /**< [ 26: 26](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RARWDNS].
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RAMTLP].
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RACPP].
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RAWWPP].
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[ECRC_E]. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[LOFP]. */
+ uint64_t reserved_32 : 1;
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_D0_SBE]. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_D0_DBE]. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_D1_SBE]. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_D1_DBE]. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_C_SBE]. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_C_DBE]. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_D0_SBE]. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_D0_DBE]. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_D1_SBE]. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_D1_DBE]. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_C_SBE]. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_C_DBE]. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_D0_SBE]. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_D0_DBE]. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_D1_SBE]. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_D1_DBE]. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_C_SBE]. */
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_C_DBE]. */
+ uint64_t reserved_51_55 : 5;
+ uint64_t qhdr_b1_dbe : 1; /**< [ 56: 56](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[QHDR_B1_DBE]. */
+ uint64_t m2s_pe : 1; /**< [ 57: 57](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[M2S_PE]. */
+ uint64_t reserved_58_63 : 6;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pemx_dbg_info_w1s_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_57_63 : 7;
+ uint64_t qhdr_b1_dbe : 1; /**< [ 56: 56](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[QHDR_B1_DBE]. */
+ uint64_t qhdr_b1_sbe : 1; /**< [ 55: 55](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[QHDR_B1_SBE]. */
+ uint64_t qhdr_b0_dbe : 1; /**< [ 54: 54](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[QHDR_B0_DBE]. */
+ uint64_t qhdr_b0_sbe : 1; /**< [ 53: 53](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[QHDR_B0_SBE]. */
+ uint64_t rtry_dbe : 1; /**< [ 52: 52](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RTRY_DBE]. */
+ uint64_t rtry_sbe : 1; /**< [ 51: 51](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RTRY_SBE]. */
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_C_DBE]. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_C_SBE]. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_D1_DBE]. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_D1_SBE]. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_D0_DBE]. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_D0_SBE]. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_C_DBE]. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_C_SBE]. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_D1_DBE]. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_D1_SBE]. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_D0_DBE]. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_D0_SBE]. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_C_DBE]. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_C_SBE]. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_D1_DBE]. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_D1_SBE]. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_D0_DBE]. */
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_D0_SBE]. */
+ uint64_t datq_pe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[DATQ_PE]. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[LOFP]. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[ECRC_E]. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RAWWPP].
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RACPP].
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RAMTLP].
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t rarwdns : 1; /**< [ 26: 26](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RARWDNS].
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[CAAR]. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RACCA].
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RACUR].
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RAUC].
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RQO].
+ Internal:
+ radm_qoverflow. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[FCUV].
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rpe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RPE].
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[FCPVWT].
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[DPEOOSD].
+ Internal:
+ rdlh_prot_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RTWDLE].
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RDWDLE].
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[MRE].
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RTE].
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[ACTO].
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RVDM].
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t reserved_10 : 1;
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RPTAMRC].
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RPMERC].
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RFEMRC].
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RNFEMRC].
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RCEMRC].
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RPOISON].
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RECRCE].
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RTLPLLE].
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RTLPMAL].
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[SPOISON].
+ Internal:
+ peai__client0_tlp_ep & peai__client0_tlp_hv or
+ peai__client1_tlp_ep & peai__client1_tlp_hv (atomic_op). */
+#else /* Word 0 - Little Endian */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[SPOISON].
+ Internal:
+ peai__client0_tlp_ep & peai__client0_tlp_hv or
+ peai__client1_tlp_ep & peai__client1_tlp_hv (atomic_op). */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RTLPMAL].
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RTLPLLE].
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RECRCE].
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RPOISON].
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RCEMRC].
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RNFEMRC].
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RFEMRC].
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RPMERC].
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RPTAMRC].
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t reserved_10 : 1;
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RVDM].
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[ACTO].
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RTE].
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[MRE].
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RDWDLE].
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RTWDLE].
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[DPEOOSD].
+ Internal:
+ rdlh_prot_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[FCPVWT].
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t rpe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RPE].
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[FCUV].
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RQO].
+ Internal:
+ radm_qoverflow. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RAUC].
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RACUR].
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RACCA].
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[CAAR]. */
+ uint64_t rarwdns : 1; /**< [ 26: 26](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RARWDNS].
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RAMTLP].
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RACPP].
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RAWWPP].
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[ECRC_E]. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[LOFP]. */
+ uint64_t datq_pe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[DATQ_PE]. */
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_D0_SBE]. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_D0_DBE]. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_D1_SBE]. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_D1_DBE]. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_C_SBE]. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_C_DBE]. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_D0_SBE]. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_D0_DBE]. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_D1_SBE]. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_D1_DBE]. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_C_SBE]. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_C_DBE]. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_D0_SBE]. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_D0_DBE]. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_D1_SBE]. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_D1_DBE]. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_C_SBE]. */
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_C_DBE]. */
+ uint64_t rtry_sbe : 1; /**< [ 51: 51](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RTRY_SBE]. */
+ uint64_t rtry_dbe : 1; /**< [ 52: 52](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RTRY_DBE]. */
+ uint64_t qhdr_b0_sbe : 1; /**< [ 53: 53](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[QHDR_B0_SBE]. */
+ uint64_t qhdr_b0_dbe : 1; /**< [ 54: 54](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[QHDR_B0_DBE]. */
+ uint64_t qhdr_b1_sbe : 1; /**< [ 55: 55](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[QHDR_B1_SBE]. */
+ uint64_t qhdr_b1_dbe : 1; /**< [ 56: 56](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[QHDR_B1_DBE]. */
+ uint64_t reserved_57_63 : 7;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_pemx_dbg_info_w1s_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_58_63 : 6;
+ uint64_t m2s_pe : 1; /**< [ 57: 57](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[M2S_PE]. */
+ uint64_t qhdr_b1_dbe : 1; /**< [ 56: 56](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[QHDR_B1_DBE]. */
+ uint64_t qhdr_b1_sbe : 1; /**< [ 55: 55](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[QHDR_B1_SBE]. */
+ uint64_t qhdr_b0_dbe : 1; /**< [ 54: 54](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[QHDR_B0_DBE]. */
+ uint64_t qhdr_b0_sbe : 1; /**< [ 53: 53](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[QHDR_B0_SBE]. */
+ uint64_t rtry_dbe : 1; /**< [ 52: 52](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RTRY_DBE]. */
+ uint64_t rtry_sbe : 1; /**< [ 51: 51](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RTRY_SBE]. */
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[C_C_DBE]. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[C_C_SBE]. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[C_D1_DBE]. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[C_D1_SBE]. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[C_D0_DBE]. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[C_D0_SBE]. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[N_C_DBE]. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[N_C_SBE]. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[N_D1_DBE]. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[N_D1_SBE]. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[N_D0_DBE]. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[N_D0_SBE]. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[P_C_DBE]. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[P_C_SBE]. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[P_D1_DBE]. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[P_D1_SBE]. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[P_D0_DBE]. */
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[P_D0_SBE]. */
+ uint64_t datq_pe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[DATQ_PE]. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[LOFP]. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[ECRC_E]. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RAWWPP].
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RACPP].
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RAMTLP].
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t rarwdns : 1; /**< [ 26: 26](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RARWDNS].
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[CAAR]. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RACCA].
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RACUR].
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RAUC].
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RQO].
+ Internal:
+ radm_qoverflow. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[FCUV].
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rpe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RPE].
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[FCPVWT].
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[DPEOOSD].
+ Internal:
+ rdlh_prot_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RTWDLE].
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RDWDLE].
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[MRE].
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RTE].
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[ACTO].
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RVDM].
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t reserved_10 : 1;
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RPTAMRC].
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RPMERC].
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RFEMRC].
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RNFEMRC].
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RCEMRC].
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RPOISON].
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RECRCE].
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RTLPLLE].
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RTLPMAL].
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[SPOISON].
+ Internal:
+ peai__client0_tlp_ep & peai__client0_tlp_hv or
+ peai__client1_tlp_ep & peai__client1_tlp_hv (atomic_op). */
+#else /* Word 0 - Little Endian */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[SPOISON].
+ Internal:
+ peai__client0_tlp_ep & peai__client0_tlp_hv or
+ peai__client1_tlp_ep & peai__client1_tlp_hv (atomic_op). */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RTLPMAL].
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RTLPLLE].
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RECRCE].
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RPOISON].
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RCEMRC].
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RNFEMRC].
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RFEMRC].
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RPMERC].
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RPTAMRC].
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t reserved_10 : 1;
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RVDM].
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[ACTO].
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RTE].
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[MRE].
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RDWDLE].
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RTWDLE].
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[DPEOOSD].
+ Internal:
+ rdlh_prot_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[FCPVWT].
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t rpe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RPE].
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[FCUV].
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RQO].
+ Internal:
+ radm_qoverflow. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RAUC].
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RACUR].
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RACCA].
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[CAAR]. */
+ uint64_t rarwdns : 1; /**< [ 26: 26](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RARWDNS].
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RAMTLP].
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RACPP].
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RAWWPP].
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[ECRC_E]. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[LOFP]. */
+ uint64_t datq_pe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[DATQ_PE]. */
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[P_D0_SBE]. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[P_D0_DBE]. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[P_D1_SBE]. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[P_D1_DBE]. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[P_C_SBE]. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[P_C_DBE]. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[N_D0_SBE]. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[N_D0_DBE]. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[N_D1_SBE]. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[N_D1_DBE]. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[N_C_SBE]. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[N_C_DBE]. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[C_D0_SBE]. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[C_D0_DBE]. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[C_D1_SBE]. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[C_D1_DBE]. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[C_C_SBE]. */
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[C_C_DBE]. */
+ uint64_t rtry_sbe : 1; /**< [ 51: 51](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RTRY_SBE]. */
+ uint64_t rtry_dbe : 1; /**< [ 52: 52](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[RTRY_DBE]. */
+ uint64_t qhdr_b0_sbe : 1; /**< [ 53: 53](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[QHDR_B0_SBE]. */
+ uint64_t qhdr_b0_dbe : 1; /**< [ 54: 54](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[QHDR_B0_DBE]. */
+ uint64_t qhdr_b1_sbe : 1; /**< [ 55: 55](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[QHDR_B1_SBE]. */
+ uint64_t qhdr_b1_dbe : 1; /**< [ 56: 56](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[QHDR_B1_DBE]. */
+ uint64_t m2s_pe : 1; /**< [ 57: 57](R/W1S/H) Reads or sets PEM(0..2)_DBG_INFO[M2S_PE]. */
+ uint64_t reserved_58_63 : 6;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_pemx_dbg_info_w1s_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_56_63 : 8;
+ uint64_t rasdp : 1; /**< [ 55: 55](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RASDP]. */
+ uint64_t m2s_d_dbe : 1; /**< [ 54: 54](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[M2S_D_DBE]. */
+ uint64_t m2s_d_sbe : 1; /**< [ 53: 53](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[M2S_D_SBE]. */
+ uint64_t m2s_c_dbe : 1; /**< [ 52: 52](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[M2S_C_DBE]. */
+ uint64_t m2s_c_sbe : 1; /**< [ 51: 51](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[M2S_C_SBE]. */
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[C_C_DBE]. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[C_C_SBE]. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[C_D1_DBE]. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[C_D1_SBE]. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[C_D0_DBE]. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[C_D0_SBE]. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[N_C_DBE]. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[N_C_SBE]. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[N_D1_DBE]. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[N_D1_SBE]. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[N_D0_DBE]. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[N_D0_SBE]. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[P_C_DBE]. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[P_C_SBE]. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[P_D1_DBE]. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[P_D1_SBE]. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[P_D0_DBE]. */
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[P_D0_SBE]. */
+ uint64_t bmd_e : 1; /**< [ 32: 32](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[BMD_E]. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[LOFP]. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[ECRC_E]. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RAWWPP].
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RACPP].
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RAMTLP].
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t rarwdns : 1; /**< [ 26: 26](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RARWDNS].
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[CAAR]. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RACCA].
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RACUR].
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RAUC].
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RQO].
+ Internal:
+ radm_qoverflow. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[FCUV].
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rpe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RPE].
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[FCPVWT].
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[DPEOOSD].
+ Internal:
+ rdlh_prot_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RTWDLE].
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RDWDLE].
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[MRE].
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RTE].
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[ACTO].
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RVDM].
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t rumep : 1; /**< [ 10: 10](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RUMEP].
+ Internal:
+ pedc_radm_msg_unlock. */
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RPTAMRC].
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RPMERC].
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RFEMRC].
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RNFEMRC].
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RCEMRC].
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RPOISON].
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RECRCE].
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RTLPLLE].
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RTLPMAL].
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[SPOISON]. */
+#else /* Word 0 - Little Endian */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[SPOISON]. */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RTLPMAL].
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RTLPLLE].
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RECRCE].
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RPOISON].
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RCEMRC].
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RNFEMRC].
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RFEMRC].
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RPMERC].
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RPTAMRC].
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t rumep : 1; /**< [ 10: 10](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RUMEP].
+ Internal:
+ pedc_radm_msg_unlock. */
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RVDM].
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[ACTO].
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RTE].
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[MRE].
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RDWDLE].
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RTWDLE].
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[DPEOOSD].
+ Internal:
+ rdlh_prot_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[FCPVWT].
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t rpe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RPE].
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[FCUV].
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RQO].
+ Internal:
+ radm_qoverflow. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RAUC].
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RACUR].
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RACCA].
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[CAAR]. */
+ uint64_t rarwdns : 1; /**< [ 26: 26](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RARWDNS].
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RAMTLP].
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RACPP].
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RAWWPP].
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[ECRC_E]. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[LOFP]. */
+ uint64_t bmd_e : 1; /**< [ 32: 32](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[BMD_E]. */
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[P_D0_SBE]. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[P_D0_DBE]. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[P_D1_SBE]. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[P_D1_DBE]. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[P_C_SBE]. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[P_C_DBE]. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[N_D0_SBE]. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[N_D0_DBE]. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[N_D1_SBE]. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[N_D1_DBE]. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[N_C_SBE]. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[N_C_DBE]. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[C_D0_SBE]. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[C_D0_DBE]. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[C_D1_SBE]. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[C_D1_DBE]. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[C_C_SBE]. */
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[C_C_DBE]. */
+ uint64_t m2s_c_sbe : 1; /**< [ 51: 51](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[M2S_C_SBE]. */
+ uint64_t m2s_c_dbe : 1; /**< [ 52: 52](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[M2S_C_DBE]. */
+ uint64_t m2s_d_sbe : 1; /**< [ 53: 53](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[M2S_D_SBE]. */
+ uint64_t m2s_d_dbe : 1; /**< [ 54: 54](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[M2S_D_DBE]. */
+ uint64_t rasdp : 1; /**< [ 55: 55](R/W1S/H) Reads or sets PEM(0..3)_DBG_INFO[RASDP]. */
+ uint64_t reserved_56_63 : 8;
+#endif /* Word 0 - End */
+ } cn83xx;
+ struct bdk_pemx_dbg_info_w1s_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_58_63 : 6;
+ uint64_t m2s_pe : 1; /**< [ 57: 57](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[M2S_PE]. */
+ uint64_t qhdr_b1_dbe : 1; /**< [ 56: 56](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[QHDR_B1_DBE]. */
+ uint64_t qhdr_b1_sbe : 1; /**< [ 55: 55](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[QHDR_B1_SBE]. */
+ uint64_t qhdr_b0_dbe : 1; /**< [ 54: 54](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[QHDR_B0_DBE]. */
+ uint64_t qhdr_b0_sbe : 1; /**< [ 53: 53](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[QHDR_B0_SBE]. */
+ uint64_t rtry_dbe : 1; /**< [ 52: 52](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RTRY_DBE]. */
+ uint64_t rtry_sbe : 1; /**< [ 51: 51](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RTRY_SBE]. */
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_C_DBE]. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_C_SBE]. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_D1_DBE]. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_D1_SBE]. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_D0_DBE]. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_D0_SBE]. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_C_DBE]. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_C_SBE]. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_D1_DBE]. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_D1_SBE]. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_D0_DBE]. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_D0_SBE]. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_C_DBE]. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_C_SBE]. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_D1_DBE]. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_D1_SBE]. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_D0_DBE]. */
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_D0_SBE]. */
+ uint64_t datq_pe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[DATQ_PE]. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[LOFP]. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[ECRC_E]. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RAWWPP].
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RACPP].
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RAMTLP].
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t rarwdns : 1; /**< [ 26: 26](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RARWDNS].
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[CAAR]. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RACCA].
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RACUR].
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RAUC].
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RQO].
+ Internal:
+ radm_qoverflow. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[FCUV].
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rpe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RPE].
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[FCPVWT].
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[DPEOOSD].
+ Internal:
+ rdlh_prot_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RTWDLE].
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RDWDLE].
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[MRE].
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RTE].
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[ACTO].
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RVDM].
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t reserved_10 : 1;
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RPTAMRC].
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RPMERC].
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RFEMRC].
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RNFEMRC].
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RCEMRC].
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RPOISON].
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RECRCE].
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RTLPLLE].
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RTLPMAL].
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[SPOISON].
+ Internal:
+ peai__client0_tlp_ep & peai__client0_tlp_hv or
+ peai__client1_tlp_ep & peai__client1_tlp_hv (atomic_op). */
+#else /* Word 0 - Little Endian */
+ uint64_t spoison : 1; /**< [ 0: 0](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[SPOISON].
+ Internal:
+ peai__client0_tlp_ep & peai__client0_tlp_hv or
+ peai__client1_tlp_ep & peai__client1_tlp_hv (atomic_op). */
+ uint64_t rtlpmal : 1; /**< [ 1: 1](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RTLPMAL].
+ Internal:
+ pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot. */
+ uint64_t rtlplle : 1; /**< [ 2: 2](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RTLPLLE].
+ Internal:
+ pedc_radm_trgt1_dllp_abort &
+ pedc__radm_trgt1_eot. */
+ uint64_t recrce : 1; /**< [ 3: 3](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RECRCE].
+ Internal:
+ pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot. */
+ uint64_t rpoison : 1; /**< [ 4: 4](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RPOISON].
+ Internal:
+ pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv. */
+ uint64_t rcemrc : 1; /**< [ 5: 5](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RCEMRC].
+ Internal:
+ pedc_radm_correctable_err. */
+ uint64_t rnfemrc : 1; /**< [ 6: 6](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RNFEMRC].
+ Internal:
+ pedc_radm_nonfatal_err. */
+ uint64_t rfemrc : 1; /**< [ 7: 7](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RFEMRC].
+ Internal:
+ pedc_radm_fatal_err. */
+ uint64_t rpmerc : 1; /**< [ 8: 8](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RPMERC].
+ Internal:
+ pedc_radm_pm_pme. */
+ uint64_t rptamrc : 1; /**< [ 9: 9](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RPTAMRC].
+ Internal:
+ pedc_radm_pm_to_ack. */
+ uint64_t reserved_10 : 1;
+ uint64_t rvdm : 1; /**< [ 11: 11](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RVDM].
+ Internal:
+ pedc_radm_vendor_msg. */
+ uint64_t acto : 1; /**< [ 12: 12](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[ACTO].
+ Internal:
+ pedc_radm_cpl_timeout. */
+ uint64_t rte : 1; /**< [ 13: 13](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RTE].
+ Internal:
+ xdlh_replay_timeout_err. */
+ uint64_t mre : 1; /**< [ 14: 14](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[MRE].
+ Internal:
+ xdlh_replay_num_rlover_err. */
+ uint64_t rdwdle : 1; /**< [ 15: 15](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RDWDLE].
+ Internal:
+ rdlh_bad_dllp_err. */
+ uint64_t rtwdle : 1; /**< [ 16: 16](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RTWDLE].
+ Internal:
+ rdlh_bad_tlp_err. */
+ uint64_t dpeoosd : 1; /**< [ 17: 17](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[DPEOOSD].
+ Internal:
+ rdlh_prot_err. */
+ uint64_t fcpvwt : 1; /**< [ 18: 18](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[FCPVWT].
+ Internal:
+ rtlh_fc_prot_err. */
+ uint64_t rpe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RPE].
+ Internal:
+ rmlh_rcvd_err. */
+ uint64_t fcuv : 1; /**< [ 20: 20](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[FCUV].
+ Internal:
+ (opt. checks) int_xadm_fc_prot_err. */
+ uint64_t rqo : 1; /**< [ 21: 21](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RQO].
+ Internal:
+ radm_qoverflow. */
+ uint64_t rauc : 1; /**< [ 22: 22](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RAUC].
+ Internal:
+ radm_unexp_cpl_err. */
+ uint64_t racur : 1; /**< [ 23: 23](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RACUR].
+ Internal:
+ radm_rcvd_cpl_ur. */
+ uint64_t racca : 1; /**< [ 24: 24](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RACCA].
+ Internal:
+ radm_rcvd_cpl_ca. */
+ uint64_t caar : 1; /**< [ 25: 25](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[CAAR]. */
+ uint64_t rarwdns : 1; /**< [ 26: 26](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RARWDNS].
+ Internal:
+ radm_rcvd_ur_req. */
+ uint64_t ramtlp : 1; /**< [ 27: 27](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RAMTLP].
+ Internal:
+ radm_mlf_tlp_err. */
+ uint64_t racpp : 1; /**< [ 28: 28](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RACPP].
+ Internal:
+ radm_rcvd_cpl_poisoned. */
+ uint64_t rawwpp : 1; /**< [ 29: 29](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RAWWPP].
+ Internal:
+ radm_rcvd_wreq_poisoned. */
+ uint64_t ecrc_e : 1; /**< [ 30: 30](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[ECRC_E]. */
+ uint64_t lofp : 1; /**< [ 31: 31](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[LOFP]. */
+ uint64_t datq_pe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[DATQ_PE]. */
+ uint64_t p_d0_sbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_D0_SBE]. */
+ uint64_t p_d0_dbe : 1; /**< [ 34: 34](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_D0_DBE]. */
+ uint64_t p_d1_sbe : 1; /**< [ 35: 35](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_D1_SBE]. */
+ uint64_t p_d1_dbe : 1; /**< [ 36: 36](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_D1_DBE]. */
+ uint64_t p_c_sbe : 1; /**< [ 37: 37](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_C_SBE]. */
+ uint64_t p_c_dbe : 1; /**< [ 38: 38](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[P_C_DBE]. */
+ uint64_t n_d0_sbe : 1; /**< [ 39: 39](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_D0_SBE]. */
+ uint64_t n_d0_dbe : 1; /**< [ 40: 40](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_D0_DBE]. */
+ uint64_t n_d1_sbe : 1; /**< [ 41: 41](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_D1_SBE]. */
+ uint64_t n_d1_dbe : 1; /**< [ 42: 42](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_D1_DBE]. */
+ uint64_t n_c_sbe : 1; /**< [ 43: 43](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_C_SBE]. */
+ uint64_t n_c_dbe : 1; /**< [ 44: 44](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[N_C_DBE]. */
+ uint64_t c_d0_sbe : 1; /**< [ 45: 45](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_D0_SBE]. */
+ uint64_t c_d0_dbe : 1; /**< [ 46: 46](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_D0_DBE]. */
+ uint64_t c_d1_sbe : 1; /**< [ 47: 47](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_D1_SBE]. */
+ uint64_t c_d1_dbe : 1; /**< [ 48: 48](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_D1_DBE]. */
+ uint64_t c_c_sbe : 1; /**< [ 49: 49](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_C_SBE]. */
+ uint64_t c_c_dbe : 1; /**< [ 50: 50](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[C_C_DBE]. */
+ uint64_t rtry_sbe : 1; /**< [ 51: 51](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RTRY_SBE]. */
+ uint64_t rtry_dbe : 1; /**< [ 52: 52](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[RTRY_DBE]. */
+ uint64_t qhdr_b0_sbe : 1; /**< [ 53: 53](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[QHDR_B0_SBE]. */
+ uint64_t qhdr_b0_dbe : 1; /**< [ 54: 54](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[QHDR_B0_DBE]. */
+ uint64_t qhdr_b1_sbe : 1; /**< [ 55: 55](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[QHDR_B1_SBE]. */
+ uint64_t qhdr_b1_dbe : 1; /**< [ 56: 56](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[QHDR_B1_DBE]. */
+ uint64_t m2s_pe : 1; /**< [ 57: 57](R/W1S/H) Reads or sets PEM(0..5)_DBG_INFO[M2S_PE]. */
+ uint64_t reserved_58_63 : 6;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_pemx_dbg_info_w1s bdk_pemx_dbg_info_w1s_t;
+
+static inline uint64_t BDK_PEMX_DBG_INFO_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_DBG_INFO_W1S(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x87e0c0000450ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000450ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0c0000450ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PEMX_DBG_INFO_W1S", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_DBG_INFO_W1S(a) bdk_pemx_dbg_info_w1s_t
+#define bustype_BDK_PEMX_DBG_INFO_W1S(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_PEMX_DBG_INFO_W1S(a) "PEMX_DBG_INFO_W1S"
+#define device_bar_BDK_PEMX_DBG_INFO_W1S(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_DBG_INFO_W1S(a) (a)
+#define arguments_BDK_PEMX_DBG_INFO_W1S(a) (a),-1,-1,-1
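+
+/* Usage sketch (editor's illustration, not part of the imported BDK source):
+ * force PEM()_DBG_INFO[RTE] through this W1S alias, e.g. to exercise an error
+ * handler. The field placement is taken from the CN88XX pass 2 view above; a
+ * flat (identity) physical mapping is assumed for the raw store, where real
+ * BDK code would go through its CSR accessor macros instead. */
+static inline void bdk_pemx_dbg_info_force_rte(unsigned long pem)
+{
+    bdk_pemx_dbg_info_w1s_t w1s;
+    w1s.u = 0;                /* zero bits leave PEM()_DBG_INFO untouched */
+    w1s.cn88xxp2.rte = 1;     /* writing a one sets the RTE status bit */
+    *(volatile uint64_t *)(uintptr_t)BDK_PEMX_DBG_INFO_W1S(pem) = w1s.u;
+}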
+
+/**
+ * Register (NCB) pem#_debug
+ *
+ * PEM Debug Register
+ * This register contains status of level interrupts for debugging purposes.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on cold reset.
+ */
+union bdk_pemx_debug
+{
+ uint64_t u;
+ struct bdk_pemx_debug_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_40_63 : 24;
+ uint64_t n_tlp_cnt : 8; /**< [ 39: 32](RO/H) The current count (depth) of the outbound NP TLP FIFO.
+ The value represents the number of used credits out of a total of 32. */
+ uint64_t reserved_31 : 1;
+ uint64_t c_tlp_cnt : 11; /**< [ 30: 20](RO/H) The current count (depth) of the outbound C TLP FIFO.
+ The value represents the number of used credits out of a total of 244. */
+ uint64_t reserved_19 : 1;
+ uint64_t p_tlp_cnt : 11; /**< [ 18: 8](RO/H) The current count (depth) of the outbound P TLP FIFO.
+ The value represents the number of used credits out of a total of 244. */
+ uint64_t inv_m2s_par : 1; /**< [ 7: 7](R/W) Invert the generated parity to be written into the M2S FIFO
+ to force a parity error when it is later read. */
+ uint64_t intval : 7; /**< [ 6: 0](RO/H) Status of INTX, PMEI, and AERI interrupts. */
+#else /* Word 0 - Little Endian */
+ uint64_t intval : 7; /**< [ 6: 0](RO/H) Status of INTX, PMEI, and AERI interrupts. */
+ uint64_t inv_m2s_par : 1; /**< [ 7: 7](R/W) Invert the generated parity to be written into the M2S FIFO
+ to force a parity error when it is later read. */
+ uint64_t p_tlp_cnt : 11; /**< [ 18: 8](RO/H) The current count (depth) of the outbound P TLP FIFO.
+ The value represents the number of used credits out of a total of 244. */
+ uint64_t reserved_19 : 1;
+ uint64_t c_tlp_cnt : 11; /**< [ 30: 20](RO/H) The current count (depth) of the outbound C TLP FIFO.
+ The value represents the number of used credits out of a total of 244. */
+ uint64_t reserved_31 : 1;
+ uint64_t n_tlp_cnt : 8; /**< [ 39: 32](RO/H) The current count (depth) of the outbound NP TLP FIFO.
+ The value represents the number of used credits out of a total of 32. */
+ uint64_t reserved_40_63 : 24;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pemx_debug_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_7_63 : 57;
+ uint64_t intval : 7; /**< [ 6: 0](RO/H) Status of INTX, PMEI, and AERI interrupts. */
+#else /* Word 0 - Little Endian */
+ uint64_t intval : 7; /**< [ 6: 0](RO/H) Status of INTX, PMEI, and AERI interrupts. */
+ uint64_t reserved_7_63 : 57;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_pemx_debug_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_6_63 : 58;
+ uint64_t intval : 6; /**< [ 5: 0](RO/H) Status of INTX, PMEI, and AERI interrupts. */
+#else /* Word 0 - Little Endian */
+ uint64_t intval : 6; /**< [ 5: 0](RO/H) Status of INTX, PMEI, and AERI interrupts. */
+ uint64_t reserved_6_63 : 58;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_pemx_debug_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t inv_m2s_par : 1; /**< [ 7: 7](R/W) Invert the generated parity to be written into the M2S FIFO
+ to force a parity error when it is later read. */
+ uint64_t reserved_6 : 1;
+ uint64_t intval : 6; /**< [ 5: 0](RO/H) Status of INTX, HP_PMEI, and AERI interrupts. */
+#else /* Word 0 - Little Endian */
+ uint64_t intval : 6; /**< [ 5: 0](RO/H) Status of INTX, HP_PMEI, and AERI interrupts. */
+ uint64_t reserved_6 : 1;
+ uint64_t inv_m2s_par : 1; /**< [ 7: 7](R/W) Invert the generated parity to be written into the M2S FIFO
+ to force a parity error when it is later read. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_pemx_debug_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_40_63 : 24;
+ uint64_t n_tlp_cnt : 8; /**< [ 39: 32](RO/H) The current count (depth) of the outbound NP TLP FIFO.
+ The value represents the number of used credits out of a total of 32. */
+ uint64_t reserved_31 : 1;
+ uint64_t c_tlp_cnt : 11; /**< [ 30: 20](RO/H) The current count (depth) of the outbound C TLP FIFO.
+ The value represents the number of used credits out of a total of 244. */
+ uint64_t reserved_19 : 1;
+ uint64_t p_tlp_cnt : 11; /**< [ 18: 8](RO/H) The current count (depth) of the outbound P TLP FIFO.
+ The value represents the number of used credits out of a total of 244. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t intval : 6; /**< [ 5: 0](RO/H) Status of INTX, PMEI, and AERI interrupts. */
+#else /* Word 0 - Little Endian */
+ uint64_t intval : 6; /**< [ 5: 0](RO/H) Status of INTX, PMEI, and AERI interrupts. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t p_tlp_cnt : 11; /**< [ 18: 8](RO/H) The current count (depth) of the outbound P TLP FIFO.
+ The value represents the number of used credits out of a total of 244. */
+ uint64_t reserved_19 : 1;
+ uint64_t c_tlp_cnt : 11; /**< [ 30: 20](RO/H) The current count (depth) of the outbound C TLP FIFO.
+ The value represents the number of used credits out of a total of 244. */
+ uint64_t reserved_31 : 1;
+ uint64_t n_tlp_cnt : 8; /**< [ 39: 32](RO/H) The current count (depth) of the outbound NP TLP FIFO.
+ The value represents the number of used credits out of a total of 32. */
+ uint64_t reserved_40_63 : 24;
+#endif /* Word 0 - End */
+ } cn83xx;
+ struct bdk_pemx_debug_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t inv_m2s_par : 1; /**< [ 7: 7](R/W) Invert the generated parity to be written into the M2S FIFO
+ to force a parity error when it is later read. */
+ uint64_t intval : 7; /**< [ 6: 0](RO/H) Status of INTX, PMEI, and AERI interrupts. */
+#else /* Word 0 - Little Endian */
+ uint64_t intval : 7; /**< [ 6: 0](RO/H) Status of INTX, PMEI, and AERI interrupts. */
+ uint64_t inv_m2s_par : 1; /**< [ 7: 7](R/W) Invert the generated parity to be written into the M2S FIFO
+ to force a parity error when it is later read. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_pemx_debug bdk_pemx_debug_t;
+
+static inline uint64_t BDK_PEMX_DEBUG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_DEBUG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x87e0c0000480ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000480ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0c0000480ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000100ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_DEBUG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_DEBUG(a) bdk_pemx_debug_t
+#define bustype_BDK_PEMX_DEBUG(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_DEBUG(a) "PEMX_DEBUG"
+#define device_bar_BDK_PEMX_DEBUG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_DEBUG(a) (a)
+#define arguments_BDK_PEMX_DEBUG(a) (a),-1,-1,-1
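+
+/* Usage sketch (editor's illustration, not part of the imported BDK source):
+ * read back the outbound posted TLP FIFO depth using the generic "s" view
+ * above (the CN83XX/CN88XX pass 2 layout). A flat physical mapping is
+ * assumed for the raw read. */
+static inline unsigned bdk_pemx_debug_posted_fifo_depth(unsigned long pem)
+{
+    bdk_pemx_debug_t dbg;
+    dbg.u = *(volatile uint64_t *)(uintptr_t)BDK_PEMX_DEBUG(pem);
+    return dbg.s.p_tlp_cnt;   /* used credits, out of 244 total */
+}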
+
+/**
+ * Register (NCB) pem#_diag_status
+ *
+ * PEM Diagnostic Status Register
+ * This register contains selection control for the core diagnostic bus.
+ *
+ * This register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on MAC reset.
+ */
+union bdk_pemx_diag_status
+{
+ uint64_t u;
+ struct bdk_pemx_diag_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pemx_diag_status_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_10_63 : 54;
+ uint64_t ltssm : 6; /**< [ 9: 4](RO/H) Current smlh_ltssm_state. */
+    uint64_t pwrdwn                : 4;  /**< [  3:  0](RO/H) Current mac_phy_powerdown state.
+                                                                 0x0 = D0.
+                                                                 0x1 = D1.
+                                                                 0x2 = D2.
+                                                                 0x3 = D3.
+                                                                 0x4 - 0xF = Reserved. */
+#else /* Word 0 - Little Endian */
+    uint64_t pwrdwn                : 4;  /**< [  3:  0](RO/H) Current mac_phy_powerdown state.
+                                                                 0x0 = D0.
+                                                                 0x1 = D1.
+                                                                 0x2 = D2.
+                                                                 0x3 = D3.
+                                                                 0x4 - 0xF = Reserved. */
+ uint64_t ltssm : 6; /**< [ 9: 4](RO/H) Current smlh_ltssm_state. */
+ uint64_t reserved_10_63 : 54;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_pemx_diag_status_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t pwrdwn : 3; /**< [ 8: 6](RO/H) Current mac_phy_powerdown state. */
+ uint64_t pm_dst : 3; /**< [ 5: 3](RO/H) Current power management DSTATE. */
+ uint64_t pm_stat : 1; /**< [ 2: 2](RO) Power management status. */
+ uint64_t pm_en : 1; /**< [ 1: 1](RO) Power management event enable. */
+ uint64_t aux_en : 1; /**< [ 0: 0](RO) Auxiliary power enable. Always read as zero as auxiliary power is not supported. */
+#else /* Word 0 - Little Endian */
+ uint64_t aux_en : 1; /**< [ 0: 0](RO) Auxiliary power enable. Always read as zero as auxiliary power is not supported. */
+ uint64_t pm_en : 1; /**< [ 1: 1](RO) Power management event enable. */
+ uint64_t pm_stat : 1; /**< [ 2: 2](RO) Power management status. */
+ uint64_t pm_dst : 3; /**< [ 5: 3](RO/H) Current power management DSTATE. */
+ uint64_t pwrdwn : 3; /**< [ 8: 6](RO/H) Current mac_phy_powerdown state. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_pemx_diag_status_cn81xx cn88xx; */
+ struct bdk_pemx_diag_status_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t pwrdwn : 3; /**< [ 8: 6](RO/H) Current mac_phy_powerdown state. */
+ uint64_t pm_dst : 3; /**< [ 5: 3](RO/H) Current power management DSTATE. */
+ uint64_t pm_stat : 1; /**< [ 2: 2](RO/H) Power management status. */
+ uint64_t pm_en : 1; /**< [ 1: 1](RO/H) Power management event enable. */
+ uint64_t aux_en : 1; /**< [ 0: 0](RO/H) Auxiliary power enable. */
+#else /* Word 0 - Little Endian */
+ uint64_t aux_en : 1; /**< [ 0: 0](RO/H) Auxiliary power enable. */
+ uint64_t pm_en : 1; /**< [ 1: 1](RO/H) Power management event enable. */
+ uint64_t pm_stat : 1; /**< [ 2: 2](RO/H) Power management status. */
+ uint64_t pm_dst : 3; /**< [ 5: 3](RO/H) Current power management DSTATE. */
+ uint64_t pwrdwn : 3; /**< [ 8: 6](RO/H) Current mac_phy_powerdown state. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pemx_diag_status bdk_pemx_diag_status_t;
+
+static inline uint64_t BDK_PEMX_DIAG_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_DIAG_STATUS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x87e0c0000020ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000020ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0c0000020ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000010ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_DIAG_STATUS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_DIAG_STATUS(a) bdk_pemx_diag_status_t
+#define bustype_BDK_PEMX_DIAG_STATUS(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_DIAG_STATUS(a) "PEMX_DIAG_STATUS"
+#define device_bar_BDK_PEMX_DIAG_STATUS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_DIAG_STATUS(a) (a)
+#define arguments_BDK_PEMX_DIAG_STATUS(a) (a),-1,-1,-1
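+
+/* Usage sketch (editor's illustration, not part of the imported BDK source):
+ * sample the current LTSSM state on a CN9XXX part using the cn9 view above.
+ * A flat physical mapping is assumed for the raw read. */
+static inline unsigned bdk_pemx_diag_status_ltssm(unsigned long pem)
+{
+    bdk_pemx_diag_status_t diag;
+    diag.u = *(volatile uint64_t *)(uintptr_t)BDK_PEMX_DIAG_STATUS(pem);
+    return diag.cn9.ltssm;    /* current smlh_ltssm_state, CN9XXX layout */
+}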
+
+/**
+ * Register (NCB) pem#_dis_port
+ *
+ * PEM Disable Port Register
+ * This register controls whether traffic is allowed to be sent out the PCIe link.
+ *
+ * This register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_dis_port
+{
+ uint64_t u;
+ struct bdk_pemx_dis_port_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+    uint64_t dis_port              : 1;  /**< [  0:  0](R/W1C/H) When set, outbound reads and writes are disabled (dropped) and reads will
+                                                                 return completion with fault over NCBI or EBUS. Software must clear this bit after
+                                                                 power-on reset to start normal activity. Further, this bit will be set by
+                                                                 hardware when either MAC reset or core reset completes an assertion phase.
+                                                                 Writing a one to this location clears the bit and will allow outbound operations
+                                                                 to be sent to the MAC at the beginning of the next transfer. This bit cannot
+                                                                 be set while PEM()_ON[PEMOOR] is set. */
+#else /* Word 0 - Little Endian */
+    uint64_t dis_port              : 1;  /**< [  0:  0](R/W1C/H) When set, outbound reads and writes are disabled (dropped) and reads will
+                                                                 return completion with fault over NCBI or EBUS. Software must clear this bit after
+                                                                 power-on reset to start normal activity. Further, this bit will be set by
+                                                                 hardware when either MAC reset or core reset completes an assertion phase.
+                                                                 Writing a one to this location clears the bit and will allow outbound operations
+                                                                 to be sent to the MAC at the beginning of the next transfer. This bit cannot
+                                                                 be set while PEM()_ON[PEMOOR] is set. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_dis_port_s cn; */
+};
+typedef union bdk_pemx_dis_port bdk_pemx_dis_port_t;
+
+static inline uint64_t BDK_PEMX_DIS_PORT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_DIS_PORT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000048ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_DIS_PORT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_DIS_PORT(a) bdk_pemx_dis_port_t
+#define bustype_BDK_PEMX_DIS_PORT(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_DIS_PORT(a) "PEMX_DIS_PORT"
+#define device_bar_BDK_PEMX_DIS_PORT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_DIS_PORT(a) (a)
+#define arguments_BDK_PEMX_DIS_PORT(a) (a),-1,-1,-1
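+
+/* Usage sketch (editor's illustration, not part of the imported BDK source):
+ * re-enable outbound traffic after a reset. [DIS_PORT] is W1C, so writing a
+ * one clears it and outbound operations resume with the next transfer. A
+ * flat physical mapping is assumed for the raw store. */
+static inline void bdk_pemx_dis_port_clear(unsigned long pem)
+{
+    bdk_pemx_dis_port_t dis;
+    dis.u = 0;
+    dis.s.dis_port = 1;       /* W1C: a one here clears the bit */
+    *(volatile uint64_t *)(uintptr_t)BDK_PEMX_DIS_PORT(pem) = dis.u;
+}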
+
+/**
+ * Register (NCB) pem#_ebi_tlp_credits
+ *
+ * PEM EBUS TLP Credits Register
+ * This register specifies the number of credits for use in moving TLPs. When this register is
+ * written, the credit values are reset to the register value. This register is for diagnostic
+ * use only, and should only be written when PEM()_CTL_STATUS[LNK_ENB] is clear.
+ *
+ * This register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on MAC reset.
+ */
+union bdk_pemx_ebi_tlp_credits
+{
+ uint64_t u;
+ struct bdk_pemx_ebi_tlp_credits_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+    uint64_t ebi_cpl               : 11; /**< [ 31: 21](R/W) TLP 32 B credits for completion TLPs in the PEM's inbound EBUS buffers.
+                                                                 Legal values are 0x21 to 0x100. */
+    uint64_t ebi_np                : 10; /**< [ 20: 11](R/W) TLP headers for non-posted TLPs in the PEM's inbound EBUS buffers.
+                                                                 Legal values are 0x1 to 0x20. */
+    uint64_t ebi_p                 : 11; /**< [ 10:  0](R/W) TLP 32 B credits for posted TLPs in the PEM's inbound EBUS buffers.
+                                                                 Legal values are 0x21 to 0x100. */
+#else /* Word 0 - Little Endian */
+    uint64_t ebi_p                 : 11; /**< [ 10:  0](R/W) TLP 32 B credits for posted TLPs in the PEM's inbound EBUS buffers.
+                                                                 Legal values are 0x21 to 0x100. */
+    uint64_t ebi_np                : 10; /**< [ 20: 11](R/W) TLP headers for non-posted TLPs in the PEM's inbound EBUS buffers.
+                                                                 Legal values are 0x1 to 0x20. */
+    uint64_t ebi_cpl               : 11; /**< [ 31: 21](R/W) TLP 32 B credits for completion TLPs in the PEM's inbound EBUS buffers.
+                                                                 Legal values are 0x21 to 0x100. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_ebi_tlp_credits_s cn; */
+};
+typedef union bdk_pemx_ebi_tlp_credits bdk_pemx_ebi_tlp_credits_t;
+
+static inline uint64_t BDK_PEMX_EBI_TLP_CREDITS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_EBI_TLP_CREDITS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000028ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_EBI_TLP_CREDITS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_EBI_TLP_CREDITS(a) bdk_pemx_ebi_tlp_credits_t
+#define bustype_BDK_PEMX_EBI_TLP_CREDITS(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_EBI_TLP_CREDITS(a) "PEMX_EBI_TLP_CREDITS"
+#define device_bar_BDK_PEMX_EBI_TLP_CREDITS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_EBI_TLP_CREDITS(a) (a)
+#define arguments_BDK_PEMX_EBI_TLP_CREDITS(a) (a),-1,-1,-1
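+
+/* Usage sketch (editor's illustration, not part of the imported BDK source):
+ * program the inbound EBUS TLP credits. The values below sit at the top of
+ * the documented legal ranges and are purely illustrative; real tuning is
+ * system-specific. Per the register description this should only be done
+ * while PEM()_CTL_STATUS[LNK_ENB] is clear (not checked here). A flat
+ * physical mapping is assumed for the raw store. */
+static inline void bdk_pemx_ebi_tlp_credits_set_max(unsigned long pem)
+{
+    bdk_pemx_ebi_tlp_credits_t cred;
+    cred.u = 0;
+    cred.s.ebi_p   = 0x100;   /* posted 32 B credits, legal 0x21-0x100 */
+    cred.s.ebi_np  = 0x20;    /* non-posted headers, legal 0x1-0x20 */
+    cred.s.ebi_cpl = 0x100;   /* completion 32 B credits, legal 0x21-0x100 */
+    *(volatile uint64_t *)(uintptr_t)BDK_PEMX_EBI_TLP_CREDITS(pem) = cred.u;
+}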
+
+/**
+ * Register (NCB) pem#_ebo_fifo_status
+ *
+ * PEM EBO Offloading FIFO Status Register
+ * This register contains status about the PEM EBO offloading FIFOs.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_ebo_fifo_status
+{
+ uint64_t u;
+ struct bdk_pemx_ebo_fifo_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_54_63 : 10;
+    uint64_t c_cmd_volume          : 6;  /**< [ 53: 48](RO/H) Reports the number of valid entries currently held in the EBO completion
+                                                                 offloading FIFO. Each entry represents a beat of the EBO bus related to a
+                                                                 completion operation and the value read can range from 0x0 to a maximum of 0x20
+                                                                 which would represent completely full. For diagnostic use only. */
+ uint64_t reserved_46_47 : 2;
+ uint64_t n_cmd_volume : 6; /**< [ 45: 40](RO/H) Reports the number of valid entries currently held in the EBO non-posted
+ offloading FIFO. Each entry represents a beat of the EBO bus related to a
+ Non-Posted operation and the value read can range from 0x0 to a maximum of 0x20
+ which would represent completely full.
+ For diagnostic use only. */
+ uint64_t reserved_38_39 : 2;
+ uint64_t p_cmd_volume : 6; /**< [ 37: 32](RO/H) Reports the number of valid entries currently held in the EBO posted offloading
+ FIFO. Each entry represents a beat of the EBO bus related to a memory store and
+ the value read can range from 0x0 to a maximum of 0x20 which would represent
+ completely full.
+ For diagnostic use only. */
+ uint64_t c_data_volume : 8; /**< [ 31: 24](RO/H) Reports the number of valid EBO completion data beats currently held in the
+ EBO completion buffer. Each entry represents a beat of the EBO bus related to a
+ completion operation and the value read can range from 0x0 to a maximum of 0x40
+ which would represent completely full. For diagnostic use only.
+
+ Internal:
+ Maximum is 32 for 512b EBUS, 64 for 256b EBUS, 128 for 128b EBUS. */
+ uint64_t reserved_20_23 : 4;
+ uint64_t n_data_volume : 8; /**< [ 19: 12](RO/H) Reports the number of valid entries currently held in the EBO non-posted
+ offloading FIFO. Each entry represents a beat of the EBO bus related to a
+ Non-Posted operation and the value read can range from 0x0 to a maximum of 0x40
+ which would represent completely full.
+ For diagnostic use only.
+
+ Internal:
+ Maximum is 32 for 512b EBUS, 64 for 256b EBUS, 128 for 128b EBUS. */
+ uint64_t reserved_8_11 : 4;
+ uint64_t p_data_volume : 8; /**< [ 7: 0](RO/H) Reports the number of valid entries currently held in the EBO posted offloading
+ FIFO. Each entry represents a beat of the EBO bus related to a memory store and
+ the value read can range from 0x0 to a maximum of 0x40 which would represent
+ completely full.
+ For diagnostic use only.
+
+ Internal:
+ Maximum is 32 for 512b EBUS, 64 for 256b EBUS, 128 for 128b EBUS. */
+#else /* Word 0 - Little Endian */
+ uint64_t p_data_volume : 8; /**< [ 7: 0](RO/H) Reports the number of valid entries currently held in the EBO posted offloading
+ FIFO. Each entry represents a beat of the EBO bus related to a memory store and
+ the value read can range from 0x0 to a maximum of 0x40 which would represent
+ completely full.
+ For diagnostic use only.
+
+ Internal:
+ Maximum is 32 for 512b EBUS, 64 for 256b EBUS, 128 for 128b EBUS. */
+ uint64_t reserved_8_11 : 4;
+ uint64_t n_data_volume : 8; /**< [ 19: 12](RO/H) Reports the number of valid entries currently held in the EBO non-posted
+ offloading FIFO. Each entry represents a beat of the EBO bus related to a
+ Non-Posted operation and the value read can range from 0x0 to a maximum of 0x40
+ which would represent completely full.
+ For diagnostic use only.
+
+ Internal:
+ Maximum is 32 for 512b EBUS, 64 for 256b EBUS, 128 for 128b EBUS. */
+ uint64_t reserved_20_23 : 4;
+ uint64_t c_data_volume : 8; /**< [ 31: 24](RO/H) Reports the number of valid EBO completion data beats currently held in the
+ EBO completion buffer. Each entry represents a beat of the EBO bus related to a
+ completion operation and the value read can range from 0x0 to a maximum of 0x40
+ which would represent completely full. For diagnostic use only.
+
+ Internal:
+ Maximum is 32 for 512b EBUS, 64 for 256b EBUS, 128 for 128b EBUS. */
+ uint64_t p_cmd_volume : 6; /**< [ 37: 32](RO/H) Reports the number of valid entries currently held in the EBO posted offloading
+ FIFO. Each entry represents a beat of the EBO bus related to a memory store and
+ the value read can range from 0x0 to a maximum of 0x20 which would represent
+ completely full.
+ For diagnostic use only. */
+ uint64_t reserved_38_39 : 2;
+ uint64_t n_cmd_volume : 6; /**< [ 45: 40](RO/H) Reports the number of valid entries currently held in the EBO non-posted
+ offloading FIFO. Each entry represents a beat of the EBO bus related to a
+ Non-Posted operation and the value read can range from 0x0 to a maximum of 0x20
+ which would represent completely full.
+ For diagnostic use only. */
+ uint64_t reserved_46_47 : 2;
+    uint64_t c_cmd_volume          : 6;  /**< [ 53: 48](RO/H) Reports the number of valid entries currently held in the EBO completion
+                                                                 offloading FIFO. Each entry represents a beat of the EBO bus related to a
+                                                                 completion operation and the value read can range from 0x0 to a maximum of 0x20
+                                                                 which would represent completely full. For diagnostic use only. */
+ uint64_t reserved_54_63 : 10;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_ebo_fifo_status_s cn; */
+};
+typedef union bdk_pemx_ebo_fifo_status bdk_pemx_ebo_fifo_status_t;
+
+static inline uint64_t BDK_PEMX_EBO_FIFO_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_EBO_FIFO_STATUS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000130ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_EBO_FIFO_STATUS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_EBO_FIFO_STATUS(a) bdk_pemx_ebo_fifo_status_t
+#define bustype_BDK_PEMX_EBO_FIFO_STATUS(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_EBO_FIFO_STATUS(a) "PEMX_EBO_FIFO_STATUS"
+#define device_bar_BDK_PEMX_EBO_FIFO_STATUS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_EBO_FIFO_STATUS(a) (a)
+#define arguments_BDK_PEMX_EBO_FIFO_STATUS(a) (a),-1,-1,-1
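+
+/* Usage sketch (editor's illustration, not part of the imported BDK source):
+ * poll the posted offloading FIFO occupancy. The documented ceiling for
+ * [P_DATA_VOLUME] is 0x40 (completely full), so this reports whether the
+ * FIFO has filled. A flat physical mapping is assumed for the raw read. */
+static inline int bdk_pemx_ebo_posted_fifo_full(unsigned long pem)
+{
+    bdk_pemx_ebo_fifo_status_t st;
+    st.u = *(volatile uint64_t *)(uintptr_t)BDK_PEMX_EBO_FIFO_STATUS(pem);
+    return st.s.p_data_volume >= 0x40;  /* 0x40 = completely full */
+}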
+
+/**
+ * Register (NCB) pem#_ebus_ctl
+ *
+ * PEM EBUS Control Register
+ * This register contains EBUS related control bits.
+ *
+ * This register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_ebus_ctl
+{
+ uint64_t u;
+ struct bdk_pemx_ebus_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t ebo_stf : 1; /**< [ 32: 32](R/W) If set, force store and forward mode for offloading FIFOs on outbound EBUS.
+ This might be useful in a system with an EBUS which cannot keep up with
+ the PCIe link bandwidth (e.g. 128B EBUS or many idle cycles on EBUS) where
+ idle cycles introduced in the packet stream could interfere with NCBO
+ performance. In general it should not be set. */
+ uint64_t reserved_16_31 : 16;
+ uint64_t clken_force : 1; /**< [ 15: 15](R/W) Force clock enable on inbound EBUS to be always asserted. For diagnostic use only. */
+ uint64_t erom_sel : 1; /**< [ 14: 14](R/W) If set, inbound PF EROM BAR accesses are directed to EBUS instead of NCB. This
+ must be clear when the PEM is configured for RC mode.
+
+ For CNXXXX this bit must be clear. */
+ uint64_t vf_bar2_sel : 1; /**< [ 13: 13](RO) If set, inbound VF BAR2 accesses are directed to EBUS instead of NCB. This bit is
+ hard-coded to 1. */
+ uint64_t vf_bar4_sel : 1; /**< [ 12: 12](RO) If set, inbound VF BAR4 accesses are directed to EBUS instead of NCB. This bit is
+ hard-coded to 1. */
+ uint64_t vf_bar0_sel : 1; /**< [ 11: 11](RO) If set, inbound VF BAR0 accesses are directed to EBUS instead of NCB. This bit is
+ hard-coded to 1. */
+ uint64_t pf_bar2_sel : 1; /**< [ 10: 10](R/W) If set, inbound PF BAR2 accesses are directed to EBUS instead of NCB. In RC mode,
+ registers PEM()_P2N_BAR2_START / PEM()_BAR_CTL[BAR2_SIZ] are used to determine a BAR2 hit
+ rather than standard PCIe config registers.
+
+ For CNXXXX this bit must be clear. */
+ uint64_t pf_bar4_sel : 1; /**< [ 9: 9](R/W) If set, inbound PF BAR4 accesses are directed to EBUS instead of NCB. In RC mode,
+ registers PEM()_P2N_BAR4_START / PEM()_BAR_CTL[BAR4_SIZ] are used to determine a BAR4 hit
+ rather than standard PCIe config registers.
+
+ For CNXXXX this bit must be clear. */
+ uint64_t pf_bar0_sel : 1; /**< [ 8: 8](R/W) If set, inbound PF BAR0 accesses are directed to EBUS instead of NCB. In RC mode,
+ registers PEM()_P2N_BAR0_START / PEM()_BAR_CTL[BAR0_SIZ] are used to determine a BAR0 hit
+ rather than standard PCIe config registers.
+
+ For CNXXXX this bit must be set. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t ntlp_ro_dis : 1; /**< [ 5: 5](R/W) Relaxed ordering disable for non-posted TLPs. Forces relaxed ordering bit off
+ when incoming non-posted TLPs arrive targeting EBUS. */
+ uint64_t inv_par : 1; /**< [ 4: 4](R/W) When set, causes the parity bit on inbound EBUS to be inverted. This bit is for
+ debug only. */
+ uint64_t atomic_dis : 1; /**< [ 3: 3](R/W) If set, incoming atomics targeting EBUS are discarded and a completion with
+ status of unsupported request is returned to the sender.
+
+ This bit must be set. */
+ uint64_t ctlp_ro_dis : 1; /**< [ 2: 2](R/W) Relaxed ordering disable for completion TLPs. Forces relaxed ordering bit off
+ when incoming completion TLPs arrive targeting EBUS. */
+ uint64_t ptlp_ro_dis : 1; /**< [ 1: 1](R/W) Relaxed ordering disable for posted TLPs. Forces relaxed ordering bit off when
+ incoming posted TLPs arrive targeting EBUS. */
+ uint64_t vdm_dis : 1; /**< [ 0: 0](R/W) If set, incoming vendor defined messages from PCIe will be discarded rather than
+ forwarded on EBUS.
+
+ For CNXXXX this bit must be set. */
+#else /* Word 0 - Little Endian */
+ uint64_t vdm_dis : 1; /**< [ 0: 0](R/W) If set, incoming vendor defined messages from PCIe will be discarded rather than
+ forwarded on EBUS.
+
+ For CNXXXX this bit must be set. */
+ uint64_t ptlp_ro_dis : 1; /**< [ 1: 1](R/W) Relaxed ordering disable for posted TLPs. Forces relaxed ordering bit off when
+ incoming posted TLPs arrive targeting EBUS. */
+ uint64_t ctlp_ro_dis : 1; /**< [ 2: 2](R/W) Relaxed ordering disable for completion TLPs. Forces relaxed ordering bit off
+ when incoming completion TLPs arrive targeting EBUS. */
+ uint64_t atomic_dis : 1; /**< [ 3: 3](R/W) If set, incoming atomics targeting EBUS are discarded and a completion with
+ status of unsupported request is returned to the sender.
+
+ This bit must be set. */
+ uint64_t inv_par : 1; /**< [ 4: 4](R/W) When set, causes the parity bit on inbound EBUS to be inverted. This bit is for
+ debug only. */
+ uint64_t ntlp_ro_dis : 1; /**< [ 5: 5](R/W) Relaxed ordering disable for non-posted TLPs. Forces relaxed ordering bit off
+ when incoming non-posted TLPs arrive targeting EBUS. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t pf_bar0_sel : 1; /**< [ 8: 8](R/W) If set, inbound PF BAR0 accesses are directed to EBUS instead of NCB. In RC mode,
+ registers PEM()_P2N_BAR0_START / PEM()_BAR_CTL[BAR0_SIZ] are used to determine a BAR0 hit
+ rather than standard PCIe config registers.
+
+ For CNXXXX this bit must be set. */
+ uint64_t pf_bar4_sel : 1; /**< [ 9: 9](R/W) If set, inbound PF BAR4 accesses are directed to EBUS instead of NCB. In RC mode,
+ registers PEM()_P2N_BAR4_START / PEM()_BAR_CTL[BAR4_SIZ] are used to determine a BAR4 hit
+ rather than standard PCIe config registers.
+
+ For CNXXXX this bit must be clear. */
+ uint64_t pf_bar2_sel : 1; /**< [ 10: 10](R/W) If set, inbound PF BAR2 accesses are directed to EBUS instead of NCB. In RC mode,
+ registers PEM()_P2N_BAR2_START / PEM()_BAR_CTL[BAR2_SIZ] are used to determine a BAR2 hit
+ rather than standard PCIe config registers.
+
+ For CNXXXX this bit must be clear. */
+ uint64_t vf_bar0_sel : 1; /**< [ 11: 11](RO) If set, inbound VF BAR0 accesses are directed to EBUS instead of NCB. This bit is
+ hard-coded to 1. */
+ uint64_t vf_bar4_sel : 1; /**< [ 12: 12](RO) If set, inbound VF BAR4 accesses are directed to EBUS instead of NCB. This bit is
+ hard-coded to 1. */
+ uint64_t vf_bar2_sel : 1; /**< [ 13: 13](RO) If set, inbound VF BAR2 accesses are directed to EBUS instead of NCB. This bit is
+ hard-coded to 1. */
+ uint64_t erom_sel : 1; /**< [ 14: 14](R/W) If set, inbound PF EROM BAR accesses are directed to EBUS instead of NCB. This
+ must be clear when the PEM is configured for RC mode.
+
+ For CNXXXX this bit must be clear. */
+ uint64_t clken_force : 1; /**< [ 15: 15](R/W) Force clock enable on inbound EBUS to be always asserted. For diagnostic use only. */
+ uint64_t reserved_16_31 : 16;
+ uint64_t ebo_stf : 1; /**< [ 32: 32](R/W) If set, force store and forward mode for offloading FIFOs on outbound EBUS.
+ This might be useful in a system with an EBUS which cannot keep up with
+ the PCIe link bandwidth (e.g. 128B EBUS or many idle cycles on EBUS) where
+ idle cycles introduced in the packet stream could interfere with NCBO
+ performance. In general it should not be set. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_ebus_ctl_s cn; */
+};
+typedef union bdk_pemx_ebus_ctl bdk_pemx_ebus_ctl_t;
+
+static inline uint64_t BDK_PEMX_EBUS_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_EBUS_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000078ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_EBUS_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_EBUS_CTL(a) bdk_pemx_ebus_ctl_t
+#define bustype_BDK_PEMX_EBUS_CTL(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_EBUS_CTL(a) "PEMX_EBUS_CTL"
+#define device_bar_BDK_PEMX_EBUS_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_EBUS_CTL(a) (a)
+#define arguments_BDK_PEMX_EBUS_CTL(a) (a),-1,-1,-1
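+
+/* Usage sketch (editor's illustration, not part of the imported BDK source):
+ * apply the "must be set"/"must be clear" CNXXXX constraints documented in
+ * the field descriptions above with a read-modify-write. A flat physical
+ * mapping is assumed; real code would also honor the RC/EP mode rules. */
+static inline void bdk_pemx_ebus_ctl_apply_required(unsigned long pem)
+{
+    volatile uint64_t *reg =
+        (volatile uint64_t *)(uintptr_t)BDK_PEMX_EBUS_CTL(pem);
+    bdk_pemx_ebus_ctl_t ctl;
+    ctl.u = *reg;
+    ctl.s.vdm_dis     = 1;    /* must be set for CNXXXX */
+    ctl.s.atomic_dis  = 1;    /* must be set */
+    ctl.s.pf_bar0_sel = 1;    /* must be set for CNXXXX */
+    ctl.s.pf_bar2_sel = 0;    /* must be clear for CNXXXX */
+    ctl.s.pf_bar4_sel = 0;    /* must be clear for CNXXXX */
+    ctl.s.erom_sel    = 0;    /* must be clear in RC mode */
+    *reg = ctl.u;
+}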
+
+/**
+ * Register (RSL) pem#_ecc_ena
+ *
+ * PEM ECC Enable Register
+ * Contains enables for TLP FIFO ECC RAMs.
+ */
+union bdk_pemx_ecc_ena
+{
+ uint64_t u;
+ struct bdk_pemx_ecc_ena_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_35_63 : 29;
+ uint64_t qhdr_b1_ena : 1; /**< [ 34: 34](R/W) ECC enable for Core's Q HDR Bank1 RAM. */
+ uint64_t qhdr_b0_ena : 1; /**< [ 33: 33](R/W) ECC enable for Core's Q HDR Bank0 RAM. */
+    uint64_t rtry_ena              : 1;  /**< [ 32: 32](R/W) ECC enable for Core's RETRY RAM. */
+ uint64_t reserved_11_31 : 21;
+ uint64_t m2s_c_ena : 1; /**< [ 10: 10](R/W) ECC enable for M2S Control FIFO. */
+ uint64_t m2s_d_ena : 1; /**< [ 9: 9](R/W) ECC enable for M2S Data FIFO. */
+ uint64_t c_c_ena : 1; /**< [ 8: 8](R/W) ECC enable for TLP CPL control FIFO. */
+ uint64_t c_d1_ena : 1; /**< [ 7: 7](R/W) ECC enable for TLP CPL data1 FIFO. */
+ uint64_t c_d0_ena : 1; /**< [ 6: 6](R/W) ECC enable for TLP CPL data0 FIFO. */
+ uint64_t n_c_ena : 1; /**< [ 5: 5](R/W) ECC enable for TLP NP control FIFO. */
+ uint64_t n_d1_ena : 1; /**< [ 4: 4](R/W) ECC enable for TLP NP data1 FIFO. */
+ uint64_t n_d0_ena : 1; /**< [ 3: 3](R/W) ECC enable for TLP NP data0 FIFO. */
+ uint64_t p_c_ena : 1; /**< [ 2: 2](R/W) ECC enable for TLP posted control FIFO. */
+ uint64_t p_d1_ena : 1; /**< [ 1: 1](R/W) ECC enable for TLP posted data1 FIFO. */
+ uint64_t p_d0_ena : 1; /**< [ 0: 0](R/W) ECC enable for TLP posted data0 FIFO. */
+#else /* Word 0 - Little Endian */
+ uint64_t p_d0_ena : 1; /**< [ 0: 0](R/W) ECC enable for TLP posted data0 FIFO. */
+ uint64_t p_d1_ena : 1; /**< [ 1: 1](R/W) ECC enable for TLP posted data1 FIFO. */
+ uint64_t p_c_ena : 1; /**< [ 2: 2](R/W) ECC enable for TLP posted control FIFO. */
+ uint64_t n_d0_ena : 1; /**< [ 3: 3](R/W) ECC enable for TLP NP data0 FIFO. */
+ uint64_t n_d1_ena : 1; /**< [ 4: 4](R/W) ECC enable for TLP NP data1 FIFO. */
+ uint64_t n_c_ena : 1; /**< [ 5: 5](R/W) ECC enable for TLP NP control FIFO. */
+ uint64_t c_d0_ena : 1; /**< [ 6: 6](R/W) ECC enable for TLP CPL data0 FIFO. */
+ uint64_t c_d1_ena : 1; /**< [ 7: 7](R/W) ECC enable for TLP CPL data1 FIFO. */
+ uint64_t c_c_ena : 1; /**< [ 8: 8](R/W) ECC enable for TLP CPL control FIFO. */
+ uint64_t m2s_d_ena : 1; /**< [ 9: 9](R/W) ECC enable for M2S Data FIFO. */
+ uint64_t m2s_c_ena : 1; /**< [ 10: 10](R/W) ECC enable for M2S Control FIFO. */
+ uint64_t reserved_11_31 : 21;
+    uint64_t rtry_ena              : 1;  /**< [ 32: 32](R/W) ECC enable for Core's RETRY RAM. */
+ uint64_t qhdr_b0_ena : 1; /**< [ 33: 33](R/W) ECC enable for Core's Q HDR Bank0 RAM. */
+ uint64_t qhdr_b1_ena : 1; /**< [ 34: 34](R/W) ECC enable for Core's Q HDR Bank1 RAM. */
+ uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pemx_ecc_ena_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_35_63 : 29;
+ uint64_t qhdr_b1_ena : 1; /**< [ 34: 34](R/W) ECC enable for Core's Q HDR Bank1 RAM. */
+ uint64_t qhdr_b0_ena : 1; /**< [ 33: 33](R/W) ECC enable for Core's Q HDR Bank0 RAM. */
+    uint64_t rtry_ena              : 1;  /**< [ 32: 32](R/W) ECC enable for Core's RETRY RAM. */
+ uint64_t reserved_9_31 : 23;
+ uint64_t c_c_ena : 1; /**< [ 8: 8](R/W) ECC enable for TLP CPL control FIFO. */
+ uint64_t c_d1_ena : 1; /**< [ 7: 7](R/W) ECC enable for TLP CPL data1 FIFO. */
+ uint64_t c_d0_ena : 1; /**< [ 6: 6](R/W) ECC enable for TLP CPL data0 FIFO. */
+ uint64_t n_c_ena : 1; /**< [ 5: 5](R/W) ECC enable for TLP NP control FIFO. */
+ uint64_t n_d1_ena : 1; /**< [ 4: 4](R/W) ECC enable for TLP NP data1 FIFO. */
+ uint64_t n_d0_ena : 1; /**< [ 3: 3](R/W) ECC enable for TLP NP data0 FIFO. */
+ uint64_t p_c_ena : 1; /**< [ 2: 2](R/W) ECC enable for TLP posted control FIFO. */
+ uint64_t p_d1_ena : 1; /**< [ 1: 1](R/W) ECC enable for TLP posted data1 FIFO. */
+ uint64_t p_d0_ena : 1; /**< [ 0: 0](R/W) ECC enable for TLP posted data0 FIFO. */
+#else /* Word 0 - Little Endian */
+ uint64_t p_d0_ena : 1; /**< [ 0: 0](R/W) ECC enable for TLP posted data0 FIFO. */
+ uint64_t p_d1_ena : 1; /**< [ 1: 1](R/W) ECC enable for TLP posted data1 FIFO. */
+ uint64_t p_c_ena : 1; /**< [ 2: 2](R/W) ECC enable for TLP posted control FIFO. */
+ uint64_t n_d0_ena : 1; /**< [ 3: 3](R/W) ECC enable for TLP NP data0 FIFO. */
+ uint64_t n_d1_ena : 1; /**< [ 4: 4](R/W) ECC enable for TLP NP data1 FIFO. */
+ uint64_t n_c_ena : 1; /**< [ 5: 5](R/W) ECC enable for TLP NP control FIFO. */
+ uint64_t c_d0_ena : 1; /**< [ 6: 6](R/W) ECC enable for TLP CPL data0 FIFO. */
+ uint64_t c_d1_ena : 1; /**< [ 7: 7](R/W) ECC enable for TLP CPL data1 FIFO. */
+ uint64_t c_c_ena : 1; /**< [ 8: 8](R/W) ECC enable for TLP CPL control FIFO. */
+ uint64_t reserved_9_31 : 23;
+    uint64_t rtry_ena              : 1;  /**< [ 32: 32](R/W) ECC enable for Core's RETRY RAM. */
+ uint64_t qhdr_b0_ena : 1; /**< [ 33: 33](R/W) ECC enable for Core's Q HDR Bank0 RAM. */
+ uint64_t qhdr_b1_ena : 1; /**< [ 34: 34](R/W) ECC enable for Core's Q HDR Bank1 RAM. */
+ uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_pemx_ecc_ena_cn81xx cn88xx; */
+ struct bdk_pemx_ecc_ena_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_11_63 : 53;
+ uint64_t m2s_c_ena : 1; /**< [ 10: 10](R/W) ECC enable for M2S Control FIFO. */
+ uint64_t m2s_d_ena : 1; /**< [ 9: 9](R/W) ECC enable for M2S Data FIFO. */
+ uint64_t c_c_ena : 1; /**< [ 8: 8](R/W) ECC enable for TLP CPL control FIFO. */
+ uint64_t c_d1_ena : 1; /**< [ 7: 7](R/W) ECC enable for TLP CPL data1 FIFO. */
+ uint64_t c_d0_ena : 1; /**< [ 6: 6](R/W) ECC enable for TLP CPL data0 FIFO. */
+ uint64_t n_c_ena : 1; /**< [ 5: 5](R/W) ECC enable for TLP NP control FIFO. */
+ uint64_t n_d1_ena : 1; /**< [ 4: 4](R/W) ECC enable for TLP NP data1 FIFO. */
+ uint64_t n_d0_ena : 1; /**< [ 3: 3](R/W) ECC enable for TLP NP data0 FIFO. */
+ uint64_t p_c_ena : 1; /**< [ 2: 2](R/W) ECC enable for TLP posted control FIFO. */
+ uint64_t p_d1_ena : 1; /**< [ 1: 1](R/W) ECC enable for TLP posted data1 FIFO. */
+ uint64_t p_d0_ena : 1; /**< [ 0: 0](R/W) ECC enable for TLP posted data0 FIFO. */
+#else /* Word 0 - Little Endian */
+ uint64_t p_d0_ena : 1; /**< [ 0: 0](R/W) ECC enable for TLP posted data0 FIFO. */
+ uint64_t p_d1_ena : 1; /**< [ 1: 1](R/W) ECC enable for TLP posted data1 FIFO. */
+ uint64_t p_c_ena : 1; /**< [ 2: 2](R/W) ECC enable for TLP posted control FIFO. */
+ uint64_t n_d0_ena : 1; /**< [ 3: 3](R/W) ECC enable for TLP NP data0 FIFO. */
+ uint64_t n_d1_ena : 1; /**< [ 4: 4](R/W) ECC enable for TLP NP data1 FIFO. */
+ uint64_t n_c_ena : 1; /**< [ 5: 5](R/W) ECC enable for TLP NP control FIFO. */
+ uint64_t c_d0_ena : 1; /**< [ 6: 6](R/W) ECC enable for TLP CPL data0 FIFO. */
+ uint64_t c_d1_ena : 1; /**< [ 7: 7](R/W) ECC enable for TLP CPL data1 FIFO. */
+ uint64_t c_c_ena : 1; /**< [ 8: 8](R/W) ECC enable for TLP CPL control FIFO. */
+ uint64_t m2s_d_ena : 1; /**< [ 9: 9](R/W) ECC enable for M2S Data FIFO. */
+ uint64_t m2s_c_ena : 1; /**< [ 10: 10](R/W) ECC enable for M2S Control FIFO. */
+ uint64_t reserved_11_63 : 53;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pemx_ecc_ena bdk_pemx_ecc_ena_t;
+
+static inline uint64_t BDK_PEMX_ECC_ENA(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_ECC_ENA(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x87e0c0000470ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000470ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0c0000470ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PEMX_ECC_ENA", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_ECC_ENA(a) bdk_pemx_ecc_ena_t
+#define bustype_BDK_PEMX_ECC_ENA(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_PEMX_ECC_ENA(a) "PEMX_ECC_ENA"
+#define device_bar_BDK_PEMX_ECC_ENA(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_ECC_ENA(a) (a)
+#define arguments_BDK_PEMX_ECC_ENA(a) (a),-1,-1,-1
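+
+/* Usage sketch (editor's illustration, not part of the imported BDK source):
+ * enable ECC on the nine TLP FIFO RAMs that every model of this register
+ * defines; model-specific RAMs (M2S, retry, Q HDR) are left untouched. A
+ * flat physical mapping is assumed for the read-modify-write. */
+static inline void bdk_pemx_ecc_ena_all_tlp(unsigned long pem)
+{
+    volatile uint64_t *reg =
+        (volatile uint64_t *)(uintptr_t)BDK_PEMX_ECC_ENA(pem);
+    bdk_pemx_ecc_ena_t ena;
+    ena.u = *reg;
+    ena.s.p_d0_ena = ena.s.p_d1_ena = ena.s.p_c_ena = 1;  /* posted FIFOs */
+    ena.s.n_d0_ena = ena.s.n_d1_ena = ena.s.n_c_ena = 1;  /* non-posted */
+    ena.s.c_d0_ena = ena.s.c_d1_ena = ena.s.c_c_ena = 1;  /* completion */
+    *reg = ena.u;
+}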
+
+/**
+ * Register (RSL) pem#_ecc_synd_ctrl
+ *
+ * PEM ECC Syndrome Control Register
+ * This register contains syndrome control for TLP FIFO ECC RAMs.
+ */
+union bdk_pemx_ecc_synd_ctrl
+{
+ uint64_t u;
+ struct bdk_pemx_ecc_synd_ctrl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_38_63 : 26;
+ uint64_t qhdr_b1_syn : 2; /**< [ 37: 36](R/W) Syndrome flip bits for Core's Q HDR Bank1 RAM. */
+ uint64_t qhdr_b0_syn : 2; /**< [ 35: 34](R/W) Syndrome flip bits for Core's Q HDR Bank0 RAM. */
+ uint64_t rtry_syn : 2; /**< [ 33: 32](R/W) Syndrome flip bits for Core's RETRY RAM. */
+ uint64_t reserved_22_31 : 10;
+ uint64_t m2s_c_syn : 2; /**< [ 21: 20](R/W) Syndrome flip bits for M2S Control FIFO. */
+ uint64_t m2s_d_syn : 2; /**< [ 19: 18](R/W) Syndrome flip bits for M2S Data FIFO. */
+ uint64_t c_c_syn : 2; /**< [ 17: 16](R/W) Syndrome flip bits for TLP CPL control FIFO. */
+ uint64_t c_d1_syn : 2; /**< [ 15: 14](R/W) Syndrome flip bits for TLP CPL data1 FIFO. */
+ uint64_t c_d0_syn : 2; /**< [ 13: 12](R/W) Syndrome flip bits for TLP CPL data0 FIFO. */
+ uint64_t n_c_syn : 2; /**< [ 11: 10](R/W) Syndrome flip bits for TLP NP control FIFO. */
+ uint64_t n_d1_syn : 2; /**< [ 9: 8](R/W) Syndrome flip bits for TLP NP data1 FIFO. */
+ uint64_t n_d0_syn : 2; /**< [ 7: 6](R/W) Syndrome flip bits for TLP NP data0 FIFO. */
+ uint64_t p_c_syn : 2; /**< [ 5: 4](R/W) Syndrome flip bits for TLP posted control FIFO. */
+ uint64_t p_d1_syn : 2; /**< [ 3: 2](R/W) Syndrome flip bits for TLP posted data1 FIFO. */
+ uint64_t p_d0_syn : 2; /**< [ 1: 0](R/W) Syndrome flip bits for TLP posted data0 FIFO. */
+#else /* Word 0 - Little Endian */
+ uint64_t p_d0_syn : 2; /**< [ 1: 0](R/W) Syndrome flip bits for TLP posted data0 FIFO. */
+ uint64_t p_d1_syn : 2; /**< [ 3: 2](R/W) Syndrome flip bits for TLP posted data1 FIFO. */
+ uint64_t p_c_syn : 2; /**< [ 5: 4](R/W) Syndrome flip bits for TLP posted control FIFO. */
+ uint64_t n_d0_syn : 2; /**< [ 7: 6](R/W) Syndrome flip bits for TLP NP data0 FIFO. */
+ uint64_t n_d1_syn : 2; /**< [ 9: 8](R/W) Syndrome flip bits for TLP NP data1 FIFO. */
+ uint64_t n_c_syn : 2; /**< [ 11: 10](R/W) Syndrome flip bits for TLP NP control FIFO. */
+ uint64_t c_d0_syn : 2; /**< [ 13: 12](R/W) Syndrome flip bits for TLP CPL data0 FIFO. */
+ uint64_t c_d1_syn : 2; /**< [ 15: 14](R/W) Syndrome flip bits for TLP CPL data1 FIFO. */
+ uint64_t c_c_syn : 2; /**< [ 17: 16](R/W) Syndrome flip bits for TLP CPL control FIFO. */
+ uint64_t m2s_d_syn : 2; /**< [ 19: 18](R/W) Syndrome flip bits for M2S Data FIFO. */
+ uint64_t m2s_c_syn : 2; /**< [ 21: 20](R/W) Syndrome flip bits for M2S Control FIFO. */
+ uint64_t reserved_22_31 : 10;
+ uint64_t rtry_syn : 2; /**< [ 33: 32](R/W) Syndrome flip bits for Core's RETRY RAM. */
+ uint64_t qhdr_b0_syn : 2; /**< [ 35: 34](R/W) Syndrome flip bits for Core's Q HDR Bank0 RAM. */
+ uint64_t qhdr_b1_syn : 2; /**< [ 37: 36](R/W) Syndrome flip bits for Core's Q HDR Bank1 RAM. */
+ uint64_t reserved_38_63 : 26;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pemx_ecc_synd_ctrl_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_38_63 : 26;
+ uint64_t qhdr_b1_syn : 2; /**< [ 37: 36](R/W) Syndrome flip bits for Core's Q HDR Bank1 RAM. */
+ uint64_t qhdr_b0_syn : 2; /**< [ 35: 34](R/W) Syndrome flip bits for Core's Q HDR Bank0 RAM. */
+ uint64_t rtry_syn : 2; /**< [ 33: 32](R/W) Syndrome flip bits for Core's RETRY RAM. */
+ uint64_t reserved_18_31 : 14;
+ uint64_t c_c_syn : 2; /**< [ 17: 16](R/W) Syndrome flip bits for TLP CPL control FIFO. */
+ uint64_t c_d1_syn : 2; /**< [ 15: 14](R/W) Syndrome flip bits for TLP CPL data1 FIFO. */
+ uint64_t c_d0_syn : 2; /**< [ 13: 12](R/W) Syndrome flip bits for TLP CPL data0 FIFO. */
+ uint64_t n_c_syn : 2; /**< [ 11: 10](R/W) Syndrome flip bits for TLP NP control FIFO. */
+ uint64_t n_d1_syn : 2; /**< [ 9: 8](R/W) Syndrome flip bits for TLP NP data1 FIFO. */
+ uint64_t n_d0_syn : 2; /**< [ 7: 6](R/W) Syndrome flip bits for TLP NP data0 FIFO. */
+ uint64_t p_c_syn : 2; /**< [ 5: 4](R/W) Syndrome flip bits for TLP posted control FIFO. */
+ uint64_t p_d1_syn : 2; /**< [ 3: 2](R/W) Syndrome flip bits for TLP posted data1 FIFO. */
+ uint64_t p_d0_syn : 2; /**< [ 1: 0](R/W) Syndrome flip bits for TLP posted data0 FIFO. */
+#else /* Word 0 - Little Endian */
+ uint64_t p_d0_syn : 2; /**< [ 1: 0](R/W) Syndrome flip bits for TLP posted data0 FIFO. */
+ uint64_t p_d1_syn : 2; /**< [ 3: 2](R/W) Syndrome flip bits for TLP posted data1 FIFO. */
+ uint64_t p_c_syn : 2; /**< [ 5: 4](R/W) Syndrome flip bits for TLP posted control FIFO. */
+ uint64_t n_d0_syn : 2; /**< [ 7: 6](R/W) Syndrome flip bits for TLP NP data0 FIFO. */
+ uint64_t n_d1_syn : 2; /**< [ 9: 8](R/W) Syndrome flip bits for TLP NP data1 FIFO. */
+ uint64_t n_c_syn : 2; /**< [ 11: 10](R/W) Syndrome flip bits for TLP NP control FIFO. */
+ uint64_t c_d0_syn : 2; /**< [ 13: 12](R/W) Syndrome flip bits for TLP CPL data0 FIFO. */
+ uint64_t c_d1_syn : 2; /**< [ 15: 14](R/W) Syndrome flip bits for TLP CPL data1 FIFO. */
+ uint64_t c_c_syn : 2; /**< [ 17: 16](R/W) Syndrome flip bits for TLP CPL control FIFO. */
+ uint64_t reserved_18_31 : 14;
+ uint64_t rtry_syn : 2; /**< [ 33: 32](R/W) Syndrome flip bits for Core's RETRY RAM. */
+ uint64_t qhdr_b0_syn : 2; /**< [ 35: 34](R/W) Syndrome flip bits for Core's Q HDR Bank0 RAM. */
+ uint64_t qhdr_b1_syn : 2; /**< [ 37: 36](R/W) Syndrome flip bits for Core's Q HDR Bank1 RAM. */
+ uint64_t reserved_38_63 : 26;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_pemx_ecc_synd_ctrl_cn81xx cn88xx; */
+ struct bdk_pemx_ecc_synd_ctrl_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_22_63 : 42;
+ uint64_t m2s_c_syn : 2; /**< [ 21: 20](R/W) Syndrome flip bits for M2S Control FIFO. */
+ uint64_t m2s_d_syn : 2; /**< [ 19: 18](R/W) Syndrome flip bits for M2S Data FIFO. */
+ uint64_t c_c_syn : 2; /**< [ 17: 16](R/W) Syndrome flip bits for TLP CPL control FIFO. */
+ uint64_t c_d1_syn : 2; /**< [ 15: 14](R/W) Syndrome flip bits for TLP CPL data1 FIFO. */
+ uint64_t c_d0_syn : 2; /**< [ 13: 12](R/W) Syndrome flip bits for TLP CPL data0 FIFO. */
+ uint64_t n_c_syn : 2; /**< [ 11: 10](R/W) Syndrome flip bits for TLP NP control FIFO. */
+ uint64_t n_d1_syn : 2; /**< [ 9: 8](R/W) Syndrome flip bits for TLP NP data1 FIFO. */
+ uint64_t n_d0_syn : 2; /**< [ 7: 6](R/W) Syndrome flip bits for TLP NP data0 FIFO. */
+ uint64_t p_c_syn : 2; /**< [ 5: 4](R/W) Syndrome flip bits for TLP posted control FIFO. */
+ uint64_t p_d1_syn : 2; /**< [ 3: 2](R/W) Syndrome flip bits for TLP posted data1 FIFO. */
+ uint64_t p_d0_syn : 2; /**< [ 1: 0](R/W) Syndrome flip bits for TLP posted data0 FIFO. */
+#else /* Word 0 - Little Endian */
+ uint64_t p_d0_syn : 2; /**< [ 1: 0](R/W) Syndrome flip bits for TLP posted data0 FIFO. */
+ uint64_t p_d1_syn : 2; /**< [ 3: 2](R/W) Syndrome flip bits for TLP posted data1 FIFO. */
+ uint64_t p_c_syn : 2; /**< [ 5: 4](R/W) Syndrome flip bits for TLP posted control FIFO. */
+ uint64_t n_d0_syn : 2; /**< [ 7: 6](R/W) Syndrome flip bits for TLP NP data0 FIFO. */
+ uint64_t n_d1_syn : 2; /**< [ 9: 8](R/W) Syndrome flip bits for TLP NP data1 FIFO. */
+ uint64_t n_c_syn : 2; /**< [ 11: 10](R/W) Syndrome flip bits for TLP NP control FIFO. */
+ uint64_t c_d0_syn : 2; /**< [ 13: 12](R/W) Syndrome flip bits for TLP CPL data0 FIFO. */
+ uint64_t c_d1_syn : 2; /**< [ 15: 14](R/W) Syndrome flip bits for TLP CPL data1 FIFO. */
+ uint64_t c_c_syn : 2; /**< [ 17: 16](R/W) Syndrome flip bits for TLP CPL control FIFO. */
+ uint64_t m2s_d_syn : 2; /**< [ 19: 18](R/W) Syndrome flip bits for M2S Data FIFO. */
+ uint64_t m2s_c_syn : 2; /**< [ 21: 20](R/W) Syndrome flip bits for M2S Control FIFO. */
+ uint64_t reserved_22_63 : 42;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pemx_ecc_synd_ctrl bdk_pemx_ecc_synd_ctrl_t;
+
+static inline uint64_t BDK_PEMX_ECC_SYND_CTRL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_ECC_SYND_CTRL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x87e0c0000478ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000478ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0c0000478ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PEMX_ECC_SYND_CTRL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_ECC_SYND_CTRL(a) bdk_pemx_ecc_synd_ctrl_t
+#define bustype_BDK_PEMX_ECC_SYND_CTRL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_PEMX_ECC_SYND_CTRL(a) "PEMX_ECC_SYND_CTRL"
+#define device_bar_BDK_PEMX_ECC_SYND_CTRL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_ECC_SYND_CTRL(a) (a)
+#define arguments_BDK_PEMX_ECC_SYND_CTRL(a) (a),-1,-1,-1
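+
+/* Usage sketch (editor's illustration, not part of the imported BDK source):
+ * drive the syndrome flip bits for the TLP posted data0 FIFO. Presumably
+ * (an assumption -- the two-bit field is not further documented here) one
+ * flip bit yields a correctable single-bit error and both bits a double-bit
+ * error. PEM()_ECC_ENA[P_D0_ENA] should be set for the error to be detected.
+ * A flat physical mapping is assumed for the read-modify-write. */
+static inline void bdk_pemx_ecc_inject_p_d0(unsigned long pem, int enable)
+{
+    volatile uint64_t *reg =
+        (volatile uint64_t *)(uintptr_t)BDK_PEMX_ECC_SYND_CTRL(pem);
+    bdk_pemx_ecc_synd_ctrl_t synd;
+    synd.u = *reg;
+    synd.s.p_d0_syn = enable ? 0x1 : 0x0;  /* flip one check bit, or none */
+    *reg = synd.u;
+}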
+
+/**
+ * Register (NCB) pem#_eco
+ *
+ * INTERNAL: PEM ECO Register
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on MAC reset.
+ */
+union bdk_pemx_eco
+{
+ uint64_t u;
+ struct bdk_pemx_eco_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t eco_rw : 8; /**< [ 7: 0](R/W) Internal:
+ Reserved for ECO usage. */
+#else /* Word 0 - Little Endian */
+ uint64_t eco_rw : 8; /**< [ 7: 0](R/W) Internal:
+ Reserved for ECO usage. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_eco_s cn; */
+};
+typedef union bdk_pemx_eco bdk_pemx_eco_t;
+
+static inline uint64_t BDK_PEMX_ECO(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_ECO(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000010ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000008ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_ECO", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_ECO(a) bdk_pemx_eco_t
+#define bustype_BDK_PEMX_ECO(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_ECO(a) "PEMX_ECO"
+#define device_bar_BDK_PEMX_ECO(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_ECO(a) (a)
+#define arguments_BDK_PEMX_ECO(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) pem#_end_merge
+ *
+ * PEM End Merge Register
+ * Any access (read or write) to this register over NCBO will create a merging barrier
+ * for both the write and read streams within PEM outbound pipelines such that no NCBO
+ * reads or writes received after this register's access will merge with any NCBO accesses
+ * that occurred prior to this register's access. Note that RSL accesses to this register
+ * will have no effect on merging.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_end_merge
+{
+ uint64_t u;
+ struct bdk_pemx_end_merge_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_end_merge_s cn; */
+};
+typedef union bdk_pemx_end_merge bdk_pemx_end_merge_t;
+
+static inline uint64_t BDK_PEMX_END_MERGE(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_END_MERGE(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000178ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_END_MERGE", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_END_MERGE(a) bdk_pemx_end_merge_t
+#define bustype_BDK_PEMX_END_MERGE(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_END_MERGE(a) "PEMX_END_MERGE"
+#define device_bar_BDK_PEMX_END_MERGE(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_END_MERGE(a) (a)
+#define arguments_BDK_PEMX_END_MERGE(a) (a),-1,-1,-1
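+
+/* Usage sketch (editor's illustration, not part of the imported BDK source):
+ * place a merge barrier between two groups of outbound accesses. Any read or
+ * write of this register works; a discarded read is used here. Note the
+ * access must arrive over NCBO -- RSL accesses do not create the barrier --
+ * so a flat physical mapping reaching the register over NCBO is assumed. */
+static inline void bdk_pemx_merge_barrier(unsigned long pem)
+{
+    (void)*(volatile uint64_t *)(uintptr_t)BDK_PEMX_END_MERGE(pem);
+}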
+
+/**
+ * Register (RSL) pem#_erom#
+ *
+ * PEM Expansion ROM Registers
+ * This register accesses the external EEPROM.
+ */
+union bdk_pemx_eromx
+{
+ uint64_t u;
+ struct bdk_pemx_eromx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t erom : 64; /**< [ 63: 0](R/W/H) PCIe express read transactions to BAR3 (through PCIEEP()_CFG012) will appear as
+ 8-byte RSL reads to this register.
+
+ Although 512 KB is advertised from PCIEEP()_CFG012, only the first 448 KB is
+ actually accessible, and reads above 448 KB will return zeros, writes are NOP.
+
+ Accessible through PEM2 if EP PEM0 is an RC, otherwise accessible through PEM0.
+ Access from a PEM that doesn't own the EEPROM will return fault. */
+#else /* Word 0 - Little Endian */
+ uint64_t erom : 64; /**< [ 63: 0](R/W/H) PCI Express read transactions to BAR3 (through PCIEEP()_CFG012) will appear as
+ 8-byte RSL reads to this register.
+
+ Although 512 KB is advertised from PCIEEP()_CFG012, only the first 448 KB is
+ actually accessible; reads above 448 KB return zeros and writes are NOPs.
+
+ Accessible through PEM2 if PEM0 is an RC; otherwise accessible through PEM0.
+ Access from a PEM that doesn't own the EEPROM will return a fault. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_eromx_s cn; */
+};
+typedef union bdk_pemx_eromx bdk_pemx_eromx_t;
+
+static inline uint64_t BDK_PEMX_EROMX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_EROMX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=65535)))
+ return 0x87e0c0080000ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0xffff);
+ __bdk_csr_fatal("PEMX_EROMX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_PEMX_EROMX(a,b) bdk_pemx_eromx_t
+#define bustype_BDK_PEMX_EROMX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_PEMX_EROMX(a,b) "PEMX_EROMX"
+#define device_bar_BDK_PEMX_EROMX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_EROMX(a,b) (a)
+#define arguments_BDK_PEMX_EROMX(a,b) (a),(b),-1,-1
+
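+/* Illustrative only, not part of the imported BDK sources: a minimal sketch of
+ dumping the first few expansion ROM words over RSL, assuming the
+ BDK_CSR_READ() helper from bdk-csr.h and the BDK's printf. Each read of
+ PEM()_EROM() returns one 8-byte word of the external EEPROM image; only the
+ PEM that owns the EEPROM may access it. */
+static inline void pemx_erom_dump_example(bdk_node_t node, int pem, int words)
+{
+ for (int i = 0; i < words; i++)
+ {
+ uint64_t data = BDK_CSR_READ(node, BDK_PEMX_EROMX(pem, i));
+ printf("EROM[%d] = 0x%016llx\n", i, (unsigned long long)data);
+ }
+}
+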
+/**
+ * Register (NCB) pem#_erom_bar_addr
+ *
+ * PEM EROM BAR Address Register
+ * This register configures PEM EROM BAR accesses targeted at NCBI.
+ * Fields in this register are only used when PEM()_EBUS_CTL[EROM_SEL]
+ * is clear and the PEM is configured for EP mode.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_erom_bar_addr
+{
+ uint64_t u;
+ struct bdk_pemx_erom_bar_addr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t wvirt : 1; /**< [ 63: 63](R/W) Virtual:
+ 0 = [RD_ADDR] is a physical address.
+ 1 = [RD_ADDR] is a virtual address. */
+ uint64_t pspi_en : 1; /**< [ 62: 62](R/W) If PEM()_EBUS_CTL[EROM_SEL] is clear and PEM is configured for EP mode,
+ setting this bit directs EROM BAR hits to a private bus connected
+ to the PSPI interface in MIO rather than to NCB. */
+ uint64_t reserved_53_61 : 9;
+ uint64_t rd_addr : 37; /**< [ 52: 16](R/W) Base address for PEM EROM BAR transactions, to which the transaction offset is
+ appended. This field is only used when PEM()_EBUS_CTL[EROM_SEL] is clear and PEM is configured for EP mode. */
+ uint64_t reserved_0_15 : 16;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_15 : 16;
+ uint64_t rd_addr : 37; /**< [ 52: 16](R/W) Base address for PEM EROM BAR transactions, to which the transaction offset is
+ appended. This field is only used when PEM()_EBUS_CTL[EROM_SEL] is clear and PEM is configured for EP mode. */
+ uint64_t reserved_53_61 : 9;
+ uint64_t pspi_en : 1; /**< [ 62: 62](R/W) If PEM()_EBUS_CTL[EROM_SEL] is clear and PEM is configured for EP mode,
+ setting this bit directs EROM BAR hits to a private bus connected
+ to the PSPI interface in MIO rather than to NCB. */
+ uint64_t wvirt : 1; /**< [ 63: 63](R/W) Virtual:
+ 0 = [RD_ADDR] is a physical address.
+ 1 = [RD_ADDR] is a virtual address. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_erom_bar_addr_s cn; */
+};
+typedef union bdk_pemx_erom_bar_addr bdk_pemx_erom_bar_addr_t;
+
+static inline uint64_t BDK_PEMX_EROM_BAR_ADDR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_EROM_BAR_ADDR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000150ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_EROM_BAR_ADDR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_EROM_BAR_ADDR(a) bdk_pemx_erom_bar_addr_t
+#define bustype_BDK_PEMX_EROM_BAR_ADDR(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_EROM_BAR_ADDR(a) "PEMX_EROM_BAR_ADDR"
+#define device_bar_BDK_PEMX_EROM_BAR_ADDR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_EROM_BAR_ADDR(a) (a)
+#define arguments_BDK_PEMX_EROM_BAR_ADDR(a) (a),-1,-1,-1
+
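+/* Illustrative only, not part of the imported BDK sources: a minimal sketch of
+ pointing the EP-mode EROM BAR at a physical base address, assuming the
+ BDK_CSR_MODIFY() helper from bdk-csr.h. [RD_ADDR] holds address bits <52:16>,
+ so the base must be 64 KB aligned and the low 16 bits are dropped. */
+static inline void pemx_erom_bar_set_example(bdk_node_t node, int pem, uint64_t base)
+{
+ BDK_CSR_MODIFY(c, node, BDK_PEMX_EROM_BAR_ADDR(pem),
+ c.s.wvirt = 0; /* [RD_ADDR] is a physical address */
+ c.s.pspi_en = 0; /* route EROM BAR hits to NCB, not PSPI */
+ c.s.rd_addr = base >> 16); /* bits <52:16> of the base */
+}
+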
+/**
+ * Register (NCB) pem#_flr_ctl
+ *
+ * PEM FLR Control Register
+ * This register provides function level reset controls.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on MAC cold reset.
+ */
+union bdk_pemx_flr_ctl
+{
+ uint64_t u;
+ struct bdk_pemx_flr_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_6_63 : 58;
+ uint64_t chge : 1; /**< [ 5: 5](R/W) When set, the default 25 ms expiration of the function level reset
+ global counter can be changed. */
+ uint64_t inc : 1; /**< [ 4: 4](R/W) When [CHGE] is set, this bit determines if the 25 ms expiration of the function
+ level reset global counter will be increased (set) or decreased (not set). */
+ uint64_t delta : 2; /**< [ 3: 2](R/W) When [CHGE] is set, this field determines the delta time to increase/decrease
+ the 25 ms expiration of the function level reset global counter.
+ 0x0 = 1 ms.
+ 0x1 = 2 ms.
+ 0x2 = 4 ms.
+ 0x3 = 8 ms. */
+ uint64_t timer_ctl : 2; /**< [ 1: 0](R/W) Each FLR indication can be cleared within 66-99 ms by use of a timer. This field
+ controls how the FLR indication is cleared:
+ 0x0 = PEM()_FLR_REQ* can be used to clear the FLR indication; if not written before
+ the timer expires, the timer auto-clears the FLR.
+ 0x1 = PEM()_FLR_REQ* must be used to clear the FLR indication; timers are not used.
+ 0x2 = Only timers are used; PEM()_FLR_REQ* is ignored.
+ 0x3 = Reserved. */
+#else /* Word 0 - Little Endian */
+ uint64_t timer_ctl : 2; /**< [ 1: 0](R/W) Each FLR indication can be cleared within 66-99 ms by use of a timer. This field
+ controls how the FLR indication is cleared:
+ 0x0 = PEM()_FLR_REQ* can be used to clear the FLR indication; if not written before
+ the timer expires, the timer auto-clears the FLR.
+ 0x1 = PEM()_FLR_REQ* must be used to clear the FLR indication; timers are not used.
+ 0x2 = Only timers are used; PEM()_FLR_REQ* is ignored.
+ 0x3 = Reserved. */
+ uint64_t delta : 2; /**< [ 3: 2](R/W) When [CHGE] is set, this field determines the delta time to increase/decrease
+ the 25 ms expiration of the function level reset global counter.
+ 0x0 = 1 ms.
+ 0x1 = 2 ms.
+ 0x2 = 4 ms.
+ 0x3 = 8 ms. */
+ uint64_t inc : 1; /**< [ 4: 4](R/W) When [CHGE] is set, this bit determines if the 25 ms expiration of the function
+ level reset global counter will be increased (set) or decreased (not set). */
+ uint64_t chge : 1; /**< [ 5: 5](R/W) When set, the default 25 ms expiration of the function level reset
+ global counter can be changed. */
+ uint64_t reserved_6_63 : 58;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_flr_ctl_s cn; */
+};
+typedef union bdk_pemx_flr_ctl bdk_pemx_flr_ctl_t;
+
+static inline uint64_t BDK_PEMX_FLR_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_FLR_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000068ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_FLR_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_FLR_CTL(a) bdk_pemx_flr_ctl_t
+#define bustype_BDK_PEMX_FLR_CTL(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_FLR_CTL(a) "PEMX_FLR_CTL"
+#define device_bar_BDK_PEMX_FLR_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_FLR_CTL(a) (a)
+#define arguments_BDK_PEMX_FLR_CTL(a) (a),-1,-1,-1
+
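+/* Illustrative only, not part of the imported BDK sources: a minimal sketch of
+ stretching the FLR global counter expiration, assuming the BDK_CSR_MODIFY()
+ helper from bdk-csr.h. With [CHGE]=1, [INC]=1 and [DELTA]=0x2 the expiration
+ becomes 25 ms + 4 ms = 29 ms; with [INC]=0 it would be 25 ms - 4 ms = 21 ms. */
+static inline void pemx_flr_timer_example(bdk_node_t node, int pem)
+{
+ BDK_CSR_MODIFY(c, node, BDK_PEMX_FLR_CTL(pem),
+ c.s.chge = 1; /* allow the 25 ms default to be changed */
+ c.s.inc = 1; /* increase rather than decrease */
+ c.s.delta = 0x2); /* by 4 ms */
+}
+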
+/**
+ * Register (RSL) pem#_flr_glblcnt_ctl
+ *
+ * PEM FLR Global Count Control Register
+ * Function level reset global counter control.
+ */
+union bdk_pemx_flr_glblcnt_ctl
+{
+ uint64_t u;
+ struct bdk_pemx_flr_glblcnt_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t chge : 1; /**< [ 3: 3](R/W) When set, the default 25 ms expiration of the function level reset
+ global counter can be changed. */
+ uint64_t inc : 1; /**< [ 2: 2](R/W) When [CHGE] is set, this bit determines if the 25 ms expiration of the function
+ level reset global counter will be increased (set) or decreased (not set). */
+ uint64_t delta : 2; /**< [ 1: 0](R/W) When [CHGE] is set, this field determines the delta time to increase/decrease
+ the 25 ms expiration of the function level reset global counter.
+ 0x0 = 1 ms.
+ 0x1 = 2 ms.
+ 0x2 = 4 ms.
+ 0x3 = 8 ms. */
+#else /* Word 0 - Little Endian */
+ uint64_t delta : 2; /**< [ 1: 0](R/W) When [CHGE] is set, this field determines the delta time to increase/decrease
+ the 25 ms expiration of the function level reset global counter.
+ 0x0 = 1 ms.
+ 0x1 = 2 ms.
+ 0x2 = 4 ms.
+ 0x3 = 8 ms. */
+ uint64_t inc : 1; /**< [ 2: 2](R/W) When [CHGE] is set, this bit determines if the 25 ms expiration of the function
+ level reset global counter will be increased (set) or decreased (not set). */
+ uint64_t chge : 1; /**< [ 3: 3](R/W) When set, the default 25 ms expiration of the function level reset
+ global counter can be changed. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_flr_glblcnt_ctl_s cn; */
+};
+typedef union bdk_pemx_flr_glblcnt_ctl bdk_pemx_flr_glblcnt_ctl_t;
+
+static inline uint64_t BDK_PEMX_FLR_GLBLCNT_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_FLR_GLBLCNT_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000210ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_FLR_GLBLCNT_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_FLR_GLBLCNT_CTL(a) bdk_pemx_flr_glblcnt_ctl_t
+#define bustype_BDK_PEMX_FLR_GLBLCNT_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_PEMX_FLR_GLBLCNT_CTL(a) "PEMX_FLR_GLBLCNT_CTL"
+#define device_bar_BDK_PEMX_FLR_GLBLCNT_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_FLR_GLBLCNT_CTL(a) (a)
+#define arguments_BDK_PEMX_FLR_GLBLCNT_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) pem#_flr_pf#_stopreq
+ *
+ * PEM PF Stop Request Register
+ * PF function level reset stop outbound requests register.
+ * Hardware automatically sets the STOPREQ bit for the PF when it enters a
+ * function level reset (FLR). Software is responsible for clearing the STOPREQ
+ * bit but must not do so prior to hardware taking down the FLR, which could be
+ * as long as 100 ms. It may be appropriate for software to wait longer before clearing
+ * STOPREQ; for example, software may need to drain deep DPI queues.
+ * Whenever PEM receives a PF or child VF request mastered by {ProductLine} over NCBO/EBUS
+ * (i.e. P or NP) while STOPREQ is set for the function, PEM will discard the outgoing request
+ * before sending it to the PCIe core. If an NP, PEM will schedule an immediate completion
+ * with error for the request - no timeout is required. STOPREQ mimics the behavior of
+ * PCIEEP_CMD[ME] for outbound requests that will master the PCIe bus (P and NP).
+ *
+ * STOPREQ has no effect on NCBI/incoming EBUS traffic.
+ *
+ * STOPREQ will have no effect on completions returned by CNXXXX over NCBO/EBUS.
+ *
+ * When a PEM()_FLR_PF()_STOPREQ is set, none of the associated
+ * PEM()_FLR_VF()_STOPREQ[VF_STOPREQ] will be set.
+ *
+ * STOPREQ is reset when the MAC is reset, and is not reset after a chip soft reset.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on MAC reset.
+ */
+union bdk_pemx_flr_pfx_stopreq
+{
+ uint64_t u;
+ struct bdk_pemx_flr_pfx_stopreq_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t pf_stopreq : 1; /**< [ 0: 0](R/W1C/H) PF STOPREQ bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t pf_stopreq : 1; /**< [ 0: 0](R/W1C/H) PF STOPREQ bit. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_flr_pfx_stopreq_s cn; */
+};
+typedef union bdk_pemx_flr_pfx_stopreq bdk_pemx_flr_pfx_stopreq_t;
+
+static inline uint64_t BDK_PEMX_FLR_PFX_STOPREQ(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_FLR_PFX_STOPREQ(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=3) && (b<=15)))
+ return 0x8e0000000c00ll + 0x1000000000ll * ((a) & 0x3) + 8ll * ((b) & 0xf);
+ __bdk_csr_fatal("PEMX_FLR_PFX_STOPREQ", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_PEMX_FLR_PFX_STOPREQ(a,b) bdk_pemx_flr_pfx_stopreq_t
+#define bustype_BDK_PEMX_FLR_PFX_STOPREQ(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_FLR_PFX_STOPREQ(a,b) "PEMX_FLR_PFX_STOPREQ"
+#define device_bar_BDK_PEMX_FLR_PFX_STOPREQ(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_FLR_PFX_STOPREQ(a,b) (a)
+#define arguments_BDK_PEMX_FLR_PFX_STOPREQ(a,b) (a),(b),-1,-1
+
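+/* Illustrative only, not part of the imported BDK sources: a minimal sketch of
+ the PF FLR acknowledge flow described above, assuming the BDK_CSR_INIT() and
+ BDK_CSR_WRITE() helpers from bdk-csr.h and the bdk_wait_usec() delay helper.
+ Software must wait for hardware to finish the FLR (up to 100 ms) and drain
+ any deep queues before clearing the W1C bit. */
+static inline void pemx_flr_pf_ack_example(bdk_node_t node, int pem, int pf)
+{
+ BDK_CSR_INIT(stopreq, node, BDK_PEMX_FLR_PFX_STOPREQ(pem, pf));
+ if (stopreq.s.pf_stopreq)
+ {
+ bdk_wait_usec(100000); /* FLR takedown can take up to 100 ms */
+ /* ... drain DPI or other deep queues here if needed ... */
+ BDK_CSR_WRITE(node, BDK_PEMX_FLR_PFX_STOPREQ(pem, pf), 1); /* W1C */
+ }
+}
+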
+/**
+ * Register (RSL) pem#_flr_pf0_vf_stopreq
+ *
+ * PEM PF0 Virtual Function Stop Request Lower Register
+ * PF0 virtual function level reset stop outbound requests register.
+ * Hardware automatically sets the STOPREQ bit for the VF when it enters a
+ * function level reset (FLR). Software is responsible for clearing the STOPREQ
+ * bit but must not do so prior to hardware taking down the FLR, which could be
+ * as long as 100 ms. It may be appropriate for software to wait longer before clearing
+ * STOPREQ; for example, software may need to drain deep DPI queues.
+ *
+ * Whenever PEM receives a request mastered by {ProductLine} over S2M (i.e. P or NP)
+ * while STOPREQ is set for the function, PEM will discard the outgoing request
+ * before sending it to the PCIe core. If an NP, PEM will schedule an immediate
+ * SWI_RSP_ERROR completion for the request - no timeout is required.
+ * In both cases, the PEM()_INT_SUM[BMD_E] bit will be set and an error
+ * interrupt is generated.
+ *
+ * STOPREQ mimics the behavior of PCIEEPVF()_CFG001.ME for outbound requests that will
+ * master the PCIe bus (P and NP).
+ *
+ * Note that STOPREQ will have no effect on completions returned by {ProductLine} over the S2M.
+ *
+ * Note that STOPREQ will have no effect on M2S traffic.
+ */
+union bdk_pemx_flr_pf0_vf_stopreq
+{
+ uint64_t u;
+ struct bdk_pemx_flr_pf0_vf_stopreq_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t vf_stopreq_lo : 64; /**< [ 63: 0](R/W1C/H) STOPREQ for the 64 VFs in PF0. */
+#else /* Word 0 - Little Endian */
+ uint64_t vf_stopreq_lo : 64; /**< [ 63: 0](R/W1C/H) STOPREQ for the 64 VFs in PF0. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_flr_pf0_vf_stopreq_s cn; */
+};
+typedef union bdk_pemx_flr_pf0_vf_stopreq bdk_pemx_flr_pf0_vf_stopreq_t;
+
+static inline uint64_t BDK_PEMX_FLR_PF0_VF_STOPREQ(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_FLR_PF0_VF_STOPREQ(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000220ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_FLR_PF0_VF_STOPREQ", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_FLR_PF0_VF_STOPREQ(a) bdk_pemx_flr_pf0_vf_stopreq_t
+#define bustype_BDK_PEMX_FLR_PF0_VF_STOPREQ(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_PEMX_FLR_PF0_VF_STOPREQ(a) "PEMX_FLR_PF0_VF_STOPREQ"
+#define device_bar_BDK_PEMX_FLR_PF0_VF_STOPREQ(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_FLR_PF0_VF_STOPREQ(a) (a)
+#define arguments_BDK_PEMX_FLR_PF0_VF_STOPREQ(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) pem#_flr_pf_stopreq
+ *
+ * PEM PF Stop Request Register
+ * PF function level reset stop outbound requests register.
+ * Hardware automatically sets the STOPREQ bit for the PF when it enters a
+ * function level reset (FLR). Software is responsible for clearing the STOPREQ
+ * bit but must not do so prior to hardware taking down the FLR, which could be
+ * as long as 100 ms. It may be appropriate for software to wait longer before clearing
+ * STOPREQ; for example, software may need to drain deep DPI queues.
+ * Whenever PEM receives a PF or child VF request mastered by {ProductLine} over S2M
+ * (i.e. P or NP) while STOPREQ is set for the function, PEM will discard the outgoing request
+ * before sending it to the PCIe core. If an NP, PEM will schedule an immediate
+ * SWI_RSP_ERROR completion for the request - no timeout is required.
+ * In both cases, the PEM(0..3)_INT_SUM[PBMD_E] bit will be set and an error
+ * interrupt is generated.
+ * STOPREQ mimics the behavior of PCIEEP()_CFG001.ME for outbound requests that will
+ * master the PCIe bus (P and NP).
+ *
+ * STOPREQ has no effect on M2S traffic.
+ *
+ * STOPREQ will have no effect on completions returned by CNXXXX over the S2M.
+ *
+ * When PEM()_FLR_PF_STOPREQ[PF0_STOPREQ] is set, none of the associated
+ * PEM()_FLR_PF0_VF_STOPREQ[VF_STOPREQ_LO] bits will be set.
+ *
+ * STOPREQ is reset when the MAC is reset, and is not reset after a chip soft reset.
+ */
+union bdk_pemx_flr_pf_stopreq
+{
+ uint64_t u;
+ struct bdk_pemx_flr_pf_stopreq_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t pf0_stopreq : 1; /**< [ 0: 0](R/W1C/H) PF0 STOPREQ bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t pf0_stopreq : 1; /**< [ 0: 0](R/W1C/H) PF0 STOPREQ bit. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_flr_pf_stopreq_s cn; */
+};
+typedef union bdk_pemx_flr_pf_stopreq bdk_pemx_flr_pf_stopreq_t;
+
+static inline uint64_t BDK_PEMX_FLR_PF_STOPREQ(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_FLR_PF_STOPREQ(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000218ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_FLR_PF_STOPREQ", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_FLR_PF_STOPREQ(a) bdk_pemx_flr_pf_stopreq_t
+#define bustype_BDK_PEMX_FLR_PF_STOPREQ(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_PEMX_FLR_PF_STOPREQ(a) "PEMX_FLR_PF_STOPREQ"
+#define device_bar_BDK_PEMX_FLR_PF_STOPREQ(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_FLR_PF_STOPREQ(a) (a)
+#define arguments_BDK_PEMX_FLR_PF_STOPREQ(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) pem#_flr_stopreq_ctl
+ *
+ * PEM FLR Stop Request Control Register
+ * Function level reset STOPREQ control register.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on MAC cold reset.
+ */
+union bdk_pemx_flr_stopreq_ctl
+{
+ uint64_t u;
+ struct bdk_pemx_flr_stopreq_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t stopreqclr : 1; /**< [ 0: 0](R/W) When [STOPREQCLR] is clear, only software (and reset) can clear
+ PEM()_FLR_PF_STOPREQ[STOPREQ] and PEM()_FLR_PF0_VF_STOPREQ[STOPREQ]
+ bits. When [STOPREQCLR] is set, PEM hardware
+ also clears the STOPREQ bit when PEM completes an FLR to the PCIe core. In the
+ case of a VF, only one STOPREQ bit gets cleared upon each FLR ack when
+ the [STOPREQCLR] mode bit is set. The srst will assert upon a PF
+ FLR, and srst could be used to reset all STOPREQ bits regardless of
+ [STOPREQCLR]. Otherwise (e.g. {ProductLine}), a PF FLR does not
+ assert srst. */
+#else /* Word 0 - Little Endian */
+ uint64_t stopreqclr : 1; /**< [ 0: 0](R/W) When [STOPREQCLR] is clear, only software (and reset) can clear
+ PEM()_FLR_PF_STOPREQ[STOPREQ] and PEM()_FLR_PF0_VF_STOPREQ[STOPREQ]
+ bits. When [STOPREQCLR] is set, PEM hardware
+ also clears the STOPREQ bit when PEM completes an FLR to the PCIe core. In the
+ case of a VF, only one STOPREQ bit gets cleared upon each FLR ack when
+ the [STOPREQCLR] mode bit is set. The srst will assert upon a PF
+ FLR, and srst could be used to reset all STOPREQ bits regardless of
+ [STOPREQCLR]. Otherwise (e.g. {ProductLine}), a PF FLR does not
+ assert srst. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_flr_stopreq_ctl_s cn8; */
+ struct bdk_pemx_flr_stopreq_ctl_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t stopreqclr : 1; /**< [ 0: 0](R/W) Stop request clear behavior.
+
+ 0 = Only software (and reset) can clear PEM()_FLR_PF()_STOPREQ[PF_STOPREQ] and
+ PEM()_FLR_VF()_STOPREQ[VF_STOPREQ] bits.
+
+ 1 = PEM hardware also clears the STOPREQ bit when PEM completes an FLR to the
+ PCIe core. In the case of a VF, only one STOPREQ bit gets cleared upon each FLR
+ ack when [STOPREQCLR] is set.
+
+ The srst will assert upon a PF FLR, and srst could be used to reset all STOPREQ
+ bits regardless of [STOPREQCLR]. Otherwise, a PF FLR does not assert srst. */
+#else /* Word 0 - Little Endian */
+ uint64_t stopreqclr : 1; /**< [ 0: 0](R/W) Stop request clear behavior.
+
+ 0 = Only software (and reset) can clear PEM()_FLR_PF()_STOPREQ[PF_STOPREQ] and
+ PEM()_FLR_VF()_STOPREQ[VF_STOPREQ] bits.
+
+ 1 = PEM hardware also clears the STOPREQ bit when PEM completes an FLR to the
+ PCIe core. In the case of a VF, only one STOPREQ bit gets cleared upon each FLR
+ ack when [STOPREQCLR] is set.
+
+ The srst will assert upon a PF FLR, and srst could be used to reset all STOPREQ
+ bits regardless of [STOPREQCLR]. Otherwise, a PF FLR does not assert srst. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_pemx_flr_stopreq_ctl bdk_pemx_flr_stopreq_ctl_t;
+
+static inline uint64_t BDK_PEMX_FLR_STOPREQ_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_FLR_STOPREQ_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000238ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000070ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_FLR_STOPREQ_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_FLR_STOPREQ_CTL(a) bdk_pemx_flr_stopreq_ctl_t
+#define bustype_BDK_PEMX_FLR_STOPREQ_CTL(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_FLR_STOPREQ_CTL(a) "PEMX_FLR_STOPREQ_CTL"
+#define device_bar_BDK_PEMX_FLR_STOPREQ_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_FLR_STOPREQ_CTL(a) (a)
+#define arguments_BDK_PEMX_FLR_STOPREQ_CTL(a) (a),-1,-1,-1
+
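+/* Illustrative only, not part of the imported BDK sources: a minimal sketch of
+ enabling hardware auto-clear of STOPREQ on FLR completion, assuming the
+ BDK_CSR_MODIFY() helper from bdk-csr.h. With [STOPREQCLR]=0 software must
+ clear every STOPREQ bit itself, as in the PF example above. */
+static inline void pemx_stopreq_autoclear_example(bdk_node_t node, int pem)
+{
+ BDK_CSR_MODIFY(c, node, BDK_PEMX_FLR_STOPREQ_CTL(pem),
+ c.s.stopreqclr = 1);
+}
+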
+/**
+ * Register (NCB) pem#_flr_vf#_stopreq
+ *
+ * PEM VF Stop Request Register
+ * VFI 0-239 virtual function level reset stop outbound requests register.
+ * Hardware automatically sets the STOPREQ bit for the VF when it enters a
+ * function level reset (FLR). Software is responsible for clearing the STOPREQ
+ * bit but must not do so prior to hardware taking down the FLR, which could be
+ * as long as 100 ms. It may be appropriate for software to wait longer before clearing
+ * STOPREQ; for example, software may need to drain deep DPI queues.
+ *
+ * Whenever PEM receives a request mastered by {ProductLine} over NCBO/EBUS (i.e. P or NP)
+ * while STOPREQ is set for the function, PEM will discard the outgoing request
+ * before sending it to the PCIe core. If an NP, PEM will schedule an immediate
+ * completion with error for the request - no timeout is required.
+ * In both cases, the PEM()_DBG_INFO[BMD_E] bit will be set.
+ *
+ * The index into this array is referred to as a "VFI" and must be calculated by
+ * software based on the number of VFs assigned to each PF. {PF0,VF0} is VFI 0, and
+ * for this VF, bit [0] of the first register would be used. {PF1,VF0} has VFI
+ * PCIEEP_SRIOV_VFS[IVF] of PF0. In general, the VFI of {PFx,VFy} is
+ * SUM(PF0..PF(x-1))(PCIEEP_SRIOV_VFS[IVF]) + y; a sketch of this calculation
+ * follows this register's accessors.
+ *
+ * STOPREQ mimics the behavior of PCIEEPVF_CMD[ME] for outbound requests that will
+ * master the PCIe bus (P and NP).
+ *
+ * Note that STOPREQ will have no effect on completions returned by {ProductLine} over the NCBO/EBUS.
+ *
+ * Note that STOPREQ will have no effect on NCBI or incoming EBUS traffic.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on MAC reset.
+ */
+union bdk_pemx_flr_vfx_stopreq
+{
+ uint64_t u;
+ struct bdk_pemx_flr_vfx_stopreq_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t vf_stopreq : 64; /**< [ 63: 0](R/W1C/H) STOPREQ for the pool of 240 VFs shared by the 16 PFs.
+ Each bit corresponds to one of the NVF virtual functions. */
+#else /* Word 0 - Little Endian */
+ uint64_t vf_stopreq : 64; /**< [ 63: 0](R/W1C/H) STOPREQ for the pool of 240 VFs shared by the 16 PFs.
+ Each bit corresponds to one of the NVF virtual functions. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_flr_vfx_stopreq_s cn; */
+};
+typedef union bdk_pemx_flr_vfx_stopreq bdk_pemx_flr_vfx_stopreq_t;
+
+static inline uint64_t BDK_PEMX_FLR_VFX_STOPREQ(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_FLR_VFX_STOPREQ(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=3) && (b<=3)))
+ return 0x8e0000000e00ll + 0x1000000000ll * ((a) & 0x3) + 8ll * ((b) & 0x3);
+ __bdk_csr_fatal("PEMX_FLR_VFX_STOPREQ", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_PEMX_FLR_VFX_STOPREQ(a,b) bdk_pemx_flr_vfx_stopreq_t
+#define bustype_BDK_PEMX_FLR_VFX_STOPREQ(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_FLR_VFX_STOPREQ(a,b) "PEMX_FLR_VFX_STOPREQ"
+#define device_bar_BDK_PEMX_FLR_VFX_STOPREQ(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_FLR_VFX_STOPREQ(a,b) (a)
+#define arguments_BDK_PEMX_FLR_VFX_STOPREQ(a,b) (a),(b),-1,-1
+
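+/* Illustrative only, not part of the imported BDK sources: a sketch of the VFI
+ arithmetic described above, assuming the BDK_CSR_WRITE() helper from
+ bdk-csr.h. The per-PF VF counts (PCIEEP_SRIOV_VFS[IVF]) are passed in as an
+ array here rather than read from config space. For example, with 8 VFs on
+ PF0, {PF1,VF2} has VFI 8 + 2 = 10, which lands in PEM()_FLR_VF(0)_STOPREQ
+ bit [10]. */
+static inline void pemx_vf_stopreq_clear_example(bdk_node_t node, int pem,
+ const int *vfs_per_pf, int pf, int vf)
+{
+ int vfi = vf;
+ for (int p = 0; p < pf; p++)
+ vfi += vfs_per_pf[p]; /* SUM(PF0..PF(x-1))(IVF) + y */
+ int reg = vfi / 64; /* which 64-bit STOPREQ register */
+ int bit = vfi % 64; /* which bit within it */
+ BDK_CSR_WRITE(node, BDK_PEMX_FLR_VFX_STOPREQ(pem, reg), 1ull << bit); /* W1C */
+}
+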
+/**
+ * Register (RSL) pem#_flr_zombie_ctl
+ *
+ * PEM FLR Zombie Counter Control Register
+ * Function level reset global zombie counter control register.
+ */
+union bdk_pemx_flr_zombie_ctl
+{
+ uint64_t u;
+ struct bdk_pemx_flr_zombie_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_10_63 : 54;
+ uint64_t exp : 10; /**< [ 9: 0](R/W) The expiration value for the inbound shared global zombie counter. The global
+ zombie counter continuously counts the number of cycles where the PCIe core was
+ allowed to send either a posted request or a completion to the PEM. When the
+ global zombie counter reaches expiration ([EXP]), it resets to zero and all the
+ nonzero per-PCIe-tag zombie counters are decremented. When a per-PCIe-tag
+ zombie counter decrements to zero, a SWI_RSP_ERROR is sent to the M2S bus and
+ its associated PCIe tag is returned to the pool. This field gives software
+ control of the zombie counter expiration. */
+#else /* Word 0 - Little Endian */
+ uint64_t exp : 10; /**< [ 9: 0](R/W) The expiration value for the inbound shared global zombie counter. The global
+ zombie counter continuously counts the number of cycles where the PCIe core was
+ allowed to send either a posted request or a completion to the PEM. When the
+ global zombie counter reaches expiration ([EXP]), it resets to zero and all the
+ nonzero per-PCIe-tag zombie counters are decremented. When a per-PCIe-tag
+ zombie counter decrements to zero, a SWI_RSP_ERROR is sent to the M2S bus and
+ its associated PCIe tag is returned to the pool. This field gives software
+ control of the zombie counter expiration. */
+ uint64_t reserved_10_63 : 54;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_flr_zombie_ctl_s cn; */
+};
+typedef union bdk_pemx_flr_zombie_ctl bdk_pemx_flr_zombie_ctl_t;
+
+static inline uint64_t BDK_PEMX_FLR_ZOMBIE_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_FLR_ZOMBIE_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000230ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_FLR_ZOMBIE_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_FLR_ZOMBIE_CTL(a) bdk_pemx_flr_zombie_ctl_t
+#define bustype_BDK_PEMX_FLR_ZOMBIE_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_PEMX_FLR_ZOMBIE_CTL(a) "PEMX_FLR_ZOMBIE_CTL"
+#define device_bar_BDK_PEMX_FLR_ZOMBIE_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_FLR_ZOMBIE_CTL(a) (a)
+#define arguments_BDK_PEMX_FLR_ZOMBIE_CTL(a) (a),-1,-1,-1
+
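+/* Illustrative only, not part of the imported BDK sources: a minimal sketch of
+ programming the zombie counter expiration, assuming the BDK_CSR_MODIFY()
+ helper from bdk-csr.h. A larger [EXP] gives stale non-posted tags more time
+ before they are terminated with SWI_RSP_ERROR. */
+static inline void pemx_zombie_exp_example(bdk_node_t node, int pem, unsigned exp)
+{
+ BDK_CSR_MODIFY(c, node, BDK_PEMX_FLR_ZOMBIE_CTL(pem),
+ c.s.exp = exp & 0x3ff); /* [EXP] is 10 bits */
+}
+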
+/**
+ * Register (NCB) pem#_ib_merge_timer_ctl
+ *
+ * PEM NCBI Merge Timer Control Register
+ * This register controls the merging timer for inbound NCB writes.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_ib_merge_timer_ctl
+{
+ uint64_t u;
+ struct bdk_pemx_ib_merge_timer_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_11_63 : 53;
+ uint64_t wmerge_dis : 1; /**< [ 10: 10](R/W) For diagnostic use only. If set, will disable inbound write merging. */
+ uint64_t wmerge_total_timer : 10; /**< [ 9: 0](R/W) Write merge encapsulation timer. When PEM accepts a NCBI write which begins
+ a write merging process, [WMERGE_TOTAL_TIMER] specifies the maximum wait, in
+ coprocessor-clock cycles, to merge additional write operations into one larger
+ write. The values for this field range from 1 to 1023, with 0x0 used for
+ diagnostics only and treated as never expire.
+
+ Internal:
+ If, during diagnostics, a timer value of 0x0 causes final transactions to be
+ stuck within the pipeline, those transactions can be released by changing the
+ timer to a non-zero value. */
+#else /* Word 0 - Little Endian */
+ uint64_t wmerge_total_timer : 10; /**< [ 9: 0](R/W) Write merge encapsulation timer. When PEM accepts a NCBI write which begins
+ a write merging process, [WMERGE_TOTAL_TIMER] specifies the maximum wait, in
+ coprocessor-clock cycles, to merge additional write operations into one larger
+ write. The values for this field range from 1 to 1023, with 0x0 used for
+ diagnostics only and treated as never expire.
+
+ Internal:
+ If, during diagnostics, a timer value of 0x0 causes final transactions to be
+ stuck within the pipeline, those transactions can be released by changing the
+ timer to a non-zero value. */
+ uint64_t wmerge_dis : 1; /**< [ 10: 10](R/W) For diagnostic use only. If set, will disable inbound write merging. */
+ uint64_t reserved_11_63 : 53;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_ib_merge_timer_ctl_s cn; */
+};
+typedef union bdk_pemx_ib_merge_timer_ctl bdk_pemx_ib_merge_timer_ctl_t;
+
+static inline uint64_t BDK_PEMX_IB_MERGE_TIMER_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_IB_MERGE_TIMER_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e00000001b0ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_IB_MERGE_TIMER_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_IB_MERGE_TIMER_CTL(a) bdk_pemx_ib_merge_timer_ctl_t
+#define bustype_BDK_PEMX_IB_MERGE_TIMER_CTL(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_IB_MERGE_TIMER_CTL(a) "PEMX_IB_MERGE_TIMER_CTL"
+#define device_bar_BDK_PEMX_IB_MERGE_TIMER_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_IB_MERGE_TIMER_CTL(a) (a)
+#define arguments_BDK_PEMX_IB_MERGE_TIMER_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) pem#_ib_wmerge_merged_pc
+ *
+ * PEM Inbound Merge Writes Merged Performance Counter Register
+ * This register reports how many writes merged within the inbound write merge unit.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_ib_wmerge_merged_pc
+{
+ uint64_t u;
+ struct bdk_pemx_ib_wmerge_merged_pc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t wmerge_merged : 64; /**< [ 63: 0](R/W/H) Each NCBI write operation mapped to MEM that merges with a previous
+ write will increment this count. */
+#else /* Word 0 - Little Endian */
+ uint64_t wmerge_merged : 64; /**< [ 63: 0](R/W/H) Each NCBI write operation mapped to MEM that merges with a previous
+ write will increment this count. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_ib_wmerge_merged_pc_s cn; */
+};
+typedef union bdk_pemx_ib_wmerge_merged_pc bdk_pemx_ib_wmerge_merged_pc_t;
+
+static inline uint64_t BDK_PEMX_IB_WMERGE_MERGED_PC(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_IB_WMERGE_MERGED_PC(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e00000001c0ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_IB_WMERGE_MERGED_PC", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_IB_WMERGE_MERGED_PC(a) bdk_pemx_ib_wmerge_merged_pc_t
+#define bustype_BDK_PEMX_IB_WMERGE_MERGED_PC(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_IB_WMERGE_MERGED_PC(a) "PEMX_IB_WMERGE_MERGED_PC"
+#define device_bar_BDK_PEMX_IB_WMERGE_MERGED_PC(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_IB_WMERGE_MERGED_PC(a) (a)
+#define arguments_BDK_PEMX_IB_WMERGE_MERGED_PC(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) pem#_ib_wmerge_received_pc
+ *
+ * PEM Inbound Merge Writes Received Performance Counter Register
+ * This register reports the number of writes that enter the inbound write merge unit.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_ib_wmerge_received_pc
+{
+ uint64_t u;
+ struct bdk_pemx_ib_wmerge_received_pc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t wmerge_writes : 64; /**< [ 63: 0](R/W/H) Each NCBI write operation mapped to MEM type will increment this count. */
+#else /* Word 0 - Little Endian */
+ uint64_t wmerge_writes : 64; /**< [ 63: 0](R/W/H) Each NCBI write operation mapped to MEM type will increment this count. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_ib_wmerge_received_pc_s cn; */
+};
+typedef union bdk_pemx_ib_wmerge_received_pc bdk_pemx_ib_wmerge_received_pc_t;
+
+static inline uint64_t BDK_PEMX_IB_WMERGE_RECEIVED_PC(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_IB_WMERGE_RECEIVED_PC(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e00000001b8ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_IB_WMERGE_RECEIVED_PC", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_IB_WMERGE_RECEIVED_PC(a) bdk_pemx_ib_wmerge_received_pc_t
+#define bustype_BDK_PEMX_IB_WMERGE_RECEIVED_PC(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_IB_WMERGE_RECEIVED_PC(a) "PEMX_IB_WMERGE_RECEIVED_PC"
+#define device_bar_BDK_PEMX_IB_WMERGE_RECEIVED_PC(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_IB_WMERGE_RECEIVED_PC(a) (a)
+#define arguments_BDK_PEMX_IB_WMERGE_RECEIVED_PC(a) (a),-1,-1,-1
+
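+/* Illustrative only, not part of the imported BDK sources: a minimal sketch of
+ computing the inbound write-merge ratio from the two counters above, assuming
+ the BDK_CSR_READ() helper from bdk-csr.h. A result near 100 means nearly
+ every NCBI MEM write merged into a previous one. */
+static inline int pemx_wmerge_pct_example(bdk_node_t node, int pem)
+{
+ uint64_t merged = BDK_CSR_READ(node, BDK_PEMX_IB_WMERGE_MERGED_PC(pem));
+ uint64_t received = BDK_CSR_READ(node, BDK_PEMX_IB_WMERGE_RECEIVED_PC(pem));
+ if (!received)
+ return 0;
+ return (int)(merged * 100 / received);
+}
+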
+/**
+ * Register (RSL) pem#_inb_read_credits
+ *
+ * PEM In-flight Read Credits Register
+ * This register contains the number of in-flight read operations from PCIe core to SLI.
+ */
+union bdk_pemx_inb_read_credits
+{
+ uint64_t u;
+ struct bdk_pemx_inb_read_credits_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_7_63 : 57;
+ uint64_t num : 7; /**< [ 6: 0](R/W) The number of reads that may be in flight from the PCIe core to the SLI. Minimum number is
+ 6; maximum number is 64. */
+#else /* Word 0 - Little Endian */
+ uint64_t num : 7; /**< [ 6: 0](R/W) The number of reads that may be in flight from the PCIe core to the SLI. Minimum number is
+ 6; maximum number is 64. */
+ uint64_t reserved_7_63 : 57;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pemx_inb_read_credits_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_7_63 : 57;
+ uint64_t num : 7; /**< [ 6: 0](R/W) The number of reads that may be in flight from the PCIe core to the SLI. Minimum number is
+ 2; maximum number is 64. */
+#else /* Word 0 - Little Endian */
+ uint64_t num : 7; /**< [ 6: 0](R/W) The number of reads that may be in flight from the PCIe core to the SLI. Minimum number is
+ 2; maximum number is 64. */
+ uint64_t reserved_7_63 : 57;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ /* struct bdk_pemx_inb_read_credits_s cn81xx; */
+ /* struct bdk_pemx_inb_read_credits_s cn83xx; */
+ /* struct bdk_pemx_inb_read_credits_s cn88xxp2; */
+};
+typedef union bdk_pemx_inb_read_credits bdk_pemx_inb_read_credits_t;
+
+static inline uint64_t BDK_PEMX_INB_READ_CREDITS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_INB_READ_CREDITS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x87e0c00000b8ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c00000b8ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0c00000b8ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PEMX_INB_READ_CREDITS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_INB_READ_CREDITS(a) bdk_pemx_inb_read_credits_t
+#define bustype_BDK_PEMX_INB_READ_CREDITS(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_PEMX_INB_READ_CREDITS(a) "PEMX_INB_READ_CREDITS"
+#define device_bar_BDK_PEMX_INB_READ_CREDITS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_INB_READ_CREDITS(a) (a)
+#define arguments_BDK_PEMX_INB_READ_CREDITS(a) (a),-1,-1,-1
+
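+/* Illustrative only, not part of the imported BDK sources: a minimal sketch of
+ setting the in-flight read credit count, assuming the BDK_CSR_MODIFY() helper
+ from bdk-csr.h. The documented range is 6-64 (2-64 on CN88XX pass 1), so the
+ value is clamped to the stricter bounds here. */
+static inline void pemx_inb_read_credits_example(bdk_node_t node, int pem, int num)
+{
+ if (num < 6)
+ num = 6;
+ if (num > 64)
+ num = 64;
+ BDK_CSR_MODIFY(c, node, BDK_PEMX_INB_READ_CREDITS(pem),
+ c.s.num = num);
+}
+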
+/**
+ * Register (NCB) pem#_int_ena_w1c
+ *
+ * PEM Interrupt Enable Clear Register
+ * This register clears interrupt enable bits.
+ */
+union bdk_pemx_int_ena_w1c
+{
+ uint64_t u;
+ struct bdk_pemx_int_ena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t ptm_rdy_val : 1; /**< [ 15: 15](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[PTM_RDY_VAL]. */
+ uint64_t reserved_0_14 : 15;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_14 : 15;
+ uint64_t ptm_rdy_val : 1; /**< [ 15: 15](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[PTM_RDY_VAL]. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pemx_int_ena_w1c_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t ptm_rdy_val : 1; /**< [ 15: 15](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[PTM_RDY_VAL]. */
+ uint64_t un_b0 : 1; /**< [ 14: 14](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[UN_B0]. */
+ uint64_t up_b0 : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[UP_B0]. */
+ uint64_t surp_down : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[SURP_DOWN]. */
+ uint64_t cfg_inf : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[CFG_INF]. */
+ uint64_t crs_dr : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[CRS_DR]. */
+ uint64_t crs_er : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[CRS_ER]. */
+ uint64_t rdlk : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[RDLK]. */
+ uint64_t un_bx : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[UN_BX]. */
+ uint64_t un_b2 : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[UN_B2]. */
+ uint64_t un_b4 : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[UN_B4]. */
+ uint64_t up_bx : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[UP_BX]. */
+ uint64_t up_b2 : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[UP_B2]. */
+ uint64_t up_b4 : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[UP_B4]. */
+ uint64_t up_b3 : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[UP_B3]. */
+ uint64_t se : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[SE].
+ Internal:
+ cfg_sys_err_rc. */
+#else /* Word 0 - Little Endian */
+ uint64_t se : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[SE].
+ Internal:
+ cfg_sys_err_rc. */
+ uint64_t up_b3 : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[UP_B3]. */
+ uint64_t up_b4 : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[UP_B4]. */
+ uint64_t up_b2 : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[UP_B2]. */
+ uint64_t up_bx : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[UP_BX]. */
+ uint64_t un_b4 : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[UN_B4]. */
+ uint64_t un_b2 : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[UN_B2]. */
+ uint64_t un_bx : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[UN_BX]. */
+ uint64_t rdlk : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[RDLK]. */
+ uint64_t crs_er : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[CRS_ER]. */
+ uint64_t crs_dr : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[CRS_DR]. */
+ uint64_t cfg_inf : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[CFG_INF]. */
+ uint64_t surp_down : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[SURP_DOWN]. */
+ uint64_t up_b0 : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[UP_B0]. */
+ uint64_t un_b0 : 1; /**< [ 14: 14](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[UN_B0]. */
+ uint64_t ptm_rdy_val : 1; /**< [ 15: 15](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[PTM_RDY_VAL]. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_pemx_int_ena_w1c_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t crs_dr : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for PEM(0..2)_INT_SUM[CRS_DR]. */
+ uint64_t crs_er : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for PEM(0..2)_INT_SUM[CRS_ER]. */
+ uint64_t rdlk : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for PEM(0..2)_INT_SUM[RDLK]. */
+ uint64_t reserved_10 : 1;
+ uint64_t un_bx : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for PEM(0..2)_INT_SUM[UN_BX]. */
+ uint64_t un_b2 : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for PEM(0..2)_INT_SUM[UN_B2]. */
+ uint64_t un_b1 : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for PEM(0..2)_INT_SUM[UN_B1]. */
+ uint64_t up_bx : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for PEM(0..2)_INT_SUM[UP_BX]. */
+ uint64_t up_b2 : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for PEM(0..2)_INT_SUM[UP_B2]. */
+ uint64_t up_b1 : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for PEM(0..2)_INT_SUM[UP_B1]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t se : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for PEM(0..2)_INT_SUM[SE].
+ Internal:
+ cfg_sys_err_rc. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t se : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for PEM(0..2)_INT_SUM[SE].
+ Internal:
+ cfg_sys_err_rc. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t up_b1 : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for PEM(0..2)_INT_SUM[UP_B1]. */
+ uint64_t up_b2 : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for PEM(0..2)_INT_SUM[UP_B2]. */
+ uint64_t up_bx : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for PEM(0..2)_INT_SUM[UP_BX]. */
+ uint64_t un_b1 : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for PEM(0..2)_INT_SUM[UN_B1]. */
+ uint64_t un_b2 : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for PEM(0..2)_INT_SUM[UN_B2]. */
+ uint64_t un_bx : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for PEM(0..2)_INT_SUM[UN_BX]. */
+ uint64_t reserved_10 : 1;
+ uint64_t rdlk : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for PEM(0..2)_INT_SUM[RDLK]. */
+ uint64_t crs_er : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for PEM(0..2)_INT_SUM[CRS_ER]. */
+ uint64_t crs_dr : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for PEM(0..2)_INT_SUM[CRS_DR]. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_pemx_int_ena_w1c_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t crs_dr : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for PEM(0..5)_INT_SUM[CRS_DR]. */
+ uint64_t crs_er : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for PEM(0..5)_INT_SUM[CRS_ER]. */
+ uint64_t rdlk : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for PEM(0..5)_INT_SUM[RDLK]. */
+ uint64_t reserved_10 : 1;
+ uint64_t un_bx : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for PEM(0..5)_INT_SUM[UN_BX]. */
+ uint64_t un_b2 : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for PEM(0..5)_INT_SUM[UN_B2]. */
+ uint64_t un_b1 : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for PEM(0..5)_INT_SUM[UN_B1]. */
+ uint64_t up_bx : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for PEM(0..5)_INT_SUM[UP_BX]. */
+ uint64_t up_b2 : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for PEM(0..5)_INT_SUM[UP_B2]. */
+ uint64_t up_b1 : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for PEM(0..5)_INT_SUM[UP_B1]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t se : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for PEM(0..5)_INT_SUM[SE].
+ Internal:
+ cfg_sys_err_rc. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t se : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for PEM(0..5)_INT_SUM[SE].
+ Internal:
+ cfg_sys_err_rc. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t up_b1 : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for PEM(0..5)_INT_SUM[UP_B1]. */
+ uint64_t up_b2 : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for PEM(0..5)_INT_SUM[UP_B2]. */
+ uint64_t up_bx : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for PEM(0..5)_INT_SUM[UP_BX]. */
+ uint64_t un_b1 : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for PEM(0..5)_INT_SUM[UN_B1]. */
+ uint64_t un_b2 : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for PEM(0..5)_INT_SUM[UN_B2]. */
+ uint64_t un_bx : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for PEM(0..5)_INT_SUM[UN_BX]. */
+ uint64_t reserved_10 : 1;
+ uint64_t rdlk : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for PEM(0..5)_INT_SUM[RDLK]. */
+ uint64_t crs_er : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for PEM(0..5)_INT_SUM[CRS_ER]. */
+ uint64_t crs_dr : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for PEM(0..5)_INT_SUM[CRS_DR]. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_pemx_int_ena_w1c_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_15_63 : 49;
+ uint64_t surp_down : 1; /**< [ 14: 14](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[SURP_DOWN]. */
+ uint64_t crs_dr : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[CRS_DR]. */
+ uint64_t crs_er : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[CRS_ER]. */
+ uint64_t rdlk : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[RDLK]. */
+ uint64_t reserved_10 : 1;
+ uint64_t un_bx : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[UN_BX]. */
+ uint64_t un_b2 : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[UN_B2]. */
+ uint64_t un_b1 : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[UN_B1]. */
+ uint64_t up_bx : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[UP_BX]. */
+ uint64_t up_b2 : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[UP_B2]. */
+ uint64_t up_b1 : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[UP_B1]. */
+ uint64_t up_b3 : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[UP_B3]. */
+ uint64_t reserved_2 : 1;
+ uint64_t se : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[SE].
+ Internal:
+ cfg_sys_err_rc. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t se : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[SE].
+ Internal:
+ cfg_sys_err_rc. */
+ uint64_t reserved_2 : 1;
+ uint64_t up_b3 : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[UP_B3]. */
+ uint64_t up_b1 : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[UP_B1]. */
+ uint64_t up_b2 : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[UP_B2]. */
+ uint64_t up_bx : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[UP_BX]. */
+ uint64_t un_b1 : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[UN_B1]. */
+ uint64_t un_b2 : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[UN_B2]. */
+ uint64_t un_bx : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[UN_BX]. */
+ uint64_t reserved_10 : 1;
+ uint64_t rdlk : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[RDLK]. */
+ uint64_t crs_er : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[CRS_ER]. */
+ uint64_t crs_dr : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[CRS_DR]. */
+ uint64_t surp_down : 1; /**< [ 14: 14](R/W1C/H) Reads or clears enable for PEM(0..3)_INT_SUM[SURP_DOWN]. */
+ uint64_t reserved_15_63 : 49;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pemx_int_ena_w1c bdk_pemx_int_ena_w1c_t;
+
+static inline uint64_t BDK_PEMX_INT_ENA_W1C(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_INT_ENA_W1C(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x87e0c0000438ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000438ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0c0000438ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e00000000e8ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_INT_ENA_W1C", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_INT_ENA_W1C(a) bdk_pemx_int_ena_w1c_t
+#define bustype_BDK_PEMX_INT_ENA_W1C(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_INT_ENA_W1C(a) "PEMX_INT_ENA_W1C"
+#define device_bar_BDK_PEMX_INT_ENA_W1C(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_INT_ENA_W1C(a) (a)
+#define arguments_BDK_PEMX_INT_ENA_W1C(a) (a),-1,-1,-1
+
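+/* Illustrative only, not part of the imported BDK sources: a minimal sketch of
+ the W1C enable-mask idiom, assuming the BDK_CSR_WRITE() helper from
+ bdk-csr.h. Writing ones to PEM()_INT_ENA_W1C clears the corresponding
+ interrupt enables; the paired PEM()_INT_ENA_W1S register (below) sets them
+ the same way. */
+static inline void pemx_int_disable_all_example(bdk_node_t node, int pem)
+{
+ BDK_CSR_WRITE(node, BDK_PEMX_INT_ENA_W1C(pem), ~0ull);
+}
+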
+/**
+ * Register (NCB) pem#_int_ena_w1s
+ *
+ * PEM Interrupt Enable Set Register
+ * This register sets interrupt enable bits.
+ */
+union bdk_pemx_int_ena_w1s
+{
+ uint64_t u;
+ struct bdk_pemx_int_ena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t ptm_rdy_val : 1; /**< [ 15: 15](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[PTM_RDY_VAL]. */
+ uint64_t reserved_0_14 : 15;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_14 : 15;
+ uint64_t ptm_rdy_val : 1; /**< [ 15: 15](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[PTM_RDY_VAL]. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pemx_int_ena_w1s_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t ptm_rdy_val : 1; /**< [ 15: 15](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[PTM_RDY_VAL]. */
+ uint64_t un_b0 : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[UN_B0]. */
+ uint64_t up_b0 : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[UP_B0]. */
+ uint64_t surp_down : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[SURP_DOWN]. */
+ uint64_t cfg_inf : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[CFG_INF]. */
+ uint64_t crs_dr : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[CRS_DR]. */
+ uint64_t crs_er : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[CRS_ER]. */
+ uint64_t rdlk : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[RDLK]. */
+ uint64_t un_bx : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[UN_BX]. */
+ uint64_t un_b2 : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[UN_B2]. */
+ uint64_t un_b4 : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[UN_B4]. */
+ uint64_t up_bx : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[UP_BX]. */
+ uint64_t up_b2 : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[UP_B2]. */
+ uint64_t up_b4 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[UP_B4]. */
+ uint64_t up_b3 : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[UP_B3]. */
+ uint64_t se : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[SE].
+ Internal:
+ cfg_sys_err_rc. */
+#else /* Word 0 - Little Endian */
+ uint64_t se : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[SE].
+ Internal:
+ cfg_sys_err_rc. */
+ uint64_t up_b3 : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[UP_B3]. */
+ uint64_t up_b4 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[UP_B4]. */
+ uint64_t up_b2 : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[UP_B2]. */
+ uint64_t up_bx : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[UP_BX]. */
+ uint64_t un_b4 : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[UN_B4]. */
+ uint64_t un_b2 : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[UN_B2]. */
+ uint64_t un_bx : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[UN_BX]. */
+ uint64_t rdlk : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[RDLK]. */
+ uint64_t crs_er : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[CRS_ER]. */
+ uint64_t crs_dr : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[CRS_DR]. */
+ uint64_t cfg_inf : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[CFG_INF]. */
+ uint64_t surp_down : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[SURP_DOWN]. */
+ uint64_t up_b0 : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[UP_B0]. */
+ uint64_t un_b0 : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[UN_B0]. */
+ uint64_t ptm_rdy_val : 1; /**< [ 15: 15](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[PTM_RDY_VAL]. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_pemx_int_ena_w1s_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t crs_dr : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for PEM(0..2)_INT_SUM[CRS_DR]. */
+ uint64_t crs_er : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for PEM(0..2)_INT_SUM[CRS_ER]. */
+ uint64_t rdlk : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for PEM(0..2)_INT_SUM[RDLK]. */
+ uint64_t reserved_10 : 1;
+ uint64_t un_bx : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for PEM(0..2)_INT_SUM[UN_BX]. */
+ uint64_t un_b2 : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for PEM(0..2)_INT_SUM[UN_B2]. */
+ uint64_t un_b1 : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for PEM(0..2)_INT_SUM[UN_B1]. */
+ uint64_t up_bx : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for PEM(0..2)_INT_SUM[UP_BX]. */
+ uint64_t up_b2 : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for PEM(0..2)_INT_SUM[UP_B2]. */
+ uint64_t up_b1 : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for PEM(0..2)_INT_SUM[UP_B1]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t se : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for PEM(0..2)_INT_SUM[SE].
+ Internal:
+ cfg_sys_err_rc. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t se : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for PEM(0..2)_INT_SUM[SE].
+ Internal:
+ cfg_sys_err_rc. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t up_b1 : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for PEM(0..2)_INT_SUM[UP_B1]. */
+ uint64_t up_b2 : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for PEM(0..2)_INT_SUM[UP_B2]. */
+ uint64_t up_bx : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for PEM(0..2)_INT_SUM[UP_BX]. */
+ uint64_t un_b1 : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for PEM(0..2)_INT_SUM[UN_B1]. */
+ uint64_t un_b2 : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for PEM(0..2)_INT_SUM[UN_B2]. */
+ uint64_t un_bx : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for PEM(0..2)_INT_SUM[UN_BX]. */
+ uint64_t reserved_10 : 1;
+ uint64_t rdlk : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for PEM(0..2)_INT_SUM[RDLK]. */
+ uint64_t crs_er : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for PEM(0..2)_INT_SUM[CRS_ER]. */
+ uint64_t crs_dr : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for PEM(0..2)_INT_SUM[CRS_DR]. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_pemx_int_ena_w1s_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t crs_dr : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for PEM(0..5)_INT_SUM[CRS_DR]. */
+ uint64_t crs_er : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for PEM(0..5)_INT_SUM[CRS_ER]. */
+ uint64_t rdlk : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for PEM(0..5)_INT_SUM[RDLK]. */
+ uint64_t reserved_10 : 1;
+ uint64_t un_bx : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for PEM(0..5)_INT_SUM[UN_BX]. */
+ uint64_t un_b2 : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for PEM(0..5)_INT_SUM[UN_B2]. */
+ uint64_t un_b1 : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for PEM(0..5)_INT_SUM[UN_B1]. */
+ uint64_t up_bx : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for PEM(0..5)_INT_SUM[UP_BX]. */
+ uint64_t up_b2 : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for PEM(0..5)_INT_SUM[UP_B2]. */
+ uint64_t up_b1 : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for PEM(0..5)_INT_SUM[UP_B1]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t se : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for PEM(0..5)_INT_SUM[SE].
+ Internal:
+ cfg_sys_err_rc. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t se : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for PEM(0..5)_INT_SUM[SE].
+ Internal:
+ cfg_sys_err_rc. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t up_b1 : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for PEM(0..5)_INT_SUM[UP_B1]. */
+ uint64_t up_b2 : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for PEM(0..5)_INT_SUM[UP_B2]. */
+ uint64_t up_bx : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for PEM(0..5)_INT_SUM[UP_BX]. */
+ uint64_t un_b1 : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for PEM(0..5)_INT_SUM[UN_B1]. */
+ uint64_t un_b2 : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for PEM(0..5)_INT_SUM[UN_B2]. */
+ uint64_t un_bx : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for PEM(0..5)_INT_SUM[UN_BX]. */
+ uint64_t reserved_10 : 1;
+ uint64_t rdlk : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for PEM(0..5)_INT_SUM[RDLK]. */
+ uint64_t crs_er : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for PEM(0..5)_INT_SUM[CRS_ER]. */
+ uint64_t crs_dr : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for PEM(0..5)_INT_SUM[CRS_DR]. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_pemx_int_ena_w1s_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_15_63 : 49;
+ uint64_t surp_down : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[SURP_DOWN]. */
+ uint64_t crs_dr : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[CRS_DR]. */
+ uint64_t crs_er : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[CRS_ER]. */
+ uint64_t rdlk : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[RDLK]. */
+ uint64_t reserved_10 : 1;
+ uint64_t un_bx : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[UN_BX]. */
+ uint64_t un_b2 : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[UN_B2]. */
+ uint64_t un_b1 : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[UN_B1]. */
+ uint64_t up_bx : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[UP_BX]. */
+ uint64_t up_b2 : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[UP_B2]. */
+ uint64_t up_b1 : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[UP_B1]. */
+ uint64_t up_b3 : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[UP_B3]. */
+ uint64_t reserved_2 : 1;
+ uint64_t se : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[SE].
+ Internal:
+ cfg_sys_err_rc. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t se : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[SE].
+ Internal:
+ cfg_sys_err_rc. */
+ uint64_t reserved_2 : 1;
+ uint64_t up_b3 : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[UP_B3]. */
+ uint64_t up_b1 : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[UP_B1]. */
+ uint64_t up_b2 : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[UP_B2]. */
+ uint64_t up_bx : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[UP_BX]. */
+ uint64_t un_b1 : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[UN_B1]. */
+ uint64_t un_b2 : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[UN_B2]. */
+ uint64_t un_bx : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[UN_BX]. */
+ uint64_t reserved_10 : 1;
+ uint64_t rdlk : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[RDLK]. */
+ uint64_t crs_er : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[CRS_ER]. */
+ uint64_t crs_dr : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[CRS_DR]. */
+ uint64_t surp_down : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for PEM(0..3)_INT_SUM[SURP_DOWN]. */
+ uint64_t reserved_15_63 : 49;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pemx_int_ena_w1s bdk_pemx_int_ena_w1s_t;
+
+static inline uint64_t BDK_PEMX_INT_ENA_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_INT_ENA_W1S(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x87e0c0000440ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000440ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0c0000440ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e00000000f0ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_INT_ENA_W1S", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_INT_ENA_W1S(a) bdk_pemx_int_ena_w1s_t
+#define bustype_BDK_PEMX_INT_ENA_W1S(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_INT_ENA_W1S(a) "PEMX_INT_ENA_W1S"
+#define device_bar_BDK_PEMX_INT_ENA_W1S(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_INT_ENA_W1S(a) (a)
+#define arguments_BDK_PEMX_INT_ENA_W1S(a) (a),-1,-1,-1
+
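+/*
+ * Usage sketch (an illustrative addition, not part of the original BDK
+ * sources): W1S enable registers let software turn on individual interrupt
+ * enables without a read-modify-write, since written 0 bits leave the
+ * corresponding enables untouched. Assuming the BDK_CSR_WRITE() helper from
+ * bdk-csr.h and hypothetical `node`/`pem` values:
+ *
+ *   bdk_pemx_int_ena_w1s_t ena = { .u = 0 };
+ *   ena.cn83xx.se = 1;  // SE is bit [1] in the CN83XX field view
+ *   BDK_CSR_WRITE(node, BDK_PEMX_INT_ENA_W1S(pem), ena.u);  // enables only SE
+ */
+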
+/**
+ * Register (NCB) pem#_int_sum
+ *
+ * PEM Interrupt Summary Register
+ * This register contains the different interrupt summary bits of the PEM.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_int_sum
+{
+ uint64_t u;
+ struct bdk_pemx_int_sum_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t ptm_rdy_val : 1; /**< [ 15: 15](R/W1C/H) PTM root is ready to have its context validated.
+
+ The MAC PTM logic does not have a permanently valid context.
+ Currently the core invalidates the responder context on two conditions:
+ * aux_clk_active (asserted when the link is in L1 states).
+ * Link speed changes.
+
+ To clear this interrupt, the host programs PCIERC_PTM_RES_LOCAL_MSB and
+ PCIERC_PTM_RES_LOCAL_LSB and then sets the context valid bit
+ (PCIERC_PTM_RES_CTL[PRES_CTX_VLD]). */
+ uint64_t reserved_0_14 : 15;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_14 : 15;
+ uint64_t ptm_rdy_val : 1; /**< [ 15: 15](R/W1C/H) PTM root is ready to have its context validated.
+
+ The MAC PTM logic does not have a permanently valid context.
+ Currently the core invalidates the responder context on two conditions:
+ * aux_clk_active (asserted when the link is in L1 states).
+ * Link speed changes.
+
+ To clear this interrupt, the host programs PCIERC_PTM_RES_LOCAL_MSB and
+ PCIERC_PTM_RES_LOCAL_LSB and then sets the context valid bit
+ (PCIERC_PTM_RES_CTL[PRES_CTX_VLD]). */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pemx_int_sum_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t ptm_rdy_val : 1; /**< [ 15: 15](R/W1C/H) PTM root is ready to have its context validated.
+
+ The MAC PTM logic does not have a permanently valid context.
+ Currently the core invalidates the responder context on two conditions:
+ * aux_clk_active (asserted when the link is in L1 states).
+ * Link speed changes.
+
+ To clear this interrupt, the host programs PCIERC_PTM_RES_LOCAL_MSB and
+ PCIERC_PTM_RES_LOCAL_LSB and then sets the context valid bit
+ (PCIERC_PTM_RES_CTL[PRES_CTX_VLD]). */
+ uint64_t un_b0 : 1; /**< [ 14: 14](R/W1C/H) Received N-TLP for BAR0 when BAR0 is disabled. */
+ uint64_t up_b0 : 1; /**< [ 13: 13](R/W1C/H) Received P-TLP for BAR0 when BAR0 is disabled. */
+ uint64_t surp_down : 1; /**< [ 12: 12](R/W1C/H) Indicates that a surprise down event is occurring in the controller. */
+ uint64_t cfg_inf : 1; /**< [ 11: 11](R/W1C/H) AP cores sent a second config read while an earlier config read was still in flight. */
+ uint64_t crs_dr : 1; /**< [ 10: 10](R/W1C/H) Had a CRS timeout when retries were disabled. */
+ uint64_t crs_er : 1; /**< [ 9: 9](R/W1C/H) Had a CRS timeout when retries were enabled. */
+ uint64_t rdlk : 1; /**< [ 8: 8](R/W1C/H) Received read lock TLP. */
+ uint64_t un_bx : 1; /**< [ 7: 7](R/W1C/H) Received N-TLP for unknown BAR. */
+ uint64_t un_b2 : 1; /**< [ 6: 6](R/W1C/H) Received N-TLP for BAR2 when BAR2 is disabled. */
+ uint64_t un_b4 : 1; /**< [ 5: 5](R/W1C/H) Received N-TLP for BAR4 when BAR4 index valid is not set. */
+ uint64_t up_bx : 1; /**< [ 4: 4](R/W1C/H) Received P-TLP for an unknown BAR. */
+ uint64_t up_b2 : 1; /**< [ 3: 3](R/W1C/H) Received P-TLP for BAR2 when BAR2 is disabled. */
+ uint64_t up_b4 : 1; /**< [ 2: 2](R/W1C/H) Received P-TLP for BAR4 when BAR4 index valid is not set. */
+ uint64_t up_b3 : 1; /**< [ 1: 1](R/W1C/H) Received P-TLP for Expansion ROM. */
+ uint64_t se : 1; /**< [ 0: 0](R/W1C/H) System error, RC mode only.
+ Internal:
+ cfg_sys_err_rc. */
+#else /* Word 0 - Little Endian */
+ uint64_t se : 1; /**< [ 0: 0](R/W1C/H) System error, RC mode only.
+ Internal:
+ cfg_sys_err_rc. */
+ uint64_t up_b3 : 1; /**< [ 1: 1](R/W1C/H) Received P-TLP for Expansion ROM. */
+ uint64_t up_b4 : 1; /**< [ 2: 2](R/W1C/H) Received P-TLP for BAR4 when BAR4 index valid is not set. */
+ uint64_t up_b2 : 1; /**< [ 3: 3](R/W1C/H) Received P-TLP for BAR2 when BAR2 is disabled. */
+ uint64_t up_bx : 1; /**< [ 4: 4](R/W1C/H) Received P-TLP for an unknown BAR. */
+ uint64_t un_b4 : 1; /**< [ 5: 5](R/W1C/H) Received N-TLP for BAR4 when BAR4 index valid is not set. */
+ uint64_t un_b2 : 1; /**< [ 6: 6](R/W1C/H) Received N-TLP for BAR2 when BAR2 is disabled. */
+ uint64_t un_bx : 1; /**< [ 7: 7](R/W1C/H) Received N-TLP for unknown BAR. */
+ uint64_t rdlk : 1; /**< [ 8: 8](R/W1C/H) Received read lock TLP. */
+ uint64_t crs_er : 1; /**< [ 9: 9](R/W1C/H) Had a CRS timeout when retries were enabled. */
+ uint64_t crs_dr : 1; /**< [ 10: 10](R/W1C/H) Had a CRS timeout when retries were disabled. */
+ uint64_t cfg_inf : 1; /**< [ 11: 11](R/W1C/H) AP cores sent a second config read while an earlier config read was still in flight. */
+ uint64_t surp_down : 1; /**< [ 12: 12](R/W1C/H) Indicates that a surprise down event is occurring in the controller. */
+ uint64_t up_b0 : 1; /**< [ 13: 13](R/W1C/H) Received P-TLP for BAR0 when BAR0 is disabled. */
+ uint64_t un_b0 : 1; /**< [ 14: 14](R/W1C/H) Received N-TLP for BAR0 when BAR0 is disabled. */
+ uint64_t ptm_rdy_val : 1; /**< [ 15: 15](R/W1C/H) PTM root is ready to have its context validated.
+
+ The MAC PTM logic does not have a permanently valid context.
+ Currently the core invalidates the responder context on two conditions:
+ * aux_clk_active (asserted when the link is in L1 states).
+ * Link speed changes.
+
+ To clear this interrupt, the host programs PCIERC_PTM_RES_LOCAL_MSB and
+ PCIERC_PTM_RES_LOCAL_LSB and then sets the context valid bit
+ (PCIERC_PTM_RES_CTL[PRES_CTX_VLD]). */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_pemx_int_sum_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t crs_dr : 1; /**< [ 13: 13](R/W1C/H) Had a CRS timeout when retries were disabled. */
+ uint64_t crs_er : 1; /**< [ 12: 12](R/W1C/H) Had a CRS timeout when retries were enabled. */
+ uint64_t rdlk : 1; /**< [ 11: 11](R/W1C/H) Received read lock TLP. */
+ uint64_t reserved_10 : 1;
+ uint64_t un_bx : 1; /**< [ 9: 9](R/W1C/H) Received N-TLP for unknown BAR. */
+ uint64_t un_b2 : 1; /**< [ 8: 8](R/W1C/H) Received N-TLP for BAR2 when BAR2 is disabled. */
+ uint64_t un_b1 : 1; /**< [ 7: 7](R/W1C/H) Received N-TLP for BAR1 when BAR1 index valid is not set. */
+ uint64_t up_bx : 1; /**< [ 6: 6](R/W1C/H) Received P-TLP for an unknown BAR. */
+ uint64_t up_b2 : 1; /**< [ 5: 5](R/W1C/H) Received P-TLP for BAR2 when BAR2 is disabled. */
+ uint64_t up_b1 : 1; /**< [ 4: 4](R/W1C/H) Received P-TLP for BAR1 when BAR1 index valid is not set. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t se : 1; /**< [ 1: 1](R/W1C/H) System error, RC mode only.
+ Internal:
+ cfg_sys_err_rc. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t se : 1; /**< [ 1: 1](R/W1C/H) System error, RC mode only.
+ Internal:
+ cfg_sys_err_rc. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t up_b1 : 1; /**< [ 4: 4](R/W1C/H) Received P-TLP for BAR1 when BAR1 index valid is not set. */
+ uint64_t up_b2 : 1; /**< [ 5: 5](R/W1C/H) Received P-TLP for BAR2 when BAR2 is disabled. */
+ uint64_t up_bx : 1; /**< [ 6: 6](R/W1C/H) Received P-TLP for an unknown BAR. */
+ uint64_t un_b1 : 1; /**< [ 7: 7](R/W1C/H) Received N-TLP for BAR1 when BAR1 index valid is not set. */
+ uint64_t un_b2 : 1; /**< [ 8: 8](R/W1C/H) Received N-TLP for BAR2 when BAR2 is disabled. */
+ uint64_t un_bx : 1; /**< [ 9: 9](R/W1C/H) Received N-TLP for unknown BAR. */
+ uint64_t reserved_10 : 1;
+ uint64_t rdlk : 1; /**< [ 11: 11](R/W1C/H) Received read lock TLP. */
+ uint64_t crs_er : 1; /**< [ 12: 12](R/W1C/H) Had a CRS timeout when retries were enabled. */
+ uint64_t crs_dr : 1; /**< [ 13: 13](R/W1C/H) Had a CRS timeout when retries were disabled. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_pemx_int_sum_cn81xx cn88xx; */
+ struct bdk_pemx_int_sum_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_15_63 : 49;
+ uint64_t surp_down : 1; /**< [ 14: 14](R/W1C/H) Indicates that a surprise down event is occurring in the controller. */
+ uint64_t crs_dr : 1; /**< [ 13: 13](R/W1C/H) Had a CRS timeout when retries were disabled. */
+ uint64_t crs_er : 1; /**< [ 12: 12](R/W1C/H) Had a CRS timeout when retries were enabled. */
+ uint64_t rdlk : 1; /**< [ 11: 11](R/W1C/H) Received read lock TLP. */
+ uint64_t reserved_10 : 1;
+ uint64_t un_bx : 1; /**< [ 9: 9](R/W1C/H) Received N-TLP for unknown BAR. */
+ uint64_t un_b2 : 1; /**< [ 8: 8](R/W1C/H) Received N-TLP for BAR2 when BAR2 is disabled. */
+ uint64_t un_b1 : 1; /**< [ 7: 7](R/W1C/H) Received N-TLP for BAR1 when BAR1 index valid is not set. */
+ uint64_t up_bx : 1; /**< [ 6: 6](R/W1C/H) Received P-TLP for an unknown BAR. */
+ uint64_t up_b2 : 1; /**< [ 5: 5](R/W1C/H) Received P-TLP for BAR2 when BAR2 is disabled. */
+ uint64_t up_b1 : 1; /**< [ 4: 4](R/W1C/H) Received P-TLP for BAR1 when BAR1 index valid is not set. */
+ uint64_t up_b3 : 1; /**< [ 3: 3](R/W1C/H) Received P-TLP for Expansion ROM (BAR3 EP Mode). */
+ uint64_t reserved_2 : 1;
+ uint64_t se : 1; /**< [ 1: 1](R/W1C/H) System error, RC mode only.
+ Internal:
+ cfg_sys_err_rc. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t se : 1; /**< [ 1: 1](R/W1C/H) System error, RC mode only.
+ Internal:
+ cfg_sys_err_rc. */
+ uint64_t reserved_2 : 1;
+ uint64_t up_b3 : 1; /**< [ 3: 3](R/W1C/H) Received P-TLP for Expansion ROM (BAR3 EP Mode). */
+ uint64_t up_b1 : 1; /**< [ 4: 4](R/W1C/H) Received P-TLP for BAR1 when BAR1 index valid is not set. */
+ uint64_t up_b2 : 1; /**< [ 5: 5](R/W1C/H) Received P-TLP for BAR2 when BAR2 is disabled. */
+ uint64_t up_bx : 1; /**< [ 6: 6](R/W1C/H) Received P-TLP for an unknown BAR. */
+ uint64_t un_b1 : 1; /**< [ 7: 7](R/W1C/H) Received N-TLP for BAR1 when BAR1 index valid is not set. */
+ uint64_t un_b2 : 1; /**< [ 8: 8](R/W1C/H) Received N-TLP for BAR2 when BAR2 is disabled. */
+ uint64_t un_bx : 1; /**< [ 9: 9](R/W1C/H) Received N-TLP for unknown BAR. */
+ uint64_t reserved_10 : 1;
+ uint64_t rdlk : 1; /**< [ 11: 11](R/W1C/H) Received read lock TLP. */
+ uint64_t crs_er : 1; /**< [ 12: 12](R/W1C/H) Had a CRS timeout when retries were enabled. */
+ uint64_t crs_dr : 1; /**< [ 13: 13](R/W1C/H) Had a CRS timeout when retries were disabled. */
+ uint64_t surp_down : 1; /**< [ 14: 14](R/W1C/H) Indicates that a surprise down event is occurring in the controller. */
+ uint64_t reserved_15_63 : 49;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pemx_int_sum bdk_pemx_int_sum_t;
+
+static inline uint64_t BDK_PEMX_INT_SUM(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_INT_SUM(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x87e0c0000428ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000428ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0c0000428ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e00000000d8ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_INT_SUM", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_INT_SUM(a) bdk_pemx_int_sum_t
+#define bustype_BDK_PEMX_INT_SUM(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_INT_SUM(a) "PEMX_INT_SUM"
+#define device_bar_BDK_PEMX_INT_SUM(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_INT_SUM(a) (a)
+#define arguments_BDK_PEMX_INT_SUM(a) (a),-1,-1,-1
+
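+/*
+ * Usage sketch (an illustrative addition, not part of the original BDK
+ * sources): PEM()_INT_SUM bits are W1C, so the usual service pattern is to
+ * read the summary, dispatch on the bits found, and write the same value
+ * back to acknowledge exactly those bits. Assuming the BDK_CSR_READ()/
+ * BDK_CSR_WRITE() helpers from bdk-csr.h:
+ *
+ *   uint64_t sum = BDK_CSR_READ(node, BDK_PEMX_INT_SUM(pem));
+ *   // ... handle whichever interrupt bits are set in sum ...
+ *   BDK_CSR_WRITE(node, BDK_PEMX_INT_SUM(pem), sum);  // W1C: clear what we saw
+ */
+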
+/**
+ * Register (NCB) pem#_int_sum_w1s
+ *
+ * PEM Interrupt Summary Register
+ * This register sets interrupt bits.
+ */
+union bdk_pemx_int_sum_w1s
+{
+ uint64_t u;
+ struct bdk_pemx_int_sum_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t ptm_rdy_val : 1; /**< [ 15: 15](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[PTM_RDY_VAL]. */
+ uint64_t reserved_0_14 : 15;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_14 : 15;
+ uint64_t ptm_rdy_val : 1; /**< [ 15: 15](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[PTM_RDY_VAL]. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pemx_int_sum_w1s_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t ptm_rdy_val : 1; /**< [ 15: 15](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[PTM_RDY_VAL]. */
+ uint64_t un_b0 : 1; /**< [ 14: 14](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[UN_B0]. */
+ uint64_t up_b0 : 1; /**< [ 13: 13](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[UP_B0]. */
+ uint64_t surp_down : 1; /**< [ 12: 12](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[SURP_DOWN]. */
+ uint64_t cfg_inf : 1; /**< [ 11: 11](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[CFG_INF]. */
+ uint64_t crs_dr : 1; /**< [ 10: 10](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[CRS_DR]. */
+ uint64_t crs_er : 1; /**< [ 9: 9](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[CRS_ER]. */
+ uint64_t rdlk : 1; /**< [ 8: 8](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[RDLK]. */
+ uint64_t un_bx : 1; /**< [ 7: 7](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[UN_BX]. */
+ uint64_t un_b2 : 1; /**< [ 6: 6](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[UN_B2]. */
+ uint64_t un_b4 : 1; /**< [ 5: 5](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[UN_B4]. */
+ uint64_t up_bx : 1; /**< [ 4: 4](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[UP_BX]. */
+ uint64_t up_b2 : 1; /**< [ 3: 3](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[UP_B2]. */
+ uint64_t up_b4 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[UP_B4]. */
+ uint64_t up_b3 : 1; /**< [ 1: 1](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[UP_B3]. */
+ uint64_t se : 1; /**< [ 0: 0](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[SE].
+ Internal:
+ cfg_sys_err_rc. */
+#else /* Word 0 - Little Endian */
+ uint64_t se : 1; /**< [ 0: 0](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[SE].
+ Internal:
+ cfg_sys_err_rc. */
+ uint64_t up_b3 : 1; /**< [ 1: 1](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[UP_B3]. */
+ uint64_t up_b4 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[UP_B4]. */
+ uint64_t up_b2 : 1; /**< [ 3: 3](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[UP_B2]. */
+ uint64_t up_bx : 1; /**< [ 4: 4](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[UP_BX]. */
+ uint64_t un_b4 : 1; /**< [ 5: 5](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[UN_B4]. */
+ uint64_t un_b2 : 1; /**< [ 6: 6](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[UN_B2]. */
+ uint64_t un_bx : 1; /**< [ 7: 7](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[UN_BX]. */
+ uint64_t rdlk : 1; /**< [ 8: 8](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[RDLK]. */
+ uint64_t crs_er : 1; /**< [ 9: 9](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[CRS_ER]. */
+ uint64_t crs_dr : 1; /**< [ 10: 10](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[CRS_DR]. */
+ uint64_t cfg_inf : 1; /**< [ 11: 11](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[CFG_INF]. */
+ uint64_t surp_down : 1; /**< [ 12: 12](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[SURP_DOWN]. */
+ uint64_t up_b0 : 1; /**< [ 13: 13](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[UP_B0]. */
+ uint64_t un_b0 : 1; /**< [ 14: 14](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[UN_B0]. */
+ uint64_t ptm_rdy_val : 1; /**< [ 15: 15](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[PTM_RDY_VAL]. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_pemx_int_sum_w1s_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t crs_dr : 1; /**< [ 13: 13](R/W1S/H) Reads or sets PEM(0..2)_INT_SUM[CRS_DR]. */
+ uint64_t crs_er : 1; /**< [ 12: 12](R/W1S/H) Reads or sets PEM(0..2)_INT_SUM[CRS_ER]. */
+ uint64_t rdlk : 1; /**< [ 11: 11](R/W1S/H) Reads or sets PEM(0..2)_INT_SUM[RDLK]. */
+ uint64_t reserved_10 : 1;
+ uint64_t un_bx : 1; /**< [ 9: 9](R/W1S/H) Reads or sets PEM(0..2)_INT_SUM[UN_BX]. */
+ uint64_t un_b2 : 1; /**< [ 8: 8](R/W1S/H) Reads or sets PEM(0..2)_INT_SUM[UN_B2]. */
+ uint64_t un_b1 : 1; /**< [ 7: 7](R/W1S/H) Reads or sets PEM(0..2)_INT_SUM[UN_B1]. */
+ uint64_t up_bx : 1; /**< [ 6: 6](R/W1S/H) Reads or sets PEM(0..2)_INT_SUM[UP_BX]. */
+ uint64_t up_b2 : 1; /**< [ 5: 5](R/W1S/H) Reads or sets PEM(0..2)_INT_SUM[UP_B2]. */
+ uint64_t up_b1 : 1; /**< [ 4: 4](R/W1S/H) Reads or sets PEM(0..2)_INT_SUM[UP_B1]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t se : 1; /**< [ 1: 1](R/W1S/H) Reads or sets PEM(0..2)_INT_SUM[SE].
+ Internal:
+ cfg_sys_err_rc. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t se : 1; /**< [ 1: 1](R/W1S/H) Reads or sets PEM(0..2)_INT_SUM[SE].
+ Internal:
+ cfg_sys_err_rc. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t up_b1 : 1; /**< [ 4: 4](R/W1S/H) Reads or sets PEM(0..2)_INT_SUM[UP_B1]. */
+ uint64_t up_b2 : 1; /**< [ 5: 5](R/W1S/H) Reads or sets PEM(0..2)_INT_SUM[UP_B2]. */
+ uint64_t up_bx : 1; /**< [ 6: 6](R/W1S/H) Reads or sets PEM(0..2)_INT_SUM[UP_BX]. */
+ uint64_t un_b1 : 1; /**< [ 7: 7](R/W1S/H) Reads or sets PEM(0..2)_INT_SUM[UN_B1]. */
+ uint64_t un_b2 : 1; /**< [ 8: 8](R/W1S/H) Reads or sets PEM(0..2)_INT_SUM[UN_B2]. */
+ uint64_t un_bx : 1; /**< [ 9: 9](R/W1S/H) Reads or sets PEM(0..2)_INT_SUM[UN_BX]. */
+ uint64_t reserved_10 : 1;
+ uint64_t rdlk : 1; /**< [ 11: 11](R/W1S/H) Reads or sets PEM(0..2)_INT_SUM[RDLK]. */
+ uint64_t crs_er : 1; /**< [ 12: 12](R/W1S/H) Reads or sets PEM(0..2)_INT_SUM[CRS_ER]. */
+ uint64_t crs_dr : 1; /**< [ 13: 13](R/W1S/H) Reads or sets PEM(0..2)_INT_SUM[CRS_DR]. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_pemx_int_sum_w1s_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t crs_dr : 1; /**< [ 13: 13](R/W1S/H) Reads or sets PEM(0..5)_INT_SUM[CRS_DR]. */
+ uint64_t crs_er : 1; /**< [ 12: 12](R/W1S/H) Reads or sets PEM(0..5)_INT_SUM[CRS_ER]. */
+ uint64_t rdlk : 1; /**< [ 11: 11](R/W1S/H) Reads or sets PEM(0..5)_INT_SUM[RDLK]. */
+ uint64_t reserved_10 : 1;
+ uint64_t un_bx : 1; /**< [ 9: 9](R/W1S/H) Reads or sets PEM(0..5)_INT_SUM[UN_BX]. */
+ uint64_t un_b2 : 1; /**< [ 8: 8](R/W1S/H) Reads or sets PEM(0..5)_INT_SUM[UN_B2]. */
+ uint64_t un_b1 : 1; /**< [ 7: 7](R/W1S/H) Reads or sets PEM(0..5)_INT_SUM[UN_B1]. */
+ uint64_t up_bx : 1; /**< [ 6: 6](R/W1S/H) Reads or sets PEM(0..5)_INT_SUM[UP_BX]. */
+ uint64_t up_b2 : 1; /**< [ 5: 5](R/W1S/H) Reads or sets PEM(0..5)_INT_SUM[UP_B2]. */
+ uint64_t up_b1 : 1; /**< [ 4: 4](R/W1S/H) Reads or sets PEM(0..5)_INT_SUM[UP_B1]. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t se : 1; /**< [ 1: 1](R/W1S/H) Reads or sets PEM(0..5)_INT_SUM[SE].
+ Internal:
+ cfg_sys_err_rc. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t se : 1; /**< [ 1: 1](R/W1S/H) Reads or sets PEM(0..5)_INT_SUM[SE].
+ Internal:
+ cfg_sys_err_rc. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t up_b1 : 1; /**< [ 4: 4](R/W1S/H) Reads or sets PEM(0..5)_INT_SUM[UP_B1]. */
+ uint64_t up_b2 : 1; /**< [ 5: 5](R/W1S/H) Reads or sets PEM(0..5)_INT_SUM[UP_B2]. */
+ uint64_t up_bx : 1; /**< [ 6: 6](R/W1S/H) Reads or sets PEM(0..5)_INT_SUM[UP_BX]. */
+ uint64_t un_b1 : 1; /**< [ 7: 7](R/W1S/H) Reads or sets PEM(0..5)_INT_SUM[UN_B1]. */
+ uint64_t un_b2 : 1; /**< [ 8: 8](R/W1S/H) Reads or sets PEM(0..5)_INT_SUM[UN_B2]. */
+ uint64_t un_bx : 1; /**< [ 9: 9](R/W1S/H) Reads or sets PEM(0..5)_INT_SUM[UN_BX]. */
+ uint64_t reserved_10 : 1;
+ uint64_t rdlk : 1; /**< [ 11: 11](R/W1S/H) Reads or sets PEM(0..5)_INT_SUM[RDLK]. */
+ uint64_t crs_er : 1; /**< [ 12: 12](R/W1S/H) Reads or sets PEM(0..5)_INT_SUM[CRS_ER]. */
+ uint64_t crs_dr : 1; /**< [ 13: 13](R/W1S/H) Reads or sets PEM(0..5)_INT_SUM[CRS_DR]. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_pemx_int_sum_w1s_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_15_63 : 49;
+ uint64_t surp_down : 1; /**< [ 14: 14](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[SURP_DOWN]. */
+ uint64_t crs_dr : 1; /**< [ 13: 13](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[CRS_DR]. */
+ uint64_t crs_er : 1; /**< [ 12: 12](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[CRS_ER]. */
+ uint64_t rdlk : 1; /**< [ 11: 11](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[RDLK]. */
+ uint64_t reserved_10 : 1;
+ uint64_t un_bx : 1; /**< [ 9: 9](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[UN_BX]. */
+ uint64_t un_b2 : 1; /**< [ 8: 8](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[UN_B2]. */
+ uint64_t un_b1 : 1; /**< [ 7: 7](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[UN_B1]. */
+ uint64_t up_bx : 1; /**< [ 6: 6](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[UP_BX]. */
+ uint64_t up_b2 : 1; /**< [ 5: 5](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[UP_B2]. */
+ uint64_t up_b1 : 1; /**< [ 4: 4](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[UP_B1]. */
+ uint64_t up_b3 : 1; /**< [ 3: 3](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[UP_B3]. */
+ uint64_t reserved_2 : 1;
+ uint64_t se : 1; /**< [ 1: 1](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[SE].
+ Internal:
+ cfg_sys_err_rc. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t se : 1; /**< [ 1: 1](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[SE].
+ Internal:
+ cfg_sys_err_rc. */
+ uint64_t reserved_2 : 1;
+ uint64_t up_b3 : 1; /**< [ 3: 3](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[UP_B3]. */
+ uint64_t up_b1 : 1; /**< [ 4: 4](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[UP_B1]. */
+ uint64_t up_b2 : 1; /**< [ 5: 5](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[UP_B2]. */
+ uint64_t up_bx : 1; /**< [ 6: 6](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[UP_BX]. */
+ uint64_t un_b1 : 1; /**< [ 7: 7](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[UN_B1]. */
+ uint64_t un_b2 : 1; /**< [ 8: 8](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[UN_B2]. */
+ uint64_t un_bx : 1; /**< [ 9: 9](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[UN_BX]. */
+ uint64_t reserved_10 : 1;
+ uint64_t rdlk : 1; /**< [ 11: 11](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[RDLK]. */
+ uint64_t crs_er : 1; /**< [ 12: 12](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[CRS_ER]. */
+ uint64_t crs_dr : 1; /**< [ 13: 13](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[CRS_DR]. */
+ uint64_t surp_down : 1; /**< [ 14: 14](R/W1S/H) Reads or sets PEM(0..3)_INT_SUM[SURP_DOWN]. */
+ uint64_t reserved_15_63 : 49;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pemx_int_sum_w1s bdk_pemx_int_sum_w1s_t;
+
+static inline uint64_t BDK_PEMX_INT_SUM_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_INT_SUM_W1S(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x87e0c0000430ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000430ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0c0000430ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e00000000e0ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_INT_SUM_W1S", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_INT_SUM_W1S(a) bdk_pemx_int_sum_w1s_t
+#define bustype_BDK_PEMX_INT_SUM_W1S(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_INT_SUM_W1S(a) "PEMX_INT_SUM_W1S"
+#define device_bar_BDK_PEMX_INT_SUM_W1S(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_INT_SUM_W1S(a) (a)
+#define arguments_BDK_PEMX_INT_SUM_W1S(a) (a),-1,-1,-1
+
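+/*
+ * Usage sketch (an illustrative addition, not part of the original BDK
+ * sources): because writing a 1 here raises the corresponding PEM()_INT_SUM
+ * bit, the W1S alias can be used to inject an interrupt for testing:
+ *
+ *   bdk_pemx_int_sum_w1s_t w1s = { .u = 0 };
+ *   w1s.cn83xx.rdlk = 1;  // simulate receipt of a read lock TLP (CN83XX view)
+ *   BDK_CSR_WRITE(node, BDK_PEMX_INT_SUM_W1S(pem), w1s.u);
+ */
+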
+/**
+ * Register (NCB) pem#_latency_pc
+ *
+ * PEM Latency Count Register
+ * This register contains read latency count for debugging purposes.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_latency_pc
+{
+ uint64_t u;
+ struct bdk_pemx_latency_pc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t latency : 64; /**< [ 63: 0](RO/H) Total read latency count, in units of coprocessor clocks, measured from
+ the SLI read request until the first data is returned from remote memory,
+ aggregated across all non-masked SWI tags. */
+#else /* Word 0 - Little Endian */
+ uint64_t latency : 64; /**< [ 63: 0](RO/H) Total read latency count, in units of coprocessor clocks, measured from
+ the SLI read request until the first data is returned from remote memory,
+ aggregated across all non-masked SWI tags. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_latency_pc_s cn8; */
+ struct bdk_pemx_latency_pc_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t latency : 64; /**< [ 63: 0](R/W/H) Total read latency count in 10 ns units measured from an internal point in PEM
+ after coming from either NCBO (but prior to any merging logic) or EBO, to an
+ internal point in PEM where the corresponding completion is sent to the NCBI
+ or EBI interface logic. PEM()_LATENCY_PC_CTL[EBO_SEL] controls which
+ outbound bus has its reads latency tracked. This register can only be written
+ by software when PEM()_LATENCY_PC_CTL[ACTIVE] is clear. */
+#else /* Word 0 - Little Endian */
+ uint64_t latency : 64; /**< [ 63: 0](R/W/H) Total read latency count in 10 ns units measured from an internal point in PEM
+ after coming from either NCBO (but prior to any merging logic) or EBO, to an
+ internal point in PEM where the corresponding completion is sent to the NCBI
+ or EBI interface logic. PEM()_LATENCY_PC_CTL[EBO_SEL] controls which
+ outbound bus has its reads latency tracked. This register can only be written
+ by software when PEM()_LATENCY_PC_CTL[ACTIVE] is clear. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_pemx_latency_pc bdk_pemx_latency_pc_t;
+
+static inline uint64_t BDK_PEMX_LATENCY_PC(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_LATENCY_PC(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000490ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000108ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_LATENCY_PC", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_LATENCY_PC(a) bdk_pemx_latency_pc_t
+#define bustype_BDK_PEMX_LATENCY_PC(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_LATENCY_PC(a) "PEMX_LATENCY_PC"
+#define device_bar_BDK_PEMX_LATENCY_PC(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_LATENCY_PC(a) (a)
+#define arguments_BDK_PEMX_LATENCY_PC(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) pem#_latency_pc_ctl
+ *
+ * PEM Latency Control Register
+ * This register controls read latency monitoring for debugging purposes.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_latency_pc_ctl
+{
+ uint64_t u;
+ struct bdk_pemx_latency_pc_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_3_63 : 61;
+ uint64_t ebo_sel : 1; /**< [ 2: 2](R/W) If set, latency will be measured on EBO reads instead of NCBO reads. */
+ uint64_t complete : 1; /**< [ 1: 1](RO/H) When software causes a rising edge on [ACTIVE], hardware will clear this
+ bit. Later, when software clears [ACTIVE], hardware will wait for all
+ outstanding reads to get their first data returned and then set this bit to
+ indicate that measurement operations are completed. */
+ uint64_t active : 1; /**< [ 0: 0](R/W) When a software write causes a rising edge on this bit, PEM begins a
+ measurement, which includes PEM clearing PEM()_LATENCY_PC and PEM()_READS_PC
+ to reset all counting, as well as PEM clearing PEM()_LATENCY_PC_CTL[COMPLETE].
+ Only SLI SWI reads that occur after this rising edge are considered. When
+ software wants to halt measurement, it can clear this bit, which blocks
+ further reads from being considered. Once software reads
+ PEM()_LATENCY_PC_CTL[COMPLETE] as set, all measurement is complete and
+ PEM()_LATENCY_PC and PEM()_READS_PC reflect a completely accurate and
+ stable set of values. */
+#else /* Word 0 - Little Endian */
+ uint64_t active : 1; /**< [ 0: 0](R/W) When a software write causes a rising edge on this bit, PEM begins a
+ measurement, which includes PEM clearing PEM()_LATENCY_PC and PEM()_READS_PC
+ to reset all counting, as well as PEM clearing PEM()_LATENCY_PC_CTL[COMPLETE].
+ Only SLI SWI reads that occur after this rising edge are considered. When
+ software wants to halt measurement, it can clear this bit, which blocks
+ further reads from being considered. Once software reads
+ PEM()_LATENCY_PC_CTL[COMPLETE] as set, all measurement is complete and
+ PEM()_LATENCY_PC and PEM()_READS_PC reflect a completely accurate and
+ stable set of values. */
+ uint64_t complete : 1; /**< [ 1: 1](RO/H) When software causes a rising edge on [ACTIVE], hardware will clear this
+ bit. Later, when software clears [ACTIVE], hardware will wait for all
+ outstanding reads to get their first data returned and then set this bit to
+ indicate that measurement operations are completed. */
+ uint64_t ebo_sel : 1; /**< [ 2: 2](R/W) If set, latency will be measured on EBO reads instead of NCBO reads. */
+ uint64_t reserved_3_63 : 61;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pemx_latency_pc_ctl_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t complete : 1; /**< [ 1: 1](RO/H) When software causes a rising edge on [ACTIVE], hardware will clear this
+ bit. Later, when software clears [ACTIVE], hardware will wait for all
+ outstanding reads to get their first data returned and then set this bit to
+ indicate that measurement operations are completed. */
+ uint64_t active : 1; /**< [ 0: 0](R/W) When a software write causes a rising edge on this bit, PEM begins a
+ measurement, which includes PEM clearing PEM()_LATENCY_PC and PEM()_READS_PC
+ to reset all counting, as well as PEM clearing PEM()_LATENCY_PC_CTL[COMPLETE].
+ Only SLI SWI reads that occur after this rising edge are considered. When
+ software wants to halt measurement, it can clear this bit, which blocks
+ further reads from being considered. Once software reads
+ PEM()_LATENCY_PC_CTL[COMPLETE] as set, all measurement is complete and
+ PEM()_LATENCY_PC and PEM()_READS_PC reflect a completely accurate and
+ stable set of values. */
+#else /* Word 0 - Little Endian */
+ uint64_t active : 1; /**< [ 0: 0](R/W) When a software write causes a rising edge on this bit, PEM begins a
+ measurement, which includes PEM clearing PEM()_LATENCY_PC and PEM()_READS_PC
+ to reset all counting, as well as PEM clearing PEM()_LATENCY_PC_CTL[COMPLETE].
+ Only SLI SWI reads that occur after this rising edge are considered. When
+ software wants to halt measurement, it can clear this bit, which blocks
+ further reads from being considered. Once software reads
+ PEM()_LATENCY_PC_CTL[COMPLETE] as set, all measurement is complete and
+ PEM()_LATENCY_PC and PEM()_READS_PC reflect a completely accurate and
+ stable set of values. */
+ uint64_t complete : 1; /**< [ 1: 1](RO/H) When software causes a rising edge on [ACTIVE], hardware will clear this
+ bit. Later, when software clears [ACTIVE], hardware will wait for all
+ outstanding reads to get their first data returned and then set this bit to
+ indicate that measurement operations are completed. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_pemx_latency_pc_ctl_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_3_63 : 61;
+ uint64_t ebo_sel : 1; /**< [ 2: 2](R/W) If set, latency will be measured on EBO reads instead of NCBO reads. */
+ uint64_t complete : 1; /**< [ 1: 1](RO/H) When software causes a rising edge on [ACTIVE], hardware will clear this
+ bit. Later, when software clears [ACTIVE], hardware will wait for all
+ outstanding reads to get their first data returned and then set this bit to
+ indicate that measurement operations are completed. */
+ uint64_t active : 1; /**< [ 0: 0](R/W) When a software write causes a rising edge on [ACTIVE], PEM will begin a
+ measurement using existing values for PEM()_LATENCY_PC and PEM()_READS_PC
+ as well as clear [COMPLETE]. Only NCBO reads that occur
+ after this rising edge will be added into the results. When software wants
+ to halt measurement, it can clear this bit which will block further reads
+ from being considered. When software reads [COMPLETE] as set,
+ it can know that all measurement is completed and PEM()_LATENCY_PC and
+ PEM()_READS_PC reflect a completely accurate and stable set of values.
+
+ Note that [ACTIVE] does not need to be cleared in order to read
+ PEM()_LATENCY_PC and PEM()_READS_PC to calculate average latency during active
+ processing, but there will be some small error.
+
+ Note that because software can write PEM()_LATENCY_PC and PEM()_READS_PC,
+ PEM will not clear these values when a software write causes a rising edge on
+ [ACTIVE]. Instead, software must initialize these two registers (probably
+ both to 0) prior to starting a measurement. */
+#else /* Word 0 - Little Endian */
+ uint64_t active : 1; /**< [ 0: 0](R/W) When a software write causes a rising edge on [ACTIVE], PEM will begin a
+ measurement using existing values for PEM()_LATENCY_PC and PEM()_READS_PC
+ as well as clear [COMPLETE]. Only NCBO reads that occur
+ after this rising edge will be added into the results. When software wants
+ to halt measurement, it can clear this bit which will block further reads
+ from being considered. When software reads [COMPLETE] as set,
+ it can know that all measurement is completed and PEM()_LATENCY_PC and
+ PEM()_READS_PC reflect a completely accurate and stable set of values.
+
+ Note that [ACTIVE] does not need to be cleared in order to read
+ PEM()_LATENCY_PC and PEM()_READS_PC to calculate average latency during active
+ processing, but there will be some small error.
+
+ Note that because software can write PEM()_LATENCY_PC and PEM()_READS_PC,
+ PEM will not clear these values when a software write causes a rising edge on
+ [ACTIVE]. Instead, software must initialize these two registers (probably
+ both to 0) prior to starting a measurement. */
+ uint64_t complete : 1; /**< [ 1: 1](RO/H) When software causes a rising edge on [ACTIVE], hardware will clear this
+ bit. Later, when software clears [ACTIVE], hardware will wait for all
+ outstanding reads to get their first data returned and then set this bit to
+ indicate that measurement operations are completed. */
+ uint64_t ebo_sel : 1; /**< [ 2: 2](R/W) If set, latency will be measured on EBO reads instead of NCBO reads. */
+ uint64_t reserved_3_63 : 61;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_pemx_latency_pc_ctl bdk_pemx_latency_pc_ctl_t;
+
+static inline uint64_t BDK_PEMX_LATENCY_PC_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_LATENCY_PC_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c00004c0ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000118ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_LATENCY_PC_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_LATENCY_PC_CTL(a) bdk_pemx_latency_pc_ctl_t
+#define bustype_BDK_PEMX_LATENCY_PC_CTL(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_LATENCY_PC_CTL(a) "PEMX_LATENCY_PC_CTL"
+#define device_bar_BDK_PEMX_LATENCY_PC_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_LATENCY_PC_CTL(a) (a)
+#define arguments_BDK_PEMX_LATENCY_PC_CTL(a) (a),-1,-1,-1
+
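+/*
+ * Measurement sketch (an illustrative addition, not part of the original BDK
+ * sources), following the CN9XXX procedure described under [ACTIVE] above.
+ * BDK_PEMX_READS_PC() is assumed here as the accessor for the companion
+ * PEM()_READS_PC counter that these descriptions reference; `node` and `pem`
+ * are hypothetical. [EBO_SEL] is left clear, so NCBO reads are measured:
+ *
+ *   BDK_CSR_WRITE(node, BDK_PEMX_LATENCY_PC(pem), 0);  // software must zero
+ *   BDK_CSR_WRITE(node, BDK_PEMX_READS_PC(pem), 0);    // both counters first
+ *   BDK_CSR_WRITE(node, BDK_PEMX_LATENCY_PC_CTL(pem), 0x1); // rising edge on ACTIVE
+ *   // ... run the read traffic to be measured ...
+ *   BDK_CSR_WRITE(node, BDK_PEMX_LATENCY_PC_CTL(pem), 0x0); // halt measurement
+ *   while (!(BDK_CSR_READ(node, BDK_PEMX_LATENCY_PC_CTL(pem)) & 0x2))
+ *       ;  // poll COMPLETE: all outstanding reads have returned first data
+ *   uint64_t lat = BDK_CSR_READ(node, BDK_PEMX_LATENCY_PC(pem)); // 10 ns units
+ *   uint64_t rds = BDK_CSR_READ(node, BDK_PEMX_READS_PC(pem));
+ *   uint64_t avg_ns = rds ? (lat * 10) / rds : 0;  // average latency per read
+ */
+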
+/**
+ * Register (RSL) pem#_latency_pc_mask#
+ *
+ * PEM Latency Count Mask Register
+ * This register contains read latency masking for debugging purposes.
+ */
+union bdk_pemx_latency_pc_maskx
+{
+ uint64_t u;
+ struct bdk_pemx_latency_pc_maskx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t mask : 64; /**< [ 63: 0](R/W) Each bit of MASK corresponds to one SWI tag value. PEM()_LATENCY_PC_MASK(0)
+ corresponds to SWI tags [63:0], PEM()_LATENCY_PC_MASK(1) corresponds to
+ SWI tags [127:64]. If a bit of [MASK] is set, then its SWI tag will NOT be
+ included in the values reported in PEM()_LATENCY_PC and PEM()_READS_PC. */
+#else /* Word 0 - Little Endian */
+ uint64_t mask : 64; /**< [ 63: 0](R/W) Each bit of MASK corresponds to one SWI tag value. PEM()_LATENCY_PC_MASK(0)
+ corresponds to SWI tags [63:0], PEM()_LATENCY_PC_MASK(1) corresponds to
+ SWI tags [127:64]. If a bit of [MASK] is set, then its SWI tag will NOT be
+ included in the values reported in PEM()_LATENCY_PC and PEM()_READS_PC. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_latency_pc_maskx_s cn; */
+};
+typedef union bdk_pemx_latency_pc_maskx bdk_pemx_latency_pc_maskx_t;
+
+static inline uint64_t BDK_PEMX_LATENCY_PC_MASKX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_LATENCY_PC_MASKX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=1)))
+ return 0x87e0c00004a0ll + 0x1000000ll * ((a) & 0x3) + 0x100ll * ((b) & 0x1);
+ __bdk_csr_fatal("PEMX_LATENCY_PC_MASKX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_PEMX_LATENCY_PC_MASKX(a,b) bdk_pemx_latency_pc_maskx_t
+#define bustype_BDK_PEMX_LATENCY_PC_MASKX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_PEMX_LATENCY_PC_MASKX(a,b) "PEMX_LATENCY_PC_MASKX"
+#define device_bar_BDK_PEMX_LATENCY_PC_MASKX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_LATENCY_PC_MASKX(a,b) (a)
+#define arguments_BDK_PEMX_LATENCY_PC_MASKX(a,b) (a),(b),-1,-1
+
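+/*
+ * Usage sketch (an illustrative addition, not part of the original BDK
+ * sources): to exclude one SWI tag from the latency statistics, set its bit
+ * in the mask register pair; tag/64 selects the register and tag%64 the bit.
+ * A read-modify-write preserves any tags already masked:
+ *
+ *   unsigned tag = 70;  // hypothetical SWI tag: lands in MASK(1), bit 6
+ *   uint64_t m = BDK_CSR_READ(node, BDK_PEMX_LATENCY_PC_MASKX(pem, tag >> 6));
+ *   m |= 1ull << (tag & 63);
+ *   BDK_CSR_WRITE(node, BDK_PEMX_LATENCY_PC_MASKX(pem, tag >> 6), m);
+ */
+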
+/**
+ * Register (NCB) pem#_ltr_latency
+ *
+ * PEM Latency Tolerance Reporting Register
+ * This register contains the current LTR values reported and in use
+ * by the downstream device.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on MAC reset.
+ */
+union bdk_pemx_ltr_latency
+{
+ uint64_t u;
+ struct bdk_pemx_ltr_latency_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t latency : 32; /**< [ 31: 0](RO/H) Reflects the captured LTR values from a received LTR message in RC mode. */
+#else /* Word 0 - Little Endian */
+ uint64_t latency : 32; /**< [ 31: 0](RO/H) Reflects the captured LTR values from a received LTR message in RC mode. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_ltr_latency_s cn; */
+};
+typedef union bdk_pemx_ltr_latency bdk_pemx_ltr_latency_t;
+
+static inline uint64_t BDK_PEMX_LTR_LATENCY(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_LTR_LATENCY(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e00000000b0ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_LTR_LATENCY", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_LTR_LATENCY(a) bdk_pemx_ltr_latency_t
+#define bustype_BDK_PEMX_LTR_LATENCY(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_LTR_LATENCY(a) "PEMX_LTR_LATENCY"
+#define device_bar_BDK_PEMX_LTR_LATENCY(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_LTR_LATENCY(a) (a)
+#define arguments_BDK_PEMX_LTR_LATENCY(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) pem#_ltr_val#
+ *
+ * PEM Latency Tolerance Reporting Register
+ * This register contains the values to put into the latency tolerance reporting (LTR) message
+ * when triggered by hardware. EP mode only.
+ *
+ * This register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on MAC reset.
+ */
+union bdk_pemx_ltr_valx
+{
+ uint64_t u;
+ struct bdk_pemx_ltr_valx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t ns_lat : 16; /**< [ 31: 16](R/W) No-snoop latency value to put into the LTR message when triggered by hardware. */
+ uint64_t snoop_lat : 16; /**< [ 15: 0](R/W) Snoop latency value to put into the LTR message when triggered by hardware. */
+#else /* Word 0 - Little Endian */
+ uint64_t snoop_lat : 16; /**< [ 15: 0](R/W) Snoop latency value to put into the LTR message when triggered by hardware. */
+ uint64_t ns_lat : 16; /**< [ 31: 16](R/W) No-snoop latency value to put into the LTR message when triggered by hardware. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_ltr_valx_s cn; */
+};
+typedef union bdk_pemx_ltr_valx bdk_pemx_ltr_valx_t;
+
+static inline uint64_t BDK_PEMX_LTR_VALX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_LTR_VALX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=3) && (b<=1)))
+ return 0x8e00000000a0ll + 0x1000000000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("PEMX_LTR_VALX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_PEMX_LTR_VALX(a,b) bdk_pemx_ltr_valx_t
+#define bustype_BDK_PEMX_LTR_VALX(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_LTR_VALX(a,b) "PEMX_LTR_VALX"
+#define device_bar_BDK_PEMX_LTR_VALX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_LTR_VALX(a,b) (a)
+#define arguments_BDK_PEMX_LTR_VALX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) pem#_mac_lane#_eq
+ *
+ * PEM MAC Lane RX/TX Equalization Info Register
+ * This register specifies the per lane RX/TX Equalization values advertised
+ * by the link partner.
+ *
+ * This register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on MAC reset.
+ */
+union bdk_pemx_mac_lanex_eq
+{
+ uint64_t u;
+ struct bdk_pemx_mac_lanex_eq_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_50_63 : 14;
+ uint64_t pset_coef : 18; /**< [ 49: 32](RO/H) Presets and coefficients chosen by the PEM. */
+ uint64_t reserved_15_31 : 17;
+ uint64_t rxphint : 3; /**< [ 14: 12](RO/H) Represents the RX equalization preset hint
+ for the receiver. */
+ uint64_t lf : 6; /**< [ 11: 6](RO/H) Represents the low frequency value of the remote transmitter
+ captured in Recovery.Equalization Phase 1. */
+ uint64_t fs : 6; /**< [ 5: 0](RO/H) Represents the full swing value of the remote transmitter
+ captured in Recovery.Equalization Phase 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t fs : 6; /**< [ 5: 0](RO/H) Represents the full swing value of the remote transmitter
+ captured in Recovery.Equalization Phase 1. */
+ uint64_t lf : 6; /**< [ 11: 6](RO/H) Represents the low frequency value of the remote transmitter
+ captured in Recovery.Equalization Phase 1. */
+ uint64_t rxphint : 3; /**< [ 14: 12](RO/H) Represents the RX equalization preset hint
+ for the receiver. */
+ uint64_t reserved_15_31 : 17;
+ uint64_t pset_coef : 18; /**< [ 49: 32](RO/H) Presets and coefficients chosen by the PEM. */
+ uint64_t reserved_50_63 : 14;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_mac_lanex_eq_s cn; */
+};
+typedef union bdk_pemx_mac_lanex_eq bdk_pemx_mac_lanex_eq_t;
+
+static inline uint64_t BDK_PEMX_MAC_LANEX_EQ(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_MAC_LANEX_EQ(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=3) && (b<=15)))
+ return 0x8e0000000780ll + 0x1000000000ll * ((a) & 0x3) + 8ll * ((b) & 0xf);
+ __bdk_csr_fatal("PEMX_MAC_LANEX_EQ", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_PEMX_MAC_LANEX_EQ(a,b) bdk_pemx_mac_lanex_eq_t
+#define bustype_BDK_PEMX_MAC_LANEX_EQ(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_MAC_LANEX_EQ(a,b) "PEMX_MAC_LANEX_EQ"
+#define device_bar_BDK_PEMX_MAC_LANEX_EQ(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_MAC_LANEX_EQ(a,b) (a)
+#define arguments_BDK_PEMX_MAC_LANEX_EQ(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) pem#_merge_timer_ctl
+ *
+ * PEM Merge Timer Control Register
+ * This register controls merging timers and overrides for maximum merging size
+ * for outbound reads, writes, and completions.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_merge_timer_ctl
+{
+ uint64_t u;
+ struct bdk_pemx_merge_timer_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t cmerge_dis : 1; /**< [ 63: 63](R/W) For diagnostic use only. If set, will disable outbound completion merging. */
+ uint64_t cmerge_mps_limit : 3; /**< [ 62: 60](R/W) Completion merge maximum payload size limit value. Software can use this value
+ to reduce the maximum size of a merged completion operation to a level below the
+ MPS value coming from the PCIe core. A value of 0x0 limits to 128 bytes with
+ each increase in value doubling the limit. The hardware is controlled by the
+ lower of [CMERGE_MPS_LIMIT] and the MPS value coming from the PCIe core. Resets
+ to a value guaranteed to be at least as large as any legal value for MPS coming
+ from the PCIe core. */
+ uint64_t cmerge_total_timer : 7; /**< [ 59: 53](R/W) Completion merge encapsulation timer. When PEM accepts an outbound completion
+ which begins a completion merging process, [CMERGE_TOTAL_TIMER] specifies the
+ maximum wait, in units of (coprocessor-clock cycles * 64), to merge additional
+ completion transfers into one larger overall completion. The values for this
+ field range from 1 to 127, with 0x0 used for diagnostics only and treated as
+ never expire. This translates into a range of 64 to 8128 in units of
+ coprocessor-clock cycles.
+
+ Internal:
+ If, during diagnostics, a timer value of 0x0 causes final transactions to be
+ stuck within the pipeline, those transactions can be released by changing the
+ timer to a non-zero value. */
+ uint64_t cmerge_segment_timer : 7; /**< [ 52: 46](R/W) Completion merge segment timer. The maximum wait, in units of
+ (coprocessor-clock cycles * 64), between each segment of the overall merge
+ operation. Each iterative completion transfer added to the overall merge
+ restarts this timer. The values for this field range from 1 to 127, with 0x0
+ used for diagnostics only and treated as never expire. This translates into
+ a range of 64 to 8128 coprocessor-clock cycles.
+
+ Internal:
+ If, during diagnostics, a timer value of 0x0 causes final transactions to be
+ stuck within the pipeline, those transactions can be released by changing the
+ timer to a non-zero value. */
+ uint64_t wmerge_mps_limit : 3; /**< [ 45: 43](R/W) Write merge maximum payload size limit value. Software can use this value to
+ reduce the maximum size of a merged write operation to a level below the MPS
+ value coming from the PCIe core. A value of 0 limits to 128 bytes with each
+ increase in value doubling the limit. The hardware will be controlled by the
+ LOWER of [WMERGE_MPS_LIMIT] and the MPS value coming from the PCIe core. Resets
+ to a value guaranteed to be at least as large as any legal value for MPS coming
+ from the PCIe core. */
+ uint64_t wmerge_total_timer : 10; /**< [ 42: 33](R/W) Write merge encapsulation timer. When PEM accepts an outbound write which begins
+ a write merging process, [WMERGE_TOTAL_TIMER] specifies the maximum wait, in
+ coprocessor-clock cycles, to merge additional write operations into one larger
+ write. The values for this field range from 1 to 1023, with 0x0 used for
+ diagnostics only and treated as never expire.
+
+ Internal:
+ If, during diagnostics, a timer value of 0x0 causes final transactions to be
+ stuck within the pipeline, those transactions can be released by changing the
+ timer to a non-zero value. */
+ uint64_t wmerge_segment_timer : 10; /**< [ 32: 23](R/W) Write merge segment timer. The maximum wait,
+ in coprocessor-clock cycles, between each segment of the overall merge
+ operation. Each iterative write operation added to the overall merge restarts this
+ timer. The values for this field range from 1 to 1023, with 0x0 used for
+ diagnostics only and treated as never expire.
+
+ Internal:
+ If, during diagnostics, a timer value of 0x0 causes final transactions to be
+ stuck within the pipeline, those transactions can be released by changing the
+ timer to a non-zero value. */
+ uint64_t rmerge_mrrs_limit : 3; /**< [ 22: 20](R/W) Read merge maximum read request size limit value. Software can use this value to
+ reduce the maximum size of a merged read operation to a level below the MRRS
+ value coming from the PCIe core. A value of 0x0 limits to 128 bytes with each
+ increase in value doubling the limit. The hardware will be controlled by the
+ LOWER of [RMERGE_MRRS_LIMIT] and the MRRS value coming from the PCIe
+ core. Resets to a value guaranteed to be at least as large as any legal value
+ for MRRS coming from the PCIe core. */
+ uint64_t rmerge_total_timer : 10; /**< [ 19: 10](R/W) Read merge encapsulation timer. When PEM accepts an outbound read which begins a
+ read merging process, [RMERGE_TOTAL_TIMER] specifies the maximum wait, in
+ coprocessor-clock cycles, to merge additional read operations into one larger
+ read. The values for this field range from 1 to 1023, with 0x0 used for
+ diagnostics only and treated as never expire.
+
+ Internal:
+ If, during diagnostics, a timer value of 0x0 causes final transactions to be
+ stuck within the pipeline, those transactions can be released by changing the
+ timer to a non-zero value. */
+ uint64_t rmerge_segment_timer : 10; /**< [ 9: 0](R/W) Read merge segment timer. Specifies the maximum wait, in coprocessor-clock
+ cycles, between each segment of the overall merge operation. Each
+ iterative read operation added to the overall merge restarts this timer. The
+ values for this field range from 1 to 1023, with 0x0 used for diagnostics only
+ and treated as never expire.
+
+ Internal:
+ If, during diagnostics, a timer value of 0x0 causes final transactions to be
+ stuck within the pipeline, those transactions can be released by changing the
+ timer to a non-zero value. */
+#else /* Word 0 - Little Endian */
+ uint64_t rmerge_segment_timer : 10; /**< [ 9: 0](R/W) Read merge segment timer. Specifies the maximum wait, in coprocessor-clock
+ cycles, between each segment of the overall merge operation. Each
+ iterative read operation added to the overall merge restarts this timer. The
+ values for this field range from 1 to 1023, with 0x0 used for diagnostics only
+ and treated as never expire.
+
+ Internal:
+ If, during diagnostics, a timer value of 0x0 causes final transactions to be
+ stuck within the pipeline, those transactions can be released by changing the
+ timer to a non-zero value. */
+ uint64_t rmerge_total_timer : 10; /**< [ 19: 10](R/W) Read merge encapsulation timer. When PEM accepts an outbound read which begins a
+ read merging process, [RMERGE_TOTAL_TIMER] specifies the maximum wait, in
+ coprocessor-clock cycles, to merge additional read operations into one larger
+ read. The values for this field range from 1 to 1023, with 0x0 used for
+ diagnostics only and treated as never expire.
+
+ Internal:
+ If, during diagnostics, a timer value of 0x0 causes final transactions to be
+ stuck within the pipeline, those transactions can be released by changing the
+ timer to a non-zero value. */
+ uint64_t rmerge_mrrs_limit : 3; /**< [ 22: 20](R/W) Read merge maximum read request size limit value. Software can use this value to
+ reduce the maximum size of a merged read operation to a level below the MRRS
+ value coming from the PCIe core. A value of 0x0 limits to 128 bytes with each
+ increase in value doubling the limit. The hardware will be controlled by the
+ LOWER of [RMERGE_MRRS_LIMIT] and the MRRS value coming from the PCIe
+ core. Resets to a value guaranteed to be at least as large as any legal value
+ for MRRS coming from the PCIe core. */
+ uint64_t wmerge_segment_timer : 10; /**< [ 32: 23](R/W) Write merge segment timer. Specifies the maximum wait,
+ in coprocessor-clock cycles, between each segment of the overall merge
+ operation. Each iterative write operation added to the overall merge restarts this
+ timer. The values for this field range from 1 to 1023, with 0x0 used for
+ diagnostics only and treated as never expire.
+
+ Internal:
+ If, during diagnostics, a timer value of 0x0 causes final transactions to be
+ stuck within the pipeline, those transactions can be released by changing the
+ timer to a non-zero value. */
+ uint64_t wmerge_total_timer : 10; /**< [ 42: 33](R/W) Write merge encapsulation timer. When PEM accepts an outbound write which begins
+ a write merging process, [WMERGE_TOTAL_TIMER] specifies the maximum wait, in
+ coprocessor-clock cycles, to merge additional write operations into one larger
+ write. The values for this field range from 1 to 1023, with 0x0 used for
+ diagnostics only and treated as never expire.
+
+ Internal:
+ If, during diagnostics, a timer value of 0x0 causes final transactions to be
+ stuck within the pipeline, those transactions can be released by changing the
+ timer to a non-zero value. */
+ uint64_t wmerge_mps_limit : 3; /**< [ 45: 43](R/W) Write merge maximum payload size limit value. Software can use this value to
+ reduce the maximum size of a merged write operation to a level below the MPS
+ value coming from the PCIe core. A value of 0 limits to 128 bytes with each
+ increase in value doubling the limit. The hardware will be controlled by the
+ LOWER of [WMERGE_MPS_LIMIT] and the MPS value coming from the PCIe core. Resets
+ to a value guaranteed to be at least as large as any legal value for MPS coming
+ from the PCIe core. */
+ uint64_t cmerge_segment_timer : 7; /**< [ 52: 46](R/W) Completion merge segment timer. Specifies the maximum wait, in units of
+ (coprocessor-clock cycles * 64), between each segment of the overall merge
+ operation. Each iterative completion transfer added to the overall merge
+ restarts this timer. The values for this field range from 1 to 127, with 0x0
+ used for diagnostics only and treated as never expire. This translates into a
+ range of 64 to 8128 coprocessor-clock cycles.
+
+ Internal:
+ If, during diagnostics, a timer value of 0x0 causes final transactions to be
+ stuck within the pipeline, those transactions can be released by changing the
+ timer to a non-zero value. */
+ uint64_t cmerge_total_timer : 7; /**< [ 59: 53](R/W) Completion merge encapsulation timer. When PEM accepts an outbound completion
+ which begins a completion merging process, [CMERGE_TOTAL_TIMER] specifies the
+ maximum wait, in units of (coprocessor-clock cycles * 64), to merge additional
+ completion transfers into one larger overall completion. The values for this
+ field range from 1 to 127, with 0x0 used for diagnostics only and treated as
+ never expire. This translates into a range of 64 to 8128
+ coprocessor-clock cycles.
+
+ Internal:
+ If, during diagnostics, a timer value of 0x0 causes final transactions to be
+ stuck within the pipeline, those transactions can be released by changing the
+ timer to a non-zero value. */
+ uint64_t cmerge_mps_limit : 3; /**< [ 62: 60](R/W) Completion merge maximum payload size limit value. Software can use this value
+ to reduce the maximum size of a merged completion operation to a level below the
+ MPS value coming from the PCIe core. A value of 0x0 limits to 128 bytes with
+ each increase in value doubling the limit. The hardware is controlled by the
+ lower of [CMERGE_MPS_LIMIT] and the MPS value coming from the PCIe core. Resets
+ to a value guaranteed to be at least as large as any legal value for MPS coming
+ from the PCIe core. */
+ uint64_t cmerge_dis : 1; /**< [ 63: 63](R/W) For diagnostic use only. If set, will disable outbound completion merging. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_merge_timer_ctl_s cn; */
+};
+typedef union bdk_pemx_merge_timer_ctl bdk_pemx_merge_timer_ctl_t;
+
+static inline uint64_t BDK_PEMX_MERGE_TIMER_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_MERGE_TIMER_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000170ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_MERGE_TIMER_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_MERGE_TIMER_CTL(a) bdk_pemx_merge_timer_ctl_t
+#define bustype_BDK_PEMX_MERGE_TIMER_CTL(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_MERGE_TIMER_CTL(a) "PEMX_MERGE_TIMER_CTL"
+#define device_bar_BDK_PEMX_MERGE_TIMER_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_MERGE_TIMER_CTL(a) (a)
+#define arguments_BDK_PEMX_MERGE_TIMER_CTL(a) (a),-1,-1,-1
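+
+/* Usage sketch (illustrative; not part of the original BDK import): turning
+ * off outbound completion merging for bring-up debug, using the
+ * BDK_CSR_MODIFY() helper from bdk-csr.h and the field layout above.
+ *
+ *   static void example_disable_cmerge(bdk_node_t node, int pem)
+ *   {
+ *       BDK_CSR_MODIFY(c, node, BDK_PEMX_MERGE_TIMER_CTL(pem),
+ *           c.s.cmerge_dis = 1);
+ *   }
+ */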
+
+/**
+ * Register (NCB) pem#_msix_pba#
+ *
+ * PEM MSI-X Pending Bit Array Registers
+ * This register is the MSI-X PBA table, the bit number is indexed by the PEM_INT_VEC_E enumeration.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_msix_pbax
+{
+ uint64_t u;
+ struct bdk_pemx_msix_pbax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pend : 64; /**< [ 63: 0](RO/H) Pending message for the associated PEM_MSIX_VEC()_CTL, enumerated by PEM_INT_VEC_E. Bits
+ that have no associated PEM_INT_VEC_E are zero. */
+#else /* Word 0 - Little Endian */
+ uint64_t pend : 64; /**< [ 63: 0](RO/H) Pending message for the associated PEM_MSIX_VEC()_CTL, enumerated by PEM_INT_VEC_E. Bits
+ that have no associated PEM_INT_VEC_E are zero. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_msix_pbax_s cn; */
+};
+typedef union bdk_pemx_msix_pbax bdk_pemx_msix_pbax_t;
+
+static inline uint64_t BDK_PEMX_MSIX_PBAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_MSIX_PBAX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=2) && (b==0)))
+ return 0x87e0c0ff0000ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b==0)))
+ return 0x87e0c0ff0000ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=5) && (b==0)))
+ return 0x87e0c0ff0000ll + 0x1000000ll * ((a) & 0x7) + 8ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=3) && (b==0)))
+ return 0x8e0f000f0000ll + 0x1000000000ll * ((a) & 0x3) + 8ll * ((b) & 0x0);
+ __bdk_csr_fatal("PEMX_MSIX_PBAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_PEMX_MSIX_PBAX(a,b) bdk_pemx_msix_pbax_t
+#define bustype_BDK_PEMX_MSIX_PBAX(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_MSIX_PBAX(a,b) "PEMX_MSIX_PBAX"
+#define device_bar_BDK_PEMX_MSIX_PBAX(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_PEMX_MSIX_PBAX(a,b) (a)
+#define arguments_BDK_PEMX_MSIX_PBAX(a,b) (a),(b),-1,-1
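+
+/* Usage sketch (illustrative; not part of the original BDK import): testing a
+ * vector's pending bit with BDK_CSR_READ() from bdk-csr.h. Only PBA word 0
+ * exists on these parts (the address function requires b == 0), so a vector
+ * number from PEM_INT_VEC_E indexes directly into the 64-bit word.
+ *
+ *   static bool example_msix_pending(bdk_node_t node, int pem, int vec)
+ *   {
+ *       uint64_t pba = BDK_CSR_READ(node, BDK_PEMX_MSIX_PBAX(pem, 0));
+ *       return (pba >> vec) & 1;
+ *   }
+ */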
+
+/**
+ * Register (NCB) pem#_msix_vec#_addr
+ *
+ * PEM MSI-X Vector Table Address Registers
+ * This register is the MSI-X vector table, indexed by the PEM_INT_VEC_E enumeration.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_msix_vecx_addr
+{
+ uint64_t u;
+ struct bdk_pemx_msix_vecx_addr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_53_63 : 11;
+ uint64_t addr : 51; /**< [ 52: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_1 : 1;
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's PEM()_MSIX_VEC()_ADDR, PEM()_MSIX_VEC()_CTL, and
+ corresponding bit of PEM()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_PEM(0..5)_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+ set, all vectors are secure and function as if [SECVEC] was set. */
+#else /* Word 0 - Little Endian */
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's PEM()_MSIX_VEC()_ADDR, PEM()_MSIX_VEC()_CTL, and
+ corresponding bit of PEM()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_PEM(0..5)_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+ set, all vectors are secure and function as if [SECVEC] was set. */
+ uint64_t reserved_1 : 1;
+ uint64_t addr : 51; /**< [ 52: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_53_63 : 11;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pemx_msix_vecx_addr_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_53_63 : 11;
+ uint64_t addr : 51; /**< [ 52: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_1 : 1;
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's PEM()_MSIX_VEC()_ADDR, PEM()_MSIX_VEC()_CTL, and
+ corresponding bit of PEM()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_PEM()_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+ set, all vectors are secure and function as if [SECVEC] was set. */
+#else /* Word 0 - Little Endian */
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's PEM()_MSIX_VEC()_ADDR, PEM()_MSIX_VEC()_CTL, and
+ corresponding bit of PEM()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_PEM()_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+ set, all vectors are secure and function as if [SECVEC] was set. */
+ uint64_t reserved_1 : 1;
+ uint64_t addr : 51; /**< [ 52: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_53_63 : 11;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_pemx_msix_vecx_addr_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_1 : 1;
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's PEM()_MSIX_VEC()_ADDR, PEM()_MSIX_VEC()_CTL, and
+ corresponding bit of PEM()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_PEM(0..2)_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+ set, all vectors are secure and function as if [SECVEC] was set. */
+#else /* Word 0 - Little Endian */
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's PEM()_MSIX_VEC()_ADDR, PEM()_MSIX_VEC()_CTL, and
+ corresponding bit of PEM()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_PEM(0..2)_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+ set, all vectors are secure and function as if [SECVEC] was set. */
+ uint64_t reserved_1 : 1;
+ uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_pemx_msix_vecx_addr_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_1 : 1;
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's PEM()_MSIX_VEC()_ADDR, PEM()_MSIX_VEC()_CTL, and
+ corresponding bit of PEM()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_PEM(0..5)_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+ set, all vectors are secure and function as if [SECVEC] was set. */
+#else /* Word 0 - Little Endian */
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's PEM()_MSIX_VEC()_ADDR, PEM()_MSIX_VEC()_CTL, and
+ corresponding bit of PEM()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_PEM(0..5)_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+ set, all vectors are secure and function as if [SECVEC] was set. */
+ uint64_t reserved_1 : 1;
+ uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_pemx_msix_vecx_addr_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_1 : 1;
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's PEM()_MSIX_VEC()_ADDR, PEM()_MSIX_VEC()_CTL, and
+ corresponding bit of PEM()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_PEM(0..3)_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+ set, all vectors are secure and function as if [SECVEC] was set. */
+#else /* Word 0 - Little Endian */
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's PEM()_MSIX_VEC()_ADDR, PEM()_MSIX_VEC()_CTL, and
+ corresponding bit of PEM()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_PEM(0..3)_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+ set, all vectors are secure and function as if [SECVEC] was set. */
+ uint64_t reserved_1 : 1;
+ uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pemx_msix_vecx_addr bdk_pemx_msix_vecx_addr_t;
+
+static inline uint64_t BDK_PEMX_MSIX_VECX_ADDR(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_MSIX_VECX_ADDR(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=2) && (b<=11)))
+ return 0x87e0c0f00000ll + 0x1000000ll * ((a) & 0x3) + 0x10ll * ((b) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=11)))
+ return 0x87e0c0f00000ll + 0x1000000ll * ((a) & 0x3) + 0x10ll * ((b) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X) && ((a<=5) && (b<=13)))
+ return 0x87e0c0f00000ll + 0x1000000ll * ((a) & 0x7) + 0x10ll * ((b) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X) && ((a<=5) && (b<=15)))
+ return 0x87e0c0f00000ll + 0x1000000ll * ((a) & 0x7) + 0x10ll * ((b) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=3) && (b<=8)))
+ return 0x8e0f00000000ll + 0x1000000000ll * ((a) & 0x3) + 0x10ll * ((b) & 0xf);
+ __bdk_csr_fatal("PEMX_MSIX_VECX_ADDR", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_PEMX_MSIX_VECX_ADDR(a,b) bdk_pemx_msix_vecx_addr_t
+#define bustype_BDK_PEMX_MSIX_VECX_ADDR(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_MSIX_VECX_ADDR(a,b) "PEMX_MSIX_VECX_ADDR"
+#define device_bar_BDK_PEMX_MSIX_VECX_ADDR(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_PEMX_MSIX_VECX_ADDR(a,b) (a)
+#define arguments_BDK_PEMX_MSIX_VECX_ADDR(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) pem#_msix_vec#_ctl
+ *
+ * PEM MSI-X Vector Table Control and Data Registers
+ * This register is the MSI-X vector table, indexed by the PEM_INT_VEC_E enumeration.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_msix_vecx_ctl
+{
+ uint64_t u;
+ struct bdk_pemx_msix_vecx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts will be sent to this vector. */
+ uint64_t data : 32; /**< [ 31: 0](R/W) Data to use for MSI-X delivery of this vector. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 32; /**< [ 31: 0](R/W) Data to use for MSI-X delivery of this vector. */
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts will be sent to this vector. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pemx_msix_vecx_ctl_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts will be sent to this vector. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t data : 20; /**< [ 19: 0](R/W) Data to use for MSI-X delivery of this vector. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 20; /**< [ 19: 0](R/W) Data to use for MSI-X delivery of this vector. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts will be sent to this vector. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_pemx_msix_vecx_ctl_s cn9; */
+};
+typedef union bdk_pemx_msix_vecx_ctl bdk_pemx_msix_vecx_ctl_t;
+
+static inline uint64_t BDK_PEMX_MSIX_VECX_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_MSIX_VECX_CTL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=2) && (b<=11)))
+ return 0x87e0c0f00008ll + 0x1000000ll * ((a) & 0x3) + 0x10ll * ((b) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=11)))
+ return 0x87e0c0f00008ll + 0x1000000ll * ((a) & 0x3) + 0x10ll * ((b) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X) && ((a<=5) && (b<=13)))
+ return 0x87e0c0f00008ll + 0x1000000ll * ((a) & 0x7) + 0x10ll * ((b) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X) && ((a<=5) && (b<=15)))
+ return 0x87e0c0f00008ll + 0x1000000ll * ((a) & 0x7) + 0x10ll * ((b) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=3) && (b<=8)))
+ return 0x8e0f00000008ll + 0x1000000000ll * ((a) & 0x3) + 0x10ll * ((b) & 0xf);
+ __bdk_csr_fatal("PEMX_MSIX_VECX_CTL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_PEMX_MSIX_VECX_CTL(a,b) bdk_pemx_msix_vecx_ctl_t
+#define bustype_BDK_PEMX_MSIX_VECX_CTL(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_MSIX_VECX_CTL(a,b) "PEMX_MSIX_VECX_CTL"
+#define device_bar_BDK_PEMX_MSIX_VECX_CTL(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_PEMX_MSIX_VECX_CTL(a,b) (a)
+#define arguments_BDK_PEMX_MSIX_VECX_CTL(a,b) (a),(b),-1,-1
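+
+/* Usage sketch (illustrative; not part of the original BDK import): programming
+ * one MSI-X vector -- address first, then message data, then unmask -- with
+ * BDK_CSR_WRITE()/BDK_CSR_MODIFY() from bdk-csr.h. The iova and msg arguments
+ * are caller-supplied; bits <1:0> are cleared so [SECVEC] stays 0.
+ *
+ *   static void example_msix_setup(bdk_node_t node, int pem, int vec,
+ *                                  uint64_t iova, uint32_t msg)
+ *   {
+ *       BDK_CSR_WRITE(node, BDK_PEMX_MSIX_VECX_ADDR(pem, vec), iova & ~3ull);
+ *       BDK_CSR_MODIFY(c, node, BDK_PEMX_MSIX_VECX_CTL(pem, vec),
+ *           c.s.data = msg;
+ *           c.s.mask = 0);
+ *   }
+ */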
+
+/**
+ * Register (NCB) pem#_ncbi_ctl
+ *
+ * PEM Inbound NCBI Control Register
+ * This register contains control bits for memory accesses targeting the NCBI bus.
+ * This register is ignored when PEM()_EBUS_CTL[PF_BAR*_SEL] is set.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_ncbi_ctl
+{
+ uint64_t u;
+ struct bdk_pemx_ncbi_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_21_63 : 43;
+ uint64_t bige : 1; /**< [ 20: 20](R/W) Atomics sent on NCBI will be marked as big endian. If the link partner is
+ big-endian and the processors are big-endian, this allows exchange of big-endian
+ atomics without byte swapping. */
+ uint64_t reserved_11_19 : 9;
+ uint64_t clken_force : 1; /**< [ 10: 10](R/W) Force clock enable on NCBI bus to be always enabled. For diagnostic use only. */
+ uint64_t ntlp_ro_dis : 1; /**< [ 9: 9](R/W) Relaxed ordering disable for non-posted TLPs. Will force relaxed ordering bit off when
+ non-posted TLPs are forwarded to IOB over NCBI. */
+ uint64_t ctlp_ro_dis : 1; /**< [ 8: 8](R/W) Relaxed ordering disable for completion TLPs. Will force relaxed ordering bit off when
+ completion TLPs are forwarded to IOB over NCBI. */
+ uint64_t ptlp_ro_dis : 1; /**< [ 7: 7](R/W) Relaxed ordering disable for posted TLPs. Will force relaxed ordering bit off when posted
+ TLPs are forwarded to IOB over NCBI. */
+ uint64_t reserved_3_6 : 4;
+ uint64_t ld_cmd : 2; /**< [ 2: 1](R/W) When PEM issues a load command over NCBI to the LLC that is to be cached, this field
+ selects the type of load command to use. Un-cached loads will use LDT:
+ 0x0 = LDD.
+ 0x1 = LDI.
+ 0x2 = LDE.
+ 0x3 = LDY. */
+ uint64_t wait_com : 1; /**< [ 0: 0](R/W) Wait for commit. For diagnostic use only.
+
+ When set, replaces the default automatic store-store ordering with a more
+ conservative and lower performing rule. This causes the PEM to wait for a store
+ done from the NCB before sending additional stores to the NCB from the MAC. The
+ PEM requests a commit on the last store if more than one STORE operation is
+ required on NCBI. When set, inbound write merging must be disabled
+ (PEM()_IB_MERGE_TIMER_CTL[WMERGE_DIS] = 1). */
+#else /* Word 0 - Little Endian */
+ uint64_t wait_com : 1; /**< [ 0: 0](R/W) Wait for commit. For diagnostic use only.
+
+ When set, replaces the default automatic store-store ordering with a more
+ conservative and lower performing rule. This causes the PEM to wait for a store
+ done from the NCB before sending additional stores to the NCB from the MAC. The
+ PEM requests a commit on the last store if more than one STORE operation is
+ required on NCBI. When set, inbound write merging must be disabled
+ (PEM()_IB_MERGE_TIMER_CTL[WMERGE_DIS] = 1). */
+ uint64_t ld_cmd : 2; /**< [ 2: 1](R/W) When PEM issues a load command over NCBI to the LLC that is to be cached, this field
+ selects the type of load command to use. Un-cached loads will use LDT:
+ 0x0 = LDD.
+ 0x1 = LDI.
+ 0x2 = LDE.
+ 0x3 = LDY. */
+ uint64_t reserved_3_6 : 4;
+ uint64_t ptlp_ro_dis : 1; /**< [ 7: 7](R/W) Relaxed ordering disable for posted TLPs. Will force relaxed ordering bit off when posted
+ TLPs are forwarded to IOB over NCBI. */
+ uint64_t ctlp_ro_dis : 1; /**< [ 8: 8](R/W) Relaxed ordering disable for completion TLPs. Will force relaxed ordering bit off when
+ completion TLPs are forwarded to IOB over NCBI. */
+ uint64_t ntlp_ro_dis : 1; /**< [ 9: 9](R/W) Relaxed ordering disable for non-posted TLPs. Will force relaxed ordering bit off when
+ non-posted TLPs are forwarded to IOB over NCBI. */
+ uint64_t clken_force : 1; /**< [ 10: 10](R/W) Force clock enable on NCBI bus to be always enabled. For diagnostic use only. */
+ uint64_t reserved_11_19 : 9;
+ uint64_t bige : 1; /**< [ 20: 20](R/W) Atomics sent on NCBI will be marked as big endian. If the link partner is
+ big-endian and the processors are big-endian, this allows exchange of big-endian
+ atomics without byte swapping. */
+ uint64_t reserved_21_63 : 43;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pemx_ncbi_ctl_cn
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_21_63 : 43;
+ uint64_t bige : 1; /**< [ 20: 20](R/W) Atomics sent on NCBI will be marked as big endian. If the link partner is
+ big-endian and the processors are big-endian, this allows exchange of big-endian
+ atomics without byte swapping. */
+ uint64_t reserved_11_19 : 9;
+ uint64_t clken_force : 1; /**< [ 10: 10](R/W) Force clock enable on NCBI bus to be always enabled. For diagnostic use only. */
+ uint64_t ntlp_ro_dis : 1; /**< [ 9: 9](R/W) Relaxed ordering disable for non-posted TLPs. Will force relaxed ordering bit off when
+ non-posted TLPs are forwarded to IOB over NCBI. */
+ uint64_t ctlp_ro_dis : 1; /**< [ 8: 8](R/W) Relaxed ordering disable for completion TLPs. Will force relaxed ordering bit off when
+ completion TLPs are forwarded to IOB over NCBI. */
+ uint64_t ptlp_ro_dis : 1; /**< [ 7: 7](R/W) Relaxed ordering disable for posted TLPs. Will force relaxed ordering bit off when posted
+ TLPs are forwarded to IOB over NCBI. */
+ uint64_t reserved_5_6 : 2;
+ uint64_t reserved_4 : 1;
+ uint64_t reserved_3 : 1;
+ uint64_t ld_cmd : 2; /**< [ 2: 1](R/W) When PEM issues a load command over NCBI to the LLC that is to be cached, this field
+ selects the type of load command to use. Un-cached loads will use LDT:
+ 0x0 = LDD.
+ 0x1 = LDI.
+ 0x2 = LDE.
+ 0x3 = LDY. */
+ uint64_t wait_com : 1; /**< [ 0: 0](R/W) Wait for commit. For diagnostic use only.
+
+ When set, replaces the default automatic store-store ordering with a more
+ conservative and lower performing rule. This causes the PEM to wait for a store
+ done from the NCB before sending additional stores to the NCB from the MAC. The
+ PEM requests a commit on the last store if more than one STORE operation is
+ required on NCBI. When set, inbound write merging must be disabled
+ (PEM()_IB_MERGE_TIMER_CTL[WMERGE_DIS] = 1). */
+#else /* Word 0 - Little Endian */
+ uint64_t wait_com : 1; /**< [ 0: 0](R/W) Wait for commit. For diagnostic use only.
+
+ When set, replaces the default automatic store-store ordering with a more
+ conservative and lower performing rule. This causes the PEM to wait for a store
+ done from the NCB before sending additional stores to the NCB from the MAC. The
+ PEM requests a commit on the last store if more than one STORE operation is
+ required on NCBI. When set, inbound write merging must be disabled
+ (PEM()_IB_MERGE_TIMER_CTL[WMERGE_DIS] = 1). */
+ uint64_t ld_cmd : 2; /**< [ 2: 1](R/W) When PEM issues a load command over NCBI to the LLC that is to be cached, this field
+ selects the type of load command to use. Un-cached loads will use LDT:
+ 0x0 = LDD.
+ 0x1 = LDI.
+ 0x2 = LDE.
+ 0x3 = LDY. */
+ uint64_t reserved_3 : 1;
+ uint64_t reserved_4 : 1;
+ uint64_t reserved_5_6 : 2;
+ uint64_t ptlp_ro_dis : 1; /**< [ 7: 7](R/W) Relaxed ordering disable for posted TLPs. Will force relaxed ordering bit off when posted
+ TLPs are forwarded to IOB over NCBI. */
+ uint64_t ctlp_ro_dis : 1; /**< [ 8: 8](R/W) Relaxed ordering disable for completion TLPs. Will force relaxed ordering bit off when
+ completion TLPs are forwarded to IOB over NCBI. */
+ uint64_t ntlp_ro_dis : 1; /**< [ 9: 9](R/W) Relaxed ordering disable for non-posted TLPs. Will force relaxed ordering bit off when
+ non-posted TLPs are forwarded to IOB over NCBI. */
+ uint64_t clken_force : 1; /**< [ 10: 10](R/W) Force clock enable on NCBI bus to be always enabled. For diagnostic use only. */
+ uint64_t reserved_11_19 : 9;
+ uint64_t bige : 1; /**< [ 20: 20](R/W) Atomics sent on NCBI will be marked as big endian. If the link partner is
+ big-endian and the processors are big-endian, this allows exchange of big-endian
+ atomics without byte swapping. */
+ uint64_t reserved_21_63 : 43;
+#endif /* Word 0 - End */
+ } cn;
+};
+typedef union bdk_pemx_ncbi_ctl bdk_pemx_ncbi_ctl_t;
+
+static inline uint64_t BDK_PEMX_NCBI_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_NCBI_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000168ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_NCBI_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_NCBI_CTL(a) bdk_pemx_ncbi_ctl_t
+#define bustype_BDK_PEMX_NCBI_CTL(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_NCBI_CTL(a) "PEMX_NCBI_CTL"
+#define device_bar_BDK_PEMX_NCBI_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_NCBI_CTL(a) (a)
+#define arguments_BDK_PEMX_NCBI_CTL(a) (a),-1,-1,-1
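+
+/* Usage sketch (illustrative; not part of the original BDK import): forcing
+ * the relaxed-ordering attribute off for all three inbound TLP classes, via
+ * BDK_CSR_MODIFY() from bdk-csr.h.
+ *
+ *   static void example_ncbi_strict_ordering(bdk_node_t node, int pem)
+ *   {
+ *       BDK_CSR_MODIFY(c, node, BDK_PEMX_NCBI_CTL(pem),
+ *           c.s.ptlp_ro_dis = 1;
+ *           c.s.ctlp_ro_dis = 1;
+ *           c.s.ntlp_ro_dis = 1);
+ *   }
+ */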
+
+/**
+ * Register (NCB) pem#_ncbi_tlp_credits
+ *
+ * PEM NCB Inbound TLP Credits Register
+ * This register specifies the number of credits for use in moving TLPs. When this register is
+ * written, the credit values are reset to the register value. This register is for diagnostic
+ * use only, and should only be written when PEM()_CTL_STATUS[LNK_ENB] is clear.
+ *
+ * This register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on MAC reset.
+ */
+union bdk_pemx_ncbi_tlp_credits
+{
+ uint64_t u;
+ struct bdk_pemx_ncbi_tlp_credits_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t ncbi_cpl : 11; /**< [ 31: 21](R/W) TLP 32 B credits for completion TLPs in the PEM's NCBI buffers.
+ Legal values are 0x21 to 0x100. */
+ uint64_t ncbi_np : 10; /**< [ 20: 11](R/W) TLP headers for non-posted TLPs in the PEM's NCBI buffers.
+ Legal values are 0x20 to 0x100. */
+ uint64_t ncbi_p : 11; /**< [ 10: 0](R/W) TLP 32 B credits for posted TLPs in the PEM's NCBI buffers.
+ Legal values are 0x21 to 0x100. */
+#else /* Word 0 - Little Endian */
+ uint64_t ncbi_p : 11; /**< [ 10: 0](R/W) TLP 32 B credits for posted TLPs in the PEM's NCBI buffers.
+ Legal values are 0x21 to 0x100. */
+ uint64_t ncbi_np : 10; /**< [ 20: 11](R/W) TLP headers for non-posted TLPs in the PEM's NCBI buffers.
+ Legal values are 0x20 to 0x100. */
+ uint64_t ncbi_cpl : 11; /**< [ 31: 21](R/W) TLP 32 B credits for completion TLPs in the PEM's NCBI buffers.
+ Legal values are 0x21 to 0x100. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_ncbi_tlp_credits_s cn; */
+};
+typedef union bdk_pemx_ncbi_tlp_credits bdk_pemx_ncbi_tlp_credits_t;
+
+static inline uint64_t BDK_PEMX_NCBI_TLP_CREDITS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_NCBI_TLP_CREDITS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000030ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_NCBI_TLP_CREDITS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_NCBI_TLP_CREDITS(a) bdk_pemx_ncbi_tlp_credits_t
+#define bustype_BDK_PEMX_NCBI_TLP_CREDITS(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_NCBI_TLP_CREDITS(a) "PEMX_NCBI_TLP_CREDITS"
+#define device_bar_BDK_PEMX_NCBI_TLP_CREDITS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_NCBI_TLP_CREDITS(a) (a)
+#define arguments_BDK_PEMX_NCBI_TLP_CREDITS(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) pem#_ncbo_fifo_status
+ *
+ * PEM NCBO Offloading FIFO Status Register
+ * This register contains status about the PEM NCBO offloading FIFOs.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_ncbo_fifo_status
+{
+ uint64_t u;
+ struct bdk_pemx_ncbo_fifo_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t csr_volume : 8; /**< [ 23: 16](RO/H) Reports the number of valid entries currently held in the NCBO CSR offloading
+ FIFO. Each entry represents an NCBO-based CSR access and the value read can
+ range from 0x0 to a maximum of 128 which would represent completely full.
+ For diagnostic use only. */
+ uint64_t reserved_15 : 1;
+ uint64_t n_volume : 7; /**< [ 14: 8](RO/H) Reports the number of valid entries currently held in the NCBO nonposted
+ offloading FIFO. Each entry represents a beat of the NCBO bus related to a
+ nonposted operation and the value read can range from 0x0 to a maximum of 64
+ which would represent completely full.
+ For diagnostic use only. */
+ uint64_t p_volume : 8; /**< [ 7: 0](RO/H) Reports the number of valid entries currently held in the NCBO posted offloading
+ FIFO. Each entry represents a beat of the NCBO bus related to a memory store and
+ the value read can range from 0x0 to a maximum of 128 which would represent
+ completely full.
+ For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t p_volume : 8; /**< [ 7: 0](RO/H) Reports the number of valid entries currently held in the NCBO posted offloading
+ FIFO. Each entry represents a beat of the NCBO bus related to a memory store and
+ the value read can range from 0x0 to a maximum of 128 which would represent
+ completely full.
+ For diagnostic use only. */
+ uint64_t n_volume : 7; /**< [ 14: 8](RO/H) Reports the number of valid entries currently held in the NCBO nonposted
+ offloading FIFO. Each entry represents a beat of the NCBO bus related to a
+ nonposted operation and the value read can range from 0x0 to a maximum of 64
+ which would represent completely full.
+ For diagnostic use only. */
+ uint64_t reserved_15 : 1;
+ uint64_t csr_volume : 8; /**< [ 23: 16](RO/H) Reports the number of valid entries currently held in the NCBO CSR offloading
+ FIFO. Each entry represents an NCBO-based CSR access and the value read can
+ range from 0x0 to a maximum of 128 which would represent completely full.
+ For diagnostic use only. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_ncbo_fifo_status_s cn; */
+};
+typedef union bdk_pemx_ncbo_fifo_status bdk_pemx_ncbo_fifo_status_t;
+
+static inline uint64_t BDK_PEMX_NCBO_FIFO_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_NCBO_FIFO_STATUS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000128ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_NCBO_FIFO_STATUS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_NCBO_FIFO_STATUS(a) bdk_pemx_ncbo_fifo_status_t
+#define bustype_BDK_PEMX_NCBO_FIFO_STATUS(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_NCBO_FIFO_STATUS(a) "PEMX_NCBO_FIFO_STATUS"
+#define device_bar_BDK_PEMX_NCBO_FIFO_STATUS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_NCBO_FIFO_STATUS(a) (a)
+#define arguments_BDK_PEMX_NCBO_FIFO_STATUS(a) (a),-1,-1,-1
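+
+/* Usage sketch (illustrative; not part of the original BDK import): checking
+ * that the NCBO posted offloading FIFO has drained, e.g. as one step of a
+ * quiesce sequence, using BDK_CSR_INIT() from bdk-csr.h.
+ *
+ *   static bool example_ncbo_posted_empty(bdk_node_t node, int pem)
+ *   {
+ *       BDK_CSR_INIT(sts, node, BDK_PEMX_NCBO_FIFO_STATUS(pem));
+ *       return sts.s.p_volume == 0;
+ *   }
+ */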
+
+/**
+ * Register (NCB) pem#_ob_cpl_fifo_status
+ *
+ * PEM Outbound Completion FIFO Status Register
+ * This register contains status about the PEM Outbound Completion FIFOs.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_ob_cpl_fifo_status
+{
+ uint64_t u;
+ struct bdk_pemx_ob_cpl_fifo_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_26_63 : 38;
+ uint64_t ncbo_c_volume : 10; /**< [ 25: 16](RO/H) Reports the number of valid entries currently held in the NCBO completion
+ FIFO. Each entry represents a 256-bit beat of data. The value read can
+ range from 0x0 to a maximum of 512 which would represent completely full.
+ For diagnostic use only. */
+ uint64_t ebo_c_volume : 8; /**< [ 15: 8](RO/H) Reports the number of valid entries currently held in the EBO completion
+ FIFO which is downstream and separate from the EBO completion offloading
+ FIFO. Each entry represents a 256-bit beat of data. The value read can
+ range from 0x0 to a maximum of 128 which would represent completely full.
+ For diagnostic use only. */
+ uint64_t pspi_c_volume : 8; /**< [ 7: 0](RO/H) Reports the number of valid entries currently held in the PSPI completion
+ FIFO. Each entry represents a 256-bit beat of data. The value read can
+ range from 0x0 to a maximum of 128 which would represent completely full.
+ For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t pspi_c_volume : 8; /**< [ 7: 0](RO/H) Reports the number of valid entries currently held in the PSPI completion
+ FIFO. Each entry represents a 256-bit beat of data. The value read can
+ range from 0x0 to a maximum of 128 which would represent completely full.
+ For diagnostic use only. */
+ uint64_t ebo_c_volume : 8; /**< [ 15: 8](RO/H) Reports the number of valid entries currently held in the EBO completion
+ FIFO which is downstream and separate from the EBO completion offloading
+ FIFO. Each entry represents a 256-bit beat of data. The value read can
+ range from 0x0 to a maximum of 128 which would represent completely full.
+ For diagnostic use only. */
+ uint64_t ncbo_c_volume : 10; /**< [ 25: 16](RO/H) Reports the number of valid entries currently held in the NCBO completion
+ FIFO. Each entry represents a 256-bit beat of data. The value read can
+ range from 0x0 to a maximum of 512 which would represent completely full.
+ For diagnostic use only. */
+ uint64_t reserved_26_63 : 38;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_ob_cpl_fifo_status_s cn; */
+};
+typedef union bdk_pemx_ob_cpl_fifo_status bdk_pemx_ob_cpl_fifo_status_t;
+
+static inline uint64_t BDK_PEMX_OB_CPL_FIFO_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_OB_CPL_FIFO_STATUS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000160ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_OB_CPL_FIFO_STATUS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_OB_CPL_FIFO_STATUS(a) bdk_pemx_ob_cpl_fifo_status_t
+#define bustype_BDK_PEMX_OB_CPL_FIFO_STATUS(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_OB_CPL_FIFO_STATUS(a) "PEMX_OB_CPL_FIFO_STATUS"
+#define device_bar_BDK_PEMX_OB_CPL_FIFO_STATUS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_OB_CPL_FIFO_STATUS(a) (a)
+#define arguments_BDK_PEMX_OB_CPL_FIFO_STATUS(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) pem#_obff_ctl_status
+ *
+ * PEM Optimized Buffer Flush/Fill Control/Status Register
+ * This register is used for EP mode OFF debug.
+ *
+ * This register is reset on MAC reset.
+ */
+union bdk_pemx_obff_ctl_status
+{
+ uint64_t u;
+ struct bdk_pemx_obff_ctl_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t dec_err : 1; /**< [ 4: 4](R/W1C) Wake decoder received an invalid WAKE pattern.
+
+ When an invalid WAKE pattern is detected, the OBFF
+ wake decoder is forced into the CPU_ACT state. */
+ uint64_t dec_state : 4; /**< [ 3: 0](RO/H) The current FSM state of the OBFF wake decoder. EP mode only.
+ For debug purposes only.
+
+ 0x0 = IDLE (RC mode).
+ 0x1 = IDLE to OBFF.
+ 0x3 = IDLE to CPU.
+ 0x4 = OBFF to IDLE.
+ 0x5 = OBFF.
+ 0x6 = OBFF to CPU 1, inactive pulse.
+ 0xa = CPU_IDLE.
+ 0xb = CPU_ACT (default state in EP mode).
+ 0xe = OBFF to CPU 1, inactive pulse.
+
+ All other FSM states are undefined. */
+#else /* Word 0 - Little Endian */
+ uint64_t dec_state : 4; /**< [ 3: 0](RO/H) The current FSM state of the OBFF wake decoder. EP mode only.
+ For debug purposes only.
+
+ 0x0 = IDLE (RC mode).
+ 0x1 = IDLE to OBFF.
+ 0x3 = IDLE to CPU.
+ 0x4 = OBFF to IDLE.
+ 0x5 = OBFF.
+ 0x6 = OBFF to CPU 1, inactive pulse.
+ 0xa = CPU_IDLE.
+ 0xb = CPU_ACT (default state in EP mode).
+ 0xe = OBFF to CPU 1, inactive pulse.
+
+ All other FSM states are undefined. */
+ uint64_t dec_err : 1; /**< [ 4: 4](R/W1C) Wake decoder received an invalid WAKE pattern.
+
+ When an invalid WAKE pattern is detected, the OBFF
+ wake decoder is forced into the CPU_ACT state. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_obff_ctl_status_s cn; */
+};
+typedef union bdk_pemx_obff_ctl_status bdk_pemx_obff_ctl_status_t;
+
+static inline uint64_t BDK_PEMX_OBFF_CTL_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_OBFF_CTL_STATUS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000080ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_OBFF_CTL_STATUS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_OBFF_CTL_STATUS(a) bdk_pemx_obff_ctl_status_t
+#define bustype_BDK_PEMX_OBFF_CTL_STATUS(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_OBFF_CTL_STATUS(a) "PEMX_OBFF_CTL_STATUS"
+#define device_bar_BDK_PEMX_OBFF_CTL_STATUS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_OBFF_CTL_STATUS(a) (a)
+#define arguments_BDK_PEMX_OBFF_CTL_STATUS(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) pem#_obff_wake_cfg
+ *
+ * PEM Optimized Buffer Flush/Fill Wake Configuration Register
+ * This register configures the OBFF WAKE signaling timing parameters.
+ *
+ * This register is reset on MAC cold reset.
+ */
+union bdk_pemx_obff_wake_cfg
+{
+ uint64_t u;
+ struct bdk_pemx_obff_wake_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t max_pls : 8; /**< [ 31: 24](R/W) Max pulse width for active-inactive-active pulse.
+ Twake_tx_max_pulse = [MAX_PLS] * Tck_period (10ns). */
+ uint64_t min_pls : 8; /**< [ 23: 16](R/W) Min pulse width for active-inactive pulse.
+ Twake_tx_min_pulse = [MIN_PLS] * Tck_period (10ns). */
+ uint64_t max_f2f : 8; /**< [ 15: 8](R/W) Max falling to falling edge width.
+ Twake_fall_fall_cpu_active (max) = [MAX_F2F] * Tck_period (10ns). */
+ uint64_t min_f2f : 8; /**< [ 7: 0](R/W) Min falling to falling edge width.
+ Twake_fall_fall_cpu_active (min) = [MIN_F2F] * Tck_period (10ns). */
+#else /* Word 0 - Little Endian */
+ uint64_t min_f2f : 8; /**< [ 7: 0](R/W) Min falling to falling edge width.
+ Twake_fall_fall_cpu_active (min) = [MIN_F2F] * Tck_period (10ns). */
+ uint64_t max_f2f : 8; /**< [ 15: 8](R/W) Max falling to falling edge width.
+ Twake_fall_fall_cpu_active (max) = [MAX_F2F] * Tck_period (10ns). */
+ uint64_t min_pls : 8; /**< [ 23: 16](R/W) Min pulse width for active-inactive pulse.
+ Twake_tx_min_pulse = [MIN_PLS] * Tck_period (10ns). */
+ uint64_t max_pls : 8; /**< [ 31: 24](R/W) Max pulse width for active-inactive-active pulse.
+ Twake_tx_max_pulse = [MAX_PLS] * Tck_period (10ns). */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_obff_wake_cfg_s cn; */
+};
+typedef union bdk_pemx_obff_wake_cfg bdk_pemx_obff_wake_cfg_t;
+
+static inline uint64_t BDK_PEMX_OBFF_WAKE_CFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_OBFF_WAKE_CFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000088ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_OBFF_WAKE_CFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_OBFF_WAKE_CFG(a) bdk_pemx_obff_wake_cfg_t
+#define bustype_BDK_PEMX_OBFF_WAKE_CFG(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_OBFF_WAKE_CFG(a) "PEMX_OBFF_WAKE_CFG"
+#define device_bar_BDK_PEMX_OBFF_WAKE_CFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_OBFF_WAKE_CFG(a) (a)
+#define arguments_BDK_PEMX_OBFF_WAKE_CFG(a) (a),-1,-1,-1
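+
+/* Worked example (illustrative): with the 10 ns Tck_period above, programming
+ * [MIN_PLS] = 16 and [MAX_PLS] = 64 yields a transmitted WAKE pulse window of
+ * 16 * 10 ns = 160 ns minimum to 64 * 10 ns = 640 ns maximum. */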
+
+/**
+ * Register (NCB) pem#_on
+ *
+ * PEM On Status Register
+ * This register indicates that PEM is configured and ready.
+ *
+ * This register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on cold reset.
+ */
+union bdk_pemx_on
+{
+ uint64_t u;
+ struct bdk_pemx_on_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_3_63 : 61;
+ uint64_t aclr : 1; /**< [ 2: 2](R/W) When this bit is set, [PEMON] will auto-clear on PEM domain reset, in addition
+ to being reset on cold reset. [ACLR] should be 0 in an EP configuration where
+ it is desired to leave the link operational while resetting the chip core.
+ It should normally be 1 in root complex mode. */
+ uint64_t pemoor : 1; /**< [ 1: 1](RO/H) Indication to software that the PEM has been taken out of reset (i.e. BIST is done) and it
+ is safe to configure core CSRs. */
+ uint64_t pemon : 1; /**< [ 0: 0](R/W/H) Indication to the GSER that the PEM is out of reset, configured, and ready to send/receive
+ traffic. Setting this bit takes the configured PIPE out of reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t pemon : 1; /**< [ 0: 0](R/W/H) Indication to the GSER that the PEM is out of reset, configured, and ready to send/receive
+ traffic. Setting this bit takes the configured PIPE out of reset. */
+ uint64_t pemoor : 1; /**< [ 1: 1](RO/H) Indication to software that the PEM has been taken out of reset (i.e. BIST is done) and it
+ is safe to configure core CSRs. */
+ uint64_t aclr : 1; /**< [ 2: 2](R/W) When this bit is set, [PEMON] will auto-clear on PEM domain reset, in addition
+ to being reset on cold reset. [ACLR] should be 0 in an EP configuration where
+ it is desired to leave the link operational while resetting the chip core.
+ It should normally be 1 in root complex mode. */
+ uint64_t reserved_3_63 : 61;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pemx_on_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t pemoor : 1; /**< [ 1: 1](RO/H) Indication to software that the PEM has been taken out of reset (i.e. BIST is done) and it
+ is safe to configure core CSRs. */
+ uint64_t pemon : 1; /**< [ 0: 0](R/W/H) Indication to the GSER that the PEM is out of reset, configured, and ready to send/receive
+ traffic. Setting this bit takes the configured PIPE out of reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t pemon : 1; /**< [ 0: 0](R/W/H) Indication to the GSER that the PEM is out of reset, configured, and ready to send/receive
+ traffic. Setting this bit takes the configured PIPE out of reset. */
+ uint64_t pemoor : 1; /**< [ 1: 1](RO/H) Indication to software that the PEM has been taken out of reset (i.e. BIST is done) and it
+ is safe to configure core CSRs. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_pemx_on_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_3_63 : 61;
+ uint64_t aclr : 1; /**< [ 2: 2](R/W) When this bit is set, [PEMON] will auto-clear on PEM domain reset, in addition
+ to being reset on cold reset. [ACLR] should be 0 in an EP configuration where
+ it is desired to leave the link operational while resetting the chip core.
+ It should normally be 1 in root complex mode. */
+ uint64_t pemoor : 1; /**< [ 1: 1](RO/H) Indication to software that the PEM has been taken out of MAC reset and it
+ is safe to configure CSRs marked as being on MAC reset, as well as all PCIe configuration
+ registers. */
+ uint64_t pemon : 1; /**< [ 0: 0](R/W/H) Indication to the centralized reset block that the PEM is out of domain reset,
+ and PEM()_CLK_EN and PEM()_CFG have been configured. Setting this bit will allow the
+ configured PIPE to be taken out of reset and MAC reset to be deasserted.
+ This bit is set as part of the initialization/boot sequence for PCIe. */
+#else /* Word 0 - Little Endian */
+ uint64_t pemon : 1; /**< [ 0: 0](R/W/H) Indication to the centralized reset block that the PEM is out of domain reset,
+ and PEM()_CLK_EN and PEM()_CFG have been configured. Setting this bit will allow the
+ configured PIPE to be taken out of reset and MAC reset to be deasserted.
+ This bit is set as part of the initialization/boot sequence for PCIe. */
+ uint64_t pemoor : 1; /**< [ 1: 1](RO/H) Indication to software that the PEM has been taken out of MAC reset and it
+ is safe to configure CSRs marked as being on MAC reset, as well as all PCIe configuration
+ registers. */
+ uint64_t aclr : 1; /**< [ 2: 2](R/W) When this bit is set, [PEMON] will auto-clear on PEM domain reset, in addition
+ to being reset on cold reset. [ACLR] should be 0 in an EP configuration where
+ it is desired to leave the link operational while resetting the chip core.
+ It should normally be 1 in root complex mode. */
+ uint64_t reserved_3_63 : 61;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_pemx_on bdk_pemx_on_t;
+
+static inline uint64_t BDK_PEMX_ON(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_ON(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x87e0c0000420ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000420ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0c0000420ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e00000000d0ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_ON", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_ON(a) bdk_pemx_on_t
+#define bustype_BDK_PEMX_ON(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_ON(a) "PEMX_ON"
+#define device_bar_BDK_PEMX_ON(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_ON(a) (a)
+#define arguments_BDK_PEMX_ON(a) (a),-1,-1,-1
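+
+/* Usage sketch (illustrative; not part of the original BDK import): the CN9XXX
+ * ordering implied above -- set [PEMON] after PEM()_CLK_EN and PEM()_CFG are
+ * programmed, then poll [PEMOOR] before touching CSRs on MAC reset. Assumes
+ * BDK_CSR_MODIFY() and BDK_CSR_WAIT_FOR_FIELD() from bdk-csr.h; the 100 ms
+ * timeout is an arbitrary choice for the sketch.
+ *
+ *   static int example_pem_enable(bdk_node_t node, int pem)
+ *   {
+ *       BDK_CSR_MODIFY(c, node, BDK_PEMX_ON(pem), c.s.pemon = 1);
+ *       if (BDK_CSR_WAIT_FOR_FIELD(node, BDK_PEMX_ON(pem), s.pemoor, ==, 1, 100000))
+ *           return -1;
+ *       return 0;
+ *   }
+ */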
+
+/**
+ * Register (NCB) pem#_p2n_bar0_start
+ *
+ * PEM PCIe RC BAR0 Start Register
+ * This register specifies the starting address for memory requests that are to be forwarded to
+ * NCB/EBUS in RC mode. In EP mode, the standard PCIe config space BAR registers are used, and
+ * this register is ignored.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_p2n_bar0_start
+{
+ uint64_t u;
+ struct bdk_pemx_p2n_bar0_start_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pemx_p2n_bar0_start_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t addr : 48; /**< [ 63: 16](R/W) The starting address of the BAR0 address space, sized as configured by the
+ PEM()_BAR_CTL[BAR0_SIZ], which defaults to ADDR\<63:23\>, and used to determine an RC BAR0 hit. */
+ uint64_t reserved_0_15 : 16;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_15 : 16;
+ uint64_t addr : 48; /**< [ 63: 16](R/W) The starting address of the BAR0 address space, sized as configured by the
+ PEM()_BAR_CTL[BAR0_SIZ], which defaults to ADDR\<63:23\>, and used to determine an RC BAR0 hit. */
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_pemx_p2n_bar0_start_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t addr : 50; /**< [ 63: 14](R/W) The starting address of the 16KB BAR0 address space. */
+ uint64_t reserved_0_13 : 14;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_13 : 14;
+ uint64_t addr : 50; /**< [ 63: 14](R/W) The starting address of the 16KB BAR0 address space. */
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_pemx_p2n_bar0_start_cn81xx cn88xx; */
+ struct bdk_pemx_p2n_bar0_start_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t addr : 41; /**< [ 63: 23](R/W) The starting address of the 8 MB BAR0 address space. */
+ uint64_t reserved_0_22 : 23;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_22 : 23;
+ uint64_t addr : 41; /**< [ 63: 23](R/W) The starting address of the 8 MB BAR0 address space. */
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pemx_p2n_bar0_start bdk_pemx_p2n_bar0_start_t;
+
+static inline uint64_t BDK_PEMX_P2N_BAR0_START(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_P2N_BAR0_START(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x87e0c0000080ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000080ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0c0000080ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000148ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_P2N_BAR0_START", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_P2N_BAR0_START(a) bdk_pemx_p2n_bar0_start_t
+#define bustype_BDK_PEMX_P2N_BAR0_START(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_P2N_BAR0_START(a) "PEMX_P2N_BAR0_START"
+#define device_bar_BDK_PEMX_P2N_BAR0_START(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_P2N_BAR0_START(a) (a)
+#define arguments_BDK_PEMX_P2N_BAR0_START(a) (a),-1,-1,-1
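+
+/* Usage sketch (illustrative; not part of the original BDK import): placing RC
+ * BAR0 at a caller-chosen base with BDK_CSR_WRITE() from bdk-csr.h. On CN9XXX
+ * bits <15:0> are reserved, and the base must meet the alignment implied by
+ * PEM()_BAR_CTL[BAR0_SIZ].
+ *
+ *   static void example_set_bar0_base(bdk_node_t node, int pem, uint64_t base)
+ *   {
+ *       BDK_CSR_WRITE(node, BDK_PEMX_P2N_BAR0_START(pem), base & ~0xffffull);
+ *   }
+ */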
+
+/**
+ * Register (RSL) pem#_p2n_bar1_start
+ *
+ * PEM PCIe to SLI BAR1 Start Register
+ * This register specifies the starting address for memory requests that are to be forwarded to
+ * the SLI in RC mode.
+ */
+union bdk_pemx_p2n_bar1_start
+{
+ uint64_t u;
+ struct bdk_pemx_p2n_bar1_start_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t addr : 38; /**< [ 63: 26](R/W) The starting address of the 64 MB BAR1 address space. */
+ uint64_t reserved_0_25 : 26;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_25 : 26;
+ uint64_t addr : 38; /**< [ 63: 26](R/W) The starting address of the 64 MB BAR1 address space. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_p2n_bar1_start_s cn; */
+};
+typedef union bdk_pemx_p2n_bar1_start bdk_pemx_p2n_bar1_start_t;
+
+static inline uint64_t BDK_PEMX_P2N_BAR1_START(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_P2N_BAR1_START(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x87e0c0000088ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000088ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0c0000088ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PEMX_P2N_BAR1_START", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_P2N_BAR1_START(a) bdk_pemx_p2n_bar1_start_t
+#define bustype_BDK_PEMX_P2N_BAR1_START(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_PEMX_P2N_BAR1_START(a) "PEMX_P2N_BAR1_START"
+#define device_bar_BDK_PEMX_P2N_BAR1_START(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_P2N_BAR1_START(a) (a)
+#define arguments_BDK_PEMX_P2N_BAR1_START(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) pem#_p2n_bar2_start
+ *
+ * PEM PCIe RC BAR2 Start Register
+ * This register specifies the starting address for memory requests that are to be forwarded to
+ * NCB/EBUS in RC mode. In EP mode, the standard PCIe config space BAR registers are used, and
+ * this register is ignored.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_p2n_bar2_start
+{
+ uint64_t u;
+ struct bdk_pemx_p2n_bar2_start_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pemx_p2n_bar2_start_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t addr : 44; /**< [ 63: 20](R/W) The starting address of the BAR2 address space, sized as configured by the
+ PEM()_BAR_CTL[BAR2_SIZ], which defaults to ADDR\<63:50\>, and used to determine an RC BAR2 hit. */
+ uint64_t reserved_0_19 : 20;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_19 : 20;
+ uint64_t addr : 44; /**< [ 63: 20](R/W) The starting address of the BAR2 address space, sized as configured by the
+ PEM()_BAR_CTL[BAR2_SIZ], which defaults to ADDR\<63:50\>, and used to determine an RC BAR2 hit. */
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_pemx_p2n_bar2_start_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t addr : 14; /**< [ 63: 50](R/W) The starting address of the 2^50 address space
+ that is the BAR2 address space. */
+ uint64_t spares : 2; /**< [ 49: 48](R/W) Spare flops. */
+ uint64_t reserved_0_47 : 48;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_47 : 48;
+ uint64_t spares : 2; /**< [ 49: 48](R/W) Spare flops. */
+ uint64_t addr : 14; /**< [ 63: 50](R/W) The starting address of the 2^50 address space
+ that is the BAR2 address space. */
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_pemx_p2n_bar2_start_cn81xx cn88xx; */
+ struct bdk_pemx_p2n_bar2_start_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t addr : 44; /**< [ 63: 20](R/W) The starting address of the BAR2 address space, sized as configured by the
+ PEM()_BAR_CTL[BAR2_SIZ],
+ which defaults to ADDR\<63:50\>, and used to determine an RC BAR2 hit. */
+ uint64_t reserved_0_19 : 20;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_19 : 20;
+ uint64_t addr : 44; /**< [ 63: 20](R/W) The starting address of the BAR2 address space, sized as configured by
+ PEM()_BAR_CTL[BAR2_SIZ]
+ (which defaults to ADDR\<63:50\>) and used to determine an RC BAR2 hit. */
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pemx_p2n_bar2_start bdk_pemx_p2n_bar2_start_t;
+
+static inline uint64_t BDK_PEMX_P2N_BAR2_START(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_P2N_BAR2_START(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x87e0c0000090ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000090ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0c0000090ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000140ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_P2N_BAR2_START", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_P2N_BAR2_START(a) bdk_pemx_p2n_bar2_start_t
+#define bustype_BDK_PEMX_P2N_BAR2_START(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_P2N_BAR2_START(a) "PEMX_P2N_BAR2_START"
+#define device_bar_BDK_PEMX_P2N_BAR2_START(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_P2N_BAR2_START(a) (a)
+#define arguments_BDK_PEMX_P2N_BAR2_START(a) (a),-1,-1,-1
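+
+/* A minimal usage sketch (not from the BDK sources): programming the RC BAR2
+ * starting address on a CN9XXX-layout part. Assumes the BDK_CSR_WRITE()
+ * accessor from bdk-csr.h; 'node' and 'pem' are caller-chosen. */
+static inline void example_set_p2n_bar2_start(bdk_node_t node, int pem, uint64_t start_addr)
+{
+    bdk_pemx_p2n_bar2_start_t bar2;
+    bar2.u = 0;
+    bar2.cn9.addr = start_addr >> 20; /* ADDR holds address bits <63:20> */
+    BDK_CSR_WRITE(node, BDK_PEMX_P2N_BAR2_START(pem), bar2.u);
+}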
+
+/**
+ * Register (NCB) pem#_p2n_bar4_start
+ *
+ * PEM PCIe RC BAR4 Start Register
+ * This register specifies the starting address for memory requests that are to be forwarded to
+ * NCB/EBUS in RC mode. In EP mode, the standard PCIe config space BAR registers are used, and
+ * this register is ignored.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_p2n_bar4_start
+{
+ uint64_t u;
+ struct bdk_pemx_p2n_bar4_start_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t addr : 38; /**< [ 63: 26](R/W) The starting address of BAR4 address space. */
+ uint64_t reserved_0_25 : 26;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_25 : 26;
+ uint64_t addr : 38; /**< [ 63: 26](R/W) The starting address of BAR4 address space. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_p2n_bar4_start_s cn; */
+};
+typedef union bdk_pemx_p2n_bar4_start bdk_pemx_p2n_bar4_start_t;
+
+static inline uint64_t BDK_PEMX_P2N_BAR4_START(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_P2N_BAR4_START(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000138ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_P2N_BAR4_START", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_P2N_BAR4_START(a) bdk_pemx_p2n_bar4_start_t
+#define bustype_BDK_PEMX_P2N_BAR4_START(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_P2N_BAR4_START(a) "PEMX_P2N_BAR4_START"
+#define device_bar_BDK_PEMX_P2N_BAR4_START(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_P2N_BAR4_START(a) (a)
+#define arguments_BDK_PEMX_P2N_BAR4_START(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) pem#_p2p_bar#_end
+ *
+ * PEM Peer-to-Peer BAR0 End Register
+ * This register specifies the ending address for memory requests that are to be forwarded to the
+ * PCIe peer port.
+ */
+union bdk_pemx_p2p_barx_end
+{
+ uint64_t u;
+ struct bdk_pemx_p2p_barx_end_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t addr : 52; /**< [ 63: 12](R/W) The ending address of the address window created by this field and the
+ PEM_P2P_BAR0_START[63:12] field. The full 64 bits of the address are created by:
+ {ADDR[63:12], 12'b0}. */
+ uint64_t reserved_0_11 : 12;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_11 : 12;
+ uint64_t addr : 52; /**< [ 63: 12](R/W) The ending address of the address window created by this field and the
+ PEM_P2P_BAR0_START[63:12] field. The full 64 bits of the address are created by:
+ {ADDR[63:12], 12'b0}. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_p2p_barx_end_s cn; */
+};
+typedef union bdk_pemx_p2p_barx_end bdk_pemx_p2p_barx_end_t;
+
+static inline uint64_t BDK_PEMX_P2P_BARX_END(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_P2P_BARX_END(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0c0000048ll + 0x1000000ll * ((a) & 0x3) + 0x10ll * ((b) & 0x3);
+ __bdk_csr_fatal("PEMX_P2P_BARX_END", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_PEMX_P2P_BARX_END(a,b) bdk_pemx_p2p_barx_end_t
+#define bustype_BDK_PEMX_P2P_BARX_END(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_PEMX_P2P_BARX_END(a,b) "PEMX_P2P_BARX_END"
+#define device_bar_BDK_PEMX_P2P_BARX_END(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_P2P_BARX_END(a,b) (a)
+#define arguments_BDK_PEMX_P2P_BARX_END(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) pem#_p2p_bar#_start
+ *
+ * PEM Peer-to-Peer BAR0 Start Register
+ * This register specifies the starting address for memory requests that are to be forwarded to
+ * the PCIe peer port.
+ */
+union bdk_pemx_p2p_barx_start
+{
+ uint64_t u;
+ struct bdk_pemx_p2p_barx_start_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t addr : 52; /**< [ 63: 12](R/W) The starting address of the address window created by this field and the
+ PEM_P2P_BAR0_END[63:12] field. The full 64 bits of the address are created by:
+ {ADDR[63:12], 12'b0}. */
+ uint64_t reserved_2_11 : 10;
+ uint64_t dst : 2; /**< [ 1: 0](R/W) The destination peer of the address window created by this field and the
+ PEM_P2P_BAR0_END[63:12] field. It is illegal to configure the destination peer to match
+ the source. */
+#else /* Word 0 - Little Endian */
+ uint64_t dst : 2; /**< [ 1: 0](R/W) The destination peer of the address window created by this field and the
+ PEM_P2P_BAR0_END[63:12] field. It is illegal to configure the destination peer to match
+ the source. */
+ uint64_t reserved_2_11 : 10;
+ uint64_t addr : 52; /**< [ 63: 12](R/W) The starting address of the address window created by this field and the
+ PEM_P2P_BAR0_END[63:12] field. The full 64 bits of the address are created by:
+ {ADDR[63:12], 12'b0}. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_p2p_barx_start_s cn; */
+};
+typedef union bdk_pemx_p2p_barx_start bdk_pemx_p2p_barx_start_t;
+
+static inline uint64_t BDK_PEMX_P2P_BARX_START(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_P2P_BARX_START(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0c0000040ll + 0x1000000ll * ((a) & 0x3) + 0x10ll * ((b) & 0x3);
+ __bdk_csr_fatal("PEMX_P2P_BARX_START", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_PEMX_P2P_BARX_START(a,b) bdk_pemx_p2p_barx_start_t
+#define bustype_BDK_PEMX_P2P_BARX_START(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_PEMX_P2P_BARX_START(a,b) "PEMX_P2P_BARX_START"
+#define device_bar_BDK_PEMX_P2P_BARX_START(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_P2P_BARX_START(a,b) (a)
+#define arguments_BDK_PEMX_P2P_BARX_START(a,b) (a),(b),-1,-1
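+
+/* A minimal usage sketch (not from the BDK sources): opening a peer-to-peer
+ * forwarding window on CN83XX by programming the START/END pair. Assumes the
+ * BDK_CSR_WRITE() accessor from bdk-csr.h. */
+static inline void example_open_p2p_window(bdk_node_t node, int pem, int bar,
+                                           uint64_t start, uint64_t end, int dst_pem)
+{
+    bdk_pemx_p2p_barx_start_t s;
+    bdk_pemx_p2p_barx_end_t e;
+    s.u = 0;
+    s.s.addr = start >> 12; /* ADDR<63:12>; low 12 address bits are implied zero */
+    s.s.dst = dst_pem;      /* destination peer; must not match the source PEM */
+    e.u = 0;
+    e.s.addr = end >> 12;
+    BDK_CSR_WRITE(node, BDK_PEMX_P2P_BARX_START(pem, bar), s.u);
+    BDK_CSR_WRITE(node, BDK_PEMX_P2P_BARX_END(pem, bar), e.u);
+}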
+
+/**
+ * Register (NCB) pem#_perr_status
+ *
+ * PEM Parity Error Status Register
+ * This register contains indications of parity errors detected inside PEM.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_perr_status
+{
+ uint64_t u;
+ struct bdk_pemx_perr_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_7_63 : 57;
+ uint64_t mac_rx_perr : 1; /**< [ 6: 6](R/W1C/H) Set when the MAC core has detected a parity error in the receive datapath.
+ Corresponds to app_parity_errs[2] output from MAC core. */
+ uint64_t mac_txbe_perr : 1; /**< [ 5: 5](R/W1C/H) Set when the MAC core has detected a parity error in the back end of the transmit
+ datapath.
+ Corresponds to app_parity_errs[1] output from MAC core. */
+ uint64_t mac_txfe_perr : 1; /**< [ 4: 4](R/W1C/H) Set when the MAC core has detected a parity error in the front end of the transmit
+ datapath.
+ Corresponds to app_parity_errs[0] output from MAC core. */
+ uint64_t rasdp : 1; /**< [ 3: 3](R/W1C/H) Set when the MAC core has entered RASDP mode due to an uncorrectable error. */
+ uint64_t dbe : 1; /**< [ 2: 2](R/W1C/H) Set when an uncorrectable (double-bit) error was detected in a RAM inside PEM. */
+ uint64_t rx_perr : 1; /**< [ 1: 1](R/W1C/H) Set when a parity error was detected in the receive datapath. */
+ uint64_t tx_perr : 1; /**< [ 0: 0](R/W1C/H) Set when a parity error was detected in the transmit datapath (only applies to traffic
+ originating on EBO). */
+#else /* Word 0 - Little Endian */
+ uint64_t tx_perr : 1; /**< [ 0: 0](R/W1C/H) Set when a parity error was detected in the transmit datapath (only applies to traffic
+ originating on EBO). */
+ uint64_t rx_perr : 1; /**< [ 1: 1](R/W1C/H) Set when a parity error was detected in the receive datapath. */
+ uint64_t dbe : 1; /**< [ 2: 2](R/W1C/H) Set when an uncorrectable (double-bit) error was detected in a RAM inside PEM. */
+ uint64_t rasdp : 1; /**< [ 3: 3](R/W1C/H) Set when the MAC core has entered RASDP mode due to an uncorrectable error. */
+ uint64_t mac_txfe_perr : 1; /**< [ 4: 4](R/W1C/H) Set when the MAC core has detected a parity error in the front end of the transmit
+ datapath.
+ Corresponds to app_parity_errs[0] output from MAC core. */
+ uint64_t mac_txbe_perr : 1; /**< [ 5: 5](R/W1C/H) Set when the MAC core has detected a parity error in the back end of the transmit
+ datapath.
+ Corresponds to app_parity_errs[1] output from MAC core. */
+ uint64_t mac_rx_perr : 1; /**< [ 6: 6](R/W1C/H) Set when the MAC core has detected a parity error in the receive datapath.
+ Corresponds to app_parity_errs[2] output from MAC core. */
+ uint64_t reserved_7_63 : 57;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_perr_status_s cn; */
+};
+typedef union bdk_pemx_perr_status bdk_pemx_perr_status_t;
+
+static inline uint64_t BDK_PEMX_PERR_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_PERR_STATUS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e00000001c8ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_PERR_STATUS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_PERR_STATUS(a) bdk_pemx_perr_status_t
+#define bustype_BDK_PEMX_PERR_STATUS(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_PERR_STATUS(a) "PEMX_PERR_STATUS"
+#define device_bar_BDK_PEMX_PERR_STATUS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_PERR_STATUS(a) (a)
+#define arguments_BDK_PEMX_PERR_STATUS(a) (a),-1,-1,-1
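+
+/* A minimal usage sketch (not from the BDK sources): reading and acknowledging
+ * the parity error status. The bits are R/W1C, so writing back the value just
+ * read clears exactly the errors that were observed. Assumes the BDK_CSR_READ()
+ * and BDK_CSR_WRITE() accessors from bdk-csr.h. */
+static inline void example_ack_pem_parity_errors(bdk_node_t node, int pem)
+{
+    bdk_pemx_perr_status_t st;
+    st.u = BDK_CSR_READ(node, BDK_PEMX_PERR_STATUS(pem));
+    if (st.s.dbe)
+    {
+        /* Uncorrectable (double-bit) RAM error inside PEM; handle as needed. */
+    }
+    BDK_CSR_WRITE(node, BDK_PEMX_PERR_STATUS(pem), st.u); /* W1C: clear what we saw */
+}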
+
+/**
+ * Register (NCB) pem#_pf#_clr_flr_req
+ *
+ * PEM PF Clear FLR Request Register
+ * This register provides clear request for PCIe PF function level reset (FLR).
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on MAC reset.
+ */
+union bdk_pemx_pfx_clr_flr_req
+{
+ uint64_t u;
+ struct bdk_pemx_pfx_clr_flr_req_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t clr : 1; /**< [ 0: 0](R/W1C/H) When written with a 1, will cause hardware to clear the FLR condition.
+ This bit always reads as a zero. */
+#else /* Word 0 - Little Endian */
+ uint64_t clr : 1; /**< [ 0: 0](R/W1C/H) When written with a 1, will cause hardware to clear the FLR condition.
+ This bit always reads as a zero. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_pfx_clr_flr_req_s cn; */
+};
+typedef union bdk_pemx_pfx_clr_flr_req bdk_pemx_pfx_clr_flr_req_t;
+
+static inline uint64_t BDK_PEMX_PFX_CLR_FLR_REQ(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_PFX_CLR_FLR_REQ(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=3) && (b<=15)))
+ return 0x8e0000000a00ll + 0x1000000000ll * ((a) & 0x3) + 8ll * ((b) & 0xf);
+ __bdk_csr_fatal("PEMX_PFX_CLR_FLR_REQ", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_PEMX_PFX_CLR_FLR_REQ(a,b) bdk_pemx_pfx_clr_flr_req_t
+#define bustype_BDK_PEMX_PFX_CLR_FLR_REQ(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_PFX_CLR_FLR_REQ(a,b) "PEMX_PFX_CLR_FLR_REQ"
+#define device_bar_BDK_PEMX_PFX_CLR_FLR_REQ(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_PFX_CLR_FLR_REQ(a,b) (a)
+#define arguments_BDK_PEMX_PFX_CLR_FLR_REQ(a,b) (a),(b),-1,-1
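+
+/* A minimal usage sketch (not from the BDK sources): clearing a pending PF FLR
+ * condition. [CLR] is write-one-to-clear and always reads back as zero.
+ * Assumes the BDK_CSR_WRITE() accessor from bdk-csr.h. */
+static inline void example_clear_pf_flr(bdk_node_t node, int pem, int pf)
+{
+    bdk_pemx_pfx_clr_flr_req_t req;
+    req.u = 0;
+    req.s.clr = 1; /* request hardware to clear the FLR condition */
+    BDK_CSR_WRITE(node, BDK_PEMX_PFX_CLR_FLR_REQ(pem, pf), req.u);
+}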
+
+/**
+ * Register (NCB) pem#_pf#_cs#_pfcfg#
+ *
+ * PEM PCIe Direct Config PF Registers
+ * This register is used to modify PF configuration space. It can only be accessed
+ * using 32-bit instructions (either [DATA_LO] or [DATA_HI], but not both
+ * simultaneously).
+ *
+ * This register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on MAC reset.
+ */
+union bdk_pemx_pfx_csx_pfcfgx
+{
+ uint64_t u;
+ struct bdk_pemx_pfx_csx_pfcfgx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data_hi : 32; /**< [ 63: 32](R/W/H) Data bits to write to high config register, or bits read from selected config register. */
+ uint64_t data_lo : 32; /**< [ 31: 0](R/W/H) Data bits to write to low config register, or bits read from selected config register. */
+#else /* Word 0 - Little Endian */
+ uint64_t data_lo : 32; /**< [ 31: 0](R/W/H) Data bits to write to low config register, or bits read from selected config register. */
+ uint64_t data_hi : 32; /**< [ 63: 32](R/W/H) Data bits to write to high config register, or bits read from selected config register. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_pfx_csx_pfcfgx_s cn; */
+};
+typedef union bdk_pemx_pfx_csx_pfcfgx bdk_pemx_pfx_csx_pfcfgx_t;
+
+static inline uint64_t BDK_PEMX_PFX_CSX_PFCFGX(unsigned long a, unsigned long b, unsigned long c, unsigned long d) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_PFX_CSX_PFCFGX(unsigned long a, unsigned long b, unsigned long c, unsigned long d)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=3) && (b<=15) && (c<=1) && (d<=511)))
+ return 0x8e0000008000ll + 0x1000000000ll * ((a) & 0x3) + 0x40000ll * ((b) & 0xf) + 0x10000ll * ((c) & 0x1) + 8ll * ((d) & 0x1ff);
+ __bdk_csr_fatal("PEMX_PFX_CSX_PFCFGX", 4, a, b, c, d);
+}
+
+#define typedef_BDK_PEMX_PFX_CSX_PFCFGX(a,b,c,d) bdk_pemx_pfx_csx_pfcfgx_t
+#define bustype_BDK_PEMX_PFX_CSX_PFCFGX(a,b,c,d) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_PFX_CSX_PFCFGX(a,b,c,d) "PEMX_PFX_CSX_PFCFGX"
+#define device_bar_BDK_PEMX_PFX_CSX_PFCFGX(a,b,c,d) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_PFX_CSX_PFCFGX(a,b,c,d) (a)
+#define arguments_BDK_PEMX_PFX_CSX_PFCFGX(a,b,c,d) (a),(b),(c),(d)
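+
+/* A minimal usage sketch (not from the BDK sources): reading one PF config
+ * dword through the direct-config window. Each PFCFG entry exposes an aligned
+ * quadword as [DATA_LO]/[DATA_HI], and only 32-bit accesses are legal, so the
+ * sketch issues a plain 32-bit load. bdk_phys_to_ptr() is assumed from the
+ * BDK utility headers; the dword indexing is illustrative. */
+static inline uint32_t example_read_pf_config_dword(int pem, int pf, int cs, int dword)
+{
+    uint64_t pa = BDK_PEMX_PFX_CSX_PFCFGX(pem, pf, cs, dword / 2);
+    volatile uint32_t *reg = (volatile uint32_t *)bdk_phys_to_ptr(pa);
+    return reg[dword & 1]; /* even dword -> DATA_LO, odd dword -> DATA_HI */
+}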
+
+/**
+ * Register (NCB) pem#_pf#_ctl_status
+ *
+ * PEM PF Control Status Register
+ * This is a general PF control and status register of the PEM.
+ * There is a register for each PF.
+ *
+ * This register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on MAC reset.
+ */
+union bdk_pemx_pfx_ctl_status
+{
+ uint64_t u;
+ struct bdk_pemx_pfx_ctl_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t ob_p_cmd : 1; /**< [ 4: 4](WO) Wake up. Writing a one to this bit creates a pulse
+ in the application that wakes the PMC state machine
+ from the D1, D2, or D3 power state. This bit always
+ reads as zero.
+ Upon wake-up, the controller sends a PM_PME message. EP mode only.
+
+ Internal:
+ Controls outband_pwrup_cmd input to the DW core. */
+ uint64_t pf_flr_en : 1; /**< [ 3: 3](R/W) When a PF-FLR occurs, an indication will be sent to the central reset controller.
+ The reset controller can decide whether to reset the chip core based on this indication.
+ This bit controls whether this PF can notify the reset controller: if it is
+ set, the PF-FLR will be forwarded to the reset controller.
+
+ Internal:
+ Indication is on pem__rst_intf.pf_flr */
+ uint64_t pm_dst : 3; /**< [ 2: 0](RO/H) Current power management DSTATE. There are 3 bits of
+ D-state for each function.
+ 0x0 = D0.
+ 0x1 = D1.
+ 0x2 = D2.
+ 0x3 = D3.
+ 0x4 = Uninitialized.
+ 0x5 - 0x7 = Reserved. */
+#else /* Word 0 - Little Endian */
+ uint64_t pm_dst : 3; /**< [ 2: 0](RO/H) Current power management DSTATE. There are 3 bits of
+ D-state for each function.
+ 0x0 = D0.
+ 0x1 = D1.
+ 0x2 = D2.
+ 0x3 = D3.
+ 0x4 = Uninitialized.
+ 0x5 - 0x7 = Reserved. */
+ uint64_t pf_flr_en : 1; /**< [ 3: 3](R/W) When a PF-FLR occurs, an indication will be sent to the central reset controller.
+ The reset controller can decide whether to reset the chip core based on this indication.
+ This bit controls whether this PF can notify the reset controller: if it is
+ set, the PF-FLR will be forwarded to the reset controller.
+
+ Internal:
+ Indication is on pem__rst_intf.pf_flr */
+ uint64_t ob_p_cmd : 1; /**< [ 4: 4](WO) Wake up. Writing a one to this bit creates a pulse
+ in the application that wakes the PMC state machine
+ from the D1, D2, or D3 power state. This bit always
+ reads as zero.
+ Upon wake-up, the controller sends a PM_PME message. EP mode only.
+
+ Internal:
+ Controls outband_pwrup_cmd input to the DW core. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_pfx_ctl_status_s cn; */
+};
+typedef union bdk_pemx_pfx_ctl_status bdk_pemx_pfx_ctl_status_t;
+
+static inline uint64_t BDK_PEMX_PFX_CTL_STATUS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_PFX_CTL_STATUS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=3) && (b<=15)))
+ return 0x8e0000000800ll + 0x1000000000ll * ((a) & 0x3) + 8ll * ((b) & 0xf);
+ __bdk_csr_fatal("PEMX_PFX_CTL_STATUS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_PEMX_PFX_CTL_STATUS(a,b) bdk_pemx_pfx_ctl_status_t
+#define bustype_BDK_PEMX_PFX_CTL_STATUS(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_PFX_CTL_STATUS(a,b) "PEMX_PFX_CTL_STATUS"
+#define device_bar_BDK_PEMX_PFX_CTL_STATUS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_PFX_CTL_STATUS(a,b) (a)
+#define arguments_BDK_PEMX_PFX_CTL_STATUS(a,b) (a),(b),-1,-1
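+
+/* A minimal usage sketch (not from the BDK sources): pulsing [OB_P_CMD] to
+ * wake a PF that is not in D0 (EP mode). Assumes the BDK_CSR_READ() and
+ * BDK_CSR_WRITE() accessors from bdk-csr.h. */
+static inline void example_pf_wakeup(bdk_node_t node, int pem, int pf)
+{
+    bdk_pemx_pfx_ctl_status_t cs;
+    cs.u = BDK_CSR_READ(node, BDK_PEMX_PFX_CTL_STATUS(pem, pf));
+    if (cs.s.pm_dst != 0) /* D-state is not D0 */
+    {
+        cs.u = 0;
+        cs.s.ob_p_cmd = 1; /* WO pulse: wake the PMC state machine, sends PM_PME */
+        BDK_CSR_WRITE(node, BDK_PEMX_PFX_CTL_STATUS(pem, pf), cs.u);
+    }
+}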
+
+/**
+ * Register (NCB) pem#_pf#_vf#_vfcfg#
+ *
+ * PEM PCIe Direct Config VF Registers
+ * This register is used to modify VF configuration space. It can only be accessed
+ * using 32-bit instructions (either [DATA_LO] or [DATA_HI], but not both
+ * simultaneously).
+ *
+ * This register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on MAC reset.
+ */
+union bdk_pemx_pfx_vfx_vfcfgx
+{
+ uint64_t u;
+ struct bdk_pemx_pfx_vfx_vfcfgx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data_hi : 32; /**< [ 63: 32](R/W/H) Data bits to write to high config register, or bits read from selected config register. */
+ uint64_t data_lo : 32; /**< [ 31: 0](R/W/H) Data bits to write to low config register, or bits read from selected config register. */
+#else /* Word 0 - Little Endian */
+ uint64_t data_lo : 32; /**< [ 31: 0](R/W/H) Data bits to write to low config register, or bits read from selected config register. */
+ uint64_t data_hi : 32; /**< [ 63: 32](R/W/H) Data bits to write to high config register, or bits read from selected config register. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_pfx_vfx_vfcfgx_s cn; */
+};
+typedef union bdk_pemx_pfx_vfx_vfcfgx bdk_pemx_pfx_vfx_vfcfgx_t;
+
+static inline uint64_t BDK_PEMX_PFX_VFX_VFCFGX(unsigned long a, unsigned long b, unsigned long c, unsigned long d) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_PFX_VFX_VFCFGX(unsigned long a, unsigned long b, unsigned long c, unsigned long d)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=3) && (b<=15) && (c<=239) && (d<=511)))
+ return 0x8e0000028000ll + 0x1000000000ll * ((a) & 0x3) + 0x40000ll * ((b) & 0xf) + 0x400000ll * ((c) & 0xff) + 8ll * ((d) & 0x1ff);
+ __bdk_csr_fatal("PEMX_PFX_VFX_VFCFGX", 4, a, b, c, d);
+}
+
+#define typedef_BDK_PEMX_PFX_VFX_VFCFGX(a,b,c,d) bdk_pemx_pfx_vfx_vfcfgx_t
+#define bustype_BDK_PEMX_PFX_VFX_VFCFGX(a,b,c,d) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_PFX_VFX_VFCFGX(a,b,c,d) "PEMX_PFX_VFX_VFCFGX"
+#define device_bar_BDK_PEMX_PFX_VFX_VFCFGX(a,b,c,d) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_PFX_VFX_VFCFGX(a,b,c,d) (a)
+#define arguments_BDK_PEMX_PFX_VFX_VFCFGX(a,b,c,d) (a),(b),(c),(d)
+
+/**
+ * Register (NCB) pem#_ptm_ctl
+ *
+ * PEM PTM Control Register
+ * This register contains precision time management (PTM) control bits.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on MAC cold reset.
+ */
+union bdk_pemx_ptm_ctl
+{
+ uint64_t u;
+ struct bdk_pemx_ptm_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_10_63 : 54;
+ uint64_t ptm_lcl_cap : 1; /**< [ 9: 9](WO) When set, causes the precision time management local value to be captured in
+ PEM()_PTM_LCL_TIME. */
+ uint64_t ptm_mstr_sel : 1; /**< [ 8: 8](R/W) When configured as a root complex, selects which master clock input the
+ precision time management protocol uses. If configured as an endpoint, this bit is ignored.
+ 0 = Master clock from PTP timestamp.
+ 1 = Master clock from GTI_CC_CNTCV. */
+ uint64_t ptm_mstr_adj : 8; /**< [ 7: 0](R/W) This value (in ns) is added to the selected ([PTM_MSTR_SEL]) master time input
+ to account for insertion (including clock domain crossing) delays, before
+ being presented to the MAC.
+
+ To calculate an accurate delay:
+
+ [PTM_MSTR_ADJ] = 2 sclk cycles (does not include TBD channel flops) + 3.5 core_clk cycles.
+
+ The default value assumes the MAC is operating at GEN1, and there are 2 channel
+ flops on the master time inputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t ptm_mstr_adj : 8; /**< [ 7: 0](R/W) This value (in ns) is added to the selected ([PTM_MSTR_SEL]) master time input
+ to account for insertion (including clock domain crossing) delays, before
+ being presented to the MAC.
+
+ To calculate an accurate delay:
+
+ [PTM_MSTR_ADJ] = 2 sclk cycles (does not include TBD channel flops) + 3.5 core_clk cycles.
+
+ The default value assumes the MAC is operating at GEN1, and there are 2 channel
+ flops on the master time inputs. */
+ uint64_t ptm_mstr_sel : 1; /**< [ 8: 8](R/W) When configured as a root complex, selects which master clock input the
+ precision time management protocol uses. If configured as an endpoint, this bit is ignored.
+ 0 = Master clock from PTP timestamp.
+ 1 = Master clock from GTI_CC_CNTCV. */
+ uint64_t ptm_lcl_cap : 1; /**< [ 9: 9](WO) When set, causes the precision time management local value to be captured in
+ PEM()_PTM_LCL_TIME. */
+ uint64_t reserved_10_63 : 54;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_ptm_ctl_s cn; */
+};
+typedef union bdk_pemx_ptm_ctl bdk_pemx_ptm_ctl_t;
+
+static inline uint64_t BDK_PEMX_PTM_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_PTM_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000090ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_PTM_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_PTM_CTL(a) bdk_pemx_ptm_ctl_t
+#define bustype_BDK_PEMX_PTM_CTL(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_PTM_CTL(a) "PEMX_PTM_CTL"
+#define device_bar_BDK_PEMX_PTM_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_PTM_CTL(a) (a)
+#define arguments_BDK_PEMX_PTM_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) pem#_ptm_lcl_time
+ *
+ * PEM PTM Time Register
+ * This register contains the PTM synchronized local time value.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on MAC reset.
+ */
+union bdk_pemx_ptm_lcl_time
+{
+ uint64_t u;
+ struct bdk_pemx_ptm_lcl_time_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t val : 64; /**< [ 63: 0](RO/H) When an external hardware trigger occurs, or CSR bit PEM()_PTM_CTL[PTM_LCL_CAP] is written,
+ the local time as tracked by the precision time management protocol is captured to this
+ register. */
+#else /* Word 0 - Little Endian */
+ uint64_t val : 64; /**< [ 63: 0](RO/H) When an external hardware trigger occurs, or CSR bit PEM()_PTM_CTL[PTM_LCL_CAP] is written,
+ the local time as tracked by the precision time management protocol is captured to this
+ register. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_ptm_lcl_time_s cn; */
+};
+typedef union bdk_pemx_ptm_lcl_time bdk_pemx_ptm_lcl_time_t;
+
+static inline uint64_t BDK_PEMX_PTM_LCL_TIME(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_PTM_LCL_TIME(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000098ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_PTM_LCL_TIME", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_PTM_LCL_TIME(a) bdk_pemx_ptm_lcl_time_t
+#define bustype_BDK_PEMX_PTM_LCL_TIME(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_PTM_LCL_TIME(a) "PEMX_PTM_LCL_TIME"
+#define device_bar_BDK_PEMX_PTM_LCL_TIME(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_PTM_LCL_TIME(a) (a)
+#define arguments_BDK_PEMX_PTM_LCL_TIME(a) (a),-1,-1,-1
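+
+/* A minimal usage sketch (not from the BDK sources): latching and reading the
+ * PTM-synchronized local time. Assumes the BDK_CSR_MODIFY() and BDK_CSR_READ()
+ * accessors from bdk-csr.h. */
+static inline uint64_t example_capture_ptm_time(bdk_node_t node, int pem)
+{
+    /* [PTM_LCL_CAP] is WO; writing it captures the local PTM time. */
+    BDK_CSR_MODIFY(c, node, BDK_PEMX_PTM_CTL(pem), c.s.ptm_lcl_cap = 1);
+    return BDK_CSR_READ(node, BDK_PEMX_PTM_LCL_TIME(pem));
+}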
+
+/**
+ * Register (RSL) pem#_qlm
+ *
+ * PEM QLM Configuration Register
+ * This register configures the PEM QLM.
+ */
+union bdk_pemx_qlm
+{
+ uint64_t u;
+ struct bdk_pemx_qlm_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t pem_bdlm : 1; /**< [ 0: 0](R/W/H) This bit can only be set for PEM2/PEM3; for all other PEMs it has no
+ function.
+ PEM2: when set, is configured to send/receive traffic on DLM4;
+ when clear, is configured to send/receive traffic on QLM2/QLM3.
+ PEM3: when set, is configured to send/receive traffic on DLM5/DLM6;
+ when clear, is configured to send/receive traffic on QLM3.
+ Note that this bit must only be set while both the associated PHYs and PEM2/PEM3 are in
+ reset.
+ These conditions can be ensured by setting the PEM(2/3)_ON[PEMON] bit after setting this
+ bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t pem_bdlm : 1; /**< [ 0: 0](R/W/H) This bit can only be set for PEM2/PEM3; for all other PEMs it has no
+ function.
+ PEM2: when set, is configured to send/receive traffic on DLM4;
+ when clear, is configured to send/receive traffic on QLM2/QLM3.
+ PEM3: when set, is configured to send/receive traffic on DLM5/DLM6;
+ when clear, is configured to send/receive traffic on QLM3.
+ Note that this bit must only be set while both the associated PHYs and PEM2/PEM3 are in
+ reset.
+ These conditions can be ensured by setting the PEM(2/3)_ON[PEMON] bit after setting this
+ bit. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_qlm_s cn; */
+};
+typedef union bdk_pemx_qlm bdk_pemx_qlm_t;
+
+static inline uint64_t BDK_PEMX_QLM(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_QLM(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000418ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_QLM", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_QLM(a) bdk_pemx_qlm_t
+#define bustype_BDK_PEMX_QLM(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_PEMX_QLM(a) "PEMX_QLM"
+#define device_bar_BDK_PEMX_QLM(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_QLM(a) (a)
+#define arguments_BDK_PEMX_QLM(a) (a),-1,-1,-1
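+
+/* A minimal usage sketch (not from the BDK sources): routing PEM2 traffic to
+ * DLM4 on CN83XX. [PEM_BDLM] may only change while the PEM and its PHYs are
+ * in reset, so the PEM is only turned on afterwards. BDK_CSR_MODIFY() is
+ * assumed from bdk-csr.h, and BDK_PEMX_ON/[PEMON] is assumed to be defined
+ * elsewhere in this header. */
+static inline void example_route_pem2_to_dlm4(bdk_node_t node)
+{
+    BDK_CSR_MODIFY(c, node, BDK_PEMX_QLM(2), c.s.pem_bdlm = 1);
+    BDK_CSR_MODIFY(c, node, BDK_PEMX_ON(2), c.s.pemon = 1); /* bring PEM2 out of reset */
+}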
+
+/**
+ * Register (NCB) pem#_ras_tba_ctl
+ *
+ * PEM RAS Time Based Analysis Control Register
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on MAC reset.
+ */
+union bdk_pemx_ras_tba_ctl
+{
+ uint64_t u;
+ struct bdk_pemx_ras_tba_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t tba_ctrl : 2; /**< [ 1: 0](WO) Controls the start/end of time based analysis (TBA) in the core. Note that TBA can also
+ be controlled
+ by setting the contents of PCIEEP()_CFG114/RC()_CFG114, and that TBA_CTRL will also
+ affect the contents of PCIEEP()_CFG114/RC()_CFG114[TIMER_START].
+ 0x0 = No action.
+ 0x1 = Start time based analysis.
+ 0x2 = End time based analysis. Only used if
+ PCIEEP()_CFG114/RC()_CFG114[TBASE_DUR_SEL] is set to manual control,
+ otherwise it is ignored.
+ 0x3 = Reserved.
+#else /* Word 0 - Little Endian */
+ uint64_t tba_ctrl : 2; /**< [ 1: 0](WO) Controls the start/end of time based analysis (TBA) in the core. Note that TBA can also
+ be controlled
+ by setting the contents of PCIEEP()_CFG114/RC()_CFG114, and that TBA_CTRL will also
+ affect the contents of PCIEEP()_CFG114/RC()_CFG114[TIMER_START].
+ 0x0 = No action.
+ 0x1 = Start time based analysis.
+ 0x2 = End time based analysis. Only used if
+ PCIEEP()_CFG114/RC()_CFG114[TBASE_DUR_SEL] is set to manual control,
+ otherwise it is ignored.
+ 0x3 = Reserved.
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_ras_tba_ctl_s cn8; */
+ struct bdk_pemx_ras_tba_ctl_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t tba_ctrl : 2; /**< [ 1: 0](WO) Controls the start/end of time based analysis (TBA) in the core. Note that TBA can also
+ be controlled
+ by setting the contents of PCIERC_RAS_TBA_CTL, and that this will also
+ affect the contents of PCIERC_RAS_TBA_CTL[TIMER_START].
+ 0x0 = No action.
+ 0x1 = Start time based analysis.
+ 0x2 = End time based analysis. Only used if
+ PCIERC_RAS_TBA_CTL[TBASE_DUR_SEL] is set to manual control,
+ otherwise it is ignored.
+ 0x3 = Reserved.
+#else /* Word 0 - Little Endian */
+ uint64_t tba_ctrl : 2; /**< [ 1: 0](WO) Controls the start/end of time based analysis (TBA) in the core. Note that TBA can also
+ be controlled
+ by setting the contents of PCIERC_RAS_TBA_CTL, and that this will also
+ affect the contents of PCIERC_RAS_TBA_CTL[TIMER_START].
+ 0x0 = No action.
+ 0x1 = Start time based analysis.
+ 0x2 = End time based analysis. Only used if
+ PCIERC_RAS_TBA_CTL[TBASE_DUR_SEL] is set to manual control,
+ otherwise it is ignored.
+ 0x3 = Reserved.
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_pemx_ras_tba_ctl bdk_pemx_ras_tba_ctl_t;
+
+static inline uint64_t BDK_PEMX_RAS_TBA_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_RAS_TBA_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000240ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000060ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_RAS_TBA_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_RAS_TBA_CTL(a) bdk_pemx_ras_tba_ctl_t
+#define bustype_BDK_PEMX_RAS_TBA_CTL(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_RAS_TBA_CTL(a) "PEMX_RAS_TBA_CTL"
+#define device_bar_BDK_PEMX_RAS_TBA_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_RAS_TBA_CTL(a) (a)
+#define arguments_BDK_PEMX_RAS_TBA_CTL(a) (a),-1,-1,-1
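+
+/* A minimal usage sketch (not from the BDK sources): manually bracketing a
+ * time based analysis window. Only meaningful when [TBASE_DUR_SEL] selects
+ * manual control. Assumes the BDK_CSR_WRITE() accessor from bdk-csr.h. */
+static inline void example_run_time_based_analysis(bdk_node_t node, int pem)
+{
+    bdk_pemx_ras_tba_ctl_t tba;
+    tba.u = 0;
+    tba.s.tba_ctrl = 0x1; /* start time based analysis */
+    BDK_CSR_WRITE(node, BDK_PEMX_RAS_TBA_CTL(pem), tba.u);
+    /* ... let the measurement window elapse ... */
+    tba.s.tba_ctrl = 0x2; /* end time based analysis */
+    BDK_CSR_WRITE(node, BDK_PEMX_RAS_TBA_CTL(pem), tba.u);
+}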
+
+/**
+ * Register (NCB) pem#_reads_pc
+ *
+ * PEM Read Count Register
+ * This register contains the read count for debugging purposes.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_reads_pc
+{
+ uint64_t u;
+ struct bdk_pemx_reads_pc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reads : 64; /**< [ 63: 0](RO/H) Total number of SLI reads from remote memory aggregated across all
+ non-masked SWI tags. Software can calculate the average read latency
+ to first data per SLI read request by dividing PEM()_LATENCY_PC[LATENCY]
+ by PEM()_READS_PC[READS]. */
+#else /* Word 0 - Little Endian */
+ uint64_t reads : 64; /**< [ 63: 0](RO/H) Total number of SLI reads from remote memory aggregated across all
+ non-masked SWI tags. Software can calculate the average read latency
+ to first data per SLI read request by dividing PEM()_LATENCY_PC[LATENCY]
+ by PEM()_READS_PC[READS]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_reads_pc_s cn8; */
+ struct bdk_pemx_reads_pc_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reads : 64; /**< [ 63: 0](R/W/H) Total number of NCBO or EBO reads from remote memory since latency tracking logic was
+ enabled. PEM()_LATENCY_PC_CTL[EBO_SEL] controls which outbound bus has its reads
+ latency tracked. This register can only be written by software when
+ PEM()_LATENCY_PC_CTL[ACTIVE] is clear. Software can calculate the average read
+ latency through PEM and external PCIe interface with the following calculation:
+ * Average Latency = PEM()_LATENCY_PC[LATENCY] / PEM()_READS_PC[READS] * 10 ns
+ This calculation can be done at any time while PEM()_LATENCY_PC_CTL[ACTIVE] is set,
+ but will only be fully accurate by following the control flow outlined in the
+ PEM()_LATENCY_PC_CTL[ACTIVE] description. */
+#else /* Word 0 - Little Endian */
+ uint64_t reads : 64; /**< [ 63: 0](R/W/H) Total number of NCBO or EBO reads from remote memory since latency tracking logic was
+ enabled. PEM()_LATENCY_PC_CTL[EBO_SEL] controls which outbound bus has its reads
+ latency tracked. This register can only be written by software when
+ PEM()_LATENCY_PC_CTL[ACTIVE] is clear. Software can calculate the average read
+ latency through PEM and external PCIe interface with the following calculation:
+ * Average Latency = PEM()_LATENCY_PC[LATENCY] / PEM()_READS_PC[READS] * 10 ns
+ This calculation can be done at any time while PEM()_LATENCY_PC_CTL[ACTIVE] is set,
+ but will only be fully accurate by following the control flow outlined in the
+ PEM()_LATENCY_PC_CTL[ACTIVE] description. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_pemx_reads_pc bdk_pemx_reads_pc_t;
+
+static inline uint64_t BDK_PEMX_READS_PC(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_READS_PC(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000498ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000110ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_READS_PC", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_READS_PC(a) bdk_pemx_reads_pc_t
+#define bustype_BDK_PEMX_READS_PC(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_READS_PC(a) "PEMX_READS_PC"
+#define device_bar_BDK_PEMX_READS_PC(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_READS_PC(a) (a)
+#define arguments_BDK_PEMX_READS_PC(a) (a),-1,-1,-1
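+
+/* A minimal usage sketch (not from the BDK sources): the CN9XXX average read
+ * latency calculation described above, i.e. LATENCY / READS * 10 ns. Assumes
+ * the BDK_CSR_READ() accessor from bdk-csr.h and that BDK_PEMX_LATENCY_PC is
+ * defined elsewhere in this header. */
+static inline uint64_t example_avg_read_latency_ns(bdk_node_t node, int pem)
+{
+    uint64_t latency = BDK_CSR_READ(node, BDK_PEMX_LATENCY_PC(pem));
+    uint64_t reads = BDK_CSR_READ(node, BDK_PEMX_READS_PC(pem));
+    return reads ? (latency / reads) * 10 : 0; /* one latency unit = 10 ns */
+}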
+
+/**
+ * Register (NCB) pem#_reg_ctl
+ *
+ * PEM CSR Control Register
+ * This register contains control for register accesses.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_reg_ctl
+{
+ uint64_t u;
+ struct bdk_pemx_reg_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_6_63 : 58;
+ uint64_t gia_timeout : 6; /**< [ 5: 0](R/W) GIA timeout (2^[GIA_TIMEOUT] clock cycles). Timeout for MSI-X commits. When zero, wait
+ for commits is disabled. */
+#else /* Word 0 - Little Endian */
+ uint64_t gia_timeout : 6; /**< [ 5: 0](R/W) GIA timeout (2^[GIA_TIMEOUT] clock cycles). Timeout for MSI-X commits. When zero, wait
+ for commits is disabled. */
+ uint64_t reserved_6_63 : 58;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_reg_ctl_s cn; */
+};
+typedef union bdk_pemx_reg_ctl bdk_pemx_reg_ctl_t;
+
+static inline uint64_t BDK_PEMX_REG_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_REG_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000058ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_REG_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_REG_CTL(a) bdk_pemx_reg_ctl_t
+#define bustype_BDK_PEMX_REG_CTL(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_REG_CTL(a) "PEMX_REG_CTL"
+#define device_bar_BDK_PEMX_REG_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_REG_CTL(a) (a)
+#define arguments_BDK_PEMX_REG_CTL(a) (a),-1,-1,-1
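+
+/* A minimal usage sketch (not from the BDK sources): setting the MSI-X commit
+ * timeout to 2^n clock cycles (n = 0 disables the wait). Assumes the
+ * BDK_CSR_WRITE() accessor from bdk-csr.h. */
+static inline void example_set_gia_timeout(bdk_node_t node, int pem, unsigned log2_cycles)
+{
+    bdk_pemx_reg_ctl_t ctl;
+    ctl.u = 0;
+    ctl.s.gia_timeout = log2_cycles & 0x3f; /* timeout = 2^[GIA_TIMEOUT] cycles */
+    BDK_CSR_WRITE(node, BDK_PEMX_REG_CTL(pem), ctl.u);
+}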
+
+/**
+ * Register (NCB) pem#_reg_huge#_acc
+ *
+ * PEM Huge Region Access Registers
+ * These registers contain address index and control bits for accesses to memory from the cores.
+ * Indexed using NCBO address\<45:38\>.
+ *
+ * For discovery of the size of this register and fields, see PEM()_CONST_ACC.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_reg_hugex_acc
+{
+ uint64_t u;
+ struct bdk_pemx_reg_hugex_acc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_62_63 : 2;
+ uint64_t vf : 8; /**< [ 61: 54](R/W) Virtual function number associated with this access. In RC mode, this
+ field must be zero. */
+ uint64_t vf_active : 1; /**< [ 53: 53](R/W) Access is to virtual function if set. Access is to physical function if
+ clear. */
+ uint64_t reserved_50_52 : 3;
+ uint64_t pf : 4; /**< [ 49: 46](R/W) Physical function number associated with this access. In RC mode, this
+ field must be zero. */
+ uint64_t ctype : 2; /**< [ 45: 44](R/W) The command type to be generated:
+ 0x0 = PCI memory.
+ 0x1 = PCI configuration. Only operations that access bytes within a single aligned dword
+ are supported. Note normally the ECAM would be used in place of this CTYPE.
+ 0x2 = PCI I/O. Only operations that access bytes within a single aligned dword are supported.
+ 0x3 = Reserved. */
+ uint64_t zero : 1; /**< [ 43: 43](R/W) Causes load operations that are eight bytes or less and stay within a single aligned quadword
+ to become zero-length read operations which will return zeros to the EXEC for all read
+ data.
+ Load operations that do not meet the size/alignment requirements above and have [ZERO] set
+ will have unpredictable behavior.
+
+ Internal:
+ When hardware encounters an improperly formed load operation with [ZERO] set, it
+ will drop the load internally and form up a properly sized completion with fault
+ over NCBI to attempt to indicate an error condition. */
+ uint64_t wnmerge : 1; /**< [ 42: 42](R/W) When set, no write merging is allowed in this window. */
+ uint64_t rnmerge : 1; /**< [ 41: 41](R/W) When set, no read merging is allowed in this window. */
+ uint64_t wtype : 3; /**< [ 40: 38](R/W) Write type. ADDRTYPE\<2:0\> for write operations to this region.
+ ADDRTYPE\<0\> is the relaxed-order attribute.
+ ADDRTYPE\<1\> is the no-snoop attribute.
+ ADDRTYPE\<2\> is the id-based ordering attribute. */
+ uint64_t rtype : 3; /**< [ 37: 35](R/W) Read type. ADDRTYPE\<2:0\> for read operations to this region.
+ ADDRTYPE\<0\> is the relaxed-order attribute.
+ ADDRTYPE\<1\> is the no-snoop attribute.
+ ADDRTYPE\<2\> is the id-based ordering attribute. */
+ uint64_t reserved_26_34 : 9;
+ uint64_t ba : 26; /**< [ 25: 0](R/W) Bus address. Address bits\<63:38\> for read/write operations that use this region. */
+#else /* Word 0 - Little Endian */
+ uint64_t ba : 26; /**< [ 25: 0](R/W) Bus address. Address bits\<63:38\> for read/write operations that use this region. */
+ uint64_t reserved_26_34 : 9;
+ uint64_t rtype : 3; /**< [ 37: 35](R/W) Read type. ADDRTYPE\<2:0\> for read operations to this region.
+ ADDRTYPE\<0\> is the relaxed-order attribute.
+ ADDRTYPE\<1\> is the no-snoop attribute.
+ ADDRTYPE\<2\> is the id-based ordering attribute. */
+ uint64_t wtype : 3; /**< [ 40: 38](R/W) Write type. ADDRTYPE\<2:0\> for write operations to this region.
+ ADDRTYPE\<0\> is the relaxed-order attribute.
+ ADDRTYPE\<1\> is the no-snoop attribute.
+ ADDRTYPE\<2\> is the id-based ordering attribute. */
+ uint64_t rnmerge : 1; /**< [ 41: 41](R/W) When set, no read merging is allowed in this window. */
+ uint64_t wnmerge : 1; /**< [ 42: 42](R/W) When set, no write merging is allowed in this window. */
+ uint64_t zero : 1; /**< [ 43: 43](R/W) Causes load operations that are eight bytes or less and stay within a single aligned quadword
+ to become zero-length read operations which will return zeros to the EXEC for all read
+ data.
+ Load operations that do not meet the size/alignment requirements above and have [ZERO] set
+ will have unpredictable behavior.
+
+ Internal:
+ When hardware encounters an improperly formed load operation with [ZERO] set, it
+ will drop the load internally and form up a properly sized completion with fault
+ over NCBI to attempt to indicate an error condition. */
+ uint64_t ctype : 2; /**< [ 45: 44](R/W) The command type to be generated:
+ 0x0 = PCI memory.
+ 0x1 = PCI configuration. Only operations that access bytes within a single aligned dword
+ are supported. Note normally the ECAM would be used in place of this CTYPE.
+ 0x2 = PCI I/O. Only operations that access bytes within a single aligned dword are supported.
+ 0x3 = Reserved. */
+ uint64_t pf : 4; /**< [ 49: 46](R/W) Physical function number associated with this access. In RC mode, this
+ field must be zero. */
+ uint64_t reserved_50_52 : 3;
+ uint64_t vf_active : 1; /**< [ 53: 53](R/W) Access is to virtual function if set. Access is to physical function if
+ clear. */
+ uint64_t vf : 8; /**< [ 61: 54](R/W) Virtual function number associated with this access. In RC mode, this
+ field must be zero. */
+ uint64_t reserved_62_63 : 2;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_reg_hugex_acc_s cn; */
+};
+typedef union bdk_pemx_reg_hugex_acc bdk_pemx_reg_hugex_acc_t;
+
+static inline uint64_t BDK_PEMX_REG_HUGEX_ACC(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_REG_HUGEX_ACC(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=3) && (b<=255)))
+ return 0x8e0000006000ll + 0x1000000000ll * ((a) & 0x3) + 0x10ll * ((b) & 0xff);
+ __bdk_csr_fatal("PEMX_REG_HUGEX_ACC", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_PEMX_REG_HUGEX_ACC(a,b) bdk_pemx_reg_hugex_acc_t
+#define bustype_BDK_PEMX_REG_HUGEX_ACC(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_REG_HUGEX_ACC(a,b) "PEMX_REG_HUGEX_ACC"
+#define device_bar_BDK_PEMX_REG_HUGEX_ACC(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_REG_HUGEX_ACC(a,b) (a)
+#define arguments_BDK_PEMX_REG_HUGEX_ACC(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) pem#_reg_huge#_acc2
+ *
+ * PEM Huge Region Access 2 Registers
+ * These registers contain address index and control bits for accesses to memory from the cores.
+ * Indexed using NCBO address\<45:38\>.
+ *
+ * For discovery of the size of this register and fields, see PEM()_CONST_ACC.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_reg_hugex_acc2
+{
+ uint64_t u;
+ struct bdk_pemx_reg_hugex_acc2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_reg_hugex_acc2_s cn; */
+};
+typedef union bdk_pemx_reg_hugex_acc2 bdk_pemx_reg_hugex_acc2_t;
+
+static inline uint64_t BDK_PEMX_REG_HUGEX_ACC2(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_REG_HUGEX_ACC2(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=3) && (b<=255)))
+ return 0x8e0000006008ll + 0x1000000000ll * ((a) & 0x3) + 0x10ll * ((b) & 0xff);
+ __bdk_csr_fatal("PEMX_REG_HUGEX_ACC2", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_PEMX_REG_HUGEX_ACC2(a,b) bdk_pemx_reg_hugex_acc2_t
+#define bustype_BDK_PEMX_REG_HUGEX_ACC2(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_REG_HUGEX_ACC2(a,b) "PEMX_REG_HUGEX_ACC2"
+#define device_bar_BDK_PEMX_REG_HUGEX_ACC2(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_REG_HUGEX_ACC2(a,b) (a)
+#define arguments_BDK_PEMX_REG_HUGEX_ACC2(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) pem#_reg_norm#_acc
+ *
+ * PEM Normal Region Access Registers
+ * These registers contain address index and control bits for accesses to memory from the cores.
+ * Indexed using NCBO address\<38:31\>.
+ *
+ * See PEM()_CONST_ACC.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_reg_normx_acc
+{
+ uint64_t u;
+ struct bdk_pemx_reg_normx_acc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_62_63 : 2;
+ uint64_t vf : 8; /**< [ 61: 54](R/W) Virtual function number associated with this access. In RC mode, this
+ field must be zero. */
+ uint64_t vf_active : 1; /**< [ 53: 53](R/W) Access is to virtual function if set. Access is to physical function if
+ clear. */
+ uint64_t reserved_50_52 : 3;
+ uint64_t pf : 4; /**< [ 49: 46](R/W) Physical function number associated with this access. In RC mode, this
+ field must be zero. */
+ uint64_t ctype : 2; /**< [ 45: 44](R/W) The command type to be generated:
+ 0x0 = PCI memory.
+ 0x1 = PCI configuration. Only operations that access bytes within a single aligned dword
+ are supported. Note normally the ECAM would be used in place of this CTYPE.
+ 0x2 = PCI I/O. Only operations that access bytes within a single aligned dword are supported.
+ 0x3 = Reserved. */
+ uint64_t zero : 1; /**< [ 43: 43](R/W) Causes load operations that are eight bytes or less and stay within a single aligned quadword
+ to become zero-length read operations which will return zeros to the EXEC for all read
+ data.
+ Load operations that do not meet the size/alignment requirements above and have [ZERO] set
+ will have unpredictable behavior.
+
+ Internal:
+ When hardware encounters an improperly formed load operation with [ZERO] set, it
+ will drop the load internally and form up a properly sized completion with fault
+ over NCBI to attempt to indicate an error condition. */
+ uint64_t wnmerge : 1; /**< [ 42: 42](R/W) When set, no write merging (aka write combining) is allowed in this
+ window. Write combining may result in higher performance. Write combining is
+ legal and typically used in endpoints, or embedded applications. Write combining
+ is not technically permitted in standard operating system root complexes, but
+ typically functions correctly. */
+ uint64_t rnmerge : 1; /**< [ 41: 41](R/W) When set, no read merging (aka read combining) is allowed in this window. Read
+ combining may result in higher performance. Read combining is typically used in
+ endpoints, or embedded applications. Read combining is not typically used in
+ standard operating system root complexes. */
+ uint64_t wtype : 3; /**< [ 40: 38](R/W) Write type. ADDRTYPE\<2:0\> for write operations to this region.
+ ADDRTYPE\<0\> is the relaxed-order attribute.
+ ADDRTYPE\<1\> is the no-snoop attribute.
+ ADDRTYPE\<2\> is the id-based ordering attribute. */
+ uint64_t rtype : 3; /**< [ 37: 35](R/W) Read type. ADDRTYPE\<2:0\> for read operations to this region.
+ ADDRTYPE\<0\> is the relaxed-order attribute.
+ ADDRTYPE\<1\> is the no-snoop attribute.
+ ADDRTYPE\<2\> is the id-based ordering attribute. */
+ uint64_t reserved_33_34 : 2;
+ uint64_t ba : 33; /**< [ 32: 0](R/W) Bus address. Address bits\<63:31\> for read/write operations that use this region. */
+#else /* Word 0 - Little Endian */
+ uint64_t ba : 33; /**< [ 32: 0](R/W) Bus address. Address bits\<63:31\> for read/write operations that use this region. */
+ uint64_t reserved_33_34 : 2;
+ uint64_t rtype : 3; /**< [ 37: 35](R/W) Read type. ADDRTYPE\<2:0\> for read operations to this region.
+ ADDRTYPE\<0\> is the relaxed-order attribute.
+ ADDRTYPE\<1\> is the no-snoop attribute.
+ ADDRTYPE\<2\> is the id-based ordering attribute. */
+ uint64_t wtype : 3; /**< [ 40: 38](R/W) Write type. ADDRTYPE\<2:0\> for write operations to this region.
+ ADDRTYPE\<0\> is the relaxed-order attribute.
+ ADDRTYPE\<1\> is the no-snoop attribute.
+ ADDRTYPE\<2\> is the id-based ordering attribute. */
+ uint64_t rnmerge : 1; /**< [ 41: 41](R/W) When set, no read merging (aka read combining) is allowed in this window. Read
+ combining may result in higher performance. Read combining is typically used in
+ endpoints, or embedded applications. Read combining is not typically used in
+ standard operating system root complexes. */
+ uint64_t wnmerge : 1; /**< [ 42: 42](R/W) When set, no write merging (aka write combining) is allowed in this
+ window. Write combining may result in higher performance. Write combining is
+ legal and typically used in endpoints, or embedded applications. Write combining
+ is not technically permitted in standard operating system root complexes, but
+ typically functions correctly. */
+ uint64_t zero : 1; /**< [ 43: 43](R/W) Causes load operations that are eight bytes or less and stay within a single aligned quadword
+ to become zero-length read operations which will return zeros to the EXEC for all read
+ data.
+ Load operations that do not meet the size/alignment requirements above and have [ZERO] set
+ will have unpredictable behavior.
+
+ Internal:
+ When hardware encounters an improperly formed load operation with [ZERO] set, it
+ will drop the load internally and form up a properly sized completion with fault
+ over NCBI to attempt to indicate an error condition. */
+ uint64_t ctype : 2; /**< [ 45: 44](R/W) The command type to be generated:
+ 0x0 = PCI memory.
+ 0x1 = PCI configuration. Only operations that access bytes within a single aligned dword
+ are supported. Note normally the ECAM would be used in place of this CTYPE.
+ 0x2 = PCI I/O. Only operations that access bytes within a single aligned dword are supported.
+ 0x3 = Reserved. */
+ uint64_t pf : 4; /**< [ 49: 46](R/W) Physical function number associated with this access. In RC mode, this
+ field must be zero. */
+ uint64_t reserved_50_52 : 3;
+ uint64_t vf_active : 1; /**< [ 53: 53](R/W) Access is to virtual function if set. Access is to physical function if
+ clear. */
+ uint64_t vf : 8; /**< [ 61: 54](R/W) Virtual function number associated with this access. In RC mode, this
+ field must be zero. */
+ uint64_t reserved_62_63 : 2;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_reg_normx_acc_s cn; */
+};
+typedef union bdk_pemx_reg_normx_acc bdk_pemx_reg_normx_acc_t;
+
+static inline uint64_t BDK_PEMX_REG_NORMX_ACC(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_REG_NORMX_ACC(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=3) && (b<=255)))
+ return 0x8e0000004000ll + 0x1000000000ll * ((a) & 0x3) + 0x10ll * ((b) & 0xff);
+ __bdk_csr_fatal("PEMX_REG_NORMX_ACC", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_PEMX_REG_NORMX_ACC(a,b) bdk_pemx_reg_normx_acc_t
+#define bustype_BDK_PEMX_REG_NORMX_ACC(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_REG_NORMX_ACC(a,b) "PEMX_REG_NORMX_ACC"
+#define device_bar_BDK_PEMX_REG_NORMX_ACC(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_REG_NORMX_ACC(a,b) (a)
+#define arguments_BDK_PEMX_REG_NORMX_ACC(a,b) (a),(b),-1,-1
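+
+/* A minimal usage sketch (not from the BDK sources): pointing one normal-region
+ * window (selected by NCBO address<38:31>) at a PCIe memory-space bus address
+ * with default ordering attributes. Assumes the BDK_CSR_WRITE() accessor from
+ * bdk-csr.h. */
+static inline void example_open_mem_window(bdk_node_t node, int pem, int window,
+                                           uint64_t bus_addr)
+{
+    bdk_pemx_reg_normx_acc_t acc;
+    acc.u = 0;
+    acc.s.ba = bus_addr >> 31; /* BA holds PCIe address bits <63:31> */
+    acc.s.ctype = 0x0;         /* PCI memory space */
+    acc.s.wtype = 0x0;         /* writes: strict order, snooped */
+    acc.s.rtype = 0x0;         /* reads: strict order, snooped */
+    BDK_CSR_WRITE(node, BDK_PEMX_REG_NORMX_ACC(pem, window), acc.u);
+}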
+
+/**
+ * Register (NCB) pem#_reg_norm#_acc2
+ *
+ * PEM Normal Region Access 2 Registers
+ * See PEM()_CONST_ACC.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_reg_normx_acc2
+{
+ uint64_t u;
+ struct bdk_pemx_reg_normx_acc2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_reg_normx_acc2_s cn; */
+};
+typedef union bdk_pemx_reg_normx_acc2 bdk_pemx_reg_normx_acc2_t;
+
+static inline uint64_t BDK_PEMX_REG_NORMX_ACC2(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_REG_NORMX_ACC2(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=3) && (b<=255)))
+ return 0x8e0000004008ll + 0x1000000000ll * ((a) & 0x3) + 0x10ll * ((b) & 0xff);
+ __bdk_csr_fatal("PEMX_REG_NORMX_ACC2", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_PEMX_REG_NORMX_ACC2(a,b) bdk_pemx_reg_normx_acc2_t
+#define bustype_BDK_PEMX_REG_NORMX_ACC2(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_REG_NORMX_ACC2(a,b) "PEMX_REG_NORMX_ACC2"
+#define device_bar_BDK_PEMX_REG_NORMX_ACC2(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_REG_NORMX_ACC2(a,b) (a)
+#define arguments_BDK_PEMX_REG_NORMX_ACC2(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) pem#_rmerge_merged_pc
+ *
+ * PEM Merge Reads Merged Performance Counter Register
+ * This register reports how many reads were merged within the outbound read merge unit.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_rmerge_merged_pc
+{
+ uint64_t u;
+ struct bdk_pemx_rmerge_merged_pc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t rmerge_merged : 64; /**< [ 63: 0](R/W/H) Each NCBO read operation mapped to MEM type by the ACC table that merges with a previous
+ read will increment this count. */
+#else /* Word 0 - Little Endian */
+ uint64_t rmerge_merged : 64; /**< [ 63: 0](R/W/H) Each NCBO read operation mapped to MEM type by the ACC table that merges with a previous
+ read will increment this count. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_rmerge_merged_pc_s cn; */
+};
+typedef union bdk_pemx_rmerge_merged_pc bdk_pemx_rmerge_merged_pc_t;
+
+static inline uint64_t BDK_PEMX_RMERGE_MERGED_PC(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_RMERGE_MERGED_PC(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000198ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_RMERGE_MERGED_PC", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_RMERGE_MERGED_PC(a) bdk_pemx_rmerge_merged_pc_t
+#define bustype_BDK_PEMX_RMERGE_MERGED_PC(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_RMERGE_MERGED_PC(a) "PEMX_RMERGE_MERGED_PC"
+#define device_bar_BDK_PEMX_RMERGE_MERGED_PC(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_RMERGE_MERGED_PC(a) (a)
+#define arguments_BDK_PEMX_RMERGE_MERGED_PC(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) pem#_rmerge_received_pc
+ *
+ * PEM Merge Reads Received Performance Counter Register
+ * This register reports the number of reads that enter the outbound read merge unit.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_rmerge_received_pc
+{
+ uint64_t u;
+ struct bdk_pemx_rmerge_received_pc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t rmerge_reads : 64; /**< [ 63: 0](R/W/H) Each NCBO read operation mapped to MEM type by the ACC table will increment this count. */
+#else /* Word 0 - Little Endian */
+ uint64_t rmerge_reads : 64; /**< [ 63: 0](R/W/H) Each NCBO read operation mapped to MEM type by the ACC table will increment this count. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_rmerge_received_pc_s cn; */
+};
+typedef union bdk_pemx_rmerge_received_pc bdk_pemx_rmerge_received_pc_t;
+
+static inline uint64_t BDK_PEMX_RMERGE_RECEIVED_PC(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_RMERGE_RECEIVED_PC(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000190ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_RMERGE_RECEIVED_PC", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_RMERGE_RECEIVED_PC(a) bdk_pemx_rmerge_received_pc_t
+#define bustype_BDK_PEMX_RMERGE_RECEIVED_PC(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_RMERGE_RECEIVED_PC(a) "PEMX_RMERGE_RECEIVED_PC"
+#define device_bar_BDK_PEMX_RMERGE_RECEIVED_PC(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_RMERGE_RECEIVED_PC(a) (a)
+#define arguments_BDK_PEMX_RMERGE_RECEIVED_PC(a) (a),-1,-1,-1
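+
+/* A minimal usage sketch (not from the BDK sources): computing what percentage
+ * of outbound reads were merged, from the two counters above. Assumes the
+ * BDK_CSR_READ() accessor from bdk-csr.h. */
+static inline uint64_t example_read_merge_percent(bdk_node_t node, int pem)
+{
+    uint64_t merged = BDK_CSR_READ(node, BDK_PEMX_RMERGE_MERGED_PC(pem));
+    uint64_t received = BDK_CSR_READ(node, BDK_PEMX_RMERGE_RECEIVED_PC(pem));
+    return received ? (merged * 100) / received : 0;
+}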
+
+/**
+ * Register (RSL) pem#_spi_ctl
+ *
+ * PEM SPI Control Register
+ */
+union bdk_pemx_spi_ctl
+{
+ uint64_t u;
+ struct bdk_pemx_spi_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_34_63 : 30;
+ uint64_t start_busy : 1; /**< [ 33: 33](R/W/H) Start/busy status. Starts the SPI transaction when written; reads 1 while the EEPROM is busy, 0 when complete. */
+ uint64_t tvalid : 1; /**< [ 32: 32](R/W/H) Reads 1 if at least one valid entry was read from EEPROM and written to a CSR. Write to
+ clear status. */
+ uint64_t cmd : 8; /**< [ 31: 24](R/W/H) SPI command to be passed to the flash memory.
+ This field will clear when command is complete.
+
+ Examples of some commonly used commands:
+ 0x1 = WRSR: Write status register. A single-byte write of
+ corresponding PEM()_SPI_DATA[DATA\<7:0\>] to the register.
+ 0x2 = PAGE PROGRAM/WRITE: An eight-byte page-mode write of the 64-bits of corresponding
+ PEM()_SPI_DATA to the memory array. Can only be issued to Sector 0.
+ Note, most devices require BULK or SECTOR ERASE to set bits first.
+ 0x3 = READ: An eight-byte page-mode read access from the memory array
+ with result in the 64-bits of corresponding PEM()_SPI_DATA.
+ Can only be issued to sector 0.
+ 0x4 = WRDI: Clear the write-enable latch (i.e. write protect the device).
+ 0x5 = RDSR: Read status register. A single-byte read access from
+ the register with result in corresponding PEM()_SPI_DATA[DATA]\<7:0\>.
+ 0x6 = WREN: Set the write-enable latch (i.e. allow writes to occur).
+ 0xB = READ DATA HIGHER SPEED: Not supported.
+ 0xAB = WAKE: Release from deep power-down.
+ 0xB9 = SLEEP: Deep power-down.
+ 0xC7 = BULK ERASE: Sets all bits to 1.
+ 0xD8 = SECTOR ERASE: Sets to 1 all bits in the chosen sector (pointed to by [ADR]\<18:15\>).
+ 0x9F = READ ID: A two-byte read access to get device identification
+ with result in the 64-bits of corresponding PEM()_SPI_DATA. */
+ uint64_t reserved_19_23 : 5;
+ uint64_t adr : 19; /**< [ 18: 0](R/W/H) EEPROM CMD byte address.
+ For READ and PAGE PROGRAM commands, forced to an 8-byte aligned entry in sector 0, so
+ \<18:16\> and \<2:0\> are forced to zero. For all other commands, the entire ADR is passed.
+
+ This field will clear when command is complete. */
+#else /* Word 0 - Little Endian */
+ uint64_t adr : 19; /**< [ 18: 0](R/W/H) EEPROM CMD byte address.
+ For READ and PAGE PROGRAM commands, forced to an 8-byte aligned entry in sector 0, so
+ \<18:16\> and \<2:0\> are forced to zero. For all other commands, the entire ADR is passed.
+
+ This field will clear when command is complete. */
+ uint64_t reserved_19_23 : 5;
+ uint64_t cmd : 8; /**< [ 31: 24](R/W/H) SPI command to be passed to the flash memory.
+ This field will clear when command is complete.
+
+ Examples of some commonly used commands:
+ 0x1 = WRSR: Write status register. A single-byte write of
+ corresponding PEM()_SPI_DATA[DATA\<7:0\>] to the register.
+ 0x2 = PAGE PROGRAM/WRITE: An eight-byte page-mode write of the 64-bits of corresponding
+ PEM()_SPI_DATA to the memory array. Can only be issued to Sector 0.
+ Note, most devices require BULK or SECTOR ERASE to set bits first.
+ 0x3 = READ: An eight-byte page-mode read access from the memory array
+ with result in the 64-bits of corresponding PEM()_SPI_DATA.
+ Can only be issued to sector 0.
+ 0x4 = WRDI: Clear the write-enable latch (i.e. write protect the device).
+ 0x5 = RDSR: Read status register. A single-byte read access from
+ the register with result in corresponding PEM()_SPI_DATA[DATA]\<7:0\>.
+ 0x6 = WREN: Set the write-enable latch (i.e. allow writes to occur).
+ 0xB = READ DATA HIGHER SPEED: Not supported.
+ 0xAB = WAKE: Release from deep power-down.
+ 0xB9 = SLEEP: Deep power-down.
+ 0xC7 = BULK ERASE: Sets all bits to 1.
+ 0xD8 = SECTOR ERASE: Sets to 1 all bits in the chosen sector (pointed to by [ADR]\<18:15\>).
+ 0x9F = READ ID: A two-byte read access to get device identification
+ with result in the 64-bits of corresponding PEM()_SPI_DATA. */
+ uint64_t tvalid : 1; /**< [ 32: 32](R/W/H) Reads 1 if at least one valid entry was read from EEPROM and written to a CSR. Write to
+ clear status. */
+ uint64_t start_busy : 1; /**< [ 33: 33](R/W/H) Start/busy status. Writing a 1 starts the SPI transaction; reads 1 while the EEPROM is busy, 0 when complete. */
+ uint64_t reserved_34_63 : 30;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_spi_ctl_s cn; */
+};
+typedef union bdk_pemx_spi_ctl bdk_pemx_spi_ctl_t;
+
+static inline uint64_t BDK_PEMX_SPI_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_SPI_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000180ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_SPI_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_SPI_CTL(a) bdk_pemx_spi_ctl_t
+#define bustype_BDK_PEMX_SPI_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_PEMX_SPI_CTL(a) "PEMX_SPI_CTL"
+#define device_bar_BDK_PEMX_SPI_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_SPI_CTL(a) (a)
+#define arguments_BDK_PEMX_SPI_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) pem#_spi_data
+ *
+ * PEM SPI Data Register
+ * This register contains the most recently read or written SPI data and is unpredictable upon
+ * power-up.
+ */
+union bdk_pemx_spi_data
+{
+ uint64_t u;
+ struct bdk_pemx_spi_data_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t preamble : 16; /**< [ 63: 48](R/W/H) EEPROM PREAMBLE read or write data. */
+ uint64_t spi_rsvd : 3; /**< [ 47: 45](R/W/H) Reserved. */
+ uint64_t cs2 : 1; /**< [ 44: 44](R/W/H) EEPROM CS2 read or write data bit. */
+ uint64_t adr : 12; /**< [ 43: 32](R/W/H) EEPROM CFG ADR read or write data. */
+ uint64_t data : 32; /**< [ 31: 0](R/W/H) EEPROM DATA read or write data. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 32; /**< [ 31: 0](R/W/H) EEPROM DATA read or write data. */
+ uint64_t adr : 12; /**< [ 43: 32](R/W/H) EEPROM CFG ADR read or write data. */
+ uint64_t cs2 : 1; /**< [ 44: 44](R/W/H) EEPROM CS2 read or write data bit. */
+ uint64_t spi_rsvd : 3; /**< [ 47: 45](R/W/H) Reserved. */
+ uint64_t preamble : 16; /**< [ 63: 48](R/W/H) EEPROM PREAMBLE read or write data. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_spi_data_s cn; */
+};
+typedef union bdk_pemx_spi_data bdk_pemx_spi_data_t;
+
+static inline uint64_t BDK_PEMX_SPI_DATA(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_SPI_DATA(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000188ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_SPI_DATA", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_SPI_DATA(a) bdk_pemx_spi_data_t
+#define bustype_BDK_PEMX_SPI_DATA(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_PEMX_SPI_DATA(a) "PEMX_SPI_DATA"
+#define device_bar_BDK_PEMX_SPI_DATA(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_SPI_DATA(a) (a)
+#define arguments_BDK_PEMX_SPI_DATA(a) (a),-1,-1,-1
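+
+/*
+ * Usage sketch: issue a SPI READ through PEM()_SPI_CTL and fetch the
+ * result from PEM()_SPI_DATA. A minimal example assuming a flat physical
+ * MMIO mapping; firmware would normally use the BDK CSR accessors.
+ *
+ *   static uint64_t pem_spi_read8(unsigned long pem, uint64_t byte_adr)
+ *   {
+ *       volatile uint64_t *ctl = (volatile uint64_t *)BDK_PEMX_SPI_CTL(pem);
+ *       volatile uint64_t *dat = (volatile uint64_t *)BDK_PEMX_SPI_DATA(pem);
+ *       bdk_pemx_spi_ctl_t c = { .u = 0 };
+ *       c.s.cmd = 0x3;               // READ: eight-byte page-mode read
+ *       c.s.adr = byte_adr & 0xfff8; // sector 0 only; <18:16> and <2:0> forced to 0
+ *       c.s.start_busy = 1;          // writing 1 starts the transaction
+ *       *ctl = c.u;
+ *       do
+ *           c.u = *ctl;              // poll until the EEPROM finishes
+ *       while (c.s.start_busy);
+ *       return *dat;                 // 64 bits latched in PEM()_SPI_DATA
+ *   }
+ */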
+
+/**
+ * Register (NCB) pem#_strap
+ *
+ * PEM Pin Strapping Register
+ * This register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on cold reset.
+ */
+union bdk_pemx_strap
+{
+ uint64_t u;
+ struct bdk_pemx_strap_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t pilaneswap : 1; /**< [ 3: 3](RO/H) The value of PCIE_REV_LANES, which is captured on chip cold reset. It is not
+ affected by any other reset. When set, lane swapping is performed to/from the
+ SerDes. When clear, no lane swapping is performed. */
+ uint64_t pilanes8 : 1; /**< [ 2: 2](RO/H) The value of bit \<2\> of PCIE*_MODE\<2:0\>, which is captured on chip cold reset. It is not
+ affected by any other reset. When set, the PEM is configured for a maximum of
+ 8 lanes. When clear, the PEM is configured for a maximum of 4 lanes. */
+ uint64_t reserved_0_1 : 2;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_1 : 2;
+ uint64_t pilanes8 : 1; /**< [ 2: 2](RO/H) The value of bit \<2\> of PCIE*_MODE\<2:0\>, which is captured on chip cold reset. It is not
+ affected by any other reset. When set, the PEM is configured for a maximum of
+ 8 lanes. When clear, the PEM is configured for a maximum of 4 lanes. */
+ uint64_t pilaneswap : 1; /**< [ 3: 3](RO/H) The value of PCIE_REV_LANES, which is captured on chip cold reset. It is not
+ affected by any other reset. When set, lane swapping is performed to/from the
+ SerDes. When clear, no lane swapping is performed. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pemx_strap_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t pilaneswap : 1; /**< [ 3: 3](RO/H) The value of PCIE_REV_LANES, which is captured on chip cold reset. It is not
+ affected by any other reset. When set, lane swapping is performed to/from the
+ SerDes. When clear, no lane swapping is performed. */
+ uint64_t pilanes8 : 1; /**< [ 2: 2](RO/H) The value of bit \<2\> of PCIE*_MODE\<2:0\>, which is captured on chip cold reset. It is not
+ affected by any other reset. When set, the PEM is configured for a maximum of
+ 8 lanes. When clear, the PEM is configured for a maximum of 4 lanes. */
+ uint64_t pimode : 2; /**< [ 1: 0](RO/H) The value of PCIE_MODE\<1:0\>, which are captured on chip cold reset. They are
+ not affected by any other reset.
+ 0x0 = EP mode, Gen1 speed.
+ 0x1 = EP mode, Gen2 speed.
+ 0x2 = EP mode, Gen3 speed.
+ 0x3 = RC mode, defaults to Gen3 speed. */
+#else /* Word 0 - Little Endian */
+ uint64_t pimode : 2; /**< [ 1: 0](RO/H) The value of PCIE_MODE\<1:0\>, which are captured on chip cold reset. They are
+ not affected by any other reset.
+ 0x0 = EP mode, Gen1 speed.
+ 0x1 = EP mode, Gen2 speed.
+ 0x2 = EP mode, Gen3 speed.
+ 0x3 = RC mode, defaults to Gen3 speed. */
+ uint64_t pilanes8 : 1; /**< [ 2: 2](RO/H) The value of bit \<2\> of PCIE*_MODE\<2:0\>, which is captured on chip cold reset. It is not
+ affected by any other reset. When set, the PEM is configured for a maximum of
+ 8 lanes. When clear, the PEM is configured for a maximum of 4 lanes. */
+ uint64_t pilaneswap : 1; /**< [ 3: 3](RO/H) The value of PCIE_REV_LANES, which is captured on chip cold reset. It is not
+ affected by any other reset. When set, lane swapping is performed to/from the
+ SerDes. When clear, no lane swapping is performed. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_pemx_strap_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t pirc : 1; /**< [ 0: 0](RO/H) The inverted value of the endpoint strap (GPIO_STRAP_PIN_E::PCIE0_EP_MODE,
+ GPIO_STRAP_PIN_E::PCIE2_EP_MODE, 1 for other PEMs) which is captured on chip
+ cold reset. It is not affected by any other reset. When set, PEM defaults to
+ root complex mode. When clear, PEM defaults to endpoint mode. */
+#else /* Word 0 - Little Endian */
+ uint64_t pirc : 1; /**< [ 0: 0](RO/H) The inverted value of the endpoint strap (GPIO_STRAP_PIN_E::PCIE0_EP_MODE,
+ GPIO_STRAP_PIN_E::PCIE2_EP_MODE, 1 for other PEMs) which is captured on chip
+ cold reset. It is not affected by any other reset. When set, PEM defaults to
+ root complex mode. When clear, PEM defaults to endpoint mode. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_pemx_strap bdk_pemx_strap_t;
+
+static inline uint64_t BDK_PEMX_STRAP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_STRAP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000408ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e00000000c0ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_STRAP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_STRAP(a) bdk_pemx_strap_t
+#define bustype_BDK_PEMX_STRAP(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_STRAP(a) "PEMX_STRAP"
+#define device_bar_BDK_PEMX_STRAP(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_STRAP(a) (a)
+#define arguments_BDK_PEMX_STRAP(a) (a),-1,-1,-1
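+
+/*
+ * Usage sketch: decode the cold-reset straps. The layout differs by
+ * generation ([PIMODE] exists only on CN8XXX, [PIRC] only on CN9XXX), so
+ * callers must pick the matching model-specific view. Flat MMIO mapping
+ * assumed, as in the other sketches.
+ *
+ *   bdk_pemx_strap_t strap;
+ *   strap.u = *(volatile uint64_t *)BDK_PEMX_STRAP(0);
+ *   int is_rc = CAVIUM_IS_MODEL(CAVIUM_CN9XXX)
+ *               ? strap.cn9.pirc             // 1 = root complex, 0 = endpoint
+ *               : (strap.cn8.pimode == 0x3); // 0x3 = RC mode, Gen3 default
+ *   int max_lanes = strap.s.pilanes8 ? 8 : 4; // CN8XXX only; reserved on CN9XXX
+ */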
+
+/**
+ * Register (RSL) pem#_tlp_credits
+ *
+ * PEM TLP Credits Register
+ * This register specifies the number of credits for use in moving TLPs. When this register is
+ * written, the credit values are reset to the register value. A write to this register should
+ * take place before traffic flow starts.
+ */
+union bdk_pemx_tlp_credits
+{
+ uint64_t u;
+ struct bdk_pemx_tlp_credits_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pem_cpl : 12; /**< [ 63: 52](R/W) TLP 16 B credits for completion TLPs in the peer. Legal values are 0x42 to 0x104. */
+ uint64_t pem_np : 8; /**< [ 51: 44](R/W) TLP 16 B credits for nonposted TLPs in the peer. Legal values are 0x4 to 0x20. */
+ uint64_t pem_p : 12; /**< [ 43: 32](R/W) TLP 16 B credits for posted TLPs in the peer. Legal values are 0x42 to 0x104. */
+ uint64_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_31 : 32;
+ uint64_t pem_p : 12; /**< [ 43: 32](R/W) TLP 16 B credits for posted TLPs in the peer. Legal values are 0x42 to 0x104. */
+ uint64_t pem_np : 8; /**< [ 51: 44](R/W) TLP 16 B credits for nonposted TLPs in the peer. Legal values are 0x4 to 0x20. */
+ uint64_t pem_cpl : 12; /**< [ 63: 52](R/W) TLP 16 B credits for completion TLPs in the peer. Legal values are 0x42 to 0x104. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pemx_tlp_credits_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t sli_cpl : 8; /**< [ 23: 16](R/W) TLP 16B credits for Completion TLPs in the SLI. Legal values are 0x24 to 0x80 and this
+ value is not dependent on the number of PEMs wire-OR'd together. Software should
+ reprogram this register for performance reasons. */
+ uint64_t sli_np : 8; /**< [ 15: 8](R/W) TLP 16B credits for Non-Posted TLPs in the SLI. Legal values are 0x4 to 0x10 and this
+ value is not dependent on the number of PEMs wire-OR'd together. Software should
+ reprogram this register for performance reasons. */
+ uint64_t sli_p : 8; /**< [ 7: 0](R/W) TLP 16B credits for Posted TLPs in the SLI. Legal values are 0x24 to 0x80 and this value
+ is not dependent on the number of PEMs wire-OR'd together. Software should reprogram this
+ register for performance reasons. */
+#else /* Word 0 - Little Endian */
+ uint64_t sli_p : 8; /**< [ 7: 0](R/W) TLP 16B credits for Posted TLPs in the SLI. Legal values are 0x24 to 0x80 and this value
+ is not dependent on the number of PEMs wire-OR'd together. Software should reprogram this
+ register for performance reasons. */
+ uint64_t sli_np : 8; /**< [ 15: 8](R/W) TLP 16B credits for Non-Posted TLPs in the SLI. Legal values are 0x4 to 0x10 and this
+ value is not dependent on the number of PEMs wire-OR'd together. Software should
+ reprogram this register for performance reasons. */
+ uint64_t sli_cpl : 8; /**< [ 23: 16](R/W) TLP 16B credits for Completion TLPs in the SLI. Legal values are 0x24 to 0x80 and this
+ value is not dependent on the number of PEMs wire-OR'd together. Software should
+ reprogram this register for performance reasons. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_pemx_tlp_credits_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t sli_cpl : 8; /**< [ 23: 16](R/W) TLP 16B credits for Completion TLPs in the SLI. Legal values are 0x24 to 0x4F
+ and this value is not dependent on the number of PEMs wire-OR'd
+ together. Software should reprogram this register for performance reasons. */
+ uint64_t sli_np : 8; /**< [ 15: 8](R/W) TLP 16B credits for Non-Posted TLPs in the SLI. Legal values are 0x8 to 0x17
+ and this value is not dependent on the number of PEMs wire-OR'd
+ together. Software should reprogram this register for performance reasons. */
+ uint64_t sli_p : 8; /**< [ 7: 0](R/W) TLP 16B credits for Posted TLPs in the SLI. Legal values are 0x24 to 0x4F
+ and this value is not dependent on the number of PEMs wire-OR'd
+ together. Software should reprogram this register for performance reasons. */
+#else /* Word 0 - Little Endian */
+ uint64_t sli_p : 8; /**< [ 7: 0](R/W) TLP 16B credits for Posted TLPs in the SLI. Legal values are 0x24 to 0x4F
+ and this value is not dependent on the number of PEMs wire-OR'd
+ together. Software should reprogram this register for performance reasons. */
+ uint64_t sli_np : 8; /**< [ 15: 8](R/W) TLP 16B credits for Non-Posted TLPs in the SLI. Legal values are 0x8 to 0x17
+ and this value is not dependent on the number of PEMs wire-OR'd
+ together. Software should reprogram this register for performance reasons. */
+ uint64_t sli_cpl : 8; /**< [ 23: 16](R/W) TLP 16B credits for Completion TLPs in the SLI. Legal values are 0x24 to 0x4F
+ and this value is not dependent on the number of PEMs wire-OR'd
+ together. Software should reprogram this register for performance reasons. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_pemx_tlp_credits_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pem_cpl : 12; /**< [ 63: 52](R/W) TLP 16 B credits for completion TLPs in the peer. Legal values are 0x42 to 0x104. */
+ uint64_t pem_np : 8; /**< [ 51: 44](R/W) TLP 16 B credits for nonposted TLPs in the peer. Legal values are 0x4 to 0x20. */
+ uint64_t pem_p : 12; /**< [ 43: 32](R/W) TLP 16 B credits for posted TLPs in the peer. Legal values are 0x42 to 0x104. */
+ uint64_t sli_cpl : 12; /**< [ 31: 20](R/W) TLP 16 B credits for completion TLPs in the SLI. Legal values are 0x41 to 0x104
+ and this value is not dependent on the number of PEMs wire-OR'd
+ together. Software should reprogram this register for performance reasons. */
+ uint64_t sli_np : 8; /**< [ 19: 12](R/W) TLP 16 B credits for non-posted TLPs in the SLI. Legal values are 0x3 to 0x20
+ and this value is not dependent on the number of PEMs wire-OR'd
+ together. Software should reprogram this register for performance reasons. */
+ uint64_t sli_p : 12; /**< [ 11: 0](R/W) TLP 16 B credits for posted TLPs in the SLI. Legal values are 0x41 to 0x104 and this value
+ is not dependent on the number of PEMs wire-OR'd together. Software should reprogram this
+ register for performance reasons. */
+#else /* Word 0 - Little Endian */
+ uint64_t sli_p : 12; /**< [ 11: 0](R/W) TLP 16 B credits for posted TLPs in the SLI. Legal values are 0x41 to 0x104 and this value
+ is not dependent on the number of PEMs wire-OR'd together. Software should reprogram this
+ register for performance reasons. */
+ uint64_t sli_np : 8; /**< [ 19: 12](R/W) TLP 16 B credits for non-posted TLPs in the SLI. Legal values are 0x3 to 0x20
+ and this value is not dependent on the number of PEMs wire-OR'd
+ together. Software should reprogram this register for performance reasons. */
+ uint64_t sli_cpl : 12; /**< [ 31: 20](R/W) TLP 16 B credits for completion TLPs in the SLI. Legal values are 0x41 to 0x104
+ and this value is not dependent on the number of PEMs wire-OR'd
+ together. Software should reprogram this register for performance reasons. */
+ uint64_t pem_p : 12; /**< [ 43: 32](R/W) TLP 16 B credits for posted TLPs in the peer. Legal values are 0x42 to 0x104. */
+ uint64_t pem_np : 8; /**< [ 51: 44](R/W) TLP 16 B credits for nonposted TLPs in the peer. Legal values are 0x4 to 0x20. */
+ uint64_t pem_cpl : 12; /**< [ 63: 52](R/W) TLP 16 B credits for completion TLPs in the peer. Legal values are 0x42 to 0x104. */
+#endif /* Word 0 - End */
+ } cn83xx;
+ struct bdk_pemx_tlp_credits_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t sli_cpl : 8; /**< [ 23: 16](R/W) TLP 16B credits for Completion TLPs in the SLI. Legal values are 0x24 to 0xff
+ and this value is not dependent on the number of PEMs wire-OR'd
+ together. Software should reprogram this register for performance reasons. */
+ uint64_t sli_np : 8; /**< [ 15: 8](R/W) TLP 16 B credits for non-posted TLPs in the SLI. Legal values are 0x4 to 0x20
+ and this value is not dependent on the number of PEMs wire-OR'd
+ together. Software should reprogram this register for performance reasons. */
+ uint64_t sli_p : 8; /**< [ 7: 0](R/W) TLP 16B credits for Posted TLPs in the SLI. Legal values are 0x24 to 0xff
+ and this value is not dependent on the number of PEMs wire-OR'd
+ together. Software should reprogram this register for performance reasons. */
+#else /* Word 0 - Little Endian */
+ uint64_t sli_p : 8; /**< [ 7: 0](R/W) TLP 16B credits for Posted TLPs in the SLI. Legal values are 0x24 to 0xff
+ and this value is not dependent on the number of PEMs wire-OR'd
+ together. Software should reprogram this register for performance reasons. */
+ uint64_t sli_np : 8; /**< [ 15: 8](R/W) TLP 16 B credits for non-posted TLPs in the SLI. Legal values are 0x4 to 0x20
+ and this value is not dependent on the number of PEMs wire-OR'd
+ together. Software should reprogram this register for performance reasons. */
+ uint64_t sli_cpl : 8; /**< [ 23: 16](R/W) TLP 16B credits for Completion TLPs in the SLI. Legal values are 0x24 to 0xff
+ and this value is not dependent on the number of PEMs wire-OR'd
+ together. Software should reprogram this register for performance reasons. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_pemx_tlp_credits bdk_pemx_tlp_credits_t;
+
+static inline uint64_t BDK_PEMX_TLP_CREDITS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_TLP_CREDITS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x87e0c0000038ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0c0000038ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0c0000038ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PEMX_TLP_CREDITS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_TLP_CREDITS(a) bdk_pemx_tlp_credits_t
+#define bustype_BDK_PEMX_TLP_CREDITS(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_PEMX_TLP_CREDITS(a) "PEMX_TLP_CREDITS"
+#define device_bar_BDK_PEMX_TLP_CREDITS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_TLP_CREDITS(a) (a)
+#define arguments_BDK_PEMX_TLP_CREDITS(a) (a),-1,-1,-1
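+
+/*
+ * Usage sketch: program TLP credits before any traffic flows, since a
+ * write resets the live credit counters. The CN83XX view is shown and
+ * the values are simply the documented legal maxima; they are
+ * illustrative placeholders, not tuned recommendations.
+ *
+ *   bdk_pemx_tlp_credits_t tc = { .u = 0 };
+ *   tc.cn83xx.sli_p   = 0x104; // posted, legal range 0x41 to 0x104
+ *   tc.cn83xx.sli_np  = 0x20;  // non-posted, legal range 0x3 to 0x20
+ *   tc.cn83xx.sli_cpl = 0x104; // completion, legal range 0x41 to 0x104
+ *   tc.cn83xx.pem_p   = 0x104; // peer posted, 0x42 to 0x104
+ *   tc.cn83xx.pem_np  = 0x20;  // peer non-posted, 0x4 to 0x20
+ *   tc.cn83xx.pem_cpl = 0x104; // peer completion, 0x42 to 0x104
+ *   *(volatile uint64_t *)BDK_PEMX_TLP_CREDITS(0) = tc.u;
+ */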
+
+/**
+ * Register (NCB) pem#_vf_clr_flr_req
+ *
+ * PEM FLR Request VF Clear Register
+ * This register provides the clear request for PCIe VF function level reset (FLR).
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on MAC reset.
+ */
+union bdk_pemx_vf_clr_flr_req
+{
+ uint64_t u;
+ struct bdk_pemx_vf_clr_flr_req_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t vf_num : 8; /**< [ 7: 0](R/W/H) When written, causes hardware to clear one of the 240 VF FLR conditions
+ indexed by [VF_NUM].
+ This field always reads as zero. */
+#else /* Word 0 - Little Endian */
+ uint64_t vf_num : 8; /**< [ 7: 0](R/W/H) When written, causes hardware to clear one of the 240 VF FLR conditions
+ indexed by [VF_NUM].
+ This field always reads as zero. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_vf_clr_flr_req_s cn; */
+};
+typedef union bdk_pemx_vf_clr_flr_req bdk_pemx_vf_clr_flr_req_t;
+
+static inline uint64_t BDK_PEMX_VF_CLR_FLR_REQ(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_VF_CLR_FLR_REQ(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000220ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_VF_CLR_FLR_REQ", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_VF_CLR_FLR_REQ(a) bdk_pemx_vf_clr_flr_req_t
+#define bustype_BDK_PEMX_VF_CLR_FLR_REQ(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_VF_CLR_FLR_REQ(a) "PEMX_VF_CLR_FLR_REQ"
+#define device_bar_BDK_PEMX_VF_CLR_FLR_REQ(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_VF_CLR_FLR_REQ(a) (a)
+#define arguments_BDK_PEMX_VF_CLR_FLR_REQ(a) (a),-1,-1,-1
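+
+/*
+ * Usage sketch: acknowledge a VF function level reset once software has
+ * quiesced the function. [VF_NUM] always reads as zero, so a plain write
+ * (no read-modify-write) is sufficient. The vf and pem variables are
+ * caller-supplied placeholders.
+ *
+ *   bdk_pemx_vf_clr_flr_req_t req = { .u = 0 };
+ *   req.s.vf_num = vf; // vf in 0..239: the VF whose FLR handling is done
+ *   *(volatile uint64_t *)BDK_PEMX_VF_CLR_FLR_REQ(pem) = req.u;
+ */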
+
+/**
+ * Register (NCB) pem#_wmerge_merged_pc
+ *
+ * PEM Merge Writes Merged Performance Counter Register
+ * This register reports how many writes merged within the outbound write merge unit.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_wmerge_merged_pc
+{
+ uint64_t u;
+ struct bdk_pemx_wmerge_merged_pc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t wmerge_merged : 64; /**< [ 63: 0](R/W/H) Each NCBO write operation mapped to MEM type by the ACC table that merges with a previous
+ write will increment this count. */
+#else /* Word 0 - Little Endian */
+ uint64_t wmerge_merged : 64; /**< [ 63: 0](R/W/H) Each NCBO write operation mapped to MEM type by the ACC table that merges with a previous
+ write will increment this count. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_wmerge_merged_pc_s cn; */
+};
+typedef union bdk_pemx_wmerge_merged_pc bdk_pemx_wmerge_merged_pc_t;
+
+static inline uint64_t BDK_PEMX_WMERGE_MERGED_PC(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_WMERGE_MERGED_PC(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000188ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_WMERGE_MERGED_PC", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_WMERGE_MERGED_PC(a) bdk_pemx_wmerge_merged_pc_t
+#define bustype_BDK_PEMX_WMERGE_MERGED_PC(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_WMERGE_MERGED_PC(a) "PEMX_WMERGE_MERGED_PC"
+#define device_bar_BDK_PEMX_WMERGE_MERGED_PC(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_WMERGE_MERGED_PC(a) (a)
+#define arguments_BDK_PEMX_WMERGE_MERGED_PC(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) pem#_wmerge_received_pc
+ *
+ * PEM Merge Writes Received Performance Counter Register
+ * This register reports the number of writes that enter the outbound write merge unit.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ *
+ * This register is reset on PEM domain reset.
+ */
+union bdk_pemx_wmerge_received_pc
+{
+ uint64_t u;
+ struct bdk_pemx_wmerge_received_pc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t wmerge_writes : 64; /**< [ 63: 0](R/W/H) Each NCBO write operation mapped to MEM type by the ACC table will increment this count. */
+#else /* Word 0 - Little Endian */
+ uint64_t wmerge_writes : 64; /**< [ 63: 0](R/W/H) Each NCBO write operation mapped to MEM type by the ACC table will increment this count. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pemx_wmerge_received_pc_s cn; */
+};
+typedef union bdk_pemx_wmerge_received_pc bdk_pemx_wmerge_received_pc_t;
+
+static inline uint64_t BDK_PEMX_WMERGE_RECEIVED_PC(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PEMX_WMERGE_RECEIVED_PC(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8e0000000180ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PEMX_WMERGE_RECEIVED_PC", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PEMX_WMERGE_RECEIVED_PC(a) bdk_pemx_wmerge_received_pc_t
+#define bustype_BDK_PEMX_WMERGE_RECEIVED_PC(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_PEMX_WMERGE_RECEIVED_PC(a) "PEMX_WMERGE_RECEIVED_PC"
+#define device_bar_BDK_PEMX_WMERGE_RECEIVED_PC(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_PEMX_WMERGE_RECEIVED_PC(a) (a)
+#define arguments_BDK_PEMX_WMERGE_RECEIVED_PC(a) (a),-1,-1,-1
+
+#endif /* __BDK_CSRS_PEM_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-rnm.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-rnm.h
new file mode 100644
index 0000000000..45ae7e4642
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-rnm.h
@@ -0,0 +1,738 @@
+#ifndef __BDK_CSRS_RNM_H__
+#define __BDK_CSRS_RNM_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium RNM.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Enumeration rnm_bar_e
+ *
+ * RNM Base Address Register Enumeration
+ * Enumerates the base address registers.
+ */
+#define BDK_RNM_BAR_E_RNM_PF_BAR0 (0x87e040000000ll)
+#define BDK_RNM_BAR_E_RNM_PF_BAR0_SIZE 0x100000ull
+#define BDK_RNM_BAR_E_RNM_VF_BAR0 (0x840000800000ll)
+#define BDK_RNM_BAR_E_RNM_VF_BAR0_SIZE 0x100000ull
+
+/**
+ * Register (RSL) rnm_bist_status
+ *
+ * RNM BIST Status Register
+ * This register is the RNM memory BIST status register, indicating status of built-in self-
+ * tests. 0 = passed BIST, 1 = failed BIST.
+ */
+union bdk_rnm_bist_status
+{
+ uint64_t u;
+ struct bdk_rnm_bist_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_rnm_bist_status_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t nz : 1; /**< [ 1: 1](RO/H) Status of the no-zeros memory BIST. 0 = passed BIST, 1 = failed BIST. */
+ uint64_t nml : 1; /**< [ 0: 0](RO/H) Status of the normal memory BIST. 0 = passed BIST, 1 = failed BIST. */
+#else /* Word 0 - Little Endian */
+ uint64_t nml : 1; /**< [ 0: 0](RO/H) Status of the normal memory BIST. 0 = passed BIST, 1 = failed BIST. */
+ uint64_t nz : 1; /**< [ 1: 1](RO/H) Status of the no-zeros memory BIST. 0 = passed BIST, 1 = failed BIST. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_rnm_bist_status_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t rrc : 1; /**< [ 1: 1](RO/H) Status of the RRC memory block BIST. 0 = passed BIST, 1 = failed BIST. */
+ uint64_t mem : 1; /**< [ 0: 0](RO/H) Status of MEM memory block BIST. 0 = passed BIST, 1 = failed BIST. */
+#else /* Word 0 - Little Endian */
+ uint64_t mem : 1; /**< [ 0: 0](RO/H) Status of MEM memory block BIST. 0 = passed BIST, 1 = failed BIST. */
+ uint64_t rrc : 1; /**< [ 1: 1](RO/H) Status of the RRC memory block BIST. 0 = passed BIST, 1 = failed BIST. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_rnm_bist_status_cn81xx cn83xx; */
+};
+typedef union bdk_rnm_bist_status bdk_rnm_bist_status_t;
+
+#define BDK_RNM_BIST_STATUS BDK_RNM_BIST_STATUS_FUNC()
+static inline uint64_t BDK_RNM_BIST_STATUS_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RNM_BIST_STATUS_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x87e040000008ll;
+ __bdk_csr_fatal("RNM_BIST_STATUS", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RNM_BIST_STATUS bdk_rnm_bist_status_t
+#define bustype_BDK_RNM_BIST_STATUS BDK_CSR_TYPE_RSL
+#define basename_BDK_RNM_BIST_STATUS "RNM_BIST_STATUS"
+#define device_bar_BDK_RNM_BIST_STATUS 0x0 /* PF_BAR0 */
+#define busnum_BDK_RNM_BIST_STATUS 0
+#define arguments_BDK_RNM_BIST_STATUS -1,-1,-1,-1
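+
+/*
+ * Usage sketch: on CN8XXX parts (the only ones with this register, per
+ * the address function above), confirm the RNM memories passed self-test
+ * before trusting the generator. Any nonzero bit is a BIST failure.
+ *
+ *   bdk_rnm_bist_status_t bist;
+ *   bist.u = *(volatile uint64_t *)BDK_RNM_BIST_STATUS;
+ *   if (bist.u != 0)
+ *       ; // at least one RNM memory failed BIST; treat the RNG as unusable
+ */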
+
+/**
+ * Register (RSL) rnm_bp_test
+ *
+ * INTERNAL: RNM Backpressure Test Register
+ */
+union bdk_rnm_bp_test
+{
+ uint64_t u;
+ struct bdk_rnm_bp_test_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t enable : 3; /**< [ 63: 61](R/W) Enable test mode. For diagnostic use only.
+ Internal:
+ Once a bit is set, random backpressure is generated
+ at the corresponding point to allow for more frequent backpressure.
+ \<63\> = Normal random number memory writes.
+ \<62\> = No-zeros random number memory writes.
+ \<61\> = No-zeros random number memory reads. */
+ uint64_t reserved_24_60 : 37;
+ uint64_t bp_cfg : 6; /**< [ 23: 18](R/W) Backpressure weight. For diagnostic use only.
+ Internal:
+ There are 2 backpressure configuration bits per enable, with the two bits
+ defined as 0x0=100% of the time, 0x1=75% of the time, 0x2=50% of the time,
+ 0x3=25% of the time.
+ \<23:22\> = Normal random number memory writes.
+ \<21:20\> = No-zeros random number memory writes.
+ \<19:18\> = No-zeros random number memory reads. */
+ uint64_t reserved_12_17 : 6;
+ uint64_t lfsr_freq : 12; /**< [ 11: 0](R/W) Test LFSR update frequency in coprocessor-clocks minus one. */
+#else /* Word 0 - Little Endian */
+ uint64_t lfsr_freq : 12; /**< [ 11: 0](R/W) Test LFSR update frequency in coprocessor-clocks minus one. */
+ uint64_t reserved_12_17 : 6;
+ uint64_t bp_cfg : 6; /**< [ 23: 18](R/W) Backpressure weight. For diagnostic use only.
+ Internal:
+ There are 2 backpressure configuration bits per enable, with the two bits
+ defined as 0x0=100% of the time, 0x1=75% of the time, 0x2=50% of the time,
+ 0x3=25% of the time.
+ \<23:22\> = Normal random number memory writes.
+ \<21:20\> = No-zeros random number memory writes.
+ \<19:18\> = No-zeros random number memory reads. */
+ uint64_t reserved_24_60 : 37;
+ uint64_t enable : 3; /**< [ 63: 61](R/W) Enable test mode. For diagnostic use only.
+ Internal:
+ Once a bit is set, random backpressure is generated
+ at the corresponding point to allow for more frequent backpressure.
+ \<63\> = Normal random number memory writes.
+ \<62\> = No-zeros random number memory writes.
+ \<61\> = No-zeros random number memory reads. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rnm_bp_test_s cn; */
+};
+typedef union bdk_rnm_bp_test bdk_rnm_bp_test_t;
+
+#define BDK_RNM_BP_TEST BDK_RNM_BP_TEST_FUNC()
+static inline uint64_t BDK_RNM_BP_TEST_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RNM_BP_TEST_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e040000028ll;
+ __bdk_csr_fatal("RNM_BP_TEST", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RNM_BP_TEST bdk_rnm_bp_test_t
+#define bustype_BDK_RNM_BP_TEST BDK_CSR_TYPE_RSL
+#define basename_BDK_RNM_BP_TEST "RNM_BP_TEST"
+#define device_bar_BDK_RNM_BP_TEST 0x0 /* PF_BAR0 */
+#define busnum_BDK_RNM_BP_TEST 0
+#define arguments_BDK_RNM_BP_TEST -1,-1,-1,-1
+
+/**
+ * Register (RSL) rnm_const
+ *
+ * RNM PF Constants Register
+ * This register is used for software discovery.
+ */
+union bdk_rnm_const
+{
+ uint64_t u;
+ struct bdk_rnm_const_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rnm_const_s cn; */
+};
+typedef union bdk_rnm_const bdk_rnm_const_t;
+
+#define BDK_RNM_CONST BDK_RNM_CONST_FUNC()
+static inline uint64_t BDK_RNM_CONST_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RNM_CONST_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e040000030ll;
+ __bdk_csr_fatal("RNM_CONST", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RNM_CONST bdk_rnm_const_t
+#define bustype_BDK_RNM_CONST BDK_CSR_TYPE_RSL
+#define basename_BDK_RNM_CONST "RNM_CONST"
+#define device_bar_BDK_RNM_CONST 0x0 /* PF_BAR0 */
+#define busnum_BDK_RNM_CONST 0
+#define arguments_BDK_RNM_CONST -1,-1,-1,-1
+
+/**
+ * Register (RSL) rnm_ctl_status
+ *
+ * RNM Control and Status Register
+ * This register is the RNM control register.
+ * This register is secure only to prevent the nonsecure world from affecting
+ * secure-world clients using true random numbers.
+ */
+union bdk_rnm_ctl_status
+{
+ uint64_t u;
+ struct bdk_rnm_ctl_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_11_63 : 53;
+ uint64_t eer_lck : 1; /**< [ 10: 10](SRO/H) Encryption enable register locked. */
+ uint64_t eer_val : 1; /**< [ 9: 9](SRO/H) Dormant encryption key match. */
+ uint64_t ent_sel : 4; /**< [ 8: 5](SR/W) Select input to RNM FIFO.
+ 0x0 = 0-7.
+ 0x1 = 8-15.
+ 0x2 = 16-23.
+ 0x3 = 24-31.
+ 0x4 = 32-39.
+ 0x5 = 40-47.
+ 0x6 = 48-55.
+ 0x7 = 56-63.
+ 0x8 = 64-71.
+ 0x9 = 72-79.
+ 0xA = 80-87.
+ 0xB = 88-95.
+ 0xC = 96-103.
+ 0xD = 104-111.
+ 0xE = 112-119.
+ 0xF = 120-127. */
+ uint64_t exp_ent : 1; /**< [ 4: 4](SR/W) Exported entropy enable for random number generator. The next random number is
+ available 80 coprocessor-clock cycles after switching this bit from zero to one. The
+ next random number is available 730 coprocessor-clock cycles after switching this
+ bit from one to zero. */
+ uint64_t rng_rst : 1; /**< [ 3: 3](SR/W) Reset the RNG. Setting this bit to one cancels the generation of the current random
+ number. The next random number is available 730 coprocessor-clock cycles after this
+ bit is cleared if [EXP_ENT] is set to zero. The next random number is available 80
+ coprocessor-clock cycles after this bit is cleared if [EXP_ENT] is set to one. This bit is
+ not automatically cleared. */
+ uint64_t rnm_rst : 1; /**< [ 2: 2](SR/W) Reset the RNM. Setting this bit to 1 drops all RNM transactions in flight and clears
+ all stored numbers in the random number memory. Any outstanding NCBO credits will
+ not be returned. RNM will not respond to any pending NCBI grants. RNM can accept
+ new requests immediately after reset is cleared. This bit is not automatically
+ cleared and will not reset any CSR fields. */
+ uint64_t rng_en : 1; /**< [ 1: 1](SR/W) Enables the output of the RNG. */
+ uint64_t ent_en : 1; /**< [ 0: 0](SR/W) Entropy enable for random number generator. */
+#else /* Word 0 - Little Endian */
+ uint64_t ent_en : 1; /**< [ 0: 0](SR/W) Entropy enable for random number generator. */
+ uint64_t rng_en : 1; /**< [ 1: 1](SR/W) Enables the output of the RNG. */
+ uint64_t rnm_rst : 1; /**< [ 2: 2](SR/W) Reset the RNM. Setting this bit to 1 drops all RNM transactions in flight and clears
+ all stored numbers in the random number memory. Any outstanding NCBO credits will
+ not be returned. RNM will not respond to any pending NCBI grants. RNM can accept
+ new requests immediately after reset is cleared. This bit is not automatically
+ cleared and will not reset any CSR fields. */
+ uint64_t rng_rst : 1; /**< [ 3: 3](SR/W) Reset the RNG. Setting this bit to one cancels the generation of the current random
+ number. The next random number is available 730 coprocessor-clock cycles after this
+ bit is cleared if [EXP_ENT] is set to zero. The next random number is available 80
+ coprocessor-clock cycles after this bit is cleared if [EXP_ENT] is set to one. This bit is
+ not automatically cleared. */
+ uint64_t exp_ent : 1; /**< [ 4: 4](SR/W) Exported entropy enable for random number generator. The next random number is
+ available 80 coprocessor-clock cycles after switching this bit from zero to one. The
+ next random number is available 730 coprocessor-clock cycles after switching this
+ bit from one to zero. */
+ uint64_t ent_sel : 4; /**< [ 8: 5](SR/W) Select input to RNM FIFO.
+ 0x0 = 0-7.
+ 0x1 = 8-15.
+ 0x2 = 16-23.
+ 0x3 = 24-31.
+ 0x4 = 32-39.
+ 0x5 = 40-47.
+ 0x6 = 48-55.
+ 0x7 = 56-63.
+ 0x8 = 64-71.
+ 0x9 = 72-79.
+ 0xA = 80-87.
+ 0xB = 88-95.
+ 0xC = 96-103.
+ 0xD = 104-111.
+ 0xE = 112-119.
+ 0xF = 120-127. */
+ uint64_t eer_val : 1; /**< [ 9: 9](SRO/H) Dormant encryption key match. */
+ uint64_t eer_lck : 1; /**< [ 10: 10](SRO/H) Encryption enable register locked. */
+ uint64_t reserved_11_63 : 53;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_rnm_ctl_status_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t dis_mak : 1; /**< [ 11: 11](SR/W1S/H) Disable use of master AES KEY. */
+ uint64_t eer_lck : 1; /**< [ 10: 10](SRO/H) Encryption enable register locked. */
+ uint64_t eer_val : 1; /**< [ 9: 9](SRO/H) Dormant encryption key match. */
+ uint64_t ent_sel : 4; /**< [ 8: 5](SR/W) Select input to RNM FIFO.
+ 0x0 = 0-7.
+ 0x1 = 8-15.
+ 0x2 = 16-23.
+ 0x3 = 24-31.
+ 0x4 = 32-39.
+ 0x5 = 40-47.
+ 0x6 = 48-55.
+ 0x7 = 56-63.
+ 0x8 = 64-71.
+ 0x9 = 72-79.
+ 0xA = 80-87.
+ 0xB = 88-95.
+ 0xC = 96-103.
+ 0xD = 104-111.
+ 0xE = 112-119.
+ 0xF = 120-127. */
+ uint64_t exp_ent : 1; /**< [ 4: 4](SR/W) Exported entropy enable for random number generator. The next random number is
+ available 80 coprocessor-clock cycles after switching this bit from zero to one. The
+ next random number is available 730 coprocessor-clock cycles after switching this
+ bit from one to zero. */
+ uint64_t rng_rst : 1; /**< [ 3: 3](SR/W) Reset the RNG. Setting this bit to one cancels the generation of the current random
+ number. The next random number is available 730 coprocessor-clock cycles after this
+ bit is cleared if [EXP_ENT] is set to zero. The next random number is available 80
+ coprocessor-clock cycles after this bit is cleared if [EXP_ENT] is set to one. This bit is
+ not automatically cleared. */
+ uint64_t rnm_rst : 1; /**< [ 2: 2](SR/W) Reset the RNM. Setting this bit to 1 drops all RNM transactions in flight and clears
+ all stored numbers in the random number memory. Any outstanding NCBO credits will
+ not be returned. RNM will not respond to any pending NCBI grants. RNM can accept
+ new requests immediately after reset is cleared. This bit is not automatically
+ cleared and will not reset any CSR fields. */
+ uint64_t rng_en : 1; /**< [ 1: 1](SR/W) Enables the output of the RNG. */
+ uint64_t ent_en : 1; /**< [ 0: 0](SR/W) Entropy enable for random number generator. */
+#else /* Word 0 - Little Endian */
+ uint64_t ent_en : 1; /**< [ 0: 0](SR/W) Entropy enable for random number generator. */
+ uint64_t rng_en : 1; /**< [ 1: 1](SR/W) Enables the output of the RNG. */
+ uint64_t rnm_rst : 1; /**< [ 2: 2](SR/W) Reset the RNM. Setting this bit to 1 drops all RNM transactions in flight and clears
+ all stored numbers in the random number memory. Any outstanding NCBO credits will
+ not be returned. RNM will not respond to any pending NCBI grants. RNM can accept
+ new requests immediately after reset is cleared. This bit is not automatically
+ cleared and will not reset any CSR fields. */
+ uint64_t rng_rst : 1; /**< [ 3: 3](SR/W) Reset the RNG. Setting this bit to one cancels the generation of the current random
+ number. The next random number is available 730 coprocessor-clock cycles after this
+ bit is cleared if [EXP_ENT] is set to zero. The next random number is available 80
+ coprocessor-clock cycles after this bit is cleared if [EXP_ENT] is set to one. This bit is
+ not automatically cleared. */
+ uint64_t exp_ent : 1; /**< [ 4: 4](SR/W) Exported entropy enable for random number generator. The next random number is
+ available 80 coprocessor-clock cycles after switching this bit from zero to one. The
+ next random number is available 730 coprocessor-clock cycles after switching this
+ bit from one to zero. */
+ uint64_t ent_sel : 4; /**< [ 8: 5](SR/W) Select input to RNM FIFO.
+ 0x0 = 0-7.
+ 0x1 = 8-15.
+ 0x2 = 16-23.
+ 0x3 = 24-31.
+ 0x4 = 32-39.
+ 0x5 = 40-47.
+ 0x6 = 48-55.
+ 0x7 = 56-63.
+ 0x8 = 64-71.
+ 0x9 = 72-79.
+ 0xA = 80-87.
+ 0xB = 88-95.
+ 0xC = 96-103.
+ 0xD = 104-111.
+ 0xE = 112-119.
+ 0xF = 120-127. */
+ uint64_t eer_val : 1; /**< [ 9: 9](SRO/H) Dormant encryption key match. */
+ uint64_t eer_lck : 1; /**< [ 10: 10](SRO/H) Encryption enable register locked. */
+ uint64_t dis_mak : 1; /**< [ 11: 11](SR/W1S/H) Disable use of master AES KEY. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_rnm_ctl_status_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t zuc_en : 1; /**< [ 11: 11](SR/W) Enable output of the ZUC engine. Before setting this bit, software must write
+ all RNM_ZUC_INIT_LFSR() and RNM_ZUC_INIT_NLF() registers. */
+ uint64_t eer_lck : 1; /**< [ 10: 10](SRO/H) Encryption enable register locked. */
+ uint64_t eer_val : 1; /**< [ 9: 9](SRO/H) Dormant encryption key match. */
+ uint64_t ent_sel : 4; /**< [ 8: 5](SR/W) Select input to RNM FIFO.
+ 0x0 = 0-7.
+ 0x1 = 8-15.
+ 0x2 = 16-23.
+ 0x3 = 24-31.
+ 0x4 = 32-39.
+ 0x5 = 40-47.
+ 0x6 = 48-55.
+ 0x7 = 56-63.
+ 0x8 = 64-71.
+ 0x9 = 72-79.
+ 0xA = 80-87.
+ 0xB = 88-95.
+ 0xC = 96-103.
+ 0xD = 104-111.
+ 0xE = 112-119.
+ 0xF = 120-127. */
+ uint64_t exp_ent : 1; /**< [ 4: 4](SR/W) Exported entropy enable for random number generator. The next random number is
+ available 80 coprocessor-clock cycles after switching this bit from zero to one. The
+ next random number is available 730 coprocessor-clock cycles after switching this
+ bit from one to zero. */
+ uint64_t rng_rst : 1; /**< [ 3: 3](SR/W) Reset the RNG. Setting this bit to one cancels the generation of the current random
+ number. The next random number is available 730 coprocessor-clock cycles after this
+ bit is cleared if [EXP_ENT] is set to zero. The next random number is available 80
+ coprocessor-clock cycles after this bit is cleared if [EXP_ENT] is set to one. This bit is
+ not automatically cleared. */
+ uint64_t rnm_rst : 1; /**< [ 2: 2](SRO) Reserved. Writes are ignored for backward compatibility. */
+ uint64_t rng_en : 1; /**< [ 1: 1](SR/W) Enables the output of the RNG. */
+ uint64_t ent_en : 1; /**< [ 0: 0](SR/W) Entropy enable for random number generator. */
+#else /* Word 0 - Little Endian */
+ uint64_t ent_en : 1; /**< [ 0: 0](SR/W) Entropy enable for random number generator. */
+ uint64_t rng_en : 1; /**< [ 1: 1](SR/W) Enables the output of the RNG. */
+ uint64_t rnm_rst : 1; /**< [ 2: 2](SRO) Reserved. Writes are ignored for backward compatibility. */
+ uint64_t rng_rst : 1; /**< [ 3: 3](SR/W) Reset the RNG. Setting this bit to one cancels the generation of the current random
+ number. The next random number is available 730 coprocessor-clock cycles after this
+ bit is cleared if [EXP_ENT] is set to zero. The next random number is available 80
+ coprocessor-clock cycles after this bit is cleared if [EXP_ENT] is set to one. This bit is
+ not automatically cleared. */
+ uint64_t exp_ent : 1; /**< [ 4: 4](SR/W) Exported entropy enable for random number generator. The next random number is
+ available 80 coprocessor-clock cycles after switching this bit from zero to one. The
+ next random number is available 730 coprocessor-clock cycles after switching this
+ bit from one to zero. */
+ uint64_t ent_sel : 4; /**< [ 8: 5](SR/W) Select input to RNM FIFO.
+ 0x0 = 0-7.
+ 0x1 = 8-15.
+ 0x2 = 16-23.
+ 0x3 = 24-31.
+ 0x4 = 32-39.
+ 0x5 = 40-47.
+ 0x6 = 48-55.
+ 0x7 = 56-63.
+ 0x8 = 64-71.
+ 0x9 = 72-79.
+ 0xA = 80-87.
+ 0xB = 88-95.
+ 0xC = 96-103.
+ 0xD = 104-111.
+ 0xE = 112-119.
+ 0xF = 120-127. */
+ uint64_t eer_val : 1; /**< [ 9: 9](SRO/H) Dormant encryption key match. */
+ uint64_t eer_lck : 1; /**< [ 10: 10](SRO/H) Encryption enable register locked. */
+ uint64_t zuc_en : 1; /**< [ 11: 11](SR/W) Enable output of the ZUC engine. Before setting this bit, software must write
+ all RNM_ZUC_INIT_LFSR() and RNM_ZUC_INIT_NLF() registers. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_rnm_ctl_status bdk_rnm_ctl_status_t;
+
+#define BDK_RNM_CTL_STATUS BDK_RNM_CTL_STATUS_FUNC()
+static inline uint64_t BDK_RNM_CTL_STATUS_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RNM_CTL_STATUS_FUNC(void)
+{
+ return 0x87e040000000ll;
+}
+
+#define typedef_BDK_RNM_CTL_STATUS bdk_rnm_ctl_status_t
+#define bustype_BDK_RNM_CTL_STATUS BDK_CSR_TYPE_RSL
+#define basename_BDK_RNM_CTL_STATUS "RNM_CTL_STATUS"
+#define device_bar_BDK_RNM_CTL_STATUS 0x0 /* PF_BAR0 */
+#define busnum_BDK_RNM_CTL_STATUS 0
+#define arguments_BDK_RNM_CTL_STATUS -1,-1,-1,-1
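+
+/*
+ * Usage sketch: minimal RNG bring-up. RNM_CTL_STATUS is secure-only, so
+ * this must execute from the secure world; the settle time follows the
+ * field descriptions above (up to 730 coprocessor clocks before the
+ * first number is valid).
+ *
+ *   bdk_rnm_ctl_status_t ctl;
+ *   ctl.u = *(volatile uint64_t *)BDK_RNM_CTL_STATUS;
+ *   ctl.s.ent_en = 1; // feed entropy into the generator
+ *   ctl.s.rng_en = 1; // enable RNG output
+ *   *(volatile uint64_t *)BDK_RNM_CTL_STATUS = ctl.u;
+ *   // wait at least 730 coprocessor clocks before consuming RNM_RANDOM
+ */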
+
+/**
+ * Register (RSL) rnm_eer_dbg
+ *
+ * INTERNAL: RNM Encryption Enable Debug Register
+ *
+ * This register is the encryption enable debug register.
+ */
+union bdk_rnm_eer_dbg
+{
+ uint64_t u;
+ struct bdk_rnm_eer_dbg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t dat : 64; /**< [ 63: 0](SRO/H) Dormant encryption debug info. */
+#else /* Word 0 - Little Endian */
+ uint64_t dat : 64; /**< [ 63: 0](SRO/H) Dormant encryption debug info. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rnm_eer_dbg_s cn; */
+};
+typedef union bdk_rnm_eer_dbg bdk_rnm_eer_dbg_t;
+
+#define BDK_RNM_EER_DBG BDK_RNM_EER_DBG_FUNC()
+static inline uint64_t BDK_RNM_EER_DBG_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RNM_EER_DBG_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X))
+ return 0x87e040000018ll;
+ __bdk_csr_fatal("RNM_EER_DBG", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RNM_EER_DBG bdk_rnm_eer_dbg_t
+#define bustype_BDK_RNM_EER_DBG BDK_CSR_TYPE_RSL
+#define basename_BDK_RNM_EER_DBG "RNM_EER_DBG"
+#define device_bar_BDK_RNM_EER_DBG 0x0 /* PF_BAR0 */
+#define busnum_BDK_RNM_EER_DBG 0
+#define arguments_BDK_RNM_EER_DBG -1,-1,-1,-1
+
+/**
+ * Register (RSL) rnm_eer_key
+ *
+ * RNM Encryption Enable Register
+ * This register is the encryption enable register.
+ */
+union bdk_rnm_eer_key
+{
+ uint64_t u;
+ struct bdk_rnm_eer_key_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t key : 64; /**< [ 63: 0](SWO) Dormant encryption key. If dormant crypto is fuse-enabled, crypto can be enabled by
+ writing this register with the correct key. */
+#else /* Word 0 - Little Endian */
+ uint64_t key : 64; /**< [ 63: 0](SWO) Dormant encryption key. If dormant crypto is fuse-enabled, crypto can be enabled by
+ writing this register with the correct key. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rnm_eer_key_s cn; */
+};
+typedef union bdk_rnm_eer_key bdk_rnm_eer_key_t;
+
+#define BDK_RNM_EER_KEY BDK_RNM_EER_KEY_FUNC()
+static inline uint64_t BDK_RNM_EER_KEY_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RNM_EER_KEY_FUNC(void)
+{
+ return 0x87e040000010ll;
+}
+
+#define typedef_BDK_RNM_EER_KEY bdk_rnm_eer_key_t
+#define bustype_BDK_RNM_EER_KEY BDK_CSR_TYPE_RSL
+#define basename_BDK_RNM_EER_KEY "RNM_EER_KEY"
+#define device_bar_BDK_RNM_EER_KEY 0x0 /* PF_BAR0 */
+#define busnum_BDK_RNM_EER_KEY 0
+#define arguments_BDK_RNM_EER_KEY -1,-1,-1,-1
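+
+/*
+ * Usage sketch: if dormant crypto is fuse-enabled, writing the correct
+ * key unlocks it, and success is visible as RNM_CTL_STATUS[EER_VAL]
+ * reading one. The key value is platform-specific; "key" below is a
+ * placeholder supplied by the caller.
+ *
+ *   *(volatile uint64_t *)BDK_RNM_EER_KEY = key; // write-only register
+ *   bdk_rnm_ctl_status_t ctl;
+ *   ctl.u = *(volatile uint64_t *)BDK_RNM_CTL_STATUS;
+ *   if (ctl.s.eer_val)
+ *       ; // key matched; dormant encryption is now enabled
+ */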
+
+/**
+ * Register (NCB) rnm_random
+ *
+ * RNM Random Register
+ */
+union bdk_rnm_random
+{
+ uint64_t u;
+ struct bdk_rnm_random_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t dat : 64; /**< [ 63: 0](RO/H) Generated random number. This register may be accessed with an 8-, 16-, 32- or 64-bit
+ operation. This register is on an independent page, and may be mapped into guest operating
+ systems. */
+#else /* Word 0 - Little Endian */
+ uint64_t dat : 64; /**< [ 63: 0](RO/H) Generated random number. This register may be accessed with an 8-, 16-, 32- or 64-bit
+ operation. This register is on an independent page, and may be mapped into guest operating
+ systems. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rnm_random_s cn8; */
+ struct bdk_rnm_random_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t dat : 64; /**< [ 63: 0](RO/H) Generated random number. This register may be accessed with an 8-, 16-, 32- or 64-bit
+ operation. This register is on an independent page, and may be mapped into guest operating
+ systems. Accesses to RNM_RANDOM larger than 64 bits will return 0x0 and fault. */
+#else /* Word 0 - Little Endian */
+ uint64_t dat : 64; /**< [ 63: 0](RO/H) Generated random number. This register may be accessed with an 8-, 16-, 32- or 64-bit
+ operation. This register is on an independent page, and may be mapped into guest operating
+ systems. Accesses to RNM_RANDOM larger than 64 bits will return 0x0 and fault. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_rnm_random bdk_rnm_random_t;
+
+#define BDK_RNM_RANDOM BDK_RNM_RANDOM_FUNC()
+static inline uint64_t BDK_RNM_RANDOM_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RNM_RANDOM_FUNC(void)
+{
+ return 0x840000800000ll;
+}
+
+#define typedef_BDK_RNM_RANDOM bdk_rnm_random_t
+#define bustype_BDK_RNM_RANDOM BDK_CSR_TYPE_NCB
+#define basename_BDK_RNM_RANDOM "RNM_RANDOM"
+#define device_bar_BDK_RNM_RANDOM 0x0 /* VF_BAR0 */
+#define busnum_BDK_RNM_RANDOM 0
+#define arguments_BDK_RNM_RANDOM -1,-1,-1,-1
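+
+/*
+ * Usage sketch: RNM_RANDOM sits on its own VF BAR0 page so it can be
+ * mapped directly into a guest. Loads of 8, 16, 32 or 64 bits all return
+ * fresh random data; on CN9XXX anything wider returns 0x0 and faults.
+ *
+ *   uint64_t r64 = *(volatile uint64_t *)BDK_RNM_RANDOM;
+ *   uint32_t r32 = *(volatile uint32_t *)BDK_RNM_RANDOM; // narrower loads are fine
+ */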
+
+/**
+ * Register (RSL) rnm_serial_num
+ *
+ * RNM Fuse Serial Number Register
+ */
+union bdk_rnm_serial_num
+{
+ uint64_t u;
+ struct bdk_rnm_serial_num_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t dat : 64; /**< [ 63: 0](RO/H) Dormant encryption serial number. */
+#else /* Word 0 - Little Endian */
+ uint64_t dat : 64; /**< [ 63: 0](RO/H) Dormant encryption serial number. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rnm_serial_num_s cn; */
+};
+typedef union bdk_rnm_serial_num bdk_rnm_serial_num_t;
+
+#define BDK_RNM_SERIAL_NUM BDK_RNM_SERIAL_NUM_FUNC()
+static inline uint64_t BDK_RNM_SERIAL_NUM_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RNM_SERIAL_NUM_FUNC(void)
+{
+ return 0x87e040000020ll;
+}
+
+#define typedef_BDK_RNM_SERIAL_NUM bdk_rnm_serial_num_t
+#define bustype_BDK_RNM_SERIAL_NUM BDK_CSR_TYPE_RSL
+#define basename_BDK_RNM_SERIAL_NUM "RNM_SERIAL_NUM"
+#define device_bar_BDK_RNM_SERIAL_NUM 0x0 /* PF_BAR0 */
+#define busnum_BDK_RNM_SERIAL_NUM 0
+#define arguments_BDK_RNM_SERIAL_NUM -1,-1,-1,-1
+
+/**
+ * Register (RSL) rnm_zuc_init_lfsr#
+ *
+ * RNM ZUC LFSR Initialization Register
+ * This register is used to initialize the state of the 16 state elements in RNM's ZUC
+ * LFSR. See RNM_CTL_STATUS[ZUC_EN].
+ *
+ * Before writing to this register, RNM_CTL_STATUS[ZUC_EN] must be zero to turn off the
+ * ZUC engine.
+ */
+union bdk_rnm_zuc_init_lfsrx
+{
+ uint64_t u;
+ struct bdk_rnm_zuc_init_lfsrx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_31_63 : 33;
+ uint64_t lfsr : 31; /**< [ 30: 0](SWO) Write the state of one ZUC LFSR element. */
+#else /* Word 0 - Little Endian */
+ uint64_t lfsr : 31; /**< [ 30: 0](SWO) Write the state of one ZUC LFSR element. */
+ uint64_t reserved_31_63 : 33;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rnm_zuc_init_lfsrx_s cn; */
+};
+typedef union bdk_rnm_zuc_init_lfsrx bdk_rnm_zuc_init_lfsrx_t;
+
+static inline uint64_t BDK_RNM_ZUC_INIT_LFSRX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RNM_ZUC_INIT_LFSRX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=15))
+ return 0x87e040000100ll + 8ll * ((a) & 0xf);
+ __bdk_csr_fatal("RNM_ZUC_INIT_LFSRX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_RNM_ZUC_INIT_LFSRX(a) bdk_rnm_zuc_init_lfsrx_t
+#define bustype_BDK_RNM_ZUC_INIT_LFSRX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_RNM_ZUC_INIT_LFSRX(a) "RNM_ZUC_INIT_LFSRX"
+#define device_bar_BDK_RNM_ZUC_INIT_LFSRX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_RNM_ZUC_INIT_LFSRX(a) (a)
+#define arguments_BDK_RNM_ZUC_INIT_LFSRX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) rnm_zuc_init_nlf#
+ *
+ * RNM ZUC Nonlinear Function Initialization Register
+ * This register is used to initialize the state of the two 32-bit memory cells in
+ * ZUC's nonlinear function. See RNM_CTL_STATUS[ZUC_EN].
+ *
+ * Before writing to this register, RNM_CTL_STATUS[ZUC_EN] must be zero to turn off the
+ * ZUC engine.
+ */
+union bdk_rnm_zuc_init_nlfx
+{
+ uint64_t u;
+ struct bdk_rnm_zuc_init_nlfx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t r_state : 32; /**< [ 31: 0](SWO) Write the state of one ZUC nonlinear function element. */
+#else /* Word 0 - Little Endian */
+ uint64_t r_state : 32; /**< [ 31: 0](SWO) Write the state of one ZUC nonlinear function element. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rnm_zuc_init_nlfx_s cn; */
+};
+typedef union bdk_rnm_zuc_init_nlfx bdk_rnm_zuc_init_nlfx_t;
+
+static inline uint64_t BDK_RNM_ZUC_INIT_NLFX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RNM_ZUC_INIT_NLFX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e040000200ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("RNM_ZUC_INIT_NLFX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_RNM_ZUC_INIT_NLFX(a) bdk_rnm_zuc_init_nlfx_t
+#define bustype_BDK_RNM_ZUC_INIT_NLFX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_RNM_ZUC_INIT_NLFX(a) "RNM_ZUC_INIT_NLFX"
+#define device_bar_BDK_RNM_ZUC_INIT_NLFX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_RNM_ZUC_INIT_NLFX(a) (a)
+#define arguments_BDK_RNM_ZUC_INIT_NLFX(a) (a),-1,-1,-1
+
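+/* Illustrative sketch, not part of the generated definitions above: per the
+ * register descriptions, RNM_CTL_STATUS[ZUC_EN] must be cleared before the ZUC
+ * state is loaded. This assumes the BDK_CSR_WRITE/BDK_CSR_MODIFY helpers from
+ * bdk-csr.h and a CN9XXX RNM_CTL_STATUS bitfield named zuc_en; adjust the
+ * field name if the generated one differs. */
+static inline void bdk_rnm_zuc_load_state(bdk_node_t node,
+                                          const uint32_t lfsr[16],
+                                          const uint32_t nlf[2])
+{
+    /* Turn the ZUC engine off before touching its state. */
+    BDK_CSR_MODIFY(c, node, BDK_RNM_CTL_STATUS, c.s.zuc_en = 0);
+    for (int i = 0; i < 16; i++)    /* 16 LFSR state elements, 31 bits each */
+        BDK_CSR_WRITE(node, BDK_RNM_ZUC_INIT_LFSRX(i), lfsr[i] & 0x7fffffff);
+    for (int i = 0; i < 2; i++)     /* two 32-bit nonlinear-function cells */
+        BDK_CSR_WRITE(node, BDK_RNM_ZUC_INIT_NLFX(i), nlf[i]);
+    /* Re-enable the engine with the new state. */
+    BDK_CSR_MODIFY(c, node, BDK_RNM_CTL_STATUS, c.s.zuc_en = 1);
+}
+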
+#endif /* __BDK_CSRS_RNM_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-rst.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-rst.h
new file mode 100644
index 0000000000..86f0358a96
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-rst.h
@@ -0,0 +1,6117 @@
+#ifndef __BDK_CSRS_RST_H__
+#define __BDK_CSRS_RST_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium RST.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Enumeration rst_bar_e
+ *
+ * RST Base Address Register Enumeration
+ * Enumerates the base address registers.
+ */
+#define BDK_RST_BAR_E_RST_PF_BAR0_CN8 (0x87e006000000ll)
+#define BDK_RST_BAR_E_RST_PF_BAR0_CN8_SIZE 0x800000ull
+#define BDK_RST_BAR_E_RST_PF_BAR0_CN9 (0x87e006000000ll)
+#define BDK_RST_BAR_E_RST_PF_BAR0_CN9_SIZE 0x10000ull
+#define BDK_RST_BAR_E_RST_PF_BAR2 (0x87e00a000000ll)
+#define BDK_RST_BAR_E_RST_PF_BAR2_SIZE 0x10000ull
+#define BDK_RST_BAR_E_RST_PF_BAR4 (0x87e006f00000ll)
+#define BDK_RST_BAR_E_RST_PF_BAR4_SIZE 0x100000ull
+
+/**
+ * Enumeration rst_boot_fail_e
+ *
+ * RST Boot Failure Code Enumeration
+ * Enumerates the reasons for boot failure, returned to post-boot code
+ * in argument register 0 and blinked on GPIO\<11\>.
+ */
+#define BDK_RST_BOOT_FAIL_E_AUTH (6)
+#define BDK_RST_BOOT_FAIL_E_BUS_ERROR (0xb)
+#define BDK_RST_BOOT_FAIL_E_DEVICE (3)
+#define BDK_RST_BOOT_FAIL_E_GOOD_CN8 (0)
+#define BDK_RST_BOOT_FAIL_E_GOOD_CN9 (1)
+#define BDK_RST_BOOT_FAIL_E_HASH (8)
+#define BDK_RST_BOOT_FAIL_E_KEY (7)
+#define BDK_RST_BOOT_FAIL_E_MAGIC (4)
+#define BDK_RST_BOOT_FAIL_E_MCORE (5)
+#define BDK_RST_BOOT_FAIL_E_METH (2)
+#define BDK_RST_BOOT_FAIL_E_SCRIPT_ACC_ERROR (0xa)
+#define BDK_RST_BOOT_FAIL_E_SCRIPT_INVALID (9)
+#define BDK_RST_BOOT_FAIL_E_UNINIT (0)
+
+/**
+ * Enumeration rst_boot_method_e
+ *
+ * RST Primary Boot-strap Method Enumeration
+ * Enumerates the primary (first choice) and secondary (second choice) boot
+ * devices. The primary boot method is selected with the straps
+ * GPIO_STRAP_PIN_E::BOOT_METHOD2..0, and secondary is selected with the straps
+ * GPIO_STRAP_PIN_E::BOOT_METHOD5..3.
+ *
+ * To disable the secondary method, use ::REMOTE.
+ */
+#define BDK_RST_BOOT_METHOD_E_CCPI0 (9)
+#define BDK_RST_BOOT_METHOD_E_CCPI1 (0xa)
+#define BDK_RST_BOOT_METHOD_E_CCPI2 (0xb)
+#define BDK_RST_BOOT_METHOD_E_EMMC_CS0 (0)
+#define BDK_RST_BOOT_METHOD_E_EMMC_CS1 (1)
+#define BDK_RST_BOOT_METHOD_E_EMMC_LS (3)
+#define BDK_RST_BOOT_METHOD_E_EMMC_SS (2)
+#define BDK_RST_BOOT_METHOD_E_PCIE0 (0xc)
+#define BDK_RST_BOOT_METHOD_E_PCIE2 (0xd)
+#define BDK_RST_BOOT_METHOD_E_REMOTE_CN8 (8)
+#define BDK_RST_BOOT_METHOD_E_REMOTE_CN9 (7)
+#define BDK_RST_BOOT_METHOD_E_SPI0_CS0 (2)
+#define BDK_RST_BOOT_METHOD_E_SPI0_CS1 (3)
+#define BDK_RST_BOOT_METHOD_E_SPI1_CS0 (4)
+#define BDK_RST_BOOT_METHOD_E_SPI1_CS1 (5)
+#define BDK_RST_BOOT_METHOD_E_SPI24 (5)
+#define BDK_RST_BOOT_METHOD_E_SPI32 (6)
+
+/**
+ * Enumeration rst_dev_e
+ *
+ * Programmable Reset Device Enumeration
+ * Enumerates the devices that have programmable reset domains, and the index {a} of RST_DEV_MAP().
+ */
+#define BDK_RST_DEV_E_AVS (1)
+#define BDK_RST_DEV_E_CGXX(a) (0x12 + (a))
+#define BDK_RST_DEV_E_EMMC (0x19)
+#define BDK_RST_DEV_E_GSERX(a) (0x1a + (a))
+#define BDK_RST_DEV_E_MPIX(a) (2 + (a))
+#define BDK_RST_DEV_E_NCSI (0)
+#define BDK_RST_DEV_E_PEMX(a) (0x28 + (a))
+#define BDK_RST_DEV_E_ROC_OCLA (0x18)
+#define BDK_RST_DEV_E_SGPIO (0x17)
+#define BDK_RST_DEV_E_SMI (0x16)
+#define BDK_RST_DEV_E_TWSX(a) (4 + (a))
+#define BDK_RST_DEV_E_UAAX(a) (0xa + (a))
+
+/**
+ * Enumeration rst_domain_e
+ *
+ * RST Domain Enumeration
+ * This enumerates the values of RST_DEV_MAP()[DMN].
+ */
+#define BDK_RST_DOMAIN_E_CHIP (0)
+#define BDK_RST_DOMAIN_E_CORE (1)
+#define BDK_RST_DOMAIN_E_MCP (2)
+#define BDK_RST_DOMAIN_E_OFF (4)
+#define BDK_RST_DOMAIN_E_SCP (3)
+
+/**
+ * Enumeration rst_int_vec_e
+ *
+ * RST MSI-X Vector Enumeration
+ * Enumerates the MSI-X interrupt vectors.
+ */
+#define BDK_RST_INT_VEC_E_INTS (0)
+
+/**
+ * Enumeration rst_source_e
+ *
+ * RST Cause Enumeration
+ * Enumerates the reset sources for both reset domain mapping and cause of last reset,
+ * corresponding to the bit numbers of RST_LBOOT.
+ */
+#define BDK_RST_SOURCE_E_CHIPKILL (4)
+#define BDK_RST_SOURCE_E_CHIP_RESET_PIN (2)
+#define BDK_RST_SOURCE_E_CHIP_SOFT (3)
+#define BDK_RST_SOURCE_E_COLD_SOFT (1)
+#define BDK_RST_SOURCE_E_CORE_RESET_PIN (0xb)
+#define BDK_RST_SOURCE_E_CORE_SOFT (0xc)
+#define BDK_RST_SOURCE_E_CORE_WDOG (0xd)
+#define BDK_RST_SOURCE_E_DCOK_PIN (0)
+#define BDK_RST_SOURCE_E_MCP_RESET_PIN (8)
+#define BDK_RST_SOURCE_E_MCP_SOFT (9)
+#define BDK_RST_SOURCE_E_MCP_WDOG (0xa)
+#define BDK_RST_SOURCE_E_OCX (0xe)
+#define BDK_RST_SOURCE_E_PEM_LINKDOWNX(a) (0x12 + 4 * (a))
+#define BDK_RST_SOURCE_E_PEM_PFFLRX(a) (0x13 + 4 * (a))
+#define BDK_RST_SOURCE_E_PERST_PINX(a) (0x11 + 4 * (a))
+#define BDK_RST_SOURCE_E_PERST_SOFTX(a) (0x10 + 4 * (a))
+#define BDK_RST_SOURCE_E_RSVD_F (0xf)
+#define BDK_RST_SOURCE_E_SCP_RESET_PIN (5)
+#define BDK_RST_SOURCE_E_SCP_SOFT (6)
+#define BDK_RST_SOURCE_E_SCP_WDOG (7)
+
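+/* Illustrative sketch, not part of the generated definitions above: the PEM
+ * sources are indexed macros, so the RST_LBOOT bit number for a controller is
+ * computed rather than listed. For PEM2, for example, PERST_SOFTX(2) is
+ * 0x10 + 4*2 = bit 24, PERST_PINX(2) is bit 25, PEM_LINKDOWNX(2) is bit 26
+ * and PEM_PFFLRX(2) is bit 27. A single-source mask can then be built as: */
+static inline uint64_t bdk_rst_source_mask(int source_bit)
+{
+    /* e.g. bdk_rst_source_mask(BDK_RST_SOURCE_E_PEM_LINKDOWNX(2)) */
+    return 1ull << source_bit;
+}
+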
+/**
+ * Structure rst_boot_stat_s
+ *
+ * BOOT_STATUS field Structure
+ * The ROM boot code stores this data in the RST_BOOT_STATUS register once per boot attempt.
+ */
+union bdk_rst_boot_stat_s
+{
+ uint32_t u;
+ struct bdk_rst_boot_stat_s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_16_31 : 16;
+ uint32_t trusted : 1; /**< [ 15: 15] This was a trusted-mode boot. */
+ uint32_t primary : 1; /**< [ 14: 14] This was a boot from the primary device. */
+ uint32_t scr_done : 1; /**< [ 13: 13] The ROM script ran to completion on this boot. */
+ uint32_t reserved_7_12 : 6;
+        uint32_t boot_method           : 3;  /**< [  6:  4] The boot method for this boot attempt, per RST_BOOT_METHOD_E. */
+        uint32_t fail                  : 4;  /**< [  3:  0] The failure code for this boot attempt, per RST_BOOT_FAIL_E. */
+#else /* Word 0 - Little Endian */
+        uint32_t fail                  : 4;  /**< [  3:  0] The failure code for this boot attempt, per RST_BOOT_FAIL_E. */
+        uint32_t boot_method           : 3;  /**< [  6:  4] The boot method for this boot attempt, per RST_BOOT_METHOD_E. */
+ uint32_t reserved_7_12 : 6;
+ uint32_t scr_done : 1; /**< [ 13: 13] The ROM script ran to completion on this boot. */
+ uint32_t primary : 1; /**< [ 14: 14] This was a boot from the primary device. */
+ uint32_t trusted : 1; /**< [ 15: 15] This was a trusted-mode boot. */
+ uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_boot_stat_s_s cn; */
+};
+
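+/* Illustrative sketch, not part of the generated definitions above: a raw
+ * RST_BOOT_STATUS word is decoded by loading it into the union. Note that on
+ * CN8XXX a good boot reads 0 (GOOD_CN8) while on CN9XXX it reads 1 (GOOD_CN9). */
+static inline int bdk_rst_boot_attempt_failed(uint32_t raw)
+{
+    union bdk_rst_boot_stat_s st = { .u = raw };
+    /* [FAIL] carries an RST_BOOT_FAIL_E code for this boot attempt. */
+    return st.s.fail != BDK_RST_BOOT_FAIL_E_GOOD_CN9;
+}
+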
+/**
+ * Structure rst_pp_pwr_s
+ *
+ * INTERNAL: Core Reset Power Delivery Structure
+ *
+ * This structure specifies the layout of RTL reset and power delivery. It is not visible to software.
+ */
+union bdk_rst_pp_pwr_s
+{
+ uint32_t u;
+ struct bdk_rst_pp_pwr_s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_10_31 : 22;
+        uint32_t valid                 : 1;  /**< [  9:  9] Data transmitted on the interface is valid. */
+ uint32_t ppvid : 6; /**< [ 8: 3] Virtual core number. */
+ uint32_t dbg_rst : 1; /**< [ 2: 2] Reset control for the core specified by PPVID. */
+ uint32_t pwrdwn : 1; /**< [ 1: 1] Core does not require power. */
+ uint32_t rst : 1; /**< [ 0: 0] Reset control for the core specified by PPVID. */
+#else /* Word 0 - Little Endian */
+ uint32_t rst : 1; /**< [ 0: 0] Reset control for the core specified by PPVID. */
+ uint32_t pwrdwn : 1; /**< [ 1: 1] Core does not require power. */
+ uint32_t dbg_rst : 1; /**< [ 2: 2] Reset control for the core specified by PPVID. */
+ uint32_t ppvid : 6; /**< [ 8: 3] Virtual core number. */
+        uint32_t valid                 : 1;  /**< [  9:  9] Data transmitted on the interface is valid. */
+ uint32_t reserved_10_31 : 22;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_pp_pwr_s_s cn; */
+};
+
+/**
+ * Register (RSL) rst_ap#_affinity_const
+ *
+ * RST Virtual AP Affinity Map Register
+ * This register indicates the processor affinity identification and the mapping of
+ * logical core numbers to physical core numbers. It is indexed by logical core number.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_apx_affinity_const
+{
+ uint64_t u;
+ struct bdk_rst_apx_affinity_const_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_56_63 : 8;
+ uint64_t core : 8; /**< [ 55: 48](RO) Physical core number. */
+ uint64_t reserved_44_47 : 4;
+ uint64_t phy_clu : 4; /**< [ 43: 40](RO) Physical cluster number. */
+ uint64_t reserved_36_39 : 4;
+ uint64_t phy_core : 4; /**< [ 35: 32](RO) Physical core-within-cluster number. */
+ uint64_t fov : 1; /**< [ 31: 31](RO) Set to indicate if the fields are valid. */
+        uint64_t u                     : 1;  /**< [ 30: 30](RO) Set to indicate processors are part of a multiprocessor system. */
+ uint64_t reserved_25_29 : 5;
+        uint64_t mt                    : 1;  /**< [ 24: 24](RO) Set to indicate the core is multithreaded and [AFF0] is the thread number within the core. */
+ uint64_t aff2 : 8; /**< [ 23: 16](RO/H) Affinity 2 for this logical core number. In CNXXXX, the node id. */
+ uint64_t aff1 : 8; /**< [ 15: 8](RO/H) Affinity 1 for this logical core number. In CNXXXX, the logical cluster id. */
+ uint64_t aff0 : 8; /**< [ 7: 0](RO/H) Affinity 0 for this logical core number. In CNXXXX, the logical core number within a cluster. */
+#else /* Word 0 - Little Endian */
+ uint64_t aff0 : 8; /**< [ 7: 0](RO/H) Affinity 0 for this logical core number. In CNXXXX, the logical core number within a cluster. */
+ uint64_t aff1 : 8; /**< [ 15: 8](RO/H) Affinity 1 for this logical core number. In CNXXXX, the logical cluster id. */
+ uint64_t aff2 : 8; /**< [ 23: 16](RO/H) Affinity 2 for this logical core number. In CNXXXX, the node id. */
+        uint64_t mt                    : 1;  /**< [ 24: 24](RO) Set to indicate the core is multithreaded and [AFF0] is the thread number within the core. */
+ uint64_t reserved_25_29 : 5;
+        uint64_t u                     : 1;  /**< [ 30: 30](RO) Set to indicate processors are part of a multiprocessor system. */
+ uint64_t fov : 1; /**< [ 31: 31](RO) Set to indicate if the fields are valid. */
+ uint64_t phy_core : 4; /**< [ 35: 32](RO) Physical core-within-cluster number. */
+ uint64_t reserved_36_39 : 4;
+ uint64_t phy_clu : 4; /**< [ 43: 40](RO) Physical cluster number. */
+ uint64_t reserved_44_47 : 4;
+ uint64_t core : 8; /**< [ 55: 48](RO) Physical core number. */
+ uint64_t reserved_56_63 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_apx_affinity_const_s cn; */
+};
+typedef union bdk_rst_apx_affinity_const bdk_rst_apx_affinity_const_t;
+
+static inline uint64_t BDK_RST_APX_AFFINITY_CONST(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_APX_AFFINITY_CONST(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+ return 0x87e006001000ll + 8ll * ((a) & 0x1f);
+ __bdk_csr_fatal("RST_APX_AFFINITY_CONST", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_APX_AFFINITY_CONST(a) bdk_rst_apx_affinity_const_t
+#define bustype_BDK_RST_APX_AFFINITY_CONST(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_APX_AFFINITY_CONST(a) "RST_APX_AFFINITY_CONST"
+#define device_bar_BDK_RST_APX_AFFINITY_CONST(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_APX_AFFINITY_CONST(a) (a)
+#define arguments_BDK_RST_APX_AFFINITY_CONST(a) (a),-1,-1,-1
+
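+/* Illustrative sketch, not part of the generated definitions above: mapping a
+ * logical core number to its physical cluster/core, assuming the BDK_CSR_INIT
+ * helper from bdk-csr.h. CN9XXX only, logical cores 0..23. */
+static inline void bdk_rst_get_affinity(bdk_node_t node, unsigned long lcore,
+                                        int *cluster, int *core)
+{
+    BDK_CSR_INIT(aff, node, BDK_RST_APX_AFFINITY_CONST(lcore));
+    *cluster = *core = -1;
+    if (aff.s.fov) { /* fields are only meaningful when [FOV] is set */
+        *cluster = aff.s.phy_clu;
+        *core = aff.s.phy_core;
+    }
+}
+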
+/**
+ * Register (RSL) rst_bist_active
+ *
+ * RST BIST Active Status Register
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_bist_active
+{
+ uint64_t u;
+ struct bdk_rst_bist_active_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_6_63 : 58;
+        uint64_t ap                    : 1;  /**< [  5:  5](RO/H) BIST in progress due to AP cores being put into reset. When set, memories
+                                                                 associated with this group are being tested. */
+        uint64_t csr                   : 1;  /**< [  4:  4](RO/H) BIST in progress due to access to RST_DEV_MAP(). When set, memories
+                                                                 associated with this access are being tested. */
+        uint64_t scp                   : 1;  /**< [  3:  3](RO/H) SCP domain BIST in progress. When set, memories associated with
+                                                                 the SCP domain are being tested. */
+        uint64_t mcp                   : 1;  /**< [  2:  2](RO/H) MCP domain BIST in progress. When set, memories associated with
+                                                                 the MCP domain are being tested. */
+        uint64_t core                  : 1;  /**< [  1:  1](RO/H) Core domain BIST in progress. When set, memories associated with
+                                                                 the core domain are being tested. */
+        uint64_t reserved_0            : 1;
+#else /* Word 0 - Little Endian */
+        uint64_t reserved_0            : 1;
+        uint64_t core                  : 1;  /**< [  1:  1](RO/H) Core domain BIST in progress. When set, memories associated with
+                                                                 the core domain are being tested. */
+        uint64_t mcp                   : 1;  /**< [  2:  2](RO/H) MCP domain BIST in progress. When set, memories associated with
+                                                                 the MCP domain are being tested. */
+        uint64_t scp                   : 1;  /**< [  3:  3](RO/H) SCP domain BIST in progress. When set, memories associated with
+                                                                 the SCP domain are being tested. */
+        uint64_t csr                   : 1;  /**< [  4:  4](RO/H) BIST in progress due to access to RST_DEV_MAP(). When set, memories
+                                                                 associated with this access are being tested. */
+        uint64_t ap                    : 1;  /**< [  5:  5](RO/H) BIST in progress due to AP cores being put into reset. When set, memories
+                                                                 associated with this group are being tested. */
+ uint64_t reserved_6_63 : 58;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_bist_active_s cn; */
+};
+typedef union bdk_rst_bist_active bdk_rst_bist_active_t;
+
+#define BDK_RST_BIST_ACTIVE BDK_RST_BIST_ACTIVE_FUNC()
+static inline uint64_t BDK_RST_BIST_ACTIVE_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_BIST_ACTIVE_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e006001890ll;
+ __bdk_csr_fatal("RST_BIST_ACTIVE", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_BIST_ACTIVE bdk_rst_bist_active_t
+#define bustype_BDK_RST_BIST_ACTIVE BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_BIST_ACTIVE "RST_BIST_ACTIVE"
+#define device_bar_BDK_RST_BIST_ACTIVE 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_BIST_ACTIVE 0
+#define arguments_BDK_RST_BIST_ACTIVE -1,-1,-1,-1
+
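+/* Illustrative sketch, not part of the generated definitions above: waiting
+ * for an SCP-domain BIST to finish, assuming the BDK_CSR_WAIT_FOR_FIELD helper
+ * from bdk-csr.h (returns a negative value on timeout). CN9XXX only. */
+static inline int bdk_rst_wait_scp_bist_done(bdk_node_t node)
+{
+    /* Poll RST_BIST_ACTIVE[SCP] until the SCP-domain memories are done. */
+    return BDK_CSR_WAIT_FOR_FIELD(node, BDK_RST_BIST_ACTIVE, scp, ==, 0, 10000);
+}
+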
+/**
+ * Register (RSL) rst_bist_timer
+ *
+ * INTERNAL: RST BIST Timer Register
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_bist_timer
+{
+ uint64_t u;
+ struct bdk_rst_bist_timer_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_29_63 : 35;
+        uint64_t count                 : 29; /**< [ 28:  0](RO) Number of 50 MHz reference clocks that elapsed during BIST and repair for the
+                                                                 last reset.
+                                                                 If the MSB is set, the BIST chain did not complete as expected. */
+#else /* Word 0 - Little Endian */
+        uint64_t count                 : 29; /**< [ 28:  0](RO) Number of 50 MHz reference clocks that elapsed during BIST and repair for the
+                                                                 last reset.
+                                                                 If the MSB is set, the BIST chain did not complete as expected. */
+ uint64_t reserved_29_63 : 35;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_bist_timer_s cn8; */
+ struct bdk_rst_bist_timer_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_29_63 : 35;
+        uint64_t count                 : 29; /**< [ 28:  0](RO/H) Number of 100 MHz reference clocks that elapsed during the
+                                                                 last BIST operation. If the MSB is set, the BIST did not
+                                                                 complete as expected. */
+#else /* Word 0 - Little Endian */
+        uint64_t count                 : 29; /**< [ 28:  0](RO/H) Number of 100 MHz reference clocks that elapsed during the
+                                                                 last BIST operation. If the MSB is set, the BIST did not
+                                                                 complete as expected. */
+ uint64_t reserved_29_63 : 35;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_rst_bist_timer bdk_rst_bist_timer_t;
+
+#define BDK_RST_BIST_TIMER BDK_RST_BIST_TIMER_FUNC()
+static inline uint64_t BDK_RST_BIST_TIMER_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_BIST_TIMER_FUNC(void)
+{
+ return 0x87e006001760ll;
+}
+
+#define typedef_BDK_RST_BIST_TIMER bdk_rst_bist_timer_t
+#define bustype_BDK_RST_BIST_TIMER BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_BIST_TIMER "RST_BIST_TIMER"
+#define device_bar_BDK_RST_BIST_TIMER 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_BIST_TIMER 0
+#define arguments_BDK_RST_BIST_TIMER -1,-1,-1,-1
+
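+/* Illustrative sketch, not part of the generated definitions above: converting
+ * the BIST timer count to microseconds. Per the field descriptions, the
+ * reference clock is 50 MHz on CN8XXX and 100 MHz on CN9XXX, so one tick is
+ * 20 ns or 10 ns respectively. Assumes the BDK_CSR_INIT helper from bdk-csr.h. */
+static inline uint64_t bdk_rst_bist_time_us(bdk_node_t node)
+{
+    BDK_CSR_INIT(timer, node, BDK_RST_BIST_TIMER);
+    uint64_t ref_mhz = CAVIUM_IS_MODEL(CAVIUM_CN9XXX) ? 100 : 50;
+    return timer.s.count / ref_mhz; /* ticks / MHz = microseconds */
+}
+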
+/**
+ * Register (RSL) rst_boot
+ *
+ * RST Boot Register
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_boot
+{
+ uint64_t u;
+ struct bdk_rst_boot_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t chipkill : 1; /**< [ 63: 63](R/W1S) A 0-to-1 transition of CHIPKILL starts the CHIPKILL timer. When CHIPKILL=1 and the timer
+ expires, chip reset is asserted internally. The CHIPKILL timer can be stopped only by
+ a reset (cold, warm, soft). The length of the CHIPKILL timer is specified by
+ RST_CKILL[TIMER]. This feature is effectively a delayed warm reset. */
+ uint64_t reserved_61_62 : 2;
+        uint64_t trusted_mode          : 1;  /**< [ 60: 60](RO) When set, chip is operating as a trusted device. This bit is asserted when
+                                                                 either MIO_FUS_DAT2[TRUSTZONE_EN], FUSF_CTL[TZ_FORCE2], or the trusted-mode
+                                                                 strap GPIO_STRAP\<10\> is set. */
+ uint64_t ckill_ppdis : 1; /**< [ 59: 59](R/W) Chipkill core disable. When set to 1, cores other than core 0 will immediately
+ be disabled when RST_BOOT[CHIPKILL] is set. Writes have no effect when
+ RST_BOOT[CHIPKILL]=1. */
+ uint64_t jt_tstmode : 1; /**< [ 58: 58](RO) JTAG test mode. */
+ uint64_t vrm_err : 1; /**< [ 57: 57](RO) Reserved. */
+        uint64_t dis_huk               : 1;  /**< [ 56: 56](R/W1S) Disable HUK. Secure only and W1S set-only. When set, FUSF_SSK(),
+ FUSF_HUK(), FUSF_EK(), and FUSF_SW() cannot be read.
+ Resets to (!trusted_mode && FUSF_CTL[FJ_DIS_HUK]).
+
+ Software must write a one to this bit when the chain of trust is broken. */
+        uint64_t dis_scan              : 1;  /**< [ 55: 55](R/W1S) Disable scan. When written to 1 while FUSF_CTL[ROT_LCK] = 1, reads as 1 and scan is not
+ allowed in the part.
+ This state persists across soft and warm resets.
+
+ Internal:
+ This state will persist across a simulation */
+ uint64_t mcp_jtagdis : 1; /**< [ 54: 54](R/W/H) MCP JTAG debugger disable. When set, the MCP Debug interface of
+ the EJTAG TAP controller will be disabled. This field does not
+ control the SCP EJTAG interface (See EJTAGDIS).
+ This field resets to one in trusted mode otherwise it is cleared.
+ This field is reinitialized with a cold domain reset. */
+ uint64_t gpio_ejtag : 1; /**< [ 53: 53](R/W/H) Use GPIO pins for EJTAG. When set, the EJTAG chain consisting
+ of MCP and SCP devices is routed directly to GPIO pins. When
+ cleared these devices are included in the standard JTAG chain.
+ The specific GPIO pins are selected with GPIO_BIT_CFG()[PIN_SEL].
+ This field is reinitialized with a cold domain reset.
+ Reset value is determined by GPIO strap pin number
+ GPIO_STRAP_PIN_E::MCP_DBG_ON_GPIO. */
+ uint64_t reserved_47_52 : 6;
+        uint64_t c_mul                 : 7;  /**< [ 46: 40](RO/H) Core-clock multiplier. [C_MUL] = (core-clock speed) / (ref-clock speed). The
+ ref-clock speed should always be 50 MHz.
+
+ Internal:
+ [C_MUL] is set from the pi_pll_mul pins plus 6 and is limited by a set of
+ fuses[127:123]. If the fuse value is \> 0, it is compared with the pi_pll_mul[5:1]
+ pins and the smaller value is used. */
+ uint64_t reserved_39 : 1;
+        uint64_t pnr_mul               : 6;  /**< [ 38: 33](RO/H) Coprocessor-clock multiplier. [PNR_MUL] = (coprocessor-clock speed) / (ref-clock speed).
+                                                                 The ref-clock speed should always be 50 MHz.
+
+ Internal:
+ [PNR_MUL] is set from the pi_pnr_pll_mul pins plus 6 and is limited by a set of
+ fuses[122:119]. If the fuse value is \> 0, it is compared with the pi_pnr_pll_mul[4:1]
+ pins and the smaller value is used. */
+ uint64_t reserved_24_32 : 9;
+ uint64_t lboot_ext45 : 6; /**< [ 23: 18](R/W1C/H) Last boot cause mask for PEM5 and PEM4; resets only with PLL_DC_OK.
+ \<23\> = Warm reset due to Cntl5 link-down or hot-reset.
+ \<22\> = Warm reset due to Cntl4 link-down or hot-reset.
+ \<21\> = Cntl5 reset due to PERST5_L pin.
+ \<20\> = Cntl4 reset due to PERST4_L pin.
+ \<19\> = Warm reset due to PERST5_L pin.
+ \<18\> = Warm reset due to PERST4_L pin. */
+ uint64_t lboot_ext23 : 6; /**< [ 17: 12](R/W1C/H) Last boot cause mask for PEM3 and PEM2; resets only with PLL_DC_OK.
+ \<17\> = Warm reset due to Cntl3 link-down or hot-reset.
+ \<16\> = Warm reset due to Cntl2 link-down or hot-reset.
+ \<15\> = Cntl3 reset due to PERST3_L pin.
+ \<14\> = Cntl2 reset due to PERST2_L pin.
+ \<13\> = Warm reset due to PERST3_L pin.
+ \<12\> = Warm reset due to PERST2_L pin. */
+ uint64_t lboot : 10; /**< [ 11: 2](R/W1C/H) Last boot cause mask for PEM1 and PEM0; resets only with PLL_DC_OK.
+ \<11\> = Soft reset due to watchdog.
+ \<10\> = Soft reset due to RST_SOFT_RST write.
+ \<9\> = Warm reset due to Cntl1 link-down or hot-reset.
+ \<8\> = Warm reset due to Cntl0 link-down or hot-reset.
+ \<7\> = Cntl1 reset due to PERST1_L pin.
+ \<6\> = Cntl0 reset due to PERST0_L pin.
+ \<5\> = Warm reset due to PERST1_L pin.
+ \<4\> = Warm reset due to PERST0_L pin.
+ \<3\> = Warm reset due to CHIP_RESET_L pin.
+ \<2\> = Cold reset due to PLL_DC_OK pin. */
+ uint64_t rboot : 1; /**< [ 1: 1](R/W) Remote boot. If set, indicates that core 0 will remain in reset after a
+ chip warm/soft reset. The initial value mimics the setting of the [RBOOT_PIN]. */
+        uint64_t rboot_pin             : 1;  /**< [  0:  0](RO/H) Remote boot strap. Indicates the state of remote boot as initially determined by
+                                                                 GPIO_STRAP\<2:0\> = RST_BOOT_METHOD_E::REMOTE. If set, core 0 will remain in reset
+                                                                 for the cold reset. */
+#else /* Word 0 - Little Endian */
+        uint64_t rboot_pin             : 1;  /**< [  0:  0](RO/H) Remote boot strap. Indicates the state of remote boot as initially determined by
+                                                                 GPIO_STRAP\<2:0\> = RST_BOOT_METHOD_E::REMOTE. If set, core 0 will remain in reset
+                                                                 for the cold reset. */
+ uint64_t rboot : 1; /**< [ 1: 1](R/W) Remote boot. If set, indicates that core 0 will remain in reset after a
+ chip warm/soft reset. The initial value mimics the setting of the [RBOOT_PIN]. */
+ uint64_t lboot : 10; /**< [ 11: 2](R/W1C/H) Last boot cause mask for PEM1 and PEM0; resets only with PLL_DC_OK.
+ \<11\> = Soft reset due to watchdog.
+ \<10\> = Soft reset due to RST_SOFT_RST write.
+ \<9\> = Warm reset due to Cntl1 link-down or hot-reset.
+ \<8\> = Warm reset due to Cntl0 link-down or hot-reset.
+ \<7\> = Cntl1 reset due to PERST1_L pin.
+ \<6\> = Cntl0 reset due to PERST0_L pin.
+ \<5\> = Warm reset due to PERST1_L pin.
+ \<4\> = Warm reset due to PERST0_L pin.
+ \<3\> = Warm reset due to CHIP_RESET_L pin.
+ \<2\> = Cold reset due to PLL_DC_OK pin. */
+ uint64_t lboot_ext23 : 6; /**< [ 17: 12](R/W1C/H) Last boot cause mask for PEM3 and PEM2; resets only with PLL_DC_OK.
+ \<17\> = Warm reset due to Cntl3 link-down or hot-reset.
+ \<16\> = Warm reset due to Cntl2 link-down or hot-reset.
+ \<15\> = Cntl3 reset due to PERST3_L pin.
+ \<14\> = Cntl2 reset due to PERST2_L pin.
+ \<13\> = Warm reset due to PERST3_L pin.
+ \<12\> = Warm reset due to PERST2_L pin. */
+ uint64_t lboot_ext45 : 6; /**< [ 23: 18](R/W1C/H) Last boot cause mask for PEM5 and PEM4; resets only with PLL_DC_OK.
+ \<23\> = Warm reset due to Cntl5 link-down or hot-reset.
+ \<22\> = Warm reset due to Cntl4 link-down or hot-reset.
+ \<21\> = Cntl5 reset due to PERST5_L pin.
+ \<20\> = Cntl4 reset due to PERST4_L pin.
+ \<19\> = Warm reset due to PERST5_L pin.
+ \<18\> = Warm reset due to PERST4_L pin. */
+ uint64_t reserved_24_32 : 9;
+        uint64_t pnr_mul               : 6;  /**< [ 38: 33](RO/H) Coprocessor-clock multiplier. [PNR_MUL] = (coprocessor-clock speed) / (ref-clock speed).
+                                                                 The ref-clock speed should always be 50 MHz.
+
+ Internal:
+ [PNR_MUL] is set from the pi_pnr_pll_mul pins plus 6 and is limited by a set of
+ fuses[122:119]. If the fuse value is \> 0, it is compared with the pi_pnr_pll_mul[4:1]
+ pins and the smaller value is used. */
+ uint64_t reserved_39 : 1;
+        uint64_t c_mul                 : 7;  /**< [ 46: 40](RO/H) Core-clock multiplier. [C_MUL] = (core-clock speed) / (ref-clock speed). The
+ ref-clock speed should always be 50 MHz.
+
+ Internal:
+ [C_MUL] is set from the pi_pll_mul pins plus 6 and is limited by a set of
+ fuses[127:123]. If the fuse value is \> 0, it is compared with the pi_pll_mul[5:1]
+ pins and the smaller value is used. */
+ uint64_t reserved_47_52 : 6;
+ uint64_t gpio_ejtag : 1; /**< [ 53: 53](R/W/H) Use GPIO pins for EJTAG. When set, the EJTAG chain consisting
+ of MCP and SCP devices is routed directly to GPIO pins. When
+ cleared these devices are included in the standard JTAG chain.
+ The specific GPIO pins are selected with GPIO_BIT_CFG()[PIN_SEL].
+ This field is reinitialized with a cold domain reset.
+ Reset value is determined by GPIO strap pin number
+ GPIO_STRAP_PIN_E::MCP_DBG_ON_GPIO. */
+ uint64_t mcp_jtagdis : 1; /**< [ 54: 54](R/W/H) MCP JTAG debugger disable. When set, the MCP Debug interface of
+ the EJTAG TAP controller will be disabled. This field does not
+ control the SCP EJTAG interface (See EJTAGDIS).
+ This field resets to one in trusted mode otherwise it is cleared.
+ This field is reinitialized with a cold domain reset. */
+        uint64_t dis_scan              : 1;  /**< [ 55: 55](R/W1S) Disable scan. When written to 1 while FUSF_CTL[ROT_LCK] = 1, reads as 1 and scan is not
+ allowed in the part.
+ This state persists across soft and warm resets.
+
+ Internal:
+ This state will persist across a simulation */
+        uint64_t dis_huk               : 1;  /**< [ 56: 56](R/W1S) Disable HUK. Secure only and W1S set-only. When set, FUSF_SSK(),
+ FUSF_HUK(), FUSF_EK(), and FUSF_SW() cannot be read.
+ Resets to (!trusted_mode && FUSF_CTL[FJ_DIS_HUK]).
+
+ Software must write a one to this bit when the chain of trust is broken. */
+ uint64_t vrm_err : 1; /**< [ 57: 57](RO) Reserved. */
+ uint64_t jt_tstmode : 1; /**< [ 58: 58](RO) JTAG test mode. */
+ uint64_t ckill_ppdis : 1; /**< [ 59: 59](R/W) Chipkill core disable. When set to 1, cores other than core 0 will immediately
+ be disabled when RST_BOOT[CHIPKILL] is set. Writes have no effect when
+ RST_BOOT[CHIPKILL]=1. */
+        uint64_t trusted_mode          : 1;  /**< [ 60: 60](RO) When set, chip is operating as a trusted device. This bit is asserted when
+                                                                 either MIO_FUS_DAT2[TRUSTZONE_EN], FUSF_CTL[TZ_FORCE2], or the trusted-mode
+                                                                 strap GPIO_STRAP\<10\> is set. */
+ uint64_t reserved_61_62 : 2;
+ uint64_t chipkill : 1; /**< [ 63: 63](R/W1S) A 0-to-1 transition of CHIPKILL starts the CHIPKILL timer. When CHIPKILL=1 and the timer
+ expires, chip reset is asserted internally. The CHIPKILL timer can be stopped only by
+ a reset (cold, warm, soft). The length of the CHIPKILL timer is specified by
+ RST_CKILL[TIMER]. This feature is effectively a delayed warm reset. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_rst_boot_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t chipkill : 1; /**< [ 63: 63](R/W1S) A zero-to-one transition of CHIPKILL starts the CHIPKILL timer. When set and the timer
+ expires, chip domain reset is asserted.
+ The length of the CHIPKILL timer is specified by RST_CKILL[TIMER].
+ This feature is effectively a delayed reset.
+ This field is reinitialized with a chip domain reset. */
+ uint64_t jtagdis : 1; /**< [ 62: 62](R/W/H) JTAG access disable. When set, the debug access port of the
+ JTAG TAP controller will be disabled, i.e. DAP_IMP_DAR will be zero.
+ This field resets to one in trusted mode otherwise it is cleared.
+ This field is reinitialized with a cold domain reset. */
+ uint64_t scp_jtagdis : 1; /**< [ 61: 61](R/W/H) SCP JTAG debugger disable. When set, the SCP debug interface of
+ the EJTAG TAP controller will be disabled. This field does not
+ control the MCP EJTAG interface (See [MCP_JTAGDIS]).
+ This field resets to one in trusted mode otherwise it is cleared.
+ This field is reinitialized with a cold domain reset. */
+        uint64_t trusted_mode          : 1;  /**< [ 60: 60](RO/H) When set, chip is operating as a trusted device. This bit is asserted when
+                                                                 either FUSF_CTL[TZ_FORCE2] or the trusted-mode strap on GPIO number
+                                                                 GPIO_STRAP_PIN_E::TRUSTED_MODE is set. */
+ uint64_t reserved_58_59 : 2;
+ uint64_t vrm_err : 1; /**< [ 57: 57](R/W1) Reserved. */
+ uint64_t dis_huk : 1; /**< [ 56: 56](R/W1S) Disable HUK. Secure only and W1S set-only. When set, FUSF_SSK(),
+ FUSF_HUK(), FUSF_EK(), and FUSF_SW() cannot be read.
+ Resets to one if FUSF_CTL[FJ_DIS_HUK] is set and not in trusted mode.
+ It is also set anytime scan mode is activated while FUSF_CTL[FJ_DIS_HUK] is set.
+ Software must set this bit when the chain of trust is broken.
+ This field is reinitialized with a cold domain reset. */
+ uint64_t dis_scan : 1; /**< [ 55: 55](R/W1S) Disable scan. When set and FUSF_CTL[ROT_LCK] = 1, scan is not
+ allowed in the part.
+ This field is reinitialized with a cold domain reset.
+
+ Internal:
+ The field is actually reset only after DCOK has been left
+ deasserted for an extended period of time. */
+ uint64_t mcp_jtagdis : 1; /**< [ 54: 54](R/W/H) MCP JTAG debugger disable. When set, the MCP Debug interface of
+ the EJTAG TAP controller will be disabled. This field does not
+ control the SCP EJTAG interface (See EJTAGDIS).
+ This field resets to one in trusted mode otherwise it is cleared.
+ This field is reinitialized with a cold domain reset. */
+ uint64_t gpio_ejtag : 1; /**< [ 53: 53](R/W/H) Use GPIO pins for EJTAG. When set, the EJTAG chain consisting
+ of MCP and SCP devices is routed directly to GPIO pins. When
+ cleared these devices are included in the standard JTAG chain.
+ The specific GPIO pins are selected with GPIO_BIT_CFG()[PIN_SEL].
+ This field is reinitialized with a cold domain reset.
+ Reset value is determined by GPIO strap pin number
+ GPIO_STRAP_PIN_E::MCP_DBG_ON_GPIO. */
+ uint64_t reserved_47_52 : 6;
+ uint64_t c_mul : 7; /**< [ 46: 40](RO/H) Current core-clock multiplier. Clock frequency = [C_MUL] * 50 MHz.
+ See RST_CORE_PLL for details on programming and initial values.
+
+ Internal:
+ [C_MUL] is a copy of RST_CORE_PLL[CUR_MUL]. */
+ uint64_t reserved_39 : 1;
+ uint64_t pnr_mul : 6; /**< [ 38: 33](RO/H) Current coprocessor-clock multiplier. Clock frequency = [PNR_MUL] * 50 MHz.
+ See RST_PNR_PLL for details on programming and initial values.
+
+ Internal:
+ [PNR_MUL] is a copy of RST_PNR_PLL[CUR_MUL]. */
+ uint64_t reserved_31_32 : 2;
+ uint64_t cpt_mul : 7; /**< [ 30: 24](RO/H) Current crypto-clock multiplier. Clock frequency = [CPT_MUL] * 50 MHz.
+ See RST_CPT_PLL for details on programming and initial values.
+
+ Internal:
+ [CPT_MUL] is a copy of RST_CPT_PLL[CUR_MUL]. */
+ uint64_t reserved_2_23 : 22;
+ uint64_t rboot : 1; /**< [ 1: 1](R/W/H) Remote boot. If set, indicates that SCP will require a write to
+ RST_SCP_DOMAIN_W1C to bring it out of reset. Otherwise it
+ will automatically come out of reset once the reset source has
+ been deasserted.
+ The initial value is set when [RBOOT_PIN] is true and
+ trustzone has not been enabled.
+ This field is reinitialized with a cold domain reset.
+
+ Internal:
+ This field is cleared when jtg__rst_disable_remote is active. */
+        uint64_t rboot_pin             : 1;  /**< [  0:  0](RO) Remote boot strap. The value is set when the primary boot method is
+                                                                 RST_BOOT_METHOD_E::REMOTE as the GPIO pins are sampled on the rising
+                                                                 edge of PLL_DCOK. */
+#else /* Word 0 - Little Endian */
+        uint64_t rboot_pin             : 1;  /**< [  0:  0](RO) Remote boot strap. The value is set when the primary boot method is
+                                                                 RST_BOOT_METHOD_E::REMOTE as the GPIO pins are sampled on the rising
+                                                                 edge of PLL_DCOK. */
+ uint64_t rboot : 1; /**< [ 1: 1](R/W/H) Remote boot. If set, indicates that SCP will require a write to
+ RST_SCP_DOMAIN_W1C to bring it out of reset. Otherwise it
+ will automatically come out of reset once the reset source has
+ been deasserted.
+ The initial value is set when [RBOOT_PIN] is true and
+ trustzone has not been enabled.
+ This field is reinitialized with a cold domain reset.
+
+ Internal:
+ This field is cleared when jtg__rst_disable_remote is active. */
+ uint64_t reserved_2_23 : 22;
+ uint64_t cpt_mul : 7; /**< [ 30: 24](RO/H) Current crypto-clock multiplier. Clock frequency = [CPT_MUL] * 50 MHz.
+ See RST_CPT_PLL for details on programming and initial values.
+
+ Internal:
+ [CPT_MUL] is a copy of RST_CPT_PLL[CUR_MUL]. */
+ uint64_t reserved_31_32 : 2;
+ uint64_t pnr_mul : 6; /**< [ 38: 33](RO/H) Current coprocessor-clock multiplier. Clock frequency = [PNR_MUL] * 50 MHz.
+ See RST_PNR_PLL for details on programming and initial values.
+
+ Internal:
+ [PNR_MUL] is a copy of RST_PNR_PLL[CUR_MUL]. */
+ uint64_t reserved_39 : 1;
+ uint64_t c_mul : 7; /**< [ 46: 40](RO/H) Current core-clock multiplier. Clock frequency = [C_MUL] * 50 MHz.
+ See RST_CORE_PLL for details on programming and initial values.
+
+ Internal:
+ [C_MUL] is a copy of RST_CORE_PLL[CUR_MUL]. */
+ uint64_t reserved_47_52 : 6;
+ uint64_t gpio_ejtag : 1; /**< [ 53: 53](R/W/H) Use GPIO pins for EJTAG. When set, the EJTAG chain consisting
+ of MCP and SCP devices is routed directly to GPIO pins. When
+ cleared these devices are included in the standard JTAG chain.
+ The specific GPIO pins are selected with GPIO_BIT_CFG()[PIN_SEL].
+ This field is reinitialized with a cold domain reset.
+ Reset value is determined by GPIO strap pin number
+ GPIO_STRAP_PIN_E::MCP_DBG_ON_GPIO. */
+ uint64_t mcp_jtagdis : 1; /**< [ 54: 54](R/W/H) MCP JTAG debugger disable. When set, the MCP Debug interface of
+ the EJTAG TAP controller will be disabled. This field does not
+ control the SCP EJTAG interface (See EJTAGDIS).
+ This field resets to one in trusted mode otherwise it is cleared.
+ This field is reinitialized with a cold domain reset. */
+ uint64_t dis_scan : 1; /**< [ 55: 55](R/W1S) Disable scan. When set and FUSF_CTL[ROT_LCK] = 1, scan is not
+ allowed in the part.
+ This field is reinitialized with a cold domain reset.
+
+ Internal:
+ The field is actually reset only after DCOK has been left
+ deasserted for an extended period of time. */
+ uint64_t dis_huk : 1; /**< [ 56: 56](R/W1S) Disable HUK. Secure only and W1S set-only. When set, FUSF_SSK(),
+ FUSF_HUK(), FUSF_EK(), and FUSF_SW() cannot be read.
+ Resets to one if FUSF_CTL[FJ_DIS_HUK] is set and not in trusted mode.
+ It is also set anytime scan mode is activated while FUSF_CTL[FJ_DIS_HUK] is set.
+ Software must set this bit when the chain of trust is broken.
+ This field is reinitialized with a cold domain reset. */
+ uint64_t vrm_err : 1; /**< [ 57: 57](R/W1) Reserved. */
+ uint64_t reserved_58_59 : 2;
+        uint64_t trusted_mode          : 1;  /**< [ 60: 60](RO/H) When set, chip is operating as a trusted device. This bit is asserted when
+                                                                 either FUSF_CTL[TZ_FORCE2] or the trusted-mode strap on GPIO number
+                                                                 GPIO_STRAP_PIN_E::TRUSTED_MODE is set. */
+ uint64_t scp_jtagdis : 1; /**< [ 61: 61](R/W/H) SCP JTAG debugger disable. When set, the SCP debug interface of
+ the EJTAG TAP controller will be disabled. This field does not
+ control the MCP EJTAG interface (See [MCP_JTAGDIS]).
+ This field resets to one in trusted mode otherwise it is cleared.
+ This field is reinitialized with a cold domain reset. */
+ uint64_t jtagdis : 1; /**< [ 62: 62](R/W/H) JTAG access disable. When set, the debug access port of the
+ JTAG TAP controller will be disabled, i.e. DAP_IMP_DAR will be zero.
+ This field resets to one in trusted mode otherwise it is cleared.
+ This field is reinitialized with a cold domain reset. */
+ uint64_t chipkill : 1; /**< [ 63: 63](R/W1S) A zero-to-one transition of CHIPKILL starts the CHIPKILL timer. When set and the timer
+ expires, chip domain reset is asserted.
+ The length of the CHIPKILL timer is specified by RST_CKILL[TIMER].
+ This feature is effectively a delayed reset.
+ This field is reinitialized with a chip domain reset. */
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_rst_boot_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t chipkill : 1; /**< [ 63: 63](R/W1S) A 0-to-1 transition of CHIPKILL starts the CHIPKILL timer. When CHIPKILL=1 and the timer
+ expires, chip reset is asserted internally. The CHIPKILL timer can be stopped only by
+ a reset (cold, warm, soft). The length of the CHIPKILL timer is specified by
+ RST_CKILL[TIMER]. This feature is effectively a delayed warm reset. */
+ uint64_t jtcsrdis : 1; /**< [ 62: 62](R/W) JTAG CSR disable. When set to 1, during the next warm or soft reset the JTAG TAP
+ controller will be disabled, i.e. DAP_IMP_DAR will be 0. This field resets to 1
+ in trusted-mode, else 0. */
+ uint64_t ejtagdis : 1; /**< [ 61: 61](R/W) Reserved. */
+        uint64_t trusted_mode          : 1;  /**< [ 60: 60](RO) When set, chip is operating as a trusted device. This bit is asserted when
+                                                                 either MIO_FUS_DAT2[TRUSTZONE_EN], FUSF_CTL[TZ_FORCE2], or the trusted-mode
+                                                                 strap GPIO_STRAP\<10\> is set. */
+ uint64_t ckill_ppdis : 1; /**< [ 59: 59](R/W) Chipkill core disable. When set to 1, cores other than core 0 will immediately
+ be disabled when RST_BOOT[CHIPKILL] is set. Writes have no effect when
+ RST_BOOT[CHIPKILL]=1. */
+ uint64_t jt_tstmode : 1; /**< [ 58: 58](RO) JTAG test mode. */
+ uint64_t vrm_err : 1; /**< [ 57: 57](RO) Reserved. */
+        uint64_t dis_huk               : 1;  /**< [ 56: 56](R/W1S) Disable HUK. Secure only and W1S set-only. When set, FUSF_SSK(),
+ FUSF_HUK(), FUSF_EK(), and FUSF_SW() cannot be read.
+ Resets to (!trusted_mode && FUSF_CTL[FJ_DIS_HUK]).
+
+ Software must write a one to this bit when the chain of trust is broken. */
+        uint64_t dis_scan              : 1;  /**< [ 55: 55](R/W1S) Disable scan. When written to 1 while FUSF_CTL[ROT_LCK] = 1, reads as 1 and scan is not
+ allowed in the part.
+ This state persists across soft and warm resets.
+
+ Internal:
+ This state will persist across a simulation */
+ uint64_t reserved_47_54 : 8;
+        uint64_t c_mul                 : 7;  /**< [ 46: 40](RO/H) Core-clock multiplier. [C_MUL] = (core-clock speed) / (ref-clock speed). The
+ ref-clock speed should always be 50 MHz.
+
+ Internal:
+ [C_MUL] is set from the pi_pll_mul pins plus 6 and is limited by a set of
+ fuses[127:123]. If the fuse value is \> 0, it is compared with the pi_pll_mul[5:1]
+ pins and the smaller value is used. */
+ uint64_t reserved_39 : 1;
+        uint64_t pnr_mul               : 6;  /**< [ 38: 33](RO/H) Coprocessor-clock multiplier. [PNR_MUL] = (coprocessor-clock speed) / (ref-clock speed).
+                                                                 The ref-clock speed should always be 50 MHz.
+
+ Internal:
+ [PNR_MUL] is set from the pi_pnr_pll_mul pins plus 6 and is limited by a set of
+ fuses[122:119]. If the fuse value is \> 0, it is compared with the pi_pnr_pll_mul[4:1]
+ pins and the smaller value is used. */
+ uint64_t lboot_oci : 3; /**< [ 32: 30](R/W1C/H) Reserved.
+ Internal:
+ Last boot cause mask for CCPI; resets only with PLL_DC_OK.
+ \<32\> = Warm reset due to CCPI link 2 going down.
+ \<31\> = Warm reset due to CCPI link 1 going down.
+ \<30\> = Warm reset due to CCPI link 0 going down. */
+ uint64_t reserved_26_29 : 4;
+ uint64_t lboot_ckill : 1; /**< [ 25: 25](R/W1C/H) Last boot cause was chip kill timer expiring. See RST_BOOT[CHIPKILL]. */
+ uint64_t lboot_jtg : 1; /**< [ 24: 24](R/W1C/H) Last boot cause was write to JTG reset. */
+ uint64_t lboot_ext45 : 6; /**< [ 23: 18](R/W1C/H) Reserved. */
+ uint64_t lboot_ext23 : 6; /**< [ 17: 12](R/W1C/H) Last boot cause mask for PEM2; resets only with PLL_DC_OK.
+ \<17\> = Reserved
+ \<16\> = Warm reset due to Cntl2 link-down or hot-reset.
+ \<15\> = Reserved
+ \<14\> = Cntl2 reset due to PERST2_L pin.
+ \<13\> = Reserved
+ \<12\> = Warm reset due to PERST2_L pin. */
+ uint64_t lboot : 10; /**< [ 11: 2](R/W1C/H) Last boot cause mask for PEM1 and PEM0; resets only with PLL_DC_OK.
+ \<11\> = Soft reset due to watchdog.
+ \<10\> = Soft reset due to RST_SOFT_RST write.
+ \<9\> = Warm reset due to Cntl1 link-down or hot-reset.
+ \<8\> = Warm reset due to Cntl0 link-down or hot-reset.
+ \<7\> = Cntl1 reset due to PERST1_L pin.
+ \<6\> = Cntl0 reset due to PERST0_L pin.
+ \<5\> = Warm reset due to PERST1_L pin.
+ \<4\> = Warm reset due to PERST0_L pin.
+ \<3\> = Warm reset due to CHIP_RESET_L pin.
+ \<2\> = Cold reset due to PLL_DC_OK pin. */
+ uint64_t rboot : 1; /**< [ 1: 1](R/W) Remote boot. If set, indicates that core 0 will remain in reset after a
+ chip warm/soft reset. The initial value mimics the setting of the [RBOOT_PIN]. */
+        uint64_t rboot_pin             : 1;  /**< [  0:  0](RO/H) Remote boot strap. Indicates the state of remote boot as initially determined by
+                                                                 GPIO_STRAP\<2:0\> = RST_BOOT_METHOD_E::REMOTE. If set, core 0 will remain in reset
+                                                                 for the cold reset. */
+#else /* Word 0 - Little Endian */
+        uint64_t rboot_pin             : 1;  /**< [  0:  0](RO/H) Remote boot strap. Indicates the state of remote boot as initially determined by
+                                                                 GPIO_STRAP\<2:0\> = RST_BOOT_METHOD_E::REMOTE. If set, core 0 will remain in reset
+                                                                 for the cold reset. */
+ uint64_t rboot : 1; /**< [ 1: 1](R/W) Remote boot. If set, indicates that core 0 will remain in reset after a
+ chip warm/soft reset. The initial value mimics the setting of the [RBOOT_PIN]. */
+ uint64_t lboot : 10; /**< [ 11: 2](R/W1C/H) Last boot cause mask for PEM1 and PEM0; resets only with PLL_DC_OK.
+ \<11\> = Soft reset due to watchdog.
+ \<10\> = Soft reset due to RST_SOFT_RST write.
+ \<9\> = Warm reset due to Cntl1 link-down or hot-reset.
+ \<8\> = Warm reset due to Cntl0 link-down or hot-reset.
+ \<7\> = Cntl1 reset due to PERST1_L pin.
+ \<6\> = Cntl0 reset due to PERST0_L pin.
+ \<5\> = Warm reset due to PERST1_L pin.
+ \<4\> = Warm reset due to PERST0_L pin.
+ \<3\> = Warm reset due to CHIP_RESET_L pin.
+ \<2\> = Cold reset due to PLL_DC_OK pin. */
+ uint64_t lboot_ext23 : 6; /**< [ 17: 12](R/W1C/H) Last boot cause mask for PEM2; resets only with PLL_DC_OK.
+ \<17\> = Reserved
+ \<16\> = Warm reset due to Cntl2 link-down or hot-reset.
+ \<15\> = Reserved
+ \<14\> = Cntl2 reset due to PERST2_L pin.
+ \<13\> = Reserved
+ \<12\> = Warm reset due to PERST2_L pin. */
+ uint64_t lboot_ext45 : 6; /**< [ 23: 18](R/W1C/H) Reserved. */
+ uint64_t lboot_jtg : 1; /**< [ 24: 24](R/W1C/H) Last boot cause was write to JTG reset. */
+ uint64_t lboot_ckill : 1; /**< [ 25: 25](R/W1C/H) Last boot cause was chip kill timer expiring. See RST_BOOT[CHIPKILL]. */
+ uint64_t reserved_26_29 : 4;
+ uint64_t lboot_oci : 3; /**< [ 32: 30](R/W1C/H) Reserved.
+ Internal:
+ Last boot cause mask for CCPI; resets only with PLL_DC_OK.
+ \<32\> = Warm reset due to CCPI link 2 going down.
+ \<31\> = Warm reset due to CCPI link 1 going down.
+ \<30\> = Warm reset due to CCPI link 0 going down. */
+        uint64_t pnr_mul               : 6;  /**< [ 38: 33](RO/H) Coprocessor-clock multiplier. [PNR_MUL] = (coprocessor-clock speed) / (ref-clock speed).
+                                                                 The ref-clock speed should always be 50 MHz.
+
+ Internal:
+ [PNR_MUL] is set from the pi_pnr_pll_mul pins plus 6 and is limited by a set of
+ fuses[122:119]. If the fuse value is \> 0, it is compared with the pi_pnr_pll_mul[4:1]
+ pins and the smaller value is used. */
+ uint64_t reserved_39 : 1;
+        uint64_t c_mul                 : 7;  /**< [ 46: 40](RO/H) Core-clock multiplier. [C_MUL] = (core-clock speed) / (ref-clock speed). The
+ ref-clock speed should always be 50 MHz.
+
+ Internal:
+ [C_MUL] is set from the pi_pll_mul pins plus 6 and is limited by a set of
+ fuses[127:123]. If the fuse value is \> 0, it is compared with the pi_pll_mul[5:1]
+ pins and the smaller value is used. */
+ uint64_t reserved_47_54 : 8;
+        uint64_t dis_scan              : 1;  /**< [ 55: 55](R/W1S) Disable scan. When written to 1 while FUSF_CTL[ROT_LCK] = 1, reads as 1 and scan is not
+ allowed in the part.
+ This state persists across soft and warm resets.
+
+ Internal:
+ This state will persist across a simulation */
+        uint64_t dis_huk               : 1;  /**< [ 56: 56](R/W1S) Disable HUK. Secure only and W1S set-only. When set, FUSF_SSK(),
+ FUSF_HUK(), FUSF_EK(), and FUSF_SW() cannot be read.
+ Resets to (!trusted_mode && FUSF_CTL[FJ_DIS_HUK]).
+
+ Software must write a one to this bit when the chain of trust is broken. */
+ uint64_t vrm_err : 1; /**< [ 57: 57](RO) Reserved. */
+ uint64_t jt_tstmode : 1; /**< [ 58: 58](RO) JTAG test mode. */
+ uint64_t ckill_ppdis : 1; /**< [ 59: 59](R/W) Chipkill core disable. When set to 1, cores other than core 0 will immediately
+ be disabled when RST_BOOT[CHIPKILL] is set. Writes have no effect when
+ RST_BOOT[CHIPKILL]=1. */
+        uint64_t trusted_mode          : 1;  /**< [ 60: 60](RO) When set, chip is operating as a trusted device. This bit is asserted when
+                                                                 either MIO_FUS_DAT2[TRUSTZONE_EN], FUSF_CTL[TZ_FORCE2], or the trusted-mode
+                                                                 strap GPIO_STRAP\<10\> is set. */
+ uint64_t ejtagdis : 1; /**< [ 61: 61](R/W) Reserved. */
+ uint64_t jtcsrdis : 1; /**< [ 62: 62](R/W) JTAG CSR disable. When set to 1, during the next warm or soft reset the JTAG TAP
+ controller will be disabled, i.e. DAP_IMP_DAR will be 0. This field resets to 1
+ in trusted-mode, else 0. */
+ uint64_t chipkill : 1; /**< [ 63: 63](R/W1S) A 0-to-1 transition of CHIPKILL starts the CHIPKILL timer. When CHIPKILL=1 and the timer
+ expires, chip reset is asserted internally. The CHIPKILL timer can be stopped only by
+ a reset (cold, warm, soft). The length of the CHIPKILL timer is specified by
+ RST_CKILL[TIMER]. This feature is effectively a delayed warm reset. */
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_rst_boot_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t chipkill : 1; /**< [ 63: 63](R/W1S) A 0-to-1 transition of CHIPKILL starts the CHIPKILL timer. When CHIPKILL=1 and the timer
+ expires, chip reset is asserted internally. The CHIPKILL timer can be stopped only by
+ a reset (cold, warm, soft). The length of the CHIPKILL timer is specified by
+ RST_CKILL[TIMER]. This feature is effectively a delayed warm reset. */
+ uint64_t jtcsrdis : 1; /**< [ 62: 62](R/W) JTAG CSR disable. When set to 1, during the next warm or soft reset the JTAG TAP
+ controller will be disabled, i.e. DAP_IMP_DAR will be 0. This field resets to 1
+ in trusted-mode, else 0. */
+ uint64_t ejtagdis : 1; /**< [ 61: 61](R/W) Reserved. */
+        uint64_t trusted_mode          : 1;  /**< [ 60: 60](RO) When set, chip is operating as a trusted device. This bit is asserted when
+                                                                 either MIO_FUS_DAT2[TRUSTZONE_EN], FUSF_CTL[TZ_FORCE2], or the trusted-mode
+                                                                 strap GPIO_STRAP\<10\> is set. */
+ uint64_t ckill_ppdis : 1; /**< [ 59: 59](R/W) Chipkill core disable. When set to 1, cores other than core 0 will immediately
+ be disabled when RST_BOOT[CHIPKILL] is set. Writes have no effect when
+ RST_BOOT[CHIPKILL]=1. */
+ uint64_t jt_tstmode : 1; /**< [ 58: 58](RO) JTAG test mode. */
+ uint64_t vrm_err : 1; /**< [ 57: 57](RO) VRM error. VRM did not complete operations within 5.25ms of PLL_DC_OK being
+ asserted. PLLs were released automatically. */
+        uint64_t dis_huk               : 1;  /**< [ 56: 56](R/W1S) Disable HUK. Secure only and W1S set-only. When set, FUSF_SSK(),
+ FUSF_HUK(), FUSF_EK(), and FUSF_SW() cannot be read.
+ Resets to (!trusted_mode && FUSF_CTL[FJ_DIS_HUK]).
+
+ Software must write a one to this bit when the chain of trust is broken. */
+        uint64_t dis_scan              : 1;  /**< [ 55: 55](R/W1S) Disable scan. When written to 1 while FUSF_CTL[ROT_LCK] = 1, reads as 1 and scan is not
+ allowed in the part.
+ This state persists across soft and warm resets.
+
+ Internal:
+ This state will persist across a simulation */
+ uint64_t reserved_47_54 : 8;
+        uint64_t c_mul                 : 7;  /**< [ 46: 40](RO/H) Core-clock multiplier. [C_MUL] = (core-clock speed) / (ref-clock speed). The
+ ref-clock speed should always be 50 MHz.
+
+ Internal:
+ [C_MUL] is set from the pi_pll_mul pins plus 6 and is limited by a set of
+ fuses[127:123]. If the fuse value is \> 0, it is compared with the pi_pll_mul[5:1]
+ pins and the smaller value is used. */
+ uint64_t reserved_39 : 1;
+        uint64_t pnr_mul               : 6;  /**< [ 38: 33](RO/H) Coprocessor-clock multiplier. [PNR_MUL] = (coprocessor-clock speed) / (ref-clock speed).
+                                                                 The ref-clock speed should always be 50 MHz.
+
+ Internal:
+ [PNR_MUL] is set from the pi_pnr_pll_mul pins plus 6 and is limited by a set of
+ fuses[122:119]. If the fuse value is \> 0, it is compared with the pi_pnr_pll_mul[4:1]
+ pins and the smaller value is used. */
+ uint64_t lboot_oci : 3; /**< [ 32: 30](R/W1C/H) Last boot cause mask for CCPI; resets only with PLL_DC_OK.
+ \<32\> = Warm reset due to CCPI link 2 going down.
+ \<31\> = Warm reset due to CCPI link 1 going down.
+ \<30\> = Warm reset due to CCPI link 0 going down. */
+ uint64_t reserved_26_29 : 4;
+ uint64_t reserved_24_25 : 2;
+ uint64_t lboot_ext45 : 6; /**< [ 23: 18](R/W1C/H) Last boot cause mask for PEM5 and PEM4; resets only with PLL_DC_OK.
+ \<23\> = Warm reset due to Cntl5 link-down or hot-reset.
+ \<22\> = Warm reset due to Cntl4 link-down or hot-reset.
+ \<21\> = Cntl5 reset due to PERST5_L pin.
+ \<20\> = Cntl4 reset due to PERST4_L pin.
+ \<19\> = Warm reset due to PERST5_L pin.
+ \<18\> = Warm reset due to PERST4_L pin. */
+ uint64_t lboot_ext23 : 6; /**< [ 17: 12](R/W1C/H) Last boot cause mask for PEM3 and PEM2; resets only with PLL_DC_OK.
+ \<17\> = Warm reset due to Cntl3 link-down or hot-reset.
+ \<16\> = Warm reset due to Cntl2 link-down or hot-reset.
+ \<15\> = Cntl3 reset due to PERST3_L pin.
+ \<14\> = Cntl2 reset due to PERST2_L pin.
+ \<13\> = Warm reset due to PERST3_L pin.
+ \<12\> = Warm reset due to PERST2_L pin. */
+ uint64_t lboot : 10; /**< [ 11: 2](R/W1C/H) Last boot cause mask for PEM1 and PEM0; resets only with PLL_DC_OK.
+ \<11\> = Soft reset due to watchdog.
+ \<10\> = Soft reset due to RST_SOFT_RST write.
+ \<9\> = Warm reset due to Cntl1 link-down or hot-reset.
+ \<8\> = Warm reset due to Cntl0 link-down or hot-reset.
+ \<7\> = Cntl1 reset due to PERST1_L pin.
+ \<6\> = Cntl0 reset due to PERST0_L pin.
+ \<5\> = Warm reset due to PERST1_L pin.
+ \<4\> = Warm reset due to PERST0_L pin.
+ \<3\> = Warm reset due to CHIP_RESET_L pin.
+ \<2\> = Cold reset due to PLL_DC_OK pin. */
+ uint64_t rboot : 1; /**< [ 1: 1](R/W) Remote boot. If set, indicates that core 0 will remain in reset after a
+ chip warm/soft reset. The initial value mimics the setting of the [RBOOT_PIN]. */
+        uint64_t rboot_pin             : 1;  /**< [  0:  0](RO/H) Remote boot strap. Indicates the state of remote boot as initially determined by
+                                                                 GPIO_STRAP\<2:0\> = RST_BOOT_METHOD_E::REMOTE. If set, core 0 will remain in reset
+                                                                 for the cold reset. */
+#else /* Word 0 - Little Endian */
+        uint64_t rboot_pin             : 1;  /**< [  0:  0](RO/H) Remote boot strap. Indicates the state of remote boot as initially determined by
+                                                                 GPIO_STRAP\<2:0\> = RST_BOOT_METHOD_E::REMOTE. If set, core 0 will remain in reset
+                                                                 for the cold reset. */
+ uint64_t rboot : 1; /**< [ 1: 1](R/W) Remote boot. If set, indicates that core 0 will remain in reset after a
+ chip warm/soft reset. The initial value mimics the setting of the [RBOOT_PIN]. */
+ uint64_t lboot : 10; /**< [ 11: 2](R/W1C/H) Last boot cause mask for PEM1 and PEM0; resets only with PLL_DC_OK.
+ \<11\> = Soft reset due to watchdog.
+ \<10\> = Soft reset due to RST_SOFT_RST write.
+ \<9\> = Warm reset due to Cntl1 link-down or hot-reset.
+ \<8\> = Warm reset due to Cntl0 link-down or hot-reset.
+ \<7\> = Cntl1 reset due to PERST1_L pin.
+ \<6\> = Cntl0 reset due to PERST0_L pin.
+ \<5\> = Warm reset due to PERST1_L pin.
+ \<4\> = Warm reset due to PERST0_L pin.
+ \<3\> = Warm reset due to CHIP_RESET_L pin.
+ \<2\> = Cold reset due to PLL_DC_OK pin. */
+ uint64_t lboot_ext23 : 6; /**< [ 17: 12](R/W1C/H) Last boot cause mask for PEM3 and PEM2; resets only with PLL_DC_OK.
+ \<17\> = Warm reset due to Cntl3 link-down or hot-reset.
+ \<16\> = Warm reset due to Cntl2 link-down or hot-reset.
+ \<15\> = Cntl3 reset due to PERST3_L pin.
+ \<14\> = Cntl2 reset due to PERST2_L pin.
+ \<13\> = Warm reset due to PERST3_L pin.
+ \<12\> = Warm reset due to PERST2_L pin. */
+ uint64_t lboot_ext45 : 6; /**< [ 23: 18](R/W1C/H) Last boot cause mask for PEM5 and PEM4; resets only with PLL_DC_OK.
+ \<23\> = Warm reset due to Cntl5 link-down or hot-reset.
+ \<22\> = Warm reset due to Cntl4 link-down or hot-reset.
+ \<21\> = Cntl5 reset due to PERST5_L pin.
+ \<20\> = Cntl4 reset due to PERST4_L pin.
+ \<19\> = Warm reset due to PERST5_L pin.
+ \<18\> = Warm reset due to PERST4_L pin. */
+ uint64_t reserved_24_25 : 2;
+ uint64_t reserved_26_29 : 4;
+ uint64_t lboot_oci : 3; /**< [ 32: 30](R/W1C/H) Last boot cause mask for CCPI; resets only with PLL_DC_OK.
+ \<32\> = Warm reset due to CCPI link 2 going down.
+ \<31\> = Warm reset due to CCPI link 1 going down.
+ \<30\> = Warm reset due to CCPI link 0 going down. */
+        uint64_t pnr_mul               : 6;  /**< [ 38: 33](RO/H) Coprocessor-clock multiplier. [PNR_MUL] = (coprocessor-clock speed) / (ref-clock speed).
+                                                                 The ref-clock speed should always be 50 MHz.
+
+ Internal:
+ [PNR_MUL] is set from the pi_pnr_pll_mul pins plus 6 and is limited by a set of
+ fuses[122:119]. If the fuse value is \> 0, it is compared with the pi_pnr_pll_mul[4:1]
+ pins and the smaller value is used. */
+ uint64_t reserved_39 : 1;
+        uint64_t c_mul                 : 7;  /**< [ 46: 40](RO/H) Core-clock multiplier. [C_MUL] = (core-clock speed) / (ref-clock speed). The
+ ref-clock speed should always be 50 MHz.
+
+ Internal:
+ [C_MUL] is set from the pi_pll_mul pins plus 6 and is limited by a set of
+ fuses[127:123]. If the fuse value is \> 0, it is compared with the pi_pll_mul[5:1]
+ pins and the smaller value is used. */
+ uint64_t reserved_47_54 : 8;
+        uint64_t dis_scan              : 1;  /**< [ 55: 55](R/W1S) Disable scan. When written to 1 while FUSF_CTL[ROT_LCK] = 1, reads as 1 and scan is not
+ allowed in the part.
+ This state persists across soft and warm resets.
+
+ Internal:
+ This state will persist across a simulation */
+        uint64_t dis_huk               : 1;  /**< [ 56: 56](R/W1S) Disable HUK. Secure only and W1S set-only. When set, FUSF_SSK(),
+ FUSF_HUK(), FUSF_EK(), and FUSF_SW() cannot be read.
+ Resets to (!trusted_mode && FUSF_CTL[FJ_DIS_HUK]).
+
+ Software must write a one to this bit when the chain of trust is broken. */
+ uint64_t vrm_err : 1; /**< [ 57: 57](RO) VRM error. VRM did not complete operations within 5.25ms of PLL_DC_OK being
+ asserted. PLLs were released automatically. */
+ uint64_t jt_tstmode : 1; /**< [ 58: 58](RO) JTAG test mode. */
+ uint64_t ckill_ppdis : 1; /**< [ 59: 59](R/W) Chipkill core disable. When set to 1, cores other than core 0 will immediately
+ be disabled when RST_BOOT[CHIPKILL] is set. Writes have no effect when
+ RST_BOOT[CHIPKILL]=1. */
+ uint64_t trusted_mode : 1; /**< [ 60: 60](RO) When set, chip is operating as a trusted device. This bit is asserted when
+ MIO_FUS_DAT2[TRUSTZONE_EN], FUSF_CTL[TZ_FORCE2], or the trusted-mode
+ strap GPIO_STRAP\<10\> is set. */
+ uint64_t ejtagdis : 1; /**< [ 61: 61](R/W) Reserved. */
+ uint64_t jtcsrdis : 1; /**< [ 62: 62](R/W) JTAG CSR disable. When set to 1, during the next warm or soft reset the JTAG TAP
+ controller will be disabled, i.e. DAP_IMP_DAR will be 0. This field resets to 1
+ in trusted-mode, else 0. */
+ uint64_t chipkill : 1; /**< [ 63: 63](R/W1S) A 0-to-1 transition of CHIPKILL starts the CHIPKILL timer. When CHIPKILL=1 and the timer
+ expires, chip reset is asserted internally. The CHIPKILL timer can be stopped only by
+ a reset (cold, warm, soft). The length of the CHIPKILL timer is specified by
+ RST_CKILL[TIMER]. This feature is effectively a delayed warm reset. */
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_rst_boot_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t chipkill : 1; /**< [ 63: 63](R/W1S) A 0-to-1 transition of CHIPKILL starts the CHIPKILL timer. When CHIPKILL=1 and the timer
+ expires, chip reset is asserted internally. The CHIPKILL timer can be stopped only by
+ a reset (cold, warm, soft). The length of the CHIPKILL timer is specified by
+ RST_CKILL[TIMER]. This feature is effectively a delayed warm reset. */
+ uint64_t jtcsrdis : 1; /**< [ 62: 62](R/W) JTAG CSR disable. When set to 1, during the next warm or soft reset the JTAG TAP
+ controller will be disabled, i.e. DAP_IMP_DAR will be 0. This field resets to 1
+ in trusted-mode, else 0. */
+ uint64_t ejtagdis : 1; /**< [ 61: 61](R/W) Reserved. */
+ uint64_t trusted_mode : 1; /**< [ 60: 60](RO) When set, chip is operating as a trusted device. This bit is asserted when
+ MIO_FUS_DAT2[TRUSTZONE_EN], FUSF_CTL[TZ_FORCE2], or the trusted-mode
+ strap GPIO_STRAP\<10\> is set. */
+ uint64_t ckill_ppdis : 1; /**< [ 59: 59](R/W) Chipkill core disable. When set to 1, cores other than core 0 will immediately
+ be disabled when RST_BOOT[CHIPKILL] is set. Writes have no effect when
+ RST_BOOT[CHIPKILL]=1. */
+ uint64_t jt_tstmode : 1; /**< [ 58: 58](RO) JTAG test mode. */
+ uint64_t vrm_err : 1; /**< [ 57: 57](RO) VRM error. VRM did not complete operations within 5.25ms of PLL_DC_OK being
+ asserted. PLLs were released automatically. */
+ uint64_t dis_huk : 1; /**< [ 56: 56](R/W1S) Disable HUK. Secure only and W1S set-only. When set, FUSF_SSK(),
+ FUSF_HUK(), FUSF_EK(), and FUSF_SW() cannot be read.
+ Resets to (!trusted_mode && FUSF_CTL[FJ_DIS_HUK]).
+
+ Software must write a one to this bit when the chain of trust is broken. */
+ uint64_t dis_scan : 1; /**< [ 55: 55](R/W1S) Disable scan. When written to 1, and FUSF_CTL[ROT_LCK] = 1, reads as 1 and scan is not
+ allowed in the part.
+ This state persists across soft and warm resets.
+
+ Internal:
+ This state will persist across a simulation */
+ uint64_t reserved_47_54 : 8;
+ uint64_t c_mul : 7; /**< [ 46: 40](RO/H) Core-clock multiplier. [C_MUL] = (core-clock speed) / (ref-clock speed). The
+ ref-clock speed should always be 50 MHz.
+
+ Internal:
+ [C_MUL] is set from the pi_pll_mul pins plus 6 and is limited by a set of
+ fuses[127:123]. If the fuse value is \> 0, it is compared with the pi_pll_mul[5:1]
+ pins and the smaller value is used. */
+ uint64_t reserved_39 : 1;
+ uint64_t pnr_mul : 6; /**< [ 38: 33](RO/H) Coprocessor-clock multiplier. [PNR_MUL] = (coprocessor-clock speed) / (ref-clock speed).
+ The ref-clock speed should always be 50 MHz.
+
+ Internal:
+ [PNR_MUL] is set from the pi_pnr_pll_mul pins plus 6 and is limited by a set of
+ fuses[122:119]. If the fuse value is \> 0, it is compared with the pi_pnr_pll_mul[4:1]
+ pins and the smaller value is used. */
+ uint64_t lboot_oci : 3; /**< [ 32: 30](R/W1C/H) Reserved.
+ Internal:
+ Last boot cause mask for CCPI; resets only with PLL_DC_OK.
+ \<32\> = Warm reset due to CCPI link 2 going down.
+ \<31\> = Warm reset due to CCPI link 1 going down.
+ \<30\> = Warm reset due to CCPI link 0 going down. */
+ uint64_t lboot_pf_flr : 4; /**< [ 29: 26](R/W1C/H) Last boot was caused by a PF Function Level Reset event.
+ \<29\> = Warm reset due to PF FLR on PEM3.
+ \<28\> = Warm reset due to PF FLR on PEM2.
+ \<27\> = Warm reset due to PF FLR on PEM1.
+ \<26\> = Warm reset due to PF FLR on PEM0. */
+ uint64_t lboot_ckill : 1; /**< [ 25: 25](R/W1C/H) Last boot cause was the chipkill timer expiring. See RST_BOOT[CHIPKILL]. */
+ uint64_t lboot_jtg : 1; /**< [ 24: 24](R/W1C/H) Last boot cause was a write to JTG reset. */
+ uint64_t lboot_ext45 : 6; /**< [ 23: 18](R/W1C/H) Reserved. */
+ uint64_t lboot_ext23 : 6; /**< [ 17: 12](R/W1C/H) Last boot cause mask for PEM3 and PEM2; resets only with PLL_DC_OK.
+ \<17\> = Warm reset due to Cntl3 link-down or hot-reset.
+ \<16\> = Warm reset due to Cntl2 link-down or hot-reset.
+ \<15\> = Cntl3 reset due to PERST3_L pin.
+ \<14\> = Cntl2 reset due to PERST2_L pin.
+ \<13\> = Warm reset due to PERST3_L pin.
+ \<12\> = Warm reset due to PERST2_L pin. */
+ uint64_t lboot : 10; /**< [ 11: 2](R/W1C/H) Last boot cause mask for PEM1 and PEM0; resets only with PLL_DC_OK.
+ \<11\> = Soft reset due to watchdog.
+ \<10\> = Soft reset due to RST_SOFT_RST write.
+ \<9\> = Warm reset due to Cntl1 link-down or hot-reset.
+ \<8\> = Warm reset due to Cntl0 link-down or hot-reset.
+ \<7\> = Cntl1 reset due to PERST1_L pin.
+ \<6\> = Cntl0 reset due to PERST0_L pin.
+ \<5\> = Warm reset due to PERST1_L pin.
+ \<4\> = Warm reset due to PERST0_L pin.
+ \<3\> = Warm reset due to CHIP_RESET_L pin.
+ \<2\> = Cold reset due to PLL_DC_OK pin. */
+ uint64_t rboot : 1; /**< [ 1: 1](R/W) Remote boot. If set, indicates that core 0 will remain in reset after a
+ chip warm/soft reset. The initial value mimics the setting of the [RBOOT_PIN]. */
+ uint64_t rboot_pin : 1; /**< [ 0: 0](RO/H) Remote Boot strap. Indicates the state of remote boot as initially determined by
+ GPIO_STRAP\<2:0\> = RST_BOOT_METHOD_E::REMOTE. If set, core 0 will remain in reset
+ for the cold reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t rboot_pin : 1; /**< [ 0: 0](RO/H) Remote Boot strap. Indicates the state of remote boot as initially determined by
+ GPIO_STRAP\<2:0\> = RST_BOOT_METHOD_E::REMOTE. If set, core 0 will remain in reset
+ for the cold reset. */
+ uint64_t rboot : 1; /**< [ 1: 1](R/W) Remote boot. If set, indicates that core 0 will remain in reset after a
+ chip warm/soft reset. The initial value mimics the setting of the [RBOOT_PIN]. */
+ uint64_t lboot : 10; /**< [ 11: 2](R/W1C/H) Last boot cause mask for PEM1 and PEM0; resets only with PLL_DC_OK.
+ \<11\> = Soft reset due to watchdog.
+ \<10\> = Soft reset due to RST_SOFT_RST write.
+ \<9\> = Warm reset due to Cntl1 link-down or hot-reset.
+ \<8\> = Warm reset due to Cntl0 link-down or hot-reset.
+ \<7\> = Cntl1 reset due to PERST1_L pin.
+ \<6\> = Cntl0 reset due to PERST0_L pin.
+ \<5\> = Warm reset due to PERST1_L pin.
+ \<4\> = Warm reset due to PERST0_L pin.
+ \<3\> = Warm reset due to CHIP_RESET_L pin.
+ \<2\> = Cold reset due to PLL_DC_OK pin. */
+ uint64_t lboot_ext23 : 6; /**< [ 17: 12](R/W1C/H) Last boot cause mask for PEM3 and PEM2; resets only with PLL_DC_OK.
+ \<17\> = Warm reset due to Cntl3 link-down or hot-reset.
+ \<16\> = Warm reset due to Cntl2 link-down or hot-reset.
+ \<15\> = Cntl3 reset due to PERST3_L pin.
+ \<14\> = Cntl2 reset due to PERST2_L pin.
+ \<13\> = Warm reset due to PERST3_L pin.
+ \<12\> = Warm reset due to PERST2_L pin. */
+ uint64_t lboot_ext45 : 6; /**< [ 23: 18](R/W1C/H) Reserved. */
+ uint64_t lboot_jtg : 1; /**< [ 24: 24](R/W1C/H) Last boot cause was a write to JTG reset. */
+ uint64_t lboot_ckill : 1; /**< [ 25: 25](R/W1C/H) Last boot cause was the chipkill timer expiring. See RST_BOOT[CHIPKILL]. */
+ uint64_t lboot_pf_flr : 4; /**< [ 29: 26](R/W1C/H) Last boot was caused by a PF Function Level Reset event.
+ \<29\> = Warm reset due to PF FLR on PEM3.
+ \<28\> = Warm reset due to PF FLR on PEM2.
+ \<27\> = Warm reset due to PF FLR on PEM1.
+ \<26\> = Warm reset due to PF FLR on PEM0. */
+ uint64_t lboot_oci : 3; /**< [ 32: 30](R/W1C/H) Reserved.
+ Internal:
+ Last boot cause mask for CCPI; resets only with PLL_DC_OK.
+ \<32\> = Warm reset due to CCPI link 2 going down.
+ \<31\> = Warm reset due to CCPI link 1 going down.
+ \<30\> = Warm reset due to CCPI link 0 going down. */
+ uint64_t pnr_mul : 6; /**< [ 38: 33](RO/H) Coprocessor-clock multiplier. [PNR_MUL] = (coprocessor-clock speed) / (ref-clock speed).
+ The ref-clock speed should always be 50 MHz.
+
+ Internal:
+ [PNR_MUL] is set from the pi_pnr_pll_mul pins plus 6 and is limited by a set of
+ fuses[122:119]. If the fuse value is \> 0, it is compared with the pi_pnr_pll_mul[4:1]
+ pins and the smaller value is used. */
+ uint64_t reserved_39 : 1;
+ uint64_t c_mul : 7; /**< [ 46: 40](RO/H) Core-clock multiplier. [C_MUL] = (core-clock speed) / (ref-clock speed). The
+ ref-clock speed should always be 50 MHz.
+
+ Internal:
+ [C_MUL] is set from the pi_pll_mul pins plus 6 and is limited by a set of
+ fuses[127:123]. If the fuse value is \> 0, it is compared with the pi_pll_mul[5:1]
+ pins and the smaller value is used. */
+ uint64_t reserved_47_54 : 8;
+ uint64_t dis_scan : 1; /**< [ 55: 55](R/W1S) Disable scan. When written to 1, and FUSF_CTL[ROT_LCK] = 1, reads as 1 and scan is not
+ allowed in the part.
+ This state persists across soft and warm resets.
+
+ Internal:
+ This state will persist across a simulation */
+ uint64_t dis_huk : 1; /**< [ 56: 56](R/W1S) Disable HUK. Secure only and W1S set-only. When set, FUSF_SSK(),
+ FUSF_HUK(), FUSF_EK(), and FUSF_SW() cannot be read.
+ Resets to (!trusted_mode && FUSF_CTL[FJ_DIS_HUK]).
+
+ Software must write a one to this bit when the chain of trust is broken. */
+ uint64_t vrm_err : 1; /**< [ 57: 57](RO) VRM error. VRM did not complete operations within 5.25ms of PLL_DC_OK being
+ asserted. PLLs were released automatically. */
+ uint64_t jt_tstmode : 1; /**< [ 58: 58](RO) JTAG test mode. */
+ uint64_t ckill_ppdis : 1; /**< [ 59: 59](R/W) Chipkill core disable. When set to 1, cores other than core 0 will immediately
+ be disabled when RST_BOOT[CHIPKILL] is set. Writes have no effect when
+ RST_BOOT[CHIPKILL]=1. */
+ uint64_t trusted_mode : 1; /**< [ 60: 60](RO) When set, chip is operating as a trusted device. This bit is asserted when
+ MIO_FUS_DAT2[TRUSTZONE_EN], FUSF_CTL[TZ_FORCE2], or the trusted-mode
+ strap GPIO_STRAP\<10\> is set. */
+ uint64_t ejtagdis : 1; /**< [ 61: 61](R/W) Reserved. */
+ uint64_t jtcsrdis : 1; /**< [ 62: 62](R/W) JTAG CSR disable. When set to 1, during the next warm or soft reset the JTAG TAP
+ controller will be disabled, i.e. DAP_IMP_DAR will be 0. This field resets to 1
+ in trusted-mode, else 0. */
+ uint64_t chipkill : 1; /**< [ 63: 63](R/W1S) A 0-to-1 transition of CHIPKILL starts the CHIPKILL timer. When CHIPKILL=1 and the timer
+ expires, chip reset is asserted internally. The CHIPKILL timer can be stopped only by
+ a reset (cold, warm, soft). The length of the CHIPKILL timer is specified by
+ RST_CKILL[TIMER]. This feature is effectively a delayed warm reset. */
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_rst_boot bdk_rst_boot_t;
+
+#define BDK_RST_BOOT BDK_RST_BOOT_FUNC()
+static inline uint64_t BDK_RST_BOOT_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_BOOT_FUNC(void)
+{
+ return 0x87e006001600ll;
+}
+
+#define typedef_BDK_RST_BOOT bdk_rst_boot_t
+#define bustype_BDK_RST_BOOT BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_BOOT "RST_BOOT"
+#define device_bar_BDK_RST_BOOT 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_BOOT 0
+#define arguments_BDK_RST_BOOT -1,-1,-1,-1
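+
+/* A minimal usage sketch, assuming the BDK_CSR_INIT() helper from bdk-csr.h,
+ * bdk_numa_local() from bdk-numa.c, and field names per the structures above:
+ * with a 50 MHz reference clock, the core and coprocessor clock rates follow
+ * directly from [C_MUL] and [PNR_MUL].
+ *
+ *   BDK_CSR_INIT(rst_boot, bdk_numa_local(), BDK_RST_BOOT);
+ *   uint64_t core_hz = rst_boot.s.c_mul * 50000000ull;   // [C_MUL] * 50 MHz
+ *   uint64_t sclk_hz = rst_boot.s.pnr_mul * 50000000ull; // [PNR_MUL] * 50 MHz
+ */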
+
+/**
+ * Register (RSL) rst_boot_status
+ *
+ * RST Boot Status Register
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_boot_status
+{
+ uint64_t u;
+ struct bdk_rst_boot_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t stat3 : 16; /**< [ 63: 48](R/W) JTAG accessible boot status word three. Used by software to indicate progress of
+ boot. Accessible via JTG/DTX with offset 17.
+
+ This field is always reinitialized on a chip domain reset. */
+ uint64_t stat2 : 16; /**< [ 47: 32](R/W) JTAG accessible boot status word two. Used by software to indicate progress of
+ boot. Accessible via JTG/DTX with offset 16.
+
+ This field is always reinitialized on a chip domain reset. */
+ uint64_t stat1 : 16; /**< [ 31: 16](R/W) JTAG accessible boot status word one. Used by software to indicate progress of
+ boot. Accessible via JTG/DTX with offset 13.
+
+ This field is always reinitialized on a chip domain reset. */
+ uint64_t stat0 : 16; /**< [ 15: 0](R/W) JTAG accessible boot status word zero. Used by software to indicate progress of
+ boot. Accessible via JTG/DTX with offset 12.
+
+ This field is always reinitialized on a chip domain reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t stat0 : 16; /**< [ 15: 0](R/W) JTAG accessible boot status word zero. Used by software to indicate progress of
+ boot. Accessible via JTG/DTX with offset 12.
+
+ This field is always reinitialized on a chip domain reset. */
+ uint64_t stat1 : 16; /**< [ 31: 16](R/W) JTAG accessible boot status word one. Used by software to indicate progress of
+ boot. Accessible via JTG/DTX with offset 13.
+
+ This field is always reinitialized on a chip domain reset. */
+ uint64_t stat2 : 16; /**< [ 47: 32](R/W) JTAG accessible boot status word two. Used by software to indicate progress of
+ boot. Accessible via JTG/DTX with offset 16.
+
+ This field is always reinitialized on a chip domain reset. */
+ uint64_t stat3 : 16; /**< [ 63: 48](R/W) JTAG accessible boot status word three. Used by software to indicate progress of
+ boot. Accessible via JTG/DTX with offset 17.
+
+ This field is always reinitialized on a chip domain reset. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_boot_status_s cn; */
+};
+typedef union bdk_rst_boot_status bdk_rst_boot_status_t;
+
+#define BDK_RST_BOOT_STATUS BDK_RST_BOOT_STATUS_FUNC()
+static inline uint64_t BDK_RST_BOOT_STATUS_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_BOOT_STATUS_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e006001800ll;
+ __bdk_csr_fatal("RST_BOOT_STATUS", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_BOOT_STATUS bdk_rst_boot_status_t
+#define bustype_BDK_RST_BOOT_STATUS BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_BOOT_STATUS "RST_BOOT_STATUS"
+#define device_bar_BDK_RST_BOOT_STATUS 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_BOOT_STATUS 0
+#define arguments_BDK_RST_BOOT_STATUS -1,-1,-1,-1
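+
+/* A minimal usage sketch, assuming the BDK_CSR_MODIFY() helper from
+ * bdk-csr.h: boot firmware can record its progress in the JTAG-visible
+ * STAT* words (CN9XXX only, per the address function above).
+ *
+ *   BDK_CSR_MODIFY(c, bdk_numa_local(), BDK_RST_BOOT_STATUS,
+ *                  c.s.stat0 = 0x1); // arbitrary progress marker
+ */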
+
+/**
+ * Register (RSL) rst_cfg
+ *
+ * RST Configuration Register
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_cfg
+{
+ uint64_t u;
+ struct bdk_rst_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t bist_delay : 58; /**< [ 63: 6](RO/H) Reserved. */
+ uint64_t reserved_3_5 : 3;
+ uint64_t cntl_clr_bist : 1; /**< [ 2: 2](R/W) Perform clear BIST during control-only reset, instead of a full BIST. A warm/soft reset
+ does not change this field. */
+ uint64_t warm_clr_bist : 1; /**< [ 1: 1](R/W) Perform clear BIST during warm reset, instead of a full BIST. A warm/soft reset does not
+ change this field. Note that a cold reset always performs a full BIST. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t warm_clr_bist : 1; /**< [ 1: 1](R/W) Perform clear BIST during warm reset, instead of a full BIST. A warm/soft reset does not
+ change this field. Note that a cold reset always performs a full BIST. */
+ uint64_t cntl_clr_bist : 1; /**< [ 2: 2](R/W) Perform clear BIST during control-only reset, instead of a full BIST. A warm/soft reset
+ does not change this field. */
+ uint64_t reserved_3_5 : 3;
+ uint64_t bist_delay : 58; /**< [ 63: 6](RO/H) Reserved. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_rst_cfg_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t bist_delay : 58; /**< [ 63: 6](RO/H) Reserved. */
+ uint64_t reserved_3_5 : 3;
+ uint64_t cntl_clr_bist : 1; /**< [ 2: 2](R/W) Perform clear BIST during control-only reset, instead of a full BIST. A warm/soft reset
+ does not change this field. */
+ uint64_t warm_clr_bist : 1; /**< [ 1: 1](R/W) Perform clear BIST during warm reset, instead of a full BIST. A warm/soft reset does not
+ change this field. Note that a cold reset always performs a full BIST. */
+ uint64_t soft_clr_bist : 1; /**< [ 0: 0](R/W) Perform clear BIST during soft reset, instead of a full BIST. A warm/soft reset does not
+ change this field. Note that a cold reset always performs a full BIST. */
+#else /* Word 0 - Little Endian */
+ uint64_t soft_clr_bist : 1; /**< [ 0: 0](R/W) Perform clear BIST during soft reset, instead of a full BIST. A warm/soft reset does not
+ change this field. Note that a cold reset always performs a full BIST. */
+ uint64_t warm_clr_bist : 1; /**< [ 1: 1](R/W) Perform clear BIST during warm reset, instead of a full BIST. A warm/soft reset does not
+ change this field. Note that a cold reset always performs a full BIST. */
+ uint64_t cntl_clr_bist : 1; /**< [ 2: 2](R/W) Perform clear BIST during control-only reset, instead of a full BIST. A warm/soft reset
+ does not change this field. */
+ uint64_t reserved_3_5 : 3;
+ uint64_t bist_delay : 58; /**< [ 63: 6](RO/H) Reserved. */
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_rst_cfg_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t clr_bist : 1; /**< [ 0: 0](R/W) Perform clear BIST on each chip domain reset, instead of a full BIST.
+ Note that the first BIST during a cold domain reset is always a full BIST.
+ This field is reinitialized with a cold domain reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t clr_bist : 1; /**< [ 0: 0](R/W) Perform clear BIST on each chip domain reset, instead of a full BIST.
+ Note that the first BIST during a cold domain reset is always a full BIST.
+ This field is reinitialized with a cold domain reset. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_rst_cfg bdk_rst_cfg_t;
+
+#define BDK_RST_CFG BDK_RST_CFG_FUNC()
+static inline uint64_t BDK_RST_CFG_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_CFG_FUNC(void)
+{
+ return 0x87e006001610ll;
+}
+
+#define typedef_BDK_RST_CFG bdk_rst_cfg_t
+#define bustype_BDK_RST_CFG BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_CFG "RST_CFG"
+#define device_bar_BDK_RST_CFG 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_CFG 0
+#define arguments_BDK_RST_CFG -1,-1,-1,-1
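+
+/* A minimal usage sketch, assuming the BDK_CSR_MODIFY() helper from
+ * bdk-csr.h: opting for the faster clear BIST on CN9XXX chip domain
+ * resets, per [CLR_BIST] above.
+ *
+ *   BDK_CSR_MODIFY(c, bdk_numa_local(), BDK_RST_CFG,
+ *                  c.cn9.clr_bist = 1); // skip full BIST on chip resets
+ */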
+
+/**
+ * Register (RSL) rst_chip_domain_w1s
+ *
+ * RST Chip Domain Soft Pulse Reset Register
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_chip_domain_w1s
+{
+ uint64_t u;
+ struct bdk_rst_chip_domain_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t soft_rst : 1; /**< [ 0: 0](R/W1S/H) Soft reset of the entire chip.
+ When set to one, places the entire chip into reset. At the completion
+ of the reset, the field is cleared. This is similar to asserting and
+ deasserting the CHIP_RESET_L pin.
+ When performing a reset, set this bit and then read any register to
+ confirm that the chip is out of reset.
+ This field is always reinitialized on a chip domain reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t soft_rst : 1; /**< [ 0: 0](R/W1S/H) Soft reset of the entire chip.
+ When set to one, places the entire chip into reset. At the completion
+ of the reset, the field is cleared. This is similar to asserting and
+ deasserting the CHIP_RESET_L pin.
+ When performing a reset, set this bit and then read any register to
+ confirm that the chip is out of reset.
+ This field is always reinitialized on a chip domain reset. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_chip_domain_w1s_s cn; */
+};
+typedef union bdk_rst_chip_domain_w1s bdk_rst_chip_domain_w1s_t;
+
+#define BDK_RST_CHIP_DOMAIN_W1S BDK_RST_CHIP_DOMAIN_W1S_FUNC()
+static inline uint64_t BDK_RST_CHIP_DOMAIN_W1S_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_CHIP_DOMAIN_W1S_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e006001810ll;
+ __bdk_csr_fatal("RST_CHIP_DOMAIN_W1S", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_CHIP_DOMAIN_W1S bdk_rst_chip_domain_w1s_t
+#define bustype_BDK_RST_CHIP_DOMAIN_W1S BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_CHIP_DOMAIN_W1S "RST_CHIP_DOMAIN_W1S"
+#define device_bar_BDK_RST_CHIP_DOMAIN_W1S 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_CHIP_DOMAIN_W1S 0
+#define arguments_BDK_RST_CHIP_DOMAIN_W1S -1,-1,-1,-1
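+
+/* A minimal usage sketch, assuming the BDK_CSR_WRITE()/BDK_CSR_READ()
+ * helpers from bdk-csr.h: request a chip domain reset, then read any CSR
+ * to confirm the chip is back out of reset, as [SOFT_RST] describes.
+ *
+ *   BDK_CSR_WRITE(bdk_numa_local(), BDK_RST_CHIP_DOMAIN_W1S, 1);
+ *   (void)BDK_CSR_READ(bdk_numa_local(), BDK_RST_CHIP_DOMAIN_W1S);
+ */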
+
+/**
+ * Register (RSL) rst_ckill
+ *
+ * RST Chipkill Timer Register
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_ckill
+{
+ uint64_t u;
+ struct bdk_rst_ckill_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_47_63 : 17;
+ uint64_t timer : 47; /**< [ 46: 0](R/W/H) Chipkill timer measured in coprocessor-clock cycles. Read requests return the
+ current chipkill timer value. Write operations have no effect when RST_BOOT[CHIPKILL]
+ = 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t timer : 47; /**< [ 46: 0](R/W/H) Chipkill timer measured in coprocessor-clock cycles. Read requests return the
+ current chipkill timer value. Write operations have no effect when RST_BOOT[CHIPKILL]
+ = 1. */
+ uint64_t reserved_47_63 : 17;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_ckill_s cn8; */
+ struct bdk_rst_ckill_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_47_63 : 17;
+ uint64_t timer : 47; /**< [ 46: 0](R/W/H) Chipkill timer measured in 100 MHz PLL reference clocks. Read
+ requests return the current chipkill timer value. Write operations have
+ no effect when RST_BOOT[CHIPKILL] is set.
+ This field is always reinitialized on a chip domain reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t timer : 47; /**< [ 46: 0](R/W/H) Chipkill timer measured in 100 MHz PLL reference clocks. Read
+ requests return the current chipkill timer value. Write operations have
+ no effect when RST_BOOT[CHIPKILL] is set.
+ This field is always reinitialized on a chip domain reset. */
+ uint64_t reserved_47_63 : 17;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_rst_ckill bdk_rst_ckill_t;
+
+#define BDK_RST_CKILL BDK_RST_CKILL_FUNC()
+static inline uint64_t BDK_RST_CKILL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_CKILL_FUNC(void)
+{
+ return 0x87e006001638ll;
+}
+
+#define typedef_BDK_RST_CKILL bdk_rst_ckill_t
+#define bustype_BDK_RST_CKILL BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_CKILL "RST_CKILL"
+#define device_bar_BDK_RST_CKILL 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_CKILL 0
+#define arguments_BDK_RST_CKILL -1,-1,-1,-1
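+
+/* A minimal usage sketch, assuming the BDK_CSR_* helpers from bdk-csr.h and
+ * field names per the structures above: program the timer before starting it,
+ * since [TIMER] writes are ignored once RST_BOOT[CHIPKILL] is set. The
+ * tick-to-seconds conversion assumes the CN9XXX 100 MHz reference clock.
+ *
+ *   BDK_CSR_WRITE(bdk_numa_local(), BDK_RST_CKILL, 5ull * 100000000ull);
+ *   BDK_CSR_MODIFY(c, bdk_numa_local(), BDK_RST_BOOT,
+ *                  c.s.chipkill = 1); // arm; only a reset stops it now
+ */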
+
+/**
+ * Register (RSL) rst_cold_data#
+ *
+ * RST Cold Reset Data Registers
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_cold_datax
+{
+ uint64_t u;
+ struct bdk_rst_cold_datax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Scratch data registers preserved through warm reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Scratch data registers preserved through warm reset. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_cold_datax_s cn8; */
+ struct bdk_rst_cold_datax_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Scratch data registers preserved through chip, core,
+ MCP and SCP domain resets.
+ This field is always reinitialized on a cold domain reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Scratch data registers preserved through chip, core,
+ MCP and SCP domain resets.
+ This field is always reinitialized on a cold domain reset. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_rst_cold_datax bdk_rst_cold_datax_t;
+
+static inline uint64_t BDK_RST_COLD_DATAX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_COLD_DATAX(unsigned long a)
+{
+ if (a<=5)
+ return 0x87e0060017c0ll + 8ll * ((a) & 0x7);
+ __bdk_csr_fatal("RST_COLD_DATAX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_COLD_DATAX(a) bdk_rst_cold_datax_t
+#define bustype_BDK_RST_COLD_DATAX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_COLD_DATAX(a) "RST_COLD_DATAX"
+#define device_bar_BDK_RST_COLD_DATAX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_COLD_DATAX(a) (a)
+#define arguments_BDK_RST_COLD_DATAX(a) (a),-1,-1,-1
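+
+/* A minimal usage sketch, assuming the BDK_CSR_* helpers from bdk-csr.h:
+ * because RST_COLD_DATA(0..5) survive warm and chip domain resets, they
+ * can carry simple state, such as a boot-attempt counter, across reboots.
+ *
+ *   uint64_t tries = BDK_CSR_READ(bdk_numa_local(), BDK_RST_COLD_DATAX(0));
+ *   BDK_CSR_WRITE(bdk_numa_local(), BDK_RST_COLD_DATAX(0), tries + 1);
+ */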
+
+/**
+ * Register (RSL) rst_cold_domain_w1s
+ *
+ * RST Cold Domain Pulse Reset Register
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_cold_domain_w1s
+{
+ uint64_t u;
+ struct bdk_rst_cold_domain_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t soft_rst : 1; /**< [ 0: 0](R/W1S/H) Soft reset of the entire chip emulating a cold domain reset.
+ When set to one, places the entire chip into reset. At the completion
+ of the reset, the field is cleared.
+ This action is similar to deasserting and asserting PLL_DCOK with the
+ exception that external pins are not sampled again.
+ When performing a reset, set this bit and
+ then read any register to confirm that the chip is out of reset.
+ This field is always reinitialized on a cold domain reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t soft_rst : 1; /**< [ 0: 0](R/W1S/H) Soft reset of the entire chip emulating a cold domain reset.
+ When set to one, places the entire chip into reset. At the completion
+ of the reset, the field is cleared.
+ This action is similar to deasserting and asserting PLL_DCOK with the
+ exception that external pins are not sampled again.
+ When performing a reset, set this bit and
+ then read any register to confirm that the chip is out of reset.
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_cold_domain_w1s_s cn; */
+};
+typedef union bdk_rst_cold_domain_w1s bdk_rst_cold_domain_w1s_t;
+
+#define BDK_RST_COLD_DOMAIN_W1S BDK_RST_COLD_DOMAIN_W1S_FUNC()
+static inline uint64_t BDK_RST_COLD_DOMAIN_W1S_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_COLD_DOMAIN_W1S_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e006001808ll;
+ __bdk_csr_fatal("RST_COLD_DOMAIN_W1S", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_COLD_DOMAIN_W1S bdk_rst_cold_domain_w1s_t
+#define bustype_BDK_RST_COLD_DOMAIN_W1S BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_COLD_DOMAIN_W1S "RST_COLD_DOMAIN_W1S"
+#define device_bar_BDK_RST_COLD_DOMAIN_W1S 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_COLD_DOMAIN_W1S 0
+#define arguments_BDK_RST_COLD_DOMAIN_W1S -1,-1,-1,-1
+
+/**
+ * Register (RSL) rst_const
+ *
+ * RST Constant Register
+ * This register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_const
+{
+ uint64_t u;
+ struct bdk_rst_const_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t rst_devs : 8; /**< [ 15: 8](RO) Number of RST_DEV_E enumeration values supported, and size of RST_DEV_MAP(). */
+ uint64_t pems : 8; /**< [ 7: 0](RO) Number of PEMs supported by RST, and size of RST_CTL(), RST_SOFT_PRST(). */
+#else /* Word 0 - Little Endian */
+ uint64_t pems : 8; /**< [ 7: 0](RO) Number of PEMs supported by RST, and size of RST_CTL(), RST_SOFT_PRST(). */
+ uint64_t rst_devs : 8; /**< [ 15: 8](RO) Number of RST_DEV_E enumeration values supported, and size of RST_DEV_MAP(). */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_const_s cn; */
+};
+typedef union bdk_rst_const bdk_rst_const_t;
+
+#define BDK_RST_CONST BDK_RST_CONST_FUNC()
+static inline uint64_t BDK_RST_CONST_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_CONST_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e0060018f8ll;
+ __bdk_csr_fatal("RST_CONST", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_CONST bdk_rst_const_t
+#define bustype_BDK_RST_CONST BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_CONST "RST_CONST"
+#define device_bar_BDK_RST_CONST 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_CONST 0
+#define arguments_BDK_RST_CONST -1,-1,-1,-1
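+
+/* A minimal usage sketch, assuming the BDK_CSR_INIT()/BDK_CSR_READ()
+ * helpers from bdk-csr.h and the BDK_RST_CTLX() accessor defined below:
+ * size PEM iteration from RST_CONST[PEMS] instead of hard-coding a count.
+ *
+ *   BDK_CSR_INIT(rst_const, bdk_numa_local(), BDK_RST_CONST);
+ *   for (unsigned pem = 0; pem < rst_const.s.pems; pem++)
+ *       (void)BDK_CSR_READ(bdk_numa_local(), BDK_RST_CTLX(pem));
+ */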
+
+/**
+ * Register (RSL) rst_core_domain_w1c
+ *
+ * RST Core Domain Soft Reset Clear Register
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_core_domain_w1c
+{
+ uint64_t u;
+ struct bdk_rst_core_domain_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t soft_rst : 1; /**< [ 0: 0](R/W1C/H) Clear soft reset of AP cores, cache, NCB and associated logic.
+ When set to one, the soft reset of the core is removed.
+ Reads of this register show the soft reset state, not the actual core domain
+ reset; other factors may keep the reset active. Reading RST_RESET_ACTIVE[CORE]
+ shows the actual reset state. To compensate for delays in reset, this field
+ should only be set if RST_RESET_ACTIVE[CORE] is set.
+ This field is always reinitialized on a chip domain reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t soft_rst : 1; /**< [ 0: 0](R/W1C/H) Clear soft reset of AP cores, cache, NCB and associated logic.
+ When set to one, the soft reset of the core is removed.
+ Reads of this register show the soft reset state, not the actual core domain
+ reset; other factors may keep the reset active. Reading RST_RESET_ACTIVE[CORE]
+ shows the actual reset state. To compensate for delays in reset, this field
+ should only be set if RST_RESET_ACTIVE[CORE] is set.
+ This field is always reinitialized on a chip domain reset. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_core_domain_w1c_s cn; */
+};
+typedef union bdk_rst_core_domain_w1c bdk_rst_core_domain_w1c_t;
+
+#define BDK_RST_CORE_DOMAIN_W1C BDK_RST_CORE_DOMAIN_W1C_FUNC()
+static inline uint64_t BDK_RST_CORE_DOMAIN_W1C_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_CORE_DOMAIN_W1C_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e006001828ll;
+ __bdk_csr_fatal("RST_CORE_DOMAIN_W1C", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_CORE_DOMAIN_W1C bdk_rst_core_domain_w1c_t
+#define bustype_BDK_RST_CORE_DOMAIN_W1C BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_CORE_DOMAIN_W1C "RST_CORE_DOMAIN_W1C"
+#define device_bar_BDK_RST_CORE_DOMAIN_W1C 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_CORE_DOMAIN_W1C 0
+#define arguments_BDK_RST_CORE_DOMAIN_W1C -1,-1,-1,-1
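+
+/* A minimal usage sketch, assuming the BDK_CSR_* helpers from bdk-csr.h and
+ * a BDK_RST_RESET_ACTIVE accessor with a [CORE] field as referenced above
+ * (both assumptions; that register is defined elsewhere in this header):
+ * only release the core domain once it is actually in reset.
+ *
+ *   BDK_CSR_INIT(active, bdk_numa_local(), BDK_RST_RESET_ACTIVE);
+ *   if (active.s.core)
+ *       BDK_CSR_WRITE(bdk_numa_local(), BDK_RST_CORE_DOMAIN_W1C, 1);
+ */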
+
+/**
+ * Register (RSL) rst_core_domain_w1s
+ *
+ * RST Core Domain Soft Reset Set Register
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_core_domain_w1s
+{
+ uint64_t u;
+ struct bdk_rst_core_domain_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t soft_rst : 1; /**< [ 0: 0](R/W1S/H) Set soft reset of AP cores, cache, NCB and associated logic.
+ When set to one, all logic associated with the core domain is placed in reset.
+ Reads of this register show the soft reset state, not the actual core domain
+ reset; other factors may keep the reset active. Reading RST_RESET_ACTIVE[CORE]
+ shows the actual reset state.
+ It is typically cleared by writing to RST_CORE_DOMAIN_W1C.
+ This field is always reinitialized on a chip domain reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t soft_rst : 1; /**< [ 0: 0](R/W1S/H) Set soft reset of AP cores, cache, NCB and associated logic.
+ When set to one, all logic associated with the core domain is placed in reset.
+ Reads of this register show the soft reset state, not the actual core domain
+ reset; other factors may keep the reset active. Reading RST_RESET_ACTIVE[CORE]
+ shows the actual reset state.
+ It is typically cleared by writing to RST_CORE_DOMAIN_W1C.
+ This field is always reinitialized on a chip domain reset. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_core_domain_w1s_s cn; */
+};
+typedef union bdk_rst_core_domain_w1s bdk_rst_core_domain_w1s_t;
+
+#define BDK_RST_CORE_DOMAIN_W1S BDK_RST_CORE_DOMAIN_W1S_FUNC()
+static inline uint64_t BDK_RST_CORE_DOMAIN_W1S_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_CORE_DOMAIN_W1S_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e006001820ll;
+ __bdk_csr_fatal("RST_CORE_DOMAIN_W1S", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_CORE_DOMAIN_W1S bdk_rst_core_domain_w1s_t
+#define bustype_BDK_RST_CORE_DOMAIN_W1S BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_CORE_DOMAIN_W1S "RST_CORE_DOMAIN_W1S"
+#define device_bar_BDK_RST_CORE_DOMAIN_W1S 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_CORE_DOMAIN_W1S 0
+#define arguments_BDK_RST_CORE_DOMAIN_W1S -1,-1,-1,-1
+
+/**
+ * Register (RSL) rst_core_pll
+ *
+ * RST Core Clock PLL Control Register
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_core_pll
+{
+ uint64_t u;
+ struct bdk_rst_core_pll_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_51_63 : 13;
+ uint64_t cout_sel : 2; /**< [ 50: 49](R/W) Core clockout select.
+ 0x0 = Core clock divided by 16.
+ 0x1 = Core clock tree output divided by 16.
+ 0x2 = PLL0 output divided by 16.
+ 0x3 = PLL1 output divided by 16.
+
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t cout_reset : 1; /**< [ 48: 48](R/W) Core clockout reset. The core clockout should be placed in
+ reset at least 10 PLL reference clocks prior
+ to changing [COUT_SEL]. It should remain under reset for at least 10
+ PLL reference clocks after [COUT_SEL] changes.
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t reserved_45_47 : 3;
+ uint64_t pd_switch : 1; /**< [ 44: 44](R/W) PLL powerdown on switch. When set, hardware automatically
+ powers down the inactive PLL after the switch has occurred.
+ When cleared, the inactive PLL remains in operation.
+ If [PD_SWITCH] is written to a one while both [DLY_SWITCH] and
+ [NXT_PGM] are cleared, then the inactive PLL will immediately power down.
+
+ Note that a powered-down PLL requires an additional 575 reference
+ clocks to become active. This time is automatically added by the
+ hardware.
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t dly_switch : 12; /**< [ 43: 32](R/W/H) Switch the active PLL after delaying this number of 100 MHz clocks.
+ When set to a nonzero value, the hardware will wait for
+ any PLL programming to complete and then switch to the inactive
+ PLL after the specified number of PLL reference clocks. Hardware
+ will add additional clocks if required.
+ This field is always reinitialized on a chip domain reset.
+
+ Internal:
+ Hardware will add counts to maintain 256 cpt_clk/sclk/rclk notifications to hardware.
+ Additional time will be added to wake up powered-down AP cores, but that
+ time is not included in this count. */
+ uint64_t pll1_pd : 1; /**< [ 31: 31](RO) Core PLL1 power down. When set, the PLL is currently powered down. */
+ uint64_t pll0_pd : 1; /**< [ 30: 30](RO) Core PLL0 power down. When set, the PLL is currently powered down. */
+ uint64_t reserved_23_29 : 7;
+ uint64_t init_mul : 7; /**< [ 22: 16](R/W) Core clock multiplier to be used during a core or chip domain
+ reset. Actual frequency is [INIT_MUL] * 50 MHz. The actual value
+ used is limited by RST_PLL_LIMIT[CORE_MAX_MUL].
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t nxt_pgm : 1; /**< [ 15: 15](R/W/H) Program the non-active PLL using [NXT_MUL]. Hardware automatically
+ clears this bit when both the PLL update and any delay specified
+ in [DLY_SWITCH] have completed.
+ This field is always reinitialized on a chip domain reset. */
+ uint64_t nxt_mul : 7; /**< [ 14: 8](R/W) Core PLL frequency to be programmed in 50 MHz increments. The
+ actual value used is limited by RST_PLL_LIMIT[CORE_MAX_MUL] and
+ a minimum setting of 300 MHz.
+ Value will match [INIT_MUL] immediately after a cold or chip domain reset. */
+ uint64_t active_pll : 1; /**< [ 7: 7](RO) Indicates which physical PLL is in use. For diagnostic use only. */
+ uint64_t cur_mul : 7; /**< [ 6: 0](RO/H) Core clock frequency. Actual frequency is [CUR_MUL] * 50 MHz.
+ Value will reflect [NXT_MUL] after [DLY_SWITCH] has completed or [INIT_MUL]
+ immediately after a cold or chip domain reset. In both cases, value
+ is limited by RST_PLL_LIMIT[CORE_MAX_MUL]. */
+#else /* Word 0 - Little Endian */
+ uint64_t cur_mul : 7; /**< [ 6: 0](RO/H) Core clock frequency. Actual frequency is [CUR_MUL] * 50 MHz.
+ Value will reflect [NXT_MUL] after [DLY_SWITCH] has completed or [INIT_MUL]
+ immediately after a cold or chip domain reset. In both cases, value
+ is limited by RST_PLL_LIMIT[CORE_MAX_MUL]. */
+ uint64_t active_pll : 1; /**< [ 7: 7](RO) Indicates which physical PLL is in use. For diagnostic use only. */
+ uint64_t nxt_mul : 7; /**< [ 14: 8](R/W) Core PLL frequency to be programmed in 50 MHz increments. The
+ actual value used is limited by RST_PLL_LIMIT[CORE_MAX_MUL] and
+ a minimum setting of 300 MHz.
+ Value will match [INIT_MUL] immediately after a cold or chip domain reset. */
+ uint64_t nxt_pgm : 1; /**< [ 15: 15](R/W/H) Program the non-active PLL using [NXT_MUL]. Hardware automatically
+ clears this bit when both the PLL update and any delay specified
+ in [DLY_SWITCH] have completed.
+ This field is always reinitialized on a chip domain reset. */
+ uint64_t init_mul : 7; /**< [ 22: 16](R/W) Core clock multiplier to be used during a core or chip domain
+ reset. Actual frequency is [INIT_MUL] * 50 MHz. The actual value
+ used is limited by RST_PLL_LIMIT[CORE_MAX_MUL].
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t reserved_23_29 : 7;
+ uint64_t pll0_pd : 1; /**< [ 30: 30](RO) Core PLL0 power down. When set, the PLL is currently powered down. */
+ uint64_t pll1_pd : 1; /**< [ 31: 31](RO) Core PLL1 power down. When set, the PLL is currently powered down. */
+ uint64_t dly_switch : 12; /**< [ 43: 32](R/W/H) Switch the active PLL after delaying this number of 100 MHz clocks.
+ When set to a nonzero value, the hardware will wait for
+ any PLL programming to complete and then switch to the inactive
+ PLL after the specified number of PLL reference clocks. Hardware
+ will add additional clocks if required.
+ This field is always reinitialized on a chip domain reset.
+
+ Internal:
+ Hardware will add counts to maintain 256 cpt_clk/sclk/rclk notifications to hardware.
+ Additional time will be added to wake up powered-down AP cores, but that
+ time is not included in this count. */
+ uint64_t pd_switch : 1; /**< [ 44: 44](R/W) PLL powerdown on switch. When set, hardware automatically
+ powers down the inactive PLL after the switch has occurred.
+ When cleared, the inactive PLL remains in operation.
+ If [PD_SWITCH] is written to a one while both [DLY_SWITCH] and
+ [NXT_PGM] are cleared, then the inactive PLL will immediately power down.
+
+ Note that a powered-down PLL requires an additional 575 reference
+ clocks to become active. This time is automatically added by the
+ hardware.
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t reserved_45_47 : 3;
+ uint64_t cout_reset : 1; /**< [ 48: 48](R/W) Core clockout reset. The core clockout should be placed in
+ reset at least 10 PLL reference clocks prior
+ to changing [COUT_SEL]. It should remain under reset for at least 10
+ PLL reference clocks after [COUT_SEL] changes.
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t cout_sel : 2; /**< [ 50: 49](R/W) Core clockout select.
+ 0x0 = Core clock divided by 16.
+ 0x1 = Core clock tree output divided by 16.
+ 0x2 = PLL0 output divided by 16.
+ 0x3 = PLL1 output divided by 16.
+
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t reserved_51_63 : 13;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_core_pll_s cn; */
+};
+typedef union bdk_rst_core_pll bdk_rst_core_pll_t;
+
+#define BDK_RST_CORE_PLL BDK_RST_CORE_PLL_FUNC()
+static inline uint64_t BDK_RST_CORE_PLL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_CORE_PLL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e00a001780ll;
+ __bdk_csr_fatal("RST_CORE_PLL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_CORE_PLL bdk_rst_core_pll_t
+#define bustype_BDK_RST_CORE_PLL BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_CORE_PLL "RST_CORE_PLL"
+#define device_bar_BDK_RST_CORE_PLL 0x2 /* PF_BAR2 */
+#define busnum_BDK_RST_CORE_PLL 0
+#define arguments_BDK_RST_CORE_PLL -1,-1,-1,-1
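+
+/* A minimal usage sketch, assuming the BDK_CSR_MODIFY() helper from
+ * bdk-csr.h: retarget the core clock using the dual-PLL scheme described
+ * above -- program the inactive PLL via [NXT_MUL]/[NXT_PGM], then request
+ * the switch with [DLY_SWITCH]. Multiplier and delay values are examples.
+ *
+ *   BDK_CSR_MODIFY(c, bdk_numa_local(), BDK_RST_CORE_PLL,
+ *                  c.s.nxt_mul = 20;  // 20 * 50 MHz = 1 GHz target
+ *                  c.s.nxt_pgm = 1);  // program the inactive PLL
+ *   BDK_CSR_MODIFY(c, bdk_numa_local(), BDK_RST_CORE_PLL,
+ *                  c.s.dly_switch = 100); // switch after ~100 ref clocks
+ */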
+
+/**
+ * Register (RSL) rst_cpt_pll
+ *
+ * RST Crypto Clock PLL Control Register
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_cpt_pll
+{
+ uint64_t u;
+ struct bdk_rst_cpt_pll_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_51_63 : 13;
+ uint64_t cout_sel : 2; /**< [ 50: 49](R/W) Crypto clockout select.
+ 0x0 = Crypto clock divided by 16.
+ 0x1 = Crypto clock tree output divided by 16.
+ 0x2 = PLL0 output divided by 16.
+ 0x3 = PLL1 output divided by 16.
+
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t cout_reset : 1; /**< [ 48: 48](R/W) Crypto clockout reset. The crypto clockout should be placed in
+ reset at least 10 PLL reference clocks prior
+ to changing [COUT_SEL]. It should remain under reset for at least 10
+ PLL reference clocks after [COUT_SEL] changes.
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t reserved_45_47 : 3;
+ uint64_t pd_switch : 1; /**< [ 44: 44](R/W) PLL powerdown on switch. When set, hardware automatically
+ powers down the inactive PLL after the switch has occurred.
+ When cleared, the inactive PLL remains in operation.
+ If [PD_SWITCH] is written to a one while both [DLY_SWITCH] and
+ [NXT_PGM] are cleared, then the inactive PLL will immediately power down.
+
+ Note that a powered-down PLL requires an additional 575 reference
+ clocks to become active. This time is automatically added by the
+ hardware.
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t dly_switch : 12; /**< [ 43: 32](R/W/H) Switch the active PLL after delaying this number of 100 MHz clocks.
+ When set to a nonzero value, the hardware will wait for
+ any PLL programming to complete and then switch to the inactive
+ PLL after the specified number of PLL reference clocks. Hardware
+ will add additional clocks if required.
+ This field is always reinitialized on a chip domain reset.
+
+ Internal:
+ Hardware will add counts to maintain 256 cpt_clk/sclk/rclk notifications to hardware.
+ Additional time will be added to wake up powered-down AP cores, but that
+ time is not included in this count. */
+ uint64_t pll1_pd : 1; /**< [ 31: 31](RO) CPT PLL1 power down. When set, the PLL is currently powered down. */
+ uint64_t pll0_pd : 1; /**< [ 30: 30](RO) CPT PLL0 power down. When set, the PLL is currently powered down. */
+ uint64_t reserved_23_29 : 7;
+ uint64_t init_mul : 7; /**< [ 22: 16](R/W) Crypto clock multiplier to be used during a core or chip domain
+ reset. Actual frequency is [INIT_MUL] * 50 MHz. The actual value
+ used is limited by RST_PLL_LIMIT[CPT_MAX_MUL].
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t nxt_pgm : 1; /**< [ 15: 15](R/W/H) Program the non-active PLL using [NXT_MUL]. Hardware automatically
+ clears this bit when both the PLL update and any delay specified
+ in [DLY_SWITCH] have completed.
+ This field is always reinitialized on a chip domain reset. */
+ uint64_t nxt_mul : 7; /**< [ 14: 8](R/W) Crypto PLL frequency to be programmed in 50 MHz increments. The
+ actual value used is limited by RST_PLL_LIMIT[CPT_MAX_MUL] and
+ a minimum setting of 200 MHz.
+ Value will match [INIT_MUL] immediately after a cold or chip domain reset. */
+ uint64_t active_pll : 1; /**< [ 7: 7](RO) Indicates which physical PLL is in use. For diagnostic use only. */
+ uint64_t cur_mul : 7; /**< [ 6: 0](RO/H) Crypto clock frequency. Actual frequency is [CUR_MUL] * 50 MHz.
+ Value will reflect [NXT_MUL] after [DLY_SWITCH] has completed or [INIT_MUL]
+ immediately after a cold or chip domain reset. In both cases, value
+ is limited by RST_PLL_LIMIT[CPT_MAX_MUL]. */
+#else /* Word 0 - Little Endian */
+ uint64_t cur_mul : 7; /**< [ 6: 0](RO/H) Crypto clock frequency. Actual frequency is [CUR_MUL] * 50 MHz.
+ Value will reflect [NXT_MUL] after [DLY_SWITCH] has completed or [INIT_MUL]
+ immediately after a cold or chip domain reset. In both cases, value
+ is limited by RST_PLL_LIMIT[CPT_MAX_MUL]. */
+ uint64_t active_pll : 1; /**< [ 7: 7](RO) Indicates which physical PLL is in use. For diagnostic use only. */
+ uint64_t nxt_mul : 7; /**< [ 14: 8](R/W) Crypto PLL frequency to be programmed in 50 MHz increments. The
+ actual value used is limited by RST_PLL_LIMIT[CPT_MAX_MUL] and
+ a minimum setting of 200 MHz.
+ Value will match [INIT_MUL] immediately after a cold or chip domain reset. */
+ uint64_t nxt_pgm : 1; /**< [ 15: 15](R/W/H) Program the non-active PLL using [NXT_MUL]. Hardware automatically
+ clears this bit when both the PLL update and any delay specified
+ in [DLY_SWITCH] have completed.
+ This field is always reinitialized on a chip domain reset. */
+ uint64_t init_mul : 7; /**< [ 22: 16](R/W) Crypto clock multiplier to be used during a core or chip domain
+ reset. Actual frequency is [INIT_MUL] * 50 MHz. The actual value
+ used is limited by RST_PLL_LIMIT[CPT_MAX_MUL].
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t reserved_23_29 : 7;
+ uint64_t pll0_pd : 1; /**< [ 30: 30](RO) CPT PLL0 power down. When set, the PLL is currently powered down. */
+ uint64_t pll1_pd : 1; /**< [ 31: 31](RO) CPT PLL1 power down. When set, the PLL is currently powered down. */
+ uint64_t dly_switch : 12; /**< [ 43: 32](R/W/H) Switch the active PLL after delaying this number of 100 MHz clocks.
+ When set to a nonzero value, the hardware will wait for
+ any PLL programming to complete and then switch to the inactive
+ PLL after the specified number of PLL reference clocks. Hardware
+ will add additional clocks if required.
+ This field is always reinitialized on a chip domain reset.
+
+ Internal:
+ Hardware will add counts to maintain 256 cpt_clk/sclk/rclk notifications to hardware.
+ Additional time will be added to wake up powered-down AP cores, but that
+ time is not included in this count. */
+ uint64_t pd_switch : 1; /**< [ 44: 44](R/W) PLL powerdown on switch. When set, hardware automatically
+ powers down the inactive PLL after the switch has occurred.
+ When cleared, the inactive PLL remains in operation.
+ If [PD_SWITCH] is written to a one while both [DLY_SWITCH] and
+ [NXT_PGM] are cleared, then the inactive PLL will immediately power down.
+
+ Note that a powered-down PLL requires an additional 575 reference
+ clocks to become active. This time is automatically added by the
+ hardware.
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t reserved_45_47 : 3;
+ uint64_t cout_reset : 1; /**< [ 48: 48](R/W) Crypto clockout reset. The crypto clockout should be placed in
+ reset at least 10 PLL reference clocks prior
+ to changing [COUT_SEL]. It should remain under reset for at least 10
+ PLL reference clocks after [COUT_SEL] changes.
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t cout_sel : 2; /**< [ 50: 49](R/W) Crypto clockout select.
+ 0x0 = Crypto clock divided by 16.
+ 0x1 = Crypto clock tree output divided by 16.
+ 0x2 = PLL0 output divided by 16.
+ 0x3 = PLL1 output divided by 16.
+
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t reserved_51_63 : 13;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_cpt_pll_s cn; */
+};
+typedef union bdk_rst_cpt_pll bdk_rst_cpt_pll_t;
+
+#define BDK_RST_CPT_PLL BDK_RST_CPT_PLL_FUNC()
+static inline uint64_t BDK_RST_CPT_PLL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_CPT_PLL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e00a001778ll;
+ __bdk_csr_fatal("RST_CPT_PLL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_CPT_PLL bdk_rst_cpt_pll_t
+#define bustype_BDK_RST_CPT_PLL BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_CPT_PLL "RST_CPT_PLL"
+#define device_bar_BDK_RST_CPT_PLL 0x2 /* PF_BAR2 */
+#define busnum_BDK_RST_CPT_PLL 0
+#define arguments_BDK_RST_CPT_PLL -1,-1,-1,-1
+
+/**
+ * Register (RSL) rst_ctl#
+ *
+ * RST Controllers Registers
+ * This register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_ctlx
+{
+ uint64_t u;
+ struct bdk_rst_ctlx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t reset_type : 1; /**< [ 13: 13](R/W) Type of reset generated internally by PCI MAC PF FLR, link down/hot reset and
+ PERST events. See [PF_FLR_CHIP], [RST_LINK] and [RST_CHIP].
+
+ 0 = Chip domain reset.
+ 1 = Core domain reset.
+
+ On cold reset, this field is initialized as follows:
+ _ 0 when RST_CTL()[HOST_MODE] = 0.
+ _ 1 when RST_CTL()[HOST_MODE] = 1. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t pf_flr_chip : 1; /**< [ 10: 10](R/W) Controls whether corresponding controller PF Function Level Reset causes a chip warm
+ reset like CHIP_RESET_L. A warm/soft reset does not change this field.
+ On cold reset, this field is initialized as follows:
+
+ _ 0 when RST_CTL()[HOST_MODE] = 1.
+
+ _ 1 when RST_CTL()[HOST_MODE] = 0. */
+ uint64_t prst_link : 1; /**< [ 9: 9](R/W) PEM reset on link down.
+ 0 = Link-down or hot-reset will set RST_INT[RST_LINK] for the corresponding
+ controller, and (provided it is properly configured) the link should come back up
+ automatically.
+ 1 = Link-down or hot-reset will set RST_INT[RST_LINK] for the corresponding
+ controller, and set RST_SOFT_PRST()[SOFT_PRST]. This will hold the link in reset
+ until software clears RST_SOFT_PRST()[SOFT_PRST].
+
+ A warm/soft reset does not change this field. On cold reset, this field is initialized to
+ 0. */
+ uint64_t rst_done : 1; /**< [ 8: 8](RO/H) Reset done. Indicates the controller reset status. [RST_DONE] is always 0
+ (i.e. the controller is held in reset) when
+ * RST_SOFT_PRST()[SOFT_PRST] = 1, or
+ * [RST_RCV] = 1 and PERST*_L pin is asserted. */
+ uint64_t rst_link : 1; /**< [ 7: 7](R/W) Reset on link down. When set, a corresponding controller link-down reset or hot
+ reset causes a warm chip reset.
+
+ On cold reset, this field is initialized as follows:
+
+ _ 0 when RST_CTL()[HOST_MODE] = 1.
+
+ _ 1 when RST_CTL()[HOST_MODE] = 0.
+
+ Note that a link-down or hot-reset event can never cause a warm chip reset when the
+ controller is in reset (i.e. can never cause a warm reset when [RST_DONE] = 0). */
+ uint64_t host_mode : 1; /**< [ 6: 6](RO) For all controllers this field is set as host. */
+ uint64_t reserved_4_5 : 2;
+ uint64_t rst_drv : 1; /**< [ 3: 3](R/W) Controls whether PERST*_L is driven. A warm/soft reset does not change this field. On cold
+ reset, this field is initialized as follows:
+
+ _ 0 when RST_CTL()[HOST_MODE] = 0.
+
+ _ 1 when RST_CTL()[HOST_MODE] = 1.
+
+ When set, CNXXXX drives the corresponding PERST*_L pin. Otherwise, CNXXXX does not drive
+ the corresponding PERST*_L pin. */
+ uint64_t rst_rcv : 1; /**< [ 2: 2](R/W) Reset received. Controls whether PERST*_L is received. A warm/soft reset does
+ not change this field. On cold reset, this field is initialized as follows:
+
+ _ 0 when RST_CTL()[HOST_MODE] = 1.
+
+ _ 1 when RST_CTL()[HOST_MODE] = 0.
+
+ When [RST_RCV] = 1, the PERST*_L value is received and can be used to reset the
+ controller and (optionally, based on [RST_CHIP]) warm reset the chip.
+
+ When [RST_RCV] = 1 (and [RST_CHIP] = 0), RST_INT[PERST*] gets set when the PERST*_L
+ pin asserts. (This interrupt can alert software whenever the external reset pin initiates
+ a controller reset sequence.)
+
+ [RST_VAL] gives the PERST*_L pin value when [RST_RCV] = 1.
+
+ When [RST_RCV] = 0, the PERST*_L pin value is ignored. */
+ uint64_t rst_chip : 1; /**< [ 1: 1](R/W) Controls whether PERST*_L causes a chip warm reset like CHIP_RESET_L. A warm/soft reset
+ does not change this field. On cold reset, this field is initialized to 0.
+
+ When [RST_RCV] = 0, [RST_CHIP] is ignored.
+
+ When [RST_RCV] = 1, [RST_CHIP] = 1, and PERST*_L asserts, a chip warm reset is generated. */
+ uint64_t rst_val : 1; /**< [ 0: 0](RO/H) Read-only access to PERST*_L. Unpredictable when [RST_RCV] = 0.
+
+ Reads as 1 when [RST_RCV] = 1 and the PERST*_L pin is asserted.
+
+ Reads as 0 when [RST_RCV] = 1 and the PERST*_L pin is not asserted. */
+#else /* Word 0 - Little Endian */
+ uint64_t rst_val : 1; /**< [ 0: 0](RO/H) Read-only access to PERST*_L. Unpredictable when [RST_RCV] = 0.
+
+ Reads as 1 when [RST_RCV] = 1 and the PERST*_L pin is asserted.
+
+ Reads as 0 when [RST_RCV] = 1 and the PERST*_L pin is not asserted. */
+ uint64_t rst_chip : 1; /**< [ 1: 1](R/W) Controls whether PERST*_L causes a chip warm reset like CHIP_RESET_L. A warm/soft reset
+ does not change this field. On cold reset, this field is initialized to 0.
+
+ When [RST_RCV] = 0, [RST_CHIP] is ignored.
+
+ When [RST_RCV] = 1, [RST_CHIP] = 1, and PERST*_L asserts, a chip warm reset is generated. */
+ uint64_t rst_rcv : 1; /**< [ 2: 2](R/W) Reset received. Controls whether PERST*_L is received. A warm/soft reset does
+ not change this field. On cold reset, this field is initialized as follows:
+
+ _ 0 when RST_CTL()[HOST_MODE] = 1.
+
+ _ 1 when RST_CTL()[HOST_MODE] = 0.
+
+ When [RST_RCV] = 1, the PERST*_L value is received and can be used to reset the
+ controller and (optionally, based on [RST_CHIP]) warm reset the chip.
+
+ When [RST_RCV] = 1 (and [RST_CHIP] = 0), RST_INT[PERST*] gets set when the PERST*_L
+ pin asserts. (This interrupt can alert software whenever the external reset pin initiates
+ a controller reset sequence.)
+
+ [RST_VAL] gives the PERST*_L pin value when [RST_RCV] = 1.
+
+ When [RST_RCV] = 0, the PERST*_L pin value is ignored. */
+ uint64_t rst_drv : 1; /**< [ 3: 3](R/W) Controls whether PERST*_L is driven. A warm/soft reset does not change this field. On cold
+ reset, this field is initialized as follows:
+
+ _ 0 when RST_CTL()[HOST_MODE] = 0.
+
+ _ 1 when RST_CTL()[HOST_MODE] = 1.
+
+ When set, CNXXXX drives the corresponding PERST*_L pin. Otherwise, CNXXXX does not drive
+ the corresponding PERST*_L pin. */
+ uint64_t reserved_4_5 : 2;
+ uint64_t host_mode : 1; /**< [ 6: 6](RO) For all controllers this field is set as host. */
+ uint64_t rst_link : 1; /**< [ 7: 7](R/W) Reset on link down. When set, a corresponding controller link-down reset or hot
+ reset causes a warm chip reset.
+
+ On cold reset, this field is initialized as follows:
+
+ _ 0 when RST_CTL()[HOST_MODE] = 1.
+
+ _ 1 when RST_CTL()[HOST_MODE] = 0.
+
+ Note that a link-down or hot-reset event can never cause a warm chip reset when the
+ controller is in reset (i.e. can never cause a warm reset when [RST_DONE] = 0). */
+ uint64_t rst_done : 1; /**< [ 8: 8](RO/H) Reset done. Indicates the controller reset status. [RST_DONE] is always 0
+ (i.e. the controller is held in reset) when
+ * RST_SOFT_PRST()[SOFT_PRST] = 1, or
+ * [RST_RCV] = 1 and PERST*_L pin is asserted. */
+ uint64_t prst_link : 1; /**< [ 9: 9](R/W) PEM reset on link down.
+ 0 = Link-down or hot-reset will set RST_INT[RST_LINK] for the corresponding
+ controller, and (provided it is properly configured) the link should come back up
+ automatically.
+ 1 = Link-down or hot-reset will set RST_INT[RST_LINK] for the corresponding
+ controller, and set RST_SOFT_PRST()[SOFT_PRST]. This will hold the link in reset
+ until software clears RST_SOFT_PRST()[SOFT_PRST].
+
+ A warm/soft reset does not change this field. On cold reset, this field is initialized to
+ 0. */
+ uint64_t pf_flr_chip : 1; /**< [ 10: 10](R/W) Controls whether corresponding controller PF Function Level Reset causes a chip warm
+ reset like CHIP_RESET_L. A warm/soft reset does not change this field.
+ On cold reset, this field is initialized as follows:
+
+ _ 0 when RST_CTL()[HOST_MODE] = 1.
+
+ _ 1 when RST_CTL()[HOST_MODE] = 0. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t reset_type : 1; /**< [ 13: 13](R/W) Type of reset generated internally by PCI MAC PF FLR, link down/hot reset and
+ PERST events. See [PF_FLR_CHIP], [RST_LINK] and [RST_CHIP].
+
+ 0 = Chip domain reset.
+ 1 = Core domain reset.
+
+ On cold reset, this field is initialized as follows:
+ _ 0 when RST_CTL()[HOST_MODE] = 0.
+ _ 1 when RST_CTL()[HOST_MODE] = 1. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_rst_ctlx_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t reset_type : 1; /**< [ 13: 13](R/W) Type of reset generated internally by PCI MAC PF FLR, link down/hot reset and
+ PERST events. See [PF_FLR_CHIP], [RST_LINK] and [RST_CHIP].
+
+ 0 = Chip domain reset.
+ 1 = Core domain reset.
+
+ On cold reset, this field is initialized as follows:
+ _ 0 when RST_CTL()[HOST_MODE] = 0.
+ _ 1 when RST_CTL()[HOST_MODE] = 1. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t pf_flr_chip : 1; /**< [ 10: 10](R/W) PF FLR internal reset enable.
+ 0 = PF FLR events will not cause a reset.
+ 1 = A PF FLR event received by the PCIe logic causes the internal reset
+ specified by [RESET_TYPE].
+
+ On cold reset, this field is initialized as follows:
+ _ 0 when RST_CTL()[HOST_MODE] = 1.
+ _ 1 when RST_CTL()[HOST_MODE] = 0. */
+ uint64_t prst_link : 1; /**< [ 9: 9](R/W) PEM reset on link down.
+ 0 = Link-down or hot-reset will set RST_INT[RST_LINK] for the corresponding
+ controller, and (provided properly configured) the link should come back up
+ automatically.
+ 1 = Link-down or hot-reset will set RST_INT[RST_LINK] for the corresponding
+ controller, and set RST_SOFT_PRST()[SOFT_PRST]. This will hold the link in reset
+ until software clears RST_SOFT_PRST()[SOFT_PRST].
+
+ A warm/soft reset does not change this field. On cold reset, this field is
+ initialized to 0. */
+ uint64_t rst_done : 1; /**< [ 8: 8](RO/H) Reset done. Indicates the controller reset status. [RST_DONE] is always 0
+ (i.e. the controller is held in reset) when
+ * RST_SOFT_PRST()[SOFT_PRST] = 1, or
+ * [RST_RCV] = 1 and PERST*_L pin is asserted. */
+ uint64_t rst_link : 1; /**< [ 7: 7](R/W) Link down / hot reset event internal reset enable.
+ 0 = Link down or hot reset do not cause a chip/core domain reset.
+ 1 = A link-down or hot-reset event on the PCIe interface causes the internal
+ reset specified by [RESET_TYPE].
+
+ The field is initialized as follows:
+ _ 0 when RST_CTL()[HOST_MODE] is set.
+ _ 1 when RST_CTL()[HOST_MODE] is cleared.
+
+ Note that a link-down or hot reset event can never cause a domain reset when the
+ controller is already in reset (i.e. when [RST_DONE] = 0). */
+ uint64_t host_mode : 1; /**< [ 6: 6](RO/H) Read-only access to the corresponding PEM()_CFG[HOSTMD] field
+ indicating PEMn is root complex (host). */
+ uint64_t reserved_4_5 : 2;
+ uint64_t rst_drv : 1; /**< [ 3: 3](R/W) Controls whether PERST*_L is driven.
+ This field is always reinitialized on a cold domain reset.
+ The field is initialized as follows:
+ _ 0 when RST_CTL()[HOST_MODE] is cleared.
+ _ 1 when RST_CTL()[HOST_MODE] is set. */
+ uint64_t rst_rcv : 1; /**< [ 2: 2](R/W) Reset received. Controls whether PERST*_L is received.
+ This field is always reinitialized on a cold domain reset.
+ The field is initialized as follows:
+ _ 0 when RST_CTL()[HOST_MODE] is set.
+ _ 1 when RST_CTL()[HOST_MODE] is cleared.
+
+ When [RST_RCV] = 1, the PERST*_L value is received and can be used to reset the
+ controller and (optionally, based on [RST_CHIP]) cause a domain reset.
+
+ When [RST_RCV] = 1 (and [RST_CHIP] = 0), RST_INT[PERST*] gets set when the PERST*_L
+ pin asserts. (This interrupt can alert software whenever the external reset pin initiates
+ a controller reset sequence.)
+
+ [RST_VAL] gives the PERST*_L pin value when [RST_RCV] = 1.
+
+ When [RST_RCV] = 0, the PERST*_L pin value is ignored. */
+ uint64_t rst_chip : 1; /**< [ 1: 1](R/W) PERST internal reset enable. When set along with [RST_RCV],
+ logic will generate an internal reset specified by [RESET_TYPE]
+ when the corresponding PERST*_L pin is asserted. When cleared or
+ when [RST_RCV] is cleared, the PERST*_L does not cause an internal reset.
+
+ During a cold domain reset this field is initialized to zero. */
+ uint64_t rst_val : 1; /**< [ 0: 0](RO/H) Read-only access to PERST*_L. Unpredictable when [RST_RCV] = 0.
+
+ Reads as 1 when [RST_RCV] = 1 and the PERST*_L pin is asserted.
+ Reads as 0 when [RST_RCV] = 1 and the PERST*_L pin is not asserted. */
+#else /* Word 0 - Little Endian */
+ uint64_t rst_val : 1; /**< [ 0: 0](RO/H) Read-only access to PERST*_L. Unpredictable when [RST_RCV] = 0.
+
+ Reads as 1 when [RST_RCV] = 1 and the PERST*_L pin is asserted.
+ Reads as 0 when [RST_RCV] = 1 and the PERST*_L pin is not asserted. */
+ uint64_t rst_chip : 1; /**< [ 1: 1](R/W) PERST internal reset enable. When set along with [RST_RCV],
+ logic will generate an internal reset specified by [RESET_TYPE]
+ when the corresponding PERST*_L pin is asserted. When cleared or
+ when [RST_RCV] is cleared, the PERST*_L does not cause an internal reset.
+
+ During a cold domain reset this field is initialized to zero. */
+ uint64_t rst_rcv : 1; /**< [ 2: 2](R/W) Reset received. Controls whether PERST*_L is received.
+ This field is always reinitialized on a cold domain reset.
+ The field is initialized as follows:
+ _ 0 when RST_CTL()[HOST_MODE] is set.
+ _ 1 when RST_CTL()[HOST_MODE] is cleared.
+
+ When [RST_RCV] = 1, the PERST*_L value is received and can be used to reset the
+ controller and (optionally, based on [RST_CHIP]) cause a domain reset.
+
+ When [RST_RCV] = 1 (and [RST_CHIP] = 0), RST_INT[PERST*] gets set when the PERST*_L
+ pin asserts. (This interrupt can alert software whenever the external reset pin initiates
+ a controller reset sequence.)
+
+ [RST_VAL] gives the PERST*_L pin value when [RST_RCV] = 1.
+
+ When [RST_RCV] = 0, the PERST*_L pin value is ignored. */
+ uint64_t rst_drv : 1; /**< [ 3: 3](R/W) Controls whether PERST*_L is driven.
+ This field is always reinitialized on a cold domain reset.
+ The field is initialized as follows:
+ _ 0 when RST_CTL()[HOST_MODE] is cleared.
+ _ 1 when RST_CTL()[HOST_MODE] is set. */
+ uint64_t reserved_4_5 : 2;
+ uint64_t host_mode : 1; /**< [ 6: 6](RO/H) Read-only access to the corresponding PEM()_CFG[HOSTMD] field
+ indicating PEMn is root complex (host). */
+ uint64_t rst_link : 1; /**< [ 7: 7](R/W) Link down / hot reset event internal reset enable.
+ 0 = Link down or hot reset do not cause a chip/core domain reset.
+ 1 = A link-down or hot-reset event on the PCIe interface causes the internal
+ reset specified by [RESET_TYPE].
+
+ The field is initialized as follows:
+ _ 0 when RST_CTL()[HOST_MODE] is set.
+ _ 1 when RST_CTL()[HOST_MODE] is cleared.
+
+ Note that a link-down or hot reset event can never cause a domain reset when the
+ controller is already in reset (i.e. when [RST_DONE] = 0). */
+ uint64_t rst_done : 1; /**< [ 8: 8](RO/H) Reset done. Indicates the controller reset status. [RST_DONE] is always 0
+ (i.e. the controller is held in reset) when
+ * RST_SOFT_PRST()[SOFT_PRST] = 1, or
+ * [RST_RCV] = 1 and PERST*_L pin is asserted. */
+ uint64_t prst_link : 1; /**< [ 9: 9](R/W) PEM reset on link down.
+ 0 = Link-down or hot-reset will set RST_INT[RST_LINK] for the corresponding
+ controller, and (provided properly configured) the link should come back up
+ automatically.
+ 1 = Link-down or hot-reset will set RST_INT[RST_LINK] for the corresponding
+ controller, and set RST_SOFT_PRST()[SOFT_PRST]. This will hold the link in reset
+ until software clears RST_SOFT_PRST()[SOFT_PRST].
+
+ A warm/soft reset does not change this field. On cold reset, this field is
+ initialized to 0. */
+ uint64_t pf_flr_chip : 1; /**< [ 10: 10](R/W) PF FLR internal reset enable.
+ 0 = PF FLR events will not cause a reset.
+ 1 = A PF FLR event received by the PCIe logic causes the internal reset
+ specified by [RESET_TYPE].
+
+ On cold reset, this field is initialized as follows:
+ _ 0 when RST_CTL()[HOST_MODE] = 1.
+ _ 1 when RST_CTL()[HOST_MODE] = 0. */
+ uint64_t reserved_11_12 : 2;
+ uint64_t reset_type : 1; /**< [ 13: 13](R/W) Type of reset generated internally by PCI MAC PF FLR, link down/hot reset and
+ PERST events. See [PF_FLR_CHIP], [RST_LINK] and [RST_CHIP].
+
+ 0 = Chip domain reset.
+ 1 = Core domain reset.
+
+ On cold reset, this field is initialized as follows:
+ _ 0 when RST_CTL()[HOST_MODE] = 0.
+ _ 1 when RST_CTL()[HOST_MODE] = 1. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_rst_ctlx_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_10_63 : 54;
+ uint64_t prst_link : 1; /**< [ 9: 9](R/W) PEM reset on link down.
+ 0 = Link-down or hot-reset will set RST_INT[RST_LINK] for the corresponding
+ controller, and (provided properly configured) the link should come back up
+ automatically.
+ 1 = Link-down or hot-reset will set RST_INT[RST_LINK] for the corresponding
+ controller, and set RST_SOFT_PRST()[SOFT_PRST]. This will hold the link in reset
+ until software clears RST_SOFT_PRST()[SOFT_PRST].
+
+ A warm/soft reset does not change this field. On cold reset, this field is initialized to
+ 0. */
+ uint64_t rst_done : 1; /**< [ 8: 8](RO/H) Reset done. Indicates the controller reset status. [RST_DONE] is always 0
+ (i.e. the controller is held in reset) when
+ * RST_SOFT_PRST()[SOFT_PRST] = 1, or
+ * [RST_RCV] = 1 and PERST*_L pin is asserted. */
+ uint64_t rst_link : 1; /**< [ 7: 7](R/W) Reset on link down. When set, a corresponding controller link-down reset or hot
+ reset causes a warm chip reset.
+
+ On cold reset, this field is initialized as follows:
+
+ _ 0 when RST_CTL()[HOST_MODE] = 1.
+
+ _ 1 when RST_CTL()[HOST_MODE] = 0.
+
+ Note that a link-down or hot-reset event can never cause a warm chip reset when the
+ controller is in reset (i.e. can never cause a warm reset when [RST_DONE] = 0). */
+ uint64_t host_mode : 1; /**< [ 6: 6](RO) For all controllers, this field is set as host. */
+ uint64_t reserved_4_5 : 2;
+ uint64_t rst_drv : 1; /**< [ 3: 3](R/W) Controls whether PERST*_L is driven. A warm/soft reset does not change this field. On cold
+ reset, this field is initialized as follows:
+
+ _ 0 when RST_CTL()[HOST_MODE] = 0.
+
+ _ 1 when RST_CTL()[HOST_MODE] = 1.
+
+ When set, CNXXXX drives the corresponding PERST*_L pin. Otherwise, CNXXXX does not drive
+ the corresponding PERST*_L pin. */
+ uint64_t rst_rcv : 1; /**< [ 2: 2](R/W) Reset received. Controls whether PERST*_L is received. A warm/soft reset does
+ not change this field. On cold reset, this field is initialized as follows:
+
+ _ 0 when RST_CTL()[HOST_MODE] = 1.
+
+ _ 1 when RST_CTL()[HOST_MODE] = 0.
+
+ When [RST_RCV] = 1, the PERST*_L value is received and can be used to reset the
+ controller and (optionally, based on [RST_CHIP]) warm reset the chip.
+
+ When [RST_RCV] = 1 (and [RST_CHIP] = 0), RST_INT[PERST*] gets set when the PERST*_L
+ pin asserts. (This interrupt can alert software whenever the external reset pin initiates
+ a controller reset sequence.)
+
+ [RST_VAL] gives the PERST*_L pin value when [RST_RCV] = 1.
+
+ When [RST_RCV] = 0, the PERST*_L pin value is ignored. */
+ uint64_t rst_chip : 1; /**< [ 1: 1](R/W) Controls whether PERST*_L causes a chip warm reset like CHIP_RESET_L. A warm/soft reset
+ does not change this field. On cold reset, this field is initialized to 0.
+
+ When [RST_RCV] = 0, [RST_CHIP] is ignored.
+
+ When [RST_RCV] = 1, [RST_CHIP] = 1, and PERST*_L asserts, a chip warm reset is generated. */
+ uint64_t rst_val : 1; /**< [ 0: 0](RO/H) Read-only access to PERST*_L. Unpredictable when [RST_RCV] = 0.
+
+ Reads as 1 when [RST_RCV] = 1 and the PERST*_L pin is asserted.
+
+ Reads as 0 when [RST_RCV] = 1 and the PERST*_L pin is not asserted. */
+#else /* Word 0 - Little Endian */
+ uint64_t rst_val : 1; /**< [ 0: 0](RO/H) Read-only access to PERST*_L. Unpredictable when [RST_RCV] = 0.
+
+ Reads as 1 when [RST_RCV] = 1 and the PERST*_L pin is asserted.
+
+ Reads as 0 when [RST_RCV] = 1 and the PERST*_L pin is not asserted. */
+ uint64_t rst_chip : 1; /**< [ 1: 1](R/W) Controls whether PERST*_L causes a chip warm reset like CHIP_RESET_L. A warm/soft reset
+ does not change this field. On cold reset, this field is initialized to 0.
+
+ When [RST_RCV] = 0, [RST_CHIP] is ignored.
+
+ When [RST_RCV] = 1, [RST_CHIP] = 1, and PERST*_L asserts, a chip warm reset is generated. */
+ uint64_t rst_rcv : 1; /**< [ 2: 2](R/W) Reset received. Controls whether PERST*_L is received. A warm/soft reset does
+ not change this field. On cold reset, this field is initialized as follows:
+
+ _ 0 when RST_CTL()[HOST_MODE] = 1.
+
+ _ 1 when RST_CTL()[HOST_MODE] = 0.
+
+ When [RST_RCV] = 1, the PERST*_L value is received and can be used to reset the
+ controller and (optionally, based on [RST_CHIP]) warm reset the chip.
+
+ When [RST_RCV] = 1 (and [RST_CHIP] = 0), RST_INT[PERST*] gets set when the PERST*_L
+ pin asserts. (This interrupt can alert software whenever the external reset pin initiates
+ a controller reset sequence.)
+
+ [RST_VAL] gives the PERST*_L pin value when [RST_RCV] = 1.
+
+ When [RST_RCV] = 0, the PERST*_L pin value is ignored. */
+ uint64_t rst_drv : 1; /**< [ 3: 3](R/W) Controls whether PERST*_L is driven. A warm/soft reset does not change this field. On cold
+ reset, this field is initialized as follows:
+
+ _ 0 when RST_CTL()[HOST_MODE] = 0.
+
+ _ 1 when RST_CTL()[HOST_MODE] = 1.
+
+ When set, CNXXXX drives the corresponding PERST*_L pin. Otherwise, CNXXXX does not drive
+ the corresponding PERST*_L pin. */
+ uint64_t reserved_4_5 : 2;
+ uint64_t host_mode : 1; /**< [ 6: 6](RO) For all controllers, this field is set as host. */
+ uint64_t rst_link : 1; /**< [ 7: 7](R/W) Reset on link down. When set, a corresponding controller link-down reset or hot
+ reset causes a warm chip reset.
+
+ On cold reset, this field is initialized as follows:
+
+ _ 0 when RST_CTL()[HOST_MODE] = 1.
+
+ _ 1 when RST_CTL()[HOST_MODE] = 0.
+
+ Note that a link-down or hot-reset event can never cause a warm chip reset when the
+ controller is in reset (i.e. can never cause a warm reset when [RST_DONE] = 0). */
+ uint64_t rst_done : 1; /**< [ 8: 8](RO/H) Reset done. Indicates the controller reset status. [RST_DONE] is always 0
+ (i.e. the controller is held in reset) when
+ * RST_SOFT_PRST()[SOFT_PRST] = 1, or
+ * [RST_RCV] = 1 and PERST*_L pin is asserted. */
+ uint64_t prst_link : 1; /**< [ 9: 9](R/W) PEM reset on link down.
+ 0 = Link-down or hot-reset will set RST_INT[RST_LINK] for the corresponding
+ controller, and (provided properly configured) the link should come back up
+ automatically.
+ 1 = Link-down or hot-reset will set RST_INT[RST_LINK] for the corresponding
+ controller, and set RST_SOFT_PRST()[SOFT_PRST]. This will hold the link in reset
+ until software clears RST_SOFT_PRST()[SOFT_PRST].
+
+ A warm/soft reset does not change this field. On cold reset, this field is initialized to
+ 0. */
+ uint64_t reserved_10_63 : 54;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_rst_ctlx_cn81xx cn88xx; */
+ struct bdk_rst_ctlx_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_11_63 : 53;
+ uint64_t pf_flr_chip : 1; /**< [ 10: 10](R/W) Controls whether corresponding controller PF Function Level Reset causes a chip warm
+ reset like CHIP_RESET_L. A warm/soft reset does not change this field.
+ On cold reset, this field is initialized as follows:
+
+ _ 0 when RST_CTL()[HOST_MODE] = 1.
+
+ _ 1 when RST_CTL()[HOST_MODE] = 0. */
+ uint64_t prst_link : 1; /**< [ 9: 9](R/W) PEM reset on link down.
+ 0 = Link-down or hot-reset will set RST_INT[RST_LINK] for the corresponding
+ controller, and (provided properly configured) the link should come back up
+ automatically.
+ 1 = Link-down or hot-reset will set RST_INT[RST_LINK] for the corresponding
+ controller, and set RST_SOFT_PRST()[SOFT_PRST]. This will hold the link in reset
+ until software clears RST_SOFT_PRST()[SOFT_PRST].
+
+ A warm/soft reset does not change this field. On cold reset, this field is initialized to
+ 0. */
+ uint64_t rst_done : 1; /**< [ 8: 8](RO/H) Reset done. Indicates the controller reset status. [RST_DONE] is always 0
+ (i.e. the controller is held in reset) when
+ * RST_SOFT_PRST()[SOFT_PRST] = 1, or
+ * [RST_RCV] = 1 and PERST*_L pin is asserted. */
+ uint64_t rst_link : 1; /**< [ 7: 7](R/W) Reset on link down. When set, a corresponding controller link-down reset or hot
+ reset causes a warm chip reset.
+
+ On cold reset, this field is initialized as follows:
+
+ _ 0 when RST_CTL()[HOST_MODE] = 1.
+
+ _ 1 when RST_CTL()[HOST_MODE] = 0.
+
+ Note that a link-down or hot-reset event can never cause a warm chip reset when the
+ controller is in reset (i.e. can never cause a warm reset when [RST_DONE] = 0). */
+ uint64_t host_mode : 1; /**< [ 6: 6](RO) Read-only access to the corresponding PEM()_CFG[HOSTMD] field indicating PEMn is root
+ complex (host). For controllers 0 and 2 the initial value is determined by straps. For
+ controllers 1 and 3 this field is initially set as host. */
+ uint64_t reserved_4_5 : 2;
+ uint64_t rst_drv : 1; /**< [ 3: 3](R/W) Controls whether PERST*_L is driven. A warm/soft reset does not change this field. On cold
+ reset, this field is initialized as follows:
+
+ _ 0 when RST_CTL()[HOST_MODE] = 0.
+
+ _ 1 when RST_CTL()[HOST_MODE] = 1.
+
+ When set, CNXXXX drives the corresponding PERST*_L pin. Otherwise, CNXXXX does not drive
+ the corresponding PERST*_L pin. */
+ uint64_t rst_rcv : 1; /**< [ 2: 2](R/W) Reset received. Controls whether PERST*_L is received. A warm/soft reset does
+ not change this field. On cold reset, this field is initialized as follows:
+
+ _ 0 when RST_CTL()[HOST_MODE] = 1.
+
+ _ 1 when RST_CTL()[HOST_MODE] = 0.
+
+ When [RST_RCV] = 1, the PERST*_L value is received and can be used to reset the
+ controller and (optionally, based on [RST_CHIP]) warm reset the chip.
+
+ When [RST_RCV] = 1 (and [RST_CHIP] = 0), RST_INT[PERST*] gets set when the PERST*_L
+ pin asserts. (This interrupt can alert software whenever the external reset pin initiates
+ a controller reset sequence.)
+
+ [RST_VAL] gives the PERST*_L pin value when [RST_RCV] = 1.
+
+ When [RST_RCV] = 0, the PERST*_L pin value is ignored. */
+ uint64_t rst_chip : 1; /**< [ 1: 1](R/W) Controls whether PERST*_L causes a chip warm reset like CHIP_RESET_L. A warm/soft reset
+ does not change this field. On cold reset, this field is initialized to 0.
+
+ When [RST_RCV] = 0, [RST_CHIP] is ignored.
+
+ When [RST_RCV] = 1, [RST_CHIP] = 1, and PERST*_L asserts, a chip warm reset is generated. */
+ uint64_t rst_val : 1; /**< [ 0: 0](RO/H) Read-only access to PERST*_L. Unpredictable when [RST_RCV] = 0.
+
+ Reads as 1 when [RST_RCV] = 1 and the PERST*_L pin is asserted.
+
+ Reads as 0 when [RST_RCV] = 1 and the PERST*_L pin is not asserted. */
+#else /* Word 0 - Little Endian */
+ uint64_t rst_val : 1; /**< [ 0: 0](RO/H) Read-only access to PERST*_L. Unpredictable when [RST_RCV] = 0.
+
+ Reads as 1 when [RST_RCV] = 1 and the PERST*_L pin is asserted.
+
+ Reads as 0 when [RST_RCV] = 1 and the PERST*_L pin is not asserted. */
+ uint64_t rst_chip : 1; /**< [ 1: 1](R/W) Controls whether PERST*_L causes a chip warm reset like CHIP_RESET_L. A warm/soft reset
+ does not change this field. On cold reset, this field is initialized to 0.
+
+ When [RST_RCV] = 0, [RST_CHIP] is ignored.
+
+ When [RST_RCV] = 1, [RST_CHIP] = 1, and PERST*_L asserts, a chip warm reset is generated. */
+ uint64_t rst_rcv : 1; /**< [ 2: 2](R/W) Reset received. Controls whether PERST*_L is received. A warm/soft reset does
+ not change this field. On cold reset, this field is initialized as follows:
+
+ _ 0 when RST_CTL()[HOST_MODE] = 1.
+
+ _ 1 when RST_CTL()[HOST_MODE] = 0.
+
+ When [RST_RCV] = 1, the PERST*_L value is received and can be used to reset the
+ controller and (optionally, based on [RST_CHIP]) warm reset the chip.
+
+ When [RST_RCV] = 1 (and [RST_CHIP] = 0), RST_INT[PERST*] gets set when the PERST*_L
+ pin asserts. (This interrupt can alert software whenever the external reset pin initiates
+ a controller reset sequence.)
+
+ [RST_VAL] gives the PERST*_L pin value when [RST_RCV] = 1.
+
+ When [RST_RCV] = 0, the PERST*_L pin value is ignored. */
+ uint64_t rst_drv : 1; /**< [ 3: 3](R/W) Controls whether PERST*_L is driven. A warm/soft reset does not change this field. On cold
+ reset, this field is initialized as follows:
+
+ _ 0 when RST_CTL()[HOST_MODE] = 0.
+
+ _ 1 when RST_CTL()[HOST_MODE] = 1.
+
+ When set, CNXXXX drives the corresponding PERST*_L pin. Otherwise, CNXXXX does not drive
+ the corresponding PERST*_L pin. */
+ uint64_t reserved_4_5 : 2;
+ uint64_t host_mode : 1; /**< [ 6: 6](RO) Read-only access to the corresponding PEM()_CFG[HOSTMD] field indicating PEMn is root
+ complex (host). For controllers 0 and 2 the initial value is determined by straps. For
+ controllers 1 and 3 this field is initially set as host. */
+ uint64_t rst_link : 1; /**< [ 7: 7](R/W) Reset on link down. When set, a corresponding controller link-down reset or hot
+ reset causes a warm chip reset.
+
+ On cold reset, this field is initialized as follows:
+
+ _ 0 when RST_CTL()[HOST_MODE] = 1.
+
+ _ 1 when RST_CTL()[HOST_MODE] = 0.
+
+ Note that a link-down or hot-reset event can never cause a warm chip reset when the
+ controller is in reset (i.e. can never cause a warm reset when [RST_DONE] = 0). */
+ uint64_t rst_done : 1; /**< [ 8: 8](RO/H) Reset done. Indicates the controller reset status. [RST_DONE] is always 0
+ (i.e. the controller is held in reset) when
+ * RST_SOFT_PRST()[SOFT_PRST] = 1, or
+ * [RST_RCV] = 1 and PERST*_L pin is asserted. */
+ uint64_t prst_link : 1; /**< [ 9: 9](R/W) PEM reset on link down.
+ 0 = Link-down or hot-reset will set RST_INT[RST_LINK] for the corresponding
+ controller, and (provided properly configured) the link should come back up
+ automatically.
+ 1 = Link-down or hot-reset will set RST_INT[RST_LINK] for the corresponding
+ controller, and set RST_SOFT_PRST()[SOFT_PRST]. This will hold the link in reset
+ until software clears RST_SOFT_PRST()[SOFT_PRST].
+
+ A warm/soft reset does not change this field. On cold reset, this field is initialized to
+ 0. */
+ uint64_t pf_flr_chip : 1; /**< [ 10: 10](R/W) Controls whether corresponding controller PF Function Level Reset causes a chip warm
+ reset like CHIP_RESET_L. A warm/soft reset does not change this field.
+ On cold reset, this field is initialized as follows:
+
+ _ 0 when RST_CTL()[HOST_MODE] = 1.
+
+ _ 1 when RST_CTL()[HOST_MODE] = 0. */
+ uint64_t reserved_11_63 : 53;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_rst_ctlx bdk_rst_ctlx_t;
+
+static inline uint64_t BDK_RST_CTLX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_CTLX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x87e006001640ll + 8ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e006001640ll + 8ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e006001640ll + 8ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x87e006001640ll + 8ll * ((a) & 0x3);
+ __bdk_csr_fatal("RST_CTLX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_CTLX(a) bdk_rst_ctlx_t
+#define bustype_BDK_RST_CTLX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_CTLX(a) "RST_CTLX"
+#define device_bar_BDK_RST_CTLX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_CTLX(a) (a)
+#define arguments_BDK_RST_CTLX(a) (a),-1,-1,-1
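+
+/* Example (editor's sketch, not from the original BDK sources): configure a
+ * controller so that an asserted PERST*_L pin warm-resets the chip, per the
+ * [RST_RCV]/[RST_CHIP] descriptions above. Assumes the generic BDK CSR
+ * accessor macros (BDK_CSR_MODIFY from bdk-csr.h); the function name is
+ * illustrative only. */
+static inline void example_rst_ctl_perst_warm_reset(bdk_node_t node, unsigned long pem)
+{
+    BDK_CSR_MODIFY(c, node, BDK_RST_CTLX(pem),
+        c.s.rst_rcv = 1;   /* Sample PERST*_L on this controller */
+        c.s.rst_chip = 1); /* An asserted PERST*_L now causes a chip warm reset */
+}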
+
+/**
+ * Register (RSL) rst_dbg_reset
+ *
+ * RST Debug Logic Reset Register
+ * This register contains the reset control for each core's debug logic.
+ * Debug reset is not supported in pass 2.
+ */
+union bdk_rst_dbg_reset
+{
+ uint64_t u;
+ struct bdk_rst_dbg_reset_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t rst : 48; /**< [ 47: 0](R/W) Debug logic reset for each core:
+ 0 = Debug logic operates normally.
+ 1 = Holds the debug logic in its reset state.
+
+ The register is reset to 0 only during cold reset; the value is unaffected by
+ warm and soft reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t rst : 48; /**< [ 47: 0](R/W) Debug logic reset for each core:
+ 0 = Debug logic operates normally.
+ 1 = Holds the debug logic in its reset state.
+
+ The register is reset to 0 only during cold reset; the value is unaffected by
+ warm and soft reset. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_dbg_reset_s cn88xxp1; */
+ struct bdk_rst_dbg_reset_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t rst : 4; /**< [ 3: 0](R/W) Reserved. */
+#else /* Word 0 - Little Endian */
+ uint64_t rst : 4; /**< [ 3: 0](R/W) Reserved. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_rst_dbg_reset_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t rst : 24; /**< [ 23: 0](R/W) Reserved. */
+#else /* Word 0 - Little Endian */
+ uint64_t rst : 24; /**< [ 23: 0](R/W) Reserved. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } cn83xx;
+ struct bdk_rst_dbg_reset_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t rst : 48; /**< [ 47: 0](R/W) Reserved. */
+#else /* Word 0 - Little Endian */
+ uint64_t rst : 48; /**< [ 47: 0](R/W) Reserved. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_rst_dbg_reset bdk_rst_dbg_reset_t;
+
+#define BDK_RST_DBG_RESET BDK_RST_DBG_RESET_FUNC()
+static inline uint64_t BDK_RST_DBG_RESET_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_DBG_RESET_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x87e006001750ll;
+ __bdk_csr_fatal("RST_DBG_RESET", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_DBG_RESET bdk_rst_dbg_reset_t
+#define bustype_BDK_RST_DBG_RESET BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_DBG_RESET "RST_DBG_RESET"
+#define device_bar_BDK_RST_DBG_RESET 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_DBG_RESET 0
+#define arguments_BDK_RST_DBG_RESET -1,-1,-1,-1
+
+/**
+ * Register (RSL) rst_debug
+ *
+ * RST Debug Register
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_debug
+{
+ uint64_t u;
+ struct bdk_rst_debug_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t dll_csr_wakeup : 1; /**< [ 3: 3](R/W) Forces DLL setting to unlock.
+ Setting this field will force all DLLs to track clock changes.
+ For diagnostic use only.
+
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t clkena_on : 1; /**< [ 2: 2](R/W) Force global clock enable on.
+ Setting this field will force all clocks on while they are in reset and
+ will dramatically increase power consumption.
+ For diagnostic use only.
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t clk_cng : 1; /**< [ 1: 1](R/W) Force clock-changing indicator on.
+ For diagnostic use only.
+ This field is always reinitialized on a cold domain reset.
+
+ Internal:
+ Forces store-n-forward across clock domains. */
+ uint64_t clk_on : 1; /**< [ 0: 0](R/W) Force conditional clock used for interrupt logic to always be on. For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t clk_on : 1; /**< [ 0: 0](R/W) Force conditional clock used for interrupt logic to always be on. For diagnostic use only. */
+ uint64_t clk_cng : 1; /**< [ 1: 1](R/W) Force clock-changing indicator on.
+ For diagnostic use only.
+ This field is always reinitialized on a cold domain reset.
+
+ Internal:
+ Forces store-n-forward across clock domains. */
+ uint64_t clkena_on : 1; /**< [ 2: 2](R/W) Force global clock enable on.
+ Setting this field will force all clocks on while they are in reset and
+ will dramatically increase power consumption.
+ For diagnostic use only.
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t dll_csr_wakeup : 1; /**< [ 3: 3](R/W) Forces DLL setting to unlock.
+ Setting this field will force all DLLs to track clock changes.
+ For diagnostic use only.
+
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_rst_debug_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t clk_on : 1; /**< [ 0: 0](R/W) Force conditional clock used for interrupt logic to always be on. For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t clk_on : 1; /**< [ 0: 0](R/W) Force conditional clock used for interrupt logic to always be on. For diagnostic use only. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_rst_debug_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t dll_csr_wakeup : 1; /**< [ 3: 3](R/W) Forces DLL setting to unlock.
+ Setting this field will force all DLLs to track clock changes.
+ For diagnostic use only.
+
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t clkena_on : 1; /**< [ 2: 2](R/W) Force global clock enable on.
+ Setting this field will force all clocks on while they are in reset and
+ will dramatically increase power consumption.
+ For diagnostic use only.
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t clk_cng : 1; /**< [ 1: 1](R/W) Force clock-changing indicator on.
+ For diagnostic use only.
+ This field is always reinitialized on a cold domain reset.
+
+ Internal:
+ Forces store-n-forward across clock domains. */
+ uint64_t clk_on : 1; /**< [ 0: 0](R/W) Force conditional clock used for interrupt logic to always be on.
+ For diagnostic use only.
+ This field is always reinitialized on a cold domain reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t clk_on : 1; /**< [ 0: 0](R/W) Force conditional clock used for interrupt logic to always be on.
+ For diagnostic use only.
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t clk_cng : 1; /**< [ 1: 1](R/W) Force clock-changing indicator on.
+ For diagnostic use only.
+ This field is always reinitialized on a cold domain reset.
+
+ Internal:
+ Forces store-n-forward across clock domains. */
+ uint64_t clkena_on : 1; /**< [ 2: 2](R/W) Force global clock enable on.
+ Setting this field will force all clocks on while they are in reset and
+ will dramatically increase power consumption.
+ For diagnostic use only.
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t dll_csr_wakeup : 1; /**< [ 3: 3](R/W) Forces DLL setting to unlock.
+ Setting this field will force all DLLs to track clock changes.
+ For diagnostic use only.
+
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_rst_debug bdk_rst_debug_t;
+
+#define BDK_RST_DEBUG BDK_RST_DEBUG_FUNC()
+static inline uint64_t BDK_RST_DEBUG_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_DEBUG_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x87e0060017b0ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x87e0060017b0ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X))
+ return 0x87e0060017b0ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e0060017b0ll;
+ __bdk_csr_fatal("RST_DEBUG", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_DEBUG bdk_rst_debug_t
+#define bustype_BDK_RST_DEBUG BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_DEBUG "RST_DEBUG"
+#define device_bar_BDK_RST_DEBUG 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_DEBUG 0
+#define arguments_BDK_RST_DEBUG -1,-1,-1,-1
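+
+/* Example (editor's sketch, not from the original BDK sources): force the
+ * conditional interrupt clock on via the CN8XXX layout. [CLK_ON] is "for
+ * diagnostic use only", so this belongs in bring-up debugging, never in
+ * production paths. Assumes BDK_CSR_MODIFY from bdk-csr.h; the function name
+ * is illustrative only. */
+static inline void example_rst_debug_force_clk_on(bdk_node_t node)
+{
+    BDK_CSR_MODIFY(c, node, BDK_RST_DEBUG, c.cn8.clk_on = 1);
+}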
+
+/**
+ * Register (RSL) rst_delay
+ *
+ * RST Delay Register
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_delay
+{
+ uint64_t u;
+ struct bdk_rst_delay_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t warm_rst_dly : 16; /**< [ 31: 16](R/W) Warm reset delay. A warm reset immediately causes an early warm-reset notification, but
+ the assertion of warm reset is delayed this many coprocessor-clock cycles. A warm/soft
+ reset does not change this field.
+ This must be at least 0x200 coprocessor-clock cycles. */
+ uint64_t reserved_0_15 : 16;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_15 : 16;
+ uint64_t warm_rst_dly : 16; /**< [ 31: 16](R/W) Warm reset delay. A warm reset immediately causes an early warm-reset notification, but
+ the assertion of warm reset is delayed this many coprocessor-clock cycles. A warm/soft
+ reset does not change this field.
+ This must be at least 0x200 coprocessor-clock cycles. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_rst_delay_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t warm_rst_dly : 16; /**< [ 31: 16](R/W) Warm reset delay. A warm reset immediately causes an early warm-reset notification, but
+ the assertion of warm reset is delayed this many coprocessor-clock cycles. A warm/soft
+ reset does not change this field.
+ This must be at least 0x200 coprocessor-clock cycles. */
+ uint64_t soft_rst_dly : 16; /**< [ 15: 0](R/W) Soft reset delay. A soft reset immediately causes an early soft-reset notification, but
+ the assertion of soft reset is delayed this many coprocessor-clock cycles. A warm/soft
+ reset does not change this field.
+ This must be at least 0x200 coprocessor-clock cycles. */
+#else /* Word 0 - Little Endian */
+ uint64_t soft_rst_dly : 16; /**< [ 15: 0](R/W) Soft reset delay. A soft reset immediately causes an early soft-reset notification, but
+ the assertion of soft reset is delayed this many coprocessor-clock cycles. A warm/soft
+ reset does not change this field.
+ This must be at least 0x200 coprocessor-clock cycles. */
+ uint64_t warm_rst_dly : 16; /**< [ 31: 16](R/W) Warm reset delay. A warm reset immediately causes an early warm-reset notification, but
+ the assertion of warm reset is delayed this many coprocessor-clock cycles. A warm/soft
+ reset does not change this field.
+ This must be at least 0x200 coprocessor-clock cycles. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_rst_delay_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t rst_dly : 16; /**< [ 15: 0](R/W) Reset delay. Chip and core domain resets immediately cause an early reset
+ notification to the DDR interface. The assertion of the domain reset is delayed
+ by this many 100 MHz PLL reference clocks. The minimum delay is 1 us.
+ The typical value is 4 us once DDR has been initialized.
+ This field is reinitialized with a cold domain reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t rst_dly : 16; /**< [ 15: 0](R/W) Reset delay. Chip and core domain resets immediately cause an early reset
+ notification to the DDR interface. The assertion of the domain reset is delayed
+ by this many 100 MHz PLL reference clocks. The minimum delay is 1 us.
+ The typical value is 4 us once DDR has been initialized.
+ This field is reinitialized with a cold domain reset. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_rst_delay bdk_rst_delay_t;
+
+#define BDK_RST_DELAY BDK_RST_DELAY_FUNC()
+static inline uint64_t BDK_RST_DELAY_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_DELAY_FUNC(void)
+{
+ return 0x87e006001608ll;
+}
+
+#define typedef_BDK_RST_DELAY bdk_rst_delay_t
+#define bustype_BDK_RST_DELAY BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_DELAY "RST_DELAY"
+#define device_bar_BDK_RST_DELAY 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_DELAY 0
+#define arguments_BDK_RST_DELAY -1,-1,-1,-1
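+
+/* Example (editor's sketch, not from the original BDK sources): program the
+ * CN8XXX warm reset delay while honoring the documented 0x200-cycle minimum.
+ * Assumes BDK_CSR_MODIFY from bdk-csr.h; the function name is illustrative
+ * only. */
+static inline void example_rst_delay_set_warm(bdk_node_t node, uint16_t cycles)
+{
+    if (cycles < 0x200)
+        cycles = 0x200; /* Hardware requires at least 0x200 coprocessor clocks */
+    BDK_CSR_MODIFY(c, node, BDK_RST_DELAY, c.cn8.warm_rst_dly = cycles);
+}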
+
+/**
+ * Register (RSL) rst_dev_map#
+ *
+ * RST Device Map Register
+ * This register configures the reset domain of devices. Index {a} is enumerated by RST_DEV_E.
+ * Writes to these registers should only occur when all the bits of RST_BIST_ACTIVE are clear.
+ * See RST_BIST_ACTIVE for details.
+ * Only one RST_DEV_MAP should be written at a time.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_dev_mapx
+{
+ uint64_t u;
+ struct bdk_rst_dev_mapx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_3_63 : 61;
+ uint64_t dmn : 3; /**< [ 2: 0](R/W) Map of programmable devices to reset domains. When the specified domain reset
+ occurs, the corresponding device will reset. Devices are numbered according to
+ RST_DEV_E.
+
+ GSERs should be mapped to the same domain as the PEM, CGX or NCSI device they are
+ associated with.
+
+ If any PCIEx_EP_STRAP is set then all RST_DEV_MAP(GSERx) are mapped to chip reset.
+
+ See RST_DOMAIN_E for field encodings.
+
+ This field is always reinitialized on a cold domain reset.
+
+ Internal:
+ RST_DEV_MAP()[DMN] resets to core domain for everything except AVS, EMM, MPI\<1:0\>
+ and NCSI which reset to SCP domain and GSER which are set to chip in EP mode.
+
+ This is based on cold reset so software could e.g. choose to put a PEM GSER into
+ endpoint based on knowledge outside the straps (that RST uses to reset this
+ table). */
+#else /* Word 0 - Little Endian */
+ uint64_t dmn : 3; /**< [ 2: 0](R/W) Map of programmable devices to reset domains. When the specified domain reset
+ occurs, the corresponding device will reset. Devices are numbered according to
+ RST_DEV_E.
+
+ GSERs should be mapped to the same domain as the PEM, CGX or NCSI device they are
+ associated with.
+
+ If any PCIEx_EP_STRAP is set then all RST_DEV_MAP(GSERx) are mapped to chip reset.
+
+ See RST_DOMAIN_E for field encodings.
+
+ This field is always reinitialized on a cold domain reset.
+
+ Internal:
+ RST_DEV_MAP()[DMN] resets to core domain for everything except AVS, EMM, MPI\<1:0\>
+ and NCSI which reset to SCP domain and GSER which are set to chip in EP mode.
+
+ This is based on cold reset so software could e.g. choose to put a PEM GSER into
+ endpoint based on knowledge outside the straps (that RST uses to reset this
+ table). */
+ uint64_t reserved_3_63 : 61;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_dev_mapx_s cn; */
+};
+typedef union bdk_rst_dev_mapx bdk_rst_dev_mapx_t;
+
+static inline uint64_t BDK_RST_DEV_MAPX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_DEV_MAPX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=47))
+ return 0x87e00a001a00ll + 8ll * ((a) & 0x3f);
+ __bdk_csr_fatal("RST_DEV_MAPX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_DEV_MAPX(a) bdk_rst_dev_mapx_t
+#define bustype_BDK_RST_DEV_MAPX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_DEV_MAPX(a) "RST_DEV_MAPX"
+#define device_bar_BDK_RST_DEV_MAPX(a) 0x2 /* PF_BAR2 */
+#define busnum_BDK_RST_DEV_MAPX(a) (a)
+#define arguments_BDK_RST_DEV_MAPX(a) (a),-1,-1,-1
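+
+/* Example (editor's sketch, not from the original BDK sources): read back the
+ * reset domain a device is currently mapped to. The index is an RST_DEV_E
+ * enumeration value and the result uses the RST_DOMAIN_E encoding, both
+ * defined elsewhere in these headers. Assumes BDK_CSR_INIT from bdk-csr.h;
+ * the function name is illustrative only. */
+static inline uint64_t example_rst_dev_map_domain(bdk_node_t node, unsigned long dev)
+{
+    BDK_CSR_INIT(map, node, BDK_RST_DEV_MAPX(dev));
+    return map.s.dmn;
+}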
+
+/**
+ * Register (RSL) rst_eco
+ *
+ * INTERNAL: RST ECO Register
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_eco
+{
+ uint64_t u;
+ struct bdk_rst_eco_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t eco_rw : 32; /**< [ 31: 0](R/W) ECO flops. */
+#else /* Word 0 - Little Endian */
+ uint64_t eco_rw : 32; /**< [ 31: 0](R/W) ECO flops. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_eco_s cn8; */
+ struct bdk_rst_eco_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t eco_rw : 32; /**< [ 31: 0](R/W) ECO flops.
+ This field is always reinitialized on a cold domain reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t eco_rw : 32; /**< [ 31: 0](R/W) ECO flops.
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_rst_eco bdk_rst_eco_t;
+
+#define BDK_RST_ECO BDK_RST_ECO_FUNC()
+static inline uint64_t BDK_RST_ECO_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_ECO_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x87e0060017b8ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x87e0060017b8ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X))
+ return 0x87e0060017b8ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e0060017b8ll;
+ __bdk_csr_fatal("RST_ECO", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_ECO bdk_rst_eco_t
+#define bustype_BDK_RST_ECO BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_ECO "RST_ECO"
+#define device_bar_BDK_RST_ECO 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_ECO 0
+#define arguments_BDK_RST_ECO -1,-1,-1,-1
+
+/**
+ * Register (RSL) rst_int
+ *
+ * RST Interrupt Register
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_int
+{
+ uint64_t u;
+ struct bdk_rst_int_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_35_63 : 29;
+ uint64_t scp_reset : 1; /**< [ 34: 34](R/W1C/H) SCP domain entered reset.
+ This field is reinitialized with a chip domain reset. */
+ uint64_t mcp_reset : 1; /**< [ 33: 33](R/W1C/H) MCP domain entered reset.
+ This field is reinitialized with a chip domain reset. */
+ uint64_t core_reset : 1; /**< [ 32: 32](R/W1C/H) Core domain entered reset.
+ This field is reinitialized with a chip domain reset. */
+ uint64_t reserved_6_31 : 26;
+ uint64_t rst_link : 6; /**< [ 5: 0](R/W1C/H) A controller link-down/hot-reset occurred while RST_CTL()[RST_LINK] = 0. Software must
+ assert then deassert RST_SOFT_PRST()[SOFT_PRST]. One bit corresponds to each controller. */
+#else /* Word 0 - Little Endian */
+ uint64_t rst_link : 6; /**< [ 5: 0](R/W1C/H) A controller link-down/hot-reset occurred while RST_CTL()[RST_LINK] = 0. Software must
+ assert then deassert RST_SOFT_PRST()[SOFT_PRST]. One bit corresponds to each controller. */
+ uint64_t reserved_6_31 : 26;
+ uint64_t core_reset : 1; /**< [ 32: 32](R/W1C/H) Core domain entered reset.
+ This field is reinitialized with a chip domain reset. */
+ uint64_t mcp_reset : 1; /**< [ 33: 33](R/W1C/H) MCP domain entered reset.
+ This field is reinitialized with a chip domain reset. */
+ uint64_t scp_reset : 1; /**< [ 34: 34](R/W1C/H) SCP domain entered reset.
+ This field is reinitialized with a chip domain reset. */
+ uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_rst_int_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_35_63 : 29;
+ uint64_t scp_reset : 1; /**< [ 34: 34](R/W1C/H) SCP domain entered reset.
+ This field is reinitialized with a chip domain reset. */
+ uint64_t mcp_reset : 1; /**< [ 33: 33](R/W1C/H) MCP domain entered reset.
+ This field is reinitialized with a chip domain reset. */
+ uint64_t core_reset : 1; /**< [ 32: 32](R/W1C/H) Core domain entered reset.
+ This field is reinitialized with a chip domain reset. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t perst : 4; /**< [ 19: 16](R/W1C/H) PERST*_L asserted while RST_CTL()[RST_RCV] = 1 and RST_CTL()[RST_CHIP] = 0. One bit
+ corresponds to each controller.
+ This field is reinitialized with a chip domain reset. */
+ uint64_t reserved_4_15 : 12;
+ uint64_t rst_link : 4; /**< [ 3: 0](R/W1C/H) A controller link-down/hot-reset occurred while RST_CTL()[RST_LINK] = 0. Software must
+ assert then deassert RST_SOFT_PRST()[SOFT_PRST]. One bit corresponds to each controller.
+ This field is reinitialized with a chip domain reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t rst_link : 4; /**< [ 3: 0](R/W1C/H) A controller link-down/hot-reset occurred while RST_CTL()[RST_LINK] = 0. Software must
+ assert then deassert RST_SOFT_PRST()[SOFT_PRST]. One bit corresponds to each controller.
+ This field is reinitialized with a chip domain reset. */
+ uint64_t reserved_4_15 : 12;
+ uint64_t perst : 4; /**< [ 19: 16](R/W1C/H) PERST*_L asserted while RST_CTL()[RST_RCV] = 1 and RST_CTL()[RST_CHIP] = 0. One bit
+ corresponds to each controller.
+ This field is reinitialized with a chip domain reset. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t core_reset : 1; /**< [ 32: 32](R/W1C/H) Core domain entered reset.
+ This field is reinitialized with a chip domain reset. */
+ uint64_t mcp_reset : 1; /**< [ 33: 33](R/W1C/H) MCP domain entered reset.
+ This field is reinitialized with a chip domain reset. */
+ uint64_t scp_reset : 1; /**< [ 34: 34](R/W1C/H) SCP domain entered reset.
+ This field is reinitialized with a chip domain reset. */
+ uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_rst_int_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_11_63 : 53;
+ uint64_t perst : 3; /**< [ 10: 8](R/W1C/H) PERST*_L asserted while RST_CTL()[RST_RCV] = 1 and RST_CTL()[RST_CHIP] = 0. One bit
+ corresponds to each controller. */
+ uint64_t reserved_3_7 : 5;
+ uint64_t rst_link : 3; /**< [ 2: 0](R/W1C/H) A controller link-down/hot-reset occurred while RST_CTL()[RST_LINK] = 0. Software must
+ assert then deassert RST_SOFT_PRST()[SOFT_PRST]. One bit corresponds to each controller. */
+#else /* Word 0 - Little Endian */
+ uint64_t rst_link : 3; /**< [ 2: 0](R/W1C/H) A controller link-down/hot-reset occurred while RST_CTL()[RST_LINK] = 0. Software must
+ assert then deassert RST_SOFT_PRST()[SOFT_PRST]. One bit corresponds to each controller. */
+ uint64_t reserved_3_7 : 5;
+ uint64_t perst : 3; /**< [ 10: 8](R/W1C/H) PERST*_L asserted while RST_CTL()[RST_RCV] = 1 and RST_CTL()[RST_CHIP] = 0. One bit
+ corresponds to each controller. */
+ uint64_t reserved_11_63 : 53;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_rst_int_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t perst : 6; /**< [ 13: 8](R/W1C/H) PERST*_L asserted while RST_CTL()[RST_RCV] = 1 and RST_CTL()[RST_CHIP] = 0. One bit
+ corresponds to each controller. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t rst_link : 6; /**< [ 5: 0](R/W1C/H) A controller link-down/hot-reset occurred while RST_CTL()[RST_LINK] = 0. Software must
+ assert then deassert RST_SOFT_PRST()[SOFT_PRST]. One bit corresponds to each controller. */
+#else /* Word 0 - Little Endian */
+ uint64_t rst_link : 6; /**< [ 5: 0](R/W1C/H) A controller link-down/hot-reset occurred while RST_CTL()[RST_LINK] = 0. Software must
+ assert then deassert RST_SOFT_PRST()[SOFT_PRST]. One bit corresponds to each controller. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t perst : 6; /**< [ 13: 8](R/W1C/H) PERST*_L asserted while RST_CTL()[RST_RCV] = 1 and RST_CTL()[RST_CHIP] = 0. One bit
+ corresponds to each controller. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_rst_int_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t perst : 4; /**< [ 11: 8](R/W1C/H) PERST*_L asserted while RST_CTL()[RST_RCV] = 1 and RST_CTL()[RST_CHIP] = 0. One bit
+ corresponds to each controller. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t rst_link : 4; /**< [ 3: 0](R/W1C/H) A controller link-down/hot-reset occurred while RST_CTL()[RST_LINK] = 0. Software must
+ assert then deassert RST_SOFT_PRST()[SOFT_PRST]. One bit corresponds to each controller. */
+#else /* Word 0 - Little Endian */
+ uint64_t rst_link : 4; /**< [ 3: 0](R/W1C/H) A controller link-down/hot-reset occurred while RST_CTL()[RST_LINK] = 0. Software must
+ assert then deassert RST_SOFT_PRST()[SOFT_PRST]. One bit corresponds to each controller. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t perst : 4; /**< [ 11: 8](R/W1C/H) PERST*_L asserted while RST_CTL()[RST_RCV] = 1 and RST_CTL()[RST_CHIP] = 0. One bit
+ corresponds to each controller. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_rst_int bdk_rst_int_t;
+
+#define BDK_RST_INT BDK_RST_INT_FUNC()
+static inline uint64_t BDK_RST_INT_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_INT_FUNC(void)
+{
+ return 0x87e006001628ll;
+}
+
+#define typedef_BDK_RST_INT bdk_rst_int_t
+#define bustype_BDK_RST_INT BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_INT "RST_INT"
+#define device_bar_BDK_RST_INT 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_INT 0
+#define arguments_BDK_RST_INT -1,-1,-1,-1
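+
+/* Example (editor's sketch, not from the original BDK sources): acknowledge a
+ * latched PERST*_L event for one controller using the CN9XXX layout. The
+ * [PERST] bits are W1C, so writing a 1 clears only the selected bit. Assumes
+ * BDK_CSR_WRITE from bdk-csr.h; the function name is illustrative only. */
+static inline void example_rst_int_ack_perst(bdk_node_t node, unsigned int pem)
+{
+    bdk_rst_int_t ack;
+    ack.u = 0;
+    ack.cn9.perst = 1u << pem; /* W1C: clears only this controller's bit */
+    BDK_CSR_WRITE(node, BDK_RST_INT, ack.u);
+}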
+
+/**
+ * Register (RSL) rst_int_ena_w1c
+ *
+ * RST Interrupt Enable Clear Register
+ * This register clears interrupt enable bits.
+ */
+union bdk_rst_int_ena_w1c
+{
+ uint64_t u;
+ struct bdk_rst_int_ena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_35_63 : 29;
+ uint64_t scp_reset : 1; /**< [ 34: 34](R/W1C/H) Reads or clears enable for RST_INT[SCP_RESET]. */
+ uint64_t mcp_reset : 1; /**< [ 33: 33](R/W1C/H) Reads or clears enable for RST_INT[MCP_RESET]. */
+ uint64_t core_reset : 1; /**< [ 32: 32](R/W1C/H) Reads or clears enable for RST_INT[CORE_RESET]. */
+ uint64_t reserved_6_31 : 26;
+ uint64_t rst_link : 6; /**< [ 5: 0](R/W1C/H) Reads or clears enable for RST_INT[RST_LINK]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rst_link : 6; /**< [ 5: 0](R/W1C/H) Reads or clears enable for RST_INT[RST_LINK]. */
+ uint64_t reserved_6_31 : 26;
+ uint64_t core_reset : 1; /**< [ 32: 32](R/W1C/H) Reads or clears enable for RST_INT[CORE_RESET]. */
+ uint64_t mcp_reset : 1; /**< [ 33: 33](R/W1C/H) Reads or clears enable for RST_INT[MCP_RESET]. */
+ uint64_t scp_reset : 1; /**< [ 34: 34](R/W1C/H) Reads or clears enable for RST_INT[SCP_RESET]. */
+ uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_rst_int_ena_w1c_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_35_63 : 29;
+ uint64_t scp_reset : 1; /**< [ 34: 34](R/W1C/H) Reads or clears enable for RST_INT[SCP_RESET]. */
+ uint64_t mcp_reset : 1; /**< [ 33: 33](R/W1C/H) Reads or clears enable for RST_INT[MCP_RESET]. */
+ uint64_t core_reset : 1; /**< [ 32: 32](R/W1C/H) Reads or clears enable for RST_INT[CORE_RESET]. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t perst : 4; /**< [ 19: 16](R/W1C/H) Reads or clears enable for RST_INT[PERST]. */
+ uint64_t reserved_4_15 : 12;
+ uint64_t rst_link : 4; /**< [ 3: 0](R/W1C/H) Reads or clears enable for RST_INT[RST_LINK]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rst_link : 4; /**< [ 3: 0](R/W1C/H) Reads or clears enable for RST_INT[RST_LINK]. */
+ uint64_t reserved_4_15 : 12;
+ uint64_t perst : 4; /**< [ 19: 16](R/W1C/H) Reads or clears enable for RST_INT[PERST]. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t core_reset : 1; /**< [ 32: 32](R/W1C/H) Reads or clears enable for RST_INT[CORE_RESET]. */
+ uint64_t mcp_reset : 1; /**< [ 33: 33](R/W1C/H) Reads or clears enable for RST_INT[MCP_RESET]. */
+ uint64_t scp_reset : 1; /**< [ 34: 34](R/W1C/H) Reads or clears enable for RST_INT[SCP_RESET]. */
+ uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_rst_int_ena_w1c_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_11_63 : 53;
+ uint64_t perst : 3; /**< [ 10: 8](R/W1C/H) Reads or clears enable for RST_INT[PERST]. */
+ uint64_t reserved_3_7 : 5;
+ uint64_t rst_link : 3; /**< [ 2: 0](R/W1C/H) Reads or clears enable for RST_INT[RST_LINK]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rst_link : 3; /**< [ 2: 0](R/W1C/H) Reads or clears enable for RST_INT[RST_LINK]. */
+ uint64_t reserved_3_7 : 5;
+ uint64_t perst : 3; /**< [ 10: 8](R/W1C/H) Reads or clears enable for RST_INT[PERST]. */
+ uint64_t reserved_11_63 : 53;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_rst_int_ena_w1c_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t perst : 6; /**< [ 13: 8](R/W1C/H) Reads or clears enable for RST_INT[PERST]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t rst_link : 6; /**< [ 5: 0](R/W1C/H) Reads or clears enable for RST_INT[RST_LINK]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rst_link : 6; /**< [ 5: 0](R/W1C/H) Reads or clears enable for RST_INT[RST_LINK]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t perst : 6; /**< [ 13: 8](R/W1C/H) Reads or clears enable for RST_INT[PERST]. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_rst_int_ena_w1c_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t perst : 4; /**< [ 11: 8](R/W1C/H) Reads or clears enable for RST_INT[PERST]. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t rst_link : 4; /**< [ 3: 0](R/W1C/H) Reads or clears enable for RST_INT[RST_LINK]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rst_link : 4; /**< [ 3: 0](R/W1C/H) Reads or clears enable for RST_INT[RST_LINK]. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t perst : 4; /**< [ 11: 8](R/W1C/H) Reads or clears enable for RST_INT[PERST]. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_rst_int_ena_w1c bdk_rst_int_ena_w1c_t;
+
+#define BDK_RST_INT_ENA_W1C BDK_RST_INT_ENA_W1C_FUNC()
+static inline uint64_t BDK_RST_INT_ENA_W1C_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_INT_ENA_W1C_FUNC(void)
+{
+ return 0x87e0060016a8ll;
+}
+
+#define typedef_BDK_RST_INT_ENA_W1C bdk_rst_int_ena_w1c_t
+#define bustype_BDK_RST_INT_ENA_W1C BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_INT_ENA_W1C "RST_INT_ENA_W1C"
+#define device_bar_BDK_RST_INT_ENA_W1C 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_INT_ENA_W1C 0
+#define arguments_BDK_RST_INT_ENA_W1C -1,-1,-1,-1
+
+/**
+ * Register (RSL) rst_int_ena_w1s
+ *
+ * RST Interrupt Enable Set Register
+ * This register sets interrupt enable bits.
+ */
+union bdk_rst_int_ena_w1s
+{
+ uint64_t u;
+ struct bdk_rst_int_ena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_35_63 : 29;
+ uint64_t scp_reset : 1; /**< [ 34: 34](R/W1S/H) Reads or sets enable for RST_INT[SCP_RESET]. */
+ uint64_t mcp_reset : 1; /**< [ 33: 33](R/W1S/H) Reads or sets enable for RST_INT[MCP_RESET]. */
+ uint64_t core_reset : 1; /**< [ 32: 32](R/W1S/H) Reads or sets enable for RST_INT[CORE_RESET]. */
+ uint64_t reserved_6_31 : 26;
+ uint64_t rst_link : 6; /**< [ 5: 0](R/W1S/H) Reads or sets enable for RST_INT[RST_LINK]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rst_link : 6; /**< [ 5: 0](R/W1S/H) Reads or sets enable for RST_INT[RST_LINK]. */
+ uint64_t reserved_6_31 : 26;
+ uint64_t core_reset : 1; /**< [ 32: 32](R/W1S/H) Reads or sets enable for RST_INT[CORE_RESET]. */
+ uint64_t mcp_reset : 1; /**< [ 33: 33](R/W1S/H) Reads or sets enable for RST_INT[MCP_RESET]. */
+ uint64_t scp_reset : 1; /**< [ 34: 34](R/W1S/H) Reads or sets enable for RST_INT[SCP_RESET]. */
+ uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_rst_int_ena_w1s_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_35_63 : 29;
+ uint64_t scp_reset : 1; /**< [ 34: 34](R/W1S/H) Reads or sets enable for RST_INT[SCP_RESET]. */
+ uint64_t mcp_reset : 1; /**< [ 33: 33](R/W1S/H) Reads or sets enable for RST_INT[MCP_RESET]. */
+ uint64_t core_reset : 1; /**< [ 32: 32](R/W1S/H) Reads or sets enable for RST_INT[CORE_RESET]. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t perst : 4; /**< [ 19: 16](R/W1S/H) Reads or sets enable for RST_INT[PERST]. */
+ uint64_t reserved_4_15 : 12;
+ uint64_t rst_link : 4; /**< [ 3: 0](R/W1S/H) Reads or sets enable for RST_INT[RST_LINK]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rst_link : 4; /**< [ 3: 0](R/W1S/H) Reads or sets enable for RST_INT[RST_LINK]. */
+ uint64_t reserved_4_15 : 12;
+ uint64_t perst : 4; /**< [ 19: 16](R/W1S/H) Reads or sets enable for RST_INT[PERST]. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t core_reset : 1; /**< [ 32: 32](R/W1S/H) Reads or sets enable for RST_INT[CORE_RESET]. */
+ uint64_t mcp_reset : 1; /**< [ 33: 33](R/W1S/H) Reads or sets enable for RST_INT[MCP_RESET]. */
+ uint64_t scp_reset : 1; /**< [ 34: 34](R/W1S/H) Reads or sets enable for RST_INT[SCP_RESET]. */
+ uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_rst_int_ena_w1s_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_11_63 : 53;
+ uint64_t perst : 3; /**< [ 10: 8](R/W1S/H) Reads or sets enable for RST_INT[PERST]. */
+ uint64_t reserved_3_7 : 5;
+ uint64_t rst_link : 3; /**< [ 2: 0](R/W1S/H) Reads or sets enable for RST_INT[RST_LINK]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rst_link : 3; /**< [ 2: 0](R/W1S/H) Reads or sets enable for RST_INT[RST_LINK]. */
+ uint64_t reserved_3_7 : 5;
+ uint64_t perst : 3; /**< [ 10: 8](R/W1S/H) Reads or sets enable for RST_INT[PERST]. */
+ uint64_t reserved_11_63 : 53;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_rst_int_ena_w1s_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t perst : 6; /**< [ 13: 8](R/W1S/H) Reads or sets enable for RST_INT[PERST]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t rst_link : 6; /**< [ 5: 0](R/W1S/H) Reads or sets enable for RST_INT[RST_LINK]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rst_link : 6; /**< [ 5: 0](R/W1S/H) Reads or sets enable for RST_INT[RST_LINK]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t perst : 6; /**< [ 13: 8](R/W1S/H) Reads or sets enable for RST_INT[PERST]. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_rst_int_ena_w1s_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t perst : 4; /**< [ 11: 8](R/W1S/H) Reads or sets enable for RST_INT[PERST]. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t rst_link : 4; /**< [ 3: 0](R/W1S/H) Reads or sets enable for RST_INT[RST_LINK]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rst_link : 4; /**< [ 3: 0](R/W1S/H) Reads or sets enable for RST_INT[RST_LINK]. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t perst : 4; /**< [ 11: 8](R/W1S/H) Reads or sets enable for RST_INT[PERST]. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_rst_int_ena_w1s bdk_rst_int_ena_w1s_t;
+
+#define BDK_RST_INT_ENA_W1S BDK_RST_INT_ENA_W1S_FUNC()
+static inline uint64_t BDK_RST_INT_ENA_W1S_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_INT_ENA_W1S_FUNC(void)
+{
+ return 0x87e0060016a0ll;
+}
+
+#define typedef_BDK_RST_INT_ENA_W1S bdk_rst_int_ena_w1s_t
+#define bustype_BDK_RST_INT_ENA_W1S BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_INT_ENA_W1S "RST_INT_ENA_W1S"
+#define device_bar_BDK_RST_INT_ENA_W1S 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_INT_ENA_W1S 0
+#define arguments_BDK_RST_INT_ENA_W1S -1,-1,-1,-1
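+
+/* Illustrative sketch (not part of the generated register description): toggling
+ * one RST interrupt enable with the W1S/W1C register pair above. Writing a one to
+ * a bit position in RST_INT_ENA_W1S sets that enable; writing a one to the same
+ * position in RST_INT_ENA_W1C clears it, and zero bits are ignored in both cases.
+ * Assumes the usual BDK_CSR_WRITE() accessor from bdk-csr.h; the function name
+ * is hypothetical.
+ */
+static inline void example_rst_link0_irq_enable(bdk_node_t node, int enable)
+{
+ union bdk_rst_int_ena_w1s ena = { .u = 0 };
+ ena.s.rst_link = 1; /* bit 0 selects link 0 */
+ if (enable)
+ BDK_CSR_WRITE(node, BDK_RST_INT_ENA_W1S, ena.u); /* set the enable */
+ else
+ BDK_CSR_WRITE(node, BDK_RST_INT_ENA_W1C, ena.u); /* same bit layout, clears it */
+}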
+
+/**
+ * Register (RSL) rst_int_w1s
+ *
+ * RST Interrupt Set Register
+ * This register sets interrupt bits.
+ */
+union bdk_rst_int_w1s
+{
+ uint64_t u;
+ struct bdk_rst_int_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_35_63 : 29;
+ uint64_t scp_reset : 1; /**< [ 34: 34](R/W1S/H) Reads or sets RST_INT[SCP_RESET]. */
+ uint64_t mcp_reset : 1; /**< [ 33: 33](R/W1S/H) Reads or sets RST_INT[MCP_RESET]. */
+ uint64_t core_reset : 1; /**< [ 32: 32](R/W1S/H) Reads or sets RST_INT[CORE_RESET]. */
+ uint64_t reserved_6_31 : 26;
+ uint64_t rst_link : 6; /**< [ 5: 0](R/W1S/H) Reads or sets RST_INT[RST_LINK]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rst_link : 6; /**< [ 5: 0](R/W1S/H) Reads or sets RST_INT[RST_LINK]. */
+ uint64_t reserved_6_31 : 26;
+ uint64_t core_reset : 1; /**< [ 32: 32](R/W1S/H) Reads or sets RST_INT[CORE_RESET]. */
+ uint64_t mcp_reset : 1; /**< [ 33: 33](R/W1S/H) Reads or sets RST_INT[MCP_RESET]. */
+ uint64_t scp_reset : 1; /**< [ 34: 34](R/W1S/H) Reads or sets RST_INT[SCP_RESET]. */
+ uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_rst_int_w1s_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_35_63 : 29;
+ uint64_t scp_reset : 1; /**< [ 34: 34](R/W1S/H) Reads or sets RST_INT[SCP_RESET]. */
+ uint64_t mcp_reset : 1; /**< [ 33: 33](R/W1S/H) Reads or sets RST_INT[MCP_RESET]. */
+ uint64_t core_reset : 1; /**< [ 32: 32](R/W1S/H) Reads or sets RST_INT[CORE_RESET]. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t perst : 4; /**< [ 19: 16](R/W1S/H) Reads or sets RST_INT[PERST]. */
+ uint64_t reserved_4_15 : 12;
+ uint64_t rst_link : 4; /**< [ 3: 0](R/W1S/H) Reads or sets RST_INT[RST_LINK]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rst_link : 4; /**< [ 3: 0](R/W1S/H) Reads or sets RST_INT[RST_LINK]. */
+ uint64_t reserved_4_15 : 12;
+ uint64_t perst : 4; /**< [ 19: 16](R/W1S/H) Reads or sets RST_INT[PERST]. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t core_reset : 1; /**< [ 32: 32](R/W1S/H) Reads or sets RST_INT[CORE_RESET]. */
+ uint64_t mcp_reset : 1; /**< [ 33: 33](R/W1S/H) Reads or sets RST_INT[MCP_RESET]. */
+ uint64_t scp_reset : 1; /**< [ 34: 34](R/W1S/H) Reads or sets RST_INT[SCP_RESET]. */
+ uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_rst_int_w1s_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_11_63 : 53;
+ uint64_t perst : 3; /**< [ 10: 8](R/W1S/H) Reads or sets RST_INT[PERST]. */
+ uint64_t reserved_3_7 : 5;
+ uint64_t rst_link : 3; /**< [ 2: 0](R/W1S/H) Reads or sets RST_INT[RST_LINK]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rst_link : 3; /**< [ 2: 0](R/W1S/H) Reads or sets RST_INT[RST_LINK]. */
+ uint64_t reserved_3_7 : 5;
+ uint64_t perst : 3; /**< [ 10: 8](R/W1S/H) Reads or sets RST_INT[PERST]. */
+ uint64_t reserved_11_63 : 53;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_rst_int_w1s_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t perst : 6; /**< [ 13: 8](R/W1S/H) Reads or sets RST_INT[PERST]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t rst_link : 6; /**< [ 5: 0](R/W1S/H) Reads or sets RST_INT[RST_LINK]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rst_link : 6; /**< [ 5: 0](R/W1S/H) Reads or sets RST_INT[RST_LINK]. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t perst : 6; /**< [ 13: 8](R/W1S/H) Reads or sets RST_INT[PERST]. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_rst_int_w1s_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t perst : 4; /**< [ 11: 8](R/W1S/H) Reads or sets RST_INT[PERST]. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t rst_link : 4; /**< [ 3: 0](R/W1S/H) Reads or sets RST_INT[RST_LINK]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rst_link : 4; /**< [ 3: 0](R/W1S/H) Reads or sets RST_INT[RST_LINK]. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t perst : 4; /**< [ 11: 8](R/W1S/H) Reads or sets RST_INT[PERST]. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_rst_int_w1s bdk_rst_int_w1s_t;
+
+#define BDK_RST_INT_W1S BDK_RST_INT_W1S_FUNC()
+static inline uint64_t BDK_RST_INT_W1S_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_INT_W1S_FUNC(void)
+{
+ return 0x87e006001630ll;
+}
+
+#define typedef_BDK_RST_INT_W1S bdk_rst_int_w1s_t
+#define bustype_BDK_RST_INT_W1S BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_INT_W1S "RST_INT_W1S"
+#define device_bar_BDK_RST_INT_W1S 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_INT_W1S 0
+#define arguments_BDK_RST_INT_W1S -1,-1,-1,-1
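+
+/* Illustrative sketch: software-injecting an RST interrupt through the W1S
+ * register above, e.g. to exercise an interrupt handler without waiting for the
+ * hardware event. Setting a bit here sets the corresponding bit in RST_INT.
+ * Assumes BDK_CSR_WRITE() from bdk-csr.h; the function name is hypothetical.
+ */
+static inline void example_inject_core_reset_irq(bdk_node_t node)
+{
+ union bdk_rst_int_w1s w1s = { .u = 0 };
+ w1s.s.core_reset = 1; /* pend RST_INT[CORE_RESET] (CN9XXX field) */
+ BDK_CSR_WRITE(node, BDK_RST_INT_W1S, w1s.u);
+}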
+
+/**
+ * Register (RSL) rst_lboot
+ *
+ * RST Last Boot Register
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_lboot
+{
+ uint64_t u;
+ struct bdk_rst_lboot_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t lboot : 48; /**< [ 47: 0](R/W1C/H) Bit vector of last reset cause(s). The value is reset with a
+ cold domain reset.
+ Bit numbers are enumerated by RST_SOURCE_E. */
+#else /* Word 0 - Little Endian */
+ uint64_t lboot : 48; /**< [ 47: 0](R/W1C/H) Bit vector of last reset cause(s). The value is reset with a
+ cold domain reset.
+ Bit numbers are enumerated by RST_SOURCE_E. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_lboot_s cn; */
+};
+typedef union bdk_rst_lboot bdk_rst_lboot_t;
+
+#define BDK_RST_LBOOT BDK_RST_LBOOT_FUNC()
+static inline uint64_t BDK_RST_LBOOT_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_LBOOT_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e006001620ll;
+ __bdk_csr_fatal("RST_LBOOT", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_LBOOT bdk_rst_lboot_t
+#define bustype_BDK_RST_LBOOT BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_LBOOT "RST_LBOOT"
+#define device_bar_BDK_RST_LBOOT 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_LBOOT 0
+#define arguments_BDK_RST_LBOOT -1,-1,-1,-1
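+
+/* Illustrative sketch: reading the accumulated last-boot causes and clearing
+ * them so a later boot reports fresh state. Because [LBOOT] is W1C, writing back
+ * the value just read clears exactly the causes that were observed; bit positions
+ * are enumerated by RST_SOURCE_E. CN9XXX only. Assumes the BDK_CSR_READ()/
+ * BDK_CSR_WRITE() accessors; the function name is hypothetical.
+ */
+static inline uint64_t example_read_and_clear_lboot(bdk_node_t node)
+{
+ uint64_t causes = BDK_CSR_READ(node, BDK_RST_LBOOT);
+ BDK_CSR_WRITE(node, BDK_RST_LBOOT, causes); /* W1C: clear what we saw */
+ return causes;
+}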
+
+/**
+ * Register (RSL) rst_mcp_domain_w1c
+ *
+ * RST MCP Domain Soft Reset Clear Register
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_mcp_domain_w1c
+{
+ uint64_t u;
+ struct bdk_rst_mcp_domain_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t soft_rst : 1; /**< [ 0: 0](R/W1C/H) Clear soft reset of the MCP processor and associated logic.
+ When set to one, the soft reset of the MCP is removed.
+ Reads of this register show the soft reset state, not the actual MCP domain
+ reset; other factors may keep the reset active, and reading RST_RESET_ACTIVE[MCP]
+ shows the actual reset state. To compensate for delays in reset, this field
+ should only be set if RST_RESET_ACTIVE[MCP] is set.
+ This field is always reinitialized on a chip domain reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t soft_rst : 1; /**< [ 0: 0](R/W1C/H) Clear soft reset of the MCP processor and associated logic.
+ When set to one, the soft reset of the MCP is removed.
+ Reads of this register show the soft reset state, not the actual MCP domain
+ reset; other factors may keep the reset active, and reading RST_RESET_ACTIVE[MCP]
+ shows the actual reset state. To compensate for delays in reset, this field
+ should only be set if RST_RESET_ACTIVE[MCP] is set.
+ This field is always reinitialized on a chip domain reset. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_mcp_domain_w1c_s cn; */
+};
+typedef union bdk_rst_mcp_domain_w1c bdk_rst_mcp_domain_w1c_t;
+
+#define BDK_RST_MCP_DOMAIN_W1C BDK_RST_MCP_DOMAIN_W1C_FUNC()
+static inline uint64_t BDK_RST_MCP_DOMAIN_W1C_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_MCP_DOMAIN_W1C_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e006001838ll;
+ __bdk_csr_fatal("RST_MCP_DOMAIN_W1C", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_MCP_DOMAIN_W1C bdk_rst_mcp_domain_w1c_t
+#define bustype_BDK_RST_MCP_DOMAIN_W1C BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_MCP_DOMAIN_W1C "RST_MCP_DOMAIN_W1C"
+#define device_bar_BDK_RST_MCP_DOMAIN_W1C 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_MCP_DOMAIN_W1C 0
+#define arguments_BDK_RST_MCP_DOMAIN_W1C -1,-1,-1,-1
+
+/**
+ * Register (RSL) rst_mcp_domain_w1s
+ *
+ * RST MCP Domain Soft Reset Set Register
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_mcp_domain_w1s
+{
+ uint64_t u;
+ struct bdk_rst_mcp_domain_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t soft_rst : 1; /**< [ 0: 0](R/W1S/H) Set soft reset of MCP core and associated logic.
+ When set to one, all logic associated with the MCP domain is placed in reset.
+ Reads of this register show the soft reset state, not the actual MCP domain
+ reset; other factors may keep the reset active, and reading RST_RESET_ACTIVE[MCP]
+ shows the actual reset state.
+ It is typically cleared by writing to RST_MCP_DOMAIN_W1C.
+ This field is always reinitialized on a chip domain reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t soft_rst : 1; /**< [ 0: 0](R/W1S/H) Set soft reset of MCP core and associated logic.
+ When set to one, all logic associated with the MCP domain is placed in reset.
+ Reads of this register show the soft reset state, not the actual MCP domain
+ reset; other factors may keep the reset active, and reading RST_RESET_ACTIVE[MCP]
+ shows the actual reset state.
+ It is typically cleared by writing to RST_MCP_DOMAIN_W1C.
+ This field is always reinitialized on a chip domain reset. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_mcp_domain_w1s_s cn; */
+};
+typedef union bdk_rst_mcp_domain_w1s bdk_rst_mcp_domain_w1s_t;
+
+#define BDK_RST_MCP_DOMAIN_W1S BDK_RST_MCP_DOMAIN_W1S_FUNC()
+static inline uint64_t BDK_RST_MCP_DOMAIN_W1S_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_MCP_DOMAIN_W1S_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e006001830ll;
+ __bdk_csr_fatal("RST_MCP_DOMAIN_W1S", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_MCP_DOMAIN_W1S bdk_rst_mcp_domain_w1s_t
+#define bustype_BDK_RST_MCP_DOMAIN_W1S BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_MCP_DOMAIN_W1S "RST_MCP_DOMAIN_W1S"
+#define device_bar_BDK_RST_MCP_DOMAIN_W1S 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_MCP_DOMAIN_W1S 0
+#define arguments_BDK_RST_MCP_DOMAIN_W1S -1,-1,-1,-1
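+
+/* Illustrative sketch: a minimal MCP soft-reset sequence built from the W1S/W1C
+ * pair above. Per the field descriptions, the reset is asserted through
+ * RST_MCP_DOMAIN_W1S and should only be released through RST_MCP_DOMAIN_W1C once
+ * RST_RESET_ACTIVE[MCP] reports the domain actually in reset. The
+ * bdk_rst_reset_active union and its [MCP] field name are assumed from the text
+ * above, and timeout handling is omitted. CN9XXX only.
+ */
+static inline void example_mcp_soft_reset(bdk_node_t node)
+{
+ union bdk_rst_reset_active act; /* assumed type for RST_RESET_ACTIVE */
+ BDK_CSR_WRITE(node, BDK_RST_MCP_DOMAIN_W1S, 1); /* assert soft reset */
+ do { /* wait until the MCP domain reset is really active */
+ act.u = BDK_CSR_READ(node, BDK_RST_RESET_ACTIVE);
+ } while (!act.s.mcp);
+ BDK_CSR_WRITE(node, BDK_RST_MCP_DOMAIN_W1C, 1); /* release soft reset */
+}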
+
+/**
+ * Register (RSL) rst_msix_pba#
+ *
+ * RST MSI-X Pending Bit Array Registers
+ * This register is the MSI-X PBA table; the bit number is indexed by the RST_INT_VEC_E
+ * enumeration.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_msix_pbax
+{
+ uint64_t u;
+ struct bdk_rst_msix_pbax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pend : 64; /**< [ 63: 0](RO/H) Pending message for the associated RST_MSIX_VEC()_CTL, enumerated by
+ RST_INT_VEC_E. Bits that have no associated RST_INT_VEC_E are 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t pend : 64; /**< [ 63: 0](RO/H) Pending message for the associated RST_MSIX_VEC()_CTL, enumerated by
+ RST_INT_VEC_E. Bits that have no associated RST_INT_VEC_E are 0. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_msix_pbax_s cn8; */
+ struct bdk_rst_msix_pbax_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pend : 64; /**< [ 63: 0](RO/H) Pending message for the associated RST_MSIX_VEC()_CTL, enumerated by
+ RST_INT_VEC_E. Bits that have no associated RST_INT_VEC_E are 0.
+ This field is always reinitialized on a chip domain reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t pend : 64; /**< [ 63: 0](RO/H) Pending message for the associated RST_MSIX_VEC()_CTL, enumerated by
+ RST_INT_VEC_E. Bits that have no associated RST_INT_VEC_E are 0.
+ This field is always reinitialized on a chip domain reset. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_rst_msix_pbax bdk_rst_msix_pbax_t;
+
+static inline uint64_t BDK_RST_MSIX_PBAX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_MSIX_PBAX(unsigned long a)
+{
+ if (a==0)
+ return 0x87e006ff0000ll + 8ll * ((a) & 0x0);
+ __bdk_csr_fatal("RST_MSIX_PBAX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_MSIX_PBAX(a) bdk_rst_msix_pbax_t
+#define bustype_BDK_RST_MSIX_PBAX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_MSIX_PBAX(a) "RST_MSIX_PBAX"
+#define device_bar_BDK_RST_MSIX_PBAX(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_RST_MSIX_PBAX(a) (a)
+#define arguments_BDK_RST_MSIX_PBAX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) rst_msix_vec#_addr
+ *
+ * RST MSI-X Vector-Table Address Register
+ * This register is the MSI-X vector table, indexed by the RST_INT_VEC_E enumeration.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_msix_vecx_addr
+{
+ uint64_t u;
+ struct bdk_rst_msix_vecx_addr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_53_63 : 11;
+ uint64_t addr : 51; /**< [ 52: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_1 : 1;
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector can be read or written by either secure or nonsecure states.
+ 1 = This vector's RST_MSIX_VEC()_ADDR, RST_MSIX_VEC()_CTL, and
+ corresponding bit of RST_MSIX_PBA() are RAZ/WI and do not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_RST_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+ set, all vectors are secure and function as if [SECVEC] was set. */
+#else /* Word 0 - Little Endian */
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector can be read or written by either secure or nonsecure states.
+ 1 = This vector's RST_MSIX_VEC()_ADDR, RST_MSIX_VEC()_CTL, and
+ corresponding bit of RST_MSIX_PBA() are RAZ/WI and do not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_RST_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+ set, all vectors are secure and function as if [SECVEC] was set. */
+ uint64_t reserved_1 : 1;
+ uint64_t addr : 51; /**< [ 52: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_53_63 : 11;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_rst_msix_vecx_addr_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_1 : 1;
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector can be read or written by either secure or nonsecure states.
+ 1 = This vector's RST_MSIX_VEC()_ADDR, RST_MSIX_VEC()_CTL, and
+ corresponding bit of RST_MSIX_PBA() are RAZ/WI and do not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_RST_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+ set, all vectors are secure and function as if [SECVEC] was set. */
+#else /* Word 0 - Little Endian */
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector can be read or written by either secure or nonsecure states.
+ 1 = This vector's RST_MSIX_VEC()_ADDR, RST_MSIX_VEC()_CTL, and
+ corresponding bit of RST_MSIX_PBA() are RAZ/WI and do not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_RST_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+ set, all vectors are secure and function as if [SECVEC] was set. */
+ uint64_t reserved_1 : 1;
+ uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_rst_msix_vecx_addr_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_53_63 : 11;
+ uint64_t addr : 51; /**< [ 52: 2](R/W) IOVA to use for MSI-X delivery of this vector.
+ This field is always reinitialized on a chip domain reset. */
+ uint64_t reserved_1 : 1;
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector can be read or written by either secure or nonsecure states.
+ 1 = This vector's RST_MSIX_VEC()_ADDR, RST_MSIX_VEC()_CTL, and
+ corresponding bit of RST_MSIX_PBA() are RAZ/WI and do not cause
+ a fault when accessed by the nonsecure world.
+
+ If PCCPF_RST_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+ set, all vectors are secure and function as if [SECVEC] was set.
+ This field is always reinitialized on a chip domain reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector can be read or written by either secure or nonsecure states.
+ 1 = This vector's RST_MSIX_VEC()_ADDR, RST_MSIX_VEC()_CTL, and
+ corresponding bit of RST_MSIX_PBA() are RAZ/WI and do not cause
+ a fault when accessed by the nonsecure world.
+
+ If PCCPF_RST_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+ set, all vectors are secure and function as if [SECVEC] was set.
+ This field is always reinitialized on a chip domain reset. */
+ uint64_t reserved_1 : 1;
+ uint64_t addr : 51; /**< [ 52: 2](R/W) IOVA to use for MSI-X delivery of this vector.
+ This field is always reinitialized on a chip domain reset. */
+ uint64_t reserved_53_63 : 11;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_rst_msix_vecx_addr bdk_rst_msix_vecx_addr_t;
+
+static inline uint64_t BDK_RST_MSIX_VECX_ADDR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_MSIX_VECX_ADDR(unsigned long a)
+{
+ if (a==0)
+ return 0x87e006f00000ll + 0x10ll * ((a) & 0x0);
+ __bdk_csr_fatal("RST_MSIX_VECX_ADDR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_MSIX_VECX_ADDR(a) bdk_rst_msix_vecx_addr_t
+#define bustype_BDK_RST_MSIX_VECX_ADDR(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_MSIX_VECX_ADDR(a) "RST_MSIX_VECX_ADDR"
+#define device_bar_BDK_RST_MSIX_VECX_ADDR(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_RST_MSIX_VECX_ADDR(a) (a)
+#define arguments_BDK_RST_MSIX_VECX_ADDR(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) rst_msix_vec#_ctl
+ *
+ * RST MSI-X Vector-Table Control and Data Register
+ * This register is the MSI-X vector table, indexed by the RST_INT_VEC_E enumeration.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_msix_vecx_ctl
+{
+ uint64_t u;
+ struct bdk_rst_msix_vecx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
+ uint64_t data : 32; /**< [ 31: 0](R/W) Data to use for MSI-X delivery of this vector. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 32; /**< [ 31: 0](R/W) Data to use for MSI-X delivery of this vector. */
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_rst_msix_vecx_ctl_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t data : 20; /**< [ 19: 0](R/W) Data to use for MSI-X delivery of this vector. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 20; /**< [ 19: 0](R/W) Data to use for MSI-X delivery of this vector. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_rst_msix_vecx_ctl_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector.
+ This field is always reinitialized on a chip domain reset. */
+ uint64_t data : 32; /**< [ 31: 0](R/W) Data to use for MSI-X delivery of this vector.
+ This field is always reinitialized on a chip domain reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 32; /**< [ 31: 0](R/W) Data to use for MSI-X delivery of this vector.
+ This field is always reinitialized on a chip domain reset. */
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector.
+ This field is always reinitialized on a chip domain reset. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_rst_msix_vecx_ctl bdk_rst_msix_vecx_ctl_t;
+
+static inline uint64_t BDK_RST_MSIX_VECX_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_MSIX_VECX_CTL(unsigned long a)
+{
+ if (a==0)
+ return 0x87e006f00008ll + 0x10ll * ((a) & 0x0);
+ __bdk_csr_fatal("RST_MSIX_VECX_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_MSIX_VECX_CTL(a) bdk_rst_msix_vecx_ctl_t
+#define bustype_BDK_RST_MSIX_VECX_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_MSIX_VECX_CTL(a) "RST_MSIX_VECX_CTL"
+#define device_bar_BDK_RST_MSIX_VECX_CTL(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_RST_MSIX_VECX_CTL(a) (a)
+#define arguments_BDK_RST_MSIX_VECX_CTL(a) (a),-1,-1,-1
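+
+/* Illustrative sketch: programming RST MSI-X vector 0 (the only vector, per the
+ * address decode above) with a delivery address and data word, then unmasking
+ * it. The IOVA and data values are placeholders; real values come from whatever
+ * interrupt target (e.g. the GIC) is in use. Assumes BDK_CSR_WRITE() from
+ * bdk-csr.h; the function name is hypothetical.
+ */
+static inline void example_setup_rst_msix(bdk_node_t node, uint64_t iova, uint32_t data)
+{
+ union bdk_rst_msix_vecx_addr va = { .u = 0 };
+ union bdk_rst_msix_vecx_ctl vc = { .u = 0 };
+ va.s.addr = iova >> 2; /* [ADDR] holds IOVA bits <52:2> */
+ BDK_CSR_WRITE(node, BDK_RST_MSIX_VECX_ADDR(0), va.u);
+ vc.s.data = data;
+ vc.s.mask = 0; /* allow delivery */
+ BDK_CSR_WRITE(node, BDK_RST_MSIX_VECX_CTL(0), vc.u);
+}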
+
+/**
+ * Register (RSL) rst_ocx
+ *
+ * RST OCX Register
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_ocx
+{
+ uint64_t u;
+ struct bdk_rst_ocx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_3_63 : 61;
+ uint64_t rst_link : 3; /**< [ 2: 0](R/W) Controls whether the corresponding OCX link going down causes a chip reset. A warm/soft reset
+ does not change this field. On cold reset, this field is initialized to 0. See
+ OCX_COM_LINK()_CTL for a description of what events can contribute to the link_down
+ condition. */
+#else /* Word 0 - Little Endian */
+ uint64_t rst_link : 3; /**< [ 2: 0](R/W) Controls whether the corresponding OCX link going down causes a chip reset. A warm/soft reset
+ does not change this field. On cold reset, this field is initialized to 0. See
+ OCX_COM_LINK()_CTL for a description of what events can contribute to the link_down
+ condition. */
+ uint64_t reserved_3_63 : 61;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_rst_ocx_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t rst_link : 1; /**< [ 0: 0](R/W) Controls whether the OCX CCPI link going down causes a reset.
+ This field is reinitialized with a core domain reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t rst_link : 1; /**< [ 0: 0](R/W) Controls whether the OCX CCPI link going down causes a reset.
+ This field is reinitialized with a core domain reset. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_rst_ocx_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_3_63 : 61;
+ uint64_t rst_link : 3; /**< [ 2: 0](R/W) Reserved. */
+#else /* Word 0 - Little Endian */
+ uint64_t rst_link : 3; /**< [ 2: 0](R/W) Reserved. */
+ uint64_t reserved_3_63 : 61;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_rst_ocx_s cn88xx; */
+ /* struct bdk_rst_ocx_cn81xx cn83xx; */
+};
+typedef union bdk_rst_ocx bdk_rst_ocx_t;
+
+#define BDK_RST_OCX BDK_RST_OCX_FUNC()
+static inline uint64_t BDK_RST_OCX_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_OCX_FUNC(void)
+{
+ return 0x87e006001618ll;
+}
+
+#define typedef_BDK_RST_OCX bdk_rst_ocx_t
+#define bustype_BDK_RST_OCX BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_OCX "RST_OCX"
+#define device_bar_BDK_RST_OCX 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_OCX 0
+#define arguments_BDK_RST_OCX -1,-1,-1,-1
+
+/**
+ * Register (RSL) rst_osc_cntr
+ *
+ * INTERNAL: RST Internal Ring-Oscillator Counter Register
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_osc_cntr
+{
+ uint64_t u;
+ struct bdk_rst_osc_cntr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t cnt : 64; /**< [ 63: 0](RO/H) Internal ring-oscillator clock count. Updated every 16 reference clocks. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt : 64; /**< [ 63: 0](RO/H) Internal ring-oscillator clock count. Updated every 16 reference clocks. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_osc_cntr_s cn8; */
+ struct bdk_rst_osc_cntr_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t cnt : 64; /**< [ 63: 0](RO/H) Internal ring-oscillator clock count.
+ Updated every 16 PLL reference clocks. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt : 64; /**< [ 63: 0](RO/H) Internal ring-oscillator clock count.
+ Updated every 16 PLL reference clocks. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_rst_osc_cntr bdk_rst_osc_cntr_t;
+
+#define BDK_RST_OSC_CNTR BDK_RST_OSC_CNTR_FUNC()
+static inline uint64_t BDK_RST_OSC_CNTR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_OSC_CNTR_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x87e006001778ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x87e006001778ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X))
+ return 0x87e006001778ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e006001768ll;
+ __bdk_csr_fatal("RST_OSC_CNTR", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_OSC_CNTR bdk_rst_osc_cntr_t
+#define bustype_BDK_RST_OSC_CNTR BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_OSC_CNTR "RST_OSC_CNTR"
+#define device_bar_BDK_RST_OSC_CNTR 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_OSC_CNTR 0
+#define arguments_BDK_RST_OSC_CNTR -1,-1,-1,-1
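+
+/* Illustrative sketch: sampling the ring-oscillator counter over a fixed window.
+ * Since [CNT] advances once per 16 reference clocks, the delta between two reads
+ * gives a rough, diagnostic-only measure of ring-oscillator speed. Assumes the
+ * BDK's bdk_wait_usec() busy-wait helper; the function name and the 1 ms window
+ * are arbitrary choices for the sketch.
+ */
+static inline uint64_t example_osc_cntr_delta(bdk_node_t node)
+{
+ uint64_t before = BDK_CSR_READ(node, BDK_RST_OSC_CNTR);
+ bdk_wait_usec(1000); /* 1 ms sample window */
+ return BDK_CSR_READ(node, BDK_RST_OSC_CNTR) - before;
+}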
+
+/**
+ * Register (RSL) rst_out_ctl
+ *
+ * RST External Reset Control Register
+ * This register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_out_ctl
+{
+ uint64_t u;
+ struct bdk_rst_out_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t scp_rst : 1; /**< [ 3: 3](R/W) SCP reset output. When set by software, this field drives the GPIO_PIN_SEL_E::SCP_RESET_OUT
+ selectable pin active. The pin can be assigned using GPIO_BIT_CFG(). If this
+ field is set by software then it must also be cleared to deassert the pin.
+ The pin is also automatically asserted and deasserted by hardware during an SCP
+ domain reset.
+ This field is always reinitialized on an SCP domain reset. */
+ uint64_t mcp_rst : 1; /**< [ 2: 2](R/W) MCP reset output. When set by software, this field drives the GPIO_PIN_SEL_E::MCP_RESET_OUT
+ selectable pin active. The pin can be assigned using GPIO_BIT_CFG(). If this
+ field is set by software then it must also be cleared to deassert the pin.
+ The pin is also automatically asserted and deasserted by hardware during an MCP
+ domain reset.
+ This field is always reinitialized on an MCP domain reset. */
+ uint64_t core_rst : 1; /**< [ 1: 1](R/W) Core reset output. When set by software, this field drives the GPIO_PIN_SEL_E::CORE_RESET_OUT
+ selectable pin active. The pin can be assigned using GPIO_BIT_CFG(). If this
+ field is set by software then it must also be cleared to deassert the pin.
+ The pin is also automatically asserted and deasserted by hardware during a core
+ domain reset.
+ This field is always reinitialized on a core domain reset. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t core_rst : 1; /**< [ 1: 1](R/W) Core reset output. When set by software, this field drives the GPIO_PIN_SEL_E::CORE_RESET_OUT
+ selectable pin active. The pin can be assigned using GPIO_BIT_CFG(). If this
+ field is set by software then it must also be cleared to deassert the pin.
+ The pin is also automatically asserted and deasserted by hardware during a core
+ domain reset.
+ This field is always reinitialized on a core domain reset. */
+ uint64_t mcp_rst : 1; /**< [ 2: 2](R/W) MCP reset output. When set by software, this field drives the GPIO_PIN_SEL_E::MCP_RESET_OUT
+ selectable pin active. The pin can be assigned using GPIO_BIT_CFG(). If this
+ field is set by software then it must also be cleared to deassert the pin.
+ The pin is also automatically asserted and deasserted by hardware during an MCP
+ domain reset.
+ This field is always reinitialized on an MCP domain reset. */
+ uint64_t scp_rst : 1; /**< [ 3: 3](R/W) SCP reset output. When set by software, this field drives the GPIO_PIN_SEL_E::SCP_RESET_OUT
+ selectable pin active. The pin can be assigned using GPIO_BIT_CFG(). If this
+ field is set by software then it must also be cleared to deassert the pin.
+ The pin is also automatically asserted and deasserted by hardware during an SCP
+ domain reset.
+ This field is always reinitialized on an SCP domain reset. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_rst_out_ctl_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t soft_rst : 1; /**< [ 0: 0](R/W) Soft reset. When set to 1 by software, this field drives the CHIP_RESET_OUT_L pin
+ active low. In this case the field must also be cleared by software to deassert
+ the pin. The pin is also automatically asserted and deasserted by hardware
+ during a cold/warm/soft reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t soft_rst : 1; /**< [ 0: 0](R/W) Soft reset. When set to 1 by software, this field drives the CHIP_RESET_OUT_L pin
+ active low. In this case the field must also be cleared by software to deassert
+ the pin. The pin is also automatically asserted and deasserted by hardware
+ during a cold/warm/soft reset. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_rst_out_ctl_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t scp_rst : 1; /**< [ 3: 3](R/W) SCP reset output. When set by software, this field drives the GPIO_PIN_SEL_E::SCP_RESET_OUT
+ selectable pin active. The pin can be assigned using GPIO_BIT_CFG(). If this
+ field is set by software then it must also be cleared to deassert the pin.
+ The pin is also automatically asserted and deasserted by hardware during an SCP
+ domain reset.
+ This field is always reinitialized on an SCP domain reset. */
+ uint64_t mcp_rst : 1; /**< [ 2: 2](R/W) MCP reset output. When set by software, this field drives the GPIO_PIN_SEL_E::MCP_RESET_OUT
+ selectable pin active. The pin can be assigned using GPIO_BIT_CFG(). If this
+ field is set by software then it must also be cleared to deassert the pin.
+ The pin is also automatically asserted and deasserted by hardware during an MCP
+ domain reset.
+ This field is always reinitialized on an MCP domain reset. */
+ uint64_t core_rst : 1; /**< [ 1: 1](R/W) Core reset output. When set by software, this field drives the GPIO_PIN_SEL_E::CORE_RESET_OUT
+ selectable pin active. The pin can be assigned using GPIO_BIT_CFG(). If this
+ field is set by software then it must also be cleared to deassert the pin.
+ The pin is also automatically asserted and deasserted by hardware during a core
+ domain reset.
+ This field is always reinitialized on a core domain reset. */
+ uint64_t chip_rst : 1; /**< [ 0: 0](R/W) Chip domain reset output. When set to one by software, this field drives the
+ CHIP_RESET_OUT_L pin active low. If this field is set by software then it must also be
+ cleared to deassert the pin. The pin is also automatically asserted and deasserted by
+ hardware during a chip domain reset.
+ This field is always reinitialized on a chip domain reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t chip_rst : 1; /**< [ 0: 0](R/W) Chip domain reset output. When set to one by software, this field drives the
+ CHIP_RESET_OUT_L pin active low. If this field is set by software then it must also be
+ cleared to deassert the pin. The pin is also automatically asserted and deasserted by
+ hardware during a chip domain reset.
+ This field is always reinitialized on a chip domain reset. */
+ uint64_t core_rst : 1; /**< [ 1: 1](R/W) Core reset output. When set by software, this field drives the GPIO_PIN_SEL_E::CORE_RESET_OUT
+ selectable pin active. The pin can be assigned using GPIO_BIT_CFG(). If this
+ field is set by software then it must also be cleared to deassert the pin.
+ The pin is also automatically asserted and deasserted by hardware during a core
+ domain reset.
+ This field is always reinitialized on a core domain reset. */
+ uint64_t mcp_rst : 1; /**< [ 2: 2](R/W) MCP reset output. When set by software, this field drives the GPIO_PIN_SEL_E::MCP_RESET_OUT
+ selectable pin active. The pin can be assigned using GPIO_BIT_CFG(). If this
+ field is set by software then it must also be cleared to deassert the pin.
+ The pin is also automatically asserted and deasserted by hardware during an MCP
+ domain reset.
+ This field is always reinitialized on an MCP domain reset. */
+ uint64_t scp_rst : 1; /**< [ 3: 3](R/W) SCP reset output. When set by software, this field drives the GPIO_PIN_SEL_E::SCP_RESET_OUT
+ selectable pin active. The pin can be assigned using GPIO_BIT_CFG(). If this
+ field is set by software then it must also be cleared to deassert the pin.
+ The pin is also automatically asserted and deasserted by hardware during an SCP
+ domain reset.
+ This field is always reinitialized on an SCP domain reset. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_rst_out_ctl bdk_rst_out_ctl_t;
+
+#define BDK_RST_OUT_CTL BDK_RST_OUT_CTL_FUNC()
+static inline uint64_t BDK_RST_OUT_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_OUT_CTL_FUNC(void)
+{
+ return 0x87e006001688ll;
+}
+
+#define typedef_BDK_RST_OUT_CTL bdk_rst_out_ctl_t
+#define bustype_BDK_RST_OUT_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_OUT_CTL "RST_OUT_CTL"
+#define device_bar_BDK_RST_OUT_CTL 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_OUT_CTL 0
+#define arguments_BDK_RST_OUT_CTL -1,-1,-1,-1
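+
+/* Illustrative sketch: manually pulsing the external CHIP_RESET_OUT_L pin on
+ * CN9XXX through [CHIP_RST]. As the field description says, a bit set by
+ * software must also be cleared by software to deassert the pin. The 1 ms pulse
+ * width is an arbitrary placeholder, not a datasheet requirement, and the
+ * function name is hypothetical.
+ */
+static inline void example_pulse_chip_reset_out(bdk_node_t node)
+{
+ union bdk_rst_out_ctl ctl = { .u = BDK_CSR_READ(node, BDK_RST_OUT_CTL) };
+ ctl.cn9.chip_rst = 1;
+ BDK_CSR_WRITE(node, BDK_RST_OUT_CTL, ctl.u); /* drive the pin active low */
+ bdk_wait_usec(1000);
+ ctl.cn9.chip_rst = 0;
+ BDK_CSR_WRITE(node, BDK_RST_OUT_CTL, ctl.u); /* deassert the pin */
+}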
+
+/**
+ * Register (RSL) rst_pll_limit
+ *
+ * RST PLL Maximum Frequency Limit Register
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_pll_limit
+{
+ uint64_t u;
+ struct bdk_rst_pll_limit_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_23_63 : 41;
+ uint64_t cpt_max_mul : 7; /**< [ 22: 16](R/W/H) Crypto clock maximum PLL multiplier.
+ This field is used to limit the RST_CPT_PLL[CUR_MUL] value.
+ A value of zero is considered unlimited. Once the value
+ of this field is nonzero, any new values written into this field
+ cannot exceed the previous value. Values 1-3 are considered illegal
+ since the minimum PLL frequency is 200 MHz.
+
+ Internal:
+ The field is initialized to FUS_FUSE_NUM_E::CPT_MAX_MUL() fuses on a chip domain reset. */
+ uint64_t reserved_15 : 1;
+ uint64_t core_max_mul : 7; /**< [ 14: 8](R/W/H) Core clock maximum PLL multiplier.
+ This field is used to limit the RST_CORE_PLL[CUR_MUL] value.
+ A value of zero is considered unlimited. Once the value
+ of this field is nonzero, any new values written into this field
+ cannot exceed the previous value. Values 1-5 are considered illegal
+ since the minimum PLL frequency is 300 MHz.
+
+ Internal:
+ The field is initialized to FUS_FUSE_NUM_E::CORE_MAX_MUL() fuses on a chip domain reset. */
+ uint64_t reserved_7 : 1;
+ uint64_t pnr_max_mul : 7; /**< [ 6: 0](R/W/H) Coprocessor clock maximum PLL multiplier.
+ This field is used to limit the RST_PNR_PLL[CUR_MUL] value.
+ A value of zero is considered unlimited. Once the value
+ of this field is nonzero, any new values written into this field
+ cannot exceed the previous value. Values 1-5 are considered illegal
+ since the minimum PLL frequency is 300 MHz.
+
+ Internal:
+ The field is initialized to FUS_FUSE_NUM_E::PNR_MAX_MUL() fuses on a chip domain
+ reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t pnr_max_mul : 7; /**< [ 6: 0](R/W/H) Coprocessor clock maximum PLL multiplier.
+ This field is used to limit the RST_PNR_PLL[CUR_MUL] value.
+ A value of zero is considered unlimited. Once the value
+ of this field is nonzero, any new values written into this field
+ cannot exceed the previous value. Values 1-5 are considered illegal
+ since the minimum PLL frequency is 300 MHz.
+
+ Internal:
+ The field is initialized to FUS_FUSE_NUM_E::PNR_MAX_MUL() fuses on a chip domain
+ reset. */
+ uint64_t reserved_7 : 1;
+ uint64_t core_max_mul : 7; /**< [ 14: 8](R/W/H) Core clock maximum PLL multiplier.
+ This field is used to limit the RST_CORE_PLL[CUR_MUL] value.
+ A value of zero is considered unlimited. Once the value
+ of this field is nonzero, any new values written into this field
+ cannot exceed the previous value. Values 1-5 are considered illegal
+ since the minimum PLL frequency is 300 MHz.
+
+ Internal:
+ The field is initialized to FUS_FUSE_NUM_E::CORE_MAX_MUL() fuses on a chip domain reset. */
+ uint64_t reserved_15 : 1;
+ uint64_t cpt_max_mul : 7; /**< [ 22: 16](R/W/H) Crypto clock maximum PLL multiplier.
+ This field is used to limit the RST_CPT_PLL[CUR_MUL] value.
+ A value of zero is considered unlimited. Once the value
+ of this field is nonzero, any new values written into this field
+ cannot exceed the previous value. Values 1-3 are considered illegal
+ since the minimum PLL frequency is 200 MHz.
+
+ Internal:
+ The field is initialized to FUS_FUSE_NUM_E::CPT_MAX_MUL() fuses on a chip domain reset. */
+ uint64_t reserved_23_63 : 41;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_pll_limit_s cn; */
+};
+typedef union bdk_rst_pll_limit bdk_rst_pll_limit_t;
+
+#define BDK_RST_PLL_LIMIT BDK_RST_PLL_LIMIT_FUNC()
+static inline uint64_t BDK_RST_PLL_LIMIT_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_PLL_LIMIT_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e00a001790ll;
+ __bdk_csr_fatal("RST_PLL_LIMIT", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_PLL_LIMIT bdk_rst_pll_limit_t
+#define bustype_BDK_RST_PLL_LIMIT BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_PLL_LIMIT "RST_PLL_LIMIT"
+#define device_bar_BDK_RST_PLL_LIMIT 0x2 /* PF_BAR2 */
+#define busnum_BDK_RST_PLL_LIMIT 0
+#define arguments_BDK_RST_PLL_LIMIT -1,-1,-1,-1
+
+/**
+ * Register (RSL) rst_pnr_pll
+ *
+ * RST Coprocessor Clock PLL Control Register
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_pnr_pll
+{
+ uint64_t u;
+ struct bdk_rst_pnr_pll_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_51_63 : 13;
+ uint64_t cout_sel : 2; /**< [ 50: 49](R/W) Coprocessor clockout select.
+ 0x0 = Coprocessor clock divided by 16.
+ 0x1 = Coprocessor clock tree output divided by 16.
+ 0x2 = PLL0 output divided by 16.
+ 0x3 = PLL1 output divided by 16.
+
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t cout_reset : 1; /**< [ 48: 48](R/W) Coprocessor clockout reset. The coprocessor clockout should be placed in
+ reset at least 10 PLL reference clocks prior
+ to changing [COUT_SEL]. It should remain under reset for at least 10
+ PLL reference clocks after [COUT_SEL] changes.
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t reserved_45_47 : 3;
+ uint64_t pd_switch : 1; /**< [ 44: 44](R/W) PLL powerdown on switch. When set, hardware automatically
+ powers down the inactive PLL after the switch has occurred.
+ When cleared, the inactive PLL remains in operation.
+ If [PD_SWITCH] is written to a one while both [DLY_SWITCH] and
+ [NXT_PGM] are cleared, then the inactive PLL will immediately power down.
+
+ Note that a powered down PLL requires an additional 575 reference
+ clocks to become active. This time is automatically added by the
+ hardware.
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t dly_switch : 12; /**< [ 43: 32](R/W/H) Switch the active PLL after delaying this number of 100 MHz clocks.
+ When set to a nonzero value, the hardware will wait for
+ any PLL programming to complete and then switch to the inactive
+ PLL after the specified number of PLL reference clocks. Hardware
+ will add additional clocks if required.
+ This field is always reinitialized on a cold domain reset.
+
+ Internal:
+ Hardware will add counts to maintain 256 cpt_clk/sclk/rclk notification to hardware.
+ Additional time will be added to wake up powered-down AP cores, but that
+ time will not be included in this count. */
+ uint64_t pll1_pd : 1; /**< [ 31: 31](RO) PNR PLL1 power down. When set, the PLL is currently powered down. */
+ uint64_t pll0_pd : 1; /**< [ 30: 30](RO) PNR PLL0 power down. When set, the PLL is currently powered down. */
+ uint64_t reserved_23_29 : 7;
+ uint64_t init_mul : 7; /**< [ 22: 16](R/W) Coprocessor clock multiplier to be used during a core or chip domain
+ reset. Actual frequency is [INIT_MUL] * 50 MHz. The actual value
+ used is limited by RST_PLL_LIMIT[PNR_MAX_MUL].
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t nxt_pgm : 1; /**< [ 15: 15](R/W/H) Program the inactive PLL using [NXT_MUL]. Hardware automatically
+ clears this bit when both the PLL update and any delay specified
+ in [DLY_SWITCH] have completed.
+ This field is always reinitialized on a chip domain reset. */
+ uint64_t nxt_mul : 7; /**< [ 14: 8](R/W) Coprocessor PLL frequency to be programmed in 50 MHz increments. The
+ actual value used is limited by RST_PLL_LIMIT[PNR_MAX_MUL] and
+ a minimum setting of 300 MHz.
+ Value will match [INIT_MUL] immediately after a cold or chip domain reset. */
+ uint64_t active_pll : 1; /**< [ 7: 7](RO) Indicates which physical PLL is in use. For diagnostic use only. */
+ uint64_t cur_mul : 7; /**< [ 6: 0](RO/H) Coprocessor clock frequency. Actual frequency is [CUR_MUL] * 50 MHz.
+ Value will reflect [NXT_MUL] after [DLY_SWITCH] has completed or [INIT_MUL]
+ immediately after a cold or chip domain reset. In both cases, value
+ is limited by RST_PLL_LIMIT[PNR_MAX_MUL]. */
+#else /* Word 0 - Little Endian */
+ uint64_t cur_mul : 7; /**< [ 6: 0](RO/H) Coprocessor clock frequency. Actual frequency is [CUR_MUL] * 50 MHz.
+ Value will reflect [NXT_MUL] after [DLY_SWITCH] has completed or [INIT_MUL]
+ immediately after a cold or chip domain reset. In both cases, value
+ is limited by RST_PLL_LIMIT[PNR_MAX_MUL]. */
+ uint64_t active_pll : 1; /**< [ 7: 7](RO) Indicates which physical PLL is in use. For diagnostic use only. */
+ uint64_t nxt_mul : 7; /**< [ 14: 8](R/W) Coprocessor PLL frequency to be programmed in 50 MHz increments. The
+ actual value used is limited by RST_PLL_LIMIT[PNR_MAX_MUL] and
+ a minimum setting of 300 MHz.
+ Value will match [INIT_MUL] immediately after a cold or chip domain reset. */
+ uint64_t nxt_pgm : 1; /**< [ 15: 15](R/W/H) Program the inactive PLL using [NXT_MUL]. Hardware automatically
+ clears this bit when both the PLL update and any delay specified
+ in [DLY_SWITCH] have completed.
+ This field is always reinitialized on a chip domain reset. */
+ uint64_t init_mul : 7; /**< [ 22: 16](R/W) Coprocessor clock multiplier to be used during a core or chip domain
+ reset. Actual frequency is [INIT_MUL] * 50 MHz. The actual value
+ used is limited by RST_PLL_LIMIT[PNR_MAX_MUL].
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t reserved_23_29 : 7;
+ uint64_t pll0_pd : 1; /**< [ 30: 30](RO) PNR PLL0 power down. When set, the PLL is currently powered down. */
+ uint64_t pll1_pd : 1; /**< [ 31: 31](RO) PNR PLL1 power down. When set, the PLL is currently powered down. */
+ uint64_t dly_switch : 12; /**< [ 43: 32](R/W/H) Switch the active PLL after delaying this number of 100 MHz clocks.
+ When set to a nonzero value, the hardware will wait for
+ any PLL programming to complete and then switch to the inactive
+ PLL after the specified number of PLL reference clocks. Hardware
+ will add additional clocks if required.
+ This field is always reinitialized on a cold domain reset.
+
+ Internal:
+ Hardware will add counts to maintain 256 cpt_clk/sclk/rclk notification to hardware.
+ Additional time will be added to wake up powered-down AP cores, but that
+ time will not be included in this count. */
+ uint64_t pd_switch : 1; /**< [ 44: 44](R/W) PLL powerdown on switch. When set, hardware automatically
+ powers down the inactive PLL after the switch has occurred.
+ When cleared, the inactive PLL remains in operation.
+ If [PD_SWITCH] is written to a one while both [DLY_SWITCH] and
+ [NXT_PGM] are cleared, then the inactive PLL will immediately power down.
+
+ Note that a powered down PLL requires an additional 575 reference
+ clocks to become active. This time is automatically added by the
+ hardware.
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t reserved_45_47 : 3;
+ uint64_t cout_reset : 1; /**< [ 48: 48](R/W) Coprocessor clockout reset. The coprocessor clockout should be placed in
+ reset at least 10 PLL reference clocks prior
+ to changing [COUT_SEL]. It should remain under reset for at least 10
+ PLL reference clocks after [COUT_SEL] changes.
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t cout_sel : 2; /**< [ 50: 49](R/W) Coprocessor clockout select.
+ 0x0 = Coprocessor clock divided by 16.
+ 0x1 = Coprocessor clock tree output divided by 16.
+ 0x2 = PLL0 output divided by 16.
+ 0x3 = PLL1 output divided by 16.
+
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t reserved_51_63 : 13;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_pnr_pll_s cn; */
+};
+typedef union bdk_rst_pnr_pll bdk_rst_pnr_pll_t;
+
+#define BDK_RST_PNR_PLL BDK_RST_PNR_PLL_FUNC()
+static inline uint64_t BDK_RST_PNR_PLL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_PNR_PLL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e00a001788ll;
+ __bdk_csr_fatal("RST_PNR_PLL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_PNR_PLL bdk_rst_pnr_pll_t
+#define bustype_BDK_RST_PNR_PLL BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_PNR_PLL "RST_PNR_PLL"
+#define device_bar_BDK_RST_PNR_PLL 0x2 /* PF_BAR2 */
+#define busnum_BDK_RST_PNR_PLL 0
+#define arguments_BDK_RST_PNR_PLL -1,-1,-1,-1
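+
+/* Illustrative sketch: changing the coprocessor clock by programming the
+ * inactive PLL and letting hardware switch over, following the field
+ * descriptions above: write the new multiplier to [NXT_MUL], set [NXT_PGM] to
+ * program the inactive PLL, and give [DLY_SWITCH] a nonzero value so hardware
+ * performs the handoff; hardware clears [NXT_PGM] when done. The multiplier and
+ * delay here are placeholders, bounded in practice by RST_PLL_LIMIT[PNR_MAX_MUL]
+ * and the 300 MHz minimum. CN9XXX only; the function name is hypothetical.
+ */
+static inline void example_set_pnr_mult(bdk_node_t node, unsigned int mult_50mhz)
+{
+ union bdk_rst_pnr_pll pll = { .u = BDK_CSR_READ(node, BDK_RST_PNR_PLL) };
+ pll.s.nxt_mul = mult_50mhz; /* target frequency = mult_50mhz * 50 MHz */
+ pll.s.nxt_pgm = 1; /* program the inactive PLL */
+ pll.s.dly_switch = 100; /* switch roughly 100 reference clocks later */
+ BDK_CSR_WRITE(node, BDK_RST_PNR_PLL, pll.u);
+ do { /* wait for programming and switch to complete */
+ pll.u = BDK_CSR_READ(node, BDK_RST_PNR_PLL);
+ } while (pll.s.nxt_pgm);
+}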
+
+/**
+ * Register (RSL) rst_power_dbg
+ *
+ * RST Core-Power Debug-Control Register
+ */
+union bdk_rst_power_dbg
+{
+ uint64_t u;
+ struct bdk_rst_power_dbg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_3_63 : 61;
+ uint64_t str : 3; /**< [ 2: 0](R/W) Reserved.
+ Internal:
+ Internal power driver strength. Resets only on cold reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t str : 3; /**< [ 2: 0](R/W) Reserved.
+ Internal:
+ Internal power driver strength. Resets only on cold reset. */
+ uint64_t reserved_3_63 : 61;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_power_dbg_s cn; */
+};
+typedef union bdk_rst_power_dbg bdk_rst_power_dbg_t;
+
+#define BDK_RST_POWER_DBG BDK_RST_POWER_DBG_FUNC()
+static inline uint64_t BDK_RST_POWER_DBG_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_POWER_DBG_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x87e006001708ll;
+ __bdk_csr_fatal("RST_POWER_DBG", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_POWER_DBG bdk_rst_power_dbg_t
+#define bustype_BDK_RST_POWER_DBG BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_POWER_DBG "RST_POWER_DBG"
+#define device_bar_BDK_RST_POWER_DBG 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_POWER_DBG 0
+#define arguments_BDK_RST_POWER_DBG -1,-1,-1,-1
+
+/**
+ * Register (RSL) rst_pp_available
+ *
+ * RST Core Availability Register
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_pp_available
+{
+ uint64_t u;
+ struct bdk_rst_pp_available_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t present : 48; /**< [ 47: 0](RO) Each bit set indicates a physical core is present. */
+#else /* Word 0 - Little Endian */
+ uint64_t present : 48; /**< [ 47: 0](RO) Each bit set indicates a physical core is present. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_rst_pp_available_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t present : 24; /**< [ 23: 0](RO) Each bit set indicates a physical core is present. */
+#else /* Word 0 - Little Endian */
+ uint64_t present : 24; /**< [ 23: 0](RO) Each bit set indicates a physical core is present. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_rst_pp_available_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t present : 4; /**< [ 3: 0](RO) Each bit set indicates a physical core is present. */
+#else /* Word 0 - Little Endian */
+ uint64_t present : 4; /**< [ 3: 0](RO) Each bit set indicates a physical core is present. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_rst_pp_available_s cn88xx; */
+ /* struct bdk_rst_pp_available_cn9 cn83xx; */
+};
+typedef union bdk_rst_pp_available bdk_rst_pp_available_t;
+
+#define BDK_RST_PP_AVAILABLE BDK_RST_PP_AVAILABLE_FUNC()
+static inline uint64_t BDK_RST_PP_AVAILABLE_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_PP_AVAILABLE_FUNC(void)
+{
+ return 0x87e006001738ll;
+}
+
+#define typedef_BDK_RST_PP_AVAILABLE bdk_rst_pp_available_t
+#define bustype_BDK_RST_PP_AVAILABLE BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_PP_AVAILABLE "RST_PP_AVAILABLE"
+#define device_bar_BDK_RST_PP_AVAILABLE 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_PP_AVAILABLE 0
+#define arguments_BDK_RST_PP_AVAILABLE -1,-1,-1,-1
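+
+/* Illustrative sketch: deriving the usable core count from the availability
+ * mask. As the descriptions below note, it is the number of set bits in
+ * RST_PP_AVAILABLE (not the field width) that gives the core count. The
+ * function name is hypothetical.
+ */
+static inline int example_count_cores(bdk_node_t node)
+{
+ uint64_t present = BDK_CSR_READ(node, BDK_RST_PP_AVAILABLE);
+ return __builtin_popcountll(present); /* GCC/Clang builtin */
+}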
+
+/**
+ * Register (RSL) rst_pp_pending
+ *
+ * RST Cores Reset Pending Register
+ * This register contains the reset status for each core.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_pp_pending
+{
+ uint64_t u;
+ struct bdk_rst_pp_pending_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t pend : 48; /**< [ 47: 0](RO/H) Set if the corresponding core is waiting to change its reset state. Normally a reset
+ change occurs immediately, but if RST_PP_POWER[GATE] = 1 and the core is released from
+ reset, a delay of 64K core-clock cycles between each core reset applies to satisfy power
+ management.
+
+ The upper bits of this field remain accessible but will have no effect if the cores
+ are disabled. The number of bits set in RST_PP_AVAILABLE indicates the number of cores. */
+#else /* Word 0 - Little Endian */
+ uint64_t pend : 48; /**< [ 47: 0](RO/H) Set if the corresponding core is waiting to change its reset state. Normally a reset
+ change occurs immediately, but if RST_PP_POWER[GATE] = 1 and the core is released from
+ reset, a delay of 64K core-clock cycles between each core reset applies to satisfy power
+ management.
+
+ The upper bits of this field remain accessible but will have no effect if the cores
+ are disabled. The number of bits set in RST_PP_AVAILABLE indicates the number of cores. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_rst_pp_pending_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t pend : 24; /**< [ 23: 0](RO/H) Set if the corresponding core is waiting to change its reset state. Normally a reset
+ change occurs immediately, but if RST_PP_POWER[GATE] = 1 and the core is released from
+ reset, a delay of 32K core-clock cycles between each core reset applies to satisfy power
+ management.
+ This field is always reinitialized on a core domain reset.
+
+ The upper bits of this field remain accessible but will have no effect if the cores
+ are disabled. The number of bits set in RST_PP_AVAILABLE indicates the number of cores. */
+#else /* Word 0 - Little Endian */
+ uint64_t pend : 24; /**< [ 23: 0](RO/H) Set if the corresponding core is waiting to change its reset state. Normally a reset
+ change occurs immediately, but if RST_PP_POWER[GATE] = 1 and the core is released from
+ reset, a delay of 32K core-clock cycles between each core reset applies to satisfy power
+ management.
+ This field is always reinitialized on a core domain reset.
+
+ The upper bits of this field remain accessible but will have no effect if the cores
+ are disabled. The number of bits set in RST_PP_AVAILABLE indicates the number of cores. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_rst_pp_pending_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t pend : 4; /**< [ 3: 0](RO/H) Set if the corresponding core is waiting to change its reset state. Normally a reset
+ change occurs immediately, but if RST_PP_POWER[GATE] = 1 and the core is released from
+ reset, a delay of 64K core-clock cycles between each core reset applies to satisfy power
+ management.
+
+ The upper bits of this field remain accessible but will have no effect if the cores
+ are disabled. The number of bits set in RST_PP_AVAILABLE indicates the number of cores. */
+#else /* Word 0 - Little Endian */
+ uint64_t pend : 4; /**< [ 3: 0](RO/H) Set if corresponding core is waiting to change its reset state. Normally a reset change
+ occurs immediately but if RST_PP_POWER[GATE] = 1 and the core is released from
+ reset a delay of 64K core-clock cycles between each core reset applies to satisfy power
+ management.
+
+ The upper bits of this field remain accessible but will have no effect if the cores
+                                                                 are disabled. The number of bits set in RST_PP_AVAILABLE indicates the number of cores. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_rst_pp_pending_s cn88xx; */
+ struct bdk_rst_pp_pending_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t pend : 24; /**< [ 23: 0](RO/H) Set if corresponding core is waiting to change its reset state. Normally a reset change
+ occurs immediately but if RST_PP_POWER[GATE] = 1 and the core is released from
+ reset a delay of 64K core-clock cycles between each core reset applies to satisfy power
+ management.
+
+ The upper bits of this field remain accessible but will have no effect if the cores
+                                                                 are disabled. The number of bits set in RST_PP_AVAILABLE indicates the number of cores. */
+#else /* Word 0 - Little Endian */
+ uint64_t pend : 24; /**< [ 23: 0](RO/H) Set if corresponding core is waiting to change its reset state. Normally a reset change
+ occurs immediately but if RST_PP_POWER[GATE] = 1 and the core is released from
+ reset a delay of 64K core-clock cycles between each core reset applies to satisfy power
+ management.
+
+ The upper bits of this field remain accessible but will have no effect if the cores
+                                                                 are disabled. The number of bits set in RST_PP_AVAILABLE indicates the number of cores. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_rst_pp_pending bdk_rst_pp_pending_t;
+
+#define BDK_RST_PP_PENDING BDK_RST_PP_PENDING_FUNC()
+static inline uint64_t BDK_RST_PP_PENDING_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_PP_PENDING_FUNC(void)
+{
+ return 0x87e006001748ll;
+}
+
+#define typedef_BDK_RST_PP_PENDING bdk_rst_pp_pending_t
+#define bustype_BDK_RST_PP_PENDING BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_PP_PENDING "RST_PP_PENDING"
+#define device_bar_BDK_RST_PP_PENDING 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_PP_PENDING 0
+#define arguments_BDK_RST_PP_PENDING -1,-1,-1,-1
+
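+/* Editor's usage sketch, not part of the imported BDK source: with
+ * RST_PP_POWER[GATE] set, reset changes stagger by 64K (32K on CN9)
+ * core-clock cycles per core, so callers typically poll [PEND] until it
+ * clears. Assumes the BDK_CSR_* helpers and bdk_node_t from bdk-csr.h. */
+static inline void example_wait_pp_pending_clear(bdk_node_t node)
+{
+    while (BDK_CSR_READ(node, BDK_RST_PP_PENDING) != 0)
+        ; /* Spin until no core is waiting to change its reset state */
+}
+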
+/**
+ * Register (RSL) rst_pp_power
+ *
+ * RST Core-Power Gating-Control Register
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_pp_power
+{
+ uint64_t u;
+ struct bdk_rst_pp_power_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+        uint64_t gate                  : 48; /**< [ 47: 0](R/W) Power down enable. When a bit in this field and the corresponding
+                                                                 RST_PP_RESET bit are set, the core has voltage removed to save power.
+                                                                 In typical operation these bits are set up during initialization and
+                                                                 core resets are controlled through RST_PP_RESET. These bits can only be
+                                                                 changed when the corresponding core is in reset.
+
+                                                                 The upper bits of this field remain accessible but will have no effect if the cores
+                                                                 are disabled. The number of bits set in RST_PP_AVAILABLE indicates the number of cores. */
+#else /* Word 0 - Little Endian */
+        uint64_t gate                  : 48; /**< [ 47: 0](R/W) Power down enable. When a bit in this field and the corresponding
+                                                                 RST_PP_RESET bit are set, the core has voltage removed to save power.
+                                                                 In typical operation these bits are set up during initialization and
+                                                                 core resets are controlled through RST_PP_RESET. These bits can only be
+                                                                 changed when the corresponding core is in reset.
+
+                                                                 The upper bits of this field remain accessible but will have no effect if the cores
+                                                                 are disabled. The number of bits set in RST_PP_AVAILABLE indicates the number of cores. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_rst_pp_power_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t gate : 24; /**< [ 23: 0](R/W) Power down enable. When a bit in this field and the corresponding
+ RST_PP_RESET bit are set, the AP core is reduced to minimum power consumption.
+ In typical operation these bits are set up during initialization and the
+ AP core resets are controlled through RST_PP_RESET. These bits can only be
+ changed when the corresponding AP core is in reset.
+
+ The upper bits of this field remain accessible but will have no effect if the cores
+                                                                 are disabled. The number of bits set in RST_PP_AVAILABLE indicates the number of cores.
+ This field is always reinitialized on a core domain reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t gate : 24; /**< [ 23: 0](R/W) Power down enable. When a bit in this field and the corresponding
+ RST_PP_RESET bit are set, the AP core is reduced to minimum power consumption.
+ In typical operation these bits are set up during initialization and the
+ AP core resets are controlled through RST_PP_RESET. These bits can only be
+ changed when the corresponding AP core is in reset.
+
+ The upper bits of this field remain accessible but will have no effect if the cores
+                                                                 are disabled. The number of bits set in RST_PP_AVAILABLE indicates the number of cores.
+ This field is always reinitialized on a core domain reset. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_rst_pp_power_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+        uint64_t gate                  : 4;  /**< [  3:  0](R/W) Power down enable. When a bit in this field and the corresponding
+                                                                 RST_PP_RESET bit are set, the core has voltage removed to save power.
+                                                                 In typical operation these bits are set up during initialization and
+                                                                 core resets are controlled through RST_PP_RESET. These bits can only be
+                                                                 changed when the corresponding core is in reset.
+
+                                                                 The upper bits of this field remain accessible but will have no effect if the cores
+                                                                 are disabled. The number of bits set in RST_PP_AVAILABLE indicates the number of cores. */
+#else /* Word 0 - Little Endian */
+        uint64_t gate                  : 4;  /**< [  3:  0](R/W) Power down enable. When a bit in this field and the corresponding
+                                                                 RST_PP_RESET bit are set, the core has voltage removed to save power.
+                                                                 In typical operation these bits are set up during initialization and
+                                                                 core resets are controlled through RST_PP_RESET. These bits can only be
+                                                                 changed when the corresponding core is in reset.
+
+                                                                 The upper bits of this field remain accessible but will have no effect if the cores
+                                                                 are disabled. The number of bits set in RST_PP_AVAILABLE indicates the number of cores. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_rst_pp_power_s cn88xx; */
+ struct bdk_rst_pp_power_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+        uint64_t gate                  : 24; /**< [ 23:  0](R/W) Power down enable. When a bit in this field and the corresponding
+                                                                 RST_PP_RESET bit are set, the core has voltage removed to save power.
+                                                                 In typical operation these bits are set up during initialization and
+                                                                 core resets are controlled through RST_PP_RESET. These bits can only be
+                                                                 changed when the corresponding core is in reset.
+
+                                                                 The upper bits of this field remain accessible but will have no effect if the cores
+                                                                 are disabled. The number of bits set in RST_PP_AVAILABLE indicates the number of cores. */
+#else /* Word 0 - Little Endian */
+        uint64_t gate                  : 24; /**< [ 23:  0](R/W) Power down enable. When a bit in this field and the corresponding
+                                                                 RST_PP_RESET bit are set, the core has voltage removed to save power.
+                                                                 In typical operation these bits are set up during initialization and
+                                                                 core resets are controlled through RST_PP_RESET. These bits can only be
+                                                                 changed when the corresponding core is in reset.
+
+                                                                 The upper bits of this field remain accessible but will have no effect if the cores
+                                                                 are disabled. The number of bits set in RST_PP_AVAILABLE indicates the number of cores. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_rst_pp_power bdk_rst_pp_power_t;
+
+#define BDK_RST_PP_POWER BDK_RST_PP_POWER_FUNC()
+static inline uint64_t BDK_RST_PP_POWER_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_PP_POWER_FUNC(void)
+{
+ return 0x87e006001700ll;
+}
+
+#define typedef_BDK_RST_PP_POWER bdk_rst_pp_power_t
+#define bustype_BDK_RST_PP_POWER BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_PP_POWER "RST_PP_POWER"
+#define device_bar_BDK_RST_PP_POWER 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_PP_POWER 0
+#define arguments_BDK_RST_PP_POWER -1,-1,-1,-1
+
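+/* Editor's sketch, not part of the imported BDK source: [GATE] bits may
+ * only change while the corresponding core is held in reset, so the
+ * caller must already have asserted that core's RST_PP_RESET bit
+ * (defined further below). The raw .u image is used because the field
+ * layout differs per model. Assumes BDK_CSR_MODIFY from bdk-csr.h. */
+static inline void example_gate_core_power(bdk_node_t node, int core)
+{
+    BDK_CSR_MODIFY(c, node, BDK_RST_PP_POWER, c.u |= 1ull << core);
+}
+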
+/**
+ * Register (RSL) rst_pp_power_stat
+ *
+ * RST Core-Power Status Register
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_pp_power_stat
+{
+ uint64_t u;
+ struct bdk_rst_pp_power_stat_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t down : 48; /**< [ 47: 0](RO/H) Reserved.
+ Internal:
+ Core Powerdown. When set, each bit indicates the core is currently powered down.
+ Typically this occurs when the corresponding RST_PP_RESET and RST_PP_POWER bits are set.
+                                                                 If the core is powered down when RST_PP_PENDING and RST_PP_RESET are both clear,
+                                                                 the core should be reset again by setting the corresponding RST_PP_RESET bit and
+                                                                 then clearing it.
+
+                                                                 The upper bits of this field remain accessible but will have no effect if the cores
+                                                                 are disabled. The number of bits set in RST_PP_AVAILABLE indicates the number of cores. */
+#else /* Word 0 - Little Endian */
+ uint64_t down : 48; /**< [ 47: 0](RO/H) Reserved.
+ Internal:
+ Core Powerdown. When set, each bit indicates the core is currently powered down.
+ Typically this occurs when the corresponding RST_PP_RESET and RST_PP_POWER bits are set.
+                                                                 If the core is powered down when RST_PP_PENDING and RST_PP_RESET are both clear,
+                                                                 the core should be reset again by setting the corresponding RST_PP_RESET bit and
+                                                                 then clearing it.
+
+                                                                 The upper bits of this field remain accessible but will have no effect if the cores
+                                                                 are disabled. The number of bits set in RST_PP_AVAILABLE indicates the number of cores. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_rst_pp_power_stat_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t down : 24; /**< [ 23: 0](RO/H) Reserved.
+ Internal:
+ Core Powerdown. When set, each bit indicates the core is currently powered down.
+ Typically this occurs when the corresponding RST_PP_RESET and RST_PP_POWER bits are set.
+                                                                 If the core is powered down when RST_PP_PENDING and RST_PP_RESET are both clear,
+                                                                 the core should be reset again by setting the corresponding RST_PP_RESET bit and
+                                                                 then clearing it.
+
+                                                                 The upper bits of this field remain accessible but will have no effect if the cores
+                                                                 are disabled. The number of bits set in RST_PP_AVAILABLE indicates the number of cores. */
+#else /* Word 0 - Little Endian */
+ uint64_t down : 24; /**< [ 23: 0](RO/H) Reserved.
+ Internal:
+ Core Powerdown. When set, each bit indicates the core is currently powered down.
+ Typically this occurs when the corresponding RST_PP_RESET and RST_PP_POWER bits are set.
+                                                                 If the core is powered down when RST_PP_PENDING and RST_PP_RESET are both clear,
+                                                                 the core should be reset again by setting the corresponding RST_PP_RESET bit and
+                                                                 then clearing it.
+
+                                                                 The upper bits of this field remain accessible but will have no effect if the cores
+                                                                 are disabled. The number of bits set in RST_PP_AVAILABLE indicates the number of cores. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_rst_pp_power_stat_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t down : 4; /**< [ 3: 0](RO/H) Reserved.
+ Internal:
+ Core Powerdown. When set, each bit indicates the core is currently powered down.
+ Typically this occurs when the corresponding RST_PP_RESET and RST_PP_POWER bits are set.
+                                                                 If the core is powered down when RST_PP_PENDING and RST_PP_RESET are both clear,
+                                                                 the core should be reset again by setting the corresponding RST_PP_RESET bit and
+                                                                 then clearing it.
+
+                                                                 The upper bits of this field remain accessible but will have no effect if the cores
+                                                                 are disabled. The number of bits set in RST_PP_AVAILABLE indicates the number of cores. */
+#else /* Word 0 - Little Endian */
+ uint64_t down : 4; /**< [ 3: 0](RO/H) Reserved.
+ Internal:
+ Core Powerdown. When set, each bit indicates the core is currently powered down.
+ Typically this occurs when the corresponding RST_PP_RESET and RST_PP_POWER bits are set.
+                                                                 If the core is powered down when RST_PP_PENDING and RST_PP_RESET are both clear,
+                                                                 the core should be reset again by setting the corresponding RST_PP_RESET bit and
+                                                                 then clearing it.
+
+                                                                 The upper bits of this field remain accessible but will have no effect if the cores
+                                                                 are disabled. The number of bits set in RST_PP_AVAILABLE indicates the number of cores. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_rst_pp_power_stat_s cn88xx; */
+ /* struct bdk_rst_pp_power_stat_cn9 cn83xx; */
+};
+typedef union bdk_rst_pp_power_stat bdk_rst_pp_power_stat_t;
+
+#define BDK_RST_PP_POWER_STAT BDK_RST_PP_POWER_STAT_FUNC()
+static inline uint64_t BDK_RST_PP_POWER_STAT_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_PP_POWER_STAT_FUNC(void)
+{
+ return 0x87e006001710ll;
+}
+
+#define typedef_BDK_RST_PP_POWER_STAT bdk_rst_pp_power_stat_t
+#define bustype_BDK_RST_PP_POWER_STAT BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_PP_POWER_STAT "RST_PP_POWER_STAT"
+#define device_bar_BDK_RST_PP_POWER_STAT 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_PP_POWER_STAT 0
+#define arguments_BDK_RST_PP_POWER_STAT -1,-1,-1,-1
+
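+/* Editor's sketch, not part of the imported BDK source: per the [DOWN]
+ * description, a core reporting powered down while RST_PP_PENDING and
+ * RST_PP_RESET (defined below) are clear needs its reset pulsed. This
+ * helper only detects the part of that condition visible from the
+ * registers defined so far. Assumes BDK_CSR_READ from bdk-csr.h. */
+static inline int example_core_stuck_down(bdk_node_t node, int core)
+{
+    uint64_t mask = 1ull << core;
+    uint64_t down = BDK_CSR_READ(node, BDK_RST_PP_POWER_STAT);
+    uint64_t pend = BDK_CSR_READ(node, BDK_RST_PP_PENDING);
+    /* A full check also requires this core's RST_PP_RESET bit to be
+     * clear before pulsing its reset. */
+    return (down & mask) && !(pend & mask);
+}
+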
+/**
+ * Register (RSL) rst_pp_reset
+ *
+ * RST Core Reset Register
+ * This register contains the reset control for each core.
+ * Write operations to this register should occur only if
+ * RST_PP_PENDING is cleared.
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_pp_reset
+{
+ uint64_t u;
+ struct bdk_rst_pp_reset_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_rst_pp_reset_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t rst : 24; /**< [ 23: 0](R/W/H) AP core resets. Writing a one holds the corresponding AP core in reset,
+ writing a zero releases it from reset. These bits may also be cleared by
+ either DAP or CIC activity.
+ This field is always reinitialized on a core domain reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t rst : 24; /**< [ 23: 0](R/W/H) AP core resets. Writing a one holds the corresponding AP core in reset,
+ writing a zero releases it from reset. These bits may also be cleared by
+ either DAP or CIC activity.
+ This field is always reinitialized on a core domain reset. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_rst_pp_reset_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+        uint64_t rst                   : 3;  /**< [  3:  1](R/W/H) Core reset for cores 1 and above. Writing a 1 holds the corresponding
+                                                                 core in reset; writing a 0 releases it from reset. These bits may also
+                                                                 be cleared by either DAP or CIC activity.
+
+                                                                 The upper bits of this field remain accessible but will have no effect if the cores
+                                                                 are disabled. The number of bits set in RST_PP_AVAILABLE indicates the number of cores. */
+        uint64_t rst0                  : 1;  /**< [  0:  0](R/W/H) Core reset for core 0; its reset value depends on whether
+                                                                 GPIO_STRAP\<2:0\> = RST_BOOT_METHOD_E::REMOTE.
+                                                                 This bit may also be cleared by either DAP or CIC activity. */
+#else /* Word 0 - Little Endian */
+        uint64_t rst0                  : 1;  /**< [  0:  0](R/W/H) Core reset for core 0; its reset value depends on whether
+                                                                 GPIO_STRAP\<2:0\> = RST_BOOT_METHOD_E::REMOTE.
+                                                                 This bit may also be cleared by either DAP or CIC activity. */
+        uint64_t rst                   : 3;  /**< [  3:  1](R/W/H) Core reset for cores 1 and above. Writing a 1 holds the corresponding
+                                                                 core in reset; writing a 0 releases it from reset. These bits may also
+                                                                 be cleared by either DAP or CIC activity.
+
+                                                                 The upper bits of this field remain accessible but will have no effect if the cores
+                                                                 are disabled. The number of bits set in RST_PP_AVAILABLE indicates the number of cores. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_rst_pp_reset_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+        uint64_t rst                   : 47; /**< [ 47:  1](R/W/H) Core reset for cores 1 and above. Writing a 1 holds the corresponding
+                                                                 core in reset; writing a 0 releases it from reset. These bits may also
+                                                                 be cleared by either DAP or CIC activity.
+
+                                                                 The upper bits of this field remain accessible but will have no effect if the cores
+                                                                 are disabled. The number of bits set in RST_PP_AVAILABLE indicates the number of cores. */
+        uint64_t rst0                  : 1;  /**< [  0:  0](R/W/H) Core reset for core 0; its reset value depends on whether
+                                                                 GPIO_STRAP\<2:0\> = RST_BOOT_METHOD_E::REMOTE.
+                                                                 This bit may also be cleared by either DAP or CIC activity. */
+#else /* Word 0 - Little Endian */
+        uint64_t rst0                  : 1;  /**< [  0:  0](R/W/H) Core reset for core 0; its reset value depends on whether
+                                                                 GPIO_STRAP\<2:0\> = RST_BOOT_METHOD_E::REMOTE.
+                                                                 This bit may also be cleared by either DAP or CIC activity. */
+        uint64_t rst                   : 47; /**< [ 47:  1](R/W/H) Core reset for cores 1 and above. Writing a 1 holds the corresponding
+                                                                 core in reset; writing a 0 releases it from reset. These bits may also
+                                                                 be cleared by either DAP or CIC activity.
+
+                                                                 The upper bits of this field remain accessible but will have no effect if the cores
+                                                                 are disabled. The number of bits set in RST_PP_AVAILABLE indicates the number of cores. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_rst_pp_reset_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+        uint64_t rst                   : 23; /**< [ 23:  1](R/W/H) Core reset for cores 1 and above. Writing a 1 holds the corresponding
+                                                                 core in reset; writing a 0 releases it from reset. These bits may also
+                                                                 be cleared by either DAP or CIC activity.
+
+                                                                 The upper bits of this field remain accessible but will have no effect if the cores
+                                                                 are disabled. The number of bits set in RST_PP_AVAILABLE indicates the number of cores. */
+        uint64_t rst0                  : 1;  /**< [  0:  0](R/W/H) Core reset for core 0; its reset value depends on whether
+                                                                 GPIO_STRAP\<2:0\> = RST_BOOT_METHOD_E::REMOTE.
+                                                                 This bit may also be cleared by either DAP or CIC activity. */
+#else /* Word 0 - Little Endian */
+        uint64_t rst0                  : 1;  /**< [  0:  0](R/W/H) Core reset for core 0; its reset value depends on whether
+                                                                 GPIO_STRAP\<2:0\> = RST_BOOT_METHOD_E::REMOTE.
+                                                                 This bit may also be cleared by either DAP or CIC activity. */
+        uint64_t rst                   : 23; /**< [ 23:  1](R/W/H) Core reset for cores 1 and above. Writing a 1 holds the corresponding
+                                                                 core in reset; writing a 0 releases it from reset. These bits may also
+                                                                 be cleared by either DAP or CIC activity.
+
+                                                                 The upper bits of this field remain accessible but will have no effect if the cores
+                                                                 are disabled. The number of bits set in RST_PP_AVAILABLE indicates the number of cores. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_rst_pp_reset bdk_rst_pp_reset_t;
+
+#define BDK_RST_PP_RESET BDK_RST_PP_RESET_FUNC()
+static inline uint64_t BDK_RST_PP_RESET_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_PP_RESET_FUNC(void)
+{
+ return 0x87e006001740ll;
+}
+
+#define typedef_BDK_RST_PP_RESET bdk_rst_pp_reset_t
+#define bustype_BDK_RST_PP_RESET BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_PP_RESET "RST_PP_RESET"
+#define device_bar_BDK_RST_PP_RESET 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_PP_RESET 0
+#define arguments_BDK_RST_PP_RESET -1,-1,-1,-1
+
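+/* Editor's sketch, not part of the imported BDK source: per the note in
+ * the register description above, RST_PP_RESET is only written once
+ * RST_PP_PENDING has cleared. The raw .u image is used because the
+ * field layout differs per model. Assumes BDK_CSR_* from bdk-csr.h. */
+static inline void example_release_core(bdk_node_t node, int core)
+{
+    while (BDK_CSR_READ(node, BDK_RST_PP_PENDING) != 0)
+        ; /* No writes while a reset change is still pending */
+    BDK_CSR_MODIFY(c, node, BDK_RST_PP_RESET, c.u &= ~(1ull << core));
+}
+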
+/**
+ * Register (RSL) rst_ref_check
+ *
+ * INTERNAL: RST Reference Clock Checker Register
+ *
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_ref_check
+{
+ uint64_t u;
+ struct bdk_rst_ref_check_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t range : 1; /**< [ 63: 63](RO/H) Reference ever out of range. Set when either:
+ * Reference clock was outside operating range of 25 to 100 MHz.
+ * Reference clock increased or decreased in frequency. */
+ uint64_t reserved_48_62 : 15;
+ uint64_t pcycle : 16; /**< [ 47: 32](RO/H) Previous cycle count. Sum of last [CNT0] and [CNT1]. */
+ uint64_t cnt1 : 16; /**< [ 31: 16](RO/H) Number of internal ring-oscillator clock pulses counted over 16 reference clocks
+ while reference clock was high.
+ When used with [CNT0] the internal ring-oscillator frequency can be determined. */
+ uint64_t cnt0 : 16; /**< [ 15: 0](RO/H) Number of internal ring-oscillator clock pulses counted over 16 reference clocks
+ while reference clock was low.
+ When used with [CNT1] the internal ring-oscillator frequency can be determined. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt0 : 16; /**< [ 15: 0](RO/H) Number of internal ring-oscillator clock pulses counted over 16 reference clocks
+ while reference clock was low.
+ When used with [CNT1] the internal ring-oscillator frequency can be determined. */
+ uint64_t cnt1 : 16; /**< [ 31: 16](RO/H) Number of internal ring-oscillator clock pulses counted over 16 reference clocks
+ while reference clock was high.
+ When used with [CNT0] the internal ring-oscillator frequency can be determined. */
+ uint64_t pcycle : 16; /**< [ 47: 32](RO/H) Previous cycle count. Sum of last [CNT0] and [CNT1]. */
+ uint64_t reserved_48_62 : 15;
+ uint64_t range : 1; /**< [ 63: 63](RO/H) Reference ever out of range. Set when either:
+ * Reference clock was outside operating range of 25 to 100 MHz.
+ * Reference clock increased or decreased in frequency. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_rst_ref_check_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t range : 1; /**< [ 63: 63](RO/H) Reference ever out of range. Set when either:
+ * Reference clock was outside operating range of 85 to 115 MHz.
+ * Reference increased or decreased in frequency. */
+ uint64_t reserved_48_62 : 15;
+ uint64_t pcycle : 16; /**< [ 47: 32](RO/H) Previous cycle count. Sum of last [CNT0] and [CNT1]. */
+ uint64_t cnt1 : 16; /**< [ 31: 16](RO/H) Number of internal ring-oscillator clock pulses counted over 16 reference clocks
+ while reference clock was high.
+ When used with [CNT0] the internal ring-oscillator frequency can be determined. */
+ uint64_t cnt0 : 16; /**< [ 15: 0](RO/H) Number of internal ring-oscillator clock pulses counted over 16 reference clocks
+ while reference clock was low.
+ When used with [CNT1] the internal ring-oscillator frequency can be determined. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt0 : 16; /**< [ 15: 0](RO/H) Number of internal ring-oscillator clock pulses counted over 16 reference clocks
+ while reference clock was low.
+ When used with [CNT1] the internal ring-oscillator frequency can be determined. */
+ uint64_t cnt1 : 16; /**< [ 31: 16](RO/H) Number of internal ring-oscillator clock pulses counted over 16 reference clocks
+ while reference clock was high.
+ When used with [CNT0] the internal ring-oscillator frequency can be determined. */
+ uint64_t pcycle : 16; /**< [ 47: 32](RO/H) Previous cycle count. Sum of last [CNT0] and [CNT1]. */
+ uint64_t reserved_48_62 : 15;
+ uint64_t range : 1; /**< [ 63: 63](RO/H) Reference ever out of range. Set when either:
+ * Reference clock was outside operating range of 85 to 115 MHz.
+ * Reference increased or decreased in frequency. */
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_rst_ref_check_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t range : 1; /**< [ 63: 63](RO/H) Reference ever out of range. Set when either:
+ * Reference clock was outside operating range of 25 to 100 MHz.
+ * Reference clock increased or decreased in frequency. */
+ uint64_t reserved_48_62 : 15;
+ uint64_t reserved_32_47 : 16;
+ uint64_t cnt1 : 16; /**< [ 31: 16](RO/H) Number of internal ring-oscillator clock pulses counted over 16 reference clocks
+ while reference clock was high.
+ When used with [CNT0] the internal ring-oscillator frequency can be determined. */
+ uint64_t cnt0 : 16; /**< [ 15: 0](RO/H) Number of internal ring-oscillator clock pulses counted over 16 reference clocks
+ while reference clock was low.
+ When used with [CNT1] the internal ring-oscillator frequency can be determined. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt0 : 16; /**< [ 15: 0](RO/H) Number of internal ring-oscillator clock pulses counted over 16 reference clocks
+ while reference clock was low.
+ When used with [CNT1] the internal ring-oscillator frequency can be determined. */
+ uint64_t cnt1 : 16; /**< [ 31: 16](RO/H) Number of internal ring-oscillator clock pulses counted over 16 reference clocks
+ while reference clock was high.
+ When used with [CNT0] the internal ring-oscillator frequency can be determined. */
+ uint64_t reserved_32_47 : 16;
+ uint64_t reserved_48_62 : 15;
+ uint64_t range : 1; /**< [ 63: 63](RO/H) Reference ever out of range. Set when either:
+ * Reference clock was outside operating range of 25 to 100 MHz.
+ * Reference clock increased or decreased in frequency. */
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_rst_ref_check_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t range : 1; /**< [ 63: 63](RO/H) Reference ever out of range. Set when either:
+ * Reference clock was outside operating range of 25 to 100 MHz.
+ * Reference clock duty cycle outside 50% +/- 20%.
+ * Reference increased or decreased in frequency. */
+ uint64_t reserved_48_62 : 15;
+ uint64_t reserved_32_47 : 16;
+ uint64_t cnt1 : 16; /**< [ 31: 16](RO/H) Number of internal ring-oscillator clock pulses counted over 16 reference clocks
+ while reference clock was high.
+ When used with [CNT0] the internal ring-oscillator frequency can be determined. */
+ uint64_t cnt0 : 16; /**< [ 15: 0](RO/H) Number of internal ring-oscillator clock pulses counted over 16 reference clocks
+ while reference clock was low.
+ When used with [CNT1] the internal ring-oscillator frequency can be determined. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt0 : 16; /**< [ 15: 0](RO/H) Number of internal ring-oscillator clock pulses counted over 16 reference clocks
+ while reference clock was low.
+ When used with [CNT1] the internal ring-oscillator frequency can be determined. */
+ uint64_t cnt1 : 16; /**< [ 31: 16](RO/H) Number of internal ring-oscillator clock pulses counted over 16 reference clocks
+ while reference clock was high.
+ When used with [CNT0] the internal ring-oscillator frequency can be determined. */
+ uint64_t reserved_32_47 : 16;
+ uint64_t reserved_48_62 : 15;
+ uint64_t range : 1; /**< [ 63: 63](RO/H) Reference ever out of range. Set when either:
+ * Reference clock was outside operating range of 25 to 100 MHz.
+ * Reference clock duty cycle outside 50% +/- 20%.
+ * Reference increased or decreased in frequency. */
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_rst_ref_check_s cn83xx; */
+};
+typedef union bdk_rst_ref_check bdk_rst_ref_check_t;
+
+#define BDK_RST_REF_CHECK BDK_RST_REF_CHECK_FUNC()
+static inline uint64_t BDK_RST_REF_CHECK_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_REF_CHECK_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x87e006001770ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x87e006001770ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X))
+ return 0x87e006001770ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e006001770ll;
+ __bdk_csr_fatal("RST_REF_CHECK", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_REF_CHECK bdk_rst_ref_check_t
+#define bustype_BDK_RST_REF_CHECK BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_REF_CHECK "RST_REF_CHECK"
+#define device_bar_BDK_RST_REF_CHECK 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_REF_CHECK 0
+#define arguments_BDK_RST_REF_CHECK -1,-1,-1,-1
+
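+/* Editor's sketch, not part of the imported BDK source: [CNT0]/[CNT1]
+ * count ring-oscillator pulses over 16 reference clocks, so their sum
+ * against a known reference frequency estimates the oscillator rate.
+ * The divide-by-16 window is the editor's reading of the field
+ * descriptions, not a datasheet formula. Assumes BDK_CSR_INIT. */
+static inline uint64_t example_ring_osc_hz(bdk_node_t node, uint64_t ref_hz)
+{
+    BDK_CSR_INIT(chk, node, BDK_RST_REF_CHECK);
+    return (chk.s.cnt0 + chk.s.cnt1) * ref_hz / 16;
+}
+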
+/**
+ * Register (RSL) rst_ref_cntr
+ *
+ * RST Reference-Counter Register
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_ref_cntr
+{
+ uint64_t u;
+ struct bdk_rst_ref_cntr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t cnt : 64; /**< [ 63: 0](R/W/H) Count. The counter is initialized to 0x0 during a cold reset and is otherwise continuously
+ running.
+ CNT is incremented every reference-clock cycle (i.e. at 50 MHz). */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt : 64; /**< [ 63: 0](R/W/H) Count. The counter is initialized to 0x0 during a cold reset and is otherwise continuously
+ running.
+ CNT is incremented every reference-clock cycle (i.e. at 50 MHz). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_ref_cntr_s cn8; */
+ struct bdk_rst_ref_cntr_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t cnt : 64; /**< [ 63: 0](R/W/H) Reference count. [CNT] is incremented every 100 MHz reference clock.
+ This field is always reinitialized on a cold domain reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt : 64; /**< [ 63: 0](R/W/H) Reference count. [CNT] is incremented every 100 MHz reference clock.
+ This field is always reinitialized on a cold domain reset. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_rst_ref_cntr bdk_rst_ref_cntr_t;
+
+#define BDK_RST_REF_CNTR BDK_RST_REF_CNTR_FUNC()
+static inline uint64_t BDK_RST_REF_CNTR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_REF_CNTR_FUNC(void)
+{
+ return 0x87e006001758ll;
+}
+
+#define typedef_BDK_RST_REF_CNTR bdk_rst_ref_cntr_t
+#define bustype_BDK_RST_REF_CNTR BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_REF_CNTR "RST_REF_CNTR"
+#define device_bar_BDK_RST_REF_CNTR 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_REF_CNTR 0
+#define arguments_BDK_RST_REF_CNTR -1,-1,-1,-1
+
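+/* Editor's sketch, not part of the imported BDK source: RST_REF_CNTR is
+ * a free-running reference-clock counter (50 MHz on CN8XXX, 100 MHz on
+ * CN9XXX per the descriptions above), so it can serve as a coarse
+ * timebase. Assumes ref_hz is a whole number of MHz. */
+static inline uint64_t example_ref_cntr_usec(bdk_node_t node, uint64_t ref_hz)
+{
+    return BDK_CSR_READ(node, BDK_RST_REF_CNTR) / (ref_hz / 1000000);
+}
+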
+/**
+ * Register (RSL) rst_refc_ctl
+ *
+ * RST Common Reference Clock Input Control Register
+ * This register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_refc_ctl
+{
+ uint64_t u;
+ struct bdk_rst_refc_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t cclk2_sel : 2; /**< [ 8: 7](R/W) Common clock 2 termination select.
+ X0 = No termination.
+ 01 = LVPECL termination.
+ 11 = HCSL termination.
+
+ The field is initialized on a cold domain reset. */
+ uint64_t cclk2_pwdn : 1; /**< [ 6: 6](R/W) Common clock 2 receiver power down.
+ When set, receiver is powered down.
+ The field is initialized on a cold domain reset.
+
+ Internal:
+ The receiver is also forced into powerdown when jtg__rst_pll.iddq_mode is set. */
+ uint64_t cclk1_sel : 2; /**< [ 5: 4](R/W) Common clock 1 termination select.
+ X0 = No termination.
+ 01 = LVPECL termination.
+ 11 = HCSL termination.
+
+ The field is initialized on a cold domain reset. */
+ uint64_t cclk1_pwdn : 1; /**< [ 3: 3](R/W) Common clock 1 receiver power down.
+ When set, receiver is powered down.
+ The field is initialized on a cold domain reset.
+
+ Internal:
+ The receiver is also forced into powerdown when jtg__rst_pll.iddq_mode is set. */
+ uint64_t cclk0_sel : 2; /**< [ 2: 1](RO/H) Common clock 0 termination select determined by hardware.
+ X0 = No termination.
+ 01 = LVPECL termination.
+ 11 = HCSL termination.
+
+ The field is initialized on a cold domain reset. */
+ uint64_t cclk0_pwdn : 1; /**< [ 0: 0](RAZ) Common clock 0 receiver power down.
+ Never powered down. Reads as zero. */
+#else /* Word 0 - Little Endian */
+ uint64_t cclk0_pwdn : 1; /**< [ 0: 0](RAZ) Common clock 0 receiver power down.
+ Never powered down. Reads as zero. */
+ uint64_t cclk0_sel : 2; /**< [ 2: 1](RO/H) Common clock 0 termination select determined by hardware.
+ X0 = No termination.
+ 01 = LVPECL termination.
+ 11 = HCSL termination.
+
+ The field is initialized on a cold domain reset. */
+ uint64_t cclk1_pwdn : 1; /**< [ 3: 3](R/W) Common clock 1 receiver power down.
+ When set, receiver is powered down.
+ The field is initialized on a cold domain reset.
+
+ Internal:
+ The receiver is also forced into powerdown when jtg__rst_pll.iddq_mode is set. */
+ uint64_t cclk1_sel : 2; /**< [ 5: 4](R/W) Common clock 1 termination select.
+ X0 = No termination.
+ 01 = LVPECL termination.
+ 11 = HCSL termination.
+
+ The field is initialized on a cold domain reset. */
+ uint64_t cclk2_pwdn : 1; /**< [ 6: 6](R/W) Common clock 2 receiver power down.
+ When set, receiver is powered down.
+ The field is initialized on a cold domain reset.
+
+ Internal:
+ The receiver is also forced into powerdown when jtg__rst_pll.iddq_mode is set. */
+ uint64_t cclk2_sel : 2; /**< [ 8: 7](R/W) Common clock 2 termination select.
+ X0 = No termination.
+ 01 = LVPECL termination.
+ 11 = HCSL termination.
+
+ The field is initialized on a cold domain reset. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_refc_ctl_s cn; */
+};
+typedef union bdk_rst_refc_ctl bdk_rst_refc_ctl_t;
+
+#define BDK_RST_REFC_CTL BDK_RST_REFC_CTL_FUNC()
+static inline uint64_t BDK_RST_REFC_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_REFC_CTL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e00a001798ll;
+ __bdk_csr_fatal("RST_REFC_CTL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_REFC_CTL bdk_rst_refc_ctl_t
+#define bustype_BDK_RST_REFC_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_REFC_CTL "RST_REFC_CTL"
+#define device_bar_BDK_RST_REFC_CTL 0x2 /* PF_BAR2 */
+#define busnum_BDK_RST_REFC_CTL 0
+#define arguments_BDK_RST_REFC_CTL -1,-1,-1,-1
+
+/**
+ * Register (RSL) rst_reset_active
+ *
+ * RST Domain Reset Active Status Register
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_reset_active
+{
+ uint64_t u;
+ struct bdk_rst_reset_active_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t scp : 1; /**< [ 3: 3](RO/H) SCP domain reset status. When set, SCP domain is in reset.
+ Default reset value is zero after a chip reset. */
+ uint64_t mcp : 1; /**< [ 2: 2](RO/H) MCP domain reset status. When set, MCP domain is in reset.
+ Default reset value is one after a chip reset. */
+ uint64_t core : 1; /**< [ 1: 1](RO/H) Core domain reset status. When set, core domain is in reset.
+ Default reset value is one after a chip reset. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t core : 1; /**< [ 1: 1](RO/H) Core domain reset status. When set, core domain is in reset.
+ Default reset value is one after a chip reset. */
+ uint64_t mcp : 1; /**< [ 2: 2](RO/H) MCP domain reset status. When set, MCP domain is in reset.
+ Default reset value is one after a chip reset. */
+ uint64_t scp : 1; /**< [ 3: 3](RO/H) SCP domain reset status. When set, SCP domain is in reset.
+ Default reset value is zero after a chip reset. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_reset_active_s cn; */
+};
+typedef union bdk_rst_reset_active bdk_rst_reset_active_t;
+
+#define BDK_RST_RESET_ACTIVE BDK_RST_RESET_ACTIVE_FUNC()
+static inline uint64_t BDK_RST_RESET_ACTIVE_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_RESET_ACTIVE_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e006001888ll;
+ __bdk_csr_fatal("RST_RESET_ACTIVE", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_RESET_ACTIVE bdk_rst_reset_active_t
+#define bustype_BDK_RST_RESET_ACTIVE BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_RESET_ACTIVE "RST_RESET_ACTIVE"
+#define device_bar_BDK_RST_RESET_ACTIVE 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_RESET_ACTIVE 0
+#define arguments_BDK_RST_RESET_ACTIVE -1,-1,-1,-1
+
+/**
+ * Register (RSL) rst_scp_domain_w1c
+ *
+ * RST SCP Domain Soft Reset Clear Register
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_scp_domain_w1c
+{
+ uint64_t u;
+ struct bdk_rst_scp_domain_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t soft_rst : 1; /**< [ 0: 0](R/W1C/H) Clear soft reset of the SCP processor and associated logic.
+                                                                 When set to one, the soft reset of the SCP is removed.
+                                                                 Reads of this register show the soft reset state, not the actual SCP
+                                                                 domain reset. Other factors may keep the reset active; reading
+                                                                 RST_RESET_ACTIVE[SCP] shows the actual reset state. To compensate for
+                                                                 delays in reset, this field should only be set if RST_RESET_ACTIVE[SCP] is set.
+ This field is always reinitialized on a chip domain reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t soft_rst : 1; /**< [ 0: 0](R/W1C/H) Clear soft reset of the SCP processor and associated logic.
+                                                                 When set to one, the soft reset of the SCP is removed.
+                                                                 Reads of this register show the soft reset state, not the actual SCP
+                                                                 domain reset. Other factors may keep the reset active; reading
+                                                                 RST_RESET_ACTIVE[SCP] shows the actual reset state. To compensate for
+                                                                 delays in reset, this field should only be set if RST_RESET_ACTIVE[SCP] is set.
+ This field is always reinitialized on a chip domain reset. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_scp_domain_w1c_s cn; */
+};
+typedef union bdk_rst_scp_domain_w1c bdk_rst_scp_domain_w1c_t;
+
+#define BDK_RST_SCP_DOMAIN_W1C BDK_RST_SCP_DOMAIN_W1C_FUNC()
+static inline uint64_t BDK_RST_SCP_DOMAIN_W1C_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_SCP_DOMAIN_W1C_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e006001848ll;
+ __bdk_csr_fatal("RST_SCP_DOMAIN_W1C", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_SCP_DOMAIN_W1C bdk_rst_scp_domain_w1c_t
+#define bustype_BDK_RST_SCP_DOMAIN_W1C BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_SCP_DOMAIN_W1C "RST_SCP_DOMAIN_W1C"
+#define device_bar_BDK_RST_SCP_DOMAIN_W1C 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_SCP_DOMAIN_W1C 0
+#define arguments_BDK_RST_SCP_DOMAIN_W1C -1,-1,-1,-1
+
+/**
+ * Register (RSL) rst_scp_domain_w1s
+ *
+ * RST SCP Domain Soft Reset Set Register
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_scp_domain_w1s
+{
+ uint64_t u;
+ struct bdk_rst_scp_domain_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t soft_rst : 1; /**< [ 0: 0](R/W1S/H) Set soft reset of SCP core and associated logic.
+                                                                 When set to one, all logic associated with the SCP domain is placed in reset.
+                                                                 If RST_BOOT[RBOOT] is set, the SCP soft reset will stay asserted until
+                                                                 RST_SCP_DOMAIN_W1C is written; otherwise it deasserts automatically.
+                                                                 Reads of this register show the soft reset state, not the actual SCP
+                                                                 domain reset. Other factors may keep the reset active; reading
+                                                                 RST_RESET_ACTIVE[SCP] shows the actual reset state.
+ This field is always reinitialized on a chip domain reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t soft_rst : 1; /**< [ 0: 0](R/W1S/H) Set soft reset of SCP core and associated logic.
+                                                                 When set to one, all logic associated with the SCP domain is placed in reset.
+                                                                 If RST_BOOT[RBOOT] is set, the SCP soft reset will stay asserted until
+                                                                 RST_SCP_DOMAIN_W1C is written; otherwise it deasserts automatically.
+                                                                 Reads of this register show the soft reset state, not the actual SCP
+                                                                 domain reset. Other factors may keep the reset active; reading
+                                                                 RST_RESET_ACTIVE[SCP] shows the actual reset state.
+ This field is always reinitialized on a chip domain reset. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_scp_domain_w1s_s cn; */
+};
+typedef union bdk_rst_scp_domain_w1s bdk_rst_scp_domain_w1s_t;
+
+#define BDK_RST_SCP_DOMAIN_W1S BDK_RST_SCP_DOMAIN_W1S_FUNC()
+static inline uint64_t BDK_RST_SCP_DOMAIN_W1S_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_SCP_DOMAIN_W1S_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e006001840ll;
+ __bdk_csr_fatal("RST_SCP_DOMAIN_W1S", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_SCP_DOMAIN_W1S bdk_rst_scp_domain_w1s_t
+#define bustype_BDK_RST_SCP_DOMAIN_W1S BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_SCP_DOMAIN_W1S "RST_SCP_DOMAIN_W1S"
+#define device_bar_BDK_RST_SCP_DOMAIN_W1S 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_SCP_DOMAIN_W1S 0
+#define arguments_BDK_RST_SCP_DOMAIN_W1S -1,-1,-1,-1
+
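+/* Editor's sketch, not part of the imported BDK source: cycling the SCP
+ * soft reset per the W1S/W1C descriptions above; the release is gated
+ * on RST_RESET_ACTIVE[SCP] actually reading as set. CN9XXX only.
+ * Assumes BDK_CSR_WRITE/BDK_CSR_INIT/BDK_CSR_READ from bdk-csr.h. */
+static inline void example_cycle_scp_reset(bdk_node_t node)
+{
+    BDK_CSR_WRITE(node, BDK_RST_SCP_DOMAIN_W1S, 1); /* Assert soft reset */
+    BDK_CSR_INIT(act, node, BDK_RST_RESET_ACTIVE);
+    while (!act.s.scp) /* Wait for the SCP domain to report reset */
+        act.u = BDK_CSR_READ(node, BDK_RST_RESET_ACTIVE);
+    BDK_CSR_WRITE(node, BDK_RST_SCP_DOMAIN_W1C, 1); /* Release soft reset */
+}
+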
+/**
+ * Register (RSL) rst_soft_prst#
+ *
+ * RST PCIe Soft Reset Registers
+ * This register is accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_soft_prstx
+{
+ uint64_t u;
+ struct bdk_rst_soft_prstx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+        uint64_t soft_prst             : 1;  /**< [  0:  0](R/W) Soft PCIe reset. Resets the PCIe logic and corresponding common logic
+                                                                 associated with the SLI controller in all modes, not just RC mode.
+ * If RST_CTL()[HOST_MODE] = 0, [SOFT_PRST] resets to 0.
+ * If RST_CTL()[HOST_MODE] = 1, [SOFT_PRST] resets to 1.
+
+ When CNXXXX is configured to drive PERST*_L (i.e.
+ RST_CTL()[RST_DRV] = 1), this controls the output value on PERST*_L.
+
+ Internal:
+ This bit is also forced high if the corresponding PEM Cripple Fuse is set. */
+#else /* Word 0 - Little Endian */
+        uint64_t soft_prst             : 1;  /**< [  0:  0](R/W) Soft PCIe reset. Resets the PCIe logic and corresponding common logic
+                                                                 associated with the SLI controller in all modes, not just RC mode.
+ * If RST_CTL()[HOST_MODE] = 0, [SOFT_PRST] resets to 0.
+ * If RST_CTL()[HOST_MODE] = 1, [SOFT_PRST] resets to 1.
+
+ When CNXXXX is configured to drive PERST*_L (i.e.
+ RST_CTL()[RST_DRV] = 1), this controls the output value on PERST*_L.
+
+ Internal:
+ This bit is also forced high if the corresponding PEM Cripple Fuse is set. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_soft_prstx_s cn8; */
+ struct bdk_rst_soft_prstx_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t soft_prst : 1; /**< [ 0: 0](R/W) Soft PCIe reset. Resets the PEM and corresponding GSER SERDES logic.
+ This field is always set on a cold domain reset, when the link goes down
+ or on the corresponding PEM domain reset if RST_CTL()[HOST_MODE] is set.
+
+ When RST_CTL()[RST_DRV] is set, this controls the output value on PERST*_L.
+
+ Internal:
+ This bit is also forced high if the corresponding PEM Cripple Fuse is set. */
+#else /* Word 0 - Little Endian */
+ uint64_t soft_prst : 1; /**< [ 0: 0](R/W) Soft PCIe reset. Resets the PEM and corresponding GSER SERDES logic.
+ This field is always set on a cold domain reset, when the link goes down
+ or on the corresponding PEM domain reset if RST_CTL()[HOST_MODE] is set.
+
+ When RST_CTL()[RST_DRV] is set, this controls the output value on PERST*_L.
+
+ Internal:
+ This bit is also forced high if the corresponding PEM Cripple Fuse is set. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_rst_soft_prstx bdk_rst_soft_prstx_t;
+
+static inline uint64_t BDK_RST_SOFT_PRSTX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_SOFT_PRSTX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x87e0060016c0ll + 8ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0060016c0ll + 8ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0060016c0ll + 8ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x87e0060016c0ll + 8ll * ((a) & 0x3);
+ __bdk_csr_fatal("RST_SOFT_PRSTX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_SOFT_PRSTX(a) bdk_rst_soft_prstx_t
+#define bustype_BDK_RST_SOFT_PRSTX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_SOFT_PRSTX(a) "RST_SOFT_PRSTX"
+#define device_bar_BDK_RST_SOFT_PRSTX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_SOFT_PRSTX(a) (a)
+#define arguments_BDK_RST_SOFT_PRSTX(a) (a),-1,-1,-1
+
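+/* Editor's sketch, not part of the imported BDK source: when
+ * RST_CTL()[RST_DRV] is set, [SOFT_PRST] drives PERST*_L, so toggling
+ * it asserts or releases the downstream PCIe reset for controller
+ * 'pem'. Assumes BDK_CSR_MODIFY from bdk-csr.h. */
+static inline void example_set_perst(bdk_node_t node, int pem, int assert)
+{
+    BDK_CSR_MODIFY(c, node, BDK_RST_SOFT_PRSTX(pem), c.s.soft_prst = !!assert);
+}
+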
+/**
+ * Register (RSL) rst_soft_rst
+ *
+ * RST Soft Reset Register
+ */
+union bdk_rst_soft_rst
+{
+ uint64_t u;
+ struct bdk_rst_soft_rst_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+        uint64_t soft_rst              : 1;  /**< [  0:  0](WO) Soft reset. When set to 1, resets the CNXXXX core. When performing a
+                                                                 soft reset from a remote PCIe host, always read this register and wait
+                                                                 for the results before setting [SOFT_RST] to 1. */
+#else /* Word 0 - Little Endian */
+        uint64_t soft_rst              : 1;  /**< [  0:  0](WO) Soft reset. When set to 1, resets the CNXXXX core. When performing a
+                                                                 soft reset from a remote PCIe host, always read this register and wait
+                                                                 for the results before setting [SOFT_RST] to 1. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_soft_rst_s cn; */
+};
+typedef union bdk_rst_soft_rst bdk_rst_soft_rst_t;
+
+#define BDK_RST_SOFT_RST BDK_RST_SOFT_RST_FUNC()
+static inline uint64_t BDK_RST_SOFT_RST_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_SOFT_RST_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x87e006001680ll;
+ __bdk_csr_fatal("RST_SOFT_RST", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_SOFT_RST bdk_rst_soft_rst_t
+#define bustype_BDK_RST_SOFT_RST BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_SOFT_RST "RST_SOFT_RST"
+#define device_bar_BDK_RST_SOFT_RST 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_SOFT_RST 0
+#define arguments_BDK_RST_SOFT_RST -1,-1,-1,-1
+
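+/* Editor's sketch, not part of the imported BDK source: the description
+ * above says to read this register and wait for the result before
+ * setting [SOFT_RST] when resetting from a remote PCIe host; the read
+ * here orders prior posted accesses ahead of the reset. CN8XXX only. */
+static inline void example_soft_reset(bdk_node_t node)
+{
+    (void)BDK_CSR_READ(node, BDK_RST_SOFT_RST); /* Flush prior accesses */
+    BDK_CSR_WRITE(node, BDK_RST_SOFT_RST, 1);   /* Trigger the soft reset */
+}
+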
+/**
+ * Register (RSL) rst_src_map
+ *
+ * RST Source Domain Map Register
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_src_map
+{
+ uint64_t u;
+ struct bdk_rst_src_map_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_13_63 : 51;
+ uint64_t ocx_to_chip : 1; /**< [ 12: 12](R/W) Reserved.
+ Internal:
+ OCX linkdown mapped to chip domain reset.
+ When RST_OCX[RST_LINK] is set:
+ 0 = OCX transition from link up to link down will cause a core domain reset.
+ 1 = OCX transition from link up to link down will cause both a core domain reset
+ and a chip domain reset.
+
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t reserved_11 : 1;
+ uint64_t scp_to_mcp : 1; /**< [ 10: 10](R/W) SCP watchdog and pin resets mapped to MCP domain reset.
+ 0 = Mapping disabled.
+ 1 = SCP reset pin or the SCP watchdog will additionally
+ cause an mcp domain reset.
+
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t scp_to_core : 1; /**< [ 9: 9](R/W) SCP watchdog and pin resets mapped to core domain reset.
+ 0 = Mapping disabled.
+ 1 = SCP reset pin or the SCP watchdog will additionally
+ cause a core domain reset.
+
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t scp_to_chip : 1; /**< [ 8: 8](R/W) SCP watchdog and pin resets mapped to chip domain reset.
+ 0 = Mapping disabled.
+ 1 = SCP reset pin or the SCP watchdog will additionally
+ cause a chip domain reset.
+
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t mcp_to_scp : 1; /**< [ 7: 7](R/W) MCP watchdog and pin resets mapped to scp domain reset.
+ 0 = Mapping disabled.
+ 1 = MCP reset pin or the MCP watchdog will additionally
+ cause an scp domain reset.
+
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t reserved_6 : 1;
+ uint64_t mcp_to_core : 1; /**< [ 5: 5](R/W) MCP watchdog and pin resets mapped to core domain reset.
+ 0 = Mapping disabled.
+ 1 = MCP reset pin or the MCP watchdog will additionally
+ cause a core domain reset.
+
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t mcp_to_chip : 1; /**< [ 4: 4](R/W) MCP watchdog and pin resets mapped to chip domain reset.
+ 0 = Mapping disabled.
+ 1 = MCP reset pin or the MCP watchdog will additionally
+ cause a chip domain reset.
+
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t core_to_scp : 1; /**< [ 3: 3](R/W) Core watchdog and pin resets mapped to scp domain reset.
+ 0 = Mapping disabled.
+ 1 = Core reset pin or the AP watchdog will additionally
+ cause an scp domain reset.
+
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t core_to_mcp : 1; /**< [ 2: 2](R/W) Core watchdog and pin resets mapped to mcp domain reset.
+ 0 = Mapping disabled.
+ 1 = Core reset pin or the AP watchdog will additionally
+ cause an mcp domain reset.
+
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t reserved_1 : 1;
+ uint64_t core_to_chip : 1; /**< [ 0: 0](R/W) Core watchdog and pin resets mapped to chip domain reset.
+ 0 = Mapping disabled.
+ 1 = Core reset pin or the AP watchdog will additionally
+ cause a chip domain reset.
+
+ This field is always reinitialized on a cold domain reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t core_to_chip : 1; /**< [ 0: 0](R/W) Core watchdog and pin resets mapped to chip domain reset.
+ 0 = Mapping disabled.
+ 1 = Core reset pin or the AP watchdog will additionally
+ cause a chip domain reset.
+
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t reserved_1 : 1;
+ uint64_t core_to_mcp : 1; /**< [ 2: 2](R/W) Core watchdog and pin resets mapped to mcp domain reset.
+ 0 = Mapping disabled.
+ 1 = Core reset pin or the AP watchdog will additionally
+ cause an mcp domain reset.
+
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t core_to_scp : 1; /**< [ 3: 3](R/W) Core watchdog and pin resets mapped to scp domain reset.
+ 0 = Mapping disabled.
+ 1 = Core reset pin or the AP watchdog will additionally
+ cause an scp domain reset.
+
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t mcp_to_chip : 1; /**< [ 4: 4](R/W) MCP watchdog and pin resets mapped to chip domain reset.
+ 0 = Mapping disabled.
+ 1 = MCP reset pin or the MCP watchdog will additionally
+ cause a chip domain reset.
+
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t mcp_to_core : 1; /**< [ 5: 5](R/W) MCP watchdog and pin resets mapped to core domain reset.
+ 0 = Mapping disabled.
+ 1 = MCP reset pin or the MCP watchdog will additionally
+ cause a core domain reset.
+
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t reserved_6 : 1;
+ uint64_t mcp_to_scp : 1; /**< [ 7: 7](R/W) MCP watchdog and pin resets mapped to scp domain reset.
+ 0 = Mapping disabled.
+ 1 = MCP reset pin or the MCP watchdog will additionally
+ cause an scp domain reset.
+
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t scp_to_chip : 1; /**< [ 8: 8](R/W) SCP watchdog and pin resets mapped to chip domain reset.
+ 0 = Mapping disabled.
+ 1 = SCP reset pin or the SCP watchdog will additionally
+ cause a chip domain reset.
+
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t scp_to_core : 1; /**< [ 9: 9](R/W) SCP watchdog and pin resets mapped to core domain reset.
+ 0 = Mapping disabled.
+ 1 = SCP reset pin or the SCP watchdog will additionally
+ cause a core domain reset.
+
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t scp_to_mcp : 1; /**< [ 10: 10](R/W) SCP watchdog and pin resets mapped to MCP domain reset.
+ 0 = Mapping disabled.
+ 1 = SCP reset pin or the SCP watchdog will additionally
+ cause an mcp domain reset.
+
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t reserved_11 : 1;
+ uint64_t ocx_to_chip : 1; /**< [ 12: 12](R/W) Reserved.
+ Internal:
+ OCX linkdown mapped to chip domain reset.
+ When RST_OCX[RST_LINK] is set:
+ 0 = OCX transition from link up to link down will cause a core domain reset.
+ 1 = OCX transition from link up to link down will cause both a core domain reset
+ and a chip domain reset.
+
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t reserved_13_63 : 51;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_src_map_s cn; */
+};
+typedef union bdk_rst_src_map bdk_rst_src_map_t;
+
+#define BDK_RST_SRC_MAP BDK_RST_SRC_MAP_FUNC()
+static inline uint64_t BDK_RST_SRC_MAP_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_SRC_MAP_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e006001898ll;
+ __bdk_csr_fatal("RST_SRC_MAP", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_SRC_MAP bdk_rst_src_map_t
+#define bustype_BDK_RST_SRC_MAP BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_SRC_MAP "RST_SRC_MAP"
+#define device_bar_BDK_RST_SRC_MAP 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_SRC_MAP 0
+#define arguments_BDK_RST_SRC_MAP -1,-1,-1,-1
+
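+/* Editor's sketch, not part of the imported BDK source: escalating an
+ * AP watchdog or core reset-pin event into a full chip domain reset by
+ * setting [CORE_TO_CHIP], as described above. CN9XXX only. */
+static inline void example_map_core_to_chip(bdk_node_t node)
+{
+    BDK_CSR_MODIFY(c, node, BDK_RST_SRC_MAP, c.s.core_to_chip = 1);
+}
+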
+/**
+ * Register (RSL) rst_sw_w1s
+ *
+ * RST Software W1S Data Register
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_sw_w1s
+{
+ uint64_t u;
+ struct bdk_rst_sw_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W1S) Data register that can be set by software and is only cleared
+ on a chip domain reset.
+ This field is always reinitialized on a chip domain reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W1S) Data register that can be set by software and is only cleared
+ on a chip domain reset.
+ This field is always reinitialized on a chip domain reset. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_sw_w1s_s cn; */
+};
+typedef union bdk_rst_sw_w1s bdk_rst_sw_w1s_t;
+
+#define BDK_RST_SW_W1S BDK_RST_SW_W1S_FUNC()
+static inline uint64_t BDK_RST_SW_W1S_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_SW_W1S_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e0060017f0ll;
+ __bdk_csr_fatal("RST_SW_W1S", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_SW_W1S bdk_rst_sw_w1s_t
+#define bustype_BDK_RST_SW_W1S BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_SW_W1S "RST_SW_W1S"
+#define device_bar_BDK_RST_SW_W1S 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_SW_W1S 0
+#define arguments_BDK_RST_SW_W1S -1,-1,-1,-1
+
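+/* Editor's sketch, not part of the imported BDK source: since [DATA]
+ * survives everything short of a chip domain reset, firmware can use it
+ * as a sticky boot-progress breadcrumb; stage_bit is a caller-chosen
+ * flag, not a defined encoding. */
+static inline void example_note_boot_stage(bdk_node_t node, uint64_t stage_bit)
+{
+    BDK_CSR_WRITE(node, BDK_RST_SW_W1S, stage_bit); /* W1S: only sets bits */
+}
+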
+/**
+ * Register (RSL) rst_thermal_alert
+ *
+ * RST Thermal Alert Register
+ * This register is not accessible through ROM scripts; see SCR_WRITE32_S[ADDR].
+ */
+union bdk_rst_thermal_alert
+{
+ uint64_t u;
+ struct bdk_rst_thermal_alert_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+        uint64_t trip                  : 1;  /**< [  8:  8](R/W1S/H) Thermal trip pin. When set to 1, drives the THERMAL_TRIP_N pin active
+                                                                 low. This field is set either by one of the on-board temperature
+                                                                 sensors reaching a failure threshold or by writing this bit.
+                                                                 The bit can only be cleared by a deassertion of the PLL_DC_OK pin,
+                                                                 which completely resets the chip. */
+ uint64_t reserved_2_7 : 6;
+ uint64_t alert : 2; /**< [ 1: 0](RO/H) Thermal alert status. When set to 1, indicates the temperature sensor is currently at the
+ failure threshold. */
+#else /* Word 0 - Little Endian */
+ uint64_t alert : 2; /**< [ 1: 0](RO/H) Thermal alert status. When set to 1, indicates the temperature sensor is currently at the
+ failure threshold. */
+ uint64_t reserved_2_7 : 6;
+        uint64_t trip                  : 1;  /**< [  8:  8](R/W1S/H) Thermal trip pin. When set to 1, drives the THERMAL_TRIP_N pin active
+                                                                 low. This field is set either by one of the on-board temperature
+                                                                 sensors reaching a failure threshold or by writing this bit.
+                                                                 The bit can only be cleared by a deassertion of the PLL_DC_OK pin,
+                                                                 which completely resets the chip. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_rst_thermal_alert_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t trip : 1; /**< [ 8: 8](R/W1S/H) Thermal trip pin. When set, drives the THERMAL_TRIP_L pin active low.
+ This field is set by one of the on-board temperature sensors reaching a
+ failure threshold or writing this bit.
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t reserved_1_7 : 7;
+ uint64_t alert : 1; /**< [ 0: 0](RO/H) Thermal alert status. When set to one, indicates a temperature sensor is
+ currently at the failure threshold. */
+#else /* Word 0 - Little Endian */
+ uint64_t alert : 1; /**< [ 0: 0](RO/H) Thermal alert status. When set to one, indicates a temperature sensor is
+ currently at the failure threshold. */
+ uint64_t reserved_1_7 : 7;
+ uint64_t trip : 1; /**< [ 8: 8](R/W1S/H) Thermal trip pin. When set, drives the THERMAL_TRIP_L pin active low.
+ This field is set by one of the on-board temperature sensors
+ reaching a failure threshold, or by writing this bit.
+ This field is always reinitialized on a cold domain reset. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_rst_thermal_alert_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t trip : 1; /**< [ 8: 8](R/W1S/H) Thermal trip pin. When set to 1, drives the THERMAL_TRIP_N pin active low.
+ This field is set by either of the on-board temperature sensors
+ reaching a failure threshold, or by writing this bit.
+ The bit can only be cleared by a deassertion of the PLL_DC_OK pin,
+ which completely resets the chip. */
+ uint64_t reserved_1_7 : 7;
+ uint64_t alert : 1; /**< [ 0: 0](RO/H) Thermal alert status. When set to 1, indicates the temperature sensor is currently at the
+ failure threshold. */
+#else /* Word 0 - Little Endian */
+ uint64_t alert : 1; /**< [ 0: 0](RO/H) Thermal alert status. When set to 1, indicates the temperature sensor is currently at the
+ failure threshold. */
+ uint64_t reserved_1_7 : 7;
+ uint64_t trip : 1; /**< [ 8: 8](R/W1S/H) Thermal trip pin. When set to 1, drives the THERMAL_TRIP_N pin active low.
+ This field is set by either of the on-board temperature sensors
+ reaching a failure threshold, or by writing this bit.
+ The bit can only be cleared by a deassertion of the PLL_DC_OK pin,
+ which completely resets the chip. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_rst_thermal_alert_s cn88xx; */
+ /* struct bdk_rst_thermal_alert_cn81xx cn83xx; */
+};
+typedef union bdk_rst_thermal_alert bdk_rst_thermal_alert_t;
+
+#define BDK_RST_THERMAL_ALERT BDK_RST_THERMAL_ALERT_FUNC()
+static inline uint64_t BDK_RST_THERMAL_ALERT_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_THERMAL_ALERT_FUNC(void)
+{
+ return 0x87e006001690ll;
+}
+
+#define typedef_BDK_RST_THERMAL_ALERT bdk_rst_thermal_alert_t
+#define bustype_BDK_RST_THERMAL_ALERT BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_THERMAL_ALERT "RST_THERMAL_ALERT"
+#define device_bar_BDK_RST_THERMAL_ALERT 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_THERMAL_ALERT 0
+#define arguments_BDK_RST_THERMAL_ALERT -1,-1,-1,-1
+
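+/* Illustrative sketch, not part of the imported sources: polling the RO/H
+ * [ALERT] status described above. Assumes the BDK_CSR_INIT helper from
+ * bdk-csr.h in this import.
+ */
+static inline int bdk_rst_thermal_alert_active(bdk_node_t node)
+{
+    BDK_CSR_INIT(th, node, BDK_RST_THERMAL_ALERT); /* read the CSR once */
+    /* Nonzero while a temperature sensor sits at its failure threshold. */
+    return th.s.alert != 0;
+}
+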
+/**
+ * Register (RSL) rst_tns_pll_ctl
+ *
+ * RST Network-Switch PLL-Control Register
+ * This register controls the network-switch clock frequency.
+ * The following sequence is the TNS PLL-bringup sequence:
+ *
+ * 1. Write a 0 to [RESET_N] and a 1 to [DIV_RESET].
+ *
+ * 2. Set [CLKF] and [PS_EN]. If jtg_test_mode,
+ * then also write jtg__tns_pll_tm_en2, jtg__tns_pll_tm_en4, jtg__tns_pll_tm_en12 and
+ * jtg__tns_pll_tm_en24.
+ *
+ * 3. Wait 128 reference-clock cycles.
+ *
+ * 4. Write 1 to [RESET_N].
+ *
+ * 5. Wait 1152 reference-clock cycles.
+ *
+ * 6. Write 0 to [DIV_RESET].
+ *
+ * 7. Wait 10 reference-clock cycles before bringing up the network interface.
+ *
+ * If test mode is going to be activated, wait an additional 8191 reference-clock
+ * cycles to allow PLL clock alignment. (An illustrative C sketch of this
+ * bring-up sequence follows this register's definitions below.)
+ */
+union bdk_rst_tns_pll_ctl
+{
+ uint64_t u;
+ struct bdk_rst_tns_pll_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_29_63 : 35;
+ uint64_t pll_fbslip : 1; /**< [ 28: 28](RO/H) PLL FBSLIP indication. */
+ uint64_t pll_lock : 1; /**< [ 27: 27](RO/H) PLL LOCK indication. */
+ uint64_t pll_rfslip : 1; /**< [ 26: 26](RO/H) PLL RFSLIP indication. */
+ uint64_t reserved_23_25 : 3;
+ uint64_t div_reset : 1; /**< [ 22: 22](R/W) Postscalar divider reset. */
+ uint64_t ps_en : 4; /**< [ 21: 18](R/W) PLL postscalar divide ratio. Determines the network clock speed.
+ 0x0 = Divide TNS PLL by 1.
+ 0x1 = Divide TNS PLL by 2.
+ 0x2 = Divide TNS PLL by 3.
+ 0x3 = Divide TNS PLL by 4.
+ 0x4 = Divide TNS PLL by 5.
+ 0x5 = Divide TNS PLL by 6.
+ 0x6 = Divide TNS PLL by 7.
+ 0x7 = Divide TNS PLL by 8.
+ 0x8 = Divide TNS PLL by 10.
+ 0x9 = Divide TNS PLL by 12.
+ 0xA-0xF = Reserved.
+
+ [PS_EN] is not used when [DIV_RESET] = 1. */
+ uint64_t reserved_15_17 : 3;
+ uint64_t cout_rst : 1; /**< [ 14: 14](R/W) Clockout postscaler reset. This clockout postscaler should be placed in reset at
+ least 10 reference-clock cycles prior to changing [COUT_SEL]. The clockout
+ postscaler should remain under reset for at least 10 reference-clock cycles
+ after [COUT_SEL] changes. */
+ uint64_t cout_sel : 2; /**< [ 13: 12](R/W) PNR clockout select:
+ 0x0 = Network clock.
+ 0x1 = PS output.
+ 0x2 = PLL output.
+ 0x3 = UNDIVIDED core clock. */
+ uint64_t reserved_8_11 : 4;
+ uint64_t reset_n : 1; /**< [ 7: 7](R/W) PLL reset. */
+ uint64_t clkf : 7; /**< [ 6: 0](R/W) PLL multiplier. Sets TNS clock frequency to 50 MHz * ([CLKF]+1) / ([PS_EN]+1). */
+#else /* Word 0 - Little Endian */
+ uint64_t clkf : 7; /**< [ 6: 0](R/W) PLL multiplier. Sets TNS clock frequency to 50 MHz * ([CLKF]+1) / ([PS_EN]+1). */
+ uint64_t reset_n : 1; /**< [ 7: 7](R/W) PLL reset. */
+ uint64_t reserved_8_11 : 4;
+ uint64_t cout_sel : 2; /**< [ 13: 12](R/W) PNR clockout select:
+ 0x0 = Network clock.
+ 0x1 = PS output.
+ 0x2 = PLL output.
+ 0x3 = UNDIVIDED core clock. */
+ uint64_t cout_rst : 1; /**< [ 14: 14](R/W) Clockout postscaler reset. This clockout postscaler should be placed in reset at
+ least 10 reference-clock cycles prior to changing [COUT_SEL]. The clockout
+ postscaler should remain under reset for at least 10 reference-clock cycles
+ after [COUT_SEL] changes. */
+ uint64_t reserved_15_17 : 3;
+ uint64_t ps_en : 4; /**< [ 21: 18](R/W) PLL postscalar divide ratio. Determines the network clock speed.
+ 0x0 = Divide TNS PLL by 1.
+ 0x1 = Divide TNS PLL by 2.
+ 0x2 = Divide TNS PLL by 3.
+ 0x3 = Divide TNS PLL by 4.
+ 0x4 = Divide TNS PLL by 5.
+ 0x5 = Divide TNS PLL by 6.
+ 0x6 = Divide TNS PLL by 7.
+ 0x7 = Divide TNS PLL by 8.
+ 0x8 = Divide TNS PLL by 10.
+ 0x9 = Divide TNS PLL by 12.
+ 0xA-0xF = Reserved.
+
+ [PS_EN] is not used when [DIV_RESET] = 1. */
+ uint64_t div_reset : 1; /**< [ 22: 22](R/W) Postscalar divider reset. */
+ uint64_t reserved_23_25 : 3;
+ uint64_t pll_rfslip : 1; /**< [ 26: 26](RO/H) PLL RFSLIP indication. */
+ uint64_t pll_lock : 1; /**< [ 27: 27](RO/H) PLL LOCK indication. */
+ uint64_t pll_fbslip : 1; /**< [ 28: 28](RO/H) PLL FBSLIP indication. */
+ uint64_t reserved_29_63 : 35;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rst_tns_pll_ctl_s cn; */
+};
+typedef union bdk_rst_tns_pll_ctl bdk_rst_tns_pll_ctl_t;
+
+#define BDK_RST_TNS_PLL_CTL BDK_RST_TNS_PLL_CTL_FUNC()
+static inline uint64_t BDK_RST_TNS_PLL_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RST_TNS_PLL_CTL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x87e006001780ll;
+ __bdk_csr_fatal("RST_TNS_PLL_CTL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RST_TNS_PLL_CTL bdk_rst_tns_pll_ctl_t
+#define bustype_BDK_RST_TNS_PLL_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_RST_TNS_PLL_CTL "RST_TNS_PLL_CTL"
+#define device_bar_BDK_RST_TNS_PLL_CTL 0x0 /* PF_BAR0 */
+#define busnum_BDK_RST_TNS_PLL_CTL 0
+#define arguments_BDK_RST_TNS_PLL_CTL -1,-1,-1,-1
+
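+/* Illustrative sketch, not part of the imported sources: the TNS PLL
+ * bring-up sequence documented above, for the normal (non-JTAG-test-mode)
+ * case. Assumes BDK_CSR_MODIFY and bdk_wait_usec() from this import; with
+ * the 50 MHz reference clock, the microsecond waits below comfortably cover
+ * the required reference-clock cycle counts. [CLKF] and [PS_EN] values are
+ * caller-supplied.
+ */
+static inline void bdk_tns_pll_bringup(bdk_node_t node, int clkf, int ps_en)
+{
+    /* Step 1: hold the PLL and the postscalar divider in reset. */
+    BDK_CSR_MODIFY(c, node, BDK_RST_TNS_PLL_CTL,
+        c.s.reset_n = 0; c.s.div_reset = 1);
+    /* Step 2: program the multiplier and postscalar divide ratio. */
+    BDK_CSR_MODIFY(c, node, BDK_RST_TNS_PLL_CTL,
+        c.s.clkf = clkf; c.s.ps_en = ps_en);
+    /* Step 3: wait 128 reference-clock cycles (~2.6 us at 50 MHz). */
+    bdk_wait_usec(3);
+    /* Step 4: release the PLL reset. */
+    BDK_CSR_MODIFY(c, node, BDK_RST_TNS_PLL_CTL, c.s.reset_n = 1);
+    /* Step 5: wait 1152 reference-clock cycles (~23 us). */
+    bdk_wait_usec(24);
+    /* Step 6: release the postscalar divider reset. */
+    BDK_CSR_MODIFY(c, node, BDK_RST_TNS_PLL_CTL, c.s.div_reset = 0);
+    /* Step 7: wait 10 reference-clock cycles before using the interface. */
+    bdk_wait_usec(1);
+}
+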
+#endif /* __BDK_CSRS_RST_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-sgp.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-sgp.h
new file mode 100644
index 0000000000..1fc68ede63
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-sgp.h
@@ -0,0 +1,975 @@
+#ifndef __BDK_CSRS_SGP_H__
+#define __BDK_CSRS_SGP_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium SGP.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Enumeration sgp_bar_e
+ *
+ * SGPIO Base Address Register Enumeration
+ * Enumerates the base address registers.
+ */
+#define BDK_SGP_BAR_E_SGP_PF_BAR0_CN8 (0x87e027000000ll)
+#define BDK_SGP_BAR_E_SGP_PF_BAR0_CN8_SIZE 0x800000ull
+#define BDK_SGP_BAR_E_SGP_PF_BAR0_CN9 (0x87e027000000ll)
+#define BDK_SGP_BAR_E_SGP_PF_BAR0_CN9_SIZE 0x80000ull
+
+/**
+ * Enumeration sgp_tx_act_e
+ *
+ * SGPIO Transmit Activity Enumeration
+ * Enumerates the values of SGP_TX()[*_ACT].
+ */
+#define BDK_SGP_TX_ACT_E_A_OFF_ON (3)
+#define BDK_SGP_TX_ACT_E_A_ON_OFF (2)
+#define BDK_SGP_TX_ACT_E_BRIEF_END (4)
+#define BDK_SGP_TX_ACT_E_BRIEF_START (5)
+#define BDK_SGP_TX_ACT_E_B_OFF_ON (7)
+#define BDK_SGP_TX_ACT_E_B_ON_OFF (6)
+#define BDK_SGP_TX_ACT_E_STATIC_OFF (0)
+#define BDK_SGP_TX_ACT_E_STATIC_ON (1)
+
+/**
+ * Enumeration sgp_tx_err_e
+ *
+ * SGPIO Transmit Error Enumeration
+ * Enumerates the values of SGP_TX()[*_ERR].
+ */
+#define BDK_SGP_TX_ERR_E_A_OFF_ON (3)
+#define BDK_SGP_TX_ERR_E_A_ON_OFF (2)
+#define BDK_SGP_TX_ERR_E_B_OFF_ON (7)
+#define BDK_SGP_TX_ERR_E_B_ON_OFF (6)
+#define BDK_SGP_TX_ERR_E_STATIC_OFF (0)
+#define BDK_SGP_TX_ERR_E_STATIC_ON (1)
+#define BDK_SGP_TX_ERR_E_STATIC_ON4 (4)
+#define BDK_SGP_TX_ERR_E_STATIC_ON5 (5)
+
+/**
+ * Enumeration sgp_tx_loc_e
+ *
+ * SGPIO Transmit Locate Enumeration
+ * Enumerates the values of SGP_TX()[*_LOC].
+ */
+#define BDK_SGP_TX_LOC_E_A_OFF_ON (3)
+#define BDK_SGP_TX_LOC_E_A_ON_OFF (2)
+#define BDK_SGP_TX_LOC_E_STATIC_OFF (0)
+#define BDK_SGP_TX_LOC_E_STATIC_ON (1)
+
+/**
+ * Register (RSL32b) sgp_cfg0
+ *
+ * SGPIO Configuration 0 Register
+ */
+union bdk_sgp_cfg0
+{
+ uint32_t u;
+ struct bdk_sgp_cfg0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t drive_cnt : 8; /**< [ 31: 24](RO) Number of supported drives.
+ Internal:
+ Corresponds to SATA(0..15). */
+ uint32_t gp_cnt : 4; /**< [ 23: 20](RO) Number of general purpose data registers. */
+ uint32_t cfg_cnt : 3; /**< [ 19: 17](RO) Number of configuration registers. */
+ uint32_t ena : 1; /**< [ 16: 16](R/W) SGPIO enable. Enables the SGPIO inputs and outputs. When zero the bus is not driven,
+ inputs are zero, and shifts do not occur. A change from enabled to disabled does not take
+ effect until the current bit-stream has completed. */
+ uint32_t ver : 4; /**< [ 15: 12](RO) Version. */
+ uint32_t reserved_0_11 : 12;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_11 : 12;
+ uint32_t ver : 4; /**< [ 15: 12](RO) Version. */
+ uint32_t ena : 1; /**< [ 16: 16](R/W) SGPIO enable. Enables the SGPIO inputs and outputs. When zero the bus is not driven,
+ inputs are zero, and shifts do not occur. A change from enabled to disabled does not take
+ effect until the current bit-stream has completed. */
+ uint32_t cfg_cnt : 3; /**< [ 19: 17](RO) Number of configuration registers. */
+ uint32_t gp_cnt : 4; /**< [ 23: 20](RO) Number of general purpose data registers. */
+ uint32_t drive_cnt : 8; /**< [ 31: 24](RO) Number of supported drives.
+ Internal:
+ Corresponds to SATA(0..15). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_sgp_cfg0_s cn; */
+};
+typedef union bdk_sgp_cfg0 bdk_sgp_cfg0_t;
+
+#define BDK_SGP_CFG0 BDK_SGP_CFG0_FUNC()
+static inline uint64_t BDK_SGP_CFG0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SGP_CFG0_FUNC(void)
+{
+ return 0x87e027000000ll;
+}
+
+#define typedef_BDK_SGP_CFG0 bdk_sgp_cfg0_t
+#define bustype_BDK_SGP_CFG0 BDK_CSR_TYPE_RSL32b
+#define basename_BDK_SGP_CFG0 "SGP_CFG0"
+#define device_bar_BDK_SGP_CFG0 0x0 /* PF_BAR0 */
+#define busnum_BDK_SGP_CFG0 0
+#define arguments_BDK_SGP_CFG0 -1,-1,-1,-1
+
+/**
+ * Register (RSL32b) sgp_cfg1
+ *
+ * SGPIO Configuration 1 Register
+ */
+union bdk_sgp_cfg1
+{
+ uint32_t u;
+ struct bdk_sgp_cfg1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t blink_b : 4; /**< [ 23: 20](R/W) Blink generator rate B. */
+ uint32_t blink_a : 4; /**< [ 19: 16](R/W) Blink generator rate A:
+ 0x0 = 1/8 second.
+ 0x1 = 2/8 second.
+ ...
+ 0xF = 16/8 seconds. */
+ uint32_t force_off : 4; /**< [ 15: 12](R/W) Force activity off time. The minimum amount of time to disable the activity indicator if
+ it has been continually enabled for the [MAX_ON] time, and the SGP_TX()[*_ACT] is
+ 0x4 or 0x5.
+ 0x0 = No minimum.
+ 0x1 = 1/8 second.
+ ...
+ 0xF = 15/8 seconds. */
+ uint32_t max_on : 4; /**< [ 11: 8](R/W) Maximum activity on time. The maximum amount of time to enable the activity indicator if
+ SGP_TX()[*_ACT] is 0x4 or 0x5. Note that all drives will not reach the
+ [MAX_ON] time simultaneously, so the pattern will appear somewhat random.
+ 0x0 = No maximum.
+ 0x1 = 1/4 second.
+ ...
+ 0xF = 15/4 seconds. */
+ uint32_t stretch_off : 4; /**< [ 7: 4](R/W) Stretch activity off. The minimum amount of time to disable the activity indicator if
+ SGP_TX()[*_ACT] is 0x4 or 0x5.
+ 0x0 = No minimum.
+ 0x1 = 1/64 second.
+ ...
+ 0xF = 15/64 seconds. */
+ uint32_t stretch_on : 4; /**< [ 3: 0](R/W) Stretch activity on. The minimum amount of time to enable the activity indicator if
+ SGP_TX()[*_ACT] is 0x4 or 0x5.
+ 0x0 = 1/64 second.
+ 0x1 = 2/64 second.
+ ...
+ 0xF = 16/64 seconds. */
+#else /* Word 0 - Little Endian */
+ uint32_t stretch_on : 4; /**< [ 3: 0](R/W) Stretch activity on. The minimum amount of time to enable the activity indicator if
+ SGP_TX()[*_ACT] is 0x4 or 0x5.
+ 0x0 = 1/64 second.
+ 0x1 = 2/64 second.
+ ...
+ 0xF = 16/64 seconds. */
+ uint32_t stretch_off : 4; /**< [ 7: 4](R/W) Stretch activity off. The minimum amount of time to disable the activity indicator if
+ SGP_TX()[*_ACT] is 0x4 or 0x5.
+ 0x0 = No minimum.
+ 0x1 = 1/64 second.
+ ...
+ 0xF = 15/64 seconds. */
+ uint32_t max_on : 4; /**< [ 11: 8](R/W) Maximum activity on time. The maximum amount of time to enable the activity indicator if
+ SGP_TX()[*_ACT] is 0x4 or 0x5. Note that all drives will not reach the
+ [MAX_ON] time simultaneously, so the pattern will appear somewhat random.
+ 0x0 = No maximum.
+ 0x1 = 1/4 second.
+ ...
+ 0xF = 15/4 seconds. */
+ uint32_t force_off : 4; /**< [ 15: 12](R/W) Force activity off time. The minimum amount of time to disable the activity indicator if
+ it has been continually enabled for the [MAX_ON] time, and the SGP_TX()[*_ACT] is
+ 0x4 or 0x5.
+ 0x0 = No minimum.
+ 0x1 = 1/8 second.
+ ...
+ 0xF = 15/8 seconds. */
+ uint32_t blink_a : 4; /**< [ 19: 16](R/W) Blink generator rate A:
+ 0x0 = 1/8 second.
+ 0x1 = 2/8 second.
+ ...
+ 0xF = 16/8 seconds. */
+ uint32_t blink_b : 4; /**< [ 23: 20](R/W) Blink generator rate B. */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_sgp_cfg1_s cn; */
+};
+typedef union bdk_sgp_cfg1 bdk_sgp_cfg1_t;
+
+#define BDK_SGP_CFG1 BDK_SGP_CFG1_FUNC()
+static inline uint64_t BDK_SGP_CFG1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SGP_CFG1_FUNC(void)
+{
+ return 0x87e027000004ll;
+}
+
+#define typedef_BDK_SGP_CFG1 bdk_sgp_cfg1_t
+#define bustype_BDK_SGP_CFG1 BDK_CSR_TYPE_RSL32b
+#define basename_BDK_SGP_CFG1 "SGP_CFG1"
+#define device_bar_BDK_SGP_CFG1 0x0 /* PF_BAR0 */
+#define busnum_BDK_SGP_CFG1 0
+#define arguments_BDK_SGP_CFG1 -1,-1,-1,-1
+
+/**
+ * Register (RSL32b) sgp_imp_clk
+ *
+ * SGPIO Implementation Clock Register
+ */
+union bdk_sgp_imp_clk
+{
+ uint32_t u;
+ struct bdk_sgp_imp_clk_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_27_31 : 5;
+ uint32_t div : 27; /**< [ 26: 0](R/W) Coprocessor-clock divisor. Number of coprocessor clock cycles per
+ GPIO_SCLOCK. May only be changed when SGP_CFG0[ENA] and SGP_IMP_CTL[BUSY] are
+ clear. Should be programmed to yield a frequency between 64 Hz and 100 kHz;
+ reset value assumes a coprocessor clock of 800 MHz and an SGPIO_SCLOCK of
+ 100 kHz. */
+#else /* Word 0 - Little Endian */
+ uint32_t div : 27; /**< [ 26: 0](R/W) Coprocessor-clock divisor. Number of coprocessor clock cycles per
+ GPIO_SCLOCK. May only be changed when SGP_CFG0[ENA] and SGP_IMP_CTL[BUSY] are
+ clear. Should be programmed to yield a frequency between 64 Hz and 100 kHz;
+ reset value assumes a coprocessor clock of 800 MHz and an SGPIO_SCLOCK of
+ 100 kHz. */
+ uint32_t reserved_27_31 : 5;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_sgp_imp_clk_s cn8; */
+ struct bdk_sgp_imp_clk_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_27_31 : 5;
+ uint32_t div : 27; /**< [ 26: 0](R/W) 100 MHz reference-clock divisor. Number of 100 MHz reference clock cycles per
+ SGPIO_SCLOCK. May only be changed when SGP_CFG0[ENA] and SGP_IMP_CTL[BUSY] are
+ clear. Should be programmed to yield a frequency between 64 Hz and 100 kHz;
+ reset value assumes an SGPIO_SCLOCK of 100 kHz. */
+#else /* Word 0 - Little Endian */
+ uint32_t div : 27; /**< [ 26: 0](R/W) 100 MHz reference-clock divisor. Number of 100 MHz reference clock cycles per
+ SGPIO_SCLOCK. May only be changed when SGP_CFG0[ENA] and SGP_IMP_CTL[BUSY] are
+ clear. Should be programmed to yield a frequency between 64 Hz and 100 kHz;
+ reset value assumes an SGPIO_SCLOCK of 100 kHz. */
+ uint32_t reserved_27_31 : 5;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_sgp_imp_clk bdk_sgp_imp_clk_t;
+
+#define BDK_SGP_IMP_CLK BDK_SGP_IMP_CLK_FUNC()
+static inline uint64_t BDK_SGP_IMP_CLK_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SGP_IMP_CLK_FUNC(void)
+{
+ return 0x87e027030010ll;
+}
+
+#define typedef_BDK_SGP_IMP_CLK bdk_sgp_imp_clk_t
+#define bustype_BDK_SGP_IMP_CLK BDK_CSR_TYPE_RSL32b
+#define basename_BDK_SGP_IMP_CLK "SGP_IMP_CLK"
+#define device_bar_BDK_SGP_IMP_CLK 0x0 /* PF_BAR0 */
+#define busnum_BDK_SGP_IMP_CLK 0
+#define arguments_BDK_SGP_IMP_CLK -1,-1,-1,-1
+
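+/* Illustrative sketch, not part of the imported sources: deriving [DIV] from
+ * the description above. With an 800 MHz coprocessor clock and the usual
+ * 100 kHz SGPIO_SCLOCK, DIV = 800000000 / 100000 = 8000, matching the stated
+ * reset assumption. Assumes BDK_CSR_MODIFY from this import; sclock_hz is a
+ * caller-supplied target between 64 Hz and 100 kHz.
+ */
+static inline void bdk_sgp_set_sclock(bdk_node_t node, uint64_t coproc_hz,
+                                      uint64_t sclock_hz)
+{
+    /* Only legal while SGP_CFG0[ENA] and SGP_IMP_CTL[BUSY] are clear. */
+    BDK_CSR_MODIFY(c, node, BDK_SGP_IMP_CLK, c.s.div = coproc_hz / sclock_hz);
+}
+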
+/**
+ * Register (RSL32b) sgp_imp_ctl
+ *
+ * SGPIO Implementation Control Register
+ */
+union bdk_sgp_imp_ctl
+{
+ uint32_t u;
+ struct bdk_sgp_imp_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_5_31 : 27;
+ uint32_t datains : 3; /**< [ 4: 2](R/W) Number of SGP_SDATAIN connections:
+ 0x0 = No SGP_SDATAIN, all input shift data will be zero.
+ 0x1 = SGP_SDATAIN(0) for drives 0-15.
+ 0x2 = SGP_SDATAIN(0) for drives 0-7, (1) for drives 8-15.
+ 0x3 = SGP_SDATAIN(0) for drives 0-3, (1) for drives 4-7, (2) for drives 8-11, (3) for
+ drives 12-15.
+ 0x4-0x7 = Reserved. */
+ uint32_t hold : 1; /**< [ 1: 1](R/W) Hold shift. When set, automatic shifts will not occur, and the SGP_TX_GP_CFG[COUNT] must
+ be used to initiate a shift operation. */
+ uint32_t busy : 1; /**< [ 0: 0](RO/H) Shift in progress. */
+#else /* Word 0 - Little Endian */
+ uint32_t busy : 1; /**< [ 0: 0](RO/H) Shift in progress. */
+ uint32_t hold : 1; /**< [ 1: 1](R/W) Hold shift. When set, automatic shifts will not occur, and the SGP_TX_GP_CFG[COUNT] must
+ be used to initiate a shift operation. */
+ uint32_t datains : 3; /**< [ 4: 2](R/W) Number of SGP_SDATAIN connections:
+ 0x0 = No SGP_SDATAIN, all input shift data will be zero.
+ 0x1 = SGP_SDATAIN(0) for drives 0-15.
+ 0x2 = SGP_SDATAIN(0) for drives 0-7, (1) for drives 8-15.
+ 0x3 = SGP_SDATAIN(0) for drives 0-3, (1) for drives 4-7, (2) for drives 8-11, (3) for
+ drives 12-15.
+ 0x4-0x7 = Reserved. */
+ uint32_t reserved_5_31 : 27;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_sgp_imp_ctl_s cn8; */
+ struct bdk_sgp_imp_ctl_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_5_31 : 27;
+ uint32_t datains : 3; /**< [ 4: 2](R/W) Number of SGPIO_SDATAIN connections:
+ 0x0 = No SGPIO_SDATAIN, all input shift data will be zero.
+ 0x1 = SGPIO_SDATAIN\<0\> for drives 0-19.
+ 0x2 = SGPIO_SDATAIN\<0\> for drives 0-15, \<1\> for drives 16-19.
+ 0x3 = SGPIO_SDATAIN\<0\> for drives 0-7, \<1\> for drives 8-15, \<2\> for drives 16-19.
+ 0x4 = SGPIO_SDATAIN\<0\> for drives 0-3, \<1\> for drives 4-7, \<2\> for drives 8-11, \<3\> for
+ drives 12-15, \<4\> for drives 16-19.
+ 0x5-0x7 = Reserved. */
+ uint32_t hold : 1; /**< [ 1: 1](R/W) Hold shift. When set, automatic shifts will not occur, and the SGP_TX_GP_CFG[COUNT] must
+ be used to initiate a shift operation. */
+ uint32_t busy : 1; /**< [ 0: 0](RO/H) Shift in progress. */
+#else /* Word 0 - Little Endian */
+ uint32_t busy : 1; /**< [ 0: 0](RO/H) Shift in progress. */
+ uint32_t hold : 1; /**< [ 1: 1](R/W) Hold shift. When set, automatic shifts will not occur, and the SGP_TX_GP_CFG[COUNT] must
+ be used to initiate a shift operation. */
+ uint32_t datains : 3; /**< [ 4: 2](R/W) Number of SGPIO_SDATAIN connections:
+ 0x0 = No SGPIO_SDATAIN, all input shift data will be zero.
+ 0x1 = SGPIO_SDATAIN\<0\> for drives 0-19.
+ 0x2 = SGPIO_SDATAIN\<0\> for drives 0-15, \<1\> for drives 16-19.
+ 0x3 = SGPIO_SDATAIN\<0\> for drives 0-7, \<1\> for drives 8-15, \<2\> for drives 16-19.
+ 0x4 = SGPIO_SDATAIN\<0\> for drives 0-3, \<1\> for drives 4-7, \<2\> for drives 8-11, \<3\> for
+ drives 12-15, \<4\> for drives 16-19.
+ 0x5-0x7 = Reserved. */
+ uint32_t reserved_5_31 : 27;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_sgp_imp_ctl bdk_sgp_imp_ctl_t;
+
+#define BDK_SGP_IMP_CTL BDK_SGP_IMP_CTL_FUNC()
+static inline uint64_t BDK_SGP_IMP_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SGP_IMP_CTL_FUNC(void)
+{
+ return 0x87e027030000ll;
+}
+
+#define typedef_BDK_SGP_IMP_CTL bdk_sgp_imp_ctl_t
+#define bustype_BDK_SGP_IMP_CTL BDK_CSR_TYPE_RSL32b
+#define basename_BDK_SGP_IMP_CTL "SGP_IMP_CTL"
+#define device_bar_BDK_SGP_IMP_CTL 0x0 /* PF_BAR0 */
+#define busnum_BDK_SGP_IMP_CTL 0
+#define arguments_BDK_SGP_IMP_CTL -1,-1,-1,-1
+
+/**
+ * Register (RSL32b) sgp_imp_drive#
+ *
+ * SGPIO Implementation Drive Map Register
+ */
+union bdk_sgp_imp_drivex
+{
+ uint32_t u;
+ struct bdk_sgp_imp_drivex_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_9_31 : 23;
+ uint32_t pres_det : 1; /**< [ 8: 8](R/W) "Presence detect. If set, logically OR SGPIO_SDATAIN's ID#.0 bit with any GPIO-related
+ presence detect and send to the relevant SATA controller's mechanical presence detect." */
+ uint32_t reserved_5_7 : 3;
+ uint32_t ctrlr : 5; /**< [ 4: 0](R/W) SATA controller attached to this index's SGPIO drive. Indicates which SATA(0..15) connects
+ to the corresponding indexed SGPIO drive 0..15. Resets to the index number; controller 0
+ for drive 0, controller 1 for drive 1, etc.
+
+ If SGP_TX()[D0_ACT]..[D3_ACT] = SGP_TX_ACT_E::BRIEF_START or
+ SGP_TX_ACT_E::BRIEF_END, the activity input will come from SATA controller
+ number [CTRLR]. Else, the activity indication is controlled by software alone.
+
+ If [PRES_DET] is set, SATA controller number [CTRLR] will receive the indexed drive's
+ presence detect. */
+#else /* Word 0 - Little Endian */
+ uint32_t ctrlr : 5; /**< [ 4: 0](R/W) SATA controller attached to this index's SGPIO drive. Indicates which SATA(0..15) connects
+ to the corresponding indexed SGPIO drive 0..15. Resets to the index number; controller 0
+ for drive 0, controller 1 for drive 1, etc.
+
+ If SGP_TX()[D0_ACT]..[D3_ACT] = SGP_TX_ACT_E::BRIEF_START or
+ SGP_TX_ACT_E::BRIEF_END, the activity input will come from SATA controller
+ number [CTRLR]. Else, the activity indication is controlled by software alone.
+
+ If [PRES_DET] is set, SATA controller number [CTRLR] will receive the indexed drive's
+ presence detect. */
+ uint32_t reserved_5_7 : 3;
+ uint32_t pres_det : 1; /**< [ 8: 8](R/W) "Presence detect. If set, logically OR SGPIO_SDATAIN's ID#.0 bit with any GPIO-related
+ presence detect and send to the relevant SATA controller's mechanical presence detect." */
+ uint32_t reserved_9_31 : 23;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_sgp_imp_drivex_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_9_31 : 23;
+ uint32_t pres_det : 1; /**< [ 8: 8](R/W) "Presence detect. If set, logically OR SGPIO_SDATAIN's ID#.0 bit with any GPIO-related
+ presence detect and send to the relevant SATA controller's mechanical presence detect." */
+ uint32_t reserved_4_7 : 4;
+ uint32_t ctrlr : 4; /**< [ 3: 0](R/W) SATA controller attached to this index's SGPIO drive. Indicates which SATA(0..15) connects
+ to the corresponding indexed SGPIO drive 0..15. Resets to the index number; controller 0
+ for drive 0, controller 1 for drive 1, etc.
+
+ If SGP_TX()[D0_ACT]..[D3_ACT] = SGP_TX_ACT_E::BRIEF_START or
+ SGP_TX_ACT_E::BRIEF_END, the activity input will come from SATA controller
+ number [CTRLR]. Else, the activity indication is controlled by software alone.
+
+ If [PRES_DET] is set, SATA controller number [CTRLR] will receive the indexed drive's
+ presence detect. */
+#else /* Word 0 - Little Endian */
+ uint32_t ctrlr : 4; /**< [ 3: 0](R/W) SATA controller attached to this index's SGPIO drive. Indicates which SATA(0..15) connects
+ to the corresponding indexed SGPIO drive 0..15. Resets to the index number; controller 0
+ for drive 0, controller 1 for drive 1, etc.
+
+ If SGP_TX()[D0_ACT]..[D3_ACT] = SGP_TX_ACT_E::BRIEF_START or
+ SGP_TX_ACT_E::BRIEF_END, the activity input will come from SATA controller
+ number [CTRLR]. Else, the activity indication is controlled by software alone.
+
+ If [PRES_DET] is set, SATA controller number [CTRLR] will receive the indexed drive's
+ presence detect. */
+ uint32_t reserved_4_7 : 4;
+ uint32_t pres_det : 1; /**< [ 8: 8](R/W) "Presence detect. If set, logically OR SGPIO_SDATAIN's ID#.0 bit with any GPIO-related
+ presence detect and send to the relevant SATA controller's mechanical presence detect." */
+ uint32_t reserved_9_31 : 23;
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_sgp_imp_drivex_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_9_31 : 23;
+ uint32_t pres_det : 1; /**< [ 8: 8](R/W) "Presence detect. If set, logically OR SGPIO_SDATAIN's ID#.0 bit with any GPIO-related
+ presence detect and send to the relevant SATA controller's mechanical presence detect." */
+ uint32_t reserved_5_7 : 3;
+ uint32_t ctrlr : 5; /**< [ 4: 0](R/W) SATA controller attached to this index's SGPIO drive. Indicates which SATA(0..19) connects
+ to the corresponding indexed SGPIO drive 0..19. Resets to the index number; controller 0
+ for drive 0, controller 1 for drive 1, etc.
+
+ If SGP_TX()[D0_ACT]..[D3_ACT] = SGP_TX_ACT_E::BRIEF_START or
+ SGP_TX_ACT_E::BRIEF_END, the activity input will come from SATA controller
+ number [CTRLR]. Else, the activity indication is controlled by software alone.
+
+ If [PRES_DET] is set, SATA controller number [CTRLR] will receive the indexed drive's
+ presence detect. */
+#else /* Word 0 - Little Endian */
+ uint32_t ctrlr : 5; /**< [ 4: 0](R/W) SATA controller attached to this index's SGPIO drive. Indicates which SATA(0..19) connects
+ to the corresponding indexed SGPIO drive 0..19. Resets to the index number; controller 0
+ for drive 0, controller 1 for drive 1, etc.
+
+ If SGP_TX()[D0_ACT]..[D3_ACT] = SGP_TX_ACT_E::BRIEF_START or
+ SGP_TX_ACT_E::BRIEF_END, the activity input will come from SATA controller
+ number [CTRLR]. Else, the activity indication is controlled by software alone.
+
+ If [PRES_DET] is set, SATA controller number [CTRLR] will receive the indexed drive's
+ presence detect. */
+ uint32_t reserved_5_7 : 3;
+ uint32_t pres_det : 1; /**< [ 8: 8](R/W) "Presence detect. If set, logically OR SGPIO_SDATAIN's ID#.0 bit with any GPIO-related
+ presence detect and send to the relevant SATA controller's mechanical presence detect." */
+ uint32_t reserved_9_31 : 23;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_sgp_imp_drivex bdk_sgp_imp_drivex_t;
+
+static inline uint64_t BDK_SGP_IMP_DRIVEX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SGP_IMP_DRIVEX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX) && (a<=15))
+ return 0x87e027040000ll + 8ll * ((a) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=19))
+ return 0x87e027040000ll + 8ll * ((a) & 0x1f);
+ __bdk_csr_fatal("SGP_IMP_DRIVEX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SGP_IMP_DRIVEX(a) bdk_sgp_imp_drivex_t
+#define bustype_BDK_SGP_IMP_DRIVEX(a) BDK_CSR_TYPE_RSL32b
+#define basename_BDK_SGP_IMP_DRIVEX(a) "SGP_IMP_DRIVEX"
+#define device_bar_BDK_SGP_IMP_DRIVEX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SGP_IMP_DRIVEX(a) (a)
+#define arguments_BDK_SGP_IMP_DRIVEX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL32b) sgp_imp_sec_clk
+ *
+ * SGPIO Implementation Seconds Clock Register
+ */
+union bdk_sgp_imp_sec_clk
+{
+ uint32_t u;
+ struct bdk_sgp_imp_sec_clk_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_16_31 : 16;
+ uint32_t div : 16; /**< [ 15: 0](R/W) Coprocessor-clock seconds divisor. Number of GPIO_SCLOCKs per 1/64th second. May
+ only be changed when SGP_CFG0[ENA] and SGP_IMP_CTL[BUSY] are clear. Should be
+ programmed to yield a frequency of 64 Hz; reset value assumes GPIO_SCLOCK of 100
+ kHz. */
+#else /* Word 0 - Little Endian */
+ uint32_t div : 16; /**< [ 15: 0](R/W) Coprocessor-clock seconds divisor. Number of GPIO_SCLOCKs per 1/64th second. May
+ only be changed when SGP_CFG0[ENA] and SGP_IMP_CTL[BUSY] are clear. Should be
+ programmed to yield a frequency of 64 Hz; reset value assumes GPIO_SCLOCK of 100
+ kHz. */
+ uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_sgp_imp_sec_clk_s cn8; */
+ struct bdk_sgp_imp_sec_clk_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_16_31 : 16;
+ uint32_t div : 16; /**< [ 15: 0](R/W) 100 MHz reference-clock seconds divisor. Number of SGPIO_SCLOCK cycles per 1/64th
+ second. May only be changed when SGP_CFG0[ENA] and SGP_IMP_CTL[BUSY] are
+ clear. Should be programmed to yield a frequency of 64 Hz; reset value assumes
+ SGPIO_SCLOCK of 100 kHz. */
+#else /* Word 0 - Little Endian */
+ uint32_t div : 16; /**< [ 15: 0](R/W) 100 MHz reference-clock seconds divisor. Number of SGPIO_SCLOCK cycles per 1/64th
+ second. May only be changed when SGP_CFG0[ENA] and SGP_IMP_CTL[BUSY] are
+ clear. Should be programmed to yield a frequency of 64 Hz; reset value assumes
+ SGPIO_SCLOCK of 100 kHz. */
+ uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_sgp_imp_sec_clk bdk_sgp_imp_sec_clk_t;
+
+#define BDK_SGP_IMP_SEC_CLK BDK_SGP_IMP_SEC_CLK_FUNC()
+static inline uint64_t BDK_SGP_IMP_SEC_CLK_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SGP_IMP_SEC_CLK_FUNC(void)
+{
+ return 0x87e027030020ll;
+}
+
+#define typedef_BDK_SGP_IMP_SEC_CLK bdk_sgp_imp_sec_clk_t
+#define bustype_BDK_SGP_IMP_SEC_CLK BDK_CSR_TYPE_RSL32b
+#define basename_BDK_SGP_IMP_SEC_CLK "SGP_IMP_SEC_CLK"
+#define device_bar_BDK_SGP_IMP_SEC_CLK 0x0 /* PF_BAR0 */
+#define busnum_BDK_SGP_IMP_SEC_CLK 0
+#define arguments_BDK_SGP_IMP_SEC_CLK -1,-1,-1,-1
+
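+/* Illustrative sketch, not part of the imported sources: [DIV] here counts
+ * SGPIO_SCLOCKs per 1/64th second, so for the 100 kHz SGPIO_SCLOCK assumed
+ * at reset, DIV = 100000 / 64 = 1562 (truncated). This provides the 64 Hz
+ * time base that the SGP_CFG1 blink and stretch timers are specified
+ * against. Assumes BDK_CSR_MODIFY from this import.
+ */
+static inline void bdk_sgp_set_sec_clk(bdk_node_t node, uint64_t sclock_hz)
+{
+    /* Only legal while SGP_CFG0[ENA] and SGP_IMP_CTL[BUSY] are clear. */
+    BDK_CSR_MODIFY(c, node, BDK_SGP_IMP_SEC_CLK, c.s.div = sclock_hz / 64);
+}
+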
+/**
+ * Register (RSL32b) sgp_rx#
+ *
+ * SGPIO Receive Registers
+ */
+union bdk_sgp_rxx
+{
+ uint32_t u;
+ struct bdk_sgp_rxx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_27_31 : 5;
+ uint32_t rx3 : 3; /**< [ 26: 24](RO/H) Three bits received on SGPIO_SDATAIN corresponding to drive 3+4*index. */
+ uint32_t reserved_19_23 : 5;
+ uint32_t rx2 : 3; /**< [ 18: 16](RO/H) Three bits received on SGPIO_SDATAIN corresponding to drive 2+4*index. */
+ uint32_t reserved_11_15 : 5;
+ uint32_t rx1 : 3; /**< [ 10: 8](RO/H) Three bits received on SGPIO_SDATAIN corresponding to drive 1+4*index. */
+ uint32_t reserved_3_7 : 5;
+ uint32_t rx0 : 3; /**< [ 2: 0](RO/H) Three bits received on SGPIO_SDATAIN corresponding to drive 0+4*index. */
+#else /* Word 0 - Little Endian */
+ uint32_t rx0 : 3; /**< [ 2: 0](RO/H) Three bits received on SGPIO_SDATAIN corresponding to drive 0+4*index. */
+ uint32_t reserved_3_7 : 5;
+ uint32_t rx1 : 3; /**< [ 10: 8](RO/H) Three bits received on SGPIO_SDATAIN corresponding to drive 1+4*index. */
+ uint32_t reserved_11_15 : 5;
+ uint32_t rx2 : 3; /**< [ 18: 16](RO/H) Three bits received on SGPIO_SDATAIN corresponding to drive 2+4*index. */
+ uint32_t reserved_19_23 : 5;
+ uint32_t rx3 : 3; /**< [ 26: 24](RO/H) Three bits received on SGPIO_SDATAIN corresponding to drive 3+4*index. */
+ uint32_t reserved_27_31 : 5;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_sgp_rxx_s cn; */
+};
+typedef union bdk_sgp_rxx bdk_sgp_rxx_t;
+
+static inline uint64_t BDK_SGP_RXX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SGP_RXX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX) && (a<=3))
+ return 0x87e027000400ll + 4ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=4))
+ return 0x87e027000400ll + 4ll * ((a) & 0x7);
+ __bdk_csr_fatal("SGP_RXX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SGP_RXX(a) bdk_sgp_rxx_t
+#define bustype_BDK_SGP_RXX(a) BDK_CSR_TYPE_RSL32b
+#define basename_BDK_SGP_RXX(a) "SGP_RXX"
+#define device_bar_BDK_SGP_RXX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SGP_RXX(a) (a)
+#define arguments_BDK_SGP_RXX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL32b) sgp_rx_gp#
+ *
+ * SGPIO Receive GPIO Registers
+ */
+union bdk_sgp_rx_gpx
+{
+ uint32_t u;
+ struct bdk_sgp_rx_gpx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sdatain3 : 8; /**< [ 31: 24](RO/H) See [SDATAIN0]. */
+ uint32_t sdatain2 : 8; /**< [ 23: 16](RO/H) See [SDATAIN0]. */
+ uint32_t sdatain1 : 8; /**< [ 15: 8](RO/H) See [SDATAIN0]. */
+ uint32_t sdatain0 : 8; /**< [ 7: 0](RO/H) Raw data shifted from SGPIO_SDATAIN. The bits are not in natural 32-bit form; they are
+ assigned to registers as follows:
+ _ Bit 0 (ID0.0): SGP_RX_GP(1)[SDATAIN3]\<0\>.
+ _ Bit 8 (ID2.2): SGP_RX_GP(1)[SDATAIN2]\<0\>.
+ _ Bit 16 (ID5.1): SGP_RX_GP(1)[SDATAIN1]\<0\>.
+ _ Bit 24 (ID8.0): SGP_RX_GP(1)[SDATAIN0]\<0\>.
+ _ Bit 32 (ID10.2): SGP_RX_GP(2)[SDATAIN3]\<0\>.
+ _ Bit 40 (ID13.1): SGP_RX_GP(2)[SDATAIN2]\<0\>.
+ _ Bit 47 (ID15.2): SGP_RX_GP(2)[SDATAIN2]\<7\>.
+
+ SGP_RX_GP(2)[SDATAIN1/SDATAIN0] are always zero. */
+#else /* Word 0 - Little Endian */
+ uint32_t sdatain0 : 8; /**< [ 7: 0](RO/H) Raw data shifted from SGPIO_SDATAIN. The bits are not in natural 32-bit form; they are
+ assigned to registers as follows:
+ _ Bit 0 (ID0.0): SGP_RX_GP(1)[SDATAIN3]\<0\>.
+ _ Bit 8 (ID2.2): SGP_RX_GP(1)[SDATAIN2]\<0\>.
+ _ Bit 16 (ID5.1): SGP_RX_GP(1)[SDATAIN1]\<0\>.
+ _ Bit 24 (ID8.0): SGP_RX_GP(1)[SDATAIN0]\<0\>.
+ _ Bit 32 (ID10.2): SGP_RX_GP(2)[SDATAIN3]\<0\>.
+ _ Bit 40 (ID13.1): SGP_RX_GP(2)[SDATAIN2]\<0\>.
+ _ Bit 47 (ID15.2): SGP_RX_GP(2)[SDATAIN2]\<7\>.
+
+ SGP_RX_GP(2)[SDATAIN1/SDATAIN0] are always zero. */
+ uint32_t sdatain1 : 8; /**< [ 15: 8](RO/H) See [SDATAIN0]. */
+ uint32_t sdatain2 : 8; /**< [ 23: 16](RO/H) See [SDATAIN0]. */
+ uint32_t sdatain3 : 8; /**< [ 31: 24](RO/H) See [SDATAIN0]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_sgp_rx_gpx_s cn8; */
+ struct bdk_sgp_rx_gpx_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sdatain3 : 8; /**< [ 31: 24](RO/H) See [SDATAIN0]. */
+ uint32_t sdatain2 : 8; /**< [ 23: 16](RO/H) See [SDATAIN0]. */
+ uint32_t sdatain1 : 8; /**< [ 15: 8](RO/H) See [SDATAIN0]. */
+ uint32_t sdatain0 : 8; /**< [ 7: 0](RO/H) Raw data shifted from SGPIO_SDATAIN. The bits are not in natural 32-bit form; they are
+ assigned to registers as follows:
+ _ Bit 0 (ID0.0): SGP_RX_GP(1)[SDATAIN3]\<0\>.
+ _ Bit 8 (ID2.2): SGP_RX_GP(1)[SDATAIN2]\<0\>.
+ _ Bit 16 (ID5.1): SGP_RX_GP(1)[SDATAIN1]\<0\>.
+ _ Bit 24 (ID8.0): SGP_RX_GP(1)[SDATAIN0]\<0\>.
+ _ Bit 32 (ID10.2): SGP_RX_GP(2)[SDATAIN3]\<0\>.
+ _ Bit 40 (ID13.1): SGP_RX_GP(2)[SDATAIN2]\<0\>.
+ _ Bit 48 (ID16.0): SGP_RX_GP(2)[SDATAIN1]\<0\>.
+ _ Bit 56 (ID18.2): SGP_RX_GP(2)[SDATAIN0]\<0\>.
+ _ Bit 59 (ID19.2): SGP_RX_GP(2)[SDATAIN0]\<3\>. */
+#else /* Word 0 - Little Endian */
+ uint32_t sdatain0 : 8; /**< [ 7: 0](RO/H) Raw data shifted from SGPIO_SDATAIN. The bits are not in natural 32-bit form; they are
+ assigned to registers as follows:
+ _ Bit 0 (ID0.0): SGP_RX_GP(1)[SDATAIN3]\<0\>.
+ _ Bit 8 (ID2.2): SGP_RX_GP(1)[SDATAIN2]\<0\>.
+ _ Bit 16 (ID5.1): SGP_RX_GP(1)[SDATAIN1]\<0\>.
+ _ Bit 24 (ID8.0): SGP_RX_GP(1)[SDATAIN0]\<0\>.
+ _ Bit 32 (ID10.2): SGP_RX_GP(2)[SDATAIN3]\<0\>.
+ _ Bit 40 (ID13.1): SGP_RX_GP(2)[SDATAIN2]\<0\>.
+ _ Bit 48 (ID16.0): SGP_RX_GP(2)[SDATAIN1]\<0\>.
+ _ Bit 56 (ID18.2): SGP_RX_GP(2)[SDATAIN0]\<0\>.
+ _ Bit 59 (ID19.2): SGP_RX_GP(2)[SDATAIN0]\<3\>. */
+ uint32_t sdatain1 : 8; /**< [ 15: 8](RO/H) See [SDATAIN0]. */
+ uint32_t sdatain2 : 8; /**< [ 23: 16](RO/H) See [SDATAIN0]. */
+ uint32_t sdatain3 : 8; /**< [ 31: 24](RO/H) See [SDATAIN0]. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_sgp_rx_gpx bdk_sgp_rx_gpx_t;
+
+static inline uint64_t BDK_SGP_RX_GPX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SGP_RX_GPX(unsigned long a)
+{
+ if ((a>=1)&&(a<=2))
+ return 0x87e027000800ll + 4ll * ((a) & 0x3);
+ __bdk_csr_fatal("SGP_RX_GPX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SGP_RX_GPX(a) bdk_sgp_rx_gpx_t
+#define bustype_BDK_SGP_RX_GPX(a) BDK_CSR_TYPE_RSL32b
+#define basename_BDK_SGP_RX_GPX(a) "SGP_RX_GPX"
+#define device_bar_BDK_SGP_RX_GPX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SGP_RX_GPX(a) (a)
+#define arguments_BDK_SGP_RX_GPX(a) (a),-1,-1,-1
+
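+/* Illustrative sketch, not part of the imported sources: the raw-bit tables
+ * above decode mechanically. For raw input bit b (b = 0 is ID0.0), the GP
+ * register index is 1 + b/32, the byte lane counts down from SDATAIN3, and
+ * the bit position within the lane is b%8. For example b = 47 (ID15.2)
+ * lands in SGP_RX_GP(2)[SDATAIN2]<7>, matching the table. Valid b is 0-47
+ * on CN8XXX and 0-59 on CN9XXX. Assumes BDK_CSR_READ from this import.
+ */
+static inline int bdk_sgp_rx_gp_raw_bit(bdk_node_t node, unsigned int b)
+{
+    bdk_sgp_rx_gpx_t gp;
+    gp.u = (uint32_t)BDK_CSR_READ(node, BDK_SGP_RX_GPX(1 + b / 32));
+    unsigned int lane = 3 - (b % 32) / 8; /* 3 = SDATAIN3 ... 0 = SDATAIN0 */
+    return (gp.u >> (lane * 8 + b % 8)) & 1;
+}
+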
+/**
+ * Register (RSL32b) sgp_rx_gp_cfg
+ *
+ * SGPIO Receive GPIO Configuration Register
+ */
+union bdk_sgp_rx_gp_cfg
+{
+ uint32_t u;
+ struct bdk_sgp_rx_gp_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t count : 8; /**< [ 23: 16](RO/H) Number of repetitions remaining. A [COUNT] of 0xFF indicates infinite repetitions are remaining. */
+ uint32_t reserved_0_15 : 16;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_15 : 16;
+ uint32_t count : 8; /**< [ 23: 16](RO/H) Number of repetitions remaining. A [COUNT] of 0xFF indicates infinite repetitions are remaining. */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_sgp_rx_gp_cfg_s cn; */
+};
+typedef union bdk_sgp_rx_gp_cfg bdk_sgp_rx_gp_cfg_t;
+
+#define BDK_SGP_RX_GP_CFG BDK_SGP_RX_GP_CFG_FUNC()
+static inline uint64_t BDK_SGP_RX_GP_CFG_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SGP_RX_GP_CFG_FUNC(void)
+{
+ return 0x87e027000800ll;
+}
+
+#define typedef_BDK_SGP_RX_GP_CFG bdk_sgp_rx_gp_cfg_t
+#define bustype_BDK_SGP_RX_GP_CFG BDK_CSR_TYPE_RSL32b
+#define basename_BDK_SGP_RX_GP_CFG "SGP_RX_GP_CFG"
+#define device_bar_BDK_SGP_RX_GP_CFG 0x0 /* PF_BAR0 */
+#define busnum_BDK_SGP_RX_GP_CFG 0
+#define arguments_BDK_SGP_RX_GP_CFG -1,-1,-1,-1
+
+/**
+ * Register (RSL32b) sgp_tx#
+ *
+ * SGPIO Transmit Registers
+ */
+union bdk_sgp_txx
+{
+ uint32_t u;
+ struct bdk_sgp_txx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t d3_act : 3; /**< [ 31: 29](R/W) Drive 3+4*index's activity state. */
+ uint32_t d3_loc : 2; /**< [ 28: 27](R/W) Drive 3+4*index's locate state. */
+ uint32_t d3_err : 3; /**< [ 26: 24](R/W) Drive 3+4*index's error state. */
+ uint32_t d2_act : 3; /**< [ 23: 21](R/W) Drive 2+4*index's activity state. */
+ uint32_t d2_loc : 2; /**< [ 20: 19](R/W) Drive 2+4*index's locate state. */
+ uint32_t d2_err : 3; /**< [ 18: 16](R/W) Drive 2+4*index's error state. */
+ uint32_t d1_act : 3; /**< [ 15: 13](R/W) Drive 1+4*index's activity state. */
+ uint32_t d1_loc : 2; /**< [ 12: 11](R/W) Drive 1+4*index's locate state. */
+ uint32_t d1_err : 3; /**< [ 10: 8](R/W) Drive 1+4*index's error state. */
+ uint32_t d0_act : 3; /**< [ 7: 5](R/W) Drive 0+4*index's activity state, enumerated by SGP_TX_ACT_E. */
+ uint32_t d0_loc : 2; /**< [ 4: 3](R/W) Drive 0+4*index's locate state, enumerated by SGP_TX_LOC_E. */
+ uint32_t d0_err : 3; /**< [ 2: 0](R/W) Drive 0+4*index's error state, enumerated by SGP_TX_ERR_E. */
+#else /* Word 0 - Little Endian */
+ uint32_t d0_err : 3; /**< [ 2: 0](R/W) Drive 0+4*index's error state, enumerated by SGP_TX_ERR_E. */
+ uint32_t d0_loc : 2; /**< [ 4: 3](R/W) Drive 0+4*index's locate state, enumerated by SGP_TX_LOC_E. */
+ uint32_t d0_act : 3; /**< [ 7: 5](R/W) Drive 0+4*index's activity state, enumerated by SGP_TX_ACT_E. */
+ uint32_t d1_err : 3; /**< [ 10: 8](R/W) Drive 1+4*index's error state. */
+ uint32_t d1_loc : 2; /**< [ 12: 11](R/W) Drive 1+4*index's locate state. */
+ uint32_t d1_act : 3; /**< [ 15: 13](R/W) Drive 1+4*index's activity state. */
+ uint32_t d2_err : 3; /**< [ 18: 16](R/W) Drive 2+4*index's error state. */
+ uint32_t d2_loc : 2; /**< [ 20: 19](R/W) Drive 2+4*index's locate state. */
+ uint32_t d2_act : 3; /**< [ 23: 21](R/W) Drive 2+4*index's activity state. */
+ uint32_t d3_err : 3; /**< [ 26: 24](R/W) Drive 3+4*index's error state. */
+ uint32_t d3_loc : 2; /**< [ 28: 27](R/W) Drive 3+4*index's locate state. */
+ uint32_t d3_act : 3; /**< [ 31: 29](R/W) Drive 3+4*index's activity state. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_sgp_txx_s cn; */
+};
+typedef union bdk_sgp_txx bdk_sgp_txx_t;
+
+static inline uint64_t BDK_SGP_TXX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SGP_TXX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX) && (a<=3))
+ return 0x87e027000c00ll + 4ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=4))
+ return 0x87e027000c00ll + 4ll * ((a) & 0x7);
+ __bdk_csr_fatal("SGP_TXX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SGP_TXX(a) bdk_sgp_txx_t
+#define bustype_BDK_SGP_TXX(a) BDK_CSR_TYPE_RSL32b
+#define basename_BDK_SGP_TXX(a) "SGP_TXX"
+#define device_bar_BDK_SGP_TXX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SGP_TXX(a) (a)
+#define arguments_BDK_SGP_TXX(a) (a),-1,-1,-1
+
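+/* Illustrative sketch, not part of the imported sources: per the field names
+ * above, drive n's LED state lives in SGP_TX(n/4), fields D(n%4)_{ACT,LOC,ERR}.
+ * A minimal example blinking a drive's locate LED using the SGP_TX_LOC_E
+ * encodings defined earlier in this file; assumes BDK_CSR_MODIFY from this
+ * import and is written out for drive 5 (register 1, slot 1).
+ */
+static inline void bdk_sgp_locate_drive5(bdk_node_t node)
+{
+    BDK_CSR_MODIFY(c, node, BDK_SGP_TXX(5 / 4),
+        c.s.d1_loc = BDK_SGP_TX_LOC_E_A_ON_OFF); /* blink at rate A */
+}
+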
+/**
+ * Register (RSL32b) sgp_tx_gp#
+ *
+ * SGPIO Transmit GPIO Registers
+ */
+union bdk_sgp_tx_gpx
+{
+ uint32_t u;
+ struct bdk_sgp_tx_gpx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sdataout3 : 8; /**< [ 31: 24](R/W) See [SDATAOUT0]. */
+ uint32_t sdataout2 : 8; /**< [ 23: 16](R/W) See [SDATAOUT0]. */
+ uint32_t sdataout1 : 8; /**< [ 15: 8](R/W) See [SDATAOUT0]. */
+ uint32_t sdataout0 : 8; /**< [ 7: 0](R/W) Raw data to shift onto SGPIO_SDATAOUT. The bits are not in natural 32-bit form; they are
+ assigned to registers as follows:
+ _ Bit 0 (ID0.0): SGP_TX_GP(1)[SDATAOUT3]\<0\>.
+ _ Bit 8 (ID2.2): SGP_TX_GP(1)[SDATAOUT2]\<0\>.
+ _ Bit 16 (ID5.1): SGP_TX_GP(1)[SDATAOUT1]\<0\>.
+ _ Bit 24 (ID8.0): SGP_TX_GP(1)[SDATAOUT0]\<0\>.
+ _ Bit 32 (ID10.2): SGP_TX_GP(2)[SDATAOUT3]\<0\>.
+ _ Bit 40 (ID13.1): SGP_TX_GP(2)[SDATAOUT2]\<0\>.
+ _ Bit 47 (ID15.2): SGP_TX_GP(2)[SDATAOUT2]\<7\>.
+
+ SGP_TX_GP(2)[SDATAOUT1/SDATAOUT0] are ignored. */
+#else /* Word 0 - Little Endian */
+ uint32_t sdataout0 : 8; /**< [ 7: 0](R/W) Raw data to shift onto SGPIO_SDATAOUT. The bits are not in natural 32-bit form; they are
+ assigned to registers as follows:
+ _ Bit 0 (ID0.0): SGP_TX_GP(1)[SDATAOUT3]\<0\>.
+ _ Bit 8 (ID2.2): SGP_TX_GP(1)[SDATAOUT2]\<0\>.
+ _ Bit 16 (ID5.1): SGP_TX_GP(1)[SDATAOUT1]\<0\>.
+ _ Bit 24 (ID8.0): SGP_TX_GP(1)[SDATAOUT0]\<0\>.
+ _ Bit 32 (ID10.2): SGP_TX_GP(2)[SDATAOUT3]\<0\>.
+ _ Bit 40 (ID13.1): SGP_TX_GP(2)[SDATAOUT2]\<0\>.
+ _ Bit 47 (ID15.2): SGP_TX_GP(2)[SDATAOUT2]\<7\>.
+
+ SGP_TX_GP(2)[SDATAOUT1/SDATAOUT0] are ignored. */
+ uint32_t sdataout1 : 8; /**< [ 15: 8](R/W) See [SDATAOUT0]. */
+ uint32_t sdataout2 : 8; /**< [ 23: 16](R/W) See [SDATAOUT0]. */
+ uint32_t sdataout3 : 8; /**< [ 31: 24](R/W) See [SDATAOUT0]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_sgp_tx_gpx_s cn8; */
+ struct bdk_sgp_tx_gpx_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sdataout3 : 8; /**< [ 31: 24](R/W) See [SDATAOUT0]. */
+ uint32_t sdataout2 : 8; /**< [ 23: 16](R/W) See [SDATAOUT0]. */
+ uint32_t sdataout1 : 8; /**< [ 15: 8](R/W) See [SDATAOUT0]. */
+ uint32_t sdataout0 : 8; /**< [ 7: 0](R/W) Raw data to shift onto SGPIO_SDATAOUT. The bits are not in natural 32-bit form; they are
+ assigned to registers as follows:
+ _ Bit 0 (ID0.0): SGP_TX_GP(1)[SDATAOUT3]\<0\>.
+ _ Bit 8 (ID2.2): SGP_TX_GP(1)[SDATAOUT2]\<0\>.
+ _ Bit 16 (ID5.1): SGP_TX_GP(1)[SDATAOUT1]\<0\>.
+ _ Bit 24 (ID8.0): SGP_TX_GP(1)[SDATAOUT0]\<0\>.
+ _ Bit 32 (ID10.2): SGP_TX_GP(2)[SDATAOUT3]\<0\>.
+ _ Bit 40 (ID13.1): SGP_TX_GP(2)[SDATAOUT2]\<0\>.
+ _ Bit 47 (ID15.2): SGP_TX_GP(2)[SDATAOUT2]\<7\>.
+ _ Bit 48 (ID16.0): SGP_TX_GP(2)[SDATAOUT1]\<0\>.
+ _ Bit 56 (ID18.2): SGP_TX_GP(2)[SDATAOUT0]\<0\>.
+ _ Bit 59 (ID19.2): SGP_TX_GP(2)[SDATAOUT0]\<3\>. */
+#else /* Word 0 - Little Endian */
+ uint32_t sdataout0 : 8; /**< [ 7: 0](R/W) Raw data to shift onto SGPIO_SDATAOUT. The bits are not in natural 32-bit form; they are
+ assigned to registers as follows:
+ _ Bit 0 (ID0.0): SGP_TX_GP(1)[SDATAOUT3]\<0\>.
+ _ Bit 8 (ID2.2): SGP_TX_GP(1)[SDATAOUT2]\<0\>.
+ _ Bit 16 (ID5.1): SGP_TX_GP(1)[SDATAOUT1]\<0\>.
+ _ Bit 24 (ID8.0): SGP_TX_GP(1)[SDATAOUT0]\<0\>.
+ _ Bit 32 (ID10.2): SGP_TX_GP(2)[SDATAOUT3]\<0\>.
+ _ Bit 40 (ID13.1): SGP_TX_GP(2)[SDATAOUT2]\<0\>.
+ _ Bit 47 (ID15.2): SGP_TX_GP(2)[SDATAOUT2]\<7\>.
+ _ Bit 48 (ID16.0): SGP_TX_GP(2)[SDATAOUT1]\<0\>.
+ _ Bit 56 (ID18.2): SGP_TX_GP(2)[SDATAOUT0]\<0\>.
+ _ Bit 59 (ID19.2): SGP_TX_GP(2)[SDATAOUT0]\<3\>. */
+ uint32_t sdataout1 : 8; /**< [ 15: 8](R/W) See [SDATAOUT0]. */
+ uint32_t sdataout2 : 8; /**< [ 23: 16](R/W) See [SDATAOUT0]. */
+ uint32_t sdataout3 : 8; /**< [ 31: 24](R/W) See [SDATAOUT0]. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_sgp_tx_gpx bdk_sgp_tx_gpx_t;
+
+static inline uint64_t BDK_SGP_TX_GPX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SGP_TX_GPX(unsigned long a)
+{
+ if ((a>=1)&&(a<=2))
+ return 0x87e027001000ll + 4ll * ((a) & 0x3);
+ __bdk_csr_fatal("SGP_TX_GPX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SGP_TX_GPX(a) bdk_sgp_tx_gpx_t
+#define bustype_BDK_SGP_TX_GPX(a) BDK_CSR_TYPE_RSL32b
+#define basename_BDK_SGP_TX_GPX(a) "SGP_TX_GPX"
+#define device_bar_BDK_SGP_TX_GPX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SGP_TX_GPX(a) (a)
+#define arguments_BDK_SGP_TX_GPX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL32b) sgp_tx_gp_cfg
+ *
+ * SGPIO Transmit GPIO Configuration Register
+ */
+union bdk_sgp_tx_gp_cfg
+{
+ uint32_t u;
+ struct bdk_sgp_tx_gp_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_28_31 : 4;
+ uint32_t sload : 4; /**< [ 27: 24](R/W) Pattern to transmit on SGPIO_SLOAD at the start of each general purpose bit stream.
+ [SLOAD]\<0\> is the first bit (L0), \<3\> is the last bit (L3). */
+ uint32_t count : 8; /**< [ 23: 16](R/W) Number of times to transmit the SGP_TX_GP(1..2) pattern and receive
+ into SGP_RX_GP(1..2). A [COUNT] of 0xFF transmits continuously until [COUNT] is set to
+ non-0xFF. */
+ uint32_t reserved_0_15 : 16;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_15 : 16;
+ uint32_t count : 8; /**< [ 23: 16](R/W) Number of times to transmit the SGP_TX_GP(1..2) pattern and receive
+ into SGP_RX_GP(1..2). A [COUNT] of 0xFF transmits continuously until [COUNT] is set to
+ non-0xFF. */
+ uint32_t sload : 4; /**< [ 27: 24](R/W) Pattern to transmit on SGPIO_SLOAD at the start of each general purpose bit stream.
+ [SLOAD]\<0\> is the first bit (L0), \<3\> is the last bit (L3). */
+ uint32_t reserved_28_31 : 4;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_sgp_tx_gp_cfg_s cn; */
+};
+typedef union bdk_sgp_tx_gp_cfg bdk_sgp_tx_gp_cfg_t;
+
+#define BDK_SGP_TX_GP_CFG BDK_SGP_TX_GP_CFG_FUNC()
+static inline uint64_t BDK_SGP_TX_GP_CFG_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SGP_TX_GP_CFG_FUNC(void)
+{
+ return 0x87e027001000ll;
+}
+
+#define typedef_BDK_SGP_TX_GP_CFG bdk_sgp_tx_gp_cfg_t
+#define bustype_BDK_SGP_TX_GP_CFG BDK_CSR_TYPE_RSL32b
+#define basename_BDK_SGP_TX_GP_CFG "SGP_TX_GP_CFG"
+#define device_bar_BDK_SGP_TX_GP_CFG 0x0 /* PF_BAR0 */
+#define busnum_BDK_SGP_TX_GP_CFG 0
+#define arguments_BDK_SGP_TX_GP_CFG -1,-1,-1,-1
+
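+/* Illustrative sketch, not part of the imported sources: one manual
+ * general-purpose shift, combining SGP_IMP_CTL[HOLD]/[BUSY] with
+ * SGP_TX_GP_CFG[COUNT] as described above. Assumes BDK_CSR_MODIFY,
+ * BDK_CSR_WRITE and BDK_CSR_READ from this import.
+ */
+static inline void bdk_sgp_gp_shift_once(bdk_node_t node, uint32_t gp1,
+                                         uint32_t gp2)
+{
+    /* Suppress automatic shifts so COUNT alone starts the bit stream. */
+    BDK_CSR_MODIFY(c, node, BDK_SGP_IMP_CTL, c.s.hold = 1);
+    /* Load the raw pattern to transmit on SGPIO_SDATAOUT. */
+    BDK_CSR_WRITE(node, BDK_SGP_TX_GPX(1), gp1);
+    BDK_CSR_WRITE(node, BDK_SGP_TX_GPX(2), gp2);
+    /* COUNT = 1: transmit the pattern once (0xFF would repeat forever). */
+    BDK_CSR_MODIFY(c, node, BDK_SGP_TX_GP_CFG, c.s.count = 1);
+    /* Poll [BUSY]; afterwards the received data is in SGP_RX_GP(1..2). */
+    while (BDK_CSR_READ(node, BDK_SGP_IMP_CTL) & 1)
+        ;
+}
+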
+#endif /* __BDK_CSRS_SGP_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-uaa.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-uaa.h
new file mode 100644
index 0000000000..4e75ad3de1
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-uaa.h
@@ -0,0 +1,2149 @@
+#ifndef __BDK_CSRS_UAA_H__
+#define __BDK_CSRS_UAA_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium UAA.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Enumeration uaa_bar_e
+ *
+ * UART Base Address Register Enumeration
+ * Enumerates the base address registers.
+ */
+#define BDK_UAA_BAR_E_UAAX_PF_BAR0_CN9(a) (0x87e028000000ll + 0x1000000ll * (a))
+#define BDK_UAA_BAR_E_UAAX_PF_BAR0_CN9_SIZE 0x10000ull
+#define BDK_UAA_BAR_E_UAAX_PF_BAR0_CN81XX(a) (0x87e028000000ll + 0x1000000ll * (a))
+#define BDK_UAA_BAR_E_UAAX_PF_BAR0_CN81XX_SIZE 0x100000ull
+#define BDK_UAA_BAR_E_UAAX_PF_BAR0_CN88XX(a) (0x87e024000000ll + 0x1000000ll * (a))
+#define BDK_UAA_BAR_E_UAAX_PF_BAR0_CN88XX_SIZE 0x100000ull
+#define BDK_UAA_BAR_E_UAAX_PF_BAR0_CN83XX(a) (0x87e028000000ll + 0x1000000ll * (a))
+#define BDK_UAA_BAR_E_UAAX_PF_BAR0_CN83XX_SIZE 0x100000ull
+#define BDK_UAA_BAR_E_UAAX_PF_BAR4_CN9(a) (0x87e028f00000ll + 0x1000000ll * (a))
+#define BDK_UAA_BAR_E_UAAX_PF_BAR4_CN9_SIZE 0x100000ull
+#define BDK_UAA_BAR_E_UAAX_PF_BAR4_CN81XX(a) (0x87e028f00000ll + 0x1000000ll * (a))
+#define BDK_UAA_BAR_E_UAAX_PF_BAR4_CN81XX_SIZE 0x100000ull
+#define BDK_UAA_BAR_E_UAAX_PF_BAR4_CN88XX(a) (0x87e024f00000ll + 0x1000000ll * (a))
+#define BDK_UAA_BAR_E_UAAX_PF_BAR4_CN88XX_SIZE 0x100000ull
+#define BDK_UAA_BAR_E_UAAX_PF_BAR4_CN83XX(a) (0x87e028f00000ll + 0x1000000ll * (a))
+#define BDK_UAA_BAR_E_UAAX_PF_BAR4_CN83XX_SIZE 0x100000ull
+
+/**
+ * Enumeration uaa_int_vec_e
+ *
+ * UART MSI-X Vector Enumeration
+ * Enumerates the MSI-X interrupt vectors.
+ */
+#define BDK_UAA_INT_VEC_E_INTS (0)
+#define BDK_UAA_INT_VEC_E_INTS_CLEAR (1)
+
+/**
+ * Register (RSL32b) uaa#_cidr0
+ *
+ * UART Component Identification Register 0
+ */
+union bdk_uaax_cidr0
+{
+ uint32_t u;
+ struct bdk_uaax_cidr0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_uaax_cidr0_s cn; */
+};
+typedef union bdk_uaax_cidr0 bdk_uaax_cidr0_t;
+
+static inline uint64_t BDK_UAAX_CIDR0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_UAAX_CIDR0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e028000ff0ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e028000ff0ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e024000ff0ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e028000ff0ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("UAAX_CIDR0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_UAAX_CIDR0(a) bdk_uaax_cidr0_t
+#define bustype_BDK_UAAX_CIDR0(a) BDK_CSR_TYPE_RSL32b
+#define basename_BDK_UAAX_CIDR0(a) "UAAX_CIDR0"
+#define device_bar_BDK_UAAX_CIDR0(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_UAAX_CIDR0(a) (a)
+#define arguments_BDK_UAAX_CIDR0(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL32b) uaa#_cidr1
+ *
+ * UART Component Identification Register 1
+ */
+union bdk_uaax_cidr1
+{
+ uint32_t u;
+ struct bdk_uaax_cidr1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_uaax_cidr1_s cn; */
+};
+typedef union bdk_uaax_cidr1 bdk_uaax_cidr1_t;
+
+static inline uint64_t BDK_UAAX_CIDR1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_UAAX_CIDR1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e028000ff4ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e028000ff4ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e024000ff4ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e028000ff4ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("UAAX_CIDR1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_UAAX_CIDR1(a) bdk_uaax_cidr1_t
+#define bustype_BDK_UAAX_CIDR1(a) BDK_CSR_TYPE_RSL32b
+#define basename_BDK_UAAX_CIDR1(a) "UAAX_CIDR1"
+#define device_bar_BDK_UAAX_CIDR1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_UAAX_CIDR1(a) (a)
+#define arguments_BDK_UAAX_CIDR1(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL32b) uaa#_cidr2
+ *
+ * UART Component Identification Register 2
+ */
+union bdk_uaax_cidr2
+{
+ uint32_t u;
+ struct bdk_uaax_cidr2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_uaax_cidr2_s cn; */
+};
+typedef union bdk_uaax_cidr2 bdk_uaax_cidr2_t;
+
+static inline uint64_t BDK_UAAX_CIDR2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_UAAX_CIDR2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e028000ff8ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e028000ff8ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e024000ff8ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e028000ff8ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("UAAX_CIDR2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_UAAX_CIDR2(a) bdk_uaax_cidr2_t
+#define bustype_BDK_UAAX_CIDR2(a) BDK_CSR_TYPE_RSL32b
+#define basename_BDK_UAAX_CIDR2(a) "UAAX_CIDR2"
+#define device_bar_BDK_UAAX_CIDR2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_UAAX_CIDR2(a) (a)
+#define arguments_BDK_UAAX_CIDR2(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL32b) uaa#_cidr3
+ *
+ * UART Component Identification Register 3
+ */
+union bdk_uaax_cidr3
+{
+ uint32_t u;
+ struct bdk_uaax_cidr3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_uaax_cidr3_s cn; */
+};
+typedef union bdk_uaax_cidr3 bdk_uaax_cidr3_t;
+
+static inline uint64_t BDK_UAAX_CIDR3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_UAAX_CIDR3(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e028000ffcll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e028000ffcll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e024000ffcll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e028000ffcll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("UAAX_CIDR3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_UAAX_CIDR3(a) bdk_uaax_cidr3_t
+#define bustype_BDK_UAAX_CIDR3(a) BDK_CSR_TYPE_RSL32b
+#define basename_BDK_UAAX_CIDR3(a) "UAAX_CIDR3"
+#define device_bar_BDK_UAAX_CIDR3(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_UAAX_CIDR3(a) (a)
+#define arguments_BDK_UAAX_CIDR3(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL32b) uaa#_cr
+ *
+ * UART Control Register
+ */
+union bdk_uaax_cr
+{
+ uint32_t u;
+ struct bdk_uaax_cr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_16_31 : 16;
+ uint32_t ctsen : 1; /**< [ 15: 15](R/W) "CTS hardware flow control enable. If set, data is only transmitted when UART#_CTS_L is
+ asserted (low)." */
+ uint32_t rtsen : 1; /**< [ 14: 14](R/W) RTS hardware flow control enable. If set, data is only requested when there is space in the receive FIFO. */
+ uint32_t out2 : 1; /**< [ 13: 13](R/W) Unused. */
+ uint32_t out1 : 1; /**< [ 12: 12](R/W) Data carrier detect. If set, drive UART#_DCD_L asserted (low). */
+ uint32_t rts : 1; /**< [ 11: 11](R/W) Request to send. If set, assert UART#_RTS_L. */
+ uint32_t dtr : 1; /**< [ 10: 10](R/W) Data terminal ready. If set, assert UART#_DTR_N. */
+ uint32_t rxe : 1; /**< [ 9: 9](R/W) Receive enable. If set, receive section is enabled. */
+ uint32_t txe : 1; /**< [ 8: 8](R/W) Transmit enable. */
+ uint32_t lbe : 1; /**< [ 7: 7](R/W) "Loopback enable. If set, the serial output is looped into the serial input as if
+ UART#_SIN
+ was physically attached to UART#_SOUT." */
+ uint32_t reserved_1_6 : 6;
+ uint32_t uarten : 1; /**< [ 0: 0](R/W) UART enable.
+ 0 = UART is disabled. If the UART is disabled in the middle of transmission or reception,
+ it completes the current character.
+ 1 = UART enabled. */
+#else /* Word 0 - Little Endian */
+ uint32_t uarten : 1; /**< [ 0: 0](R/W) UART enable.
+ 0 = UART is disabled. If the UART is disabled in the middle of transmission or reception,
+ it completes the current character.
+ 1 = UART enabled. */
+ uint32_t reserved_1_6 : 6;
+ uint32_t lbe : 1; /**< [ 7: 7](R/W) "Loopback enable. If set, the serial output is looped into the serial input as if
+ UART#_SIN
+ was physically attached to UART#_SOUT." */
+ uint32_t txe : 1; /**< [ 8: 8](R/W) Transmit enable. */
+ uint32_t rxe : 1; /**< [ 9: 9](R/W) Receive enable. If set, receive section is enabled. */
+ uint32_t dtr : 1; /**< [ 10: 10](R/W) Data terminal ready. If set, assert UART#_DTR_N. */
+ uint32_t rts : 1; /**< [ 11: 11](R/W) Request to send. If set, assert UART#_RTS_L. */
+ uint32_t out1 : 1; /**< [ 12: 12](R/W) Data carrier detect. If set, drive UART#_DCD_L asserted (low). */
+ uint32_t out2 : 1; /**< [ 13: 13](R/W) Unused. */
+ uint32_t rtsen : 1; /**< [ 14: 14](R/W) RTS hardware flow control enable. If set, data is only requested when there is space in the receive FIFO. */
+ uint32_t ctsen : 1; /**< [ 15: 15](R/W) "CTS hardware flow control enable. If set, data is only transmitted when UART#_CTS_L is
+ asserted (low)." */
+ uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_uaax_cr_cn
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_16_31 : 16;
+ uint32_t ctsen : 1; /**< [ 15: 15](R/W) "CTS hardware flow control enable. If set, data is only transmitted when UART#_CTS_L is
+ asserted (low)." */
+ uint32_t rtsen : 1; /**< [ 14: 14](R/W) RTS hardware flow control enable. If set, data is only requested when there is space in the receive FIFO. */
+ uint32_t out2 : 1; /**< [ 13: 13](R/W) Unused. */
+ uint32_t out1 : 1; /**< [ 12: 12](R/W) Data carrier detect. If set, drive UART#_DCD_L asserted (low). */
+ uint32_t rts : 1; /**< [ 11: 11](R/W) Request to send. If set, assert UART#_RTS_L. */
+ uint32_t dtr : 1; /**< [ 10: 10](R/W) Data terminal ready. If set, assert UART#_DTR_N. */
+ uint32_t rxe : 1; /**< [ 9: 9](R/W) Receive enable. If set, receive section is enabled. */
+ uint32_t txe : 1; /**< [ 8: 8](R/W) Transmit enable. */
+ uint32_t lbe : 1; /**< [ 7: 7](R/W) "Loopback enable. If set, the serial output is looped into the serial input as if
+ UART#_SIN
+ was physically attached to UART#_SOUT." */
+ uint32_t reserved_3_6 : 4;
+ uint32_t reserved_2 : 1;
+ uint32_t reserved_1 : 1;
+ uint32_t uarten : 1; /**< [ 0: 0](R/W) UART enable.
+ 0 = UART is disabled. If the UART is disabled in the middle of transmission or reception,
+ it completes the current character.
+ 1 = UART enabled. */
+#else /* Word 0 - Little Endian */
+ uint32_t uarten : 1; /**< [ 0: 0](R/W) UART enable.
+ 0 = UART is disabled. If the UART is disabled in the middle of transmission or reception,
+ it completes the current character.
+ 1 = UART enabled. */
+ uint32_t reserved_1 : 1;
+ uint32_t reserved_2 : 1;
+ uint32_t reserved_3_6 : 4;
+ uint32_t lbe : 1; /**< [ 7: 7](R/W) "Loopback enable. If set, the serial output is looped into the serial input as if
+ UART#_SIN
+ was physically attached to UART#_SOUT." */
+ uint32_t txe : 1; /**< [ 8: 8](R/W) Transmit enable. */
+ uint32_t rxe : 1; /**< [ 9: 9](R/W) Receive enable. If set, receive section is enabled. */
+ uint32_t dtr : 1; /**< [ 10: 10](R/W) Data terminal ready. If set, assert UART#_DTR_N. */
+ uint32_t rts : 1; /**< [ 11: 11](R/W) Request to send. If set, assert UART#_RTS_L. */
+ uint32_t out1 : 1; /**< [ 12: 12](R/W) Data carrier detect. If set, drive UART#_DCD_L asserted (low). */
+ uint32_t out2 : 1; /**< [ 13: 13](R/W) Unused. */
+ uint32_t rtsen : 1; /**< [ 14: 14](R/W) RTS hardware flow control enable. If set, data is only requested when there is space in the receive FIFO. */
+ uint32_t ctsen : 1; /**< [ 15: 15](R/W) "CTS hardware flow control enable. If set, data is only transmitted when UART#_CTS_L is
+ asserted (low)." */
+ uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+ } cn;
+};
+typedef union bdk_uaax_cr bdk_uaax_cr_t;
+
+static inline uint64_t BDK_UAAX_CR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_UAAX_CR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e028000030ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e028000030ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e024000030ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e028000030ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("UAAX_CR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_UAAX_CR(a) bdk_uaax_cr_t
+#define bustype_BDK_UAAX_CR(a) BDK_CSR_TYPE_RSL32b
+#define basename_BDK_UAAX_CR(a) "UAAX_CR"
+#define device_bar_BDK_UAAX_CR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_UAAX_CR(a) (a)
+#define arguments_BDK_UAAX_CR(a) (a),-1,-1,-1
+
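+/* Editorial sketch (assumes the CSR physical address is identity-mapped;
+ * production code would go through the BDK's CSR accessors): read-modify-
+ * write of uaa#_cr to bring the UART up with transmit and receive enabled. */
+static inline void uaa_cr_enable_sketch(unsigned long uart)
+{
+    volatile uint32_t *cr = (volatile uint32_t *)(uintptr_t)BDK_UAAX_CR(uart);
+    bdk_uaax_cr_t v;
+    v.u = *cr;          /* read current control settings */
+    v.s.txe = 1;        /* transmit enable */
+    v.s.rxe = 1;        /* receive enable */
+    v.s.uarten = 1;     /* UART enable */
+    *cr = v.u;          /* write back */
+}
+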
+/**
+ * Register (RSL32b) uaa#_dr
+ *
+ * UART Data Register
+ * Writing to this register pushes data to the FIFO for transmission. Reading it retrieves
+ * received data from the receive FIFO.
+ */
+union bdk_uaax_dr
+{
+ uint32_t u;
+ struct bdk_uaax_dr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_12_31 : 20;
+ uint32_t oe : 1; /**< [ 11: 11](RO/H) Overrun error. Set if data is received and FIFO was full. Cleared once a new character is
+ written to the FIFO. */
+ uint32_t be : 1; /**< [ 10: 10](RO/H) Break error. Indicates received data input was held low for longer than a full-transmission time. */
+ uint32_t pe : 1; /**< [ 9: 9](RO/H) Parity error. Indicates the parity did not match that expected. */
+ uint32_t fe : 1; /**< [ 8: 8](RO/H) Framing error. Indicates that the received character did not have a stop bit. */
+ uint32_t data : 8; /**< [ 7: 0](R/W/H) On write operations, data to transmit. On read operations, received data. */
+#else /* Word 0 - Little Endian */
+ uint32_t data : 8; /**< [ 7: 0](R/W/H) On write operations, data to transmit. On read operations, received data. */
+ uint32_t fe : 1; /**< [ 8: 8](RO/H) Framing error. Indicates that the received character did not have a stop bit. */
+ uint32_t pe : 1; /**< [ 9: 9](RO/H) Parity error. Indicates the parity did not match that expected. */
+ uint32_t be : 1; /**< [ 10: 10](RO/H) Break error. Indicates received data input was held low for longer than a full-transmission time. */
+ uint32_t oe : 1; /**< [ 11: 11](RO/H) Overrun error. Set if data is received and FIFO was full. Cleared once a new character is
+ written to the FIFO. */
+ uint32_t reserved_12_31 : 20;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_uaax_dr_s cn; */
+};
+typedef union bdk_uaax_dr bdk_uaax_dr_t;
+
+static inline uint64_t BDK_UAAX_DR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_UAAX_DR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e028000000ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e028000000ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e024000000ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e028000000ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("UAAX_DR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_UAAX_DR(a) bdk_uaax_dr_t
+#define bustype_BDK_UAAX_DR(a) BDK_CSR_TYPE_RSL32b
+#define basename_BDK_UAAX_DR(a) "UAAX_DR"
+#define device_bar_BDK_UAAX_DR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_UAAX_DR(a) (a)
+#define arguments_BDK_UAAX_DR(a) (a),-1,-1,-1
+
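+/* Editorial sketch: receive one character through uaa#_dr, rejecting it
+ * when any of the per-character error flags accompany the data. Assumes
+ * the caller has already checked UAA()_FR[RXFE] and that the CSR address
+ * is identity-mapped. */
+static inline int uaa_dr_getc_sketch(unsigned long uart)
+{
+    volatile uint32_t *dr = (volatile uint32_t *)(uintptr_t)BDK_UAAX_DR(uart);
+    bdk_uaax_dr_t v;
+    v.u = *dr;                          /* pops one entry from the receive FIFO */
+    if (v.s.fe || v.s.pe || v.s.be || v.s.oe)
+        return -1;                      /* framing/parity/break/overrun error */
+    return v.s.data;
+}
+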
+/**
+ * Register (RSL32b) uaa#_fbrd
+ *
+ * UART Fractional Baud Rate Register
+ */
+union bdk_uaax_fbrd
+{
+ uint32_t u;
+ struct bdk_uaax_fbrd_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_6_31 : 26;
+ uint32_t baud_divfrac : 6; /**< [ 5: 0](R/W) Fractional part of baud rate divisor. The output baud rate is equal to the HCLK frequency
+ divided by sixteen times the value of the baud-rate divisor, as follows:
+
+ _ baud rate = HCLK / (16 * divisor).
+
+ Where the HCLK frequency is controlled by UAA()_UCTL_CTL[H_CLKDIV_SEL].
+
+ Once both divisor-latch registers are set, at least eight HCLK
+ cycles should be allowed to pass before transmitting or receiving data. */
+#else /* Word 0 - Little Endian */
+ uint32_t baud_divfrac : 6; /**< [ 5: 0](R/W) Fractional part of baud rate divisor. The output baud rate is equal to the HCLK frequency
+ divided by sixteen times the value of the baud-rate divisor, as follows:
+
+ _ baud rate = HCLK / (16 * divisor).
+
+ Where the HCLK frequency is controlled by UAA()_UCTL_CTL[H_CLKDIV_SEL].
+
+ Once both divisor-latch registers are set, at least eight HCLK
+ cycles should be allowed to pass before transmitting or receiving data. */
+ uint32_t reserved_6_31 : 26;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_uaax_fbrd_s cn9; */
+ struct bdk_uaax_fbrd_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_6_31 : 26;
+ uint32_t baud_divfrac : 6; /**< [ 5: 0](R/W) Fractional part of baud rate divisor. The output baud rate is equal to the coprocessor-
+ clock frequency divided by sixteen times the value of the baud-rate divisor, as follows:
+
+ _ baud rate = coprocessor-clock frequency / (16 * divisor).
+
+ Note that once both divisor-latch registers are set, at least eight coprocessor-clock
+ cycles should be allowed to pass before transmitting or receiving data. */
+#else /* Word 0 - Little Endian */
+ uint32_t baud_divfrac : 6; /**< [ 5: 0](R/W) Fractional part of baud rate divisor. The output baud rate is equal to the coprocessor-
+ clock frequency divided by sixteen times the value of the baud-rate divisor, as follows:
+
+ _ baud rate = coprocessor-clock frequency / (16 * divisor).
+
+ Note that once both divisor-latch registers are set, at least eight coprocessor-clock
+ cycles should be allowed to pass before transmitting or receiving data. */
+ uint32_t reserved_6_31 : 26;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_uaax_fbrd_s cn88xx; */
+ /* struct bdk_uaax_fbrd_cn81xx cn83xx; */
+};
+typedef union bdk_uaax_fbrd bdk_uaax_fbrd_t;
+
+static inline uint64_t BDK_UAAX_FBRD(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_UAAX_FBRD(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e028000028ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e028000028ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e024000028ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e028000028ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("UAAX_FBRD", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_UAAX_FBRD(a) bdk_uaax_fbrd_t
+#define bustype_BDK_UAAX_FBRD(a) BDK_CSR_TYPE_RSL32b
+#define basename_BDK_UAAX_FBRD(a) "UAAX_FBRD"
+#define device_bar_BDK_UAAX_FBRD(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_UAAX_FBRD(a) (a)
+#define arguments_BDK_UAAX_FBRD(a) (a),-1,-1,-1
+
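+/* Editorial sketch: split the divisor from the formula above
+ * (baud rate = HCLK / (16 * divisor)) into the 16-bit integer part for
+ * uaa#_ibrd and the 6-bit fractional part for uaa#_fbrd. Plain C
+ * arithmetic only; hclk_hz is whatever frequency UAA()_UCTL_CTL
+ * [H_CLKDIV_SEL] selected. */
+static inline void uaa_divisor_sketch(uint64_t hclk_hz, uint32_t baud,
+                                      uint32_t *divint, uint32_t *divfrac)
+{
+    /* divisor * 64 = (hclk / (16 * baud)) * 64 = hclk * 4 / baud, rounded */
+    uint64_t div64 = (hclk_hz * 4 + baud / 2) / baud;
+    *divint  = (uint32_t)(div64 >> 6);      /* -> UAA()_IBRD[BAUD_DIVINT] */
+    *divfrac = (uint32_t)(div64 & 0x3f);    /* -> UAA()_FBRD[BAUD_DIVFRAC] */
+}
+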
+/**
+ * Register (RSL32b) uaa#_fr
+ *
+ * UART Flag Register
+ */
+union bdk_uaax_fr
+{
+ uint32_t u;
+ struct bdk_uaax_fr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_9_31 : 23;
+ uint32_t ri : 1; /**< [ 8: 8](RO/H) Complement of ring indicator. Not supported. */
+ uint32_t txfe : 1; /**< [ 7: 7](RO/H) Transmit FIFO empty. */
+ uint32_t rxff : 1; /**< [ 6: 6](RO/H) Receive FIFO full. */
+ uint32_t txff : 1; /**< [ 5: 5](RO/H) Transmit FIFO full. */
+ uint32_t rxfe : 1; /**< [ 4: 4](RO/H) Receive FIFO empty. */
+ uint32_t busy : 1; /**< [ 3: 3](RO/H) UART busy transmitting data. */
+ uint32_t dcd : 1; /**< [ 2: 2](RO/H) Data carrier detect. */
+ uint32_t dsr : 1; /**< [ 1: 1](RO/H) Data set ready. */
+ uint32_t cts : 1; /**< [ 0: 0](RO/H) Clear to send. Complement of the UART#_CTS_L modem status input pin. */
+#else /* Word 0 - Little Endian */
+ uint32_t cts : 1; /**< [ 0: 0](RO/H) Clear to send. Complement of the UART#_CTS_L modem status input pin. */
+ uint32_t dsr : 1; /**< [ 1: 1](RO/H) Data set ready. */
+ uint32_t dcd : 1; /**< [ 2: 2](RO/H) Data carrier detect. */
+ uint32_t busy : 1; /**< [ 3: 3](RO/H) UART busy transmitting data. */
+ uint32_t rxfe : 1; /**< [ 4: 4](RO/H) Receive FIFO empty. */
+ uint32_t txff : 1; /**< [ 5: 5](RO/H) Transmit FIFO full. */
+ uint32_t rxff : 1; /**< [ 6: 6](RO/H) Receive FIFO full. */
+ uint32_t txfe : 1; /**< [ 7: 7](RO/H) Transmit FIFO empty. */
+ uint32_t ri : 1; /**< [ 8: 8](RO/H) Complement of ring indicator. Not supported. */
+ uint32_t reserved_9_31 : 23;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_uaax_fr_s cn; */
+};
+typedef union bdk_uaax_fr bdk_uaax_fr_t;
+
+static inline uint64_t BDK_UAAX_FR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_UAAX_FR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e028000018ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e028000018ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e024000018ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e028000018ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("UAAX_FR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_UAAX_FR(a) bdk_uaax_fr_t
+#define bustype_BDK_UAAX_FR(a) BDK_CSR_TYPE_RSL32b
+#define basename_BDK_UAAX_FR(a) "UAAX_FR"
+#define device_bar_BDK_UAAX_FR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_UAAX_FR(a) (a)
+#define arguments_BDK_UAAX_FR(a) (a),-1,-1,-1
+
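+/* Editorial sketch: polled transmit, spinning on uaa#_fr[TXFF] before
+ * pushing a byte into uaa#_dr. Identity-mapped CSR addresses assumed. */
+static inline void uaa_putc_sketch(unsigned long uart, char c)
+{
+    volatile uint32_t *fr = (volatile uint32_t *)(uintptr_t)BDK_UAAX_FR(uart);
+    volatile uint32_t *dr = (volatile uint32_t *)(uintptr_t)BDK_UAAX_DR(uart);
+    bdk_uaax_fr_t flags;
+    do {
+        flags.u = *fr;
+    } while (flags.s.txff);     /* wait while the transmit FIFO is full */
+    *dr = (uint8_t)c;           /* queue the character for transmission */
+}
+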
+/**
+ * Register (RSL32b) uaa#_ibrd
+ *
+ * UART Integer Baud Rate Register
+ */
+union bdk_uaax_ibrd
+{
+ uint32_t u;
+ struct bdk_uaax_ibrd_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_16_31 : 16;
+ uint32_t baud_divint : 16; /**< [ 15: 0](R/W) Integer part of baud-rate divisor. See UAA()_FBRD. */
+#else /* Word 0 - Little Endian */
+ uint32_t baud_divint : 16; /**< [ 15: 0](R/W) Integer part of baud-rate divisor. See UAA()_FBRD. */
+ uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_uaax_ibrd_s cn; */
+};
+typedef union bdk_uaax_ibrd bdk_uaax_ibrd_t;
+
+static inline uint64_t BDK_UAAX_IBRD(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_UAAX_IBRD(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e028000024ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e028000024ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e024000024ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e028000024ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("UAAX_IBRD", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_UAAX_IBRD(a) bdk_uaax_ibrd_t
+#define bustype_BDK_UAAX_IBRD(a) BDK_CSR_TYPE_RSL32b
+#define basename_BDK_UAAX_IBRD(a) "UAAX_IBRD"
+#define device_bar_BDK_UAAX_IBRD(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_UAAX_IBRD(a) (a)
+#define arguments_BDK_UAAX_IBRD(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL32b) uaa#_icr
+ *
+ * UART Interrupt Clear Register
+ * Reads of this register return zero, not the interrupt state.
+ */
+union bdk_uaax_icr
+{
+ uint32_t u;
+ struct bdk_uaax_icr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_11_31 : 21;
+ uint32_t oeic : 1; /**< [ 10: 10](R/W1C) Overrun error interrupt clear. */
+ uint32_t beic : 1; /**< [ 9: 9](R/W1C) Break error interrupt clear. */
+ uint32_t peic : 1; /**< [ 8: 8](R/W1C) Parity error interrupt clear. */
+ uint32_t feic : 1; /**< [ 7: 7](R/W1C) Framing error interrupt clear. */
+ uint32_t rtic : 1; /**< [ 6: 6](R/W1C) Receive timeout interrupt clear. */
+ uint32_t txic : 1; /**< [ 5: 5](R/W1C) Transmit interrupt clear. */
+ uint32_t rxic : 1; /**< [ 4: 4](R/W1C) Receive interrupt clear. */
+ uint32_t dsrmic : 1; /**< [ 3: 3](R/W1C) DSR modem interrupt clear. */
+ uint32_t dcdmic : 1; /**< [ 2: 2](R/W1C) DCD modem interrupt clear. */
+ uint32_t ctsmic : 1; /**< [ 1: 1](R/W1C) CTS modem interrupt clear. */
+ uint32_t rimic : 1; /**< [ 0: 0](R/W1C) Ring indicator interrupt clear. Not implemented. */
+#else /* Word 0 - Little Endian */
+ uint32_t rimic : 1; /**< [ 0: 0](R/W1C) Ring indicator interrupt clear. Not implemented. */
+ uint32_t ctsmic : 1; /**< [ 1: 1](R/W1C) CTS modem interrupt clear. */
+ uint32_t dcdmic : 1; /**< [ 2: 2](R/W1C) DCD modem interrupt clear. */
+ uint32_t dsrmic : 1; /**< [ 3: 3](R/W1C) DSR modem interrupt clear. */
+ uint32_t rxic : 1; /**< [ 4: 4](R/W1C) Receive interrupt clear. */
+ uint32_t txic : 1; /**< [ 5: 5](R/W1C) Transmit interrupt clear. */
+ uint32_t rtic : 1; /**< [ 6: 6](R/W1C) Receive timeout interrupt clear. */
+ uint32_t feic : 1; /**< [ 7: 7](R/W1C) Framing error interrupt clear. */
+ uint32_t peic : 1; /**< [ 8: 8](R/W1C) Parity error interrupt clear. */
+ uint32_t beic : 1; /**< [ 9: 9](R/W1C) Break error interrupt clear. */
+ uint32_t oeic : 1; /**< [ 10: 10](R/W1C) Overrun error interrupt clear. */
+ uint32_t reserved_11_31 : 21;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_uaax_icr_s cn; */
+};
+typedef union bdk_uaax_icr bdk_uaax_icr_t;
+
+static inline uint64_t BDK_UAAX_ICR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_UAAX_ICR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e028000044ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e028000044ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e024000044ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e028000044ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("UAAX_ICR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_UAAX_ICR(a) bdk_uaax_icr_t
+#define bustype_BDK_UAAX_ICR(a) BDK_CSR_TYPE_RSL32b
+#define basename_BDK_UAAX_ICR(a) "UAAX_ICR"
+#define device_bar_BDK_UAAX_ICR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_UAAX_ICR(a) (a)
+#define arguments_BDK_UAAX_ICR(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL32b) uaa#_ifls
+ *
+ * UART Interrupt FIFO Level Select Register
+ */
+union bdk_uaax_ifls
+{
+ uint32_t u;
+ struct bdk_uaax_ifls_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_6_31 : 26;
+ uint32_t rxiflsel : 3; /**< [ 5: 3](R/W) Receive interrupt FIFO level select.
+ 0x0 = Receive FIFO becomes \>= 1/8 full.
+ 0x1 = Receive FIFO becomes \>= 1/4 full.
+ 0x2 = Receive FIFO becomes \>= 1/2 full.
+ 0x3 = Receive FIFO becomes \>= 3/4 full.
+ 0x4 = Receive FIFO becomes \>= 7/8 full.
+ 0x5-0x7 = Reserved. */
+ uint32_t txiflsel : 3; /**< [ 2: 0](R/W) Transmit interrupt FIFO level select.
+ 0x0 = Transmit FIFO becomes \<= 1/8 full.
+ 0x1 = Transmit FIFO becomes \<= 1/4 full.
+ 0x2 = Transmit FIFO becomes \<= 1/2 full.
+ 0x3 = Transmit FIFO becomes \<= 3/4 full.
+ 0x4 = Transmit FIFO becomes \<= 7/8 full.
+ 0x5-0x7 = Reserved. */
+#else /* Word 0 - Little Endian */
+ uint32_t txiflsel : 3; /**< [ 2: 0](R/W) Transmit interrupt FIFO level select.
+ 0x0 = Transmit FIFO becomes \<= 1/8 full.
+ 0x1 = Transmit FIFO becomes \<= 1/4 full.
+ 0x2 = Transmit FIFO becomes \<= 1/2 full.
+ 0x3 = Transmit FIFO becomes \<= 3/4 full.
+ 0x4 = Transmit FIFO becomes \<= 7/8 full.
+ 0x5-0x7 = Reserved. */
+ uint32_t rxiflsel : 3; /**< [ 5: 3](R/W) Receive interrupt FIFO level select.
+ 0x0 = Receive FIFO becomes \>= 1/8 full.
+ 0x1 = Receive FIFO becomes \>= 1/4 full.
+ 0x2 = Receive FIFO becomes \>= 1/2 full.
+ 0x3 = Receive FIFO becomes \>= 3/4 full.
+ 0x4 = Receive FIFO becomes \>= 7/8 full.
+ 0x5-0x7 = Reserved. */
+ uint32_t reserved_6_31 : 26;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_uaax_ifls_s cn; */
+};
+typedef union bdk_uaax_ifls bdk_uaax_ifls_t;
+
+static inline uint64_t BDK_UAAX_IFLS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_UAAX_IFLS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e028000034ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e028000034ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e024000034ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e028000034ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("UAAX_IFLS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_UAAX_IFLS(a) bdk_uaax_ifls_t
+#define bustype_BDK_UAAX_IFLS(a) BDK_CSR_TYPE_RSL32b
+#define basename_BDK_UAAX_IFLS(a) "UAAX_IFLS"
+#define device_bar_BDK_UAAX_IFLS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_UAAX_IFLS(a) (a)
+#define arguments_BDK_UAAX_IFLS(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL32b) uaa#_imsc
+ *
+ * UART Interrupt Mask Set/Clear Register
+ */
+union bdk_uaax_imsc
+{
+ uint32_t u;
+ struct bdk_uaax_imsc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_11_31 : 21;
+ uint32_t oeim : 1; /**< [ 10: 10](R/W) Overrun error interrupt mask. */
+ uint32_t beim : 1; /**< [ 9: 9](R/W) Break error interrupt mask. */
+ uint32_t peim : 1; /**< [ 8: 8](R/W) Parity error interrupt mask. */
+ uint32_t feim : 1; /**< [ 7: 7](R/W) Framing error interrupt mask. */
+ uint32_t rtim : 1; /**< [ 6: 6](R/W) Receive timeout interrupt mask. */
+ uint32_t txim : 1; /**< [ 5: 5](R/W) Transmit interrupt mask. */
+ uint32_t rxim : 1; /**< [ 4: 4](R/W) Receive interrupt mask. */
+ uint32_t dsrmim : 1; /**< [ 3: 3](R/W) DSR modem interrupt mask. */
+ uint32_t dcdmim : 1; /**< [ 2: 2](R/W) DCD modem interrupt mask. */
+ uint32_t ctsmim : 1; /**< [ 1: 1](R/W) CTS modem interrupt mask. */
+ uint32_t rimim : 1; /**< [ 0: 0](R/W) Ring indicator interrupt mask. Not implemented. */
+#else /* Word 0 - Little Endian */
+ uint32_t rimim : 1; /**< [ 0: 0](R/W) Ring indicator interrupt mask. Not implemented. */
+ uint32_t ctsmim : 1; /**< [ 1: 1](R/W) CTS modem interrupt mask. */
+ uint32_t dcdmim : 1; /**< [ 2: 2](R/W) DCD modem interrupt mask. */
+ uint32_t dsrmim : 1; /**< [ 3: 3](R/W) DSR modem interrupt mask. */
+ uint32_t rxim : 1; /**< [ 4: 4](R/W) Receive interrupt mask. */
+ uint32_t txim : 1; /**< [ 5: 5](R/W) Transmit interrupt mask. */
+ uint32_t rtim : 1; /**< [ 6: 6](R/W) Receive timeout interrupt mask. */
+ uint32_t feim : 1; /**< [ 7: 7](R/W) Framing error interrupt mask. */
+ uint32_t peim : 1; /**< [ 8: 8](R/W) Parity error interrupt mask. */
+ uint32_t beim : 1; /**< [ 9: 9](R/W) Break error interrupt mask. */
+ uint32_t oeim : 1; /**< [ 10: 10](R/W) Overrun error interrupt mask. */
+ uint32_t reserved_11_31 : 21;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_uaax_imsc_s cn; */
+};
+typedef union bdk_uaax_imsc bdk_uaax_imsc_t;
+
+static inline uint64_t BDK_UAAX_IMSC(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_UAAX_IMSC(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e028000038ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e028000038ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e024000038ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e028000038ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("UAAX_IMSC", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_UAAX_IMSC(a) bdk_uaax_imsc_t
+#define bustype_BDK_UAAX_IMSC(a) BDK_CSR_TYPE_RSL32b
+#define basename_BDK_UAAX_IMSC(a) "UAAX_IMSC"
+#define device_bar_BDK_UAAX_IMSC(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_UAAX_IMSC(a) (a)
+#define arguments_BDK_UAAX_IMSC(a) (a),-1,-1,-1
+
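+/* Editorial sketch: unmask the receive and receive-timeout interrupts in
+ * uaa#_imsc after acknowledging anything stale through the write-1-to-clear
+ * uaa#_icr. Identity-mapped CSR addresses assumed. */
+static inline void uaa_irq_setup_sketch(unsigned long uart)
+{
+    volatile uint32_t *icr  = (volatile uint32_t *)(uintptr_t)BDK_UAAX_ICR(uart);
+    volatile uint32_t *imsc = (volatile uint32_t *)(uintptr_t)BDK_UAAX_IMSC(uart);
+    bdk_uaax_imsc_t m = { .u = 0 };
+    *icr = 0x7ff;               /* W1C: clear all eleven interrupt bits */
+    m.s.rxim = 1;               /* receive interrupt */
+    m.s.rtim = 1;               /* receive timeout interrupt */
+    *imsc = m.u;
+}
+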
+/**
+ * Register (RSL) uaa#_io_ctl
+ *
+ * UART IO Control Register
+ * This register controls the UAA[0..1] IO drive strength and slew rates. The additional
+ * UAA interfaces are controlled by GPIO_IO_CTL[DRIVEx] and GPIO_IO_CTL[SLEWx] based
+ * on the selected pins.
+ */
+union bdk_uaax_io_ctl
+{
+ uint64_t u;
+ struct bdk_uaax_io_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t drive : 2; /**< [ 3: 2](R/W) UART bus pin output drive strength.
+ 0x0 = 2 mA.
+ 0x1 = 4 mA.
+ 0x2 = 8 mA.
+ 0x3 = 16 mA. */
+ uint64_t reserved_1 : 1;
+ uint64_t slew : 1; /**< [ 0: 0](R/W) UART bus pin output slew rate control.
+ 0 = Low slew rate.
+ 1 = High slew rate. */
+#else /* Word 0 - Little Endian */
+ uint64_t slew : 1; /**< [ 0: 0](R/W) UART bus pin output slew rate control.
+ 0 = Low slew rate.
+ 1 = High slew rate. */
+ uint64_t reserved_1 : 1;
+ uint64_t drive : 2; /**< [ 3: 2](R/W) UART bus pin output drive strength.
+ 0x0 = 2 mA.
+ 0x1 = 4 mA.
+ 0x2 = 8 mA.
+ 0x3 = 16 mA. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_uaax_io_ctl_s cn; */
+};
+typedef union bdk_uaax_io_ctl bdk_uaax_io_ctl_t;
+
+static inline uint64_t BDK_UAAX_IO_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_UAAX_IO_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e028001028ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("UAAX_IO_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_UAAX_IO_CTL(a) bdk_uaax_io_ctl_t
+#define bustype_BDK_UAAX_IO_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_UAAX_IO_CTL(a) "UAAX_IO_CTL"
+#define device_bar_BDK_UAAX_IO_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_UAAX_IO_CTL(a) (a)
+#define arguments_BDK_UAAX_IO_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL32b) uaa#_lcr_h
+ *
+ * UART Line Control Register
+ */
+union bdk_uaax_lcr_h
+{
+ uint32_t u;
+ struct bdk_uaax_lcr_h_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t sps : 1; /**< [ 7: 7](R/W) Stick parity select. If set, and [PEN] is set, forces the parity bit to the opposite of [EPS]. */
+ uint32_t wlen : 2; /**< [ 6: 5](R/W) Word length:
+ 0x0 = 5 bits.
+ 0x1 = 6 bits.
+ 0x2 = 7 bits.
+ 0x3 = 8 bits. */
+ uint32_t fen : 1; /**< [ 4: 4](R/W) Enable FIFOs.
+ 0 = FIFOs disabled, FIFOs are single character deep.
+ 1 = FIFO enabled. */
+ uint32_t stp2 : 1; /**< [ 3: 3](R/W) Two stop bits select. */
+ uint32_t eps : 1; /**< [ 2: 2](R/W) Even parity select. */
+ uint32_t pen : 1; /**< [ 1: 1](R/W) Parity enable. */
+ uint32_t brk : 1; /**< [ 0: 0](R/W) Send break. A low level is continually transmitted after completion of the current character. */
+#else /* Word 0 - Little Endian */
+ uint32_t brk : 1; /**< [ 0: 0](R/W) Send break. A low level is continually transmitted after completion of the current character. */
+ uint32_t pen : 1; /**< [ 1: 1](R/W) Parity enable. */
+ uint32_t eps : 1; /**< [ 2: 2](R/W) Even parity select. */
+ uint32_t stp2 : 1; /**< [ 3: 3](R/W) Two stop bits select. */
+ uint32_t fen : 1; /**< [ 4: 4](R/W) Enable FIFOs.
+ 0 = FIFOs disabled, FIFOs are single character deep.
+ 1 = FIFO enabled. */
+ uint32_t wlen : 2; /**< [ 6: 5](R/W) Word length:
+ 0x0 = 5 bits.
+ 0x1 = 6 bits.
+ 0x2 = 7 bits.
+ 0x3 = 8 bits. */
+ uint32_t sps : 1; /**< [ 7: 7](R/W) Stick parity select. If set, and [PEN] is set, forces the parity bit to the opposite of [EPS]. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_uaax_lcr_h_s cn; */
+};
+typedef union bdk_uaax_lcr_h bdk_uaax_lcr_h_t;
+
+static inline uint64_t BDK_UAAX_LCR_H(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_UAAX_LCR_H(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e02800002cll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e02800002cll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e02400002cll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e02800002cll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("UAAX_LCR_H", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_UAAX_LCR_H(a) bdk_uaax_lcr_h_t
+#define bustype_BDK_UAAX_LCR_H(a) BDK_CSR_TYPE_RSL32b
+#define basename_BDK_UAAX_LCR_H(a) "UAAX_LCR_H"
+#define device_bar_BDK_UAAX_LCR_H(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_UAAX_LCR_H(a) (a)
+#define arguments_BDK_UAAX_LCR_H(a) (a),-1,-1,-1
+
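+/* Editorial sketch: program 8N1 framing with FIFOs enabled through the
+ * bdk_uaax_lcr_h_t layout above. Identity-mapped CSR address assumed. */
+static inline void uaa_set_8n1_sketch(unsigned long uart)
+{
+    volatile uint32_t *lcr = (volatile uint32_t *)(uintptr_t)BDK_UAAX_LCR_H(uart);
+    bdk_uaax_lcr_h_t v = { .u = 0 };
+    v.s.wlen = 0x3;     /* 8-bit words */
+    v.s.fen  = 1;       /* FIFOs enabled */
+    /* pen/eps/stp2 stay 0: no parity, one stop bit */
+    *lcr = v.u;
+}
+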
+/**
+ * Register (RSL32b) uaa#_mis
+ *
+ * UART Masked Interrupt Status Register
+ * Indicates state of interrupts after masking.
+ * Internal:
+ * Note this register was not present in SBSA 2.3, but is referenced
+ * by the Linux driver, so has been defined here.
+ */
+union bdk_uaax_mis
+{
+ uint32_t u;
+ struct bdk_uaax_mis_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_11_31 : 21;
+ uint32_t oemis : 1; /**< [ 10: 10](RO/H) Overrun error interrupt status. */
+ uint32_t bemis : 1; /**< [ 9: 9](RO/H) Break error interrupt status. */
+ uint32_t pemis : 1; /**< [ 8: 8](RO/H) Parity error interrupt status. */
+ uint32_t femis : 1; /**< [ 7: 7](RO/H) Framing error interrupt status. */
+ uint32_t rtmis : 1; /**< [ 6: 6](RO/H) Receive timeout interrupt status. */
+ uint32_t txmis : 1; /**< [ 5: 5](RO/H) Transmit interrupt status. */
+ uint32_t rxmis : 1; /**< [ 4: 4](RO/H) Receive interrupt status. */
+ uint32_t dsrmmis : 1; /**< [ 3: 3](RO/H) DSR modem interrupt status. */
+ uint32_t dcdmmis : 1; /**< [ 2: 2](RO/H) DCD modem interrupt status. */
+ uint32_t ctsmmis : 1; /**< [ 1: 1](RO/H) CTS modem interrupt status. */
+ uint32_t rimmis : 1; /**< [ 0: 0](RO/H) Ring indicator interrupt status. Not implemented. */
+#else /* Word 0 - Little Endian */
+ uint32_t rimmis : 1; /**< [ 0: 0](RO/H) Ring indicator interrupt status. Not implemented. */
+ uint32_t ctsmmis : 1; /**< [ 1: 1](RO/H) CTS modem interrupt status. */
+ uint32_t dcdmmis : 1; /**< [ 2: 2](RO/H) DCD modem interrupt status. */
+ uint32_t dsrmmis : 1; /**< [ 3: 3](RO/H) DSR modem interrupt status. */
+ uint32_t rxmis : 1; /**< [ 4: 4](RO/H) Receive interrupt status. */
+ uint32_t txmis : 1; /**< [ 5: 5](RO/H) Transmit interrupt status. */
+ uint32_t rtmis : 1; /**< [ 6: 6](RO/H) Receive timeout interrupt status. */
+ uint32_t femis : 1; /**< [ 7: 7](RO/H) Framing error interrupt status. */
+ uint32_t pemis : 1; /**< [ 8: 8](RO/H) Parity error interrupt status. */
+ uint32_t bemis : 1; /**< [ 9: 9](RO/H) Break error interrupt status. */
+ uint32_t oemis : 1; /**< [ 10: 10](RO/H) Overrun error interrupt status. */
+ uint32_t reserved_11_31 : 21;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_uaax_mis_s cn; */
+};
+typedef union bdk_uaax_mis bdk_uaax_mis_t;
+
+static inline uint64_t BDK_UAAX_MIS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_UAAX_MIS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e028000040ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e028000040ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e024000040ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e028000040ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("UAAX_MIS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_UAAX_MIS(a) bdk_uaax_mis_t
+#define bustype_BDK_UAAX_MIS(a) BDK_CSR_TYPE_RSL32b
+#define basename_BDK_UAAX_MIS(a) "UAAX_MIS"
+#define device_bar_BDK_UAAX_MIS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_UAAX_MIS(a) (a)
+#define arguments_BDK_UAAX_MIS(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) uaa#_msix_pba#
+ *
+ * UART MSI-X Pending Bit Array Registers
+ * This register is the MSI-X PBA table; the bit number is indexed by the UAA_INT_VEC_E enumeration.
+ */
+union bdk_uaax_msix_pbax
+{
+ uint64_t u;
+ struct bdk_uaax_msix_pbax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pend : 64; /**< [ 63: 0](RO/H) Pending message for each interrupt, enumerated by UAA_INT_VEC_E. Bits that have no
+ associated UAA_INT_VEC_E are zero. */
+#else /* Word 0 - Little Endian */
+ uint64_t pend : 64; /**< [ 63: 0](RO/H) Pending message for each interrupt, enumerated by UAA_INT_VEC_E. Bits that have no
+ associated UAA_INT_VEC_E are zero. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_uaax_msix_pbax_s cn; */
+};
+typedef union bdk_uaax_msix_pbax bdk_uaax_msix_pbax_t;
+
+static inline uint64_t BDK_UAAX_MSIX_PBAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_UAAX_MSIX_PBAX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b==0)))
+ return 0x87e028ff0000ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b==0)))
+ return 0x87e028ff0000ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b==0)))
+ return 0x87e024ff0000ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b==0)))
+ return 0x87e028ff0000ll + 0x1000000ll * ((a) & 0x7) + 8ll * ((b) & 0x0);
+ __bdk_csr_fatal("UAAX_MSIX_PBAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_UAAX_MSIX_PBAX(a,b) bdk_uaax_msix_pbax_t
+#define bustype_BDK_UAAX_MSIX_PBAX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_UAAX_MSIX_PBAX(a,b) "UAAX_MSIX_PBAX"
+#define device_bar_BDK_UAAX_MSIX_PBAX(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_UAAX_MSIX_PBAX(a,b) (a)
+#define arguments_BDK_UAAX_MSIX_PBAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) uaa#_msix_vec#_addr
+ *
+ * UART MSI-X Vector Table Address Registers
+ * This register is the MSI-X vector table, indexed by the UAA_INT_VEC_E enumeration.
+ */
+union bdk_uaax_msix_vecx_addr
+{
+ uint64_t u;
+ struct bdk_uaax_msix_vecx_addr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_53_63 : 11;
+ uint64_t addr : 51; /**< [ 52: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_1 : 1;
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's UAA()_MSIX_VEC()_ADDR, UAA()_MSIX_VEC()_CTL, and
+ corresponding bit of UAA()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_UAA(0..1)_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is set, all vectors are secure and function as if [SECVEC]
+ was set. */
+#else /* Word 0 - Little Endian */
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's UAA()_MSIX_VEC()_ADDR, UAA()_MSIX_VEC()_CTL, and
+ corresponding bit of UAA()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_UAA(0..1)_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is set, all vectors are secure and function as if [SECVEC]
+ was set. */
+ uint64_t reserved_1 : 1;
+ uint64_t addr : 51; /**< [ 52: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_53_63 : 11;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_uaax_msix_vecx_addr_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_53_63 : 11;
+ uint64_t addr : 51; /**< [ 52: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_1 : 1;
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's UAA()_MSIX_VEC()_ADDR, UAA()_MSIX_VEC()_CTL, and
+ corresponding bit of UAA()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_UAA()_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is set, all vectors are secure and function as if [SECVEC]
+ was set. */
+#else /* Word 0 - Little Endian */
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's UAA()_MSIX_VEC()_ADDR, UAA()_MSIX_VEC()_CTL, and
+ corresponding bit of UAA()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_UAA()_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is set, all vectors are secure and function as if [SECVEC]
+ was set. */
+ uint64_t reserved_1 : 1;
+ uint64_t addr : 51; /**< [ 52: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_53_63 : 11;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_uaax_msix_vecx_addr_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_1 : 1;
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's UAA()_MSIX_VEC()_ADDR, UAA()_MSIX_VEC()_CTL, and
+ corresponding bit of UAA()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_UAA(0..3)_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is set, all vectors are secure and function as if [SECVEC]
+ was set. */
+#else /* Word 0 - Little Endian */
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's UAA()_MSIX_VEC()_ADDR, UAA()_MSIX_VEC()_CTL, and
+ corresponding bit of UAA()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_UAA(0..3)_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is set, all vectors are secure and function as if [SECVEC]
+ was set. */
+ uint64_t reserved_1 : 1;
+ uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_uaax_msix_vecx_addr_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_1 : 1;
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's UAA()_MSIX_VEC()_ADDR, UAA()_MSIX_VEC()_CTL, and
+ corresponding bit of UAA()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_UAA(0..1)_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is set, all vectors are secure and function as if [SECVEC]
+ was set. */
+#else /* Word 0 - Little Endian */
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's UAA()_MSIX_VEC()_ADDR, UAA()_MSIX_VEC()_CTL, and
+ corresponding bit of UAA()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_UAA(0..1)_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is set, all vectors are secure and function as if [SECVEC]
+ was set. */
+ uint64_t reserved_1 : 1;
+ uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_uaax_msix_vecx_addr_cn81xx cn83xx; */
+};
+typedef union bdk_uaax_msix_vecx_addr bdk_uaax_msix_vecx_addr_t;
+
+static inline uint64_t BDK_UAAX_MSIX_VECX_ADDR(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_UAAX_MSIX_VECX_ADDR(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e028f00000ll + 0x1000000ll * ((a) & 0x3) + 0x10ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=1)))
+ return 0x87e028f00000ll + 0x1000000ll * ((a) & 0x3) + 0x10ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=1)))
+ return 0x87e024f00000ll + 0x1000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=1)))
+ return 0x87e028f00000ll + 0x1000000ll * ((a) & 0x7) + 0x10ll * ((b) & 0x1);
+ __bdk_csr_fatal("UAAX_MSIX_VECX_ADDR", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_UAAX_MSIX_VECX_ADDR(a,b) bdk_uaax_msix_vecx_addr_t
+#define bustype_BDK_UAAX_MSIX_VECX_ADDR(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_UAAX_MSIX_VECX_ADDR(a,b) "UAAX_MSIX_VECX_ADDR"
+#define device_bar_BDK_UAAX_MSIX_VECX_ADDR(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_UAAX_MSIX_VECX_ADDR(a,b) (a)
+#define arguments_BDK_UAAX_MSIX_VECX_ADDR(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) uaa#_msix_vec#_ctl
+ *
+ * UART MSI-X Vector Table Control and Data Registers
+ * This register is the MSI-X vector table, indexed by the UAA_INT_VEC_E enumeration.
+ */
+union bdk_uaax_msix_vecx_ctl
+{
+ uint64_t u;
+ struct bdk_uaax_msix_vecx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts will be sent to this vector. */
+ uint64_t data : 32; /**< [ 31: 0](R/W) Data to use for MSI-X delivery of this vector. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 32; /**< [ 31: 0](R/W) Data to use for MSI-X delivery of this vector. */
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts will be sent to this vector. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_uaax_msix_vecx_ctl_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts will be sent to this vector. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t data : 20; /**< [ 19: 0](R/W) Data to use for MSI-X delivery of this vector. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 20; /**< [ 19: 0](R/W) Data to use for MSI-X delivery of this vector. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts will be sent to this vector. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_uaax_msix_vecx_ctl_s cn9; */
+};
+typedef union bdk_uaax_msix_vecx_ctl bdk_uaax_msix_vecx_ctl_t;
+
+static inline uint64_t BDK_UAAX_MSIX_VECX_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_UAAX_MSIX_VECX_CTL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e028f00008ll + 0x1000000ll * ((a) & 0x3) + 0x10ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=1)))
+ return 0x87e028f00008ll + 0x1000000ll * ((a) & 0x3) + 0x10ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=1)))
+ return 0x87e024f00008ll + 0x1000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=1)))
+ return 0x87e028f00008ll + 0x1000000ll * ((a) & 0x7) + 0x10ll * ((b) & 0x1);
+ __bdk_csr_fatal("UAAX_MSIX_VECX_CTL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_UAAX_MSIX_VECX_CTL(a,b) bdk_uaax_msix_vecx_ctl_t
+#define bustype_BDK_UAAX_MSIX_VECX_CTL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_UAAX_MSIX_VECX_CTL(a,b) "UAAX_MSIX_VECX_CTL"
+#define device_bar_BDK_UAAX_MSIX_VECX_CTL(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_UAAX_MSIX_VECX_CTL(a,b) (a)
+#define arguments_BDK_UAAX_MSIX_VECX_CTL(a,b) (a),(b),-1,-1
+
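+/* Editorial sketch: program and unmask the single UAA MSI-X vector
+ * (UAA_INT_VEC_E_INTS) through the tables above. The iova/data values are
+ * caller-supplied; bits <1:0> of the address entry are not part of [ADDR],
+ * so they are cleared here. Identity-mapped CSR addresses assumed. */
+static inline void uaa_msix_setup_sketch(unsigned long uart, uint64_t iova,
+                                         uint32_t data)
+{
+    unsigned long vec = BDK_UAA_INT_VEC_E_INTS;
+    volatile uint64_t *va =
+        (volatile uint64_t *)(uintptr_t)BDK_UAAX_MSIX_VECX_ADDR(uart, vec);
+    volatile uint64_t *vc =
+        (volatile uint64_t *)(uintptr_t)BDK_UAAX_MSIX_VECX_CTL(uart, vec);
+    *va = iova & ~0x3ull;       /* [ADDR] with [SECVEC] left clear */
+    *vc = data;                 /* [DATA]; [MASK]=0 leaves the vector enabled */
+}
+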
+/**
+ * Register (RSL32b) uaa#_pidr0
+ *
+ * UART Peripheral Identification Register 0
+ */
+union bdk_uaax_pidr0
+{
+ uint32_t u;
+ struct bdk_uaax_pidr0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t partnum0 : 8; /**< [ 7: 0](RO) Part number \<7:0\>. ARM-assigned PL011 compatible. */
+#else /* Word 0 - Little Endian */
+ uint32_t partnum0 : 8; /**< [ 7: 0](RO) Part number \<7:0\>. ARM-assigned PL011 compatible. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_uaax_pidr0_s cn; */
+};
+typedef union bdk_uaax_pidr0 bdk_uaax_pidr0_t;
+
+static inline uint64_t BDK_UAAX_PIDR0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_UAAX_PIDR0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e028000fe0ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e028000fe0ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e024000fe0ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e028000fe0ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("UAAX_PIDR0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_UAAX_PIDR0(a) bdk_uaax_pidr0_t
+#define bustype_BDK_UAAX_PIDR0(a) BDK_CSR_TYPE_RSL32b
+#define basename_BDK_UAAX_PIDR0(a) "UAAX_PIDR0"
+#define device_bar_BDK_UAAX_PIDR0(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_UAAX_PIDR0(a) (a)
+#define arguments_BDK_UAAX_PIDR0(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL32b) uaa#_pidr1
+ *
+ * UART Peripheral Identification Register 1
+ */
+union bdk_uaax_pidr1
+{
+ uint32_t u;
+ struct bdk_uaax_pidr1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t idcode : 4; /**< [ 7: 4](RO) ARM identification. */
+ uint32_t partnum1 : 4; /**< [ 3: 0](RO) Part number \<11:8\>. ARM-assigned PL011 compatible. */
+#else /* Word 0 - Little Endian */
+ uint32_t partnum1 : 4; /**< [ 3: 0](RO) Part number \<11:8\>. ARM-assigned PL011 compatible. */
+ uint32_t idcode : 4; /**< [ 7: 4](RO) ARM identification. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_uaax_pidr1_s cn; */
+};
+typedef union bdk_uaax_pidr1 bdk_uaax_pidr1_t;
+
+static inline uint64_t BDK_UAAX_PIDR1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_UAAX_PIDR1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e028000fe4ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e028000fe4ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e024000fe4ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e028000fe4ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("UAAX_PIDR1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_UAAX_PIDR1(a) bdk_uaax_pidr1_t
+#define bustype_BDK_UAAX_PIDR1(a) BDK_CSR_TYPE_RSL32b
+#define basename_BDK_UAAX_PIDR1(a) "UAAX_PIDR1"
+#define device_bar_BDK_UAAX_PIDR1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_UAAX_PIDR1(a) (a)
+#define arguments_BDK_UAAX_PIDR1(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL32b) uaa#_pidr2
+ *
+ * UART Peripheral Identification Register 2
+ */
+union bdk_uaax_pidr2
+{
+ uint32_t u;
+ struct bdk_uaax_pidr2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t revision : 4; /**< [ 7: 4](RO) UART architectural revision.
+ 0x3 = r1p5. */
+ uint32_t jedec : 1; /**< [ 3: 3](RO) JEDEC assigned. 0 = Legacy UART assignment. */
+ uint32_t idcode : 3; /**< [ 2: 0](RO) ARM-design compatible. */
+#else /* Word 0 - Little Endian */
+ uint32_t idcode : 3; /**< [ 2: 0](RO) ARM-design compatible. */
+ uint32_t jedec : 1; /**< [ 3: 3](RO) JEDEC assigned. 0 = Legacy UART assignment. */
+ uint32_t revision : 4; /**< [ 7: 4](RO) UART architectural revision.
+ 0x3 = r1p5. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_uaax_pidr2_s cn; */
+};
+typedef union bdk_uaax_pidr2 bdk_uaax_pidr2_t;
+
+static inline uint64_t BDK_UAAX_PIDR2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_UAAX_PIDR2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e028000fe8ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e028000fe8ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e024000fe8ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e028000fe8ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("UAAX_PIDR2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_UAAX_PIDR2(a) bdk_uaax_pidr2_t
+#define bustype_BDK_UAAX_PIDR2(a) BDK_CSR_TYPE_RSL32b
+#define basename_BDK_UAAX_PIDR2(a) "UAAX_PIDR2"
+#define device_bar_BDK_UAAX_PIDR2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_UAAX_PIDR2(a) (a)
+#define arguments_BDK_UAAX_PIDR2(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL32b) uaa#_pidr3
+ *
+ * UART Peripheral Identification Register 3
+ */
+union bdk_uaax_pidr3
+{
+ uint32_t u;
+ struct bdk_uaax_pidr3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t revand : 4; /**< [ 7: 4](RO) Manufacturer revision number. For CNXXXX always 0x0. */
+ uint32_t cust : 4; /**< [ 3: 0](RO) Customer modified. 0x1 = Overall product information should be consulted for
+ product, major and minor pass numbers. */
+#else /* Word 0 - Little Endian */
+ uint32_t cust : 4; /**< [ 3: 0](RO) Customer modified. 0x1 = Overall product information should be consulted for
+ product, major and minor pass numbers. */
+ uint32_t revand : 4; /**< [ 7: 4](RO) Manufacturer revision number. For CNXXXX always 0x0. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_uaax_pidr3_s cn; */
+};
+typedef union bdk_uaax_pidr3 bdk_uaax_pidr3_t;
+
+static inline uint64_t BDK_UAAX_PIDR3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_UAAX_PIDR3(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e028000fecll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e028000fecll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e024000fecll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e028000fecll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("UAAX_PIDR3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_UAAX_PIDR3(a) bdk_uaax_pidr3_t
+#define bustype_BDK_UAAX_PIDR3(a) BDK_CSR_TYPE_RSL32b
+#define basename_BDK_UAAX_PIDR3(a) "UAAX_PIDR3"
+#define device_bar_BDK_UAAX_PIDR3(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_UAAX_PIDR3(a) (a)
+#define arguments_BDK_UAAX_PIDR3(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL32b) uaa#_pidr4
+ *
+ * UART Peripheral Identification Register 4
+ */
+union bdk_uaax_pidr4
+{
+ uint32_t u;
+ struct bdk_uaax_pidr4_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_uaax_pidr4_s cn; */
+};
+typedef union bdk_uaax_pidr4 bdk_uaax_pidr4_t;
+
+static inline uint64_t BDK_UAAX_PIDR4(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_UAAX_PIDR4(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e028000fd0ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e028000fd0ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e024000fd0ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e028000fd0ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("UAAX_PIDR4", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_UAAX_PIDR4(a) bdk_uaax_pidr4_t
+#define bustype_BDK_UAAX_PIDR4(a) BDK_CSR_TYPE_RSL32b
+#define basename_BDK_UAAX_PIDR4(a) "UAAX_PIDR4"
+#define device_bar_BDK_UAAX_PIDR4(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_UAAX_PIDR4(a) (a)
+#define arguments_BDK_UAAX_PIDR4(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL32b) uaa#_pidr5
+ *
+ * UART Peripheral Identification Register 5
+ */
+union bdk_uaax_pidr5
+{
+ uint32_t u;
+ struct bdk_uaax_pidr5_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_uaax_pidr5_s cn; */
+};
+typedef union bdk_uaax_pidr5 bdk_uaax_pidr5_t;
+
+static inline uint64_t BDK_UAAX_PIDR5(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_UAAX_PIDR5(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e028000fd4ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e028000fd4ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e024000fd4ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e028000fd4ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("UAAX_PIDR5", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_UAAX_PIDR5(a) bdk_uaax_pidr5_t
+#define bustype_BDK_UAAX_PIDR5(a) BDK_CSR_TYPE_RSL32b
+#define basename_BDK_UAAX_PIDR5(a) "UAAX_PIDR5"
+#define device_bar_BDK_UAAX_PIDR5(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_UAAX_PIDR5(a) (a)
+#define arguments_BDK_UAAX_PIDR5(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL32b) uaa#_pidr6
+ *
+ * UART Peripheral Identification Register 6
+ */
+union bdk_uaax_pidr6
+{
+ uint32_t u;
+ struct bdk_uaax_pidr6_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_uaax_pidr6_s cn; */
+};
+typedef union bdk_uaax_pidr6 bdk_uaax_pidr6_t;
+
+static inline uint64_t BDK_UAAX_PIDR6(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_UAAX_PIDR6(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e028000fd8ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e028000fd8ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e024000fd8ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e028000fd8ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("UAAX_PIDR6", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_UAAX_PIDR6(a) bdk_uaax_pidr6_t
+#define bustype_BDK_UAAX_PIDR6(a) BDK_CSR_TYPE_RSL32b
+#define basename_BDK_UAAX_PIDR6(a) "UAAX_PIDR6"
+#define device_bar_BDK_UAAX_PIDR6(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_UAAX_PIDR6(a) (a)
+#define arguments_BDK_UAAX_PIDR6(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL32b) uaa#_pidr7
+ *
+ * UART Peripheral Identification Register 7
+ */
+union bdk_uaax_pidr7
+{
+ uint32_t u;
+ struct bdk_uaax_pidr7_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_uaax_pidr7_s cn; */
+};
+typedef union bdk_uaax_pidr7 bdk_uaax_pidr7_t;
+
+static inline uint64_t BDK_UAAX_PIDR7(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_UAAX_PIDR7(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e028000fdcll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e028000fdcll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e024000fdcll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e028000fdcll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("UAAX_PIDR7", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_UAAX_PIDR7(a) bdk_uaax_pidr7_t
+#define bustype_BDK_UAAX_PIDR7(a) BDK_CSR_TYPE_RSL32b
+#define basename_BDK_UAAX_PIDR7(a) "UAAX_PIDR7"
+#define device_bar_BDK_UAAX_PIDR7(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_UAAX_PIDR7(a) (a)
+#define arguments_BDK_UAAX_PIDR7(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) uaa#_redirect
+ *
+ * UART REDIRECT Control Register
+ */
+union bdk_uaax_redirect
+{
+ uint64_t u;
+ struct bdk_uaax_redirect_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t in_ena : 1; /**< [ 3: 3](SR/W) 0 = UAA receive and modem control inputs are from hard-assigned pins or GPIO virtual pins.
+ 1 = UAA receive and modem control come from the UAA specified by [IN_SEL]. */
+ uint64_t in_sel : 3; /**< [ 2: 0](SR/W) 0x0 = Inputs from UAA0.
+ 0x1 = Inputs from UAA1.
+ 0x2 = Inputs from UAA2.
+ 0x3 = Inputs from UAA3.
+ 0x4 = Inputs from UAA4.
+ 0x5 = Inputs from UAA5.
+ 0x6 = Inputs from UAA6.
+ 0x7 = Inputs from UAA7. */
+#else /* Word 0 - Little Endian */
+ uint64_t in_sel : 3; /**< [ 2: 0](SR/W) 0x0 = Inputs from UAA0.
+ 0x1 = Inputs from UAA1.
+ 0x2 = Inputs from UAA2.
+ 0x3 = Inputs from UAA3.
+ 0x4 = Inputs from UAA4.
+ 0x5 = Inputs from UAA5.
+ 0x6 = Inputs from UAA6.
+ 0x7 = Inputs from UAA7. */
+ uint64_t in_ena : 1; /**< [ 3: 3](SR/W) 0 = UAA receive and modem control inputs are from hard-assigned pins or GPIO virtual pins.
+ 1 = UAA receive and modem control come from the UAA specified by [IN_SEL]. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_uaax_redirect_s cn; */
+};
+typedef union bdk_uaax_redirect bdk_uaax_redirect_t;
+
+static inline uint64_t BDK_UAAX_REDIRECT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_UAAX_REDIRECT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e028001020ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("UAAX_REDIRECT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_UAAX_REDIRECT(a) bdk_uaax_redirect_t
+#define bustype_BDK_UAAX_REDIRECT(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_UAAX_REDIRECT(a) "UAAX_REDIRECT"
+#define device_bar_BDK_UAAX_REDIRECT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_UAAX_REDIRECT(a) (a)
+#define arguments_BDK_UAAX_REDIRECT(a) (a),-1,-1,-1
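+
+/* Illustrative sketch, not part of the original BDK sources: routing the
+ * receive and modem-control inputs of UAA1 from UAA0 on a CN9XXX part,
+ * assuming the standard BDK_CSR_MODIFY helper from bdk-csrs/bdk-csr.h:
+ *
+ *   BDK_CSR_MODIFY(c, node, BDK_UAAX_REDIRECT(1),
+ *       c.s.in_sel = 0;    // take inputs from UAA0
+ *       c.s.in_ena = 1);   // enable input redirection
+ */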
+
+/**
+ * Register (RSL32b) uaa#_ris
+ *
+ * UART Raw Interrupt Status Register
+ * Indicates state of interrupts before masking.
+ */
+union bdk_uaax_ris
+{
+ uint32_t u;
+ struct bdk_uaax_ris_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_11_31 : 21;
+ uint32_t oeris : 1; /**< [ 10: 10](RO/H) Overrun error interrupt status. */
+ uint32_t beris : 1; /**< [ 9: 9](RO/H) Break error interrupt status. */
+ uint32_t peris : 1; /**< [ 8: 8](RO/H) Parity error interrupt status. */
+ uint32_t feris : 1; /**< [ 7: 7](RO/H) Framing error interrupt status. */
+ uint32_t rtris : 1; /**< [ 6: 6](RO/H) Receive timeout interrupt status. */
+ uint32_t txris : 1; /**< [ 5: 5](RO/H) Transmit interrupt status. */
+ uint32_t rxris : 1; /**< [ 4: 4](RO/H) Receive interrupt status. */
+ uint32_t dsrrmis : 1; /**< [ 3: 3](RO/H) DSR modem interrupt status. */
+ uint32_t dcdrmis : 1; /**< [ 2: 2](RO/H) DCD modem interrupt status. */
+ uint32_t ctsrmis : 1; /**< [ 1: 1](RO/H) CTS modem interrupt status. */
+ uint32_t rirmis : 1; /**< [ 0: 0](RO/H) Ring indicator interrupt status. Not implemented. */
+#else /* Word 0 - Little Endian */
+ uint32_t rirmis : 1; /**< [ 0: 0](RO/H) Ring indicator interrupt status. Not implemented. */
+ uint32_t ctsrmis : 1; /**< [ 1: 1](RO/H) CTS modem interrupt status. */
+ uint32_t dcdrmis : 1; /**< [ 2: 2](RO/H) DCD modem interrupt status. */
+ uint32_t dsrrmis : 1; /**< [ 3: 3](RO/H) DSR modem interrupt status. */
+ uint32_t rxris : 1; /**< [ 4: 4](RO/H) Receive interrupt status. */
+ uint32_t txris : 1; /**< [ 5: 5](RO/H) Transmit interrupt status. */
+ uint32_t rtris : 1; /**< [ 6: 6](RO/H) Receive timeout interrupt status. */
+ uint32_t feris : 1; /**< [ 7: 7](RO/H) Framing error interrupt status. */
+ uint32_t peris : 1; /**< [ 8: 8](RO/H) Parity error interrupt status. */
+ uint32_t beris : 1; /**< [ 9: 9](RO/H) Break error interrupt status. */
+ uint32_t oeris : 1; /**< [ 10: 10](RO/H) Overrun error interrupt status. */
+ uint32_t reserved_11_31 : 21;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_uaax_ris_s cn; */
+};
+typedef union bdk_uaax_ris bdk_uaax_ris_t;
+
+static inline uint64_t BDK_UAAX_RIS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_UAAX_RIS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e02800003cll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e02800003cll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e02400003cll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e02800003cll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("UAAX_RIS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_UAAX_RIS(a) bdk_uaax_ris_t
+#define bustype_BDK_UAAX_RIS(a) BDK_CSR_TYPE_RSL32b
+#define basename_BDK_UAAX_RIS(a) "UAAX_RIS"
+#define device_bar_BDK_UAAX_RIS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_UAAX_RIS(a) (a)
+#define arguments_BDK_UAAX_RIS(a) (a),-1,-1,-1
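+
+/* Illustrative sketch, not part of the original BDK sources: polling the
+ * raw (pre-mask) receive status, assuming the standard BDK_CSR_READ helper:
+ *
+ *   bdk_uaax_ris_t ris;
+ *   ris.u = BDK_CSR_READ(node, BDK_UAAX_RIS(uart));
+ *   if (ris.s.rxris)
+ *       read_fifo();  // hypothetical: data is waiting in the receive FIFO
+ */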
+
+/**
+ * Register (RSL32b) uaa#_rsr_ecr
+ *
+ * UART Receive Status Register/Error Clear Register
+ */
+union bdk_uaax_rsr_ecr
+{
+ uint32_t u;
+ struct bdk_uaax_rsr_ecr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_4_31 : 28;
+ uint32_t oe : 1; /**< [ 3: 3](R/W1/H) Overrun error. Write of any value clears. */
+ uint32_t be : 1; /**< [ 2: 2](R/W1/H) Break error. Associated with the character at the top of the FIFO; only one 0 character is
+ loaded. The next character is only enabled after the receive data goes to 1. Write of any
+ value clears. */
+ uint32_t pe : 1; /**< [ 1: 1](R/W1/H) Parity error. Associated with character at top of the FIFO. Write of any value clears. */
+ uint32_t fe : 1; /**< [ 0: 0](R/W1/H) Framing error. Associated with character at top of the FIFO. Write of any value clears. */
+#else /* Word 0 - Little Endian */
+ uint32_t fe : 1; /**< [ 0: 0](R/W1/H) Framing error. Associated with character at top of the FIFO. Write of any value clears. */
+ uint32_t pe : 1; /**< [ 1: 1](R/W1/H) Parity error. Associated with character at top of the FIFO. Write of any value clears. */
+ uint32_t be : 1; /**< [ 2: 2](R/W1/H) Break error. Associated with the character at the top of the FIFO; only one 0 character is
+ loaded. The next character is only enabled after the receive data goes to 1. Write of any
+ value clears. */
+ uint32_t oe : 1; /**< [ 3: 3](R/W1/H) Overrun error. Write of any value clears. */
+ uint32_t reserved_4_31 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_uaax_rsr_ecr_s cn; */
+};
+typedef union bdk_uaax_rsr_ecr bdk_uaax_rsr_ecr_t;
+
+static inline uint64_t BDK_UAAX_RSR_ECR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_UAAX_RSR_ECR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e028000004ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e028000004ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e024000004ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e028000004ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("UAAX_RSR_ECR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_UAAX_RSR_ECR(a) bdk_uaax_rsr_ecr_t
+#define bustype_BDK_UAAX_RSR_ECR(a) BDK_CSR_TYPE_RSL32b
+#define basename_BDK_UAAX_RSR_ECR(a) "UAAX_RSR_ECR"
+#define device_bar_BDK_UAAX_RSR_ECR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_UAAX_RSR_ECR(a) (a)
+#define arguments_BDK_UAAX_RSR_ECR(a) (a),-1,-1,-1
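+
+/* Illustrative sketch, not part of the original BDK sources: since all four
+ * error bits are write-any-value-to-clear, a single write of zero clears
+ * every latched receive error, assuming the standard BDK_CSR_WRITE helper:
+ *
+ *   BDK_CSR_WRITE(node, BDK_UAAX_RSR_ECR(uart), 0);
+ */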
+
+/**
+ * Register (RSL) uaa#_uctl_csclk_active_pc
+ *
+ * UAA UCTL Conditional Clock Counter Register
+ * This register counts conditional clocks, for power analysis.
+ * Reset by RSL reset.
+ */
+union bdk_uaax_uctl_csclk_active_pc
+{
+ uint64_t u;
+ struct bdk_uaax_uctl_csclk_active_pc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W/H) Counts conditional-clock active cycles since reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W/H) Counts conditional-clock active cycles since reset. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_uaax_uctl_csclk_active_pc_s cn; */
+};
+typedef union bdk_uaax_uctl_csclk_active_pc bdk_uaax_uctl_csclk_active_pc_t;
+
+static inline uint64_t BDK_UAAX_UCTL_CSCLK_ACTIVE_PC(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_UAAX_UCTL_CSCLK_ACTIVE_PC(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e028001018ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("UAAX_UCTL_CSCLK_ACTIVE_PC", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_UAAX_UCTL_CSCLK_ACTIVE_PC(a) bdk_uaax_uctl_csclk_active_pc_t
+#define bustype_BDK_UAAX_UCTL_CSCLK_ACTIVE_PC(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_UAAX_UCTL_CSCLK_ACTIVE_PC(a) "UAAX_UCTL_CSCLK_ACTIVE_PC"
+#define device_bar_BDK_UAAX_UCTL_CSCLK_ACTIVE_PC(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_UAAX_UCTL_CSCLK_ACTIVE_PC(a) (a)
+#define arguments_BDK_UAAX_UCTL_CSCLK_ACTIVE_PC(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) uaa#_uctl_ctl
+ *
+ * UART UCTL Control Register
+ */
+union bdk_uaax_uctl_ctl
+{
+ uint64_t u;
+ struct bdk_uaax_uctl_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_31_63 : 33;
+ uint64_t h_clk_en : 1; /**< [ 30: 30](R/W) UART controller clock enable. When set to 1, the UART controller clock is generated. This
+ also enables access to UCTL registers 0x30-0xF8. */
+ uint64_t h_clk_byp_sel : 1; /**< [ 29: 29](R/W) Select the bypass input to the UART controller clock divider.
+ 0 = Use the divided coprocessor clock from the [H_CLKDIV_SEL] divider.
+ 1 = Use the bypass clock from the GPIO pins.
+
+ This signal is just a multiplexer-select signal; it does not enable the UART
+ controller and APB clock. Software must still set [H_CLK_EN]
+ separately. [H_CLK_BYP_SEL] select should not be changed unless [H_CLK_EN] is
+ disabled. The bypass clock can be selected and running even if the UART
+ controller clock dividers are not running.
+
+ Internal:
+ Generally bypass is only used for scan purposes. */
+ uint64_t h_clkdiv_rst : 1; /**< [ 28: 28](R/W) UART controller clock divider reset. Divided clocks are not generated while the divider is
+ being reset.
+ This also resets the suspend-clock divider. */
+ uint64_t reserved_27 : 1;
+ uint64_t h_clkdiv_sel : 3; /**< [ 26: 24](R/W) The UARTCLK and APB CLK frequency select.
+ The divider values are the following:
+ 0x0 = Divide by 1.
+ 0x1 = Divide by 2.
+ 0x2 = Divide by 4.
+ 0x3 = Divide by 6.
+ 0x4 = Divide by 8.
+ 0x5 = Divide by 16.
+ 0x6 = Divide by 24.
+ 0x7 = Divide by 32.
+
+ The max and min frequency of the UARTCLK is determined by the following:
+ _ f_uartclk(min) \>= 16 * baud_rate(max)
+ _ f_uartclk(max) \<= 16 * 65535 * baud_rate(min) */
+ uint64_t reserved_5_23 : 19;
+ uint64_t csclk_en : 1; /**< [ 4: 4](R/W) Turns on the UCTL interface clock (coprocessor clock).
+ This enables the UCTL registers starting from 0x30 via the RSL bus. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t uaa_rst : 1; /**< [ 1: 1](R/W) Software reset; resets UAA controller; active-high.
+ Internal:
+ Note that soft-resetting the UAHC while it is active may cause violations of RSL
+ protocols. */
+ uint64_t uctl_rst : 1; /**< [ 0: 0](R/W) Software reset; resets UCTL; active-high.
+ Resets UCTL RSL registers 0x30-0xF8.
+ Does not reset UCTL RSL registers 0x0-0x28.
+ UCTL RSL registers starting from 0x30 can be accessed only after the UART controller clock
+ is active and [UCTL_RST] is deasserted.
+
+ Internal:
+ Note that soft-resetting the UCTL while it is active may cause violations of
+ RSL and CIB protocols. */
+#else /* Word 0 - Little Endian */
+ uint64_t uctl_rst : 1; /**< [ 0: 0](R/W) Software reset; resets UCTL; active-high.
+ Resets UCTL RSL registers 0x30-0xF8.
+ Does not reset UCTL RSL registers 0x0-0x28.
+ UCTL RSL registers starting from 0x30 can be accessed only after the UART controller clock
+ is active and [UCTL_RST] is deasserted.
+
+ Internal:
+ Note that soft-resetting the UCTL while it is active may cause violations of
+ RSL and CIB protocols. */
+ uint64_t uaa_rst : 1; /**< [ 1: 1](R/W) Software reset; resets UAA controller; active-high.
+ Internal:
+ Note that soft-resetting the UAHC while it is active may cause violations of RSL
+ protocols. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t csclk_en : 1; /**< [ 4: 4](R/W) Turns on the UCTL interface clock (coprocessor clock).
+ This enables the UCTL registers starting from 0x30 via the RSL bus. */
+ uint64_t reserved_5_23 : 19;
+ uint64_t h_clkdiv_sel : 3; /**< [ 26: 24](R/W) The UARTCLK and APB CLK frequency select.
+ The divider values are the following:
+ 0x0 = Divide by 1.
+ 0x1 = Divide by 2.
+ 0x2 = Divide by 4.
+ 0x3 = Divide by 6.
+ 0x4 = Divide by 8.
+ 0x5 = Divide by 16.
+ 0x6 = Divide by 24.
+ 0x7 = Divide by 32.
+
+ The max and min frequency of the UARTCLK is determined by the following:
+ _ f_uartclk(min) \>= 16 * baud_rate(max)
+ _ f_uartclk(max) \<= 16 * 65535 * baud_rate(min) */
+ uint64_t reserved_27 : 1;
+ uint64_t h_clkdiv_rst : 1; /**< [ 28: 28](R/W) UART controller clock divider reset. Divided clocks are not generated while the divider is
+ being reset.
+ This also resets the suspend-clock divider. */
+ uint64_t h_clk_byp_sel : 1; /**< [ 29: 29](R/W) Select the bypass input to the UART controller clock divider.
+ 0 = Use the divided coprocessor clock from the [H_CLKDIV_SEL] divider.
+ 1 = Use the bypass clock from the GPIO pins.
+
+ This signal is just a multiplexer-select signal; it does not enable the UART
+ controller and APB clock. Software must still set [H_CLK_EN]
+ separately. [H_CLK_BYP_SEL] select should not be changed unless [H_CLK_EN] is
+ disabled. The bypass clock can be selected and running even if the UART
+ controller clock dividers are not running.
+
+ Internal:
+ Generally bypass is only used for scan purposes. */
+ uint64_t h_clk_en : 1; /**< [ 30: 30](R/W) UART controller clock enable. When set to 1, the UART controller clock is generated. This
+ also enables access to UCTL registers 0x30-0xF8. */
+ uint64_t reserved_31_63 : 33;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_uaax_uctl_ctl_s cn8; */
+ struct bdk_uaax_uctl_ctl_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_31_63 : 33;
+ uint64_t h_clk_en : 1; /**< [ 30: 30](R/W) UART controller clock enable. When set to 1, the UART controller clock is generated. This
+ also enables access to UCTL registers 0x30-0xF8. */
+ uint64_t h_clk_byp_sel : 1; /**< [ 29: 29](R/W) Select the bypass input to the UART controller clock divider.
+ 0 = Use the divided coprocessor clock from the [H_CLKDIV_SEL] divider.
+ 1 = Use the bypass clock from the GPIO pins.
+
+ This signal is just a multiplexer-select signal; it does not enable the UART
+ controller and APB clock. Software must still set [H_CLK_EN]
+ separately. [H_CLK_BYP_SEL] select should not be changed unless [H_CLK_EN] is
+ disabled. The bypass clock can be selected and running even if the UART
+ controller clock dividers are not running.
+
+ Internal:
+ Generally bypass is only used for scan purposes. */
+ uint64_t h_clkdiv_rst : 1; /**< [ 28: 28](R/W) UART controller clock divider reset. Divided clocks are not generated while the divider is
+ being reset.
+ This also resets the suspend-clock divider. */
+ uint64_t reserved_27 : 1;
+ uint64_t h_clkdiv_sel : 3; /**< [ 26: 24](R/W) The UARTCLK and APB CLK frequency select.
+ The divider values are the following:
+ 0x0 = Divide by 1 (100 MHz).
+ 0x1 = Divide by 2 (50 MHz).
+ 0x2 = Divide by 4 (25 MHz).
+ 0x3 = Divide by 6 (16.66 MHz).
+ 0x4 = Divide by 8 (12.50 MHz).
+ 0x5 = Divide by 16 (6.25 MHz).
+ 0x6 = Divide by 24 (4.167 MHz).
+ 0x7 = Divide by 32 (3.125 MHz).
+
+ The max and min frequency of the UARTCLK is determined by the following:
+ _ f_uartclk(min) \>= 16 * baud_rate(max)
+ _ f_uartclk(max) \<= 16 * 65535 * baud_rate(min) */
+ uint64_t reserved_5_23 : 19;
+ uint64_t csclk_en : 1; /**< [ 4: 4](R/W) Turns on the UCTL interface clock (coprocessor clock).
+ This enables the UCTL registers starting from 0x30 via the RSL bus. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t uaa_rst : 1; /**< [ 1: 1](R/W) Software reset; resets UAA controller; active-high.
+ Internal:
+ Note that soft-resetting the UAHC while it is active may cause violations of RSL
+ protocols. */
+ uint64_t uctl_rst : 1; /**< [ 0: 0](R/W) Software reset; resets UCTL; active-high.
+ Resets UCTL RSL registers 0x30-0xF8.
+ Does not reset UCTL RSL registers 0x0-0x28.
+ UCTL RSL registers starting from 0x30 can be accessed only after the UART controller clock
+ is active and [UCTL_RST] is deasserted.
+
+ Internal:
+ Note that soft-resetting the UCTL while it is active may cause violations of
+ RSL and CIB protocols. */
+#else /* Word 0 - Little Endian */
+ uint64_t uctl_rst : 1; /**< [ 0: 0](R/W) Software reset; resets UCTL; active-high.
+ Resets UCTL RSL registers 0x30-0xF8.
+ Does not reset UCTL RSL registers 0x0-0x28.
+ UCTL RSL registers starting from 0x30 can be accessed only after the UART controller clock
+ is active and [UCTL_RST] is deasserted.
+
+ Internal:
+ Note that soft-resetting the UCTL while it is active may cause violations of
+ RSL and CIB protocols. */
+ uint64_t uaa_rst : 1; /**< [ 1: 1](R/W) Software reset; resets UAA controller; active-high.
+ Internal:
+ Note that soft-resetting the UAHC while it is active may cause violations of RSL
+ protocols. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t csclk_en : 1; /**< [ 4: 4](R/W) Turns on the UCTL interface clock (coprocessor clock).
+ This enables the UCTL registers starting from 0x30 via the RSL bus. */
+ uint64_t reserved_5_23 : 19;
+ uint64_t h_clkdiv_sel : 3; /**< [ 26: 24](R/W) The UARTCLK and APB CLK frequency select.
+ The divider values are the following:
+ 0x0 = Divide by 1 (100 MHz).
+ 0x1 = Divide by 2 (50 MHz).
+ 0x2 = Divide by 4 (25 MHz).
+ 0x3 = Divide by 6 (16.66 MHz).
+ 0x4 = Divide by 8 (12.50 MHz).
+ 0x5 = Divide by 16 (6.25 MHz).
+ 0x6 = Divide by 24 (4.167 MHz).
+ 0x7 = Divide by 32 (3.125 MHz).
+
+ The max and min frequency of the UARTCLK is determined by the following:
+ _ f_uartclk(min) \>= 16 * baud_rate(max)
+ _ f_uartclk(max) \<= 16 * 65535 * baud_rate(min) */
+ uint64_t reserved_27 : 1;
+ uint64_t h_clkdiv_rst : 1; /**< [ 28: 28](R/W) UART controller clock divider reset. Divided clocks are not generated while the divider is
+ being reset.
+ This also resets the suspend-clock divider. */
+ uint64_t h_clk_byp_sel : 1; /**< [ 29: 29](R/W) Select the bypass input to the UART controller clock divider.
+ 0 = Use the divided coprocessor clock from the [H_CLKDIV_SEL] divider.
+ 1 = Use the bypass clock from the GPIO pins.
+
+ This signal is just a multiplexer-select signal; it does not enable the UART
+ controller and APB clock. Software must still set [H_CLK_EN]
+ separately. [H_CLK_BYP_SEL] select should not be changed unless [H_CLK_EN] is
+ disabled. The bypass clock can be selected and running even if the UART
+ controller clock dividers are not running.
+
+ Internal:
+ Generally bypass is only used for scan purposes. */
+ uint64_t h_clk_en : 1; /**< [ 30: 30](R/W) UART controller clock enable. When set to 1, the UART controller clock is generated. This
+ also enables access to UCTL registers 0x30-0xF8. */
+ uint64_t reserved_31_63 : 33;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_uaax_uctl_ctl bdk_uaax_uctl_ctl_t;
+
+static inline uint64_t BDK_UAAX_UCTL_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_UAAX_UCTL_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e028001000ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e028001000ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e024001000ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e028001000ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("UAAX_UCTL_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_UAAX_UCTL_CTL(a) bdk_uaax_uctl_ctl_t
+#define bustype_BDK_UAAX_UCTL_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_UAAX_UCTL_CTL(a) "UAAX_UCTL_CTL"
+#define device_bar_BDK_UAAX_UCTL_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_UAAX_UCTL_CTL(a) (a)
+#define arguments_BDK_UAAX_UCTL_CTL(a) (a),-1,-1,-1
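+
+/* Illustrative sketch, not part of the original BDK sources: a minimal UCTL
+ * bring-up implied by the field descriptions above might first enable the
+ * interface clock, then select a divider and enable the UART controller
+ * clock, then release the resets. BDK_CSR_MODIFY is assumed to be the
+ * standard helper from bdk-csr.h:
+ *
+ *   BDK_CSR_MODIFY(c, node, BDK_UAAX_UCTL_CTL(uart),
+ *       c.s.csclk_en = 1);            // enable RSL access to UCTL registers
+ *   BDK_CSR_MODIFY(c, node, BDK_UAAX_UCTL_CTL(uart),
+ *       c.s.h_clkdiv_sel = 3;         // UARTCLK = coprocessor clock / 6
+ *       c.s.h_clk_byp_sel = 0;        // use the divided clock, not bypass
+ *       c.s.h_clk_en = 1);            // generate the UART controller clock
+ *   BDK_CSR_MODIFY(c, node, BDK_UAAX_UCTL_CTL(uart),
+ *       c.s.h_clkdiv_rst = 0;         // release the clock divider reset
+ *       c.s.uaa_rst = 0;              // release the UAA controller reset
+ *       c.s.uctl_rst = 0);            // release the UCTL reset
+ */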
+
+/**
+ * Register (RSL) uaa#_uctl_spare0
+ *
+ * UART UCTL Spare Register 0
+ * This is a spare register. It can be reset by NCB reset.
+ */
+union bdk_uaax_uctl_spare0
+{
+ uint64_t u;
+ struct bdk_uaax_uctl_spare0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t spare : 64; /**< [ 63: 0](R/W) Spare. */
+#else /* Word 0 - Little Endian */
+ uint64_t spare : 64; /**< [ 63: 0](R/W) Spare. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_uaax_uctl_spare0_s cn; */
+};
+typedef union bdk_uaax_uctl_spare0 bdk_uaax_uctl_spare0_t;
+
+static inline uint64_t BDK_UAAX_UCTL_SPARE0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_UAAX_UCTL_SPARE0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e028001010ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e028001010ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e024001010ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e028001010ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("UAAX_UCTL_SPARE0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_UAAX_UCTL_SPARE0(a) bdk_uaax_uctl_spare0_t
+#define bustype_BDK_UAAX_UCTL_SPARE0(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_UAAX_UCTL_SPARE0(a) "UAAX_UCTL_SPARE0"
+#define device_bar_BDK_UAAX_UCTL_SPARE0(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_UAAX_UCTL_SPARE0(a) (a)
+#define arguments_BDK_UAAX_UCTL_SPARE0(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) uaa#_uctl_spare1
+ *
+ * UART UCTL Spare Register 1
+ * This is a spare register. It can be reset by NCB reset.
+ */
+union bdk_uaax_uctl_spare1
+{
+ uint64_t u;
+ struct bdk_uaax_uctl_spare1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t spare : 64; /**< [ 63: 0](R/W) Spare. */
+#else /* Word 0 - Little Endian */
+ uint64_t spare : 64; /**< [ 63: 0](R/W) Spare. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_uaax_uctl_spare1_s cn; */
+};
+typedef union bdk_uaax_uctl_spare1 bdk_uaax_uctl_spare1_t;
+
+static inline uint64_t BDK_UAAX_UCTL_SPARE1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_UAAX_UCTL_SPARE1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e0280010f8ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0280010f8ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0240010f8ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0280010f8ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("UAAX_UCTL_SPARE1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_UAAX_UCTL_SPARE1(a) bdk_uaax_uctl_spare1_t
+#define bustype_BDK_UAAX_UCTL_SPARE1(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_UAAX_UCTL_SPARE1(a) "UAAX_UCTL_SPARE1"
+#define device_bar_BDK_UAAX_UCTL_SPARE1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_UAAX_UCTL_SPARE1(a) (a)
+#define arguments_BDK_UAAX_UCTL_SPARE1(a) (a),-1,-1,-1
+
+#endif /* __BDK_CSRS_UAA_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-fuse.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-fuse.h
new file mode 100644
index 0000000000..d9c0ce955e
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-fuse.h
@@ -0,0 +1,117 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Functions for reading Cavium chip fuses.
+ *
+ * <hr>$Revision: 49448 $<hr>
+ */
+
+
+/**
+ * Read a single fuse bit
+ *
+ * @param fuse Fuse number (0-1024)
+ *
+ * @return fuse value: 0 or 1
+ */
+extern int bdk_fuse_read(bdk_node_t node, int fuse);
+
+/**
+ * Read a range of fuses
+ *
+ * @param node Node to read from
+ * @param fuse Fuse number
+ * @param width Number of fuses to read, max of 64
+ *
+ * @return Fuse value
+ */
+extern uint64_t bdk_fuse_read_range(bdk_node_t node, int fuse, int width);
+
+/**
+ * Soft blow a fuse. Soft blown fuses keep their new value over soft resets, but
+ * not power cycles.
+ *
+ * @param node Node to blow
+ * @param fuse Fuse to blow
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_fuse_soft_blow(bdk_node_t node, int fuse);
+
+/**
+ * Read a single fuse bit from the field set (FUSF)
+ *
+ * @param fuse Fuse number (0-1024)
+ *
+ * @return fuse value: 0 or 1
+ */
+extern int bdk_fuse_field_read(bdk_node_t node, int fuse);
+
+/**
+ * Soft blow a fuse in the field set (FUSF). Soft blown fuses
+ * keep their new value over soft resets, but not power cycles.
+ *
+ * @param node Node to blow
+ * @param fuse Fuse to blow
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_fuse_field_soft_blow(bdk_node_t node, int fuse);
+
+/**
+ * Hard blow fuses in the field set (FUSF). Up to 128 fuses in a bank
+ * are blown at the same time.
+ *
+ * @param node Node to blow
+ * @param start_fuse First fuse to blow. Must be on a 128-bit boundary.
+ * This fuse is blown to match the LSB of fuse 0.
+ * @param fuses0 First 64 fuse values. Bits with a 1 are blown. LSB
+ * of fuses0 maps to start_fuse. MSB of fuses0 maps to
+ * (start_fuse + 63).
+ * @param fuses1 Second 64 fuse values. Bits with a 1 are blown. LSB
+ * of fuses1 maps to (start_fuse + 64). MSB of fuses1
+ * maps to (start_fuse + 127).
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_fuse_field_hard_blow(bdk_node_t node, int start_fuse, uint64_t fuses0, uint64_t fuses1);
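+
+/* Illustrative sketch, not part of the original BDK sources: reading a
+ * hypothetical 16-bit fuse field that starts at fuse number 256 on the
+ * local node:
+ *
+ *   uint64_t field = bdk_fuse_read_range(bdk_numa_local(), 256, 16);
+ */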
+
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-lmt.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-lmt.h
new file mode 100644
index 0000000000..49a69dfb76
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-lmt.h
@@ -0,0 +1,100 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Defines and functions for performing LMT operations, such as
+ * LMTST and LMTCANCEL.
+ *
+ * @defgroup lmt Local Memory Transaction (LMT) support
+ * @{
+ */
+#include "libbdk-arch/bdk-csrs-lmt.h"
+
+/**
+ * Address of the LMT store area in physical memory
+ */
+#define __BDK_LMTLINE_CN83XX 0x87F100000000ull
+
+/**
+ * Flush the LMTLINE area of all previous writes and clear the valid flag
+ */
+static inline void bdk_lmt_cancel(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ *(volatile uint64_t *)(__BDK_LMTLINE_CN83XX | (1 << 10)) = 0;
+ else
+ BDK_CSR_WRITE(bdk_numa_local(), BDK_LMT_LF_LMTCANCEL, 0);
+}
+
+/**
+ * Return a volatile pointer to the LMTLINE area in 64-bit words. Good
+ * programming practice is to always store sequentially, incrementing the
+ * pointer for each word written.
+ *
+ * @return Volatile uint64_t pointer to LMTLINE
+ */
+static inline volatile uint64_t *bdk_lmt_store_ptr(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return (volatile uint64_t *)__BDK_LMTLINE_CN83XX;
+ else
+ return (volatile uint64_t *)BDK_LMT_LF_LMTLINEX(0);
+}
+
+/**
+ * Send the data stored in the LMTLINE area to an IO block. This call may
+ * fail if the hardware has invalidated the LMTLINE area. If it
+ * fails, you must issue all LMT stores again and redo this
+ * call. Note that the return status of this function is inverted
+ * relative to most BDK functions: it matches the LMTST hardware result.
+ *
+ * @param io_address 48 bit IO address where the LMTLINE data will be sent
+ *
+ * @return Zero on failure, non-zero on success
+ */
+static inline int bdk_lmt_submit(uint64_t io_address)
+{
+ int64_t result = 0;
+ asm volatile ("LDEOR xzr,%x[rf],[%[rs]]" : [rf] "=r"(result): [rs] "r"(io_address));
+ return bdk_le64_to_cpu(result);
+}
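+
+/* Illustrative sketch, not part of the original BDK sources: the canonical
+ * LMTST pattern is to write the command words through bdk_lmt_store_ptr()
+ * and retry the entire store sequence whenever bdk_lmt_submit() reports
+ * failure (zero). The payload words here are hypothetical:
+ *
+ *   do
+ *   {
+ *       volatile uint64_t *lmt = bdk_lmt_store_ptr();
+ *       lmt[0] = command_word0;
+ *       lmt[1] = command_word1;
+ *   } while (!bdk_lmt_submit(io_address));
+ */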
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-model.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-model.h
new file mode 100644
index 0000000000..fc50514038
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-model.h
@@ -0,0 +1,170 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Functions for determining which Cavium chip you are running
+ * on.
+ *
+ * <hr>$Revision: 49448 $<hr>
+ * @addtogroup chips
+ * @{
+ */
+
+
+/* Flag bits in the top byte. The top byte of MIDR_EL1 is defined
+   as 0x43, the Cavium implementer code. In this number, bits
+   7, 5, and 4 are defined as zero. We use these bits to signal
+ that revision numbers should be ignored. It isn't ideal
+ that these are in the middle of an already defined field,
+ but this keeps the model numbers as 32 bits */
+#define __OM_IGNORE_REVISION 0x80000000
+#define __OM_IGNORE_MINOR_REVISION 0x20000000
+#define __OM_IGNORE_MODEL 0x10000000
+
+#define CAVIUM_CN88XX_PASS1_0 0x430f0a10
+#define CAVIUM_CN88XX_PASS1_1 0x430f0a11
+#define CAVIUM_CN88XX_PASS2_0 0x431f0a10
+#define CAVIUM_CN88XX_PASS2_1 0x431f0a11
+#define CAVIUM_CN88XX_PASS2_2 0x431f0a12
+#define CAVIUM_CN88XX (CAVIUM_CN88XX_PASS1_0 | __OM_IGNORE_REVISION)
+#define CAVIUM_CN88XX_PASS1_X (CAVIUM_CN88XX_PASS1_0 | __OM_IGNORE_MINOR_REVISION)
+#define CAVIUM_CN88XX_PASS2_X (CAVIUM_CN88XX_PASS2_0 | __OM_IGNORE_MINOR_REVISION)
+/* Note CN86XX will also match the CN88XX macros above. See comment in
+ CAVIUM_IS_MODEL() about MIO_FUS_FUSE_NUM_E::CHIP_IDX bits 6-7 */
+
+#define CAVIUM_CN83XX_PASS1_0 0x430f0a30
+#define CAVIUM_CN83XX (CAVIUM_CN83XX_PASS1_0 | __OM_IGNORE_REVISION)
+#define CAVIUM_CN83XX_PASS1_X (CAVIUM_CN83XX_PASS1_0 | __OM_IGNORE_MINOR_REVISION)
+
+#define CAVIUM_CN81XX_PASS1_0 0x430f0a20
+#define CAVIUM_CN81XX (CAVIUM_CN81XX_PASS1_0 | __OM_IGNORE_REVISION)
+#define CAVIUM_CN81XX_PASS1_X (CAVIUM_CN81XX_PASS1_0 | __OM_IGNORE_MINOR_REVISION)
+/* Note CN80XX will also match the CN81XX macros above. See comment in
+ CAVIUM_IS_MODEL() about MIO_FUS_FUSE_NUM_E::CHIP_IDX bits 6-7 */
+
+#define CAVIUM_CN93XX_PASS1_0 0x430f0b20
+#define CAVIUM_CN93XX (CAVIUM_CN93XX_PASS1_0 | __OM_IGNORE_REVISION)
+#define CAVIUM_CN93XX_PASS1_X (CAVIUM_CN93XX_PASS1_0 | __OM_IGNORE_MINOR_REVISION)
+
+/* These match entire families of chips */
+#define CAVIUM_CN8XXX (CAVIUM_CN88XX_PASS1_0 | __OM_IGNORE_MODEL)
+#define CAVIUM_CN9XXX (CAVIUM_CN93XX_PASS1_0 | __OM_IGNORE_MODEL)
+
+static inline uint64_t cavium_get_model() __attribute__ ((pure, always_inline));
+static inline uint64_t cavium_get_model()
+{
+#ifdef BDK_BUILD_HOST
+ extern uint32_t thunder_remote_get_model(void) __attribute__ ((pure));
+ return thunder_remote_get_model();
+#else
+ uint64_t result;
+ asm ("mrs %[rd],MIDR_EL1" : [rd] "=r" (result));
+ return result;
+#endif
+}
+
+/**
+ * Return non-zero if the chip matches the passed model.
+ *
+ * @param arg_model One of the CAVIUM_* constants for chip models and passes
+ *
+ * @return Non-zero if match
+ */
+static inline int CAVIUM_IS_MODEL(uint32_t arg_model) __attribute__ ((pure, always_inline));
+static inline int CAVIUM_IS_MODEL(uint32_t arg_model)
+{
+ const uint32_t FAMILY = 0xff00; /* Bits 15:8, generation t8x=0xa, t9x=0xb */
+ const uint32_t PARTNUM = 0xfff0; /* Bits 15:4, chip t88=0xa1, t81=0xa2, t83=0xa3, etc */
+ const uint32_t VARIANT = 0xf00000; /* Bits 23:20, major pass */
+ const uint32_t REVISION = 0xf; /* Bits 3:0, minor pass */
+
+ /* Note that the model matching here is unaffected by
+ MIO_FUS_FUSE_NUM_E::CHIP_IDX bits 6-7, which are the alternate package
+ fuses. These bits don't affect MIDR_EL1, so:
+ CN80XX will match CN81XX (CHIP_IDX 6 is set for 676 ball package)
+ CN80XX will match CN81XX (CHIP_IDX 7 is set for 555 ball package)
+ CN86XX will match CN88XX (CHIP_IDX 6 is set for 676 ball package)
+ Alternate package parts are detected using MIO_FUS_DAT2[chip_id],
+ specifically the upper two bits */
+
+ uint32_t my_model = cavium_get_model();
+ uint32_t mask;
+
+ if (arg_model & __OM_IGNORE_MODEL)
+ mask = FAMILY; /* Matches chip generation (CN8XXX, CN9XXX) */
+ else if (arg_model & __OM_IGNORE_REVISION)
+ mask = PARTNUM; /* Matches chip model (CN88XX, CN81XX, CN83XX) */
+ else if (arg_model & __OM_IGNORE_MINOR_REVISION)
+ mask = PARTNUM | VARIANT; /* Matches chip model and major version */
+ else
+ mask = PARTNUM | VARIANT | REVISION; /* Matches chip model, major version, and minor version */
+ return ((arg_model & mask) == (my_model & mask));
+}
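+
+/* Illustrative worked example, not part of the original BDK sources: on a
+ * CN88XX pass 2.1 part, MIDR_EL1 reads 0x431f0a11, so:
+ *
+ *   CAVIUM_IS_MODEL(CAVIUM_CN88XX)          // true:  PARTNUM 0xa1 matches
+ *   CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X)  // true:  PARTNUM and VARIANT match
+ *   CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_0)  // false: REVISION 0x1 != 0x0
+ *   CAVIUM_IS_MODEL(CAVIUM_CN9XXX)          // false: FAMILY 0xa != 0xb
+ */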
+
+/**
+ * Return non-zero if the die is in an alternate package. The
+ * normal is_model() checks will treat alternate package parts
+ * as all the same, whereas this function can be used to detect
+ * them. The return value is the upper two bits of
+ * MIO_FUS_DAT2[chip_id]. Most alternate packages use bit 6,
+ * which will return 1 here. Parts with a second alternative
+ * will use bit 7, which will return 2.
+ *
+ * @param arg_model One of the CAVIUM_* constants for chip models and passes
+ *
+ * @return Non-zero if an alternate package
+ * 0 = Normal package
+ * 1 = Alternate package 1 (CN86XX, CN80XX with 555 balls)
+ * 2 = Alternate package 2 (CN80XX with 676 balls)
+ * 3 = Alternate package 3 (Currently unused)
+ */
+extern int cavium_is_altpkg(uint32_t arg_model);
+
+/**
+ * Return the SKU string for a chip
+ *
+ * @param node Node to get SKU for
+ *
+ * @return Chip's SKU
+ */
+extern const char* bdk_model_get_sku(int node);
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-numa.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-numa.h
new file mode 100644
index 0000000000..cd5b420876
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-numa.h
@@ -0,0 +1,139 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Functions for dealing with multiple chips organized into a
+ * NUMA cluster.
+ *
+ * <hr>$Revision: 49448 $<hr>
+ */
+
+typedef enum
+{
+ BDK_NODE_0 = 0,
+ BDK_NODE_1 = 1,
+ BDK_NODE_2 = 2,
+ BDK_NODE_3 = 3,
+ BDK_NUMA_MAX_NODES = 4
+} bdk_node_t;
+
+/**
+ * Return the local node number
+ *
+ * @return Node number
+ */
+static inline bdk_node_t bdk_numa_local(void) __attribute__ ((always_inline, pure));
+static inline bdk_node_t bdk_numa_local(void)
+{
+#ifndef BDK_BUILD_HOST
+ int mpidr_el1;
+ BDK_MRS_NV(MPIDR_EL1, mpidr_el1);
+ int result;
+ result = (mpidr_el1 >> 16) & 0xff;
+ return BDK_NODE_0 + result;
+#else
+ return BDK_NODE_0; /* FIXME: choose remote node */
+#endif
+}
+
+/**
+ * Return the master node number
+ *
+ * @return Node number
+ */
+static inline bdk_node_t bdk_numa_master(void)
+{
+ extern int __bdk_numa_master_node;
+ return __bdk_numa_master_node;
+}
+
+/**
+ * Get a bitmask of the nodes that exist
+ *
+ * @return bitmask
+ */
+extern uint64_t bdk_numa_get_exists_mask(void);
+
+/**
+ * Add a node to the exists mask
+ *
+ * @param node Node to add
+ */
+extern void bdk_numa_set_exists(bdk_node_t node);
+
+/**
+ * Return true if a node exists
+ *
+ * @param node Node to check
+ *
+ * @return Non zero if the node exists
+ */
+extern int bdk_numa_exists(bdk_node_t node);
+
+/**
+ * Return true if there is only one node
+ *
+ * @return Non-zero if there is only one node
+ */
+extern int bdk_numa_is_only_one();
+
+/**
+ * Given a physical address without a node, return the proper physical address
+ * for the given node.
+ *
+ * @param node Node to create address for
+ * @param pa Base physical address
+ *
+ * @return Node specific address
+ */
+static inline uint64_t bdk_numa_get_address(bdk_node_t node, uint64_t pa) __attribute__((pure,always_inline));
+static inline uint64_t bdk_numa_get_address(bdk_node_t node, uint64_t pa)
+{
+ if (pa & (1ull << 47))
+ pa |= (uint64_t)(node&3) << 44;
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ pa |= (uint64_t)(node & 3) << 40; /* CN8XXX uses bits [41:40] for nodes */
+ else
+ pa |= (uint64_t)(node & 3) << 44; /* CN9XXX uses bits [45:44] for nodes */
+ return pa;
+}
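+
+/* Illustrative worked example, not part of the original BDK sources: for a
+ * DRAM physical address such as 0x10000000 (bit 47 clear) on node 1:
+ *
+ *   CN8XXX: bdk_numa_get_address(BDK_NODE_1, 0x10000000) == 0x10000000 | (1ull << 40)
+ *   CN9XXX: bdk_numa_get_address(BDK_NODE_1, 0x10000000) == 0x10000000 | (1ull << 44)
+ *
+ * IO addresses (bit 47 set) always carry the node in bits 45:44. */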
+
+
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-platform.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-platform.h
new file mode 100644
index 0000000000..6b6e340d39
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-platform.h
@@ -0,0 +1,82 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Functions for information about the run platform.
+ *
+ * <hr>$Revision: 49448 $<hr>
+ * @addtogroup hal
+ * @{
+ */
+
+/**
+ * This typedef defines the possible platforms for the BDK. The
+ * numbers represent fuse setting in Fuses[197:195].
+ */
+typedef enum
+{
+ BDK_PLATFORM_HW = 0,
+ BDK_PLATFORM_EMULATOR = 1,
+ BDK_PLATFORM_RTL = 2,
+ BDK_PLATFORM_ASIM = 3,
+} bdk_platform_t;
+
+/**
+ * Check which platform we are currently running on. This allows a BDK binary to
+ * run on various platforms without a recompile.
+ *
+ * @param platform Platform to check for
+ *
+ * @return Non zero if we are on the platform
+ */
+static inline int bdk_is_platform(bdk_platform_t platform) __attribute__ ((pure, always_inline));
+static inline int bdk_is_platform(bdk_platform_t platform)
+{
+ extern bdk_platform_t __bdk_platform;
+ return (__bdk_platform == platform);
+}
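+
+/* Illustrative sketch, not part of the original BDK sources: code can branch
+ * on the platform at runtime instead of being recompiled per target:
+ *
+ *   if (bdk_is_platform(BDK_PLATFORM_ASIM))
+ *       return 0;  // hypothetical: skip long hardware delays in simulation
+ */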
+
+/**
+ * Call to initialize the platform state
+ */
+extern void __bdk_platform_init();
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-require.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-require.h
new file mode 100644
index 0000000000..ac65134077
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-require.h
@@ -0,0 +1,107 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Functions and macros to control what parts of the BDK are linked in
+ *
+ * <hr>$Revision: 49448 $<hr>
+ * @defgroup require Component linking control
+ * @{
+ */
+
+/**
+ * Optional parts of the BDK code are pulled in by adding
+ * BDK_REQUIRE() lines to the function bdk_require_depends().
+ * Component symbols are defined as weak so that they are not
+ * linked in unless a BDK_REQUIRE() pulls them in.
+ */
+#define BDK_REQUIRE(component) \
+ do \
+ { \
+ extern char __bdk_require_symbol_##component; \
+ bdk_warn_if(__bdk_require_symbol_##component, \
+ "Require of %s failed\n", #component); \
+ } while (0)
+
+/**
+ * The following macro defines a special symbol in a C file to
+ * define it as a require component. Referencing this symbol
+ * causes all objects defined in the C file to be pulled in. This
+ * symbol should only be referenced by using the BDK_REQUIRE()
+ * macro in the function bdk_require_depends().
+ */
+#define BDK_REQUIRE_DEFINE(component) \
+ char __bdk_require_symbol_##component; \
+ char __bdk_is_required_symbol_##component
+
+/**
+ * Return if a component has been required. Useful for if
+ * statements around referencing of weak symbols.
+ */
+#define BDK_IS_REQUIRED(component) \
+ ({int is_required; \
+ do \
+ { \
+ extern char __bdk_is_required_symbol_##component __attribute__((weak));\
+ is_required = (&__bdk_is_required_symbol_##component != NULL); \
+ } while (0); \
+ is_required;})
+
+
+/**
+ * The require macros use weak symbols to control whether components
+ * are linked in. All directly referenced symbols in a component
+ * must be defined as weak. This causes the component to only be
+ * pulled in by the linker if the symbol defined by
+ * BDK_REQUIRE_DEFINE is used.
+ */
+#define BDK_WEAK __attribute__((weak))
+
+/**
+ * This function is not defined by the BDK libraries. It must be
+ * defined by all BDK applications. It should be empty except for
+ * containing BDK_REQUIRE() lines. The bdk-init code has a strong
+ * reference to __bdk_require_depends(), which then contains strong
+ * references to all needed components.
+ */
+extern void __bdk_require_depends(void);
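+
+/* Illustrative sketch, not part of the original BDK sources; the component
+ * name FOO is hypothetical. A component advertises itself with
+ * BDK_REQUIRE_DEFINE, and the application pulls it in from
+ * __bdk_require_depends():
+ *
+ *   // in the component's C file
+ *   BDK_REQUIRE_DEFINE(FOO);
+ *
+ *   // in the application
+ *   void __bdk_require_depends(void)
+ *   {
+ *       BDK_REQUIRE(FOO);
+ *   }
+ */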
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-swap.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-swap.h
new file mode 100644
index 0000000000..2e5ccc60c1
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-swap.h
@@ -0,0 +1,130 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Utility functions for endian swapping
+ *
+ * <hr>$Revision: 32636 $<hr>
+ *
+ * @addtogroup hal
+ * @{
+ */
+
+/**
+ * Byte swap a 16 bit number
+ *
+ * @param x 16 bit number
+ * @return Byte swapped result
+ */
+static inline uint16_t bdk_swap16(uint16_t x)
+{
+ return ((uint16_t)((((uint16_t)(x) & (uint16_t)0x00ffU) << 8) |
+ (((uint16_t)(x) & (uint16_t)0xff00U) >> 8) ));
+}
+
+
+/**
+ * Byte swap a 32 bit number
+ *
+ * @param x 32 bit number
+ * @return Byte swapped result
+ */
+static inline uint32_t bdk_swap32(uint32_t x)
+{
+#if (__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3))
+ return __builtin_bswap32(x);
+#else
+ x = ((x<<8)&0xFF00FF00) | ((x>>8)&0x00FF00FF);
+ x = (x>>16) | (x<<16);
+ return x;
+#endif
+}
+
+
+/**
+ * Byte swap a 64 bit number
+ *
+ * @param x 64 bit number
+ * @return Byte swapped result
+ */
+static inline uint64_t bdk_swap64(uint64_t x)
+{
+#if (__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3))
+ return __builtin_bswap64(x);
+#else
+ x = ((x<< 8)&0xFF00FF00FF00FF00ULL) | ((x>> 8)&0x00FF00FF00FF00FFULL);
+ x = ((x<<16)&0xFFFF0000FFFF0000ULL) | ((x>>16)&0x0000FFFF0000FFFFULL);
+ return (x>>32) | (x<<32);
+#endif
+}
+
+
+#if __BYTE_ORDER == __BIG_ENDIAN
+
+#define bdk_cpu_to_le16(x) bdk_swap16(x)
+#define bdk_cpu_to_le32(x) bdk_swap32(x)
+#define bdk_cpu_to_le64(x) bdk_swap64(x)
+
+#define bdk_cpu_to_be16(x) (x)
+#define bdk_cpu_to_be32(x) (x)
+#define bdk_cpu_to_be64(x) (x)
+
+#else
+
+#define bdk_cpu_to_le16(x) (x)
+#define bdk_cpu_to_le32(x) (x)
+#define bdk_cpu_to_le64(x) (x)
+
+#define bdk_cpu_to_be16(x) bdk_swap16(x)
+#define bdk_cpu_to_be32(x) bdk_swap32(x)
+#define bdk_cpu_to_be64(x) bdk_swap64(x)
+
+#endif
+
+#define bdk_le16_to_cpu(x) bdk_cpu_to_le16(x)
+#define bdk_le32_to_cpu(x) bdk_cpu_to_le32(x)
+#define bdk_le64_to_cpu(x) bdk_cpu_to_le64(x)
+
+#define bdk_be16_to_cpu(x) bdk_cpu_to_be16(x)
+#define bdk_be32_to_cpu(x) bdk_cpu_to_be32(x)
+#define bdk_be64_to_cpu(x) bdk_cpu_to_be64(x)
+
+/** @} */
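
A short usage sketch for the macros above; the buffer layout is illustrative.
Reading a little-endian field through memcpy() plus bdk_le32_to_cpu() works
on either byte order and avoids unaligned access:

    #include <stdint.h>
    #include <string.h>

    /* Parse a 32-bit little-endian length field from a byte buffer */
    static uint32_t parse_le32_length(const uint8_t *buf)
    {
        uint32_t raw;
        memcpy(&raw, buf, sizeof(raw));   /* safe for unaligned buffers */
        return bdk_le32_to_cpu(raw);      /* no-op on little-endian CPUs */
    }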
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-version.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-version.h
new file mode 100644
index 0000000000..bb9b919777
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-version.h
@@ -0,0 +1,59 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Functions for identifying the BDK build version.
+ *
+ * <hr>$Revision$<hr>
+ */
+
+
+extern const char bdk_version_str[];
+
+/**
+ * Return BDK version string
+ *
+ * @return BDK version string
+ */
+static inline const char *bdk_version_string(void)
+{
+ return bdk_version_str;
+}
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-warn.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-warn.h
new file mode 100644
index 0000000000..685c812e20
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-warn.h
@@ -0,0 +1,104 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Functions for reporting errors and warnings.
+ *
+ * <hr>$Revision: 49448 $<hr>
+ *
+ * @defgroup stdio Standard IO related functions
+ * @{
+ */
+
+extern void __bdk_die(void) __attribute__ ((noreturn));
+extern void bdk_fatal(const char *format, ...) __attribute__ ((noreturn, format(printf, 1, 2)));
+extern void bdk_error(const char *format, ...) __attribute__ ((format(printf, 1, 2)));
+extern void bdk_warn(const char *format, ...) __attribute__ ((format(printf, 1, 2)));
+#define bdk_warn_if(expression, format, ...) if (bdk_unlikely(expression)) bdk_warn(format, ##__VA_ARGS__)
+
+/* The following trace areas control detailed tracing of various parts of
+   the BDK. Each one can be enabled (1) or disabled (0) independently via
+   its bit in bdk_trace_enables. These should be disabled unless you are
+   trying to debug something specific */
+
+typedef enum
+{
+ BDK_TRACE_ENABLE_BGX, /* BGX networking block */
+ BDK_TRACE_ENABLE_DRAM, /* DRAM initialization */
+ BDK_TRACE_ENABLE_DRAM_TEST, /* DRAM test code */
+ BDK_TRACE_ENABLE_INIT, /* Early initialization, before main() */
+ BDK_TRACE_ENABLE_ECAM, /* ECAM initialization */
+ BDK_TRACE_ENABLE_QLM, /* QLM related debug */
+ BDK_TRACE_ENABLE_EMMC, /* eMMC related debug */
+ BDK_TRACE_ENABLE_PCIE, /* PCIe link init */
+ BDK_TRACE_ENABLE_PCIE_CONFIG, /* PCIe config space reads / writes */
+ BDK_TRACE_ENABLE_SATA, /* SATA/AHCI related debug */
+ BDK_TRACE_ENABLE_CCPI, /* Multi-node related debug */
+ BDK_TRACE_ENABLE_FATFS, /* FatFs related debug */
+ BDK_TRACE_ENABLE_MPI, /* MPI related debug */
+ BDK_TRACE_ENABLE_ENV, /* Environment variables related debug */
+ BDK_TRACE_ENABLE_FPA, /* Free Pool Allocator */
+ BDK_TRACE_ENABLE_PKI, /* Packet Input */
+ BDK_TRACE_ENABLE_PKO, /* Packet Output */
+ BDK_TRACE_ENABLE_SSO, /* SSO */
+ BDK_TRACE_ENABLE_DEVICE, /* ECAM based device framework */
+ BDK_TRACE_ENABLE_DEVICE_SCAN, /* ECAM based device scanning detail */
+ BDK_TRACE_ENABLE_NIC, /* Virtual NIC */
+ BDK_TRACE_ENABLE_FDT_OS, /* Device tree passed to OS */
+ BDK_TRACE_ENABLE_USB_XHCI, /* USB XHCI block */
+ BDK_TRACE_ENABLE_PHY, /* Ethernet PHY driver debug */
+ __BDK_TRACE_ENABLE_LAST, /* Must always be last value */
+} bdk_trace_enable_t;
+
+/* See bdk-config.c to change the trace level used before config files are loaded */
+extern uint64_t bdk_trace_enables;
+
+/**
+ * Macro for low level tracing of BDK functions. When enabled,
+ * these translate to printf() calls. The "area" is a string
+ * that is appended to "BDK_TRACE_ENABLE_" to figure out which
+ * enable bit to test. The macro expects a ';' after it.
+ */
+#define BDK_TRACE(area, format, ...) do { \
+ if (bdk_trace_enables & (1ull << BDK_TRACE_ENABLE_##area)) \
+ printf(#area ": " format, ##__VA_ARGS__); \
+} while (0)
+
+/** @} */
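
A minimal sketch of enabling one trace area at runtime and emitting a trace
line; the message text is illustrative:

    static void enable_dram_trace(void)
    {
        /* Set the DRAM bit in the global enable mask */
        bdk_trace_enables |= 1ull << BDK_TRACE_ENABLE_DRAM;

        /* Expands to a printf() prefixed with the area name: "DRAM: ..." */
        BDK_TRACE(DRAM, "tracing enabled\n");
    }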
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-bist/bist.h b/src/vendorcode/cavium/include/bdk/libbdk-bist/bist.h
new file mode 100644
index 0000000000..b11e0c4595
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-bist/bist.h
@@ -0,0 +1,43 @@
+#ifndef __LIBBDK_BIST_H
+#define __LIBBDK_BIST_H
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+void bdk_bist_check(void);
+#endif
+
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-bist/efuse-read.h b/src/vendorcode/cavium/include/bdk/libbdk-bist/efuse-read.h
new file mode 100644
index 0000000000..0b05bd081e
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-bist/efuse-read.h
@@ -0,0 +1,41 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+void efuse_read_all_o3(unsigned char *efuse_ptr, int cached_read);
+void dump_fuses(void);
+int num_fuses(void);
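
A hedged usage sketch for the prototypes above; whether num_fuses() counts
bits or bytes is not stated here, so the one-byte-per-fuse sizing below is
an assumption:

    #include <stdlib.h>

    static void read_fuses_example(void)
    {
        int n = num_fuses();
        unsigned char *buf = malloc(n);   /* assumption: one byte per fuse */
        if (!buf)
            return;
        efuse_read_all_o3(buf, 1 /* cached_read */);
        free(buf);
    }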
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-bgx.h b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-bgx.h
new file mode 100644
index 0000000000..f3ea6a41d5
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-bgx.h
@@ -0,0 +1,54 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Boot services for BGX
+ *
+ * @addtogroup boot
+ * @{
+ */
+
+/**
+ * Configure BGX on all nodes as part of booting
+ */
+extern void bdk_boot_bgx(void);
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-ccpi.h b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-ccpi.h
new file mode 100644
index 0000000000..a457f8c0d0
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-ccpi.h
@@ -0,0 +1,54 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Boot services for CCPI
+ *
+ * @addtogroup boot
+ * @{
+ */
+
+/**
+ * Initialize the CCPI links and bring up the other nodes
+ */
+extern void bdk_boot_ccpi(void);
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-dram.h b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-dram.h
new file mode 100644
index 0000000000..716efc3c3a
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-dram.h
@@ -0,0 +1,60 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Boot services for DRAM
+ *
+ * @addtogroup boot
+ * @{
+ */
+
+/**
+ * Configure DRAM on a specific node
+ *
+ * @param node Node to configure
+ * @param override_for_speed
+ * If non-zero, the DRAM init code will use this for the
+ * DRAM clock speed. This is used for SLT and should not
+ * be used otherwise.
+ */
+extern void bdk_boot_dram(bdk_node_t node, int override_for_speed);
+
+/** @} */
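
A minimal call sketch based on the parameter description above:

    static void boot_dram_example(bdk_node_t node)
    {
        /* Zero means "use the DRAM clock speed from the configuration";
           a non-zero value is for SLT only */
        bdk_boot_dram(node, 0);
    }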
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-gpio.h b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-gpio.h
new file mode 100644
index 0000000000..a9d9c9d875
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-gpio.h
@@ -0,0 +1,54 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Boot services for GPIO
+ *
+ * @addtogroup boot
+ * @{
+ */
+
+/**
+ * Configure GPIO on all nodes as part of booting
+ */
+extern void bdk_boot_gpio(void);
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-info.h b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-info.h
new file mode 100644
index 0000000000..4ba814ce77
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-info.h
@@ -0,0 +1,86 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Functions for displaying and retrieving information about the
+ * boot environment
+ *
+ * @addtogroup boot
+ * @{
+ */
+
+
+/**
+ * Display information about strapping and other hard configuration items for
+ * the specified node
+ *
+ * @param node Node to display
+ */
+void bdk_boot_info_strapping(bdk_node_t node);
+
+/**
+ * Return a string containing information about the chip's manufacturing wafer
+ *
+ * @param node Node to query
+ *
+ * @return Static string, reused on each call
+ */
+const char* bdk_boot_info_wafer(bdk_node_t node);
+
+/**
+ * Return a string containing the chip's unique serial number
+ *
+ * @param node Node to query
+ *
+ * @return Static string, reused on each call
+ */
+const char* bdk_boot_info_serial(bdk_node_t node);
+
+/**
+ * Return a string containing the chip's unique ID
+ *
+ * @param node Node to query
+ *
+ * @return Static string, reused on each call
+ */
+const char* bdk_boot_info_unique_id(bdk_node_t node);
+
+/** @} */
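
Since each of these functions returns a static buffer that is reused on the
next call, callers that need to keep a value must copy it first; a minimal
sketch (the buffer size is an arbitrary choice):

    #include <stdio.h>
    #include <string.h>

    static void print_chip_ids(bdk_node_t node)
    {
        char serial[64];

        /* Copy before the next call reuses the static buffer */
        strncpy(serial, bdk_boot_info_serial(node), sizeof(serial) - 1);
        serial[sizeof(serial) - 1] = '\0';

        printf("Wafer:  %s\n", bdk_boot_info_wafer(node));
        printf("Serial: %s\n", serial);
    }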
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-mdio.h b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-mdio.h
new file mode 100644
index 0000000000..e99be3ffd6
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-mdio.h
@@ -0,0 +1,54 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Boot services for MDIO
+ *
+ * @addtogroup boot
+ * @{
+ */
+
+/**
+ * Configure MDIO on all nodes as part of booting
+ */
+extern void bdk_boot_mdio(void);
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-pcie.h b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-pcie.h
new file mode 100644
index 0000000000..f59184b668
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-pcie.h
@@ -0,0 +1,54 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Boot services for PCIe
+ *
+ * @addtogroup boot
+ * @{
+ */
+
+/**
+ * Configure PCIe on all nodes as part of booting
+ */
+extern void bdk_boot_pcie(void);
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-qlm.h b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-qlm.h
new file mode 100644
index 0000000000..a1f232030f
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-qlm.h
@@ -0,0 +1,54 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Boot services for QLM
+ *
+ * @addtogroup boot
+ * @{
+ */
+
+/**
+ * Configure QLM on all nodes as part of booting
+ */
+extern void bdk_boot_qlm(void);
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-status.h b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-status.h
new file mode 100644
index 0000000000..2a0896fe10
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-status.h
@@ -0,0 +1,94 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Interface to report boot status
+ *
+ * @addtogroup boot
+ * @{
+ */
+
+/**
+ * Possible boot statuses that can be reported
+ */
+typedef enum
+{
+ /* Codes for boot stub */
+ BDK_BOOT_STATUS_BOOT_STUB_STARTING = 0x000,
+ BDK_BOOT_STATUS_BOOT_STUB_WAITING_FOR_KEY = 0x001,
+ BDK_BOOT_STATUS_BOOT_STUB_BOOT_MENU_KEY = 0x102, /* Signal boot complete as stopped by user */
+ BDK_BOOT_STATUS_BOOT_STUB_NO_BOOT_MENU_KEY = 0x003,
+ BDK_BOOT_STATUS_BOOT_STUB_LOAD_FAILED = 0x004,
+ /* Codes for init.bin */
+ BDK_BOOT_STATUS_INIT_STARTING = 0x005,
+ BDK_BOOT_STATUS_INIT_NODE0_DRAM = 0x006,
+ BDK_BOOT_STATUS_INIT_NODE0_DRAM_COMPLETE = 0x007,
+ BDK_BOOT_STATUS_INIT_NODE0_DRAM_FAILED = 0x008,
+ BDK_BOOT_STATUS_INIT_CCPI = 0x009,
+ BDK_BOOT_STATUS_INIT_CCPI_COMPLETE = 0x00a,
+ BDK_BOOT_STATUS_INIT_CCPI_FAILED = 0x00b,
+ BDK_BOOT_STATUS_INIT_NODE1_DRAM = 0x00c,
+ BDK_BOOT_STATUS_INIT_NODE1_DRAM_COMPLETE = 0x00d,
+ BDK_BOOT_STATUS_INIT_NODE1_DRAM_FAILED = 0x00e,
+ BDK_BOOT_STATUS_INIT_QLM = 0x00f,
+ BDK_BOOT_STATUS_INIT_QLM_COMPLETE = 0x010,
+ BDK_BOOT_STATUS_INIT_QLM_FAILED = 0x011,
+ BDK_BOOT_STATUS_INIT_LOAD_ATF = 0x012,
+ BDK_BOOT_STATUS_INIT_LOAD_DIAGNOSTICS = 0x013,
+ BDK_BOOT_STATUS_INIT_LOAD_FAILED = 0x014,
+ /* Codes for diagnostics.bin */
+ BDK_BOOT_STATUS_DIAG_STARTING = 0x015,
+ BDK_BOOT_STATUS_DIAG_COMPLETE = 0x116, /* Signal boot complete */
+ /* Special codes */
+ BDK_BOOT_STATUS_REQUEST_POWER_CYCLE = 0x0f2, /* Don't continue, power cycle */
+} bdk_boot_status_t;
+
+/**
+ * Report boot status to the BMC or whomever might care. This function
+ * will return quickly except for a status of "power cycle". In the power cycle
+ * case it is assumed the board is in a bad state and should not continue until
+ * a power cycle restarts us.
+ *
+ * @param status Status to report. Enumerated in bdk_boot_status_t
+ */
+extern void bdk_boot_status(bdk_boot_status_t status);
+
+/** @} */
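
A sketch of how an init image might bracket a step with status codes from
the enumeration above; the DRAM-init helper in the middle is a placeholder:

    extern int do_node0_dram_init(void);   /* hypothetical */

    static void report_node0_dram_status(void)
    {
        bdk_boot_status(BDK_BOOT_STATUS_INIT_NODE0_DRAM);
        if (do_node0_dram_init() > 0)
            bdk_boot_status(BDK_BOOT_STATUS_INIT_NODE0_DRAM_COMPLETE);
        else
            bdk_boot_status(BDK_BOOT_STATUS_INIT_NODE0_DRAM_FAILED);
    }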
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-twsi.h b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-twsi.h
new file mode 100644
index 0000000000..155509f3c1
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-twsi.h
@@ -0,0 +1,54 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Boot services for TWSI
+ *
+ * @addtogroup boot
+ * @{
+ */
+
+/**
+ * Configure TWSI on all nodes as part of booting
+ */
+extern void bdk_boot_twsi(void);
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-usb.h b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-usb.h
new file mode 100644
index 0000000000..169047499b
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-usb.h
@@ -0,0 +1,54 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Boot services for USB
+ *
+ * @addtogroup boot
+ * @{
+ */
+
+/**
+ * Configure USB on all nodes as part of booting
+ */
+extern void bdk_boot_usb(void);
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot.h b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot.h
new file mode 100644
index 0000000000..e40b8cebc2
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot.h
@@ -0,0 +1,69 @@
+#ifndef __BDK_BOOT_H__
+#define __BDK_BOOT_H__
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Master include file for functions related to boot images and
+ * their operation. Any BDK-based image booting on hardware has
+ * a number of common tasks it needs to do. This header includes
+ * the API functions in this category. Use bdk.h instead of
+ * including this file directly.
+ *
+ * @defgroup boot Boot related support functions
+ */
+
+#include "bdk-boot-bgx.h"
+#include "bdk-boot-ccpi.h"
+#include "bdk-boot-dram.h"
+#include "bdk-boot-gpio.h"
+#include "bdk-boot-info.h"
+#include "bdk-boot-mdio.h"
+#include "bdk-boot-pcie.h"
+#include "bdk-boot-qlm.h"
+#include "bdk-boot-status.h"
+#include "bdk-boot-twsi.h"
+#include "bdk-boot-usb.h"
+#include "bdk-image.h"
+#include "bdk-watchdog.h"
+#include "bdk-xmodem.h"
+
+#endif
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-image.h b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-image.h
new file mode 100644
index 0000000000..725453c8a0
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-image.h
@@ -0,0 +1,105 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Utility functions for handling binary images
+ *
+ * <hr>$Revision: 49448 $<hr>
+ *
+ * @defgroup image Binary image utility functions
+ * @{
+ */
+
+/**
+ * Structure present at the beginning of BDK images
+ */
+typedef struct
+{
+ uint32_t instruction; /* Raw instruction for skipping header */
+ uint32_t length; /* Length of the image, includes header */
+ uint64_t magic; /* Magic string "THUNDERX" */
+ uint32_t crc32; /* CRC32 of image + header. These bytes are zero when calculating the CRC */
+ uint32_t reserved1; /* Zero, reserved for future use */
+ char name[64]; /* ASCII Image name. Must always end in zero */
+ char version[32]; /* ASCII Version. Must always end in zero */
+ uint64_t reserved[17]; /* Zero, reserved for future use */
+} BDK_LITTLE_ENDIAN_STRUCT bdk_image_header_t;
+
+/**
+ * Validate image header
+ *
+ * @param header Header to validate
+ *
+ * @return 1 if header is valid, zero if invalid
+ */
+extern int bdk_image_header_is_valid(const bdk_image_header_t *header);
+
+/**
+ * Verify image at the given address is good
+ *
+ * @param image Pointer to the image
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_image_verify(const void *image);
+
+/**
+ * Read an image header from a file
+ *
+ * @param handle File handle to read from
+ * @param header Pointer to header structure to fill
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_image_read_header(FILE *handle, bdk_image_header_t *header);
+
+/**
+ * Read an image from a file and boot it, replacing the currently running BDK image
+ *
+ * @param filename File to read the image from
+ * @param loc Offset into file for image. This is normally zero for regular files. Device
+ * files, such as /dev/mem, will use this to locate the image.
+ *
+ * @return Negative on failure. On success this function never returns.
+ */
+extern int bdk_image_boot(const char *filename, uint64_t loc);
+
+/** @} */
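
A sketch of validating an image file with the calls above; the path handling
and error reporting are illustrative:

    #include <stdio.h>

    static int check_image_example(const char *path)
    {
        FILE *f = fopen(path, "rb");
        if (!f)
            return -1;

        bdk_image_header_t header;
        int rc = bdk_image_read_header(f, &header);   /* zero on success */
        fclose(f);

        if (rc != 0 || !bdk_image_header_is_valid(&header))
            return -1;
        printf("Image %s, version %s\n", header.name, header.version);
        return 0;
    }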
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-watchdog.h b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-watchdog.h
new file mode 100644
index 0000000000..45f6efb537
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-watchdog.h
@@ -0,0 +1,81 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Utility functions for controlling the watchdog during boot
+ *
+ * @defgroup watchdog Watchdog related functions
+ * @{
+ */
+
+/**
+ * Set up the watchdog to expire in timeout_ms milliseconds. When the watchdog
+ * expires, three things happen on the chip:
+ * 1) Expire 1: interrupt that is ignored by the BDK
+ * 2) Expire 2: DEL3T interrupt, which is disabled and ignored
+ * 3) Expire 3: Soft reset of the chip
+ *
+ * Since we want a soft reset after timeout_ms, we actually program the
+ * watchdog interval to timeout / 3.
+ *
+ * @param timeout_ms Timeout in milliseconds. If this is zero, the timeout is taken from the
+ * global configuration option BDK_BRD_CFG_WATCHDOG_TIMEOUT
+ */
+extern void bdk_watchdog_set(unsigned int timeout_ms);
+
+/**
+ * Signal the watchdog that we are still running
+ */
+extern void bdk_watchdog_poke(void);
+
+/**
+ * Disable the hardware watchdog
+ */
+extern void bdk_watchdog_disable(void);
+
+/**
+ * Return true if the watchdog is configured and running
+ *
+ * @return Non-zero if watchdog is running
+ */
+extern int bdk_watchdog_is_running(void);
+
+/** @} */
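
A sketch of the typical lifetime of the boot watchdog; the 10-second timeout
and the work loop are illustrative:

    extern int boot_work_remaining(void);   /* hypothetical */
    extern void do_boot_work(void);         /* hypothetical */

    static void watchdog_example(void)
    {
        bdk_watchdog_set(10000);        /* soft reset if we hang for ~10s */

        while (boot_work_remaining())
        {
            do_boot_work();
            bdk_watchdog_poke();        /* tell the hardware we're alive */
        }

        if (bdk_watchdog_is_running())
            bdk_watchdog_disable();     /* hand off with the watchdog off */
    }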
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-xmodem.h b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-xmodem.h
new file mode 100644
index 0000000000..3caff397c5
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-xmodem.h
@@ -0,0 +1,59 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Boot services for XMODEM transfers
+ *
+ * @addtogroup boot
+ * @{
+ */
+
+/**
+ * Receive a file through XMODEM and write it to an internal file.
+ *
+ * @param dest_file File to write to
+ * @param offset Offset into the file to write
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_xmodem_upload(const char *dest_file, uint64_t offset);
+
+/** @} */
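
A minimal call sketch; the destination path is illustrative:

    static void xmodem_example(void)
    {
        /* Receive over XMODEM, writing from offset zero */
        if (bdk_xmodem_upload("/fatfs/new-image.bin", 0) != 0)
            bdk_error("XMODEM transfer failed\n");
    }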
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-dram/bdk-dram-config.h b/src/vendorcode/cavium/include/bdk/libbdk-dram/bdk-dram-config.h
new file mode 100644
index 0000000000..84488597b4
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-dram/bdk-dram-config.h
@@ -0,0 +1,118 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Functions for configuring DRAM.
+ *
+ * <hr>$Revision: 49448 $<hr>
+ *
+ * @defgroup dram DRAM related functions
+ * @{
+ */
+#include "../libdram/libdram.h"
+
+/**
+ * Lookup a DRAM configuration by name and initialize DRAM with it
+ *
+ * @param node Node to configure
+ * @param ddr_clock_override
+ * If non-zero, override the DRAM frequency specified in the config with
+ * this value
+ *
+ * @return Amount of DRAM in MB, or negative on failure
+ */
+extern int bdk_dram_config(int node, int ddr_clock_override);
+
+/**
+ * Do DRAM configuration tuning
+ *
+ * @param node Node to tune
+ *
+ * @return Success or Fail
+ */
+extern int bdk_dram_tune(int node);
+
+/**
+ * Do DRAM Margin all tests
+ *
+ * @param node Node to tune
+ *
+ * @return None
+ */
+extern void bdk_dram_margin(int node);
+
+/**
+ * Get the amount of DRAM configured for a node. This is read from the LMC
+ * controller after DRAM is setup.
+ *
+ * @param node Node to query
+ *
+ * @return Size in megabytes
+ */
+extern uint64_t bdk_dram_get_size_mbytes(int node);
+
+/**
+ * Return a string describing the current node's DRAM configuration.
+ *
+ * @param node node to retrieve
+ *
+ * @return String or NULL
+ */
+extern const char* bdk_dram_get_info_string(int node);
+
+/**
+ * Return the highest address currently used by the BDK. This address will
+ * be about 4MB above the top of the BDK to make sure small growth between the
+ * call and its use doesn't cause corruption. Any call to memory allocation can
+ * change this value.
+ *
+ * @return Size of the BDK in bytes
+ */
+extern uint64_t bdk_dram_get_top_of_bdk(void);
+
+extern int __bdk_dram_get_num_lmc(bdk_node_t node);
+extern int __bdk_dram_is_ddr4(bdk_node_t node, int lmc);
+extern int __bdk_dram_is_rdimm(bdk_node_t node, int lmc);
+extern uint32_t __bdk_dram_get_row_mask(bdk_node_t node, int lmc);
+extern uint32_t __bdk_dram_get_col_mask(bdk_node_t node, int lmc);
+extern int __bdk_dram_get_num_bank_bits(bdk_node_t node, int lmc);
+
+/** @} */
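
A rough usage sketch of this interface (node 0, and the availability of
printf, are assumptions for illustration):

    int mbytes = bdk_dram_config(0, 0);   /* 0 = no clock override */
    if (mbytes < 0)
        printf("DRAM init failed\n");
    else
        printf("DRAM: %d MB - %s\n", mbytes, bdk_dram_get_info_string(0));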
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-dram/bdk-dram-test.h b/src/vendorcode/cavium/include/bdk/libbdk-dram/bdk-dram-test.h
new file mode 100644
index 0000000000..f6be005995
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-dram/bdk-dram-test.h
@@ -0,0 +1,198 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Functions for configuring DRAM.
+ *
+ * <hr>$Revision: 49448 $<hr>
+ *
+ * @defgroup dram DRAM related functions
+ * @{
+ */
+
+/**
+ * Flags to pass to DRAM tests to control behavior
+ */
+typedef enum
+{
+ /* Which nodes to check. If none are specified, default to all */
+ BDK_DRAM_TEST_NODE0 = 1 << BDK_NODE_0,
+ BDK_DRAM_TEST_NODE1 = 1 << BDK_NODE_1,
+ BDK_DRAM_TEST_NODE2 = 1 << BDK_NODE_2,
+ BDK_DRAM_TEST_NODE3 = 1 << BDK_NODE_3,
+ BDK_DRAM_TEST_NO_STOP_ERROR = 1 << 8, /**< Don't stop running tests on errors, continue counting all errors */
+ BDK_DRAM_TEST_NO_PROGRESS = 1 << 9, /**< Don't report progress percentage during run, for batch runs */
+ BDK_DRAM_TEST_NO_STATS = 1 << 10, /**< Don't report usage status for LMC, or CCPI with USE_CCPI */
+ BDK_DRAM_TEST_NO_BANNERS = 1 << 11, /**< Don't display banners at beginning of test */
+ BDK_DRAM_TEST_USE_CCPI = 1 << 12, /**< Test using other node across CCPI. Use to verify CCPI. This
+ automatically enables CCPI usage reporting unless NO_STATS is
+ also specified */
+} bdk_dram_test_flags_t;
+
+/**
+ * Convert a test enumeration into a string
+ *
+ * @param test Test to convert
+ *
+ * @return String for display
+ */
+extern const char* bdk_dram_get_test_name(int test);
+
+/**
+ * Perform a memory test.
+ *
+ * @param test Test type to run
+ * @param start_address
+ * Physical address to start at
+ * @param length Length of memory block
+ * @param flags Flags to control memory test options. Zero defaults to testing all
+ * nodes with statistics and progress output.
+ *
+ * @return Number of errors found. Zero is success. Negative means the test
+ * did not run due to some other failure.
+ */
+extern int
+bdk_dram_test(int test, uint64_t start_address, uint64_t length,
+ bdk_dram_test_flags_t flags);
+
+/**
+ * Given a physical DRAM address, extract information about the node, LMC, DIMM,
+ * rank, bank, row, and column that was accessed.
+ *
+ * @param address Physical address to decode
+ * @param node Node the address was for
+ * @param lmc LMC controller the address was for
+ * @param dimm DIMM the address was for
+ * @param prank Physical RANK on the DIMM
+ * @param lrank Logical RANK on the DIMM
+ * @param bank BANK on the DIMM
+ * @param row Row on the DIMM
+ * @param col Column on the DIMM
+ */
+extern void
+bdk_dram_address_extract_info(uint64_t address, int *node, int *lmc, int *dimm,
+ int *prank, int *lrank, int *bank, int *row, int *col);
+
+/**
+ * Construct a physical address given the node, LMC, DIMM, rank, bank, row, and column.
+ *
+ * @param node Node the address was for
+ * @param lmc LMC controller the address was for
+ * @param dimm DIMM the address was for
+ * @param prank Physical RANK on the DIMM
+ * @param lrank Logical RANK on the DIMM
+ * @param bank BANK on the DIMM
+ * @param row Row on the DIMM
+ * @param col Column on the DIMM
+ */
+extern uint64_t
+bdk_dram_address_construct_info(bdk_node_t node, int lmc, int dimm,
+ int prank, int lrank, int bank, int row, int col);
+
+/**
+ * Inject a DRAM error at a specific address in memory. The injection can either
+ * be a single bit inside the byte, or a double bit error in the ECC byte. Double
+ * bit errors may corrupt memory, causing software to crash. The corruption is
+ * written to memory and will continue to exist until the cache line is written
+ * again. After a call to this function, the BDK should report an ECC error. Double
+ * bit errors corrupt bits 0-1.
+ *
+ * @param address Physical address to corrupt. Any byte alignment is supported
+ * @param bit Bit to corrupt in the byte (0-7), or -1 to create a double bit fault in the ECC
+ * byte.
+ */
+extern void bdk_dram_test_inject_error(uint64_t address, int bit);
+
+/* These variables count the number of ECC errors. They should only be accessed atomically */
+/* Keep the counts per memory channel (LMC) for more detail. */
+#define BDK_MAX_MEM_CHANS 4
+extern int64_t __bdk_dram_ecc_single_bit_errors[BDK_MAX_MEM_CHANS];
+extern int64_t __bdk_dram_ecc_double_bit_errors[BDK_MAX_MEM_CHANS];
+
+/* These are internal support functions */
+extern void __bdk_dram_flush_to_mem(uint64_t address);
+extern void __bdk_dram_flush_to_mem_range(uint64_t area, uint64_t max_address);
+extern void __bdk_dram_report_error(uint64_t address, uint64_t data, uint64_t correct, int burst, int fails);
+extern void __bdk_dram_report_error2(uint64_t address1, uint64_t data1, uint64_t address2, uint64_t data2, int burst, int fails);
+extern int __bdk_dram_retry_failure(int burst, uint64_t address, uint64_t data, uint64_t expected);
+extern int __bdk_dram_retry_failure2(int burst, uint64_t address1, uint64_t data1, uint64_t address2, uint64_t data2);
+
+static inline void __bdk_dram_write64(uint64_t address, uint64_t data)
+{
+ /* The DRAM code doesn't use the normal bdk_phys_to_ptr() because of the
+ NULL check in it. This greatly slows down the memory tests */
+ volatile uint64_t *ptr = (void*)address;
+ *ptr = data;
+}
+
+static inline uint64_t __bdk_dram_read64(uint64_t address)
+{
+ /* The DRAM code doesn't use the normal bdk_phys_to_ptr() because of the
+ NULL check in it. This greatly slows down the memory tests */
+ volatile uint64_t *ptr = (void*)address;
+ return *ptr;
+}
+
+/* This is the function prototype that all tests must use. "start_address" is
+ the first byte to be tested (inclusive), "end_address" is the address right
+ after the region (exclusive). For example, if start_address equals
+ end_address, no memory will be tested */
+typedef int (*__bdk_dram_test_t)(uint64_t start_address, uint64_t end_address, int bursts);
+
+/* These are the actual tests that get run. Each test is meant to be run with
+ a small range and repeated on lots of cores and large ranges. The return
+ value is the number of errors found */
+extern int __bdk_dram_test_mem_address_bus(uint64_t start_address, uint64_t end_address, int bursts);
+extern int __bdk_dram_test_mem_checkerboard(uint64_t start_address, uint64_t end_address, int bursts);
+extern int __bdk_dram_test_mem_data_bus(uint64_t start_address, uint64_t end_address, int bursts);
+extern int __bdk_dram_test_mem_leftwalk0(uint64_t start_address, uint64_t end_address, int bursts);
+extern int __bdk_dram_test_mem_leftwalk1(uint64_t start_address, uint64_t end_address, int bursts);
+extern int __bdk_dram_test_mem_random(uint64_t start_address, uint64_t end_address, int bursts);
+extern int __bdk_dram_test_mem_rightwalk0(uint64_t start_address, uint64_t end_address, int bursts);
+extern int __bdk_dram_test_mem_rightwalk1(uint64_t start_address, uint64_t end_address, int bursts);
+extern int __bdk_dram_test_mem_rows(uint64_t start_address, uint64_t end_address, int bursts);
+extern int __bdk_dram_test_mem_self_addr(uint64_t start_address, uint64_t end_address, int bursts);
+extern int __bdk_dram_test_mem_solid(uint64_t start_address, uint64_t end_address, int bursts);
+extern int __bdk_dram_test_mem_xor(uint64_t start_address, uint64_t end_address, int bursts);
+extern int __bdk_dram_test_fast_scan(uint64_t area, uint64_t max_address, int bursts);
+
+/** @} */
+
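A sketch of running one of these tests (the test index 0 and the 16 MB
length are illustrative; see bdk_dram_get_test_name() for what each index
means):

    uint64_t start = bdk_dram_get_top_of_bdk();
    int errors = bdk_dram_test(0, start, 16ull << 20,
                               BDK_DRAM_TEST_NO_STOP_ERROR);
    if (errors > 0)
    {
        int node, lmc, dimm, prank, lrank, bank, row, col;
        /* Decode an address into its physical location (shown here on
           'start' purely for illustration) */
        bdk_dram_address_extract_info(start, &node, &lmc, &dimm,
                                      &prank, &lrank, &bank, &row, &col);
    }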
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-dram/bdk-dram.h b/src/vendorcode/cavium/include/bdk/libbdk-dram/bdk-dram.h
new file mode 100644
index 0000000000..a4eb32805c
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-dram/bdk-dram.h
@@ -0,0 +1,54 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Master include file for DRAM configuration and testing. Use bdk.h
+ * instead of including this file directly.
+ *
+ * <hr>$Revision: 49448 $<hr>
+ * @defgroup dram DRAM related functions
+ * @{
+ */
+
+#include "bdk-dram-config.h"
+#include "bdk-dram-test.h"
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-driver/bdk-driver-sgpio.h b/src/vendorcode/cavium/include/bdk/libbdk-driver/bdk-driver-sgpio.h
new file mode 100644
index 0000000000..1e9f8c5930
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-driver/bdk-driver-sgpio.h
@@ -0,0 +1,153 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include "libbdk-arch/bdk-csrs-sgp.h"
+
+/**
+ * @file
+ *
+ * Serial GPIO interface (SGPIO)
+ *
+ * <hr>$Revision: 49448 $<hr>
+ *
+ * @addtogroup hal
+ * @{
+ */
+#ifndef _BDK_DRIVER_SGPIO_H_
+#define _BDK_DRIVER_SGPIO_H_
+
+typedef enum
+{
+ BDK_SGPIO_LED_ACT,
+ BDK_SGPIO_LED_LOC,
+ BDK_SGPIO_LED_ERR,
+} bdk_sgpio_led_t;
+
+typedef enum
+{
+ BDK_SGPIO_STATIC_ON = BDK_SGP_TX_ACT_E_STATIC_ON,
+ BDK_SGPIO_STATIC_OFF = BDK_SGP_TX_ACT_E_STATIC_OFF,
+ BDK_SGPIO_BLINK = BDK_SGP_TX_ACT_E_A_ON_OFF,
+} bdk_sgpio_led_state_t;
+
+
+/**
+ * Get the SGPIO controller state
+ *
+ * @param node CPU node number
+ *
+ * @return 1 == Controller is enabled, 0 == Controller is disabled
+ */
+int bdk_sgpio_is_enabled(bdk_node_t node);
+
+/**
+ * Set the mode for a specific LED.
+ *
+ * @param node CPU node number
+ * @param drive Drive number of LED (0-15)
+ * @param led LED type:
+ * BDK_SGPIO_LED_ACT
+ * BDK_SGPIO_LED_ERR
+ * BDK_SGPIO_LED_LOC
+ * @param state LED state:
+ * BDK_SGPIO_STATIC_ON
+ * BDK_SGPIO_STATIC_OFF
+ * BDK_SGPIO_BLINK
+ */
+void bdk_sgpio_set_led_state(bdk_node_t node, int drive, int led, int state);
+
+/**
+ * Get the mode for a specific LED.
+ *
+ * @param node CPU node number
+ * @param drive Drive number of LED (0-15)
+ * @param led LED type:
+ * BDK_SGPIO_LED_ACT
+ * BDK_SGPIO_LED_ERR
+ * BDK_SGPIO_LED_LOC
+ *
+ * @return LED state:
+ * BDK_SGPIO_STATIC_ON
+ * BDK_SGPIO_STATIC_OFF
+ * BDK_SGPIO_BLINK
+ */
+int bdk_sgpio_get_led_state(bdk_node_t node, int drive, int led);
+
+/**
+ * Set the controller's SCLOCK frequency
+ *
+ * @param node CPU node number
+ * @param freq Frequency to set
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_sgpio_set_sclock(bdk_node_t node, int freq);
+
+/**
+ * Enable / disable controller
+ *
+ * @param node CPU node number
+ * @param ena zero = disable, non-zero = enable
+ */
+void bdk_sgpio_enable_controller(bdk_node_t node, int ena);
+
+/**
+ * Set up SGPIO pin muxing based on environment.
+ *
+ * @param node CPU node number
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_sgpio_setup_gpio_pins(bdk_node_t node);
+
+/**
+ * Initialize the SGPIO controller.
+ * - Set up the SGPIO pin muxing as per configuration environment.
+ * - Turn all LEDs off
+ * - Set the blink rate to 1/2 second
+ * - Enable the controller
+ *
+ * @param node CPU node number
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_sgpio_initialize(bdk_node_t node);
+
+/** @} */
+
+#endif /* _BDK_DRIVER_SGPIO_H_ */
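
A short sketch of typical use (drive number 3 is an arbitrary example;
'node' is assumed to hold a valid bdk_node_t):

    if (bdk_sgpio_initialize(node) == 0)   /* mux pins, LEDs off, enable */
        bdk_sgpio_set_led_state(node, 3, BDK_SGPIO_LED_LOC, BDK_SGPIO_BLINK);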
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-driver/bdk-driver.h b/src/vendorcode/cavium/include/bdk/libbdk-driver/bdk-driver.h
new file mode 100644
index 0000000000..c99b8ef959
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-driver/bdk-driver.h
@@ -0,0 +1,71 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * bdk_driver_t represents drivers for devices connected using
+ * ECAMs. These are discovered by scanning the ECAMs and
+ * instantiating devices for what is found.
+ *
+ * @defgroup driver ECAM Attached Drivers
+ * @addtogroup driver
+ * @{
+ */
+#include "bdk-driver-sgpio.h"
+
+/**
+ * Defines the main entry points for a device driver
+ */
+typedef struct bdk_driver_s
+{
+ struct bdk_driver_s *next; /* Used by bdk-device to maintain list */
+ uint32_t id; /* ECAM device ID */
+ int (*probe)(bdk_device_t *device);
+ int (*init)(bdk_device_t *device);
+} bdk_driver_t;
+
+/**
+ * Called by the BDK to register all loaded drivers with bdk-device.
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int __bdk_driver_register_all(void) BDK_WEAK;
+
+/** @} */
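
A hypothetical driver instance, only to show how the fields fit together
(the device ID and both callbacks are invented for illustration):

    static int demo_probe(bdk_device_t *device) { (void)device; return 0; }
    static int demo_init(bdk_device_t *device) { (void)device; return 0; }

    static bdk_driver_t demo_driver = {
        .id = 0xa018,        /* hypothetical ECAM device ID */
        .probe = demo_probe, /* lightweight presence check */
        .init = demo_init,   /* full hardware bring-up */
    };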
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-access-native.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-access-native.h
new file mode 100644
index 0000000000..aa9d87bf37
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-access-native.h
@@ -0,0 +1,155 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ * Functions for accessing memory and CSRs when we are compiling
+ * natively.
+ *
+ * <hr>$Revision: 38306 $<hr>
+*/
+
+/**
+ * Convert a memory pointer (void*) into a hardware compatible
+ * memory address (uint64_t). Cavium hardware widgets don't
+ * understand logical addresses.
+ *
+ * @param ptr C style memory pointer
+ * @return Hardware physical address
+ */
+static inline uint64_t bdk_ptr_to_phys(void *ptr) __attribute__ ((pure, always_inline));
+static inline uint64_t bdk_ptr_to_phys(void *ptr)
+{
+ bdk_warn_if(!ptr, "bdk_ptr_to_phys() passed a NULL\n");
+ return (long)ptr;
+}
+
+
+/**
+ * Convert a hardware physical address (uint64_t) into a
+ * memory pointer (void *).
+ *
+ * @param physical_address
+ * Hardware physical address to memory
+ * @return Pointer to memory
+ */
+static inline void *bdk_phys_to_ptr(uint64_t physical_address) __attribute__ ((pure, always_inline));
+static inline void *bdk_phys_to_ptr(uint64_t physical_address)
+{
+ bdk_warn_if(physical_address==0, "bdk_phys_to_ptr() passed a zero address\n");
+ return (void*)(long)physical_address;
+}
+
+
+/* We have a full 64bit ABI. Writing to a 64bit address can be done with
+ a simple volatile pointer */
+#define BDK_BUILD_WRITE64(TYPE) \
+static inline void bdk_write64_##TYPE(uint64_t addr, TYPE##_t val) __attribute__ ((always_inline)); \
+static inline void bdk_write64_##TYPE(uint64_t addr, TYPE##_t val) \
+{ \
+ *(volatile TYPE##_t *)bdk_phys_to_ptr(addr) = val; \
+}
+
+/* We have a full 64bit ABI. Reading from a 64bit address can be done with
+ a simple volatile pointer */
+#define BDK_BUILD_READ64(TYPE) \
+static inline TYPE##_t bdk_read64_##TYPE(uint64_t addr) __attribute__ ((always_inline)); \
+static inline TYPE##_t bdk_read64_##TYPE(uint64_t addr) \
+{ \
+ return *(volatile TYPE##_t *)bdk_phys_to_ptr(addr); \
+}
+
+/* The following defines 8 functions for writing to a 64bit address. Each
+ takes two arguments, the address and the value to write.
+ bdk_write64_int64 bdk_write64_uint64
+ bdk_write64_int32 bdk_write64_uint32
+ bdk_write64_int16 bdk_write64_uint16
+ bdk_write64_int8 bdk_write64_uint8 */
+BDK_BUILD_WRITE64(int64)
+BDK_BUILD_WRITE64(int32)
+BDK_BUILD_WRITE64(int16)
+BDK_BUILD_WRITE64(int8)
+BDK_BUILD_WRITE64(uint64)
+BDK_BUILD_WRITE64(uint32)
+BDK_BUILD_WRITE64(uint16)
+BDK_BUILD_WRITE64(uint8)
+
+/* The following defines 8 functions for reading from a 64bit address. Each
+ takes the address as the only argument
+ bdk_read64_int64 bdk_read64_uint64
+ bdk_read64_int32 bdk_read64_uint32
+ bdk_read64_int16 bdk_read64_uint16
+ bdk_read64_int8 bdk_read64_uint8 */
+BDK_BUILD_READ64(int64)
+BDK_BUILD_READ64(int32)
+BDK_BUILD_READ64(int16)
+BDK_BUILD_READ64(int8)
+BDK_BUILD_READ64(uint64)
+BDK_BUILD_READ64(uint32)
+BDK_BUILD_READ64(uint16)
+BDK_BUILD_READ64(uint8)
+
+
+/**
+ * Returns the number of bits set in the provided value.
+ * Simple wrapper for POP instruction.
+ *
+ * @param val 32 bit value to count set bits in
+ *
+ * @return Number of bits set
+ */
+static inline uint32_t bdk_pop(uint32_t val)
+{
+ return __builtin_popcount(val);
+}
+
+
+/**
+ * Returns the number of bits set in the provided value.
+ * Simple wrapper for DPOP instruction.
+ *
+ * @param val 64 bit value to count set bits in
+ *
+ * @return Number of bits set
+ */
+static inline int bdk_dpop(uint64_t val)
+{
+ return __builtin_popcountl(val);
+}
+
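A tiny sketch of these native accessors (the round trip relies on the 1:1
physical/logical address mapping described above):

    uint64_t word = 0;
    uint64_t pa = bdk_ptr_to_phys(&word);   /* identity on native builds */
    bdk_write64_uint64(pa, 0x1234);
    uint64_t back = bdk_read64_uint64(pa);  /* back == 0x1234 */
    int bits = bdk_dpop(back);              /* population count of value */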
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-access.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-access.h
new file mode 100644
index 0000000000..d50ecd7e5c
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-access.h
@@ -0,0 +1,133 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ * Function prototypes for accessing memory and CSRs.
+ *
+ * <hr>$Revision: 38306 $<hr>
+ *
+ * @addtogroup hal
+ * @{
+*/
+
+#define BDK_FUNCTION static inline
+
+/**
+ * Convert a memory pointer (void*) into a hardware compatible
+ * memory address (uint64_t). Cavium hardware widgets don't
+ * understand logical addresses.
+ *
+ * @param ptr C style memory pointer
+ * @return Hardware physical address
+ */
+BDK_FUNCTION uint64_t bdk_ptr_to_phys(void *ptr);
+
+/**
+ * Convert a hardware physical address (uint64_t) into a
+ * memory pointer (void *).
+ *
+ * @param physical_address
+ * Hardware physical address to memory
+ * @return Pointer to memory
+ */
+BDK_FUNCTION void *bdk_phys_to_ptr(uint64_t physical_address);
+
+BDK_FUNCTION void bdk_write64_int64(uint64_t address, int64_t value);
+BDK_FUNCTION void bdk_write64_uint64(uint64_t address, uint64_t value);
+BDK_FUNCTION void bdk_write64_int32(uint64_t address, int32_t value);
+BDK_FUNCTION void bdk_write64_uint32(uint64_t address, uint32_t value);
+BDK_FUNCTION void bdk_write64_int16(uint64_t address, int16_t value);
+BDK_FUNCTION void bdk_write64_uint16(uint64_t address, uint16_t value);
+BDK_FUNCTION void bdk_write64_int8(uint64_t address, int8_t value);
+BDK_FUNCTION void bdk_write64_uint8(uint64_t address, uint8_t value);
+
+BDK_FUNCTION int64_t bdk_read64_int64(uint64_t address);
+BDK_FUNCTION uint64_t bdk_read64_uint64(uint64_t address);
+BDK_FUNCTION int32_t bdk_read64_int32(uint64_t address);
+BDK_FUNCTION uint32_t bdk_read64_uint32(uint64_t address);
+BDK_FUNCTION int16_t bdk_read64_int16(uint64_t address);
+BDK_FUNCTION uint16_t bdk_read64_uint16(uint64_t address);
+BDK_FUNCTION int8_t bdk_read64_int8(uint64_t address);
+BDK_FUNCTION uint8_t bdk_read64_uint8(uint64_t address);
+
+/**
+ * Returns the number of bits set in the provided value.
+ * Simple wrapper for POP instruction.
+ *
+ * @param val 32 bit value to count set bits in
+ *
+ * @return Number of bits set
+ */
+BDK_FUNCTION uint32_t bdk_pop(uint32_t val);
+
+/**
+ * Returns the number of bits set in the provided value.
+ * Simple wrapper for DPOP instruction.
+ *
+ * @param val 64 bit value to count set bits in
+ *
+ * @return Number of bits set
+ */
+BDK_FUNCTION int bdk_dpop(uint64_t val);
+
+/**
+ * Wait for the specified number of core clock cycles
+ *
+ * @param cycles Number of core clock cycles to wait
+ */
+extern void bdk_wait(uint64_t cycles);
+
+/**
+ * Wait for the specified number of micro seconds
+ *
+ * @param usec micro seconds to wait
+ */
+extern void bdk_wait_usec(uint64_t usec);
+
+/**
+ * Perform a soft reset of the chip
+ *
+ * @param node Node to reset
+ */
+extern void bdk_reset_chip(bdk_node_t node);
+
+#undef BDK_FUNCTION
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-atomic.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-atomic.h
new file mode 100644
index 0000000000..7f521a67e2
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-atomic.h
@@ -0,0 +1,541 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * This file provides atomic operations
+ *
+ * @addtogroup hal
+ * @{
+ */
+
+/**
+ * Atomically adds a signed value to a 32 bit (aligned) memory location.
+ *
+ * This version does not perform 'sync' operations to enforce memory
+ * operations. This should only be used when there are no memory operation
+ * ordering constraints. (This should NOT be used for reference counting -
+ * use the standard version instead.)
+ *
+ * @param ptr address in memory to add incr to
+ * @param incr amount to increment memory location by (signed)
+ */
+static inline void bdk_atomic_add32_nosync(int32_t *ptr, int32_t incr)
+{
+ /* Atomic add with no ordering */
+ asm volatile ("ldadd %w[i], wzr, [%[b]]"
+ : [r] "+m" (*ptr)
+ : [i] "r" (incr), [b] "r" (ptr)
+ : "memory");
+}
+
+/**
+ * Atomically adds a signed value to a 32 bit (aligned) memory location.
+ *
+ * Memory access ordering is enforced before/after the atomic operation,
+ * so no additional 'sync' instructions are required.
+ *
+ *
+ * @param ptr address in memory to add incr to
+ * @param incr amount to increment memory location by (signed)
+ */
+static inline void bdk_atomic_add32(int32_t *ptr, int32_t incr)
+{
+ /* Atomic add with acquire and release */
+ asm volatile ("ldaddal %w[i], wzr, [%[b]]"
+ : "+m" (*ptr)
+ : [i] "r" (incr), [b] "r" (ptr)
+ : "memory");
+}
+
+/**
+ * Atomically sets a 32 bit (aligned) memory location to a value
+ *
+ * @param ptr address of memory to set
+ * @param value value to set memory location to.
+ */
+static inline void bdk_atomic_set32(int32_t *ptr, int32_t value)
+{
+ /* Implies a release */
+ asm volatile ("stlr %w[v], [%[b]]"
+ : "+m" (*ptr)
+ : [v] "r" (value), [b] "r" (ptr)
+ : "memory");
+}
+
+/**
+ * Returns the current value of a 32 bit (aligned) memory
+ * location.
+ *
+ * @param ptr Address of memory to get
+ * @return Value of the memory
+ */
+static inline int32_t bdk_atomic_get32(int32_t *ptr)
+{
+ return *(volatile int32_t *)ptr;
+}
+
+/**
+ * Atomically adds a signed value to a 64 bit (aligned) memory location.
+ *
+ * This version does not perform 'sync' operations to enforce memory
+ * operations. This should only be used when there are no memory operation
+ * ordering constraints. (This should NOT be used for reference counting -
+ * use the standard version instead.)
+ *
+ * @param ptr address in memory to add incr to
+ * @param incr amount to increment memory location by (signed)
+ */
+static inline void bdk_atomic_add64_nosync(int64_t *ptr, int64_t incr)
+{
+ /* Atomic add with no ordering */
+ asm volatile ("ldadd %x[i], xzr, [%[b]]"
+ : [r] "+m" (*ptr)
+ : [i] "r" (incr), [b] "r" (ptr)
+ : "memory");
+}
+
+/**
+ * Atomically adds a signed value to a 64 bit (aligned) memory location.
+ *
+ * Memory access ordering is enforced before/after the atomic operation,
+ * so no additional 'sync' instructions are required.
+ *
+ *
+ * @param ptr address in memory to add incr to
+ * @param incr amount to increment memory location by (signed)
+ */
+static inline void bdk_atomic_add64(int64_t *ptr, int64_t incr)
+{
+ /* Atomic add with acquire and release */
+ asm volatile ("ldaddal %x[i], xzr, [%[b]]"
+ : [r] "+m" (*ptr)
+ : [i] "r" (incr), [b] "r" (ptr)
+ : "memory");
+}
+
+/**
+ * Atomically sets a 64 bit (aligned) memory location to a value
+ *
+ * @param ptr address of memory to set
+ * @param value value to set memory location to.
+ */
+static inline void bdk_atomic_set64(int64_t *ptr, int64_t value)
+{
+ /* Implies a release */
+ asm volatile ("stlr %x[v], [%[b]]"
+ : "+m" (*ptr)
+ : [v] "r" (value), [b] "r" (ptr)
+ : "memory");
+}
+
+/**
+ * Returns the current value of a 64 bit (aligned) memory
+ * location.
+ *
+ * @param ptr Address of memory to get
+ * @return Value of the memory
+ */
+static inline int64_t bdk_atomic_get64(int64_t *ptr)
+{
+ return *(volatile int64_t *)ptr;
+}
+
+/**
+ * Atomically compares old_val with the value at ptr and, if they match,
+ * stores new_val to ptr.
+ * If *ptr and old_val don't match, the function returns failure immediately.
+ * If they match, the function retries until new_val is stored atomically or
+ * until *ptr and old_val no longer match.
+ *
+ * Does no memory synchronization.
+ *
+ * @return 1 on success (match and store)
+ * 0 on no match
+ */
+static inline int bdk_atomic_compare_and_store32_nosync(uint32_t *ptr, uint32_t old_val, uint32_t new_val) __attribute__((always_inline));
+static inline int bdk_atomic_compare_and_store32_nosync(uint32_t *ptr, uint32_t old_val, uint32_t new_val)
+{
+ uint32_t val = old_val;
+
+ /* CN88XX pass 1.x has errata AP-22500: GlobalSync request during a multi-cycle ATOMIC stalls forever
+ Don't use compare and swap on these chips */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X))
+ {
+ asm volatile ("1: ldxr %w[v], [%[b]] \n"
+ " cmp %w[v], %w[o] \n"
+ " b.ne 2f \n"
+ " stxr %w[v], %w[n], [%[b]]\n" /* Returns zero on success */
+ " cbnz %w[v], 1b \n"
+ " mov %w[v], %w[o] \n"
+ "2: \n"
+ : [mem] "+m" (*ptr), [v] "=&r" (val)
+ : [b] "r" (ptr), [n] "r" (new_val), [o] "r" (old_val)
+ : );
+ }
+ else
+ {
+ asm volatile ("cas %w[o], %w[n], [%[b]]"
+ : [mem] "+m" (*ptr), [o] "+r" (val)
+ : [b] "r" (ptr), [n] "r" (new_val)
+ : );
+ }
+ return old_val == val;
+}
+
+/**
+ * Atomically compares old_val with the value at ptr and, if they match,
+ * stores new_val to ptr.
+ * If *ptr and old_val don't match, the function returns failure immediately.
+ * If they match, the function retries until new_val is stored atomically or
+ * until *ptr and old_val no longer match.
+ *
+ * Does memory synchronization that is required to use this as a locking primitive.
+ *
+ * @return 1 on success (match and store)
+ * 0 on no match
+ */
+static inline int bdk_atomic_compare_and_store32(uint32_t *ptr, uint32_t old_val, uint32_t new_val) __attribute__((always_inline));
+static inline int bdk_atomic_compare_and_store32(uint32_t *ptr, uint32_t old_val, uint32_t new_val)
+{
+ uint32_t val = old_val;
+
+ /* CN88XX pass 1.x has errata AP-22500: GlobalSync request during a multi-cycle ATOMIC stalls forever
+ Don't use compare and swap on these chips */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X))
+ {
+ asm volatile ("1: ldaxr %w[v], [%[b]] \n"
+ " cmp %w[v], %w[o] \n"
+ " b.ne 2f \n"
+ " stlxr %w[v], %w[n], [%[b]]\n" /* Returns zero on success */
+ " cbnz %w[v], 1b \n"
+ " mov %w[v], %w[o] \n"
+ "2: \n"
+ : [mem] "+m" (*ptr), [v] "=&r" (val)
+ : [b] "r" (ptr), [n] "r" (new_val), [o] "r" (old_val)
+ : );
+ }
+ else
+ {
+ asm volatile ("casal %w[o], %w[n], [%[b]]"
+ : [mem] "+m" (*ptr), [o] "+r" (val)
+ : [b] "r" (ptr), [n] "r" (new_val)
+ : );
+ }
+ return old_val == val;
+}
+
+/**
+ * Atomically compares old_val with the value at ptr and, if they match,
+ * stores new_val to ptr.
+ * If *ptr and old_val don't match, the function returns failure immediately.
+ * If they match, the function retries until new_val is stored atomically or
+ * until *ptr and old_val no longer match.
+ *
+ * Does no memory synchronization.
+ *
+ * @return 1 on success (match and store)
+ * 0 on no match
+ */
+static inline int bdk_atomic_compare_and_store64_nosync(uint64_t *ptr, uint64_t old_val, uint64_t new_val) __attribute__((always_inline));
+static inline int bdk_atomic_compare_and_store64_nosync(uint64_t *ptr, uint64_t old_val, uint64_t new_val)
+{
+ uint64_t val = old_val;
+
+ /* CN88XX pass 1.x has errata AP-22500: GlobalSync request during a multi-cycle ATOMIC stalls forever
+ Don't use compare and swap on these chips */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X))
+ {
+ asm volatile ("1: ldxr %x[v], [%[b]] \n"
+ " cmp %x[v], %x[o] \n"
+ " b.ne 2f \n"
+ " stxr %x[v], %x[n], [%[b]]\n" /* Returns zero on success */
+ " cbnz %x[v], 1b \n"
+ " mov %x[v], %x[o] \n"
+ "2: \n"
+ : [mem] "+m" (*ptr), [v] "=&r" (val)
+ : [b] "r" (ptr), [n] "r" (new_val), [o] "r" (old_val)
+ : );
+ }
+ else
+ {
+ asm volatile ("cas %x[o], %x[n], [%[b]]"
+ : [mem] "+m" (*ptr), [o] "+r" (val)
+ : [b] "r" (ptr), [n] "r" (new_val)
+ : );
+ }
+ return old_val == val;
+}
+
+/**
+ * Atomically compares old_val with the value at ptr and, if they match,
+ * stores new_val to ptr.
+ * If *ptr and old_val don't match, the function returns failure immediately.
+ * If they match, the function retries until new_val is stored atomically or
+ * until *ptr and old_val no longer match.
+ *
+ * Does memory synchronization that is required to use this as a locking primitive.
+ *
+ * @return 1 on success (match and store)
+ * 0 on no match
+ */
+static inline int bdk_atomic_compare_and_store64(uint64_t *ptr, uint64_t old_val, uint64_t new_val) __attribute__((always_inline));
+static inline int bdk_atomic_compare_and_store64(uint64_t *ptr, uint64_t old_val, uint64_t new_val)
+{
+ uint64_t val = old_val;
+
+ /* CN88XX pass 1.x has errata AP-22500: GlobalSync request during a multi-cycle ATOMIC stalls forever
+ Don't use compare and swap on these chips */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X))
+ {
+ asm volatile ("1: ldaxr %x[v], [%[b]] \n"
+ " cmp %x[v], %x[o] \n"
+ " b.ne 2f \n"
+ " stlxr %x[v], %x[n], [%[b]]\n" /* Returns zero on success */
+ " cbnz %x[v], 1b \n"
+ " mov %x[v], %x[o] \n"
+ "2: \n"
+ : [mem] "+m" (*ptr), [v] "=&r" (val)
+ : [b] "r" (ptr), [n] "r" (new_val), [o] "r" (old_val)
+ : );
+ }
+ else
+ {
+ asm volatile ("casal %x[o], %x[n], [%[b]]"
+ : [mem] "+m" (*ptr), [o] "+r" (val)
+ : [b] "r" (ptr), [n] "r" (new_val)
+ : );
+ }
+ return old_val == val;
+}
+
+/**
+ * Atomically adds a signed value to a 64 bit (aligned) memory location,
+ * and returns previous value.
+ *
+ * This version does not perform 'sync' operations to enforce memory
+ * operations. This should only be used when there are no memory operation
+ * ordering constraints. (This should NOT be used for reference counting -
+ * use the standard version instead.)
+ *
+ * @param ptr address in memory to add incr to
+ * @param incr amount to increment memory location by (signed)
+ *
+ * @return Value of memory location before increment
+ */
+static inline int64_t bdk_atomic_fetch_and_add64_nosync(int64_t *ptr, int64_t incr)
+{
+ int64_t result;
+ /* Atomic add with no ordering */
+ asm volatile ("ldadd %x[i], %x[r], [%[b]]"
+ : [r] "=r" (result), "+m" (*ptr)
+ : [i] "r" (incr), [b] "r" (ptr)
+ : "memory");
+ return result;
+}
+
+/**
+ * Atomically adds a signed value to a 64 bit (aligned) memory location,
+ * and returns previous value.
+ *
+ * Memory access ordering is enforced before/after the atomic operation,
+ * so no additional 'sync' instructions are required.
+ *
+ * @param ptr address in memory to add incr to
+ * @param incr amount to increment memory location by (signed)
+ *
+ * @return Value of memory location before increment
+ */
+static inline int64_t bdk_atomic_fetch_and_add64(int64_t *ptr, int64_t incr)
+{
+ int64_t result;
+ /* Atomic add with acquire/release */
+ asm volatile ("ldaddal %x[i], %x[r], [%[b]]"
+ : [r] "=r" (result), "+m" (*ptr)
+ : [i] "r" (incr), [b] "r" (ptr)
+ : "memory");
+ return result;
+}
+
+/**
+ * Atomically adds a signed value to a 32 bit (aligned) memory location,
+ * and returns previous value.
+ *
+ * This version does not perform 'sync' operations to enforce memory
+ * operations. This should only be used when there are no memory operation
+ * ordering constraints. (This should NOT be used for reference counting -
+ * use the standard version instead.)
+ *
+ * @param ptr address in memory to add incr to
+ * @param incr amount to increment memory location by (signed)
+ *
+ * @return Value of memory location before increment
+ */
+static inline int32_t bdk_atomic_fetch_and_add32_nosync(int32_t *ptr, int32_t incr)
+{
+ int32_t result;
+ /* Atomic add with no ordering */
+ asm volatile ("ldadd %w[i], %w[r], [%[b]]"
+ : [r] "=r" (result), "+m" (*ptr)
+ : [i] "r" (incr), [b] "r" (ptr)
+ : "memory");
+ return result;
+}
+
+/**
+ * Atomically adds a signed value to a 32 bit (aligned) memory location,
+ * and returns previous value.
+ *
+ * Memory access ordering is enforced before/after the atomic operation,
+ * so no additional 'sync' instructions are required.
+ *
+ * @param ptr address in memory to add incr to
+ * @param incr amount to increment memory location by (signed)
+ *
+ * @return Value of memory location before increment
+ */
+static inline int32_t bdk_atomic_fetch_and_add32(int32_t *ptr, int32_t incr)
+{
+ int32_t result;
+ /* Atomic add with acquire/release */
+ asm volatile ("ldaddal %w[i], %w[r], [%[b]]"
+ : [r] "=r" (result), "+m" (*ptr)
+ : [i] "r" (incr), [b] "r" (ptr)
+ : "memory");
+ return result;
+}
+
+/**
+ * Atomically set bits in a 64 bit (aligned) memory location,
+ * and returns previous value.
+ *
+ * This version does not perform 'sync' operations to enforce memory
+ * operations. This should only be used when there are no memory operation
+ * ordering constraints.
+ *
+ * @param ptr address in memory
+ * @param mask mask of bits to set
+ *
+ * @return Value of memory location before setting bits
+ */
+static inline uint64_t bdk_atomic_fetch_and_bset64_nosync(uint64_t *ptr, uint64_t mask)
+{
+ uint64_t result;
+ /* Atomic or with no ordering */
+ asm volatile ("ldset %x[i], %x[r], [%[b]]"
+ : [r] "=r" (result), "+m" (*ptr)
+ : [i] "r" (mask), [b] "r" (ptr)
+ : "memory");
+ return result;
+}
+
+/**
+ * Atomically set bits in a 32 bit (aligned) memory location,
+ * and returns previous value.
+ *
+ * This version does not perform 'sync' operations to enforce memory
+ * operations. This should only be used when there are no memory operation
+ * ordering constraints.
+ *
+ * @param ptr address in memory
+ * @param mask mask of bits to set
+ *
+ * @return Value of memory location before setting bits
+ */
+static inline uint32_t bdk_atomic_fetch_and_bset32_nosync(uint32_t *ptr, uint32_t mask)
+{
+ uint32_t result;
+ /* Atomic or with no ordering */
+ asm volatile ("ldset %w[i], %w[r], [%[b]]"
+ : [r] "=r" (result), "+m" (*ptr)
+ : [i] "r" (mask), [b] "r" (ptr)
+ : "memory");
+ return result;
+}
+
+/**
+ * Atomically clear bits in a 64 bit (aligned) memory location,
+ * and returns previous value.
+ *
+ * This version does not perform 'sync' operations to enforce memory
+ * operations. This should only be used when there are no memory operation
+ * ordering constraints.
+ *
+ * @param ptr address in memory
+ * @param mask mask of bits to clear
+ *
+ * @return Value of memory location before clearing bits
+ */
+static inline uint64_t bdk_atomic_fetch_and_bclr64_nosync(uint64_t *ptr, uint64_t mask)
+{
+ uint64_t result;
+ /* Atomic and with no ordering */
+ asm volatile ("ldclr %x[i], %x[r], [%[b]]"
+ : [r] "=r" (result), "+m" (*ptr)
+ : [i] "r" (mask), [b] "r" (ptr)
+ : "memory");
+ return result;
+}
+
+/**
+ * Atomically clear bits in a 32 bit (aligned) memory location,
+ * and returns previous value.
+ *
+ * This version does not perform 'sync' operations to enforce memory
+ * operations. This should only be used when there are no memory operation
+ * ordering constraints.
+ *
+ * @param ptr address in memory
+ * @param mask mask of bits to clear
+ *
+ * @return Value of memory location before clearing bits
+ */
+static inline uint32_t bdk_atomic_fetch_and_bclr32_nosync(uint32_t *ptr, uint32_t mask)
+{
+ uint32_t result;
+ /* Atomic and with no ordering */
+ asm volatile ("ldclr %w[i], %w[r], [%[b]]"
+ : [r] "=r" (result), "+m" (*ptr)
+ : [i] "r" (mask), [b] "r" (ptr)
+ : "memory");
+ return result;
+}
+
+/** @} */
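
As a sketch of the "locking primitive" use mentioned above (the 0 = free,
1 = held convention is this example's, not the BDK's):

    static int32_t lock; /* 0 = free, 1 = held */

    static void sketch_lock(void)
    {
        /* casal in the non-errata path gives acquire/release ordering */
        while (!bdk_atomic_compare_and_store32((uint32_t *)&lock, 0, 1))
            ; /* spin until the 0 -> 1 transition wins */
    }

    static void sketch_unlock(void)
    {
        bdk_atomic_set32(&lock, 0); /* stlr gives release semantics */
    }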
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-clock.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-clock.h
new file mode 100644
index 0000000000..d0d117c590
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-clock.h
@@ -0,0 +1,105 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Interface to Core, IO and DDR Clock.
+ *
+ * <hr>$Revision: 45089 $<hr>
+ *
+ * @addtogroup hal
+ * @{
+ */
+
+#define BDK_GTI_RATE 100000000ull
+
+/**
+ * Enumeration of different Clocks.
+ */
+typedef enum{
+ BDK_CLOCK_TIME, /**< Clock for telling time with fast access. Uses GTI in core */
+ BDK_CLOCK_MAIN_REF, /**< Main reference clock */
+ BDK_CLOCK_RCLK, /**< Clock used by cores, coherent bus and L2 cache. */
+ BDK_CLOCK_SCLK, /**< Clock used by IO blocks. */
+} bdk_clock_t;
+
+/**
+ * Called in __bdk_init to setup the global timer
+ */
+extern void bdk_clock_setup(void);
+
+/**
+ * Get cycle count based on the clock type.
+ *
+ * @param clock - Enumeration of the clock type.
+ * @return Number of cycles executed so far.
+ */
+static inline uint64_t bdk_clock_get_count(bdk_clock_t clock) __attribute__ ((always_inline));
+static inline uint64_t bdk_clock_get_count(bdk_clock_t clock)
+{
+ extern uint64_t __bdk_clock_get_count_slow(bdk_clock_t clock);
+ if (clock == BDK_CLOCK_TIME)
+ {
+ uint64_t clk;
+ BDK_MRS(CNTPCT_EL0, clk);
+ return clk;
+ }
+ else
+ return __bdk_clock_get_count_slow(clock);
+}
+
+/**
+ * Get clock rate based on the clock type.
+ *
+ * @param node Node to use in a NUMA setup. Can be an exact ID or a special value.
+ * @param clock - Enumeration of the clock type.
+ * @return Clock rate in Hz.
+ */
+static inline uint64_t bdk_clock_get_rate(bdk_node_t node, bdk_clock_t clock) __attribute__ ((always_inline, pure));
+static inline uint64_t bdk_clock_get_rate(bdk_node_t node, bdk_clock_t clock)
+{
+ extern uint64_t __bdk_clock_get_rate_slow(bdk_node_t node, bdk_clock_t clock) __attribute__ ((pure));
+ if (clock == BDK_CLOCK_TIME)
+ return BDK_GTI_RATE; /* Programmed as part of setup */
+ else
+ return __bdk_clock_get_rate_slow(node, clock);
+}
+
+/** @} */
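
A brief sketch of timing a code section with these functions (do_work()
is a placeholder for the code being measured):

    extern void do_work(void);           /* hypothetical code under test */

    uint64_t before = bdk_clock_get_count(BDK_CLOCK_TIME);
    do_work();
    uint64_t ticks = bdk_clock_get_count(BDK_CLOCK_TIME) - before;
    uint64_t usec = ticks * 1000000ull /
                    bdk_clock_get_rate(bdk_numa_local(), BDK_CLOCK_TIME);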
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-config.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-config.h
new file mode 100644
index 0000000000..6848fd687c
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-config.h
@@ -0,0 +1,357 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Functions for controlling the system configuration.
+ *
+ * <hr>$Revision: 49448 $<hr>
+ *
+ * @addtogroup hal
+ * @{
+ */
+
+#define BDK_CONFIG_MANUFACTURING_ADDRESS 0xff0000 /* 16MB - 64KB */
+
+typedef enum
+{
+ /* Board manufacturing data */
+ BDK_CONFIG_BOARD_MODEL, /* No parameters */
+ BDK_CONFIG_BOARD_REVISION, /* No parameters */
+ BDK_CONFIG_BOARD_SERIAL, /* No parameters */
+ BDK_CONFIG_MAC_ADDRESS, /* No parameters */
+ BDK_CONFIG_MAC_ADDRESS_NUM, /* No parameters */
+ BDK_CONFIG_MAC_ADDRESS_NUM_OVERRIDE, /* No parameters */
+
+ /* Board generic */
+ BDK_CONFIG_BMC_TWSI, /* No parameters */
+ BDK_CONFIG_WATCHDOG_TIMEOUT, /* No parameters */
+ BDK_CONFIG_TWSI_WRITE, /* Parameters: Write number */
+ BDK_CONFIG_MDIO_WRITE, /* Parameters: Write number */
+
+ /* Board wiring of network ports and PHYs */
+ BDK_CONFIG_PHY_ADDRESS, /* Parameters: Node, Interface, Port */
+ BDK_CONFIG_BGX_ENABLE, /* Parameters: Node, BGX, Port */
+ /* Non-EBB specific SFF8104 board and alike */
+ BDK_CONFIG_AQUANTIA_PHY, /* Parameters: Node, BGX, Port */
+
+ /* BDK Configuration params */
+ BDK_CONFIG_VERSION,
+ BDK_CONFIG_NUM_PACKET_BUFFERS,
+ BDK_CONFIG_PACKET_BUFFER_SIZE,
+ BDK_CONFIG_SHOW_LINK_STATUS,
+ BDK_CONFIG_COREMASK,
+ BDK_CONFIG_BOOT_MENU_TIMEOUT,
+ BDK_CONFIG_BOOT_PATH_OPTION,
+ BDK_CONFIG_BOOT_NEXT_STAGE,
+ BDK_CONFIG_TRACE,
+
+ /* Chip feature items */
+ BDK_CONFIG_MULTI_NODE, /* No parameters */
+ BDK_CONFIG_PCIE_EA, /* No parameters */
+ BDK_CONFIG_PCIE_ORDERING, /* No parameters */
+ BDK_CONFIG_PCIE_PRESET_REQUEST_VECTOR, /* Parameters: Node, Port */
+ BDK_CONFIG_PCIE_WIDTH, /* Parameters: Node, Port */
+ BDK_CONFIG_PCIE_PHYSICAL_SLOT, /* Parameters: Node, Port */
+ BDK_CONFIG_PCIE_FLASH, /* Parameters: Node, Port */
+ BDK_CONFIG_CCPI_LANE_REVERSE, /* No parameters */
+ BDK_CONFIG_CHIP_SKU, /* Parameter: Node */
+ BDK_CONFIG_CHIP_SERIAL, /* Parameter: Node */
+ BDK_CONFIG_CHIP_UNIQUE_ID, /* Parameter: Node */
+
+ /* QLM related config */
+ BDK_CONFIG_QLM_AUTO_CONFIG, /* Parameters: Node */
+ /* SFF8104 related QLM config */
+ BDK_CONFIG_QLM_DIP_AUTO_CONFIG, /* Parameters: Node */
+ BDK_CONFIG_QLM_MODE, /* Parameters: Node, QLM */
+ BDK_CONFIG_QLM_FREQ, /* Parameters: Node, QLM */
+ BDK_CONFIG_QLM_CLK, /* Parameters: Node, QLM */
+ BDK_CONFIG_QLM_TUNING_TX_SWING, /* Parameters: Node, QLM, Lane */
+ BDK_CONFIG_QLM_TUNING_TX_PREMPTAP, /* Parameters: Node, QLM, Lane */
+ BDK_CONFIG_QLM_TUNING_TX_GAIN, /* Parameters: Node, QLM, Lane */
+ BDK_CONFIG_QLM_TUNING_TX_VBOOST, /* Parameters: Node, QLM, Lane */
+ BDK_CONFIG_QLM_CHANNEL_LOSS, /* Parameters: Node, QLM */
+
+ /* DRAM configuration options */
+ BDK_CONFIG_DDR_SPEED, /* Parameters: Node */
+ BDK_CONFIG_DDR_ALT_REFCLK, /* Parameters: Node */
+ BDK_CONFIG_DDR_SPD_ADDR, /* Parameters: DIMM, LMC, Node */
+ BDK_CONFIG_DDR_SPD_DATA, /* Parameters: DIMM, LMC, Node */
+ BDK_CONFIG_DDR_RANKS_DQX_CTL, /* Parameters: Num Ranks, Num DIMMs, LMC, Node */
+ BDK_CONFIG_DDR_RANKS_WODT_MASK, /* Parameters: Num Ranks, Num DIMMs, LMC, Node */
+ BDK_CONFIG_DDR_RANKS_MODE1_PASR, /* Parameters: Num Ranks, Num DIMMs, Rank, LMC, Node */
+ BDK_CONFIG_DDR_RANKS_MODE1_ASR, /* Parameters: Num Ranks, Num DIMMs, Rank, LMC, Node */
+ BDK_CONFIG_DDR_RANKS_MODE1_SRT, /* Parameters: Num Ranks, Num DIMMs, Rank, LMC, Node */
+ BDK_CONFIG_DDR_RANKS_MODE1_RTT_WR, /* Parameters: Num Ranks, Num DIMMs, Rank, LMC, Node */
+ BDK_CONFIG_DDR_RANKS_MODE1_DIC, /* Parameters: Num Ranks, Num DIMMs, Rank, LMC, Node */
+ BDK_CONFIG_DDR_RANKS_MODE1_RTT_NOM, /* Parameters: Num Ranks, Num DIMMs, Rank, LMC, Node */
+ BDK_CONFIG_DDR_RANKS_MODE1_DB_OUTPUT_IMPEDANCE, /* Parameters: Num Ranks, Num DIMMs, LMC, Node */
+ BDK_CONFIG_DDR_RANKS_MODE2_RTT_PARK, /* Parameters: Num Ranks, Num DIMMs, Rank, LMC, Node */
+ BDK_CONFIG_DDR_RANKS_MODE2_VREF_VALUE, /* Parameters: Num Ranks, Num DIMMs, Rank, LMC, Node */
+ BDK_CONFIG_DDR_RANKS_MODE2_VREF_RANGE, /* Parameters: Num Ranks, Num DIMMs, Rank, LMC, Node */
+ BDK_CONFIG_DDR_RANKS_MODE2_VREFDQ_TRAIN_EN, /* Parameters: Num Ranks, Num DIMMs, LMC, Node */
+ BDK_CONFIG_DDR_RANKS_RODT_CTL, /* Parameters: Num Ranks, Num DIMMs, LMC, Node */
+ BDK_CONFIG_DDR_RANKS_RODT_MASK, /* Parameters: Num Ranks, Num DIMMs, LMC, Node */
+ BDK_CONFIG_DDR_CUSTOM_MIN_RTT_NOM_IDX, /* Parameters: LMC, Node */
+ BDK_CONFIG_DDR_CUSTOM_MAX_RTT_NOM_IDX, /* Parameters: LMC, Node */
+ BDK_CONFIG_DDR_CUSTOM_MIN_RODT_CTL, /* Parameters: LMC, Node */
+ BDK_CONFIG_DDR_CUSTOM_MAX_RODT_CTL, /* Parameters: LMC, Node */
+ BDK_CONFIG_DDR_CUSTOM_CK_CTL, /* Parameters: LMC, Node */
+ BDK_CONFIG_DDR_CUSTOM_CMD_CTL, /* Parameters: LMC, Node */
+ BDK_CONFIG_DDR_CUSTOM_CTL_CTL, /* Parameters: LMC, Node */
+ BDK_CONFIG_DDR_CUSTOM_MIN_CAS_LATENCY, /* Parameters: LMC, Node */
+ BDK_CONFIG_DDR_CUSTOM_OFFSET_EN, /* Parameters: LMC, Node */
+ BDK_CONFIG_DDR_CUSTOM_OFFSET, /* Parameters: Type(UDIMM,RDIMM), LMC, Node */
+ BDK_CONFIG_DDR_CUSTOM_RLEVEL_COMPUTE, /* Parameters: LMC, Node */
+ BDK_CONFIG_DDR_CUSTOM_RLEVEL_COMP_OFFSET, /* Parameters: Type(UDIMM,RDIMM), LMC, Node */
+ BDK_CONFIG_DDR_CUSTOM_DDR2T, /* Parameters: Type(UDIMM,RDIMM), LMC, Node */
+ BDK_CONFIG_DDR_CUSTOM_DISABLE_SEQUENTIAL_DELAY_CHECK, /* Parameters: LMC, Node */
+ BDK_CONFIG_DDR_CUSTOM_MAXIMUM_ADJACENT_RLEVEL_DELAY_INCREMENT, /* Parameters: LMC, Node */
+ BDK_CONFIG_DDR_CUSTOM_PARITY, /* Parameters: LMC, Node */
+ BDK_CONFIG_DDR_CUSTOM_FPRCH2, /* Parameters: LMC, Node */
+ BDK_CONFIG_DDR_CUSTOM_MODE32B, /* Parameters: LMC, Node */
+ BDK_CONFIG_DDR_CUSTOM_MEASURED_VREF, /* Parameters: LMC, Node */
+ BDK_CONFIG_DDR_CUSTOM_DLL_WRITE_OFFSET, /* Parameters: Byte, LMC, Node */
+ BDK_CONFIG_DDR_CUSTOM_DLL_READ_OFFSET, /* Parameters: Byte, LMC, Node */
+
+ /* High level DRAM options */
+ BDK_CONFIG_DRAM_VERBOSE, /* Parameters: Node */
+ BDK_CONFIG_DRAM_BOOT_TEST, /* Parameters: Node */
+ BDK_CONFIG_DRAM_CONFIG_GPIO, /* No parameters */
+ BDK_CONFIG_DRAM_SCRAMBLE, /* No parameters */
+
+ /* USB */
+ BDK_CONFIG_USB_PWR_GPIO, /* Parameters: Node, Port */
+ BDK_CONFIG_USB_PWR_GPIO_POLARITY, /* Parameters: Node, Port */
+ BDK_CONFIG_USB_REFCLK_SRC, /* Parameters: Node, Port */
+
+ /* Nitrox reset - For CN88XX SC and SNT part. High drives Nitrox DC_OK high */
+ BDK_CONFIG_NITROX_GPIO, /* Parameters: Node */
+
+ /* How EYE diagrams are captured from a QLM */
+ BDK_CONFIG_EYE_ZEROS, /* No parameters */
+ BDK_CONFIG_EYE_SAMPLE_TIME, /* No parameters */
+ BDK_CONFIG_EYE_SETTLE_TIME, /* No parameters */
+
+ /* SGPIO */
+ BDK_CONFIG_SGPIO_SCLOCK_FREQ, /* Parameters: Node */
+ BDK_CONFIG_SGPIO_PIN_POWER, /* Parameters: Node */
+ BDK_CONFIG_SGPIO_PIN_SCLOCK, /* Parameters: Node */
+ BDK_CONFIG_SGPIO_PIN_SLOAD, /* Parameters: Node */
+ BDK_CONFIG_SGPIO_PIN_SDATAOUT, /* Parameters: Node, Dataline */
+
+ /* VRM temperature throttling */
+ BDK_CONFIG_VRM_TEMP_TRIP, /* Parameters: Node */
+ BDK_CONFIG_VRM_TEMP_HIGH, /* Parameters: Node */
+ BDK_CONFIG_VRM_TEMP_LOW, /* Parameters: Node */
+ BDK_CONFIG_VRM_THROTTLE_NORMAL, /* Parameters: Node */
+ BDK_CONFIG_VRM_THROTTLE_THERM, /* Parameters: Node */
+
+ /* Generic GPIO, unrelated to a specific block */
+ BDK_CONFIG_GPIO_PIN_SELECT, /* Parameters: GPIO, Node */
+ BDK_CONFIG_GPIO_POLARITY, /* Parameters: GPIO, Node */
+
+ /* PBUS */
+ BDK_CONFIG_PBUS_CFG, /* Parameters: Region, Node */
+ BDK_CONFIG_PBUS_TIM, /* Parameters: Region, Node */
+
+ /* Trusted boot information */
+ BDK_CONFIG_TRUST_CSIB, /* No parameters */
+ BDK_CONFIG_TRUST_ROT_ADDR, /* No parameters */
+ BDK_CONFIG_TRUST_BSSK_ADDR, /* No parameters */
+
+ __BDK_CONFIG_END
+} bdk_config_t;
+
+/**
+ * Internal BDK function to initialize the config system. Must be called before
+ * any configuration functions are called
+ */
+extern void __bdk_config_init(void);
+
+/**
+ * Return a help string for the given configuration parameter
+ *
+ * @param cfg_item Configuration parameter to get help for
+ *
+ * @return Help string for the user
+ */
+extern const char *bdk_config_get_help(bdk_config_t cfg_item);
+
+/**
+ * Get an integer configuration item
+ *
+ * @param cfg_item Config item to get. If the item takes parameters (see bdk_config_t), then the
+ * parameters are listed following cfg_item.
+ *
+ * @return The value of the configuration item, or its default value if the item is not set
+ */
+extern int64_t bdk_config_get_int(bdk_config_t cfg_item, ...);
+
+/**
+ * Get a string configuration item
+ *
+ * @param cfg_item Config item to get. If the item takes parameters (see bdk_config_t), then the
+ * parameters are listed following cfg_item.
+ *
+ * @return The value of the configuration item, or its default value if the item is not set
+ */
+extern const char *bdk_config_get_str(bdk_config_t cfg_item, ...);
+
+/**
+ * Get a binary blob
+ *
+ * @param blob_size Integer to receive the size of the blob
+ * @param cfg_item Config item to get. If the item takes parameters (see bdk_config_t), then the
+ * parameters are listed following cfg_item.
+ *
+ * @return The value of the configuration item, or its default value if the item is not set
+ */
+extern const void *bdk_config_get_blob(int *blob_size, bdk_config_t cfg_item, ...);
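+
+/**
+ * Illustrative sketch (not from the original sources): reading config items.
+ * Items that take parameters receive them as trailing varargs in the order
+ * listed in bdk_config_t; node is a placeholder for a valid node id.
+ *
+ * @code
+ * const char *model = bdk_config_get_str(BDK_CONFIG_BOARD_MODEL); // no parameters
+ * int64_t ddr_speed = bdk_config_get_int(BDK_CONFIG_DDR_SPEED, node);
+ * @endcode
+ */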
+
+/**
+ * Set an integer configuration item. Note this only sets the item in memory,
+ * persistent storage is not updated. The optional parameters for the setting are
+ * not supplied, meaning this function only changes the global default.
+ *
+ * @param value Configuration item value
+ * @param cfg_item Config item to set. If the item takes parameters (see bdk_config_t), then the
+ * parameters are listed following cfg_item.
+ */
+extern void bdk_config_set_int_no_param(int64_t value, bdk_config_t cfg_item);
+
+/**
+ * Set an integer configuration item. Note this only sets the item in memory,
+ * persistent storage is not updated.
+ *
+ * @param value Configuration item value
+ * @param cfg_item Config item to set. If the item takes parameters (see bdk_config_t), then the
+ * parameters are listed following cfg_item.
+ */
+extern void bdk_config_set_int(int64_t value, bdk_config_t cfg_item, ...);
+
+/**
+ * Set an integer configuration item. Note this only sets the item in memory,
+ * persistent storage is not updated.
+ *
+ * @param value Configuration item value
+ * @param cfg_item Config item to set. If the item takes parameters (see bdk_config_t), then the
+ * parameters are listed following cfg_item.
+ */
+extern void bdk_config_set_str(const char *value, bdk_config_t cfg_item, ...);
+
+/**
+ * Set a blob configuration item. Note this only sets the
+ * item in memory, persistent storage is not updated. The optional
+ * parameters for the setting are not supplied, meaning this function
+ * only changes the global default.
+ *
+ * @param size Size of the item in bytes. A size of zero removes the device tree field
+ * @param value Configuration item value
+ * @param cfg_item Config item to set. If the item takes parameters (see bdk_config_t), then the
+ * parameters are listed following cfg_item.
+ */
+extern void bdk_config_set_blob_no_param(int size, const void *value, bdk_config_t cfg_item);
+
+/**
+ * Set a blob configuration item. Note this only sets the
+ * item in memory, persistent storage is not updated.
+ *
+ * @param size Size of the item in bytes. A size of zero removes the device tree field
+ * @param value Configuration item value
+ * @param cfg_item Config item to set. If the item takes parameters (see bdk_config_t), then the
+ * parameters are listed following cfg_item.
+ */
+extern void bdk_config_set_blob(int size, const void *value, bdk_config_t cfg_item, ...);
+
+/**
+ * Display the active configuration
+ */
+extern void bdk_config_show(void);
+
+/**
+ * Display a list of all possible config items with help text
+ */
+extern void bdk_config_help(void);
+
+/**
+ * Save the current configuration to flash
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_config_save(void);
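+
+/**
+ * Illustrative sketch (not from the original sources): changing a setting in
+ * memory and then persisting it. The specific item and value are assumptions
+ * chosen for the example.
+ *
+ * @code
+ * bdk_config_set_int(600, BDK_CONFIG_WATCHDOG_TIMEOUT); // no parameters
+ * if (bdk_config_save() != 0)
+ *     ; // persisting to flash failed; handle as appropriate
+ * @endcode
+ */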
+
+/**
+ * Takes the current live device tree and exports it to a memory address suitable
+ * for passing to the next binary in register X1.
+ *
+ * @return Physical address of the device tree, or 0 on failure
+ */
+extern uint64_t __bdk_config_export_to_mem(void);
+
+/**
+ * Return a pointer to the device tree used for configuration
+ *
+ * @return FDT or NULL on failure
+ */
+extern void* bdk_config_get_fdt(void);
+
+/**
+ * Set the device tree used for configuration
+ *
+ * @param fdt Device tree to use. Memory is assumed to be from malloc() and bdk_config takes
+ * over ownership on success
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_config_set_fdt(void *fdt);
+
+/**
+ * Write all default values to a FDT. Missing config items get defaults in the
+ * BDK config; this function adds those defaults to the FDT so that other code
+ * gets the default value without needing special handling.
+ *
+ * @param fdt FDT structure to fill defaults into
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_config_expand_defaults(void *fdt);
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-crc.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-crc.h
new file mode 100644
index 0000000000..05fc59a378
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-crc.h
@@ -0,0 +1,53 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Module to support CRC.
+ *
+ * <hr>$Revision: 49448 $<hr>
+ *
+ * @addtogroup hal
+ * @{
+ */
+
+uint32_t bdk_crc32(const void *ptr, int len, uint32_t iv);
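+
+/**
+ * Illustrative sketch (not from the original sources): CRC32 over a buffer.
+ * The initial value of 0 is an assumption about the intended convention;
+ * whether iv can be used to chain a CRC across buffers is not documented here.
+ *
+ * @code
+ * uint8_t buf[64] = { 0 };
+ * uint32_t crc = bdk_crc32(buf, sizeof(buf), 0);
+ * @endcode
+ */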
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-ecam.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-ecam.h
new file mode 100644
index 0000000000..3f90d11dc2
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-ecam.h
@@ -0,0 +1,97 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Interface to the ECAMs.
+ *
+ * @addtogroup hal
+ * @{
+ */
+
+/**
+ * Read from an ECAM
+ *
+ * @param device Device to read from
+ * @param reg Register to read
+ *
+ * @return Result of the read, or -1 on failure
+ */
+extern uint32_t bdk_ecam_read32(const bdk_device_t *device, int reg);
+
+/**
+ * Write to an ECAM register
+ *
+ * @param device Device to write to
+ * @param reg Register to write
+ * @param value Value to write
+ */
+extern void bdk_ecam_write32(const bdk_device_t *device, int reg, uint32_t value);
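+
+/**
+ * Illustrative sketch (not from the original sources): reading the standard
+ * PCI vendor/device ID word from a device's config space. The device pointer
+ * is assumed to come from the bdk-device layer.
+ *
+ * @code
+ * uint32_t id = bdk_ecam_read32(device, 0); // offset 0: device ID : vendor ID
+ * if (id == (uint32_t)-1)
+ *     ; // access failed
+ * @endcode
+ */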
+
+/**
+ * Build an ECAM config space request address for a device
+ *
+ * @param device Device being accessed
+ * @param reg Register to access
+ *
+ * @return 64bit IO address
+ */
+extern uint64_t __bdk_ecam_build_address(const bdk_device_t *device, int reg);
+
+/**
+ * Return the number of internal ECAMS on a node.
+ *
+ * @param node Node to query
+ *
+ * @return Number of ECAMs available
+ */
+extern int bdk_ecam_get_num(bdk_node_t node);
+
+/**
+ * Scan all ECAMs for devices and add them to bdk-device
+ *
+ * @param node Node to scan
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_ecam_scan_all(bdk_node_t node) BDK_WEAK;
+
+/** @} */
+
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-error-report.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-error-report.h
new file mode 100644
index 0000000000..520780fa0a
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-error-report.h
@@ -0,0 +1,62 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Interface to hardware error reporting.
+ *
+ * <hr>$Revision: 49448 $<hr>
+ *
+ * @addtogroup hal
+ * @{
+ */
+
+/**
+ * Call this function to check if any error interrupts are
+ * set in the chip.
+ */
+extern void (*bdk_error_check)(bdk_node_t node) BDK_WEAK;
+
+/**
+ * Call this function to setup error enables.
+ */
+extern void bdk_error_enable(bdk_node_t node) BDK_WEAK;
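+
+/**
+ * Illustrative sketch (not from the original sources): enabling error
+ * reporting, then polling for errors. Checking the weak bdk_error_check
+ * pointer for NULL before calling is a defensive assumption.
+ *
+ * @code
+ * bdk_error_enable(node);
+ * if (bdk_error_check)
+ *     bdk_error_check(node);
+ * @endcode
+ */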
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-fpa.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-fpa.h
new file mode 100644
index 0000000000..8094cd0fa0
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-fpa.h
@@ -0,0 +1,162 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Interface to the hardware Free Pool Allocator.
+ *
+ * @addtogroup hal
+ * @{
+ */
+#define BDK_FPA_NUM_AURAS 128 /* Must not be bigger than FPA_CONST[auras] or NPA_AF_LFX_AURAS_CFG[loc_aura_size] */
+
+/**
+ * This enum represents the FPA hardware pools in use by the BDK
+ */
+typedef enum
+{
+ BDK_FPA_PACKET_POOL, /* Stores PKI packet buffers */
+ BDK_FPA_SSO_POOL, /* Used for internal storage in the SSO, CN83XX and CN9XXX */
+ BDK_FPA_PKO_POOL, /* Used for queue storage in the CN83XX PKO and CN9XXX SQB */
+ BDK_FPA_NUM_POOLS = 16 /* Can be 16, or 32 for CN83XX. Must not be bigger than FPA_CONST[pools] */
+} bdk_fpa_pool_t;
+
+/**
+ * Structure representing the global state of the FPA
+ */
+typedef struct
+{
+ uint16_t buffer_size_pool[BDK_FPA_NUM_POOLS];
+ uint16_t buffer_size_aura[BDK_FPA_NUM_AURAS];
+ int next_free_aura;
+ int next_free_lf; /* Used on CN9XXX for RVU PF allocation */
+ void *npa_auras_ptr; /* Pointer to Aura Context Table: BDK_FPA_NUM_AURAS * (Aura HW context) */
+ void *npa_pools_ptr; /* Pointer to Pool Context Table: BDK_FPA_NUM_POOLS * (Pool HW context) */
+} __bdk_fpa_state_t;
+
+extern __bdk_fpa_state_t __bdk_fpa_node_state[BDK_NUMA_MAX_NODES];
+
+/**
+ * Get a new block from an aura
+ *
+ * @param node Node to use in a Numa setup. See bdk-numa.h
+ * @param aura Aura to get the block from (0-BDK_FPA_NUM_AURAS)
+ *
+ * @return Pointer to the block or NULL on failure
+ */
+extern void* (*bdk_fpa_alloc)(bdk_node_t node, int aura);
+
+/**
+ * Free a block allocated with bdk_fpa_alloc(). Does NOT provide memory ordering
+ * for core stores. Software must ensure all pending writes are flushed before
+ * calling this function.
+ *
+ * @param node Node to use in a Numa setup. See bdk-numa.h
+ * @param address Physical address to free to the FPA aura
+ * @param aura Aura number to free to (0-BDK_FPA_NUM_AURAS)
+ * @param num_cache_lines
+ *                   Cache lines to invalidate. Use this if the data in the buffer no longer
+ *                   requires cache coherency. Normally best to set this to zero.
+ */
+extern void (*__bdk_fpa_raw_free)(bdk_node_t node, uint64_t address, int aura, int num_cache_lines);
+
+/**
+ * Fill a pool with buffers
+ *
+ * @param node Node to use in a Numa setup. See bdk-numa.h
+ * @param pool Pool to initialize (0 <= pool < BDK_FPA_NUM_POOLS)
+ * @param num_blocks Number of blocks
+ *
+ * @return Zero on Success, negative on failure
+ */
+extern int (*bdk_fpa_fill_pool)(bdk_node_t node, bdk_fpa_pool_t pool, int num_blocks);
+
+/**
+ * Initialize an Aura for a specific pool
+ *
+ * @param node Node to use in a Numa setup. See bdk-numa.h
+ * @param aura Aura to initialize, or -1 to dynamically allocate a free aura
+ * @param pool Pool this aura is for (0 <= pool < BDK_FPA_NUM_POOLS)
+ * @param num_blocks Number of buffers to allow this aura to contain. This may differ
+ *                   from the number of blocks in the pool
+ *
+ * @return Aura number or negative on failure
+ */
+extern int (*bdk_fpa_init_aura)(bdk_node_t node, int aura, bdk_fpa_pool_t pool, int num_blocks);
+
+/**
+ * Free a block allocated with bdk_fpa_alloc(). Provides memory ordering
+ * for core stores.
+ *
+ * @param node Node to use in a Numa setup. See bdk-numa.h
+ * @param ptr Pointer to the block to free
+ * @param aura Aura number to free to (0-BDK_FPA_NUM_AURAS)
+ * @param num_cache_lines
+ *                   Cache lines to invalidate. Use this if the data in the buffer no longer
+ *                   requires cache coherency. Normally best to set this to zero.
+ */
+static inline void bdk_fpa_free(bdk_node_t node, void *ptr, int aura, int num_cache_lines)
+{
+ BDK_WMB;
+ __bdk_fpa_raw_free(node, bdk_ptr_to_phys(ptr), aura, num_cache_lines);
+}
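+
+/**
+ * Illustrative sketch (not from the original sources): allocating and
+ * returning a buffer. The pool is assumed to have been filled with
+ * bdk_fpa_fill_pool() beforehand; the block count of 256 is arbitrary.
+ *
+ * @code
+ * int aura = bdk_fpa_init_aura(node, -1, BDK_FPA_PACKET_POOL, 256);
+ * void *buf = (aura >= 0) ? bdk_fpa_alloc(node, aura) : NULL;
+ * if (buf)
+ *     bdk_fpa_free(node, buf, aura, 0);
+ * @endcode
+ */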
+
+/**
+ * Get the size of blocks controlled by the aura
+ *
+ * @param node Node to use in a Numa setup. See bdk-numa.h
+ * @param aura Aura number to access (0-BDK_FPA_NUM_AURAS)
+ *
+ * @return Size of the block in bytes
+ */
+static inline int bdk_fpa_get_block_size(bdk_node_t node, int aura)
+{
+ __bdk_fpa_state_t *fpa_state = &__bdk_fpa_node_state[node];
+ return fpa_state->buffer_size_aura[aura];
+}
+
+/**
+ * Global FPA initialization
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_fpa_init(bdk_node_t node);
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-gpio.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-gpio.h
new file mode 100644
index 0000000000..8be3a4c1ad
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-gpio.h
@@ -0,0 +1,111 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * General Purpose IO interface.
+ *
+ * <hr>$Revision: 49448 $<hr>
+ *
+ * @addtogroup hal
+ * @{
+ */
+
+
+/**
+ * Initialize a single GPIO as either an input or output. If it is
+ * an output, also set its output value.
+ *
+ * @param node Node to use in a Numa setup. Can be an exact ID or a special
+ * value.
+ * @param gpio GPIO to initialize
+ * @param is_output Non zero if this GPIO should be an output
+ * @param output_value
+ * Value of the GPIO if it should be an output. Not used if the
+ * GPIO isn't an output.
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_gpio_initialize(bdk_node_t node, int gpio, int is_output, int output_value);
+
+/**
+ * GPIO Read Data
+ *
+ * @param node Node GPIO block is on
+ * @param gpio_block GPIO block to access. Each block contains up to 64 GPIOs
+ *
+ * @return Status of the GPIO pins for the given block
+ */
+extern uint64_t bdk_gpio_read(bdk_node_t node, int gpio_block);
+
+/**
+ * GPIO Clear pin
+ *
+ * @param node Node GPIO block is on
+ * @param gpio_block GPIO block to access. Each block contains up to 64 GPIOs
+ * @param clear_mask Bit mask to indicate which bits to drive to '0'.
+ */
+extern void bdk_gpio_clear(bdk_node_t node, int gpio_block, uint64_t clear_mask);
+
+/**
+ * GPIO Set pin
+ *
+ * @param node Node GPIO block is on
+ * @param gpio_block GPIO block to access. Each block contains up to 64 GPIOs
+ * @param set_mask Bit mask to indicate which bits to drive to '1'.
+ */
+extern void bdk_gpio_set(bdk_node_t node, int gpio_block, uint64_t set_mask);
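+
+/**
+ * Illustrative sketch (not from the original sources): driving a GPIO pin.
+ * Pin 7 in block 0 (GPIOs 0-63) is an arbitrary choice for the example.
+ *
+ * @code
+ * bdk_gpio_initialize(node, 7, 1, 0); // GPIO 7 as output, initially low
+ * bdk_gpio_set(node, 0, 1ull << 7);   // drive it high
+ * bdk_gpio_clear(node, 0, 1ull << 7); // drive it low again
+ * @endcode
+ */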
+
+/** GPIO Select pin
+ *
+ * @param node CPU node
+ * @param gpio GPIO number
+ * @param pin Pin number
+ */
+extern void bdk_gpio_select_pin(bdk_node_t node, int gpio, int pin);
+
+/**
+ * Return the number of GPIO pins on this chip
+ *
+ * @return Number of GPIO pins
+ */
+extern int bdk_gpio_get_num(void);
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-hal.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-hal.h
new file mode 100644
index 0000000000..458ffede7c
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-hal.h
@@ -0,0 +1,98 @@
+#ifndef __BDK_HAL_H__
+#define __BDK_HAL_H__
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Master include file for hardware support. Use bdk.h instead
+ * of including this file directly.
+ *
+ * <hr>$Revision: 49448 $<hr>
+ *
+ * @defgroup hal Hardware abstraction layer
+ */
+
+/* Global define to control whether the BDK configures units to send
+   Don't-Write-Back (DWB) requests for freed buffers. Set to 1 to enable
+   DWB, 0 to disable it. As the BDK normally fits inside L2, sending
+   DWB requests just causes more L2 operations without benefit. */
+#define BDK_USE_DWB 0
+
+#include "bdk-access.h"
+#include "bdk-utils.h"
+#include "bdk-config.h"
+#include "bdk-atomic.h"
+#include "bdk-spinlock.h"
+#include "bdk-rvu.h"
+#include "bdk-clock.h"
+#include "bdk-crc.h"
+#include "bdk-error-report.h"
+#include "bdk-gpio.h"
+#include "device/bdk-device.h"
+#include "if/bdk-if.h"
+#include "usb/bdk-usb-xhci-intf.h"
+#include "bdk-ecam.h"
+#include "bdk-fpa.h"
+#include "bdk-pbus-flash.h"
+#include "bdk-pki.h"
+#include "bdk-pko.h"
+#include "bdk-power-burn.h"
+#include "bdk-sso.h"
+#include "bdk-nic.h"
+#include "bdk-nix.h"
+#include "bdk-key.h"
+#include "bdk-l2c.h"
+#include "bdk-mdio.h"
+#include "bdk-mpi.h"
+#include "bdk-mmc.h"
+#include "bdk-pcie.h"
+#include "bdk-pcie-flash.h"
+#include "bdk-qlm.h"
+#include "qlm/bdk-qlm-errata-cn8xxx.h"
+#include "bdk-rng.h"
+#include "bdk-sata.h"
+#include "bdk-twsi.h"
+#include "bdk-usb.h"
+#include "bdk-access-native.h"
+#include "bdk-tns.h"
+#include "bdk-vrm.h"
+#include "aq_api/bdk-aqr-support.h"
+#endif
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-key.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-key.h
new file mode 100644
index 0000000000..c16bfd7559
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-key.h
@@ -0,0 +1,86 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Interface to the on-chip key memory. Key memory is 8 KB of
+ * on-chip storage that is inaccessible from off chip. It can
+ * also be cleared using an external hardware pin.
+ *
+ * <hr>$Revision: 49448 $<hr>
+ *
+ * @addtogroup hal
+ * @{
+ */
+
+#define BDK_KEY_MEM_SIZE 8192 /* Size in bytes */
+
+/**
+ * Read from KEY memory
+ *
+ * @param node Which node to use
+ * @param address Address (byte) in key memory to read
+ * 0 <= address < BDK_KEY_MEM_SIZE
+ * @return Value from key memory
+ */
+extern uint64_t bdk_key_read(bdk_node_t node, uint64_t address);
+
+/**
+ * Write to KEY memory
+ *
+ * @param node Which node to use
+ * @param address Address (byte) in key memory to write
+ * 0 <= address < BDK_KEY_MEM_SIZE
+ * @param value Value to write to key memory
+ */
+extern void bdk_key_write(bdk_node_t node, uint64_t address, uint64_t value);
+
+/**
+ * Allocate an area in key memory for storing data. Return a pointer to the
+ * memory on success.
+ *
+ * @param node Node to allocate on
+ * @param size_bytes Number of bytes to allocate
+ *
+ * @return Pointer to key memory, or NULL on failure
+ */
+extern void* bdk_key_alloc(bdk_node_t node, int size_bytes);
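+
+/**
+ * Illustrative sketch (not from the original sources): reserving key memory
+ * and storing a value there via the returned pointer. The 32-byte size is
+ * arbitrary and secret_value is a placeholder.
+ *
+ * @code
+ * uint64_t *slot = bdk_key_alloc(node, 32);
+ * if (slot)
+ *     slot[0] = secret_value; // stays on chip, not readable off chip
+ * @endcode
+ */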
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-l2c.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-l2c.h
new file mode 100644
index 0000000000..cf14357f83
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-l2c.h
@@ -0,0 +1,179 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Interface to the Level 2 Cache (L2C) control, measurement, and debugging
+ * facilities.
+ *
+ * <hr>$Revision: 50663 $<hr>
+ *
+ * @addtogroup hal
+ * @{
+ */
+
+/**
+ * Perform one time initialization of L2 for improved
+ * performance. This can be called after L2 is in use.
+ *
+ * @return Zero on success, negative on failure.
+ */
+int bdk_l2c_initialize(bdk_node_t node);
+
+/**
+ * Return the L2 Cache way partitioning for a given core.
+ *
+ * @param node Node to use in a Numa setup. Can be an exact ID or a special
+ * value.
+ * @param core The core processor of interest.
+ *
+ * @return The mask specifying the partitioning. 0 bits in the mask indicate
+ *         the cache 'ways' that a core can evict from.
+ * -1 on error
+ */
+int bdk_l2c_get_core_way_partition(bdk_node_t node, int core);
+
+/**
+ * Partitions the L2 cache for a core
+ *
+ * @param node Node to use in a Numa setup. Can be an exact ID or a special
+ * value.
+ * @param core The core that the partitioning applies to.
+ * @param mask The partitioning of the ways expressed as a binary mask. A 0 bit allows the core
+ * to evict cache lines from a way, while a 1 bit blocks the core from evicting any lines
+ * from that way. There must be at least one allowed way (0 bit) in the mask.
+ *
+ * @note If any ways are blocked for all cores and the HW blocks, then those ways will never have
+ * any cache lines evicted from them. All cores and the hardware blocks are free to read from
+ * all ways regardless of the partitioning.
+ */
+int bdk_l2c_set_core_way_partition(bdk_node_t node, int core, uint32_t mask);
+
+/**
+ * Return the L2 Cache way partitioning for the hw blocks.
+ *
+ * @param node Node to use in a Numa setup. Can be an exact ID or a special value.
+ * @return The mask specifying the reserved way. 0 bits in the mask indicate
+ *         the cache 'ways' that a core can evict from.
+ * -1 on error
+ */
+int bdk_l2c_get_hw_way_partition(bdk_node_t node);
+
+/**
+ * Partitions the L2 cache for the hardware blocks.
+ *
+ * @param node Node to use in a Numa setup. Can be an exact ID or a special
+ * value.
+ * @param mask The partitioning of the ways expressed as a binary mask. A 0 bit allows the core
+ * to evict cache lines from a way, while a 1 bit blocks the core from evicting any lines
+ * from that way. There must be at least one allowed way (0 bit) in the mask.
+ *
+ * @note If any ways are blocked for all cores and the HW blocks, then those ways will never have
+ * any cache lines evicted from them. All cores and the hardware blocks are free to read from
+ * all ways regardless of the partitioning.
+ */
+int bdk_l2c_set_hw_way_partition(bdk_node_t node, uint32_t mask);
+
+/**
+ * Locks a specified memory region in the L2 cache.
+ *
+ * Note that if not all lines can be locked, it means all but one
+ * of the ways (associations) available to the locking core are
+ * already locked. Having only one association available for
+ * normal caching may have a significant adverse effect on performance.
+ * Care should be taken to ensure that enough of the L2 cache is left
+ * unlocked to allow for normal caching of DRAM.
+ *
+ * @param node Node to use in a Numa setup. Can be an exact ID or a special
+ * value.
+ * @param start Physical address of the start of the region to lock
+ * @param len Length (in bytes) of region to lock
+ *
+ * @return Number of requested lines that were not locked.
+ * 0 on success (all locked)
+ */
+int bdk_l2c_lock_mem_region(bdk_node_t node, uint64_t start, uint64_t len);
+
+/**
+ * Unlocks a region of memory that is locked in the L2 cache
+ *
+ * @param node Node to use in a Numa setup. Can be an exact ID or a special
+ * value.
+ * @param start start physical address
+ * @param len length (in bytes) to unlock
+ *
+ * @return Number of locked lines that the call unlocked
+ */
+int bdk_l2c_unlock_mem_region(bdk_node_t node, uint64_t start, uint64_t len);
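+
+/**
+ * Illustrative sketch (not from the original sources): pinning a 4 KB region
+ * in L2 around a latency-critical section. The physical address pa is a
+ * placeholder.
+ *
+ * @code
+ * int missed = bdk_l2c_lock_mem_region(node, pa, 4096);
+ * if (missed)
+ *     ; // some lines could not be locked
+ * // ... latency-critical accesses ...
+ * bdk_l2c_unlock_mem_region(node, pa, 4096);
+ * @endcode
+ */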
+
+/**
+ * Flushes (and unlocks) the entire L2 cache.
+ */
+void bdk_l2c_flush(bdk_node_t node);
+
+/**
+ * Return the size of the L2 cache.
+ *
+ * @return Size of the L2 cache in bytes, or
+ *         -1 on error (unrecognized model)
+ */
+int bdk_l2c_get_cache_size_bytes(bdk_node_t node);
+
+/**
+ * Return the number of sets in the L2 Cache
+ *
+ * @return Number of sets in the L2 cache
+ */
+int bdk_l2c_get_num_sets(bdk_node_t node);
+
+/**
+ * Return the number of associations in the L2 Cache
+ *
+ * @return Number of associations (ways) in the L2 cache
+ */
+int bdk_l2c_get_num_assoc(bdk_node_t node);
+
+/**
+ * Return true if the BDK has locked itself in L2
+ *
+ * @return Non-zero if the BDK has locked itself in L2
+ */
+int bdk_l2c_is_locked(bdk_node_t node);
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-mdio.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-mdio.h
new file mode 100644
index 0000000000..889f8d5d56
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-mdio.h
@@ -0,0 +1,476 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Interface to the SMI/MDIO hardware, including support for both IEEE 802.3
+ * clause 22 and clause 45 operations.
+ *
+ * <hr>$Revision: 51350 $<hr>
+ *
+ * @addtogroup hal
+ * @{
+ */
+
+/**
+ * PHY register 0 from the 802.3 spec
+ */
+#define BDK_MDIO_PHY_REG_CONTROL 0
+typedef union
+{
+ uint16_t u16;
+ struct
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint16_t reset : 1;
+ uint16_t loopback : 1;
+ uint16_t speed_lsb : 1;
+ uint16_t autoneg_enable : 1;
+ uint16_t power_down : 1;
+ uint16_t isolate : 1;
+ uint16_t restart_autoneg : 1;
+ uint16_t duplex : 1;
+ uint16_t collision_test : 1;
+ uint16_t speed_msb : 1;
+ uint16_t unidirectional_enable : 1;
+ uint16_t reserved_0_4 : 5;
+#else
+ uint16_t reserved_0_4 : 5;
+ uint16_t unidirectional_enable : 1;
+ uint16_t speed_msb : 1;
+ uint16_t collision_test : 1;
+ uint16_t duplex : 1;
+ uint16_t restart_autoneg : 1;
+ uint16_t isolate : 1;
+ uint16_t power_down : 1;
+ uint16_t autoneg_enable : 1;
+ uint16_t speed_lsb : 1;
+ uint16_t loopback : 1;
+ uint16_t reset : 1;
+#endif
+ } s;
+} bdk_mdio_phy_reg_control_t;
+
+/**
+ * PHY register 1 from the 802.3 spec
+ */
+#define BDK_MDIO_PHY_REG_STATUS 1
+typedef union
+{
+ uint16_t u16;
+ struct
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint16_t capable_100base_t4 : 1;
+ uint16_t capable_100base_x_full : 1;
+ uint16_t capable_100base_x_half : 1;
+ uint16_t capable_10_full : 1;
+ uint16_t capable_10_half : 1;
+ uint16_t capable_100base_t2_full : 1;
+ uint16_t capable_100base_t2_half : 1;
+ uint16_t capable_extended_status : 1;
+ uint16_t capable_unidirectional : 1;
+ uint16_t capable_mf_preamble_suppression : 1;
+ uint16_t autoneg_complete : 1;
+ uint16_t remote_fault : 1;
+ uint16_t capable_autoneg : 1;
+ uint16_t link_status : 1;
+ uint16_t jabber_detect : 1;
+ uint16_t capable_extended_registers : 1;
+#else
+ uint16_t capable_extended_registers : 1;
+ uint16_t jabber_detect : 1;
+ uint16_t link_status : 1;
+ uint16_t capable_autoneg : 1;
+ uint16_t remote_fault : 1;
+ uint16_t autoneg_complete : 1;
+ uint16_t capable_mf_preamble_suppression : 1;
+ uint16_t capable_unidirectional : 1;
+ uint16_t capable_extended_status : 1;
+ uint16_t capable_100base_t2_half : 1;
+ uint16_t capable_100base_t2_full : 1;
+ uint16_t capable_10_half : 1;
+ uint16_t capable_10_full : 1;
+ uint16_t capable_100base_x_half : 1;
+ uint16_t capable_100base_x_full : 1;
+ uint16_t capable_100base_t4 : 1;
+#endif
+ } s;
+} bdk_mdio_phy_reg_status_t;
+
+/**
+ * PHY register 2 from the 802.3 spec
+ */
+#define BDK_MDIO_PHY_REG_ID1 2
+typedef union
+{
+ uint16_t u16;
+ struct
+ {
+ uint16_t oui_bits_3_18;
+ } s;
+} bdk_mdio_phy_reg_id1_t;
+
+/**
+ * PHY register 3 from the 802.3 spec
+ */
+#define BDK_MDIO_PHY_REG_ID2 3
+typedef union
+{
+ uint16_t u16;
+ struct
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint16_t oui_bits_19_24 : 6;
+ uint16_t model : 6;
+ uint16_t revision : 4;
+#else
+ uint16_t revision : 4;
+ uint16_t model : 6;
+ uint16_t oui_bits_19_24 : 6;
+#endif
+ } s;
+} bdk_mdio_phy_reg_id2_t;
+
+/**
+ * PHY register 4 from the 802.3 spec
+ */
+#define BDK_MDIO_PHY_REG_AUTONEG_ADVER 4
+typedef union
+{
+ uint16_t u16;
+ struct
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint16_t next_page : 1;
+ uint16_t reserved_14 : 1;
+ uint16_t remote_fault : 1;
+ uint16_t reserved_12 : 1;
+ uint16_t asymmetric_pause : 1;
+ uint16_t pause : 1;
+ uint16_t advert_100base_t4 : 1;
+ uint16_t advert_100base_tx_full : 1;
+ uint16_t advert_100base_tx_half : 1;
+ uint16_t advert_10base_tx_full : 1;
+ uint16_t advert_10base_tx_half : 1;
+ uint16_t selector : 5;
+#else
+ uint16_t selector : 5;
+ uint16_t advert_10base_tx_half : 1;
+ uint16_t advert_10base_tx_full : 1;
+ uint16_t advert_100base_tx_half : 1;
+ uint16_t advert_100base_tx_full : 1;
+ uint16_t advert_100base_t4 : 1;
+ uint16_t pause : 1;
+ uint16_t asymmetric_pause : 1;
+ uint16_t reserved_12 : 1;
+ uint16_t remote_fault : 1;
+ uint16_t reserved_14 : 1;
+ uint16_t next_page : 1;
+#endif
+ } s;
+} bdk_mdio_phy_reg_autoneg_adver_t;
+
+/**
+ * PHY register 5 from the 802.3 spec
+ */
+#define BDK_MDIO_PHY_REG_LINK_PARTNER_ABILITY 5
+typedef union
+{
+ uint16_t u16;
+ struct
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint16_t next_page : 1;
+ uint16_t ack : 1;
+ uint16_t remote_fault : 1;
+ uint16_t reserved_12 : 1;
+ uint16_t asymmetric_pause : 1;
+ uint16_t pause : 1;
+ uint16_t advert_100base_t4 : 1;
+ uint16_t advert_100base_tx_full : 1;
+ uint16_t advert_100base_tx_half : 1;
+ uint16_t advert_10base_tx_full : 1;
+ uint16_t advert_10base_tx_half : 1;
+ uint16_t selector : 5;
+#else
+ uint16_t selector : 5;
+ uint16_t advert_10base_tx_half : 1;
+ uint16_t advert_10base_tx_full : 1;
+ uint16_t advert_100base_tx_half : 1;
+ uint16_t advert_100base_tx_full : 1;
+ uint16_t advert_100base_t4 : 1;
+ uint16_t pause : 1;
+ uint16_t asymmetric_pause : 1;
+ uint16_t reserved_12 : 1;
+ uint16_t remote_fault : 1;
+ uint16_t ack : 1;
+ uint16_t next_page : 1;
+#endif
+ } s;
+} bdk_mdio_phy_reg_link_partner_ability_t;
+
+/**
+ * PHY register 6 from the 802.3 spec
+ */
+#define BDK_MDIO_PHY_REG_AUTONEG_EXPANSION 6
+typedef union
+{
+ uint16_t u16;
+ struct
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint16_t reserved_5_15 : 11;
+ uint16_t parallel_detection_fault : 1;
+ uint16_t link_partner_next_page_capable : 1;
+ uint16_t local_next_page_capable : 1;
+ uint16_t page_received : 1;
+ uint16_t link_partner_autoneg_capable : 1;
+#else
+ uint16_t link_partner_autoneg_capable : 1;
+ uint16_t page_received : 1;
+ uint16_t local_next_page_capable : 1;
+ uint16_t link_partner_next_page_capable : 1;
+ uint16_t parallel_detection_fault : 1;
+ uint16_t reserved_5_15 : 11;
+#endif
+ } s;
+} bdk_mdio_phy_reg_autoneg_expansion_t;
+
+/**
+ * PHY register 9 from the 802.3 spec
+ */
+#define BDK_MDIO_PHY_REG_CONTROL_1000 9
+typedef union
+{
+ uint16_t u16;
+ struct
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint16_t test_mode : 3;
+ uint16_t manual_master_slave : 1;
+ uint16_t master : 1;
+ uint16_t port_type : 1;
+ uint16_t advert_1000base_t_full : 1;
+ uint16_t advert_1000base_t_half : 1;
+ uint16_t reserved_0_7 : 8;
+#else
+ uint16_t reserved_0_7 : 8;
+ uint16_t advert_1000base_t_half : 1;
+ uint16_t advert_1000base_t_full : 1;
+ uint16_t port_type : 1;
+ uint16_t master : 1;
+ uint16_t manual_master_slave : 1;
+ uint16_t test_mode : 3;
+#endif
+ } s;
+} bdk_mdio_phy_reg_control_1000_t;
+
+/**
+ * PHY register 10 from the 802.3 spec
+ */
+#define BDK_MDIO_PHY_REG_STATUS_1000 10
+typedef union
+{
+ uint16_t u16;
+ struct
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint16_t master_slave_fault : 1;
+ uint16_t is_master : 1;
+ uint16_t local_receiver_ok : 1;
+ uint16_t remote_receiver_ok : 1;
+ uint16_t remote_capable_1000base_t_full : 1;
+ uint16_t remote_capable_1000base_t_half : 1;
+ uint16_t reserved_8_9 : 2;
+ uint16_t idle_error_count : 8;
+#else
+ uint16_t idle_error_count : 8;
+ uint16_t reserved_8_9 : 2;
+ uint16_t remote_capable_1000base_t_half : 1;
+ uint16_t remote_capable_1000base_t_full : 1;
+ uint16_t remote_receiver_ok : 1;
+ uint16_t local_receiver_ok : 1;
+ uint16_t is_master : 1;
+ uint16_t master_slave_fault : 1;
+#endif
+ } s;
+} bdk_mdio_phy_reg_status_1000_t;
+
+/**
+ * PHY register 15 from the 802.3 spec
+ */
+#define BDK_MDIO_PHY_REG_EXTENDED_STATUS 15
+typedef union
+{
+ uint16_t u16;
+ struct
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint16_t capable_1000base_x_full : 1;
+ uint16_t capable_1000base_x_half : 1;
+ uint16_t capable_1000base_t_full : 1;
+ uint16_t capable_1000base_t_half : 1;
+ uint16_t reserved_0_11 : 12;
+#else
+ uint16_t reserved_0_11 : 12;
+ uint16_t capable_1000base_t_half : 1;
+ uint16_t capable_1000base_t_full : 1;
+ uint16_t capable_1000base_x_half : 1;
+ uint16_t capable_1000base_x_full : 1;
+#endif
+ } s;
+} bdk_mdio_phy_reg_extended_status_t;
+
+/**
+ * PHY register 13 from the 802.3 spec
+ */
+#define BDK_MDIO_PHY_REG_MMD_CONTROL 13
+typedef union
+{
+ uint16_t u16;
+ struct
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint16_t function : 2;
+ uint16_t reserved_5_13 : 9;
+ uint16_t devad : 5;
+#else
+ uint16_t devad : 5;
+ uint16_t reserved_5_13 : 9;
+ uint16_t function : 2;
+#endif
+ } s;
+} bdk_mdio_phy_reg_mmd_control_t;
+
+/**
+ * PHY register 14 from the 802.3 spec
+ */
+#define BDK_MDIO_PHY_REG_MMD_ADDRESS_DATA 14
+typedef union
+{
+ uint16_t u16;
+ struct
+ {
+ uint16_t address_data : 16;
+ } s;
+} bdk_mdio_phy_reg_mmd_address_data_t;
+
+/* MMD identifiers, mostly for accessing devices within XENPAK modules. */
+#define BDK_MMD_DEVICE_PMA_PMD 1
+#define BDK_MMD_DEVICE_WIS 2
+#define BDK_MMD_DEVICE_PCS 3
+#define BDK_MMD_DEVICE_PHY_XS 4
+#define BDK_MMD_DEVICE_DTS_XS 5
+#define BDK_MMD_DEVICE_TC 6
+#define BDK_MMD_DEVICE_CL22_EXT 29
+#define BDK_MMD_DEVICE_VENDOR_1 30
+#define BDK_MMD_DEVICE_VENDOR_2 31
+
+/**
+ * Perform an MII read. This function is used to read PHY
+ * registers controlling auto negotiation.
+ *
+ * @param node Node to use in a Numa setup. Can be an exact ID or a special
+ * value.
+ * @param bus_id MDIO bus number. Zero on most chips, but some chips (e.g. CN56XX)
+ * support multiple buses.
+ * @param phy_id The MII phy id
+ * @param location Register location to read
+ *
+ * @return Result from the read or -1 on failure
+ */
+extern int bdk_mdio_read(bdk_node_t node, int bus_id, int phy_id, int location);
+
+/**
+ * Perform an MII write. This function is used to write PHY
+ * registers controlling auto negotiation.
+ *
+ * @param node Node to use in a Numa setup. Can be an exact ID or a special
+ * value.
+ * @param bus_id MDIO bus number. Zero on most chips, but some chips (e.g. CN56XX)
+ * support multiple buses.
+ * @param phy_id The MII phy id
+ * @param location Register location to write
+ * @param val Value to write
+ *
+ * @return -1 on error
+ * 0 on success
+ */
+extern int bdk_mdio_write(bdk_node_t node, int bus_id, int phy_id, int location, int val);
+
+/**
+ * Perform an IEEE 802.3 clause 45 MII read. This function is used to read PHY
+ * registers controlling auto negotiation.
+ *
+ * @param node Node to use in a Numa setup. Can be an exact ID or a special
+ * value.
+ * @param bus_id MDIO bus number. Zero on most chips, but some chips (e.g. CN56XX)
+ * support multiple buses.
+ * @param phy_id The MII phy id
+ * @param device MDIO Manageable Device (MMD) id
+ * @param location Register location to read
+ *
+ * @return Result from the read or -1 on failure
+ */
+extern int bdk_mdio_45_read(bdk_node_t node, int bus_id, int phy_id, int device, int location);
+
+/**
+ * Perform an IEEE 802.3 clause 45 MII write. This function is used to write PHY
+ * registers controlling auto negotiation.
+ *
+ * @param node Node to use in a Numa setup. Can be an exact ID or a special
+ * value.
+ * @param bus_id MDIO bus number. Zero on most chips, but some chips (e.g. CN56XX)
+ * support multiple buses.
+ * @param phy_id The MII phy id
+ * @param device MDIO Manageable Device (MMD) id
+ * @param location Register location to write
+ * @param val Value to write
+ *
+ * @return -1 on error
+ * 0 on success
+ */
+extern int bdk_mdio_45_write(bdk_node_t node, int bus_id, int phy_id, int device, int location,
+ int val);
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-mmc.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-mmc.h
new file mode 100644
index 0000000000..93f3286379
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-mmc.h
@@ -0,0 +1,89 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Interface to the MMC, eMMC, or SD
+ *
+ * <hr>$Revision: 49448 $<hr>
+ *
+ * @addtogroup hal
+ * @{
+ */
+
+/**
+ * Initialize a MMC for read/write
+ *
+ * @author creese (10/14/2013)
+ * @param node Node to access
+ * @param chip_sel Chip select to use
+ *
+ * @return Size of the SD card, zero on failure
+ */
+extern int64_t bdk_mmc_initialize(bdk_node_t node, int chip_sel);
+
+/**
+ * Read blocks from a MMC card
+ *
+ * @author creese (10/14/2013)
+ * @param node Node to access
+ * @param chip_sel Chip select to use
+ * @param address Offset into the card in bytes. Must be a multiple of 512
+ * @param buffer Buffer to read into
+ * @param length Length to read in bytes. Must be a multiple of 512
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_mmc_read(bdk_node_t node, int chip_sel, uint64_t address, void *buffer, int length);
+
+/**
+ * Write blocks to a MMC card
+ *
+ * @author creese (10/14/2013)
+ * @param node Node to access
+ * @param chip_sel Chip select to use
+ * @param address Offset into the card in bytes. Must be a multiple of 512
+ * @param buffer Buffer to write
+ * @param length Length to write in bytes. Must be a multiple of 512
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_mmc_write(bdk_node_t node, int chip_sel, uint64_t address, const void *buffer, int length);
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-mpi.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-mpi.h
new file mode 100644
index 0000000000..46da75b019
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-mpi.h
@@ -0,0 +1,105 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Interface to the SPI / MPI bus
+ *
+ * <hr>$Revision: 49448 $<hr>
+ *
+ * @addtogroup hal
+ * @{
+ */
+
+typedef enum
+{
+ BDK_MPI_FLAGS_ENABLE_CS0 = 1<<0, /**< Chip select 0 will be needed */
+ BDK_MPI_FLAGS_ENABLE_CS1 = 1<<1, /**< Chip select 1 will be needed */
+ BDK_MPI_FLAGS_ENABLE_CS2 = 1<<2, /**< Chip select 2 will be needed */
+ BDK_MPI_FLAGS_ENABLE_CS3 = 1<<3, /**< Chip select 3 will be needed */
+ BDK_MPI_FLAGS_CS_ACTIVE_HI = 1<<4, /**< Chip select is active high, else active low */
+ BDK_MPI_FLAGS_ONE_WIRE = 1<<5, /**< Input and output are multiplexed over SPI_DO */
+ BDK_MPI_FLAGS_IDLE_CLOCKS = 1<<7, /**< Continue to clock between commands */
+ BDK_MPI_FLAGS_IDLE_LOW = 1<<8, /**< Clear: clock idles high; set: clock idles low */
+ BDK_MPI_FLAGS_LSB_FIRST = 1<<9, /**< Set to shift the LSB first, otherwise MSB will shift first */
+} bdk_mpi_flags_t;
+
+/**
+ * Initialize MPI/SPI for use. The different configuration
+ * options are encoded as bitmask inside the flags parameter.
+ *
+ * @param node Numa node to use
+ * @param clock_rate_hz
+ * Clock rate in Hz (1-16M)
+ * @param flags Setup flags ORed together
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_mpi_initialize(bdk_node_t node, int clock_rate_hz, bdk_mpi_flags_t flags);
+
+/**
+ * Perform a SPI/MPI transfer. The transfer can contain tx_count
+ * bytes that are transferred out, followed by rx_count bytes
+ * that are read in. Both tx_count and rx_count may be zero if
+ * no transfer is needed. Transmit data is sent most significant
+ * byte first, unless BDK_MPI_FLAGS_LSB_FIRST is set. Receive data
+ * is in the return value with the last byte in the least
+ * significant byte.
+ *
+ * @param node Numa node to use
+ * @param chip_select
+ * Which chip select to enable during the transfer
+ * @param leave_cs_enabled
+ * Leave the chip select asserted after the transaction. Normally
+ * zero; set to non-zero to perform repeated transactions.
+ * @param tx_count Number of bytes to transfer before starting the rx/shift data.
+ * Can be zero.
+ * @param tx_data Data to transmit. The low order bytes are used for the data. Order
+ * of shift out is controlled by BDK_MPI_FLAGS_LSB_FIRST
+ * @param rx_count Number of bytes to read. These bytes will be in the return value
+ * least significant bytes
+ *
+ * @return Read data
+ */
+extern uint64_t bdk_mpi_transfer(bdk_node_t node, int chip_select,
+ int leave_cs_enabled, int tx_count, uint64_t tx_data, int rx_count);
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-nic.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-nic.h
new file mode 100644
index 0000000000..cdd4aa48a9
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-nic.h
@@ -0,0 +1,107 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Interface to the NIC.
+ *
+ * @addtogroup hal
+ * @{
+ */
+
+typedef enum
+{
+ BDK_NIC_TYPE_BGX, /* All chips */
+ BDK_NIC_TYPE_TNS, /* Only allowed on CN88XX */
+ BDK_NIC_TYPE_LBK, /* CN81XX and CN83XX */
+ BDK_NIC_TYPE_RGMII, /* CN83XX */
+} bdk_nic_type_t;
+
+/**
+ * Configure NIC for a specific port. This is called for each
+ * port on every interface that connects to NIC.
+ *
+ * @param handle Handle for port to config
+ * @param ntype Type of LMAC this NIC connects to
+ * @param lmac_credits
+ * Size of the LMAC buffer in bytes. Used to configure the number of credits to
+ * setup between the NIC and LMAC
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_nic_port_init(bdk_if_handle_t handle, bdk_nic_type_t ntype, int lmac_credits);
+
+/**
+ * Get the current TX queue depth. Note that this operation may be slow
+ * and adversely affect packet IO performance.
+ *
+ * @param handle Port to check
+ *
+ * @return Depth of the queue in packets
+ */
+extern int bdk_nic_get_queue_depth(bdk_if_handle_t handle);
+
+/**
+ * Send a packet
+ *
+ * @param handle Handle of port to send on
+ * @param packet Packet to send
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_nic_transmit(bdk_if_handle_t handle, const bdk_if_packet_t *packet);
+
+/**
+ * Query NIC and fill in the transmit stats for the supplied
+ * interface handle.
+ *
+ * @param handle Port handle
+ */
+extern void bdk_nic_fill_tx_stats(bdk_if_handle_t handle);
+
+/**
+ * Query NIC and fill in the receive stats for the supplied
+ * interface handle.
+ *
+ * @param handle Port handle
+ */
+extern void bdk_nic_fill_rx_stats(bdk_if_handle_t handle);
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-nix.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-nix.h
new file mode 100644
index 0000000000..118ceaebbb
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-nix.h
@@ -0,0 +1,105 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2016 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Interface to the NIX.
+ *
+ * @addtogroup hal
+ * @{
+ */
+
+typedef enum
+{
+ BDK_NIX_TYPE_CGX,
+ BDK_NIX_TYPE_LBK,
+} bdk_nix_type_t;
+
+/**
+ * Configure NIX for a specific port. This is called for each
+ * port on every interface that connects to NIX.
+ *
+ * @param handle Handle for port to config
+ * @param ntype Type of LMAC this NIX connects to
+ * @param lmac_credits
+ * Size of the LMAC buffer in bytes. Used to configure the number of credits to
+ * set up between the NIX and LMAC
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_nix_port_init(bdk_if_handle_t handle, bdk_nix_type_t ntype, int lmac_credits);
+
+/**
+ * Send a packet
+ *
+ * @param handle Handle of port to send on
+ * @param packet Packet to send
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_nix_transmit(bdk_if_handle_t handle, const bdk_if_packet_t *packet);
+
+/**
+ * Get the current TX queue depth. Note that this operation may be slow
+ * and adversely affect packet IO performance.
+ *
+ * @param handle Port to check
+ *
+ * @return Depth of the queue in packets
+ */
+extern int bdk_nix_get_queue_depth(bdk_if_handle_t handle);
+
+/**
+ * Query NIX and fill in the transmit stats for the supplied
+ * interface handle.
+ *
+ * @param handle Port handle
+ */
+extern void bdk_nix_fill_tx_stats(bdk_if_handle_t handle);
+
+/**
+ * Query NIX and fill in the receive stats for the supplied
+ * interface handle.
+ *
+ * @param handle Port handle
+ */
+extern void bdk_nix_fill_rx_stats(bdk_if_handle_t handle);
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pbus-flash.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pbus-flash.h
new file mode 100644
index 0000000000..a5cd2d592d
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pbus-flash.h
@@ -0,0 +1,111 @@
+/***********************license start***********************************
+* Copyright (c) 2016-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * This file provides bootbus flash operations
+ *
+ * <hr>$Revision: 49448 $<hr>
+ *
+ * @addtogroup hal
+ * @{
+ */
+
+typedef struct
+{
+ int start_offset;
+ int block_size;
+ int num_blocks;
+} bdk_pbus_flash_region_t;
+
+/**
+ * Initialize the flash access library
+ */
+void bdk_pbus_flash_initialize(bdk_node_t node);
+
+/**
+ * Return a pointer to the flash chip
+ *
+ * @param chip_id Chip ID to return
+ * @return Base address of the flash chip, or zero if the chip doesn't exist
+ */
+uint64_t bdk_pbus_flash_get_base(int chip_id);
+
+/**
+ * Return the number of erasable regions on the chip
+ *
+ * @param chip_id Chip to return info for
+ * @return Number of regions
+ */
+int bdk_pbus_flash_get_num_regions(int chip_id);
+
+/**
+ * Return information about a flash chip's region
+ *
+ * @param chip_id Chip to get info for
+ * @param region Region to get info for
+ * @return Region information
+ */
+const bdk_pbus_flash_region_t *bdk_pbus_flash_get_region_info(int chip_id, int region);
+
+/**
+ * Erase a block on the flash chip
+ *
+ * @param chip_id Chip to erase a block on
+ * @param region Region to erase a block in
+ * @param block Block number to erase
+ * @return Zero on success. Negative on failure
+ */
+int bdk_pbus_flash_erase_block(int chip_id, int region, int block);
+
+/**
+ * Write data to flash. The block must have already been erased. You can call
+ * this multiple times on the same block to write it piecemeal.
+ *
+ * @param chip_id Which flash to write
+ * @param offset Offset into device to start write
+ * @param data Data to write
+ * @param len Length of the data
+ *
+ * @return Zero on success. Negative on failure
+ */
+int bdk_pbus_flash_write(int chip_id, int offset, const void *data, int len);
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pcie-flash.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pcie-flash.h
new file mode 100644
index 0000000000..6605e419bb
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pcie-flash.h
@@ -0,0 +1,109 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Interface to programming the PCIe SPI flash used for config overrides
+ *
+ * @addtogroup hal
+ * @{
+ */
+
+#define BDK_PCIE_FLASH_PREAMBLE 0x9da1
+#define BDK_PCIE_FLASH_END 0x6a5d
+#define BDK_PCIE_FLASH_MAX_OFFSET 256
+
+/**
+ * Determine if access to the PCIe SPI flash is available
+ *
+ * @param node Numa node request is for
+ * @param pcie_port PCIe port to access
+ *
+ * @return One if available, zero if not
+ */
+extern int bdk_pcie_flash_is_available(bdk_node_t node, int pcie_port);
+
+/**
+ * Read the specified offset in the PCIe SPI flash and return its
+ * value. If the EEPROM isn't present or can't be read, -1 is
+ * returned.
+ *
+ * @param node Numa node request is for
+ * @param pcie_port PCIe port to access
+ * @param offset Offset in bytes. Must be a multiple of 8
+ *
+ * @return Value read or -1 if the read failed
+ */
+extern uint64_t bdk_pcie_flash_read(bdk_node_t node, int pcie_port, int offset);
+
+/**
+ * Write a value to the PCIe SPI flash. The value should be of the
+ * format bdk_pemx_spi_data_t.
+ *
+ * @param node Numa node request is for
+ * @param pcie_port PCIe port to access
+ * @param offset Offset to write. Must be a multiple of 8 bytes.
+ * @param value Value to write
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_pcie_flash_write(bdk_node_t node, int pcie_port, int offset, uint64_t value);
+
+/**
+ * Erase the PCIe SPI Flash
+ *
+ * @param node Numa node request is for
+ * @param pcie_port PCIe port to access
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_pcie_flash_erase(bdk_node_t node, int pcie_port);
+
+/**
+ * Dump the PCIe SPI Flash
+ *
+ * @param node Numa node request is for
+ * @param pcie_port PCIe port to access
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_pcie_flash_dump(bdk_node_t node, int pcie_port);
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pcie.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pcie.h
new file mode 100644
index 0000000000..d68a6d297f
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pcie.h
@@ -0,0 +1,236 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Interface to PCIe as a host (RC) or target (EP)
+ *
+ * <hr>$Revision: 51109 $<hr>
+ *
+ * @addtogroup hal
+ * @{
+ */
+
+typedef enum
+{
+ BDK_PCIE_MEM_CONFIG, /* Config space */
+ BDK_PCIE_MEM_NORMAL, /* Memory, not prefetchable */
+ BDK_PCIE_MEM_PREFETCH, /* Memory, prefetchable */
+ BDK_PCIE_MEM_IO, /* IO */
+} bdk_pcie_mem_t;
+
+/**
+ * Return the number of possible PCIe ports on a node. The actual number
+ * of configured ports may be smaller, and the port numbering may be non-contiguous.
+ *
+ * @param node Node to query
+ *
+ * @return Number of PCIe ports that are possible
+ */
+int bdk_pcie_get_num_ports(bdk_node_t node);
+
+/**
+ * Initialize a PCIe port for use in host (RC) mode. It doesn't enumerate the bus.
+ *
+ * @param node Node to use in a Numa setup. Can be an exact ID or a special
+ * value.
+ * @param pcie_port PCIe port to initialize
+ *
+ * @return Zero on success
+ */
+int bdk_pcie_rc_initialize(bdk_node_t node, int pcie_port);
+
+/**
+ * Shutdown a PCIe port and put it in reset
+ *
+ * @param node Node to use in a Numa setup. Can be an exact ID or a special
+ * value.
+ * @param pcie_port PCIe port to shutdown
+ *
+ * @return Zero on success
+ */
+int bdk_pcie_rc_shutdown(bdk_node_t node, int pcie_port);
+
+/**
+ * Return the Core physical base address for PCIe MEM access. Memory is
+ * read/written as an offset from this address.
+ *
+ * @param node Node to use in a Numa setup
+ * @param pcie_port PCIe port the memory is on
+ * @param mem_type Type of memory
+ *
+ * @return 64bit physical address for read/write
+ */
+uint64_t bdk_pcie_get_base_address(bdk_node_t node, int pcie_port, bdk_pcie_mem_t mem_type);
+
+/**
+ * Size of the Mem address region returned at address
+ * bdk_pcie_get_base_address()
+ *
+ * @param node Node to use in a Numa setup
+ * @param pcie_port PCIe port the IO is for
+ * @param mem_type Type of memory
+ *
+ * @return Size of the Mem window
+ */
+uint64_t bdk_pcie_get_base_size(bdk_node_t node, int pcie_port, bdk_pcie_mem_t mem_type);
+
+/**
+ * Read 8bits from a Device's config space
+ *
+ * @param node Node to use in a Numa setup. Can be an exact ID or a special
+ * value.
+ * @param pcie_port PCIe port the device is on
+ * @param bus Sub bus
+ * @param dev Device ID
+ * @param fn Device sub function
+ * @param reg Register to access
+ *
+ * @return Result of the read
+ */
+uint8_t bdk_pcie_config_read8(bdk_node_t node, int pcie_port, int bus, int dev, int fn, int reg);
+
+/**
+ * Read 16bits from a Device's config space
+ *
+ * @param node Node to use in a Numa setup. Can be an exact ID or a special
+ * value.
+ * @param pcie_port PCIe port the device is on
+ * @param bus Sub bus
+ * @param dev Device ID
+ * @param fn Device sub function
+ * @param reg Register to access
+ *
+ * @return Result of the read
+ */
+uint16_t bdk_pcie_config_read16(bdk_node_t node, int pcie_port, int bus, int dev, int fn, int reg);
+
+/**
+ * Read 32bits from a Device's config space
+ *
+ * @param node Node to use in a Numa setup. Can be an exact ID or a special
+ * value.
+ * @param pcie_port PCIe port the device is on
+ * @param bus Sub bus
+ * @param dev Device ID
+ * @param fn Device sub function
+ * @param reg Register to access
+ *
+ * @return Result of the read
+ */
+uint32_t bdk_pcie_config_read32(bdk_node_t node, int pcie_port, int bus, int dev, int fn, int reg) BDK_WEAK;
+
+/**
+ * Write 8bits to a Device's config space
+ *
+ * @param node Node to use in a Numa setup. Can be an exact ID or a special
+ * value.
+ * @param pcie_port PCIe port the device is on
+ * @param bus Sub bus
+ * @param dev Device ID
+ * @param fn Device sub function
+ * @param reg Register to access
+ * @param val Value to write
+ */
+void bdk_pcie_config_write8(bdk_node_t node, int pcie_port, int bus, int dev, int fn, int reg, uint8_t val);
+
+/**
+ * Write 16bits to a Device's config space
+ *
+ * @param node Node to use in a Numa setup. Can be an exact ID or a special
+ * value.
+ * @param pcie_port PCIe port the device is on
+ * @param bus Sub bus
+ * @param dev Device ID
+ * @param fn Device sub function
+ * @param reg Register to access
+ * @param val Value to write
+ */
+void bdk_pcie_config_write16(bdk_node_t node, int pcie_port, int bus, int dev, int fn, int reg, uint16_t val);
+
+/**
+ * Write 32bits to a Device's config space
+ *
+ * @param node Node to use in a Numa setup. Can be an exact ID or a special
+ * value.
+ * @param pcie_port PCIe port the device is on
+ * @param bus Sub bus
+ * @param dev Device ID
+ * @param fn Device sub function
+ * @param reg Register to access
+ * @param val Value to write
+ */
+void bdk_pcie_config_write32(bdk_node_t node, int pcie_port, int bus, int dev, int fn, int reg, uint32_t val) BDK_WEAK;
+
+/**
+ * Read 64bits from PCIe using a memory transaction
+ *
+ * @param node Node to read from
+ * @param pcie_port PCIe port to read
+ * @param address PCIe address to read
+ *
+ * @return Result of the read
+ */
+uint64_t bdk_pcie_mem_read64(bdk_node_t node, int pcie_port, uint64_t address);
+
+/**
+ * Write 64bits to PCIe memory
+ *
+ * @param node Node to write to
+ * @param pcie_port PCIe port to use
+ * @param address Address to write
+ * @param data Data to write
+ */
+void bdk_pcie_mem_write64(bdk_node_t node, int pcie_port, uint64_t address, uint64_t data);
+
+/**
+ * These are the operations defined that can vary per chip generation
+ */
+typedef struct
+{
+ int (*get_num_ports)(bdk_node_t node);
+ int (*rc_initialize)(bdk_node_t node, int pcie_port);
+ int (*rc_shutdown)(bdk_node_t node, int pcie_port);
+ uint64_t (*get_base_address)(bdk_node_t node, int pcie_port, bdk_pcie_mem_t mem_type);
+ uint64_t (*get_base_size)(bdk_node_t node, int pcie_port, bdk_pcie_mem_t mem_type);
+ uint64_t (*build_config_addr)(bdk_node_t node, int pcie_port, int bus, int dev, int fn, int reg);
+} __bdk_pcie_ops_t;
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pki.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pki.h
new file mode 100644
index 0000000000..3379d26644
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pki.h
@@ -0,0 +1,83 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Interface to the PKI.
+ *
+ * @addtogroup hal
+ * @{
+ */
+
+/**
+ * One-time init of global Packet Input
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_pki_global_init(bdk_node_t node);
+
+/**
+ * Configure packet input for a specific port. This is called for each
+ * port on every interface that is connected to packet input.
+ *
+ * @param handle Handle for port to config
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_pki_port_init(bdk_if_handle_t handle);
+
+/**
+ * Enable PKI after all setup is complete
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_pki_enable(bdk_node_t node);
+
+/**
+ * Query PKI and fill in the receive stats for the supplied interface handle. The
+ * interface must use PKI for RX.
+ *
+ * @param handle Port handle
+ */
+extern void bdk_pki_fill_rx_stats(bdk_if_handle_t handle);
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pko.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pko.h
new file mode 100644
index 0000000000..90cd719b06
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pko.h
@@ -0,0 +1,126 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Interface to the PKO.
+ *
+ * @addtogroup hal
+ * @{
+ */
+
+/* Maximum number of segments which fit in a flat LMTST operation.
+ 1) LMTST for PKO can be a maximum of 15 64bit words
+ 2) PKO descriptors are 2 64bit words each
+ 3) Every send requires PKO_SEND_HDR_S for hardware
+ So 15 words / 2 = 7 possible descriptors
+ 7 - HDR = 6 descriptors left for GATHER */
+#define BDK_PKO_SEG_LIMIT 6
+
+/**
+ * Perform global init of PKO
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_pko_global_init(bdk_node_t node);
+
+/**
+ * Configure PKO for a specific port. This is called for each
+ * port on every interface that connects to PKO.
+ *
+ * @param handle Handle for port to config
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_pko_port_init(bdk_if_handle_t handle);
+
+/**
+ * Enable PKO after all setup is complete
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_pko_enable(bdk_node_t node);
+
+/**
+ * Get the current TX queue depth. Note that this operation may be slow
+ * and adversely affect packet IO performance.
+ *
+ * @param handle Port to check
+ *
+ * @return Depth of the queue in packets
+ */
+extern int bdk_pko_get_queue_depth(bdk_if_handle_t handle);
+
+/**
+ * Set PKO shaping at a specific queue level
+ *
+ * @param node Node to shape
+ * @param queue Queue to shape
+ * @param level Level in PKO
+ * @param is_red Non-zero if the rate is for the yellow/red transition. Zero for the
+ * green/yellow transition.
+ * @param is_packets Non-zero if the rate is packets/sec, otherwise rate is bits/sec
+ * @param rate Desired rate. A rate of zero disables shaping
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_pko_shape(bdk_node_t node, int queue, int level, int is_red, int is_packets, uint64_t rate);
+
+/**
+ * Send a packet
+ *
+ * @param handle Handle of port to send on
+ * @param packet Packet to send
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_pko_transmit(bdk_if_handle_t handle, const bdk_if_packet_t *packet);
+
+/**
+ * Query PKO and fill in the transmit stats for the supplied
+ * interface handle. The interface must use PKO for TX.
+ *
+ * @param handle Port handle
+ */
+extern void bdk_pko_fill_tx_stats(bdk_if_handle_t handle);
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-power-burn.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-power-burn.h
new file mode 100644
index 0000000000..570ef1073c
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-power-burn.h
@@ -0,0 +1,67 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+typedef enum
+{
+ BDK_POWER_BURN_NONE, /* Disable power burn */
+ BDK_POWER_BURN_FULL, /* Continuously burn power */
+ BDK_POWER_BURN_CYCLE_10MS, /* Cycle: Burn for 10ms, idle for 10ms */
+ BDK_POWER_BURN_CYCLE_1S, /* Cycle: Burn for 1s, idle for 1s */
+ BDK_POWER_BURN_CYCLE_5S, /* Cycle: Burn for 5s, idle for 5s */
+ BDK_POWER_BURN_CYCLE_1M, /* Cycle: Burn for 1m, idle for 1m */
+ BDK_POWER_BURN_CYCLE_5M, /* Cycle: Burn for 5m, idle for 5m */
+} bdk_power_burn_type_t;
+
+/**
+ * Set the current power burn mode for a node
+ *
+ * @param node Node to control power burn for
+ * @param burn_type Mode of power burn
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_power_burn(bdk_node_t node, bdk_power_burn_type_t burn_type);
+
+/**
+ * Set the throttle level percent for an entire chip
+ *
+ * @param node Node to set
+ * @param throttle_percent Throttle level in percent (0-100)
+ */
+extern void bdk_power_throttle(bdk_node_t node, int throttle_percent);
+
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-qlm.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-qlm.h
new file mode 100644
index 0000000000..6cb1364196
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-qlm.h
@@ -0,0 +1,508 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Function and structure definitions for QLM manipulation
+ *
+ * <hr>$Revision: 49448 $<hr>
+ *
+ * @addtogroup hal
+ * @{
+ */
+
+typedef enum
+{
+ BDK_QLM_MODE_DISABLED, /* QLM is disabled (all chips) */
+ BDK_QLM_MODE_PCIE_1X1, /* 1 PCIe, 1 lane. Other lanes unused */
+ BDK_QLM_MODE_PCIE_2X1, /* 2 PCIe, 1 lane each */
+ BDK_QLM_MODE_PCIE_1X2, /* 1 PCIe, 2 lanes */
+ BDK_QLM_MODE_PCIE_1X4, /* 1 PCIe, 4 lanes */
+ BDK_QLM_MODE_PCIE_1X8, /* 1 PCIe, 8 lanes */
+ BDK_QLM_MODE_PCIE_1X16, /* 1 PCIe, 16 lanes (CN93XX) */
+
+ BDK_QLM_MODE_SATA_4X1, /* SATA, each lane independent (cn88xx) */
+ BDK_QLM_MODE_SATA_2X1, /* SATA, each lane independent (cn83xx) */
+
+ BDK_QLM_MODE_ILK, /* ILK 4 lanes (cn78xx) */
+ BDK_QLM_MODE_SGMII_4X1, /* SGMII, each lane independent (cn88xx) */
+ BDK_QLM_MODE_SGMII_2X1, /* SGMII, each lane independent (cn83xx) */
+ BDK_QLM_MODE_SGMII_1X1, /* SGMII, single lane (cn80xx) */
+ BDK_QLM_MODE_XAUI_1X4, /* 1 XAUI or DXAUI, 4 lanes (cn88xx), use gbaud to tell difference */
+ BDK_QLM_MODE_RXAUI_2X2, /* 2 RXAUI, 2 lanes each (cn88xx) */
+ BDK_QLM_MODE_RXAUI_1X2, /* 1 RXAUI, 2 lanes each (cn83xx) */
+ BDK_QLM_MODE_OCI, /* OCI Multichip interconnect (cn88xx) */
+ BDK_QLM_MODE_XFI_4X1, /* 4 XFI, 1 lane each (cn88xx) */
+ BDK_QLM_MODE_XFI_2X1, /* 2 XFI, 1 lane each (cn83xx) */
+ BDK_QLM_MODE_XFI_1X1, /* 1 XFI, single lane (cn80xx) */
+ BDK_QLM_MODE_XLAUI_1X4, /* 1 XLAUI, 4 lanes each (cn88xx) */
+ BDK_QLM_MODE_10G_KR_4X1, /* 4 10GBASE-KR, 1 lane each (cn88xx) */
+ BDK_QLM_MODE_10G_KR_2X1, /* 2 10GBASE-KR, 1 lane each (cn83xx) */
+ BDK_QLM_MODE_10G_KR_1X1, /* 1 10GBASE-KR, single lane (cn80xx) */
+ BDK_QLM_MODE_40G_KR4_1X4, /* 1 40GBASE-KR4, 4 lanes each (cn88xx) */
+ BDK_QLM_MODE_QSGMII_4X1, /* QSGMII is 4 SGMII on one lane (cn81xx, cn83xx) */
+ BDK_QLM_MODE_25G_4X1, /* 25G, 1 lane each (CN93XX QLMs) */
+ BDK_QLM_MODE_25G_2X1, /* 25G, 1 lane each (CN93XX DLMs) */
+ BDK_QLM_MODE_50G_2X2, /* 50G, 2 lanes each (CN93XX QLMs) */
+ BDK_QLM_MODE_50G_1X2, /* 50G, 2 lanes each (CN93XX DLMs) */
+ BDK_QLM_MODE_100G_1X4, /* 100G, 4 lanes each (CN93XX) */
+ BDK_QLM_MODE_25G_KR_4X1, /* 25G-KR, 1 lane each (CN93XX QLMs) */
+ BDK_QLM_MODE_25G_KR_2X1, /* 25G-KR, 1 lane each (CN93XX DLMs) */
+ BDK_QLM_MODE_50G_KR_2X2, /* 50G-KR, 2 lanes each (CN93XX QLMs) */
+ BDK_QLM_MODE_50G_KR_1X2, /* 50G-KR, 2 lanes each (CN93XX DLMs) */
+ BDK_QLM_MODE_100G_KR4_1X4, /* 100G-KR4, 4 lanes each (CN93XX) */
+ BDK_QLM_MODE_USXGMII_4X1, /* USXGMII, 1 lane each, 10M, 100M, 1G, 2.5G, 5G, 10G, 20G (CN93XX QLMs) */
+ BDK_QLM_MODE_USXGMII_2X1, /* USXGMII, 1 lane each, 10M, 100M, 1G, 2.5G, 5G, 10G, 20G (CN93XX DLMs) */
+ BDK_QLM_MODE_LAST,
+} bdk_qlm_modes_t;
+
+typedef enum
+{
+ BDK_QLM_CLK_COMMON_0,
+ BDK_QLM_CLK_COMMON_1,
+ BDK_QLM_CLK_EXTERNAL,
+ BDK_QLM_CLK_COMMON_2, /* Must be after EXTERNAL as device trees have hard coded values */
+ BDK_QLM_CLK_LAST,
+} bdk_qlm_clock_t;
+
+typedef enum
+{
+ BDK_QLM_MODE_FLAG_ENDPOINT = 1, /* PCIe in EP instead of RC */
+} bdk_qlm_mode_flags_t;
+
+typedef enum
+{
+ BDK_QLM_LOOP_DISABLED, /* No shallow loopback */
+} bdk_qlm_loop_t;
+
+typedef enum
+{
+ BDK_QLM_DIRECTION_TX = 1,
+ BDK_QLM_DIRECTION_RX = 2,
+ BDK_QLM_DIRECTION_BOTH = 3,
+} bdk_qlm_direction_t;
+
+/**
+ * Types of QLM margining supported
+ */
+typedef enum
+{
+ BDK_QLM_MARGIN_VERTICAL,
+ BDK_QLM_MARGIN_HORIZONTAL,
+} bdk_qlm_margin_t;
+
+/**
+ * Eye diagram captures are stored in the following structure
+ */
+typedef struct
+{
+ int width; /* Width in the x direction (time) */
+ int height; /* Height in the y direction (voltage) */
+ uint32_t data[64][128]; /* Error count at location, saturates at max */
+} bdk_qlm_eye_t;
+
+
+/**
+ * How to do the various QLM operations changes greatly
+ * between chips. Each chip has its specific operations
+ * stored in the structure below. The correct structure
+ * is chosen based on the chip we're running on.
+ */
+typedef struct
+{
+ uint32_t chip_model;
+ void (*init)(bdk_node_t node);
+ int (*get_num)(bdk_node_t node);
+ int (*get_lanes)(bdk_node_t node, int qlm);
+ bdk_qlm_modes_t (*get_mode)(bdk_node_t node, int qlm);
+ int (*set_mode)(bdk_node_t node, int qlm, bdk_qlm_modes_t mode, int baud_mhz, bdk_qlm_mode_flags_t flags);
+ int (*get_gbaud_mhz)(bdk_node_t node, int qlm);
+ int (*measure_refclock)(bdk_node_t node, int qlm);
+ int (*get_qlm_num)(bdk_node_t node, bdk_if_t iftype, int interface, int index);
+ int (*reset)(bdk_node_t node, int qlm);
+ int (*enable_prbs)(bdk_node_t node, int qlm, int prbs, bdk_qlm_direction_t dir);
+ int (*disable_prbs)(bdk_node_t node, int qlm);
+ uint64_t (*get_prbs_errors)(bdk_node_t node, int qlm, int lane, int clear);
+ void (*inject_prbs_error)(bdk_node_t node, int qlm, int lane);
+ int (*enable_loop)(bdk_node_t node, int qlm, bdk_qlm_loop_t loop);
+ int (*auto_config)(bdk_node_t node);
+ int (*dip_auto_config)(bdk_node_t node);
+ int (*tune_lane_tx)(bdk_node_t node, int qlm, int lane, int tx_swing, int tx_pre, int tx_post, int tx_gain, int tx_vboost);
+ int (*rx_equalization)(bdk_node_t node, int qlm, int lane);
+ int (*eye_capture)(bdk_node_t node, int qlm, int qlm_lane, bdk_qlm_eye_t *eye);
+} bdk_qlm_ops_t;
+
+/**
+ * Initialize the QLM layer
+ */
+extern void bdk_qlm_init(bdk_node_t node) BDK_WEAK;
+
+/**
+ * Return the number of QLMs supported for the chip
+ *
+ * @return Number of QLMs
+ */
+extern int bdk_qlm_get_num(bdk_node_t node);
+
+/**
+ * Return the number of lanes in a QLM. QLMs normally contain
+ * 4 lanes, except for chips which only have half of a QLM.
+ *
+ * @param node Node to use in a Numa setup. Can be an exact ID or a special
+ * value.
+ * @param qlm QLM to get lanes number for
+ *
+ * @return Number of lanes on the QLM
+ */
+extern int bdk_qlm_get_lanes(bdk_node_t node, int qlm);
+
+/**
+ * Convert a mode into a configuration variable string value
+ *
+ * @param mode Mode to convert
+ *
+ * @return configuration value string
+ */
+extern const char *bdk_qlm_mode_to_cfg_str(bdk_qlm_modes_t mode);
+
+/**
+ * Convert a mode into a human understandable string
+ *
+ * @param mode Mode to convert
+ *
+ * @return Easy to read string
+ */
+extern const char *bdk_qlm_mode_tostring(bdk_qlm_modes_t mode);
+
+/**
+ * Convert a configuration variable value string into a mode
+ *
+ * @param val Configuration variable value
+ *
+ * @return mode
+ */
+extern bdk_qlm_modes_t bdk_qlm_cfg_string_to_mode(const char *val);
+
+/**
+ * Get the mode of a QLM
+ *
+ * @param node Node to use in a Numa setup. Can be an exact ID or a special
+ * value.
+ * @param qlm QLM to examine
+ *
+ * @return Mode of the QLM
+ */
+extern bdk_qlm_modes_t bdk_qlm_get_mode(bdk_node_t node, int qlm);
+
+/**
+ * For chips that don't use pin strapping, this function programs
+ * the QLM to the specified mode
+ *
+ * @param node Node to use in a Numa setup
+ * @param qlm QLM to configure
+ * @param mode Desired mode
+ * @param baud_mhz Desired speed
+ * @param flags Flags to specify mode specific options
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_qlm_set_mode(bdk_node_t node, int qlm, bdk_qlm_modes_t mode, int baud_mhz, bdk_qlm_mode_flags_t flags);
+
+/**
+ * Set the QLM's clock source.
+ *
+ * @param node Node to use in a Numa setup
+ * @param qlm QLM to configure
+ * @param clk Clock source for QLM
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_qlm_set_clock(bdk_node_t node, int qlm, bdk_qlm_clock_t clk);
+
+/**
+ * Get the speed (Gbaud) of the QLM in MHz.
+ *
+ * @param node Node to use in a Numa setup. Can be an exact ID or a special
+ * value.
+ * @param qlm QLM to examine
+ *
+ * @return Speed in MHz
+ */
+extern int bdk_qlm_get_gbaud_mhz(bdk_node_t node, int qlm);
+
+/**
+ * Measure the reference clock of a QLM
+ *
+ * @param node Node to use in a Numa setup. Can be an exact ID or a special
+ * value.
+ * @param qlm QLM to measure
+ *
+ * @return Clock rate in Hz
+ */
+extern int bdk_qlm_measure_clock(bdk_node_t node, int qlm);
+
+/**
+ * Look up the hardware QLM number for a given interface type and
+ * index. If the associated interface doesn't map to a QLM,
+ * returns -1.
+ *
+ * @param node Node to use in a Numa setup
+ * @param iftype Interface type
+ * @param interface Interface index number
+ * @param index Port on the interface. Most chips use the
+ * same mode for all ports, but there are
+ * exceptions. For example, BGX2 on CN83XX
+ * spans two DLMs.
+ *
+ * @return QLM number or -1 on failure
+ */
+extern int bdk_qlm_get(bdk_node_t node, bdk_if_t iftype, int interface, int index);
+
+/**
+ * Reset a QLM to its initial state
+ *
+ * @param node Node to use in a numa setup
+ * @param qlm QLM to use
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_qlm_reset(bdk_node_t node, int qlm);
+
+/**
+ * Enable PRBS on a QLM
+ *
+ * @param node Node to use in a numa setup
+ * @param qlm QLM to use
+ * @param prbs PRBS mode (31, etc)
+ * @param dir Directions to enable. This is so you can enable TX and later
+ * enable RX after TX has run for a time
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_qlm_enable_prbs(bdk_node_t node, int qlm, int prbs, bdk_qlm_direction_t dir);
+
+/**
+ * Disable PRBS on a QLM
+ *
+ * @param node Node to use in a numa setup
+ * @param qlm QLM to use
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_qlm_disable_prbs(bdk_node_t node, int qlm);
+
+/**
+ * Return the number of PRBS errors since PRBS started running
+ *
+ * @param node Node to use in numa setup
+ * @param qlm QLM to use
+ * @param lane Which lane
+ * @param clear Clear the counter after returning its value
+ *
+ * @return Number of errors
+ */
+extern uint64_t bdk_qlm_get_prbs_errors(bdk_node_t node, int qlm, int lane, int clear);
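+
+/*
+ * Usage sketch of the PRBS flow described above: start the transmitter,
+ * let it run, then enable the receiver and read per-lane error counts.
+ * The direction enumerator names, settle times, and choice of PRBS-31
+ * are illustrative assumptions.
+ *
+ *   bdk_qlm_enable_prbs(node, qlm, 31, BDK_QLM_DIRECTION_TX);
+ *   bdk_wait_usec(10000);    // let TX run before enabling RX
+ *   bdk_qlm_enable_prbs(node, qlm, 31, BDK_QLM_DIRECTION_RX);
+ *   bdk_wait_usec(1000000);  // accumulate errors for one second
+ *   for (int lane = 0; lane < bdk_qlm_get_lanes(node, qlm); lane++)
+ *       printf("Lane %d: %llu errors\n", lane,
+ *              (unsigned long long)bdk_qlm_get_prbs_errors(node, qlm, lane, 1));
+ *   bdk_qlm_disable_prbs(node, qlm);
+ */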
+
+/**
+ * Inject an error into PRBS
+ *
+ * @param node Node to use in numa setup
+ * @param qlm QLM to use
+ * @param lane Which lane
+ */
+extern void bdk_qlm_inject_prbs_error(bdk_node_t node, int qlm, int lane);
+
+/**
+ * Enable shallow loopback on a QLM
+ *
+ * @param node Node to use in a numa setup
+ * @param qlm QLM to use
+ * @param loop Type of loopback. Not all QLMs support all modes
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_qlm_enable_loop(bdk_node_t node, int qlm, bdk_qlm_loop_t loop);
+
+/**
+ * Configure the TX tuning parameters for a QLM lane. The tuning parameters can
+ * be specified as -1 to maintain their current value
+ *
+ * @param node Node to configure
+ * @param qlm QLM to configure
+ * @param lane Lane to configure
+ * @param tx_swing Transmit swing (coef 0) Range 0-31
+ * @param tx_pre Pre cursor emphasis (Coef -1). Range 0-15
+ * @param tx_post Post cursor emphasis (Coef +1). Range 0-31
+ * @param tx_gain Transmit gain. Range 0-7
+ * @param tx_vboost Transmit voltage boost. Range 0-1
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_qlm_tune_lane_tx(bdk_node_t node, int qlm, int lane, int tx_swing, int tx_pre, int tx_post, int tx_gain, int tx_vboost);
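+
+/*
+ * Usage sketch: raise the transmit swing on lane 0 while passing -1 for
+ * the other parameters so they keep their current values, as described
+ * above. The swing value of 20 is an arbitrary in-range example.
+ *
+ *   if (bdk_qlm_tune_lane_tx(node, qlm, 0, 20, -1, -1, -1, -1))
+ *       bdk_error("TX tuning failed\n");
+ */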
+
+/**
+ * Perform RX equalization on a QLM
+ *
+ * @param node Node the QLM is on
+ * @param qlm QLM to perform RX equalization on
+ * @param lane Lane to use, or -1 for all lanes
+ *
+ * @return Zero on success, negative if any lane failed RX equalization
+ */
+extern int bdk_qlm_rx_equalization(bdk_node_t node, int qlm, int lane);
+
+/**
+ * Capture an eye diagram for the given QLM lane. The output data is written
+ * to "eye".
+ *
+ * @param node Node to use in numa setup
+ * @param qlm QLM to use
+ * @param qlm_lane Which lane
+ * @param eye Output eye data
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_qlm_eye_capture(bdk_node_t node, int qlm, int qlm_lane, bdk_qlm_eye_t *eye);
+
+/**
+ * Display an eye diagram for the given QLM lane. The eye data can be in "eye", or
+ * captured during the call if "eye" is NULL.
+ *
+ * @param node Node to use in numa setup
+ * @param qlm QLM to use
+ * @param qlm_lane Which lane
+ * @param format Display format. 0 = raw, 1 = Color ASCII
+ * @param eye Eye data to display, or NULL if the data should be captured.
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_qlm_eye_display(bdk_node_t node, int qlm, int qlm_lane, int format, const bdk_qlm_eye_t *eye);
+
+/**
+ * Call the board specific method of determining the required QLM configuration
+ * and automatically setting up the QLMs to match. For example, on the EBB8800
+ * this function queries the MCU for the current setup.
+ *
+ * @param node Node to configure
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_qlm_auto_config(bdk_node_t node);
+
+/**
+ * Get the current RX margining parameter
+ *
+ * @param node Node to read margin value from
+ * @param qlm QLM to read from
+ * @param qlm_lane Lane to read
+ * @param margin_type
+ * Type of margining parameter to read
+ *
+ * @return Current margining parameter value
+ */
+extern int64_t bdk_qlm_margin_rx_get(bdk_node_t node, int qlm, int qlm_lane, bdk_qlm_margin_t margin_type);
+
+/**
+ * Get the current RX margining parameter minimum value
+ *
+ * @param node Node to read margin value from
+ * @param qlm QLM to read from
+ * @param qlm_lane Lane to read
+ * @param margin_type
+ * Type of margining parameter to read
+ *
+ * @return Current margining parameter minimum value
+ */
+extern int64_t bdk_qlm_margin_rx_get_min(bdk_node_t node, int qlm, int qlm_lane, bdk_qlm_margin_t margin_type);
+
+/**
+ * Get the current RX margining parameter maximum value
+ *
+ * @param node Node to read margin value from
+ * @param qlm QLM to read from
+ * @param qlm_lane Lane to read
+ * @param margin_type
+ * Type of margining parameter to read
+ *
+ * @return Current margining parameter maximum value
+ */
+extern int64_t bdk_qlm_margin_rx_get_max(bdk_node_t node, int qlm, int qlm_lane, bdk_qlm_margin_t margin_type);
+
+/**
+ * Set the current RX margining parameter value
+ *
+ * @param node Node to set margin value on
+ * @param qlm QLM to set
+ * @param qlm_lane Lane to set
+ * @param margin_type
+ * Type of margining parameter to set
+ * @param value Value of margining parameter
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_qlm_margin_rx_set(bdk_node_t node, int qlm, int qlm_lane, bdk_qlm_margin_t margin_type, int value);
+
+/**
+ * Restore the supplied RX margining parameter value as if it was never set. This
+ * disables any overrides in the SERDES needed to perform margining.
+ *
+ * @param node Node to restore margin value on
+ * @param qlm QLM to restore
+ * @param qlm_lane Lane to restore
+ * @param margin_type
+ * Type of margining parameter to restore
+ * @param value Value of margining parameter
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_qlm_margin_rx_restore(bdk_node_t node, int qlm, int qlm_lane, bdk_qlm_margin_t margin_type, int value);
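+
+/*
+ * Usage sketch of a margining sweep using the accessors above: save the
+ * current value, walk the parameter across its legal range, then restore
+ * the lane as if margining never happened. BDK_QLM_MARGIN_VERTICAL is an
+ * assumed bdk_qlm_margin_t enumerator.
+ *
+ *   int64_t start = bdk_qlm_margin_rx_get(node, qlm, lane, BDK_QLM_MARGIN_VERTICAL);
+ *   int64_t lo = bdk_qlm_margin_rx_get_min(node, qlm, lane, BDK_QLM_MARGIN_VERTICAL);
+ *   int64_t hi = bdk_qlm_margin_rx_get_max(node, qlm, lane, BDK_QLM_MARGIN_VERTICAL);
+ *   for (int64_t v = lo; v <= hi; v++)
+ *       bdk_qlm_margin_rx_set(node, qlm, lane, BDK_QLM_MARGIN_VERTICAL, v);
+ *   bdk_qlm_margin_rx_restore(node, qlm, lane, BDK_QLM_MARGIN_VERTICAL, start);
+ */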
+
+/**
+ * For Cavium SFF boards, query the DIP switches to determine the QLM setup,
+ * applying any configuration found.
+ *
+ * @param node Node to configure
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_qlm_dip_auto_config(bdk_node_t node);
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-rng.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-rng.h
new file mode 100644
index 0000000000..d5a7d0a43b
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-rng.h
@@ -0,0 +1,79 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Function and structure definitions for random number generator hardware
+ *
+ * <hr>$Revision: 49448 $<hr>
+ *
+ * @addtogroup hal
+ * @{
+ */
+
+/**
+ * Reads 8 bits of random data from the random number generator
+ *
+ * @return random data
+ */
+extern uint8_t bdk_rng_get_random8(void);
+
+/**
+ * Reads 16 bits of random data from the random number generator
+ *
+ * @return random data
+ */
+extern uint16_t bdk_rng_get_random16(void);
+
+/**
+ * Reads 32 bits of random data from the random number generator
+ *
+ * @return random data
+ */
+extern uint32_t bdk_rng_get_random32(void);
+
+/**
+ * Reads 64 bits of random data from the random number generator
+ *
+ * @return random data
+ */
+extern uint64_t bdk_rng_get_random64(void);
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-rvu.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-rvu.h
new file mode 100644
index 0000000000..53b73ca4aa
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-rvu.h
@@ -0,0 +1,66 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Interface to the hardware RVU.
+ *
+ * @addtogroup hal
+ * @{
+ */
+
+#define BDK_RVU_PF 0 /* The BDK uses a hard coded PF number for accessing RVU devices */
+
+/**
+ * Allocate a block of MSIX vectors inside RVU
+ *
+ * @param node Node to allocate for
+ * @param msix_count Number to allocate
+ *
+ * @return Starting offset of MSIX vectors. On failure this function calls bdk_fatal(), so
+ * no error checking is needed on the return value.
+ */
+static inline int bdk_rvu_alloc_msix(bdk_node_t node, int msix_count)
+{
+ return 0; /* FIXME: Implement MSIX allocation for RVU */
+}
+
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-sata.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-sata.h
new file mode 100644
index 0000000000..755a3ba7aa
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-sata.h
@@ -0,0 +1,163 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * This file provides a SATA driver
+ *
+ * <hr>$Revision: 49448 $<hr>
+ *
+ * @addtogroup hal
+ * @{
+ */
+
+/**
+ * SATA pattern generation and loopback supports a number of
+ * modes. This enumeration describes the modes used by
+ * bdk_sata_bist_fir().
+ */
+typedef enum
+{
+ BDK_SATA_BIST_FIS_RETIMED, /* Send FIS to tell device to enter Retimed loopback */
+ BDK_SATA_BIST_FIS_ANALOG, /* Send FIS to tell device to enter Analog loopback */
+ BDK_SATA_BIST_FIS_TX_ONLY, /* Send FIS to tell device to transmit only */
+ BDK_SATA_BIST_SW_RETIMED, /* No FIS, just enter local retimed loopback */
+ BDK_SATA_BIST_SW_TX_ONLY_SSOP, /* No FIS, just enter local transmit only, SSOP pattern */
+ BDK_SATA_BIST_SW_TX_ONLY_HTDP, /* No FIS, just enter local transmit only, HTDP pattern */
+ BDK_SATA_BIST_SW_TX_ONLY_LTDP, /* No FIS, just enter local transmit only, LTDP pattern */
+ BDK_SATA_BIST_SW_TX_ONLY_LFSCP, /* No FIS, just enter local transmit only, LFSCP pattern */
+ BDK_SATA_BIST_SW_TX_ONLY_COMP, /* No FIS, just enter local transmit only, COMP pattern */
+ BDK_SATA_BIST_SW_TX_ONLY_LBP, /* No FIS, just enter local transmit only, LBP pattern */
+ BDK_SATA_BIST_SW_TX_ONLY_MFTP, /* No FIS, just enter local transmit only, MFTP pattern */
+ BDK_SATA_BIST_SW_TX_ONLY_HFTP, /* No FIS, just enter local transmit only, HFTP pattern */
+ BDK_SATA_BIST_SW_TX_ONLY_LFTP, /* No FIS, just enter local transmit only, LFTP pattern */
+} bdk_sata_bist_fis_t;
+
+/**
+ * Return the number of SATA controllers on the chip
+ *
+ * @param node Node to query
+ *
+ * @return Number of controllers, could be zero.
+ */
+int bdk_sata_get_controllers(bdk_node_t node);
+
+/**
+ * Initialize a SATA controller and begin device detection
+ *
+ * @param node Node to initialize
+ * @param controller Which controller to initialize
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_sata_initialize(bdk_node_t node, int controller);
+
+/**
+ * Shutdown a SATA controller
+ *
+ * @param node Node to access
+ * @param controller Controller to shutdown
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_sata_shutdown(bdk_node_t node, int controller);
+
+/**
+ * Return the number of SATA ports connected to this AHCI controller
+ *
+ * @param node Node to query
+ * @param controller SATA controller
+ *
+ * @return Number of ports. Zero if the controller doesn't connect to a QLM.
+ */
+int bdk_sata_get_ports(bdk_node_t node, int controller);
+
+/**
+ * Identify the SATA device connected to a controller
+ *
+ * @param node Node to query
+ * @param controller Controller to query
+ * @param port Which SATA port on the controller, zero based
+ *
+ * @return Size of the disk in bytes
+ */
+uint64_t bdk_sata_identify(bdk_node_t node, int controller, int port);
+
+/**
+ * Read data from a SATA device
+ *
+ * @param node Node the controller is on
+ * @param controller Which controller
+ * @param port Which port on the controller, zero based
+ * @param lba 48 bit Block address to read
+ * @param sectors    Number of 512 byte sectors to read
+ * @param buffer Buffer to receive the data. Must be at least 512 * sectors in size
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_sata_read(bdk_node_t node, int controller, int port, uint64_t lba, int sectors, void *buffer);
+
+/**
+ * Write data to a SATA device
+ *
+ * @param node Node the controller is on
+ * @param controller Which controller
+ * @param port Which port on the controller, zero based
+ * @param lba 48 bit Block address to write
+ * @param sectors    Number of 512 byte sectors to write
+ * @param buffer Data buffer to write. Must be at least 512 * sectors in size
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_sata_write(bdk_node_t node, int controller, int port, uint64_t lba, int sectors, const void *buffer);
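+
+/*
+ * Usage sketch tying the calls above together: bring up controller 0 and
+ * read the first sector of every attached device. Error handling is
+ * abbreviated; the controller number and buffer handling are illustrative.
+ *
+ *   uint8_t sector[512];
+ *   if (bdk_sata_initialize(node, 0) == 0)
+ *   {
+ *       for (int port = 0; port < bdk_sata_get_ports(node, 0); port++)
+ *       {
+ *           if (bdk_sata_identify(node, 0, port) == 0)
+ *               continue;  // no usable device on this port
+ *           bdk_sata_read(node, 0, port, 0, 1, sector);
+ *       }
+ *   }
+ */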
+
+/**
+ * Enter one of the SATA pattern generation / loop testing modes
+ *
+ * @param node Node to access
+ * @param controller SATA controller to access
+ * @param port Which port on the controller
+ * @param mode Test mode to enter
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_sata_bist_fis(bdk_node_t node, int controller, int port, bdk_sata_bist_fis_t mode);
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-spinlock.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-spinlock.h
new file mode 100644
index 0000000000..5e34a4f7b9
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-spinlock.h
@@ -0,0 +1,146 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Implementation of spinlocks.
+ *
+ * <hr>$Revision: 49448 $<hr>
+ *
+ * @addtogroup hal
+ * @{
+ */
+
+/**
+ * Spinlocks
+ */
+typedef union
+{
+ uint64_t combined;
+ struct
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint32_t ticket;
+ uint32_t serving;
+#else
+ uint32_t serving;
+ uint32_t ticket;
+#endif
+ } s;
+} bdk_spinlock_t;
+
+/**
+ * Initialize a spinlock
+ *
+ * @param lock Lock to initialize
+ */
+static inline void bdk_spinlock_init(bdk_spinlock_t *lock)
+{
+ asm volatile ("str xzr, [%[b]]"
+ : "+m" (lock->combined)
+ : [b] "r" (&lock->combined)
+ : "memory");
+}
+
+/**
+ * Releases lock
+ *
+ * @param lock pointer to lock structure
+ */
+static inline void bdk_spinlock_unlock(bdk_spinlock_t *lock) __attribute__ ((always_inline));
+static inline void bdk_spinlock_unlock(bdk_spinlock_t *lock)
+{
+ /* Implies a release */
+ asm volatile ("stlr %w[v], [%[b]]"
+ : "+m" (lock->s.serving)
+ : [v] "r" (lock->s.serving + 1), [b] "r" (&lock->s.serving)
+ : "memory");
+}
+
+/**
+ * Gets lock, spins until lock is taken
+ *
+ * @param lock pointer to lock structure
+ */
+static inline void bdk_spinlock_lock(bdk_spinlock_t *lock) __attribute__ ((always_inline));
+static inline void bdk_spinlock_lock(bdk_spinlock_t *lock)
+{
+ uint64_t combined;
+ uint32_t ticket;
+ uint32_t serving;
+
+ asm volatile (
+ "mov %x[serving], 1<<32 \n"
+ "ldadda %x[serving], %x[combined], [%[ptr]] \n"
+ "and %x[serving], %x[combined], 0xffffffff \n"
+ "lsr %x[ticket], %x[combined], 32 \n"
+ "cmp %x[ticket], %x[serving] \n"
+ "b.eq 1f \n"
+ "sevl \n"
+ "2: wfe \n"
+ "ldxr %w[serving], [%[ptr2]] \n"
+ "cmp %x[ticket], %x[serving] \n"
+ "b.ne 2b \n"
+ "1: \n"
+ : [serving] "=&r" (serving), [ticket] "=&r" (ticket), [combined] "=&r" (combined), "+m" (lock->combined)
+ : [ptr] "r" (&lock->combined), [ptr2] "r" (&lock->s.serving)
+ : "memory"
+ );
+}
+
+/**
+ * Tries to get the lock, failing if we can't get it immediately
+ *
+ * @param lock pointer to lock structure
+ */
+static inline int bdk_spinlock_trylock(bdk_spinlock_t *lock) __attribute__ ((always_inline));
+static inline int bdk_spinlock_trylock(bdk_spinlock_t *lock)
+{
+ uint64_t combined = *(volatile uint64_t *)&lock->combined;
+ uint32_t ticket = combined >> 32;
+ uint32_t serving = (uint32_t)combined;
+ if (ticket != serving)
+ return -1;
+ uint64_t new_combined = combined + (1ull << 32);
+ bool success = bdk_atomic_compare_and_store64(&lock->combined, combined, new_combined);
+ return success ? 0 : -1;
+}
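+
+/*
+ * Usage sketch: a zero-initialized bdk_spinlock_t is unlocked
+ * (ticket == serving == 0), so a static lock needs no explicit init call.
+ *
+ *   static bdk_spinlock_t counter_lock;  // zeroed BSS, i.e. unlocked
+ *   static uint64_t counter;
+ *
+ *   bdk_spinlock_lock(&counter_lock);
+ *   counter++;
+ *   bdk_spinlock_unlock(&counter_lock);
+ */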
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-sso.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-sso.h
new file mode 100644
index 0000000000..a04d5ca3cf
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-sso.h
@@ -0,0 +1,69 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Interface to the SSO.
+ *
+ * @addtogroup hal
+ * @{
+ */
+
+/**
+ * Initialize the SSO
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_sso_init(bdk_node_t node);
+
+/**
+ * Register a bdk-if handle with the SSO code so the SSO can be used to receive
+ * traffic from it.
+ *
+ * @param handle Handle to register
+ */
+extern void bdk_sso_register_handle(bdk_if_handle_t handle);
+
+/**
+ * Function called during bdk_thread_yield() to process work while we're idle
+ */
+extern void bdk_sso_process_work(void);
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-tns.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-tns.h
new file mode 100644
index 0000000000..89264c8604
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-tns.h
@@ -0,0 +1,109 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Thunder Network Switch interface.
+ *
+ * $Revision$
+ *
+ * @addtogroup hal
+ * @{
+ */
+
+/**
+ * Initialize the TNS block to enable clocks, allow register accesses, and
+ * perform some basic initialization in anticipation of future packet
+ * processing.
+ *
+ * TNS at power-up will be in BYPASS mode where packets from the vNIC pipes
+ * to the BGX ports will be direct, and this will not change that.
+ *
+ * This is normally called automatically in bdk-init-main.c.
+ *
+ * @param node Node to initialize
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_tns_initialize(bdk_node_t node) BDK_WEAK;
+
+/**
+ * Disable TNS from processing packets. After this, TNS must be fully
+ * initialized. The NIC and BGX blocks must already be stopped before
+ * calling this function.
+ *
+ * Nota Bene: In CN88XX_PASS_1 there is a bug that prevents TNS DataPath
+ * from draining packets. So ensure that NIC and BGX have
+ * also drained their packet queues.
+ *
+ * @param node
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_tns_shutdown(bdk_node_t node);
+
+/**
+ * Set the TNS 'profile' to passthru. I.e. do the necessary writes
+ * to the TNS datapath and TNS sst (Search, SDE, and TxQ) registers
+ * to configure the TNS to allow vNIC0..vNIC7 <-> LMAC0..LMAC7 traffic
+ * to flow straight through TNS (although the actual enabling of using
+ * the TNS is done elsewhere (in traffic-gen.))
+ *
+ * @param node Node to configure
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_tns_profile_passthru(bdk_node_t node) BDK_WEAK;
+
+/**
+ * Set the TNS 'profile' to bgxloopback. I.e. do the necessary writes
+ * to the TNS datapath and TNS sst (Search, SDE, and TxQ) registers
+ * to configure the TNS to allow any packets received on LMAC0..LMAC7
+ * (BGX ports) to be reflected back to the same port after hitting the
+ * TNS (although the actual enabling of using the TNS is done elsewhere
+ * (in traffic-gen.))
+ *
+ * @param node Node to configure
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_tns_profile_bgxloopback(bdk_node_t node) BDK_WEAK;
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-twsi.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-twsi.h
new file mode 100644
index 0000000000..2840ca5c96
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-twsi.h
@@ -0,0 +1,102 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Interface to the TWSI / I2C bus
+ *
+ * Note: Currently only 7 bit device addresses are supported
+ *
+ * <hr>$Revision: 49448 $<hr>
+ *
+ * @addtogroup hal
+ * @{
+ */
+
+/**
+ * Initialize the TWSI blocks. This just sets the clock rate.
+ * Many devices will work without calling this, but some
+ * TWSI devices will fail. This is normally called automatically
+ * in bdk-init-main.c.
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_twsix_initialize(bdk_node_t node) BDK_WEAK;
+
+/**
+ * Do a twsi read from a 7 bit device address using an (optional)
+ * internal address. Up to 4 bytes can be read at a time.
+ *
+ * @param node Node to use in a Numa setup. Can be an exact ID or a special
+ * value.
+ * @param twsi_id which TWSI bus to use
+ * @param dev_addr Device address (7 bit)
+ * @param internal_addr
+ * Internal address. Can be 0, 1 or 2 bytes in width
+ * @param num_bytes Number of data bytes to read (1-4)
+ * @param ia_width_bytes
+ * Internal address size in bytes (0, 1, or 2)
+ *
+ * @return Read data, or -1 on failure
+ */
+extern int64_t bdk_twsix_read_ia(bdk_node_t node, int twsi_id, uint8_t dev_addr, uint16_t internal_addr, int num_bytes, int ia_width_bytes);
+
+/**
+ * Write 1-8 bytes to a TWSI device using an internal address.
+ *
+ * @param node Node to use in a Numa setup. Can be an exact ID or a special
+ * value.
+ * @param twsi_id which TWSI interface to use
+ * @param dev_addr TWSI device address (7 bit only)
+ * @param internal_addr
+ * TWSI internal address (0, 8, or 16 bits)
+ * @param num_bytes Number of bytes to write (1-8)
+ * @param ia_width_bytes
+ * internal address width, in bytes (0, 1, 2)
+ * @param data Data to write. Data is written MSB first on the twsi bus, and
+ * only the lower num_bytes bytes of the argument are valid. (If
+ * a 2 byte write is done, only the low 2 bytes of the argument are
+ * used.)
+ *
+ * @return Zero on success, -1 on error
+ */
+extern int bdk_twsix_write_ia(bdk_node_t node, int twsi_id, uint8_t dev_addr, uint16_t internal_addr, int num_bytes, int ia_width_bytes, uint64_t data);
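+
+/*
+ * Usage sketch: read one byte behind an 8 bit internal address, then
+ * write it back. TWSI bus 0 and device address 0x50 (a typical SPD
+ * EEPROM address) are illustrative assumptions.
+ *
+ *   int64_t data = bdk_twsix_read_ia(node, 0, 0x50, 2, 1, 1);
+ *   if (data < 0)
+ *       bdk_error("TWSI read failed\n");
+ *   else if (bdk_twsix_write_ia(node, 0, 0x50, 2, 1, 1, data))
+ *       bdk_error("TWSI write failed\n");
+ */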
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-usb.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-usb.h
new file mode 100644
index 0000000000..6e78e5db14
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-usb.h
@@ -0,0 +1,109 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Interface to USB3 or USB2.
+ *
+ * <hr>$Revision: 49448 $<hr>
+ *
+ * @addtogroup hal
+ * @{
+ */
+
+typedef enum
+{
+ BDK_USB_TEST_USB2_DISABLE,
+ BDK_USB_TEST_USB2_J_STATE,
+ BDK_USB_TEST_USB2_K_STATE,
+ BDK_USB_TEST_USB2_SE0_NAK,
+ BDK_USB_TEST_USB2_PACKET,
+ BDK_USB_TEST_USB2_FORCE_ENABLE,
+ BDK_USB_XHCI_INIT,
+ BDK_USB_XHCI_LIST_ADDRESSES,
+ BDK_USB_XHCI_POLL_STATUS,
+ BDK_USB_XHCI_TOGGLE_POLLING,
+ BDK_USB_TEST_USB2_LAST,
+} bdk_usb_test_t;
+
+typedef enum
+{
+ BDK_USB_CLOCK_SS_PAD_HS_PAD = 0x0, /* Superspeed and high speed use PAD clock */
+ BDK_USB_CLOCK_SS_REF0_HS_REF0 = 0x1, /* Superspeed and high speed use DLM/QLM ref clock 0 */
+ BDK_USB_CLOCK_SS_REF1_HS_REF1 = 0x2, /* Superspeed and high speed use DLM/QLM ref clock 1 */
+ BDK_USB_CLOCK_SS_PAD_HS_PLL = 0x3, /* Superspeed uses PAD clock, high speed uses PLL ref clock */
+ BDK_USB_CLOCK_SS_REF0_HS_PLL = 0x4, /* Superspeed uses DLM/QLM ref clock 0, high speed uses PLL ref clock */
+ BDK_USB_CLOCK_SS_REF1_HS_PLL = 0x5, /* Superspeed uses DLM/QLM ref clock 1, high speed uses PLL ref clock */
+} bdk_usb_clock_t;
+
+/**
+ * Initialize the clocks for USB such that it is ready for a generic XHCI driver
+ *
+ * @param node Node to init
+ * @param usb_port   Port to initialize
+ * @param clock_type Type of clock connected to the usb port
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_usb_initialize(bdk_node_t node, int usb_port, bdk_usb_clock_t clock_type);
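+
+/*
+ * Usage sketch: bring up USB port 0 using the PAD clock for both
+ * SuperSpeed and high speed (a common board choice), after which the
+ * port is ready for a generic XHCI driver. The port number is an
+ * illustrative assumption.
+ *
+ *   if (bdk_usb_initialize(node, 0, BDK_USB_CLOCK_SS_PAD_HS_PAD))
+ *       bdk_error("USB initialization failed\n");
+ */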
+
+/**
+ * Put the USB port into a specific testing mode
+ *
+ * @param node Node to use
+ * @param usb_port Port to use
+ * @param test_mode USB test mode
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_usb_test_mode(bdk_node_t node, int usb_port, bdk_usb_test_t test_mode);
+
+/**
+ * Convert a USB test enumeration into a string for display to the user
+ *
+ * @param node Node to use
+ * @param usb_port Port to use
+ * @param test_mode Mode to convert
+ *
+ * @return String name of test
+ */
+extern const char *bdk_usb_get_test_mode_string(bdk_node_t node, int usb_port, bdk_usb_test_t test_mode);
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-utils.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-utils.h
new file mode 100644
index 0000000000..c0ed43582e
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-utils.h
@@ -0,0 +1,206 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include "libbdk-arch/bdk-csrs-lmc.h"
+#include "libbdk-arch/bdk-csrs-rst.h"
+
+/**
+ * @file
+ * Small utility functions and macros to ease programming.
+ *
+ * <hr>$Revision: 38306 $<hr>
+ *
+ * @addtogroup hal
+ * @{
+ */
+
+/*
+ * The macros bdk_likely and bdk_unlikely use the
+ * __builtin_expect GCC operation to control branch
+ * probabilities for a conditional. For example, an "if"
+ * statement in the code that will almost always be
+ * executed should be written as "if (bdk_likely(...))".
+ * If the "else" section of an if statement is more
+ * probable, use "if (bdk_unlikely(...))".
+ */
+#define bdk_likely(x) __builtin_expect(!!(x), 1)
+#define bdk_unlikely(x) __builtin_expect(!!(x), 0)
+
+#define BDK_DISPLAY_PASS 1 /* Control the display of the detail chip pass info */
+#define BDK_CACHE_LINE_SIZE (128) // In bytes
+#define BDK_CACHE_LINE_MASK (BDK_CACHE_LINE_SIZE - 1) // In bytes
+#define BDK_CACHE_LINE_ALIGNED __attribute__ ((aligned (BDK_CACHE_LINE_SIZE)))
+
+/**
+ * Builds a bit mask given the required size in bits.
+ *
+ * @param bits Number of bits in the mask
+ * @return The mask
+ */
+static inline uint64_t bdk_build_mask(uint64_t bits)
+{
+ if (bits == 64)
+ return -1;
+ else
+ return ~((~0x0ull) << bits);
+}
+
+/**
+ * Extract bits out of a number
+ *
+ * @param input Number to extract from
+ * @param lsb Starting bit, least significant (0-63)
+ * @param width Width in bits (1-64)
+ *
+ * @return Extracted number
+ */
+static inline uint64_t bdk_extract(uint64_t input, int lsb, int width)
+{
+ uint64_t result = input >> lsb;
+ result &= bdk_build_mask(width);
+ return result;
+}
+
+/**
+ * Extract signed bits out of a number
+ *
+ * @param input Number to extract from
+ * @param lsb Starting bit, least significant (0-63)
+ * @param width Width in bits (1-64)
+ *
+ * @return Extracted number
+ */
+static inline int64_t bdk_extracts(uint64_t input, int lsb, int width)
+{
+ int64_t result = input >> lsb;
+ result <<= 64 - width;
+ result >>= 64 - width;
+ return result;
+}
+
+/**
+ * Extract a signed magnitude value. Signed magnitude is a value where the MSB
+ * is treated as a sign bit, unlike the normal two's complement encoding
+ *
+ * @param v Value to extract from
+ * @param lsb LSB of number
+ * @param msb MSB, which is the signed bit
+ *
+ * @return Extracted number
+ */
+static inline int64_t bdk_extract_smag(uint64_t v, int lsb, int msb) __attribute__((always_inline));
+static inline int64_t bdk_extract_smag(uint64_t v, int lsb, int msb)
+{
+ int64_t r = bdk_extract(v, lsb, msb - lsb);
+ if (v & (1ull << msb))
+ r = -r;
+ return r;
+}
+
+/**
+ * Insert bits into a number
+ *
+ * @param original Original data, before insert
+ * @param input Data to insert
+ * @param lsb Starting bit, least significant (0-63)
+ * @param width Width in bits (1-64)
+ *
+ * @return Number with inserted bits
+ */
+static inline uint64_t bdk_insert(uint64_t original, uint64_t input, int lsb, int width) __attribute__((always_inline));
+static inline uint64_t bdk_insert(uint64_t original, uint64_t input, int lsb, int width)
+{
+ uint64_t mask = bdk_build_mask(width);
+ uint64_t result = original & ~(mask << lsb);
+ result |= (input & mask) << lsb;
+ return result;
+}
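+
+/*
+ * Worked examples for the bit helpers above:
+ *
+ *   bdk_build_mask(4);             // 0xf
+ *   bdk_extract(0x1234, 4, 8);     // 0x23
+ *   bdk_extracts(0xf0, 4, 4);      // -1 (MSB of the 4 bit field is set)
+ *   bdk_insert(0xffff, 0xa, 4, 4); // 0xffaf
+ */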
+
+/**
+ * Return the number of cores available in the chip
+ *
+ * @return Number of cores available
+ */
+static inline int bdk_get_num_cores(bdk_node_t node)
+{
+ uint64_t available = BDK_CSR_READ(node, BDK_RST_PP_AVAILABLE);
+ return bdk_dpop(available);
+}
+
+
+/**
+ * Return true if DRAM has been configured
+ *
+ * @return Boolean
+ */
+static inline int __bdk_is_dram_enabled(bdk_node_t node) __attribute__((always_inline));
+static inline int __bdk_is_dram_enabled(bdk_node_t node)
+{
+ BDK_CSR_INIT(lmcx_ddr_pll_ctl, node, BDK_LMCX_DDR_PLL_CTL(0));
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return lmcx_ddr_pll_ctl.cn83xx.reset_n;
+ else
+ return !lmcx_ddr_pll_ctl.cn9.pll_reset;
+}
+
+/**
+ * Zero a block of memory
+ *
+ * @param start
+ * @param length
+ */
+static inline void bdk_zero_memory(void *start, uint64_t length) __attribute__((always_inline));
+static inline void bdk_zero_memory(void *start, uint64_t length)
+{
+ if (((long)start & BDK_CACHE_LINE_MASK) || (length & BDK_CACHE_LINE_MASK))
+ {
+ /* Use slow memset for unaligned memory */
+ memset(start, 0, length);
+ }
+ else
+ {
+ void *end = start + length;
+ while (start<end)
+ {
+ asm volatile ("dc zva,%0" : : "r"(start));
+ start += BDK_CACHE_LINE_SIZE;
+ }
+ }
+}
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-vrm.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-vrm.h
new file mode 100644
index 0000000000..8e6ec38209
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-vrm.h
@@ -0,0 +1,66 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Interface to the VRM.
+ *
+ * @addtogroup hal
+ * @{
+ */
+
+/**
+ * Initialize the VRM
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_vrm_initialize(bdk_node_t node);
+
+/**
+ * The VRM may be reporting temperature or other issues with the system. Poll the
+ * VRM and handle any throttling or other actions needed.
+ *
+ * @param node Node to poll
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_vrm_poll(bdk_node_t node);
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/device/bdk-device.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/device/bdk-device.h
new file mode 100644
index 0000000000..3ca3f00710
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/device/bdk-device.h
@@ -0,0 +1,259 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * bdk_device_t represents devices connected using ECAMs. These
+ * are discovered by scanning the ECAMs and instantiating devices
+ * for what is found.
+ *
+ * The discovery process for a device is:
+ *
+ * Scan all ECAMs:
+ * 1) Device found on an ECAM that doesn't have a bdk_device_t
+ * 2) bdk_device_t created, put in BDK_DEVICE_STATE_NOT_PROBED state
+ *
+ * For all devices in state BDK_DEVICE_STATE_NOT_PROBED:
+ * 1) Lookup driver probe() function. If not found, skip
+ * 2) Call probe()
+ * 3) Based on probe(), transition to either BDK_DEVICE_STATE_PROBED
+ *    or BDK_DEVICE_STATE_PROBE_FAIL
+ *
+ * For all devices in state BDK_DEVICE_STATE_PROBED:
+ * 1) Lookup driver init() function. If not found, skip
+ * 2) Call init()
+ * 3) Based on init(), transition to either BDK_DEVICE_STATE_READY
+ *    or BDK_DEVICE_STATE_INIT_FAIL
+ *
+ * In general all devices should transition to
+ * BDK_DEVICE_STATE_PROBED before any init() functions are
+ * called. This can be used for synchronization. For example,
+ * the FPA should be functional after a probe() so PKI/PKO can
+ * succeed when calling alloc in init().
+ *
+ * @defgroup device ECAM Attached Devices
+ * @addtogroup device
+ * @{
+ */
+
+/**
+ * Possible states of a device
+ */
+typedef enum
+{
+ BDK_DEVICE_STATE_NOT_PROBED, /* Device is known and offline. We haven't probed it */
+ BDK_DEVICE_STATE_PROBE_FAIL, /* Device failed probing and is offline */
+ BDK_DEVICE_STATE_PROBED, /* Device succeeded probing, about to go online */
+ BDK_DEVICE_STATE_INIT_FAIL, /* Device init call failed, offline */
+ BDK_DEVICE_STATE_READY, /* Device init call success, online */
+} bdk_device_state_t;
+
+/**
+ * The structure of an ECAM BAR entry inside of a device
+ */
+typedef struct
+{
+ uint64_t address; /* Base physical address */
+ uint32_t size2; /* Size in bytes as 2^size */
+ uint32_t flags; /* Type flags for the BAR */
+} bdk_device_bar_t;
+
+/**
+ * Defines the BDK's representation of an ECAM connected device
+ */
+typedef struct
+{
+ char name[16]; /* Name of the device */
+ bdk_device_bar_t bar[4]; /* Device BARs, first for better alignment */
+ bdk_device_state_t state : 8; /* Current state of bdk_device_t */
+ bdk_node_t node : 3; /* Node the device is on */
+ uint8_t ecam : 5; /* ECAM for the device */
+ uint8_t bus; /* ECAM bus number (0-255) */
+ uint8_t dev : 5; /* ECAM device (0-31) */
+ uint8_t func : 3; /* ECAM device function (0-7) */
+ uint32_t id; /* ECAM device ID */
+ uint16_t instance; /* Cavium internal instance number */
+} bdk_device_t;
+#define BDK_NO_DEVICE_INSTANCE 0xffffu
+
+/**
+ * Defines the main entry points for a device driver. Full
+ * definition is in bdk-device.h
+ */
+struct bdk_driver_s;
+
+/**
+ * Called to register a new driver with the bdk-device system. Drivers are probed
+ * and initialized as device are found for them. If devices have already been
+ * added before the driver was registered, the driver will be probed and
+ * initialized before this function returns.
+ *
+ * @param driver Driver functions
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_device_add_driver(struct bdk_driver_s *driver);
+
+/**
+ * Called by the ECAM code when a new device is detected in the system
+ *
+ * @param node Node the ECAM is on
+ * @param ecam ECAM the device is on
+ * @param bus Bus number for the device
+ * @param dev Device number
+ * @param func Function number
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_device_add(bdk_node_t node, int ecam, int bus, int dev, int func);
+
+/**
+ * Rename a device. Called by driver to give devices friendly names
+ *
+ * @param device Device to rename
+ * @param format Printf style format string
+ */
+extern void bdk_device_rename(bdk_device_t *device, const char *format, ...) __attribute__ ((format(printf, 2, 3)));
+
+/**
+ * Called by the ECAM code once all devices have been added
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_device_init(void);
+
+/**
+ * Lookup a device by ECAM ID and internal instance number. This can be used by
+ * one device to find a handle to an associated device. For example, PKI would
+ * use this function to get a handle to the FPA.
+ *
+ * @param node Node to lookup for
+ * @param id ECAM ID
+ * @param instance Cavium internal instance number
+ *
+ * @return Device pointer, or NULL if the device isn't found
+ */
+extern const bdk_device_t *bdk_device_lookup(bdk_node_t node, uint32_t id, int instance);
+
+/**
+ * Read from a device BAR
+ *
+ * @param device Device to read from
+ * @param bar Which BAR to read from (0-3)
+ * @param size Size of the read
+ * @param offset Offset into the BAR
+ *
+ * @return Value read
+ */
+extern uint64_t bdk_bar_read(const bdk_device_t *device, int bar, int size, uint64_t offset);
+
+/**
+ * Write to a device BAR
+ *
+ * @param device Device to write to
+ * @param bar Which BAR to read from (0-3)
+ * @param size Size of the write
+ * @param offset Offset into the BAR
+ * @param value Value to write
+ */
+extern void bdk_bar_write(const bdk_device_t *device, int bar, int size, uint64_t offset, uint64_t value);
+
+/**
+ * This macro makes it easy to define a variable of the correct
+ * type for a BAR.
+ */
+#define BDK_BAR_DEFINE(name, REG) typedef_##REG name
+
+/**
+ * This macro makes it easy to define a variable and initialize it
+ * with a BAR.
+ */
+#define BDK_BAR_INIT(name, device, REG) typedef_##REG name = {.u = bdk_bar_read(device, device_bar_##REG, sizeof(typedef_##REG), REG)}
+
+/**
+ * Macro to read a BAR
+ */
+#define BDK_BAR_READ(device, REG) bdk_bar_read(device, device_bar_##REG, sizeof(typedef_##REG), REG)
+
+/**
+ * Macro to write a BAR
+ */
+#define BDK_BAR_WRITE(device, REG, value) bdk_bar_write(device, device_bar_##REG, sizeof(typedef_##REG), REG, value)
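+
+/* Usage sketch for the macros above (illustrative; BDK_FOO_CTL and its
+   "enable" field are hypothetical stand-ins for a real CSR definition):
+
+       BDK_BAR_INIT(ctl, device, BDK_FOO_CTL); // declares "ctl" and reads the CSR
+       ctl.s.enable = 1;
+       BDK_BAR_WRITE(device, BDK_FOO_CTL, ctl.u);
+ */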
+
+/**
+ * Macro to make a read, modify, and write sequence easy. The "code_block"
+ * should be replaced with a C code block or a comma separated list of
+ * "name.s.field = value", without the quotes.
+ */
+#define BDK_BAR_MODIFY(name, device, REG, code_block) do { \
+ uint64_t _tmp_address = REG; \
+ typedef_##REG name = {.u = bdk_bar_read(device, device_bar_##REG, sizeof(typedef_##REG), _tmp_address)}; \
+ code_block; \
+ bdk_bar_write(device, device_bar_##REG, sizeof(typedef_##REG), _tmp_address, name.u); \
+ } while (0)
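+
+/* Usage sketch: the read-modify-write sequence above collapses to one
+   line (same hypothetical BDK_FOO_CTL register as before):
+
+       BDK_BAR_MODIFY(c, device, BDK_FOO_CTL, c.s.enable = 1);
+ */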
+
+/**
+ * This macro spins on a field waiting for it to reach a value. It
+ * is common in code to need to wait for a specific field in a
+ * REG to match a specific value. Conceptually this macro
+ * expands to:
+ *
+ * 1) Read REG
+ * 2) Check if (REG.s."field" "op" "value")
+ * 3) If #2 isn't true, loop back to #1 unless the timeout has passed.
+ */
+#define BDK_BAR_WAIT_FOR_FIELD(device, REG, field, op, value, timeout_usec) \
+ ({int result; \
+ do { \
+ uint64_t done = bdk_clock_get_count(BDK_CLOCK_TIME) + (uint64_t)timeout_usec * \
+ bdk_clock_get_rate(bdk_numa_local(), BDK_CLOCK_TIME) / 1000000; \
+ typedef_##REG c; \
+ uint64_t _tmp_address = REG; \
+ while (1) \
+ { \
+ c.u = bdk_bar_read(device, device_bar_##REG, sizeof(typedef_##REG), _tmp_address); \
+ if ((c.s.field) op (value)) { \
+ result = 0; \
+ break; \
+ } else if (bdk_clock_get_count(BDK_CLOCK_TIME) > done) { \
+ result = -1; \
+ break; \
+ } else \
+ bdk_thread_yield(); \
+ } \
+ } while (0); \
+ result;})
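+
+/* Usage sketch: poll the "ready" field of a hypothetical BDK_FOO_STATUS
+   register for up to 10000 microseconds; the macro evaluates to 0 on a
+   match and -1 on timeout:
+
+       if (BDK_BAR_WAIT_FOR_FIELD(device, BDK_FOO_STATUS, ready, ==, 1, 10000))
+           return -1; // Timed out waiting for ready
+ */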
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-os/bdk-init.h b/src/vendorcode/cavium/include/bdk/libbdk-os/bdk-init.h
new file mode 100644
index 0000000000..834f8970f1
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-os/bdk-init.h
@@ -0,0 +1,194 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Core initialization functions
+ *
+ * <hr>$Revision: 49448 $<hr>
+ *
+ * @defgroup init Core initialization
+ * @{
+ */
+
+/**
+ * Call this function to take secondary cores out of reset and have
+ * them start running threads
+ *
+ * @param node Node to use in a Numa setup. Can be an exact ID or a special
+ * value.
+ * @param coremask Cores to start. Zero is a shortcut for all.
+ *
+ * @return Zero on success, negative on failure.
+ */
+extern int bdk_init_cores(bdk_node_t node, uint64_t coremask);
+
+/**
+ * Put cores back in reset and power them down
+ *
+ * @param node Node to update
+ * @param coremask Each bit will be a core put in reset. Cores already in reset are unaffected
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_reset_cores(bdk_node_t node, uint64_t coremask);
+
+/**
+ * Call this function to take secondary nodes and cores out of
+ * reset and have them start running threads
+ *
+ * @param skip_cores If non-zero, cores are not started. Only the nodes are set up
+ * @param ccpi_sw_gbaud
+ * If CCPI is in software mode, this is the speed the CCPI QLMs will be configured
+ * for
+ *
+ * @return Zero on success, negative on failure.
+ */
+extern int bdk_init_nodes(int skip_cores, int ccpi_sw_gbaud);
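+
+/* Usage sketch: a master-node boot flow might bring up all nodes and all
+   cores in one call (the gbaud argument is ignored here on the assumption
+   that the CCPI QLMs are strapped for hardware init):
+
+       if (bdk_init_nodes(0, 0))
+           return -1; // Secondary node/core bringup failed
+ */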
+
+/**
+ * Called very early during init of both the master and slave. It performs one
+ * time init of CCPI QLM and link parameters. It must only be called once per
+ * boot.
+ *
+ * @param is_master Non-zero if the caller is the master node
+ */
+extern void __bdk_init_ccpi_early(int is_master);
+
+/**
+ * Brings the CCPI lanes and links into an operational state without performing
+ * node discovery and enumeration. After this function succeeds, CCPI lanes and
+ * links are ready for traffic, but node routing has not been set up.
+ *
+ * Note this function runs on the slave node with the BDK code not at its link
+ * address. Many normal BDK functions do not work properly. Be careful.
+ *
+ * @param is_master Non-zero when run on the master node. Zero when run on the slave
+ * @param gbaud Baud rate to run links at. This is only used if the QLMs are in software init
+ * mode. If they are strapped for hardware init, the strapping speed is used.
+ * @param ccpi_trace Non-zero to enable CCPI tracing. Note that tracing doesn't use the standard
+ * bdk-trace functions. This code runs on the secondary node before we are
+ * multi-node, and the C library doesn't work right.
+ *
+ * @return Zero on success, negative on failure. Zero means CCPI lanes and links are
+ * functional.
+ */
+extern int __bdk_init_ccpi_connection(int is_master, uint64_t gbaud, int ccpi_trace);
+
+/**
+ * Brings the CCPI lanes and links into an operational state without enabling
+ * multi-node operation. Calling this function when the CCPI links are already
+ * up does nothing. This function must return zero before you can go multi-node
+ * by calling __bdk_init_ccpi_multinode().
+ *
+ * @param gbaud Baud rate to run links at. This is only used if the QLMs are in software init
+ * mode. If they are strapped for hardware init, the strapping speed is used.
+ *
+ * @return Zero on success, negative on failure. Zero means all CCPI links are functional.
+ */
+extern int __bdk_init_ccpi_links(uint64_t gbaud) BDK_WEAK;
+
+/**
+ * Once CCPI links are operational, this function transitions the system to a
+ * multi-node setup. Note that this function only performs the low level CCPI
+ * details, not BDK software setup on the other nodes. Call bdk_init_nodes()
+ * for high level access to multi-node.
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int __bdk_init_ccpi_multinode(void) BDK_WEAK;
+
+/**
+ * This function is the first function run on all cores once the
+ * threading system takes over.
+ *
+ * @param arg  First argument passed to the thread entry function
+ * @param arg1 Second argument passed to the thread entry function
+ */
+extern void __bdk_init_main(int arg, void *arg1);
+
+/**
+ * Perform one time initialization for a node. Called for each
+ * node from the master node.
+ */
+extern void __bdk_init_node(bdk_node_t node);
+
+/**
+ * Set the baud rate on a UART
+ *
+ * @param node Node to use in a Numa setup. Can be an exact ID or a special
+ * value.
+ * @param uart UART number to set
+ * @param baudrate Baud rate (9600, 19200, 115200, etc)
+ * @param use_flow_control
+ * Non-zero if hardware flow control should be enabled
+ */
+extern void bdk_set_baudrate(bdk_node_t node, int uart, int baudrate, int use_flow_control);
+
+/**
+ * Get the coremask of the cores actively running the BDK. Doesn't count cores
+ * that aren't booted.
+ *
+ * @param node Node to get the coremask for
+ *
+ * @return 64bit bitmask
+ */
+extern uint64_t bdk_get_running_coremask(bdk_node_t node);
+
+/**
+ * Return the number of cores actively running in the BDK for the given node
+ *
+ * @param node Node to get the core count for
+ *
+ * @return Number of cores running. Doesn't count cores that aren't booted
+ */
+extern int bdk_get_num_running_cores(bdk_node_t node);
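+
+/* Usage sketch: walk the running cores reported by the mask (bit N set
+   means core N is booted and running the BDK):
+
+       uint64_t mask = bdk_get_running_coremask(node);
+       for (int core = 0; core < 64; core++)
+           if (mask & (1ull << core))
+               printf("Core %d is running\n", core);
+ */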
+
+#ifndef BDK_SHOW_BOOT_BANNERS
+#define BDK_SHOW_BOOT_BANNERS 1
+#endif
+
+#define BDK_UART_BAUDRATE 115200
+//#define BDK_UART_BAUDRATE 921600
+
+extern uint64_t __bdk_init_reg_x0; /* The contents of X0 when this image started */
+extern uint64_t __bdk_init_reg_x1; /* The contents of X1 when this image started */
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-os/bdk-thread.h b/src/vendorcode/cavium/include/bdk/libbdk-os/bdk-thread.h
new file mode 100644
index 0000000000..ef62dd7fe3
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-os/bdk-thread.h
@@ -0,0 +1,122 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Functions for controlling threads.
+ *
+ * <hr>$Revision: 49448 $<hr>
+ *
+ * @defgroup thread Threading library
+ * @{
+ */
+
+/* Use a larger stack size for main() as it tends to do lots of
+ extra stuff. For example, DDR init requires a bigger stack */
+#define BDK_THREAD_MAIN_STACK_SIZE 16384
+#define BDK_THREAD_DEFAULT_STACK_SIZE 8192
+
+typedef void (*bdk_thread_func_t)(int arg, void *arg1);
+
+extern int bdk_thread_initialize(void);
+extern void bdk_thread_yield(void);
+extern int bdk_thread_create(bdk_node_t node, uint64_t coremask, bdk_thread_func_t func, int arg0, void *arg1, int stack_size);
+extern void bdk_thread_destroy(void) __attribute__ ((noreturn));
+extern void bdk_thread_first(bdk_thread_func_t func, int arg0, void *arg1, int stack_size) __attribute__ ((noreturn));
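+
+/* Usage sketch (worker_main is a hypothetical entry point; a coremask of
+   zero is assumed to mean "any core", and a stack_size of zero requests
+   the default):
+
+       static void worker_main(int arg, void *arg1)
+       {
+           printf("Worker started with arg=%d\n", arg);
+       }
+
+       if (bdk_thread_create(bdk_numa_local(), 0, worker_main, 0, NULL, 0))
+           return -1; // Could not create the thread
+ */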
+
+/**
+ * Return the number of the core on which the program is currently running.
+ *
+ * @return Core number
+ */
+static inline int bdk_get_core_num(void) __attribute__ ((always_inline));
+static inline int bdk_get_core_num(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ {
+ int mpidr_el1;
+ BDK_MRS(MPIDR_EL1, mpidr_el1);
+ /* Core is 4 bits from AFF0 and rest from AFF1 */
+ int core_num;
+ core_num = mpidr_el1 & 0xf;
+ core_num |= (mpidr_el1 & 0xff00) >> 4;
+ return core_num;
+ }
+ else
+ {
+ uint64_t cvm_pn_el1;
+ BDK_MRS(s3_0_c11_c4_2, cvm_pn_el1);
+ return cvm_pn_el1 & 0xffff;
+ }
+}
+
+/**
+ * Return a mask representing this core in a 64bit bitmask
+ *
+ * @return Bitmask with only the current core's bit set
+ */
+static inline uint64_t bdk_core_to_mask(void) __attribute__ ((always_inline));
+static inline uint64_t bdk_core_to_mask(void)
+{
+ return 1ull << bdk_get_core_num();
+}
+
+static inline int bdk_is_boot_core(void)
+{
+ const int master = 0x80000000 | (bdk_numa_master() << 16);
+ int mpidr_el1;
+ BDK_MRS_NV(MPIDR_EL1, mpidr_el1);
+ return mpidr_el1 == master;
+}
+
+
+static inline void *bdk_thread_get_id(void) __attribute__ ((always_inline));
+static inline void *bdk_thread_get_id(void)
+{
+ uint64_t current;
+ BDK_MRS_NV(TPIDR_EL3, current);
+ /* If we haven't started threading yet use the core number. Add one
+ so the thread id is never zero */
+ if (!current)
+ current = bdk_get_core_num() + 1;
+ return (void*)current;
+}
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-trust/bdk-signed.h b/src/vendorcode/cavium/include/bdk/libbdk-trust/bdk-signed.h
new file mode 100644
index 0000000000..601291c4bb
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-trust/bdk-signed.h
@@ -0,0 +1,94 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Utility functions handling signed and possibly encrypted files
+ *
+ * @defgroup signed Signed File IO
+ * @{
+ */
+
+/**
+ * Enumeration representing the possible data types in a signed file
+ */
+typedef enum
+{
+ BDK_SIGNED_IMAGE, /* BDK code image */
+ BDK_SIGNED_DTS, /* Device tree file */
+ BDK_SIGNED_PUB_KEY, /* Chain of trust public key, BDK proprietary format */
+} bdk_signed_data_t;
+
+/**
+ * Flags to pass to bdk_signed functions
+ */
+typedef enum
+{
+ BDK_SIGNED_FLAG_NONE = 0, /* Good for most files. Verifies as needed for trusted boot */
+ BDK_SIGNED_FLAG_NOT_ENCRYPTED = 1 << 1, /* The file is not encrypted, even with trusted boot */
+ BDK_SIGNED_FLAG_ALLOW_UNSIGNED = 1 << 2, /* File is not signed, even with trusted boot */
+} bdk_signed_flags_t;
+
+/**
+ * Load a file and verify its signature. If the file is encrypted, it is
+ * decrypted. If the file is compressed, it is decompressed.
+ *
+ * @param filename File to load
+ * @param loc Offset into the file for the image. This is normally zero for regular files. Device
+ * files, such as /dev/mem, will use this to locate the image.
+ * @param data_type Type of data in the file, enumerated by bdk_signed_data_t. This is required
+ * so the code can determine the file size before loading the whole file.
+ * @param flags Flags for controlling file loading
+ * @param filesize Set to the size of the file if it loads properly. If the load fails, set to
+ * zero.
+ *
+ * @return Pointer to the data from the file, or NULL on failure
+ */
+extern void *bdk_signed_load(const char *filename, uint64_t loc,
+ bdk_signed_data_t data_type, bdk_signed_flags_t flags, uint64_t *filesize);
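+
+/* Usage sketch (the path is illustrative): load and verify a device tree,
+   letting the trusted-boot state decide whether decryption is needed:
+
+       uint64_t size = 0;
+       void *dtb = bdk_signed_load("/fatfs/board.dtb", 0, BDK_SIGNED_DTS,
+                                   BDK_SIGNED_FLAG_NONE, &size);
+       if (!dtb)
+           return -1; // Load or signature verification failed
+ */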
+
+/**
+ * Load the BDK's public signing key, which is signed by the Root of Trust
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_signed_load_public(void);
+
+/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-trust/bdk-trust.h b/src/vendorcode/cavium/include/bdk/libbdk-trust/bdk-trust.h
new file mode 100644
index 0000000000..693bed042e
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-trust/bdk-trust.h
@@ -0,0 +1,136 @@
+#ifndef __BDK_TRUST_H__
+#define __BDK_TRUST_H__
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Master include file for trusted boot support. Use bdk.h instead
+ * of including this file directly.
+ *
+ * @defgroup trust Trusted boot support
+ */
+
+#include "bdk-signed.h"
+
+typedef enum
+{
+ BDK_TRUST_LEVEL_BROKEN, /* Trust is unknown or was broken during boot. Fatal error state */
+ BDK_TRUST_LEVEL_NONE, /* Untrusted boot */
+ BDK_TRUST_LEVEL_SIGNED, /* Trusted boot verified by ROTPK */
+ BDK_TRUST_LEVEL_SIGNED_SSK, /* Trusted boot with SSK encryption */
+ BDK_TRUST_LEVEL_SIGNED_BSSK,/* Trusted boot with BSSK encryption */
+}
+bdk_trust_level_t;
+
+typedef struct
+{
+ uint64_t total_length;
+ uint32_t s[8];
+} bdk_sha256_state_t;
+
+/**
+ * Start a new SHA256
+ *
+ * @param hash_state Hash state to initialize
+ */
+extern void bdk_sha256_init(bdk_sha256_state_t *hash_state);
+
+/**
+ * Update SHA256 for a data block
+ *
+ * @param hash_state Hash state
+ * @param data Data to hash
+ * @param size Size of the data in bytes
+ */
+extern void bdk_sha256_update(bdk_sha256_state_t *hash_state, const void *data, int size);
+
+/**
+ * Finish a SHA256
+ *
+ * @param hash_state Hash state
+ *
+ * @return Pointer to the 32 byte SHA256 digest
+ */
+extern void *bdk_sha256_finish(bdk_sha256_state_t *hash_state);
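+
+/* Usage sketch: the three calls above chain as init/update/finish, with
+   multiple updates allowed for streaming data:
+
+       bdk_sha256_state_t state;
+       bdk_sha256_init(&state);
+       bdk_sha256_update(&state, buffer, buffer_size);
+       const void *digest = bdk_sha256_finish(&state); // 32 byte digest
+ */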
+
+/**
+ * Perform AES128 encryption with CBC
+ *
+ * @param key Key to use for encryption. Should be a pointer to key memory.
+ * @param data Data to encrypt
+ * @param size Size of the data in bytes. Must be a multiple of 16
+ * @param iv Initial vector. Set to 16 zero bytes for start, then use to chain multiple
+ * calls.
+ */
+extern void bdk_aes128cbc_encrypt(const void *key, void *data, int size, void *iv);
+
+/**
+ * Perform AES128 decryption with CBC
+ *
+ * @param key Key to use for decryption. Should be a pointer to key memory.
+ * @param data Data to decrypt
+ * @param size Size of the data in bytes. Must be a multiple of 16
+ * @param iv Initial vector. Set to 16 zero bytes for start, then use to chain multiple
+ * calls.
+ */
+extern void bdk_aes128cbc_decrypt(const void *key, void *data, int size, void *iv);
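+
+/* Usage sketch: per the parameter descriptions above, CBC chaining across
+   calls works by reusing the IV buffer (the key format is treated as
+   opaque key memory here):
+
+       uint8_t iv[16] = { 0 };
+       bdk_aes128cbc_decrypt(key, chunk0, chunk0_len, iv);
+       bdk_aes128cbc_decrypt(key, chunk1, chunk1_len, iv); // continues the chain
+ */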
+
+/**
+ * Called by boot stub (TBL1FW) to initialize the state of trust
+ */
+extern void __bdk_trust_init(void);
+
+/**
+ * Returns the current level of trust. Must be called after
+ * __bdk_trust_init()
+ *
+ * @return Enumerated trust level, see bdk_trust_level_t
+ */
+extern bdk_trust_level_t bdk_trust_get_level(void);
+
+/**
+ * Return the current secure NV counter stored in the fuses
+ *
+ * @return NV counter (0-31)
+ */
+extern int bdk_trust_get_nv_counter(void);
+
+#endif
diff --git a/src/vendorcode/cavium/include/bdk/libdram/libdram-config.h b/src/vendorcode/cavium/include/bdk/libdram/libdram-config.h
new file mode 100644
index 0000000000..9415d09f4e
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libdram/libdram-config.h
@@ -0,0 +1,262 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#ifndef __LIBDRAM_CONFIG_H__
+#define __LIBDRAM_CONFIG_H__
+
+#define DDR_CFG_T_MAX_DIMMS 2 /* ThunderX supports a max of two DIMMs per LMC */
+
+/* Structure that provides DIMM information, either in the form of an SPD TWSI
+ address, or a pointer to an array that contains SPD data. One of the two
+ fields must be valid. Note that these fields historically had dimension 2, left
+ over from CN38XX/CN58XX. These chips supported a 128 bit wide LMC, requiring
+ two DIMMs. All other chips use a 64bit wide LMC with multiple LMCs. All
+ Thunder chips use one DIMM for 64bits, so we no longer use an index */
+typedef struct {
+ uint16_t spd_addr; /* TWSI address of SPD, 0 if not used */
+ const uint8_t *spd_ptr; /* pointer to SPD data array, NULL if not used */
+} dimm_config_t;
+
+typedef struct {
+ uint8_t odt_ena; /* FIX: dqx_ctl for Octeon 3 DDR4 */
+ uint64_t odt_mask; /* FIX: wodt_mask for Octeon 3 */
+ bdk_lmcx_modereg_params1_t odt_mask1;
+ bdk_lmcx_modereg_params2_t odt_mask2; /* FIX: needed for DDR4 */
+ uint8_t qs_dic; /* FIX: rodt_ctl for Octeon 3 */
+ uint64_t rodt_ctl; /* FIX: rodt_mask for Octeon 3 */
+} dimm_odt_config_t;
+
+/*
+ The parameters below make up the custom_lmc_config data structure.
+ This structure is used to customize the way that the LMC DRAM
+ Controller is configured for a particular board design.
+
+ The HRM describes LMC Read Leveling which supports automatic
+ selection of per byte-lane delays. When measuring the read delays
+ the LMC configuration software sweeps through a range of settings
+ for LMC0_COMP_CTL2[RODT_CTL], the Octeon II on-die-termination
+ resistance and LMC0_MODEREG_PARAMS1[RTT_NOM_XX], the DRAM
+ on-die-termination resistance. The minimum and maximum parameters
+ for rtt_nom_idx and rodt_ctl listed below determine the ranges of
+ ODT settings used for the measurements. Note that for rtt_nom an
+ index is used into a sorted table rather than the direct csr setting
+ in order to optimize the sweep.
+
+ .min_rtt_nom_idx: 1=120ohms, 2=60ohms, 3=40ohms, 4=30ohms, 5=20ohms
+ .max_rtt_nom_idx: 1=120ohms, 2=60ohms, 3=40ohms, 4=30ohms, 5=20ohms
+ .min_rodt_ctl: 1=20ohms, 2=30ohms, 3=40ohms, 4=60ohms, 5=120ohms
+ .max_rodt_ctl: 1=20ohms, 2=30ohms, 3=40ohms, 4=60ohms, 5=120ohms
+
+ The settings below control the Octeon II drive strength for the CK,
+ ADD/CMD, and DQ/DQS signals. 1=24ohms, 2=26.67ohms, 3=30ohms,
+ 4=34.3ohms, 5=40ohms, 6=48ohms, 7=60ohms.
+
+ .dqx_ctl: Drive strength control for DDR_DQX/DDR_DQS_X_P/N drivers.
+ .ck_ctl: Drive strength control for DDR_CK_X_P/DDR_DIMMX_CSX_L/DDR_DIMMX_ODT_X drivers.
+ .cmd_ctl: Drive strength control for CMD/A/RESET_L/CKEX drivers.
+
+ The LMC controller software selects the optimal CAS latency
+ that complies with the appropriate SPD values and the frequency
+ at which the DRAMs are being operated. When operating the DRAMs at
+ frequencies substantially lower than their rated frequencies, it
+ might be necessary to limit the minimum CAS Latency the LMC
+ controller software is allowed to select in order to make the DRAM
+ work reliably.
+
+ .min_cas_latency: Minimum allowed CAS Latency
+
+
+ The value used for LMC0_RLEVEL_CTL[OFFSET_EN] determines how the
+ read-leveling information that the Octeon II gathers is interpreted
+ to determine the per-byte read delays.
+
+ .offset_en: Value used for LMC0_RLEVEL_CTL[OFFSET_EN].
+ .offset_udimm: Value used for LMC0_RLEVEL_CTL[OFFSET] for UDIMMS.
+ .offset_rdimm: Value used for LMC0_RLEVEL_CTL[OFFSET] for RDIMMS.
+
+
+ The LMC configuration software sweeps through a range of ODT
+ settings while measuring the per-byte read delays. During those
+ measurements the software makes an assessment of the quality of the
+ measurements in order to determine which measurements provide the
+ most accurate delays. The automatic settings provide the option to
+ allow that same assessment to determine the most optimal RODT_CTL
+ and/or RTT_NOM settings.
+
+ The automatic approach might provide the best means to determine
+ the settings used for initial poweron of a new design. However,
+ the final settings should be determined by board analysis, testing,
+ and experience.
+
+ .ddr_rtt_nom_auto: 1 means automatically set RTT_NOM value.
+ .ddr_rodt_ctl_auto: 1 means automatically set RODT_CTL value.
+
+ .rlevel_compute: Enables software interpretation of per-byte read
+ delays using the measurements collected by the
+ Octeon II rather than completely relying on the
+ Octeon II to determine the delays. 1=software
+ computation is recommended since a more complete
+ analysis is implemented in software.
+
+ .rlevel_comp_offset: Set to 2 unless instructed differently by Cavium.
+
+ .rlevel_average_loops: Determines the number of times the read-leveling
+ sequence is run for each rank. The results are
+ then averaged across the number of loops. The
+ default setting is 1.
+
+ .ddr2t_udimm:
+ .ddr2t_rdimm: Turn on the DDR 2T mode. 2-cycle window for CMD and
+ address. This mode helps relieve setup time pressure
+ on the address and command bus. Please refer to
+ Micron's tech note tn_47_01 titled DDR2-533 Memory
+ Design Guide for Two Dimm Unbuffered Systems for
+ physical details.
+
+ .disable_sequential_delay_check: As a result of the flyby topology
+ prescribed in the JEDEC specifications, the byte delays should
+ maintain a consistent increasing or decreasing trend across
+ the bytes on standard DIMMs. This setting can be used to disable
+ that check for unusual circumstances where the check is not
+ useful.
+
+ .maximum_adjacent_rlevel_delay_increment: An additional sequential
+ delay check for the delays that result from the flyby
+ topology. This value specifies the maximum difference between
+ the delays of adjacent bytes. A value of 0 disables this
+ check.
+
+ .fprch2 Front Porch Enable: When set, the turn-off
+ time for the default DDR_DQ/DQS drivers is FPRCH2 CKs earlier.
+ 00 = 0 CKs
+ 01 = 1 CKs
+ 10 = 2 CKs
+
+ .parity: The parity input signal PAR_IN on each dimm must be
+ strapped high or low on the board. This bit is programmed
+ into LMC0_DIMM_CTL[PARITY] and it must be set to match the
+ board strapping. This signal is typically strapped low.
+
+ .mode32b: Enable 32-bit datapath mode. Set to 1 if only 32 DQ pins
+ are used. (cn61xx, cn71xx)
+
+ .dll_write_offset: FIXME: Add description
+ .dll_read_offset: FIXME: Add description
+ */
+
+
+typedef struct {
+ const char *part;
+ int speed;
+ uint64_t rlevel_rank[4][4];
+} rlevel_table_t;
+
+typedef struct {
+ uint8_t min_rtt_nom_idx;
+ uint8_t max_rtt_nom_idx;
+ uint8_t min_rodt_ctl;
+ uint8_t max_rodt_ctl;
+ //uint8_t dqx_ctl;
+ uint8_t ck_ctl;
+ uint8_t cmd_ctl;
+ uint8_t ctl_ctl;
+ uint8_t min_cas_latency;
+ uint8_t offset_en;
+ uint8_t offset_udimm;
+ uint8_t offset_rdimm;
+ uint8_t rlevel_compute;
+ uint8_t ddr_rtt_nom_auto;
+ uint8_t ddr_rodt_ctl_auto;
+ uint8_t rlevel_comp_offset_udimm;
+ uint8_t rlevel_comp_offset_rdimm;
+ uint8_t rlevel_average_loops;
+ uint8_t ddr2t_udimm;
+ uint8_t ddr2t_rdimm;
+ uint8_t disable_sequential_delay_check;
+ uint8_t maximum_adjacent_rlevel_delay_increment;
+ uint8_t parity;
+ uint8_t fprch2;
+ uint8_t mode32b;
+ uint8_t measured_vref;
+ const int8_t *dll_write_offset; /* Indexed by byte number (0-8, includes ecc byte) */
+ const int8_t *dll_read_offset; /* Indexed by byte number (0-8, includes ecc byte) */
+ const rlevel_table_t *rlevel_table; /* Only used if ENABLE_CUSTOM_RLEVEL_TABLE. List of DIMMs to check */
+} ddr3_custom_config_t;
+
+typedef struct {
+ dimm_config_t dimm_config_table[DDR_CFG_T_MAX_DIMMS]; /* Indexed by DIMM */
+ dimm_odt_config_t odt_1rank_config[DDR_CFG_T_MAX_DIMMS]; /* Indexed by number of DIMMs minus 1 */
+ dimm_odt_config_t odt_2rank_config[DDR_CFG_T_MAX_DIMMS]; /* Indexed by number of DIMMs minus 1 */
+ dimm_odt_config_t odt_4rank_config[DDR_CFG_T_MAX_DIMMS]; /* Indexed by number of DIMMs minus 1 */
+ ddr3_custom_config_t custom_lmc_config;
+} ddr_configuration_t;
+
+typedef struct {
+ const char *name;
+ ddr_configuration_t config[4]; /* Indexed by LMC */
+ int ddr_clock_hertz;
+} dram_config_t;
+
+extern int libdram_config(int node, const dram_config_t *dram_config, int ddr_clock_override);
+extern int libdram_tune(int node);
+//extern int libdram_margin_write_voltage(int node);
+//extern int libdram_margin_read_voltage(int node);
+//extern int libdram_margin_read_timing(int node);
+//extern int libdram_margin_write_timing(int node);
+extern int libdram_margin(int node);
+extern uint32_t libdram_get_freq(int node);
+extern uint32_t libdram_get_freq_from_pll(int node, int lmc);
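+
+/* Initialization sketch (all values illustrative; a real board config
+   also fills the ODT tables and custom_lmc_config):
+
+       static const dram_config_t my_cfg = {
+           .name = "example-board",
+           .config = {
+               [0] = { .dimm_config_table = { [0] = { .spd_addr = 0x50 } } },
+           },
+           .ddr_clock_hertz = 800000000,
+       };
+       int result = libdram_config(0, &my_cfg, 0); // node 0, no clock override
+ */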
+
+/**
+ * Load a DRAM configuration based on the current bdk-config settings
+ *
+ * @param node Node the DRAM config is for
+ *
+ * @return Pointer to __libdram_global_cfg, a global structure. Returns NULL if bdk-config
+ * lacks information about DRAM.
+ */
+extern const dram_config_t* libdram_config_load(bdk_node_t node);
+
+/* The various DRAM configs in the libdram/configs directory need space
+ to store the DRAM config. Since only one config is ever in active use
+ at a time, store the configs in __libdram_global_cfg. In a multi-node
+ setup, independent calls to get the DRAM config will load first node 0's
+ config, then node 1's */
+extern dram_config_t __libdram_global_cfg;
+
+#endif /* __LIBDRAM_CONFIG_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libdram/libdram.h b/src/vendorcode/cavium/include/bdk/libdram/libdram.h
new file mode 100644
index 0000000000..ff6fec69c8
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libdram/libdram.h
@@ -0,0 +1,51 @@
+#ifndef __LIBDRAM_H__
+#define __LIBDRAM_H__
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * This file defines the external interface for libdram, the
+ * library supporting DRAM initialization on Cavium's THUNDERX
+ * line of chips. This is the only header file in the libdram
+ * directory that applications should include.
+ */
+
+#include "libdram-config.h"
+
+#endif /* __LIBDRAM_H__ */