aboutsummaryrefslogtreecommitdiff
path: root/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-gsern.h
diff options
context:
space:
mode:
Diffstat (limited to 'src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-gsern.h')
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-gsern.h20807
1 files changed, 20807 insertions, 0 deletions
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-gsern.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-gsern.h
new file mode 100644
index 0000000000..d1b23fccff
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-gsern.h
@@ -0,0 +1,20807 @@
+#ifndef __BDK_CSRS_GSERN_H__
+#define __BDK_CSRS_GSERN_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium GSERN.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Enumeration gsern_bar_e
+ *
+ * GSER Base Address Register Enumeration
+ * Enumerates the base address registers.
+ */
+/* BAR0 of GSERN module {a}: 16 MB (0x1000000) stride per module; BAR window size is 1 MB (0x100000). */
+#define BDK_GSERN_BAR_E_GSERNX_PF_BAR0(a) (0x87e090000000ll + 0x1000000ll * (a))
+#define BDK_GSERN_BAR_E_GSERNX_PF_BAR0_SIZE 0x100000ull
+
+/**
+ * Enumeration gsern_psb_acc_e
+ *
+ * GSERN Power Serial Bus Accumulator Enumeration
+ * Enumerates the GSERN accumulators for LMC slaves, which correspond to index {b} of
+ * PSBS_SYS()_ACCUM().
+ */
+/* NOTE(review): accumulator names are still placeholders ("TBD") in this auto-generated revision. */
+#define BDK_GSERN_PSB_ACC_E_TBD0 (0)
+#define BDK_GSERN_PSB_ACC_E_TBD1 (1)
+#define BDK_GSERN_PSB_ACC_E_TBD2 (2)
+#define BDK_GSERN_PSB_ACC_E_TBD3 (3)
+
+/**
+ * Enumeration gsern_psb_event_e
+ *
+ * GSERN Power Serial Bus Event Enumeration
+ * Enumerates the event numbers for GSERN slaves, which correspond to index {b} of
+ * PSBS_SYS()_EVENT()_CFG.
+ */
+/* NOTE(review): event name is still a placeholder ("TBD") in this auto-generated revision. */
+#define BDK_GSERN_PSB_EVENT_E_TBD0 (0)
+
+/**
+ * Register (RSL) gsern#_common_bias_bcfg
+ *
+ * GSER Common Bias Base Configuration Register
+ */
+union bdk_gsernx_common_bias_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_common_bias_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t dac1 : 4; /**< [ 35: 32](R/W) Ir25 reference current trim. Default setting (0x8) selects 0% trim. Minimum and
+ Maximum settings allow for up to + or - 12.5% trim. For debug use only. */
+ uint64_t reserved_28_31 : 4;
+ uint64_t dac0 : 4; /**< [ 27: 24](R/W) Ic25 reference current trim. Default setting (0x8) selects 0% trim. Minimum and
+ Maximum settings allow for up to + or - 12.5% trim. For debug use only. */
+ uint64_t reserved_18_23 : 6;
+ uint64_t bias : 2; /**< [ 17: 16](R/W) Opamp bias current setting. For debug use only.
+ 0x0 = 33 uA.
+ 0x1 = 25 uA.
+ 0x2 = 20 uA.
+ 0x3 = 17 uA. */
+ uint64_t reserved_9_15 : 7;
+ uint64_t bypass : 1; /**< [ 8: 8](R/W) Assert to bypass the bandgap reference and use a resistive divider from VDDA
+ instead. For diagnostic use only. */
+ uint64_t reserved_1_7 : 7;
+ uint64_t bias_pwdn : 1; /**< [ 0: 0](R/W) Bias current power down control. */
+#else /* Word 0 - Little Endian */
+ uint64_t bias_pwdn : 1; /**< [ 0: 0](R/W) Bias current power down control. */
+ uint64_t reserved_1_7 : 7;
+ uint64_t bypass : 1; /**< [ 8: 8](R/W) Assert to bypass the bandgap reference and use a resistive divider from VDDA
+ instead. For diagnostic use only. */
+ uint64_t reserved_9_15 : 7;
+ uint64_t bias : 2; /**< [ 17: 16](R/W) Opamp bias current setting. For debug use only.
+ 0x0 = 33 uA.
+ 0x1 = 25 uA.
+ 0x2 = 20 uA.
+ 0x3 = 17 uA. */
+ uint64_t reserved_18_23 : 6;
+ uint64_t dac0 : 4; /**< [ 27: 24](R/W) Ic25 reference current trim. Default setting (0x8) selects 0% trim. Minimum and
+ Maximum settings allow for up to + or - 12.5% trim. For debug use only. */
+ uint64_t reserved_28_31 : 4;
+ uint64_t dac1 : 4; /**< [ 35: 32](R/W) Ir25 reference current trim. Default setting (0x8) selects 0% trim. Minimum and
+ Maximum settings allow for up to + or - 12.5% trim. For debug use only. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_common_bias_bcfg_s cn; */
+};
+typedef union bdk_gsernx_common_bias_bcfg bdk_gsernx_common_bias_bcfg_t;
+
+/* Register address: 0x87e0900f0330 + 0x1000000 (16 MB) per module; CN9XXX only, module index a in 0..7 (masked to 3 bits). */
+static inline uint64_t BDK_GSERNX_COMMON_BIAS_BCFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_COMMON_BIAS_BCFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0900f0330ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("GSERNX_COMMON_BIAS_BCFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_COMMON_BIAS_BCFG(a) bdk_gsernx_common_bias_bcfg_t
+#define bustype_BDK_GSERNX_COMMON_BIAS_BCFG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_COMMON_BIAS_BCFG(a) "GSERNX_COMMON_BIAS_BCFG"
+#define device_bar_BDK_GSERNX_COMMON_BIAS_BCFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_COMMON_BIAS_BCFG(a) (a)
+#define arguments_BDK_GSERNX_COMMON_BIAS_BCFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gsern#_common_const
+ *
+ * GSER Common Constants Register
+ */
+union bdk_gsernx_common_const
+{
+ uint64_t u;
+ struct bdk_gsernx_common_const_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_common_const_s cn; */
+};
+typedef union bdk_gsernx_common_const bdk_gsernx_common_const_t;
+
+/* Register address: 0x87e0900f0088 + 0x1000000 (16 MB) per module; CN9XXX only, module index a in 0..7 (masked to 3 bits). */
+static inline uint64_t BDK_GSERNX_COMMON_CONST(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_COMMON_CONST(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0900f0088ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("GSERNX_COMMON_CONST", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_COMMON_CONST(a) bdk_gsernx_common_const_t
+#define bustype_BDK_GSERNX_COMMON_CONST(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_COMMON_CONST(a) "GSERNX_COMMON_CONST"
+#define device_bar_BDK_GSERNX_COMMON_CONST(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_COMMON_CONST(a) (a)
+#define arguments_BDK_GSERNX_COMMON_CONST(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gsern#_common_const1
+ *
+ * GSER Common Constants Register 1
+ */
+union bdk_gsernx_common_const1
+{
+ uint64_t u;
+ struct bdk_gsernx_common_const1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t number_lanes : 4; /**< [ 3: 0](RO/H) The number of lanes in this module, e.g., 4 for a QLM or 2 for a DLM.
+ Internal:
+ FIXME reset value 4 (done). Add reset_matches_size (not done). Note: for dlm
+ tieoffs will set reset value to 2. */
+#else /* Word 0 - Little Endian */
+ uint64_t number_lanes : 4; /**< [ 3: 0](RO/H) The number of lanes in this module, e.g., 4 for a QLM or 2 for a DLM.
+ Internal:
+ FIXME reset value 4 (done). Add reset_matches_size (not done). Note: for dlm
+ tieoffs will set reset value to 2. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_common_const1_s cn; */
+};
+typedef union bdk_gsernx_common_const1 bdk_gsernx_common_const1_t;
+
+/* Register address: 0x87e0900f0110 + 0x1000000 (16 MB) per module; CN9XXX only, module index a in 0..7 (masked to 3 bits). */
+static inline uint64_t BDK_GSERNX_COMMON_CONST1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_COMMON_CONST1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0900f0110ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("GSERNX_COMMON_CONST1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_COMMON_CONST1(a) bdk_gsernx_common_const1_t
+#define bustype_BDK_GSERNX_COMMON_CONST1(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_COMMON_CONST1(a) "GSERNX_COMMON_CONST1"
+#define device_bar_BDK_GSERNX_COMMON_CONST1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_COMMON_CONST1(a) (a)
+#define arguments_BDK_GSERNX_COMMON_CONST1(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gsern#_common_eco
+ *
+ * INTERNAL: GSER Common ECO Register
+ */
+union bdk_gsernx_common_eco
+{
+ uint64_t u;
+ struct bdk_gsernx_common_eco_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t eco_rw : 62; /**< [ 63: 2](R/W) Internal:
+ Reserved for ECO use. */
+ uint64_t eco_rw_pll : 2; /**< [ 1: 0](R/W) Internal:
+ Pre-connected to the PLL. Reserved for ECO use. */
+#else /* Word 0 - Little Endian */
+ uint64_t eco_rw_pll : 2; /**< [ 1: 0](R/W) Internal:
+ Pre-connected to the PLL. Reserved for ECO use. */
+ uint64_t eco_rw : 62; /**< [ 63: 2](R/W) Internal:
+ Reserved for ECO use. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_common_eco_s cn; */
+};
+typedef union bdk_gsernx_common_eco bdk_gsernx_common_eco_t;
+
+/* Register address: 0x87e0900f0770 + 0x1000000 (16 MB) per module; CN9XXX only, module index a in 0..7 (masked to 3 bits). */
+static inline uint64_t BDK_GSERNX_COMMON_ECO(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_COMMON_ECO(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0900f0770ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("GSERNX_COMMON_ECO", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_COMMON_ECO(a) bdk_gsernx_common_eco_t
+#define bustype_BDK_GSERNX_COMMON_ECO(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_COMMON_ECO(a) "GSERNX_COMMON_ECO"
+#define device_bar_BDK_GSERNX_COMMON_ECO(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_COMMON_ECO(a) (a)
+#define arguments_BDK_GSERNX_COMMON_ECO(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gsern#_common_init_bsts
+ *
+ * GSER Common Initialization Base-level Status Register
+ */
+union bdk_gsernx_common_init_bsts
+{
+ uint64_t u;
+ struct bdk_gsernx_common_init_bsts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_20_63 : 44;
+ uint64_t pll_cp_cal : 4; /**< [ 19: 16](RO/H) PLL calibration state machine's resulting charge pump setting. Only
+ valid if [CAL_READY] is set. */
+ uint64_t reserved_13_15 : 3;
+ uint64_t pll_band_cal : 5; /**< [ 12: 8](RO/H) PLL calibration state machine's resulting VCO band setting. Only valid
+ if [CAL_READY] is set. */
+ uint64_t reserved_7 : 1;
+ uint64_t deep_idle : 1; /**< [ 6: 6](RO/H) PLL reset state machine state is deep idle. */
+ uint64_t rst_sm_complete : 1; /**< [ 5: 5](RO/H) PLL reset state machine has completed. If
+ [RST_SM_COMPLETE] is set and [RST_SM_READY] is not, there may still
+ be CSR register settings preventing the PLL from being ready
+ for use, e.g., power-down or reset overrides. */
+ uint64_t rst_sm_ready : 1; /**< [ 4: 4](RO/H) PLL reset state machine status indicating that the reset
+ sequence has completed and this PLL is ready for use. */
+ uint64_t lock : 1; /**< [ 3: 3](RO/H) PLL lock status; only valid if [LOCK_READY] is set. */
+ uint64_t lock_ready : 1; /**< [ 2: 2](RO/H) PLL lock status check is complete following most recent PLL
+ reset or assertion of GSERN()_COMMON_RST_BCFG[LOCK_CHECK]. */
+ uint64_t cal_fail : 1; /**< [ 1: 1](RO/H) PLL calibration failed; valid only if [CAL_READY] is set. */
+ uint64_t cal_ready : 1; /**< [ 0: 0](RO/H) PLL calibration completed. */
+#else /* Word 0 - Little Endian */
+ uint64_t cal_ready : 1; /**< [ 0: 0](RO/H) PLL calibration completed. */
+ uint64_t cal_fail : 1; /**< [ 1: 1](RO/H) PLL calibration failed; valid only if [CAL_READY] is set. */
+ uint64_t lock_ready : 1; /**< [ 2: 2](RO/H) PLL lock status check is complete following most recent PLL
+ reset or assertion of GSERN()_COMMON_RST_BCFG[LOCK_CHECK]. */
+ uint64_t lock : 1; /**< [ 3: 3](RO/H) PLL lock status; only valid if [LOCK_READY] is set. */
+ uint64_t rst_sm_ready : 1; /**< [ 4: 4](RO/H) PLL reset state machine status indicating that the reset
+ sequence has completed and this PLL is ready for use. */
+ uint64_t rst_sm_complete : 1; /**< [ 5: 5](RO/H) PLL reset state machine has completed. If
+ [RST_SM_COMPLETE] is set and [RST_SM_READY] is not, there may still
+ be CSR register settings preventing the PLL from being ready
+ for use, e.g., power-down or reset overrides. */
+ uint64_t deep_idle : 1; /**< [ 6: 6](RO/H) PLL reset state machine state is deep idle. */
+ uint64_t reserved_7 : 1;
+ uint64_t pll_band_cal : 5; /**< [ 12: 8](RO/H) PLL calibration state machine's resulting VCO band setting. Only valid
+ if [CAL_READY] is set. */
+ uint64_t reserved_13_15 : 3;
+ uint64_t pll_cp_cal : 4; /**< [ 19: 16](RO/H) PLL calibration state machine's resulting charge pump setting. Only
+ valid if [CAL_READY] is set. */
+ uint64_t reserved_20_63 : 44;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_common_init_bsts_s cn; */
+};
+typedef union bdk_gsernx_common_init_bsts bdk_gsernx_common_init_bsts_t;
+
+/* Register address: 0x87e0900f05d8 + 0x1000000 (16 MB) per module; CN9XXX only, module index a in 0..7 (masked to 3 bits). */
+static inline uint64_t BDK_GSERNX_COMMON_INIT_BSTS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_COMMON_INIT_BSTS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0900f05d8ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("GSERNX_COMMON_INIT_BSTS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_COMMON_INIT_BSTS(a) bdk_gsernx_common_init_bsts_t
+#define bustype_BDK_GSERNX_COMMON_INIT_BSTS(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_COMMON_INIT_BSTS(a) "GSERNX_COMMON_INIT_BSTS"
+#define device_bar_BDK_GSERNX_COMMON_INIT_BSTS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_COMMON_INIT_BSTS(a) (a)
+#define arguments_BDK_GSERNX_COMMON_INIT_BSTS(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gsern#_common_pll_1_bcfg
+ *
+ * GSER Common PLL Base Configuration Register 1
+ */
+union bdk_gsernx_common_pll_1_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_common_pll_1_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_62_63 : 2;
+ uint64_t cal_cp_mult : 2; /**< [ 61: 60](R/W) PLL cal charge pump mult control. */
+ uint64_t cp : 4; /**< [ 59: 56](R/W) PLL charge pump configuration. */
+ uint64_t cp_overide : 1; /**< [ 55: 55](R/W) PLL charge pump override. */
+ uint64_t band_ppm : 2; /**< [ 54: 53](R/W) PLL band ppm setting. */
+ uint64_t band : 5; /**< [ 52: 48](R/W/H) PLL manual PLL band inputs; only effective if [BAND_OVERIDE] set. */
+ uint64_t band_limits : 3; /**< [ 47: 45](R/W) Band limits for the PLL calibration procedure. */
+ uint64_t band_overide : 1; /**< [ 44: 44](R/W/H) Bypass PLL calibration and set PLL band with band field inputs. */
+ uint64_t bg_div16 : 1; /**< [ 43: 43](R/W) Enable divide by 16 of reference clock to the band gap. */
+ uint64_t bg_clk_en : 1; /**< [ 42: 42](R/W) Enable chopping in the band gap circuit. */
+ uint64_t dither_en : 1; /**< [ 41: 41](R/W) Enable the dithering bit of sigma delta modulator. */
+ uint64_t cal_sel : 1; /**< [ 40: 40](R/W) PLL calibration method select. */
+ uint64_t vco_sel : 1; /**< [ 39: 39](R/W) PLL select one of the two VCOs in the PLL. */
+ uint64_t sdm_en : 1; /**< [ 38: 38](R/W) Enable PLL fractional-N operation. */
+ uint64_t reserved_36_37 : 2;
+ uint64_t post_div : 9; /**< [ 35: 27](R/W) Forward PLL divider. Used in conjunction with [DIV_N] to set the
+ PLL frequency given a reference clock frequency. The output frequency will
+ be the VCO frequency divided by [POST_DIV]. Divider range is
+ between 8 - 511. If a number less than 8 is selected it will be added to
+ the minimum value of 8. For example, if 2 is specified the value will be
+ interpreted to be 10. */
+ uint64_t div_n : 9; /**< [ 26: 18](R/W) PLL feedback divider integer portion. */
+ uint64_t div_f : 18; /**< [ 17: 0](R/W) PLL feedback divider fractional portion (divide by 2^18 to find fraction, e.g., 2621 is
+ ~10,000 ppm). */
+#else /* Word 0 - Little Endian */
+ uint64_t div_f : 18; /**< [ 17: 0](R/W) PLL feedback divider fractional portion (divide by 2^18 to find fraction, e.g., 2621 is
+ ~10,000 ppm). */
+ uint64_t div_n : 9; /**< [ 26: 18](R/W) PLL feedback divider integer portion. */
+ uint64_t post_div : 9; /**< [ 35: 27](R/W) Forward PLL divider. Used in conjunction with [DIV_N] to set the
+ PLL frequency given a reference clock frequency. The output frequency will
+ be the VCO frequency divided by [POST_DIV]. Divider range is
+ between 8 - 511. If a number less than 8 is selected it will be added to
+ the minimum value of 8. For example, if 2 is specified the value will be
+ interpreted to be 10. */
+ uint64_t reserved_36_37 : 2;
+ uint64_t sdm_en : 1; /**< [ 38: 38](R/W) Enable PLL fractional-N operation. */
+ uint64_t vco_sel : 1; /**< [ 39: 39](R/W) PLL select one of the two VCOs in the PLL. */
+ uint64_t cal_sel : 1; /**< [ 40: 40](R/W) PLL calibration method select. */
+ uint64_t dither_en : 1; /**< [ 41: 41](R/W) Enable the dithering bit of sigma delta modulator. */
+ uint64_t bg_clk_en : 1; /**< [ 42: 42](R/W) Enable chopping in the band gap circuit. */
+ uint64_t bg_div16 : 1; /**< [ 43: 43](R/W) Enable divide by 16 of reference clock to the band gap. */
+ uint64_t band_overide : 1; /**< [ 44: 44](R/W/H) Bypass PLL calibration and set PLL band with band field inputs. */
+ uint64_t band_limits : 3; /**< [ 47: 45](R/W) Band limits for the PLL calibration procedure. */
+ uint64_t band : 5; /**< [ 52: 48](R/W/H) PLL manual PLL band inputs; only effective if [BAND_OVERIDE] set. */
+ uint64_t band_ppm : 2; /**< [ 54: 53](R/W) PLL band ppm setting. */
+ uint64_t cp_overide : 1; /**< [ 55: 55](R/W) PLL charge pump override. */
+ uint64_t cp : 4; /**< [ 59: 56](R/W) PLL charge pump configuration. */
+ uint64_t cal_cp_mult : 2; /**< [ 61: 60](R/W) PLL cal charge pump mult control. */
+ uint64_t reserved_62_63 : 2;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_common_pll_1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_common_pll_1_bcfg bdk_gsernx_common_pll_1_bcfg_t;
+
+/* Register address: 0x87e0900f0220 + 0x1000000 (16 MB) per module; CN9XXX only, module index a in 0..7 (masked to 3 bits). */
+static inline uint64_t BDK_GSERNX_COMMON_PLL_1_BCFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_COMMON_PLL_1_BCFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0900f0220ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("GSERNX_COMMON_PLL_1_BCFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_COMMON_PLL_1_BCFG(a) bdk_gsernx_common_pll_1_bcfg_t
+#define bustype_BDK_GSERNX_COMMON_PLL_1_BCFG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_COMMON_PLL_1_BCFG(a) "GSERNX_COMMON_PLL_1_BCFG"
+#define device_bar_BDK_GSERNX_COMMON_PLL_1_BCFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_COMMON_PLL_1_BCFG(a) (a)
+#define arguments_BDK_GSERNX_COMMON_PLL_1_BCFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gsern#_common_pll_2_bcfg
+ *
+ * GSER Common PLL Base Configuration Register 2
+ */
+union bdk_gsernx_common_pll_2_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_common_pll_2_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_57_63 : 7;
+ uint64_t mio_refclk_en : 1; /**< [ 56: 56](R/W) Reserved.
+ Internal:
+ Enable sending the common PLL reference clock to the counter in MIO. */
+ uint64_t lock_check_cnt_ovrd_en : 1; /**< [ 55: 55](R/W) Enable use of [LOCK_CHECK_CNT_OVRD]. */
+ uint64_t lock_check_cnt_ovrd : 15; /**< [ 54: 40](R/W) Lock check counter override value. This counter is used to wait for PLL lock to
+ be valid. It counts every reference clock cycle and once its done asserts
+ GSERN()_COMMON_INIT_BSTS[LOCK_READY]. For common PLL, the reference clock is the
+ input from the pad. For lane PLLs, the reference clock is the output of the
+ common PLL. To use value assert GSERN()_LANE()_RST1_BCFG[LOCK_CHECK] or trigger
+ a PLL reset sequence. */
+ uint64_t reserved_34_39 : 6;
+ uint64_t vcm_sel : 1; /**< [ 33: 33](R/W) For diagnostic use only.
+ Internal:
+ See PLL designer for how to set these. */
+ uint64_t cp_boost : 1; /**< [ 32: 32](R/W) For diagnostic use only.
+ Internal:
+ See PLL designer for how to set these. */
+ uint64_t ssc_sata_mode : 2; /**< [ 31: 30](R/W) PLL SATA spread spectrum control.
+ 0x0 = Down spreading. PPM triangle wave total peak-to-peak spread subtracted from
+ nominal frequency.
+ 0x1 = Up spreading. PPM triangle wave total peak-to-peak spread added to nominal
+ frequency.
+ 0x2 = Center spreading. PPM triangle wave total peak-to-peak spread centered at nominal
+ frequency.
+ 0x3 = Square wave subtracted from nominal frequency. */
+ uint64_t ssc_ppm : 2; /**< [ 29: 28](R/W) Spread-spectrum clocking total peak-to-peak spread.
+ 0x0 = 5000 PPM.
+ 0x1 = 3000 PPM.
+ 0x2 = 2500 PPM.
+ 0x3 = 1000 PPM. */
+ uint64_t pnr_refclk_en : 1; /**< [ 27: 27](R/W) Enable PLL reference clock to internal logic. */
+ uint64_t ssc_en : 1; /**< [ 26: 26](R/W) Spread-spectrum clocking enable. */
+ uint64_t ref_clk_bypass : 1; /**< [ 25: 25](R/W) Bypass reference clock to the PLL output. */
+ uint64_t pfd_offset : 1; /**< [ 24: 24](R/W) PLL PFD offset enable. */
+ uint64_t opamp : 4; /**< [ 23: 20](R/W) PLL loop filter op-amp configuration. */
+ uint64_t res : 4; /**< [ 19: 16](R/W) PLL loop filter configuration. */
+ uint64_t reserved_15 : 1;
+ uint64_t vco_bias : 3; /**< [ 14: 12](R/W) VCO bias control. */
+ uint64_t cal_dac_low : 4; /**< [ 11: 8](R/W) PLL calibration DAC low control. */
+ uint64_t cal_dac_mid : 4; /**< [ 7: 4](R/W) PLL calibration DAC middle control. */
+ uint64_t cal_dac_high : 4; /**< [ 3: 0](R/W) PLL calibration DAC high control. */
+#else /* Word 0 - Little Endian */
+ uint64_t cal_dac_high : 4; /**< [ 3: 0](R/W) PLL calibration DAC high control. */
+ uint64_t cal_dac_mid : 4; /**< [ 7: 4](R/W) PLL calibration DAC middle control. */
+ uint64_t cal_dac_low : 4; /**< [ 11: 8](R/W) PLL calibration DAC low control. */
+ uint64_t vco_bias : 3; /**< [ 14: 12](R/W) VCO bias control. */
+ uint64_t reserved_15 : 1;
+ uint64_t res : 4; /**< [ 19: 16](R/W) PLL loop filter configuration. */
+ uint64_t opamp : 4; /**< [ 23: 20](R/W) PLL loop filter op-amp configuration. */
+ uint64_t pfd_offset : 1; /**< [ 24: 24](R/W) PLL PFD offset enable. */
+ uint64_t ref_clk_bypass : 1; /**< [ 25: 25](R/W) Bypass reference clock to the PLL output. */
+ uint64_t ssc_en : 1; /**< [ 26: 26](R/W) Spread-spectrum clocking enable. */
+ uint64_t pnr_refclk_en : 1; /**< [ 27: 27](R/W) Enable PLL reference clock to internal logic. */
+ uint64_t ssc_ppm : 2; /**< [ 29: 28](R/W) Spread-spectrum clocking total peak-to-peak spread.
+ 0x0 = 5000 PPM.
+ 0x1 = 3000 PPM.
+ 0x2 = 2500 PPM.
+ 0x3 = 1000 PPM. */
+ uint64_t ssc_sata_mode : 2; /**< [ 31: 30](R/W) PLL SATA spread spectrum control.
+ 0x0 = Down spreading. PPM triangle wave total peak-to-peak spread subtracted from
+ nominal frequency.
+ 0x1 = Up spreading. PPM triangle wave total peak-to-peak spread added to nominal
+ frequency.
+ 0x2 = Center spreading. PPM triangle wave total peak-to-peak spread centered at nominal
+ frequency.
+ 0x3 = Square wave subtracted from nominal frequency. */
+ uint64_t cp_boost : 1; /**< [ 32: 32](R/W) For diagnostic use only.
+ Internal:
+ See PLL designer for how to set these. */
+ uint64_t vcm_sel : 1; /**< [ 33: 33](R/W) For diagnostic use only.
+ Internal:
+ See PLL designer for how to set these. */
+ uint64_t reserved_34_39 : 6;
+ uint64_t lock_check_cnt_ovrd : 15; /**< [ 54: 40](R/W) Lock check counter override value. This counter is used to wait for PLL lock to
+ be valid. It counts every reference clock cycle and once its done asserts
+ GSERN()_COMMON_INIT_BSTS[LOCK_READY]. For common PLL, the reference clock is the
+ input from the pad. For lane PLLs, the reference clock is the output of the
+ common PLL. To use value assert GSERN()_LANE()_RST1_BCFG[LOCK_CHECK] or trigger
+ a PLL reset sequence. */
+ uint64_t lock_check_cnt_ovrd_en : 1; /**< [ 55: 55](R/W) Enable use of [LOCK_CHECK_CNT_OVRD]. */
+ uint64_t mio_refclk_en : 1; /**< [ 56: 56](R/W) Reserved.
+ Internal:
+ Enable sending the common PLL reference clock to the counter in MIO. */
+ uint64_t reserved_57_63 : 7;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_common_pll_2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_common_pll_2_bcfg bdk_gsernx_common_pll_2_bcfg_t;
+
+/* Register address: 0x87e0900f02a8 + 0x1000000 (16 MB) per module; CN9XXX only, module index a in 0..7 (masked to 3 bits). */
+static inline uint64_t BDK_GSERNX_COMMON_PLL_2_BCFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_COMMON_PLL_2_BCFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0900f02a8ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("GSERNX_COMMON_PLL_2_BCFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_COMMON_PLL_2_BCFG(a) bdk_gsernx_common_pll_2_bcfg_t
+#define bustype_BDK_GSERNX_COMMON_PLL_2_BCFG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_COMMON_PLL_2_BCFG(a) "GSERNX_COMMON_PLL_2_BCFG"
+#define device_bar_BDK_GSERNX_COMMON_PLL_2_BCFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_COMMON_PLL_2_BCFG(a) (a)
+#define arguments_BDK_GSERNX_COMMON_PLL_2_BCFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gsern#_common_refclk_bcfg
+ *
+ * GSER Common Reference Clock Base Configuration Register
+ */
+union bdk_gsernx_common_refclk_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_common_refclk_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t hcsl : 1; /**< [ 4: 4](R/W) Enable [HCSL] and [OCT] to set HCSL on chip termination in the receiver of the
+ off-chip reference clock, e.g., for a PCIe reference clock. Leave [HCSL] low for
+ LVPECL on-chip termination. */
+ uint64_t oct : 1; /**< [ 3: 3](R/W) Enable on chip termination (OCT) in the receiver of the off-chip reference
+ clock. */
+ uint64_t pwdn : 1; /**< [ 2: 2](R/W) Power down.
+ 0 = Power on. Set to 0 if any lanes in this module will be used.
+ 1 = All paths through the common block reference clock receiver will be powered
+ off and no reference clock will reach the common PLL (or its bypass path). */
+ uint64_t cclksel : 2; /**< [ 1: 0](R/W) Selection controls for the reference clock
+ 0x0 = Choose on-chip common clock zero.
+ 0x1 = Choose on-chip common clock one.
+ 0x2 = Choose on-chip common clock two.
+ 0x3 = Choose the off-chip reference clock (requires that [PWDN] be low). */
+#else /* Word 0 - Little Endian */
+ uint64_t cclksel : 2; /**< [ 1: 0](R/W) Selection controls for the reference clock
+ 0x0 = Choose on-chip common clock zero.
+ 0x1 = Choose on-chip common clock one.
+ 0x2 = Choose on-chip common clock two.
+ 0x3 = Choose the off-chip reference clock (requires that [PWDN] be low). */
+ uint64_t pwdn : 1; /**< [ 2: 2](R/W) Power down.
+ 0 = Power on. Set to 0 if any lanes in this module will be used.
+ 1 = All paths through the common block reference clock receiver will be powered
+ off and no reference clock will reach the common PLL (or its bypass path). */
+ uint64_t oct : 1; /**< [ 3: 3](R/W) Enable on chip termination (OCT) in the receiver of the off-chip reference
+ clock. */
+ uint64_t hcsl : 1; /**< [ 4: 4](R/W) Enable [HCSL] and [OCT] to set HCSL on chip termination in the receiver of the
+ off-chip reference clock, e.g., for a PCIe reference clock. Leave [HCSL] low for
+ LVPECL on-chip termination. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_common_refclk_bcfg_s cn; */
+};
+typedef union bdk_gsernx_common_refclk_bcfg bdk_gsernx_common_refclk_bcfg_t;
+
+/* Register address: 0x87e0900f0198 + 0x1000000 (16 MB) per module; CN9XXX only, module index a in 0..7 (masked to 3 bits). */
+static inline uint64_t BDK_GSERNX_COMMON_REFCLK_BCFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_COMMON_REFCLK_BCFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0900f0198ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("GSERNX_COMMON_REFCLK_BCFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_COMMON_REFCLK_BCFG(a) bdk_gsernx_common_refclk_bcfg_t
+#define bustype_BDK_GSERNX_COMMON_REFCLK_BCFG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_COMMON_REFCLK_BCFG(a) "GSERNX_COMMON_REFCLK_BCFG"
+#define device_bar_BDK_GSERNX_COMMON_REFCLK_BCFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_COMMON_REFCLK_BCFG(a) (a)
+#define arguments_BDK_GSERNX_COMMON_REFCLK_BCFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gsern#_common_refclk_ctr
+ *
+ * GSER Common Reference Clock Cycle Counter Register
+ * A free-running counter of common PLL reference clock cycles to enable rough
+ * confirmation of reference clock frequency via software. Read the counter; wait some
+ * time, e.g., 100ms; read the counter; calculate frequency based on the difference in
+ * values during the known wait time.
+ */
+union bdk_gsernx_common_refclk_ctr
+{
+ uint64_t u;
+ struct bdk_gsernx_common_refclk_ctr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W/H) Running count of common PLL reference clock cycles. */
+#else /* Word 0 - Little Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W/H) Running count of common PLL reference clock cycles. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_common_refclk_ctr_s cn; */
+};
+typedef union bdk_gsernx_common_refclk_ctr bdk_gsernx_common_refclk_ctr_t;
+
+/* Register address: 0x87e0900f06e8 + 0x1000000 (16 MB) per module; CN9XXX only, module index a in 0..7 (masked to 3 bits). */
+static inline uint64_t BDK_GSERNX_COMMON_REFCLK_CTR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_COMMON_REFCLK_CTR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0900f06e8ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("GSERNX_COMMON_REFCLK_CTR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_COMMON_REFCLK_CTR(a) bdk_gsernx_common_refclk_ctr_t
+#define bustype_BDK_GSERNX_COMMON_REFCLK_CTR(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_COMMON_REFCLK_CTR(a) "GSERNX_COMMON_REFCLK_CTR"
+#define device_bar_BDK_GSERNX_COMMON_REFCLK_CTR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_COMMON_REFCLK_CTR(a) (a)
+#define arguments_BDK_GSERNX_COMMON_REFCLK_CTR(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gsern#_common_rev
+ *
+ * GSER Common Revision Register
+ * Revision number
+ */
+union bdk_gsernx_common_rev
+{
+ uint64_t u;
+ struct bdk_gsernx_common_rev_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t rev : 8; /**< [ 7: 0](RO/H) Revision number for GSERN common subblock.
+ Internal:
+ Used primarily for E5. */
+#else /* Word 0 - Little Endian */
+ uint64_t rev : 8; /**< [ 7: 0](RO/H) Revision number for GSERN common subblock.
+ Internal:
+ Used primarily for E5. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_common_rev_s cn; */
+};
+typedef union bdk_gsernx_common_rev bdk_gsernx_common_rev_t;
+
+/* Register address: 0x87e0900f0000 (offset 0 of the common block) + 0x1000000 (16 MB) per module; CN9XXX only, module index a in 0..7 (masked to 3 bits). */
+static inline uint64_t BDK_GSERNX_COMMON_REV(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_COMMON_REV(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0900f0000ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("GSERNX_COMMON_REV", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_COMMON_REV(a) bdk_gsernx_common_rev_t
+#define bustype_BDK_GSERNX_COMMON_REV(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_COMMON_REV(a) "GSERNX_COMMON_REV"
+#define device_bar_BDK_GSERNX_COMMON_REV(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_COMMON_REV(a) (a)
+#define arguments_BDK_GSERNX_COMMON_REV(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gsern#_common_rst_bcfg
+ *
+ * GSER Common Reset State Machine Controls and Overrides Register
+ */
+union bdk_gsernx_common_rst_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_common_rst_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_56_63 : 8;
+ uint64_t domain_rst_en : 1; /**< [ 55: 55](R/W) Domain reset enable.
+ 0 = Prevent resetting lane logic with domain reset.
+ 1 = Enable resetting all lane logic with domain reset.
+
+ For PCIe configurations, typically 1 for a root complex and 0 for an endpoint. */
+ uint64_t reserved_49_54 : 6;
+ uint64_t rst_pll_rst_sm : 1; /**< [ 48: 48](R/W) Set to reset the full PLL reset state machine;
+ deassert to run the complete reset initialization sequence
+ starting with common PLL initialization. */
+ uint64_t reserved_13_47 : 35;
+ uint64_t pll_go2deep_idle : 1; /**< [ 12: 12](R/W) Set to cycle the common PLL into deep idle. */
+ uint64_t lock_ppm : 2; /**< [ 11: 10](R/W) PLL lock PPM setting; after GSERN()_COMMON_RST_BCFG[LOCK_WAIT], compare
+ reference clock and divided VCO clock for this many cycles:
+ 0x0 = Compare after 5000 reference clock cycles.
+ 0x1 = Compare after 10000 reference clock cycles.
+ 0x2 = Compare after 20000 reference clock cycles.
+ 0x3 = Compare after 2500 reference clock cycles. */
+ uint64_t lock_wait : 2; /**< [ 9: 8](R/W) Wait time for PLL lock check function to start:
+ 0x0 = Wait 2500 reference clock cycles.
+ 0x1 = Wait 5000 reference clock cycles.
+ 0x2 = Wait 10000 reference clock cycles.
+ 0x3 = Wait 1250 reference clock cycles. */
+ uint64_t lock_check : 1; /**< [ 7: 7](R/W) Trigger a PLL lock status check; result returned in
+ GSERN()_COMMON_INIT_BSTS[LOCK] when GSERN()_COMMON_INIT_BSTS[LOCK_READY]
+ asserts. Deassert and re-assert to repeat checking. */
+ uint64_t vco_cal_reset : 1; /**< [ 6: 6](R/W) PLL VCO calibration state machine reset. */
+ uint64_t fracn_reset : 1; /**< [ 5: 5](R/W) PLL fractional-N state machine reset. */
+ uint64_t ssc_reset : 1; /**< [ 4: 4](R/W) PLL SSC state machine reset. */
+ uint64_t post_div_reset : 1; /**< [ 3: 3](RO) Reserved.
+ Internal:
+ Was common PLL post divider reset. No longer used. */
+ uint64_t reset : 1; /**< [ 2: 2](R/W) PLL primary reset; must assert [POST_DIV_RESET] if [RESET] is asserted. */
+ uint64_t cal_en : 1; /**< [ 1: 1](R/W) Enable PLL calibration procedure. */
+ uint64_t pwdn : 1; /**< [ 0: 0](R/W) PLL power down control. */
+#else /* Word 0 - Little Endian */
+ uint64_t pwdn : 1; /**< [ 0: 0](R/W) PLL power down control. */
+ uint64_t cal_en : 1; /**< [ 1: 1](R/W) Enable PLL calibration procedure. */
+ uint64_t reset : 1; /**< [ 2: 2](R/W) PLL primary reset; must assert [POST_DIV_RESET] if [RESET] is asserted. */
+ uint64_t post_div_reset : 1; /**< [ 3: 3](RO) Reserved.
+ Internal:
+ Was common PLL post divider reset. No longer used. */
+ uint64_t ssc_reset : 1; /**< [ 4: 4](R/W) PLL SSC state machine reset. */
+ uint64_t fracn_reset : 1; /**< [ 5: 5](R/W) PLL fractional-N state machine reset. */
+ uint64_t vco_cal_reset : 1; /**< [ 6: 6](R/W) PLL VCO calibration state machine reset. */
+ uint64_t lock_check : 1; /**< [ 7: 7](R/W) Trigger a PLL lock status check; result returned in
+ GSERN()_COMMON_INIT_BSTS[LOCK] when GSERN()_COMMON_INIT_BSTS[LOCK_READY]
+ asserts. Deassert and re-assert to repeat checking. */
+ uint64_t lock_wait : 2; /**< [ 9: 8](R/W) Wait time for PLL lock check function to start:
+ 0x0 = Wait 2500 reference clock cycles.
+ 0x1 = Wait 5000 reference clock cycles.
+ 0x2 = Wait 10000 reference clock cycles.
+ 0x3 = Wait 1250 reference clock cycles. */
+ uint64_t lock_ppm : 2; /**< [ 11: 10](R/W) PLL lock PPM setting; after GSERN()_COMMON_RST_BCFG[LOCK_WAIT], compare
+ reference clock and divided VCO clock for this many cycles:
+ 0x0 = Compare after 5000 reference clock cycles.
+ 0x1 = Compare after 10000 reference clock cycles.
+ 0x2 = Compare after 20000 reference clock cycles.
+ 0x3 = Compare after 2500 reference clock cycles. */
+ uint64_t pll_go2deep_idle : 1; /**< [ 12: 12](R/W) Set to cycle the common PLL into deep idle. */
+ uint64_t reserved_13_47 : 35;
+ uint64_t rst_pll_rst_sm : 1; /**< [ 48: 48](R/W) Set to reset the full PLL reset state machine;
+ deassert to run the complete reset initialization sequence
+ starting with common PLL initialization. */
+ uint64_t reserved_49_54 : 6;
+ uint64_t domain_rst_en : 1; /**< [ 55: 55](R/W) Domain reset enable.
+ 0 = Prevent resetting lane logic with domain reset.
+ 1 = Enable resetting all lane logic with domain reset.
+
+ For PCIe configurations, typically 1 for a root complex and 0 for an endpoint. */
+ uint64_t reserved_56_63 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_common_rst_bcfg_s cn; */
+};
+typedef union bdk_gsernx_common_rst_bcfg bdk_gsernx_common_rst_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_COMMON_RST_BCFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_COMMON_RST_BCFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0900f03b8ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("GSERNX_COMMON_RST_BCFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_COMMON_RST_BCFG(a) bdk_gsernx_common_rst_bcfg_t
+#define bustype_BDK_GSERNX_COMMON_RST_BCFG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_COMMON_RST_BCFG(a) "GSERNX_COMMON_RST_BCFG"
+#define device_bar_BDK_GSERNX_COMMON_RST_BCFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_COMMON_RST_BCFG(a) (a)
+#define arguments_BDK_GSERNX_COMMON_RST_BCFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gsern#_common_rst_cnt0_bcfg
+ *
+ * GSER Common Reset State Machine Delay Count Register 0
+ * Wait counts for the common block reset state machines. All fields must be set
+ * before bringing the common block out of reset.
+ */
+union bdk_gsernx_common_rst_cnt0_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_common_rst_cnt0_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_7_63 : 57;
+ uint64_t pre_bias_pwup_wait : 7; /**< [ 6: 0](R/W) Wait count in service clock cycles after initial trigger before
+ deasserting powerdown to the bias generator. The actual delay will be
+ three cycles more than set here, so set this field to the minimum
+ specified delay, 0x40, minus three, or greater. */
+#else /* Word 0 - Little Endian */
+ uint64_t pre_bias_pwup_wait : 7; /**< [ 6: 0](R/W) Wait count in service clock cycles after initial trigger before
+ deasserting powerdown to the bias generator. The actual delay will be
+ three cycles more than set here, so set this field to the minimum
+ specified delay, 0x40, minus three, or greater. */
+ uint64_t reserved_7_63 : 57;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_common_rst_cnt0_bcfg_s cn; */
+};
+typedef union bdk_gsernx_common_rst_cnt0_bcfg bdk_gsernx_common_rst_cnt0_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_COMMON_RST_CNT0_BCFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_COMMON_RST_CNT0_BCFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0900f0440ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("GSERNX_COMMON_RST_CNT0_BCFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_COMMON_RST_CNT0_BCFG(a) bdk_gsernx_common_rst_cnt0_bcfg_t
+#define bustype_BDK_GSERNX_COMMON_RST_CNT0_BCFG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_COMMON_RST_CNT0_BCFG(a) "GSERNX_COMMON_RST_CNT0_BCFG"
+#define device_bar_BDK_GSERNX_COMMON_RST_CNT0_BCFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_COMMON_RST_CNT0_BCFG(a) (a)
+#define arguments_BDK_GSERNX_COMMON_RST_CNT0_BCFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gsern#_common_rst_cnt1_bcfg
+ *
+ * GSER Common Reset State Machine Delay Count Register 1
+ * Wait counts for the common block reset state machines. All fields must be set
+ * before bringing the lane out of reset.
+ */
+union bdk_gsernx_common_rst_cnt1_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_common_rst_cnt1_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_50_63 : 14;
+ uint64_t cal_en_wait : 18; /**< [ 49: 32](R/W) Wait count in service clock cycles after calibration enable before deasserting
+ calibration enable to the PLL. Set this field to one less than the desired
+ number of cycles of delay. */
+ uint64_t reserved_28_31 : 4;
+ uint64_t pre_cal_en_wait : 12; /**< [ 27: 16](R/W) Wait count in service clock cycles after deasserting resets to the PLL fracn,
+ ssc, and cal_en state machines before asserting calibration enable to the
+ PLL. Set this to one less than the desired number of cycles of delay. */
+ uint64_t reserved_11_15 : 5;
+ uint64_t pre_pwup_wait : 11; /**< [ 10: 0](R/W) Wait count in service clock cycles after powering up the bias
+ generator before deasserting pwdn to the PLL. The actual delay will
+ be one cycle more than set here, so set this field to the minimum
+ specified delay, 0x400, minus one, or greater. */
+#else /* Word 0 - Little Endian */
+ uint64_t pre_pwup_wait : 11; /**< [ 10: 0](R/W) Wait count in service clock cycles after powering up the bias
+ generator before deasserting pwdn to the PLL. The actual delay will
+ be one cycle more than set here, so set this field to the minimum
+ specified delay, 0x400, minus one, or greater. */
+ uint64_t reserved_11_15 : 5;
+ uint64_t pre_cal_en_wait : 12; /**< [ 27: 16](R/W) Wait count in service clock cycles after deasserting resets to the PLL fracn,
+ ssc, and cal_en state machines before asserting calibration enable to the
+ PLL. Set this to one less than the desired number of cycles of delay. */
+ uint64_t reserved_28_31 : 4;
+ uint64_t cal_en_wait : 18; /**< [ 49: 32](R/W) Wait count in service clock cycles after calibration enable before deasserting
+ calibration enable to the PLL. Set this field to one less than the desired
+ number of cycles of delay. */
+ uint64_t reserved_50_63 : 14;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_common_rst_cnt1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_common_rst_cnt1_bcfg bdk_gsernx_common_rst_cnt1_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_COMMON_RST_CNT1_BCFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_COMMON_RST_CNT1_BCFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0900f04c8ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("GSERNX_COMMON_RST_CNT1_BCFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_COMMON_RST_CNT1_BCFG(a) bdk_gsernx_common_rst_cnt1_bcfg_t
+#define bustype_BDK_GSERNX_COMMON_RST_CNT1_BCFG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_COMMON_RST_CNT1_BCFG(a) "GSERNX_COMMON_RST_CNT1_BCFG"
+#define device_bar_BDK_GSERNX_COMMON_RST_CNT1_BCFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_COMMON_RST_CNT1_BCFG(a) (a)
+#define arguments_BDK_GSERNX_COMMON_RST_CNT1_BCFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gsern#_common_rst_cnt2_bcfg
+ *
+ * GSER Common Reset State Machine Delay Count Register 2
+ * Wait counts for the common block reset state machines. All fields must be set
+ * before bringing the lane out of reset.
+ */
+union bdk_gsernx_common_rst_cnt2_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_common_rst_cnt2_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_62_63 : 2;
+ uint64_t pre_run_wait : 14; /**< [ 61: 48](R/W) Wait count in service clock cycles after the PLL is running before deasserting
+ common lane reset to bring the lanes out of reset. */
+ uint64_t reserved_41_47 : 7;
+ uint64_t pre_pll_sm_reset_wait : 9; /**< [ 40: 32](R/W) Wait count in service clock cycles after deasserting pwdn before
+ deasserting resets to the PLL fracn, ssc, and cal_en state
+ machines. Set this field to one less than the desired number of
+ cycles of delay. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t pre_pdiv_reset_wait : 13; /**< [ 28: 16](R/W) Reserved.
+ Internal:
+ The PLL no longer has a postdivider reset. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t pre_pll_reset_wait : 12; /**< [ 11: 0](R/W) Wait count in service clock cycles after calibration enable deasserts
+ before deasserting reset to the PLL. Set this field to one less
+ than the desired number of cycles of delay. */
+#else /* Word 0 - Little Endian */
+ uint64_t pre_pll_reset_wait : 12; /**< [ 11: 0](R/W) Wait count in service clock cycles after calibration enable deasserts
+ before deasserting reset to the PLL. Set this field to one less
+ than the desired number of cycles of delay. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t pre_pdiv_reset_wait : 13; /**< [ 28: 16](R/W) Reserved.
+ Internal:
+ The PLL no longer has a postdivider reset. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t pre_pll_sm_reset_wait : 9; /**< [ 40: 32](R/W) Wait count in service clock cycles after deasserting pwdn before
+ deasserting resets to the PLL fracn, ssc, and cal_en state
+ machines. Set this field to one less than the desired number of
+ cycles of delay. */
+ uint64_t reserved_41_47 : 7;
+ uint64_t pre_run_wait : 14; /**< [ 61: 48](R/W) Wait count in service clock cycles after the PLL is running before deasserting
+ common lane reset to bring the lanes out of reset. */
+ uint64_t reserved_62_63 : 2;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_common_rst_cnt2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_common_rst_cnt2_bcfg bdk_gsernx_common_rst_cnt2_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_COMMON_RST_CNT2_BCFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_COMMON_RST_CNT2_BCFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0900f0550ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("GSERNX_COMMON_RST_CNT2_BCFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_COMMON_RST_CNT2_BCFG(a) bdk_gsernx_common_rst_cnt2_bcfg_t
+#define bustype_BDK_GSERNX_COMMON_RST_CNT2_BCFG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_COMMON_RST_CNT2_BCFG(a) "GSERNX_COMMON_RST_CNT2_BCFG"
+#define device_bar_BDK_GSERNX_COMMON_RST_CNT2_BCFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_COMMON_RST_CNT2_BCFG(a) (a)
+#define arguments_BDK_GSERNX_COMMON_RST_CNT2_BCFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gsern#_common_rst_rdy_bcfg
+ *
+ * GSER Common Reset Ready Control Register
+ */
+union bdk_gsernx_common_rst_rdy_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_common_rst_rdy_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t ln_en : 4; /**< [ 3: 0](R/W) Enables for lane reset ready inclusion in aggregated QLM reset ready output to
+ the reset controller. Each bit enables contribution from the corresponding lane.
+ \<0\> = Include lane 0.
+ \<1\> = Include lane 1.
+ \<2\> = Include lane 2.
+ \<3\> = Include lane 3. */
+#else /* Word 0 - Little Endian */
+ uint64_t ln_en : 4; /**< [ 3: 0](R/W) Enables for lane reset ready inclusion in aggregated QLM reset ready output to
+ the reset controller. Each bit enables contribution from the corresponding lane.
+ \<0\> = Include lane 0.
+ \<1\> = Include lane 1.
+ \<2\> = Include lane 2.
+ \<3\> = Include lane 3. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_common_rst_rdy_bcfg_s cn; */
+};
+typedef union bdk_gsernx_common_rst_rdy_bcfg bdk_gsernx_common_rst_rdy_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_COMMON_RST_RDY_BCFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_COMMON_RST_RDY_BCFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0900f0660ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("GSERNX_COMMON_RST_RDY_BCFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_COMMON_RST_RDY_BCFG(a) bdk_gsernx_common_rst_rdy_bcfg_t
+#define bustype_BDK_GSERNX_COMMON_RST_RDY_BCFG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_COMMON_RST_RDY_BCFG(a) "GSERNX_COMMON_RST_RDY_BCFG"
+#define device_bar_BDK_GSERNX_COMMON_RST_RDY_BCFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_COMMON_RST_RDY_BCFG(a) (a)
+#define arguments_BDK_GSERNX_COMMON_RST_RDY_BCFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_btsclk_cfg
+ *
+ * GSER Lane BTS Synchronous Ethernet Clock Control Register
+ * Register controls settings for providing a clock output from the lane which is
+ * synchronous to the clock recovered from the received data stream.
+ */
+union bdk_gsernx_lanex_btsclk_cfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_btsclk_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t en : 1; /**< [ 8: 8](R/W) Enable driving the clock output from the lane. This bit should be set low before
+ changing [DRATIO]; it may be written to 1 in the same cycle that [DRATIO] is
+ written. */
+ uint64_t reserved_2_7 : 6;
+ uint64_t dratio : 2; /**< [ 1: 0](R/W) Divider ratio for the clock output from the lane relative to the clock for the
+ parallel receive data.
+ 0x0 = Divide by 1, i.e., no division.
+ 0x1 = Divide by 2.
+ 0x2 = Divide by 4.
+ 0x3 = Divide by 8. */
+#else /* Word 0 - Little Endian */
+ uint64_t dratio : 2; /**< [ 1: 0](R/W) Divider ratio for the clock output from the lane relative to the clock for the
+ parallel receive data.
+ 0x0 = Divide by 1, i.e., no division.
+ 0x1 = Divide by 2.
+ 0x2 = Divide by 4.
+ 0x3 = Divide by 8. */
+ uint64_t reserved_2_7 : 6;
+ uint64_t en : 1; /**< [ 8: 8](R/W) Enable driving the clock output from the lane. This bit should be set low before
+ changing [DRATIO]; it may be written to 1 in the same cycle that [DRATIO] is
+ written. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_btsclk_cfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_btsclk_cfg bdk_gsernx_lanex_btsclk_cfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_BTSCLK_CFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_BTSCLK_CFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090003870ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_BTSCLK_CFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_BTSCLK_CFG(a,b) bdk_gsernx_lanex_btsclk_cfg_t
+#define bustype_BDK_GSERNX_LANEX_BTSCLK_CFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_BTSCLK_CFG(a,b) "GSERNX_LANEX_BTSCLK_CFG"
+#define device_bar_BDK_GSERNX_LANEX_BTSCLK_CFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_BTSCLK_CFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_BTSCLK_CFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_cdrfsm_bcfg
+ *
+ * GSER Lane Receiver CDR FSM Base Configuration Register
+ * Controls for the clock data recovery PLL control finite state
+ * machine. Set these controls prior to bringing the analog receiver out of
+ * reset.
+ */
+union bdk_gsernx_lanex_cdrfsm_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_cdrfsm_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_34_63 : 30;
+ uint64_t voter_sp_mask : 1; /**< [ 33: 33](R/W/H) Set to mask out "010" and "101" patterns in RX cdr voter. */
+ uint64_t rst_n : 1; /**< [ 32: 32](R/W/H) Clear to hold the receive CDR FSM in reset. */
+ uint64_t clk_sel : 2; /**< [ 31: 30](R/W/H) 0x0 = Run off div5clk from TX.
+ 0x1 = Run off div3clk from TX.
+ 0x2 = Run off div5clk from RX.
+ 0x3 = Run off div3clk from RX.
+
+ [CLK_SEL]\<0\> is also used in GSER TX to allow clocking the CDR FSM
+ with a divided copy of the transmit clock. This field should be set
+ as desired before sequencing the transmitter and/or receiver reset
+ state machine(s). */
+ uint64_t trunc : 2; /**< [ 29: 28](R/W/H) Reserved.
+ Internal:
+ state2[16:0] is CDR state machine 2nd order loop state variable.
+
+ 0x0 = state2[16:0] is truncated to 13 bits (plus sign bit).
+ 0x1 = state2[16:0] is truncated to 14 bits (plus sign bit).
+ 0x2 = state2[16:0] is truncated to 15 bits (plus sign bit).
+ 0x3 = state2[16:0] is truncated to 16 bits (plus sign bit, no truncation). */
+ uint64_t limit : 2; /**< [ 27: 26](R/W/H) 0x0 = Pass-through next state at boundaries.
+ 0x1 = Limit next state at boundaries.
+ 0x2-3 = Limit & pause next state at boundaries. */
+ uint64_t eoffs : 7; /**< [ 25: 19](R/W/H) E interp state offset. */
+ uint64_t qoffs : 7; /**< [ 18: 12](R/W/H) Q interp state offset. */
+ uint64_t inc2 : 6; /**< [ 11: 6](R/W/H) 2nd order loop inc. */
+ uint64_t inc1 : 6; /**< [ 5: 0](R/W/H) 1st order loop inc. */
+#else /* Word 0 - Little Endian */
+ uint64_t inc1 : 6; /**< [ 5: 0](R/W/H) 1st order loop inc. */
+ uint64_t inc2 : 6; /**< [ 11: 6](R/W/H) 2nd order loop inc. */
+ uint64_t qoffs : 7; /**< [ 18: 12](R/W/H) Q interp state offset. */
+ uint64_t eoffs : 7; /**< [ 25: 19](R/W/H) E interp state offset. */
+ uint64_t limit : 2; /**< [ 27: 26](R/W/H) 0x0 = Pass-through next state at boundaries.
+ 0x1 = Limit next state at boundaries.
+ 0x2-3 = Limit & pause next state at boundaries. */
+ uint64_t trunc : 2; /**< [ 29: 28](R/W/H) Reserved.
+ Internal:
+ state2[16:0] is CDR state machine 2nd order loop state variable.
+
+ 0x0 = state2[16:0] is truncated to 13 bits (plus sign bit).
+ 0x1 = state2[16:0] is truncated to 14 bits (plus sign bit).
+ 0x2 = state2[16:0] is truncated to 15 bits (plus sign bit).
+ 0x3 = state2[16:0] is truncated to 16 bits (plus sign bit, no truncation). */
+ uint64_t clk_sel : 2; /**< [ 31: 30](R/W/H) 0x0 = Run off div5clk from TX.
+ 0x1 = Run off div3clk from TX.
+ 0x2 = Run off div5clk from RX.
+ 0x3 = Run off div3clk from RX.
+
+ [CLK_SEL]\<0\> is also used in GSER TX to allow clocking the CDR FSM
+ with a divided copy of the transmit clock. This field should be set
+ as desired before sequencing the transmitter and/or receiver reset
+ state machine(s). */
+ uint64_t rst_n : 1; /**< [ 32: 32](R/W/H) Clear to hold the receive CDR FSM in reset. */
+ uint64_t voter_sp_mask : 1; /**< [ 33: 33](R/W/H) Set to mask out "010" and "101" patterns in RX cdr voter. */
+ uint64_t reserved_34_63 : 30;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_cdrfsm_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_cdrfsm_bcfg bdk_gsernx_lanex_cdrfsm_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_CDRFSM_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_CDRFSM_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001cf0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_CDRFSM_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_CDRFSM_BCFG(a,b) bdk_gsernx_lanex_cdrfsm_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_CDRFSM_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_CDRFSM_BCFG(a,b) "GSERNX_LANEX_CDRFSM_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_CDRFSM_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_CDRFSM_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_CDRFSM_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_cgx_txeq_bcfg
+ *
+ * GSER Lane CGX Tx Equalizer Base Configuration Register
+ * Register controls settings for the transmitter equalizer taps
+ * when the GSER is configured for CGX mode and KR training is not enabled.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL] is set to 'CGX'.
+ */
+union bdk_gsernx_lanex_cgx_txeq_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_cgx_txeq_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_28_63 : 36;
+ uint64_t tx_coeff_update : 1; /**< [ 27: 27](R/W/H) Transmitter coefficient update.
+ An asserting edge will start the transmitter coefficient update
+ sequencer. This field self-clears when the sequence has completed.
+ To update the GSER transmitter euqalizer coefficients program
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CPOST].
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CMAIN].
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CPRE].
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_BS].
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CSPD].
+
+ then write [TX_COEFF_UPDATE] to 1. */
+ uint64_t tx_enable : 1; /**< [ 26: 26](R/W) Transmitter enable.
+ 0 = Disable the serdes transmitter.
+ 1 = Enable the serdes transmitter.
+
+ Internal:
+ Drives the cgx_tx_enable input to the GSERN src_mux. */
+ uint64_t tx_stuff : 1; /**< [ 25: 25](R/W) Reserved. For Diagnostic Use Only.
+ Internal:
+ Transmitter bit stuffing.
+ Programs the transmitter PCS lite layer for bit stuffing.
+ Not used for Ethernet connections.
+ Leave programmed to 0x0.
+ Drives the cgx_tx_stuff input to the GSERN src_mux. */
+ uint64_t tx_oob : 1; /**< [ 24: 24](R/W) Reserved. For Diagnostic Use Only.
+ Internal:
+ Transmitter OOB signaling.
+ Not typically used for Ethernet connnections.
+ Leave programmed to 0x0.
+ Drives the cgx_tx_oob input to the GSERN src_mux. */
+ uint64_t tx_idle : 1; /**< [ 23: 23](R/W) Reserved. For Diagnostic Use Only.
+ Internal:
+ Transmitter electrical idle.
+ Used to force the transmitter to electrical idle.
+ Not typically used for Ethernet connections.
+ Leave progreammed to 0x0.
+ Drives the cgx_tx_idle input to the GSERN src_mux. */
+ uint64_t tx_cspd : 1; /**< [ 22: 22](R/W) Power-down control for a second TX bias/swing leg with the same
+ weight as TX_BS[3]. Normally this field is left deasserted to
+ provide a minimum transmit amplitude. Asserting [TX_CSPD] will turn
+ off all legs of the bias/swing generator for lower standby power. */
+ uint64_t tx_bs : 6; /**< [ 21: 16](R/W) TX bias/swing selection. This setting only takes effect if [TX_CSPD]
+ is deasserted; with [TX_CSPD] asserted the
+ bias/swing control setting seen in the analog bias generator is zero.
+
+ Typical override values would be:
+ 42 = Nominal 1.0V p-p transmit amplitude.
+ 52 = Nominal 1.2V p-p transmit amplitude.
+
+ The maximum usable value without transmitted waveform distortion depends
+ primarily on voltage, secondarily on process corner and temperature, but is at
+ least 52. There is no minimum setting based on transmitter distortion, only
+ that set by the receiver. */
+ uint64_t tx_cpost : 5; /**< [ 15: 11](R/W) Transmitter Post (C+1) equalizer tap coefficient value.
+ Programs the transmitter Post tap.
+ Valid range is 0 to 0x10.
+ See GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CMAIN]. */
+ uint64_t tx_cmain : 6; /**< [ 10: 5](R/W) Transmitter Main (C0) equalizer tap coefficient value.
+ Programs the serdes transmitter Main tap.
+ Valid range is 0x30 to 0x18.
+ When programing the transmitter Pre, Main, and Post
+ taps the following rules must be adhered to:
+ _ ([TX_CMAIN] + [TX_CPRE] + [TX_CPOST]) \<= 0x30.
+ _ ([TX_CMAIN] - [TX_CPRE] - [TX_CPOST]) \>= 0x6.
+ _ 0x30 \<= [TX_CMAIN] \<= 0x18.
+ _ 0x16 \>= [TX_CPRE] \>= 0x0.
+ _ 0x16 \>= [TX_CPOST] \>= 0x0.
+
+ [TX_CMAIN] should be adjusted when either [TX_CPRE] or [TX_CPOST] is adjusted to
+ provide constant power transmitter amplitude adjustments.
+
+ To update the GSER serdes transmitter Pre, Main, and Post
+ equalizer taps from the [TX_CPOST], [TX_CMAIN], and [TX_CPRE]
+ fields write GSERN()_LANE()_CGX_TXEQ_BCFG[TX_COEFF_UPDATE]
+ to 1 and subsequently clear [TX_COEFF_UPDATE] to 0. This step
+ transfers the [TX_CPOST], [TX_CMAIN], and [TX_CPRE] to the
+ serdes transmitter equalizer.
+
+ Related CSRs:
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_COEFF_UPDATE].
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CPOST].
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CPRE].
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_BS].
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CSPD]. */
+ uint64_t tx_cpre : 5; /**< [ 4: 0](R/W) Transmitter Pre (C-1) equalizer tap coefficient value.
+ Programs the transmitter Pre tap.
+ Valid range is 0 to 0x10.
+ See GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CMAIN]. */
+#else /* Word 0 - Little Endian */
+ uint64_t tx_cpre : 5; /**< [ 4: 0](R/W) Transmitter Pre (C-1) equalizer tap coefficient value.
+ Programs the transmitter Pre tap.
+ Valid range is 0 to 0x10.
+ See GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CMAIN]. */
+ uint64_t tx_cmain : 6; /**< [ 10: 5](R/W) Transmitter Main (C0) equalizer tap coefficient value.
+ Programs the serdes transmitter Main tap.
+ Valid range is 0x30 to 0x18.
+ When programing the transmitter Pre, Main, and Post
+ taps the following rules must be adhered to:
+ _ ([TX_CMAIN] + [TX_CPRE] + [TX_CPOST]) \<= 0x30.
+ _ ([TX_CMAIN] - [TX_CPRE] - [TX_CPOST]) \>= 0x6.
+ _ 0x30 \<= [TX_CMAIN] \<= 0x18.
+ _ 0x16 \>= [TX_CPRE] \>= 0x0.
+ _ 0x16 \>= [TX_CPOST] \>= 0x0.
+
+ [TX_CMAIN] should be adjusted when either [TX_CPRE] or [TX_CPOST] is adjusted to
+ provide constant power transmitter amplitude adjustments.
+
+ To update the GSER serdes transmitter Pre, Main, and Post
+ equalizer taps from the [TX_CPOST], [TX_CMAIN], and [TX_CPRE]
+ fields write GSERN()_LANE()_CGX_TXEQ_BCFG[TX_COEFF_UPDATE]
+ to 1 and subsequently clear [TX_COEFF_UPDATE] to 0. This step
+ transfers the [TX_CPOST], [TX_CMAIN], and [TX_CPRE] to the
+ serdes transmitter equalizer.
+
+ Related CSRs:
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_COEFF_UPDATE].
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CPOST].
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CPRE].
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_BS].
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CSPD]. */
+ uint64_t tx_cpost : 5; /**< [ 15: 11](R/W) Transmitter Post (C+1) equalizer tap coefficient value.
+ Programs the transmitter Post tap.
+ Valid range is 0 to 0x10.
+ See GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CMAIN]. */
+ uint64_t tx_bs : 6; /**< [ 21: 16](R/W) TX bias/swing selection. This setting only takes effect if [TX_CSPD]
+ is deasserted; with [TX_CSPD] asserted the
+ bias/swing control setting seen in the analog bias generator is zero.
+
+ Typical override values would be:
+ 42 = Nominal 1.0V p-p transmit amplitude.
+ 52 = Nominal 1.2V p-p transmit amplitude.
+
+ The maximum usable value without transmitted waveform distortion depends
+ primarily on voltage, secondarily on process corner and temperature, but is at
+ least 52. There is no minimum setting based on transmitter distortion, only
+ that set by the receiver. */
+ uint64_t tx_cspd : 1; /**< [ 22: 22](R/W) Power-down control for a second TX bias/swing leg with the same
+ weight as TX_BS[3]. Normally this field is left deasserted to
+ provide a minimum transmit amplitude. Asserting [TX_CSPD] will turn
+ off all legs of the bias/swing generator for lower standby power. */
+ uint64_t tx_idle : 1; /**< [ 23: 23](R/W) Reserved. For Diagnostic Use Only.
+ Internal:
+ Transmitter electrical idle.
+ Used to force the transmitter to electrical idle.
+ Not typically used for Ethernet connections.
+ Leave progreammed to 0x0.
+ Drives the cgx_tx_idle input to the GSERN src_mux. */
+ uint64_t tx_oob : 1; /**< [ 24: 24](R/W) Reserved. For Diagnostic Use Only.
+ Internal:
+ Transmitter OOB signaling.
+ Not typically used for Ethernet connnections.
+ Leave programmed to 0x0.
+ Drives the cgx_tx_oob input to the GSERN src_mux. */
+ uint64_t tx_stuff : 1; /**< [ 25: 25](R/W) Reserved. For Diagnostic Use Only.
+ Internal:
+ Transmitter bit stuffing.
+ Programs the transmitter PCS lite layer for bit stuffing.
+ Not used for Ethernet connections.
+ Leave programmed to 0x0.
+ Drives the cgx_tx_stuff input to the GSERN src_mux. */
+ uint64_t tx_enable : 1; /**< [ 26: 26](R/W) Transmitter enable.
+ 0 = Disable the serdes transmitter.
+ 1 = Enable the serdes transmitter.
+
+ Internal:
+ Drives the cgx_tx_enable input to the GSERN src_mux. */
+ uint64_t tx_coeff_update : 1; /**< [ 27: 27](R/W/H) Transmitter coefficient update.
+ An asserting edge will start the transmitter coefficient update
+ sequencer. This field self-clears when the sequence has completed.
+                                                                 To update the GSER transmitter equalizer coefficients program
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CPOST].
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CMAIN].
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CPRE].
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_BS].
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CSPD].
+
+ then write [TX_COEFF_UPDATE] to 1. */
+ uint64_t reserved_28_63 : 36;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_cgx_txeq_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_cgx_txeq_bcfg bdk_gsernx_lanex_cgx_txeq_bcfg_t;
+
+/* Returns the CSR address of GSERN(a)_LANE(b)_CGX_TXEQ_BCFG.
+   Valid only on CN9XXX with module a in [0,7] and lane b in [0,4]; any other
+   arguments report a fatal CSR error. Address = base + 0x1000000-byte stride
+   per GSERN module + 0x10000-byte stride per lane. */
+static inline uint64_t BDK_GSERNX_LANEX_CGX_TXEQ_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_CGX_TXEQ_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090003450ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_CGX_TXEQ_BCFG", 2, a, b, 0, 0);
+}
+
+/* Per-register attribute macros: C type, bus type, printable name, device BAR,
+   bus number, and argument list for the generic CSR accessors. */
+#define typedef_BDK_GSERNX_LANEX_CGX_TXEQ_BCFG(a,b) bdk_gsernx_lanex_cgx_txeq_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_CGX_TXEQ_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_CGX_TXEQ_BCFG(a,b) "GSERNX_LANEX_CGX_TXEQ_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_CGX_TXEQ_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_CGX_TXEQ_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_CGX_TXEQ_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_const
+ *
+ * GSER Lane CONST Register
+ * Lane number within the multilane macro.
+ */
+union bdk_gsernx_lanex_const
+{
+    uint64_t u; /* Raw 64-bit register value. */
+    struct bdk_gsernx_lanex_const_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_8_63 : 56;
+        uint64_t lane_num : 8; /**< [ 7: 0](RO/H) Lane number of this lane within the multilane module. */
+#else /* Word 0 - Little Endian */
+        uint64_t lane_num : 8; /**< [ 7: 0](RO/H) Lane number of this lane within the multilane module. */
+        uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_const_s cn; */
+};
+typedef union bdk_gsernx_lanex_const bdk_gsernx_lanex_const_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_CONST(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Address of GSERN(a)_LANE(b)_CONST: base 0x87e090000100 plus a 16 MB stride
+   per GSERN module and a 64 KB stride per lane. Only CN9XXX with a in [0,7]
+   and b in [0,4] is valid; anything else reports a fatal CSR error. */
+static inline uint64_t BDK_GSERNX_LANEX_CONST(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4))
+    {
+        uint64_t module_off = 0x1000000ll * (a & 0x7); /* 16 MB per module */
+        uint64_t lane_off = 0x10000ll * (b & 0x7); /* 64 KB per lane */
+        return 0x87e090000100ll + module_off + lane_off;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_CONST", 2, a, b, 0, 0);
+}
+
+/* Per-register attribute macros: C type, bus type, printable name, device BAR,
+   bus number, and argument list for the generic CSR accessors. */
+#define typedef_BDK_GSERNX_LANEX_CONST(a,b) bdk_gsernx_lanex_const_t
+#define bustype_BDK_GSERNX_LANEX_CONST(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_CONST(a,b) "GSERNX_LANEX_CONST"
+#define device_bar_BDK_GSERNX_LANEX_CONST(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_CONST(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_CONST(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_eco
+ *
+ * INTERNAL: GSER Lane ECO Register
+ */
+union bdk_gsernx_lanex_eco
+{
+    uint64_t u; /* Raw 64-bit register value. */
+    struct bdk_gsernx_lanex_eco_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t eco_rw : 50; /**< [ 63: 14](R/W) Internal:
+                                                                 Reserved for ECO use. */
+        uint64_t eco_rw_pll : 2; /**< [ 13: 12](R/W) Internal:
+                                                                 Pre-connected to the PLL. Reserved for ECO use. */
+        uint64_t eco_rw_tx : 4; /**< [ 11: 8](R/W) Internal:
+                                                                 Pre-connected to Tx custom. Reserved for ECO use. */
+        uint64_t eco_rw_rx_top : 4; /**< [ 7: 4](R/W) Internal:
+                                                                 Pre-connected to the north side of Rx custom. Reserved for ECO use. */
+        uint64_t eco_rw_rx_bot : 4; /**< [ 3: 0](R/W) Internal:
+                                                                 Pre-connected to the south side of Rx custom. Reserved for ECO use. */
+#else /* Word 0 - Little Endian */
+        uint64_t eco_rw_rx_bot : 4; /**< [ 3: 0](R/W) Internal:
+                                                                 Pre-connected to the south side of Rx custom. Reserved for ECO use. */
+        uint64_t eco_rw_rx_top : 4; /**< [ 7: 4](R/W) Internal:
+                                                                 Pre-connected to the north side of Rx custom. Reserved for ECO use. */
+        uint64_t eco_rw_tx : 4; /**< [ 11: 8](R/W) Internal:
+                                                                 Pre-connected to Tx custom. Reserved for ECO use. */
+        uint64_t eco_rw_pll : 2; /**< [ 13: 12](R/W) Internal:
+                                                                 Pre-connected to the PLL. Reserved for ECO use. */
+        uint64_t eco_rw : 50; /**< [ 63: 14](R/W) Internal:
+                                                                 Reserved for ECO use. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_eco_s cn; */
+};
+typedef union bdk_gsernx_lanex_eco bdk_gsernx_lanex_eco_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_ECO(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Address of GSERN(a)_LANE(b)_ECO: base 0x87e090003970 plus a 16 MB stride
+   per GSERN module and a 64 KB stride per lane. Only CN9XXX with a in [0,7]
+   and b in [0,4] is valid; anything else reports a fatal CSR error. */
+static inline uint64_t BDK_GSERNX_LANEX_ECO(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4))
+    {
+        uint64_t module_off = 0x1000000ll * (a & 0x7); /* 16 MB per module */
+        uint64_t lane_off = 0x10000ll * (b & 0x7); /* 64 KB per lane */
+        return 0x87e090003970ll + module_off + lane_off;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_ECO", 2, a, b, 0, 0);
+}
+
+/* Per-register attribute macros: C type, bus type, printable name, device BAR,
+   bus number, and argument list for the generic CSR accessors. */
+#define typedef_BDK_GSERNX_LANEX_ECO(a,b) bdk_gsernx_lanex_eco_t
+#define bustype_BDK_GSERNX_LANEX_ECO(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_ECO(a,b) "GSERNX_LANEX_ECO"
+#define device_bar_BDK_GSERNX_LANEX_ECO(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_ECO(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_ECO(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_eee_bcfg
+ *
+ * INTERNAL: GSER Lane EEE Base Configuration Register
+ *
+ * Reserved.
+ * Internal:
+ * Register controls settings for GSER behavior when Energy Efficient Ethernet (EEE) is
+ * in use on the link.
+ */
+union bdk_gsernx_lanex_eee_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_eee_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_58_63 : 6;
+ uint64_t rx_qa_sqlch_cnt : 12; /**< [ 57: 46](R/W) Reserved.
+ Internal:
+                                                                 Receiver QUIET to DATA squelch count.
+                                                                 Used to implement a delay or filter function for the receive data to the
+                                                                 CGX MAC when the receiver transitions from the EEE QUIET state to the
+                                                                 EEE ACTIVE state. [RX_QA_SQLCH_CNT] counter is in units of 10ns.
+                                                                 Used in conjunction with GSERN()_LANE()_EEE_BCFG[RX_QA_SQLCH_EN]. */
+ uint64_t rx_qa_sqlch_en : 1; /**< [ 45: 45](R/W) Reserved.
+ Internal:
+                                                                 Receiver QUIET to DATA squelch enable.
+                                                                 When [RX_QA_SQLCH_EN] is enabled the receive data to the CGX MAC will be
+                                                                 suppressed following the transition from receiver EEE QUIET state to
+                                                                 receiver EEE ACTIVE state for the time defined by the
+                                                                 GSERN()_LANE()_EEE_BCFG[RX_QA_SQLCH_CNT] squelch count in units of 10ns.
+                                                                 This is an optional filtering function to prevent garbage data to the CGX MAC
+ as the receiver is transitioning from the EEE QUIET to EEE ACTIVE states. */
+ uint64_t tx_quiet_drv_en : 1; /**< [ 44: 44](R/W) Reserved.
+ Internal:
+ Transmitter QUIET drive enable.
+ When [TX_QUIET_DRV_EN] is set to one the transmitter Tx+/Tx- driver outputs
+ will drive to electrical idle when either the CGX MAC moves the
+ SerDes transmitter block from the EEE ACTIVE state to the EEE QUIET state or
+ the GSERN()_LANE()_EEE_BCFG[EEE_TX_OVRRD] is set to one. This ensures that
+ the link partner receiver energy detector sees the local device transmitter
+ transition from the EEE ACTIVE state to the EEE QUIET state.
+ When [TX_QUIET_DRV_EN] is set to one the transmitter Tx+/Tx- driver outputs
+ will drive to electrical idle even if the GSERN()_LANE()_EEE_BCFG[TX_PWRDN_EN]
+ is cleared to zero to inhibit the transmitter from powering down during EEE
+ deep sleep TX QUIET state. When [TX_QUIET_DRV_EN] is cleared to zero the
+ Transmitter Tx+/Tx- outputs will only drive to electrical idle when the
+ transmitter is powered down by CGX or GSERN()_LANE()_EEE_BCFG[EEE_TX_OVRRD]
+ is set to one and GSERN()_LANE()_EEE_BCFG[TX_PWRDN_EN] is also
+ set to one to enable transmitter power down. */
+ uint64_t eee_edet : 1; /**< [ 43: 43](RO/H) Reserved.
+ Internal:
+ EEE energy detected.
+ For diagnostic use only. Reflects the state of
+ the EEE energy detector. Used to test signals for the wake from
+ EEE deep sleep power down modes of the SerDes. */
+ uint64_t eee_ovrrd : 1; /**< [ 42: 42](R/W) Reserved.
+ Internal:
+ EEE override.
+ For diagnostic use only. When [EEE_OVRRD] is set to one the SerDes EEE rx and
+ tx modes are controlled by GSERN()_LANE()_EEE_BCFG[EEE_RX_OVRRD] and
+ GSERN()_LANE()_EEE_BCFG[EEE_TX_OVRRD]. Used to test the EEE deep sleep
+ power down modes of the SerDes. */
+ uint64_t eee_tx_ovrrd : 2; /**< [ 41: 40](R/W) Reserved.
+ Internal:
+ EEE Tx override.
+ For diagnostic use only. When GSERN()_LANE()_EEE_BCFG[EEE_OVRRD] is set to one
+ the SerDes transmitter modes are controlled by [EEE_TX_OVRRD]. Used to
+ test the EEE deep sleep power down modes of the SerDes transmitter.
+ 0x0 = ACTIVE/DATA mode
+ 0x1 = QUIET
+ 0x2 = ALERT
+ 0x3 = Reserved. */
+ uint64_t eee_rx_ovrrd : 1; /**< [ 39: 39](R/W) Reserved.
+ Internal:
+ EEE Rx override.
+ For diagnostic use only. When GSERN()_LANE()_EEE_BCFG[EEE_OVRRD] is set to one
+ the SerDes receiver modes are controlled by [EEE_RX_OVRRD]. Used to
+ test the EEE deep sleep power down modes of the SerDes receiver.
+ 0x0 = ACTIVE/DATA mode
+ 0x1 = QUIET */
+ uint64_t bypass_edet : 1; /**< [ 38: 38](R/W) Reserved.
+ Internal:
+ EEE energy detect bypass.
+ 0 = The Energy Detect EDET signal to CGX will behave normally. EDET will be set
+ to one when energy is detected at the lane receiver and EDET will be cleared to zero
+ when there is no energy detected at the lane receiver.
+ 1 = The Energy Detect EDET signal to CGX will always be set to 1 bypassing
+ the energy detect function. */
+ uint64_t pwrdn_mode : 2; /**< [ 37: 36](R/W) Reserved.
+ Internal:
+ Programs the PHY power mode down during EEE.
+                                                                 Used to select the P1, P2, or Shutdown power states when entering deep sleep mode.
+                                                                 0x0 = Reserved.
+                                                                 0x1 = The PHY will power down to the P1 power state and the power state controls
+ will be configured from the GSERN()_LANE()_EEE_RSTP1_BCFG register.
+ 0x2 = The PHY will power down to the P2 power state and the power state controls
+ will be configured from the GSERN()_LANE()_EEE_RSTP2_BCFG register.
+ 0x3 = The PHY will power down to the shutdown (SHDN) power state and the power
+ state controls will be configured from the GSERN()_LANE()_EEE_RSTSHDN_BCFG register. */
+ uint64_t eyemon_pwrdn_en : 1; /**< [ 35: 35](R/W) Reserved.
+ Internal:
+ Programs the behavior of the eye monitor power down during EEE.
+ 0 = The eye monitor will not power down during EEE quiet mode.
+ 1 = The eye monitor will power down during the EEE quiet mode. */
+ uint64_t lpll_pwrdn_en : 1; /**< [ 34: 34](R/W) Reserved.
+ Internal:
+ Programs the behavior of the lane PLL power down during EEE.
+ 0 = The lane PLL will not power down during EEE quiet mode.
+ 1 = The lane PLL will power down during the EEE quiet mode. */
+ uint64_t tx_pwrdn_en : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Programs the behavior of the transmitter power down during EEE.
+ 0 = The transmitter will not power down during EEE quiet mode.
+ 1 = The transmitter will power down during the EEE quiet mode. */
+ uint64_t rx_pwrdn_en : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ Programs the behavior of the receiver power down during EEE.
+ 0 = The receiver will not power down during EEE quiet mode.
+ 1 = The receiver will power down during the EEE Quiet mode. */
+ uint64_t tx_dly_cnt : 16; /**< [ 31: 16](R/W) Reserved.
+ Internal:
+                                                                 Programs the delay of the TX PCS layer when the Tx side is transitioned from the EEE QUIET
+                                                                 phase to the ALERT or ACTIVE phase. This programmable delay adds delay to ensure that
+ txdivclk is running and stable before Tx data resumes.
+ The delay units are in units of service-clock cycles. For diagnostic use only. */
+ uint64_t rx_dly_cnt : 16; /**< [ 15: 0](R/W) Reserved.
+ Internal:
+                                                                 Programs the delay of the RX PCS layer when the receiver is transitioned from the EEE
+ QUIET to ACTIVE phase. The programmable delay adds delay to ensure that the rxdivclk
+ is running and stable before Rx data resumes.
+ The delay units are in units of service-clock cycles. For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t rx_dly_cnt : 16; /**< [ 15: 0](R/W) Reserved.
+ Internal:
+                                                                 Programs the delay of the RX PCS layer when the receiver is transitioned from the EEE
+ QUIET to ACTIVE phase. The programmable delay adds delay to ensure that the rxdivclk
+ is running and stable before Rx data resumes.
+ The delay units are in units of service-clock cycles. For diagnostic use only. */
+ uint64_t tx_dly_cnt : 16; /**< [ 31: 16](R/W) Reserved.
+ Internal:
+                                                                 Programs the delay of the TX PCS layer when the Tx side is transitioned from the EEE QUIET
+                                                                 phase to the ALERT or ACTIVE phase. This programmable delay adds delay to ensure that
+ txdivclk is running and stable before Tx data resumes.
+ The delay units are in units of service-clock cycles. For diagnostic use only. */
+ uint64_t rx_pwrdn_en : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ Programs the behavior of the receiver power down during EEE.
+ 0 = The receiver will not power down during EEE quiet mode.
+ 1 = The receiver will power down during the EEE Quiet mode. */
+ uint64_t tx_pwrdn_en : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Programs the behavior of the transmitter power down during EEE.
+ 0 = The transmitter will not power down during EEE quiet mode.
+ 1 = The transmitter will power down during the EEE quiet mode. */
+ uint64_t lpll_pwrdn_en : 1; /**< [ 34: 34](R/W) Reserved.
+ Internal:
+ Programs the behavior of the lane PLL power down during EEE.
+ 0 = The lane PLL will not power down during EEE quiet mode.
+ 1 = The lane PLL will power down during the EEE quiet mode. */
+ uint64_t eyemon_pwrdn_en : 1; /**< [ 35: 35](R/W) Reserved.
+ Internal:
+ Programs the behavior of the eye monitor power down during EEE.
+ 0 = The eye monitor will not power down during EEE quiet mode.
+ 1 = The eye monitor will power down during the EEE quiet mode. */
+ uint64_t pwrdn_mode : 2; /**< [ 37: 36](R/W) Reserved.
+ Internal:
+ Programs the PHY power mode down during EEE.
+                                                                 Used to select the P1, P2, or Shutdown power states when entering deep sleep mode.
+                                                                 0x0 = Reserved.
+                                                                 0x1 = The PHY will power down to the P1 power state and the power state controls
+ will be configured from the GSERN()_LANE()_EEE_RSTP1_BCFG register.
+ 0x2 = The PHY will power down to the P2 power state and the power state controls
+ will be configured from the GSERN()_LANE()_EEE_RSTP2_BCFG register.
+ 0x3 = The PHY will power down to the shutdown (SHDN) power state and the power
+ state controls will be configured from the GSERN()_LANE()_EEE_RSTSHDN_BCFG register. */
+ uint64_t bypass_edet : 1; /**< [ 38: 38](R/W) Reserved.
+ Internal:
+ EEE energy detect bypass.
+ 0 = The Energy Detect EDET signal to CGX will behave normally. EDET will be set
+ to one when energy is detected at the lane receiver and EDET will be cleared to zero
+ when there is no energy detected at the lane receiver.
+ 1 = The Energy Detect EDET signal to CGX will always be set to 1 bypassing
+ the energy detect function. */
+ uint64_t eee_rx_ovrrd : 1; /**< [ 39: 39](R/W) Reserved.
+ Internal:
+ EEE Rx override.
+ For diagnostic use only. When GSERN()_LANE()_EEE_BCFG[EEE_OVRRD] is set to one
+ the SerDes receiver modes are controlled by [EEE_RX_OVRRD]. Used to
+ test the EEE deep sleep power down modes of the SerDes receiver.
+ 0x0 = ACTIVE/DATA mode
+ 0x1 = QUIET */
+ uint64_t eee_tx_ovrrd : 2; /**< [ 41: 40](R/W) Reserved.
+ Internal:
+ EEE Tx override.
+ For diagnostic use only. When GSERN()_LANE()_EEE_BCFG[EEE_OVRRD] is set to one
+ the SerDes transmitter modes are controlled by [EEE_TX_OVRRD]. Used to
+ test the EEE deep sleep power down modes of the SerDes transmitter.
+ 0x0 = ACTIVE/DATA mode
+ 0x1 = QUIET
+ 0x2 = ALERT
+ 0x3 = Reserved. */
+ uint64_t eee_ovrrd : 1; /**< [ 42: 42](R/W) Reserved.
+ Internal:
+ EEE override.
+ For diagnostic use only. When [EEE_OVRRD] is set to one the SerDes EEE rx and
+ tx modes are controlled by GSERN()_LANE()_EEE_BCFG[EEE_RX_OVRRD] and
+ GSERN()_LANE()_EEE_BCFG[EEE_TX_OVRRD]. Used to test the EEE deep sleep
+ power down modes of the SerDes. */
+ uint64_t eee_edet : 1; /**< [ 43: 43](RO/H) Reserved.
+ Internal:
+ EEE energy detected.
+ For diagnostic use only. Reflects the state of
+ the EEE energy detector. Used to test signals for the wake from
+ EEE deep sleep power down modes of the SerDes. */
+ uint64_t tx_quiet_drv_en : 1; /**< [ 44: 44](R/W) Reserved.
+ Internal:
+ Transmitter QUIET drive enable.
+ When [TX_QUIET_DRV_EN] is set to one the transmitter Tx+/Tx- driver outputs
+ will drive to electrical idle when either the CGX MAC moves the
+ SerDes transmitter block from the EEE ACTIVE state to the EEE QUIET state or
+ the GSERN()_LANE()_EEE_BCFG[EEE_TX_OVRRD] is set to one. This ensures that
+ the link partner receiver energy detector sees the local device transmitter
+ transition from the EEE ACTIVE state to the EEE QUIET state.
+ When [TX_QUIET_DRV_EN] is set to one the transmitter Tx+/Tx- driver outputs
+ will drive to electrical idle even if the GSERN()_LANE()_EEE_BCFG[TX_PWRDN_EN]
+ is cleared to zero to inhibit the transmitter from powering down during EEE
+ deep sleep TX QUIET state. When [TX_QUIET_DRV_EN] is cleared to zero the
+ Transmitter Tx+/Tx- outputs will only drive to electrical idle when the
+ transmitter is powered down by CGX or GSERN()_LANE()_EEE_BCFG[EEE_TX_OVRRD]
+ is set to one and GSERN()_LANE()_EEE_BCFG[TX_PWRDN_EN] is also
+ set to one to enable transmitter power down. */
+ uint64_t rx_qa_sqlch_en : 1; /**< [ 45: 45](R/W) Reserved.
+ Internal:
+                                                                 Receiver QUIET to DATA squelch enable.
+ When [RX_QA_SQLCH_EN] is enabled the receive data to the CGX MAC will be
+ suppressed following the transition from receiver EEE QUIET state to
+ receiver EEE ACTIVE state for the time defined by the
+ GSERN()_LANE()_EEE_BCFG[RX_QA_SQLCH_CNT] squelch count in units of 10ns.
+                                                                 This is an optional filtering function to prevent garbage data to the CGX MAC
+ as the receiver is transitioning from the EEE QUIET to EEE ACTIVE states. */
+ uint64_t rx_qa_sqlch_cnt : 12; /**< [ 57: 46](R/W) Reserved.
+ Internal:
+                                                                 Receiver QUIET to DATA squelch count.
+                                                                 Used to implement a delay or filter function for the receive data to the
+                                                                 CGX MAC when the receiver transitions from the EEE QUIET state to the
+                                                                 EEE ACTIVE state. [RX_QA_SQLCH_CNT] counter is in units of 10ns.
+                                                                 Used in conjunction with GSERN()_LANE()_EEE_BCFG[RX_QA_SQLCH_EN]. */
+ uint64_t reserved_58_63 : 6;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_eee_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_eee_bcfg bdk_gsernx_lanex_eee_bcfg_t;
+
+/* Returns the CSR address of GSERN(a)_LANE(b)_EEE_BCFG.
+   Valid only on CN9XXX with module a in [0,7] and lane b in [0,4]; any other
+   arguments report a fatal CSR error. Address = base + 0x1000000-byte stride
+   per GSERN module + 0x10000-byte stride per lane. */
+static inline uint64_t BDK_GSERNX_LANEX_EEE_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_EEE_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090003650ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_EEE_BCFG", 2, a, b, 0, 0);
+}
+
+/* Per-register attribute macros: C type, bus type, printable name, device BAR,
+   bus number, and argument list for the generic CSR accessors. */
+#define typedef_BDK_GSERNX_LANEX_EEE_BCFG(a,b) bdk_gsernx_lanex_eee_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_EEE_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_EEE_BCFG(a,b) "GSERNX_LANEX_EEE_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_EEE_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_EEE_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_EEE_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_eee_rstp1_bcfg
+ *
+ * INTERNAL: GSER Lane EEE PowerDown P1 Reset States Control Register
+ *
+ * Reserved.
+ * Internal:
+ * Controls the power down and reset states of the serdes lane PLL, transmitter, receiver,
+ * receiver adaptation, and eye monitor blocks during the EEE deep sleep power down P1 state.
+ */
+union bdk_gsernx_lanex_eee_rstp1_bcfg
+{
+    uint64_t u; /* Raw 64-bit register value. */
+    struct bdk_gsernx_lanex_eee_rstp1_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_33_63 : 31;
+        uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+                                                                 Internal:
+                                                                 Rx Adapt state Pause (0) or Hard Reset (1) during EEE deep sleep P1 PowerDown state. */
+        uint64_t reserved_29_31 : 3;
+        uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+                                                                 Internal:
+                                                                 Eye monitor reset state during EEE deep sleep P1 PowerDown state. */
+        uint64_t reserved_21_23 : 3;
+        uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+                                                                 Internal:
+                                                                 RX reset state during EEE deep sleep P1 PowerDown state. */
+        uint64_t reserved_12_15 : 4;
+        uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+                                                                 Internal:
+                                                                 TX reset state during EEE deep sleep P1 PowerDown state. */
+        uint64_t reserved_4_7 : 4;
+        uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+                                                                 Internal:
+                                                                 LANE PLL reset state during EEE deep sleep P1 PowerDown state.
+                                                                 Note: this value is never likely to be changed from the normal run state (0x8). */
+#else /* Word 0 - Little Endian */
+        uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+                                                                 Internal:
+                                                                 LANE PLL reset state during EEE deep sleep P1 PowerDown state.
+                                                                 Note: this value is never likely to be changed from the normal run state (0x8). */
+        uint64_t reserved_4_7 : 4;
+        uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+                                                                 Internal:
+                                                                 TX reset state during EEE deep sleep P1 PowerDown state. */
+        uint64_t reserved_12_15 : 4;
+        uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+                                                                 Internal:
+                                                                 RX reset state during EEE deep sleep P1 PowerDown state. */
+        uint64_t reserved_21_23 : 3;
+        uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+                                                                 Internal:
+                                                                 Eye monitor reset state during EEE deep sleep P1 PowerDown state. */
+        uint64_t reserved_29_31 : 3;
+        uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+                                                                 Internal:
+                                                                 Rx Adapt state Pause (0) or Hard Reset (1) during EEE deep sleep P1 PowerDown state. */
+        uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_eee_rstp1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_eee_rstp1_bcfg bdk_gsernx_lanex_eee_rstp1_bcfg_t;
+
+/* Returns the CSR address of GSERN(a)_LANE(b)_EEE_RSTP1_BCFG.
+   Valid only on CN9XXX with module a in [0,7] and lane b in [0,4]; any other
+   arguments report a fatal CSR error. */
+static inline uint64_t BDK_GSERNX_LANEX_EEE_RSTP1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_EEE_RSTP1_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090003750ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_EEE_RSTP1_BCFG", 2, a, b, 0, 0);
+}
+
+/* Per-register attribute macros: C type, bus type, printable name, device BAR,
+   bus number, and argument list for the generic CSR accessors. */
+#define typedef_BDK_GSERNX_LANEX_EEE_RSTP1_BCFG(a,b) bdk_gsernx_lanex_eee_rstp1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_EEE_RSTP1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_EEE_RSTP1_BCFG(a,b) "GSERNX_LANEX_EEE_RSTP1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_EEE_RSTP1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_EEE_RSTP1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_EEE_RSTP1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_eee_rstp2_bcfg
+ *
+ * INTERNAL: GSER Lane EEE PowerDown P2 Reset States Control Register
+ *
+ * Reserved.
+ * Internal:
+ * Controls the power down and reset states of the serdes lane PLL, transmitter, receiver,
+ * receiver adaptation, and eye monitor blocks during the EEE deep sleep power down P2 state.
+ */
+union bdk_gsernx_lanex_eee_rstp2_bcfg
+{
+    uint64_t u; /* Raw 64-bit register value. */
+    struct bdk_gsernx_lanex_eee_rstp2_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_33_63 : 31;
+        uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+                                                                 Internal:
+                                                                 Rx Adapt state Pause (0) or Hard Reset (1) during EEE deep sleep P2 PowerDown state. */
+        uint64_t reserved_29_31 : 3;
+        uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+                                                                 Internal:
+                                                                 Eye monitor reset state during EEE deep sleep P2 PowerDown state. */
+        uint64_t reserved_21_23 : 3;
+        uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+                                                                 Internal:
+                                                                 RX reset state during EEE deep sleep P2 PowerDown state. */
+        uint64_t reserved_12_15 : 4;
+        uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+                                                                 Internal:
+                                                                 TX reset state during EEE deep sleep P2 PowerDown state. */
+        uint64_t reserved_4_7 : 4;
+        uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+                                                                 Internal:
+                                                                 LANE PLL reset state during EEE deep sleep P2 PowerDown state. */
+#else /* Word 0 - Little Endian */
+        uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+                                                                 Internal:
+                                                                 LANE PLL reset state during EEE deep sleep P2 PowerDown state. */
+        uint64_t reserved_4_7 : 4;
+        uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+                                                                 Internal:
+                                                                 TX reset state during EEE deep sleep P2 PowerDown state. */
+        uint64_t reserved_12_15 : 4;
+        uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+                                                                 Internal:
+                                                                 RX reset state during EEE deep sleep P2 PowerDown state. */
+        uint64_t reserved_21_23 : 3;
+        uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+                                                                 Internal:
+                                                                 Eye monitor reset state during EEE deep sleep P2 PowerDown state. */
+        uint64_t reserved_29_31 : 3;
+        uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+                                                                 Internal:
+                                                                 Rx Adapt state Pause (0) or Hard Reset (1) during EEE deep sleep P2 PowerDown state. */
+        uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_eee_rstp2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_eee_rstp2_bcfg bdk_gsernx_lanex_eee_rstp2_bcfg_t;
+
+/* Returns the CSR address of GSERN(a)_LANE(b)_EEE_RSTP2_BCFG.
+   Valid only on CN9XXX with module a in [0,7] and lane b in [0,4]; any other
+   arguments report a fatal CSR error. */
+static inline uint64_t BDK_GSERNX_LANEX_EEE_RSTP2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_EEE_RSTP2_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090003760ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_EEE_RSTP2_BCFG", 2, a, b, 0, 0);
+}
+
+/* Per-register attribute macros: C type, bus type, printable name, device BAR,
+   bus number, and argument list for the generic CSR accessors. */
+#define typedef_BDK_GSERNX_LANEX_EEE_RSTP2_BCFG(a,b) bdk_gsernx_lanex_eee_rstp2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_EEE_RSTP2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_EEE_RSTP2_BCFG(a,b) "GSERNX_LANEX_EEE_RSTP2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_EEE_RSTP2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_EEE_RSTP2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_EEE_RSTP2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_eee_rstshdn_bcfg
+ *
+ * INTERNAL: GSER Lane EEE PowerDown Shutdown Reset States Control Register
+ *
+ * Reserved.
+ * Internal:
+ * Controls the power down and reset states of the serdes lane PLL, transmitter, receiver,
+ * receiver adaptation, and eye monitor blocks during the EEE deep sleep power shut down state.
+ */
+union bdk_gsernx_lanex_eee_rstshdn_bcfg
+{
+    uint64_t u; /* Raw 64-bit register value. */
+    struct bdk_gsernx_lanex_eee_rstshdn_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_33_63 : 31;
+        uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+                                                                 Internal:
+                                                                 Rx Adapt state Pause (0) or Hard Reset (1) during EEE deep sleep shutdown PowerDown state. */
+        uint64_t reserved_29_31 : 3;
+        uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+                                                                 Internal:
+                                                                 Eye monitor reset state during EEE deep sleep shutdown PowerDown state. */
+        uint64_t reserved_21_23 : 3;
+        uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+                                                                 Internal:
+                                                                 RX reset state during EEE deep sleep shutdown PowerDown state. */
+        uint64_t reserved_12_15 : 4;
+        uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+                                                                 Internal:
+                                                                 TX reset state during EEE deep sleep shutdown PowerDown state. */
+        uint64_t reserved_4_7 : 4;
+        uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+                                                                 Internal:
+                                                                 LANE PLL reset state during EEE deep sleep shutdown PowerDown state. */
+#else /* Word 0 - Little Endian */
+        uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+                                                                 Internal:
+                                                                 LANE PLL reset state during EEE deep sleep shutdown PowerDown state. */
+        uint64_t reserved_4_7 : 4;
+        uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+                                                                 Internal:
+                                                                 TX reset state during EEE deep sleep shutdown PowerDown state. */
+        uint64_t reserved_12_15 : 4;
+        uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+                                                                 Internal:
+                                                                 RX reset state during EEE deep sleep shutdown PowerDown state. */
+        uint64_t reserved_21_23 : 3;
+        uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+                                                                 Internal:
+                                                                 Eye monitor reset state during EEE deep sleep shutdown PowerDown state. */
+        uint64_t reserved_29_31 : 3;
+        uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+                                                                 Internal:
+                                                                 Rx Adapt state Pause (0) or Hard Reset (1) during EEE deep sleep shutdown PowerDown state. */
+        uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_eee_rstshdn_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_eee_rstshdn_bcfg bdk_gsernx_lanex_eee_rstshdn_bcfg_t;
+
+/* Returns the CSR address of GSERN(a)_LANE(b)_EEE_RSTSHDN_BCFG.
+   Valid only on CN9XXX with module a in [0,7] and lane b in [0,4]; any other
+   arguments report a fatal CSR error. */
+static inline uint64_t BDK_GSERNX_LANEX_EEE_RSTSHDN_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_EEE_RSTSHDN_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090003770ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_EEE_RSTSHDN_BCFG", 2, a, b, 0, 0);
+}
+
+/* Per-register attribute macros: C type, bus type, printable name, device BAR,
+   bus number, and argument list for the generic CSR accessors. */
+#define typedef_BDK_GSERNX_LANEX_EEE_RSTSHDN_BCFG(a,b) bdk_gsernx_lanex_eee_rstshdn_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_EEE_RSTSHDN_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_EEE_RSTSHDN_BCFG(a,b) "GSERNX_LANEX_EEE_RSTSHDN_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_EEE_RSTSHDN_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_EEE_RSTSHDN_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_EEE_RSTSHDN_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_eye_ctl
+ *
+ * GSER Lane PCS Lite Eye Data Gathering Control Register
+ */
+union bdk_gsernx_lanex_eye_ctl
+{
+    uint64_t u; /* Raw 64-bit register value. */
+    struct bdk_gsernx_lanex_eye_ctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_57_63 : 7;
+        uint64_t rst_n : 1; /**< [ 56: 56](R/W) Clear and then set to reset the cycle count timer, the
+                                                                 done indicator, and the eye error counts. */
+        uint64_t reserved_49_55 : 7;
+        uint64_t eye_en : 1; /**< [ 48: 48](R/W) Enable eye error counting (with or without cycle count limits,
+                                                                 depending on GSERN()_LANE()_EYE_CTL[CYCLE_CNT_EN]). If the cycle count
+                                                                 limit feature is not used, counting will stop when
+                                                                 GSERN()_LANE()_EYE_CTL[EYE_EN] deasserts. Set this bit prior to
+                                                                 deasserting GSERN()_LANE()_EYE_CTL[RST_N] to use the eye data gathering
+                                                                 feature. */
+        uint64_t reserved_41_47 : 7;
+        uint64_t cycle_cnt_en : 1; /**< [ 40: 40](R/W) Enable use of GSERN()_LANE()_EYE_CTL[CYCLE_CNT] to limit number of cycles
+                                                                 of PCS RX clock over which the errors are accumulated. Set this bit
+                                                                 prior to deasserting GSERN()_LANE()_EYE_CTL[RST_N] to use cycle count
+                                                                 limiting in the eye data gathering feature. */
+        uint64_t cycle_cnt : 40; /**< [ 39: 0](R/W) When enabled, this contains the count of PCS receive-clock cycles
+                                                                 over which error counts are accumulated. Set
+                                                                 GSERN()_LANE()_EYE_CTL[CYCLE_CNT] prior to deasserting
+                                                                 GSERN()_LANE()_EYE_CTL[RST_N] to use cycle count limiting in the eye data
+                                                                 gathering feature. */
+#else /* Word 0 - Little Endian */
+        uint64_t cycle_cnt : 40; /**< [ 39: 0](R/W) When enabled, this contains the count of PCS receive-clock cycles
+                                                                 over which error counts are accumulated. Set
+                                                                 GSERN()_LANE()_EYE_CTL[CYCLE_CNT] prior to deasserting
+                                                                 GSERN()_LANE()_EYE_CTL[RST_N] to use cycle count limiting in the eye data
+                                                                 gathering feature. */
+        uint64_t cycle_cnt_en : 1; /**< [ 40: 40](R/W) Enable use of GSERN()_LANE()_EYE_CTL[CYCLE_CNT] to limit number of cycles
+                                                                 of PCS RX clock over which the errors are accumulated. Set this bit
+                                                                 prior to deasserting GSERN()_LANE()_EYE_CTL[RST_N] to use cycle count
+                                                                 limiting in the eye data gathering feature. */
+        uint64_t reserved_41_47 : 7;
+        uint64_t eye_en : 1; /**< [ 48: 48](R/W) Enable eye error counting (with or without cycle count limits,
+                                                                 depending on GSERN()_LANE()_EYE_CTL[CYCLE_CNT_EN]). If the cycle count
+                                                                 limit feature is not used, counting will stop when
+                                                                 GSERN()_LANE()_EYE_CTL[EYE_EN] deasserts. Set this bit prior to
+                                                                 deasserting GSERN()_LANE()_EYE_CTL[RST_N] to use the eye data gathering
+                                                                 feature. */
+        uint64_t reserved_49_55 : 7;
+        uint64_t rst_n : 1; /**< [ 56: 56](R/W) Clear and then set to reset the cycle count timer, the
+                                                                 done indicator, and the eye error counts. */
+        uint64_t reserved_57_63 : 7;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_eye_ctl_s cn; */
+};
+typedef union bdk_gsernx_lanex_eye_ctl bdk_gsernx_lanex_eye_ctl_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_EYE_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_EYE_CTL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900007b0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_EYE_CTL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_EYE_CTL(a,b) bdk_gsernx_lanex_eye_ctl_t
+#define bustype_BDK_GSERNX_LANEX_EYE_CTL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_EYE_CTL(a,b) "GSERNX_LANEX_EYE_CTL"
+#define device_bar_BDK_GSERNX_LANEX_EYE_CTL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_EYE_CTL(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_EYE_CTL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_eye_ctl_2
+ *
+ * GSER Lane PCS Lite Eye Data Gathering Control Register 2
+ * The low 4 bits in this register allow for shifting either the doutq or
+ * doute_cal data by 1 or 2 UI to allow for an offset in the framing of the
+ * deserialized data between these two data paths in the receiver. Software
+ * will need to iterate eye or scope measurement with identical settings
 * for the quadrature and eye datapaths, adjusting the shift bits in this
+ * register until no differences are accumulated. (Note that shifting both
+ * doutq and doute_cal would typically not be useful, since the resulting
+ * alignment would be the same as if neither were shifted.)
+ *
+ * The remaining bits control various aspects of the eye monitor error
+ * counting logic.
+ */
union bdk_gsernx_lanex_eye_ctl_2
{
    uint64_t u; /**< Raw 64-bit register value (overlays the bitfield view below). */
    /* NOTE: the big- and little-endian arms below are exact mirrors of each
       other and together must always cover all 64 bits. */
    struct bdk_gsernx_lanex_eye_ctl_2_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_41_63        : 23;
        uint64_t capture_ones_en       : 1;  /**< [ 40: 40](R/W) Set to enable capture ones, so that a full eye
                                                                 diagram can be generated. deassert to capture half an eye. The
                                                                 default is to enable the full eye. */
        uint64_t capture_ones          : 1;  /**< [ 39: 39](R/W) Set to choose to capture eye data for ones bits in the serial
                                                                 order in the received data stream. Clear to choose to capture
                                                                 eye data for zero bits in serial order in the received data stream.
                                                                 Program as desired before enabling eye data capture. Unlike
                                                                 [CAPTURE_EDGEMODE], this signal sets the mode within the eye monitor
                                                                 only.
                                                                 For 00 bit sequence errors, use [CAPTURE_ONES]=0 and [CAPTURE_TRANS]=0.
                                                                 For 01 bit sequence errors, use [CAPTURE_ONES]=0 and [CAPTURE_TRANS]=1.
                                                                 For 10 bit sequence errors, use [CAPTURE_ONES]=1 and [CAPTURE_TRANS]=1.
                                                                 For 11 bit sequence errors, use [CAPTURE_ONES]=1 and [CAPTURE_TRANS]=0. */
        uint64_t reserved_33_38        : 6;
        uint64_t eye_adapt_en          : 1;  /**< [ 32: 32](R/W) Set to enable eye path in the RX calibration DFE (rxcaldfe).
                                                                 It can be asserted/deasserted with GSERN()_LANE()_EYE_CTL[EYE_EN]. It must be
                                                                 enabled for [CAPTURE_EDGEMODE] and GSERN()_LANE()_RX_OS_5_BCFG[C1_E_ADJUST] to
                                                                 be applied to the eye/E path. */
        uint64_t reserved_25_31        : 7;
        uint64_t capture_edgemode      : 1;  /**< [ 24: 24](R/W) Set to choose capture of eye data for bits that transitioned in
                                                                 serial order in the received data stream. Clear to choose capture
                                                                 of eye data for bits that did not transition in serial order in
                                                                 the received data stream. Program as desired before enabling eye data
                                                                 capture. Unlike [CAPTURE_TRANS] and GSERN()_LANE()_RX_8_BCFG[DFE_EDGEMODE_OVRD], this signal
                                                                 controls the calculation of the c1 bits for the eye/E path. */
        uint64_t reserved_17_23        : 7;
        uint64_t capture_trans         : 1;  /**< [ 16: 16](R/W) Set to choose capture of eye data for bits that transitioned in
                                                                 serial order in the received data stream. Clear to choose capture
                                                                 of eye data for bits that did not transition in serial order in
                                                                 the received data stream. Program as desired before enabling eye data
                                                                 capture. Unlike [CAPTURE_EDGEMODE], this signal sets the mode within
                                                                 the eye monitor only.
                                                                 For 00 bit sequence errors, use [CAPTURE_ONES]=0 and [CAPTURE_TRANS]=0.
                                                                 For 01 bit sequence errors, use [CAPTURE_ONES]=0 and [CAPTURE_TRANS]=1.
                                                                 For 10 bit sequence errors, use [CAPTURE_ONES]=1 and [CAPTURE_TRANS]=1.
                                                                 For 11 bit sequence errors, use [CAPTURE_ONES]=1 and [CAPTURE_TRANS]=0. */
        uint64_t reserved_10_15        : 6;
        uint64_t dbl_shift_doute       : 1;  /**< [  9:  9](R/W) Set to shift the doute_cal (receiver eye calibration path) data
                                                                 by 2 UI earlier to align with doutq for eye and scope comparison
                                                                 logic. Only data captured in the eye or scope logic is impacted by
                                                                 this setting. When asserted, the double shift control has priority
                                                                 over the (single) shift control. Program as desired before enabling eye
                                                                 data capture. */
        uint64_t shift_doute           : 1;  /**< [  8:  8](R/W) Set to shift the doute_cal (receiver eye path) data by 1 UI
                                                                 earlier to align with doutq for eye and scope comparison logic. Only
                                                                 data captured in the eye or scope logic is impacted by this
                                                                 setting. Program as desired before enabling eye data capture. */
        uint64_t reserved_2_7          : 6;
        uint64_t dbl_shift_doutq       : 1;  /**< [  1:  1](R/W) Set to shift the doutq (receiver normal quadrature path) data by
                                                                 2 UI earlier to align with doute_cal for eye and scope comparison
                                                                 logic. Only data captured in the eye or scope logic is impacted by
                                                                 this setting. When asserted, the double shift control has priority
                                                                 over the (single) shift control. Program as desired before enabling eye
                                                                 data capture. */
        uint64_t shift_doutq           : 1;  /**< [  0:  0](R/W) Set to shift the doutq (receiver normal quadrature path) data by
                                                                 1 UI earlier to align with doute_cal for eye and scope comparison
                                                                 logic. Only data captured in the eye or scope logic is impacted by
                                                                 this setting. Program as desired before enabling eye data capture. */
#else /* Word 0 - Little Endian */
        uint64_t shift_doutq           : 1;  /**< [  0:  0](R/W) Set to shift the doutq (receiver normal quadrature path) data by
                                                                 1 UI earlier to align with doute_cal for eye and scope comparison
                                                                 logic. Only data captured in the eye or scope logic is impacted by
                                                                 this setting. Program as desired before enabling eye data capture. */
        uint64_t dbl_shift_doutq       : 1;  /**< [  1:  1](R/W) Set to shift the doutq (receiver normal quadrature path) data by
                                                                 2 UI earlier to align with doute_cal for eye and scope comparison
                                                                 logic. Only data captured in the eye or scope logic is impacted by
                                                                 this setting. When asserted, the double shift control has priority
                                                                 over the (single) shift control. Program as desired before enabling eye
                                                                 data capture. */
        uint64_t reserved_2_7          : 6;
        uint64_t shift_doute           : 1;  /**< [  8:  8](R/W) Set to shift the doute_cal (receiver eye path) data by 1 UI
                                                                 earlier to align with doutq for eye and scope comparison logic. Only
                                                                 data captured in the eye or scope logic is impacted by this
                                                                 setting. Program as desired before enabling eye data capture. */
        uint64_t dbl_shift_doute       : 1;  /**< [  9:  9](R/W) Set to shift the doute_cal (receiver eye calibration path) data
                                                                 by 2 UI earlier to align with doutq for eye and scope comparison
                                                                 logic. Only data captured in the eye or scope logic is impacted by
                                                                 this setting. When asserted, the double shift control has priority
                                                                 over the (single) shift control. Program as desired before enabling eye
                                                                 data capture. */
        uint64_t reserved_10_15        : 6;
        uint64_t capture_trans         : 1;  /**< [ 16: 16](R/W) Set to choose capture of eye data for bits that transitioned in
                                                                 serial order in the received data stream. Clear to choose capture
                                                                 of eye data for bits that did not transition in serial order in
                                                                 the received data stream. Program as desired before enabling eye data
                                                                 capture. Unlike [CAPTURE_EDGEMODE], this signal sets the mode within
                                                                 the eye monitor only.
                                                                 For 00 bit sequence errors, use [CAPTURE_ONES]=0 and [CAPTURE_TRANS]=0.
                                                                 For 01 bit sequence errors, use [CAPTURE_ONES]=0 and [CAPTURE_TRANS]=1.
                                                                 For 10 bit sequence errors, use [CAPTURE_ONES]=1 and [CAPTURE_TRANS]=1.
                                                                 For 11 bit sequence errors, use [CAPTURE_ONES]=1 and [CAPTURE_TRANS]=0. */
        uint64_t reserved_17_23        : 7;
        uint64_t capture_edgemode      : 1;  /**< [ 24: 24](R/W) Set to choose capture of eye data for bits that transitioned in
                                                                 serial order in the received data stream. Clear to choose capture
                                                                 of eye data for bits that did not transition in serial order in
                                                                 the received data stream. Program as desired before enabling eye data
                                                                 capture. Unlike [CAPTURE_TRANS] and GSERN()_LANE()_RX_8_BCFG[DFE_EDGEMODE_OVRD], this signal
                                                                 controls the calculation of the c1 bits for the eye/E path. */
        uint64_t reserved_25_31        : 7;
        uint64_t eye_adapt_en          : 1;  /**< [ 32: 32](R/W) Set to enable eye path in the RX calibration DFE (rxcaldfe).
                                                                 It can be asserted/deasserted with GSERN()_LANE()_EYE_CTL[EYE_EN]. It must be
                                                                 enabled for [CAPTURE_EDGEMODE] and GSERN()_LANE()_RX_OS_5_BCFG[C1_E_ADJUST] to
                                                                 be applied to the eye/E path. */
        uint64_t reserved_33_38        : 6;
        uint64_t capture_ones          : 1;  /**< [ 39: 39](R/W) Set to choose to capture eye data for ones bits in the serial
                                                                 order in the received data stream. Clear to choose to capture
                                                                 eye data for zero bits in serial order in the received data stream.
                                                                 Program as desired before enabling eye data capture. Unlike
                                                                 [CAPTURE_EDGEMODE], this signal sets the mode within the eye monitor
                                                                 only.
                                                                 For 00 bit sequence errors, use [CAPTURE_ONES]=0 and [CAPTURE_TRANS]=0.
                                                                 For 01 bit sequence errors, use [CAPTURE_ONES]=0 and [CAPTURE_TRANS]=1.
                                                                 For 10 bit sequence errors, use [CAPTURE_ONES]=1 and [CAPTURE_TRANS]=1.
                                                                 For 11 bit sequence errors, use [CAPTURE_ONES]=1 and [CAPTURE_TRANS]=0. */
        uint64_t capture_ones_en       : 1;  /**< [ 40: 40](R/W) Set to enable capture ones, so that a full eye
                                                                 diagram can be generated. deassert to capture half an eye. The
                                                                 default is to enable the full eye. */
        uint64_t reserved_41_63        : 23;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_eye_ctl_2_s cn; */
};
typedef union bdk_gsernx_lanex_eye_ctl_2 bdk_gsernx_lanex_eye_ctl_2_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_EYE_CTL_2(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_EYE_CTL_2(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900007c0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_EYE_CTL_2", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_EYE_CTL_2(a,b) bdk_gsernx_lanex_eye_ctl_2_t
+#define bustype_BDK_GSERNX_LANEX_EYE_CTL_2(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_EYE_CTL_2(a,b) "GSERNX_LANEX_EYE_CTL_2"
+#define device_bar_BDK_GSERNX_LANEX_EYE_CTL_2(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_EYE_CTL_2(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_EYE_CTL_2(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_eye_dat
+ *
+ * GSER Lane PCS Lite Eye Data Gathering Result Register
+ */
union bdk_gsernx_lanex_eye_dat
{
    uint64_t u; /**< Raw 64-bit register value (overlays the bitfield view below). */
    /* NOTE: the big- and little-endian arms below are exact mirrors of each
       other and together must always cover all 64 bits. */
    struct bdk_gsernx_lanex_eye_dat_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_50_63        : 14;
        uint64_t cycle_cnt_done        : 1;  /**< [ 49: 49](RO/H) Indicates the GSERN()_LANE()_EYE_CTL[CYCLE_CNT] has expired if
                                                                 GSERN()_LANE()_EYE_CTL[CYCLE_CNT_EN] is asserted. If
                                                                 GSERN()_LANE()_EYE_CTL[CYCLE_CNT_EN] is deasserted, this bit will always
                                                                 read as asserted. */
        uint64_t reserved_48           : 1;
        uint64_t err_cnt_ovf           : 1;  /**< [ 47: 47](RO/H) When set indicates GSERN()_LANE()_EYE_DAT[ERR_CNT] overflowed and is
                                                                 not accurate. */
        uint64_t reserved_45_46        : 2;
        uint64_t err_cnt               : 45; /**< [ 44:  0](RO/H) Count of bit errors seen in doute_cal relative to doutq. If
                                                                 GSERN()_LANE()_EYE_CTL[CYCLE_CNT_EN] and GSERN()_LANE()_EYE_DAT[CYCLE_CNT_DONE]
                                                                 are not both asserted, GSERN()_LANE()_EYE_DAT[ERR_CNT] may not be reliable
                                                                 unless GSERN()_LANE()_EYE_CTL[EYE_EN] is first cleared (to stop the
                                                                 error counter). */
#else /* Word 0 - Little Endian */
        uint64_t err_cnt               : 45; /**< [ 44:  0](RO/H) Count of bit errors seen in doute_cal relative to doutq. If
                                                                 GSERN()_LANE()_EYE_CTL[CYCLE_CNT_EN] and GSERN()_LANE()_EYE_DAT[CYCLE_CNT_DONE]
                                                                 are not both asserted, GSERN()_LANE()_EYE_DAT[ERR_CNT] may not be reliable
                                                                 unless GSERN()_LANE()_EYE_CTL[EYE_EN] is first cleared (to stop the
                                                                 error counter). */
        uint64_t reserved_45_46        : 2;
        uint64_t err_cnt_ovf           : 1;  /**< [ 47: 47](RO/H) When set indicates GSERN()_LANE()_EYE_DAT[ERR_CNT] overflowed and is
                                                                 not accurate. */
        uint64_t reserved_48           : 1;
        uint64_t cycle_cnt_done        : 1;  /**< [ 49: 49](RO/H) Indicates the GSERN()_LANE()_EYE_CTL[CYCLE_CNT] has expired if
                                                                 GSERN()_LANE()_EYE_CTL[CYCLE_CNT_EN] is asserted. If
                                                                 GSERN()_LANE()_EYE_CTL[CYCLE_CNT_EN] is deasserted, this bit will always
                                                                 read as asserted. */
        uint64_t reserved_50_63        : 14;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_eye_dat_s cn; */
};
typedef union bdk_gsernx_lanex_eye_dat bdk_gsernx_lanex_eye_dat_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_EYE_DAT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_EYE_DAT(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900007d0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_EYE_DAT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_EYE_DAT(a,b) bdk_gsernx_lanex_eye_dat_t
+#define bustype_BDK_GSERNX_LANEX_EYE_DAT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_EYE_DAT(a,b) "GSERNX_LANEX_EYE_DAT"
+#define device_bar_BDK_GSERNX_LANEX_EYE_DAT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_EYE_DAT(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_EYE_DAT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_idledet_hys
+ *
+ * GSER Lane Receiver Idle Detector Hysteresis Control Register
 * Parameters controlling hysteresis in the custom receiver's idle detector. When
 * enabled, the hysteresis function adjusts the idle detector offset to bias the
 * detector in favor of the current idle state after the current state has been stable
 * for some time. The [HYS_CNT], [HYS_POS], and [HYS_NEG] control fields should be set
 * before or concurrently with writing [HYS_EN] to 1 when the hysteresis function is to
 * be used.
+ */
union bdk_gsernx_lanex_idledet_hys
{
    uint64_t u; /**< Raw 64-bit register value (overlays the bitfield view below). */
    /* NOTE: the big- and little-endian arms below are exact mirrors of each
       other and together must always cover all 64 bits. */
    struct bdk_gsernx_lanex_idledet_hys_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_17_63        : 47;
        uint64_t hys_en                : 1;  /**< [ 16: 16](R/W) Enable the hysteresis function. */
        uint64_t reserved_14_15        : 2;
        uint64_t hys_cnt               : 6;  /**< [ 13:  8](R/W) Count of 10 ns cycles after a change in idle offset hysteresis direction before a new
                                                                 hysteresis direction will be applied. */
        uint64_t hys_pos               : 4;  /**< [  7:  4](R/W) Offset shift to bias the idle detector in favor of not idle after the
                                                                 detector has reported not idle for [HYS_CNT] cycles. The offset shift is
                                                                 incremented approximately 5 mV per step. */
        uint64_t hys_neg               : 4;  /**< [  3:  0](R/W) Offset shift to bias the idle detector in favor of idle after the detector has
                                                                 reported idle for [HYS_CNT] cycles. The offset shift is incremented
                                                                 approximately 5 mV per step. */
#else /* Word 0 - Little Endian */
        uint64_t hys_neg               : 4;  /**< [  3:  0](R/W) Offset shift to bias the idle detector in favor of idle after the detector has
                                                                 reported idle for [HYS_CNT] cycles. The offset shift is incremented
                                                                 approximately 5 mV per step. */
        uint64_t hys_pos               : 4;  /**< [  7:  4](R/W) Offset shift to bias the idle detector in favor of not idle after the
                                                                 detector has reported not idle for [HYS_CNT] cycles. The offset shift is
                                                                 incremented approximately 5 mV per step. */
        uint64_t hys_cnt               : 6;  /**< [ 13:  8](R/W) Count of 10 ns cycles after a change in idle offset hysteresis direction before a new
                                                                 hysteresis direction will be applied. */
        uint64_t reserved_14_15        : 2;
        uint64_t hys_en                : 1;  /**< [ 16: 16](R/W) Enable the hysteresis function. */
        uint64_t reserved_17_63        : 47;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_idledet_hys_s cn; */
};
typedef union bdk_gsernx_lanex_idledet_hys bdk_gsernx_lanex_idledet_hys_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_IDLEDET_HYS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_IDLEDET_HYS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900010f0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_IDLEDET_HYS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_IDLEDET_HYS(a,b) bdk_gsernx_lanex_idledet_hys_t
+#define bustype_BDK_GSERNX_LANEX_IDLEDET_HYS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_IDLEDET_HYS(a,b) "GSERNX_LANEX_IDLEDET_HYS"
+#define device_bar_BDK_GSERNX_LANEX_IDLEDET_HYS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_IDLEDET_HYS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_IDLEDET_HYS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_imapsel_bcfg
+ *
+ * GSER Lane Interpolator Map Selection Register
+ * Selection control for the interpolator map. Set prior to bringing the analog
+ * receiver out of reset.
+ */
union bdk_gsernx_lanex_imapsel_bcfg
{
    uint64_t u; /**< Raw 64-bit register value (overlays the bitfield view below). */
    /* NOTE: the big- and little-endian arms below are exact mirrors of each
       other and together must always cover all 64 bits. */
    struct bdk_gsernx_lanex_imapsel_bcfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_5_63         : 59;
        uint64_t map_case              : 5;  /**< [  4:  0](R/W) Interpolator map case selector.
                                                                 0x0 = data_500_erc_2_c_0_20_mean.
                                                                 0x1 = data_407_erc_2_c_0_20_mean.
                                                                 0x2 = data_333_erc_3_c_0_20_mean.
                                                                 0x3 = data_167_erc_5_c_0_20_mean.
                                                                 0x4 = data_80_erc_8_c_0_20_mean.
                                                                 0x5 = data_63_erc_10_c_0_20_mean.
                                                                 0x6 = data_50_erc_11_c_0_20_mean.
                                                                 0x7 = data_40_erc_13_c_0_20_mean.
                                                                 0x8 = data_39_erc_14_c_0_20_mean.
                                                                 0x9 = data_36_erc_15_c_0_20_mean.
                                                                 0xa = data_31_erc_15_c_0_20_mean.
                                                                 0xf = {GSERN()_LANE()_MAP1, GSERN()_LANE()_MAP0}.
                                                                 all others = 0. */
#else /* Word 0 - Little Endian */
        uint64_t map_case              : 5;  /**< [  4:  0](R/W) Interpolator map case selector.
                                                                 0x0 = data_500_erc_2_c_0_20_mean.
                                                                 0x1 = data_407_erc_2_c_0_20_mean.
                                                                 0x2 = data_333_erc_3_c_0_20_mean.
                                                                 0x3 = data_167_erc_5_c_0_20_mean.
                                                                 0x4 = data_80_erc_8_c_0_20_mean.
                                                                 0x5 = data_63_erc_10_c_0_20_mean.
                                                                 0x6 = data_50_erc_11_c_0_20_mean.
                                                                 0x7 = data_40_erc_13_c_0_20_mean.
                                                                 0x8 = data_39_erc_14_c_0_20_mean.
                                                                 0x9 = data_36_erc_15_c_0_20_mean.
                                                                 0xa = data_31_erc_15_c_0_20_mean.
                                                                 0xf = {GSERN()_LANE()_MAP1, GSERN()_LANE()_MAP0}.
                                                                 all others = 0. */
        uint64_t reserved_5_63         : 59;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_imapsel_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_imapsel_bcfg bdk_gsernx_lanex_imapsel_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_IMAPSEL_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_IMAPSEL_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001df0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_IMAPSEL_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_IMAPSEL_BCFG(a,b) bdk_gsernx_lanex_imapsel_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_IMAPSEL_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_IMAPSEL_BCFG(a,b) "GSERNX_LANEX_IMAPSEL_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_IMAPSEL_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_IMAPSEL_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_IMAPSEL_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_init_bsts
+ *
+ * GSER Lane Initialization Base-level Status Register
+ */
union bdk_gsernx_lanex_init_bsts
{
    uint64_t u; /**< Raw 64-bit register value (overlays the bitfield view below). */
    /* NOTE: the big- and little-endian arms below are exact mirrors of each
       other and together must always cover all 64 bits. */
    struct bdk_gsernx_lanex_init_bsts_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_43_63        : 21;
        uint64_t eye_deep_idle         : 1;  /**< [ 42: 42](RO/H) Receiver eye path state is deep idle. */
        uint64_t eye_rst_sm_complete   : 1;  /**< [ 41: 41](RO/H) Indicates that the lane eye receive reset state machine has
                                                                 completed. If [EYE_RST_SM_COMPLETE] is set and [EYE_READY] is not,
                                                                 there may be CSR register setting which prevent the receiver eye data
                                                                 path from being ready for use, e.g., power-down or reset overrides. */
        uint64_t eye_ready             : 1;  /**< [ 40: 40](RO/H) Lane analog receiver eye data path reset state machine completion
                                                                 status indicating that the lane receiver eye path ready for use. */
        uint64_t tx_pcie_p2            : 1;  /**< [ 39: 39](RO/H) Transmitter state is PCIe power state P2. */
        uint64_t tx_pcie_p1s2          : 1;  /**< [ 38: 38](RO/H) Transmitter state is PCIe power state P1 substate 2. */
        uint64_t tx_pcie_p1s1          : 1;  /**< [ 37: 37](RO/H) Transmitter state is PCIe power state P1 substate 1. */
        uint64_t tx_pcie_p1cpm         : 1;  /**< [ 36: 36](RO/H) Transmitter state is PCIe power state P1.CPM (entry to P1 substates
                                                                 or clock disabled state for normal P1 with clock PM support). */
        uint64_t tx_pcie_p1            : 1;  /**< [ 35: 35](RO/H) Transmitter state is PCIe power state P1. */
        uint64_t tx_deep_idle          : 1;  /**< [ 34: 34](RO/H) Transmitter state is deep idle. */
        uint64_t tx_rst_sm_complete    : 1;  /**< [ 33: 33](RO/H) Indicates that the lane transmitter reset state machine has
                                                                 completed. If [TX_RST_SM_COMPLETE] is set and [TX_READY] is not,
                                                                 there may be CSR register setting which prevent the transmitter from
                                                                 being ready for use, e.g., power-down or reset overrides. */
        uint64_t tx_ready              : 1;  /**< [ 32: 32](RO/H) Lane analog transmitter reset state machine completion status
                                                                 indicating that the lane transmitter is in "idle" configuration and
                                                                 ready to start transmitting data after changing the transmitter drive
                                                                 settings to transmit data. */
        uint64_t rx_pcie_p2            : 1;  /**< [ 31: 31](RO/H) Receiver state is PCIe power state P2. */
        uint64_t rx_pcie_p1s2          : 1;  /**< [ 30: 30](RO/H) Receiver state is PCIe power state P1 substate 2. */
        uint64_t rx_pcie_p1s1          : 1;  /**< [ 29: 29](RO/H) Receiver state is PCIe power state P1 substate 1. */
        uint64_t rx_pcie_p1cpm         : 1;  /**< [ 28: 28](RO/H) Receiver state is PCIe power state P1.CPM (entry to P1 substates or
                                                                 clock disabled state for normal P1 with clock PM support). */
        uint64_t rx_pcie_p1            : 1;  /**< [ 27: 27](RO/H) Receiver state is PCIe power state P1. */
        uint64_t rx_deep_idle          : 1;  /**< [ 26: 26](RO/H) Receiver state is deep idle. */
        uint64_t rx_rst_sm_complete    : 1;  /**< [ 25: 25](RO/H) Indicates that the lane receiver reset state machine has
                                                                 completed. If [RX_RST_SM_COMPLETE] is set and [RX_READY] is not,
                                                                 there may be CSR register setting which prevent the receiver from
                                                                 being ready for use, e.g., power-down or reset overrides. */
        uint64_t rx_ready              : 1;  /**< [ 24: 24](RO/H) Lane analog receiver reset state machine completion status that the
                                                                 reset sequence has completed and the lane receiver is ready for afe
                                                                 and dfe adaptation. */
        uint64_t pll_cp_cal            : 4;  /**< [ 23: 20](RO/H) PLL calibration state machine's resulting charge pump setting. Only
                                                                 valid if [CAL_READY] is set. */
        uint64_t reserved_17_19        : 3;
        uint64_t pll_band_cal          : 5;  /**< [ 16: 12](RO/H) PLL calibration state machine's resulting VCO band setting. Only valid
                                                                 if [CAL_READY] is set. */
        uint64_t pll_pcie_p2           : 1;  /**< [ 11: 11](RO/H) Lane PLL state is PCIe power state P2. */
        uint64_t pll_pcie_p1s2         : 1;  /**< [ 10: 10](RO/H) Lane PLL state is PCIe power state P1 substate 2. */
        uint64_t pll_pcie_p1s1         : 1;  /**< [  9:  9](RO/H) Lane PLL state is PCIe power state P1 substate 1. */
        uint64_t pll_pcie_p1cpm        : 1;  /**< [  8:  8](RO/H) Lane PLL state is PCIe power state P1.CPM (entry to P1 substates or
                                                                 clock disabled state for normal P1 with clock PM support). */
        uint64_t pll_pcie_p1           : 1;  /**< [  7:  7](RO/H) Lane PLL state is PCIe power state P1. */
        uint64_t pll_deep_idle         : 1;  /**< [  6:  6](RO/H) Lane PLL state is deep idle. */
        uint64_t rst_sm_complete       : 1;  /**< [  5:  5](RO/H) PLL reset state machine has completed. If
                                                                 [RST_SM_COMPLETE] is set and [RST_SM_READY] is not, there may still
                                                                 be CSR register settings preventing the PLL from being ready
                                                                 for use, e.g., power-down or reset overrides. */
        uint64_t rst_sm_ready          : 1;  /**< [  4:  4](RO/H) PLL reset state machine status indicating that the reset
                                                                 sequence has completed and this PLL is ready for use. */
        uint64_t lock                  : 1;  /**< [  3:  3](RO/H) PLL lock status; only valid if [LOCK_READY] is set. */
        uint64_t lock_ready            : 1;  /**< [  2:  2](RO/H) PLL lock status check is complete following most recent PLL
                                                                 reset or assertion of GSERN()_LANE()_RST1_BCFG[LOCK_CHECK]. */
        uint64_t cal_fail              : 1;  /**< [  1:  1](RO/H) PLL calibration failed; valid only if [CAL_READY] is set. */
        uint64_t cal_ready             : 1;  /**< [  0:  0](RO/H) PLL calibration completed */
#else /* Word 0 - Little Endian */
        uint64_t cal_ready             : 1;  /**< [  0:  0](RO/H) PLL calibration completed */
        uint64_t cal_fail              : 1;  /**< [  1:  1](RO/H) PLL calibration failed; valid only if [CAL_READY] is set. */
        uint64_t lock_ready            : 1;  /**< [  2:  2](RO/H) PLL lock status check is complete following most recent PLL
                                                                 reset or assertion of GSERN()_LANE()_RST1_BCFG[LOCK_CHECK]. */
        uint64_t lock                  : 1;  /**< [  3:  3](RO/H) PLL lock status; only valid if [LOCK_READY] is set. */
        uint64_t rst_sm_ready          : 1;  /**< [  4:  4](RO/H) PLL reset state machine status indicating that the reset
                                                                 sequence has completed and this PLL is ready for use. */
        uint64_t rst_sm_complete       : 1;  /**< [  5:  5](RO/H) PLL reset state machine has completed. If
                                                                 [RST_SM_COMPLETE] is set and [RST_SM_READY] is not, there may still
                                                                 be CSR register settings preventing the PLL from being ready
                                                                 for use, e.g., power-down or reset overrides. */
        uint64_t pll_deep_idle         : 1;  /**< [  6:  6](RO/H) Lane PLL state is deep idle. */
        uint64_t pll_pcie_p1           : 1;  /**< [  7:  7](RO/H) Lane PLL state is PCIe power state P1. */
        uint64_t pll_pcie_p1cpm        : 1;  /**< [  8:  8](RO/H) Lane PLL state is PCIe power state P1.CPM (entry to P1 substates or
                                                                 clock disabled state for normal P1 with clock PM support). */
        uint64_t pll_pcie_p1s1         : 1;  /**< [  9:  9](RO/H) Lane PLL state is PCIe power state P1 substate 1. */
        uint64_t pll_pcie_p1s2         : 1;  /**< [ 10: 10](RO/H) Lane PLL state is PCIe power state P1 substate 2. */
        uint64_t pll_pcie_p2           : 1;  /**< [ 11: 11](RO/H) Lane PLL state is PCIe power state P2. */
        uint64_t pll_band_cal          : 5;  /**< [ 16: 12](RO/H) PLL calibration state machine's resulting VCO band setting. Only valid
                                                                 if [CAL_READY] is set. */
        uint64_t reserved_17_19        : 3;
        uint64_t pll_cp_cal            : 4;  /**< [ 23: 20](RO/H) PLL calibration state machine's resulting charge pump setting. Only
                                                                 valid if [CAL_READY] is set. */
        uint64_t rx_ready              : 1;  /**< [ 24: 24](RO/H) Lane analog receiver reset state machine completion status that the
                                                                 reset sequence has completed and the lane receiver is ready for afe
                                                                 and dfe adaptation. */
        uint64_t rx_rst_sm_complete    : 1;  /**< [ 25: 25](RO/H) Indicates that the lane receiver reset state machine has
                                                                 completed. If [RX_RST_SM_COMPLETE] is set and [RX_READY] is not,
                                                                 there may be CSR register setting which prevent the receiver from
                                                                 being ready for use, e.g., power-down or reset overrides. */
        uint64_t rx_deep_idle          : 1;  /**< [ 26: 26](RO/H) Receiver state is deep idle. */
        uint64_t rx_pcie_p1            : 1;  /**< [ 27: 27](RO/H) Receiver state is PCIe power state P1. */
        uint64_t rx_pcie_p1cpm         : 1;  /**< [ 28: 28](RO/H) Receiver state is PCIe power state P1.CPM (entry to P1 substates or
                                                                 clock disabled state for normal P1 with clock PM support). */
        uint64_t rx_pcie_p1s1          : 1;  /**< [ 29: 29](RO/H) Receiver state is PCIe power state P1 substate 1. */
        uint64_t rx_pcie_p1s2          : 1;  /**< [ 30: 30](RO/H) Receiver state is PCIe power state P1 substate 2. */
        uint64_t rx_pcie_p2            : 1;  /**< [ 31: 31](RO/H) Receiver state is PCIe power state P2. */
        uint64_t tx_ready              : 1;  /**< [ 32: 32](RO/H) Lane analog transmitter reset state machine completion status
                                                                 indicating that the lane transmitter is in "idle" configuration and
                                                                 ready to start transmitting data after changing the transmitter drive
                                                                 settings to transmit data. */
        uint64_t tx_rst_sm_complete    : 1;  /**< [ 33: 33](RO/H) Indicates that the lane transmitter reset state machine has
                                                                 completed. If [TX_RST_SM_COMPLETE] is set and [TX_READY] is not,
                                                                 there may be CSR register setting which prevent the transmitter from
                                                                 being ready for use, e.g., power-down or reset overrides. */
        uint64_t tx_deep_idle          : 1;  /**< [ 34: 34](RO/H) Transmitter state is deep idle. */
        uint64_t tx_pcie_p1            : 1;  /**< [ 35: 35](RO/H) Transmitter state is PCIe power state P1. */
        uint64_t tx_pcie_p1cpm         : 1;  /**< [ 36: 36](RO/H) Transmitter state is PCIe power state P1.CPM (entry to P1 substates
                                                                 or clock disabled state for normal P1 with clock PM support). */
        uint64_t tx_pcie_p1s1          : 1;  /**< [ 37: 37](RO/H) Transmitter state is PCIe power state P1 substate 1. */
        uint64_t tx_pcie_p1s2          : 1;  /**< [ 38: 38](RO/H) Transmitter state is PCIe power state P1 substate 2. */
        uint64_t tx_pcie_p2            : 1;  /**< [ 39: 39](RO/H) Transmitter state is PCIe power state P2. */
        uint64_t eye_ready             : 1;  /**< [ 40: 40](RO/H) Lane analog receiver eye data path reset state machine completion
                                                                 status indicating that the lane receiver eye path ready for use. */
        uint64_t eye_rst_sm_complete   : 1;  /**< [ 41: 41](RO/H) Indicates that the lane eye receive reset state machine has
                                                                 completed. If [EYE_RST_SM_COMPLETE] is set and [EYE_READY] is not,
                                                                 there may be CSR register setting which prevent the receiver eye data
                                                                 path from being ready for use, e.g., power-down or reset overrides. */
        uint64_t eye_deep_idle         : 1;  /**< [ 42: 42](RO/H) Receiver eye path state is deep idle. */
        uint64_t reserved_43_63        : 21;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_init_bsts_s cn; */
};
typedef union bdk_gsernx_lanex_init_bsts bdk_gsernx_lanex_init_bsts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_INIT_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_INIT_BSTS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000480ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_INIT_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_INIT_BSTS(a,b) bdk_gsernx_lanex_init_bsts_t
+#define bustype_BDK_GSERNX_LANEX_INIT_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_INIT_BSTS(a,b) "GSERNX_LANEX_INIT_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_INIT_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_INIT_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_INIT_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_lt_bcfg
+ *
+ * GSER Lane PCS Lite Configuration (Transmit, Receive, and Loopback) Register
+ */
union bdk_gsernx_lanex_lt_bcfg
{
    uint64_t u; /* Whole-register (64-bit) access. */
    /* NOTE(review): auto-generated CSR layout. The little-endian half below must
       remain the exact mirror of the big-endian half; keep both in sync. */
    struct bdk_gsernx_lanex_lt_bcfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t inj_err_cnt_rst_n : 1;     /**< [ 63: 63](R/W/H) Set to zero to hold the error injection counter in reset. */
        uint64_t inj_err_cnt_en : 1;        /**< [ 62: 62](R/W) PCS will inject a single bit error every other cycle in the transmit
                                                 data stream at some time following an assertion of
                                                 [INJ_ERR_CNT_EN]. The number of error cycles to insert is set by
                                                 [INJ_ERR_CNT_LEN] and it increments the error bit index each
                                                 cycle. Once all the errors have been transmitted GSER sets
                                                 GSERN()_LANE()_LT_BSTS[INJ_ERR_CNT_DONE]. Injection of a second set of
                                                 errors will require clearing the counter by holding [INJ_ERR_CNT_RST_N],
                                                 asserting [INJ_ERR_CNT_EN], then releasing [INJ_ERR_CNT_RST_N]. This mode
                                                 should be used separately from [INJ_ERR_BURST_EN] and only one of them
                                                 can be asserted at any time. */
        uint64_t inj_err_cnt_len : 6;       /**< [ 61: 56](R/W) Tells the PCS lite error injection logic the total number of bit errors
                                                 to insert in a walking pattern. Every other cycle 1 bit error will be
                                                 inserted in a walking index up to the count value specified. The max
                                                 value is set by the valid data width transmitted. For example, if 8
                                                 bits of valid data are transmitted each cycle only from 1-8 count
                                                 values can be set. The same for 10, 16, 20, 32, and 40 bits. */
        uint64_t reserved_55 : 1;
        uint64_t inj_err_burst_en : 1;      /**< [ 54: 54](R/W) PCS will inject a contiguous set of error bits in the transmit data
                                                 stream at some time following an assertion of [INJ_ERR_BURST_EN]. The
                                                 length of contiguous errors is set by [INJ_ERR_BURST_LEN]. Injection
                                                 of a second set of errors will require deasserting and then
                                                 asserting [INJ_ERR_BURST_EN] again. This mode should be used separately
                                                 from [INJ_ERR_CNT_EN] and only one of them can be asserted at any time. */
        uint64_t inj_err_burst_len : 6;     /**< [ 53: 48](R/W) Tells the PCS lite error injection logic what length the burst error
                                                 mask should be. The max value is set by the valid data width
                                                 transmitted. For example, if 8 bits of valid data are transmitted
                                                 each cycle, only from 1-8 bits of contiguous errors can be set. The
                                                 same for 10, 16, 20, 32, and 40 bits. */
        uint64_t reserved_44_47 : 4;
        uint64_t pat_dp_width : 3;          /**< [ 43: 41](R/W/H) Tells the pattern memory generator/checker logic what width to use
                                                 in the generator and checker data paths.
                                                 0x0 = 8 (requires bit-stuffing/unstuffing or for debug).
                                                 0x1 = 10 (requires bit-stuffing/unstuffing or for debug).
                                                 0x2 = 16.
                                                 0x3 = 20.
                                                 0x4 = 32.
                                                 0x5 = 40.

                                                 Checking of received data
                                                 works correctly only for clock divider ratios of 10, 20, and 40. The
                                                 transmit data sequence is correct for all clock ratios. */
        uint64_t prbs_dp_width : 3;         /**< [ 40: 38](R/W/H) Tells the PCS lite layer PRBS logic what width to use in the
                                                 generator and checker data paths.
                                                 0x0 = 8 (requires bit-stuffing/unstuffing or for debug).
                                                 0x1 = 10 (requires bit-stuffing/unstuffing or for debug).
                                                 0x2 = 16.
                                                 0x3 = 20.
                                                 0x4 = 32.
                                                 0x5 = 40. */
        uint64_t rx_dp_width : 3;           /**< [ 37: 35](R/W/H) Tells the PCS lite layer logic what width to use in the receive data
                                                 path between the analog macro and downstream logic, hence what
                                                 data bits of the doutq[39:0] bus are in use.
                                                 0x0 = 8 (reserved; debug only).
                                                 0x1 = 10 (reserved; debug only).
                                                 0x2 = 16.
                                                 0x3 = 20.
                                                 0x4 = 32.
                                                 0x5 = 40.

                                                 This value must only be changed while lite layer is in reset. */
        uint64_t tx_dp_width : 3;           /**< [ 34: 32](R/W/H) Tells the PCS lite layer logic what width to use in the transmit
                                                 data path between the lite layer FIFO and the analog macro, hence
                                                 what data bits of the tx_data[39:0] bus are in use. Values:
                                                 0x0 = 8 (reserved; debug only).
                                                 0x1 = 10 (reserved; debug only).
                                                 0x2 = 16.
                                                 0x3 = 20.
                                                 0x4 = 32.
                                                 0x5 = 40.

                                                 This value must only be changed while lite layer is in reset. */
        uint64_t reserved_26_31 : 6;
        uint64_t core_loopback_mode : 1;    /**< [ 25: 25](R/W/H) Enable the core-side loopback mode; controller transmit data are
                                                 looped back to the controller as receive data in the PCS lite layer.
                                                 This value must only be changed while lite layer is in reset. */
        uint64_t sloop_mode : 1;            /**< [ 24: 24](R/W/H) Enable shallow loopback mode (SerDes receive data looped back to
                                                 SerDes transmit in the PCS lite layer).
                                                 This value must only be changed while lite layer is in reset. */
        uint64_t reserved_23 : 1;
        uint64_t bitstuff_rx_drop_even : 1; /**< [ 22: 22](R/W/H) Tells the PCS lite receive datapath to drop even bits
                                                 in the vector of received data from the PMA when [BITSTUFF_RX_EN] is
                                                 set:
                                                 0 = Drop bits 1, 3, 5, 7, ...
                                                 1 = Drop bits 0, 2, 4, 6, ...

                                                 This bit is also used in the eye monitor to mask out the dropped
                                                 bits when counting mismatches.
                                                 This value must only be changed while lite layer is in reset. */
        uint64_t bitstuff_rx_en : 1;        /**< [ 21: 21](R/W/H) Set to expect duplicates on the PMA RX data and drop bits after
                                                 alignment & ordering for PCS layer to consume. The drop ordering is
                                                 determined by [BITSTUFF_RX_DROP_EVEN]. This value must only be changed
                                                 while lite layer is in reset. */
        uint64_t inv_rx_polarity : 1;       /**< [ 20: 20](R/W/H) Set to invert the polarity of the received data bits. Note that
                                                 the PCS-lite PRBS checker will require [INV_RX_POLARITY] to be asserted
                                                 when it is in use to check standard PRBS data from an external
                                                 source. This value must only be changed while lite layer is in
                                                 reset. */
        uint64_t reverse_rx_bit_order : 1;  /**< [ 19: 19](R/W/H) While asserted, the normal receive order (lowest valid bit index
                                                 received first, highest valid index last) is reversed so the highest
                                                 valid bit index is received first and lowest valid index is received
                                                 last. This control needs to be asserted for PRBS testing using the
                                                 PRBS checker in the GSER macro and for PCIe Gen-1 and Gen-2. */
        uint64_t reserved_18 : 1;
        uint64_t use_bph_wrreq_psh : 1;     /**< [ 17: 17](R/W) Reserved.
                                                 Internal:
                                                 Delay the transmit FIFO push request synchronization to the pop side by one
                                                 txdivclk phase. This is a diagnostic / debug tool to help with transmit lane
                                                 alignment issues. */
        uint64_t fifo_algn_qlm_mask : 4;    /**< [ 16: 13](R/W) Selection control for which QLMs in this QLM's link group to align in timing the
                                                 deassertion of reset to this lane's transmitter's clock alignment FIFO.
                                                 \<0\> = Wait for QLM 0.
                                                 \<1\> = Wait for QLM 1.
                                                 \<2\> = Wait for QLM 2.
                                                 \<3\> = Wait for QLM 3.

                                                 If a link is made up of lanes in multiple QLMs, the mask in each lane must
                                                 include all active QLMs (including the QLM containing the current lane). */
        uint64_t fifo_algn_lane_mask : 4;   /**< [ 12: 9](R/W) Selection control for which lanes in the current QLM to align in timing the
                                                 deassertion of reset to this lane's transmitter's clock alignment FIFO.
                                                 \<0\> = Wait for Lane 0.
                                                 \<1\> = Wait for Lane 1.
                                                 \<2\> = Wait for Lane 2.
                                                 \<3\> = Wait for Lane 3.

                                                 The bit corresponding to the current Lane is ignored. */
        uint64_t fifo_bypass_en : 1;        /**< [ 8: 8](R/W) For diagnostic use only.
                                                 Internal:
                                                 This control is currently inactive and is left as a placeholder for
                                                 possible re-inclusion in 7nm.

                                                 Set to bypass the PCS lite layer transmit asynchronous FIFO
                                                 with a single flop. This saves 1-2 cycles of latency in the transmit
                                                 path, but imposes additional constraints on static timing
                                                 closure. Note that shallow loopback data cannot bypass the FIFO. */
        uint64_t tx_fifo_pop_start_addr : 3; /**< [ 7: 5](R/W) Reserved.
                                                 Internal:
                                                 Starting address for lite transmit FIFO pops
                                                 (reads). Changing this allows shifting the latency through the FIFO in steps of
                                                 1 txdivclk cycle (8, 10, 16, 20, 32, or 40 UI, depending on data path width
                                                 setting). The function is similar to FIFO_UNLOAD_DLY, but provides a wider range
                                                 of adjustment. For diagnostic use only. */
        uint64_t fifo_unload_dly : 1;       /**< [ 4: 4](R/W/H) Set to add one cycle delay to the PCS lite layer transmit
                                                 asynchronous FIFO pop data. This value must only be changed before
                                                 releasing [FIFO_RST_N]. */
        uint64_t fifo_rst_n : 1;            /**< [ 3: 3](R/W/H) Clear to hold the PCS lite layer transmit asynchronous FIFO in
                                                 reset. */
        uint64_t bitstuff_tx_en : 1;        /**< [ 2: 2](R/W/H) Set to duplicate the first 20 bits of TX data before
                                                 alignment & ordering for lower data rates. This could be PCS TX
                                                 data, PRBS data, or shallow-loopback RX data depending on mode.
                                                 This value must only be changed while lite layer is in reset. */
        uint64_t inv_tx_polarity : 1;       /**< [ 1: 1](R/W/H) Set to invert the polarity of the transmit data bits. Note
                                                 that the PCS-lite PRBS generator will require [INV_TX_POLARITY] to be
                                                 asserted when PRBS data are being transmitted to match the expected
                                                 polarity of the standard PRBS patterns.
                                                 This value must only be changed while lite layer is in reset. */
        uint64_t reverse_tx_bit_order : 1;  /**< [ 0: 0](R/W/H) Assertion causes the normal transmit order (lowest valid bit index
                                                 transmitted first, highest valid index last) to be reversed so the
                                                 highest valid bit index is transmitted first and lowest valid index
                                                 is transmitted last. Note that the PCS-lite PRBS generator will
                                                 require [REVERSE_TX_BIT_ORDER] to be asserted.
                                                 This value must only be changed while lite layer is in reset. */
#else /* Word 0 - Little Endian */
        uint64_t reverse_tx_bit_order : 1;  /**< [ 0: 0](R/W/H) Assertion causes the normal transmit order (lowest valid bit index
                                                 transmitted first, highest valid index last) to be reversed so the
                                                 highest valid bit index is transmitted first and lowest valid index
                                                 is transmitted last. Note that the PCS-lite PRBS generator will
                                                 require [REVERSE_TX_BIT_ORDER] to be asserted.
                                                 This value must only be changed while lite layer is in reset. */
        uint64_t inv_tx_polarity : 1;       /**< [ 1: 1](R/W/H) Set to invert the polarity of the transmit data bits. Note
                                                 that the PCS-lite PRBS generator will require [INV_TX_POLARITY] to be
                                                 asserted when PRBS data are being transmitted to match the expected
                                                 polarity of the standard PRBS patterns.
                                                 This value must only be changed while lite layer is in reset. */
        uint64_t bitstuff_tx_en : 1;        /**< [ 2: 2](R/W/H) Set to duplicate the first 20 bits of TX data before
                                                 alignment & ordering for lower data rates. This could be PCS TX
                                                 data, PRBS data, or shallow-loopback RX data depending on mode.
                                                 This value must only be changed while lite layer is in reset. */
        uint64_t fifo_rst_n : 1;            /**< [ 3: 3](R/W/H) Clear to hold the PCS lite layer transmit asynchronous FIFO in
                                                 reset. */
        uint64_t fifo_unload_dly : 1;       /**< [ 4: 4](R/W/H) Set to add one cycle delay to the PCS lite layer transmit
                                                 asynchronous FIFO pop data. This value must only be changed before
                                                 releasing [FIFO_RST_N]. */
        uint64_t tx_fifo_pop_start_addr : 3; /**< [ 7: 5](R/W) Reserved.
                                                 Internal:
                                                 Starting address for lite transmit FIFO pops
                                                 (reads). Changing this allows shifting the latency through the FIFO in steps of
                                                 1 txdivclk cycle (8, 10, 16, 20, 32, or 40 UI, depending on data path width
                                                 setting). The function is similar to FIFO_UNLOAD_DLY, but provides a wider range
                                                 of adjustment. For diagnostic use only. */
        uint64_t fifo_bypass_en : 1;        /**< [ 8: 8](R/W) For diagnostic use only.
                                                 Internal:
                                                 This control is currently inactive and is left as a placeholder for
                                                 possible re-inclusion in 7nm.

                                                 Set to bypass the PCS lite layer transmit asynchronous FIFO
                                                 with a single flop. This saves 1-2 cycles of latency in the transmit
                                                 path, but imposes additional constraints on static timing
                                                 closure. Note that shallow loopback data cannot bypass the FIFO. */
        uint64_t fifo_algn_lane_mask : 4;   /**< [ 12: 9](R/W) Selection control for which lanes in the current QLM to align in timing the
                                                 deassertion of reset to this lane's transmitter's clock alignment FIFO.
                                                 \<0\> = Wait for Lane 0.
                                                 \<1\> = Wait for Lane 1.
                                                 \<2\> = Wait for Lane 2.
                                                 \<3\> = Wait for Lane 3.

                                                 The bit corresponding to the current Lane is ignored. */
        uint64_t fifo_algn_qlm_mask : 4;    /**< [ 16: 13](R/W) Selection control for which QLMs in this QLM's link group to align in timing the
                                                 deassertion of reset to this lane's transmitter's clock alignment FIFO.
                                                 \<0\> = Wait for QLM 0.
                                                 \<1\> = Wait for QLM 1.
                                                 \<2\> = Wait for QLM 2.
                                                 \<3\> = Wait for QLM 3.

                                                 If a link is made up of lanes in multiple QLMs, the mask in each lane must
                                                 include all active QLMs (including the QLM containing the current lane). */
        uint64_t use_bph_wrreq_psh : 1;     /**< [ 17: 17](R/W) Reserved.
                                                 Internal:
                                                 Delay the transmit FIFO push request synchronization to the pop side by one
                                                 txdivclk phase. This is a diagnostic / debug tool to help with transmit lane
                                                 alignment issues. */
        uint64_t reserved_18 : 1;
        uint64_t reverse_rx_bit_order : 1;  /**< [ 19: 19](R/W/H) While asserted, the normal receive order (lowest valid bit index
                                                 received first, highest valid index last) is reversed so the highest
                                                 valid bit index is received first and lowest valid index is received
                                                 last. This control needs to be asserted for PRBS testing using the
                                                 PRBS checker in the GSER macro and for PCIe Gen-1 and Gen-2. */
        uint64_t inv_rx_polarity : 1;       /**< [ 20: 20](R/W/H) Set to invert the polarity of the received data bits. Note that
                                                 the PCS-lite PRBS checker will require [INV_RX_POLARITY] to be asserted
                                                 when it is in use to check standard PRBS data from an external
                                                 source. This value must only be changed while lite layer is in
                                                 reset. */
        uint64_t bitstuff_rx_en : 1;        /**< [ 21: 21](R/W/H) Set to expect duplicates on the PMA RX data and drop bits after
                                                 alignment & ordering for PCS layer to consume. The drop ordering is
                                                 determined by [BITSTUFF_RX_DROP_EVEN]. This value must only be changed
                                                 while lite layer is in reset. */
        uint64_t bitstuff_rx_drop_even : 1; /**< [ 22: 22](R/W/H) Tells the PCS lite receive datapath to drop even bits
                                                 in the vector of received data from the PMA when [BITSTUFF_RX_EN] is
                                                 set:
                                                 0 = Drop bits 1, 3, 5, 7, ...
                                                 1 = Drop bits 0, 2, 4, 6, ...

                                                 This bit is also used in the eye monitor to mask out the dropped
                                                 bits when counting mismatches.
                                                 This value must only be changed while lite layer is in reset. */
        uint64_t reserved_23 : 1;
        uint64_t sloop_mode : 1;            /**< [ 24: 24](R/W/H) Enable shallow loopback mode (SerDes receive data looped back to
                                                 SerDes transmit in the PCS lite layer).
                                                 This value must only be changed while lite layer is in reset. */
        uint64_t core_loopback_mode : 1;    /**< [ 25: 25](R/W/H) Enable the core-side loopback mode; controller transmit data are
                                                 looped back to the controller as receive data in the PCS lite layer.
                                                 This value must only be changed while lite layer is in reset. */
        uint64_t reserved_26_31 : 6;
        uint64_t tx_dp_width : 3;           /**< [ 34: 32](R/W/H) Tells the PCS lite layer logic what width to use in the transmit
                                                 data path between the lite layer FIFO and the analog macro, hence
                                                 what data bits of the tx_data[39:0] bus are in use. Values:
                                                 0x0 = 8 (reserved; debug only).
                                                 0x1 = 10 (reserved; debug only).
                                                 0x2 = 16.
                                                 0x3 = 20.
                                                 0x4 = 32.
                                                 0x5 = 40.

                                                 This value must only be changed while lite layer is in reset. */
        uint64_t rx_dp_width : 3;           /**< [ 37: 35](R/W/H) Tells the PCS lite layer logic what width to use in the receive data
                                                 path between the analog macro and downstream logic, hence what
                                                 data bits of the doutq[39:0] bus are in use.
                                                 0x0 = 8 (reserved; debug only).
                                                 0x1 = 10 (reserved; debug only).
                                                 0x2 = 16.
                                                 0x3 = 20.
                                                 0x4 = 32.
                                                 0x5 = 40.

                                                 This value must only be changed while lite layer is in reset. */
        uint64_t prbs_dp_width : 3;         /**< [ 40: 38](R/W/H) Tells the PCS lite layer PRBS logic what width to use in the
                                                 generator and checker data paths.
                                                 0x0 = 8 (requires bit-stuffing/unstuffing or for debug).
                                                 0x1 = 10 (requires bit-stuffing/unstuffing or for debug).
                                                 0x2 = 16.
                                                 0x3 = 20.
                                                 0x4 = 32.
                                                 0x5 = 40. */
        uint64_t pat_dp_width : 3;          /**< [ 43: 41](R/W/H) Tells the pattern memory generator/checker logic what width to use
                                                 in the generator and checker data paths.
                                                 0x0 = 8 (requires bit-stuffing/unstuffing or for debug).
                                                 0x1 = 10 (requires bit-stuffing/unstuffing or for debug).
                                                 0x2 = 16.
                                                 0x3 = 20.
                                                 0x4 = 32.
                                                 0x5 = 40.

                                                 Checking of received data
                                                 works correctly only for clock divider ratios of 10, 20, and 40. The
                                                 transmit data sequence is correct for all clock ratios. */
        uint64_t reserved_44_47 : 4;
        uint64_t inj_err_burst_len : 6;     /**< [ 53: 48](R/W) Tells the PCS lite error injection logic what length the burst error
                                                 mask should be. The max value is set by the valid data width
                                                 transmitted. For example, if 8 bits of valid data are transmitted
                                                 each cycle, only from 1-8 bits of contiguous errors can be set. The
                                                 same for 10, 16, 20, 32, and 40 bits. */
        uint64_t inj_err_burst_en : 1;      /**< [ 54: 54](R/W) PCS will inject a contiguous set of error bits in the transmit data
                                                 stream at some time following an assertion of [INJ_ERR_BURST_EN]. The
                                                 length of contiguous errors is set by [INJ_ERR_BURST_LEN]. Injection
                                                 of a second set of errors will require deasserting and then
                                                 asserting [INJ_ERR_BURST_EN] again. This mode should be used separately
                                                 from [INJ_ERR_CNT_EN] and only one of them can be asserted at any time. */
        uint64_t reserved_55 : 1;
        uint64_t inj_err_cnt_len : 6;       /**< [ 61: 56](R/W) Tells the PCS lite error injection logic the total number of bit errors
                                                 to insert in a walking pattern. Every other cycle 1 bit error will be
                                                 inserted in a walking index up to the count value specified. The max
                                                 value is set by the valid data width transmitted. For example, if 8
                                                 bits of valid data are transmitted each cycle only from 1-8 count
                                                 values can be set. The same for 10, 16, 20, 32, and 40 bits. */
        uint64_t inj_err_cnt_en : 1;        /**< [ 62: 62](R/W) PCS will inject a single bit error every other cycle in the transmit
                                                 data stream at some time following an assertion of
                                                 [INJ_ERR_CNT_EN]. The number of error cycles to insert is set by
                                                 [INJ_ERR_CNT_LEN] and it increments the error bit index each
                                                 cycle. Once all the errors have been transmitted GSER sets
                                                 GSERN()_LANE()_LT_BSTS[INJ_ERR_CNT_DONE]. Injection of a second set of
                                                 errors will require clearing the counter by holding [INJ_ERR_CNT_RST_N],
                                                 asserting [INJ_ERR_CNT_EN], then releasing [INJ_ERR_CNT_RST_N]. This mode
                                                 should be used separately from [INJ_ERR_BURST_EN] and only one of them
                                                 can be asserted at any time. */
        uint64_t inj_err_cnt_rst_n : 1;     /**< [ 63: 63](R/W/H) Set to zero to hold the error injection counter in reset. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_lt_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_lt_bcfg bdk_gsernx_lanex_lt_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_LT_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_LT_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000580ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_LT_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_LT_BCFG(a,b) bdk_gsernx_lanex_lt_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_LT_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_LT_BCFG(a,b) "GSERNX_LANEX_LT_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_LT_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_LT_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_LT_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_lt_bsts
+ *
+ * GSER Lane PCS Lite Status Register
+ */
union bdk_gsernx_lanex_lt_bsts
{
    uint64_t u; /* Whole-register (64-bit) access. */
    /* NOTE(review): auto-generated CSR layout. The little-endian half below must
       remain the exact mirror of the big-endian half; keep both in sync. */
    struct bdk_gsernx_lanex_lt_bsts_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_3_63 : 61;
        uint64_t inj_err_cnt_done : 1;      /**< [ 2: 2](RO/H) Indicates the PCS error injection counter is done. */
        uint64_t bitstuff_rx_algn_is_odd : 1;/**< [ 1: 1](RO/H) Indicates the PCS receive data path has detected bit-stuffed
                                                 receive data that is aligned with duplicate bits in pairs as (1,2),
                                                 (3,4), (5,6), ... The indication is valid only if the receive data
                                                 are bit-stuffed and error-free. */
        uint64_t bitstuff_rx_algn_is_even : 1;/**< [ 0: 0](RO/H) Indicates the PCS receive data path has detected bit-stuffed
                                                 receive data that is aligned with duplicate bits in pairs as (0,1),
                                                 (2,3), (4,5), ... The indication is valid only if the receive data
                                                 are bit-stuffed and error-free. */
#else /* Word 0 - Little Endian */
        uint64_t bitstuff_rx_algn_is_even : 1;/**< [ 0: 0](RO/H) Indicates the PCS receive data path has detected bit-stuffed
                                                 receive data that is aligned with duplicate bits in pairs as (0,1),
                                                 (2,3), (4,5), ... The indication is valid only if the receive data
                                                 are bit-stuffed and error-free. */
        uint64_t bitstuff_rx_algn_is_odd : 1;/**< [ 1: 1](RO/H) Indicates the PCS receive data path has detected bit-stuffed
                                                 receive data that is aligned with duplicate bits in pairs as (1,2),
                                                 (3,4), (5,6), ... The indication is valid only if the receive data
                                                 are bit-stuffed and error-free. */
        uint64_t inj_err_cnt_done : 1;      /**< [ 2: 2](RO/H) Indicates the PCS error injection counter is done. */
        uint64_t reserved_3_63 : 61;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_lt_bsts_s cn; */
};
typedef union bdk_gsernx_lanex_lt_bsts bdk_gsernx_lanex_lt_bsts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_LT_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_LT_BSTS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000590ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_LT_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_LT_BSTS(a,b) bdk_gsernx_lanex_lt_bsts_t
+#define bustype_BDK_GSERNX_LANEX_LT_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_LT_BSTS(a,b) "GSERNX_LANEX_LT_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_LT_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_LT_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_LT_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_lt_prbs1_bcfg
+ *
+ * GSER Lane PCS Lite PRBS Checker Control Register 1
+ */
union bdk_gsernx_lanex_lt_prbs1_bcfg
{
    uint64_t u; /* Whole-register (64-bit) access. */
    /* NOTE(review): auto-generated CSR layout. The little-endian half below must
       remain the exact mirror of the big-endian half; keep both in sync. */
    struct bdk_gsernx_lanex_lt_prbs1_bcfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_60_63 : 4;
        uint64_t prbs_rx_rst_n : 1;         /**< [ 59: 59](R/W/H) Clear to hold the receive PRBS pattern checker in reset. */
        uint64_t prbs_rx_mode : 1;          /**< [ 58: 58](R/W/H) Enables PRBS checking in the PCS lite layer receive data path. If
                                                 using PRBS checking, assert GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_RX_MODE]
                                                 prior to deasserting GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_RX_RST_N]. Software
                                                 can deassert this bit to stop accumulating error counts without
                                                 resetting the counter. */
        uint64_t prbs_tx_rst_n : 1;         /**< [ 57: 57](R/W/H) Clear to hold the transmit PRBS pattern generator in reset. */
        uint64_t prbs_tx_mode : 1;          /**< [ 56: 56](R/W/H) Enables PRBS generation and sending PRBS transmit data to the SERDES
                                                 macro. If using PRBS transmitting, set
                                                 GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_TX_MODE] prior to deasserting
                                                 GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_TX_RST_N]. Note that the PCS-lite PRBS
                                                 generator will require GSERN()_LANE()_LT_BCFG[REVERSE_TX_BIT_ORDER] to be
                                                 asserted. */
        uint64_t reserved_52_55 : 4;
        uint64_t prbs_mode : 4;             /**< [ 51: 48](R/W/H) Selects the PRBS pattern mode for both transmit generation and
                                                 receive checking:
                                                 0 = Prbs07 (taps at 6 & 7; reset default).
                                                 1 = Prbs7a (taps at 3 & 7).
                                                 2 = Prbs09 (taps at 5 & 9).
                                                 3 = Prbs11 (taps at 9 & 11).
                                                 4 = Prbs15 (taps at 14 & 15).
                                                 5 = Prbs20 (taps at 3 & 20).
                                                 6 = Prbs23 (taps at 18 & 23).
                                                 7 = Prbs29 (taps at 27 & 29).
                                                 8 = Prbs31 (taps at 28 & 31).
                                                 others reserved. */
        uint64_t reserved_41_47 : 7;
        uint64_t cycle_cnt_en : 1;          /**< [ 40: 40](R/W/H) Enable use of GSERN()_LANE()_LT_PRBS1_BCFG[CYCLE_CNT] to limit number of
                                                 cycles of PCS RX clock over which PRBS errors are accumulated. */
        uint64_t cycle_cnt : 40;            /**< [ 39: 0](R/W/H) When enabled, this contains the count of PCS receive-clock cycles
                                                 over which PRBS error counts are accumulated. */
#else /* Word 0 - Little Endian */
        uint64_t cycle_cnt : 40;            /**< [ 39: 0](R/W/H) When enabled, this contains the count of PCS receive-clock cycles
                                                 over which PRBS error counts are accumulated. */
        uint64_t cycle_cnt_en : 1;          /**< [ 40: 40](R/W/H) Enable use of GSERN()_LANE()_LT_PRBS1_BCFG[CYCLE_CNT] to limit number of
                                                 cycles of PCS RX clock over which PRBS errors are accumulated. */
        uint64_t reserved_41_47 : 7;
        uint64_t prbs_mode : 4;             /**< [ 51: 48](R/W/H) Selects the PRBS pattern mode for both transmit generation and
                                                 receive checking:
                                                 0 = Prbs07 (taps at 6 & 7; reset default).
                                                 1 = Prbs7a (taps at 3 & 7).
                                                 2 = Prbs09 (taps at 5 & 9).
                                                 3 = Prbs11 (taps at 9 & 11).
                                                 4 = Prbs15 (taps at 14 & 15).
                                                 5 = Prbs20 (taps at 3 & 20).
                                                 6 = Prbs23 (taps at 18 & 23).
                                                 7 = Prbs29 (taps at 27 & 29).
                                                 8 = Prbs31 (taps at 28 & 31).
                                                 others reserved. */
        uint64_t reserved_52_55 : 4;
        uint64_t prbs_tx_mode : 1;          /**< [ 56: 56](R/W/H) Enables PRBS generation and sending PRBS transmit data to the SERDES
                                                 macro. If using PRBS transmitting, set
                                                 GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_TX_MODE] prior to deasserting
                                                 GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_TX_RST_N]. Note that the PCS-lite PRBS
                                                 generator will require GSERN()_LANE()_LT_BCFG[REVERSE_TX_BIT_ORDER] to be
                                                 asserted. */
        uint64_t prbs_tx_rst_n : 1;         /**< [ 57: 57](R/W/H) Clear to hold the transmit PRBS pattern generator in reset. */
        uint64_t prbs_rx_mode : 1;          /**< [ 58: 58](R/W/H) Enables PRBS checking in the PCS lite layer receive data path. If
                                                 using PRBS checking, assert GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_RX_MODE]
                                                 prior to deasserting GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_RX_RST_N]. Software
                                                 can deassert this bit to stop accumulating error counts without
                                                 resetting the counter. */
        uint64_t prbs_rx_rst_n : 1;         /**< [ 59: 59](R/W/H) Clear to hold the receive PRBS pattern checker in reset. */
        uint64_t reserved_60_63 : 4;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_lt_prbs1_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_lt_prbs1_bcfg bdk_gsernx_lanex_lt_prbs1_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_LT_PRBS1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_LT_PRBS1_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000690ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_LT_PRBS1_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_LT_PRBS1_BCFG(a,b) bdk_gsernx_lanex_lt_prbs1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_LT_PRBS1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_LT_PRBS1_BCFG(a,b) "GSERNX_LANEX_LT_PRBS1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_LT_PRBS1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_LT_PRBS1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_LT_PRBS1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_lt_prbs2_bcfg
+ *
+ * GSER Lane PCS Lite PRBS Checker Control Register 2
+ */
union bdk_gsernx_lanex_lt_prbs2_bcfg
{
    uint64_t u; /* Whole-register (64-bit) access. */
    /* NOTE(review): auto-generated CSR layout. The little-endian half below must
       remain the exact mirror of the big-endian half; keep both in sync. */
    struct bdk_gsernx_lanex_lt_prbs2_bcfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_56_63 : 8;
        uint64_t lock_cnt : 8;              /**< [ 55: 48](R/W/H) One less than the number of cycles of matching receive data the PRBS
                                                 checker needs to see before starting to count errors. Default is 31,
                                                 for 32 cycles of matching data before starting the PRBS error
                                                 counter; the maximum setting is 255. Set
                                                 GSERN()_LANE()_LT_PRBS2_BCFG[LOCK_CNT] as desired before deasserting
                                                 GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_RX_RST_N]. */
        uint64_t reserved_41_47 : 7;
        uint64_t tx_lfsr_use_preload : 1;   /**< [ 40: 40](R/W/H) Enables use of the GSERN()_LANE()_LT_PRBS2_BCFG[TX_LFSR_PRELOAD_VALUE]
                                                 instead of all zeros in the transmitter LFSR PRBS generator. Set
                                                 GSERN()_LANE()_LT_PRBS2_BCFG[TX_LFSR_USE_PRELOAD] and
                                                 GSERN()_LANE()_LT_PRBS2_BCFG[TX_LFSR_PRELOAD_VALUE] as desired before
                                                 deasserting GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_TX_RST_N]. */
        uint64_t tx_lfsr_preload_value : 40; /**< [ 39: 0](R/W/H) Initial state of the transmitter LFSR PRBS generator (if enabled by
                                                 GSERN()_LANE()_LT_PRBS2_BCFG[TX_LFSR_USE_PRELOAD]). When enabled, this
                                                 value will be loaded when GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_TX_RST_N]
                                                 asserts (low). Do not set to all ones, or the LFSR will lock up. Set
                                                 GSERN()_LANE()_LT_PRBS2_BCFG[TX_LFSR_USE_PRELOAD] and
                                                 GSERN()_LANE()_LT_PRBS2_BCFG[TX_LFSR_PRELOAD_VALUE] as desired before
                                                 deasserting GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_TX_RST_N]. */
#else /* Word 0 - Little Endian */
        uint64_t tx_lfsr_preload_value : 40; /**< [ 39: 0](R/W/H) Initial state of the transmitter LFSR PRBS generator (if enabled by
                                                 GSERN()_LANE()_LT_PRBS2_BCFG[TX_LFSR_USE_PRELOAD]). When enabled, this
                                                 value will be loaded when GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_TX_RST_N]
                                                 asserts (low). Do not set to all ones, or the LFSR will lock up. Set
                                                 GSERN()_LANE()_LT_PRBS2_BCFG[TX_LFSR_USE_PRELOAD] and
                                                 GSERN()_LANE()_LT_PRBS2_BCFG[TX_LFSR_PRELOAD_VALUE] as desired before
                                                 deasserting GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_TX_RST_N]. */
        uint64_t tx_lfsr_use_preload : 1;   /**< [ 40: 40](R/W/H) Enables use of the GSERN()_LANE()_LT_PRBS2_BCFG[TX_LFSR_PRELOAD_VALUE]
                                                 instead of all zeros in the transmitter LFSR PRBS generator. Set
                                                 GSERN()_LANE()_LT_PRBS2_BCFG[TX_LFSR_USE_PRELOAD] and
                                                 GSERN()_LANE()_LT_PRBS2_BCFG[TX_LFSR_PRELOAD_VALUE] as desired before
                                                 deasserting GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_TX_RST_N]. */
        uint64_t reserved_41_47 : 7;
        uint64_t lock_cnt : 8;              /**< [ 55: 48](R/W/H) One less than the number of cycles of matching receive data the PRBS
                                                 checker needs to see before starting to count errors. Default is 31,
                                                 for 32 cycles of matching data before starting the PRBS error
                                                 counter; the maximum setting is 255. Set
                                                 GSERN()_LANE()_LT_PRBS2_BCFG[LOCK_CNT] as desired before deasserting
                                                 GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_RX_RST_N]. */
        uint64_t reserved_56_63 : 8;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_lt_prbs2_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_lt_prbs2_bcfg bdk_gsernx_lanex_lt_prbs2_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_LT_PRBS2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_LT_PRBS2_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900006a0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_LT_PRBS2_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_LT_PRBS2_BCFG(a,b) bdk_gsernx_lanex_lt_prbs2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_LT_PRBS2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_LT_PRBS2_BCFG(a,b) "GSERNX_LANEX_LT_PRBS2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_LT_PRBS2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_LT_PRBS2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_LT_PRBS2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_lt_prbs_sts
+ *
+ * GSER Lane PCS Lite PRBS Checker Status Register
+ */
union bdk_gsernx_lanex_lt_prbs_sts
{
    uint64_t u; /* Whole-register (64-bit) access. */
    /* NOTE(review): auto-generated CSR layout. The little-endian half below must
       remain the exact mirror of the big-endian half; keep both in sync. */
    struct bdk_gsernx_lanex_lt_prbs_sts_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_50_63 : 14;
        uint64_t cycle_cnt_done : 1;        /**< [ 49: 49](RO/H) Indicates the GSERN()_LANE()_LT_PRBS1_BCFG[CYCLE_CNT] has expired
                                                 if GSERN()_LANE()_LT_PRBS1_BCFG[CYCLE_CNT_EN] is set. If
                                                 GSERN()_LANE()_LT_PRBS1_BCFG[CYCLE_CNT_EN] is clear, this bit will
                                                 always read as clear. */
        uint64_t lock : 1;                  /**< [ 48: 48](RO/H) Indicates the PRBS checker logic has achieved lock prior to
                                                 starting error counting. */
        uint64_t err_cnt_ovf : 1;           /**< [ 47: 47](RO/H) When asserted indicates GSERN()_LANE()_LT_PRBS_STS[ERR_CNT] overflowed and
                                                 is not accurate. */
        uint64_t reserved_45_46 : 2;
        uint64_t err_cnt : 45;              /**< [ 44: 0](RO/H) Count of PRBS bit errors seen. If GSERN()_LANE()_LT_PRBS1_BCFG[CYCLE_CNT_EN] and
                                                 GSERN()_LANE()_LT_PRBS_STS[CYCLE_CNT_DONE] are not both asserted,
                                                 GSERN()_LANE()_LT_PRBS_STS[ERR_CNT] may not be reliable unless
                                                 GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_RX_MODE] is first deasserted (to stop
                                                 the error counter). */
#else /* Word 0 - Little Endian */
        uint64_t err_cnt : 45;              /**< [ 44: 0](RO/H) Count of PRBS bit errors seen. If GSERN()_LANE()_LT_PRBS1_BCFG[CYCLE_CNT_EN] and
                                                 GSERN()_LANE()_LT_PRBS_STS[CYCLE_CNT_DONE] are not both asserted,
                                                 GSERN()_LANE()_LT_PRBS_STS[ERR_CNT] may not be reliable unless
                                                 GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_RX_MODE] is first deasserted (to stop
                                                 the error counter). */
        uint64_t reserved_45_46 : 2;
        uint64_t err_cnt_ovf : 1;           /**< [ 47: 47](RO/H) When asserted indicates GSERN()_LANE()_LT_PRBS_STS[ERR_CNT] overflowed and
                                                 is not accurate. */
        uint64_t lock : 1;                  /**< [ 48: 48](RO/H) Indicates the PRBS checker logic has achieved lock prior to
                                                 starting error counting. */
        uint64_t cycle_cnt_done : 1;        /**< [ 49: 49](RO/H) Indicates the GSERN()_LANE()_LT_PRBS1_BCFG[CYCLE_CNT] has expired
                                                 if GSERN()_LANE()_LT_PRBS1_BCFG[CYCLE_CNT_EN] is set. If
                                                 GSERN()_LANE()_LT_PRBS1_BCFG[CYCLE_CNT_EN] is clear, this bit will
                                                 always read as clear. */
        uint64_t reserved_50_63 : 14;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_lt_prbs_sts_s cn; */
};
typedef union bdk_gsernx_lanex_lt_prbs_sts bdk_gsernx_lanex_lt_prbs_sts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_LT_PRBS_STS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_LT_PRBS_STS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900006b0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_LT_PRBS_STS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_LT_PRBS_STS(a,b) bdk_gsernx_lanex_lt_prbs_sts_t
+#define bustype_BDK_GSERNX_LANEX_LT_PRBS_STS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_LT_PRBS_STS(a,b) "GSERNX_LANEX_LT_PRBS_STS"
+#define device_bar_BDK_GSERNX_LANEX_LT_PRBS_STS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_LT_PRBS_STS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_LT_PRBS_STS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_map0
+ *
+ * GSER Lane Programmable Map Register 0
+ * Manually settable option for the interpolator map. If using
+ * GSERN()_LANE()_IMAPSEL_BCFG[MAP_CASE]=0xf, set these bits prior to bringing analog
+ * receiver out of reset.
+ */
+union bdk_gsernx_lanex_map0
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_map0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t dat : 64; /**< [ 63: 0](R/W) Map register 0, 64 least significant bits of map 128-bit vector. */
+#else /* Word 0 - Little Endian */
+ uint64_t dat : 64; /**< [ 63: 0](R/W) Map register 0, 64 least significant bits of map 128-bit vector. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_map0_s cn; */
+};
+typedef union bdk_gsernx_lanex_map0 bdk_gsernx_lanex_map0_t;
+
+/* Compute the RSL address of GSERN(a)_LANE(b)_MAP0. Valid only on
+   CN9XXX with a <= 7 and b <= 4; any other index reports a fatal CSR error. */
+static inline uint64_t BDK_GSERNX_LANEX_MAP0(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_MAP0(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001e00ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_MAP0", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_MAP0(a,b) bdk_gsernx_lanex_map0_t
+#define bustype_BDK_GSERNX_LANEX_MAP0(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_MAP0(a,b) "GSERNX_LANEX_MAP0"
+#define device_bar_BDK_GSERNX_LANEX_MAP0(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_MAP0(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_MAP0(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_map1
+ *
+ * GSER Lane Programmable Map Register 1
+ * Manually settable option for the interpolator map. If using
+ * (GSERN()_LANE()_IMAPSEL_BCFG[MAP_CASE]=0xf), set these bits prior to bringing
+ * analog receiver out of reset.
+ */
+union bdk_gsernx_lanex_map1
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_map1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t dat : 64; /**< [ 63: 0](R/W) Map register 1, 64 most significant bits of map 128-bit vector. */
+#else /* Word 0 - Little Endian */
+ uint64_t dat : 64; /**< [ 63: 0](R/W) Map register 1, 64 most significant bits of map 128-bit vector. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_map1_s cn; */
+};
+typedef union bdk_gsernx_lanex_map1 bdk_gsernx_lanex_map1_t;
+
+/* Compute the RSL address of GSERN(a)_LANE(b)_MAP1. Valid only on
+   CN9XXX with a <= 7 and b <= 4; any other index reports a fatal CSR error. */
+static inline uint64_t BDK_GSERNX_LANEX_MAP1(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_MAP1(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001e10ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_MAP1", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_MAP1(a,b) bdk_gsernx_lanex_map1_t
+#define bustype_BDK_GSERNX_LANEX_MAP1(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_MAP1(a,b) "GSERNX_LANEX_MAP1"
+#define device_bar_BDK_GSERNX_LANEX_MAP1(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_MAP1(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_MAP1(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_max_oob_add_count
+ *
+ * GSER Lane RX OOB Maximum ADDER Durations Counted Register
+ * Observes the maximum number of times we had to delay the idle offset
+ * recalibration because of a collision with an OOB event.
+ */
+union bdk_gsernx_lanex_max_oob_add_count
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_max_oob_add_count_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t accumulated_oob_adders : 8; /**< [ 7: 0](RO/H) Observed maximum number of OOB ADDERS applied to the idle offset
+ recalibration FSM that delay the calibration. This is in terms of
+ how many GSERN()_LANE()_RX_IDLE_CAL_CFG[OOB_DELAY_ADDER_COUNT] ticks added to
+ the duration between recalibration. */
+#else /* Word 0 - Little Endian */
+ uint64_t accumulated_oob_adders : 8; /**< [ 7: 0](RO/H) Observed maximum number of OOB ADDERS applied to the idle offset
+ recalibration FSM that delay the calibration. This is in terms of
+ how many GSERN()_LANE()_RX_IDLE_CAL_CFG[OOB_DELAY_ADDER_COUNT] ticks added to
+ the duration between recalibration. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_max_oob_add_count_s cn; */
+};
+typedef union bdk_gsernx_lanex_max_oob_add_count bdk_gsernx_lanex_max_oob_add_count_t;
+
+/* Compute the RSL address of GSERN(a)_LANE(b)_MAX_OOB_ADD_COUNT. Valid only on
+   CN9XXX with a <= 7 and b <= 4; any other index reports a fatal CSR error. */
+static inline uint64_t BDK_GSERNX_LANEX_MAX_OOB_ADD_COUNT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_MAX_OOB_ADD_COUNT(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001550ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_MAX_OOB_ADD_COUNT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_MAX_OOB_ADD_COUNT(a,b) bdk_gsernx_lanex_max_oob_add_count_t
+#define bustype_BDK_GSERNX_LANEX_MAX_OOB_ADD_COUNT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_MAX_OOB_ADD_COUNT(a,b) "GSERNX_LANEX_MAX_OOB_ADD_COUNT"
+#define device_bar_BDK_GSERNX_LANEX_MAX_OOB_ADD_COUNT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_MAX_OOB_ADD_COUNT(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_MAX_OOB_ADD_COUNT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_ocx_txeq_bcfg
+ *
+ * GSER Lane OCX Tx Equalizer Base Configuration Register
+ * Register controls settings for the transmitter equalizer taps
+ * when the GSER is configured for OCX mode and KR training is not enabled.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL] is set to 'OCX'.
+ */
+union bdk_gsernx_lanex_ocx_txeq_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_ocx_txeq_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_28_63 : 36;
+ uint64_t tx_coeff_update : 1; /**< [ 27: 27](R/W/H) Transmitter coefficient update.
+ An asserting edge will start the transmitter coefficient update
+ sequencer. This field self-clears when the sequence has completed.
+ To update the GSER transmitter equalizer coefficients program
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CPOST].
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CMAIN].
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CPRE].
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_BS].
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CSPD].
+
+ then write [TX_COEFF_UPDATE] to 1. */
+ uint64_t tx_enable : 1; /**< [ 26: 26](R/W) Transmitter enable.
+ 0 = Disable the serdes transmitter.
+ 1 = Enable the serdes transmitter.
+
+ Internal:
+ Drives the ocx_tx_enable input to the GSERN src_mux. */
+ uint64_t tx_stuff : 1; /**< [ 25: 25](R/W) Reserved. For Diagnostic Use Only.
+ Internal:
+ Transmitter bit stuffing.
+ Programs the transmitter PCS lite layer for bit stuffing.
+ Not used for OCX connections.
+ Leave programmed to 0x0.
+ Drives the ocx_tx_stuff input to the GSERN src_mux. */
+ uint64_t tx_oob : 1; /**< [ 24: 24](R/W) Reserved. For Diagnostic Use Only.
+ Internal:
+ Transmitter OOB signaling.
+ Not typically used for OCX connections.
+ Leave programmed to 0x0.
+ Drives the ocx_tx_oob input to the GSERN src_mux. */
+ uint64_t tx_idle : 1; /**< [ 23: 23](R/W) Reserved. For Diagnostic Use Only.
+ Internal:
+ Transmitter electrical idle.
+ Used to force the transmitter to electrical idle.
+ Not typically used for OCX connections.
+ Leave programmed to 0x0.
+ Drives the ocx_tx_idle input to the GSERN src_mux. */
+ uint64_t tx_cspd : 1; /**< [ 22: 22](R/W) Power-down control for a second TX bias/swing leg with the same
+ weight as TX_BS[3]. Normally this field is left deasserted to
+ provide a minimum transmit amplitude. Asserting [TX_CSPD] will turn
+ off all legs of the bias/swing generator for lower standby power. */
+ uint64_t tx_bs : 6; /**< [ 21: 16](R/W) TX bias/swing selection. This setting only takes effect if [TX_CSPD] is
+ deasserted; with [TX_CSPD] asserted the
+ bias/swing control setting seen in the analog bias generator is zero.
+
+ Typical override values would be:
+ 42 = Nominal 1.0V p-p transmit amplitude.
+ 52 = Nominal 1.2V p-p transmit amplitude.
+
+ The maximum usable value without transmitted waveform distortion depends
+ primarily on voltage, secondarily on process corner and temperature, but is at
+ least 52. There is no minimum setting based on transmitter distortion, only
+ that set by the receiver. */
+ uint64_t tx_cpost : 5; /**< [ 15: 11](R/W) Transmitter Post (C+1) equalizer tap coefficient value.
+ Programs the transmitter Post tap.
+ Valid range is 0 to 0x10.
+ See GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CMAIN]. */
+ uint64_t tx_cmain : 6; /**< [ 10: 5](R/W) Transmitter Main (C0) equalizer tap coefficient value.
+ Programs the serdes transmitter Main tap.
+ Valid range is 0x30 to 0x18.
+ When programming the transmitter Pre, Main, and Post
+ taps the following rules must be adhered to:
+ _ ([TX_CMAIN] + [TX_CPRE] + [TX_CPOST]) \<= 0x30.
+ _ ([TX_CMAIN] - [TX_CPRE] - [TX_CPOST]) \>= 0x6.
+ _ 0x30 \<= [TX_CMAIN] \<= 0x18.
+ _ 0x16 \>= [TX_CPRE] \>= 0x0.
+ _ 0x16 \>= [TX_CPOST] \>= 0x0.
+
+ [TX_CMAIN] should be adjusted when either [TX_CPRE] or
+ [TX_CPOST] is adjusted to provide constant power transmitter
+ amplitude adjustments.
+
+ To update the GSER serdes transmitter Pre, Main, and Post
+ equalizer taps from the [TX_CPOST], [TX_CMAIN], and [TX_CPRE]
+ fields write GSERN()_LANE()_OCX_TXEQ_BCFG[TX_COEFF_UPDATE]
+ to 1 and subsequently clear [TX_COEFF_UPDATE] to 0. This step
+ transfers the [TX_CPOST], [TX_CMAIN], and [TX_CPRE] to the
+ serdes transmitter equalizer.
+
+ Related CSRs:
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_COEFF_UPDATE].
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CPOST].
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CPRE].
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_BS].
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CSPD]. */
+ uint64_t tx_cpre : 5; /**< [ 4: 0](R/W) Transmitter Pre (C-1) equalizer tap coefficient value.
+ Programs the transmitter Pre tap.
+ Valid range is 0 to 0x10.
+ See GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CMAIN]. */
+#else /* Word 0 - Little Endian */
+ uint64_t tx_cpre : 5; /**< [ 4: 0](R/W) Transmitter Pre (C-1) equalizer tap coefficient value.
+ Programs the transmitter Pre tap.
+ Valid range is 0 to 0x10.
+ See GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CMAIN]. */
+ uint64_t tx_cmain : 6; /**< [ 10: 5](R/W) Transmitter Main (C0) equalizer tap coefficient value.
+ Programs the serdes transmitter Main tap.
+ Valid range is 0x30 to 0x18.
+ When programming the transmitter Pre, Main, and Post
+ taps the following rules must be adhered to:
+ _ ([TX_CMAIN] + [TX_CPRE] + [TX_CPOST]) \<= 0x30.
+ _ ([TX_CMAIN] - [TX_CPRE] - [TX_CPOST]) \>= 0x6.
+ _ 0x30 \<= [TX_CMAIN] \<= 0x18.
+ _ 0x16 \>= [TX_CPRE] \>= 0x0.
+ _ 0x16 \>= [TX_CPOST] \>= 0x0.
+
+ [TX_CMAIN] should be adjusted when either [TX_CPRE] or
+ [TX_CPOST] is adjusted to provide constant power transmitter
+ amplitude adjustments.
+
+ To update the GSER serdes transmitter Pre, Main, and Post
+ equalizer taps from the [TX_CPOST], [TX_CMAIN], and [TX_CPRE]
+ fields write GSERN()_LANE()_OCX_TXEQ_BCFG[TX_COEFF_UPDATE]
+ to 1 and subsequently clear [TX_COEFF_UPDATE] to 0. This step
+ transfers the [TX_CPOST], [TX_CMAIN], and [TX_CPRE] to the
+ serdes transmitter equalizer.
+
+ Related CSRs:
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_COEFF_UPDATE].
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CPOST].
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CPRE].
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_BS].
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CSPD]. */
+ uint64_t tx_cpost : 5; /**< [ 15: 11](R/W) Transmitter Post (C+1) equalizer tap coefficient value.
+ Programs the transmitter Post tap.
+ Valid range is 0 to 0x10.
+ See GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CMAIN]. */
+ uint64_t tx_bs : 6; /**< [ 21: 16](R/W) TX bias/swing selection. This setting only takes effect if [TX_CSPD] is
+ deasserted; with [TX_CSPD] asserted the
+ bias/swing control setting seen in the analog bias generator is zero.
+
+ Typical override values would be:
+ 42 = Nominal 1.0V p-p transmit amplitude.
+ 52 = Nominal 1.2V p-p transmit amplitude.
+
+ The maximum usable value without transmitted waveform distortion depends
+ primarily on voltage, secondarily on process corner and temperature, but is at
+ least 52. There is no minimum setting based on transmitter distortion, only
+ that set by the receiver. */
+ uint64_t tx_cspd : 1; /**< [ 22: 22](R/W) Power-down control for a second TX bias/swing leg with the same
+ weight as TX_BS[3]. Normally this field is left deasserted to
+ provide a minimum transmit amplitude. Asserting [TX_CSPD] will turn
+ off all legs of the bias/swing generator for lower standby power. */
+ uint64_t tx_idle : 1; /**< [ 23: 23](R/W) Reserved. For Diagnostic Use Only.
+ Internal:
+ Transmitter electrical idle.
+ Used to force the transmitter to electrical idle.
+ Not typically used for OCX connections.
+ Leave programmed to 0x0.
+ Drives the ocx_tx_idle input to the GSERN src_mux. */
+ uint64_t tx_oob : 1; /**< [ 24: 24](R/W) Reserved. For Diagnostic Use Only.
+ Internal:
+ Transmitter OOB signaling.
+ Not typically used for OCX connections.
+ Leave programmed to 0x0.
+ Drives the ocx_tx_oob input to the GSERN src_mux. */
+ uint64_t tx_stuff : 1; /**< [ 25: 25](R/W) Reserved. For Diagnostic Use Only.
+ Internal:
+ Transmitter bit stuffing.
+ Programs the transmitter PCS lite layer for bit stuffing.
+ Not used for OCX connections.
+ Leave programmed to 0x0.
+ Drives the ocx_tx_stuff input to the GSERN src_mux. */
+ uint64_t tx_enable : 1; /**< [ 26: 26](R/W) Transmitter enable.
+ 0 = Disable the serdes transmitter.
+ 1 = Enable the serdes transmitter.
+
+ Internal:
+ Drives the ocx_tx_enable input to the GSERN src_mux. */
+ uint64_t tx_coeff_update : 1; /**< [ 27: 27](R/W/H) Transmitter coefficient update.
+ An asserting edge will start the transmitter coefficient update
+ sequencer. This field self-clears when the sequence has completed.
+ To update the GSER transmitter equalizer coefficients program
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CPOST].
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CMAIN].
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CPRE].
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_BS].
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CSPD].
+
+ then write [TX_COEFF_UPDATE] to 1. */
+ uint64_t reserved_28_63 : 36;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_ocx_txeq_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_ocx_txeq_bcfg bdk_gsernx_lanex_ocx_txeq_bcfg_t;
+
+/* Compute the RSL address of GSERN(a)_LANE(b)_OCX_TXEQ_BCFG. Valid only on
+   CN9XXX with a <= 7 and b <= 4; any other index reports a fatal CSR error. */
+static inline uint64_t BDK_GSERNX_LANEX_OCX_TXEQ_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_OCX_TXEQ_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090003550ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_OCX_TXEQ_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_OCX_TXEQ_BCFG(a,b) bdk_gsernx_lanex_ocx_txeq_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_OCX_TXEQ_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_OCX_TXEQ_BCFG(a,b) "GSERNX_LANEX_OCX_TXEQ_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_OCX_TXEQ_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_OCX_TXEQ_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_OCX_TXEQ_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pat#
+ *
+ * GSER Lane Pattern Memory Register
+ */
+union bdk_gsernx_lanex_patx
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_patx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_40_63 : 24;
+ uint64_t dat : 40; /**< [ 39: 0](R/W) Pattern Memory Registers. All 40b of both registers are used under
+ all clock ratios except 32:1. In 32b (32:1) mode bits [31:0] of each
+ register are used. The total pattern length is 64b in 32b mode and
+ 80b in all other clock modes.
+
+ The bit pattern in bits [N-1:0] of PAT[0], where N is the clock
+ ratio, must be unique within the overall pattern to allow the
+ pattern checker to correctly lock before checking for errors.
+
+ Internal:
+ If the pattern data in this register is written while pattern transmission
+ testing is in progress, the transmitted data may be briefly unpredictable. */
+#else /* Word 0 - Little Endian */
+ uint64_t dat : 40; /**< [ 39: 0](R/W) Pattern Memory Registers. All 40b of both registers are used under
+ all clock ratios except 32:1. In 32b (32:1) mode bits [31:0] of each
+ register are used. The total pattern length is 64b in 32b mode and
+ 80b in all other clock modes.
+
+ The bit pattern in bits [N-1:0] of PAT[0], where N is the clock
+ ratio, must be unique within the overall pattern to allow the
+ pattern checker to correctly lock before checking for errors.
+
+ Internal:
+ If the pattern data in this register is written while pattern transmission
+ testing is in progress, the transmitted data may be briefly unpredictable. */
+ uint64_t reserved_40_63 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_patx_s cn; */
+};
+typedef union bdk_gsernx_lanex_patx bdk_gsernx_lanex_patx_t;
+
+/* Compute the RSL address of GSERN(a)_LANE(b)_PAT(c). Valid only on CN9XXX
+   with a <= 7, b <= 4, and c <= 1; any other index reports a fatal CSR error. */
+static inline uint64_t BDK_GSERNX_LANEX_PATX(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PATX(unsigned long a, unsigned long b, unsigned long c)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4) && (c<=1)))
+ return 0x87e090007ff0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7) + 8ll * ((c) & 0x1);
+ __bdk_csr_fatal("GSERNX_LANEX_PATX", 3, a, b, c, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PATX(a,b,c) bdk_gsernx_lanex_patx_t
+#define bustype_BDK_GSERNX_LANEX_PATX(a,b,c) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PATX(a,b,c) "GSERNX_LANEX_PATX"
+#define device_bar_BDK_GSERNX_LANEX_PATX(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PATX(a,b,c) (a)
+#define arguments_BDK_GSERNX_LANEX_PATX(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (RSL) gsern#_lane#_pat_ctrl
+ *
+ * GSER Lane PCS Lite Pattern Memory Stress Control Register
+ */
+union bdk_gsernx_lanex_pat_ctrl
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pat_ctrl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_51_63 : 13;
+ uint64_t tx_rst_n : 1; /**< [ 50: 50](R/W) Clear and then set to reset the pattern memory stress transmit
+ data path, specifically the pattern memory index counter. */
+ uint64_t rx_rst_n : 1; /**< [ 49: 49](R/W) Clear and then set to reset the pattern memory stress
+ receive checking data path, including the lock indication and the
+ error counts. */
+ uint64_t en : 1; /**< [ 48: 48](R/W) Enable (i.e., start, or stop if deasserted) pattern memory stress
+ generation and checking. */
+ uint64_t reserved_41_47 : 7;
+ uint64_t cycle_cnt_en : 1; /**< [ 40: 40](R/W) Enable use of GSERN()_LANE()_PAT_CTRL[CYCLE_CNT] to limit number of cycles
+ of PCS RX clock over which the pattern memory loopback errors are
+ accumulated. */
+ uint64_t cycle_cnt : 40; /**< [ 39: 0](R/W) When enabled by GSERN()_LANE()_PAT_CTRL[CYCLE_CNT_EN], this contains the
+ count of PCS receive-clock cycles over which pattern memory loopback
+ error counts are accumulated. */
+#else /* Word 0 - Little Endian */
+ uint64_t cycle_cnt : 40; /**< [ 39: 0](R/W) When enabled by GSERN()_LANE()_PAT_CTRL[CYCLE_CNT_EN], this contains the
+ count of PCS receive-clock cycles over which pattern memory loopback
+ error counts are accumulated. */
+ uint64_t cycle_cnt_en : 1; /**< [ 40: 40](R/W) Enable use of GSERN()_LANE()_PAT_CTRL[CYCLE_CNT] to limit number of cycles
+ of PCS RX clock over which the pattern memory loopback errors are
+ accumulated. */
+ uint64_t reserved_41_47 : 7;
+ uint64_t en : 1; /**< [ 48: 48](R/W) Enable (i.e., start, or stop if deasserted) pattern memory stress
+ generation and checking. */
+ uint64_t rx_rst_n : 1; /**< [ 49: 49](R/W) Clear and then set to reset the pattern memory stress
+ receive checking data path, including the lock indication and the
+ error counts. */
+ uint64_t tx_rst_n : 1; /**< [ 50: 50](R/W) Clear and then set to reset the pattern memory stress transmit
+ data path, specifically the pattern memory index counter. */
+ uint64_t reserved_51_63 : 13;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pat_ctrl_s cn; */
+};
+typedef union bdk_gsernx_lanex_pat_ctrl bdk_gsernx_lanex_pat_ctrl_t;
+
+/* Compute the RSL address of GSERN(a)_LANE(b)_PAT_CTRL. Valid only on
+   CN9XXX with a <= 7 and b <= 4; any other index reports a fatal CSR error. */
+static inline uint64_t BDK_GSERNX_LANEX_PAT_CTRL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PAT_CTRL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090007fd0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PAT_CTRL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PAT_CTRL(a,b) bdk_gsernx_lanex_pat_ctrl_t
+#define bustype_BDK_GSERNX_LANEX_PAT_CTRL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PAT_CTRL(a,b) "GSERNX_LANEX_PAT_CTRL"
+#define device_bar_BDK_GSERNX_LANEX_PAT_CTRL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PAT_CTRL(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PAT_CTRL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pat_dat
+ *
+ * GSER Lane PCS Lite Pattern Memory Stress Data Result Register
+ */
+union bdk_gsernx_lanex_pat_dat
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pat_dat_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t framing_match : 1; /**< [ 63: 63](RO/H) Indicates that the pattern memory checker found a framing match. This field is
+ valid only after enabling pattern memory generation and checking by setting
+ GSERN()_LANE()_PAT_CTRL[EN]. */
+ uint64_t reserved_62 : 1;
+ uint64_t framing_offset : 6; /**< [ 61: 56](RO/H) The offset the pattern memory checker found of the low bits of the pattern data
+ in the receive data frame. This field is valid only when [FRAMING_MATCH]
+ reads as asserted after enabling pattern memory generation and checking by
+ setting GSERN()_LANE()_PAT_CTRL[EN]. */
+ uint64_t reserved_50_55 : 6;
+ uint64_t cycle_cnt_done : 1; /**< [ 49: 49](RO/H) Indicates the GSERN()_LANE()_PAT_CTRL[CYCLE_CNT] has expired if
+ GSERN()_LANE()_PAT_CTRL[CYCLE_CNT_EN] is asserted. If
+ GSERN()_LANE()_PAT_CTRL[CYCLE_CNT_EN] is deasserted,
+ GSERN()_LANE()_PAT_DAT[CYCLE_CNT_DONE] will always read as asserted. */
+ uint64_t lock : 1; /**< [ 48: 48](RO/H) Indicates the pattern memory checker has achieved lock. */
+ uint64_t err_cnt_ovf : 1; /**< [ 47: 47](RO/H) When asserted indicates GSERN()_LANE()_PAT_DAT[ERR_CNT] overflowed and is
+ not accurate. */
+ uint64_t reserved_45_46 : 2;
+ uint64_t err_cnt : 45; /**< [ 44: 0](RO/H) Count of bit errors seen in pattern memory loopback testing. If
+ GSERN()_LANE()_PAT_CTRL[CYCLE_CNT_EN] and GSERN()_LANE()_PAT_DAT[CYCLE_CNT_DONE]
+ are not both asserted, GSERN()_LANE()_PAT_DAT[ERR_CNT] may not be reliable
+ unless GSERN()_LANE()_PAT_CTRL[EN] is first deasserted (to stop the error
+ counter). */
+#else /* Word 0 - Little Endian */
+ uint64_t err_cnt : 45; /**< [ 44: 0](RO/H) Count of bit errors seen in pattern memory loopback testing. If
+ GSERN()_LANE()_PAT_CTRL[CYCLE_CNT_EN] and GSERN()_LANE()_PAT_DAT[CYCLE_CNT_DONE]
+ are not both asserted, GSERN()_LANE()_PAT_DAT[ERR_CNT] may not be reliable
+ unless GSERN()_LANE()_PAT_CTRL[EN] is first deasserted (to stop the error
+ counter). */
+ uint64_t reserved_45_46 : 2;
+ uint64_t err_cnt_ovf : 1; /**< [ 47: 47](RO/H) When asserted indicates GSERN()_LANE()_PAT_DAT[ERR_CNT] overflowed and is
+ not accurate. */
+ uint64_t lock : 1; /**< [ 48: 48](RO/H) Indicates the pattern memory checker has achieved lock. */
+ uint64_t cycle_cnt_done : 1; /**< [ 49: 49](RO/H) Indicates the GSERN()_LANE()_PAT_CTRL[CYCLE_CNT] has expired if
+ GSERN()_LANE()_PAT_CTRL[CYCLE_CNT_EN] is asserted. If
+ GSERN()_LANE()_PAT_CTRL[CYCLE_CNT_EN] is deasserted,
+ GSERN()_LANE()_PAT_DAT[CYCLE_CNT_DONE] will always read as asserted. */
+ uint64_t reserved_50_55 : 6;
+ uint64_t framing_offset : 6; /**< [ 61: 56](RO/H) The offset the pattern memory checker found of the low bits of the pattern data
+ in the receive data frame. This field is valid only when [FRAMING_MATCH]
+ reads as asserted after enabling pattern memory generation and checking by
+ setting GSERN()_LANE()_PAT_CTRL[EN]. */
+ uint64_t reserved_62 : 1;
+ uint64_t framing_match : 1; /**< [ 63: 63](RO/H) Indicates that the pattern memory checker found a framing match. This field is
+ valid only after enabling pattern memory generation and checking by setting
+ GSERN()_LANE()_PAT_CTRL[EN]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pat_dat_s cn; */
+};
+typedef union bdk_gsernx_lanex_pat_dat bdk_gsernx_lanex_pat_dat_t;
+
+/* Compute the RSL address of GSERN(a)_LANE(b)_PAT_DAT. Valid only on
+   CN9XXX with a <= 7 and b <= 4; any other index reports a fatal CSR error. */
+static inline uint64_t BDK_GSERNX_LANEX_PAT_DAT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PAT_DAT(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090007fe0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PAT_DAT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PAT_DAT(a,b) bdk_gsernx_lanex_pat_dat_t
+#define bustype_BDK_GSERNX_LANEX_PAT_DAT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PAT_DAT(a,b) "GSERNX_LANEX_PAT_DAT"
+#define device_bar_BDK_GSERNX_LANEX_PAT_DAT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PAT_DAT(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PAT_DAT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_pcs2_bcfg
+ *
+ * GSER Lane PCIe PCS Control 2 Register
+ * Control settings for PCIe PCS functionality.
+ */
+union bdk_gsernx_lanex_pcie_pcs2_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_pcs2_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pause_adpt_rxstandby : 4; /**< [ 63: 60](R/W) Set to one to allow the PIPE RxStandby to pause all adaptation functions and
+ hold the CDRFSM when the PCIe lane is operating at the corresponding rate.
+ The individual bits are mapped as follows:
+ \<0\> = PCIe gen1.
+ \<1\> = PCIe gen2.
+ \<2\> = PCIe gen3.
+ \<3\> = PCIe gen4. */
+ uint64_t frc_unalgn_rxstandby : 4; /**< [ 59: 56](R/W) Enables use of RxStandby to force the RX PCS into unalign state with
+ an individual control bit per PCIe rate mapped as follows:
+ \<0\> = PCIe gen1.
+ \<1\> = PCIe gen2.
+ \<2\> = PCIe gen3.
+ \<3\> = PCIe gen4. */
+ uint64_t frc_unalgn_rxelecidle : 4; /**< [ 55: 52](R/W) Enables use of detected RxElecIdle to force the RX PCS into unalign state
+ with an individual control bit per PCIe rate mapped as follows:
+ \<0\> = PCIe gen1.
+ \<1\> = PCIe gen2.
+ \<2\> = PCIe gen3.
+ \<3\> = PCIe gen4. */
+ uint64_t frc_unalgn_blkalgnctl : 2; /**< [ 51: 50](R/W) Enables use of BlockAlignControl assertion to force the RX PCS into unalign state
+ with an individual control bit per PCIe rate mapped as follows:
+ \<0\> = PCIe gen3.
+ \<1\> = PCIe gen4. */
+ uint64_t pipe_tx_sel : 2; /**< [ 49: 48](R/W) Selects the source for the transmit PIPE controls:
+ \<0\> = PCIe pipe 0 transmit.
+ \<1\> = PCIe pipe 1 transmit.
+ \<2\> = PCIe pipe 2 transmit.
+ \<3\> = Reserved. */
+ uint64_t reserved_46_47 : 2;
+ uint64_t gen34_pll_div_f : 18; /**< [ 45: 28](R/W) PLL feedback divider fractional portion. */
+ uint64_t reserved_26_27 : 2;
+ uint64_t gen12_pll_div_f : 18; /**< [ 25: 8](R/W) PLL feedback divider fractional portion. */
+ uint64_t pause_adpt_on_idle : 4; /**< [ 7: 4](R/W) Set to one to allow the Rx Electrical Idle to pause all adaptation functions and
+ hold the CDRFSM when the PCIe lane is operating at the corresponding rate.
+ The individual bits are mapped as follows:
+ \<0\> = PCIe gen1.
+ \<1\> = PCIe gen2.
+ \<2\> = PCIe gen3.
+ \<3\> = PCIe gen4. */
+ uint64_t do_prevga_gn_adpt : 4; /**< [ 3: 0](R/W) Set to one to allow the adaptation reset state machine to trigger PREVGA_GN adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the PCIe lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = PCIe gen1.
+ \<1\> = PCIe gen2.
+ \<2\> = PCIe gen3.
+ \<3\> = PCIe gen4. */
+#else /* Word 0 - Little Endian */
+ uint64_t do_prevga_gn_adpt : 4; /**< [ 3: 0](R/W) Set to one to allow the adaptation reset state machine to trigger PREVGA_GN adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the PCIe lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = PCIe gen1.
+ \<1\> = PCIe gen2.
+ \<2\> = PCIe gen3.
+ \<3\> = PCIe gen4. */
+ uint64_t pause_adpt_on_idle : 4; /**< [ 7: 4](R/W) Set to one to allow the Rx Electrical Idle to pause all adaptation functions and
+ hold the CDRFSM when the PCIe lane is operating at the corresponding rate.
+ The individual bits are mapped as follows:
+ \<0\> = PCIe gen1.
+ \<1\> = PCIe gen2.
+ \<2\> = PCIe gen3.
+ \<3\> = PCIe gen4. */
+ uint64_t gen12_pll_div_f : 18; /**< [ 25: 8](R/W) PLL feedback divider fractional portion. */
+ uint64_t reserved_26_27 : 2;
+ uint64_t gen34_pll_div_f : 18; /**< [ 45: 28](R/W) PLL feedback divider fractional portion. */
+ uint64_t reserved_46_47 : 2;
+ uint64_t pipe_tx_sel : 2; /**< [ 49: 48](R/W) Selects the source for the transmit PIPE controls:
+ \<0\> = PCIe pipe 0 transmit.
+ \<1\> = PCIe pipe 1 transmit.
+ \<2\> = PCIe pipe 2 transmit.
+ \<3\> = Reserved. */
+ uint64_t frc_unalgn_blkalgnctl : 2; /**< [ 51: 50](R/W) Enables use of BlockAlignControl assertion to force the RX PCS into unalign state
+ with an individual control bit per PCIe rate mapped as follows:
+ \<0\> = PCIe gen3.
+ \<1\> = PCIe gen4. */
+ uint64_t frc_unalgn_rxelecidle : 4; /**< [ 55: 52](R/W) Enables use of detected RxElecIdle to force the RX PCS into unalign state
+ with an individual control bit per PCIe rate mapped as follows:
+ \<0\> = PCIe gen1.
+ \<1\> = PCIe gen2.
+ \<2\> = PCIe gen3.
+ \<3\> = PCIe gen4. */
+ uint64_t frc_unalgn_rxstandby : 4; /**< [ 59: 56](R/W) Enables use of RxStandby to force the RX PCS into unalign state with
+ an individual control bit per PCIe rate mapped as follows:
+ \<0\> = PCIe gen1.
+ \<1\> = PCIe gen2.
+ \<2\> = PCIe gen3.
+ \<3\> = PCIe gen4. */
+ uint64_t pause_adpt_rxstandby : 4; /**< [ 63: 60](R/W) Set to one to allow the PIPE RxStandby to pause all adaptation functions and
+ hold the CDRFSM when the PCIe lane is operating at the corresponding rate.
+ The individual bits are mapped as follows:
+ \<0\> = PCIe gen1.
+ \<1\> = PCIe gen2.
+ \<2\> = PCIe gen3.
+ \<3\> = PCIe gen4. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_pcs2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_pcs2_bcfg bdk_gsernx_lanex_pcie_pcs2_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_PCS2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_PCS2_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001f20ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_PCS2_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_PCS2_BCFG(a,b) bdk_gsernx_lanex_pcie_pcs2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_PCS2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_PCS2_BCFG(a,b) "GSERNX_LANEX_PCIE_PCS2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_PCS2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_PCS2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_PCS2_BCFG(a,b) (a),(b),-1,-1
+
/**
 * Register (RSL) gsern#_lane#_pcie_pcs3_bcfg
 *
 * GSER Lane PCIe PCS Control 3 Register
 * Control settings for PCIe PCS functionality.
 * The field list appears twice below (big- and little-endian arms); both
 * arms describe the same 64-bit layout and must be kept in sync.
 */
union bdk_gsernx_lanex_pcie_pcs3_bcfg
{
    uint64_t u; /* Raw access to the whole 64-bit register value. */
    struct bdk_gsernx_lanex_pcie_pcs3_bcfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
    uint64_t reserved_36_63 : 28;
    uint64_t tx_enfast : 4; /**< [ 35: 32](R/W) Enables fast slew on the TX preamp output with an individual control bit
 per PCIe rate mapped as following:
 \<0\> = PCIe Gen1.
 \<1\> = PCIe Gen2.
 \<2\> = PCIe Gen3.
 \<3\> = PCIe Gen4. */
    uint64_t do_afeos_final : 4; /**< [ 31: 28](R/W) Set to one to allow AFEOS adaptation to keep running continuously during the final
 phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
 GSERN()_LANE()_PCIE_PCS_BCFG[DO_AFEOS_ADPT] is set and the PCIe lane is operating
 at the corresponding rate. The individual bits are mapped as follows:
 \<0\> = PCIe Gen1.
 \<1\> = PCIe Gen2.
 \<2\> = PCIe Gen3.
 \<3\> = PCIe Gen4. */
    uint64_t do_ctlelte_final : 4; /**< [ 27: 24](R/W) Set to one to allow CTLELTE adaptation to keep running continuously during the final
 phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
 GSERN()_LANE()_PCIE_PCS_BCFG[DO_CTLELTE_ADPT] is set and the PCIe lane is operating
 at the corresponding rate. The individual bits are mapped as follows:
 \<0\> = PCIe Gen1.
 \<1\> = PCIe Gen2.
 \<2\> = PCIe Gen3.
 \<3\> = PCIe Gen4. */
    uint64_t do_ctlez_final : 4; /**< [ 23: 20](R/W) Set to one to allow CTLEZ adaptation to keep running continuously during the final
 phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
 GSERN()_LANE()_PCIE_PCS_BCFG[DO_CTLEZ_ADPT] is set and the PCIe lane is operating
 at the corresponding rate. The individual bits are mapped as follows:
 \<0\> = PCIe Gen1.
 \<1\> = PCIe Gen2.
 \<2\> = PCIe Gen3.
 \<3\> = PCIe Gen4. */
    uint64_t do_ctle_final : 4; /**< [ 19: 16](R/W) Set to one to allow CTLE adaptation to keep running continuously during the final
 phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
 GSERN()_LANE()_PCIE_PCS_BCFG[DO_CTLE_ADPT] is set and the PCIe lane is operating
 at the corresponding rate. The individual bits are mapped as follows:
 \<0\> = PCIe Gen1.
 \<1\> = PCIe Gen2.
 \<2\> = PCIe Gen3.
 \<3\> = PCIe Gen4. */
    uint64_t do_dfe_final : 4; /**< [ 15: 12](R/W) Set to one to allow DFE adaptation to keep running continuously during the final
 phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
 GSERN()_LANE()_PCIE_PCS_BCFG[DO_DFE_ADPT] is set and the PCIe lane is operating
 at the corresponding rate. The individual bits are mapped as follows:
 \<0\> = PCIe Gen1.
 \<1\> = PCIe Gen2.
 \<2\> = PCIe Gen3.
 \<3\> = PCIe Gen4. */
    uint64_t do_vga_final : 4; /**< [ 11:  8](R/W) Set to one to allow VGA adaptation to keep running continuously during the final
 phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
 GSERN()_LANE()_PCIE_PCS_BCFG[DO_VGA_ADPT] is set and the PCIe lane is operating
 at the corresponding rate. The individual bits are mapped as follows:
 \<0\> = PCIe Gen1.
 \<1\> = PCIe Gen2.
 \<2\> = PCIe Gen3.
 \<3\> = PCIe Gen4. */
    uint64_t do_blwc_final : 4; /**< [  7:  4](R/W) Set to one to allow BLWC adaptation to keep running continuously during the final
 phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
 GSERN()_LANE()_PCIE_PCS_BCFG[DO_BLWC_ADPT] is set and the PCIe lane is operating
 at the corresponding rate. The individual bits are mapped as follows:
 \<0\> = PCIe Gen1.
 \<1\> = PCIe Gen2.
 \<2\> = PCIe Gen3.
 \<3\> = PCIe Gen4. */
    uint64_t do_prevga_gn_final : 4; /**< [  3:  0](R/W) Set to one to allow PREVGA_GN adaptation to keep running continuously during the final
 phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
 GSERN()_LANE()_PCIE_PCS2_BCFG[DO_PREVGA_GN_ADPT] is set and the PCIe lane is operating
 at the corresponding rate. The individual bits are mapped as follows:
 \<0\> = PCIe Gen1.
 \<1\> = PCIe Gen2.
 \<2\> = PCIe Gen3.
 \<3\> = PCIe Gen4. */
#else /* Word 0 - Little Endian */
    uint64_t do_prevga_gn_final : 4; /**< [  3:  0](R/W) Set to one to allow PREVGA_GN adaptation to keep running continuously during the final
 phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
 GSERN()_LANE()_PCIE_PCS2_BCFG[DO_PREVGA_GN_ADPT] is set and the PCIe lane is operating
 at the corresponding rate. The individual bits are mapped as follows:
 \<0\> = PCIe Gen1.
 \<1\> = PCIe Gen2.
 \<2\> = PCIe Gen3.
 \<3\> = PCIe Gen4. */
    uint64_t do_blwc_final : 4; /**< [  7:  4](R/W) Set to one to allow BLWC adaptation to keep running continuously during the final
 phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
 GSERN()_LANE()_PCIE_PCS_BCFG[DO_BLWC_ADPT] is set and the PCIe lane is operating
 at the corresponding rate. The individual bits are mapped as follows:
 \<0\> = PCIe Gen1.
 \<1\> = PCIe Gen2.
 \<2\> = PCIe Gen3.
 \<3\> = PCIe Gen4. */
    uint64_t do_vga_final : 4; /**< [ 11:  8](R/W) Set to one to allow VGA adaptation to keep running continuously during the final
 phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
 GSERN()_LANE()_PCIE_PCS_BCFG[DO_VGA_ADPT] is set and the PCIe lane is operating
 at the corresponding rate. The individual bits are mapped as follows:
 \<0\> = PCIe Gen1.
 \<1\> = PCIe Gen2.
 \<2\> = PCIe Gen3.
 \<3\> = PCIe Gen4. */
    uint64_t do_dfe_final : 4; /**< [ 15: 12](R/W) Set to one to allow DFE adaptation to keep running continuously during the final
 phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
 GSERN()_LANE()_PCIE_PCS_BCFG[DO_DFE_ADPT] is set and the PCIe lane is operating
 at the corresponding rate. The individual bits are mapped as follows:
 \<0\> = PCIe Gen1.
 \<1\> = PCIe Gen2.
 \<2\> = PCIe Gen3.
 \<3\> = PCIe Gen4. */
    uint64_t do_ctle_final : 4; /**< [ 19: 16](R/W) Set to one to allow CTLE adaptation to keep running continuously during the final
 phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
 GSERN()_LANE()_PCIE_PCS_BCFG[DO_CTLE_ADPT] is set and the PCIe lane is operating
 at the corresponding rate. The individual bits are mapped as follows:
 \<0\> = PCIe Gen1.
 \<1\> = PCIe Gen2.
 \<2\> = PCIe Gen3.
 \<3\> = PCIe Gen4. */
    uint64_t do_ctlez_final : 4; /**< [ 23: 20](R/W) Set to one to allow CTLEZ adaptation to keep running continuously during the final
 phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
 GSERN()_LANE()_PCIE_PCS_BCFG[DO_CTLEZ_ADPT] is set and the PCIe lane is operating
 at the corresponding rate. The individual bits are mapped as follows:
 \<0\> = PCIe Gen1.
 \<1\> = PCIe Gen2.
 \<2\> = PCIe Gen3.
 \<3\> = PCIe Gen4. */
    uint64_t do_ctlelte_final : 4; /**< [ 27: 24](R/W) Set to one to allow CTLELTE adaptation to keep running continuously during the final
 phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
 GSERN()_LANE()_PCIE_PCS_BCFG[DO_CTLELTE_ADPT] is set and the PCIe lane is operating
 at the corresponding rate. The individual bits are mapped as follows:
 \<0\> = PCIe Gen1.
 \<1\> = PCIe Gen2.
 \<2\> = PCIe Gen3.
 \<3\> = PCIe Gen4. */
    uint64_t do_afeos_final : 4; /**< [ 31: 28](R/W) Set to one to allow AFEOS adaptation to keep running continuously during the final
 phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
 GSERN()_LANE()_PCIE_PCS_BCFG[DO_AFEOS_ADPT] is set and the PCIe lane is operating
 at the corresponding rate. The individual bits are mapped as follows:
 \<0\> = PCIe Gen1.
 \<1\> = PCIe Gen2.
 \<2\> = PCIe Gen3.
 \<3\> = PCIe Gen4. */
    uint64_t tx_enfast : 4; /**< [ 35: 32](R/W) Enables fast slew on the TX preamp output with an individual control bit
 per PCIe rate mapped as following:
 \<0\> = PCIe Gen1.
 \<1\> = PCIe Gen2.
 \<2\> = PCIe Gen3.
 \<3\> = PCIe Gen4. */
    uint64_t reserved_36_63 : 28;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_pcie_pcs3_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_pcie_pcs3_bcfg bdk_gsernx_lanex_pcie_pcs3_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_PCS3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_PCS3_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001f30ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_PCS3_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_PCS3_BCFG(a,b) bdk_gsernx_lanex_pcie_pcs3_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_PCS3_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_PCS3_BCFG(a,b) "GSERNX_LANEX_PCIE_PCS3_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_PCS3_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_PCS3_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_PCS3_BCFG(a,b) (a),(b),-1,-1
+
/**
 * Register (RSL) gsern#_lane#_pcie_pcs_bcfg
 *
 * GSER Lane PCIe PCS Control Register
 * Control settings for PCIe PCS functionality.
 * The field list appears twice below (big- and little-endian arms); both
 * arms describe the same 64-bit layout and must be kept in sync.
 */
union bdk_gsernx_lanex_pcie_pcs_bcfg
{
    uint64_t u; /* Raw access to the whole 64-bit register value. */
    struct bdk_gsernx_lanex_pcie_pcs_bcfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
    uint64_t do_afeos_adpt : 4; /**< [ 63: 60](R/W) Set to one to allow the adaptation reset state machine to trigger AFEOS adaptation
 when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the PCIe lane is
 operating at the corresponding rate. The individual bits are mapped as follows:
 \<0\> = PCIe gen1.
 \<1\> = PCIe gen2.
 \<2\> = PCIe gen3.
 \<3\> = PCIe gen4. */
    uint64_t do_ctlelte_adpt : 4; /**< [ 59: 56](R/W) Set to one to allow the adaptation reset state machine to trigger CTLELTE adaptation
 when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the PCIe lane is
 operating at the corresponding rate. The individual bits are mapped as follows:
 \<0\> = PCIe gen1.
 \<1\> = PCIe gen2.
 \<2\> = PCIe gen3.
 \<3\> = PCIe gen4. */
    uint64_t do_ctlez_adpt : 4; /**< [ 55: 52](R/W) Set to one to allow the adaptation reset state machine to trigger CTLEZ adaptation
 when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the PCIe lane is
 operating at the corresponding rate. The individual bits are mapped as follows:
 \<0\> = PCIe gen1.
 \<1\> = PCIe gen2.
 \<2\> = PCIe gen3.
 \<3\> = PCIe gen4. */
    uint64_t do_ctle_adpt : 4; /**< [ 51: 48](R/W) Set to one to allow the adaptation reset state machine to trigger CTLE adaptation
 when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the PCIe lane is
 operating at the corresponding rate. The individual bits are mapped as follows:
 \<0\> = PCIe gen1.
 \<1\> = PCIe gen2.
 \<2\> = PCIe gen3.
 \<3\> = PCIe gen4. */
    uint64_t do_dfe_adpt : 4; /**< [ 47: 44](R/W) Set to one to allow the adaptation reset state machine to trigger DFE adaptation
 when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the PCIe lane is
 operating at the corresponding rate. The individual bits are mapped as follows:
 \<0\> = PCIe gen1.
 \<1\> = PCIe gen2.
 \<2\> = PCIe gen3.
 \<3\> = PCIe gen4. */
    uint64_t do_vga_adpt : 4; /**< [ 43: 40](R/W) Set to one to allow the adaptation reset state machine to trigger VGA adaptation
 when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the PCIe lane is
 operating at the corresponding rate. The individual bits are mapped as follows:
 \<0\> = PCIe gen1.
 \<1\> = PCIe gen2.
 \<2\> = PCIe gen3.
 \<3\> = PCIe gen4. */
    uint64_t do_blwc_adpt : 4; /**< [ 39: 36](R/W) Set to one to allow the adaptation reset state machine to trigger BLWC adaptation
 when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the PCIe lane is
 operating at the corresponding rate. The individual bits are mapped as follows:
 \<0\> = PCIe gen1.
 \<1\> = PCIe gen2.
 \<2\> = PCIe gen3.
 \<3\> = PCIe gen4. */
    uint64_t gen34_pll_div_n : 9; /**< [ 35: 27](R/W) PLL feedback divider integer portion. */
    uint64_t reserved_25_26 : 2;
    uint64_t gen12_pll_div_n : 9; /**< [ 24: 16](R/W) PLL feedback divider integer portion. */
    uint64_t skp_add_thr : 4; /**< [ 15: 12](R/W) SKP addition threshold.
 The receive elastic store will add a SKP symbol (Gen1/2) or add four
 SKP symbols (Gen3/4) when the store fill level is less than or equal
 to this value. */
    uint64_t skp_del_thr : 4; /**< [ 11:  8](R/W) SKP deletion threshold.
 The receive elastic store will delete a SKP symbol (Gen1/2) or delete
 four SKP symbols (Gen3/4) when the store fill level is greater than or
 equal to this value plus 8. */
    uint64_t comma_thr : 4; /**< [  7:  4](R/W) COMMA detection threshold. The receive aligner must see this many
 COMMA characters at the same rotation before declaring symbol
 alignment (only used for Gen1/2). */
    uint64_t error_thr : 4; /**< [  3:  0](R/W) Error threshold. The receive aligner must see this many COMMA
 characters at a different rotation than currently in use before
 declaring loss of symbol alignment (Gen1/2). For Gen3/4 this is
 the number of invalid Sync Headers needed to cause the aligner
 to enter the Unaligned Phase and declare an alignment error. */
#else /* Word 0 - Little Endian */
    uint64_t error_thr : 4; /**< [  3:  0](R/W) Error threshold. The receive aligner must see this many COMMA
 characters at a different rotation than currently in use before
 declaring loss of symbol alignment (Gen1/2). For Gen3/4 this is
 the number of invalid Sync Headers needed to cause the aligner
 to enter the Unaligned Phase and declare an alignment error. */
    uint64_t comma_thr : 4; /**< [  7:  4](R/W) COMMA detection threshold. The receive aligner must see this many
 COMMA characters at the same rotation before declaring symbol
 alignment (only used for Gen1/2). */
    uint64_t skp_del_thr : 4; /**< [ 11:  8](R/W) SKP deletion threshold.
 The receive elastic store will delete a SKP symbol (Gen1/2) or delete
 four SKP symbols (Gen3/4) when the store fill level is greater than or
 equal to this value plus 8. */
    uint64_t skp_add_thr : 4; /**< [ 15: 12](R/W) SKP addition threshold.
 The receive elastic store will add a SKP symbol (Gen1/2) or add four
 SKP symbols (Gen3/4) when the store fill level is less than or equal
 to this value. */
    uint64_t gen12_pll_div_n : 9; /**< [ 24: 16](R/W) PLL feedback divider integer portion. */
    uint64_t reserved_25_26 : 2;
    uint64_t gen34_pll_div_n : 9; /**< [ 35: 27](R/W) PLL feedback divider integer portion. */
    uint64_t do_blwc_adpt : 4; /**< [ 39: 36](R/W) Set to one to allow the adaptation reset state machine to trigger BLWC adaptation
 when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the PCIe lane is
 operating at the corresponding rate. The individual bits are mapped as follows:
 \<0\> = PCIe gen1.
 \<1\> = PCIe gen2.
 \<2\> = PCIe gen3.
 \<3\> = PCIe gen4. */
    uint64_t do_vga_adpt : 4; /**< [ 43: 40](R/W) Set to one to allow the adaptation reset state machine to trigger VGA adaptation
 when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the PCIe lane is
 operating at the corresponding rate. The individual bits are mapped as follows:
 \<0\> = PCIe gen1.
 \<1\> = PCIe gen2.
 \<2\> = PCIe gen3.
 \<3\> = PCIe gen4. */
    uint64_t do_dfe_adpt : 4; /**< [ 47: 44](R/W) Set to one to allow the adaptation reset state machine to trigger DFE adaptation
 when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the PCIe lane is
 operating at the corresponding rate. The individual bits are mapped as follows:
 \<0\> = PCIe gen1.
 \<1\> = PCIe gen2.
 \<2\> = PCIe gen3.
 \<3\> = PCIe gen4. */
    uint64_t do_ctle_adpt : 4; /**< [ 51: 48](R/W) Set to one to allow the adaptation reset state machine to trigger CTLE adaptation
 when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the PCIe lane is
 operating at the corresponding rate. The individual bits are mapped as follows:
 \<0\> = PCIe gen1.
 \<1\> = PCIe gen2.
 \<2\> = PCIe gen3.
 \<3\> = PCIe gen4. */
    uint64_t do_ctlez_adpt : 4; /**< [ 55: 52](R/W) Set to one to allow the adaptation reset state machine to trigger CTLEZ adaptation
 when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the PCIe lane is
 operating at the corresponding rate. The individual bits are mapped as follows:
 \<0\> = PCIe gen1.
 \<1\> = PCIe gen2.
 \<2\> = PCIe gen3.
 \<3\> = PCIe gen4. */
    uint64_t do_ctlelte_adpt : 4; /**< [ 59: 56](R/W) Set to one to allow the adaptation reset state machine to trigger CTLELTE adaptation
 when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the PCIe lane is
 operating at the corresponding rate. The individual bits are mapped as follows:
 \<0\> = PCIe gen1.
 \<1\> = PCIe gen2.
 \<2\> = PCIe gen3.
 \<3\> = PCIe gen4. */
    uint64_t do_afeos_adpt : 4; /**< [ 63: 60](R/W) Set to one to allow the adaptation reset state machine to trigger AFEOS adaptation
 when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the PCIe lane is
 operating at the corresponding rate. The individual bits are mapped as follows:
 \<0\> = PCIe gen1.
 \<1\> = PCIe gen2.
 \<2\> = PCIe gen3.
 \<3\> = PCIe gen4. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_pcie_pcs_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_pcie_pcs_bcfg bdk_gsernx_lanex_pcie_pcs_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_PCS_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_PCS_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001f10ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_PCS_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_PCS_BCFG(a,b) bdk_gsernx_lanex_pcie_pcs_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_PCS_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_PCS_BCFG(a,b) "GSERNX_LANEX_PCIE_PCS_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_PCS_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_PCS_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_PCS_BCFG(a,b) (a),(b),-1,-1
+
/**
 * Register (RSL) gsern#_lane#_pcie_pcs_bsts
 *
 * GSER Lane PCIe PCS Status Register
 * Error Status for PCIe PCS functionality.
 * The field list appears twice below (big- and little-endian arms); both
 * arms describe the same 64-bit layout and must be kept in sync.
 */
union bdk_gsernx_lanex_pcie_pcs_bsts
{
    uint64_t u; /* Raw access to the whole 64-bit register value. */
    struct bdk_gsernx_lanex_pcie_pcs_bsts_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
    uint64_t reserved_28_63 : 36;
    uint64_t pcs_rx_eq_raw_fom : 12; /**< [ 27: 16](RO/H) Raw 12-bit figure of merit for last receiver equalization evaluation. */
    uint64_t reserved_5_15 : 11;
    uint64_t pcs_8b10b_disp_error : 1; /**< [  4:  4](R/W1C/H) 8B10B disparity error (PCIe Gen1/2 only).
 A valid 8B10B code word was received with invalid disparity. */
    uint64_t pcs_decode_error : 1; /**< [  3:  3](R/W1C/H) 8B10B decode error (PCIe Gen1/2).
 An invalid 8B10B code word was detected. The invalid code word was
 replaced by an EDB symbol (0xFE).

 128B130B decode error (PCIe Gen3/4).
 An error was detected in the first 4N+1 symbols of a SKP ordered set. */
    uint64_t es_underflow : 1; /**< [  2:  2](R/W1C/H) Elastic store underflow.
 A read was attempted from the receive Elastic Store while it was empty.
 This would indicate a receive data rate slower than supported or a
 lack of SKP ordered sets to allow SKP symbol additions. */
    uint64_t es_overflow : 1; /**< [  1:  1](R/W1C/H) Elastic store overflow.
 A write was attempted to the receive Elastic Store while it was full.
 This would indicate a receive data rate faster than supported or a
 lack of SKP ordered sets to allow SKP symbol deletions. */
    uint64_t align_error : 1; /**< [  0:  0](R/W1C/H) Alignment error.
 The receive aligner has detected an error. For PCIe Gen1/2, an error is
 declared if GSERN()_LANE()_PCIE_PCS_BCFG[ERROR_THR]
 COMMA characters are detected at a 10 bit rotation that does not match
 the active rotation. The COMMAs do not have to all be at the same rotation.
 For PCIe Gen3/4, an error is declared if GSERN()_LANE()_PCIE_PCS_BCFG[ERROR_THR]
 invalid sync headers are detected at the current block alignment. */
#else /* Word 0 - Little Endian */
    uint64_t align_error : 1; /**< [  0:  0](R/W1C/H) Alignment error.
 The receive aligner has detected an error. For PCIe Gen1/2, an error is
 declared if GSERN()_LANE()_PCIE_PCS_BCFG[ERROR_THR]
 COMMA characters are detected at a 10 bit rotation that does not match
 the active rotation. The COMMAs do not have to all be at the same rotation.
 For PCIe Gen3/4, an error is declared if GSERN()_LANE()_PCIE_PCS_BCFG[ERROR_THR]
 invalid sync headers are detected at the current block alignment. */
    uint64_t es_overflow : 1; /**< [  1:  1](R/W1C/H) Elastic store overflow.
 A write was attempted to the receive Elastic Store while it was full.
 This would indicate a receive data rate faster than supported or a
 lack of SKP ordered sets to allow SKP symbol deletions. */
    uint64_t es_underflow : 1; /**< [  2:  2](R/W1C/H) Elastic store underflow.
 A read was attempted from the receive Elastic Store while it was empty.
 This would indicate a receive data rate slower than supported or a
 lack of SKP ordered sets to allow SKP symbol additions. */
    uint64_t pcs_decode_error : 1; /**< [  3:  3](R/W1C/H) 8B10B decode error (PCIe Gen1/2).
 An invalid 8B10B code word was detected. The invalid code word was
 replaced by an EDB symbol (0xFE).

 128B130B decode error (PCIe Gen3/4).
 An error was detected in the first 4N+1 symbols of a SKP ordered set. */
    uint64_t pcs_8b10b_disp_error : 1; /**< [  4:  4](R/W1C/H) 8B10B disparity error (PCIe Gen1/2 only).
 A valid 8B10B code word was received with invalid disparity. */
    uint64_t reserved_5_15 : 11;
    uint64_t pcs_rx_eq_raw_fom : 12; /**< [ 27: 16](RO/H) Raw 12-bit figure of merit for last receiver equalization evaluation. */
    uint64_t reserved_28_63 : 36;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_pcie_pcs_bsts_s cn; */
};
typedef union bdk_gsernx_lanex_pcie_pcs_bsts bdk_gsernx_lanex_pcie_pcs_bsts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_PCS_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_PCS_BSTS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002a30ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_PCS_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_PCS_BSTS(a,b) bdk_gsernx_lanex_pcie_pcs_bsts_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_PCS_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_PCS_BSTS(a,b) "GSERNX_LANEX_PCIE_PCS_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_PCS_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_PCS_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_PCS_BSTS(a,b) (a),(b),-1,-1
+
/**
 * Register (RSL) gsern#_lane#_pcie_rstp1_bcfg
 *
 * GSER Lane PCIe PowerDown P1 Reset States Control Register
 * Controls the Reset states (Lane PLL, Tx, Rx, Adapt and Eye Monitor) corresponding to
 * PCIe PowerDown state P1.
 * The field list appears twice below (big- and little-endian arms); both
 * arms describe the same 64-bit layout and must be kept in sync.
 */
union bdk_gsernx_lanex_pcie_rstp1_bcfg
{
    uint64_t u; /* Raw access to the whole 64-bit register value. */
    struct bdk_gsernx_lanex_pcie_rstp1_bcfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
    uint64_t reserved_35_63 : 29;
    uint64_t txcmnmode_disable : 1; /**< [ 34: 34](R/W) Reserved.
 Internal:
 Set to disable Tx Common Mode voltage during P1 PowerDown state. */
    uint64_t rxidledet_disable : 1; /**< [ 33: 33](R/W) Reserved.
 Internal:
 Set to disable Rx Electric Idle detection during P1 PowerDown state. */
    uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
 Internal:
 FIXME - add more details
 Rx Adapt state Pause (0) or Hard Reset (1) during P1 PowerDown state. */
    uint64_t reserved_29_31 : 3;
    uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
 Internal:
 FIXME - add more details
 Eye monitor reset state during P1 PowerDown state. */
    uint64_t reserved_21_23 : 3;
    uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
 Internal:
 FIXME - add more details
 RX reset state during P1 PowerDown state. */
    uint64_t reserved_12_15 : 4;
    uint64_t tx_rst : 4; /**< [ 11:  8](R/W) Reserved.
 Internal:
 FIXME - add more details
 TX reset state during P1 PowerDown state, but is only used when P1 is entered for
 lanes that were active in a link and that link has now returned to LTSSM.DETECT
 state and there are other lanes rejoining the link after having been turned off. */
    uint64_t reserved_4_7 : 4;
    uint64_t lnpll_rst : 4; /**< [  3:  0](R/W) Reserved.
 Internal:
 FIXME - add more details
 LANE PLL reset state during P1 PowerDown state, but is only used when P1 is entered
 for lanes that were active in a link and that link has now returned to LTSSM.DETECT
 state and there are other lanes rejoining the link after having been turned off.
 Note: this value is never likely to be changed from the normal run state (0x8). */
#else /* Word 0 - Little Endian */
    uint64_t lnpll_rst : 4; /**< [  3:  0](R/W) Reserved.
 Internal:
 FIXME - add more details
 LANE PLL reset state during P1 PowerDown state, but is only used when P1 is entered
 for lanes that were active in a link and that link has now returned to LTSSM.DETECT
 state and there are other lanes rejoining the link after having been turned off.
 Note: this value is never likely to be changed from the normal run state (0x8). */
    uint64_t reserved_4_7 : 4;
    uint64_t tx_rst : 4; /**< [ 11:  8](R/W) Reserved.
 Internal:
 FIXME - add more details
 TX reset state during P1 PowerDown state, but is only used when P1 is entered for
 lanes that were active in a link and that link has now returned to LTSSM.DETECT
 state and there are other lanes rejoining the link after having been turned off. */
    uint64_t reserved_12_15 : 4;
    uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
 Internal:
 FIXME - add more details
 RX reset state during P1 PowerDown state. */
    uint64_t reserved_21_23 : 3;
    uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
 Internal:
 FIXME - add more details
 Eye monitor reset state during P1 PowerDown state. */
    uint64_t reserved_29_31 : 3;
    uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
 Internal:
 FIXME - add more details
 Rx Adapt state Pause (0) or Hard Reset (1) during P1 PowerDown state. */
    uint64_t rxidledet_disable : 1; /**< [ 33: 33](R/W) Reserved.
 Internal:
 Set to disable Rx Electric Idle detection during P1 PowerDown state. */
    uint64_t txcmnmode_disable : 1; /**< [ 34: 34](R/W) Reserved.
 Internal:
 Set to disable Tx Common Mode voltage during P1 PowerDown state. */
    uint64_t reserved_35_63 : 29;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_pcie_rstp1_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_pcie_rstp1_bcfg bdk_gsernx_lanex_pcie_rstp1_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RSTP1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RSTP1_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002030ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RSTP1_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RSTP1_BCFG(a,b) bdk_gsernx_lanex_pcie_rstp1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RSTP1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RSTP1_BCFG(a,b) "GSERNX_LANEX_PCIE_RSTP1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RSTP1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RSTP1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RSTP1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rstp1s0_bcfg
+ *
+ * GSER Lane PCIe PowerDown P1 CPM Reset States Control Register
+ * Controls the Reset states (Lane PLL, Tx, Rx, Adapt and Eye Monitor) corresponding to
+ * PCIe PowerDown state P1 CPM (P1 substates entry).
+ */
+union bdk_gsernx_lanex_pcie_rstp1s0_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_rstp1s0_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_35_63 : 29;
+ uint64_t txcmnmode_disable : 1; /**< [ 34: 34](R/W) Reserved.
+ Internal:
+ Set to disable Tx Common Mode voltage during P1 CPM PowerDown state. */
+ uint64_t rxidledet_disable : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Set to disable Rx Electric Idle detection during P1 CPM PowerDown state. */
+ uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Rx Adapt state Pause (0) or Hard Reset (1) during P1 CPM PowerDown state. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Eye monitor reset state during P1 CPM PowerDown state. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ RX reset state during P1 CPM PowerDown state. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ TX reset state during P1 CPM PowerDown state. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ LANE PLL reset state during P1 CPM PowerDown state. */
+#else /* Word 0 - Little Endian */
+ uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ LANE PLL reset state during P1 CPM PowerDown state. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ TX reset state during P1 CPM PowerDown state. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ RX reset state during P1 CPM PowerDown state. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Eye monitor reset state during P1 CPM PowerDown state. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Rx Adapt state Pause (0) or Hard Reset (1) during P1 CPM PowerDown state. */
+ uint64_t rxidledet_disable : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Set to disable Rx Electric Idle detection during P1 CPM PowerDown state. */
+ uint64_t txcmnmode_disable : 1; /**< [ 34: 34](R/W) Reserved.
+ Internal:
+ Set to disable Tx Common Mode voltage during P1 CPM PowerDown state. */
+ uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_rstp1s0_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rstp1s0_bcfg bdk_gsernx_lanex_pcie_rstp1s0_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RSTP1S0_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RSTP1S0_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002040ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RSTP1S0_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RSTP1S0_BCFG(a,b) bdk_gsernx_lanex_pcie_rstp1s0_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RSTP1S0_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RSTP1S0_BCFG(a,b) "GSERNX_LANEX_PCIE_RSTP1S0_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RSTP1S0_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RSTP1S0_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RSTP1S0_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rstp1s1_bcfg
+ *
+ * GSER Lane PCIe PowerDown P1.1 Reset States Control Register
+ * Controls the Reset states (Lane PLL, Tx, Rx, Adapt and Eye Monitor) corresponding to
+ * PCIe PowerDown state P1.1 (P1 substate).
+ */
+union bdk_gsernx_lanex_pcie_rstp1s1_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_rstp1s1_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_35_63 : 29;
+ uint64_t txcmnmode_disable : 1; /**< [ 34: 34](R/W) Reserved.
+ Internal:
+ Set to disable Tx Common Mode voltage during P1.1 PowerDown state. */
+ uint64_t rxidledet_disable : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Set to disable Rx Electric Idle detection during P1.1 PowerDown state. */
+ uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Rx Adapt state Pause (0) or Hard Reset (1) during P1.1 PowerDown state. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Eye monitor reset state during P1.1 PowerDown state. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ RX reset state during P1.1 PowerDown state. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ TX reset state during P1.1 PowerDown state. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ LANE PLL reset state during P1.1 PowerDown state. */
+#else /* Word 0 - Little Endian */
+ uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ LANE PLL reset state during P1.1 PowerDown state. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ TX reset state during P1.1 PowerDown state. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ RX reset state during P1.1 PowerDown state. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Eye monitor reset state during P1.1 PowerDown state. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Rx Adapt state Pause (0) or Hard Reset (1) during P1.1 PowerDown state. */
+ uint64_t rxidledet_disable : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Set to disable Rx Electric Idle detection during P1.1 PowerDown state. */
+ uint64_t txcmnmode_disable : 1; /**< [ 34: 34](R/W) Reserved.
+ Internal:
+ Set to disable Tx Common Mode voltage during P1.1 PowerDown state. */
+ uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_rstp1s1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rstp1s1_bcfg bdk_gsernx_lanex_pcie_rstp1s1_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RSTP1S1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RSTP1S1_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002050ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RSTP1S1_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RSTP1S1_BCFG(a,b) bdk_gsernx_lanex_pcie_rstp1s1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RSTP1S1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RSTP1S1_BCFG(a,b) "GSERNX_LANEX_PCIE_RSTP1S1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RSTP1S1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RSTP1S1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RSTP1S1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rstp1s2_bcfg
+ *
+ * GSER Lane PCIe PowerDown P1.2 Reset States Control Register
+ * Controls the Reset states (Lane PLL, Tx, Rx, Adapt and Eye Monitor) corresponding to
+ * PCIe PowerDown state P1.2 (P1 substate).
+ */
+union bdk_gsernx_lanex_pcie_rstp1s2_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_rstp1s2_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_35_63 : 29;
+ uint64_t txcmnmode_disable : 1; /**< [ 34: 34](R/W) Reserved.
+ Internal:
+ Set to disable Tx Common Mode voltage during P1.2 PowerDown state. */
+ uint64_t rxidledet_disable : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Set to disable Rx Electric Idle detection during P1.2 PowerDown state. */
+ uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Rx Adapt state Pause (0) or Hard Reset (1) during P1.2 PowerDown state. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Eye monitor reset state during P1.2 PowerDown state. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ RX reset state during P1.2 PowerDown state. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ TX reset state during P1.2 PowerDown state. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ LANE PLL reset state during P1.2 PowerDown state. */
+#else /* Word 0 - Little Endian */
+ uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ LANE PLL reset state during P1.2 PowerDown state. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ TX reset state during P1.2 PowerDown state. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ RX reset state during P1.2 PowerDown state. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Eye monitor reset state during P1.2 PowerDown state. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Rx Adapt state Pause (0) or Hard Reset (1) during P1.2 PowerDown state. */
+ uint64_t rxidledet_disable : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Set to disable Rx Electric Idle detection during P1.2 PowerDown state. */
+ uint64_t txcmnmode_disable : 1; /**< [ 34: 34](R/W) Reserved.
+ Internal:
+ Set to disable Tx Common Mode voltage during P1.2 PowerDown state. */
+ uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_rstp1s2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rstp1s2_bcfg bdk_gsernx_lanex_pcie_rstp1s2_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RSTP1S2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RSTP1S2_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002060ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RSTP1S2_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RSTP1S2_BCFG(a,b) bdk_gsernx_lanex_pcie_rstp1s2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RSTP1S2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RSTP1S2_BCFG(a,b) "GSERNX_LANEX_PCIE_RSTP1S2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RSTP1S2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RSTP1S2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RSTP1S2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rstp2_bcfg
+ *
+ * GSER Lane PCIe PowerDown P2 Reset States Control Register
+ * Controls the Reset states (Lane PLL, Tx, Rx, Adapt and Eye Monitor) corresponding to
+ * PCIe PowerDown state P2.
+ */
+union bdk_gsernx_lanex_pcie_rstp2_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_rstp2_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_35_63 : 29;
+ uint64_t txcmnmode_disable : 1; /**< [ 34: 34](R/W) Reserved.
+ Internal:
+ Set to disable Tx Common Mode voltage during P2 PowerDown state. */
+ uint64_t rxidledet_disable : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Set to disable Rx Electric Idle detection during P2 PowerDown state. */
+ uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Rx Adapt state Pause (0) or Hard Reset (1) during P2 PowerDown state. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Eye monitor reset state during P2 PowerDown state. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ RX reset state during P2 PowerDown state. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ TX reset state during P2 PowerDown state. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ LANE PLL reset state during P2 PowerDown state. */
+#else /* Word 0 - Little Endian */
+ uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ LANE PLL reset state during P2 PowerDown state. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ TX reset state during P2 PowerDown state. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ RX reset state during P2 PowerDown state. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Eye monitor reset state during P2 PowerDown state. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Rx Adapt state Pause (0) or Hard Reset (1) during P2 PowerDown state. */
+ uint64_t rxidledet_disable : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Set to disable Rx Electric Idle detection during P2 PowerDown state. */
+ uint64_t txcmnmode_disable : 1; /**< [ 34: 34](R/W) Reserved.
+ Internal:
+ Set to disable Tx Common Mode voltage during P2 PowerDown state. */
+ uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_rstp2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rstp2_bcfg bdk_gsernx_lanex_pcie_rstp2_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RSTP2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RSTP2_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002070ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RSTP2_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RSTP2_BCFG(a,b) bdk_gsernx_lanex_pcie_rstp2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RSTP2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RSTP2_BCFG(a,b) "GSERNX_LANEX_PCIE_RSTP2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RSTP2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RSTP2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RSTP2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rstrate_bcfg
+ *
+ * GSER Lane PCIe Lane Rate Change Reset States Control Register
+ * This register controls the reset states (Lane PLL, Tx, Rx, Adapt and Eye Monitor)
+ * required for PCIe lane rate change.
+ */
+union bdk_gsernx_lanex_pcie_rstrate_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_rstrate_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_35_63 : 29;
+ uint64_t txcmnmode_disable : 1; /**< [ 34: 34](R/W) Reserved.
+ Internal:
+ Set to disable Tx Common Mode voltage during lane rate change. */
+ uint64_t rxidledet_disable : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Set to disable Rx Electric Idle detection during lane rate change. */
+ uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Rx Adapt state Pause (0) or Hard Reset (1) during lane rate change. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Eye monitor reset state during lane rate change. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ RX reset state during lane rate change. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ TX reset state during lane rate change. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ LANE PLL reset state during lane rate change. */
+#else /* Word 0 - Little Endian */
+ uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ LANE PLL reset state during lane rate change. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ TX reset state during lane rate change. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ RX reset state during lane rate change. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Eye monitor reset state during lane rate change. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Rx Adapt state Pause (0) or Hard Reset (1) during lane rate change. */
+ uint64_t rxidledet_disable : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Set to disable Rx Electric Idle detection during lane rate change. */
+ uint64_t txcmnmode_disable : 1; /**< [ 34: 34](R/W) Reserved.
+ Internal:
+ Set to disable Tx Common Mode voltage during lane rate change. */
+ uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_rstrate_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rstrate_bcfg bdk_gsernx_lanex_pcie_rstrate_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RSTRATE_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RSTRATE_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002090ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RSTRATE_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RSTRATE_BCFG(a,b) bdk_gsernx_lanex_pcie_rstrate_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RSTRATE_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RSTRATE_BCFG(a,b) "GSERNX_LANEX_PCIE_RSTRATE_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RSTRATE_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RSTRATE_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RSTRATE_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rstshdn_bcfg
+ *
+ * GSER Lane PCIe Lane Shutdown Reset States Control Register
+ * This register controls the reset states (Lane PLL, Tx, Rx, Adapt and Eye Monitor)
+ * corresponding to PCIe Lane Shutdown state enabled by the assertion of TxCompliance &
+ * TxElecIdle.
+ */
+union bdk_gsernx_lanex_pcie_rstshdn_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_rstshdn_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_35_63 : 29;
+ uint64_t txcmnmode_disable : 1; /**< [ 34: 34](R/W) Reserved.
+ Internal:
+ Set to disable TX common mode voltage during lane shutdown state. */
+ uint64_t rxidledet_disable : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Set to disable Rx electric idle detection during lane shutdown state. */
+ uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Rx Adapt state Pause (0) or Hard Reset (1) during lane shutdown state. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Eye monitor reset state during lane shutdown state. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ RX reset state during lane shutdown state. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ TX reset state during lane shutdown state. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ LANE PLL reset state during lane shutdown state. */
+#else /* Word 0 - Little Endian */
+ uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ LANE PLL reset state during lane shutdown state. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ TX reset state during lane shutdown state. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ RX reset state during lane shutdown state. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Eye monitor reset state during lane shutdown state. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Rx Adapt state Pause (0) or Hard Reset (1) during lane shutdown state. */
+ uint64_t rxidledet_disable : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Set to disable Rx electric idle detection during lane shutdown state. */
+ uint64_t txcmnmode_disable : 1; /**< [ 34: 34](R/W) Reserved.
+ Internal:
+ Set to disable TX common mode voltage during lane shutdown state. */
+ uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_rstshdn_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rstshdn_bcfg bdk_gsernx_lanex_pcie_rstshdn_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RSTSHDN_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RSTSHDN_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002080ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RSTSHDN_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RSTSHDN_BCFG(a,b) bdk_gsernx_lanex_pcie_rstshdn_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RSTSHDN_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RSTSHDN_BCFG(a,b) "GSERNX_LANEX_PCIE_RSTSHDN_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RSTSHDN_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RSTSHDN_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RSTSHDN_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxeq1_1_bcfg
+ *
+ * GSER Lane PCIe Gen1 RX Equalizer Control Register 1
+ * Parameters controlling the custom receiver equalization during PCIe Gen1 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'PCIe'.
+ */
+union bdk_gsernx_lanex_pcie_rxeq1_1_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_rxeq1_1_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_43_63 : 21;
+ uint64_t pcie_g1_blwc_deadband : 12; /**< [ 42: 31](R/W) BLWC adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t pcie_g1_erc : 4; /**< [ 30: 27](R/W) Interpolator edge-rate control. This control is shared between all
+ interpolators in the lane. See GSERN()_LANE()_RX_ST_BCFG.ERC
+ for detailed information. */
+ uint64_t pcie_g1_c6_c15_limit_hi : 6;/**< [ 26: 21](R/W) C6 to C15 postcursor limit high. */
+ uint64_t pcie_g1_c6_c15_limit_lo : 6;/**< [ 20: 15](R/W) C6 to C15 postcursor limit low. */
+ uint64_t pcie_g1_ctle_lte_zero_ovrd_en : 1;/**< [ 14: 14](R/W) CTLE LTE zero frequency override enable.
+ By default, the override should be enabled; otherwise, CTLE_LTE_ZERO
+ will be set equal to CTLE_ZERO within the RX adaptation FSM. */
+ uint64_t pcie_g1_ctle_lte_zero_ovrd : 4;/**< [ 13: 10](R/W) CTLE LTE zero frequency override value. */
+ uint64_t pcie_g1_settle_wait : 4; /**< [ 9: 6](R/W) Number of clock cycles for the DFE adaptation to wait after changing the
+ adjusted C1 values before resuming accumulation. */
+ uint64_t pcie_g1_voter_sp_mask : 1; /**< [ 5: 5](R/W) Set to mask out "010" and "101" patterns in RX cdr voter.
+ GSERN()_LANE()_CDRFSM_BCFG[VOTER_SP_MASK] will be updated
+ by the hardware even when this bit drives the control. */
+ uint64_t pcie_g1_c1_q_adjust : 5; /**< [ 4: 0](R/W) Adjust value magnitude for the error slice in the Q path. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcie_g1_c1_q_adjust : 5; /**< [ 4: 0](R/W) Adjust value magnitude for the error slice in the Q path. */
+ uint64_t pcie_g1_voter_sp_mask : 1; /**< [ 5: 5](R/W) Set to mask out "010" and "101" patterns in RX cdr voter.
+ GSERN()_LANE()_CDRFSM_BCFG[VOTER_SP_MASK] will be updated
+ by the hardware even when this bit drives the control. */
+ uint64_t pcie_g1_settle_wait : 4; /**< [ 9: 6](R/W) Number of clock cycles for the DFE adaptation to wait after changing the
+ adjusted C1 values before resuming accumulation. */
+ uint64_t pcie_g1_ctle_lte_zero_ovrd : 4;/**< [ 13: 10](R/W) CTLE LTE zero frequency override value. */
+ uint64_t pcie_g1_ctle_lte_zero_ovrd_en : 1;/**< [ 14: 14](R/W) CTLE LTE zero frequency override enable.
+ By default, the override should be enabled; otherwise, CTLE_LTE_ZERO
+ will be set equal to CTLE_ZERO within the RX adaptation FSM. */
+ uint64_t pcie_g1_c6_c15_limit_lo : 6;/**< [ 20: 15](R/W) C6 to C15 postcursor limit low. */
+ uint64_t pcie_g1_c6_c15_limit_hi : 6;/**< [ 26: 21](R/W) C6 to C15 postcursor limit high. */
+ uint64_t pcie_g1_erc : 4; /**< [ 30: 27](R/W) Interpolator edge-rate control. This control is shared between all
+ interpolators in the lane. See GSERN()_LANE()_RX_ST_BCFG.ERC
+ for detailed information. */
+ uint64_t pcie_g1_blwc_deadband : 12; /**< [ 42: 31](R/W) BLWC adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t reserved_43_63 : 21;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_rxeq1_1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxeq1_1_bcfg bdk_gsernx_lanex_pcie_rxeq1_1_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ1_1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ1_1_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002300ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXEQ1_1_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXEQ1_1_BCFG(a,b) bdk_gsernx_lanex_pcie_rxeq1_1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXEQ1_1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXEQ1_1_BCFG(a,b) "GSERNX_LANEX_PCIE_RXEQ1_1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXEQ1_1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXEQ1_1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXEQ1_1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxeq1_2_bcfg
+ *
+ * GSER Lane PCIe Gen1 RX Equalizer Control Register 2
+ * Parameters controlling the custom receiver equalization during PCIe Gen1 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'PCIe'.
+ */
+union bdk_gsernx_lanex_pcie_rxeq1_2_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_rxeq1_2_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pcie_g1_afeos_subrate_final : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g1_afeos_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g1_subrate_final : 16; /**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g1_subrate_init : 16; /**< [ 15: 0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcie_g1_subrate_init : 16; /**< [ 15: 0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g1_subrate_final : 16; /**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g1_afeos_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g1_afeos_subrate_final : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_rxeq1_2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxeq1_2_bcfg bdk_gsernx_lanex_pcie_rxeq1_2_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ1_2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ1_2_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002310ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXEQ1_2_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXEQ1_2_BCFG(a,b) bdk_gsernx_lanex_pcie_rxeq1_2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXEQ1_2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXEQ1_2_BCFG(a,b) "GSERNX_LANEX_PCIE_RXEQ1_2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXEQ1_2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXEQ1_2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXEQ1_2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxeq1_3_bcfg
+ *
+ * GSER Lane PCIe Gen1 RX Equalizer Control Register 3
+ * Parameters controlling the custom receiver equalization during PCIe Gen1 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'PCIe'.
+ */
+union bdk_gsernx_lanex_pcie_rxeq1_3_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_rxeq1_3_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_62_63 : 2;
+ uint64_t pcie_g1_c5_limit_hi : 6; /**< [ 61: 56](R/W) C5 postcursor limit high. */
+ uint64_t pcie_g1_c4_limit_hi : 6; /**< [ 55: 50](R/W) C4 postcursor limit high. */
+ uint64_t pcie_g1_c3_limit_hi : 6; /**< [ 49: 44](R/W) C3 postcursor limit high. */
+ uint64_t pcie_g1_c2_limit_hi : 6; /**< [ 43: 38](R/W) C2 postcursor limit high. */
+ uint64_t pcie_g1_c1_limit_hi : 6; /**< [ 37: 32](R/W) C1 postcursor limit high. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t pcie_g1_c5_limit_lo : 6; /**< [ 29: 24](R/W) C5 postcursor limit low. */
+ uint64_t pcie_g1_c4_limit_lo : 6; /**< [ 23: 18](R/W) C4 postcursor limit low. */
+ uint64_t pcie_g1_c3_limit_lo : 6; /**< [ 17: 12](R/W) C3 postcursor limit low. */
+ uint64_t pcie_g1_c2_limit_lo : 6; /**< [ 11: 6](R/W) C2 postcursor limit low. */
+ uint64_t pcie_g1_c1_limit_lo : 6; /**< [ 5: 0](R/W) C1 postcursor limit low. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcie_g1_c1_limit_lo : 6; /**< [ 5: 0](R/W) C1 postcursor limit low. */
+ uint64_t pcie_g1_c2_limit_lo : 6; /**< [ 11: 6](R/W) C2 postcursor limit low. */
+ uint64_t pcie_g1_c3_limit_lo : 6; /**< [ 17: 12](R/W) C3 postcursor limit low. */
+ uint64_t pcie_g1_c4_limit_lo : 6; /**< [ 23: 18](R/W) C4 postcursor limit low. */
+ uint64_t pcie_g1_c5_limit_lo : 6; /**< [ 29: 24](R/W) C5 postcursor limit low. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t pcie_g1_c1_limit_hi : 6; /**< [ 37: 32](R/W) C1 postcursor limit high. */
+ uint64_t pcie_g1_c2_limit_hi : 6; /**< [ 43: 38](R/W) C2 postcursor limit high. */
+ uint64_t pcie_g1_c3_limit_hi : 6; /**< [ 49: 44](R/W) C3 postcursor limit high. */
+ uint64_t pcie_g1_c4_limit_hi : 6; /**< [ 55: 50](R/W) C4 postcursor limit high. */
+ uint64_t pcie_g1_c5_limit_hi : 6; /**< [ 61: 56](R/W) C5 postcursor limit high. */
+ uint64_t reserved_62_63 : 2;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_rxeq1_3_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxeq1_3_bcfg bdk_gsernx_lanex_pcie_rxeq1_3_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ1_3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ1_3_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002320ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXEQ1_3_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXEQ1_3_BCFG(a,b) bdk_gsernx_lanex_pcie_rxeq1_3_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXEQ1_3_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXEQ1_3_BCFG(a,b) "GSERNX_LANEX_PCIE_RXEQ1_3_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXEQ1_3_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXEQ1_3_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXEQ1_3_BCFG(a,b) (a),(b),-1,-1
+
/**
 * Register (RSL) gsern#_lane#_pcie_rxeq1_4_bcfg
 *
 * GSER Lane PCIe Gen1 RX Equalizer Control Register 4
 * Parameters controlling the custom receiver equalization during PCIe Gen1 operation.
 * These fields will drive the associated control signal when
 * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
 * is set to 'PCIe'.
 *
 * NOTE(review): the description references TX_CTRL_SEL although this is an RX
 * equalizer register; possibly RX_CTRL_SEL was intended -- confirm against the
 * hardware CSR specification.
 */
union bdk_gsernx_lanex_pcie_rxeq1_4_bcfg
{
    uint64_t u; /* Raw 64-bit register value. */
    struct bdk_gsernx_lanex_pcie_rxeq1_4_bcfg_s /* Bit-field view; member order depends on host endianness. */
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t pcie_g1_prevga_gn_subrate_fin : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
                                                                 if subrate gearshifting is enabled.
                                                                 Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled. */
        uint64_t pcie_g1_prevga_gn_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
                                                                 interval, if subrate gearshifting is enabled.
                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
        uint64_t pcie_g1_blwc_subrate_final : 16;/**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
                                                                 if subrate gearshifting is enabled.
                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
        uint64_t pcie_g1_blwc_subrate_init : 16;/**< [ 15:  0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
                                                                 interval, if subrate gearshifting is enabled.
                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
#else /* Word 0 - Little Endian */
        uint64_t pcie_g1_blwc_subrate_init : 16;/**< [ 15:  0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
                                                                 interval, if subrate gearshifting is enabled.
                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
        uint64_t pcie_g1_blwc_subrate_final : 16;/**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
                                                                 if subrate gearshifting is enabled.
                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
        uint64_t pcie_g1_prevga_gn_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
                                                                 interval, if subrate gearshifting is enabled.
                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
        uint64_t pcie_g1_prevga_gn_subrate_fin : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
                                                                 if subrate gearshifting is enabled.
                                                                 Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_pcie_rxeq1_4_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_pcie_rxeq1_4_bcfg bdk_gsernx_lanex_pcie_rxeq1_4_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ1_4_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ1_4_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002330ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXEQ1_4_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXEQ1_4_BCFG(a,b) bdk_gsernx_lanex_pcie_rxeq1_4_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXEQ1_4_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXEQ1_4_BCFG(a,b) "GSERNX_LANEX_PCIE_RXEQ1_4_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXEQ1_4_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXEQ1_4_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXEQ1_4_BCFG(a,b) (a),(b),-1,-1
+
/**
 * Register (RSL) gsern#_lane#_pcie_rxeq2_1_bcfg
 *
 * GSER Lane PCIe Gen2 RX Equalizer Control Register 1
 * Parameters controlling the custom receiver equalization during PCIe Gen2 operation.
 * These fields will drive the associated control signal when
 * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
 * is set to 'PCIe'.
 *
 * NOTE(review): the description references TX_CTRL_SEL although this is an RX
 * equalizer register; possibly RX_CTRL_SEL was intended -- confirm against the
 * hardware CSR specification.
 */
union bdk_gsernx_lanex_pcie_rxeq2_1_bcfg
{
    uint64_t u; /* Raw 64-bit register value. */
    struct bdk_gsernx_lanex_pcie_rxeq2_1_bcfg_s /* Bit-field view; member order depends on host endianness. */
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_43_63        : 21;
        uint64_t pcie_g2_blwc_deadband : 12; /**< [ 42: 31](R/W) BLWC adaptation deadband settings.
                                                                 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
        uint64_t pcie_g2_erc           : 4;  /**< [ 30: 27](R/W) Interpolator edge-rate control. This control is shared between all
                                                                 interpolators in the lane. See GSERN()_LANE()_RX_ST_BCFG.ERC
                                                                 for detailed information. */
        uint64_t pcie_g2_c6_c15_limit_hi : 6;/**< [ 26: 21](R/W) C6 to C15 postcursor limit high. */
        uint64_t pcie_g2_c6_c15_limit_lo : 6;/**< [ 20: 15](R/W) C6 to C15 postcursor limit low. */
        uint64_t pcie_g2_ctle_lte_zero_ovrd_en : 1;/**< [ 14: 14](R/W) CTLE LTE zero frequency override enable.
                                                                 By default, the override should be enabled; otherwise, CTLE_LTE_ZERO
                                                                 will be set equal to CTLE_ZERO within the RX adaptation FSM. */
        uint64_t pcie_g2_ctle_lte_zero_ovrd : 4;/**< [ 13: 10](R/W) CTLE LTE zero frequency override value. */
        uint64_t pcie_g2_settle_wait   : 4;  /**< [  9:  6](R/W) Number of clock cycles for the DFE adaptation to wait after changing the
                                                                 adjusted C1 values before resuming accumulation. */
        uint64_t pcie_g2_voter_sp_mask : 1;  /**< [  5:  5](R/W) Set to mask out "010" and "101" patterns in RX cdr voter.
                                                                 GSERN()_LANE()_CDRFSM_BCFG[VOTER_SP_MASK] will be updated
                                                                 by the hardware even when this bit drives the control. */
        uint64_t pcie_g2_c1_q_adjust   : 5;  /**< [  4:  0](R/W) Adjust value magnitude for the error slice in the Q path. */
#else /* Word 0 - Little Endian */
        uint64_t pcie_g2_c1_q_adjust   : 5;  /**< [  4:  0](R/W) Adjust value magnitude for the error slice in the Q path. */
        uint64_t pcie_g2_voter_sp_mask : 1;  /**< [  5:  5](R/W) Set to mask out "010" and "101" patterns in RX cdr voter.
                                                                 GSERN()_LANE()_CDRFSM_BCFG[VOTER_SP_MASK] will be updated
                                                                 by the hardware even when this bit drives the control. */
        uint64_t pcie_g2_settle_wait   : 4;  /**< [  9:  6](R/W) Number of clock cycles for the DFE adaptation to wait after changing the
                                                                 adjusted C1 values before resuming accumulation. */
        uint64_t pcie_g2_ctle_lte_zero_ovrd : 4;/**< [ 13: 10](R/W) CTLE LTE zero frequency override value. */
        uint64_t pcie_g2_ctle_lte_zero_ovrd_en : 1;/**< [ 14: 14](R/W) CTLE LTE zero frequency override enable.
                                                                 By default, the override should be enabled; otherwise, CTLE_LTE_ZERO
                                                                 will be set equal to CTLE_ZERO within the RX adaptation FSM. */
        uint64_t pcie_g2_c6_c15_limit_lo : 6;/**< [ 20: 15](R/W) C6 to C15 postcursor limit low. */
        uint64_t pcie_g2_c6_c15_limit_hi : 6;/**< [ 26: 21](R/W) C6 to C15 postcursor limit high. */
        uint64_t pcie_g2_erc           : 4;  /**< [ 30: 27](R/W) Interpolator edge-rate control. This control is shared between all
                                                                 interpolators in the lane. See GSERN()_LANE()_RX_ST_BCFG.ERC
                                                                 for detailed information. */
        uint64_t pcie_g2_blwc_deadband : 12; /**< [ 42: 31](R/W) BLWC adaptation deadband settings.
                                                                 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
        uint64_t reserved_43_63        : 21;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_pcie_rxeq2_1_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_pcie_rxeq2_1_bcfg bdk_gsernx_lanex_pcie_rxeq2_1_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ2_1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ2_1_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002340ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXEQ2_1_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXEQ2_1_BCFG(a,b) bdk_gsernx_lanex_pcie_rxeq2_1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXEQ2_1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXEQ2_1_BCFG(a,b) "GSERNX_LANEX_PCIE_RXEQ2_1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXEQ2_1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXEQ2_1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXEQ2_1_BCFG(a,b) (a),(b),-1,-1
+
/**
 * Register (RSL) gsern#_lane#_pcie_rxeq2_2_bcfg
 *
 * GSER Lane PCIe Gen2 RX Equalizer Control Register 2
 * Parameters controlling the custom receiver equalization during PCIe Gen2 operation.
 * These fields will drive the associated control signal when
 * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
 * is set to 'PCIe'.
 *
 * NOTE(review): the description references TX_CTRL_SEL although this is an RX
 * equalizer register; possibly RX_CTRL_SEL was intended -- confirm against the
 * hardware CSR specification.
 */
union bdk_gsernx_lanex_pcie_rxeq2_2_bcfg
{
    uint64_t u; /* Raw 64-bit register value. */
    struct bdk_gsernx_lanex_pcie_rxeq2_2_bcfg_s /* Bit-field view; member order depends on host endianness. */
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t pcie_g2_afeos_subrate_final : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
                                                                 if subrate gearshifting is enabled.
                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
        uint64_t pcie_g2_afeos_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
                                                                 interval, if subrate gearshifting is enabled.
                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
        uint64_t pcie_g2_subrate_final : 16; /**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
                                                                 if subrate gearshifting is enabled.
                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
        uint64_t pcie_g2_subrate_init  : 16; /**< [ 15:  0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
                                                                 interval, if subrate gearshifting is enabled.
                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
#else /* Word 0 - Little Endian */
        uint64_t pcie_g2_subrate_init  : 16; /**< [ 15:  0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
                                                                 interval, if subrate gearshifting is enabled.
                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
        uint64_t pcie_g2_subrate_final : 16; /**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
                                                                 if subrate gearshifting is enabled.
                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
        uint64_t pcie_g2_afeos_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
                                                                 interval, if subrate gearshifting is enabled.
                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
        uint64_t pcie_g2_afeos_subrate_final : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
                                                                 if subrate gearshifting is enabled.
                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_pcie_rxeq2_2_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_pcie_rxeq2_2_bcfg bdk_gsernx_lanex_pcie_rxeq2_2_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ2_2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ2_2_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002350ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXEQ2_2_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXEQ2_2_BCFG(a,b) bdk_gsernx_lanex_pcie_rxeq2_2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXEQ2_2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXEQ2_2_BCFG(a,b) "GSERNX_LANEX_PCIE_RXEQ2_2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXEQ2_2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXEQ2_2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXEQ2_2_BCFG(a,b) (a),(b),-1,-1
+
/**
 * Register (RSL) gsern#_lane#_pcie_rxeq2_3_bcfg
 *
 * GSER Lane PCIe Gen2 RX Equalizer Control Register 3
 * Parameters controlling the custom receiver equalization during PCIe Gen2 operation.
 * These fields will drive the associated control signal when
 * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
 * is set to 'PCIe'.
 *
 * NOTE(review): the description references TX_CTRL_SEL although this is an RX
 * equalizer register; possibly RX_CTRL_SEL was intended -- confirm against the
 * hardware CSR specification.
 */
union bdk_gsernx_lanex_pcie_rxeq2_3_bcfg
{
    uint64_t u; /* Raw 64-bit register value. */
    struct bdk_gsernx_lanex_pcie_rxeq2_3_bcfg_s /* Bit-field view; member order depends on host endianness. */
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_62_63        : 2;
        uint64_t pcie_g2_c5_limit_hi   : 6;  /**< [ 61: 56](R/W) C5 postcursor limit high. */
        uint64_t pcie_g2_c4_limit_hi   : 6;  /**< [ 55: 50](R/W) C4 postcursor limit high. */
        uint64_t pcie_g2_c3_limit_hi   : 6;  /**< [ 49: 44](R/W) C3 postcursor limit high. */
        uint64_t pcie_g2_c2_limit_hi   : 6;  /**< [ 43: 38](R/W) C2 postcursor limit high. */
        uint64_t pcie_g2_c1_limit_hi   : 6;  /**< [ 37: 32](R/W) C1 postcursor limit high. */
        uint64_t reserved_30_31        : 2;
        uint64_t pcie_g2_c5_limit_lo   : 6;  /**< [ 29: 24](R/W) C5 postcursor limit low. */
        uint64_t pcie_g2_c4_limit_lo   : 6;  /**< [ 23: 18](R/W) C4 postcursor limit low. */
        uint64_t pcie_g2_c3_limit_lo   : 6;  /**< [ 17: 12](R/W) C3 postcursor limit low. */
        uint64_t pcie_g2_c2_limit_lo   : 6;  /**< [ 11:  6](R/W) C2 postcursor limit low. */
        uint64_t pcie_g2_c1_limit_lo   : 6;  /**< [  5:  0](R/W) C1 postcursor limit low. */
#else /* Word 0 - Little Endian */
        uint64_t pcie_g2_c1_limit_lo   : 6;  /**< [  5:  0](R/W) C1 postcursor limit low. */
        uint64_t pcie_g2_c2_limit_lo   : 6;  /**< [ 11:  6](R/W) C2 postcursor limit low. */
        uint64_t pcie_g2_c3_limit_lo   : 6;  /**< [ 17: 12](R/W) C3 postcursor limit low. */
        uint64_t pcie_g2_c4_limit_lo   : 6;  /**< [ 23: 18](R/W) C4 postcursor limit low. */
        uint64_t pcie_g2_c5_limit_lo   : 6;  /**< [ 29: 24](R/W) C5 postcursor limit low. */
        uint64_t reserved_30_31        : 2;
        uint64_t pcie_g2_c1_limit_hi   : 6;  /**< [ 37: 32](R/W) C1 postcursor limit high. */
        uint64_t pcie_g2_c2_limit_hi   : 6;  /**< [ 43: 38](R/W) C2 postcursor limit high. */
        uint64_t pcie_g2_c3_limit_hi   : 6;  /**< [ 49: 44](R/W) C3 postcursor limit high. */
        uint64_t pcie_g2_c4_limit_hi   : 6;  /**< [ 55: 50](R/W) C4 postcursor limit high. */
        uint64_t pcie_g2_c5_limit_hi   : 6;  /**< [ 61: 56](R/W) C5 postcursor limit high. */
        uint64_t reserved_62_63        : 2;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_pcie_rxeq2_3_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_pcie_rxeq2_3_bcfg bdk_gsernx_lanex_pcie_rxeq2_3_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ2_3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ2_3_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002360ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXEQ2_3_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXEQ2_3_BCFG(a,b) bdk_gsernx_lanex_pcie_rxeq2_3_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXEQ2_3_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXEQ2_3_BCFG(a,b) "GSERNX_LANEX_PCIE_RXEQ2_3_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXEQ2_3_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXEQ2_3_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXEQ2_3_BCFG(a,b) (a),(b),-1,-1
+
/**
 * Register (RSL) gsern#_lane#_pcie_rxeq2_4_bcfg
 *
 * GSER Lane PCIe Gen2 RX Equalizer Control Register 4
 * Parameters controlling the custom receiver equalization during PCIe Gen2 operation.
 * These fields will drive the associated control signal when
 * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
 * is set to 'PCIe'.
 *
 * NOTE(review): the description references TX_CTRL_SEL although this is an RX
 * equalizer register; possibly RX_CTRL_SEL was intended -- confirm against the
 * hardware CSR specification.
 */
union bdk_gsernx_lanex_pcie_rxeq2_4_bcfg
{
    uint64_t u; /* Raw 64-bit register value. */
    struct bdk_gsernx_lanex_pcie_rxeq2_4_bcfg_s /* Bit-field view; member order depends on host endianness. */
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t pcie_g2_prevga_gn_subrate_fin : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
                                                                 if subrate gearshifting is enabled.
                                                                 Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled. */
        uint64_t pcie_g2_prevga_gn_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
                                                                 interval, if subrate gearshifting is enabled.
                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
        uint64_t pcie_g2_blwc_subrate_final : 16;/**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
                                                                 if subrate gearshifting is enabled.
                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
        uint64_t pcie_g2_blwc_subrate_init : 16;/**< [ 15:  0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
                                                                 interval, if subrate gearshifting is enabled.
                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
#else /* Word 0 - Little Endian */
        uint64_t pcie_g2_blwc_subrate_init : 16;/**< [ 15:  0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
                                                                 interval, if subrate gearshifting is enabled.
                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
        uint64_t pcie_g2_blwc_subrate_final : 16;/**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
                                                                 if subrate gearshifting is enabled.
                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
        uint64_t pcie_g2_prevga_gn_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
                                                                 interval, if subrate gearshifting is enabled.
                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
        uint64_t pcie_g2_prevga_gn_subrate_fin : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
                                                                 if subrate gearshifting is enabled.
                                                                 Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_pcie_rxeq2_4_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_pcie_rxeq2_4_bcfg bdk_gsernx_lanex_pcie_rxeq2_4_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ2_4_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ2_4_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002370ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXEQ2_4_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXEQ2_4_BCFG(a,b) bdk_gsernx_lanex_pcie_rxeq2_4_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXEQ2_4_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXEQ2_4_BCFG(a,b) "GSERNX_LANEX_PCIE_RXEQ2_4_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXEQ2_4_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXEQ2_4_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXEQ2_4_BCFG(a,b) (a),(b),-1,-1
+
/**
 * Register (RSL) gsern#_lane#_pcie_rxeq3_1_bcfg
 *
 * GSER Lane PCIe Gen3 RX Equalizer Control Register 1
 * Parameters controlling the custom receiver equalization during PCIe Gen3 operation.
 * These fields will drive the associated control signal when
 * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
 * is set to 'PCIe'.
 *
 * NOTE(review): the description references TX_CTRL_SEL although this is an RX
 * equalizer register; possibly RX_CTRL_SEL was intended -- confirm against the
 * hardware CSR specification.
 */
union bdk_gsernx_lanex_pcie_rxeq3_1_bcfg
{
    uint64_t u; /* Raw 64-bit register value. */
    struct bdk_gsernx_lanex_pcie_rxeq3_1_bcfg_s /* Bit-field view; member order depends on host endianness. */
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_43_63        : 21;
        uint64_t pcie_g3_blwc_deadband : 12; /**< [ 42: 31](R/W) BLWC adaptation deadband settings.
                                                                 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
        uint64_t pcie_g3_erc           : 4;  /**< [ 30: 27](R/W) Interpolator edge-rate control. This control is shared between all
                                                                 interpolators in the lane. See GSERN()_LANE()_RX_ST_BCFG.ERC
                                                                 for detailed information. */
        uint64_t pcie_g3_c6_c15_limit_hi : 6;/**< [ 26: 21](R/W) C6 to C15 postcursor limit high. */
        uint64_t pcie_g3_c6_c15_limit_lo : 6;/**< [ 20: 15](R/W) C6 to C15 postcursor limit low. */
        uint64_t pcie_g3_ctle_lte_zero_ovrd_en : 1;/**< [ 14: 14](R/W) CTLE LTE zero frequency override enable.
                                                                 By default, the override should be enabled; otherwise, CTLE_LTE_ZERO
                                                                 will be set equal to CTLE_ZERO within the RX adaptation FSM. */
        uint64_t pcie_g3_ctle_lte_zero_ovrd : 4;/**< [ 13: 10](R/W) CTLE LTE zero frequency override value. */
        uint64_t pcie_g3_settle_wait   : 4;  /**< [  9:  6](R/W) Number of clock cycles for the DFE adaptation to wait after changing the
                                                                 adjusted C1 values before resuming accumulation. */
        uint64_t pcie_g3_voter_sp_mask : 1;  /**< [  5:  5](R/W) Set to mask out "010" and "101" patterns in RX cdr voter.
                                                                 GSERN()_LANE()_CDRFSM_BCFG[VOTER_SP_MASK] will be updated
                                                                 by the hardware even when this bit drives the control. */
        uint64_t pcie_g3_c1_q_adjust   : 5;  /**< [  4:  0](R/W) Adjust value magnitude for the error slice in the Q path. */
#else /* Word 0 - Little Endian */
        uint64_t pcie_g3_c1_q_adjust   : 5;  /**< [  4:  0](R/W) Adjust value magnitude for the error slice in the Q path. */
        uint64_t pcie_g3_voter_sp_mask : 1;  /**< [  5:  5](R/W) Set to mask out "010" and "101" patterns in RX cdr voter.
                                                                 GSERN()_LANE()_CDRFSM_BCFG[VOTER_SP_MASK] will be updated
                                                                 by the hardware even when this bit drives the control. */
        uint64_t pcie_g3_settle_wait   : 4;  /**< [  9:  6](R/W) Number of clock cycles for the DFE adaptation to wait after changing the
                                                                 adjusted C1 values before resuming accumulation. */
        uint64_t pcie_g3_ctle_lte_zero_ovrd : 4;/**< [ 13: 10](R/W) CTLE LTE zero frequency override value. */
        uint64_t pcie_g3_ctle_lte_zero_ovrd_en : 1;/**< [ 14: 14](R/W) CTLE LTE zero frequency override enable.
                                                                 By default, the override should be enabled; otherwise, CTLE_LTE_ZERO
                                                                 will be set equal to CTLE_ZERO within the RX adaptation FSM. */
        uint64_t pcie_g3_c6_c15_limit_lo : 6;/**< [ 20: 15](R/W) C6 to C15 postcursor limit low. */
        uint64_t pcie_g3_c6_c15_limit_hi : 6;/**< [ 26: 21](R/W) C6 to C15 postcursor limit high. */
        uint64_t pcie_g3_erc           : 4;  /**< [ 30: 27](R/W) Interpolator edge-rate control. This control is shared between all
                                                                 interpolators in the lane. See GSERN()_LANE()_RX_ST_BCFG.ERC
                                                                 for detailed information. */
        uint64_t pcie_g3_blwc_deadband : 12; /**< [ 42: 31](R/W) BLWC adaptation deadband settings.
                                                                 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
        uint64_t reserved_43_63        : 21;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_pcie_rxeq3_1_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_pcie_rxeq3_1_bcfg bdk_gsernx_lanex_pcie_rxeq3_1_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ3_1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ3_1_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002380ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXEQ3_1_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXEQ3_1_BCFG(a,b) bdk_gsernx_lanex_pcie_rxeq3_1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXEQ3_1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXEQ3_1_BCFG(a,b) "GSERNX_LANEX_PCIE_RXEQ3_1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXEQ3_1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXEQ3_1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXEQ3_1_BCFG(a,b) (a),(b),-1,-1
+
/**
 * Register (RSL) gsern#_lane#_pcie_rxeq3_2_bcfg
 *
 * GSER Lane PCIe Gen3 RX Equalizer Control Register 2
 * Parameters controlling the custom receiver equalization during PCIe Gen3 operation.
 * These fields will drive the associated control signal when
 * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
 * is set to 'PCIe'.
 *
 * NOTE(review): the description references TX_CTRL_SEL although this is an RX
 * equalizer register; possibly RX_CTRL_SEL was intended -- confirm against the
 * hardware CSR specification.
 */
union bdk_gsernx_lanex_pcie_rxeq3_2_bcfg
{
    uint64_t u; /* Raw 64-bit register value. */
    struct bdk_gsernx_lanex_pcie_rxeq3_2_bcfg_s /* Bit-field view; member order depends on host endianness. */
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t pcie_g3_afeos_subrate_final : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
                                                                 if subrate gearshifting is enabled.
                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
        uint64_t pcie_g3_afeos_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
                                                                 interval, if subrate gearshifting is enabled.
                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
        uint64_t pcie_g3_subrate_final : 16; /**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
                                                                 if subrate gearshifting is enabled.
                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
        uint64_t pcie_g3_subrate_init  : 16; /**< [ 15:  0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
                                                                 interval, if subrate gearshifting is enabled.
                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
#else /* Word 0 - Little Endian */
        uint64_t pcie_g3_subrate_init  : 16; /**< [ 15:  0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
                                                                 interval, if subrate gearshifting is enabled.
                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
        uint64_t pcie_g3_subrate_final : 16; /**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
                                                                 if subrate gearshifting is enabled.
                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
        uint64_t pcie_g3_afeos_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
                                                                 interval, if subrate gearshifting is enabled.
                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
        uint64_t pcie_g3_afeos_subrate_final : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
                                                                 if subrate gearshifting is enabled.
                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_pcie_rxeq3_2_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_pcie_rxeq3_2_bcfg bdk_gsernx_lanex_pcie_rxeq3_2_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ3_2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ3_2_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002390ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXEQ3_2_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXEQ3_2_BCFG(a,b) bdk_gsernx_lanex_pcie_rxeq3_2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXEQ3_2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXEQ3_2_BCFG(a,b) "GSERNX_LANEX_PCIE_RXEQ3_2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXEQ3_2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXEQ3_2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXEQ3_2_BCFG(a,b) (a),(b),-1,-1
+
/**
 * Register (RSL) gsern#_lane#_pcie_rxeq3_3_bcfg
 *
 * GSER Lane PCIe Gen3 RX Equalizer Control Register 3
 * Parameters controlling the custom receiver equalization during PCIe Gen3 operation.
 * These fields will drive the associated control signal when
 * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
 * is set to 'PCIe'.
 *
 * NOTE(review): the description references TX_CTRL_SEL although this is an RX
 * equalizer register; possibly RX_CTRL_SEL was intended -- confirm against the
 * hardware CSR specification.
 */
union bdk_gsernx_lanex_pcie_rxeq3_3_bcfg
{
    uint64_t u; /* Raw 64-bit register value. */
    struct bdk_gsernx_lanex_pcie_rxeq3_3_bcfg_s /* Bit-field view; member order depends on host endianness. */
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_62_63        : 2;
        uint64_t pcie_g3_c5_limit_hi   : 6;  /**< [ 61: 56](R/W) C5 postcursor limit high. */
        uint64_t pcie_g3_c4_limit_hi   : 6;  /**< [ 55: 50](R/W) C4 postcursor limit high. */
        uint64_t pcie_g3_c3_limit_hi   : 6;  /**< [ 49: 44](R/W) C3 postcursor limit high. */
        uint64_t pcie_g3_c2_limit_hi   : 6;  /**< [ 43: 38](R/W) C2 postcursor limit high. */
        uint64_t pcie_g3_c1_limit_hi   : 6;  /**< [ 37: 32](R/W) C1 postcursor limit high. */
        uint64_t reserved_30_31        : 2;
        uint64_t pcie_g3_c5_limit_lo   : 6;  /**< [ 29: 24](R/W) C5 postcursor limit low. */
        uint64_t pcie_g3_c4_limit_lo   : 6;  /**< [ 23: 18](R/W) C4 postcursor limit low. */
        uint64_t pcie_g3_c3_limit_lo   : 6;  /**< [ 17: 12](R/W) C3 postcursor limit low. */
        uint64_t pcie_g3_c2_limit_lo   : 6;  /**< [ 11:  6](R/W) C2 postcursor limit low. */
        uint64_t pcie_g3_c1_limit_lo   : 6;  /**< [  5:  0](R/W) C1 postcursor limit low. */
#else /* Word 0 - Little Endian */
        uint64_t pcie_g3_c1_limit_lo   : 6;  /**< [  5:  0](R/W) C1 postcursor limit low. */
        uint64_t pcie_g3_c2_limit_lo   : 6;  /**< [ 11:  6](R/W) C2 postcursor limit low. */
        uint64_t pcie_g3_c3_limit_lo   : 6;  /**< [ 17: 12](R/W) C3 postcursor limit low. */
        uint64_t pcie_g3_c4_limit_lo   : 6;  /**< [ 23: 18](R/W) C4 postcursor limit low. */
        uint64_t pcie_g3_c5_limit_lo   : 6;  /**< [ 29: 24](R/W) C5 postcursor limit low. */
        uint64_t reserved_30_31        : 2;
        uint64_t pcie_g3_c1_limit_hi   : 6;  /**< [ 37: 32](R/W) C1 postcursor limit high. */
        uint64_t pcie_g3_c2_limit_hi   : 6;  /**< [ 43: 38](R/W) C2 postcursor limit high. */
        uint64_t pcie_g3_c3_limit_hi   : 6;  /**< [ 49: 44](R/W) C3 postcursor limit high. */
        uint64_t pcie_g3_c4_limit_hi   : 6;  /**< [ 55: 50](R/W) C4 postcursor limit high. */
        uint64_t pcie_g3_c5_limit_hi   : 6;  /**< [ 61: 56](R/W) C5 postcursor limit high. */
        uint64_t reserved_62_63        : 2;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_pcie_rxeq3_3_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_pcie_rxeq3_3_bcfg bdk_gsernx_lanex_pcie_rxeq3_3_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ3_3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ3_3_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900023a0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXEQ3_3_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXEQ3_3_BCFG(a,b) bdk_gsernx_lanex_pcie_rxeq3_3_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXEQ3_3_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXEQ3_3_BCFG(a,b) "GSERNX_LANEX_PCIE_RXEQ3_3_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXEQ3_3_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXEQ3_3_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXEQ3_3_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxeq3_4_bcfg
+ *
+ * GSER Lane PCIe Gen3 RX Equalizer Control Register 4
+ * Parameters controlling the custom receiver equalization during PCIe Gen3 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'PCIe'.
+ *
+ * NOTE(review): [TX_CTRL_SEL] is cited here for an RX equalizer register; this text
+ * looks copied from the TX variant -- confirm the actual select field against the HRM.
+ */
+union bdk_gsernx_lanex_pcie_rxeq3_4_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_rxeq3_4_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t pcie_g3_prevga_gn_subrate_fin : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+                                                                 if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled. */
+        uint64_t pcie_g3_prevga_gn_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+                                                                 interval, if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled. */
+        uint64_t pcie_g3_blwc_subrate_final : 16;/**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+                                                                 if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t pcie_g3_blwc_subrate_init : 16;/**< [ 15:  0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+                                                                 interval, if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+#else /* Word 0 - Little Endian */
+        uint64_t pcie_g3_blwc_subrate_init : 16;/**< [ 15:  0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+                                                                 interval, if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t pcie_g3_blwc_subrate_final : 16;/**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+                                                                 if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t pcie_g3_prevga_gn_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+                                                                 interval, if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled. */
+        uint64_t pcie_g3_prevga_gn_subrate_fin : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+                                                                 if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_rxeq3_4_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxeq3_4_bcfg bdk_gsernx_lanex_pcie_rxeq3_4_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ3_4_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ3_4_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e0900023b0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXEQ3_4_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXEQ3_4_BCFG(a,b) bdk_gsernx_lanex_pcie_rxeq3_4_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXEQ3_4_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXEQ3_4_BCFG(a,b) "GSERNX_LANEX_PCIE_RXEQ3_4_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXEQ3_4_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXEQ3_4_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXEQ3_4_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxeq4_1_bcfg
+ *
+ * GSER Lane PCIe Gen4 RX Equalizer Control Register 1
+ * Parameters controlling the custom receiver equalization during PCIe Gen4 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'PCIe'.
+ *
+ * NOTE(review): [TX_CTRL_SEL] is cited here for an RX equalizer register; this text
+ * looks copied from the TX variant -- confirm the actual select field against the HRM.
+ */
+union bdk_gsernx_lanex_pcie_rxeq4_1_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_rxeq4_1_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_43_63        : 21;
+        uint64_t pcie_g4_blwc_deadband : 12; /**< [ 42: 31](R/W) BLWC adaptation deadband settings.
+                                                                 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+        uint64_t pcie_g4_erc           : 4;  /**< [ 30: 27](R/W) Interpolator edge-rate control. This control is shared between all
+                                                                 interpolators in the lane. See GSERN()_LANE()_RX_ST_BCFG[ERC]
+                                                                 for detailed information. */
+        uint64_t pcie_g4_c6_c15_limit_hi : 6;/**< [ 26: 21](R/W) C6 to C15 postcursor limit high. */
+        uint64_t pcie_g4_c6_c15_limit_lo : 6;/**< [ 20: 15](R/W) C6 to C15 postcursor limit low. */
+        uint64_t pcie_g4_ctle_lte_zero_ovrd_en : 1;/**< [ 14: 14](R/W) CTLE LTE zero frequency override enable.
+                                                                 By default, the override should be enabled; otherwise, CTLE_LTE_ZERO
+                                                                 will be set equal to CTLE_ZERO within the RX adaptation FSM. */
+        uint64_t pcie_g4_ctle_lte_zero_ovrd : 4;/**< [ 13: 10](R/W) CTLE LTE zero frequency override value. */
+        uint64_t pcie_g4_settle_wait   : 4;  /**< [  9:  6](R/W) Number of clock cycles for the DFE adaptation to wait after changing the
+                                                                 adjusted C1 values before resuming accumulation. */
+        uint64_t pcie_g4_voter_sp_mask : 1;  /**< [  5:  5](R/W) Set to mask out "010" and "101" patterns in RX cdr voter.
+                                                                 GSERN()_LANE()_CDRFSM_BCFG[VOTER_SP_MASK] will be updated
+                                                                 by the hardware even when this bit drives the control. */
+        uint64_t pcie_g4_c1_q_adjust   : 5;  /**< [  4:  0](R/W) Adjust value magnitude for the error slice in the Q path. */
+#else /* Word 0 - Little Endian */
+        uint64_t pcie_g4_c1_q_adjust   : 5;  /**< [  4:  0](R/W) Adjust value magnitude for the error slice in the Q path. */
+        uint64_t pcie_g4_voter_sp_mask : 1;  /**< [  5:  5](R/W) Set to mask out "010" and "101" patterns in RX cdr voter.
+                                                                 GSERN()_LANE()_CDRFSM_BCFG[VOTER_SP_MASK] will be updated
+                                                                 by the hardware even when this bit drives the control. */
+        uint64_t pcie_g4_settle_wait   : 4;  /**< [  9:  6](R/W) Number of clock cycles for the DFE adaptation to wait after changing the
+                                                                 adjusted C1 values before resuming accumulation. */
+        uint64_t pcie_g4_ctle_lte_zero_ovrd : 4;/**< [ 13: 10](R/W) CTLE LTE zero frequency override value. */
+        uint64_t pcie_g4_ctle_lte_zero_ovrd_en : 1;/**< [ 14: 14](R/W) CTLE LTE zero frequency override enable.
+                                                                 By default, the override should be enabled; otherwise, CTLE_LTE_ZERO
+                                                                 will be set equal to CTLE_ZERO within the RX adaptation FSM. */
+        uint64_t pcie_g4_c6_c15_limit_lo : 6;/**< [ 20: 15](R/W) C6 to C15 postcursor limit low. */
+        uint64_t pcie_g4_c6_c15_limit_hi : 6;/**< [ 26: 21](R/W) C6 to C15 postcursor limit high. */
+        uint64_t pcie_g4_erc           : 4;  /**< [ 30: 27](R/W) Interpolator edge-rate control. This control is shared between all
+                                                                 interpolators in the lane. See GSERN()_LANE()_RX_ST_BCFG[ERC]
+                                                                 for detailed information. */
+        uint64_t pcie_g4_blwc_deadband : 12; /**< [ 42: 31](R/W) BLWC adaptation deadband settings.
+                                                                 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+        uint64_t reserved_43_63        : 21;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_rxeq4_1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxeq4_1_bcfg bdk_gsernx_lanex_pcie_rxeq4_1_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ4_1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ4_1_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e0900023c0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXEQ4_1_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXEQ4_1_BCFG(a,b) bdk_gsernx_lanex_pcie_rxeq4_1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXEQ4_1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXEQ4_1_BCFG(a,b) "GSERNX_LANEX_PCIE_RXEQ4_1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXEQ4_1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXEQ4_1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXEQ4_1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxeq4_2_bcfg
+ *
+ * GSER Lane PCIe Gen4 RX Equalizer Control Register 2
+ * Parameters controlling the custom receiver equalization during PCIe Gen4 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'PCIe'.
+ *
+ * NOTE(review): [TX_CTRL_SEL] is cited here for an RX equalizer register; this text
+ * looks copied from the TX variant -- confirm the actual select field against the HRM.
+ */
+union bdk_gsernx_lanex_pcie_rxeq4_2_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_rxeq4_2_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t pcie_g4_afeos_subrate_final : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+                                                                 if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t pcie_g4_afeos_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+                                                                 interval, if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t pcie_g4_subrate_final : 16; /**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+                                                                 if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t pcie_g4_subrate_init  : 16; /**< [ 15:  0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+                                                                 interval, if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+#else /* Word 0 - Little Endian */
+        uint64_t pcie_g4_subrate_init  : 16; /**< [ 15:  0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+                                                                 interval, if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t pcie_g4_subrate_final : 16; /**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+                                                                 if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t pcie_g4_afeos_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+                                                                 interval, if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t pcie_g4_afeos_subrate_final : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+                                                                 if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_rxeq4_2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxeq4_2_bcfg bdk_gsernx_lanex_pcie_rxeq4_2_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ4_2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ4_2_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e0900023d0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXEQ4_2_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXEQ4_2_BCFG(a,b) bdk_gsernx_lanex_pcie_rxeq4_2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXEQ4_2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXEQ4_2_BCFG(a,b) "GSERNX_LANEX_PCIE_RXEQ4_2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXEQ4_2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXEQ4_2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXEQ4_2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxeq4_3_bcfg
+ *
+ * GSER Lane PCIe Gen4 RX Equalizer Control Register 3
+ * Parameters controlling the custom receiver equalization during PCIe Gen4 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'PCIe'.
+ *
+ * NOTE(review): [TX_CTRL_SEL] is cited here for an RX equalizer register; this text
+ * looks copied from the TX variant -- confirm the actual select field against the HRM.
+ */
+union bdk_gsernx_lanex_pcie_rxeq4_3_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_rxeq4_3_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_62_63        : 2;
+        uint64_t pcie_g4_c5_limit_hi   : 6;  /**< [ 61: 56](R/W) C5 postcursor limit high. */
+        uint64_t pcie_g4_c4_limit_hi   : 6;  /**< [ 55: 50](R/W) C4 postcursor limit high. */
+        uint64_t pcie_g4_c3_limit_hi   : 6;  /**< [ 49: 44](R/W) C3 postcursor limit high. */
+        uint64_t pcie_g4_c2_limit_hi   : 6;  /**< [ 43: 38](R/W) C2 postcursor limit high. */
+        uint64_t pcie_g4_c1_limit_hi   : 6;  /**< [ 37: 32](R/W) C1 postcursor limit high. */
+        uint64_t reserved_30_31        : 2;
+        uint64_t pcie_g4_c5_limit_lo   : 6;  /**< [ 29: 24](R/W) C5 postcursor limit low. */
+        uint64_t pcie_g4_c4_limit_lo   : 6;  /**< [ 23: 18](R/W) C4 postcursor limit low. */
+        uint64_t pcie_g4_c3_limit_lo   : 6;  /**< [ 17: 12](R/W) C3 postcursor limit low. */
+        uint64_t pcie_g4_c2_limit_lo   : 6;  /**< [ 11:  6](R/W) C2 postcursor limit low. */
+        uint64_t pcie_g4_c1_limit_lo   : 6;  /**< [  5:  0](R/W) C1 postcursor limit low. */
+#else /* Word 0 - Little Endian */
+        uint64_t pcie_g4_c1_limit_lo   : 6;  /**< [  5:  0](R/W) C1 postcursor limit low. */
+        uint64_t pcie_g4_c2_limit_lo   : 6;  /**< [ 11:  6](R/W) C2 postcursor limit low. */
+        uint64_t pcie_g4_c3_limit_lo   : 6;  /**< [ 17: 12](R/W) C3 postcursor limit low. */
+        uint64_t pcie_g4_c4_limit_lo   : 6;  /**< [ 23: 18](R/W) C4 postcursor limit low. */
+        uint64_t pcie_g4_c5_limit_lo   : 6;  /**< [ 29: 24](R/W) C5 postcursor limit low. */
+        uint64_t reserved_30_31        : 2;
+        uint64_t pcie_g4_c1_limit_hi   : 6;  /**< [ 37: 32](R/W) C1 postcursor limit high. */
+        uint64_t pcie_g4_c2_limit_hi   : 6;  /**< [ 43: 38](R/W) C2 postcursor limit high. */
+        uint64_t pcie_g4_c3_limit_hi   : 6;  /**< [ 49: 44](R/W) C3 postcursor limit high. */
+        uint64_t pcie_g4_c4_limit_hi   : 6;  /**< [ 55: 50](R/W) C4 postcursor limit high. */
+        uint64_t pcie_g4_c5_limit_hi   : 6;  /**< [ 61: 56](R/W) C5 postcursor limit high. */
+        uint64_t reserved_62_63        : 2;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_rxeq4_3_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxeq4_3_bcfg bdk_gsernx_lanex_pcie_rxeq4_3_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ4_3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ4_3_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e0900023e0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXEQ4_3_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXEQ4_3_BCFG(a,b) bdk_gsernx_lanex_pcie_rxeq4_3_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXEQ4_3_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXEQ4_3_BCFG(a,b) "GSERNX_LANEX_PCIE_RXEQ4_3_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXEQ4_3_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXEQ4_3_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXEQ4_3_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxeq4_4_bcfg
+ *
+ * GSER Lane PCIe Gen4 RX Equalizer Control Register 4
+ * Parameters controlling the custom receiver equalization during PCIe Gen4 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'PCIe'.
+ *
+ * NOTE(review): [TX_CTRL_SEL] is cited here for an RX equalizer register; this text
+ * looks copied from the TX variant -- confirm the actual select field against the HRM.
+ */
+union bdk_gsernx_lanex_pcie_rxeq4_4_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_rxeq4_4_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t pcie_g4_prevga_gn_subrate_fin : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+                                                                 if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled. */
+        uint64_t pcie_g4_prevga_gn_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+                                                                 interval, if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled. */
+        uint64_t pcie_g4_blwc_subrate_final : 16;/**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+                                                                 if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t pcie_g4_blwc_subrate_init : 16;/**< [ 15:  0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+                                                                 interval, if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+#else /* Word 0 - Little Endian */
+        uint64_t pcie_g4_blwc_subrate_init : 16;/**< [ 15:  0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+                                                                 interval, if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t pcie_g4_blwc_subrate_final : 16;/**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+                                                                 if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t pcie_g4_prevga_gn_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+                                                                 interval, if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled. */
+        uint64_t pcie_g4_prevga_gn_subrate_fin : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+                                                                 if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_rxeq4_4_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxeq4_4_bcfg bdk_gsernx_lanex_pcie_rxeq4_4_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ4_4_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ4_4_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e0900023f0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXEQ4_4_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXEQ4_4_BCFG(a,b) bdk_gsernx_lanex_pcie_rxeq4_4_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXEQ4_4_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXEQ4_4_BCFG(a,b) "GSERNX_LANEX_PCIE_RXEQ4_4_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXEQ4_4_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXEQ4_4_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXEQ4_4_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxidl1a_bcfg
+ *
+ * GSER Lane PCIe Gen1 RX Idle Detection Filter Control Register 2
+ * Parameters controlling the analog detection and digital filtering of the receiver's
+ * idle detection logic for PCIe Gen 1. For the digital filtering, setting all fields to 1,
+ * i.e., N0=N1=I0=I1=L0=L1=1, results in no filtering.
+ *
+ * NOTE(review): only L0/L1 (plus REFSET and RX_IDLE_LOWF) are fields of this
+ * register; the N0/N1/I0/I1 terms above presumably live in the companion idle-filter
+ * control register -- verify against the HRM.
+ */
+union bdk_gsernx_lanex_pcie_rxidl1a_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_rxidl1a_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t rx_idle_lowf          : 2;  /**< [ 63: 62](R/W) Control for the receiver's idle detector analog filter
+                                                                 bandwidth. The two bits apply at different times.
+                                                                 \<0\> = Set to 1 for low bandwidth during normal operation.
+                                                                 \<1\> = Set to 1 for low bandwidth during idle offset calibration.
+                                                                 The default is 1 during normal operation for large filter capacitance and low
+                                                                 bandwidth, and 0 during idle offset calibration to provide faster response. */
+        uint64_t reserved_61           : 1;
+        uint64_t refset                : 5;  /**< [ 60: 56](R/W) Sets the reference voltage swing for idle detection. A voltage swing
+                                                                 at the input of the RX less than this amount is defined as idle.
+                                                                 (See GSERN()_LANE()_RX_ST_BCFG[REFSET] for bit mapping.) */
+        uint64_t reserved_54_55        : 2;
+        uint64_t l1                    : 27; /**< [ 53: 27](R/W) Ones count leak parameter. When a zero in the raw idle signal from the custom
+                                                                 macro is encountered, the ones count is decremented by this amount, saturating
+                                                                 to a minimum count of zero. (Set L1=N1 and I1=1 for a simple run-of-N1 ones to
+                                                                 assert the filter output.) The minimum setting for this field is 1. */
+        uint64_t l0                    : 27; /**< [ 26:  0](R/W) Zeros count leak parameter. When a one in the raw idle signal from the custom
+                                                                 macro is encountered, the zeros count is decremented by this amount, saturating
+                                                                 to a minimum count of zero. (Set L0=N0 and I0=1 for a simple run-of-N0 zeros to
+                                                                 deassert the filter output.) The minimum setting for this field is 1. */
+#else /* Word 0 - Little Endian */
+        uint64_t l0                    : 27; /**< [ 26:  0](R/W) Zeros count leak parameter. When a one in the raw idle signal from the custom
+                                                                 macro is encountered, the zeros count is decremented by this amount, saturating
+                                                                 to a minimum count of zero. (Set L0=N0 and I0=1 for a simple run-of-N0 zeros to
+                                                                 deassert the filter output.) The minimum setting for this field is 1. */
+        uint64_t l1                    : 27; /**< [ 53: 27](R/W) Ones count leak parameter. When a zero in the raw idle signal from the custom
+                                                                 macro is encountered, the ones count is decremented by this amount, saturating
+                                                                 to a minimum count of zero. (Set L1=N1 and I1=1 for a simple run-of-N1 ones to
+                                                                 assert the filter output.) The minimum setting for this field is 1. */
+        uint64_t reserved_54_55        : 2;
+        uint64_t refset                : 5;  /**< [ 60: 56](R/W) Sets the reference voltage swing for idle detection. A voltage swing
+                                                                 at the input of the RX less than this amount is defined as idle.
+                                                                 (See GSERN()_LANE()_RX_ST_BCFG[REFSET] for bit mapping.) */
+        uint64_t reserved_61           : 1;
+        uint64_t rx_idle_lowf          : 2;  /**< [ 63: 62](R/W) Control for the receiver's idle detector analog filter
+                                                                 bandwidth. The two bits apply at different times.
+                                                                 \<0\> = Set to 1 for low bandwidth during normal operation.
+                                                                 \<1\> = Set to 1 for low bandwidth during idle offset calibration.
+                                                                 The default is 1 during normal operation for large filter capacitance and low
+                                                                 bandwidth, and 0 during idle offset calibration to provide faster response. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_rxidl1a_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxidl1a_bcfg bdk_gsernx_lanex_pcie_rxidl1a_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXIDL1A_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXIDL1A_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e0900021a0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXIDL1A_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXIDL1A_BCFG(a,b) bdk_gsernx_lanex_pcie_rxidl1a_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXIDL1A_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXIDL1A_BCFG(a,b) "GSERNX_LANEX_PCIE_RXIDL1A_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXIDL1A_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXIDL1A_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXIDL1A_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxidl2a_bcfg
+ *
+ * GSER Lane PCIe Gen2 RX Idle Detection Filter Control Register 2
+ * Parameters controlling the analog detection and digital filtering of the receiver's
+ * idle detection logic for PCIe Gen 2. For the digital filtering, setting all fields to 1,
+ * i.e., N0=N1=I0=I1=L0=L1=1, results in no filtering.
+ *
+ * NOTE(review): only L0/L1 (plus REFSET and RX_IDLE_LOWF) are fields of this
+ * register; the N0/N1/I0/I1 terms above presumably live in the companion idle-filter
+ * control register -- verify against the HRM.
+ */
+union bdk_gsernx_lanex_pcie_rxidl2a_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_rxidl2a_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t rx_idle_lowf          : 2;  /**< [ 63: 62](R/W) Control for the receiver's idle detector analog filter
+                                                                 bandwidth. The two bits apply at different times.
+                                                                 \<0\> = Set to 1 for low bandwidth during normal operation.
+                                                                 \<1\> = Set to 1 for low bandwidth during idle offset calibration.
+                                                                 The default is 1 during normal operation for large filter capacitance and low
+                                                                 bandwidth, and 0 during idle offset calibration to provide faster response. */
+        uint64_t reserved_61           : 1;
+        uint64_t refset                : 5;  /**< [ 60: 56](R/W) Sets the reference voltage swing for idle detection. A voltage swing
+                                                                 at the input of the RX less than this amount is defined as idle.
+                                                                 (See GSERN()_LANE()_RX_ST_BCFG[REFSET] for bit mapping.) */
+        uint64_t reserved_54_55        : 2;
+        uint64_t l1                    : 27; /**< [ 53: 27](R/W) Ones count leak parameter. When a zero in the raw idle signal from the custom
+                                                                 macro is encountered, the ones count is decremented by this amount, saturating
+                                                                 to a minimum count of zero. (Set L1=N1 and I1=1 for a simple run-of-N1 ones to
+                                                                 assert the filter output.) The minimum setting for this field is 1. */
+        uint64_t l0                    : 27; /**< [ 26:  0](R/W) Zeros count leak parameter. When a one in the raw idle signal from the custom
+                                                                 macro is encountered, the zeros count is decremented by this amount, saturating
+                                                                 to a minimum count of zero. (Set L0=N0 and I0=1 for a simple run-of-N0 zeros to
+                                                                 deassert the filter output.) The minimum setting for this field is 1. */
+#else /* Word 0 - Little Endian */
+        uint64_t l0                    : 27; /**< [ 26:  0](R/W) Zeros count leak parameter. When a one in the raw idle signal from the custom
+                                                                 macro is encountered, the zeros count is decremented by this amount, saturating
+                                                                 to a minimum count of zero. (Set L0=N0 and I0=1 for a simple run-of-N0 zeros to
+                                                                 deassert the filter output.) The minimum setting for this field is 1. */
+        uint64_t l1                    : 27; /**< [ 53: 27](R/W) Ones count leak parameter. When a zero in the raw idle signal from the custom
+                                                                 macro is encountered, the ones count is decremented by this amount, saturating
+                                                                 to a minimum count of zero. (Set L1=N1 and I1=1 for a simple run-of-N1 ones to
+                                                                 assert the filter output.) The minimum setting for this field is 1. */
+        uint64_t reserved_54_55        : 2;
+        uint64_t refset                : 5;  /**< [ 60: 56](R/W) Sets the reference voltage swing for idle detection. A voltage swing
+                                                                 at the input of the RX less than this amount is defined as idle.
+                                                                 (See GSERN()_LANE()_RX_ST_BCFG[REFSET] for bit mapping.) */
+        uint64_t reserved_61           : 1;
+        uint64_t rx_idle_lowf          : 2;  /**< [ 63: 62](R/W) Control for the receiver's idle detector analog filter
+                                                                 bandwidth. The two bits apply at different times.
+                                                                 \<0\> = Set to 1 for low bandwidth during normal operation.
+                                                                 \<1\> = Set to 1 for low bandwidth during idle offset calibration.
+                                                                 The default is 1 during normal operation for large filter capacitance and low
+                                                                 bandwidth, and 0 during idle offset calibration to provide faster response. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_rxidl2a_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxidl2a_bcfg bdk_gsernx_lanex_pcie_rxidl2a_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXIDL2A_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXIDL2A_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e0900021c0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXIDL2A_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXIDL2A_BCFG(a,b) bdk_gsernx_lanex_pcie_rxidl2a_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXIDL2A_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXIDL2A_BCFG(a,b) "GSERNX_LANEX_PCIE_RXIDL2A_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXIDL2A_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXIDL2A_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXIDL2A_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxidl3a_bcfg
+ *
+ * GSER Lane PCIe Gen3 RX Idle Detection Filter Control Register 2
+ * Parameters controlling the analog detection and digital filtering of the receiver's
+ * idle detection logic for PCIe Gen 3. For the digital filtering, setting all fields to 1,
+ * i.e., N0=N1=I0=I1=L0=L1=1, results in no filtering.
+ *
+ * NOTE(review): only L0/L1 (plus REFSET and RX_IDLE_LOWF) are fields of this
+ * register; the N0/N1/I0/I1 terms above presumably live in the companion idle-filter
+ * control register -- verify against the HRM.
+ */
+union bdk_gsernx_lanex_pcie_rxidl3a_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_rxidl3a_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t rx_idle_lowf          : 2;  /**< [ 63: 62](R/W) Control for the receiver's idle detector analog filter
+                                                                 bandwidth. The two bits apply at different times.
+                                                                 \<0\> = Set to 1 for low bandwidth during normal operation.
+                                                                 \<1\> = Set to 1 for low bandwidth during idle offset calibration.
+                                                                 The default is 1 during normal operation for large filter capacitance and low
+                                                                 bandwidth, and 0 during idle offset calibration to provide faster response. */
+        uint64_t reserved_61           : 1;
+        uint64_t refset                : 5;  /**< [ 60: 56](R/W) Sets the reference voltage swing for idle detection. A voltage swing
+                                                                 at the input of the RX less than this amount is defined as idle.
+                                                                 (See GSERN()_LANE()_RX_ST_BCFG[REFSET] for bit mapping.) */
+        uint64_t reserved_54_55        : 2;
+        uint64_t l1                    : 27; /**< [ 53: 27](R/W) Ones count leak parameter. When a zero in the raw idle signal from the custom
+                                                                 macro is encountered, the ones count is decremented by this amount, saturating
+                                                                 to a minimum count of zero. (Set L1=N1 and I1=1 for a simple run-of-N1 ones to
+                                                                 assert the filter output.) The minimum setting for this field is 1. */
+        uint64_t l0                    : 27; /**< [ 26:  0](R/W) Zeros count leak parameter. When a one in the raw idle signal from the custom
+                                                                 macro is encountered, the zeros count is decremented by this amount, saturating
+                                                                 to a minimum count of zero. (Set L0=N0 and I0=1 for a simple run-of-N0 zeros to
+                                                                 deassert the filter output.) The minimum setting for this field is 1. */
+#else /* Word 0 - Little Endian */
+        uint64_t l0                    : 27; /**< [ 26:  0](R/W) Zeros count leak parameter. When a one in the raw idle signal from the custom
+                                                                 macro is encountered, the zeros count is decremented by this amount, saturating
+                                                                 to a minimum count of zero. (Set L0=N0 and I0=1 for a simple run-of-N0 zeros to
+                                                                 deassert the filter output.) The minimum setting for this field is 1. */
+        uint64_t l1                    : 27; /**< [ 53: 27](R/W) Ones count leak parameter. When a zero in the raw idle signal from the custom
+                                                                 macro is encountered, the ones count is decremented by this amount, saturating
+                                                                 to a minimum count of zero. (Set L1=N1 and I1=1 for a simple run-of-N1 ones to
+                                                                 assert the filter output.) The minimum setting for this field is 1. */
+        uint64_t reserved_54_55        : 2;
+        uint64_t refset                : 5;  /**< [ 60: 56](R/W) Sets the reference voltage swing for idle detection. A voltage swing
+                                                                 at the input of the RX less than this amount is defined as idle.
+                                                                 (See GSERN()_LANE()_RX_ST_BCFG[REFSET] for bit mapping.) */
+        uint64_t reserved_61           : 1;
+        uint64_t rx_idle_lowf          : 2;  /**< [ 63: 62](R/W) Control for the receiver's idle detector analog filter
+                                                                 bandwidth. The two bits apply at different times.
+                                                                 \<0\> = Set to 1 for low bandwidth during normal operation.
+                                                                 \<1\> = Set to 1 for low bandwidth during idle offset calibration.
+                                                                 The default is 1 during normal operation for large filter capacitance and low
+                                                                 bandwidth, and 0 during idle offset calibration to provide faster response. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_rxidl3a_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxidl3a_bcfg bdk_gsernx_lanex_pcie_rxidl3a_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXIDL3A_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXIDL3A_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e0900021e0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXIDL3A_BCFG", 2, a, b, 0, 0);
+}
+
/* Accessor attributes for GSERN()_LANE()_PCIE_RXIDL3A_BCFG: value C type, bus
   type, printable name, device BAR, bus number, and accessor argument list. */
#define typedef_BDK_GSERNX_LANEX_PCIE_RXIDL3A_BCFG(a,b) bdk_gsernx_lanex_pcie_rxidl3a_bcfg_t
#define bustype_BDK_GSERNX_LANEX_PCIE_RXIDL3A_BCFG(a,b) BDK_CSR_TYPE_RSL
#define basename_BDK_GSERNX_LANEX_PCIE_RXIDL3A_BCFG(a,b) "GSERNX_LANEX_PCIE_RXIDL3A_BCFG"
#define device_bar_BDK_GSERNX_LANEX_PCIE_RXIDL3A_BCFG(a,b) 0x0 /* PF_BAR0 */
#define busnum_BDK_GSERNX_LANEX_PCIE_RXIDL3A_BCFG(a,b) (a)
#define arguments_BDK_GSERNX_LANEX_PCIE_RXIDL3A_BCFG(a,b) (a),(b),-1,-1
+
/**
 * Register (RSL) gsern#_lane#_pcie_rxidl4a_bcfg
 *
 * GSER Lane PCIe Gen4 RX Idle Detection Filter Control Register 2
 * Parameters controlling the analog detection and digital filtering of the receiver's
 * idle detection logic for PCIe Gen 4. For the digital filtering, setting all fields to 1,
 * i.e., N0=N1=I0=I1=L0=L1=1, results in no filtering.
 */
union bdk_gsernx_lanex_pcie_rxidl4a_bcfg
{
    uint64_t u; /* Entire register contents as a single 64-bit value. */
    struct bdk_gsernx_lanex_pcie_rxidl4a_bcfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t rx_idle_lowf          : 2;  /**< [ 63: 62](R/W) Control for the receiver's idle detector analog filter
                                                                 bandwidth. The two bits apply at different times.
                                                                 \<0\> = Set to 1 for low bandwidth during normal operation.
                                                                 \<1\> = Set to 1 for low bandwidth during idle offset calibration.
                                                                 The default is 1 during normal operation for large filter capacitance and low
                                                                 bandwidth, and 0 during idle offset calibration to provide faster response. */
        uint64_t reserved_61           : 1;
        uint64_t refset                : 5;  /**< [ 60: 56](R/W) Sets the reference voltage swing for idle detection. A voltage swing
                                                                 at the input of the RX less than this amount is defined as idle.
                                                                 (See GSERN()_LANE()_RX_ST_BCFG[REFSET] for bit mapping.) */
        uint64_t reserved_54_55        : 2;
        uint64_t l1                    : 27; /**< [ 53: 27](R/W) Ones count leak parameter. When a zero in the raw idle signal from the custom
                                                                 macro is encountered, the ones count is decremented by this amount, saturating
                                                                 to a minimum count of zero. (Set L1=N1 and I1=1 for a simple run-of-N1 ones to
                                                                 assert the filter output.) The minimum setting for this field is 1. */
        uint64_t l0                    : 27; /**< [ 26: 0](R/W) Zeros count leak parameter. When a one in the raw idle signal from the custom
                                                                 macro is encountered, the zeros count is decremented by this amount, saturating
                                                                 to a minimum count of zero. (Set L0=N0 and I0=1 for a simple run-of-N0 zeros to
                                                                 deassert the filter output.) The minimum setting for this field is 1. */
#else /* Word 0 - Little Endian */
        uint64_t l0                    : 27; /**< [ 26: 0](R/W) Zeros count leak parameter. When a one in the raw idle signal from the custom
                                                                 macro is encountered, the zeros count is decremented by this amount, saturating
                                                                 to a minimum count of zero. (Set L0=N0 and I0=1 for a simple run-of-N0 zeros to
                                                                 deassert the filter output.) The minimum setting for this field is 1. */
        uint64_t l1                    : 27; /**< [ 53: 27](R/W) Ones count leak parameter. When a zero in the raw idle signal from the custom
                                                                 macro is encountered, the ones count is decremented by this amount, saturating
                                                                 to a minimum count of zero. (Set L1=N1 and I1=1 for a simple run-of-N1 ones to
                                                                 assert the filter output.) The minimum setting for this field is 1. */
        uint64_t reserved_54_55        : 2;
        uint64_t refset                : 5;  /**< [ 60: 56](R/W) Sets the reference voltage swing for idle detection. A voltage swing
                                                                 at the input of the RX less than this amount is defined as idle.
                                                                 (See GSERN()_LANE()_RX_ST_BCFG[REFSET] for bit mapping.) */
        uint64_t reserved_61           : 1;
        uint64_t rx_idle_lowf          : 2;  /**< [ 63: 62](R/W) Control for the receiver's idle detector analog filter
                                                                 bandwidth. The two bits apply at different times.
                                                                 \<0\> = Set to 1 for low bandwidth during normal operation.
                                                                 \<1\> = Set to 1 for low bandwidth during idle offset calibration.
                                                                 The default is 1 during normal operation for large filter capacitance and low
                                                                 bandwidth, and 0 during idle offset calibration to provide faster response. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_pcie_rxidl4a_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_pcie_rxidl4a_bcfg bdk_gsernx_lanex_pcie_rxidl4a_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXIDL4A_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXIDL4A_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002200ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXIDL4A_BCFG", 2, a, b, 0, 0);
+}
+
/* Accessor attributes for GSERN()_LANE()_PCIE_RXIDL4A_BCFG: value C type, bus
   type, printable name, device BAR, bus number, and accessor argument list. */
#define typedef_BDK_GSERNX_LANEX_PCIE_RXIDL4A_BCFG(a,b) bdk_gsernx_lanex_pcie_rxidl4a_bcfg_t
#define bustype_BDK_GSERNX_LANEX_PCIE_RXIDL4A_BCFG(a,b) BDK_CSR_TYPE_RSL
#define basename_BDK_GSERNX_LANEX_PCIE_RXIDL4A_BCFG(a,b) "GSERNX_LANEX_PCIE_RXIDL4A_BCFG"
#define device_bar_BDK_GSERNX_LANEX_PCIE_RXIDL4A_BCFG(a,b) 0x0 /* PF_BAR0 */
#define busnum_BDK_GSERNX_LANEX_PCIE_RXIDL4A_BCFG(a,b) (a)
#define arguments_BDK_GSERNX_LANEX_PCIE_RXIDL4A_BCFG(a,b) (a),(b),-1,-1
+
/**
 * Register (RSL) gsern#_lane#_pcie_rxidle1_bcfg
 *
 * GSER Lane PCIe Gen1 RX Idle Detection Filter Control Register
 * Parameters controlling the analog detection and digital filtering of the receiver's
 * idle detection logic for PCIe Gen 1. For the digital filtering, setting all fields to 1,
 * i.e., N0=N1=I0=I1=L0=L1=1, results in no filtering.
 */
union bdk_gsernx_lanex_pcie_rxidle1_bcfg
{
    uint64_t u; /* Entire register contents as a single 64-bit value. */
    struct bdk_gsernx_lanex_pcie_rxidle1_bcfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_63           : 1;
        uint64_t i1                    : 4;  /**< [ 62: 59](R/W) Ones count increment parameter. When a one in the raw idle signal from the custom
                                                                 macro is encountered, the ones count is incremented by this amount, saturating
                                                                 to a maximum of [N1]. */
        uint64_t i0                    : 4;  /**< [ 58: 55](R/W) Zeros count increment parameter. When a zero in the raw idle signal from the
                                                                 custom macro is encountered, the zeros count is incremented by this amount,
                                                                 saturating to a maximum count of [N0]. */
        uint64_t reserved_54           : 1;
        uint64_t n1                    : 27; /**< [ 53: 27](R/W) Threshold for the count of ones in the raw idle signal from the custom macro
                                                                 required to assert the idle filter output. */
        uint64_t n0                    : 27; /**< [ 26: 0](R/W) Threshold for the count of zeros in the raw idle signal from the custom macro
                                                                 required to deassert the idle filter output. */
#else /* Word 0 - Little Endian */
        uint64_t n0                    : 27; /**< [ 26: 0](R/W) Threshold for the count of zeros in the raw idle signal from the custom macro
                                                                 required to deassert the idle filter output. */
        uint64_t n1                    : 27; /**< [ 53: 27](R/W) Threshold for the count of ones in the raw idle signal from the custom macro
                                                                 required to assert the idle filter output. */
        uint64_t reserved_54           : 1;
        uint64_t i0                    : 4;  /**< [ 58: 55](R/W) Zeros count increment parameter. When a zero in the raw idle signal from the
                                                                 custom macro is encountered, the zeros count is incremented by this amount,
                                                                 saturating to a maximum count of [N0]. */
        uint64_t i1                    : 4;  /**< [ 62: 59](R/W) Ones count increment parameter. When a one in the raw idle signal from the custom
                                                                 macro is encountered, the ones count is incremented by this amount, saturating
                                                                 to a maximum of [N1]. */
        uint64_t reserved_63           : 1;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_pcie_rxidle1_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_pcie_rxidle1_bcfg bdk_gsernx_lanex_pcie_rxidle1_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXIDLE1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXIDLE1_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002190ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXIDLE1_BCFG", 2, a, b, 0, 0);
+}
+
/* Accessor attributes for GSERN()_LANE()_PCIE_RXIDLE1_BCFG: value C type, bus
   type, printable name, device BAR, bus number, and accessor argument list. */
#define typedef_BDK_GSERNX_LANEX_PCIE_RXIDLE1_BCFG(a,b) bdk_gsernx_lanex_pcie_rxidle1_bcfg_t
#define bustype_BDK_GSERNX_LANEX_PCIE_RXIDLE1_BCFG(a,b) BDK_CSR_TYPE_RSL
#define basename_BDK_GSERNX_LANEX_PCIE_RXIDLE1_BCFG(a,b) "GSERNX_LANEX_PCIE_RXIDLE1_BCFG"
#define device_bar_BDK_GSERNX_LANEX_PCIE_RXIDLE1_BCFG(a,b) 0x0 /* PF_BAR0 */
#define busnum_BDK_GSERNX_LANEX_PCIE_RXIDLE1_BCFG(a,b) (a)
#define arguments_BDK_GSERNX_LANEX_PCIE_RXIDLE1_BCFG(a,b) (a),(b),-1,-1
+
/**
 * Register (RSL) gsern#_lane#_pcie_rxidle2_bcfg
 *
 * GSER Lane PCIe Gen2 RX Idle Detection Filter Control Register
 * Parameters controlling the analog detection and digital filtering of the receiver's
 * idle detection logic for PCIe Gen 2. For the digital filtering, setting all fields to 1,
 * i.e., N0=N1=I0=I1=L0=L1=1, results in no filtering.
 */
union bdk_gsernx_lanex_pcie_rxidle2_bcfg
{
    uint64_t u; /* Entire register contents as a single 64-bit value. */
    struct bdk_gsernx_lanex_pcie_rxidle2_bcfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_63           : 1;
        uint64_t i1                    : 4;  /**< [ 62: 59](R/W) Ones count increment parameter. When a one in the raw idle signal from the custom
                                                                 macro is encountered, the ones count is incremented by this amount, saturating
                                                                 to a maximum of [N1]. */
        uint64_t i0                    : 4;  /**< [ 58: 55](R/W) Zeros count increment parameter. When a zero in the raw idle signal from the
                                                                 custom macro is encountered, the zeros count is incremented by this amount,
                                                                 saturating to a maximum count of [N0]. */
        uint64_t reserved_54           : 1;
        uint64_t n1                    : 27; /**< [ 53: 27](R/W) Threshold for the count of ones in the raw idle signal from the custom macro
                                                                 required to assert the idle filter output. */
        uint64_t n0                    : 27; /**< [ 26: 0](R/W) Threshold for the count of zeros in the raw idle signal from the custom macro
                                                                 required to deassert the idle filter output. */
#else /* Word 0 - Little Endian */
        uint64_t n0                    : 27; /**< [ 26: 0](R/W) Threshold for the count of zeros in the raw idle signal from the custom macro
                                                                 required to deassert the idle filter output. */
        uint64_t n1                    : 27; /**< [ 53: 27](R/W) Threshold for the count of ones in the raw idle signal from the custom macro
                                                                 required to assert the idle filter output. */
        uint64_t reserved_54           : 1;
        uint64_t i0                    : 4;  /**< [ 58: 55](R/W) Zeros count increment parameter. When a zero in the raw idle signal from the
                                                                 custom macro is encountered, the zeros count is incremented by this amount,
                                                                 saturating to a maximum count of [N0]. */
        uint64_t i1                    : 4;  /**< [ 62: 59](R/W) Ones count increment parameter. When a one in the raw idle signal from the custom
                                                                 macro is encountered, the ones count is incremented by this amount, saturating
                                                                 to a maximum of [N1]. */
        uint64_t reserved_63           : 1;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_pcie_rxidle2_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_pcie_rxidle2_bcfg bdk_gsernx_lanex_pcie_rxidle2_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXIDLE2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXIDLE2_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900021b0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXIDLE2_BCFG", 2, a, b, 0, 0);
+}
+
/* Accessor attributes for GSERN()_LANE()_PCIE_RXIDLE2_BCFG: value C type, bus
   type, printable name, device BAR, bus number, and accessor argument list. */
#define typedef_BDK_GSERNX_LANEX_PCIE_RXIDLE2_BCFG(a,b) bdk_gsernx_lanex_pcie_rxidle2_bcfg_t
#define bustype_BDK_GSERNX_LANEX_PCIE_RXIDLE2_BCFG(a,b) BDK_CSR_TYPE_RSL
#define basename_BDK_GSERNX_LANEX_PCIE_RXIDLE2_BCFG(a,b) "GSERNX_LANEX_PCIE_RXIDLE2_BCFG"
#define device_bar_BDK_GSERNX_LANEX_PCIE_RXIDLE2_BCFG(a,b) 0x0 /* PF_BAR0 */
#define busnum_BDK_GSERNX_LANEX_PCIE_RXIDLE2_BCFG(a,b) (a)
#define arguments_BDK_GSERNX_LANEX_PCIE_RXIDLE2_BCFG(a,b) (a),(b),-1,-1
+
/**
 * Register (RSL) gsern#_lane#_pcie_rxidle3_bcfg
 *
 * GSER Lane PCIe Gen3 RX Idle Detection Filter Control Register
 * Parameters controlling the analog detection and digital filtering of the receiver's
 * idle detection logic for PCIe Gen 3. For the digital filtering, setting all fields to 1,
 * i.e., N0=N1=I0=I1=L0=L1=1, results in no filtering.
 */
union bdk_gsernx_lanex_pcie_rxidle3_bcfg
{
    uint64_t u; /* Entire register contents as a single 64-bit value. */
    struct bdk_gsernx_lanex_pcie_rxidle3_bcfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_63           : 1;
        uint64_t i1                    : 4;  /**< [ 62: 59](R/W) Ones count increment parameter. When a one in the raw idle signal from the custom
                                                                 macro is encountered, the ones count is incremented by this amount, saturating
                                                                 to a maximum of [N1]. */
        uint64_t i0                    : 4;  /**< [ 58: 55](R/W) Zeros count increment parameter. When a zero in the raw idle signal from the
                                                                 custom macro is encountered, the zeros count is incremented by this amount,
                                                                 saturating to a maximum count of [N0]. */
        uint64_t reserved_54           : 1;
        uint64_t n1                    : 27; /**< [ 53: 27](R/W) Threshold for the count of ones in the raw idle signal from the custom macro
                                                                 required to assert the idle filter output. */
        uint64_t n0                    : 27; /**< [ 26: 0](R/W) Threshold for the count of zeros in the raw idle signal from the custom macro
                                                                 required to deassert the idle filter output. */
#else /* Word 0 - Little Endian */
        uint64_t n0                    : 27; /**< [ 26: 0](R/W) Threshold for the count of zeros in the raw idle signal from the custom macro
                                                                 required to deassert the idle filter output. */
        uint64_t n1                    : 27; /**< [ 53: 27](R/W) Threshold for the count of ones in the raw idle signal from the custom macro
                                                                 required to assert the idle filter output. */
        uint64_t reserved_54           : 1;
        uint64_t i0                    : 4;  /**< [ 58: 55](R/W) Zeros count increment parameter. When a zero in the raw idle signal from the
                                                                 custom macro is encountered, the zeros count is incremented by this amount,
                                                                 saturating to a maximum count of [N0]. */
        uint64_t i1                    : 4;  /**< [ 62: 59](R/W) Ones count increment parameter. When a one in the raw idle signal from the custom
                                                                 macro is encountered, the ones count is incremented by this amount, saturating
                                                                 to a maximum of [N1]. */
        uint64_t reserved_63           : 1;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_pcie_rxidle3_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_pcie_rxidle3_bcfg bdk_gsernx_lanex_pcie_rxidle3_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXIDLE3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXIDLE3_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900021d0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXIDLE3_BCFG", 2, a, b, 0, 0);
+}
+
/* Accessor attributes for GSERN()_LANE()_PCIE_RXIDLE3_BCFG: value C type, bus
   type, printable name, device BAR, bus number, and accessor argument list. */
#define typedef_BDK_GSERNX_LANEX_PCIE_RXIDLE3_BCFG(a,b) bdk_gsernx_lanex_pcie_rxidle3_bcfg_t
#define bustype_BDK_GSERNX_LANEX_PCIE_RXIDLE3_BCFG(a,b) BDK_CSR_TYPE_RSL
#define basename_BDK_GSERNX_LANEX_PCIE_RXIDLE3_BCFG(a,b) "GSERNX_LANEX_PCIE_RXIDLE3_BCFG"
#define device_bar_BDK_GSERNX_LANEX_PCIE_RXIDLE3_BCFG(a,b) 0x0 /* PF_BAR0 */
#define busnum_BDK_GSERNX_LANEX_PCIE_RXIDLE3_BCFG(a,b) (a)
#define arguments_BDK_GSERNX_LANEX_PCIE_RXIDLE3_BCFG(a,b) (a),(b),-1,-1
+
/**
 * Register (RSL) gsern#_lane#_pcie_rxidle4_bcfg
 *
 * GSER Lane PCIe Gen4 RX Idle Detection Filter Control Register
 * Parameters controlling the analog detection and digital filtering of the receiver's
 * idle detection logic for PCIe Gen 4. For the digital filtering, setting all fields to 1,
 * i.e., N0=N1=I0=I1=L0=L1=1, results in no filtering.
 */
union bdk_gsernx_lanex_pcie_rxidle4_bcfg
{
    uint64_t u; /* Entire register contents as a single 64-bit value. */
    struct bdk_gsernx_lanex_pcie_rxidle4_bcfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_63           : 1;
        uint64_t i1                    : 4;  /**< [ 62: 59](R/W) Ones count increment parameter. When a one in the raw idle signal from the custom
                                                                 macro is encountered, the ones count is incremented by this amount, saturating
                                                                 to a maximum of [N1]. */
        uint64_t i0                    : 4;  /**< [ 58: 55](R/W) Zeros count increment parameter. When a zero in the raw idle signal from the
                                                                 custom macro is encountered, the zeros count is incremented by this amount,
                                                                 saturating to a maximum count of [N0]. */
        uint64_t reserved_54           : 1;
        uint64_t n1                    : 27; /**< [ 53: 27](R/W) Threshold for the count of ones in the raw idle signal from the custom macro
                                                                 required to assert the idle filter output. */
        uint64_t n0                    : 27; /**< [ 26: 0](R/W) Threshold for the count of zeros in the raw idle signal from the custom macro
                                                                 required to deassert the idle filter output. */
#else /* Word 0 - Little Endian */
        uint64_t n0                    : 27; /**< [ 26: 0](R/W) Threshold for the count of zeros in the raw idle signal from the custom macro
                                                                 required to deassert the idle filter output. */
        uint64_t n1                    : 27; /**< [ 53: 27](R/W) Threshold for the count of ones in the raw idle signal from the custom macro
                                                                 required to assert the idle filter output. */
        uint64_t reserved_54           : 1;
        uint64_t i0                    : 4;  /**< [ 58: 55](R/W) Zeros count increment parameter. When a zero in the raw idle signal from the
                                                                 custom macro is encountered, the zeros count is incremented by this amount,
                                                                 saturating to a maximum count of [N0]. */
        uint64_t i1                    : 4;  /**< [ 62: 59](R/W) Ones count increment parameter. When a one in the raw idle signal from the custom
                                                                 macro is encountered, the ones count is incremented by this amount, saturating
                                                                 to a maximum of [N1]. */
        uint64_t reserved_63           : 1;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_pcie_rxidle4_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_pcie_rxidle4_bcfg bdk_gsernx_lanex_pcie_rxidle4_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXIDLE4_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXIDLE4_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900021f0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXIDLE4_BCFG", 2, a, b, 0, 0);
+}
+
/* Accessor attributes for GSERN()_LANE()_PCIE_RXIDLE4_BCFG: value C type, bus
   type, printable name, device BAR, bus number, and accessor argument list. */
#define typedef_BDK_GSERNX_LANEX_PCIE_RXIDLE4_BCFG(a,b) bdk_gsernx_lanex_pcie_rxidle4_bcfg_t
#define bustype_BDK_GSERNX_LANEX_PCIE_RXIDLE4_BCFG(a,b) BDK_CSR_TYPE_RSL
#define basename_BDK_GSERNX_LANEX_PCIE_RXIDLE4_BCFG(a,b) "GSERNX_LANEX_PCIE_RXIDLE4_BCFG"
#define device_bar_BDK_GSERNX_LANEX_PCIE_RXIDLE4_BCFG(a,b) 0x0 /* PF_BAR0 */
#define busnum_BDK_GSERNX_LANEX_PCIE_RXIDLE4_BCFG(a,b) (a)
#define arguments_BDK_GSERNX_LANEX_PCIE_RXIDLE4_BCFG(a,b) (a),(b),-1,-1
+
/**
 * Register (RSL) gsern#_lane#_pcie_txbias_bcfg
 *
 * GSER Lane PCIe TX Margin BIAS Control Register
 * TX BIAS values corresponding to Full Scale, Half Scale and Margin levels for both.
 */
union bdk_gsernx_lanex_pcie_txbias_bcfg
{
    uint64_t u; /* Entire register contents as a single 64-bit value. */
    struct bdk_gsernx_lanex_pcie_txbias_bcfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_60_63        : 4;
        uint64_t tx_margin_h4          : 6;  /**< [ 59: 54](R/W) TX BIAS setting for half scale, Margin 4 output drive. */
        uint64_t tx_margin_h3          : 6;  /**< [ 53: 48](R/W) TX BIAS setting for half scale, Margin 3 output drive. */
        uint64_t tx_margin_h2          : 6;  /**< [ 47: 42](R/W) TX BIAS setting for half scale, Margin 2 output drive. */
        uint64_t tx_margin_h1          : 6;  /**< [ 41: 36](R/W) TX BIAS setting for half scale, Margin 1 output drive. */
        uint64_t tx_bias_half          : 6;  /**< [ 35: 30](R/W) TX BIAS setting for half scale output drive. */
        uint64_t tx_margin_f4          : 6;  /**< [ 29: 24](R/W) TX BIAS setting for full scale, Margin 4 output drive. */
        uint64_t tx_margin_f3          : 6;  /**< [ 23: 18](R/W) TX BIAS setting for full scale, Margin 3 output drive. */
        uint64_t tx_margin_f2          : 6;  /**< [ 17: 12](R/W) TX BIAS setting for full scale, Margin 2 output drive. */
        uint64_t tx_margin_f1          : 6;  /**< [ 11: 6](R/W) TX BIAS setting for full scale, Margin 1 output drive. */
        uint64_t tx_bias_full          : 6;  /**< [ 5: 0](R/W) TX BIAS setting for full scale output drive. */
#else /* Word 0 - Little Endian */
        uint64_t tx_bias_full          : 6;  /**< [ 5: 0](R/W) TX BIAS setting for full scale output drive. */
        uint64_t tx_margin_f1          : 6;  /**< [ 11: 6](R/W) TX BIAS setting for full scale, Margin 1 output drive. */
        uint64_t tx_margin_f2          : 6;  /**< [ 17: 12](R/W) TX BIAS setting for full scale, Margin 2 output drive. */
        uint64_t tx_margin_f3          : 6;  /**< [ 23: 18](R/W) TX BIAS setting for full scale, Margin 3 output drive. */
        uint64_t tx_margin_f4          : 6;  /**< [ 29: 24](R/W) TX BIAS setting for full scale, Margin 4 output drive. */
        uint64_t tx_bias_half          : 6;  /**< [ 35: 30](R/W) TX BIAS setting for half scale output drive. */
        uint64_t tx_margin_h1          : 6;  /**< [ 41: 36](R/W) TX BIAS setting for half scale, Margin 1 output drive. */
        uint64_t tx_margin_h2          : 6;  /**< [ 47: 42](R/W) TX BIAS setting for half scale, Margin 2 output drive. */
        uint64_t tx_margin_h3          : 6;  /**< [ 53: 48](R/W) TX BIAS setting for half scale, Margin 3 output drive. */
        uint64_t tx_margin_h4          : 6;  /**< [ 59: 54](R/W) TX BIAS setting for half scale, Margin 4 output drive. */
        uint64_t reserved_60_63        : 4;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_pcie_txbias_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_pcie_txbias_bcfg bdk_gsernx_lanex_pcie_txbias_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXBIAS_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXBIAS_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002930ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXBIAS_BCFG", 2, a, b, 0, 0);
+}
+
/* Accessor attributes for GSERN()_LANE()_PCIE_TXBIAS_BCFG: value C type, bus
   type, printable name, device BAR, bus number, and accessor argument list. */
#define typedef_BDK_GSERNX_LANEX_PCIE_TXBIAS_BCFG(a,b) bdk_gsernx_lanex_pcie_txbias_bcfg_t
#define bustype_BDK_GSERNX_LANEX_PCIE_TXBIAS_BCFG(a,b) BDK_CSR_TYPE_RSL
#define basename_BDK_GSERNX_LANEX_PCIE_TXBIAS_BCFG(a,b) "GSERNX_LANEX_PCIE_TXBIAS_BCFG"
#define device_bar_BDK_GSERNX_LANEX_PCIE_TXBIAS_BCFG(a,b) 0x0 /* PF_BAR0 */
#define busnum_BDK_GSERNX_LANEX_PCIE_TXBIAS_BCFG(a,b) (a)
#define arguments_BDK_GSERNX_LANEX_PCIE_TXBIAS_BCFG(a,b) (a),(b),-1,-1
+
/**
 * Register (RSL) gsern#_lane#_pcie_txdrv_bcfg
 *
 * GSER Lane PCIe TX Drive Reserved Presets, FS & LF Control Register
 * TX drive Cpre, Cpost and Cmain Coefficient values for the Reserved Presets
 * for Gen3 and Gen4 (the default coefficient values correspond to preset P4).
 * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the full
 * 6 bits defined in the PCIe specification are not needed.
 * This register also contains the control registers for the Local FS and LF.
 */
union bdk_gsernx_lanex_pcie_txdrv_bcfg
{
    uint64_t u; /* Entire register contents as a single 64-bit value. */
    struct bdk_gsernx_lanex_pcie_txdrv_bcfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_61_63        : 3;
        uint64_t g4_rsv_cpost          : 5;  /**< [ 60: 56](R/W) Gen4 Cpost value for all reserved presets. */
        uint64_t reserved_54_55        : 2;
        uint64_t g4_rsv_cmain          : 6;  /**< [ 53: 48](R/W) Gen4 Cmain value for all reserved presets. */
        uint64_t reserved_44_47        : 4;
        uint64_t g4_rsv_cpre           : 4;  /**< [ 43: 40](R/W) Gen4 Cpre value for all reserved presets. */
        uint64_t reserved_38_39        : 2;
        uint64_t local_lf              : 6;  /**< [ 37: 32](R/W) Local LF value advertised to the MAC. */
        uint64_t reserved_30_31        : 2;
        uint64_t local_fs              : 6;  /**< [ 29: 24](R/W) Local FS value advertised to the MAC. */
        uint64_t reserved_21_23        : 3;
        uint64_t g3_rsv_cpost          : 5;  /**< [ 20: 16](R/W) Gen3 Cpost value for all reserved presets. */
        uint64_t reserved_14_15        : 2;
        uint64_t g3_rsv_cmain          : 6;  /**< [ 13: 8](R/W) Gen3 Cmain value for all reserved presets. */
        uint64_t reserved_4_7          : 4;
        uint64_t g3_rsv_cpre           : 4;  /**< [ 3: 0](R/W) Gen3 Cpre value for all reserved presets. */
#else /* Word 0 - Little Endian */
        uint64_t g3_rsv_cpre           : 4;  /**< [ 3: 0](R/W) Gen3 Cpre value for all reserved presets. */
        uint64_t reserved_4_7          : 4;
        uint64_t g3_rsv_cmain          : 6;  /**< [ 13: 8](R/W) Gen3 Cmain value for all reserved presets. */
        uint64_t reserved_14_15        : 2;
        uint64_t g3_rsv_cpost          : 5;  /**< [ 20: 16](R/W) Gen3 Cpost value for all reserved presets. */
        uint64_t reserved_21_23        : 3;
        uint64_t local_fs              : 6;  /**< [ 29: 24](R/W) Local FS value advertised to the MAC. */
        uint64_t reserved_30_31        : 2;
        uint64_t local_lf              : 6;  /**< [ 37: 32](R/W) Local LF value advertised to the MAC. */
        uint64_t reserved_38_39        : 2;
        uint64_t g4_rsv_cpre           : 4;  /**< [ 43: 40](R/W) Gen4 Cpre value for all reserved presets. */
        uint64_t reserved_44_47        : 4;
        uint64_t g4_rsv_cmain          : 6;  /**< [ 53: 48](R/W) Gen4 Cmain value for all reserved presets. */
        uint64_t reserved_54_55        : 2;
        uint64_t g4_rsv_cpost          : 5;  /**< [ 60: 56](R/W) Gen4 Cpost value for all reserved presets. */
        uint64_t reserved_61_63        : 3;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_pcie_txdrv_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_pcie_txdrv_bcfg bdk_gsernx_lanex_pcie_txdrv_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXDRV_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXDRV_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002830ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXDRV_BCFG", 2, a, b, 0, 0);
+}
+
/* Accessor attributes for GSERN()_LANE()_PCIE_TXDRV_BCFG: value C type, bus
   type, printable name, device BAR, bus number, and accessor argument list. */
#define typedef_BDK_GSERNX_LANEX_PCIE_TXDRV_BCFG(a,b) bdk_gsernx_lanex_pcie_txdrv_bcfg_t
#define bustype_BDK_GSERNX_LANEX_PCIE_TXDRV_BCFG(a,b) BDK_CSR_TYPE_RSL
#define basename_BDK_GSERNX_LANEX_PCIE_TXDRV_BCFG(a,b) "GSERNX_LANEX_PCIE_TXDRV_BCFG"
#define device_bar_BDK_GSERNX_LANEX_PCIE_TXDRV_BCFG(a,b) 0x0 /* PF_BAR0 */
#define busnum_BDK_GSERNX_LANEX_PCIE_TXDRV_BCFG(a,b) (a)
#define arguments_BDK_GSERNX_LANEX_PCIE_TXDRV_BCFG(a,b) (a),(b),-1,-1
+
/**
 * Register (RSL) gsern#_lane#_pcie_txpst0_bcfg
 *
 * GSER Lane PCIe TX Drive Preset Coefficients Control Register
 * TX drive Cpre, Cpost and Cmain Coefficient values for Gen3 preset P0.
 * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
 * full 6 bits defined in the PCIe specification are not needed.
 */
union bdk_gsernx_lanex_pcie_txpst0_bcfg
{
    uint64_t u; /* Entire register contents as a single 64-bit value. */
    struct bdk_gsernx_lanex_pcie_txpst0_bcfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_21_63        : 43;
        uint64_t g3_p0_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P0. */
        uint64_t reserved_14_15        : 2;
        uint64_t g3_p0_cmain           : 6;  /**< [ 13: 8](R/W) Cmain value for Gen3 preset P0. */
        uint64_t reserved_4_7          : 4;
        uint64_t g3_p0_cpre            : 4;  /**< [ 3: 0](R/W) Cpre value for Gen3 preset P0. */
#else /* Word 0 - Little Endian */
        uint64_t g3_p0_cpre            : 4;  /**< [ 3: 0](R/W) Cpre value for Gen3 preset P0. */
        uint64_t reserved_4_7          : 4;
        uint64_t g3_p0_cmain           : 6;  /**< [ 13: 8](R/W) Cmain value for Gen3 preset P0. */
        uint64_t reserved_14_15        : 2;
        uint64_t g3_p0_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P0. */
        uint64_t reserved_21_63        : 43;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_pcie_txpst0_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_pcie_txpst0_bcfg bdk_gsernx_lanex_pcie_txpst0_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST0_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST0_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900024f0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST0_BCFG", 2, a, b, 0, 0);
+}
+
/* Accessor attributes for GSERN()_LANE()_PCIE_TXPST0_BCFG: value C type, bus
   type, printable name, device BAR, bus number, and accessor argument list. */
#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST0_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst0_bcfg_t
#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST0_BCFG(a,b) BDK_CSR_TYPE_RSL
#define basename_BDK_GSERNX_LANEX_PCIE_TXPST0_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST0_BCFG"
#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST0_BCFG(a,b) 0x0 /* PF_BAR0 */
#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST0_BCFG(a,b) (a)
#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST0_BCFG(a,b) (a),(b),-1,-1
+
/**
 * Register (RSL) gsern#_lane#_pcie_txpst10_bcfg
 *
 * GSER Lane PCIe TX Drive Preset Coefficients Control Register
 * TX drive Cpre, Cpost and Cmain Coefficient values for Gen3 preset P10.
 * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
 * full 6 bits defined in the PCIe specification are not needed.
 */
union bdk_gsernx_lanex_pcie_txpst10_bcfg
{
    uint64_t u; /* Entire register contents as a single 64-bit value. */
    struct bdk_gsernx_lanex_pcie_txpst10_bcfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_21_63        : 43;
        uint64_t g3_p10_cpost          : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P10. */
        uint64_t reserved_14_15        : 2;
        uint64_t g3_p10_cmain          : 6;  /**< [ 13: 8](R/W) Cmain value for Gen3 preset P10. */
        uint64_t reserved_4_7          : 4;
        uint64_t g3_p10_cpre           : 4;  /**< [ 3: 0](R/W) Cpre value for Gen3 preset P10. */
#else /* Word 0 - Little Endian */
        uint64_t g3_p10_cpre           : 4;  /**< [ 3: 0](R/W) Cpre value for Gen3 preset P10. */
        uint64_t reserved_4_7          : 4;
        uint64_t g3_p10_cmain          : 6;  /**< [ 13: 8](R/W) Cmain value for Gen3 preset P10. */
        uint64_t reserved_14_15        : 2;
        uint64_t g3_p10_cpost          : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P10. */
        uint64_t reserved_21_63        : 43;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_pcie_txpst10_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_pcie_txpst10_bcfg bdk_gsernx_lanex_pcie_txpst10_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST10_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST10_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002590ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST10_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST10_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst10_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST10_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST10_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST10_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST10_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST10_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST10_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst11_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen4 preset P0.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst11_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_txpst11_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_21_63        : 43;
+        uint64_t g4_p0_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen4 preset P0. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g4_p0_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen4 preset P0. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g4_p0_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen4 preset P0. */
+#else /* Word 0 - Little Endian */
+        uint64_t g4_p0_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen4 preset P0. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g4_p0_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen4 preset P0. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g4_p0_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen4 preset P0. */
+        uint64_t reserved_21_63        : 43;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_txpst11_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst11_bcfg bdk_gsernx_lanex_pcie_txpst11_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST11_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST11_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002690ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST11_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST11_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst11_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST11_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST11_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST11_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST11_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST11_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST11_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst12_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen4 preset P1.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst12_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_txpst12_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_21_63        : 43;
+        uint64_t g4_p1_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen4 preset P1. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g4_p1_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen4 preset P1. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g4_p1_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen4 preset P1. */
+#else /* Word 0 - Little Endian */
+        uint64_t g4_p1_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen4 preset P1. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g4_p1_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen4 preset P1. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g4_p1_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen4 preset P1. */
+        uint64_t reserved_21_63        : 43;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_txpst12_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst12_bcfg bdk_gsernx_lanex_pcie_txpst12_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST12_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST12_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e0900026a0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST12_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST12_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst12_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST12_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST12_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST12_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST12_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST12_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST12_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst13_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen4 preset P2.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst13_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_txpst13_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_21_63        : 43;
+        uint64_t g4_p2_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen4 preset P2. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g4_p2_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen4 preset P2. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g4_p2_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen4 preset P2. */
+#else /* Word 0 - Little Endian */
+        uint64_t g4_p2_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen4 preset P2. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g4_p2_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen4 preset P2. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g4_p2_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen4 preset P2. */
+        uint64_t reserved_21_63        : 43;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_txpst13_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst13_bcfg bdk_gsernx_lanex_pcie_txpst13_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST13_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST13_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e0900026b0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST13_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST13_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst13_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST13_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST13_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST13_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST13_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST13_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST13_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst14_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen4 preset P3.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst14_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_txpst14_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_21_63        : 43;
+        uint64_t g4_p3_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen4 preset P3. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g4_p3_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen4 preset P3. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g4_p3_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen4 preset P3. */
+#else /* Word 0 - Little Endian */
+        uint64_t g4_p3_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen4 preset P3. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g4_p3_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen4 preset P3. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g4_p3_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen4 preset P3. */
+        uint64_t reserved_21_63        : 43;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_txpst14_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst14_bcfg bdk_gsernx_lanex_pcie_txpst14_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST14_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST14_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e0900026c0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST14_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST14_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst14_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST14_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST14_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST14_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST14_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST14_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST14_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst15_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen4 preset P4.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst15_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_txpst15_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_21_63        : 43;
+        uint64_t g4_p4_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen4 preset P4. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g4_p4_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen4 preset P4. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g4_p4_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen4 preset P4. */
+#else /* Word 0 - Little Endian */
+        uint64_t g4_p4_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen4 preset P4. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g4_p4_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen4 preset P4. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g4_p4_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen4 preset P4. */
+        uint64_t reserved_21_63        : 43;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_txpst15_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst15_bcfg bdk_gsernx_lanex_pcie_txpst15_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST15_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST15_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e0900026d0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST15_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST15_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst15_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST15_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST15_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST15_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST15_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST15_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST15_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst16_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen4 preset P5.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst16_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_txpst16_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_21_63        : 43;
+        uint64_t g4_p5_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen4 preset P5. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g4_p5_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen4 preset P5. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g4_p5_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen4 preset P5. */
+#else /* Word 0 - Little Endian */
+        uint64_t g4_p5_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen4 preset P5. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g4_p5_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen4 preset P5. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g4_p5_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen4 preset P5. */
+        uint64_t reserved_21_63        : 43;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_txpst16_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst16_bcfg bdk_gsernx_lanex_pcie_txpst16_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST16_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST16_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e0900026e0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST16_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST16_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst16_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST16_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST16_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST16_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST16_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST16_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST16_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst17_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen4 preset P6.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst17_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_txpst17_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_21_63        : 43;
+        uint64_t g4_p6_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen4 preset P6. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g4_p6_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen4 preset P6. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g4_p6_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen4 preset P6. */
+#else /* Word 0 - Little Endian */
+        uint64_t g4_p6_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen4 preset P6. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g4_p6_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen4 preset P6. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g4_p6_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen4 preset P6. */
+        uint64_t reserved_21_63        : 43;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_txpst17_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst17_bcfg bdk_gsernx_lanex_pcie_txpst17_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST17_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST17_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e0900026f0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST17_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST17_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst17_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST17_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST17_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST17_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST17_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST17_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST17_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst18_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen4 preset P7.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst18_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_txpst18_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_21_63        : 43;
+        uint64_t g4_p7_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen4 preset P7. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g4_p7_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen4 preset P7. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g4_p7_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen4 preset P7. */
+#else /* Word 0 - Little Endian */
+        uint64_t g4_p7_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen4 preset P7. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g4_p7_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen4 preset P7. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g4_p7_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen4 preset P7. */
+        uint64_t reserved_21_63        : 43;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_txpst18_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst18_bcfg bdk_gsernx_lanex_pcie_txpst18_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST18_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST18_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002700ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST18_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST18_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst18_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST18_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST18_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST18_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST18_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST18_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST18_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst19_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen4 preset P8.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst19_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_txpst19_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_21_63        : 43;
+        uint64_t g4_p8_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen4 preset P8. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g4_p8_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen4 preset P8. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g4_p8_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen4 preset P8. */
+#else /* Word 0 - Little Endian */
+        uint64_t g4_p8_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen4 preset P8. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g4_p8_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen4 preset P8. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g4_p8_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen4 preset P8. */
+        uint64_t reserved_21_63        : 43;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_txpst19_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst19_bcfg bdk_gsernx_lanex_pcie_txpst19_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST19_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST19_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002710ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST19_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST19_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst19_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST19_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST19_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST19_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST19_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST19_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST19_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst1_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen3 preset P1.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst1_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_txpst1_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_21_63        : 43;
+        uint64_t g3_p1_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P1. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p1_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P1. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p1_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P1. */
+#else /* Word 0 - Little Endian */
+        uint64_t g3_p1_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P1. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p1_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P1. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p1_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P1. */
+        uint64_t reserved_21_63        : 43;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_txpst1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst1_bcfg bdk_gsernx_lanex_pcie_txpst1_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST1_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002500ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST1_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST1_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST1_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst20_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen4 preset P9.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst20_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_txpst20_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_21_63        : 43;
+        uint64_t g4_p9_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen4 preset P9. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g4_p9_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen4 preset P9. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g4_p9_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen4 preset P9. */
+#else /* Word 0 - Little Endian */
+        uint64_t g4_p9_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen4 preset P9. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g4_p9_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen4 preset P9. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g4_p9_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen4 preset P9. */
+        uint64_t reserved_21_63        : 43;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_txpst20_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst20_bcfg bdk_gsernx_lanex_pcie_txpst20_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST20_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST20_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002720ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST20_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST20_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst20_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST20_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST20_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST20_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST20_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST20_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST20_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst21_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen4 preset P10.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst21_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_txpst21_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_21_63        : 43;
+        uint64_t g4_p10_cpost          : 5;  /**< [ 20: 16](R/W) Cpost value for Gen4 preset P10. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g4_p10_cmain          : 6;  /**< [ 13:  8](R/W) Cmain value for Gen4 preset P10. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g4_p10_cpre           : 4;  /**< [  3:  0](R/W) Cpre value for Gen4 preset P10. */
+#else /* Word 0 - Little Endian */
+        uint64_t g4_p10_cpre           : 4;  /**< [  3:  0](R/W) Cpre value for Gen4 preset P10. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g4_p10_cmain          : 6;  /**< [ 13:  8](R/W) Cmain value for Gen4 preset P10. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g4_p10_cpost          : 5;  /**< [ 20: 16](R/W) Cpost value for Gen4 preset P10. */
+        uint64_t reserved_21_63        : 43;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_txpst21_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst21_bcfg bdk_gsernx_lanex_pcie_txpst21_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST21_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST21_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002730ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST21_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST21_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst21_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST21_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST21_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST21_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST21_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST21_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST21_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst2_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen3 preset P2.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst2_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_txpst2_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_21_63        : 43;
+        uint64_t g3_p2_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P2. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p2_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P2. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p2_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P2. */
+#else /* Word 0 - Little Endian */
+        uint64_t g3_p2_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P2. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p2_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P2. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p2_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P2. */
+        uint64_t reserved_21_63        : 43;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_txpst2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst2_bcfg bdk_gsernx_lanex_pcie_txpst2_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST2_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002510ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST2_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST2_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST2_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst3_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen3 preset P3.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst3_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_txpst3_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_21_63        : 43;
+        uint64_t g3_p3_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P3. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p3_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P3. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p3_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P3. */
+#else /* Word 0 - Little Endian */
+        uint64_t g3_p3_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P3. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p3_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P3. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p3_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P3. */
+        uint64_t reserved_21_63        : 43;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_txpst3_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst3_bcfg bdk_gsernx_lanex_pcie_txpst3_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST3_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002520ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST3_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST3_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst3_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST3_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST3_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST3_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST3_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST3_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST3_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst4_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen3 preset P4.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst4_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_txpst4_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_21_63        : 43;
+        uint64_t g3_p4_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P4. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p4_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P4. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p4_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P4. */
+#else /* Word 0 - Little Endian */
+        uint64_t g3_p4_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P4. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p4_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P4. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p4_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P4. */
+        uint64_t reserved_21_63        : 43;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_txpst4_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst4_bcfg bdk_gsernx_lanex_pcie_txpst4_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST4_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST4_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002530ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST4_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST4_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst4_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST4_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST4_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST4_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST4_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST4_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST4_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst5_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen3 preset P5.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst5_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_txpst5_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_21_63        : 43;
+        uint64_t g3_p5_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P5. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p5_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P5. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p5_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P5. */
+#else /* Word 0 - Little Endian */
+        uint64_t g3_p5_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P5. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p5_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P5. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p5_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P5. */
+        uint64_t reserved_21_63        : 43;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_txpst5_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst5_bcfg bdk_gsernx_lanex_pcie_txpst5_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST5_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST5_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002540ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST5_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST5_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst5_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST5_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST5_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST5_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST5_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST5_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST5_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst6_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen3 preset P6.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst6_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_txpst6_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_21_63        : 43;
+        uint64_t g3_p6_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P6. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p6_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P6. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p6_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P6. */
+#else /* Word 0 - Little Endian */
+        uint64_t g3_p6_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P6. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p6_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P6. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p6_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P6. */
+        uint64_t reserved_21_63        : 43;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_txpst6_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst6_bcfg bdk_gsernx_lanex_pcie_txpst6_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST6_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST6_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002550ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST6_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST6_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst6_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST6_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST6_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST6_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST6_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST6_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST6_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst7_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen3 preset P7.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst7_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_txpst7_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_21_63        : 43;
+        uint64_t g3_p7_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P7. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p7_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P7. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p7_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P7. */
+#else /* Word 0 - Little Endian */
+        uint64_t g3_p7_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P7. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p7_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P7. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p7_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P7. */
+        uint64_t reserved_21_63        : 43;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_txpst7_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst7_bcfg bdk_gsernx_lanex_pcie_txpst7_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST7_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST7_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002560ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST7_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST7_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst7_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST7_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST7_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST7_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST7_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST7_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST7_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst8_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen3 preset P8.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst8_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_txpst8_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_21_63        : 43;
+        uint64_t g3_p8_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P8. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p8_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P8. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p8_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P8. */
+#else /* Word 0 - Little Endian */
+        uint64_t g3_p8_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P8. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p8_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P8. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p8_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P8. */
+        uint64_t reserved_21_63        : 43;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_txpst8_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst8_bcfg bdk_gsernx_lanex_pcie_txpst8_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST8_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST8_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002570ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST8_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST8_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst8_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST8_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST8_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST8_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST8_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST8_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST8_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst9_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen3 preset P9.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst9_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_txpst9_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_21_63        : 43;
+        uint64_t g3_p9_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P9. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p9_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P9. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p9_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P9. */
+#else /* Word 0 - Little Endian */
+        uint64_t g3_p9_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P9. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p9_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P9. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p9_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P9. */
+        uint64_t reserved_21_63        : 43;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_txpst9_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst9_bcfg bdk_gsernx_lanex_pcie_txpst9_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST9_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST9_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002580ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST9_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST9_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst9_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST9_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST9_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST9_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST9_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST9_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST9_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcs_802p3_bcfg
+ *
+ * GSER Lane 802.3 PCS Base Configuration Register 0
+ * This register controls settings for Ethernet IEEE 802.3 PCS layer.
+ */
+union bdk_gsernx_lanex_pcs_802p3_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcs_802p3_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63         : 60;
+        uint64_t rx_wpk_order          : 1;  /**< [  3:  3](R/W) Receiver word packing order. Used when the Ethernet MAC is configured for SGMII
+                                                                 1.25 GBaud. When GSERN()_LANE()_PCS_802P3_BCFG[RX_WPK_20B40B] is set two
+                                                                 consecutive 20-bit RX data words from the PCS Lite Layer are packed into a
+                                                                 40-bit word for the Ethernet SGMII MAC.
+
+                                                                 0 = The first 20-bit word from the PCS Lite Layer is transferred to the lower
+                                                                 20-bit word position, bits[19:0] of the 40-bit word and the next consecutive
+                                                                 20-bit word from the PCS Lite layer is transferred to the upper 20-bit word
+                                                                 position, bits[39:20] of the 40-bit word. The assembled 40-bit word is then
+                                                                 forwarded to the SGMII Ethernet MAC.
+
+                                                                 1 = The first 20-bit word from the PCS Lite Layer is transferred to the upper
+                                                                 20-bit word position, bits[39:20] of the 40-bit word and the next consecutive
+                                                                 20-bit word from the PCS Lite layer is transferred to the lower 20-bit word
+                                                                 position, bits[19:0] of the 40-bit word. The assembled 40-bit word is then
+                                                                 forwarded to the SGMII Ethernet MAC.
+
+                                                                 For diagnostic use only. */
+        uint64_t tx_wup_order          : 1;  /**< [  2:  2](R/W) Transmitter word unpacking order. Used when the Ethernet MAC is configured for
+                                                                 SGMII 1.25 GBaud. When GSERN()_LANE()_PCS_802P3_BCFG[TX_WUP_40B20B] is set the
+                                                                 20-bit consecutive RX data word from the PCS Lite Layer are packed into 40-bit
+                                                                 words for the Ethernet SGMII MAC.
+
+                                                                 0 = The lower 20-bit word, bits[19:0] of the 40-bit
+                                                                 word are transferred to the PCS Lite layer followed by the upper 20-bit word,
+                                                                 bits[39:20] of the 40-bit word.
+
+                                                                 1 = The upper 20-bit word, bits[39:20], are transferred to the PCS Lite layer
+                                                                 followed by the lower 20-bit word, bits[19:0], of the 40-bit word.
+
+                                                                 For diagnostic use only. */
+        uint64_t rx_wpk_20b40b         : 1;  /**< [  1:  1](R/W) RX Word Packing 20 bits to 40 bits. Used when the Ethernet MAC is configured for
+                                                                 SGMII 1.25 GBaud.
+                                                                 When set, consecutive 20-bit RX data
+                                                                 words from the PCS Lite Layer are packed into 40-bit words for the Ethernet SGMII MAC.
+                                                                 Used in conjunction with GSERN()_LANE()_PCS_802P3_BCFG[RX_WPK_ORDER]. Refer to
+                                                                 the description for GSERN()_LANE()_PCS_802P3_BCFG[RX_WPK_ORDER].
+                                                                 For diagnostic use only. */
+        uint64_t tx_wup_40b20b         : 1;  /**< [  0:  0](R/W) TX Word UnPacking 40 bits to 20 bits. Used when the Ethernet MAC is configured for
+                                                                 SGMII 1.25 GBaud.
+                                                                 When set, the 40-bit TX data words from
+                                                                 the Ethernet SGMII MAC are transferred to the PCS Lite Layer using two consecutive
+                                                                 20-bit word transfers.
+                                                                 Used in conjunction with GSERN()_LANE()_PCS_802P3_BCFG[TX_WUP_ORDER]. Refer to
+                                                                 the description for GSERN()_LANE()_PCS_802P3_BCFG[TX_WUP_ORDER].
+                                                                 For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+        uint64_t tx_wup_40b20b         : 1;  /**< [  0:  0](R/W) TX Word UnPacking 40 bits to 20 bits. Used when the Ethernet MAC is configured for
+                                                                 SGMII 1.25 GBaud.
+                                                                 When set, the 40-bit TX data words from
+                                                                 the Ethernet SGMII MAC are transferred to the PCS Lite Layer using two consecutive
+                                                                 20-bit word transfers.
+                                                                 Used in conjunction with GSERN()_LANE()_PCS_802P3_BCFG[TX_WUP_ORDER]. Refer to
+                                                                 the description for GSERN()_LANE()_PCS_802P3_BCFG[TX_WUP_ORDER].
+                                                                 For diagnostic use only. */
+        uint64_t rx_wpk_20b40b         : 1;  /**< [  1:  1](R/W) RX Word Packing 20 bits to 40 bits. Used when the Ethernet MAC is configured for
+                                                                 SGMII 1.25 GBaud.
+                                                                 When set, consecutive 20-bit RX data
+                                                                 words from the PCS Lite Layer are packed into 40-bit words for the Ethernet SGMII MAC.
+                                                                 Used in conjunction with GSERN()_LANE()_PCS_802P3_BCFG[RX_WPK_ORDER]. Refer to
+                                                                 the description for GSERN()_LANE()_PCS_802P3_BCFG[RX_WPK_ORDER].
+                                                                 For diagnostic use only. */
+        uint64_t tx_wup_order          : 1;  /**< [  2:  2](R/W) Transmitter word unpacking order. Used when the Ethernet MAC is configured for
+                                                                 SGMII 1.25 GBaud. When GSERN()_LANE()_PCS_802P3_BCFG[TX_WUP_40B20B] is set the
+                                                                 20-bit consecutive RX data word from the PCS Lite Layer are packed into 40-bit
+                                                                 words for the Ethernet SGMII MAC.
+
+                                                                 0 = The lower 20-bit word, bits[19:0] of the 40-bit
+                                                                 word are transferred to the PCS Lite layer followed by the upper 20-bit word,
+                                                                 bits[39:20] of the 40-bit word.
+
+                                                                 1 = The upper 20-bit word, bits[39:20], are transferred to the PCS Lite layer
+                                                                 followed by the lower 20-bit word, bits[19:0], of the 40-bit word.
+
+                                                                 For diagnostic use only. */
+        uint64_t rx_wpk_order          : 1;  /**< [  3:  3](R/W) Receiver word packing order. Used when the Ethernet MAC is configured for SGMII
+                                                                 1.25 GBaud. When GSERN()_LANE()_PCS_802P3_BCFG[RX_WPK_20B40B] is set two
+                                                                 consecutive 20-bit RX data words from the PCS Lite Layer are packed into a
+                                                                 40-bit word for the Ethernet SGMII MAC.
+
+                                                                 0 = The first 20-bit word from the PCS Lite Layer is transferred to the lower
+                                                                 20-bit word position, bits[19:0] of the 40-bit word and the next consecutive
+                                                                 20-bit word from the PCS Lite layer is transferred to the upper 20-bit word
+                                                                 position, bits[39:20] of the 40-bit word. The assembled 40-bit word is then
+                                                                 forwarded to the SGMII Ethernet MAC.
+
+                                                                 1 = The first 20-bit word from the PCS Lite Layer is transferred to the upper
+                                                                 20-bit word position, bits[39:20] of the 40-bit word and the next consecutive
+                                                                 20-bit word from the PCS Lite layer is transferred to the lower 20-bit word
+                                                                 position, bits[19:0] of the 40-bit word. The assembled 40-bit word is then
+                                                                 forwarded to the SGMII Ethernet MAC.
+
+                                                                 For diagnostic use only. */
+        uint64_t reserved_4_63         : 60;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcs_802p3_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcs_802p3_bcfg bdk_gsernx_lanex_pcs_802p3_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCS_802P3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCS_802P3_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090003350ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCS_802P3_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCS_802P3_BCFG(a,b) bdk_gsernx_lanex_pcs_802p3_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCS_802P3_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCS_802P3_BCFG(a,b) "GSERNX_LANEX_PCS_802P3_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCS_802P3_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCS_802P3_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCS_802P3_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pll_1_bcfg
+ *
+ * GSER Lane PLL Base Configuration Register 1
+ */
+union bdk_gsernx_lanex_pll_1_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pll_1_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_62_63        : 2;
+        uint64_t cal_cp_mult           : 2;  /**< [ 61: 60](R/W) PLL cal charge pump mult control. */
+        uint64_t cp                    : 4;  /**< [ 59: 56](R/W) PLL charge pump configuration. */
+        uint64_t cp_overide            : 1;  /**< [ 55: 55](R/W) PLL charge pump override. */
+        uint64_t band_ppm              : 2;  /**< [ 54: 53](R/W) PLL band ppm setting. */
+        uint64_t band                  : 5;  /**< [ 52: 48](R/W/H) PLL manual PLL band inputs; only effective if [BAND_OVERIDE] set. */
+        uint64_t band_limits           : 3;  /**< [ 47: 45](R/W) Band limits for the PLL calibration procedure. */
+        uint64_t band_overide          : 1;  /**< [ 44: 44](R/W/H) Bypass PLL calibration and set PLL band with band field inputs. */
+        uint64_t bg_div16              : 1;  /**< [ 43: 43](R/W) Enable divide by 16 of reference clock to the band gap. */
+        uint64_t bg_clk_en             : 1;  /**< [ 42: 42](R/W) Enable chopping in the band gap circuit. */
+        uint64_t dither_en             : 1;  /**< [ 41: 41](R/W) Enable the dithering bit of sigma delta modulator. */
+        uint64_t cal_sel               : 1;  /**< [ 40: 40](R/W) PLL calibration method select. */
+        uint64_t vco_sel               : 1;  /**< [ 39: 39](R/W) PLL select one of the two VCOs in the PLL. */
+        uint64_t sdm_en                : 1;  /**< [ 38: 38](R/W) Enable PLL fractional-N operation. */
+        uint64_t reserved_29_37        : 9;
+        uint64_t post_div              : 2;  /**< [ 28: 27](R/W) Forward PLL divider. Used in conjunction with [DIV_N] to set the
+                                                                 PLL frequency given a reference clock frequency. The output frequency will
+                                                                 be the VCO frequency divided by [POST_DIV].
+                                                                 0x0 = Divide PLL frequency by 1.
+                                                                 0x1 = Divide PLL frequency by 2.
+                                                                 0x2 = Divide PLL frequency by 4.
+                                                                 0x3 = Divide PLL frequency by 8. */
+        uint64_t div_n                 : 9;  /**< [ 26: 18](R/W) PLL feedback divider integer portion. */
+        uint64_t div_f                 : 18; /**< [ 17:  0](R/W) PLL feedback divider fractional portion. */
+#else /* Word 0 - Little Endian */
+        uint64_t div_f                 : 18; /**< [ 17:  0](R/W) PLL feedback divider fractional portion. */
+        uint64_t div_n                 : 9;  /**< [ 26: 18](R/W) PLL feedback divider integer portion. */
+        uint64_t post_div              : 2;  /**< [ 28: 27](R/W) Forward PLL divider. Used in conjunction with [DIV_N] to set the
+                                                                 PLL frequency given a reference clock frequency. The output frequency will
+                                                                 be the VCO frequency divided by [POST_DIV].
+                                                                 0x0 = Divide PLL frequency by 1.
+                                                                 0x1 = Divide PLL frequency by 2.
+                                                                 0x2 = Divide PLL frequency by 4.
+                                                                 0x3 = Divide PLL frequency by 8. */
+        uint64_t reserved_29_37        : 9;
+        uint64_t sdm_en                : 1;  /**< [ 38: 38](R/W) Enable PLL fractional-N operation. */
+        uint64_t vco_sel               : 1;  /**< [ 39: 39](R/W) PLL select one of the two VCOs in the PLL. */
+        uint64_t cal_sel               : 1;  /**< [ 40: 40](R/W) PLL calibration method select. */
+        uint64_t dither_en             : 1;  /**< [ 41: 41](R/W) Enable the dithering bit of sigma delta modulator. */
+        uint64_t bg_clk_en             : 1;  /**< [ 42: 42](R/W) Enable chopping in the band gap circuit. */
+        uint64_t bg_div16              : 1;  /**< [ 43: 43](R/W) Enable divide by 16 of reference clock to the band gap. */
+        uint64_t band_overide          : 1;  /**< [ 44: 44](R/W/H) Bypass PLL calibration and set PLL band with band field inputs. */
+        uint64_t band_limits           : 3;  /**< [ 47: 45](R/W) Band limits for the PLL calibration procedure. */
+        uint64_t band                  : 5;  /**< [ 52: 48](R/W/H) PLL manual PLL band inputs; only effective if [BAND_OVERIDE] set. */
+        uint64_t band_ppm              : 2;  /**< [ 54: 53](R/W) PLL band ppm setting. */
+        uint64_t cp_overide            : 1;  /**< [ 55: 55](R/W) PLL charge pump override. */
+        uint64_t cp                    : 4;  /**< [ 59: 56](R/W) PLL charge pump configuration. */
+        uint64_t cal_cp_mult           : 2;  /**< [ 61: 60](R/W) PLL cal charge pump mult control. */
+        uint64_t reserved_62_63        : 2;
+#endif /* Word 0 - End */
+    } s;
+    /* cn variant: identical field layout to "s" except that reserved_29_37 is
+       split into reserved_29_35 and reserved_36_37. */
+    struct bdk_gsernx_lanex_pll_1_bcfg_cn
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_62_63        : 2;
+        uint64_t cal_cp_mult           : 2;  /**< [ 61: 60](R/W) PLL cal charge pump mult control. */
+        uint64_t cp                    : 4;  /**< [ 59: 56](R/W) PLL charge pump configuration. */
+        uint64_t cp_overide            : 1;  /**< [ 55: 55](R/W) PLL charge pump override. */
+        uint64_t band_ppm              : 2;  /**< [ 54: 53](R/W) PLL band ppm setting. */
+        uint64_t band                  : 5;  /**< [ 52: 48](R/W/H) PLL manual PLL band inputs; only effective if [BAND_OVERIDE] set. */
+        uint64_t band_limits           : 3;  /**< [ 47: 45](R/W) Band limits for the PLL calibration procedure. */
+        uint64_t band_overide          : 1;  /**< [ 44: 44](R/W/H) Bypass PLL calibration and set PLL band with band field inputs. */
+        uint64_t bg_div16              : 1;  /**< [ 43: 43](R/W) Enable divide by 16 of reference clock to the band gap. */
+        uint64_t bg_clk_en             : 1;  /**< [ 42: 42](R/W) Enable chopping in the band gap circuit. */
+        uint64_t dither_en             : 1;  /**< [ 41: 41](R/W) Enable the dithering bit of sigma delta modulator. */
+        uint64_t cal_sel               : 1;  /**< [ 40: 40](R/W) PLL calibration method select. */
+        uint64_t vco_sel               : 1;  /**< [ 39: 39](R/W) PLL select one of the two VCOs in the PLL. */
+        uint64_t sdm_en                : 1;  /**< [ 38: 38](R/W) Enable PLL fractional-N operation. */
+        uint64_t reserved_36_37        : 2;
+        uint64_t reserved_29_35        : 7;
+        uint64_t post_div              : 2;  /**< [ 28: 27](R/W) Forward PLL divider. Used in conjunction with [DIV_N] to set the
+                                                                 PLL frequency given a reference clock frequency. The output frequency will
+                                                                 be the VCO frequency divided by [POST_DIV].
+                                                                 0x0 = Divide PLL frequency by 1.
+                                                                 0x1 = Divide PLL frequency by 2.
+                                                                 0x2 = Divide PLL frequency by 4.
+                                                                 0x3 = Divide PLL frequency by 8. */
+        uint64_t div_n                 : 9;  /**< [ 26: 18](R/W) PLL feedback divider integer portion. */
+        uint64_t div_f                 : 18; /**< [ 17:  0](R/W) PLL feedback divider fractional portion. */
+#else /* Word 0 - Little Endian */
+        uint64_t div_f                 : 18; /**< [ 17:  0](R/W) PLL feedback divider fractional portion. */
+        uint64_t div_n                 : 9;  /**< [ 26: 18](R/W) PLL feedback divider integer portion. */
+        uint64_t post_div              : 2;  /**< [ 28: 27](R/W) Forward PLL divider. Used in conjunction with [DIV_N] to set the
+                                                                 PLL frequency given a reference clock frequency. The output frequency will
+                                                                 be the VCO frequency divided by [POST_DIV].
+                                                                 0x0 = Divide PLL frequency by 1.
+                                                                 0x1 = Divide PLL frequency by 2.
+                                                                 0x2 = Divide PLL frequency by 4.
+                                                                 0x3 = Divide PLL frequency by 8. */
+        uint64_t reserved_29_35        : 7;
+        uint64_t reserved_36_37        : 2;
+        uint64_t sdm_en                : 1;  /**< [ 38: 38](R/W) Enable PLL fractional-N operation. */
+        uint64_t vco_sel               : 1;  /**< [ 39: 39](R/W) PLL select one of the two VCOs in the PLL. */
+        uint64_t cal_sel               : 1;  /**< [ 40: 40](R/W) PLL calibration method select. */
+        uint64_t dither_en             : 1;  /**< [ 41: 41](R/W) Enable the dithering bit of sigma delta modulator. */
+        uint64_t bg_clk_en             : 1;  /**< [ 42: 42](R/W) Enable chopping in the band gap circuit. */
+        uint64_t bg_div16              : 1;  /**< [ 43: 43](R/W) Enable divide by 16 of reference clock to the band gap. */
+        uint64_t band_overide          : 1;  /**< [ 44: 44](R/W/H) Bypass PLL calibration and set PLL band with band field inputs. */
+        uint64_t band_limits           : 3;  /**< [ 47: 45](R/W) Band limits for the PLL calibration procedure. */
+        uint64_t band                  : 5;  /**< [ 52: 48](R/W/H) PLL manual PLL band inputs; only effective if [BAND_OVERIDE] set. */
+        uint64_t band_ppm              : 2;  /**< [ 54: 53](R/W) PLL band ppm setting. */
+        uint64_t cp_overide            : 1;  /**< [ 55: 55](R/W) PLL charge pump override. */
+        uint64_t cp                    : 4;  /**< [ 59: 56](R/W) PLL charge pump configuration. */
+        uint64_t cal_cp_mult           : 2;  /**< [ 61: 60](R/W) PLL cal charge pump mult control. */
+        uint64_t reserved_62_63        : 2;
+#endif /* Word 0 - End */
+    } cn;
+};
+typedef union bdk_gsernx_lanex_pll_1_bcfg bdk_gsernx_lanex_pll_1_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PLL_1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PLL_1_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090000200ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PLL_1_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PLL_1_BCFG(a,b) bdk_gsernx_lanex_pll_1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PLL_1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PLL_1_BCFG(a,b) "GSERNX_LANEX_PLL_1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PLL_1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PLL_1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PLL_1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pll_2_bcfg
+ *
+ * GSER Lane PLL Base Configuration Register 2
+ */
+union bdk_gsernx_lanex_pll_2_bcfg
+{
+ uint64_t u; /* Whole-register access. */
+ struct bdk_gsernx_lanex_pll_2_bcfg_s
+ {
+ /* Identical fields appear twice below, ordered for big- and little-endian hosts; keep both halves in sync. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_56_63 : 8;
+ uint64_t lock_check_cnt_ovrd_en : 1; /**< [ 55: 55](R/W) Enable use of [LOCK_CHECK_CNT_OVRD]. */
+ uint64_t lock_check_cnt_ovrd : 15; /**< [ 54: 40](R/W) Lock check counter value override. This counter is used to wait for PLL lock to
+ be valid. It counts every REFCLK cycle and once its done asserts
+ GSERN()_LANE()_INIT_BSTS[LOCK_READY]. For Common PLL, REFCLK is the input from the
+ pad. For Lane PLL, REFCLK is the output of the common PLL. To use value assert
+ GSERN()_LANE()_RST1_BCFG[LOCK_CHECK] or trigger a PLL reset sequence. */
+ uint64_t reserved_34_39 : 6;
+ uint64_t vcm_sel : 1; /**< [ 33: 33](R/W) For diagnostic use only.
+ Internal:
+ See PLL designer for how to set these. */
+ uint64_t cp_boost : 1; /**< [ 32: 32](R/W) For diagnostic use only.
+ Internal:
+ See PLL designer for how to set these. */
+ uint64_t ssc_sata_mode : 2; /**< [ 31: 30](R/W) PLL SATA spread spectrum control.
+ 0x0 = Down spreading. PPM triangle wave total peak-to-peak spread subtracted from
+ nominal frequency.
+ 0x1 = Up spreading. PPM triangle wave total peak-to-peak spread added to nominal
+ frequency.
+ 0x2 = Center spreading. PPM triangle wave total peak-to-peak spread centered at nominal
+ frequency.
+ 0x3 = Square wave subtracted from nominal frequency. */
+ uint64_t ssc_ppm : 2; /**< [ 29: 28](R/W) Spread-spectrum clocking total peak-to-peak spread.
+ 0x0 = 5000 PPM.
+ 0x1 = 3000 PPM.
+ 0x2 = 2500 PPM.
+ 0x3 = 1000 PPM. */
+ uint64_t pnr_refclk_en : 1; /**< [ 27: 27](R/W) Enable PLL reference clock to internal logic. */
+ uint64_t ssc_en : 1; /**< [ 26: 26](R/W) Spread-spectrum clocking enable. */
+ uint64_t shlb_en : 1; /**< [ 25: 25](R/W) Used when in shallow loopback mode to mux the CDR receive clock onto
+ the transmit data path clock to ensure that the clock frequencies
+ are matched (to prevent data overrun). */
+ uint64_t pfd_offset : 1; /**< [ 24: 24](R/W) PLL PFD offset enable. */
+ uint64_t opamp : 4; /**< [ 23: 20](R/W) PLL loop filter op-amp configuration. */
+ uint64_t res : 4; /**< [ 19: 16](R/W) PLL loop filter configuration. */
+ uint64_t reserved_15 : 1;
+ uint64_t vco_bias : 3; /**< [ 14: 12](R/W) VCO bias control. */
+ uint64_t cal_dac_low : 4; /**< [ 11: 8](R/W) PLL calibration DAC low control. */
+ uint64_t cal_dac_mid : 4; /**< [ 7: 4](R/W) PLL calibration DAC middle control. */
+ uint64_t cal_dac_high : 4; /**< [ 3: 0](R/W) PLL calibration DAC high control. */
+#else /* Word 0 - Little Endian */
+ uint64_t cal_dac_high : 4; /**< [ 3: 0](R/W) PLL calibration DAC high control. */
+ uint64_t cal_dac_mid : 4; /**< [ 7: 4](R/W) PLL calibration DAC middle control. */
+ uint64_t cal_dac_low : 4; /**< [ 11: 8](R/W) PLL calibration DAC low control. */
+ uint64_t vco_bias : 3; /**< [ 14: 12](R/W) VCO bias control. */
+ uint64_t reserved_15 : 1;
+ uint64_t res : 4; /**< [ 19: 16](R/W) PLL loop filter configuration. */
+ uint64_t opamp : 4; /**< [ 23: 20](R/W) PLL loop filter op-amp configuration. */
+ uint64_t pfd_offset : 1; /**< [ 24: 24](R/W) PLL PFD offset enable. */
+ uint64_t shlb_en : 1; /**< [ 25: 25](R/W) Used when in shallow loopback mode to mux the CDR receive clock onto
+ the transmit data path clock to ensure that the clock frequencies
+ are matched (to prevent data overrun). */
+ uint64_t ssc_en : 1; /**< [ 26: 26](R/W) Spread-spectrum clocking enable. */
+ uint64_t pnr_refclk_en : 1; /**< [ 27: 27](R/W) Enable PLL reference clock to internal logic. */
+ uint64_t ssc_ppm : 2; /**< [ 29: 28](R/W) Spread-spectrum clocking total peak-to-peak spread.
+ 0x0 = 5000 PPM.
+ 0x1 = 3000 PPM.
+ 0x2 = 2500 PPM.
+ 0x3 = 1000 PPM. */
+ uint64_t ssc_sata_mode : 2; /**< [ 31: 30](R/W) PLL SATA spread spectrum control.
+ 0x0 = Down spreading. PPM triangle wave total peak-to-peak spread subtracted from
+ nominal frequency.
+ 0x1 = Up spreading. PPM triangle wave total peak-to-peak spread added to nominal
+ frequency.
+ 0x2 = Center spreading. PPM triangle wave total peak-to-peak spread centered at nominal
+ frequency.
+ 0x3 = Square wave subtracted from nominal frequency. */
+ uint64_t cp_boost : 1; /**< [ 32: 32](R/W) For diagnostic use only.
+ Internal:
+ See PLL designer for how to set these. */
+ uint64_t vcm_sel : 1; /**< [ 33: 33](R/W) For diagnostic use only.
+ Internal:
+ See PLL designer for how to set these. */
+ uint64_t reserved_34_39 : 6;
+ uint64_t lock_check_cnt_ovrd : 15; /**< [ 54: 40](R/W) Lock check counter value override. This counter is used to wait for PLL lock to
+ be valid. It counts every REFCLK cycle and once its done asserts
+ GSERN()_LANE()_INIT_BSTS[LOCK_READY]. For Common PLL, REFCLK is the input from the
+ pad. For Lane PLL, REFCLK is the output of the common PLL. To use value assert
+ GSERN()_LANE()_RST1_BCFG[LOCK_CHECK] or trigger a PLL reset sequence. */
+ uint64_t lock_check_cnt_ovrd_en : 1; /**< [ 55: 55](R/W) Enable use of [LOCK_CHECK_CNT_OVRD]. */
+ uint64_t reserved_56_63 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pll_2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pll_2_bcfg bdk_gsernx_lanex_pll_2_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PLL_2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PLL_2_BCFG(unsigned long a, unsigned long b)
+{
+ /* Valid only on CN9XXX, module index a in [0,7], lane index b in [0,4]. */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4)) {
+ /* Module stride is 2^24 bytes; lane stride is 2^16 bytes. */
+ const uint64_t base = 0x87e090000210ll;
+ return base + (((uint64_t)a & 0x7) << 24) + (((uint64_t)b & 0x7) << 16);
+ }
+ __bdk_csr_fatal("GSERNX_LANEX_PLL_2_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PLL_2_BCFG(a,b) bdk_gsernx_lanex_pll_2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PLL_2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PLL_2_BCFG(a,b) "GSERNX_LANEX_PLL_2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PLL_2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PLL_2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PLL_2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rev
+ *
+ * GSER Lane Revision Register
+ * Revision number
+ */
+union bdk_gsernx_lanex_rev
+{
+ uint64_t u; /* Whole-register access. */
+ struct bdk_gsernx_lanex_rev_s
+ {
+ /* The single REV field appears twice below, ordered for big- and little-endian hosts. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t rev : 8; /**< [ 7: 0](RO/H) Revision number for GSERN lane subblock.
+ Internal:
+ Used primarily for E5. */
+#else /* Word 0 - Little Endian */
+ uint64_t rev : 8; /**< [ 7: 0](RO/H) Revision number for GSERN lane subblock.
+ Internal:
+ Used primarily for E5. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rev_s cn; */
+};
+typedef union bdk_gsernx_lanex_rev bdk_gsernx_lanex_rev_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_REV(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_REV(unsigned long a, unsigned long b)
+{
+ /* Valid only on CN9XXX, module index a in [0,7], lane index b in [0,4]. */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4)) {
+ /* Module stride is 2^24 bytes; lane stride is 2^16 bytes. */
+ const uint64_t base = 0x87e090000000ll;
+ return base + (((uint64_t)a & 0x7) << 24) + (((uint64_t)b & 0x7) << 16);
+ }
+ __bdk_csr_fatal("GSERNX_LANEX_REV", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_REV(a,b) bdk_gsernx_lanex_rev_t
+#define bustype_BDK_GSERNX_LANEX_REV(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_REV(a,b) "GSERNX_LANEX_REV"
+#define device_bar_BDK_GSERNX_LANEX_REV(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_REV(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_REV(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rst1_bcfg
+ *
+ * GSER Lane Reset State Machine Controls and Overrides Register 1
+ */
+union bdk_gsernx_lanex_rst1_bcfg
+{
+ uint64_t u; /* Whole-register access. */
+ struct bdk_gsernx_lanex_rst1_bcfg_s
+ {
+ /* Identical fields appear twice below, ordered for big- and little-endian hosts; keep both halves in sync. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_56_63 : 8;
+ uint64_t domain_rst_en : 1; /**< [ 55: 55](R/W) Domain reset enable.
+ 0 = Prevent reseting lane logic with domain reset.
+ 1 = Enable reseting all lane logic with domain reset.
+
+ For PCIe configurations, typically 1 for a root complex and 0 for an endpoint. */
+ uint64_t reserved_48_54 : 7;
+ uint64_t rx_go2deep_idle : 1; /**< [ 47: 47](R/W) Set to sequence the receiver into deep idle. */
+ uint64_t rx_pd_qac_q : 1; /**< [ 46: 46](R/W) Power control for the custom analog quadrature accuracy corrector
+ (QAC). This QAC corrects for phase error between the I clock and the Q
+ (quadrature, doutq) clock.
+ 0 = Power up the I/Q QAC.
+ 1 = Power down the I/Q QAC. When in this state,
+ GSERN()_LANE()_RX_QAC_BCFG[CDR_QAC_SELQ] should also be set to zero to
+ disconnect the QAC from the clock data recovery (CDR) loop. */
+ uint64_t rx_pd_qac_e : 1; /**< [ 45: 45](R/W) Power control for the custom analog quadrature accuracy corrector
+ (QAC). This QAC corrects for phase error between the I clock and the E
+ (eye, doute) clock.
+ 0 = Power up the I/E QAC.
+ 1 = Power down the I/E QAC. When in this state,
+ GSERN()_LANE()_RX_QAC_BCFG[CDR_QAC_SELQ] should also be set to zero to
+ disconnect the QAC from the clock data recovery (CDR) loop. */
+ uint64_t rx_pd_idle : 1; /**< [ 44: 44](R/W) Set to power down the idle detector in the custom analog
+ receiver. */
+ uint64_t rx_rst_deser : 1; /**< [ 43: 43](R/W) Set to reset the deserializers to the offset DAC, current
+ bias DAC, and interpolator re-mapping. */
+ uint64_t rx_rst_dcc_q : 1; /**< [ 42: 42](R/W) Set to reset the integrator in the duty-cycle corrector
+ (DCC) on the Q (quadrature, data, doutq) path. */
+ uint64_t rx_rst_dcc_i : 1; /**< [ 41: 41](R/W) Set to reset the integrator in the duty-cycle corrector
+ (DCC) on the I (in-phase, edge, douti) path. */
+ uint64_t rx_rst_dcc_e : 1; /**< [ 40: 40](R/W) Set to reset the integrator in the duty-cycle corrector
+ (DCC) on the E (eye, doute) path */
+ uint64_t idle : 1; /**< [ 39: 39](R/W) Set to idle the custom receiver and baseline wander
+ compensation (bwlc). */
+ uint64_t rx_rst_qac_q : 1; /**< [ 38: 38](R/W) Set reset to the doutq datapath quadrature corrector
+ filter and associated logic. */
+ uint64_t rx_rst_qac_e : 1; /**< [ 37: 37](R/W) Set reset to the doute quadrature corrector filter and
+ associated logic. */
+ uint64_t rx_rst_blwc : 1; /**< [ 36: 36](R/W) Set to reset the analog baseline wander compensation
+ block. */
+ uint64_t rx_rst_cdrfsm : 1; /**< [ 35: 35](R/W) Set to reset the CDR FSM. */
+ uint64_t rx_rst_voter : 1; /**< [ 34: 34](R/W) Set to reset the analog voter block. */
+ uint64_t rx_rst_div_e : 1; /**< [ 33: 33](R/W) Set to reset the analog CDR clock dividers in the eye data path for
+ div{5, 8, 10, 16, 20}. */
+ uint64_t rx_rst_div : 1; /**< [ 32: 32](R/W) Set to reset the analog CDR clock dividers in the quadrature data path
+ for div{5, 8, 10, 16, 20}. */
+ uint64_t rx_rst_interp_q : 1; /**< [ 31: 31](R/W) Set to reset the Q (quadrature, doutq) pipe analog
+ interpolator logic (only, not the full datapaths). */
+ uint64_t rx_rst_interp_i : 1; /**< [ 30: 30](R/W) Set to reset the I (in-phase, douti) pipe analog
+ interpolator logic (only, not the full datapath). */
+ uint64_t rx_rst_interp_e : 1; /**< [ 29: 29](R/W) Set to reset the E (eye, doute) analog interpolator logic
+ (only, not the full datapath). */
+ uint64_t rx_pd_interp_q : 1; /**< [ 28: 28](R/W) Set to power down the I (in-phase, douti) analog
+ interpolator logic and output clocks (only, not the full clock path). */
+ uint64_t rx_pd_interp_i : 1; /**< [ 27: 27](R/W) Set to power down the I (in-phase, douti) analog
+ interpolator logic and output clocks (only, not the full clock path). */
+ uint64_t rx_pd_interp_e : 1; /**< [ 26: 26](R/W) Set to power down the E (eye, doute) analog interpolator
+ logic and output clocks (only, not the full clock path). */
+ uint64_t rx_pd_dfe_x : 1; /**< [ 25: 25](R/W) Set to power down the DFE X path. The X path is passed to
+ the DFE I (edge, douti) pipe depending on edgesel_{even,odd}. */
+ uint64_t rx_pd_dfe_q : 1; /**< [ 24: 24](R/W) Set to power down the DFE Q (data, doutq) path (only, not
+ the full datapath) */
+ uint64_t rx_pd_dfe_i : 1; /**< [ 23: 23](R/W) Set to power down the DFE I (edge, douti) path (only, not
+ the full datapath). */
+ uint64_t rx_pd_dfe_e : 1; /**< [ 22: 22](R/W) Set to power down the DFE E (eye, doute) path (only, not
+ the full datapath). */
+ uint64_t rx_pd_dcc_q : 1; /**< [ 21: 21](R/W) Set to power down the duty-cycle corrector (DCC) of the Q
+ (quadrature, doutq) clock after the interpolator and before the
+ divider (only, not the full clock path). */
+ uint64_t rx_pd_dcc_i : 1; /**< [ 20: 20](R/W) Set to power down the duty-cycle corrector (DCC) of the I
+ (in-phase, douti) clock after the interpolator and before the divider
+ (not the full clock path). */
+ uint64_t rx_pd_dcc_e : 1; /**< [ 19: 19](R/W) Set to power down the duty-cycle corrector (DCC) of the E
+ (eye, doute) clock after the interpolator and before the divider (not
+ the full clock path). */
+ uint64_t rx_pd_biasdac : 1; /**< [ 18: 18](R/W) Set to power down the current bias DAC, which would power
+ down any amplifier in the RX (CTLE, VGA, DFE summer, DCC, QAC, etc.). */
+ uint64_t rx_pd_afe : 1; /**< [ 17: 17](R/W) Set to power down the analog front-end (AFE). */
+ uint64_t rx_en_cdrfsm : 1; /**< [ 16: 16](R/W) Set to enable (power-up) the CDR FSM. */
+ uint64_t reserved_13_15 : 3;
+ uint64_t pll_go2deep_idle : 1; /**< [ 12: 12](R/W) Set to cycle the PLL into deep idle. */
+ uint64_t lock_ppm : 2; /**< [ 11: 10](R/W) PLL lock PPM setting; after GSERN()_LANE()_RST1_BCFG[LOCK_WAIT], compare
+ reference clock and divided VCO clock for this many cycles:
+ 0x0 = Compare after 5000 reference clock cycles.
+ 0x1 = Compare after 10000 reference clock cycles.
+ 0x2 = Compare after 20000 reference clock cycles.
+ 0x3 = Compare after 2500 reference clock cycles. */
+ uint64_t lock_wait : 2; /**< [ 9: 8](R/W) Wait time for PLL lock check function to start:
+ 0x0 = Wait 2500 reference clock cycles.
+ 0x1 = Wait 5000 reference clock cycles.
+ 0x2 = Wait 10000 reference clock cycles.
+ 0x3 = Wait 1250 reference clock cycles. */
+ uint64_t lock_check : 1; /**< [ 7: 7](R/W) Trigger a PLL lock status check; result returned in
+ GSERN()_LANE()_INIT_BSTS[LOCK] when GSERN()_LANE()_INIT_BSTS[LOCK_READY]
+ asserts. deassert and re-assert to repeat checking. */
+ uint64_t vco_cal_reset : 1; /**< [ 6: 6](R/W) PLL VCO calibration state machine reset. */
+ uint64_t fracn_reset : 1; /**< [ 5: 5](R/W) PLL fractional-N state machine reset. */
+ uint64_t ssc_reset : 1; /**< [ 4: 4](R/W) PLL SSC state machine reset. */
+ uint64_t post_div_reset : 1; /**< [ 3: 3](RO) Reserved.
+ Internal:
+ Was common PLL post divider reset. No longer used. */
+ uint64_t reset : 1; /**< [ 2: 2](R/W) PLL primary reset; must assert [POST_DIV_RESET] if [RESET] is asserted. */
+ uint64_t cal_en : 1; /**< [ 1: 1](R/W) Enable PLL calibration procedure. */
+ uint64_t pwdn : 1; /**< [ 0: 0](R/W) PLL power down control. */
+#else /* Word 0 - Little Endian */
+ uint64_t pwdn : 1; /**< [ 0: 0](R/W) PLL power down control. */
+ uint64_t cal_en : 1; /**< [ 1: 1](R/W) Enable PLL calibration procedure. */
+ uint64_t reset : 1; /**< [ 2: 2](R/W) PLL primary reset; must assert [POST_DIV_RESET] if [RESET] is asserted. */
+ uint64_t post_div_reset : 1; /**< [ 3: 3](RO) Reserved.
+ Internal:
+ Was common PLL post divider reset. No longer used. */
+ uint64_t ssc_reset : 1; /**< [ 4: 4](R/W) PLL SSC state machine reset. */
+ uint64_t fracn_reset : 1; /**< [ 5: 5](R/W) PLL fractional-N state machine reset. */
+ uint64_t vco_cal_reset : 1; /**< [ 6: 6](R/W) PLL VCO calibration state machine reset. */
+ uint64_t lock_check : 1; /**< [ 7: 7](R/W) Trigger a PLL lock status check; result returned in
+ GSERN()_LANE()_INIT_BSTS[LOCK] when GSERN()_LANE()_INIT_BSTS[LOCK_READY]
+ asserts. deassert and re-assert to repeat checking. */
+ uint64_t lock_wait : 2; /**< [ 9: 8](R/W) Wait time for PLL lock check function to start:
+ 0x0 = Wait 2500 reference clock cycles.
+ 0x1 = Wait 5000 reference clock cycles.
+ 0x2 = Wait 10000 reference clock cycles.
+ 0x3 = Wait 1250 reference clock cycles. */
+ uint64_t lock_ppm : 2; /**< [ 11: 10](R/W) PLL lock PPM setting; after GSERN()_LANE()_RST1_BCFG[LOCK_WAIT], compare
+ reference clock and divided VCO clock for this many cycles:
+ 0x0 = Compare after 5000 reference clock cycles.
+ 0x1 = Compare after 10000 reference clock cycles.
+ 0x2 = Compare after 20000 reference clock cycles.
+ 0x3 = Compare after 2500 reference clock cycles. */
+ uint64_t pll_go2deep_idle : 1; /**< [ 12: 12](R/W) Set to cycle the PLL into deep idle. */
+ uint64_t reserved_13_15 : 3;
+ uint64_t rx_en_cdrfsm : 1; /**< [ 16: 16](R/W) Set to enable (power-up) the CDR FSM. */
+ uint64_t rx_pd_afe : 1; /**< [ 17: 17](R/W) Set to power down the analog front-end (AFE). */
+ uint64_t rx_pd_biasdac : 1; /**< [ 18: 18](R/W) Set to power down the current bias DAC, which would power
+ down any amplifier in the RX (CTLE, VGA, DFE summer, DCC, QAC, etc.). */
+ uint64_t rx_pd_dcc_e : 1; /**< [ 19: 19](R/W) Set to power down the duty-cycle corrector (DCC) of the E
+ (eye, doute) clock after the interpolator and before the divider (not
+ the full clock path). */
+ uint64_t rx_pd_dcc_i : 1; /**< [ 20: 20](R/W) Set to power down the duty-cycle corrector (DCC) of the I
+ (in-phase, douti) clock after the interpolator and before the divider
+ (not the full clock path). */
+ uint64_t rx_pd_dcc_q : 1; /**< [ 21: 21](R/W) Set to power down the duty-cycle corrector (DCC) of the Q
+ (quadrature, doutq) clock after the interpolator and before the
+ divider (only, not the full clock path). */
+ uint64_t rx_pd_dfe_e : 1; /**< [ 22: 22](R/W) Set to power down the DFE E (eye, doute) path (only, not
+ the full datapath). */
+ uint64_t rx_pd_dfe_i : 1; /**< [ 23: 23](R/W) Set to power down the DFE I (edge, douti) path (only, not
+ the full datapath). */
+ uint64_t rx_pd_dfe_q : 1; /**< [ 24: 24](R/W) Set to power down the DFE Q (data, doutq) path (only, not
+ the full datapath) */
+ uint64_t rx_pd_dfe_x : 1; /**< [ 25: 25](R/W) Set to power down the DFE X path. The X path is passed to
+ the DFE I (edge, douti) pipe depending on edgesel_{even,odd}. */
+ uint64_t rx_pd_interp_e : 1; /**< [ 26: 26](R/W) Set to power down the E (eye, doute) analog interpolator
+ logic and output clocks (only, not the full clock path). */
+ uint64_t rx_pd_interp_i : 1; /**< [ 27: 27](R/W) Set to power down the I (in-phase, douti) analog
+ interpolator logic and output clocks (only, not the full clock path). */
+ uint64_t rx_pd_interp_q : 1; /**< [ 28: 28](R/W) Set to power down the I (in-phase, douti) analog
+ interpolator logic and output clocks (only, not the full clock path). */
+ uint64_t rx_rst_interp_e : 1; /**< [ 29: 29](R/W) Set to reset the E (eye, doute) analog interpolator logic
+ (only, not the full datapath). */
+ uint64_t rx_rst_interp_i : 1; /**< [ 30: 30](R/W) Set to reset the I (in-phase, douti) pipe analog
+ interpolator logic (only, not the full datapath). */
+ uint64_t rx_rst_interp_q : 1; /**< [ 31: 31](R/W) Set to reset the Q (quadrature, doutq) pipe analog
+ interpolator logic (only, not the full datapaths). */
+ uint64_t rx_rst_div : 1; /**< [ 32: 32](R/W) Set to reset the analog CDR clock dividers in the quadrature data path
+ for div{5, 8, 10, 16, 20}. */
+ uint64_t rx_rst_div_e : 1; /**< [ 33: 33](R/W) Set to reset the analog CDR clock dividers in the eye data path for
+ div{5, 8, 10, 16, 20}. */
+ uint64_t rx_rst_voter : 1; /**< [ 34: 34](R/W) Set to reset the analog voter block. */
+ uint64_t rx_rst_cdrfsm : 1; /**< [ 35: 35](R/W) Set to reset the CDR FSM. */
+ uint64_t rx_rst_blwc : 1; /**< [ 36: 36](R/W) Set to reset the analog baseline wander compensation
+ block. */
+ uint64_t rx_rst_qac_e : 1; /**< [ 37: 37](R/W) Set reset to the doute quadrature corrector filter and
+ associated logic. */
+ uint64_t rx_rst_qac_q : 1; /**< [ 38: 38](R/W) Set reset to the doutq datapath quadrature corrector
+ filter and associated logic. */
+ uint64_t idle : 1; /**< [ 39: 39](R/W) Set to idle the custom receiver and baseline wander
+ compensation (bwlc). */
+ uint64_t rx_rst_dcc_e : 1; /**< [ 40: 40](R/W) Set to reset the integrator in the duty-cycle corrector
+ (DCC) on the E (eye, doute) path */
+ uint64_t rx_rst_dcc_i : 1; /**< [ 41: 41](R/W) Set to reset the integrator in the duty-cycle corrector
+ (DCC) on the I (in-phase, edge, douti) path. */
+ uint64_t rx_rst_dcc_q : 1; /**< [ 42: 42](R/W) Set to reset the integrator in the duty-cycle corrector
+ (DCC) on the Q (quadrature, data, doutq) path. */
+ uint64_t rx_rst_deser : 1; /**< [ 43: 43](R/W) Set to reset the deserializers to the offset DAC, current
+ bias DAC, and interpolator re-mapping. */
+ uint64_t rx_pd_idle : 1; /**< [ 44: 44](R/W) Set to power down the idle detector in the custom analog
+ receiver. */
+ uint64_t rx_pd_qac_e : 1; /**< [ 45: 45](R/W) Power control for the custom analog quadrature accuracy corrector
+ (QAC). This QAC corrects for phase error between the I clock and the E
+ (eye, doute) clock.
+ 0 = Power up the I/E QAC.
+ 1 = Power down the I/E QAC. When in this state,
+ GSERN()_LANE()_RX_QAC_BCFG[CDR_QAC_SELQ] should also be set to zero to
+ disconnect the QAC from the clock data recovery (CDR) loop. */
+ uint64_t rx_pd_qac_q : 1; /**< [ 46: 46](R/W) Power control for the custom analog quadrature accuracy corrector
+ (QAC). This QAC corrects for phase error between the I clock and the Q
+ (quadrature, doutq) clock.
+ 0 = Power up the I/Q QAC.
+ 1 = Power down the I/Q QAC. When in this state,
+ GSERN()_LANE()_RX_QAC_BCFG[CDR_QAC_SELQ] should also be set to zero to
+ disconnect the QAC from the clock data recovery (CDR) loop. */
+ uint64_t rx_go2deep_idle : 1; /**< [ 47: 47](R/W) Set to sequence the receiver into deep idle. */
+ uint64_t reserved_48_54 : 7;
+ uint64_t domain_rst_en : 1; /**< [ 55: 55](R/W) Domain reset enable.
+ 0 = Prevent reseting lane logic with domain reset.
+ 1 = Enable reseting all lane logic with domain reset.
+
+ For PCIe configurations, typically 1 for a root complex and 0 for an endpoint. */
+ uint64_t reserved_56_63 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rst1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rst1_bcfg bdk_gsernx_lanex_rst1_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RST1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RST1_BCFG(unsigned long a, unsigned long b)
+{
+ /* Valid only on CN9XXX, module index a in [0,7], lane index b in [0,4]. */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4)) {
+ /* Module stride is 2^24 bytes; lane stride is 2^16 bytes. */
+ const uint64_t base = 0x87e090000310ll;
+ return base + (((uint64_t)a & 0x7) << 24) + (((uint64_t)b & 0x7) << 16);
+ }
+ __bdk_csr_fatal("GSERNX_LANEX_RST1_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RST1_BCFG(a,b) bdk_gsernx_lanex_rst1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RST1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RST1_BCFG(a,b) "GSERNX_LANEX_RST1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RST1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RST1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RST1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rst2_bcfg
+ *
+ * GSER Lane Reset State Machine Controls and Overrides Register 2
+ */
+union bdk_gsernx_lanex_rst2_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rst2_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_58_63 : 6;
+ uint64_t adpt_trigger_wait : 4; /**< [ 57: 54](R/W) Wait time for after triggering adaptation before checking adaptation status. Set
+ to a minimum of 3. Set to the desired value before or at the same time as
+ setting [RST_ADPT_RST_SM] to zero. */
+ uint64_t reserved_50_53 : 4;
+ uint64_t adpt_wait : 18; /**< [ 49: 32](R/W) Wait time for adaptation to complete. Set at least as long as the maximum of:
+ * GSERN()_LANE()_RX_5_BCFG[VGA_TIMER_MAX].
+ * GSERN()_LANE()_RX_5_BCFG[DFE_TIMER_MAX].
+ * GSERN()_LANE()_RX_6_BCFG[CTLELTE_TIMER_MAX].
+ * GSERN()_LANE()_RX_6_BCFG[CTLEZ_TIMER_MAX].
+ * GSERN()_LANE()_RX_6_BCFG[CTLE_TIMER_MAX].
+ * GSERN()_LANE()_RX_12_BCFG[AFEOS_TIMER_MAX].
+ * GSERN()_LANE()_RX_19_BCFG[BLWC_TIMER_MAX].
+ * GSERN()_LANE()_RX_23_BCFG[PREVGA_GN_TIMER_MAX].
+
+ The adaptation state machine will move on when all enabled adaptation operations
+ complete within the [ADPT_WAIT] count. If they do not complete within the wait
+ time, the state machine will move on when the counter expires. Set to the
+ desired value before or at the same time as setting [RST_ADPT_RST_SM] to zero. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t do_prevga_gn_adpt : 1; /**< [ 29: 29](R/W) Set to one to allow the adaptation reset state machine to trigger PREVGA_GN adaptation
+ when [RST_ADPT_RST_SM] is deasserted. */
+ uint64_t do_blwc_adpt : 1; /**< [ 28: 28](R/W) Set to one to allow the adaptation reset state machine to trigger BLWC adaptation
+ when [RST_ADPT_RST_SM] is deasserted. */
+ uint64_t do_afeos_adpt : 1; /**< [ 27: 27](R/W) Set to one to allow the adaptation reset state machine to trigger AFEOS adaptation
+ when [RST_ADPT_RST_SM] is deasserted. */
+ uint64_t do_ctlelte_adpt : 1; /**< [ 26: 26](R/W) Set to one to allow the adaptation reset state machine to trigger CTLELTE adaptation
+ when [RST_ADPT_RST_SM] is deasserted. */
+ uint64_t do_ctlez_adpt : 1; /**< [ 25: 25](R/W) Set to one to allow the adaptation reset state machine to trigger CTLEZ adaptation
+ when [RST_ADPT_RST_SM] is deasserted. */
+ uint64_t do_ctle_adpt : 1; /**< [ 24: 24](R/W) Set to one to allow the adaptation reset state machine to trigger CTLE adaptation
+ when [RST_ADPT_RST_SM] is deasserted. */
+ uint64_t do_dfe_adpt : 1; /**< [ 23: 23](R/W) Set to one to allow the adaptation reset state machine to trigger DFE adaptation
+ when [RST_ADPT_RST_SM] is deasserted. */
+ uint64_t do_vga_adpt : 1; /**< [ 22: 22](R/W) Set to one to allow the adaptation reset state machine to trigger VGA adaptation
+ when [RST_ADPT_RST_SM] is deasserted. */
+ uint64_t rst_adpt_rst_sm : 1; /**< [ 21: 21](R/W) Set to one to reset the adaptation reset state machine; set to zero to allow the
+ adaptation reset state machine to run. Leave set to one to run adaptation
+ entirely under SW control through the GSERN()_LANE()_RX_7_BCFG[*_RST]
+ controls. Write to zero at the same time or after the desired [DO_*_ADPT]
+ controls are enabled to allow the reset state machine to initiate
+ adaptation. Note - for pausing and restarting adaptation associated with PCIe
+ rate changes and all power state transitions, the reset state machine should
+ control adaptation. */
+ uint64_t rst_eye_rst_sm : 1; /**< [ 20: 20](R/W) Set to reset the eye data path reset and power-up/power-down
+ state machine; set low to allow the eye data path reset and soft
+ power-up/power-down state machine to run (if [LN_RESET_USE_EYE] is
+ asserted). */
+ uint64_t ln_reset_use_eye : 1; /**< [ 19: 19](R/W) Set to enable the eye (doute) data path reset and
+ power-up/power-down state machine to run at cold reset when
+ [RST_EYE_RST_SM] deasserts. After cold reset, assert or deassert
+ [LN_RESET_USE_EYE] to run the eye data path soft power-up or
+ power-down sequence. */
+ uint64_t rst_rx_rst_sm : 1; /**< [ 18: 18](R/W) Set to reset the receiver reset state machine; set low to run
+ the receiver reset initialization state machine. */
+ uint64_t rst_tx_rst_sm : 1; /**< [ 17: 17](R/W) Set to reset the transmitter reset state machine; set low to
+ run the transmitter reset initialization state machine. */
+ uint64_t rst_pll_rst_sm : 1; /**< [ 16: 16](R/W) Set to reset the full lane reset state machine (PLL, TX,
+ and RX); set low to run the complete reset initialization sequence
+ starting with lane PLL initialization. */
+ uint64_t reserved_13_15 : 3;
+ uint64_t tx_dcc_iboost : 1; /**< [ 12: 12](R/W) Set to assert the iboost control bit of the
+ transmit duty cycle correcter. Should be programmed as desired before
+ sequencing the transmitter reset state machine. Differs
+ from [TX_DCC_LOWF] in the data rate range that it is set at. */
+ uint64_t tx_go2deep_idle : 1; /**< [ 11: 11](R/W) Set to sequence the transmitter into deep idle. */
+ uint64_t tx_dcc_lowf : 1; /**< [ 10: 10](R/W) Set to assert the low-frequency control bit of the transmit duty cycle
+ correcter. Should be programmed as desired before sequencing the transmitter
+ reset state machine. Set to 1 for data rates below 4 Gbaud. */
+ uint64_t tx_idle : 1; /**< [ 9: 9](R/W) Set to put the transmitter into idle (weak terminate). */
+ uint64_t tx_div_rst : 1; /**< [ 8: 8](R/W) Set to reset the counter in the analog transmitter clock
+ divider. */
+ uint64_t tx_dcc_rst : 1; /**< [ 7: 7](R/W) Set to reset the analog duty cycle corrector in the
+ transmitter. */
+ uint64_t reserved_6 : 1;
+ uint64_t tx_enctl : 1; /**< [ 5: 5](R/W) Set to enable the analog TX controls (c*, en*). */
+ uint64_t tx_cdrdiv3 : 1; /**< [ 4: 4](R/W) Set to enable the analog divide by 3 post scalar divider in the
+ TX divider. If GSERN()_LANE()_CDRFSM_BCFG[CLK_SEL] is set to use the div3clk from
+ the transmitter this bit needs to be enabled. */
+ uint64_t tx_endiv5 : 1; /**< [ 3: 3](R/W) Set to enable the analog divide by 4 or 5 post scalar dividers
+ in the TX divider. */
+ uint64_t reserved_2 : 1;
+ uint64_t tx_pdb : 1; /**< [ 1: 1](R/W) Set to zero to power down the entire analog TX driver, disabling
+ current mirrors, current DACs, and op-amps. */
+ uint64_t tx_dcc_pdb : 1; /**< [ 0: 0](R/W) Set to zero to power-down the low-swing input, CML to CMOS shifter,
+ and duty cycle corrector. */
+#else /* Word 0 - Little Endian */
+ uint64_t tx_dcc_pdb : 1; /**< [ 0: 0](R/W) Set to zero to power-down the low-swing input, CML to CMOS shifter,
+ and duty cycle corrector. */
+ uint64_t tx_pdb : 1; /**< [ 1: 1](R/W) Set to zero to power down the entire analog TX driver, disabling
+ current mirrors, current DACs, and op-amps. */
+ uint64_t reserved_2 : 1;
+ uint64_t tx_endiv5 : 1; /**< [ 3: 3](R/W) Set to enable the analog divide by 4 or 5 post scalar dividers
+ in the TX divider. */
+ uint64_t tx_cdrdiv3 : 1; /**< [ 4: 4](R/W) Set to enable the analog divide by 3 post scalar divider in the
+ TX divider. If GSERN()_LANE()_CDRFSM_BCFG[CLK_SEL] is set to use the div3clk from
+ the transmitter this bit needs to be enabled. */
+ uint64_t tx_enctl : 1; /**< [ 5: 5](R/W) Set to enable the analog TX controls (c*, en*). */
+ uint64_t reserved_6 : 1;
+ uint64_t tx_dcc_rst : 1; /**< [ 7: 7](R/W) Set to reset the analog duty cycle corrector in the
+ transmitter. */
+ uint64_t tx_div_rst : 1; /**< [ 8: 8](R/W) Set to reset the counter in the analog transmitter clock
+ divider. */
+ uint64_t tx_idle : 1; /**< [ 9: 9](R/W) Set to put the transmitter into idle (weak terminate). */
+ uint64_t tx_dcc_lowf : 1; /**< [ 10: 10](R/W) Set to assert the low-frequency control bit of the transmit duty cycle
+ correcter. Should be programmed as desired before sequencing the transmitter
+ reset state machine. Set to 1 for data rates below 4 Gbaud. */
+ uint64_t tx_go2deep_idle : 1; /**< [ 11: 11](R/W) Set to sequence the transmitter into deep idle. */
+ uint64_t tx_dcc_iboost : 1; /**< [ 12: 12](R/W) Set to assert the iboost control bit of the
+ transmit duty cycle correcter. Should be programmed as desired before
+ sequencing the transmitter reset state machine. Differs
+ from [TX_DCC_LOWF] in the data rate range that it is set at. */
+ uint64_t reserved_13_15 : 3;
+ uint64_t rst_pll_rst_sm : 1; /**< [ 16: 16](R/W) Set to reset the full lane reset state machine (PLL, TX,
+ and RX); set low to run the complete reset initialization sequence
+ starting with lane PLL initialization. */
+ uint64_t rst_tx_rst_sm : 1; /**< [ 17: 17](R/W) Set to reset the transmitter reset state machine; set low to
+ run the transmitter reset initialization state machine. */
+ uint64_t rst_rx_rst_sm : 1; /**< [ 18: 18](R/W) Set to reset the receiver reset state machine; set low to run
+ the receiver reset initialization state machine. */
+ uint64_t ln_reset_use_eye : 1; /**< [ 19: 19](R/W) Set to enable the eye (doute) data path reset and
+ power-up/power-down state machine to run at cold reset when
+ [RST_EYE_RST_SM] deasserts. After cold reset, assert or deassert
+ [LN_RESET_USE_EYE] to run the eye data path soft power-up or
+ power-down sequence. */
+ uint64_t rst_eye_rst_sm : 1; /**< [ 20: 20](R/W) Set to reset the eye data path reset and power-up/power-down
+ state machine; set low to allow the eye data path reset and soft
+ power-up/power-down state machine to run (if [LN_RESET_USE_EYE] is
+ asserted). */
+ uint64_t rst_adpt_rst_sm : 1; /**< [ 21: 21](R/W) Set to one to reset the adaptation reset state machine; set to zero to allow the
+ adaptation reset state machine to run. Leave set to one to run adaptation
+ entirely under SW control through the GSERN()_LANE()_RX_7_BCFG[*_RST]
+ controls. Write to zero at the same time or after the desired [DO_*_ADPT]
+ controls are enabled to allow the reset state machine to initiate
+ adaptation. Note - for pausing and restarting adaptation associated with PCIe
+ rate changes and all power state transitions, the reset state machine should
+ control adaptation. */
+ uint64_t do_vga_adpt : 1; /**< [ 22: 22](R/W) Set to one to allow the adaptation reset state machine to trigger VGA adaptation
+ when [RST_ADPT_RST_SM] is deasserted. */
+ uint64_t do_dfe_adpt : 1; /**< [ 23: 23](R/W) Set to one to allow the adaptation reset state machine to trigger DFE adaptation
+ when [RST_ADPT_RST_SM] is deasserted. */
+ uint64_t do_ctle_adpt : 1; /**< [ 24: 24](R/W) Set to one to allow the adaptation reset state machine to trigger CTLE adaptation
+ when [RST_ADPT_RST_SM] is deasserted. */
+ uint64_t do_ctlez_adpt : 1; /**< [ 25: 25](R/W) Set to one to allow the adaptation reset state machine to trigger CTLEZ adaptation
+ when [RST_ADPT_RST_SM] is deasserted. */
+ uint64_t do_ctlelte_adpt : 1; /**< [ 26: 26](R/W) Set to one to allow the adaptation reset state machine to trigger CTLELTE adaptation
+ when [RST_ADPT_RST_SM] is deasserted. */
+ uint64_t do_afeos_adpt : 1; /**< [ 27: 27](R/W) Set to one to allow the adaptation reset state machine to trigger AFEOS adaptation
+ when [RST_ADPT_RST_SM] is deasserted. */
+ uint64_t do_blwc_adpt : 1; /**< [ 28: 28](R/W) Set to one to allow the adaptation reset state machine to trigger BLWC adaptation
+ when [RST_ADPT_RST_SM] is deasserted. */
+ uint64_t do_prevga_gn_adpt : 1; /**< [ 29: 29](R/W) Set to one to allow the adaptation reset state machine to trigger PREVGA_GN adaptation
+ when [RST_ADPT_RST_SM] is deasserted. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t adpt_wait : 18; /**< [ 49: 32](R/W) Wait time for adaptation to complete. Set at least as long as the maximum of:
+ * GSERN()_LANE()_RX_5_BCFG[VGA_TIMER_MAX].
+ * GSERN()_LANE()_RX_5_BCFG[DFE_TIMER_MAX].
+ * GSERN()_LANE()_RX_6_BCFG[CTLELTE_TIMER_MAX].
+ * GSERN()_LANE()_RX_6_BCFG[CTLEZ_TIMER_MAX].
+ * GSERN()_LANE()_RX_6_BCFG[CTLE_TIMER_MAX].
+ * GSERN()_LANE()_RX_12_BCFG[AFEOS_TIMER_MAX].
+ * GSERN()_LANE()_RX_19_BCFG[BLWC_TIMER_MAX].
+ * GSERN()_LANE()_RX_23_BCFG[PREVGA_GN_TIMER_MAX].
+
+ The adaptation state machine will move on when all enabled adaptation operations
+ complete within the [ADPT_WAIT] count. If they do not complete within the wait
+ time, the state machine will move on when the counter expires. Set to the
+ desired value before or at the same time as setting [RST_ADPT_RST_SM] to zero. */
+ uint64_t reserved_50_53 : 4;
+ uint64_t adpt_trigger_wait : 4; /**< [ 57: 54](R/W) Wait time for after triggering adaptation before checking adaptation status. Set
+ to a minimum of 3. Set to the desired value before or at the same time as
+ setting [RST_ADPT_RST_SM] to zero. */
+ uint64_t reserved_58_63 : 6;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rst2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rst2_bcfg bdk_gsernx_lanex_rst2_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RST2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RST2_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000320ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RST2_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RST2_BCFG(a,b) bdk_gsernx_lanex_rst2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RST2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RST2_BCFG(a,b) "GSERNX_LANEX_RST2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RST2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RST2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RST2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rst_cnt1_bcfg
+ *
+ * GSER Lane Reset State Machine Delay Count Register 1
+ * Wait counts for the lane reset state machines. All fields must be set
+ * before bringing the lane out of reset.
+ */
union bdk_gsernx_lanex_rst_cnt1_bcfg
{
    uint64_t u; /* Raw 64-bit register value. */
    struct bdk_gsernx_lanex_rst_cnt1_bcfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_63           : 1;
        uint64_t cal_en_wait           : 15; /**< [ 62: 48](R/W) Wait count in service clock cycles after calibration enable before deasserting
                                                                 calibration enable to the PLL. Set this field to one less than the desired
                                                                 number of cycles of delay. The service clock for the GSER PHY is connected to
                                                                 the reference clock used by the primary chip clock PLLs. Typically service clock
                                                                 is 100 MHz. */
        uint64_t reserved_44_47        : 4;
        uint64_t pre_cal_en_wait       : 12; /**< [ 43: 32](R/W) Wait count in service clock cycles after deasserting pwdn before asserting
                                                                 calibration enable to the PLL. Set this field to one less than the desired
                                                                 number of cycles of delay. */
        uint64_t reserved_25_31        : 7;
        /* NOTE(review): the description below is identical to [PRE_CAL_EN_WAIT]'s;
           this looks like a generated-doc copy/paste. The field name suggests it
           times the PLL state-machine reset instead -- confirm against the HRM. */
        uint64_t pre_pll_sm_reset_wait : 9;  /**< [ 24: 16](R/W) Wait count in service clock cycles after deasserting pwdn before
                                                                 asserting calibration enable to the PLL. Set this field to one less than the
                                                                 desired number of cycles of delay. */
        uint64_t reserved_13_15        : 3;
        uint64_t pre_pwup_wait         : 13; /**< [ 12: 0](R/W) Wait count in service clock cycles after initial trigger before deasserting
                                                                 power down to the PLL. The actual delay will be three cycles more than set
                                                                 here. The common block PLL state machine will typically wait 2^12 cycles before
                                                                 triggering the lane PLL to start. This field allows for staggering startup of
                                                                 different lanes by up to about 80us. */
#else /* Word 0 - Little Endian */
        uint64_t pre_pwup_wait         : 13; /**< [ 12: 0](R/W) Wait count in service clock cycles after initial trigger before deasserting
                                                                 power down to the PLL. The actual delay will be three cycles more than set
                                                                 here. The common block PLL state machine will typically wait 2^12 cycles before
                                                                 triggering the lane PLL to start. This field allows for staggering startup of
                                                                 different lanes by up to about 80us. */
        uint64_t reserved_13_15        : 3;
        /* NOTE(review): same copy/paste concern as the big-endian half above. */
        uint64_t pre_pll_sm_reset_wait : 9;  /**< [ 24: 16](R/W) Wait count in service clock cycles after deasserting pwdn before
                                                                 asserting calibration enable to the PLL. Set this field to one less than the
                                                                 desired number of cycles of delay. */
        uint64_t reserved_25_31        : 7;
        uint64_t pre_cal_en_wait       : 12; /**< [ 43: 32](R/W) Wait count in service clock cycles after deasserting pwdn before asserting
                                                                 calibration enable to the PLL. Set this field to one less than the desired
                                                                 number of cycles of delay. */
        uint64_t reserved_44_47        : 4;
        uint64_t cal_en_wait           : 15; /**< [ 62: 48](R/W) Wait count in service clock cycles after calibration enable before deasserting
                                                                 calibration enable to the PLL. Set this field to one less than the desired
                                                                 number of cycles of delay. The service clock for the GSER PHY is connected to
                                                                 the reference clock used by the primary chip clock PLLs. Typically service clock
                                                                 is 100 MHz. */
        uint64_t reserved_63           : 1;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_rst_cnt1_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_rst_cnt1_bcfg bdk_gsernx_lanex_rst_cnt1_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RST_CNT1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RST_CNT1_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000330ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RST_CNT1_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RST_CNT1_BCFG(a,b) bdk_gsernx_lanex_rst_cnt1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RST_CNT1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RST_CNT1_BCFG(a,b) "GSERNX_LANEX_RST_CNT1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RST_CNT1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RST_CNT1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RST_CNT1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rst_cnt2_bcfg
+ *
+ * GSER Lane Reset State Machine Delay Count Register 2
+ * Wait counts for the lane reset state machines. All fields must be set
+ * before bringing the lane out of reset.
+ */
union bdk_gsernx_lanex_rst_cnt2_bcfg
{
    uint64_t u; /* Raw 64-bit register value. */
    struct bdk_gsernx_lanex_rst_cnt2_bcfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_57_63        : 7;
        uint64_t rx_pre_qac_sel_wait   : 9;  /**< [ 56: 48](R/W) Wait count in service clock cycles after the deasserting reset to
                                                                 the QAC filter logic before asserting select to the q and e pipe qac
                                                                 filters. Set this field to one less than the desired number of
                                                                 cycles of delay. */
        uint64_t reserved_46_47        : 2;
        uint64_t txrx_pre_pwup_wait    : 14; /**< [ 45: 32](R/W) Wait count in service clock cycles after the lane PLL exits reset before
                                                                 deasserting power down signals to the transmitter and receiver. Set this field
                                                                 to three less than the desired number of cycles of delay. */
        uint64_t reserved_29_31        : 3;
        uint64_t pre_pdiv_reset_wait   : 13; /**< [ 28: 16](R/W) Reserved.
                                                                 Internal:
                                                                 The lane PLL no longer has a postdivider
                                                                 reset. (This was the wait count in service clock cycles after
                                                                 deasserting reset before deasserting reset to the PLL
                                                                 postdivider. Set this field to one less than the desired number of
                                                                 cycles of delay.) */
        uint64_t reserved_12_15        : 4;
        uint64_t pre_pll_reset_wait    : 12; /**< [ 11: 0](R/W) Wait count in service clock cycles after calibration enable deasserts
                                                                 before deasserting reset to the PLL. Set this field to one less
                                                                 than the desired number of cycles of delay. */
#else /* Word 0 - Little Endian */
        uint64_t pre_pll_reset_wait    : 12; /**< [ 11: 0](R/W) Wait count in service clock cycles after calibration enable deasserts
                                                                 before deasserting reset to the PLL. Set this field to one less
                                                                 than the desired number of cycles of delay. */
        uint64_t reserved_12_15        : 4;
        uint64_t pre_pdiv_reset_wait   : 13; /**< [ 28: 16](R/W) Reserved.
                                                                 Internal:
                                                                 The lane PLL no longer has a postdivider
                                                                 reset. (This was the wait count in service clock cycles after
                                                                 deasserting reset before deasserting reset to the PLL
                                                                 postdivider. Set this field to one less than the desired number of
                                                                 cycles of delay.) */
        uint64_t reserved_29_31        : 3;
        uint64_t txrx_pre_pwup_wait    : 14; /**< [ 45: 32](R/W) Wait count in service clock cycles after the lane PLL exits reset before
                                                                 deasserting power down signals to the transmitter and receiver. Set this field
                                                                 to three less than the desired number of cycles of delay. */
        uint64_t reserved_46_47        : 2;
        uint64_t rx_pre_qac_sel_wait   : 9;  /**< [ 56: 48](R/W) Wait count in service clock cycles after the deasserting reset to
                                                                 the QAC filter logic before asserting select to the q and e pipe qac
                                                                 filters. Set this field to one less than the desired number of
                                                                 cycles of delay. */
        uint64_t reserved_57_63        : 7;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_rst_cnt2_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_rst_cnt2_bcfg bdk_gsernx_lanex_rst_cnt2_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RST_CNT2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RST_CNT2_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000340ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RST_CNT2_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RST_CNT2_BCFG(a,b) bdk_gsernx_lanex_rst_cnt2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RST_CNT2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RST_CNT2_BCFG(a,b) "GSERNX_LANEX_RST_CNT2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RST_CNT2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RST_CNT2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RST_CNT2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rst_cnt3_bcfg
+ *
+ * GSER Lane Reset State Machine Delay Count Register 3
+ * Wait counts for the lane reset state machines. All fields must be set
+ * before bringing the lane out of reset.
+ */
union bdk_gsernx_lanex_rst_cnt3_bcfg
{
    uint64_t u; /* Raw 64-bit register value. */
    struct bdk_gsernx_lanex_rst_cnt3_bcfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_59_63        : 5;
        uint64_t rx_pre_run_wait       : 11; /**< [ 58: 48](R/W) Wait count in service clock cycles after deasserting reset to the
                                                                 baseline wander correction logic before indicating that the receiver
                                                                 is ready. Set this field to one less than the desired number of
                                                                 cycles of delay. */
        uint64_t reserved_41_47        : 7;
        uint64_t pre_rst_iq_wait       : 9;  /**< [ 40: 32](R/W) Wait count in service clock cycles after deasserting reset to the
                                                                 receiver clock divider before deasserting reset to the i, q, and e
                                                                 pipe interpolators. Set this field to one less than the desired
                                                                 number of cycles of delay. */
        uint64_t reserved_25_31        : 7;
        uint64_t pre_tx_div_rst_wait   : 9;  /**< [ 24: 16](R/W) Wait count in service clock cycles after deasserting reset to the duty cycle
                                                                 correctors in the transmitter before deasserting reset to the transmitter clock
                                                                 divider. Set this field to one less than the desired number of cycles of
                                                                 delay. */
        uint64_t reserved_9_15         : 7;
        uint64_t pre_en_cdrfsm_wait    : 9;  /**< [  8: 0](R/W) Wait count in service clock cycles after asserting power up to the
                                                                 custom receiver before enabling the CDR finite state machine. Set
                                                                 this field to one less than the desired number of cycles of delay. */
#else /* Word 0 - Little Endian */
        uint64_t pre_en_cdrfsm_wait    : 9;  /**< [  8: 0](R/W) Wait count in service clock cycles after asserting power up to the
                                                                 custom receiver before enabling the CDR finite state machine. Set
                                                                 this field to one less than the desired number of cycles of delay. */
        uint64_t reserved_9_15         : 7;
        uint64_t pre_tx_div_rst_wait   : 9;  /**< [ 24: 16](R/W) Wait count in service clock cycles after deasserting reset to the duty cycle
                                                                 correctors in the transmitter before deasserting reset to the transmitter clock
                                                                 divider. Set this field to one less than the desired number of cycles of
                                                                 delay. */
        uint64_t reserved_25_31        : 7;
        uint64_t pre_rst_iq_wait       : 9;  /**< [ 40: 32](R/W) Wait count in service clock cycles after deasserting reset to the
                                                                 receiver clock divider before deasserting reset to the i, q, and e
                                                                 pipe interpolators. Set this field to one less than the desired
                                                                 number of cycles of delay. */
        uint64_t reserved_41_47        : 7;
        uint64_t rx_pre_run_wait       : 11; /**< [ 58: 48](R/W) Wait count in service clock cycles after deasserting reset to the
                                                                 baseline wander correction logic before indicating that the receiver
                                                                 is ready. Set this field to one less than the desired number of
                                                                 cycles of delay. */
        uint64_t reserved_59_63        : 5;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_rst_cnt3_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_rst_cnt3_bcfg bdk_gsernx_lanex_rst_cnt3_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RST_CNT3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RST_CNT3_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000350ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RST_CNT3_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RST_CNT3_BCFG(a,b) bdk_gsernx_lanex_rst_cnt3_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RST_CNT3_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RST_CNT3_BCFG(a,b) "GSERNX_LANEX_RST_CNT3_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RST_CNT3_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RST_CNT3_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RST_CNT3_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rst_cnt4_bcfg
+ *
+ * GSER Lane Reset State Machine Delay Count Register 4
+ * Wait counts for the lane reset state machines. All fields must be set
+ * before bringing the lane out of reset.
+ */
union bdk_gsernx_lanex_rst_cnt4_bcfg
{
    uint64_t u; /* Raw 64-bit register value. */
    struct bdk_gsernx_lanex_rst_cnt4_bcfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_57_63        : 7;
        uint64_t svc_clk_freq          : 1;  /**< [ 56: 56](R/W) For diagnostic use only.
                                                                 Internal:
                                                                 This bit reserved for future enhancements. The RTL to use it is not coded. Freq selection
                                                                 for service clock as used in the reset state machine. 0 = 100 MHz. 1 = 156.25 MHz. This
                                                                 scales only the wait counts not set via CSR registers. */
        uint64_t reserved_50_55        : 6;
        uint64_t blwc_reset_wait       : 18; /**< [ 49: 32](R/W) Wait count in service clock cycles after deasserting reset to the
                                                                 CDR FSM before deasserting reset to the baseline wander correction
                                                                 circuit (BLWC). The power-up document specifies this as 16 service
                                                                 clock cycles, but verbal communication says that's only correct for
                                                                 cases of small frequency offset between the lane PLL and the
                                                                 received data stream clock, i.e., it doesn't apply for SSC (except
                                                                 PCIe). Since the actual requirement is not specified, this field
                                                                 allows for the full range of the counter in the receiver reset state
                                                                 machine. */
        uint64_t reserved_20_31        : 12;
        uint64_t dfe_afe_oscal_wait    : 20; /**< [ 19: 0](R/W) Maximum wait count in service clock cycles after triggering the dfe
                                                                 and afe offset calibration sequences before deasserting
                                                                 reset_voter. Normally the receiver reset state machine will move on
                                                                 when DFE and AFE offset calibration is complete. This is a time-out
                                                                 parameter in case the offset calibration state machines do not
                                                                 complete. Set this field to one less than the desired number of
                                                                 cycles of delay. */
#else /* Word 0 - Little Endian */
        uint64_t dfe_afe_oscal_wait    : 20; /**< [ 19: 0](R/W) Maximum wait count in service clock cycles after triggering the dfe
                                                                 and afe offset calibration sequences before deasserting
                                                                 reset_voter. Normally the receiver reset state machine will move on
                                                                 when DFE and AFE offset calibration is complete. This is a time-out
                                                                 parameter in case the offset calibration state machines do not
                                                                 complete. Set this field to one less than the desired number of
                                                                 cycles of delay. */
        uint64_t reserved_20_31        : 12;
        uint64_t blwc_reset_wait       : 18; /**< [ 49: 32](R/W) Wait count in service clock cycles after deasserting reset to the
                                                                 CDR FSM before deasserting reset to the baseline wander correction
                                                                 circuit (BLWC). The power-up document specifies this as 16 service
                                                                 clock cycles, but verbal communication says that's only correct for
                                                                 cases of small frequency offset between the lane PLL and the
                                                                 received data stream clock, i.e., it doesn't apply for SSC (except
                                                                 PCIe). Since the actual requirement is not specified, this field
                                                                 allows for the full range of the counter in the receiver reset state
                                                                 machine. */
        uint64_t reserved_50_55        : 6;
        uint64_t svc_clk_freq          : 1;  /**< [ 56: 56](R/W) For diagnostic use only.
                                                                 Internal:
                                                                 This bit reserved for future enhancements. The RTL to use it is not coded. Freq selection
                                                                 for service clock as used in the reset state machine. 0 = 100 MHz. 1 = 156.25 MHz. This
                                                                 scales only the wait counts not set via CSR registers. */
        uint64_t reserved_57_63        : 7;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_rst_cnt4_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_rst_cnt4_bcfg bdk_gsernx_lanex_rst_cnt4_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RST_CNT4_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RST_CNT4_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000360ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RST_CNT4_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RST_CNT4_BCFG(a,b) bdk_gsernx_lanex_rst_cnt4_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RST_CNT4_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RST_CNT4_BCFG(a,b) "GSERNX_LANEX_RST_CNT4_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RST_CNT4_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RST_CNT4_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RST_CNT4_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rst_cnt5_bcfg
+ *
 * GSER Lane Reset State Machine Delay Count Register 5
+ * Wait counts for the lane reset state machines. All fields must be set
+ * before bringing the lane out of reset.
+ */
union bdk_gsernx_lanex_rst_cnt5_bcfg
{
    uint64_t u; /* Raw 64-bit register value. */
    struct bdk_gsernx_lanex_rst_cnt5_bcfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_33_63        : 31;
        uint64_t idle_exit_wait_en     : 1;  /**< [ 32: 32](R/W) Enable use of [IDLE_EXIT_WAIT] as a limit on the wait time for the receiver
                                                                 electrical idle indicator to deassert after resetting the voter. When
                                                                 [IDLE_EXIT_WAIT_EN] is low, the state machine will wait forever for the
                                                                 electrical idle signal to deassert. Note that the reset state machine will not
                                                                 see idle deassert until after the first idle offset calibration has completed
                                                                 after exiting reset. */
        uint64_t reserved_28_31        : 4;
        uint64_t idle_exit_wait        : 28; /**< [ 27: 0](R/W) Maximum wait count in service clock cycles for the receiver electrical idle
                                                                 indicator to deassert after resetting the voter. If the receiver electrical idle
                                                                 indication remains asserted, the reset state machine will move on after this
                                                                 count expires. */
#else /* Word 0 - Little Endian */
        uint64_t idle_exit_wait        : 28; /**< [ 27: 0](R/W) Maximum wait count in service clock cycles for the receiver electrical idle
                                                                 indicator to deassert after resetting the voter. If the receiver electrical idle
                                                                 indication remains asserted, the reset state machine will move on after this
                                                                 count expires. */
        uint64_t reserved_28_31        : 4;
        uint64_t idle_exit_wait_en     : 1;  /**< [ 32: 32](R/W) Enable use of [IDLE_EXIT_WAIT] as a limit on the wait time for the receiver
                                                                 electrical idle indicator to deassert after resetting the voter. When
                                                                 [IDLE_EXIT_WAIT_EN] is low, the state machine will wait forever for the
                                                                 electrical idle signal to deassert. Note that the reset state machine will not
                                                                 see idle deassert until after the first idle offset calibration has completed
                                                                 after exiting reset. */
        uint64_t reserved_33_63        : 31;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_rst_cnt5_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_rst_cnt5_bcfg bdk_gsernx_lanex_rst_cnt5_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RST_CNT5_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RST_CNT5_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000370ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RST_CNT5_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RST_CNT5_BCFG(a,b) bdk_gsernx_lanex_rst_cnt5_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RST_CNT5_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RST_CNT5_BCFG(a,b) "GSERNX_LANEX_RST_CNT5_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RST_CNT5_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RST_CNT5_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RST_CNT5_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rstclkmsk_bcfg
+ *
+ * GSER Lane Reset State Machine Transmit Clock Alignment Register
+ * Controls for transmit alignment of lanes within a link requiring aligned transmit
+ * data.
+ */
union bdk_gsernx_lanex_rstclkmsk_bcfg
{
    uint64_t u; /* Raw 64-bit register value. */
    struct bdk_gsernx_lanex_rstclkmsk_bcfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_44_63           : 20;
        uint64_t txdivrst_algn_qlm_mask   : 4;  /**< [ 43: 40](R/W) Selection control for which QLMs in this QLM's link group to align in timing the
                                                                    deassertion of reset to this lane's transmitter's clock divider.
                                                                    \<0\> = Wait for QLM 0.
                                                                    \<1\> = Wait for QLM 1.
                                                                    \<2\> = Wait for QLM 2.
                                                                    \<3\> = Wait for QLM 3.

                                                                    The bit corresponding to the current QLM is ignored. */
        uint64_t reserved_36_39           : 4;
        uint64_t txdivrst_algn_lane_mask  : 4;  /**< [ 35: 32](R/W) Selection control for which lanes in the current QLM to align in timing the
                                                                    deassertion of reset to this lane's transmitter's clock divider.
                                                                    \<0\> = Wait for lane 0.
                                                                    \<1\> = Wait for lane 1.
                                                                    \<2\> = Wait for lane 2.
                                                                    \<3\> = Wait for lane 3.

                                                                    The bit corresponding to the current lane is ignored. */
        uint64_t reserved_21_31           : 11;
        uint64_t txdivrst_algn_wait_en    : 1;  /**< [ 20: 20](R/W) Enable use of [TXDIVRST_ALGN_WAIT] as a time out waiting for other lanes to be
                                                                    ready to start their divided transmit clocks. With this bit cleared the lane
                                                                    will wait indefinitely. */
        uint64_t txdivrst_algn_wait       : 20; /**< [ 19: 0](R/W) Maximum wait count in service clock cycles, after this lane is ready to start
                                                                    its divided transmit clock, for other lanes in the link to be ready to start
                                                                    their divided transmit clocks. This is the maximum wait time, after which the
                                                                    state machine will move on, whether the other lanes have indicated ready or not. */
#else /* Word 0 - Little Endian */
        uint64_t txdivrst_algn_wait       : 20; /**< [ 19: 0](R/W) Maximum wait count in service clock cycles, after this lane is ready to start
                                                                    its divided transmit clock, for other lanes in the link to be ready to start
                                                                    their divided transmit clocks. This is the maximum wait time, after which the
                                                                    state machine will move on, whether the other lanes have indicated ready or not. */
        uint64_t txdivrst_algn_wait_en    : 1;  /**< [ 20: 20](R/W) Enable use of [TXDIVRST_ALGN_WAIT] as a time out waiting for other lanes to be
                                                                    ready to start their divided transmit clocks. With this bit cleared the lane
                                                                    will wait indefinitely. */
        uint64_t reserved_21_31           : 11;
        uint64_t txdivrst_algn_lane_mask  : 4;  /**< [ 35: 32](R/W) Selection control for which lanes in the current QLM to align in timing the
                                                                    deassertion of reset to this lane's transmitter's clock divider.
                                                                    \<0\> = Wait for lane 0.
                                                                    \<1\> = Wait for lane 1.
                                                                    \<2\> = Wait for lane 2.
                                                                    \<3\> = Wait for lane 3.

                                                                    The bit corresponding to the current lane is ignored. */
        uint64_t reserved_36_39           : 4;
        uint64_t txdivrst_algn_qlm_mask   : 4;  /**< [ 43: 40](R/W) Selection control for which QLMs in this QLM's link group to align in timing the
                                                                    deassertion of reset to this lane's transmitter's clock divider.
                                                                    \<0\> = Wait for QLM 0.
                                                                    \<1\> = Wait for QLM 1.
                                                                    \<2\> = Wait for QLM 2.
                                                                    \<3\> = Wait for QLM 3.

                                                                    The bit corresponding to the current QLM is ignored. */
        uint64_t reserved_44_63           : 20;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_rstclkmsk_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_rstclkmsk_bcfg bdk_gsernx_lanex_rstclkmsk_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RSTCLKMSK_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RSTCLKMSK_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000470ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RSTCLKMSK_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RSTCLKMSK_BCFG(a,b) bdk_gsernx_lanex_rstclkmsk_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RSTCLKMSK_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RSTCLKMSK_BCFG(a,b) "GSERNX_LANEX_RSTCLKMSK_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RSTCLKMSK_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RSTCLKMSK_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RSTCLKMSK_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_0_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 0
+ * Register controls for postcursor overrides from c2 through c9. Each
+ * override setting has a corresponding enable bit which will cause the
+ * calibration control logic to use the override register setting instead
+ * of the calibration result.
+ */
union bdk_gsernx_lanex_rx_0_bcfg
{
    uint64_t u; /* Raw 64-bit register value. */
    struct bdk_gsernx_lanex_rx_0_bcfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_63           : 1;
        uint64_t c9_ovrd_en            : 1; /**< [ 62: 62](R/W) Enable use of [C9_OVRD]. */
        uint64_t c9_ovrd               : 6; /**< [ 61: 56](R/W) 9th postcursor override value. */
        uint64_t reserved_55           : 1;
        uint64_t c8_ovrd_en            : 1; /**< [ 54: 54](R/W) Enable use of [C8_OVRD]. */
        uint64_t c8_ovrd               : 6; /**< [ 53: 48](R/W) 8th postcursor override value. */
        uint64_t reserved_47           : 1;
        uint64_t c7_ovrd_en            : 1; /**< [ 46: 46](R/W) Enable use of [C7_OVRD]. */
        uint64_t c7_ovrd               : 6; /**< [ 45: 40](R/W) 7th postcursor override value. */
        uint64_t reserved_39           : 1;
        uint64_t c6_ovrd_en            : 1; /**< [ 38: 38](R/W) Enable use of [C6_OVRD]. */
        uint64_t c6_ovrd               : 6; /**< [ 37: 32](R/W) 6th postcursor override value. */
        uint64_t reserved_31           : 1;
        uint64_t c5_ovrd_en            : 1; /**< [ 30: 30](R/W) Enable use of [C5_OVRD]. */
        uint64_t c5_ovrd               : 6; /**< [ 29: 24](R/W) 5th postcursor override value. */
        uint64_t reserved_23           : 1;
        uint64_t c4_ovrd_en            : 1; /**< [ 22: 22](R/W) Enable use of [C4_OVRD]. */
        uint64_t c4_ovrd               : 6; /**< [ 21: 16](R/W) 4th postcursor value override. */
        uint64_t reserved_15           : 1;
        uint64_t c3_ovrd_en            : 1; /**< [ 14: 14](R/W) Enable use of [C3_OVRD]. */
        uint64_t c3_ovrd               : 6; /**< [ 13: 8](R/W) 3rd postcursor override value. */
        uint64_t reserved_7            : 1;
        uint64_t c2_ovrd_en            : 1; /**< [  6: 6](R/W) Enable use of [C2_OVRD]. */
        uint64_t c2_ovrd               : 6; /**< [  5: 0](R/W) Second postcursor override value. */
#else /* Word 0 - Little Endian */
        uint64_t c2_ovrd               : 6; /**< [  5: 0](R/W) Second postcursor override value. */
        uint64_t c2_ovrd_en            : 1; /**< [  6: 6](R/W) Enable use of [C2_OVRD]. */
        uint64_t reserved_7            : 1;
        uint64_t c3_ovrd               : 6; /**< [ 13: 8](R/W) 3rd postcursor override value. */
        uint64_t c3_ovrd_en            : 1; /**< [ 14: 14](R/W) Enable use of [C3_OVRD]. */
        uint64_t reserved_15           : 1;
        uint64_t c4_ovrd               : 6; /**< [ 21: 16](R/W) 4th postcursor value override. */
        uint64_t c4_ovrd_en            : 1; /**< [ 22: 22](R/W) Enable use of [C4_OVRD]. */
        uint64_t reserved_23           : 1;
        uint64_t c5_ovrd               : 6; /**< [ 29: 24](R/W) 5th postcursor override value. */
        uint64_t c5_ovrd_en            : 1; /**< [ 30: 30](R/W) Enable use of [C5_OVRD]. */
        uint64_t reserved_31           : 1;
        uint64_t c6_ovrd               : 6; /**< [ 37: 32](R/W) 6th postcursor override value. */
        uint64_t c6_ovrd_en            : 1; /**< [ 38: 38](R/W) Enable use of [C6_OVRD]. */
        uint64_t reserved_39           : 1;
        uint64_t c7_ovrd               : 6; /**< [ 45: 40](R/W) 7th postcursor override value. */
        uint64_t c7_ovrd_en            : 1; /**< [ 46: 46](R/W) Enable use of [C7_OVRD]. */
        uint64_t reserved_47           : 1;
        uint64_t c8_ovrd               : 6; /**< [ 53: 48](R/W) 8th postcursor override value. */
        uint64_t c8_ovrd_en            : 1; /**< [ 54: 54](R/W) Enable use of [C8_OVRD]. */
        uint64_t reserved_55           : 1;
        uint64_t c9_ovrd               : 6; /**< [ 61: 56](R/W) 9th postcursor override value. */
        uint64_t c9_ovrd_en            : 1; /**< [ 62: 62](R/W) Enable use of [C9_OVRD]. */
        uint64_t reserved_63           : 1;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_rx_0_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_rx_0_bcfg bdk_gsernx_lanex_rx_0_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_0_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_0_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000c60ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_0_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_0_BCFG(a,b) bdk_gsernx_lanex_rx_0_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_0_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_0_BCFG(a,b) "GSERNX_LANEX_RX_0_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_0_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_0_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_0_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_0_bsts
+ *
+ * GSER Lane RX Base Status Register 0
+ * Status registers for postcursor values (either calibration results or
+ * overrides) from c2 through c9. Values in this register are only valid if
+ * GSERN()_LANE()_RX_5_BSTS[DFE_ADAPT_STATUS] is deasserted (indicating DFE adaptation has
+ * completed), or if the corresponding CSR override enable is asserted.
+ */
+union bdk_gsernx_lanex_rx_0_bsts
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_0_bsts_s
+ {
+ /* Same fields declared in both bit orders; the preprocessor selects the
+    host-endian variant. Do not reorder fields by hand. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_62_63 : 2;
+ uint64_t c9 : 6; /**< [ 61: 56](RO/H) 9th postcursor value. */
+ uint64_t reserved_54_55 : 2;
+ uint64_t c8 : 6; /**< [ 53: 48](RO/H) 8th postcursor value. */
+ uint64_t reserved_46_47 : 2;
+ uint64_t c7 : 6; /**< [ 45: 40](RO/H) 7th postcursor value. */
+ uint64_t reserved_38_39 : 2;
+ uint64_t c6 : 6; /**< [ 37: 32](RO/H) 6th postcursor value. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t c5 : 6; /**< [ 29: 24](RO/H) 5th postcursor value. */
+ uint64_t reserved_22_23 : 2;
+ uint64_t c4 : 6; /**< [ 21: 16](RO/H) 4th postcursor value. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t c3 : 6; /**< [ 13: 8](RO/H) 3rd postcursor value. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t c2 : 6; /**< [ 5: 0](RO/H) 2nd postcursor value. */
+#else /* Word 0 - Little Endian */
+ uint64_t c2 : 6; /**< [ 5: 0](RO/H) 2nd postcursor value. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t c3 : 6; /**< [ 13: 8](RO/H) 3rd postcursor value. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t c4 : 6; /**< [ 21: 16](RO/H) 4th postcursor value. */
+ uint64_t reserved_22_23 : 2;
+ uint64_t c5 : 6; /**< [ 29: 24](RO/H) 5th postcursor value. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t c6 : 6; /**< [ 37: 32](RO/H) 6th postcursor value. */
+ uint64_t reserved_38_39 : 2;
+ uint64_t c7 : 6; /**< [ 45: 40](RO/H) 7th postcursor value. */
+ uint64_t reserved_46_47 : 2;
+ uint64_t c8 : 6; /**< [ 53: 48](RO/H) 8th postcursor value. */
+ uint64_t reserved_54_55 : 2;
+ uint64_t c9 : 6; /**< [ 61: 56](RO/H) 9th postcursor value. */
+ uint64_t reserved_62_63 : 2;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_0_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_0_bsts bdk_gsernx_lanex_rx_0_bsts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_0_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_0_BSTS(unsigned long a, unsigned long b)
+{
+ /* CSR address: base + 0x1000000 per GSERN module (a) + 0x10000 per lane
+    (b). Valid only on CN9XXX with a<=7 and b<=4; the "& 0x7" masks only
+    bound the arithmetic. Out-of-range arguments call __bdk_csr_fatal(). */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001650ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_0_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_0_BSTS(a,b) bdk_gsernx_lanex_rx_0_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_0_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_0_BSTS(a,b) "GSERNX_LANEX_RX_0_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_0_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_0_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_0_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_10_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 10
+ * Configuration registers for LMS adaptation. Deadband increment settings for adaptation.
+ */
+union bdk_gsernx_lanex_rx_10_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_10_bcfg_s
+ {
+ /* Same fields declared in both bit orders; the preprocessor selects the
+    host-endian variant. Do not reorder fields by hand. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_60_63 : 4;
+ uint64_t ctlelte_deadband_inc : 12; /**< [ 59: 48](R/W) CTLELTE adaptation deadband increment setting.
+ 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
+ uint64_t ctlez_deadband_inc : 12; /**< [ 47: 36](R/W) CTLEZ adaptation deadband increment setting.
+ 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
+ uint64_t ctle_deadband_inc : 12; /**< [ 35: 24](R/W) CTLE adaptation deadband increment setting.
+ 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
+ uint64_t dfe_deadband_inc : 12; /**< [ 23: 12](R/W) Coeff adaptation deadband increment setting.
+ 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
+ uint64_t vga_deadband_inc : 12; /**< [ 11: 0](R/W) VGA adaptation deadband increment setting.
+ 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
+#else /* Word 0 - Little Endian */
+ uint64_t vga_deadband_inc : 12; /**< [ 11: 0](R/W) VGA adaptation deadband increment setting.
+ 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
+ uint64_t dfe_deadband_inc : 12; /**< [ 23: 12](R/W) Coeff adaptation deadband increment setting.
+ 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
+ uint64_t ctle_deadband_inc : 12; /**< [ 35: 24](R/W) CTLE adaptation deadband increment setting.
+ 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
+ uint64_t ctlez_deadband_inc : 12; /**< [ 47: 36](R/W) CTLEZ adaptation deadband increment setting.
+ 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
+ uint64_t ctlelte_deadband_inc : 12; /**< [ 59: 48](R/W) CTLELTE adaptation deadband increment setting.
+ 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
+ uint64_t reserved_60_63 : 4;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_10_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_10_bcfg bdk_gsernx_lanex_rx_10_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_10_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_10_BCFG(unsigned long a, unsigned long b)
+{
+ /* CSR address: base + 0x1000000 per GSERN module (a) + 0x10000 per lane
+    (b). Valid only on CN9XXX with a<=7 and b<=4; the "& 0x7" masks only
+    bound the arithmetic. Out-of-range arguments call __bdk_csr_fatal(). */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000d00ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_10_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_10_BCFG(a,b) bdk_gsernx_lanex_rx_10_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_10_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_10_BCFG(a,b) "GSERNX_LANEX_RX_10_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_10_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_10_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_10_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_10_bsts
+ *
+ * GSER Lane RX Base Status Register 10
+ * Status registers for BLWC LMS adaptation. Current BLWC Deadband settings for adaptation.
+ */
+union bdk_gsernx_lanex_rx_10_bsts
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_10_bsts_s
+ {
+ /* Same fields declared in both bit orders; the preprocessor selects the
+    host-endian variant. Do not reorder fields by hand. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t blwc_subrate_now : 16; /**< [ 63: 48](RO/H) BLWC subrate_now counter value. Only valid when
+ GSERN()_LANE()_RX_10_BSTS[BLWC_ADAPT_STATUS] is clear. */
+ uint64_t reserved_44_47 : 4;
+ uint64_t blwc_upv_count : 16; /**< [ 43: 28](RO/H) BLWC up-vote counter value. Only valid when
+ GSERN()_LANE()_RX_10_BSTS[BLWC_ADAPT_STATUS] is clear. */
+ uint64_t blwc_adapt_status : 1; /**< [ 27: 27](RO/H) BLWC adaptation status. When 0, training is inactive. When 1, training is active. */
+ uint64_t blwc_adapt_count : 15; /**< [ 26: 12](RO/H) BLWC adaptation timer current count value. 15-bit field, maximum value 0x7FFF.
+ Only valid when GSERN()_LANE()_RX_10_BSTS[BLWC_ADAPT_STATUS] is clear. */
+ uint64_t blwc_deadband_now : 12; /**< [ 11: 0](RO/H) Current 12-bit integer value of BLWC adaptation deadband
+ setting. Note that the 8 fraction bits of the accumulator are not
+ reported. Only valid when GSERN()_LANE()_RX_10_BSTS[BLWC_ADAPT_STATUS] is clear. */
+#else /* Word 0 - Little Endian */
+ uint64_t blwc_deadband_now : 12; /**< [ 11: 0](RO/H) Current 12-bit integer value of BLWC adaptation deadband
+ setting. Note that the 8 fraction bits of the accumulator are not
+ reported. Only valid when GSERN()_LANE()_RX_10_BSTS[BLWC_ADAPT_STATUS] is clear. */
+ uint64_t blwc_adapt_count : 15; /**< [ 26: 12](RO/H) BLWC adaptation timer current count value. 15-bit field, maximum value 0x7FFF.
+ Only valid when GSERN()_LANE()_RX_10_BSTS[BLWC_ADAPT_STATUS] is clear. */
+ uint64_t blwc_adapt_status : 1; /**< [ 27: 27](RO/H) BLWC adaptation status. When 0, training is inactive. When 1, training is active. */
+ uint64_t blwc_upv_count : 16; /**< [ 43: 28](RO/H) BLWC up-vote counter value. Only valid when
+ GSERN()_LANE()_RX_10_BSTS[BLWC_ADAPT_STATUS] is clear. */
+ uint64_t reserved_44_47 : 4;
+ uint64_t blwc_subrate_now : 16; /**< [ 63: 48](RO/H) BLWC subrate_now counter value. Only valid when
+ GSERN()_LANE()_RX_10_BSTS[BLWC_ADAPT_STATUS] is clear. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_10_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_10_bsts bdk_gsernx_lanex_rx_10_bsts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_10_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_10_BSTS(unsigned long a, unsigned long b)
+{
+ /* CSR address: base + 0x1000000 per GSERN module (a) + 0x10000 per lane
+    (b). Valid only on CN9XXX with a<=7 and b<=4; the "& 0x7" masks only
+    bound the arithmetic. Out-of-range arguments call __bdk_csr_fatal(). */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900016f0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_10_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_10_BSTS(a,b) bdk_gsernx_lanex_rx_10_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_10_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_10_BSTS(a,b) "GSERNX_LANEX_RX_10_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_10_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_10_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_10_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_11_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 11
+ * Configuration registers for Offset Compensation.
+ */
+union bdk_gsernx_lanex_rx_11_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_11_bcfg_s
+ {
+ /* Same fields declared in both bit orders; the preprocessor selects the
+    host-endian variant. Do not reorder fields by hand. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t afe_oscomp_delay : 8; /**< [ 15: 8](R/W) Start delay for the AFE offset compensation, after DFE offset
+ compensation completes. */
+ uint64_t dfe_oscomp_delay : 8; /**< [ 7: 0](R/W) Start delay for the DFE offset compensation. */
+#else /* Word 0 - Little Endian */
+ uint64_t dfe_oscomp_delay : 8; /**< [ 7: 0](R/W) Start delay for the DFE offset compensation. */
+ uint64_t afe_oscomp_delay : 8; /**< [ 15: 8](R/W) Start delay for the AFE offset compensation, after DFE offset
+ compensation completes. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_11_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_11_bcfg bdk_gsernx_lanex_rx_11_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_11_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_11_BCFG(unsigned long a, unsigned long b)
+{
+ /* CSR address: base + 0x1000000 per GSERN module (a) + 0x10000 per lane
+    (b). Valid only on CN9XXX with a<=7 and b<=4; the "& 0x7" masks only
+    bound the arithmetic. Out-of-range arguments call __bdk_csr_fatal(). */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000d10ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_11_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_11_BCFG(a,b) bdk_gsernx_lanex_rx_11_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_11_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_11_BCFG(a,b) "GSERNX_LANEX_RX_11_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_11_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_11_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_11_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_11_bsts
+ *
+ * GSER Lane RX Base Status Register 11
+ * Status registers for PREVGA_GN LMS adaptation. Current PREVGA_GN Deadband settings for adaptation.
+ */
+union bdk_gsernx_lanex_rx_11_bsts
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_11_bsts_s
+ {
+ /* Same fields declared in both bit orders; the preprocessor selects the
+    host-endian variant. Do not reorder fields by hand. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t prevga_gn_subrate_now : 16; /**< [ 63: 48](RO/H) PREVGA_GN subrate_now counter value. Only valid when
+ GSERN()_LANE()_RX_11_BSTS[PREVGA_GN_ADAPT_STATUS] is clear. */
+ uint64_t reserved_44_47 : 4;
+ uint64_t prevga_gn_upv_count : 16; /**< [ 43: 28](RO/H) PREVGA_GN up-vote counter value. Only valid when
+ GSERN()_LANE()_RX_11_BSTS[PREVGA_GN_ADAPT_STATUS] is clear. */
+ uint64_t prevga_gn_adapt_status : 1; /**< [ 27: 27](RO/H) PREVGA_GN adaptation status. When 0, training is inactive. When 1, training is active. */
+ uint64_t prevga_gn_adapt_count : 15; /**< [ 26: 12](RO/H) PREVGA_GN adaptation timer current count value. 15-bit field, maximum value 0x7FFF.
+ Only valid when GSERN()_LANE()_RX_11_BSTS[PREVGA_GN_ADAPT_STATUS] is clear. */
+ uint64_t prevga_gn_deadband_now : 12;/**< [ 11: 0](RO/H) Current 12-bit integer value of PREVGA_GN adaptation deadband
+ setting. Note that the 8 fraction bits of the accumulator are not
+ reported. Only valid when GSERN()_LANE()_RX_11_BSTS[PREVGA_GN_ADAPT_STATUS] is clear. */
+#else /* Word 0 - Little Endian */
+ uint64_t prevga_gn_deadband_now : 12;/**< [ 11: 0](RO/H) Current 12-bit integer value of PREVGA_GN adaptation deadband
+ setting. Note that the 8 fraction bits of the accumulator are not
+ reported. Only valid when GSERN()_LANE()_RX_11_BSTS[PREVGA_GN_ADAPT_STATUS] is clear. */
+ uint64_t prevga_gn_adapt_count : 15; /**< [ 26: 12](RO/H) PREVGA_GN adaptation timer current count value. 15-bit field, maximum value 0x7FFF.
+ Only valid when GSERN()_LANE()_RX_11_BSTS[PREVGA_GN_ADAPT_STATUS] is clear. */
+ uint64_t prevga_gn_adapt_status : 1; /**< [ 27: 27](RO/H) PREVGA_GN adaptation status. When 0, training is inactive. When 1, training is active. */
+ uint64_t prevga_gn_upv_count : 16; /**< [ 43: 28](RO/H) PREVGA_GN up-vote counter value. Only valid when
+ GSERN()_LANE()_RX_11_BSTS[PREVGA_GN_ADAPT_STATUS] is clear. */
+ uint64_t reserved_44_47 : 4;
+ uint64_t prevga_gn_subrate_now : 16; /**< [ 63: 48](RO/H) PREVGA_GN subrate_now counter value. Only valid when
+ GSERN()_LANE()_RX_11_BSTS[PREVGA_GN_ADAPT_STATUS] is clear. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_11_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_11_bsts bdk_gsernx_lanex_rx_11_bsts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_11_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_11_BSTS(unsigned long a, unsigned long b)
+{
+ /* CSR address: base + 0x1000000 per GSERN module (a) + 0x10000 per lane
+    (b). Valid only on CN9XXX with a<=7 and b<=4; the "& 0x7" masks only
+    bound the arithmetic. Out-of-range arguments call __bdk_csr_fatal(). */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001700ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_11_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_11_BSTS(a,b) bdk_gsernx_lanex_rx_11_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_11_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_11_BSTS(a,b) "GSERNX_LANEX_RX_11_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_11_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_11_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_11_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_12_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 12
+ * Configuration registers for AFE Offset Adaptation.
+ */
+union bdk_gsernx_lanex_rx_12_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_12_bcfg_s
+ {
+ /* Same fields declared in both bit orders; the preprocessor selects the
+    host-endian variant. Do not reorder fields by hand. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_52_63 : 12;
+ uint64_t afeos_leak_sgn : 1; /**< [ 51: 51](R/W) AFEOS leak sign. 0 = Positive (add). 1 = Negative (subtract). */
+ uint64_t afeos_deadband : 12; /**< [ 50: 39](R/W) AFE OS adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t afeos_deadband_inc : 12; /**< [ 38: 27](R/W) AFE OS adaptation deadband increment setting.
+ 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
+ uint64_t afeos_leak : 3; /**< [ 26: 24](R/W) AFEOS adaptation leak parameter setting.
+ 0x0 = 1/128.
+ 0x1 = 1/64.
+ 0x2 = 1/32.
+ 0x3 = 1/16.
+ 0x4 = 1/8.
+ 0x5 = 1/4.
+ 0x6 = 1/2.
+ 0x7 = Disabled. */
+ uint64_t reserved_19_23 : 5;
+ uint64_t afeos_mu : 3; /**< [ 18: 16](R/W) AFEOS adaptation mu parameter setting.
+ 0x0 = 1/16.
+ 0x1 = 1/8.
+ 0x2 = 1/4.
+ 0x3 = 1/2.
+ 0x4 = 1.
+ 0x5 = 2.
+ 0x6 = 4.
+ 0x7 = 8. */
+ uint64_t reserved_15 : 1;
+ uint64_t afeos_timer_max : 15; /**< [ 14: 0](R/W) AFEOS adaptation timer maximum count value.
+ 15-bit field, maximum value 0x7FFF. */
+#else /* Word 0 - Little Endian */
+ uint64_t afeos_timer_max : 15; /**< [ 14: 0](R/W) AFEOS adaptation timer maximum count value.
+ 15-bit field, maximum value 0x7FFF. */
+ uint64_t reserved_15 : 1;
+ uint64_t afeos_mu : 3; /**< [ 18: 16](R/W) AFEOS adaptation mu parameter setting.
+ 0x0 = 1/16.
+ 0x1 = 1/8.
+ 0x2 = 1/4.
+ 0x3 = 1/2.
+ 0x4 = 1.
+ 0x5 = 2.
+ 0x6 = 4.
+ 0x7 = 8. */
+ uint64_t reserved_19_23 : 5;
+ uint64_t afeos_leak : 3; /**< [ 26: 24](R/W) AFEOS adaptation leak parameter setting.
+ 0x0 = 1/128.
+ 0x1 = 1/64.
+ 0x2 = 1/32.
+ 0x3 = 1/16.
+ 0x4 = 1/8.
+ 0x5 = 1/4.
+ 0x6 = 1/2.
+ 0x7 = Disabled. */
+ uint64_t afeos_deadband_inc : 12; /**< [ 38: 27](R/W) AFE OS adaptation deadband increment setting.
+ 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
+ uint64_t afeos_deadband : 12; /**< [ 50: 39](R/W) AFE OS adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t afeos_leak_sgn : 1; /**< [ 51: 51](R/W) AFEOS leak sign. 0 = Positive (add). 1 = Negative (subtract). */
+ uint64_t reserved_52_63 : 12;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_12_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_12_bcfg bdk_gsernx_lanex_rx_12_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_12_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_12_BCFG(unsigned long a, unsigned long b)
+{
+ /* CSR address: base + 0x1000000 per GSERN module (a) + 0x10000 per lane
+    (b). Valid only on CN9XXX with a<=7 and b<=4; the "& 0x7" masks only
+    bound the arithmetic. Out-of-range arguments call __bdk_csr_fatal(). */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000d20ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_12_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_12_BCFG(a,b) bdk_gsernx_lanex_rx_12_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_12_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_12_BCFG(a,b) "GSERNX_LANEX_RX_12_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_12_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_12_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_12_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_13_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 13
+ * Configuration registers for AFE LMS adaptation
+ * Adaptation controls for Subrate parameters.
+ */
+union bdk_gsernx_lanex_rx_13_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_13_bcfg_s
+ {
+ /* Same fields declared in both bit orders; the preprocessor selects the
+    host-endian variant. Do not reorder fields by hand. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_35_63 : 29;
+ uint64_t afeos_subrate_scale : 3; /**< [ 34: 32](R/W) AFE subrate now counter scaling value for comparison against the up vote counter.
+ 0x0 = 1/32.
+ 0x1 = 1/16.
+ 0x2 = 3/32.
+ 0x3 = 1/8.
+ 0x4 = 3/16.
+ 0x5 = 1/4.
+ 0x6 = 3/8.
+ 0x7 = 1/2. */
+ uint64_t afeos_subrate_init : 16; /**< [ 31: 16](R/W) Subrate counter initial value. Sets the starting value for the LMS update interval, if
+ subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t afeos_subrate_final : 16; /**< [ 15: 0](R/W) Subrate counter final value. Sets the ending value for the LMS update interval, if subrate
+ gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+#else /* Word 0 - Little Endian */
+ uint64_t afeos_subrate_final : 16; /**< [ 15: 0](R/W) Subrate counter final value. Sets the ending value for the LMS update interval, if subrate
+ gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t afeos_subrate_init : 16; /**< [ 31: 16](R/W) Subrate counter initial value. Sets the starting value for the LMS update interval, if
+ subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t afeos_subrate_scale : 3; /**< [ 34: 32](R/W) AFE subrate now counter scaling value for comparison against the up vote counter.
+ 0x0 = 1/32.
+ 0x1 = 1/16.
+ 0x2 = 3/32.
+ 0x3 = 1/8.
+ 0x4 = 3/16.
+ 0x5 = 1/4.
+ 0x6 = 3/8.
+ 0x7 = 1/2. */
+ uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_13_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_13_bcfg bdk_gsernx_lanex_rx_13_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_13_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_13_BCFG(unsigned long a, unsigned long b)
+{
+ /* CSR address: base + 0x1000000 per GSERN module (a) + 0x10000 per lane
+    (b). Valid only on CN9XXX with a<=7 and b<=4; the "& 0x7" masks only
+    bound the arithmetic. Out-of-range arguments call __bdk_csr_fatal(). */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000d30ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_13_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_13_BCFG(a,b) bdk_gsernx_lanex_rx_13_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_13_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_13_BCFG(a,b) "GSERNX_LANEX_RX_13_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_13_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_13_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_13_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_14_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 14
+ * This register configures LMS adaptation.
+ */
+union bdk_gsernx_lanex_rx_14_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_14_bcfg_s
+ {
+ /* Same fields declared in both bit orders; the preprocessor selects the
+    host-endian variant. Do not reorder fields by hand. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_44_63 : 20;
+ uint64_t c6_c15_limit_hi : 6; /**< [ 43: 38](R/W) C6 to C15 postcursor limit high. */
+ uint64_t c6_c15_limit_lo : 6; /**< [ 37: 32](R/W) C6 to C15 postcursor limit low. */
+ uint64_t reserved_24_31 : 8;
+ uint64_t dfe_c1_deadband : 12; /**< [ 23: 12](R/W) DFE C1 adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t dfe_c1_deadband_inc : 12; /**< [ 11: 0](R/W) DFE C1 adaptation deadband increment setting.
+ 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
+#else /* Word 0 - Little Endian */
+ uint64_t dfe_c1_deadband_inc : 12; /**< [ 11: 0](R/W) DFE C1 adaptation deadband increment setting.
+ 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
+ uint64_t dfe_c1_deadband : 12; /**< [ 23: 12](R/W) DFE C1 adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t reserved_24_31 : 8;
+ uint64_t c6_c15_limit_lo : 6; /**< [ 37: 32](R/W) C6 to C15 postcursor limit low. */
+ uint64_t c6_c15_limit_hi : 6; /**< [ 43: 38](R/W) C6 to C15 postcursor limit high. */
+ uint64_t reserved_44_63 : 20;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_14_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_14_bcfg bdk_gsernx_lanex_rx_14_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_14_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_14_BCFG(unsigned long a, unsigned long b)
+{
+ /* CSR address: base + 0x1000000 per GSERN module (a) + 0x10000 per lane
+    (b). Valid only on CN9XXX with a<=7 and b<=4; the "& 0x7" masks only
+    bound the arithmetic. Out-of-range arguments call __bdk_csr_fatal(). */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000d40ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_14_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_14_BCFG(a,b) bdk_gsernx_lanex_rx_14_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_14_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_14_BCFG(a,b) "GSERNX_LANEX_RX_14_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_14_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_14_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_14_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_15_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 15
+ * This register configures LMS adaptation.
+ */
+union bdk_gsernx_lanex_rx_15_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_15_bcfg_s
+ {
+ /* Same fields declared in both bit orders; the preprocessor selects the
+    host-endian variant. Do not reorder fields by hand. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_62_63 : 2;
+ uint64_t c5_limit_hi : 6; /**< [ 61: 56](R/W) C5 postcursor limit high. */
+ uint64_t c4_limit_hi : 6; /**< [ 55: 50](R/W) C4 postcursor limit high. */
+ uint64_t c3_limit_hi : 6; /**< [ 49: 44](R/W) C3 postcursor limit high. */
+ uint64_t c2_limit_hi : 6; /**< [ 43: 38](R/W) C2 postcursor limit high. */
+ uint64_t c1_limit_hi : 6; /**< [ 37: 32](R/W) C1 postcursor limit high. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t c5_limit_lo : 6; /**< [ 29: 24](R/W) C5 postcursor limit low. */
+ uint64_t c4_limit_lo : 6; /**< [ 23: 18](R/W) C4 postcursor limit low. */
+ uint64_t c3_limit_lo : 6; /**< [ 17: 12](R/W) C3 postcursor limit low. */
+ uint64_t c2_limit_lo : 6; /**< [ 11: 6](R/W) C2 postcursor limit low. */
+ uint64_t c1_limit_lo : 6; /**< [ 5: 0](R/W) C1 postcursor limit low. */
+#else /* Word 0 - Little Endian */
+ uint64_t c1_limit_lo : 6; /**< [ 5: 0](R/W) C1 postcursor limit low. */
+ uint64_t c2_limit_lo : 6; /**< [ 11: 6](R/W) C2 postcursor limit low. */
+ uint64_t c3_limit_lo : 6; /**< [ 17: 12](R/W) C3 postcursor limit low. */
+ uint64_t c4_limit_lo : 6; /**< [ 23: 18](R/W) C4 postcursor limit low. */
+ uint64_t c5_limit_lo : 6; /**< [ 29: 24](R/W) C5 postcursor limit low. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t c1_limit_hi : 6; /**< [ 37: 32](R/W) C1 postcursor limit high. */
+ uint64_t c2_limit_hi : 6; /**< [ 43: 38](R/W) C2 postcursor limit high. */
+ uint64_t c3_limit_hi : 6; /**< [ 49: 44](R/W) C3 postcursor limit high. */
+ uint64_t c4_limit_hi : 6; /**< [ 55: 50](R/W) C4 postcursor limit high. */
+ uint64_t c5_limit_hi : 6; /**< [ 61: 56](R/W) C5 postcursor limit high. */
+ uint64_t reserved_62_63 : 2;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_15_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_15_bcfg bdk_gsernx_lanex_rx_15_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_15_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_15_BCFG(unsigned long a, unsigned long b)
+{
+ /* CSR address: base + 0x1000000 per GSERN module (a) + 0x10000 per lane
+    (b). Valid only on CN9XXX with a<=7 and b<=4; the "& 0x7" masks only
+    bound the arithmetic. Out-of-range arguments call __bdk_csr_fatal(). */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000d50ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_15_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_15_BCFG(a,b) bdk_gsernx_lanex_rx_15_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_15_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_15_BCFG(a,b) "GSERNX_LANEX_RX_15_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_15_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_15_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_15_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_16_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 16
+ * Override registers for LMS adaptation. Deadband settings for adaptation.
+ */
+union bdk_gsernx_lanex_rx_16_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_16_bcfg_s
+ {
+ /* Same fields declared in both bit orders; the preprocessor selects the
+    host-endian variant. Do not reorder fields by hand. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_52_63 : 12;
+ uint64_t ctlez_deadband_now_ovrd_en : 1;/**< [ 51: 51](R/W) Enable use of [CTLEZ_DEADBAND_NOW_OVRD]. */
+ uint64_t ctlez_deadband_now_ovrd : 12;/**< [ 50: 39](R/W) CTLEZ adaptation deadband now override. */
+ uint64_t ctle_deadband_now_ovrd_en : 1;/**< [ 38: 38](R/W) Enable use of [CTLE_DEADBAND_NOW_OVRD]. */
+ uint64_t ctle_deadband_now_ovrd : 12;/**< [ 37: 26](R/W) CTLE adaptation deadband now override. */
+ uint64_t dfe_deadband_now_ovrd_en : 1;/**< [ 25: 25](R/W) Enable use of [DFE_DEADBAND_NOW_OVRD]. */
+ uint64_t dfe_deadband_now_ovrd : 12; /**< [ 24: 13](R/W) Coeff Adaptation deadband now override. */
+ uint64_t vga_deadband_now_ovrd_en : 1;/**< [ 12: 12](R/W) Enable use of [VGA_DEADBAND_NOW_OVRD]. */
+ uint64_t vga_deadband_now_ovrd : 12; /**< [ 11: 0](R/W) VGA adaptation deadband now override. */
+#else /* Word 0 - Little Endian */
+ uint64_t vga_deadband_now_ovrd : 12; /**< [ 11: 0](R/W) VGA adaptation deadband now override. */
+ uint64_t vga_deadband_now_ovrd_en : 1;/**< [ 12: 12](R/W) Enable use of [VGA_DEADBAND_NOW_OVRD]. */
+ uint64_t dfe_deadband_now_ovrd : 12; /**< [ 24: 13](R/W) Coeff Adaptation deadband now override. */
+ uint64_t dfe_deadband_now_ovrd_en : 1;/**< [ 25: 25](R/W) Enable use of [DFE_DEADBAND_NOW_OVRD]. */
+ uint64_t ctle_deadband_now_ovrd : 12;/**< [ 37: 26](R/W) CTLE adaptation deadband now override. */
+ uint64_t ctle_deadband_now_ovrd_en : 1;/**< [ 38: 38](R/W) Enable use of [CTLE_DEADBAND_NOW_OVRD]. */
+ uint64_t ctlez_deadband_now_ovrd : 12;/**< [ 50: 39](R/W) CTLEZ adaptation deadband now override. */
+ uint64_t ctlez_deadband_now_ovrd_en : 1;/**< [ 51: 51](R/W) Enable use of [CTLEZ_DEADBAND_NOW_OVRD]. */
+ uint64_t reserved_52_63 : 12;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_16_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_16_bcfg bdk_gsernx_lanex_rx_16_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_16_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_16_BCFG(unsigned long a, unsigned long b)
+{
+ /* CSR address: base + 0x1000000 per GSERN module (a) + 0x10000 per lane
+    (b). Valid only on CN9XXX with a<=7 and b<=4; the "& 0x7" masks only
+    bound the arithmetic. Out-of-range arguments call __bdk_csr_fatal(). */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000d60ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_16_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_16_BCFG(a,b) bdk_gsernx_lanex_rx_16_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_16_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_16_BCFG(a,b) "GSERNX_LANEX_RX_16_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_16_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_16_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_16_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_17_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 17
+ * Override registers for LMS adaptation. Deadband settings for adaptation.
+ */
+union bdk_gsernx_lanex_rx_17_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_17_bcfg_s
+ {
+ /* Same fields declared in both bit orders; the preprocessor selects the
+    host-endian variant. Do not reorder fields by hand. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_52_63 : 12;
+ uint64_t blwc_deadband_now_ovrd_en : 1;/**< [ 51: 51](R/W) Enable use of [BLWC_DEADBAND_NOW_OVRD]. */
+ uint64_t blwc_deadband_now_ovrd : 12;/**< [ 50: 39](R/W) BLWC adaptation deadband now override. */
+ uint64_t dfe_c1_deadband_now_ovrd_en : 1;/**< [ 38: 38](R/W) Enable use of [DFE_C1_DEADBAND_NOW_OVRD]. */
+ uint64_t dfe_c1_deadband_now_ovrd : 12;/**< [ 37: 26](R/W) DFE C1 Adaptation deadband now override. */
+ uint64_t afeos_deadband_now_ovrd_en : 1;/**< [ 25: 25](R/W) Enable use of [AFEOS_DEADBAND_NOW_OVRD]. */
+ uint64_t afeos_deadband_now_ovrd : 12;/**< [ 24: 13](R/W) AFE OS adaptation deadband now override. */
+ uint64_t ctlelte_deadband_now_ovrd_en : 1;/**< [ 12: 12](R/W) Enable use of [CTLELTE_DEADBAND_NOW_OVRD]. */
+ uint64_t ctlelte_deadband_now_ovrd : 12;/**< [ 11: 0](R/W) CTLELTE adaptation deadband now override. */
+#else /* Word 0 - Little Endian */
+ uint64_t ctlelte_deadband_now_ovrd : 12;/**< [ 11: 0](R/W) CTLELTE adaptation deadband now override. */
+ uint64_t ctlelte_deadband_now_ovrd_en : 1;/**< [ 12: 12](R/W) Enable use of [CTLELTE_DEADBAND_NOW_OVRD]. */
+ uint64_t afeos_deadband_now_ovrd : 12;/**< [ 24: 13](R/W) AFE OS adaptation deadband now override. */
+ uint64_t afeos_deadband_now_ovrd_en : 1;/**< [ 25: 25](R/W) Enable use of [AFEOS_DEADBAND_NOW_OVRD]. */
+ uint64_t dfe_c1_deadband_now_ovrd : 12;/**< [ 37: 26](R/W) DFE C1 Adaptation deadband now override. */
+ uint64_t dfe_c1_deadband_now_ovrd_en : 1;/**< [ 38: 38](R/W) Enable use of [DFE_C1_DEADBAND_NOW_OVRD]. */
+ uint64_t blwc_deadband_now_ovrd : 12;/**< [ 50: 39](R/W) BLWC adaptation deadband now override. */
+ uint64_t blwc_deadband_now_ovrd_en : 1;/**< [ 51: 51](R/W) Enable use of [BLWC_DEADBAND_NOW_OVRD]. */
+ uint64_t reserved_52_63 : 12;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_17_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_17_bcfg bdk_gsernx_lanex_rx_17_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_17_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_17_BCFG(unsigned long a, unsigned long b)
+{
+ /* CSR address: base + 0x1000000 per GSERN module (a) + 0x10000 per lane
+    (b). Valid only on CN9XXX with a<=7 and b<=4; the "& 0x7" masks only
+    bound the arithmetic. Out-of-range arguments call __bdk_csr_fatal(). */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000d70ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_17_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_17_BCFG(a,b) bdk_gsernx_lanex_rx_17_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_17_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_17_BCFG(a,b) "GSERNX_LANEX_RX_17_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_17_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_17_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_17_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_18_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 18
+ * Override registers for LMS adaptation. Deadband settings for adaptation.
+ */
+union bdk_gsernx_lanex_rx_18_bcfg
+{
+    uint64_t u; /* Entire register as a single 64-bit value. */
+    struct bdk_gsernx_lanex_rx_18_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_51_63 : 13;
+        uint64_t blwc_subrate_now_ovrd_en : 1;/**< [ 50: 50](R/W) Enable use of [BLWC_SUBRATE_NOW_OVRD]. */
+        uint64_t afeos_subrate_now_ovrd_en : 1;/**< [ 49: 49](R/W) Enable use of [AFEOS_SUBRATE_NOW_OVRD]. */
+        uint64_t subrate_now_ovrd_en : 1;    /**< [ 48: 48](R/W) Enable use of [SUBRATE_NOW_OVRD]. */
+        uint64_t blwc_subrate_now_ovrd : 16; /**< [ 47: 32](R/W) BLWC Subrate_Now counter override value. */
+        uint64_t afeos_subrate_now_ovrd : 16;/**< [ 31: 16](R/W) AFEOS Subrate_Now counter override value. */
+        uint64_t subrate_now_ovrd : 16;      /**< [ 15:  0](R/W) Subrate_Now counter override value. */
+#else /* Word 0 - Little Endian */
+        uint64_t subrate_now_ovrd : 16;      /**< [ 15:  0](R/W) Subrate_Now counter override value. */
+        uint64_t afeos_subrate_now_ovrd : 16;/**< [ 31: 16](R/W) AFEOS Subrate_Now counter override value. */
+        uint64_t blwc_subrate_now_ovrd : 16; /**< [ 47: 32](R/W) BLWC Subrate_Now counter override value. */
+        uint64_t subrate_now_ovrd_en : 1;    /**< [ 48: 48](R/W) Enable use of [SUBRATE_NOW_OVRD]. */
+        uint64_t afeos_subrate_now_ovrd_en : 1;/**< [ 49: 49](R/W) Enable use of [AFEOS_SUBRATE_NOW_OVRD]. */
+        uint64_t blwc_subrate_now_ovrd_en : 1;/**< [ 50: 50](R/W) Enable use of [BLWC_SUBRATE_NOW_OVRD]. */
+        uint64_t reserved_51_63 : 13;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_18_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_18_bcfg bdk_gsernx_lanex_rx_18_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_18_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_18_BCFG(unsigned long a, unsigned long b)
+{
+    /* Compute the physical CSR address for GSERN(a) lane (b).
+       Valid only on CN9XXX models with a<=7 and b<=4; any other
+       combination falls through to a fatal CSR-access error. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090000d80ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_18_BCFG", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_GSERNX_LANEX_RX_18_BCFG(a,b) bdk_gsernx_lanex_rx_18_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_18_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_18_BCFG(a,b) "GSERNX_LANEX_RX_18_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_18_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_18_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_18_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_19_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 19
+ * Configuration registers for AFE Offset Adaptation.
+ */
+union bdk_gsernx_lanex_rx_19_bcfg
+{
+    uint64_t u; /* Entire register as a single 64-bit value. */
+    struct bdk_gsernx_lanex_rx_19_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_57_63 : 7;
+        uint64_t blwc_leak_sgn : 1;          /**< [ 56: 56](R/W) BLWC leak sign.  0 = Positive (add).  1 = Negative (subtract). */
+        uint64_t blwc_updn_len : 5;          /**< [ 55: 51](R/W) Accumulation length for BLWC drift up/down control. Range is 1 to 20. */
+        uint64_t blwc_deadband : 12;         /**< [ 50: 39](R/W) BLWC adaptation deadband settings.
+                                                                 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+        uint64_t blwc_deadband_inc : 12;     /**< [ 38: 27](R/W) BLWC adaptation deadband increment setting.
+                                                                 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
+        uint64_t blwc_leak : 3;              /**< [ 26: 24](R/W) BLWC adaptation leak parameter setting.
+                                                                 0x0 = 1/128.
+                                                                 0x1 = 1/64.
+                                                                 0x2 = 1/32.
+                                                                 0x3 = 1/16.
+                                                                 0x4 = 1/8.
+                                                                 0x5 = 1/4.
+                                                                 0x6 = 1/2.
+                                                                 0x7 = Disabled. */
+        uint64_t reserved_19_23 : 5;
+        uint64_t blwc_mu : 3;                /**< [ 18: 16](R/W) BLWC adaptation mu parameter setting.
+                                                                 0x0 = 1/16.
+                                                                 0x1 = 1/8.
+                                                                 0x2 = 1/4.
+                                                                 0x3 = 1/2.
+                                                                 0x4 = 1.
+                                                                 0x5 = 2.
+                                                                 0x6 = 4.
+                                                                 0x7 = 8. */
+        uint64_t reserved_15 : 1;
+        uint64_t blwc_timer_max : 15;        /**< [ 14:  0](R/W) BLWC adaptation timer maximum count value.
+                                                                 15-bit field, maximum value 0x7FFF. */
+#else /* Word 0 - Little Endian */
+        uint64_t blwc_timer_max : 15;        /**< [ 14:  0](R/W) BLWC adaptation timer maximum count value.
+                                                                 15-bit field, maximum value 0x7FFF. */
+        uint64_t reserved_15 : 1;
+        uint64_t blwc_mu : 3;                /**< [ 18: 16](R/W) BLWC adaptation mu parameter setting.
+                                                                 0x0 = 1/16.
+                                                                 0x1 = 1/8.
+                                                                 0x2 = 1/4.
+                                                                 0x3 = 1/2.
+                                                                 0x4 = 1.
+                                                                 0x5 = 2.
+                                                                 0x6 = 4.
+                                                                 0x7 = 8. */
+        uint64_t reserved_19_23 : 5;
+        uint64_t blwc_leak : 3;              /**< [ 26: 24](R/W) BLWC adaptation leak parameter setting.
+                                                                 0x0 = 1/128.
+                                                                 0x1 = 1/64.
+                                                                 0x2 = 1/32.
+                                                                 0x3 = 1/16.
+                                                                 0x4 = 1/8.
+                                                                 0x5 = 1/4.
+                                                                 0x6 = 1/2.
+                                                                 0x7 = Disabled. */
+        uint64_t blwc_deadband_inc : 12;     /**< [ 38: 27](R/W) BLWC adaptation deadband increment setting.
+                                                                 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
+        uint64_t blwc_deadband : 12;         /**< [ 50: 39](R/W) BLWC adaptation deadband settings.
+                                                                 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+        uint64_t blwc_updn_len : 5;          /**< [ 55: 51](R/W) Accumulation length for BLWC drift up/down control. Range is 1 to 20. */
+        uint64_t blwc_leak_sgn : 1;          /**< [ 56: 56](R/W) BLWC leak sign.  0 = Positive (add).  1 = Negative (subtract). */
+        uint64_t reserved_57_63 : 7;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_19_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_19_bcfg bdk_gsernx_lanex_rx_19_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_19_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_19_BCFG(unsigned long a, unsigned long b)
+{
+    /* Compute the physical CSR address for GSERN(a) lane (b).
+       Valid only on CN9XXX models with a<=7 and b<=4; any other
+       combination falls through to a fatal CSR-access error. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090000d90ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_19_BCFG", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_GSERNX_LANEX_RX_19_BCFG(a,b) bdk_gsernx_lanex_rx_19_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_19_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_19_BCFG(a,b) "GSERNX_LANEX_RX_19_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_19_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_19_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_19_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_1_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 1
+ * Register controls for postcursor overrides from c10 through c15, and BLWC gain.
+ * Each override setting has a corresponding enable bit which will cause the
+ * calibration control logic to use the override register setting instead
+ * of the calibration result.
+ */
+union bdk_gsernx_lanex_rx_1_bcfg
+{
+    uint64_t u; /* Entire register as a single 64-bit value. */
+    struct bdk_gsernx_lanex_rx_1_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_57_63 : 7;
+        uint64_t prevga_gn_ovrd_en : 1;      /**< [ 56: 56](R/W) Enable use of [PREVGA_GN_OVRD]. */
+        uint64_t prevga_gn_ovrd : 3;         /**< [ 55: 53](R/W) PREVGA_GN gain value override. */
+        uint64_t blwc_ovrd_en : 1;           /**< [ 52: 52](R/W) Enable use of [BLWC_OVRD]. */
+        uint64_t blwc_ovrd : 5;              /**< [ 51: 47](R/W) BLWC gain value override. */
+        uint64_t c15_ovrd_en : 1;            /**< [ 46: 46](R/W) Enable use of [C15_OVRD]. */
+        uint64_t c15_ovrd : 6;               /**< [ 45: 40](R/W) 15th postcursor value override. */
+        uint64_t reserved_39 : 1;
+        uint64_t c14_ovrd_en : 1;            /**< [ 38: 38](R/W) Enable use of [C14_OVRD]. */
+        uint64_t c14_ovrd : 6;               /**< [ 37: 32](R/W) 14th postcursor value override. */
+        uint64_t reserved_31 : 1;
+        uint64_t c13_ovrd_en : 1;            /**< [ 30: 30](R/W) Enable use of [C13_OVRD]. */
+        uint64_t c13_ovrd : 6;               /**< [ 29: 24](R/W) 13th postcursor value override. */
+        uint64_t reserved_23 : 1;
+        uint64_t c12_ovrd_en : 1;            /**< [ 22: 22](R/W) Enable use of [C12_OVRD]. */
+        uint64_t c12_ovrd : 6;               /**< [ 21: 16](R/W) 12th postcursor value override. */
+        uint64_t reserved_15 : 1;
+        uint64_t c11_ovrd_en : 1;            /**< [ 14: 14](R/W) Enable use of [C11_OVRD]. */
+        uint64_t c11_ovrd : 6;               /**< [ 13:  8](R/W) 11th postcursor value override. */
+        uint64_t reserved_7 : 1;
+        uint64_t c10_ovrd_en : 1;            /**< [  6:  6](R/W) Enable use of [C10_OVRD]. */
+        uint64_t c10_ovrd : 6;               /**< [  5:  0](R/W) 10th postcursor value override. */
+#else /* Word 0 - Little Endian */
+        uint64_t c10_ovrd : 6;               /**< [  5:  0](R/W) 10th postcursor value override. */
+        uint64_t c10_ovrd_en : 1;            /**< [  6:  6](R/W) Enable use of [C10_OVRD]. */
+        uint64_t reserved_7 : 1;
+        uint64_t c11_ovrd : 6;               /**< [ 13:  8](R/W) 11th postcursor value override. */
+        uint64_t c11_ovrd_en : 1;            /**< [ 14: 14](R/W) Enable use of [C11_OVRD]. */
+        uint64_t reserved_15 : 1;
+        uint64_t c12_ovrd : 6;               /**< [ 21: 16](R/W) 12th postcursor value override. */
+        uint64_t c12_ovrd_en : 1;            /**< [ 22: 22](R/W) Enable use of [C12_OVRD]. */
+        uint64_t reserved_23 : 1;
+        uint64_t c13_ovrd : 6;               /**< [ 29: 24](R/W) 13th postcursor value override. */
+        uint64_t c13_ovrd_en : 1;            /**< [ 30: 30](R/W) Enable use of [C13_OVRD]. */
+        uint64_t reserved_31 : 1;
+        uint64_t c14_ovrd : 6;               /**< [ 37: 32](R/W) 14th postcursor value override. */
+        uint64_t c14_ovrd_en : 1;            /**< [ 38: 38](R/W) Enable use of [C14_OVRD]. */
+        uint64_t reserved_39 : 1;
+        uint64_t c15_ovrd : 6;               /**< [ 45: 40](R/W) 15th postcursor value override. */
+        uint64_t c15_ovrd_en : 1;            /**< [ 46: 46](R/W) Enable use of [C15_OVRD]. */
+        uint64_t blwc_ovrd : 5;              /**< [ 51: 47](R/W) BLWC gain value override. */
+        uint64_t blwc_ovrd_en : 1;           /**< [ 52: 52](R/W) Enable use of [BLWC_OVRD]. */
+        uint64_t prevga_gn_ovrd : 3;         /**< [ 55: 53](R/W) PREVGA_GN gain value override. */
+        uint64_t prevga_gn_ovrd_en : 1;      /**< [ 56: 56](R/W) Enable use of [PREVGA_GN_OVRD]. */
+        uint64_t reserved_57_63 : 7;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_1_bcfg bdk_gsernx_lanex_rx_1_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_1_BCFG(unsigned long a, unsigned long b)
+{
+    /* Compute the physical CSR address for GSERN(a) lane (b).
+       Valid only on CN9XXX models with a<=7 and b<=4; any other
+       combination falls through to a fatal CSR-access error. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090000c70ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_1_BCFG", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_GSERNX_LANEX_RX_1_BCFG(a,b) bdk_gsernx_lanex_rx_1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_1_BCFG(a,b) "GSERNX_LANEX_RX_1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_1_bsts
+ *
+ * GSER Lane RX Base Status Register 1
+ * Status registers for postcursor values (either calibration results or
+ * overrides) from c10 through c15. Values in this register are only valid
+ * if GSERN()_LANE()_RX_5_BSTS[DFE_ADAPT_STATUS] is deasserted (indicating DFE adaptation
+ * has completed), or if the corresponding CSR override enable is asserted.
+ */
+union bdk_gsernx_lanex_rx_1_bsts
+{
+    uint64_t u; /* Entire register as a single 64-bit value. */
+    struct bdk_gsernx_lanex_rx_1_bsts_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_46_63 : 18;
+        uint64_t c15 : 6;                    /**< [ 45: 40](RO/H) 15th postcursor value. */
+        uint64_t reserved_38_39 : 2;
+        uint64_t c14 : 6;                    /**< [ 37: 32](RO/H) 14th postcursor value. */
+        uint64_t reserved_30_31 : 2;
+        uint64_t c13 : 6;                    /**< [ 29: 24](RO/H) 13th postcursor value. */
+        uint64_t reserved_22_23 : 2;
+        uint64_t c12 : 6;                    /**< [ 21: 16](RO/H) 12th postcursor value. */
+        uint64_t reserved_14_15 : 2;
+        uint64_t c11 : 6;                    /**< [ 13:  8](RO/H) 11th postcursor value. */
+        uint64_t reserved_6_7 : 2;
+        uint64_t c10 : 6;                    /**< [  5:  0](RO/H) 10th postcursor value. */
+#else /* Word 0 - Little Endian */
+        uint64_t c10 : 6;                    /**< [  5:  0](RO/H) 10th postcursor value. */
+        uint64_t reserved_6_7 : 2;
+        uint64_t c11 : 6;                    /**< [ 13:  8](RO/H) 11th postcursor value. */
+        uint64_t reserved_14_15 : 2;
+        uint64_t c12 : 6;                    /**< [ 21: 16](RO/H) 12th postcursor value. */
+        uint64_t reserved_22_23 : 2;
+        uint64_t c13 : 6;                    /**< [ 29: 24](RO/H) 13th postcursor value. */
+        uint64_t reserved_30_31 : 2;
+        uint64_t c14 : 6;                    /**< [ 37: 32](RO/H) 14th postcursor value. */
+        uint64_t reserved_38_39 : 2;
+        uint64_t c15 : 6;                    /**< [ 45: 40](RO/H) 15th postcursor value. */
+        uint64_t reserved_46_63 : 18;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_1_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_1_bsts bdk_gsernx_lanex_rx_1_bsts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_1_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_1_BSTS(unsigned long a, unsigned long b)
+{
+    /* Compute the physical CSR address for GSERN(a) lane (b).
+       Valid only on CN9XXX models with a<=7 and b<=4; any other
+       combination falls through to a fatal CSR-access error. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090001660ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_1_BSTS", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_GSERNX_LANEX_RX_1_BSTS(a,b) bdk_gsernx_lanex_rx_1_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_1_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_1_BSTS(a,b) "GSERNX_LANEX_RX_1_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_1_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_1_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_1_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_20_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 20
+ * Configuration registers for BLWC LMS adaptation
+ * Adaptation controls for Subrate parameters.
+ */
+union bdk_gsernx_lanex_rx_20_bcfg
+{
+    uint64_t u; /* Entire register as a single 64-bit value. */
+    struct bdk_gsernx_lanex_rx_20_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_35_63 : 29;
+        uint64_t blwc_subrate_scale : 3;     /**< [ 34: 32](R/W) BLWC subrate now counter scaling value for comparison against the up vote counter.
+                                                                 0x0 = 1/32.
+                                                                 0x1 = 1/16.
+                                                                 0x2 = 3/32.
+                                                                 0x3 = 1/8.
+                                                                 0x4 = 3/16.
+                                                                 0x5 = 1/4.
+                                                                 0x6 = 3/8.
+                                                                 0x7 = 1/2. */
+        uint64_t blwc_subrate_init : 16;     /**< [ 31: 16](R/W) Subrate counter initial value. Sets the initial value for the LMS update interval, if
+                                                                 subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t blwc_subrate_final : 16;    /**< [ 15:  0](R/W) Subrate counter final value. Sets the ending value for the LMS update interval, if subrate
+                                                                 gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled.
+                                                                 Subrate counter final value. */
+#else /* Word 0 - Little Endian */
+        uint64_t blwc_subrate_final : 16;    /**< [ 15:  0](R/W) Subrate counter final value. Sets the ending value for the LMS update interval, if subrate
+                                                                 gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled.
+                                                                 Subrate counter final value. */
+        uint64_t blwc_subrate_init : 16;     /**< [ 31: 16](R/W) Subrate counter initial value. Sets the initial value for the LMS update interval, if
+                                                                 subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t blwc_subrate_scale : 3;     /**< [ 34: 32](R/W) BLWC subrate now counter scaling value for comparison against the up vote counter.
+                                                                 0x0 = 1/32.
+                                                                 0x1 = 1/16.
+                                                                 0x2 = 3/32.
+                                                                 0x3 = 1/8.
+                                                                 0x4 = 3/16.
+                                                                 0x5 = 1/4.
+                                                                 0x6 = 3/8.
+                                                                 0x7 = 1/2. */
+        uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_20_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_20_bcfg bdk_gsernx_lanex_rx_20_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_20_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_20_BCFG(unsigned long a, unsigned long b)
+{
+    /* Compute the physical CSR address for GSERN(a) lane (b).
+       Valid only on CN9XXX models with a<=7 and b<=4; any other
+       combination falls through to a fatal CSR-access error. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090000da0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_20_BCFG", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_GSERNX_LANEX_RX_20_BCFG(a,b) bdk_gsernx_lanex_rx_20_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_20_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_20_BCFG(a,b) "GSERNX_LANEX_RX_20_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_20_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_20_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_20_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_21_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 21
+ * Configuration registers for PREVGA_GN LMS adaptation
+ * Adaptation controls for Subrate parameters.
+ */
+union bdk_gsernx_lanex_rx_21_bcfg
+{
+    uint64_t u; /* Entire register as a single 64-bit value. */
+    struct bdk_gsernx_lanex_rx_21_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_52_63 : 12;
+        uint64_t prevga_gn_subrate_now_ovrd_en : 1;/**< [ 51: 51](R/W) Enable use of [PREVGA_GN_SUBRATE_NOW_OVRD]. */
+        uint64_t prevga_gn_subrate_now_ovrd : 16;/**< [ 50: 35](R/W) PREVGA_GN Subrate_Now counter override value. */
+        uint64_t prevga_gn_subrate_scale : 3;/**< [ 34: 32](R/W) PREVGA_GN subrate now counter scaling value for comparison against the up vote counter.
+                                                                 0x0 = 1/32.
+                                                                 0x1 = 1/16.
+                                                                 0x2 = 3/32.
+                                                                 0x3 = 1/8.
+                                                                 0x4 = 3/16.
+                                                                 0x5 = 1/4.
+                                                                 0x6 = 3/8.
+                                                                 0x7 = 1/2. */
+        uint64_t prevga_gn_subrate_init : 16;/**< [ 31: 16](R/W) Subrate counter initial value. Sets the initial value for the LMS update interval, if
+                                                                 subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled. */
+        uint64_t prevga_gn_subrate_fin : 16; /**< [ 15:  0](R/W) Subrate counter final value. Sets the ending value for the LMS update interval, if subrate
+                                                                 gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled.
+                                                                 Subrate counter final value. */
+#else /* Word 0 - Little Endian */
+        uint64_t prevga_gn_subrate_fin : 16; /**< [ 15:  0](R/W) Subrate counter final value. Sets the ending value for the LMS update interval, if subrate
+                                                                 gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled.
+                                                                 Subrate counter final value. */
+        uint64_t prevga_gn_subrate_init : 16;/**< [ 31: 16](R/W) Subrate counter initial value. Sets the initial value for the LMS update interval, if
+                                                                 subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled. */
+        uint64_t prevga_gn_subrate_scale : 3;/**< [ 34: 32](R/W) PREVGA_GN subrate now counter scaling value for comparison against the up vote counter.
+                                                                 0x0 = 1/32.
+                                                                 0x1 = 1/16.
+                                                                 0x2 = 3/32.
+                                                                 0x3 = 1/8.
+                                                                 0x4 = 3/16.
+                                                                 0x5 = 1/4.
+                                                                 0x6 = 3/8.
+                                                                 0x7 = 1/2. */
+        uint64_t prevga_gn_subrate_now_ovrd : 16;/**< [ 50: 35](R/W) PREVGA_GN Subrate_Now counter override value. */
+        uint64_t prevga_gn_subrate_now_ovrd_en : 1;/**< [ 51: 51](R/W) Enable use of [PREVGA_GN_SUBRATE_NOW_OVRD]. */
+        uint64_t reserved_52_63 : 12;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_21_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_21_bcfg bdk_gsernx_lanex_rx_21_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_21_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_21_BCFG(unsigned long a, unsigned long b)
+{
+    /* Compute the physical CSR address for GSERN(a) lane (b).
+       Valid only on CN9XXX models with a<=7 and b<=4; any other
+       combination falls through to a fatal CSR-access error. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090000db0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_21_BCFG", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_GSERNX_LANEX_RX_21_BCFG(a,b) bdk_gsernx_lanex_rx_21_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_21_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_21_BCFG(a,b) "GSERNX_LANEX_RX_21_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_21_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_21_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_21_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_22_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 22
+ * Override registers for LMS adaptation. Deadband settings for adaptation.
+ */
+union bdk_gsernx_lanex_rx_22_bcfg
+{
+    uint64_t u; /* Entire register as a single 64-bit value. */
+    struct bdk_gsernx_lanex_rx_22_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_52_63 : 12;
+        uint64_t prevga_gn_deadband_now_ovrd_en : 1;/**< [ 51: 51](R/W) Enable use of [PREVGA_GN_DEADBAND_NOW_OVRD]. */
+        uint64_t prevga_gn_deadband_now_ovrd : 12;/**< [ 50: 39](R/W) PREVGA_GN adaptation deadband now override. */
+        uint64_t reserved_0_38 : 39;
+#else /* Word 0 - Little Endian */
+        uint64_t reserved_0_38 : 39;
+        uint64_t prevga_gn_deadband_now_ovrd : 12;/**< [ 50: 39](R/W) PREVGA_GN adaptation deadband now override. */
+        uint64_t prevga_gn_deadband_now_ovrd_en : 1;/**< [ 51: 51](R/W) Enable use of [PREVGA_GN_DEADBAND_NOW_OVRD]. */
+        uint64_t reserved_52_63 : 12;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_22_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_22_bcfg bdk_gsernx_lanex_rx_22_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_22_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_22_BCFG(unsigned long a, unsigned long b)
+{
+    /* Compute the physical CSR address for GSERN(a) lane (b).
+       Valid only on CN9XXX models with a<=7 and b<=4; any other
+       combination falls through to a fatal CSR-access error. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090000dc0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_22_BCFG", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_GSERNX_LANEX_RX_22_BCFG(a,b) bdk_gsernx_lanex_rx_22_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_22_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_22_BCFG(a,b) "GSERNX_LANEX_RX_22_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_22_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_22_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_22_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_23_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 23
+ * Configuration registers for PREVGA_GN gain adaptation.
+ */
+union bdk_gsernx_lanex_rx_23_bcfg
+{
+    uint64_t u; /* Entire register as a single 64-bit value. */
+    struct bdk_gsernx_lanex_rx_23_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_52_63 : 12;
+        uint64_t prevga_gn_leak_sgn : 1;     /**< [ 51: 51](R/W) PREVGA_GN leak sign.  0 = Positive (add).  1 = Negative (subtract). */
+        uint64_t prevga_gn_deadband : 12;    /**< [ 50: 39](R/W) PREVGA_GN adaptation deadband settings. Typically a value less than 0x0FF is used. */
+        uint64_t prevga_gn_deadband_inc : 12;/**< [ 38: 27](R/W) PREVGA_GN adaptation deadband increment setting.
+                                                                 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
+        uint64_t prevga_gn_leak : 3;         /**< [ 26: 24](R/W) PREVGA_GN adaptation leak parameter setting.
+                                                                 0x0 = 1/128.
+                                                                 0x1 = 1/64.
+                                                                 0x2 = 1/32.
+                                                                 0x3 = 1/16.
+                                                                 0x4 = 1/8.
+                                                                 0x5 = 1/4.
+                                                                 0x6 = 1/2.
+                                                                 0x7 = Disabled. */
+        uint64_t reserved_19_23 : 5;
+        uint64_t prevga_gn_mu : 3;           /**< [ 18: 16](R/W) PREVGA_GN adaptation mu parameter setting.
+                                                                 0x0 = 1/16.
+                                                                 0x1 = 1/8.
+                                                                 0x2 = 1/4.
+                                                                 0x3 = 1/2.
+                                                                 0x4 = 1.
+                                                                 0x5 = 2.
+                                                                 0x6 = 4.
+                                                                 0x7 = 8. */
+        uint64_t reserved_15 : 1;
+        uint64_t prevga_gn_timer_max : 15;   /**< [ 14:  0](R/W) PREVGA_GN adaptation timer maximum count value. */
+#else /* Word 0 - Little Endian */
+        uint64_t prevga_gn_timer_max : 15;   /**< [ 14:  0](R/W) PREVGA_GN adaptation timer maximum count value. */
+        uint64_t reserved_15 : 1;
+        uint64_t prevga_gn_mu : 3;           /**< [ 18: 16](R/W) PREVGA_GN adaptation mu parameter setting.
+                                                                 0x0 = 1/16.
+                                                                 0x1 = 1/8.
+                                                                 0x2 = 1/4.
+                                                                 0x3 = 1/2.
+                                                                 0x4 = 1.
+                                                                 0x5 = 2.
+                                                                 0x6 = 4.
+                                                                 0x7 = 8. */
+        uint64_t reserved_19_23 : 5;
+        uint64_t prevga_gn_leak : 3;         /**< [ 26: 24](R/W) PREVGA_GN adaptation leak parameter setting.
+                                                                 0x0 = 1/128.
+                                                                 0x1 = 1/64.
+                                                                 0x2 = 1/32.
+                                                                 0x3 = 1/16.
+                                                                 0x4 = 1/8.
+                                                                 0x5 = 1/4.
+                                                                 0x6 = 1/2.
+                                                                 0x7 = Disabled. */
+        uint64_t prevga_gn_deadband_inc : 12;/**< [ 38: 27](R/W) PREVGA_GN adaptation deadband increment setting.
+                                                                 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
+        uint64_t prevga_gn_deadband : 12;    /**< [ 50: 39](R/W) PREVGA_GN adaptation deadband settings. Typically a value less than 0x0FF is used. */
+        uint64_t prevga_gn_leak_sgn : 1;     /**< [ 51: 51](R/W) PREVGA_GN leak sign.  0 = Positive (add).  1 = Negative (subtract). */
+        uint64_t reserved_52_63 : 12;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_23_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_23_bcfg bdk_gsernx_lanex_rx_23_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_23_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_23_BCFG(unsigned long a, unsigned long b)
+{
+    /* Compute the physical CSR address for GSERN(a) lane (b).
+       Valid only on CN9XXX models with a<=7 and b<=4; any other
+       combination falls through to a fatal CSR-access error. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090000dd0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_23_BCFG", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_GSERNX_LANEX_RX_23_BCFG(a,b) bdk_gsernx_lanex_rx_23_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_23_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_23_BCFG(a,b) "GSERNX_LANEX_RX_23_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_23_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_23_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_23_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_24_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 24
+ * Configuration registers for DFE offset compensation timer.
+ */
+union bdk_gsernx_lanex_rx_24_bcfg
+{
+    uint64_t u; /* Entire register as a single 64-bit value. */
+    struct bdk_gsernx_lanex_rx_24_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t dfe_oscomp_timer_en : 1;    /**< [ 63: 63](R/W) Enable for DFE offset compensation timer.  When set, allows DFE offset
+                                                                 compensation timer to trigger DFE offset compensation upon timer expiration. */
+        uint64_t reserved_32_62 : 31;
+        uint64_t dfe_oscomp_timer_max : 32;  /**< [ 31:  0](R/W) Maximum value of the DFE offset compensation Timer.  When the timer reaches the
+                                                                 value set by this field, the DFE offset compensation process is triggered. Also,
+                                                                 when the timer reaches this value, the timer is reset to zero and allowed to
+                                                                 begin counting again. */
+#else /* Word 0 - Little Endian */
+        uint64_t dfe_oscomp_timer_max : 32;  /**< [ 31:  0](R/W) Maximum value of the DFE offset compensation Timer.  When the timer reaches the
+                                                                 value set by this field, the DFE offset compensation process is triggered. Also,
+                                                                 when the timer reaches this value, the timer is reset to zero and allowed to
+                                                                 begin counting again. */
+        uint64_t reserved_32_62 : 31;
+        uint64_t dfe_oscomp_timer_en : 1;    /**< [ 63: 63](R/W) Enable for DFE offset compensation timer.  When set, allows DFE offset
+                                                                 compensation timer to trigger DFE offset compensation upon timer expiration. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_24_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_24_bcfg bdk_gsernx_lanex_rx_24_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_24_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_24_BCFG(unsigned long a, unsigned long b)
+{
+    /* Compute the physical CSR address for GSERN(a) lane (b).
+       Valid only on CN9XXX models with a<=7 and b<=4; any other
+       combination falls through to a fatal CSR-access error. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090000de0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_24_BCFG", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_GSERNX_LANEX_RX_24_BCFG(a,b) bdk_gsernx_lanex_rx_24_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_24_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_24_BCFG(a,b) "GSERNX_LANEX_RX_24_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_24_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_24_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_24_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_2_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 2
+ * Register controls for first postcursor overrides of even/odd paths. Each
+ * override setting has a corresponding enable bit which will cause the
+ * calibration control logic to use the override register setting instead
+ * of the calibration result.
+ */
+union bdk_gsernx_lanex_rx_2_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_2_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_63 : 1;
+ uint64_t c1_1e_ovrd_en : 1; /**< [ 62: 62](R/W) Enable use of [C1_1E_OVRD]. */
+ uint64_t c1_1e_ovrd : 6; /**< [ 61: 56](R/W) First postcursor value on odd E path override. */
+ uint64_t reserved_55 : 1;
+ uint64_t c1_0e_ovrd_en : 1; /**< [ 54: 54](R/W) Enable use of [C1_0E_OVRD]. */
+ uint64_t c1_0e_ovrd : 6; /**< [ 53: 48](R/W) First postcursor value on even E path override. */
+ uint64_t reserved_47 : 1;
+ uint64_t c1_1x_ovrd_en : 1; /**< [ 46: 46](R/W) Enable use of [C1_1X_OVRD]. */
+ uint64_t c1_1x_ovrd : 6; /**< [ 45: 40](R/W) First postcursor value on odd X path override. */
+ uint64_t reserved_39 : 1;
+ uint64_t c1_0x_ovrd_en : 1; /**< [ 38: 38](R/W) Enable use of [C1_0X_OVRD]. */
+ uint64_t c1_0x_ovrd : 6; /**< [ 37: 32](R/W) First postcursor value on even X path override. */
+ uint64_t reserved_31 : 1;
+ uint64_t c1_1i_ovrd_en : 1; /**< [ 30: 30](R/W) Enable use of [C1_1I_OVRD]. */
+ uint64_t c1_1i_ovrd : 6; /**< [ 29: 24](R/W) First postcursor value on odd I path override. */
+ uint64_t reserved_23 : 1;
+ uint64_t c1_0i_ovrd_en : 1; /**< [ 22: 22](R/W) Enable use of [C1_0I_OVRD]. */
+ uint64_t c1_0i_ovrd : 6; /**< [ 21: 16](R/W) First postcursor value on even I path override. */
+ uint64_t reserved_15 : 1;
+ uint64_t c1_1q_ovrd_en : 1; /**< [ 14: 14](R/W) Enable use of [C1_1Q_OVRD]. */
+ uint64_t c1_1q_ovrd : 6; /**< [ 13: 8](R/W) First postcursor value on odd Q path override. */
+ uint64_t reserved_7 : 1;
+ uint64_t c1_0q_ovrd_en : 1; /**< [ 6: 6](R/W) Enable use of [C1_0Q_OVRD]. */
+ uint64_t c1_0q_ovrd : 6; /**< [ 5: 0](R/W) First postcursor value on even Q path override. */
+#else /* Word 0 - Little Endian */
+ uint64_t c1_0q_ovrd : 6; /**< [ 5: 0](R/W) First postcursor value on even Q path override. */
+ uint64_t c1_0q_ovrd_en : 1; /**< [ 6: 6](R/W) Enable use of [C1_0Q_OVRD]. */
+ uint64_t reserved_7 : 1;
+ uint64_t c1_1q_ovrd : 6; /**< [ 13: 8](R/W) First postcursor value on odd Q path override. */
+ uint64_t c1_1q_ovrd_en : 1; /**< [ 14: 14](R/W) Enable use of [C1_1Q_OVRD]. */
+ uint64_t reserved_15 : 1;
+ uint64_t c1_0i_ovrd : 6; /**< [ 21: 16](R/W) First postcursor value on even I path override. */
+ uint64_t c1_0i_ovrd_en : 1; /**< [ 22: 22](R/W) Enable use of [C1_0I_OVRD]. */
+ uint64_t reserved_23 : 1;
+ uint64_t c1_1i_ovrd : 6; /**< [ 29: 24](R/W) First postcursor value on odd I path override. */
+ uint64_t c1_1i_ovrd_en : 1; /**< [ 30: 30](R/W) Enable use of [C1_1I_OVRD]. */
+ uint64_t reserved_31 : 1;
+ uint64_t c1_0x_ovrd : 6; /**< [ 37: 32](R/W) First postcursor value on even X path override. */
+ uint64_t c1_0x_ovrd_en : 1; /**< [ 38: 38](R/W) Enable use of [C1_0X_OVRD]. */
+ uint64_t reserved_39 : 1;
+ uint64_t c1_1x_ovrd : 6; /**< [ 45: 40](R/W) First postcursor value on odd X path override. */
+ uint64_t c1_1x_ovrd_en : 1; /**< [ 46: 46](R/W) Enable use of [C1_1X_OVRD]. */
+ uint64_t reserved_47 : 1;
+ uint64_t c1_0e_ovrd : 6; /**< [ 53: 48](R/W) First postcursor value on even E path override. */
+ uint64_t c1_0e_ovrd_en : 1; /**< [ 54: 54](R/W) Enable use of [C1_0E_OVRD]. */
+ uint64_t reserved_55 : 1;
+ uint64_t c1_1e_ovrd : 6; /**< [ 61: 56](R/W) First postcursor value on odd E path override. */
+ uint64_t c1_1e_ovrd_en : 1; /**< [ 62: 62](R/W) Enable use of [C1_1E_OVRD]. */
+ uint64_t reserved_63 : 1;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_2_bcfg bdk_gsernx_lanex_rx_2_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_2_BCFG(unsigned long a, unsigned long b)
+{
+    /* CSR address for GSERN(a)_LANE(b)_RX_2_BCFG. Valid only on CN9XXX
+     * with module index a <= 7 and lane index b <= 4; any other
+     * combination is reported as a fatal CSR indexing error. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4)) {
+        uint64_t addr = 0x87e090000c80ll;
+        addr += 0x1000000ll * (a & 0x7); /* per-module stride */
+        addr += 0x10000ll * (b & 0x7);   /* per-lane stride */
+        return addr;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_RX_2_BCFG", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata for GSERNX_LANEX_RX_2_BCFG: C type, bus type,
+ * printable name, device BAR, bus number, and CSR argument list. */
+#define typedef_BDK_GSERNX_LANEX_RX_2_BCFG(a,b) bdk_gsernx_lanex_rx_2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_2_BCFG(a,b) "GSERNX_LANEX_RX_2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_2_bsts
+ *
+ * GSER Lane RX Base Status Register 2
+ * Status registers for first postcursor values (either calibration
+ * results or overrides) of even/odd paths. Values in this register are
+ * only valid if GSERN()_LANE()_RX_5_BSTS[DFE_ADAPT_STATUS] is deasserted (indicating DFE
+ * adaptation has completed), or if the corresponding CSR override enable
+ * is asserted.
+ */
+/* NOTE(review): auto-generated layout. The big-endian and little-endian
+ * branches below declare the same bit assignments for one 64-bit word,
+ * differing only in member declaration order; they must stay in sync. */
+union bdk_gsernx_lanex_rx_2_bsts
+{
+    uint64_t u; /* Whole-register access as a single 64-bit value. */
+    struct bdk_gsernx_lanex_rx_2_bsts_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_62_63        : 2;
+        uint64_t c1_1e                 : 6;  /**< [ 61: 56](RO/H) First postcursor value on odd E path. */
+        uint64_t reserved_54_55        : 2;
+        uint64_t c1_0e                 : 6;  /**< [ 53: 48](RO/H) First postcursor value on even E path. */
+        uint64_t reserved_46_47        : 2;
+        uint64_t c1_1x                 : 6;  /**< [ 45: 40](RO/H) First postcursor value on odd X path. */
+        uint64_t reserved_38_39        : 2;
+        uint64_t c1_0x                 : 6;  /**< [ 37: 32](RO/H) First postcursor value on even X path. */
+        uint64_t reserved_30_31        : 2;
+        uint64_t c1_1i                 : 6;  /**< [ 29: 24](RO/H) First postcursor value on odd I path. */
+        uint64_t reserved_22_23        : 2;
+        uint64_t c1_0i                 : 6;  /**< [ 21: 16](RO/H) First postcursor value on even I path. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t c1_1q                 : 6;  /**< [ 13:  8](RO/H) First postcursor value on odd Q path. */
+        uint64_t reserved_6_7          : 2;
+        uint64_t c1_0q                 : 6;  /**< [  5:  0](RO/H) First postcursor value on even Q path. */
+#else /* Word 0 - Little Endian */
+        uint64_t c1_0q                 : 6;  /**< [  5:  0](RO/H) First postcursor value on even Q path. */
+        uint64_t reserved_6_7          : 2;
+        uint64_t c1_1q                 : 6;  /**< [ 13:  8](RO/H) First postcursor value on odd Q path. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t c1_0i                 : 6;  /**< [ 21: 16](RO/H) First postcursor value on even I path. */
+        uint64_t reserved_22_23        : 2;
+        uint64_t c1_1i                 : 6;  /**< [ 29: 24](RO/H) First postcursor value on odd I path. */
+        uint64_t reserved_30_31        : 2;
+        uint64_t c1_0x                 : 6;  /**< [ 37: 32](RO/H) First postcursor value on even X path. */
+        uint64_t reserved_38_39        : 2;
+        uint64_t c1_1x                 : 6;  /**< [ 45: 40](RO/H) First postcursor value on odd X path. */
+        uint64_t reserved_46_47        : 2;
+        uint64_t c1_0e                 : 6;  /**< [ 53: 48](RO/H) First postcursor value on even E path. */
+        uint64_t reserved_54_55        : 2;
+        uint64_t c1_1e                 : 6;  /**< [ 61: 56](RO/H) First postcursor value on odd E path. */
+        uint64_t reserved_62_63        : 2;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_2_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_2_bsts bdk_gsernx_lanex_rx_2_bsts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_2_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_2_BSTS(unsigned long a, unsigned long b)
+{
+    /* CSR address for GSERN(a)_LANE(b)_RX_2_BSTS. Valid only on CN9XXX
+     * with a <= 7 and b <= 4; otherwise a fatal CSR error is raised. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4)) {
+        uint64_t addr = 0x87e090001670ll;
+        addr += 0x1000000ll * (a & 0x7); /* per-module stride */
+        addr += 0x10000ll * (b & 0x7);   /* per-lane stride */
+        return addr;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_RX_2_BSTS", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata for GSERNX_LANEX_RX_2_BSTS: C type, bus type,
+ * printable name, device BAR, bus number, and CSR argument list. */
+#define typedef_BDK_GSERNX_LANEX_RX_2_BSTS(a,b) bdk_gsernx_lanex_rx_2_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_2_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_2_BSTS(a,b) "GSERNX_LANEX_RX_2_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_2_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_2_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_2_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_3_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 3
+ * Register controls for calibration muxes and switch enable overrides.
+ * Some bits in this register are override controls (*_OVRD). Each
+ * override setting has a corresponding enable which will cause the
+ * calibration logic to use the override register setting instead of the
+ * calibration result.
+ */
+/* NOTE(review): auto-generated layout. The big-endian and little-endian
+ * branches below declare the same bit assignments for one 64-bit word,
+ * differing only in member declaration order; they must stay in sync. */
+union bdk_gsernx_lanex_rx_3_bcfg
+{
+    uint64_t u; /* Whole-register access as a single 64-bit value. */
+    struct bdk_gsernx_lanex_rx_3_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_60_63        : 4;
+        uint64_t cali1_odd_ovrd_en     : 1;  /**< [ 59: 59](R/W) Enable use of [CALI1_ODD_OVRD]. */
+        uint64_t cali1_even_ovrd_en    : 1;  /**< [ 58: 58](R/W) Enable use of [CALI1_EVEN_OVRD]. */
+        uint64_t cali0_odd_ovrd_en     : 1;  /**< [ 57: 57](R/W) Enable use of [CALI0_ODD_OVRD]. */
+        uint64_t cali0_even_ovrd_en    : 1;  /**< [ 56: 56](R/W) Enable use of [CALI0_EVEN_OVRD]. */
+        uint64_t cali1_odd_ovrd        : 8;  /**< [ 55: 48](R/W) Input calibration switch enable for speculation path 1
+                                                                 in odd paths override. */
+        uint64_t cali1_even_ovrd       : 8;  /**< [ 47: 40](R/W) Input calibration switch enable for speculation path 1
+                                                                 in even paths override. */
+        uint64_t cali0_odd_ovrd        : 8;  /**< [ 39: 32](R/W) Input calibration switch enable for speculation path 0
+                                                                 in odd paths override. */
+        uint64_t cali0_even_ovrd       : 8;  /**< [ 31: 24](R/W) Input calibration switch enable for speculation path 0
+                                                                 in even paths override. */
+        uint64_t reserved_20_23        : 4;
+        uint64_t calsel_odd_ovrd_en    : 1;  /**< [ 19: 19](R/W) Enable use of [CALSEL_ODD_OVRD]. */
+        uint64_t calsel_even_ovrd_en   : 1;  /**< [ 18: 18](R/W) Enable use of [CALSEL_EVEN_OVRD]. */
+        uint64_t calo_odd_ovrd_en      : 1;  /**< [ 17: 17](R/W) Enable use of [CALO_ODD_OVRD]. */
+        uint64_t calo_even_ovrd_en     : 1;  /**< [ 16: 16](R/W) Enable use of [CALO_EVEN_OVRD]. */
+        uint64_t calsel_odd_ovrd       : 4;  /**< [ 15: 12](R/W) Odd calibration speculation mux override value. */
+        uint64_t calsel_even_ovrd      : 4;  /**< [ 11:  8](R/W) Even calibration speculation mux override value. */
+        uint64_t calo_odd_ovrd         : 4;  /**< [  7:  4](R/W) Odd Slicer output calibration mux control override value. */
+        uint64_t calo_even_ovrd        : 4;  /**< [  3:  0](R/W) Even Slicer output calibration mux control override value. */
+#else /* Word 0 - Little Endian */
+        uint64_t calo_even_ovrd        : 4;  /**< [  3:  0](R/W) Even Slicer output calibration mux control override value. */
+        uint64_t calo_odd_ovrd         : 4;  /**< [  7:  4](R/W) Odd Slicer output calibration mux control override value. */
+        uint64_t calsel_even_ovrd      : 4;  /**< [ 11:  8](R/W) Even calibration speculation mux override value. */
+        uint64_t calsel_odd_ovrd       : 4;  /**< [ 15: 12](R/W) Odd calibration speculation mux override value. */
+        uint64_t calo_even_ovrd_en     : 1;  /**< [ 16: 16](R/W) Enable use of [CALO_EVEN_OVRD]. */
+        uint64_t calo_odd_ovrd_en      : 1;  /**< [ 17: 17](R/W) Enable use of [CALO_ODD_OVRD]. */
+        uint64_t calsel_even_ovrd_en   : 1;  /**< [ 18: 18](R/W) Enable use of [CALSEL_EVEN_OVRD]. */
+        uint64_t calsel_odd_ovrd_en    : 1;  /**< [ 19: 19](R/W) Enable use of [CALSEL_ODD_OVRD]. */
+        uint64_t reserved_20_23        : 4;
+        uint64_t cali0_even_ovrd       : 8;  /**< [ 31: 24](R/W) Input calibration switch enable for speculation path 0
+                                                                 in even paths override. */
+        uint64_t cali0_odd_ovrd        : 8;  /**< [ 39: 32](R/W) Input calibration switch enable for speculation path 0
+                                                                 in odd paths override. */
+        uint64_t cali1_even_ovrd       : 8;  /**< [ 47: 40](R/W) Input calibration switch enable for speculation path 1
+                                                                 in even paths override. */
+        uint64_t cali1_odd_ovrd        : 8;  /**< [ 55: 48](R/W) Input calibration switch enable for speculation path 1
+                                                                 in odd paths override. */
+        uint64_t cali0_even_ovrd_en    : 1;  /**< [ 56: 56](R/W) Enable use of [CALI0_EVEN_OVRD]. */
+        uint64_t cali0_odd_ovrd_en     : 1;  /**< [ 57: 57](R/W) Enable use of [CALI0_ODD_OVRD]. */
+        uint64_t cali1_even_ovrd_en    : 1;  /**< [ 58: 58](R/W) Enable use of [CALI1_EVEN_OVRD]. */
+        uint64_t cali1_odd_ovrd_en     : 1;  /**< [ 59: 59](R/W) Enable use of [CALI1_ODD_OVRD]. */
+        uint64_t reserved_60_63        : 4;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_3_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_3_bcfg bdk_gsernx_lanex_rx_3_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_3_BCFG(unsigned long a, unsigned long b)
+{
+    /* CSR address for GSERN(a)_LANE(b)_RX_3_BCFG. Valid only on CN9XXX
+     * with a <= 7 and b <= 4; otherwise a fatal CSR error is raised. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4)) {
+        uint64_t addr = 0x87e090000c90ll;
+        addr += 0x1000000ll * (a & 0x7); /* per-module stride */
+        addr += 0x10000ll * (b & 0x7);   /* per-lane stride */
+        return addr;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_RX_3_BCFG", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata for GSERNX_LANEX_RX_3_BCFG: C type, bus type,
+ * printable name, device BAR, bus number, and CSR argument list. */
+#define typedef_BDK_GSERNX_LANEX_RX_3_BCFG(a,b) bdk_gsernx_lanex_rx_3_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_3_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_3_BCFG(a,b) "GSERNX_LANEX_RX_3_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_3_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_3_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_3_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_3_bsts
+ *
+ * GSER Lane RX Base Status Register 3
+ * Status registers for calibration muxes and switch enables (either
+ * calibration results or overrides). Values in this register are only valid if
+ * GSERN()_LANE()_RX_5_BSTS[DFE_ADAPT_STATUS] is deasserted (indicating DFE adaptation has
+ * completed), or if the corresponding CSR override enable is asserted.
+ */
+/* NOTE(review): auto-generated layout. The big-endian and little-endian
+ * branches below declare the same bit assignments for one 64-bit word,
+ * differing only in member declaration order; they must stay in sync. */
+union bdk_gsernx_lanex_rx_3_bsts
+{
+    uint64_t u; /* Whole-register access as a single 64-bit value. */
+    struct bdk_gsernx_lanex_rx_3_bsts_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_56_63        : 8;
+        uint64_t cali1_odd             : 8;  /**< [ 55: 48](RO/H) Input calibration switch enable for speculation path 1
+                                                                 in odd paths. */
+        uint64_t cali1_even            : 8;  /**< [ 47: 40](RO/H) Input calibration switch enable for speculation path 1
+                                                                 in even paths. */
+        uint64_t cali0_odd             : 8;  /**< [ 39: 32](RO/H) Input calibration switch enable for speculation path 0
+                                                                 in odd paths. */
+        uint64_t cali0_even            : 8;  /**< [ 31: 24](RO/H) Input calibration switch enable for speculation path 0
+                                                                 in even paths. */
+        uint64_t reserved_16_23        : 8;
+        uint64_t calsel_odd            : 4;  /**< [ 15: 12](RO/H) Odd calibration speculation mux. */
+        uint64_t calsel_even           : 4;  /**< [ 11:  8](RO/H) Even calibration speculation mux. */
+        uint64_t calo_odd              : 4;  /**< [  7:  4](RO/H) Odd slicer output calibration mux control. */
+        uint64_t calo_even             : 4;  /**< [  3:  0](RO/H) Even slicer output calibration mux control. */
+#else /* Word 0 - Little Endian */
+        uint64_t calo_even             : 4;  /**< [  3:  0](RO/H) Even slicer output calibration mux control. */
+        uint64_t calo_odd              : 4;  /**< [  7:  4](RO/H) Odd slicer output calibration mux control. */
+        uint64_t calsel_even           : 4;  /**< [ 11:  8](RO/H) Even calibration speculation mux. */
+        uint64_t calsel_odd            : 4;  /**< [ 15: 12](RO/H) Odd calibration speculation mux. */
+        uint64_t reserved_16_23        : 8;
+        uint64_t cali0_even            : 8;  /**< [ 31: 24](RO/H) Input calibration switch enable for speculation path 0
+                                                                 in even paths. */
+        uint64_t cali0_odd             : 8;  /**< [ 39: 32](RO/H) Input calibration switch enable for speculation path 0
+                                                                 in odd paths. */
+        uint64_t cali1_even            : 8;  /**< [ 47: 40](RO/H) Input calibration switch enable for speculation path 1
+                                                                 in even paths. */
+        uint64_t cali1_odd             : 8;  /**< [ 55: 48](RO/H) Input calibration switch enable for speculation path 1
+                                                                 in odd paths. */
+        uint64_t reserved_56_63        : 8;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_3_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_3_bsts bdk_gsernx_lanex_rx_3_bsts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_3_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_3_BSTS(unsigned long a, unsigned long b)
+{
+    /* CSR address for GSERN(a)_LANE(b)_RX_3_BSTS. Valid only on CN9XXX
+     * with a <= 7 and b <= 4; otherwise a fatal CSR error is raised. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4)) {
+        uint64_t addr = 0x87e090001680ll;
+        addr += 0x1000000ll * (a & 0x7); /* per-module stride */
+        addr += 0x10000ll * (b & 0x7);   /* per-lane stride */
+        return addr;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_RX_3_BSTS", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata for GSERNX_LANEX_RX_3_BSTS: C type, bus type,
+ * printable name, device BAR, bus number, and CSR argument list. */
+#define typedef_BDK_GSERNX_LANEX_RX_3_BSTS(a,b) bdk_gsernx_lanex_rx_3_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_3_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_3_BSTS(a,b) "GSERNX_LANEX_RX_3_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_3_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_3_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_3_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_4_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 4
+ * Register controls for VGA, CTLE, and OS_AFE overrides.
+ * Some bits in this register are override controls (*_OVRD). Each
+ * override setting has a corresponding enable which will cause the
+ * calibration logic to use the override register setting instead of the
+ * calibration result.
+ */
+/* NOTE(review): auto-generated layout. The big-endian and little-endian
+ * branches below declare the same bit assignments for one 64-bit word,
+ * differing only in member declaration order; they must stay in sync. */
+union bdk_gsernx_lanex_rx_4_bcfg
+{
+    uint64_t u; /* Whole-register access as a single 64-bit value. */
+    struct bdk_gsernx_lanex_rx_4_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_62_63        : 2;
+        uint64_t edgesel_even_ovrd_en  : 1;  /**< [ 61: 61](R/W) Enable use of [EDGESEL_EVEN_OVRD]. */
+        uint64_t edgesel_even_ovrd     : 1;  /**< [ 60: 60](R/W) EDGESEL_EVEN override value. */
+        uint64_t edgesel_odd_ovrd_en   : 1;  /**< [ 59: 59](R/W) Enable use of [EDGESEL_ODD_OVRD]. */
+        uint64_t edgesel_odd_ovrd      : 1;  /**< [ 58: 58](R/W) EDGESEL_ODD override value. */
+        uint64_t en_os_afe_ovrd_en     : 1;  /**< [ 57: 57](R/W) Enable use of [EN_OS_AFE_OVRD]. */
+        uint64_t en_os_afe_ovrd        : 1;  /**< [ 56: 56](R/W) OS_AFE_EN override value. */
+        uint64_t reserved_55           : 1;
+        uint64_t os_afe_odd_ovrd_en    : 1;  /**< [ 54: 54](R/W) Enable use of [OS_AFE_ODD_OVRD]. */
+        uint64_t os_afe_odd_ovrd       : 6;  /**< [ 53: 48](R/W) OS_AFE_ODD offset override value. */
+        uint64_t reserved_47           : 1;
+        uint64_t os_afe_even_ovrd_en   : 1;  /**< [ 46: 46](R/W) Enable use of [OS_AFE_EVEN_OVRD]. */
+        uint64_t os_afe_even_ovrd      : 6;  /**< [ 45: 40](R/W) OS_AFE_EVEN offset override value. */
+        uint64_t reserved_37_39        : 3;
+        uint64_t ctle_lte_zero_ovrd_en : 1;  /**< [ 36: 36](R/W) CTLE LTE zero frequency override enable.
+                                                                 By default, the override should be enabled; otherwise, CTLE_LTE_ZERO
+                                                                 will be set equal to CTLE_ZERO within the RX adaptation FSM. */
+        uint64_t ctle_lte_zero_ovrd    : 4;  /**< [ 35: 32](R/W) CTLE LTE zero frequency override value. */
+        uint64_t reserved_29_31        : 3;
+        uint64_t ctle_lte_gain_ovrd_en : 1;  /**< [ 28: 28](R/W) Enable use of [CTLE_LTE_GAIN_OVRD]. */
+        uint64_t ctle_lte_gain_ovrd    : 4;  /**< [ 27: 24](R/W) CTLE LTE DC gain override value. */
+        uint64_t reserved_21_23        : 3;
+        uint64_t ctle_zero_ovrd_en     : 1;  /**< [ 20: 20](R/W) Enable use of [CTLE_ZERO_OVRD]. */
+        uint64_t ctle_zero_ovrd        : 4;  /**< [ 19: 16](R/W) CTLE zero frequency override value. */
+        uint64_t reserved_13_15        : 3;
+        uint64_t ctle_gain_ovrd_en     : 1;  /**< [ 12: 12](R/W) Enable use of [CTLE_GAIN_OVRD]. */
+        uint64_t ctle_gain_ovrd        : 4;  /**< [ 11:  8](R/W) CTLE DC gain override value. */
+        uint64_t reserved_5_7          : 3;
+        uint64_t vga_gain_ovrd_en      : 1;  /**< [  4:  4](R/W) Enable use of [VGA_GAIN_OVRD]. */
+        uint64_t vga_gain_ovrd         : 4;  /**< [  3:  0](R/W) VGA DC gain override value. */
+#else /* Word 0 - Little Endian */
+        uint64_t vga_gain_ovrd         : 4;  /**< [  3:  0](R/W) VGA DC gain override value. */
+        uint64_t vga_gain_ovrd_en      : 1;  /**< [  4:  4](R/W) Enable use of [VGA_GAIN_OVRD]. */
+        uint64_t reserved_5_7          : 3;
+        uint64_t ctle_gain_ovrd        : 4;  /**< [ 11:  8](R/W) CTLE DC gain override value. */
+        uint64_t ctle_gain_ovrd_en     : 1;  /**< [ 12: 12](R/W) Enable use of [CTLE_GAIN_OVRD]. */
+        uint64_t reserved_13_15        : 3;
+        uint64_t ctle_zero_ovrd        : 4;  /**< [ 19: 16](R/W) CTLE zero frequency override value. */
+        uint64_t ctle_zero_ovrd_en     : 1;  /**< [ 20: 20](R/W) Enable use of [CTLE_ZERO_OVRD]. */
+        uint64_t reserved_21_23        : 3;
+        uint64_t ctle_lte_gain_ovrd    : 4;  /**< [ 27: 24](R/W) CTLE LTE DC gain override value. */
+        uint64_t ctle_lte_gain_ovrd_en : 1;  /**< [ 28: 28](R/W) Enable use of [CTLE_LTE_GAIN_OVRD]. */
+        uint64_t reserved_29_31        : 3;
+        uint64_t ctle_lte_zero_ovrd    : 4;  /**< [ 35: 32](R/W) CTLE LTE zero frequency override value. */
+        uint64_t ctle_lte_zero_ovrd_en : 1;  /**< [ 36: 36](R/W) CTLE LTE zero frequency override enable.
+                                                                 By default, the override should be enabled; otherwise, CTLE_LTE_ZERO
+                                                                 will be set equal to CTLE_ZERO within the RX adaptation FSM. */
+        uint64_t reserved_37_39        : 3;
+        uint64_t os_afe_even_ovrd      : 6;  /**< [ 45: 40](R/W) OS_AFE_EVEN offset override value. */
+        uint64_t os_afe_even_ovrd_en   : 1;  /**< [ 46: 46](R/W) Enable use of [OS_AFE_EVEN_OVRD]. */
+        uint64_t reserved_47           : 1;
+        uint64_t os_afe_odd_ovrd       : 6;  /**< [ 53: 48](R/W) OS_AFE_ODD offset override value. */
+        uint64_t os_afe_odd_ovrd_en    : 1;  /**< [ 54: 54](R/W) Enable use of [OS_AFE_ODD_OVRD]. */
+        uint64_t reserved_55           : 1;
+        uint64_t en_os_afe_ovrd        : 1;  /**< [ 56: 56](R/W) OS_AFE_EN override value. */
+        uint64_t en_os_afe_ovrd_en     : 1;  /**< [ 57: 57](R/W) Enable use of [EN_OS_AFE_OVRD]. */
+        uint64_t edgesel_odd_ovrd      : 1;  /**< [ 58: 58](R/W) EDGESEL_ODD override value. */
+        uint64_t edgesel_odd_ovrd_en   : 1;  /**< [ 59: 59](R/W) Enable use of [EDGESEL_ODD_OVRD]. */
+        uint64_t edgesel_even_ovrd     : 1;  /**< [ 60: 60](R/W) EDGESEL_EVEN override value. */
+        uint64_t edgesel_even_ovrd_en  : 1;  /**< [ 61: 61](R/W) Enable use of [EDGESEL_EVEN_OVRD]. */
+        uint64_t reserved_62_63        : 2;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_4_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_4_bcfg bdk_gsernx_lanex_rx_4_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_4_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_4_BCFG(unsigned long a, unsigned long b)
+{
+    /* CSR address for GSERN(a)_LANE(b)_RX_4_BCFG. Valid only on CN9XXX
+     * with a <= 7 and b <= 4; otherwise a fatal CSR error is raised. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4)) {
+        uint64_t addr = 0x87e090000ca0ll;
+        addr += 0x1000000ll * (a & 0x7); /* per-module stride */
+        addr += 0x10000ll * (b & 0x7);   /* per-lane stride */
+        return addr;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_RX_4_BCFG", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata for GSERNX_LANEX_RX_4_BCFG: C type, bus type,
+ * printable name, device BAR, bus number, and CSR argument list. */
+#define typedef_BDK_GSERNX_LANEX_RX_4_BCFG(a,b) bdk_gsernx_lanex_rx_4_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_4_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_4_BCFG(a,b) "GSERNX_LANEX_RX_4_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_4_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_4_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_4_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_4_bsts
+ *
+ * GSER Lane RX Base Status Register 4
+ * Status registers for VGA, CTLE, and OS_AFE values
+ * (either calibration results or overrides).
+ */
+/* NOTE(review): auto-generated layout. The big-endian and little-endian
+ * branches below declare the same bit assignments for one 64-bit word,
+ * differing only in member declaration order; they must stay in sync. */
+union bdk_gsernx_lanex_rx_4_bsts
+{
+    uint64_t u; /* Whole-register access as a single 64-bit value. */
+    struct bdk_gsernx_lanex_rx_4_bsts_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t blwc                  : 5;  /**< [ 63: 59](RO/H) BLWC. This field is only valid if GSERN()_LANE()_RX_10_BSTS[BLWC_ADAPT_STATUS]
+                                                                 is deasserted (indicating BLWC adaptation has completed), or if the
+                                                                 corresponding CSR override enable is asserted. */
+        uint64_t reserved_57_58        : 2;
+        uint64_t en_os_afe             : 1;  /**< [ 56: 56](RO/H) AFE offset compensation enable value in-use. This field is only
+                                                                 valid if GSERN()_LANE()_RX_OS_5_BSTS[AFE_OFFSET_STATUS] is asserted (indicating AFE
+                                                                 offset adaptation has completed), or if the corresponding CSR
+                                                                 override enable is asserted. */
+        uint64_t reserved_54_55        : 2;
+        uint64_t os_afe_odd            : 6;  /**< [ 53: 48](RO/H) AFE odd offset compensation value in-use. This field is only valid
+                                                                 if GSERN()_LANE()_RX_OS_5_BSTS[AFE_OFFSET_STATUS] is asserted (indicating AFE offset
+                                                                 adaptation has completed), or if the corresponding CSR override
+                                                                 enable is asserted. */
+        uint64_t reserved_46_47        : 2;
+        uint64_t os_afe_even           : 6;  /**< [ 45: 40](RO/H) AFE even offset compensation value in-use. This field is only valid
+                                                                 if GSERN()_LANE()_RX_OS_5_BSTS[AFE_OFFSET_STATUS] is asserted (indicating AFE offset
+                                                                 adaptation has completed), or if the corresponding CSR override
+                                                                 enable is asserted. */
+        uint64_t reserved_36_39        : 4;
+        uint64_t ctle_lte_zero         : 4;  /**< [ 35: 32](RO/H) CTLE LTE zero frequency. This field is only valid if
+                                                                 GSERN()_LANE()_RX_5_BSTS[CTLEZ_ADAPT_STATUS] is deasserted (indicating VGA
+                                                                 adaptation has completed), or if the corresponding CSR override
+                                                                 enable is asserted. */
+        uint64_t reserved_28_31        : 4;
+        uint64_t ctle_lte_gain         : 4;  /**< [ 27: 24](RO/H) CTLE LTE DC gain. This field is only valid if
+                                                                 GSERN()_LANE()_RX_5_BSTS[CTLE_ADAPT_STATUS] is deasserted (indicating VGA
+                                                                 adaptation has completed), or if the corresponding CSR override
+                                                                 enable is asserted. */
+        uint64_t reserved_20_23        : 4;
+        uint64_t ctle_zero             : 4;  /**< [ 19: 16](RO/H) CTLE zero frequency. This field is only valid if
+                                                                 GSERN()_LANE()_RX_5_BSTS[CTLE_ADAPT_STATUS] is deasserted (indicating VGA
+                                                                 adaptation has completed), or if the corresponding CSR override
+                                                                 enable is asserted. */
+        uint64_t reserved_12_15        : 4;
+        uint64_t ctle_gain             : 4;  /**< [ 11:  8](RO/H) CTLE DC gain. This field is only valid if
+                                                                 GSERN()_LANE()_RX_5_BSTS[CTLE_ADAPT_STATUS] is deasserted (indicating VGA
+                                                                 adaptation has completed), or if the corresponding CSR override
+                                                                 enable is asserted. */
+        uint64_t reserved_7            : 1;
+        uint64_t prevga_gn             : 3;  /**< [  6:  4](RO/H) Pre-VGA gain. This field is only valid if
+                                                                 GSERN()_LANE()_RX_11_BSTS[PREVGA_GN_ADAPT_STATUS] is deasserted (indicating Pre-VGA
+                                                                 gain adaptation has completed), or if the corresponding CSR override
+                                                                 enable is asserted. */
+        uint64_t vga_gain              : 4;  /**< [  3:  0](RO/H) VGA DC gain. This field is only valid if GSERN()_LANE()_RX_5_BSTS[VGA_ADAPT_STATUS]
+                                                                 is deasserted (indicating VGA adaptation has completed), or if the
+                                                                 corresponding CSR override enable is asserted. */
+#else /* Word 0 - Little Endian */
+        uint64_t vga_gain              : 4;  /**< [  3:  0](RO/H) VGA DC gain. This field is only valid if GSERN()_LANE()_RX_5_BSTS[VGA_ADAPT_STATUS]
+                                                                 is deasserted (indicating VGA adaptation has completed), or if the
+                                                                 corresponding CSR override enable is asserted. */
+        uint64_t prevga_gn             : 3;  /**< [  6:  4](RO/H) Pre-VGA gain. This field is only valid if
+                                                                 GSERN()_LANE()_RX_11_BSTS[PREVGA_GN_ADAPT_STATUS] is deasserted (indicating Pre-VGA
+                                                                 gain adaptation has completed), or if the corresponding CSR override
+                                                                 enable is asserted. */
+        uint64_t reserved_7            : 1;
+        uint64_t ctle_gain             : 4;  /**< [ 11:  8](RO/H) CTLE DC gain. This field is only valid if
+                                                                 GSERN()_LANE()_RX_5_BSTS[CTLE_ADAPT_STATUS] is deasserted (indicating VGA
+                                                                 adaptation has completed), or if the corresponding CSR override
+                                                                 enable is asserted. */
+        uint64_t reserved_12_15        : 4;
+        uint64_t ctle_zero             : 4;  /**< [ 19: 16](RO/H) CTLE zero frequency. This field is only valid if
+                                                                 GSERN()_LANE()_RX_5_BSTS[CTLE_ADAPT_STATUS] is deasserted (indicating VGA
+                                                                 adaptation has completed), or if the corresponding CSR override
+                                                                 enable is asserted. */
+        uint64_t reserved_20_23        : 4;
+        uint64_t ctle_lte_gain         : 4;  /**< [ 27: 24](RO/H) CTLE LTE DC gain. This field is only valid if
+                                                                 GSERN()_LANE()_RX_5_BSTS[CTLE_ADAPT_STATUS] is deasserted (indicating VGA
+                                                                 adaptation has completed), or if the corresponding CSR override
+                                                                 enable is asserted. */
+        uint64_t reserved_28_31        : 4;
+        uint64_t ctle_lte_zero         : 4;  /**< [ 35: 32](RO/H) CTLE LTE zero frequency. This field is only valid if
+                                                                 GSERN()_LANE()_RX_5_BSTS[CTLEZ_ADAPT_STATUS] is deasserted (indicating VGA
+                                                                 adaptation has completed), or if the corresponding CSR override
+                                                                 enable is asserted. */
+        uint64_t reserved_36_39        : 4;
+        uint64_t os_afe_even           : 6;  /**< [ 45: 40](RO/H) AFE even offset compensation value in-use. This field is only valid
+                                                                 if GSERN()_LANE()_RX_OS_5_BSTS[AFE_OFFSET_STATUS] is asserted (indicating AFE offset
+                                                                 adaptation has completed), or if the corresponding CSR override
+                                                                 enable is asserted. */
+        uint64_t reserved_46_47        : 2;
+        uint64_t os_afe_odd            : 6;  /**< [ 53: 48](RO/H) AFE odd offset compensation value in-use. This field is only valid
+                                                                 if GSERN()_LANE()_RX_OS_5_BSTS[AFE_OFFSET_STATUS] is asserted (indicating AFE offset
+                                                                 adaptation has completed), or if the corresponding CSR override
+                                                                 enable is asserted. */
+        uint64_t reserved_54_55        : 2;
+        uint64_t en_os_afe             : 1;  /**< [ 56: 56](RO/H) AFE offset compensation enable value in-use. This field is only
+                                                                 valid if GSERN()_LANE()_RX_OS_5_BSTS[AFE_OFFSET_STATUS] is asserted (indicating AFE
+                                                                 offset adaptation has completed), or if the corresponding CSR
+                                                                 override enable is asserted. */
+        uint64_t reserved_57_58        : 2;
+        uint64_t blwc                  : 5;  /**< [ 63: 59](RO/H) BLWC. This field is only valid if GSERN()_LANE()_RX_10_BSTS[BLWC_ADAPT_STATUS]
+                                                                 is deasserted (indicating BLWC adaptation has completed), or if the
+                                                                 corresponding CSR override enable is asserted. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_4_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_4_bsts bdk_gsernx_lanex_rx_4_bsts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_4_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_4_BSTS(unsigned long a, unsigned long b)
+{
+    /* CSR address for GSERN(a)_LANE(b)_RX_4_BSTS. Valid only on CN9XXX
+     * with a <= 7 and b <= 4; otherwise a fatal CSR error is raised. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4)) {
+        uint64_t addr = 0x87e090001690ll;
+        addr += 0x1000000ll * (a & 0x7); /* per-module stride */
+        addr += 0x10000ll * (b & 0x7);   /* per-lane stride */
+        return addr;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_RX_4_BSTS", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata for GSERNX_LANEX_RX_4_BSTS: C type, bus type,
+ * printable name, device BAR, bus number, and CSR argument list. */
+#define typedef_BDK_GSERNX_LANEX_RX_4_BSTS(a,b) bdk_gsernx_lanex_rx_4_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_4_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_4_BSTS(a,b) "GSERNX_LANEX_RX_4_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_4_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_4_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_4_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_5_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 5
+ * Adaptation parameters for DFE coefficients.
+ */
+/* NOTE(review): auto-generated layout. The big-endian and little-endian
+ * branches below declare the same bit assignments for one 64-bit word,
+ * differing only in member declaration order; they must stay in sync. */
+union bdk_gsernx_lanex_rx_5_bcfg
+{
+    uint64_t u; /* Whole-register access as a single 64-bit value. */
+    struct bdk_gsernx_lanex_rx_5_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_63           : 1;
+        uint64_t ctle_leak_sgn         : 1;  /**< [ 62: 62](R/W) CTLE leak sign.  0 = Positive (add).  1 = Negative (subtract). */
+        uint64_t ctlez_leak_sgn        : 1;  /**< [ 61: 61](R/W) CTLE leak sign.  0 = Positive (add).  1 = Negative (subtract). */
+        uint64_t dfe_c1_leak_sgn       : 1;  /**< [ 60: 60](R/W) DFE C1 leak sign.  0 = Positive (add).  1 = Negative (subtract). */
+        uint64_t vga_leak_sgn          : 1;  /**< [ 59: 59](R/W) VGA leak sign.  0 = Positive (add).  1 = Negative (subtract). */
+        uint64_t dfe_c1_leak           : 3;  /**< [ 58: 56](R/W) DFE C1 Gain adaptation leak parameter setting.
+                                                                 0x0 = 1/128.
+                                                                 0x1 = 1/64.
+                                                                 0x2 = 1/32.
+                                                                 0x3 = 1/16.
+                                                                 0x4 = 1/8.
+                                                                 0x5 = 1/4.
+                                                                 0x6 = 1/2.
+                                                                 0x7 = Disabled. */
+        uint64_t dfe_c1_mu             : 3;  /**< [ 55: 53](R/W) DFE C1 adaptation mu parameter setting.
+                                                                 0x0 = 1/16.
+                                                                 0x1 = 1/8.
+                                                                 0x2 = 1/4.
+                                                                 0x3 = 1/2.
+                                                                 0x4 = 1.
+                                                                 0x5 = 2.
+                                                                 0x6 = 4.
+                                                                 0x7 = 8. */
+        uint64_t vga_leak              : 3;  /**< [ 52: 50](R/W) VGA gain adaptation leak parameter setting.
+                                                                 0x0 = 1/128.
+                                                                 0x1 = 1/64.
+                                                                 0x2 = 1/32.
+                                                                 0x3 = 1/16.
+                                                                 0x4 = 1/8.
+                                                                 0x5 = 1/4.
+                                                                 0x6 = 1/2.
+                                                                 0x7 = Disabled. */
+        uint64_t vga_mu                : 3;  /**< [ 49: 47](R/W) VGA adaptation mu parameter setting.
+                                                                 0x0 = 1/16.
+                                                                 0x1 = 1/8.
+                                                                 0x2 = 1/4.
+                                                                 0x3 = 1/2.
+                                                                 0x4 = 1.
+                                                                 0x5 = 2.
+                                                                 0x6 = 4.
+                                                                 0x7 = 8. */
+        uint64_t vga_timer_max         : 15; /**< [ 46: 32](R/W) VGA adaptation timer maximum count value.
+                                                                 15-bit field, maximum value 0x7FFF. */
+        uint64_t reserved_22_31        : 10;
+        uint64_t dfe_leak_sgn          : 1;  /**< [ 21: 21](R/W) DFE leak sign.  0 = Positive (add).  1 = Negative (subtract). */
+        uint64_t dfe_leak              : 3;  /**< [ 20: 18](R/W) DFE adaptation leak parameter setting.
+                                                                 0x0 = 1/128.
+                                                                 0x1 = 1/64.
+                                                                 0x2 = 1/32.
+                                                                 0x3 = 1/16.
+                                                                 0x4 = 1/8.
+                                                                 0x5 = 1/4.
+                                                                 0x6 = 1/2.
+                                                                 0x7 = Disabled. */
+        uint64_t dfe_mu                : 3;  /**< [ 17: 15](R/W) DFE adaptation mu parameter setting.
+                                                                 0x0 = 1/16.
+                                                                 0x1 = 1/8.
+                                                                 0x2 = 1/4.
+                                                                 0x3 = 1/2.
+                                                                 0x4 = 1.
+                                                                 0x5 = 2.
+                                                                 0x6 = 4.
+                                                                 0x7 = 8. */
+        uint64_t dfe_timer_max         : 15; /**< [ 14:  0](R/W) DFE adaptation timer maximum count value.
+                                                                 15-bit field, maximum value 0x7FFF. */
+#else /* Word 0 - Little Endian */
+        uint64_t dfe_timer_max         : 15; /**< [ 14:  0](R/W) DFE adaptation timer maximum count value.
+                                                                 15-bit field, maximum value 0x7FFF. */
+        uint64_t dfe_mu                : 3;  /**< [ 17: 15](R/W) DFE adaptation mu parameter setting.
+                                                                 0x0 = 1/16.
+                                                                 0x1 = 1/8.
+                                                                 0x2 = 1/4.
+                                                                 0x3 = 1/2.
+                                                                 0x4 = 1.
+                                                                 0x5 = 2.
+                                                                 0x6 = 4.
+                                                                 0x7 = 8. */
+        uint64_t dfe_leak              : 3;  /**< [ 20: 18](R/W) DFE adaptation leak parameter setting.
+                                                                 0x0 = 1/128.
+                                                                 0x1 = 1/64.
+                                                                 0x2 = 1/32.
+                                                                 0x3 = 1/16.
+                                                                 0x4 = 1/8.
+                                                                 0x5 = 1/4.
+                                                                 0x6 = 1/2.
+                                                                 0x7 = Disabled. */
+        uint64_t dfe_leak_sgn          : 1;  /**< [ 21: 21](R/W) DFE leak sign.  0 = Positive (add).  1 = Negative (subtract). */
+        uint64_t reserved_22_31        : 10;
+        uint64_t vga_timer_max         : 15; /**< [ 46: 32](R/W) VGA adaptation timer maximum count value.
+                                                                 15-bit field, maximum value 0x7FFF. */
+        uint64_t vga_mu                : 3;  /**< [ 49: 47](R/W) VGA adaptation mu parameter setting.
+                                                                 0x0 = 1/16.
+                                                                 0x1 = 1/8.
+                                                                 0x2 = 1/4.
+                                                                 0x3 = 1/2.
+                                                                 0x4 = 1.
+                                                                 0x5 = 2.
+                                                                 0x6 = 4.
+                                                                 0x7 = 8. */
+        uint64_t vga_leak              : 3;  /**< [ 52: 50](R/W) VGA gain adaptation leak parameter setting.
+                                                                 0x0 = 1/128.
+                                                                 0x1 = 1/64.
+                                                                 0x2 = 1/32.
+                                                                 0x3 = 1/16.
+                                                                 0x4 = 1/8.
+                                                                 0x5 = 1/4.
+                                                                 0x6 = 1/2.
+                                                                 0x7 = Disabled. */
+        uint64_t dfe_c1_mu             : 3;  /**< [ 55: 53](R/W) DFE C1 adaptation mu parameter setting.
+                                                                 0x0 = 1/16.
+                                                                 0x1 = 1/8.
+                                                                 0x2 = 1/4.
+                                                                 0x3 = 1/2.
+                                                                 0x4 = 1.
+                                                                 0x5 = 2.
+                                                                 0x6 = 4.
+                                                                 0x7 = 8. */
+        uint64_t dfe_c1_leak           : 3;  /**< [ 58: 56](R/W) DFE C1 Gain adaptation leak parameter setting.
+                                                                 0x0 = 1/128.
+                                                                 0x1 = 1/64.
+                                                                 0x2 = 1/32.
+                                                                 0x3 = 1/16.
+                                                                 0x4 = 1/8.
+                                                                 0x5 = 1/4.
+                                                                 0x6 = 1/2.
+                                                                 0x7 = Disabled. */
+        uint64_t vga_leak_sgn          : 1;  /**< [ 59: 59](R/W) VGA leak sign.  0 = Positive (add).  1 = Negative (subtract). */
+        uint64_t dfe_c1_leak_sgn       : 1;  /**< [ 60: 60](R/W) DFE C1 leak sign.  0 = Positive (add).  1 = Negative (subtract). */
+        uint64_t ctlez_leak_sgn        : 1;  /**< [ 61: 61](R/W) CTLE leak sign.  0 = Positive (add).  1 = Negative (subtract). */
+        uint64_t ctle_leak_sgn         : 1;  /**< [ 62: 62](R/W) CTLE leak sign.  0 = Positive (add).  1 = Negative (subtract). */
+        uint64_t reserved_63           : 1;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_5_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_5_bcfg bdk_gsernx_lanex_rx_5_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_5_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_5_BCFG(unsigned long a, unsigned long b)
+{
+    /* CSR address for GSERN(a)_LANE(b)_RX_5_BCFG. Valid only on CN9XXX
+     * with a <= 7 and b <= 4; otherwise a fatal CSR error is raised. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4)) {
+        uint64_t addr = 0x87e090000cb0ll;
+        addr += 0x1000000ll * (a & 0x7); /* per-module stride */
+        addr += 0x10000ll * (b & 0x7);   /* per-lane stride */
+        return addr;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_RX_5_BCFG", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata for GSERNX_LANEX_RX_5_BCFG: C type, bus type,
+ * printable name, device BAR, bus number, and CSR argument list. */
+#define typedef_BDK_GSERNX_LANEX_RX_5_BCFG(a,b) bdk_gsernx_lanex_rx_5_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_5_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_5_BCFG(a,b) "GSERNX_LANEX_RX_5_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_5_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_5_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_5_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_5_bsts
+ *
+ * GSER Lane RX Base Status Register 5
+ * Status registers for VGA, CTLE, and DFE adaptation.
+ */
+union bdk_gsernx_lanex_rx_5_bsts
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_5_bsts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ctlez_adapt_count : 15; /**< [ 63: 49](RO/H) CTLEZ adaptation timer count value. Only valid when
+ GSERN()_LANE()_RX_5_BSTS[CTLEZ_ADAPT_STATUS] is deasserted. */
+ uint64_t ctlez_adapt_status : 1; /**< [ 48: 48](RO/H) CTLEZ adaptation status. When 0, training is inactive. When 1, training is active. */
+ uint64_t ctle_adapt_count : 15; /**< [ 47: 33](RO/H) CTLE adaptation timer count value. Only valid when
+ GSERN()_LANE()_RX_5_BSTS[CTLE_ADAPT_STATUS] is deasserted. */
+ uint64_t ctle_adapt_status : 1; /**< [ 32: 32](RO/H) CTLE adaptation status. When 0, training is inactive. When 1, training is active. */
+ uint64_t dfe_adapt_count : 15; /**< [ 31: 17](RO/H) DFE adaptation timer count value. Only valid when
+ GSERN()_LANE()_RX_5_BSTS[DFE_ADAPT_STATUS] is deasserted. */
+ uint64_t dfe_adapt_status : 1; /**< [ 16: 16](RO/H) DFE adaptation status. When 0, training is inactive. When 1, training is active. */
+ uint64_t vga_adapt_count : 15; /**< [ 15: 1](RO/H) VGA Gain adaptation timer count value. Only valid when
+ GSERN()_LANE()_RX_5_BSTS[VGA_ADAPT_STATUS] is deasserted. */
+ uint64_t vga_adapt_status : 1; /**< [ 0: 0](RO/H) VGA Gain adaptation status. When 0, training is inactive. When 1, training is active. */
+#else /* Word 0 - Little Endian */
+ uint64_t vga_adapt_status : 1; /**< [ 0: 0](RO/H) VGA Gain adaptation status. When 0, training is inactive. When 1, training is active. */
+ uint64_t vga_adapt_count : 15; /**< [ 15: 1](RO/H) VGA Gain adaptation timer count value. Only valid when
+ GSERN()_LANE()_RX_5_BSTS[VGA_ADAPT_STATUS] is deasserted. */
+ uint64_t dfe_adapt_status : 1; /**< [ 16: 16](RO/H) DFE adaptation status. When 0, training is inactive. When 1, training is active. */
+ uint64_t dfe_adapt_count : 15; /**< [ 31: 17](RO/H) DFE adaptation timer count value. Only valid when
+ GSERN()_LANE()_RX_5_BSTS[DFE_ADAPT_STATUS] is deasserted. */
+ uint64_t ctle_adapt_status : 1; /**< [ 32: 32](RO/H) CTLE adaptation status. When 0, training is inactive. When 1, training is active. */
+ uint64_t ctle_adapt_count : 15; /**< [ 47: 33](RO/H) CTLE adaptation timer count value. Only valid when
+ GSERN()_LANE()_RX_5_BSTS[CTLE_ADAPT_STATUS] is deasserted. */
+ uint64_t ctlez_adapt_status : 1; /**< [ 48: 48](RO/H) CTLEZ adaptation status. When 0, training is inactive. When 1, training is active. */
+ uint64_t ctlez_adapt_count : 15; /**< [ 63: 49](RO/H) CTLEZ adaptation timer count value. Only valid when
+ GSERN()_LANE()_RX_5_BSTS[CTLEZ_ADAPT_STATUS] is deasserted. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_5_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_5_bsts bdk_gsernx_lanex_rx_5_bsts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_5_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_5_BSTS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900016a0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_5_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_5_BSTS(a,b) bdk_gsernx_lanex_rx_5_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_5_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_5_BSTS(a,b) "GSERNX_LANEX_RX_5_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_5_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_5_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_5_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_6_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 6
+ * Adaptation controls for DFE CTLE and CTLEZ parameter.
+ */
+union bdk_gsernx_lanex_rx_6_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_6_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ctlelte_leak_sgn : 1; /**< [ 63: 63](R/W) CTLELTE leak sign. 0 = Positive (add). 1 = Negative (subtract). */
+ uint64_t ctlelte_leak : 3; /**< [ 62: 60](R/W) CTLELTE adaptation leak parameter setting.
+ 0x0 = 1/128.
+ 0x1 = 1/64.
+ 0x2 = 1/32.
+ 0x3 = 1/16.
+ 0x4 = 1/8.
+ 0x5 = 1/4.
+ 0x6 = 1/2.
+ 0x7 = Disabled. */
+ uint64_t ctlelte_mu : 3; /**< [ 59: 57](R/W) CTLELTE adaptation mu parameter setting.
+ 0x0 = 1/16.
+ 0x1 = 1/8.
+ 0x2 = 1/4.
+ 0x3 = 1/2.
+ 0x4 = 1.
+ 0x5 = 2.
+ 0x6 = 4.
+ 0x7 = 8. */
+ uint64_t ctlelte_timer_max : 15; /**< [ 56: 42](R/W) CTLELTE adaptation timer maximum count value.
+ 15-bit field, maximum value 0x7FFF. */
+ uint64_t ctlez_leak : 3; /**< [ 41: 39](R/W) CTLEZ adaptation leak parameter setting.
+ 0x0 = 1/128.
+ 0x1 = 1/64.
+ 0x2 = 1/32.
+ 0x3 = 1/16.
+ 0x4 = 1/8.
+ 0x5 = 1/4.
+ 0x6 = 1/2.
+ 0x7 = Disabled. */
+ uint64_t ctlez_mu : 3; /**< [ 38: 36](R/W) CTLEZ adaptation mu parameter setting.
+ 0x0 = 1/16.
+ 0x1 = 1/8.
+ 0x2 = 1/4.
+ 0x3 = 1/2.
+ 0x4 = 1.
+ 0x5 = 2.
+ 0x6 = 4.
+ 0x7 = 8. */
+ uint64_t ctlez_timer_max : 15; /**< [ 35: 21](R/W) CTLEZ adaptation timer maximum count value.
+ 15-bit field, maximum value 0x7FFF. */
+ uint64_t ctle_leak : 3; /**< [ 20: 18](R/W) DFE CTLE adaptation leak parameter setting.
+ 0x0 = 1/128.
+ 0x1 = 1/64.
+ 0x2 = 1/32.
+ 0x3 = 1/16.
+ 0x4 = 1/8.
+ 0x5 = 1/4.
+ 0x6 = 1/2.
+ 0x7 = Disabled. */
+ uint64_t ctle_mu : 3; /**< [ 17: 15](R/W) DFE CTLE adaptation mu parameter setting.
+ 0x0 = 1/16.
+ 0x1 = 1/8.
+ 0x2 = 1/4.
+ 0x3 = 1/2.
+ 0x4 = 1.
+ 0x5 = 2.
+ 0x6 = 4.
+ 0x7 = 8. */
+ uint64_t ctle_timer_max : 15; /**< [ 14: 0](R/W) DFE CTLE adaptation timer maximum count value.
+ 15-bit field, maximum value 0x7FFF. */
+#else /* Word 0 - Little Endian */
+ uint64_t ctle_timer_max : 15; /**< [ 14: 0](R/W) DFE CTLE adaptation timer maximum count value.
+ 15-bit field, maximum value 0x7FFF. */
+ uint64_t ctle_mu : 3; /**< [ 17: 15](R/W) DFE CTLE adaptation mu parameter setting.
+ 0x0 = 1/16.
+ 0x1 = 1/8.
+ 0x2 = 1/4.
+ 0x3 = 1/2.
+ 0x4 = 1.
+ 0x5 = 2.
+ 0x6 = 4.
+ 0x7 = 8. */
+ uint64_t ctle_leak : 3; /**< [ 20: 18](R/W) DFE CTLE adaptation leak parameter setting.
+ 0x0 = 1/128.
+ 0x1 = 1/64.
+ 0x2 = 1/32.
+ 0x3 = 1/16.
+ 0x4 = 1/8.
+ 0x5 = 1/4.
+ 0x6 = 1/2.
+ 0x7 = Disabled. */
+ uint64_t ctlez_timer_max : 15; /**< [ 35: 21](R/W) CTLEZ adaptation timer maximum count value.
+ 15-bit field, maximum value 0x7FFF. */
+ uint64_t ctlez_mu : 3; /**< [ 38: 36](R/W) CTLEZ adaptation mu parameter setting.
+ 0x0 = 1/16.
+ 0x1 = 1/8.
+ 0x2 = 1/4.
+ 0x3 = 1/2.
+ 0x4 = 1.
+ 0x5 = 2.
+ 0x6 = 4.
+ 0x7 = 8. */
+ uint64_t ctlez_leak : 3; /**< [ 41: 39](R/W) CTLEZ adaptation leak parameter setting.
+ 0x0 = 1/128.
+ 0x1 = 1/64.
+ 0x2 = 1/32.
+ 0x3 = 1/16.
+ 0x4 = 1/8.
+ 0x5 = 1/4.
+ 0x6 = 1/2.
+ 0x7 = Disabled. */
+ uint64_t ctlelte_timer_max : 15; /**< [ 56: 42](R/W) CTLELTE adaptation timer maximum count value.
+ 15-bit field, maximum value 0x7FFF. */
+ uint64_t ctlelte_mu : 3; /**< [ 59: 57](R/W) CTLELTE adaptation mu parameter setting.
+ 0x0 = 1/16.
+ 0x1 = 1/8.
+ 0x2 = 1/4.
+ 0x3 = 1/2.
+ 0x4 = 1.
+ 0x5 = 2.
+ 0x6 = 4.
+ 0x7 = 8. */
+ uint64_t ctlelte_leak : 3; /**< [ 62: 60](R/W) CTLELTE adaptation leak parameter setting.
+ 0x0 = 1/128.
+ 0x1 = 1/64.
+ 0x2 = 1/32.
+ 0x3 = 1/16.
+ 0x4 = 1/8.
+ 0x5 = 1/4.
+ 0x6 = 1/2.
+ 0x7 = Disabled. */
+ uint64_t ctlelte_leak_sgn : 1; /**< [ 63: 63](R/W) CTLELTE leak sign. 0 = Positive (add). 1 = Negative (subtract). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_6_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_6_bcfg bdk_gsernx_lanex_rx_6_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_6_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_6_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000cc0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_6_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_6_BCFG(a,b) bdk_gsernx_lanex_rx_6_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_6_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_6_BCFG(a,b) "GSERNX_LANEX_RX_6_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_6_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_6_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_6_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_6_bsts
+ *
+ * GSER Lane RX Base Status Register 6
+ * Status registers for LMS adaptation.
+ */
+union bdk_gsernx_lanex_rx_6_bsts
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_6_bsts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t ctlelte_adapt_count : 15; /**< [ 47: 33](RO/H) CTLELTE adaptation timer count value. Only valid when
+ GSERN()_LANE()_RX_6_BSTS[CTLELTE_ADAPT_STATUS] is deasserted. */
+ uint64_t ctlelte_adapt_status : 1; /**< [ 32: 32](RO/H) CTLELTE adaptation status. When 0, training is inactive. When 1, training is active. */
+ uint64_t subrate_now : 16; /**< [ 31: 16](RO/H) Subrate_Now counter value. Only valid when
+ GSERN()_LANE()_RX_6_BSTS[CTLELTE_ADAPT_STATUS] is deasserted. */
+ uint64_t upv_count : 16; /**< [ 15: 0](RO/H) UPV (Up-Vote) counter value. Only valid when
+ GSERN()_LANE()_RX_6_BSTS[CTLELTE_ADAPT_STATUS] is deasserted. */
+#else /* Word 0 - Little Endian */
+ uint64_t upv_count : 16; /**< [ 15: 0](RO/H) UPV (Up-Vote) counter value. Only valid when
+ GSERN()_LANE()_RX_6_BSTS[CTLELTE_ADAPT_STATUS] is deasserted. */
+ uint64_t subrate_now : 16; /**< [ 31: 16](RO/H) Subrate_Now counter value. Only valid when
+ GSERN()_LANE()_RX_6_BSTS[CTLELTE_ADAPT_STATUS] is deasserted. */
+ uint64_t ctlelte_adapt_status : 1; /**< [ 32: 32](RO/H) CTLELTE adaptation status. When 0, training is inactive. When 1, training is active. */
+ uint64_t ctlelte_adapt_count : 15; /**< [ 47: 33](RO/H) CTLELTE adaptation timer count value. Only valid when
+ GSERN()_LANE()_RX_6_BSTS[CTLELTE_ADAPT_STATUS] is deasserted. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_6_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_6_bsts bdk_gsernx_lanex_rx_6_bsts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_6_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_6_BSTS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900016b0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_6_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_6_BSTS(a,b) bdk_gsernx_lanex_rx_6_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_6_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_6_BSTS(a,b) "GSERNX_LANEX_RX_6_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_6_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_6_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_6_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_7_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 7
+ * Adaptation reset/mode for the DFE.
+ */
+union bdk_gsernx_lanex_rx_7_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_7_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_28_63 : 36;
+ uint64_t gain_diff_max : 4; /**< [ 27: 24](R/W) Gain Difference Maximum Value. This value is used in the correlation function
+ for the Pre-VGA Gain and VGA Gain adaptation.
+                                                                 The gain difference maximum value is used to manage the adaptation rates of these
+ two parameters (Pre-VGA Gain and VGA Gain). */
+ uint64_t prevga_gn_upv_rst : 1; /**< [ 23: 23](R/W) PREVGA_GN UPV count reset. Set to zero before running the receiver reset state
+ machine to bring the receiver up using PREVGA_GN adaptation subrate gear-shifting.
+ When enabled, the gear-shifting function can increment the current subrate
+ when the UPV count equals the current subrate (scaled). May be set to 1 if
+ gearshifting is not used. */
+ uint64_t prevga_gn_subrate_rst : 1; /**< [ 22: 22](R/W) PREVGA_GN subrate counter reset. The subrate counter controls the interval between LMS
+ updates.
+ When 1, the counter is reset. When 0, the counter increments to the value
+ controlled by GSERN()_LANE()_RX_21_BCFG[PREVGA_GN_SUBRATE_INIT] and
+ GSERN()_LANE()_RX_21_BCFG[PREVGA_GN_SUBRATE_FIN]. */
+ uint64_t prevga_gn_rst : 2; /**< [ 21: 20](R/W) PREVGA_GN adaptation reset/mode setting.
+ 0x0 = Reset.
+ 0x1 = Run once adaptation.
+ 0x2 = Pause adaptation.
+ 0x3 = Run continuous adaptation. */
+ uint64_t blwc_upv_rst : 1; /**< [ 19: 19](R/W) BLWC UPV count reset. Set to zero before running the receiver reset state
+ machine to bring the receiver up using BLWC adaptation subrate gearshifting.
+ When enabled, the gearshifting function can increment the current subrate
+ when the UPV count equals the current subrate (scaled). May be set to 1 if
+ gearshifting is not used. */
+ uint64_t blwc_subrate_rst : 1; /**< [ 18: 18](R/W) BLWC subrate counter reset. The subrate counter controls the interval between LMS updates.
+ When 1, the counter is reset. When 0, the counter increments to the value controlled by
+ the BLWC_SUBRATE_INIT and BLWC_SUBRATE_FINAL registers. */
+ uint64_t blwc_rst : 2; /**< [ 17: 16](R/W) BLWC adaptation reset/mode setting.
+ 0x0 = Reset.
+ 0x1 = Run once adaptation.
+ 0x2 = Pause adaptation.
+ 0x3 = Run continuous adaptation. */
+ uint64_t afeos_upv_rst : 1; /**< [ 15: 15](R/W) AFEOS UPV count reset. Set to zero before running the receiver reset state
+ machine to bring the receiver up using AFEOS adaptation subrate gearshifting.
+ When enabled, the gearshifting function can increment the current subrate
+ when the UPV count equals the current subrate (scaled). May be set to 1 if
+ gearshifting is not used. */
+ uint64_t afeos_subrate_rst : 1; /**< [ 14: 14](R/W) AFEOS subrate counter reset. The subrate counter controls the interval between LMS
+ updates.
+ When 1, the counter is reset. When 0, the counter increments to the value controlled by
+ the AFEOS_SUBRATE_INIT and AFEOS_SUBRATE_FINAL registers. */
+ uint64_t afeos_rst : 2; /**< [ 13: 12](R/W) AFE offset adaptation reset/mode setting.
+ 0x0 = Reset.
+ 0x1 = Run once adaptation.
+ 0x2 = Pause adaptation.
+ 0x3 = Run continuous adaptation. */
+ uint64_t upv_rst : 1; /**< [ 11: 11](R/W) UPV count reset. Set to zero before running the receiver reset state
+ machine to bring the receiver up using adaptation subrate gearshifting.
+ When enabled, the gearshifting function can increment the current subrate
+ when the UPV count equals the current subrate (scaled). May be set to 1 if
+ gearshifting is not used. */
+ uint64_t subrate_rst : 1; /**< [ 10: 10](R/W) Subrate counter reset. The subrate counter controls the interval between LMS updates.
+ When 1, the counter is reset. When 0, the counter increments to the value controlled by
+                                                                 the SUBRATE_INIT and SUBRATE_FINAL registers. */
+ uint64_t ctlelte_rst : 2; /**< [ 9: 8](R/W) CTLELTE adaptation reset/mode setting.
+ 0x0 = Reset.
+ 0x1 = Run once adaptation.
+ 0x2 = Pause adaptation.
+ 0x3 = Run continuous adaptation. */
+ uint64_t ctlez_rst : 2; /**< [ 7: 6](R/W) CTLEZ adaptation reset/mode setting.
+ 0x0 = Reset.
+ 0x1 = Run once adaptation.
+ 0x2 = Pause adaptation.
+ 0x3 = Run continuous adaptation. */
+ uint64_t vga_rst : 2; /**< [ 5: 4](R/W) VGA Gain adaptation reset/mode setting.
+ 0x0 = Reset.
+ 0x1 = Run once adaptation.
+ 0x2 = Pause adaptation.
+ 0x3 = Run continuous adaptation. */
+ uint64_t ctle_rst : 2; /**< [ 3: 2](R/W) CTLE/CTLEZ adaptation reset/mode setting.
+ 0x0 = Reset.
+ 0x1 = Run once adaptation.
+ 0x2 = Pause adaptation.
+ 0x3 = Run continuous adaptation. */
+ uint64_t dfe_rst : 2; /**< [ 1: 0](R/W) DFE adaptation reset/mode setting.
+ 0x0 = Reset.
+ 0x1 = Run once adaptation.
+ 0x2 = Pause adaptation.
+ 0x3 = Run continuous adaptation. */
+#else /* Word 0 - Little Endian */
+ uint64_t dfe_rst : 2; /**< [ 1: 0](R/W) DFE adaptation reset/mode setting.
+ 0x0 = Reset.
+ 0x1 = Run once adaptation.
+ 0x2 = Pause adaptation.
+ 0x3 = Run continuous adaptation. */
+ uint64_t ctle_rst : 2; /**< [ 3: 2](R/W) CTLE/CTLEZ adaptation reset/mode setting.
+ 0x0 = Reset.
+ 0x1 = Run once adaptation.
+ 0x2 = Pause adaptation.
+ 0x3 = Run continuous adaptation. */
+ uint64_t vga_rst : 2; /**< [ 5: 4](R/W) VGA Gain adaptation reset/mode setting.
+ 0x0 = Reset.
+ 0x1 = Run once adaptation.
+ 0x2 = Pause adaptation.
+ 0x3 = Run continuous adaptation. */
+ uint64_t ctlez_rst : 2; /**< [ 7: 6](R/W) CTLEZ adaptation reset/mode setting.
+ 0x0 = Reset.
+ 0x1 = Run once adaptation.
+ 0x2 = Pause adaptation.
+ 0x3 = Run continuous adaptation. */
+ uint64_t ctlelte_rst : 2; /**< [ 9: 8](R/W) CTLELTE adaptation reset/mode setting.
+ 0x0 = Reset.
+ 0x1 = Run once adaptation.
+ 0x2 = Pause adaptation.
+ 0x3 = Run continuous adaptation. */
+ uint64_t subrate_rst : 1; /**< [ 10: 10](R/W) Subrate counter reset. The subrate counter controls the interval between LMS updates.
+ When 1, the counter is reset. When 0, the counter increments to the value controlled by
+                                                                 the SUBRATE_INIT and SUBRATE_FINAL registers. */
+ uint64_t upv_rst : 1; /**< [ 11: 11](R/W) UPV count reset. Set to zero before running the receiver reset state
+ machine to bring the receiver up using adaptation subrate gearshifting.
+ When enabled, the gearshifting function can increment the current subrate
+ when the UPV count equals the current subrate (scaled). May be set to 1 if
+ gearshifting is not used. */
+ uint64_t afeos_rst : 2; /**< [ 13: 12](R/W) AFE offset adaptation reset/mode setting.
+ 0x0 = Reset.
+ 0x1 = Run once adaptation.
+ 0x2 = Pause adaptation.
+ 0x3 = Run continuous adaptation. */
+ uint64_t afeos_subrate_rst : 1; /**< [ 14: 14](R/W) AFEOS subrate counter reset. The subrate counter controls the interval between LMS
+ updates.
+ When 1, the counter is reset. When 0, the counter increments to the value controlled by
+ the AFEOS_SUBRATE_INIT and AFEOS_SUBRATE_FINAL registers. */
+ uint64_t afeos_upv_rst : 1; /**< [ 15: 15](R/W) AFEOS UPV count reset. Set to zero before running the receiver reset state
+ machine to bring the receiver up using AFEOS adaptation subrate gearshifting.
+ When enabled, the gearshifting function can increment the current subrate
+ when the UPV count equals the current subrate (scaled). May be set to 1 if
+ gearshifting is not used. */
+ uint64_t blwc_rst : 2; /**< [ 17: 16](R/W) BLWC adaptation reset/mode setting.
+ 0x0 = Reset.
+ 0x1 = Run once adaptation.
+ 0x2 = Pause adaptation.
+ 0x3 = Run continuous adaptation. */
+ uint64_t blwc_subrate_rst : 1; /**< [ 18: 18](R/W) BLWC subrate counter reset. The subrate counter controls the interval between LMS updates.
+ When 1, the counter is reset. When 0, the counter increments to the value controlled by
+ the BLWC_SUBRATE_INIT and BLWC_SUBRATE_FINAL registers. */
+ uint64_t blwc_upv_rst : 1; /**< [ 19: 19](R/W) BLWC UPV count reset. Set to zero before running the receiver reset state
+ machine to bring the receiver up using BLWC adaptation subrate gearshifting.
+ When enabled, the gearshifting function can increment the current subrate
+ when the UPV count equals the current subrate (scaled). May be set to 1 if
+ gearshifting is not used. */
+ uint64_t prevga_gn_rst : 2; /**< [ 21: 20](R/W) PREVGA_GN adaptation reset/mode setting.
+ 0x0 = Reset.
+ 0x1 = Run once adaptation.
+ 0x2 = Pause adaptation.
+ 0x3 = Run continuous adaptation. */
+ uint64_t prevga_gn_subrate_rst : 1; /**< [ 22: 22](R/W) PREVGA_GN subrate counter reset. The subrate counter controls the interval between LMS
+ updates.
+ When 1, the counter is reset. When 0, the counter increments to the value
+ controlled by GSERN()_LANE()_RX_21_BCFG[PREVGA_GN_SUBRATE_INIT] and
+ GSERN()_LANE()_RX_21_BCFG[PREVGA_GN_SUBRATE_FIN]. */
+ uint64_t prevga_gn_upv_rst : 1; /**< [ 23: 23](R/W) PREVGA_GN UPV count reset. Set to zero before running the receiver reset state
+ machine to bring the receiver up using PREVGA_GN adaptation subrate gear-shifting.
+ When enabled, the gear-shifting function can increment the current subrate
+ when the UPV count equals the current subrate (scaled). May be set to 1 if
+ gearshifting is not used. */
+ uint64_t gain_diff_max : 4; /**< [ 27: 24](R/W) Gain Difference Maximum Value. This value is used in the correlation function
+ for the Pre-VGA Gain and VGA Gain adaptation.
+                                                                 The gain difference maximum value is used to manage the adaptation rates of these
+ two parameters (Pre-VGA Gain and VGA Gain). */
+ uint64_t reserved_28_63 : 36;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_7_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_7_bcfg bdk_gsernx_lanex_rx_7_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_7_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_7_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000cd0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_7_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_7_BCFG(a,b) bdk_gsernx_lanex_rx_7_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_7_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_7_BCFG(a,b) "GSERNX_LANEX_RX_7_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_7_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_7_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_7_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_7_bsts
+ *
+ * GSER Lane RX Base Status Register 7
+ * Configuration registers for LMS adaptation. Current Deadband settings for adaptation.
+ */
+union bdk_gsernx_lanex_rx_7_bsts
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_7_bsts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_60_63 : 4;
+ uint64_t ctlelte_deadband_now : 12; /**< [ 59: 48](RO/H) Current 12-bit integer value of CTLELTE adaptation deadband
+ setting. Note that the 8 fraction bits of the accumulator are not
+ reported. Only valid when GSERN()_LANE()_RX_6_BSTS[CTLELTE_ADAPT_STATUS] is
+                                                                 deasserted. */
+ uint64_t ctlez_deadband_now : 12; /**< [ 47: 36](RO/H) Current 12-bit integer value of CTLEZ adaptation deadband
+ setting. Note that the 8 fraction bits of the accumulator are not
+ reported. Only valid when GSERN()_LANE()_RX_5_BSTS[CTLEZ_ADAPT_STATUS] is
+ deasserted. */
+ uint64_t ctle_deadband_now : 12; /**< [ 35: 24](RO/H) Current 12-bit integer value of CTLE adaptation deadband
+ setting. Note that the 8 fraction bits of the accumulator are not
+ reported. Only valid when GSERN()_LANE()_RX_5_BSTS[CTLE_ADAPT_STATUS] is
+ deasserted. */
+ uint64_t dfe_deadband_now : 12; /**< [ 23: 12](RO/H) Current 12-bit integer value of Coeff Adaptation deadband
+ setting. Note that the 8 fraction bits of the accumulator are not
+ reported. Only valid when GSERN()_LANE()_RX_5_BSTS[DFE_ADAPT_STATUS] is deasserted. */
+ uint64_t vga_deadband_now : 12; /**< [ 11: 0](RO/H) Current 12-bit integer value of VGA adaptation deadband
+ setting. Note that the 8 fraction bits of the accumulator are not
+ reported. Only valid when GSERN()_LANE()_RX_5_BSTS[VGA_ADAPT_STATUS] is deasserted. */
+#else /* Word 0 - Little Endian */
+ uint64_t vga_deadband_now : 12; /**< [ 11: 0](RO/H) Current 12-bit integer value of VGA adaptation deadband
+ setting. Note that the 8 fraction bits of the accumulator are not
+ reported. Only valid when GSERN()_LANE()_RX_5_BSTS[VGA_ADAPT_STATUS] is deasserted. */
+ uint64_t dfe_deadband_now : 12; /**< [ 23: 12](RO/H) Current 12-bit integer value of Coeff Adaptation deadband
+ setting. Note that the 8 fraction bits of the accumulator are not
+ reported. Only valid when GSERN()_LANE()_RX_5_BSTS[DFE_ADAPT_STATUS] is deasserted. */
+ uint64_t ctle_deadband_now : 12; /**< [ 35: 24](RO/H) Current 12-bit integer value of CTLE adaptation deadband
+ setting. Note that the 8 fraction bits of the accumulator are not
+ reported. Only valid when GSERN()_LANE()_RX_5_BSTS[CTLE_ADAPT_STATUS] is
+ deasserted. */
+ uint64_t ctlez_deadband_now : 12; /**< [ 47: 36](RO/H) Current 12-bit integer value of CTLEZ adaptation deadband
+ setting. Note that the 8 fraction bits of the accumulator are not
+ reported. Only valid when GSERN()_LANE()_RX_5_BSTS[CTLEZ_ADAPT_STATUS] is
+ deasserted. */
+ uint64_t ctlelte_deadband_now : 12; /**< [ 59: 48](RO/H) Current 12-bit integer value of CTLELTE adaptation deadband
+ setting. Note that the 8 fraction bits of the accumulator are not
+ reported. Only valid when GSERN()_LANE()_RX_6_BSTS[CTLELTE_ADAPT_STATUS] is
+                                                                 deasserted. */
+ uint64_t reserved_60_63 : 4;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_7_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_7_bsts bdk_gsernx_lanex_rx_7_bsts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_7_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_7_BSTS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900016c0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_7_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_7_BSTS(a,b) bdk_gsernx_lanex_rx_7_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_7_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_7_BSTS(a,b) "GSERNX_LANEX_RX_7_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_7_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_7_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_7_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_8_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 8
+ * Configuration registers for LMS adaptation
+ * Adaptation controls for Subrate parameters.
+ */
+union bdk_gsernx_lanex_rx_8_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_8_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_50_63 : 14;
+ uint64_t dfe_edgemode_ovrd : 1; /**< [ 49: 49](R/W) 0 = Selects non-transition bits for DFE adaptation.
+ 1 = Selects transition bits for DFE adaptation.
+
+ It applies the mode to the I, Q, and X paths.
+ GSERN()_LANE()_EYE_CTL_2[CAPTURE_EDGEMODE] sets the E path. */
+ uint64_t dfe_edgemode_ovrd_en : 1; /**< [ 48: 48](R/W) 0 = DFE state machine controls DFE edge mode select.
+ Currently, the DFE FSM will time interleave between both
+ edge modes (i.e. 50% non-transition, 50% transition).
+
+ 1 = [DFE_EDGEMODE_OVRD] controls DFE edge mode select. */
+ uint64_t reserved_35_47 : 13;
+ uint64_t subrate_scale : 3; /**< [ 34: 32](R/W) Subrate now counter scaling value for compare against Up Vote counter.
+ 0x0 = 1/32.
+ 0x1 = 1/16.
+ 0x2 = 3/32.
+ 0x3 = 1/8.
+ 0x4 = 3/16.
+ 0x5 = 1/4.
+ 0x6 = 3/8.
+ 0x7 = 1/2. */
+ uint64_t subrate_init : 16; /**< [ 31: 16](R/W) Subrate counter initial value. Sets the starting value for the LMS update interval, if
+ subrate gearshifting is enabled.
+ Set [SUBRATE_INIT] = [SUBRATE_FINAL] if subrate gearshifting is not
+ enabled. */
+ uint64_t subrate_final : 16; /**< [ 15: 0](R/W) Subrate counter final value. Sets the final value for the LMS update interval, if subrate
+ gearshifting is enabled.
+ Set [SUBRATE_INIT] = [SUBRATE_FINAL] if subrate gearshifting is not
+ enabled. */
+#else /* Word 0 - Little Endian */
+ uint64_t subrate_final : 16; /**< [ 15: 0](R/W) Subrate counter final value. Sets the final value for the LMS update interval, if subrate
+ gearshifting is enabled.
+ Set [SUBRATE_INIT] = [SUBRATE_FINAL] if subrate gearshifting is not
+ enabled. */
+ uint64_t subrate_init : 16; /**< [ 31: 16](R/W) Subrate counter initial value. Sets the starting value for the LMS update interval, if
+ subrate gearshifting is enabled.
+ Set [SUBRATE_INIT] = [SUBRATE_FINAL] if subrate gearshifting is not
+ enabled. */
+ uint64_t subrate_scale : 3; /**< [ 34: 32](R/W) Subrate now counter scaling value for compare against Up Vote counter.
+ 0x0 = 1/32.
+ 0x1 = 1/16.
+ 0x2 = 3/32.
+ 0x3 = 1/8.
+ 0x4 = 3/16.
+ 0x5 = 1/4.
+ 0x6 = 3/8.
+ 0x7 = 1/2. */
+ uint64_t reserved_35_47 : 13;
+ uint64_t dfe_edgemode_ovrd_en : 1; /**< [ 48: 48](R/W) 0 = DFE state machine controls DFE edge mode select.
+ Currently, the DFE FSM will time interleave between both
+ edge modes (i.e. 50% non-transition, 50% transition).
+
+ 1 = [DFE_EDGEMODE_OVRD] controls DFE edge mode select. */
+ uint64_t dfe_edgemode_ovrd : 1; /**< [ 49: 49](R/W) 0 = Selects non-transition bits for DFE adaptation.
+ 1 = Selects transition bits for DFE adaptation.
+
+ It applies the mode to the I, Q, and X paths.
+ GSERN()_LANE()_EYE_CTL_2[CAPTURE_EDGEMODE] sets the E path. */
+ uint64_t reserved_50_63 : 14;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_8_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_8_bcfg bdk_gsernx_lanex_rx_8_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_8_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_8_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000ce0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_8_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_8_BCFG(a,b) bdk_gsernx_lanex_rx_8_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_8_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_8_BCFG(a,b) "GSERNX_LANEX_RX_8_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_8_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_8_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_8_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_8_bsts
+ *
+ * GSER Lane RX Base Status Register 8
+ * Status registers for AFEOS LMS adaptation. Current AFEOS Deadband settings for adaptation.
+ */
+union bdk_gsernx_lanex_rx_8_bsts
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_8_bsts_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t afeos_subrate_now : 16; /**< [ 63: 48](RO/H) AFEOS subrate_now counter value. Only valid when
+                                                 GSERN()_LANE()_RX_8_BSTS[AFEOS_ADAPT_STATUS] is clear. */
+        uint64_t reserved_44_47 : 4;
+        uint64_t afeos_upv_count : 16; /**< [ 43: 28](RO/H) AFE up-vote counter value. Only valid when
+                                                 GSERN()_LANE()_RX_8_BSTS[AFEOS_ADAPT_STATUS] is clear. */
+        uint64_t afeos_adapt_status : 1; /**< [ 27: 27](RO/H) AFEOS adaptation status. When 0, training is inactive. When 1, training is active. */
+        uint64_t afeos_adapt_count : 15; /**< [ 26: 12](RO/H) AFEOS adaptation timer current count value. 15-bit field, maximum value 0x7FFF.
+                                                 Only valid when GSERN()_LANE()_RX_8_BSTS[AFEOS_ADAPT_STATUS] is clear. */
+        uint64_t afeos_deadband_now : 12; /**< [ 11: 0](RO/H) Current 12-bit integer value of AFEOS adaptation deadband
+                                                 setting. Note that the 8 fraction bits of the accumulator are not
+                                                 reported. Only valid when GSERN()_LANE()_RX_8_BSTS[AFEOS_ADAPT_STATUS] is clear. */
+#else /* Word 0 - Little Endian */
+        uint64_t afeos_deadband_now : 12; /**< [ 11: 0](RO/H) Current 12-bit integer value of AFEOS adaptation deadband
+                                                 setting. Note that the 8 fraction bits of the accumulator are not
+                                                 reported. Only valid when GSERN()_LANE()_RX_8_BSTS[AFEOS_ADAPT_STATUS] is clear. */
+        uint64_t afeos_adapt_count : 15; /**< [ 26: 12](RO/H) AFEOS adaptation timer current count value. 15-bit field, maximum value 0x7FFF.
+                                                 Only valid when GSERN()_LANE()_RX_8_BSTS[AFEOS_ADAPT_STATUS] is clear. */
+        uint64_t afeos_adapt_status : 1; /**< [ 27: 27](RO/H) AFEOS adaptation status. When 0, training is inactive. When 1, training is active. */
+        uint64_t afeos_upv_count : 16; /**< [ 43: 28](RO/H) AFE up-vote counter value. Only valid when
+                                                 GSERN()_LANE()_RX_8_BSTS[AFEOS_ADAPT_STATUS] is clear. */
+        uint64_t reserved_44_47 : 4;
+        uint64_t afeos_subrate_now : 16; /**< [ 63: 48](RO/H) AFEOS subrate_now counter value. Only valid when
+                                                 GSERN()_LANE()_RX_8_BSTS[AFEOS_ADAPT_STATUS] is clear. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_8_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_8_bsts bdk_gsernx_lanex_rx_8_bsts_t;
+
+/* Address lookup for GSERN(a)_LANE(b)_RX_8_BSTS. Valid only on CN9XXX with
+   module a <= 7 and lane b <= 4; any other combination reports a fatal error. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_8_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_8_BSTS(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e0900016d0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_8_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_8_BSTS(a,b) bdk_gsernx_lanex_rx_8_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_8_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_8_BSTS(a,b) "GSERNX_LANEX_RX_8_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_8_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_8_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_8_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_9_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 9
+ * Configuration registers for LMS adaptation. Deadband settings for adaptation.
+ */
+union bdk_gsernx_lanex_rx_9_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_9_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_60_63 : 4;
+        uint64_t ctlelte_deadband : 12; /**< [ 59: 48](R/W) CTLELTE adaptation deadband settings.
+                                                 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+        uint64_t ctlez_deadband : 12; /**< [ 47: 36](R/W) CTLEZ adaptation deadband settings.
+                                                 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+        uint64_t ctle_deadband : 12; /**< [ 35: 24](R/W) CTLE adaptation deadband settings.
+                                                 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+        uint64_t dfe_deadband : 12; /**< [ 23: 12](R/W) Coeff adaptation deadband settings.
+                                                 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+        uint64_t vga_deadband : 12; /**< [ 11: 0](R/W) VGA adaptation deadband settings.
+                                                 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+#else /* Word 0 - Little Endian */
+        uint64_t vga_deadband : 12; /**< [ 11: 0](R/W) VGA adaptation deadband settings.
+                                                 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+        uint64_t dfe_deadband : 12; /**< [ 23: 12](R/W) Coeff adaptation deadband settings.
+                                                 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+        uint64_t ctle_deadband : 12; /**< [ 35: 24](R/W) CTLE adaptation deadband settings.
+                                                 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+        uint64_t ctlez_deadband : 12; /**< [ 47: 36](R/W) CTLEZ adaptation deadband settings.
+                                                 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+        uint64_t ctlelte_deadband : 12; /**< [ 59: 48](R/W) CTLELTE adaptation deadband settings.
+                                                 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+        uint64_t reserved_60_63 : 4;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_9_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_9_bcfg bdk_gsernx_lanex_rx_9_bcfg_t;
+
+/* Address lookup for GSERN(a)_LANE(b)_RX_9_BCFG. Valid only on CN9XXX with
+   module a <= 7 and lane b <= 4; any other combination reports a fatal error. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_9_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_9_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090000cf0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_9_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_9_BCFG(a,b) bdk_gsernx_lanex_rx_9_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_9_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_9_BCFG(a,b) "GSERNX_LANEX_RX_9_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_9_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_9_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_9_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_9_bsts
+ *
+ * GSER Lane RX Base Status Register 9
+ * Status registers for DFE LMS adaptation.
+ */
+union bdk_gsernx_lanex_rx_9_bsts
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_9_bsts_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_12_63 : 52;
+        uint64_t dfe_c1_deadband_now : 12; /**< [ 11: 0](RO/H) Current 12-bit integer value of Coeff adaptation deadband
+                                                 setting. Note that the 8 fraction bits of the accumulator are not
+                                                 reported. Only valid when GSERN()_LANE()_RX_5_BSTS[DFE_ADAPT_STATUS] is clear.
+                                                 NOTE(review): the validity qualifier references RX_5_BSTS (not RX_9_BSTS);
+                                                 presumably intentional since DFE adaptation status lives there -- confirm
+                                                 against the hardware manual. */
+#else /* Word 0 - Little Endian */
+        uint64_t dfe_c1_deadband_now : 12; /**< [ 11: 0](RO/H) Current 12-bit integer value of Coeff adaptation deadband
+                                                 setting. Note that the 8 fraction bits of the accumulator are not
+                                                 reported. Only valid when GSERN()_LANE()_RX_5_BSTS[DFE_ADAPT_STATUS] is clear.
+                                                 NOTE(review): the validity qualifier references RX_5_BSTS (not RX_9_BSTS);
+                                                 presumably intentional since DFE adaptation status lives there -- confirm
+                                                 against the hardware manual. */
+        uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_9_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_9_bsts bdk_gsernx_lanex_rx_9_bsts_t;
+
+/* Address lookup for GSERN(a)_LANE(b)_RX_9_BSTS. Valid only on CN9XXX with
+   module a <= 7 and lane b <= 4; any other combination reports a fatal error. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_9_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_9_BSTS(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e0900016e0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_9_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_9_BSTS(a,b) bdk_gsernx_lanex_rx_9_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_9_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_9_BSTS(a,b) "GSERNX_LANEX_RX_9_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_9_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_9_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_9_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_idle_cal_cfg
+ *
+ * GSER Lane RX Idle Offset Dynamic ReCalibration Control Register
+ * Idle dynamic recalibration FSM control register. Used to configure the duration,
+ * frequency, and modes for the dynamic recalibration of the idle offset. Also,
+ * allows for enable/disable of this feature.
+ */
+union bdk_gsernx_lanex_rx_idle_cal_cfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_idle_cal_cfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t idle_recal_disable : 1; /**< [ 63: 63](R/W) Single bit for enabling or disabling the recalibration of idle offset. (This
+                                                 bit does not affect the initial calibration of the idle offset).
+                                                 0 = Allow idle recalibration to run.
+                                                 1 = Disable dynamic recalibration of the idle offset. */
+        uint64_t idle_recal_oob_mode_disable : 1;/**< [ 62: 62](R/W) Single bit for enabling or disabling the dynamic recalibration OOB delay feature.
+                                                 This feature allows us to push out any idle offset recalibration when any OOB
+                                                 activity has been detected on the idle signal.
+                                                 0 = Allow idle recalibration to detect OOB transactions and delay recalibration.
+                                                 1 = Disable OOB transaction detection and do NOT delay recalibration. */
+        uint64_t idle_oob_adder_counter_clear : 1;/**< [ 61: 61](R/W) This bit, when set to high, forces the counter counting the number of OOB caused
+                                                 delays to 8'h00. This is a static clear signal and has to be asserted to enable
+                                                 the counter to resume counting. The count is in terms of the number of
+                                                 RECALIBRATION_OOB_COUNT_ADDER increments.
+                                                 0 = Allow [OOB_DELAY_ADDER_COUNT] to increment.
+                                                 1 = Forces [OOB_DELAY_ADDER_COUNT] to 0x0.
+
+                                                 Internal:
+                                                 FIXME no such field RECALIBRATION_OOB_COUNT_ADDER then remove above exempt attribute. */
+        uint64_t reserved_40_60 : 21;
+        uint64_t max_oob_adder_count : 8; /**< [ 39: 32](R/W) Maximum number of OOB forced pushouts of the idle recalibrations allowed. If the
+                                                 number of pushouts matches this number, the idle offset is forced to recalibrate
+                                                 regardless of the state of the link. */
+        uint64_t oob_delay_adder_count : 32; /**< [ 31: 0](R/W) Number of svc_clk ticks allowed to delay the idle recalibration. Default is equal to
+                                                 1 second based on a 10 ns service clock cycle time. */
+#else /* Word 0 - Little Endian */
+        uint64_t oob_delay_adder_count : 32; /**< [ 31: 0](R/W) Number of svc_clk ticks allowed to delay the idle recalibration. Default is equal to
+                                                 1 second based on a 10 ns service clock cycle time. */
+        uint64_t max_oob_adder_count : 8; /**< [ 39: 32](R/W) Maximum number of OOB forced pushouts of the idle recalibrations allowed. If the
+                                                 number of pushouts matches this number, the idle offset is forced to recalibrate
+                                                 regardless of the state of the link. */
+        uint64_t reserved_40_60 : 21;
+        uint64_t idle_oob_adder_counter_clear : 1;/**< [ 61: 61](R/W) This bit, when set to high, forces the counter counting the number of OOB caused
+                                                 delays to 8'h00. This is a static clear signal and has to be asserted to enable
+                                                 the counter to resume counting. The count is in terms of the number of
+                                                 RECALIBRATION_OOB_COUNT_ADDER increments.
+                                                 0 = Allow [OOB_DELAY_ADDER_COUNT] to increment.
+                                                 1 = Forces [OOB_DELAY_ADDER_COUNT] to 0x0.
+
+                                                 Internal:
+                                                 FIXME no such field RECALIBRATION_OOB_COUNT_ADDER then remove above exempt attribute. */
+        uint64_t idle_recal_oob_mode_disable : 1;/**< [ 62: 62](R/W) Single bit for enabling or disabling the dynamic recalibration OOB delay feature.
+                                                 This feature allows us to push out any idle offset recalibration when any OOB
+                                                 activity has been detected on the idle signal.
+                                                 0 = Allow idle recalibration to detect OOB transactions and delay recalibration.
+                                                 1 = Disable OOB transaction detection and do NOT delay recalibration. */
+        uint64_t idle_recal_disable : 1; /**< [ 63: 63](R/W) Single bit for enabling or disabling the recalibration of idle offset. (This
+                                                 bit does not affect the initial calibration of the idle offset).
+                                                 0 = Allow idle recalibration to run.
+                                                 1 = Disable dynamic recalibration of the idle offset. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_idle_cal_cfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_idle_cal_cfg bdk_gsernx_lanex_rx_idle_cal_cfg_t;
+
+/* Address lookup for GSERN(a)_LANE(b)_RX_IDLE_CAL_CFG. Valid only on CN9XXX with
+   module a <= 7 and lane b <= 4; any other combination reports a fatal error. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_IDLE_CAL_CFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_IDLE_CAL_CFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090001530ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_IDLE_CAL_CFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_IDLE_CAL_CFG(a,b) bdk_gsernx_lanex_rx_idle_cal_cfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_IDLE_CAL_CFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_IDLE_CAL_CFG(a,b) "GSERNX_LANEX_RX_IDLE_CAL_CFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_IDLE_CAL_CFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_IDLE_CAL_CFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_IDLE_CAL_CFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_idle_recal_cnt
+ *
+ * GSER Lane RX Idle Duration Count Before ReCalibration Register
+ * Count used to specify the duration of time between idle offset recalibrations.
+ */
+union bdk_gsernx_lanex_rx_idle_recal_cnt
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_idle_recal_cnt_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_48_63 : 16;
+        uint64_t idle_recal_duration_count : 48;/**< [ 47: 0](R/W) Number of svc_clk ticks to specify the delay between idle recalibration
+                                                 triggers. Default is equal to
+                                                 1 min based on a 10ns svc_clk cycle time. */
+#else /* Word 0 - Little Endian */
+        uint64_t idle_recal_duration_count : 48;/**< [ 47: 0](R/W) Number of svc_clk ticks to specify the delay between idle recalibration
+                                                 triggers. Default is equal to
+                                                 1 min based on a 10ns svc_clk cycle time. */
+        uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_idle_recal_cnt_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_idle_recal_cnt bdk_gsernx_lanex_rx_idle_recal_cnt_t;
+
+/* Address lookup for GSERN(a)_LANE(b)_RX_IDLE_RECAL_CNT. Valid only on CN9XXX with
+   module a <= 7 and lane b <= 4; any other combination reports a fatal error. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_IDLE_RECAL_CNT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_IDLE_RECAL_CNT(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090001540ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_IDLE_RECAL_CNT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_IDLE_RECAL_CNT(a,b) bdk_gsernx_lanex_rx_idle_recal_cnt_t
+#define bustype_BDK_GSERNX_LANEX_RX_IDLE_RECAL_CNT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_IDLE_RECAL_CNT(a,b) "GSERNX_LANEX_RX_IDLE_RECAL_CNT"
+#define device_bar_BDK_GSERNX_LANEX_RX_IDLE_RECAL_CNT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_IDLE_RECAL_CNT(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_IDLE_RECAL_CNT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_idledet_1_bcfg
+ *
+ * GSER Lane RX Idle Detection Filter Control Register 1
+ * Parameters controlling the digital filter of the analog receiver's raw idle
+ * signal. Setting all fields to 1, i.e., N0=N1=I0=I1=L0=L1=1, results in no filtering.
+ * (L0/L1 are in GSERN()_LANE()_RX_IDLEDET_2_BCFG; this register holds N0/N1/I0/I1.)
+ */
+union bdk_gsernx_lanex_rx_idledet_1_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_idledet_1_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reset_filter : 1; /**< [ 63: 63](R/W) Reset for the digital filter of the analog receiver's raw idle signal. Set the
+                                                 other fields in this register as desired before releasing [RESET_FILTER]. Note
+                                                 that while the filter is in reset, the filter output will be high, indicating
+                                                 idle.
+                                                 0 = Allow filter to run.
+                                                 1 = Hold filter in reset. */
+        uint64_t i1 : 4; /**< [ 62: 59](R/W) Ones count increment parameter. When a one in the raw idle signal from the custom
+                                                 macro is encountered, the ones count is incremented by this amount, saturating
+                                                 to a maximum of [N1]. */
+        uint64_t i0 : 4; /**< [ 58: 55](R/W) Zeros count increment parameter. When a zero in the raw idle signal from the
+                                                 custom macro is encountered, the zeros count is incremented by this amount,
+                                                 saturating to a maximum count of [N0]. */
+        uint64_t reserved_54 : 1;
+        uint64_t n1 : 27; /**< [ 53: 27](R/W) Threshold for the count of ones in the raw idle signal from the custom macro
+                                                 required to assert the idle filter output. */
+        uint64_t n0 : 27; /**< [ 26: 0](R/W) Threshold for the count of zeros in the raw idle signal from the custom macro
+                                                 required to deassert the idle filter output. */
+#else /* Word 0 - Little Endian */
+        uint64_t n0 : 27; /**< [ 26: 0](R/W) Threshold for the count of zeros in the raw idle signal from the custom macro
+                                                 required to deassert the idle filter output. */
+        uint64_t n1 : 27; /**< [ 53: 27](R/W) Threshold for the count of ones in the raw idle signal from the custom macro
+                                                 required to assert the idle filter output. */
+        uint64_t reserved_54 : 1;
+        uint64_t i0 : 4; /**< [ 58: 55](R/W) Zeros count increment parameter. When a zero in the raw idle signal from the
+                                                 custom macro is encountered, the zeros count is incremented by this amount,
+                                                 saturating to a maximum count of [N0]. */
+        uint64_t i1 : 4; /**< [ 62: 59](R/W) Ones count increment parameter. When a one in the raw idle signal from the custom
+                                                 macro is encountered, the ones count is incremented by this amount, saturating
+                                                 to a maximum of [N1]. */
+        uint64_t reset_filter : 1; /**< [ 63: 63](R/W) Reset for the digital filter of the analog receiver's raw idle signal. Set the
+                                                 other fields in this register as desired before releasing [RESET_FILTER]. Note
+                                                 that while the filter is in reset, the filter output will be high, indicating
+                                                 idle.
+                                                 0 = Allow filter to run.
+                                                 1 = Hold filter in reset. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_idledet_1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_idledet_1_bcfg bdk_gsernx_lanex_rx_idledet_1_bcfg_t;
+
+/* Address lookup for GSERN(a)_LANE(b)_RX_IDLEDET_1_BCFG. Valid only on CN9XXX with
+   module a <= 7 and lane b <= 4; any other combination reports a fatal error. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_IDLEDET_1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_IDLEDET_1_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090001100ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_IDLEDET_1_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_IDLEDET_1_BCFG(a,b) bdk_gsernx_lanex_rx_idledet_1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_IDLEDET_1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_IDLEDET_1_BCFG(a,b) "GSERNX_LANEX_RX_IDLEDET_1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_IDLEDET_1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_IDLEDET_1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_IDLEDET_1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_idledet_2_bcfg
+ *
+ * GSER Lane RX Idle Detection Filter Control Register 2
+ * Parameters controlling the digital filter of the analog receiver's raw idle
+ * signal. Setting all fields to 1, i.e., N0=N1=I0=I1=L0=L1=1, results in no filtering.
+ * (N0/N1/I0/I1 are in GSERN()_LANE()_RX_IDLEDET_1_BCFG; this register holds L0/L1.)
+ */
+union bdk_gsernx_lanex_rx_idledet_2_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_idledet_2_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_56_63 : 8;
+        uint64_t frc_en : 1; /**< [ 55: 55](R/W) Force enable.
+                                                 0 = Use the filter output based on the input from the analog idle detector.
+                                                 1 = Force the output of the digital idle filter to the value specified by
+                                                 [FRC_VAL]. */
+        uint64_t frc_val : 1; /**< [ 54: 54](R/W) When [FRC_EN] is set to 1, this will be the value forced at the output of the
+                                                 digital idle filter. */
+        uint64_t l1 : 27; /**< [ 53: 27](R/W) Ones count leak parameter. When a zero in the raw idle signal from the custom
+                                                 macro is encountered, the ones count is decremented by this amount, saturating
+                                                 to a minimum count of zero. (Set L1=N1 and I1=1 for a simple run-of-N1 ones to
+                                                 assert the filter output.) The minimum setting for this field is 1. */
+        uint64_t l0 : 27; /**< [ 26: 0](R/W) Zeros count leak parameter. When a one in the raw idle signal from the custom
+                                                 macro is encountered, the zeros count is decremented by this amount, saturating
+                                                 to a minimum count of zero. (Set L0=N0 and I0=1 for a simple run-of-N0 zeros to
+                                                 deassert the filter output.) The minimum setting for this field is 1. */
+#else /* Word 0 - Little Endian */
+        uint64_t l0 : 27; /**< [ 26: 0](R/W) Zeros count leak parameter. When a one in the raw idle signal from the custom
+                                                 macro is encountered, the zeros count is decremented by this amount, saturating
+                                                 to a minimum count of zero. (Set L0=N0 and I0=1 for a simple run-of-N0 zeros to
+                                                 deassert the filter output.) The minimum setting for this field is 1. */
+        uint64_t l1 : 27; /**< [ 53: 27](R/W) Ones count leak parameter. When a zero in the raw idle signal from the custom
+                                                 macro is encountered, the ones count is decremented by this amount, saturating
+                                                 to a minimum count of zero. (Set L1=N1 and I1=1 for a simple run-of-N1 ones to
+                                                 assert the filter output.) The minimum setting for this field is 1. */
+        uint64_t frc_val : 1; /**< [ 54: 54](R/W) When [FRC_EN] is set to 1, this will be the value forced at the output of the
+                                                 digital idle filter. */
+        uint64_t frc_en : 1; /**< [ 55: 55](R/W) Force enable.
+                                                 0 = Use the filter output based on the input from the analog idle detector.
+                                                 1 = Force the output of the digital idle filter to the value specified by
+                                                 [FRC_VAL]. */
+        uint64_t reserved_56_63 : 8;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_idledet_2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_idledet_2_bcfg bdk_gsernx_lanex_rx_idledet_2_bcfg_t;
+
+/* Address lookup for GSERN(a)_LANE(b)_RX_IDLEDET_2_BCFG. Valid only on CN9XXX with
+   module a <= 7 and lane b <= 4; any other combination reports a fatal error. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_IDLEDET_2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_IDLEDET_2_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090001110ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_IDLEDET_2_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_IDLEDET_2_BCFG(a,b) bdk_gsernx_lanex_rx_idledet_2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_IDLEDET_2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_IDLEDET_2_BCFG(a,b) "GSERNX_LANEX_RX_IDLEDET_2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_IDLEDET_2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_IDLEDET_2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_IDLEDET_2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_idledet_bsts
+ *
+ * GSER Lane RX Base Idle Status Register
+ * Status register for receiver idle detection status.
+ */
+union bdk_gsernx_lanex_rx_idledet_bsts
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_idledet_bsts_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_1_63 : 63;
+        uint64_t idle : 1; /**< [ 0: 0](RO/H) One indicates that the receiver idle detection circuit has detected no input
+                                                 data stream. Valid results can be expected anytime after the custom receiver
+                                                 power-up and reset-exit sequence is complete. This is the output of the digital
+                                                 idle detection filter. */
+#else /* Word 0 - Little Endian */
+        uint64_t idle : 1; /**< [ 0: 0](RO/H) One indicates that the receiver idle detection circuit has detected no input
+                                                 data stream. Valid results can be expected anytime after the custom receiver
+                                                 power-up and reset-exit sequence is complete. This is the output of the digital
+                                                 idle detection filter. */
+        uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_idledet_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_idledet_bsts bdk_gsernx_lanex_rx_idledet_bsts_t;
+
+/* Address lookup for GSERN(a)_LANE(b)_RX_IDLEDET_BSTS. Valid only on CN9XXX with
+   module a <= 7 and lane b <= 4; any other combination reports a fatal error. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_IDLEDET_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_IDLEDET_BSTS(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090001120ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_IDLEDET_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_IDLEDET_BSTS(a,b) bdk_gsernx_lanex_rx_idledet_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_IDLEDET_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_IDLEDET_BSTS(a,b) "GSERNX_LANEX_RX_IDLEDET_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_IDLEDET_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_IDLEDET_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_IDLEDET_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_itrim_0_bcfg
+ *
+ * GSER Lane Receiver Ir25 Trim Override Value Settings Register 0
+ * ir25_trim override settings are in groups of 4 bits. These only take
+ * effect when the corresponding enable bit(s) are set.
+ * This register covers ir25_trim bits 31..87 (TRIM1..TRIM15); bits 0..3 are reserved here.
+ */
+union bdk_gsernx_lanex_rx_itrim_0_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_itrim_0_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t trim15_ovrd : 4; /**< [ 63: 60](R/W) Override setting for bits 87..84 of 180b ir25_trim. */
+        uint64_t trim14_ovrd : 4; /**< [ 59: 56](R/W) Override setting for bits 83..80 of 180b ir25_trim. */
+        uint64_t trim13_ovrd : 4; /**< [ 55: 52](R/W) Override setting for bits 79..76 of 180b ir25_trim. */
+        uint64_t trim12_ovrd : 4; /**< [ 51: 48](R/W) Override setting for bits 75..72 of 180b ir25_trim. */
+        uint64_t trim11_ovrd : 4; /**< [ 47: 44](R/W) Override setting for bits 71..68 of 180b ir25_trim. */
+        uint64_t trim10_ovrd : 4; /**< [ 43: 40](R/W) Override setting for bits 67..64 of 180b ir25_trim. */
+        uint64_t trim9_ovrd : 4; /**< [ 39: 36](R/W) Override setting for bits 63..60 of 180b ir25_trim. */
+        uint64_t trim8_ovrd : 4; /**< [ 35: 32](R/W) Override setting for bits 59..56 of 180b ir25_trim. */
+        uint64_t trim7_ovrd : 4; /**< [ 31: 28](R/W) Override setting for bits 55..52 of 180b ir25_trim. */
+        uint64_t trim6_ovrd : 4; /**< [ 27: 24](R/W) Override setting for bits 51..48 of 180b ir25_trim. */
+        uint64_t trim5_ovrd : 4; /**< [ 23: 20](R/W) Override setting for bits 47..44 of 180b ir25_trim. */
+        uint64_t trim4_ovrd : 4; /**< [ 19: 16](R/W) Override setting for bits 43..40 of 180b ir25_trim. */
+        uint64_t trim3_ovrd : 4; /**< [ 15: 12](R/W) Override setting for bits 39..36 of 180b ir25_trim. */
+        uint64_t trim2_ovrd : 4; /**< [ 11: 8](R/W) Override setting for bits 35..32 of 180b ir25_trim. */
+        uint64_t trim1_ovrd : 4; /**< [ 7: 4](R/W) Override setting for bits 31..28 of 180b ir25_trim. */
+        uint64_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+        uint64_t reserved_0_3 : 4;
+        uint64_t trim1_ovrd : 4; /**< [ 7: 4](R/W) Override setting for bits 31..28 of 180b ir25_trim. */
+        uint64_t trim2_ovrd : 4; /**< [ 11: 8](R/W) Override setting for bits 35..32 of 180b ir25_trim. */
+        uint64_t trim3_ovrd : 4; /**< [ 15: 12](R/W) Override setting for bits 39..36 of 180b ir25_trim. */
+        uint64_t trim4_ovrd : 4; /**< [ 19: 16](R/W) Override setting for bits 43..40 of 180b ir25_trim. */
+        uint64_t trim5_ovrd : 4; /**< [ 23: 20](R/W) Override setting for bits 47..44 of 180b ir25_trim. */
+        uint64_t trim6_ovrd : 4; /**< [ 27: 24](R/W) Override setting for bits 51..48 of 180b ir25_trim. */
+        uint64_t trim7_ovrd : 4; /**< [ 31: 28](R/W) Override setting for bits 55..52 of 180b ir25_trim. */
+        uint64_t trim8_ovrd : 4; /**< [ 35: 32](R/W) Override setting for bits 59..56 of 180b ir25_trim. */
+        uint64_t trim9_ovrd : 4; /**< [ 39: 36](R/W) Override setting for bits 63..60 of 180b ir25_trim. */
+        uint64_t trim10_ovrd : 4; /**< [ 43: 40](R/W) Override setting for bits 67..64 of 180b ir25_trim. */
+        uint64_t trim11_ovrd : 4; /**< [ 47: 44](R/W) Override setting for bits 71..68 of 180b ir25_trim. */
+        uint64_t trim12_ovrd : 4; /**< [ 51: 48](R/W) Override setting for bits 75..72 of 180b ir25_trim. */
+        uint64_t trim13_ovrd : 4; /**< [ 55: 52](R/W) Override setting for bits 79..76 of 180b ir25_trim. */
+        uint64_t trim14_ovrd : 4; /**< [ 59: 56](R/W) Override setting for bits 83..80 of 180b ir25_trim. */
+        uint64_t trim15_ovrd : 4; /**< [ 63: 60](R/W) Override setting for bits 87..84 of 180b ir25_trim. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_itrim_0_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_itrim_0_bcfg bdk_gsernx_lanex_rx_itrim_0_bcfg_t;
+
+/* Address lookup for GSERN(a)_LANE(b)_RX_ITRIM_0_BCFG. Valid only on CN9XXX with
+   module a <= 7 and lane b <= 4; any other combination reports a fatal error. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_0_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_0_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090001a80ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_ITRIM_0_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_ITRIM_0_BCFG(a,b) bdk_gsernx_lanex_rx_itrim_0_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_ITRIM_0_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_ITRIM_0_BCFG(a,b) "GSERNX_LANEX_RX_ITRIM_0_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_ITRIM_0_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_ITRIM_0_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_ITRIM_0_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_itrim_0_bsts
+ *
+ * GSER Lane Receiver Ir25 Trim Settings Register 0
+ * These are the ir25_trim settings in use. ir25_trim settings are in groups of 4 bits.
+ * Read-only view of the trim values covered by GSERN()_LANE()_RX_ITRIM_0_BCFG.
+ */
+union bdk_gsernx_lanex_rx_itrim_0_bsts
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_itrim_0_bsts_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t trim15 : 4; /**< [ 63: 60](RO/H) Setting for bits 87..84 of 180b ir25_trim. */
+        uint64_t trim14 : 4; /**< [ 59: 56](RO/H) Setting for bits 83..80 of 180b ir25_trim. */
+        uint64_t trim13 : 4; /**< [ 55: 52](RO/H) Setting for bits 79..76 of 180b ir25_trim. */
+        uint64_t trim12 : 4; /**< [ 51: 48](RO/H) Setting for bits 75..72 of 180b ir25_trim. */
+        uint64_t trim11 : 4; /**< [ 47: 44](RO/H) Setting for bits 71..68 of 180b ir25_trim. */
+        uint64_t trim10 : 4; /**< [ 43: 40](RO/H) Setting for bits 67..64 of 180b ir25_trim. */
+        uint64_t trim9 : 4; /**< [ 39: 36](RO/H) Setting for bits 63..60 of 180b ir25_trim. */
+        uint64_t trim8 : 4; /**< [ 35: 32](RO/H) Setting for bits 59..56 of 180b ir25_trim. */
+        uint64_t trim7 : 4; /**< [ 31: 28](RO/H) Setting for bits 55..52 of 180b ir25_trim. */
+        uint64_t trim6 : 4; /**< [ 27: 24](RO/H) Setting for bits 51..48 of 180b ir25_trim. */
+        uint64_t trim5 : 4; /**< [ 23: 20](RO/H) Setting for bits 47..44 of 180b ir25_trim. */
+        uint64_t trim4 : 4; /**< [ 19: 16](RO/H) Setting for bits 43..40 of 180b ir25_trim. */
+        uint64_t trim3 : 4; /**< [ 15: 12](RO/H) Setting for bits 39..36 of 180b ir25_trim. */
+        uint64_t trim2 : 4; /**< [ 11: 8](RO/H) Setting for bits 35..32 of 180b ir25_trim. */
+        uint64_t trim1 : 4; /**< [ 7: 4](RO/H) Setting for bits 31..28 of 180b ir25_trim. */
+        uint64_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+        uint64_t reserved_0_3 : 4;
+        uint64_t trim1 : 4; /**< [ 7: 4](RO/H) Setting for bits 31..28 of 180b ir25_trim. */
+        uint64_t trim2 : 4; /**< [ 11: 8](RO/H) Setting for bits 35..32 of 180b ir25_trim. */
+        uint64_t trim3 : 4; /**< [ 15: 12](RO/H) Setting for bits 39..36 of 180b ir25_trim. */
+        uint64_t trim4 : 4; /**< [ 19: 16](RO/H) Setting for bits 43..40 of 180b ir25_trim. */
+        uint64_t trim5 : 4; /**< [ 23: 20](RO/H) Setting for bits 47..44 of 180b ir25_trim. */
+        uint64_t trim6 : 4; /**< [ 27: 24](RO/H) Setting for bits 51..48 of 180b ir25_trim. */
+        uint64_t trim7 : 4; /**< [ 31: 28](RO/H) Setting for bits 55..52 of 180b ir25_trim. */
+        uint64_t trim8 : 4; /**< [ 35: 32](RO/H) Setting for bits 59..56 of 180b ir25_trim. */
+        uint64_t trim9 : 4; /**< [ 39: 36](RO/H) Setting for bits 63..60 of 180b ir25_trim. */
+        uint64_t trim10 : 4; /**< [ 43: 40](RO/H) Setting for bits 67..64 of 180b ir25_trim. */
+        uint64_t trim11 : 4; /**< [ 47: 44](RO/H) Setting for bits 71..68 of 180b ir25_trim. */
+        uint64_t trim12 : 4; /**< [ 51: 48](RO/H) Setting for bits 75..72 of 180b ir25_trim. */
+        uint64_t trim13 : 4; /**< [ 55: 52](RO/H) Setting for bits 79..76 of 180b ir25_trim. */
+        uint64_t trim14 : 4; /**< [ 59: 56](RO/H) Setting for bits 83..80 of 180b ir25_trim. */
+        uint64_t trim15 : 4; /**< [ 63: 60](RO/H) Setting for bits 87..84 of 180b ir25_trim. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_itrim_0_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_itrim_0_bsts bdk_gsernx_lanex_rx_itrim_0_bsts_t;
+
+/* Address lookup for GSERN(a)_LANE(b)_RX_ITRIM_0_BSTS. Valid only on CN9XXX with
+   module a <= 7 and lane b <= 4; any other combination reports a fatal error. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_0_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_0_BSTS(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090001bd0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_ITRIM_0_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_ITRIM_0_BSTS(a,b) bdk_gsernx_lanex_rx_itrim_0_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_ITRIM_0_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_ITRIM_0_BSTS(a,b) "GSERNX_LANEX_RX_ITRIM_0_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_ITRIM_0_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_ITRIM_0_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_ITRIM_0_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_itrim_1_bcfg
+ *
+ * GSER Lane Receiver Ir25 Trim Override Value Settings Register 1
+ * ir25_trim override settings are in groups of 4 bits. These only take
+ * effect when the corresponding enable bit(s) in GSERN()_LANE()_RX_ITRIM_4_BCFG are set.
+ */
+union bdk_gsernx_lanex_rx_itrim_1_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_itrim_1_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t trim31_ovrd : 4; /**< [ 63: 60](R/W) Override setting for bits 179..176 of 180b ir25_trim. */
+ uint64_t trim30_ovrd : 4; /**< [ 59: 56](R/W) Override setting for bits 175..172 of 180b ir25_trim. */
+ uint64_t trim29_ovrd : 4; /**< [ 55: 52](R/W) Override setting for bits 171..168 of 180b ir25_trim. */
+ uint64_t trim28_ovrd : 4; /**< [ 51: 48](R/W) Override setting for bits 167..164 of 180b ir25_trim. */
+ uint64_t trim27_ovrd : 4; /**< [ 47: 44](R/W) Override setting for bits 163..160 of 180b ir25_trim. */
+ uint64_t trim26_ovrd : 4; /**< [ 43: 40](R/W) Override setting for bits 159..156 of 180b ir25_trim. */
+ uint64_t trim25_ovrd : 4; /**< [ 39: 36](R/W) Override setting for bits 155..152 of 180b ir25_trim. */
+ uint64_t trim24_ovrd : 4; /**< [ 35: 32](R/W) Override setting for bits 151..148 of 180b ir25_trim. */
+ uint64_t trim23_ovrd : 4; /**< [ 31: 28](R/W) Override setting for bits 147..144 of 180b ir25_trim. */
+ uint64_t trim22_ovrd : 4; /**< [ 27: 24](R/W) Override setting for bits 143..140 of 180b ir25_trim. */
+ uint64_t trim21_ovrd : 4; /**< [ 23: 20](R/W) Override setting for bits 139..136 of 180b ir25_trim. */
+ uint64_t trim20_ovrd : 4; /**< [ 19: 16](R/W) Override setting for bits 135..132 of 180b ir25_trim. */
+ uint64_t trim19_ovrd : 4; /**< [ 15: 12](R/W) Override setting for bits 131..128 of 180b ir25_trim. */
+ uint64_t trim18_ovrd : 4; /**< [ 11: 8](R/W) Override setting for bits 127..124 of 180b ir25_trim. */
+ uint64_t trim17_ovrd : 4; /**< [ 7: 4](R/W) Override setting for bits 123..120 of 180b ir25_trim. */
+ uint64_t trim16_ovrd : 4; /**< [ 3: 0](R/W) Override setting for bits 119..116 of 180b ir25_trim. */
+#else /* Word 0 - Little Endian */
+ uint64_t trim16_ovrd : 4; /**< [ 3: 0](R/W) Override setting for bits 119..116 of 180b ir25_trim. */
+ uint64_t trim17_ovrd : 4; /**< [ 7: 4](R/W) Override setting for bits 123..120 of 180b ir25_trim. */
+ uint64_t trim18_ovrd : 4; /**< [ 11: 8](R/W) Override setting for bits 127..124 of 180b ir25_trim. */
+ uint64_t trim19_ovrd : 4; /**< [ 15: 12](R/W) Override setting for bits 131..128 of 180b ir25_trim. */
+ uint64_t trim20_ovrd : 4; /**< [ 19: 16](R/W) Override setting for bits 135..132 of 180b ir25_trim. */
+ uint64_t trim21_ovrd : 4; /**< [ 23: 20](R/W) Override setting for bits 139..136 of 180b ir25_trim. */
+ uint64_t trim22_ovrd : 4; /**< [ 27: 24](R/W) Override setting for bits 143..140 of 180b ir25_trim. */
+ uint64_t trim23_ovrd : 4; /**< [ 31: 28](R/W) Override setting for bits 147..144 of 180b ir25_trim. */
+ uint64_t trim24_ovrd : 4; /**< [ 35: 32](R/W) Override setting for bits 151..148 of 180b ir25_trim. */
+ uint64_t trim25_ovrd : 4; /**< [ 39: 36](R/W) Override setting for bits 155..152 of 180b ir25_trim. */
+ uint64_t trim26_ovrd : 4; /**< [ 43: 40](R/W) Override setting for bits 159..156 of 180b ir25_trim. */
+ uint64_t trim27_ovrd : 4; /**< [ 47: 44](R/W) Override setting for bits 163..160 of 180b ir25_trim. */
+ uint64_t trim28_ovrd : 4; /**< [ 51: 48](R/W) Override setting for bits 167..164 of 180b ir25_trim. */
+ uint64_t trim29_ovrd : 4; /**< [ 55: 52](R/W) Override setting for bits 171..168 of 180b ir25_trim. */
+ uint64_t trim30_ovrd : 4; /**< [ 59: 56](R/W) Override setting for bits 175..172 of 180b ir25_trim. */
+ uint64_t trim31_ovrd : 4; /**< [ 63: 60](R/W) Override setting for bits 179..176 of 180b ir25_trim. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_itrim_1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_itrim_1_bcfg bdk_gsernx_lanex_rx_itrim_1_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_1_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4))) /* a = GSERN module (0..7), b = lane (0..4); CN9XXX only */
+ return 0x87e090001a90ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7); /* BAR0 base + per-module + per-lane stride */
+ __bdk_csr_fatal("GSERNX_LANEX_RX_ITRIM_1_BCFG", 2, a, b, 0, 0); /* out-of-range index or wrong model */
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_ITRIM_1_BCFG(a,b) bdk_gsernx_lanex_rx_itrim_1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_ITRIM_1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_ITRIM_1_BCFG(a,b) "GSERNX_LANEX_RX_ITRIM_1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_ITRIM_1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_ITRIM_1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_ITRIM_1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_itrim_1_bsts
+ *
+ * GSER Lane Receiver Ir25 Trim Settings Register 1
+ * These are the ir25_trim settings in use (ir25_trim bits 179..116), in groups of 4 bits.
+ */
+union bdk_gsernx_lanex_rx_itrim_1_bsts
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_itrim_1_bsts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t trim31 : 4; /**< [ 63: 60](RO/H) Setting for bits 179..176 of 180b ir25_trim. */
+ uint64_t trim30 : 4; /**< [ 59: 56](RO/H) Setting for bits 175..172 of 180b ir25_trim. */
+ uint64_t trim29 : 4; /**< [ 55: 52](RO/H) Setting for bits 171..168 of 180b ir25_trim. */
+ uint64_t trim28 : 4; /**< [ 51: 48](RO/H) Setting for bits 167..164 of 180b ir25_trim. */
+ uint64_t trim27 : 4; /**< [ 47: 44](RO/H) Setting for bits 163..160 of 180b ir25_trim. */
+ uint64_t trim26 : 4; /**< [ 43: 40](RO/H) Setting for bits 159..156 of 180b ir25_trim. */
+ uint64_t trim25 : 4; /**< [ 39: 36](RO/H) Setting for bits 155..152 of 180b ir25_trim. */
+ uint64_t trim24 : 4; /**< [ 35: 32](RO/H) Setting for bits 151..148 of 180b ir25_trim. */
+ uint64_t trim23 : 4; /**< [ 31: 28](RO/H) Setting for bits 147..144 of 180b ir25_trim. */
+ uint64_t trim22 : 4; /**< [ 27: 24](RO/H) Setting for bits 143..140 of 180b ir25_trim. */
+ uint64_t trim21 : 4; /**< [ 23: 20](RO/H) Setting for bits 139..136 of 180b ir25_trim. */
+ uint64_t trim20 : 4; /**< [ 19: 16](RO/H) Setting for bits 135..132 of 180b ir25_trim. */
+ uint64_t trim19 : 4; /**< [ 15: 12](RO/H) Setting for bits 131..128 of 180b ir25_trim. */
+ uint64_t trim18 : 4; /**< [ 11: 8](RO/H) Setting for bits 127..124 of 180b ir25_trim. */
+ uint64_t trim17 : 4; /**< [ 7: 4](RO/H) Setting for bits 123..120 of 180b ir25_trim. */
+ uint64_t trim16 : 4; /**< [ 3: 0](RO/H) Setting for bits 119..116 of 180b ir25_trim. */
+#else /* Word 0 - Little Endian */
+ uint64_t trim16 : 4; /**< [ 3: 0](RO/H) Setting for bits 119..116 of 180b ir25_trim. */
+ uint64_t trim17 : 4; /**< [ 7: 4](RO/H) Setting for bits 123..120 of 180b ir25_trim. */
+ uint64_t trim18 : 4; /**< [ 11: 8](RO/H) Setting for bits 127..124 of 180b ir25_trim. */
+ uint64_t trim19 : 4; /**< [ 15: 12](RO/H) Setting for bits 131..128 of 180b ir25_trim. */
+ uint64_t trim20 : 4; /**< [ 19: 16](RO/H) Setting for bits 135..132 of 180b ir25_trim. */
+ uint64_t trim21 : 4; /**< [ 23: 20](RO/H) Setting for bits 139..136 of 180b ir25_trim. */
+ uint64_t trim22 : 4; /**< [ 27: 24](RO/H) Setting for bits 143..140 of 180b ir25_trim. */
+ uint64_t trim23 : 4; /**< [ 31: 28](RO/H) Setting for bits 147..144 of 180b ir25_trim. */
+ uint64_t trim24 : 4; /**< [ 35: 32](RO/H) Setting for bits 151..148 of 180b ir25_trim. */
+ uint64_t trim25 : 4; /**< [ 39: 36](RO/H) Setting for bits 155..152 of 180b ir25_trim. */
+ uint64_t trim26 : 4; /**< [ 43: 40](RO/H) Setting for bits 159..156 of 180b ir25_trim. */
+ uint64_t trim27 : 4; /**< [ 47: 44](RO/H) Setting for bits 163..160 of 180b ir25_trim. */
+ uint64_t trim28 : 4; /**< [ 51: 48](RO/H) Setting for bits 167..164 of 180b ir25_trim. */
+ uint64_t trim29 : 4; /**< [ 55: 52](RO/H) Setting for bits 171..168 of 180b ir25_trim. */
+ uint64_t trim30 : 4; /**< [ 59: 56](RO/H) Setting for bits 175..172 of 180b ir25_trim. */
+ uint64_t trim31 : 4; /**< [ 63: 60](RO/H) Setting for bits 179..176 of 180b ir25_trim. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_itrim_1_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_itrim_1_bsts bdk_gsernx_lanex_rx_itrim_1_bsts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_1_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_1_BSTS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4))) /* a = GSERN module (0..7), b = lane (0..4); CN9XXX only */
+ return 0x87e090001be0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7); /* BAR0 base + per-module + per-lane stride */
+ __bdk_csr_fatal("GSERNX_LANEX_RX_ITRIM_1_BSTS", 2, a, b, 0, 0); /* out-of-range index or wrong model */
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_ITRIM_1_BSTS(a,b) bdk_gsernx_lanex_rx_itrim_1_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_ITRIM_1_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_ITRIM_1_BSTS(a,b) "GSERNX_LANEX_RX_ITRIM_1_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_ITRIM_1_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_ITRIM_1_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_ITRIM_1_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_itrim_2_bcfg
+ *
+ * GSER Lane Receiver Ir25 Trim Override Value Settings Register 2
+ * ir25_trim override settings are in groups of 4 bits. These only take
+ * effect when the corresponding enable bit(s) in GSERN()_LANE()_RX_ITRIM_5_BCFG are set.
+ */
+union bdk_gsernx_lanex_rx_itrim_2_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_itrim_2_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_56_63 : 8;
+ uint64_t trim45_ovrd : 4; /**< [ 55: 52](R/W) Override setting for bits 27..24 of 180b ir25_trim. */
+ uint64_t trim44_ovrd : 4; /**< [ 51: 48](R/W) Override setting for bits 115..112 of 180b ir25_trim. */
+ uint64_t trim43_ovrd : 4; /**< [ 47: 44](R/W) Override setting for bits 23..20 of 180b ir25_trim. */
+ uint64_t trim42_ovrd : 4; /**< [ 43: 40](R/W) Override setting for bits 111..108 of 180b ir25_trim. */
+ uint64_t trim41_ovrd : 4; /**< [ 39: 36](R/W) Override setting for bits 19..16 of 180b ir25_trim. */
+ uint64_t trim40_ovrd : 4; /**< [ 35: 32](R/W) Override setting for bits 107..104 of 180b ir25_trim. */
+ uint64_t trim39_ovrd : 4; /**< [ 31: 28](R/W) Override setting for bits 15..12 of 180b ir25_trim. */
+ uint64_t trim38_ovrd : 4; /**< [ 27: 24](R/W) Override setting for bits 103..100 of 180b ir25_trim. */
+ uint64_t trim37_ovrd : 4; /**< [ 23: 20](R/W) Override setting for bits 11..8 of 180b ir25_trim. */
+ uint64_t trim36_ovrd : 4; /**< [ 19: 16](R/W) Override setting for bits 99..96 of 180b ir25_trim. */
+ uint64_t trim35_ovrd : 4; /**< [ 15: 12](R/W) Override setting for bits 7..4 of 180b ir25_trim. */
+ uint64_t trim34_ovrd : 4; /**< [ 11: 8](R/W) Override setting for bits 95..92 of 180b ir25_trim. */
+ uint64_t trim33_ovrd : 4; /**< [ 7: 4](R/W) Override setting for bits 3..0 of 180b ir25_trim. */
+ uint64_t trim32_ovrd : 4; /**< [ 3: 0](R/W) Override setting for bits 91..88 of 180b ir25_trim. */
+#else /* Word 0 - Little Endian */
+ uint64_t trim32_ovrd : 4; /**< [ 3: 0](R/W) Override setting for bits 91..88 of 180b ir25_trim. */
+ uint64_t trim33_ovrd : 4; /**< [ 7: 4](R/W) Override setting for bits 3..0 of 180b ir25_trim. */
+ uint64_t trim34_ovrd : 4; /**< [ 11: 8](R/W) Override setting for bits 95..92 of 180b ir25_trim. */
+ uint64_t trim35_ovrd : 4; /**< [ 15: 12](R/W) Override setting for bits 7..4 of 180b ir25_trim. */
+ uint64_t trim36_ovrd : 4; /**< [ 19: 16](R/W) Override setting for bits 99..96 of 180b ir25_trim. */
+ uint64_t trim37_ovrd : 4; /**< [ 23: 20](R/W) Override setting for bits 11..8 of 180b ir25_trim. */
+ uint64_t trim38_ovrd : 4; /**< [ 27: 24](R/W) Override setting for bits 103..100 of 180b ir25_trim. */
+ uint64_t trim39_ovrd : 4; /**< [ 31: 28](R/W) Override setting for bits 15..12 of 180b ir25_trim. */
+ uint64_t trim40_ovrd : 4; /**< [ 35: 32](R/W) Override setting for bits 107..104 of 180b ir25_trim. */
+ uint64_t trim41_ovrd : 4; /**< [ 39: 36](R/W) Override setting for bits 19..16 of 180b ir25_trim. */
+ uint64_t trim42_ovrd : 4; /**< [ 43: 40](R/W) Override setting for bits 111..108 of 180b ir25_trim. */
+ uint64_t trim43_ovrd : 4; /**< [ 47: 44](R/W) Override setting for bits 23..20 of 180b ir25_trim. */
+ uint64_t trim44_ovrd : 4; /**< [ 51: 48](R/W) Override setting for bits 115..112 of 180b ir25_trim. */
+ uint64_t trim45_ovrd : 4; /**< [ 55: 52](R/W) Override setting for bits 27..24 of 180b ir25_trim. */
+ uint64_t reserved_56_63 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_itrim_2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_itrim_2_bcfg bdk_gsernx_lanex_rx_itrim_2_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_2_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4))) /* a = GSERN module (0..7), b = lane (0..4); CN9XXX only */
+ return 0x87e090001aa0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7); /* BAR0 base + per-module + per-lane stride */
+ __bdk_csr_fatal("GSERNX_LANEX_RX_ITRIM_2_BCFG", 2, a, b, 0, 0); /* out-of-range index or wrong model */
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_ITRIM_2_BCFG(a,b) bdk_gsernx_lanex_rx_itrim_2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_ITRIM_2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_ITRIM_2_BCFG(a,b) "GSERNX_LANEX_RX_ITRIM_2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_ITRIM_2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_ITRIM_2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_ITRIM_2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_itrim_2_bsts
+ *
+ * GSER Lane Receiver Ir25 Trim Settings Register 2
+ * These are the ir25_trim settings in use, in groups of 4 bits; fields map alternately to high (115..88) and low (27..0) nibbles of ir25_trim.
+ */
+union bdk_gsernx_lanex_rx_itrim_2_bsts
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_itrim_2_bsts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_56_63 : 8;
+ uint64_t trim45 : 4; /**< [ 55: 52](RO/H) Setting for bits 27..24 of 180b ir25_trim. */
+ uint64_t trim44 : 4; /**< [ 51: 48](RO/H) Setting for bits 115..112 of 180b ir25_trim. */
+ uint64_t trim43 : 4; /**< [ 47: 44](RO/H) Setting for bits 23..20 of 180b ir25_trim. */
+ uint64_t trim42 : 4; /**< [ 43: 40](RO/H) Setting for bits 111..108 of 180b ir25_trim. */
+ uint64_t trim41 : 4; /**< [ 39: 36](RO/H) Setting for bits 19..16 of 180b ir25_trim. */
+ uint64_t trim40 : 4; /**< [ 35: 32](RO/H) Setting for bits 107..104 of 180b ir25_trim. */
+ uint64_t trim39 : 4; /**< [ 31: 28](RO/H) Setting for bits 15..12 of 180b ir25_trim. */
+ uint64_t trim38 : 4; /**< [ 27: 24](RO/H) Setting for bits 103..100 of 180b ir25_trim. */
+ uint64_t trim37 : 4; /**< [ 23: 20](RO/H) Setting for bits 11..8 of 180b ir25_trim. */
+ uint64_t trim36 : 4; /**< [ 19: 16](RO/H) Setting for bits 99..96 of 180b ir25_trim. */
+ uint64_t trim35 : 4; /**< [ 15: 12](RO/H) Setting for bits 7..4 of 180b ir25_trim. */
+ uint64_t trim34 : 4; /**< [ 11: 8](RO/H) Setting for bits 95..92 of 180b ir25_trim. */
+ uint64_t trim33 : 4; /**< [ 7: 4](RO/H) Setting for bits 3..0 of 180b ir25_trim. */
+ uint64_t trim32 : 4; /**< [ 3: 0](RO/H) Setting for bits 91..88 of 180b ir25_trim. */
+#else /* Word 0 - Little Endian */
+ uint64_t trim32 : 4; /**< [ 3: 0](RO/H) Setting for bits 91..88 of 180b ir25_trim. */
+ uint64_t trim33 : 4; /**< [ 7: 4](RO/H) Setting for bits 3..0 of 180b ir25_trim. */
+ uint64_t trim34 : 4; /**< [ 11: 8](RO/H) Setting for bits 95..92 of 180b ir25_trim. */
+ uint64_t trim35 : 4; /**< [ 15: 12](RO/H) Setting for bits 7..4 of 180b ir25_trim. */
+ uint64_t trim36 : 4; /**< [ 19: 16](RO/H) Setting for bits 99..96 of 180b ir25_trim. */
+ uint64_t trim37 : 4; /**< [ 23: 20](RO/H) Setting for bits 11..8 of 180b ir25_trim. */
+ uint64_t trim38 : 4; /**< [ 27: 24](RO/H) Setting for bits 103..100 of 180b ir25_trim. */
+ uint64_t trim39 : 4; /**< [ 31: 28](RO/H) Setting for bits 15..12 of 180b ir25_trim. */
+ uint64_t trim40 : 4; /**< [ 35: 32](RO/H) Setting for bits 107..104 of 180b ir25_trim. */
+ uint64_t trim41 : 4; /**< [ 39: 36](RO/H) Setting for bits 19..16 of 180b ir25_trim. */
+ uint64_t trim42 : 4; /**< [ 43: 40](RO/H) Setting for bits 111..108 of 180b ir25_trim. */
+ uint64_t trim43 : 4; /**< [ 47: 44](RO/H) Setting for bits 23..20 of 180b ir25_trim. */
+ uint64_t trim44 : 4; /**< [ 51: 48](RO/H) Setting for bits 115..112 of 180b ir25_trim. */
+ uint64_t trim45 : 4; /**< [ 55: 52](RO/H) Setting for bits 27..24 of 180b ir25_trim. */
+ uint64_t reserved_56_63 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_itrim_2_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_itrim_2_bsts bdk_gsernx_lanex_rx_itrim_2_bsts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_2_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_2_BSTS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4))) /* a = GSERN module (0..7), b = lane (0..4); CN9XXX only */
+ return 0x87e090001bf0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7); /* BAR0 base + per-module + per-lane stride */
+ __bdk_csr_fatal("GSERNX_LANEX_RX_ITRIM_2_BSTS", 2, a, b, 0, 0); /* out-of-range index or wrong model */
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_ITRIM_2_BSTS(a,b) bdk_gsernx_lanex_rx_itrim_2_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_ITRIM_2_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_ITRIM_2_BSTS(a,b) "GSERNX_LANEX_RX_ITRIM_2_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_ITRIM_2_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_ITRIM_2_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_ITRIM_2_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_itrim_3_bcfg
+ *
+ * GSER Lane Receiver Ir25 Trim Override Enables Register 0
+ * Enables in this register allow the corresponding TRIM<n>_OVRD override value
+ * (covering ir25_trim bits 87..28) to take effect.
+ */
+union bdk_gsernx_lanex_rx_itrim_3_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_itrim_3_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_61_63 : 3;
+ uint64_t trim15_ovrd_en : 1; /**< [ 60: 60](R/W) Override enable for bits 87..84 of 180b ir25_trim. */
+ uint64_t reserved_57_59 : 3;
+ uint64_t trim14_ovrd_en : 1; /**< [ 56: 56](R/W) Override enable for bits 83..80 of 180b ir25_trim. */
+ uint64_t reserved_53_55 : 3;
+ uint64_t trim13_ovrd_en : 1; /**< [ 52: 52](R/W) Override enable for bits 79..76 of 180b ir25_trim. */
+ uint64_t reserved_49_51 : 3;
+ uint64_t trim12_ovrd_en : 1; /**< [ 48: 48](R/W) Override enable for bits 75..72 of 180b ir25_trim. */
+ uint64_t reserved_45_47 : 3;
+ uint64_t trim11_ovrd_en : 1; /**< [ 44: 44](R/W) Override enable for bits 71..68 of 180b ir25_trim. */
+ uint64_t reserved_41_43 : 3;
+ uint64_t trim10_ovrd_en : 1; /**< [ 40: 40](R/W) Override enable for bits 67..64 of 180b ir25_trim. */
+ uint64_t reserved_37_39 : 3;
+ uint64_t trim9_ovrd_en : 1; /**< [ 36: 36](R/W) Override enable for bits 63..60 of 180b ir25_trim. */
+ uint64_t reserved_33_35 : 3;
+ uint64_t trim8_ovrd_en : 1; /**< [ 32: 32](R/W) Override enable for bits 59..56 of 180b ir25_trim. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t trim7_ovrd_en : 1; /**< [ 28: 28](R/W) Override enable for bits 55..52 of 180b ir25_trim. */
+ uint64_t reserved_25_27 : 3;
+ uint64_t trim6_ovrd_en : 1; /**< [ 24: 24](R/W) Override enable for bits 51..48 of 180b ir25_trim. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t trim5_ovrd_en : 1; /**< [ 20: 20](R/W) Override enable for bits 47..44 of 180b ir25_trim. */
+ uint64_t reserved_17_19 : 3;
+ uint64_t trim4_ovrd_en : 1; /**< [ 16: 16](R/W) Override enable for bits 43..40 of 180b ir25_trim. */
+ uint64_t reserved_13_15 : 3;
+ uint64_t trim3_ovrd_en : 1; /**< [ 12: 12](R/W) Override enable for bits 39..36 of 180b ir25_trim. */
+ uint64_t reserved_9_11 : 3;
+ uint64_t trim2_ovrd_en : 1; /**< [ 8: 8](R/W) Override enable for bits 35..32 of 180b ir25_trim. */
+ uint64_t reserved_5_7 : 3;
+ uint64_t trim1_ovrd_en : 1; /**< [ 4: 4](R/W) Override enable for bits 31..28 of 180b ir25_trim. */
+ uint64_t reserved_0_3 : 4; /* no TRIM0 enable in this register */
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_3 : 4; /* no TRIM0 enable in this register */
+ uint64_t trim1_ovrd_en : 1; /**< [ 4: 4](R/W) Override enable for bits 31..28 of 180b ir25_trim. */
+ uint64_t reserved_5_7 : 3;
+ uint64_t trim2_ovrd_en : 1; /**< [ 8: 8](R/W) Override enable for bits 35..32 of 180b ir25_trim. */
+ uint64_t reserved_9_11 : 3;
+ uint64_t trim3_ovrd_en : 1; /**< [ 12: 12](R/W) Override enable for bits 39..36 of 180b ir25_trim. */
+ uint64_t reserved_13_15 : 3;
+ uint64_t trim4_ovrd_en : 1; /**< [ 16: 16](R/W) Override enable for bits 43..40 of 180b ir25_trim. */
+ uint64_t reserved_17_19 : 3;
+ uint64_t trim5_ovrd_en : 1; /**< [ 20: 20](R/W) Override enable for bits 47..44 of 180b ir25_trim. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t trim6_ovrd_en : 1; /**< [ 24: 24](R/W) Override enable for bits 51..48 of 180b ir25_trim. */
+ uint64_t reserved_25_27 : 3;
+ uint64_t trim7_ovrd_en : 1; /**< [ 28: 28](R/W) Override enable for bits 55..52 of 180b ir25_trim. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t trim8_ovrd_en : 1; /**< [ 32: 32](R/W) Override enable for bits 59..56 of 180b ir25_trim. */
+ uint64_t reserved_33_35 : 3;
+ uint64_t trim9_ovrd_en : 1; /**< [ 36: 36](R/W) Override enable for bits 63..60 of 180b ir25_trim. */
+ uint64_t reserved_37_39 : 3;
+ uint64_t trim10_ovrd_en : 1; /**< [ 40: 40](R/W) Override enable for bits 67..64 of 180b ir25_trim. */
+ uint64_t reserved_41_43 : 3;
+ uint64_t trim11_ovrd_en : 1; /**< [ 44: 44](R/W) Override enable for bits 71..68 of 180b ir25_trim. */
+ uint64_t reserved_45_47 : 3;
+ uint64_t trim12_ovrd_en : 1; /**< [ 48: 48](R/W) Override enable for bits 75..72 of 180b ir25_trim. */
+ uint64_t reserved_49_51 : 3;
+ uint64_t trim13_ovrd_en : 1; /**< [ 52: 52](R/W) Override enable for bits 79..76 of 180b ir25_trim. */
+ uint64_t reserved_53_55 : 3;
+ uint64_t trim14_ovrd_en : 1; /**< [ 56: 56](R/W) Override enable for bits 83..80 of 180b ir25_trim. */
+ uint64_t reserved_57_59 : 3;
+ uint64_t trim15_ovrd_en : 1; /**< [ 60: 60](R/W) Override enable for bits 87..84 of 180b ir25_trim. */
+ uint64_t reserved_61_63 : 3;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_itrim_3_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_itrim_3_bcfg bdk_gsernx_lanex_rx_itrim_3_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_3_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4))) /* a = GSERN module (0..7), b = lane (0..4); CN9XXX only */
+ return 0x87e090001ab0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7); /* BAR0 base + per-module + per-lane stride */
+ __bdk_csr_fatal("GSERNX_LANEX_RX_ITRIM_3_BCFG", 2, a, b, 0, 0); /* out-of-range index or wrong model */
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_ITRIM_3_BCFG(a,b) bdk_gsernx_lanex_rx_itrim_3_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_ITRIM_3_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_ITRIM_3_BCFG(a,b) "GSERNX_LANEX_RX_ITRIM_3_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_ITRIM_3_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_ITRIM_3_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_ITRIM_3_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_itrim_4_bcfg
+ *
+ * GSER Lane Receiver Ir25 Trim Override Enables Register 1
+ * ir25_trim override settings are in groups of 4 bits. These enables gate the
+ * corresponding TRIM<n>_OVRD values in GSERN()_LANE()_RX_ITRIM_1_BCFG.
+ */
+union bdk_gsernx_lanex_rx_itrim_4_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_itrim_4_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_61_63 : 3;
+ uint64_t trim31_ovrd_en : 1; /**< [ 60: 60](R/W) Override enable for bits 179..176 of 180b ir25_trim. */
+ uint64_t reserved_57_59 : 3;
+ uint64_t trim30_ovrd_en : 1; /**< [ 56: 56](R/W) Override enable for bits 175..172 of 180b ir25_trim. */
+ uint64_t reserved_53_55 : 3;
+ uint64_t trim29_ovrd_en : 1; /**< [ 52: 52](R/W) Override enable for bits 171..168 of 180b ir25_trim. */
+ uint64_t reserved_49_51 : 3;
+ uint64_t trim28_ovrd_en : 1; /**< [ 48: 48](R/W) Override enable for bits 167..164 of 180b ir25_trim. */
+ uint64_t reserved_45_47 : 3;
+ uint64_t trim27_ovrd_en : 1; /**< [ 44: 44](R/W) Override enable for bits 163..160 of 180b ir25_trim. */
+ uint64_t reserved_41_43 : 3;
+ uint64_t trim26_ovrd_en : 1; /**< [ 40: 40](R/W) Override enable for bits 159..156 of 180b ir25_trim. */
+ uint64_t reserved_37_39 : 3;
+ uint64_t trim25_ovrd_en : 1; /**< [ 36: 36](R/W) Override enable for bits 155..152 of 180b ir25_trim. */
+ uint64_t reserved_33_35 : 3;
+ uint64_t trim24_ovrd_en : 1; /**< [ 32: 32](R/W) Override enable for bits 151..148 of 180b ir25_trim. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t trim23_ovrd_en : 1; /**< [ 28: 28](R/W) Override enable for bits 147..144 of 180b ir25_trim. */
+ uint64_t reserved_25_27 : 3;
+ uint64_t trim22_ovrd_en : 1; /**< [ 24: 24](R/W) Override enable for bits 143..140 of 180b ir25_trim. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t trim21_ovrd_en : 1; /**< [ 20: 20](R/W) Override enable for bits 139..136 of 180b ir25_trim. */
+ uint64_t reserved_17_19 : 3;
+ uint64_t trim20_ovrd_en : 1; /**< [ 16: 16](R/W) Override enable for bits 135..132 of 180b ir25_trim. */
+ uint64_t reserved_13_15 : 3;
+ uint64_t trim19_ovrd_en : 1; /**< [ 12: 12](R/W) Override enable for bits 131..128 of 180b ir25_trim. */
+ uint64_t reserved_9_11 : 3;
+ uint64_t trim18_ovrd_en : 1; /**< [ 8: 8](R/W) Override enable for bits 127..124 of 180b ir25_trim. */
+ uint64_t reserved_5_7 : 3;
+ uint64_t trim17_ovrd_en : 1; /**< [ 4: 4](R/W) Override enable for bits 123..120 of 180b ir25_trim. */
+ uint64_t reserved_1_3 : 3;
+ uint64_t trim16_ovrd_en : 1; /**< [ 0: 0](R/W) Override enable for bits 119..116 of 180b ir25_trim. */
+#else /* Word 0 - Little Endian */
+ uint64_t trim16_ovrd_en : 1; /**< [ 0: 0](R/W) Override enable for bits 119..116 of 180b ir25_trim. */
+ uint64_t reserved_1_3 : 3;
+ uint64_t trim17_ovrd_en : 1; /**< [ 4: 4](R/W) Override enable for bits 123..120 of 180b ir25_trim. */
+ uint64_t reserved_5_7 : 3;
+ uint64_t trim18_ovrd_en : 1; /**< [ 8: 8](R/W) Override enable for bits 127..124 of 180b ir25_trim. */
+ uint64_t reserved_9_11 : 3;
+ uint64_t trim19_ovrd_en : 1; /**< [ 12: 12](R/W) Override enable for bits 131..128 of 180b ir25_trim. */
+ uint64_t reserved_13_15 : 3;
+ uint64_t trim20_ovrd_en : 1; /**< [ 16: 16](R/W) Override enable for bits 135..132 of 180b ir25_trim. */
+ uint64_t reserved_17_19 : 3;
+ uint64_t trim21_ovrd_en : 1; /**< [ 20: 20](R/W) Override enable for bits 139..136 of 180b ir25_trim. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t trim22_ovrd_en : 1; /**< [ 24: 24](R/W) Override enable for bits 143..140 of 180b ir25_trim. */
+ uint64_t reserved_25_27 : 3;
+ uint64_t trim23_ovrd_en : 1; /**< [ 28: 28](R/W) Override enable for bits 147..144 of 180b ir25_trim. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t trim24_ovrd_en : 1; /**< [ 32: 32](R/W) Override enable for bits 151..148 of 180b ir25_trim. */
+ uint64_t reserved_33_35 : 3;
+ uint64_t trim25_ovrd_en : 1; /**< [ 36: 36](R/W) Override enable for bits 155..152 of 180b ir25_trim. */
+ uint64_t reserved_37_39 : 3;
+ uint64_t trim26_ovrd_en : 1; /**< [ 40: 40](R/W) Override enable for bits 159..156 of 180b ir25_trim. */
+ uint64_t reserved_41_43 : 3;
+ uint64_t trim27_ovrd_en : 1; /**< [ 44: 44](R/W) Override enable for bits 163..160 of 180b ir25_trim. */
+ uint64_t reserved_45_47 : 3;
+ uint64_t trim28_ovrd_en : 1; /**< [ 48: 48](R/W) Override enable for bits 167..164 of 180b ir25_trim. */
+ uint64_t reserved_49_51 : 3;
+ uint64_t trim29_ovrd_en : 1; /**< [ 52: 52](R/W) Override enable for bits 171..168 of 180b ir25_trim. */
+ uint64_t reserved_53_55 : 3;
+ uint64_t trim30_ovrd_en : 1; /**< [ 56: 56](R/W) Override enable for bits 175..172 of 180b ir25_trim. */
+ uint64_t reserved_57_59 : 3;
+ uint64_t trim31_ovrd_en : 1; /**< [ 60: 60](R/W) Override enable for bits 179..176 of 180b ir25_trim. */
+ uint64_t reserved_61_63 : 3;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_itrim_4_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_itrim_4_bcfg bdk_gsernx_lanex_rx_itrim_4_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_4_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_4_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4))) /* a = GSERN module (0..7), b = lane (0..4); CN9XXX only */
+ return 0x87e090001ac0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7); /* BAR0 base + per-module + per-lane stride */
+ __bdk_csr_fatal("GSERNX_LANEX_RX_ITRIM_4_BCFG", 2, a, b, 0, 0); /* out-of-range index or wrong model */
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_ITRIM_4_BCFG(a,b) bdk_gsernx_lanex_rx_itrim_4_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_ITRIM_4_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_ITRIM_4_BCFG(a,b) "GSERNX_LANEX_RX_ITRIM_4_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_ITRIM_4_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_ITRIM_4_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_ITRIM_4_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_itrim_5_bcfg
+ *
+ * GSER Lane Receiver Ir25 Trim Override Enables Register 2
+ * ir25_trim override settings are in groups of 4 bits. These only take
+ * effect when the corresponding enable bit(s) are set.
+ */
+union bdk_gsernx_lanex_rx_itrim_5_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_itrim_5_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_53_63 : 11;
+ uint64_t trim45_ovrd_en : 1; /**< [ 52: 52](R/W) Override enable for bits 27..24 of 180b ir25_trim. */
+ uint64_t reserved_49_51 : 3;
+ uint64_t trim44_ovrd_en : 1; /**< [ 48: 48](R/W) Override enable for bits 115..112 of 180b ir25_trim. */
+ uint64_t reserved_45_47 : 3;
+ uint64_t trim43_ovrd_en : 1; /**< [ 44: 44](R/W) Override enable for bits 23..20 of 180b ir25_trim. */
+ uint64_t reserved_41_43 : 3;
+ uint64_t trim42_ovrd_en : 1; /**< [ 40: 40](R/W) Override enable for bits 111..108 of 180b ir25_trim. */
+ uint64_t reserved_37_39 : 3;
+ uint64_t trim41_ovrd_en : 1; /**< [ 36: 36](R/W) Override enable for bits 19..16 of 180b ir25_trim. */
+ uint64_t reserved_33_35 : 3;
+ uint64_t trim40_ovrd_en : 1; /**< [ 32: 32](R/W) Override enable for bits 107..104 of 180b ir25_trim. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t trim39_ovrd_en : 1; /**< [ 28: 28](R/W) Override enable for bits 15..12 of 180b ir25_trim. */
+ uint64_t reserved_25_27 : 3;
+ uint64_t trim38_ovrd_en : 1; /**< [ 24: 24](R/W) Override enable for bits 103..100 of 180b ir25_trim. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t trim37_ovrd_en : 1; /**< [ 20: 20](R/W) Override enable for bits 11..8 of 180b ir25_trim. */
+ uint64_t reserved_17_19 : 3;
+ uint64_t trim36_ovrd_en : 1; /**< [ 16: 16](R/W) Override enable for bits 99..96 of 180b ir25_trim. */
+ uint64_t reserved_13_15 : 3;
+ uint64_t trim35_ovrd_en : 1; /**< [ 12: 12](R/W) Override enable for bits 7..4 of 180b ir25_trim. */
+ uint64_t reserved_9_11 : 3;
+ uint64_t trim34_ovrd_en : 1; /**< [ 8: 8](R/W) Override enable for bits 95..92 of 180b ir25_trim. */
+ uint64_t reserved_5_7 : 3;
+ uint64_t trim33_ovrd_en : 1; /**< [ 4: 4](R/W) Override enable for bits 3..0 of 180b ir25_trim. */
+ uint64_t reserved_1_3 : 3;
+ uint64_t trim32_ovrd_en : 1; /**< [ 0: 0](R/W) Override enable for bits 91..88 of 180b ir25_trim. */
+#else /* Word 0 - Little Endian */
+ uint64_t trim32_ovrd_en : 1; /**< [ 0: 0](R/W) Override enable for bits 91..88 of 180b ir25_trim. */
+ uint64_t reserved_1_3 : 3;
+ uint64_t trim33_ovrd_en : 1; /**< [ 4: 4](R/W) Override enable for bits 3..0 of 180b ir25_trim. */
+ uint64_t reserved_5_7 : 3;
+ uint64_t trim34_ovrd_en : 1; /**< [ 8: 8](R/W) Override enable for bits 95..92 of 180b ir25_trim. */
+ uint64_t reserved_9_11 : 3;
+ uint64_t trim35_ovrd_en : 1; /**< [ 12: 12](R/W) Override enable for bits 7..4 of 180b ir25_trim. */
+ uint64_t reserved_13_15 : 3;
+ uint64_t trim36_ovrd_en : 1; /**< [ 16: 16](R/W) Override enable for bits 99..96 of 180b ir25_trim. */
+ uint64_t reserved_17_19 : 3;
+ uint64_t trim37_ovrd_en : 1; /**< [ 20: 20](R/W) Override enable for bits 11..8 of 180b ir25_trim. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t trim38_ovrd_en : 1; /**< [ 24: 24](R/W) Override enable for bits 103..100 of 180b ir25_trim. */
+ uint64_t reserved_25_27 : 3;
+ uint64_t trim39_ovrd_en : 1; /**< [ 28: 28](R/W) Override enable for bits 15..12 of 180b ir25_trim. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t trim40_ovrd_en : 1; /**< [ 32: 32](R/W) Override enable for bits 107..104 of 180b ir25_trim. */
+ uint64_t reserved_33_35 : 3;
+ uint64_t trim41_ovrd_en : 1; /**< [ 36: 36](R/W) Override enable for bits 19..16 of 180b ir25_trim. */
+ uint64_t reserved_37_39 : 3;
+ uint64_t trim42_ovrd_en : 1; /**< [ 40: 40](R/W) Override enable for bits 111..108 of 180b ir25_trim. */
+ uint64_t reserved_41_43 : 3;
+ uint64_t trim43_ovrd_en : 1; /**< [ 44: 44](R/W) Override enable for bits 23..20 of 180b ir25_trim. */
+ uint64_t reserved_45_47 : 3;
+ uint64_t trim44_ovrd_en : 1; /**< [ 48: 48](R/W) Override enable for bits 115..112 of 180b ir25_trim. */
+ uint64_t reserved_49_51 : 3;
+ uint64_t trim45_ovrd_en : 1; /**< [ 52: 52](R/W) Override enable for bits 27..24 of 180b ir25_trim. */
+ uint64_t reserved_53_63 : 11;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_itrim_5_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_itrim_5_bcfg bdk_gsernx_lanex_rx_itrim_5_bcfg_t;
+
+/* Return the RSL address of GSERN(a)_LANE(b)_RX_ITRIM_5_BCFG; calls __bdk_csr_fatal() if (a,b) is out of range for the model. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_5_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_5_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001ad0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_ITRIM_5_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_ITRIM_5_BCFG(a,b) bdk_gsernx_lanex_rx_itrim_5_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_ITRIM_5_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_ITRIM_5_BCFG(a,b) "GSERNX_LANEX_RX_ITRIM_5_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_ITRIM_5_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_ITRIM_5_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_ITRIM_5_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_margin_dbg_cnt
+ *
+ * GSER Lane RX Margining Debug Control Register
+ * CSR based control of Phy initiated read/write operations to the PEM. This is a
+ * debug field that can be used to check the results of an RX Margining sequence.
+ * The expectation is that the PEM FSM will initiate the transactions and the results
+ * will be placed in MAC/PEM CSRs using the p2m_message_bus. However, ability to
+ * read/write these registers into the processor is not clear from Synopsys's MAC
+ * spec. As such, this feature was added to allow an RSL read/write of these registers.
+ * Protocol is Ready & Done based. A transaction is updated in the CSR registers and the
+ * Ready bit is set high. Once it is set high, the mbus_fsm will execute the transaction
+ * and assert the Done bit when done or when results are available in
+ * GSERN()_LANE()_RX_MARGIN_DBG_OBS.
+ */
+union bdk_gsernx_lanex_rx_margin_dbg_cnt
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_margin_dbg_cnt_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ready : 1; /**< [ 63: 63](R/W) Handshake bit to indicate there is a valid request from the RSL bus to transact
+ on the message bus. Setting this bit triggers the mbus_fsm to execute the
+ transaction. Once a transaction is done, this bit has to be cleared before
+ another transaction is issued.
+ 0 = No mbus transactions are outstanding.
+ 1 = An mbus transaction is outstanding. */
+ uint64_t write_commit : 1; /**< [ 62: 62](R/W) This bit will determine to the mbus transactor if the write operation is a
+ committed write or an uncommitted write. When doing a read, this bit is a
+ don't care.
+ 0 = If executing a write, this write operation is not-committed type.
+ 1 = If executing a write, this write operation is a committed type. */
+ uint64_t read_writen : 1; /**< [ 61: 61](R/W) This bit indicates if we are doing a read or write operation.
+ 0 = Performing a write operation.
+ 1 = Performing a read operation. */
+ uint64_t reserved_20_60 : 41;
+ uint64_t address : 12; /**< [ 19: 8](R/W) The 12-bit field of address to be sent to the MAC/PEM if we are performing either
+ a read or write operation. */
+ uint64_t data : 8; /**< [ 7: 0](R/W) The 8-bit field of Data to be sent to the MAC/PEM if we are performing a write operation. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 8; /**< [ 7: 0](R/W) The 8-bit field of Data to be sent to the MAC/PEM if we are performing a write operation. */
+ uint64_t address : 12; /**< [ 19: 8](R/W) The 12-bit field of address to be sent to the MAC/PEM if we are performing either
+ a read or write operation. */
+ uint64_t reserved_20_60 : 41;
+ uint64_t read_writen : 1; /**< [ 61: 61](R/W) This bit indicates if we are doing a read or write operation.
+ 0 = Performing a write operation.
+ 1 = Performing a read operation. */
+ uint64_t write_commit : 1; /**< [ 62: 62](R/W) This bit will determine to the mbus transactor if the write operation is a
+ committed write or an uncommitted write. When doing a read, this bit is a
+ don't care.
+ 0 = If executing a write, this write operation is not-committed type.
+ 1 = If executing a write, this write operation is a committed type. */
+ uint64_t ready : 1; /**< [ 63: 63](R/W) Handshake bit to indicate there is a valid request from the RSL bus to transact
+ on the message bus. Setting this bit triggers the mbus_fsm to execute the
+ transaction. Once a transaction is done, this bit has to be cleared before
+ another transaction is issued.
+ 0 = No mbus transactions are outstanding.
+ 1 = An mbus transaction is outstanding. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_margin_dbg_cnt_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_margin_dbg_cnt bdk_gsernx_lanex_rx_margin_dbg_cnt_t;
+
+/* Return the RSL address of GSERN(a)_LANE(b)_RX_MARGIN_DBG_CNT; calls __bdk_csr_fatal() if (a,b) is out of range for the model. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_MARGIN_DBG_CNT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_MARGIN_DBG_CNT(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001220ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_MARGIN_DBG_CNT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_MARGIN_DBG_CNT(a,b) bdk_gsernx_lanex_rx_margin_dbg_cnt_t
+#define bustype_BDK_GSERNX_LANEX_RX_MARGIN_DBG_CNT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_MARGIN_DBG_CNT(a,b) "GSERNX_LANEX_RX_MARGIN_DBG_CNT"
+#define device_bar_BDK_GSERNX_LANEX_RX_MARGIN_DBG_CNT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_MARGIN_DBG_CNT(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_MARGIN_DBG_CNT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_margin_dbg_obs
+ *
+ * GSER Lane RX Margining Debug Result Register
+ * Observes the results of an mbus_messaging transaction. The results are expected to be
+ * valid only when the Done bit is asserted.
+ */
+union bdk_gsernx_lanex_rx_margin_dbg_obs
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_margin_dbg_obs_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t done : 1; /**< [ 63: 63](RO/H) Done bit indicating that the outstanding transaction on the mbus
+ has finished and if there are results that are expected, they will
+ be presented to this register. The results are not sticky, so a copy
+ needs to be moved out of this register to another location before
+ de-asserting the READY bit in GSERN()_LANE()_RX_MARGIN_DBG_CNT.
+ De-asserting the READY bit will force this bit low again and remove
+ the data being presented to this CSR's inputs. */
+ uint64_t reserved_20_62 : 43;
+ uint64_t address : 12; /**< [ 19: 8](RO/H) Observed Address a read was completed against or location of the write operation being executed. */
+ uint64_t data : 8; /**< [ 7: 0](RO/H) Observed Data read back from the MAC/PEM at the completion of the read operation */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 8; /**< [ 7: 0](RO/H) Observed Data read back from the MAC/PEM at the completion of the read operation */
+ uint64_t address : 12; /**< [ 19: 8](RO/H) Observed Address a read was completed against or location of the write operation being executed. */
+ uint64_t reserved_20_62 : 43;
+ uint64_t done : 1; /**< [ 63: 63](RO/H) Done bit indicating that the outstanding transaction on the mbus
+ has finished and if there are results that are expected, they will
+ be presented to this register. The results are not sticky, so a copy
+ needs to be moved out of this register to another location before
+ de-asserting the READY bit in GSERN()_LANE()_RX_MARGIN_DBG_CNT.
+ De-asserting the READY bit will force this bit low again and remove
+ the data being presented to this CSR's inputs. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_margin_dbg_obs_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_margin_dbg_obs bdk_gsernx_lanex_rx_margin_dbg_obs_t;
+
+/* Return the RSL address of GSERN(a)_LANE(b)_RX_MARGIN_DBG_OBS; calls __bdk_csr_fatal() if (a,b) is out of range for the model. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_MARGIN_DBG_OBS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_MARGIN_DBG_OBS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001230ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_MARGIN_DBG_OBS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_MARGIN_DBG_OBS(a,b) bdk_gsernx_lanex_rx_margin_dbg_obs_t
+#define bustype_BDK_GSERNX_LANEX_RX_MARGIN_DBG_OBS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_MARGIN_DBG_OBS(a,b) "GSERNX_LANEX_RX_MARGIN_DBG_OBS"
+#define device_bar_BDK_GSERNX_LANEX_RX_MARGIN_DBG_OBS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_MARGIN_DBG_OBS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_MARGIN_DBG_OBS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_margin_phy_cnt
+ *
+ * GSER Lane RX Margining Overrides of Phy MBUS margining bits Register
+ * Can override existing values generated by the RX Margining FSM. This feature will
+ * allow the RSL interface to provide its own values to the MAC/PEM Phy CSRs for the
+ * mbus interface. This is strictly a debug method for sending the mbus CSRs in the
+ * phy to the MAC/PEM in a predictable manner.
+ */
+union bdk_gsernx_lanex_rx_margin_phy_cnt
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_margin_phy_cnt_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t override_margining_fsm : 1; /**< [ 63: 63](R/W) The bit that when asserted to 1'b1, will enable the values of this register to
+ replace the values generated by the RX Margining FSM. */
+ uint64_t sample_count_reset : 1; /**< [ 62: 62](R/W) Resets the sample count register for the RX Margining FSM. */
+ uint64_t error_count_reset : 1; /**< [ 61: 61](R/W) Resets the error count register for the RX Margining FSM. */
+ uint64_t margin_voltage_timing : 1; /**< [ 60: 60](R/W) Sets which type of margining to perform. 1'b0 for timing, 1'b1 for voltage. */
+ uint64_t start_margining : 1; /**< [ 59: 59](R/W) Enables margining FSM to operate. */
+ uint64_t margin_direction : 1; /**< [ 58: 58](R/W) Sets the direction of the margining.
+ For timing, a 1'b0 steps to the left a 1'b1 steps to the right.
+ For voltage, 1'b0 steps voltage up and 1'b1 steps voltage down. */
+ uint64_t margin_offset : 7; /**< [ 57: 51](R/W) Margining offset for the sample point. */
+ uint64_t reserved_48_50 : 3;
+ uint64_t sample_count_ovr : 40; /**< [ 47: 8](R/W) Margining sample count size. Default is 1K samples, but can be updated to any
+ value within the 40-bit length. */
+ uint64_t elastic_buffer_depth : 8; /**< [ 7: 0](R/W) Sets the margining buffer depth. Feature is not used */
+#else /* Word 0 - Little Endian */
+ uint64_t elastic_buffer_depth : 8; /**< [ 7: 0](R/W) Sets the margining buffer depth. Feature is not used */
+ uint64_t sample_count_ovr : 40; /**< [ 47: 8](R/W) Margining sample count size. Default is 1K samples, but can be updated to any
+ value within the 40-bit length. */
+ uint64_t reserved_48_50 : 3;
+ uint64_t margin_offset : 7; /**< [ 57: 51](R/W) Margining offset for the sample point. */
+ uint64_t margin_direction : 1; /**< [ 58: 58](R/W) Sets the direction of the margining.
+ For timing, a 1'b0 steps to the left a 1'b1 steps to the right.
+ For voltage, 1'b0 steps voltage up and 1'b1 steps voltage down. */
+ uint64_t start_margining : 1; /**< [ 59: 59](R/W) Enables margining FSM to operate. */
+ uint64_t margin_voltage_timing : 1; /**< [ 60: 60](R/W) Sets which type of margining to perform. 1'b0 for timing, 1'b1 for voltage. */
+ uint64_t error_count_reset : 1; /**< [ 61: 61](R/W) Resets the error count register for the RX Margining FSM. */
+ uint64_t sample_count_reset : 1; /**< [ 62: 62](R/W) Resets the sample count register for the RX Margining FSM. */
+ uint64_t override_margining_fsm : 1; /**< [ 63: 63](R/W) The bit that when asserted to 1'b1, will enable the values of this register to
+ replace the values generated by the RX Margining FSM. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_margin_phy_cnt_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_margin_phy_cnt bdk_gsernx_lanex_rx_margin_phy_cnt_t;
+
+/* Return the RSL address of GSERN(a)_LANE(b)_RX_MARGIN_PHY_CNT; calls __bdk_csr_fatal() if (a,b) is out of range for the model. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_MARGIN_PHY_CNT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_MARGIN_PHY_CNT(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001330ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_MARGIN_PHY_CNT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_MARGIN_PHY_CNT(a,b) bdk_gsernx_lanex_rx_margin_phy_cnt_t
+#define bustype_BDK_GSERNX_LANEX_RX_MARGIN_PHY_CNT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_MARGIN_PHY_CNT(a,b) "GSERNX_LANEX_RX_MARGIN_PHY_CNT"
+#define device_bar_BDK_GSERNX_LANEX_RX_MARGIN_PHY_CNT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_MARGIN_PHY_CNT(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_MARGIN_PHY_CNT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_margin_phy_obs
+ *
+ * GSER Lane RX Margining Observe of Phy MBUS margining bits Register
+ * Observes the status of phy mbus CSRs. The results are expected to be changed by the
+ * margining FSM. This is strictly an observe path to the mbus CSRs in the phy.
+ */
+union bdk_gsernx_lanex_rx_margin_phy_obs
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_margin_phy_obs_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t margin_nak : 1; /**< [ 63: 63](RO/H) Asserted when the margining setup is out of range for the margining hardware to
+ perform. */
+ uint64_t margin_status : 1; /**< [ 62: 62](RO/H) Indicates the status of the margining FSM. If asserted, then there is an open
+ Receiver Margining transaction being executed. */
+ uint64_t elastic_buffer_status : 1; /**< [ 61: 61](RO/H) Indicates the status of the elastic buffer. This feature is not supported and
+ will always return 0. */
+ uint64_t reserved_15_60 : 46;
+ uint64_t sample_count : 7; /**< [ 14: 8](RO/H) Observed margining sample count. NOTE(review): generated description duplicated
+ the DBG_OBS address text; confirm against the HRM. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t error_count : 6; /**< [ 5: 0](RO/H) Observed margining error count. NOTE(review): generated description duplicated
+ the DBG_OBS data text; confirm against the HRM. */
+#else /* Word 0 - Little Endian */
+ uint64_t error_count : 6; /**< [ 5: 0](RO/H) Observed margining error count. NOTE(review): generated description duplicated
+ the DBG_OBS data text; confirm against the HRM. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t sample_count : 7; /**< [ 14: 8](RO/H) Observed margining sample count. NOTE(review): generated description duplicated
+ the DBG_OBS address text; confirm against the HRM. */
+ uint64_t reserved_15_60 : 46;
+ uint64_t elastic_buffer_status : 1; /**< [ 61: 61](RO/H) Indicates the status of the elastic buffer. This feature is not supported and
+ will always return 0. */
+ uint64_t margin_status : 1; /**< [ 62: 62](RO/H) Indicates the status of the margining FSM. If asserted, then there is an open
+ Receiver Margining transaction being executed. */
+ uint64_t margin_nak : 1; /**< [ 63: 63](RO/H) Asserted when the margining setup is out of range for the margining hardware to
+ perform. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_margin_phy_obs_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_margin_phy_obs bdk_gsernx_lanex_rx_margin_phy_obs_t;
+
+/* Return the RSL address of GSERN(a)_LANE(b)_RX_MARGIN_PHY_OBS; calls __bdk_csr_fatal() if (a,b) is out of range for the model. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_MARGIN_PHY_OBS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_MARGIN_PHY_OBS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001430ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_MARGIN_PHY_OBS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_MARGIN_PHY_OBS(a,b) bdk_gsernx_lanex_rx_margin_phy_obs_t
+#define bustype_BDK_GSERNX_LANEX_RX_MARGIN_PHY_OBS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_MARGIN_PHY_OBS(a,b) "GSERNX_LANEX_RX_MARGIN_PHY_OBS"
+#define device_bar_BDK_GSERNX_LANEX_RX_MARGIN_PHY_OBS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_MARGIN_PHY_OBS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_MARGIN_PHY_OBS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_os_1_bcfg
+ *
+ * GSER Lane Receiver Offset Control Group 1 Register
+ * Register controls for offset overrides from os0_0 through os3_1. Each
+ * override setting has a corresponding enable bit which will cause the
+ * calibration control logic to use the override register setting instead
+ * of the calibration result.
+ */
+union bdk_gsernx_lanex_rx_os_1_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_os_1_bcfg_s
+ {
+ /* Layout: one byte per offset setting -- bits [5:0] override value, bit [6] reserved, bit [7] override enable. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t os3_1_ovrd_en : 1; /**< [ 63: 63](R/W) Enable use of [OS3_1_OVRD]. */
+ uint64_t reserved_62 : 1;
+ uint64_t os3_1_ovrd : 6; /**< [ 61: 56](R/W) os3_1 offset compensation override bits. */
+ uint64_t os3_0_ovrd_en : 1; /**< [ 55: 55](R/W) Enable use of [OS3_0_OVRD]. */
+ uint64_t reserved_54 : 1;
+ uint64_t os3_0_ovrd : 6; /**< [ 53: 48](R/W) os3_0 offset compensation override bits. */
+ uint64_t os2_1_ovrd_en : 1; /**< [ 47: 47](R/W) Enable use of [OS2_1_OVRD]. */
+ uint64_t reserved_46 : 1;
+ uint64_t os2_1_ovrd : 6; /**< [ 45: 40](R/W) os2_1 offset compensation override bits. */
+ uint64_t os2_0_ovrd_en : 1; /**< [ 39: 39](R/W) Enable use of [OS2_0_OVRD]. */
+ uint64_t reserved_38 : 1;
+ uint64_t os2_0_ovrd : 6; /**< [ 37: 32](R/W) os2_0 offset compensation override bits. */
+ uint64_t os1_1_ovrd_en : 1; /**< [ 31: 31](R/W) Enable use of [OS1_1_OVRD]. */
+ uint64_t reserved_30 : 1;
+ uint64_t os1_1_ovrd : 6; /**< [ 29: 24](R/W) os1_1 offset compensation override bits. */
+ uint64_t os1_0_ovrd_en : 1; /**< [ 23: 23](R/W) Enable use of [OS1_0_OVRD]. */
+ uint64_t reserved_22 : 1;
+ uint64_t os1_0_ovrd : 6; /**< [ 21: 16](R/W) os1_0 offset compensation override bits. */
+ uint64_t os0_1_ovrd_en : 1; /**< [ 15: 15](R/W) Enable use of [OS0_1_OVRD]. */
+ uint64_t reserved_14 : 1;
+ uint64_t os0_1_ovrd : 6; /**< [ 13: 8](R/W) os0_1 offset compensation override bits. */
+ uint64_t os0_0_ovrd_en : 1; /**< [ 7: 7](R/W) Enable use of [OS0_0_OVRD]. */
+ uint64_t reserved_6 : 1;
+ uint64_t os0_0_ovrd : 6; /**< [ 5: 0](R/W) os0_0 offset compensation override bits. */
+#else /* Word 0 - Little Endian */
+ uint64_t os0_0_ovrd : 6; /**< [ 5: 0](R/W) os0_0 offset compensation override bits. */
+ uint64_t reserved_6 : 1;
+ uint64_t os0_0_ovrd_en : 1; /**< [ 7: 7](R/W) Enable use of [OS0_0_OVRD]. */
+ uint64_t os0_1_ovrd : 6; /**< [ 13: 8](R/W) os0_1 offset compensation override bits. */
+ uint64_t reserved_14 : 1;
+ uint64_t os0_1_ovrd_en : 1; /**< [ 15: 15](R/W) Enable use of [OS0_1_OVRD]. */
+ uint64_t os1_0_ovrd : 6; /**< [ 21: 16](R/W) os1_0 offset compensation override bits. */
+ uint64_t reserved_22 : 1;
+ uint64_t os1_0_ovrd_en : 1; /**< [ 23: 23](R/W) Enable use of [OS1_0_OVRD]. */
+ uint64_t os1_1_ovrd : 6; /**< [ 29: 24](R/W) os1_1 offset compensation override bits. */
+ uint64_t reserved_30 : 1;
+ uint64_t os1_1_ovrd_en : 1; /**< [ 31: 31](R/W) Enable use of [OS1_1_OVRD]. */
+ uint64_t os2_0_ovrd : 6; /**< [ 37: 32](R/W) os2_0 offset compensation override bits. */
+ uint64_t reserved_38 : 1;
+ uint64_t os2_0_ovrd_en : 1; /**< [ 39: 39](R/W) Enable use of [OS2_0_OVRD]. */
+ uint64_t os2_1_ovrd : 6; /**< [ 45: 40](R/W) os2_1 offset compensation override bits. */
+ uint64_t reserved_46 : 1;
+ uint64_t os2_1_ovrd_en : 1; /**< [ 47: 47](R/W) Enable use of [OS2_1_OVRD]. */
+ uint64_t os3_0_ovrd : 6; /**< [ 53: 48](R/W) os3_0 offset compensation override bits. */
+ uint64_t reserved_54 : 1;
+ uint64_t os3_0_ovrd_en : 1; /**< [ 55: 55](R/W) Enable use of [OS3_0_OVRD]. */
+ uint64_t os3_1_ovrd : 6; /**< [ 61: 56](R/W) os3_1 offset compensation override bits. */
+ uint64_t reserved_62 : 1;
+ uint64_t os3_1_ovrd_en : 1; /**< [ 63: 63](R/W) Enable use of [OS3_1_OVRD]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_os_1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_os_1_bcfg bdk_gsernx_lanex_rx_os_1_bcfg_t;
+
+/* Return the RSL address of GSERN(a)_LANE(b)_RX_OS_1_BCFG; calls __bdk_csr_fatal() if (a,b) is out of range for the model. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_1_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001800ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_OS_1_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_OS_1_BCFG(a,b) bdk_gsernx_lanex_rx_os_1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_OS_1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_OS_1_BCFG(a,b) "GSERNX_LANEX_RX_OS_1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_OS_1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_OS_1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_OS_1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_os_1_bsts
+ *
+ * GSER Lane Receiver Offset Status Group 1 Register
+ * Status for offset settings actually in use (either calibration results
+ * or overrides) from os0_0 through os3_1. Results in all fields of this
+ * register are valid only if GSERN()_LANE()_RX_OS_5_BSTS[AFE_OFFSET_STATUS] and
+ * GSERN()_LANE()_RX_OS_5_BSTS[DFE_OFFSET_STATUS] are asserted or if the corresponding
+ * override enable bit is asserted.
+ */
+union bdk_gsernx_lanex_rx_os_1_bsts
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_os_1_bsts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_62_63 : 2;
+ uint64_t os3_1 : 6; /**< [ 61: 56](RO/H) os3_1 offset compensation setting in use (calibration result or override). */
+ uint64_t reserved_54_55 : 2;
+ uint64_t os3_0 : 6; /**< [ 53: 48](RO/H) os3_0 offset compensation setting in use (calibration result or override). */
+ uint64_t reserved_46_47 : 2;
+ uint64_t os2_1 : 6; /**< [ 45: 40](RO/H) os2_1 offset compensation setting in use (calibration result or override). */
+ uint64_t reserved_38_39 : 2;
+ uint64_t os2_0 : 6; /**< [ 37: 32](RO/H) os2_0 offset compensation setting in use (calibration result or override). */
+ uint64_t reserved_30_31 : 2;
+ uint64_t os1_1 : 6; /**< [ 29: 24](RO/H) os1_1 offset compensation setting in use (calibration result or override). */
+ uint64_t reserved_22_23 : 2;
+ uint64_t os1_0 : 6; /**< [ 21: 16](RO/H) os1_0 offset compensation setting in use (calibration result or override). */
+ uint64_t reserved_14_15 : 2;
+ uint64_t os0_1 : 6; /**< [ 13: 8](RO/H) os0_1 offset compensation setting in use (calibration result or override). */
+ uint64_t reserved_6_7 : 2;
+ uint64_t os0_0 : 6; /**< [ 5: 0](RO/H) os0_0 offset compensation setting in use (calibration result or override). */
+#else /* Word 0 - Little Endian */
+ uint64_t os0_0 : 6; /**< [ 5: 0](RO/H) os0_0 offset compensation setting in use (calibration result or override). */
+ uint64_t reserved_6_7 : 2;
+ uint64_t os0_1 : 6; /**< [ 13: 8](RO/H) os0_1 offset compensation setting in use (calibration result or override). */
+ uint64_t reserved_14_15 : 2;
+ uint64_t os1_0 : 6; /**< [ 21: 16](RO/H) os1_0 offset compensation setting in use (calibration result or override). */
+ uint64_t reserved_22_23 : 2;
+ uint64_t os1_1 : 6; /**< [ 29: 24](RO/H) os1_1 offset compensation setting in use (calibration result or override). */
+ uint64_t reserved_30_31 : 2;
+ uint64_t os2_0 : 6; /**< [ 37: 32](RO/H) os2_0 offset compensation setting in use (calibration result or override). */
+ uint64_t reserved_38_39 : 2;
+ uint64_t os2_1 : 6; /**< [ 45: 40](RO/H) os2_1 offset compensation setting in use (calibration result or override). */
+ uint64_t reserved_46_47 : 2;
+ uint64_t os3_0 : 6; /**< [ 53: 48](RO/H) os3_0 offset compensation setting in use (calibration result or override). */
+ uint64_t reserved_54_55 : 2;
+ uint64_t os3_1 : 6; /**< [ 61: 56](RO/H) os3_1 offset compensation setting in use (calibration result or override). */
+ uint64_t reserved_62_63 : 2;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_os_1_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_os_1_bsts bdk_gsernx_lanex_rx_os_1_bsts_t;
+
+/* Return the RSL address of GSERN(a)_LANE(b)_RX_OS_1_BSTS; calls __bdk_csr_fatal() if (a,b) is out of range for the model. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_1_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_1_BSTS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001940ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_OS_1_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_OS_1_BSTS(a,b) bdk_gsernx_lanex_rx_os_1_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_OS_1_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_OS_1_BSTS(a,b) "GSERNX_LANEX_RX_OS_1_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_OS_1_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_OS_1_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_OS_1_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_os_2_bcfg
+ *
+ * GSER Lane Receiver Offset Control Group 2 Register
+ * Register controls for offset overrides from os4_0 through os7_1. Each
+ * override setting has a corresponding enable bit which will cause the
+ * calibration control logic to use the override register setting instead
+ * of the calibration result.
+ */
+union bdk_gsernx_lanex_rx_os_2_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_os_2_bcfg_s
+ {
+ /* Layout: one byte per offset setting -- bits [5:0] override value, bit [6] reserved, bit [7] override enable. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t os7_1_ovrd_en : 1; /**< [ 63: 63](R/W) Enable use of [OS7_1_OVRD]. */
+ uint64_t reserved_62 : 1;
+ uint64_t os7_1_ovrd : 6; /**< [ 61: 56](R/W) os7_1 offset compensation override bits. */
+ uint64_t os7_0_ovrd_en : 1; /**< [ 55: 55](R/W) Enable use of [OS7_0_OVRD]. */
+ uint64_t reserved_54 : 1;
+ uint64_t os7_0_ovrd : 6; /**< [ 53: 48](R/W) os7_0 offset compensation override bits. */
+ uint64_t os6_1_ovrd_en : 1; /**< [ 47: 47](R/W) Enable use of [OS6_1_OVRD]. */
+ uint64_t reserved_46 : 1;
+ uint64_t os6_1_ovrd : 6; /**< [ 45: 40](R/W) os6_1 offset compensation override bits. */
+ uint64_t os6_0_ovrd_en : 1; /**< [ 39: 39](R/W) Enable use of [OS6_0_OVRD]. */
+ uint64_t reserved_38 : 1;
+ uint64_t os6_0_ovrd : 6; /**< [ 37: 32](R/W) os6_0 offset compensation override bits. */
+ uint64_t os5_1_ovrd_en : 1; /**< [ 31: 31](R/W) Enable use of [OS5_1_OVRD]. */
+ uint64_t reserved_30 : 1;
+ uint64_t os5_1_ovrd : 6; /**< [ 29: 24](R/W) os5_1 offset compensation override bits. */
+ uint64_t os5_0_ovrd_en : 1; /**< [ 23: 23](R/W) Enable use of [OS5_0_OVRD]. */
+ uint64_t reserved_22 : 1;
+ uint64_t os5_0_ovrd : 6; /**< [ 21: 16](R/W) os5_0 offset compensation override bits. */
+ uint64_t os4_1_ovrd_en : 1; /**< [ 15: 15](R/W) Enable use of [OS4_1_OVRD]. */
+ uint64_t reserved_14 : 1;
+ uint64_t os4_1_ovrd : 6; /**< [ 13: 8](R/W) os4_1 offset compensation override bits. */
+ uint64_t os4_0_ovrd_en : 1; /**< [ 7: 7](R/W) Enable use of [OS4_0_OVRD]. */
+ uint64_t reserved_6 : 1;
+ uint64_t os4_0_ovrd : 6; /**< [ 5: 0](R/W) os4_0 offset compensation override bits. */
+#else /* Word 0 - Little Endian */
+ uint64_t os4_0_ovrd : 6; /**< [ 5: 0](R/W) os4_0 offset compensation override bits. */
+ uint64_t reserved_6 : 1;
+ uint64_t os4_0_ovrd_en : 1; /**< [ 7: 7](R/W) Enable use of [OS4_0_OVRD]. */
+ uint64_t os4_1_ovrd : 6; /**< [ 13: 8](R/W) os4_1 offset compensation override bits. */
+ uint64_t reserved_14 : 1;
+ uint64_t os4_1_ovrd_en : 1; /**< [ 15: 15](R/W) Enable use of [OS4_1_OVRD]. */
+ uint64_t os5_0_ovrd : 6; /**< [ 21: 16](R/W) os5_0 offset compensation override bits. */
+ uint64_t reserved_22 : 1;
+ uint64_t os5_0_ovrd_en : 1; /**< [ 23: 23](R/W) Enable use of [OS5_0_OVRD]. */
+ uint64_t os5_1_ovrd : 6; /**< [ 29: 24](R/W) os5_1 offset compensation override bits. */
+ uint64_t reserved_30 : 1;
+ uint64_t os5_1_ovrd_en : 1; /**< [ 31: 31](R/W) Enable use of [OS5_1_OVRD]. */
+ uint64_t os6_0_ovrd : 6; /**< [ 37: 32](R/W) os6_0 offset compensation override bits. */
+ uint64_t reserved_38 : 1;
+ uint64_t os6_0_ovrd_en : 1; /**< [ 39: 39](R/W) Enable use of [OS6_0_OVRD]. */
+ uint64_t os6_1_ovrd : 6; /**< [ 45: 40](R/W) os6_1 offset compensation override bits. */
+ uint64_t reserved_46 : 1;
+ uint64_t os6_1_ovrd_en : 1; /**< [ 47: 47](R/W) Enable use of [OS6_1_OVRD]. */
+ uint64_t os7_0_ovrd : 6; /**< [ 53: 48](R/W) os7_0 offset compensation override bits. */
+ uint64_t reserved_54 : 1;
+ uint64_t os7_0_ovrd_en : 1; /**< [ 55: 55](R/W) Enable use of [OS7_0_OVRD]. */
+ uint64_t os7_1_ovrd : 6; /**< [ 61: 56](R/W) os7_1 offset compensation override bits. */
+ uint64_t reserved_62 : 1;
+ uint64_t os7_1_ovrd_en : 1; /**< [ 63: 63](R/W) Enable use of [OS7_1_OVRD]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_os_2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_os_2_bcfg bdk_gsernx_lanex_rx_os_2_bcfg_t;
+
+/* Return the RSL address of GSERN(a)_LANE(b)_RX_OS_2_BCFG; calls __bdk_csr_fatal() if (a,b) is out of range for the model. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_2_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001810ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_OS_2_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_OS_2_BCFG(a,b) bdk_gsernx_lanex_rx_os_2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_OS_2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_OS_2_BCFG(a,b) "GSERNX_LANEX_RX_OS_2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_OS_2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_OS_2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_OS_2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_os_2_bsts
+ *
+ * GSER Lane Receiver Offset Status Group 2 Register
+ * Status for offset settings actually in use (either calibration results
+ * or overrides) from os4_0 through os7_1. Results in all fields of this
+ * register are valid only if GSERN()_LANE()_RX_OS_5_BSTS[AFE_OFFSET_STATUS] and
+ * GSERN()_LANE()_RX_OS_5_BSTS[DFE_OFFSET_STATUS] are asserted or if the corresponding
+ * override enable bit is asserted.
+ */
+union bdk_gsernx_lanex_rx_os_2_bsts
+{
+    uint64_t u; /**< Entire 64-bit register value. */
+    struct bdk_gsernx_lanex_rx_os_2_bsts_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_62_63 : 2;
+        uint64_t os7_1 : 6; /**< [ 61: 56](RO/H) os7_1 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_54_55 : 2;
+        uint64_t os7_0 : 6; /**< [ 53: 48](RO/H) os7_0 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_46_47 : 2;
+        uint64_t os6_1 : 6; /**< [ 45: 40](RO/H) os6_1 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_38_39 : 2;
+        uint64_t os6_0 : 6; /**< [ 37: 32](RO/H) os6_0 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_30_31 : 2;
+        uint64_t os5_1 : 6; /**< [ 29: 24](RO/H) os5_1 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_22_23 : 2;
+        uint64_t os5_0 : 6; /**< [ 21: 16](RO/H) os5_0 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_14_15 : 2;
+        uint64_t os4_1 : 6; /**< [ 13: 8](RO/H) os4_1 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_6_7 : 2;
+        uint64_t os4_0 : 6; /**< [ 5: 0](RO/H) os4_0 offset compensation bits in use (calibration result or override). */
+#else /* Word 0 - Little Endian */
+        uint64_t os4_0 : 6; /**< [ 5: 0](RO/H) os4_0 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_6_7 : 2;
+        uint64_t os4_1 : 6; /**< [ 13: 8](RO/H) os4_1 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_14_15 : 2;
+        uint64_t os5_0 : 6; /**< [ 21: 16](RO/H) os5_0 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_22_23 : 2;
+        uint64_t os5_1 : 6; /**< [ 29: 24](RO/H) os5_1 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_30_31 : 2;
+        uint64_t os6_0 : 6; /**< [ 37: 32](RO/H) os6_0 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_38_39 : 2;
+        uint64_t os6_1 : 6; /**< [ 45: 40](RO/H) os6_1 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_46_47 : 2;
+        uint64_t os7_0 : 6; /**< [ 53: 48](RO/H) os7_0 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_54_55 : 2;
+        uint64_t os7_1 : 6; /**< [ 61: 56](RO/H) os7_1 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_62_63 : 2;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_os_2_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_os_2_bsts bdk_gsernx_lanex_rx_os_2_bsts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_2_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_2_BSTS(unsigned long a, unsigned long b)
+{
+    /* Address of GSERN(a)_LANE(b)_RX_OS_2_BSTS; valid on CN9XXX for a in [0..7], b in [0..4]. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4))
+    {
+        uint64_t addr = 0x87e090001950ll;   /* register base address */
+        addr += ((a) & 0x7) * 0x1000000ll;  /* per-module stride */
+        addr += ((b) & 0x7) * 0x10000ll;    /* per-lane stride */
+        return addr;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_RX_OS_2_BSTS", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata macros for GSERNX_LANEX_RX_OS_2_BSTS(a,b). */
+#define typedef_BDK_GSERNX_LANEX_RX_OS_2_BSTS(a,b) bdk_gsernx_lanex_rx_os_2_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_OS_2_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_OS_2_BSTS(a,b) "GSERNX_LANEX_RX_OS_2_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_OS_2_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_OS_2_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_OS_2_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_os_3_bcfg
+ *
+ * GSER Lane Receiver Offset Control Group 3 Register
+ * Register controls for offset overrides from os8_0 through os11_1. Each
+ * override setting has a corresponding enable bit which will cause the
+ * calibration control logic to use the override register setting instead
+ * of the calibration result.
+ */
+union bdk_gsernx_lanex_rx_os_3_bcfg
+{
+    uint64_t u; /**< Entire 64-bit register value. */
+    struct bdk_gsernx_lanex_rx_os_3_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t os11_1_ovrd_en : 1; /**< [ 63: 63](R/W) Enable use of [OS11_1_OVRD]. */
+        uint64_t reserved_62 : 1;
+        uint64_t os11_1_ovrd : 6; /**< [ 61: 56](R/W) os11_1 offset compensation override bits. */
+        uint64_t os11_0_ovrd_en : 1; /**< [ 55: 55](R/W) Enable use of [OS11_0_OVRD]. */
+        uint64_t reserved_54 : 1;
+        uint64_t os11_0_ovrd : 6; /**< [ 53: 48](R/W) os11_0 offset compensation override bits. */
+        uint64_t os10_1_ovrd_en : 1; /**< [ 47: 47](R/W) Enable use of [OS10_1_OVRD]. */
+        uint64_t reserved_46 : 1;
+        uint64_t os10_1_ovrd : 6; /**< [ 45: 40](R/W) os10_1 offset compensation override bits. */
+        uint64_t os10_0_ovrd_en : 1; /**< [ 39: 39](R/W) Enable use of [OS10_0_OVRD]. */
+        uint64_t reserved_38 : 1;
+        uint64_t os10_0_ovrd : 6; /**< [ 37: 32](R/W) os10_0 offset compensation override bits. */
+        uint64_t os9_1_ovrd_en : 1; /**< [ 31: 31](R/W) Enable use of [OS9_1_OVRD]. */
+        uint64_t reserved_30 : 1;
+        uint64_t os9_1_ovrd : 6; /**< [ 29: 24](R/W) os9_1 offset compensation override bits. */
+        uint64_t os9_0_ovrd_en : 1; /**< [ 23: 23](R/W) Enable use of [OS9_0_OVRD]. */
+        uint64_t reserved_22 : 1;
+        uint64_t os9_0_ovrd : 6; /**< [ 21: 16](R/W) os9_0 offset compensation override bits. */
+        uint64_t os8_1_ovrd_en : 1; /**< [ 15: 15](R/W) Enable use of [OS8_1_OVRD]. */
+        uint64_t reserved_14 : 1;
+        uint64_t os8_1_ovrd : 6; /**< [ 13: 8](R/W) os8_1 offset compensation override bits. */
+        uint64_t os8_0_ovrd_en : 1; /**< [ 7: 7](R/W) Enable use of [OS8_0_OVRD]. */
+        uint64_t reserved_6 : 1;
+        uint64_t os8_0_ovrd : 6; /**< [ 5: 0](R/W) os8_0 offset compensation override bits. */
+#else /* Word 0 - Little Endian */
+        uint64_t os8_0_ovrd : 6; /**< [ 5: 0](R/W) os8_0 offset compensation override bits. */
+        uint64_t reserved_6 : 1;
+        uint64_t os8_0_ovrd_en : 1; /**< [ 7: 7](R/W) Enable use of [OS8_0_OVRD]. */
+        uint64_t os8_1_ovrd : 6; /**< [ 13: 8](R/W) os8_1 offset compensation override bits. */
+        uint64_t reserved_14 : 1;
+        uint64_t os8_1_ovrd_en : 1; /**< [ 15: 15](R/W) Enable use of [OS8_1_OVRD]. */
+        uint64_t os9_0_ovrd : 6; /**< [ 21: 16](R/W) os9_0 offset compensation override bits. */
+        uint64_t reserved_22 : 1;
+        uint64_t os9_0_ovrd_en : 1; /**< [ 23: 23](R/W) Enable use of [OS9_0_OVRD]. */
+        uint64_t os9_1_ovrd : 6; /**< [ 29: 24](R/W) os9_1 offset compensation override bits. */
+        uint64_t reserved_30 : 1;
+        uint64_t os9_1_ovrd_en : 1; /**< [ 31: 31](R/W) Enable use of [OS9_1_OVRD]. */
+        uint64_t os10_0_ovrd : 6; /**< [ 37: 32](R/W) os10_0 offset compensation override bits. */
+        uint64_t reserved_38 : 1;
+        uint64_t os10_0_ovrd_en : 1; /**< [ 39: 39](R/W) Enable use of [OS10_0_OVRD]. */
+        uint64_t os10_1_ovrd : 6; /**< [ 45: 40](R/W) os10_1 offset compensation override bits. */
+        uint64_t reserved_46 : 1;
+        uint64_t os10_1_ovrd_en : 1; /**< [ 47: 47](R/W) Enable use of [OS10_1_OVRD]. */
+        uint64_t os11_0_ovrd : 6; /**< [ 53: 48](R/W) os11_0 offset compensation override bits. */
+        uint64_t reserved_54 : 1;
+        uint64_t os11_0_ovrd_en : 1; /**< [ 55: 55](R/W) Enable use of [OS11_0_OVRD]. */
+        uint64_t os11_1_ovrd : 6; /**< [ 61: 56](R/W) os11_1 offset compensation override bits. */
+        uint64_t reserved_62 : 1;
+        uint64_t os11_1_ovrd_en : 1; /**< [ 63: 63](R/W) Enable use of [OS11_1_OVRD]. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_os_3_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_os_3_bcfg bdk_gsernx_lanex_rx_os_3_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_3_BCFG(unsigned long a, unsigned long b)
+{
+    /* Address of GSERN(a)_LANE(b)_RX_OS_3_BCFG; valid on CN9XXX for a in [0..7], b in [0..4]. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4))
+    {
+        uint64_t addr = 0x87e090001820ll;   /* register base address */
+        addr += ((a) & 0x7) * 0x1000000ll;  /* per-module stride */
+        addr += ((b) & 0x7) * 0x10000ll;    /* per-lane stride */
+        return addr;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_RX_OS_3_BCFG", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata macros for GSERNX_LANEX_RX_OS_3_BCFG(a,b). */
+#define typedef_BDK_GSERNX_LANEX_RX_OS_3_BCFG(a,b) bdk_gsernx_lanex_rx_os_3_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_OS_3_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_OS_3_BCFG(a,b) "GSERNX_LANEX_RX_OS_3_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_OS_3_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_OS_3_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_OS_3_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_os_3_bsts
+ *
+ * GSER Lane Receiver Offset Status Group 3 Register
+ * Status for offset settings actually in use (either calibration results
+ * or overrides) from os8_0 through os11_1. Results in all fields of this
+ * register are valid only if GSERN()_LANE()_RX_OS_5_BSTS[AFE_OFFSET_STATUS] and
+ * GSERN()_LANE()_RX_OS_5_BSTS[DFE_OFFSET_STATUS] are asserted or if the corresponding
+ * override enable bit is asserted.
+ */
+union bdk_gsernx_lanex_rx_os_3_bsts
+{
+    uint64_t u; /**< Entire 64-bit register value. */
+    struct bdk_gsernx_lanex_rx_os_3_bsts_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_62_63 : 2;
+        uint64_t os11_1 : 6; /**< [ 61: 56](RO/H) os11_1 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_54_55 : 2;
+        uint64_t os11_0 : 6; /**< [ 53: 48](RO/H) os11_0 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_46_47 : 2;
+        uint64_t os10_1 : 6; /**< [ 45: 40](RO/H) os10_1 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_38_39 : 2;
+        uint64_t os10_0 : 6; /**< [ 37: 32](RO/H) os10_0 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_30_31 : 2;
+        uint64_t os9_1 : 6; /**< [ 29: 24](RO/H) os9_1 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_22_23 : 2;
+        uint64_t os9_0 : 6; /**< [ 21: 16](RO/H) os9_0 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_14_15 : 2;
+        uint64_t os8_1 : 6; /**< [ 13: 8](RO/H) os8_1 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_6_7 : 2;
+        uint64_t os8_0 : 6; /**< [ 5: 0](RO/H) os8_0 offset compensation bits in use (calibration result or override). */
+#else /* Word 0 - Little Endian */
+        uint64_t os8_0 : 6; /**< [ 5: 0](RO/H) os8_0 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_6_7 : 2;
+        uint64_t os8_1 : 6; /**< [ 13: 8](RO/H) os8_1 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_14_15 : 2;
+        uint64_t os9_0 : 6; /**< [ 21: 16](RO/H) os9_0 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_22_23 : 2;
+        uint64_t os9_1 : 6; /**< [ 29: 24](RO/H) os9_1 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_30_31 : 2;
+        uint64_t os10_0 : 6; /**< [ 37: 32](RO/H) os10_0 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_38_39 : 2;
+        uint64_t os10_1 : 6; /**< [ 45: 40](RO/H) os10_1 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_46_47 : 2;
+        uint64_t os11_0 : 6; /**< [ 53: 48](RO/H) os11_0 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_54_55 : 2;
+        uint64_t os11_1 : 6; /**< [ 61: 56](RO/H) os11_1 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_62_63 : 2;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_os_3_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_os_3_bsts bdk_gsernx_lanex_rx_os_3_bsts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_3_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_3_BSTS(unsigned long a, unsigned long b)
+{
+    /* Address of GSERN(a)_LANE(b)_RX_OS_3_BSTS; valid on CN9XXX for a in [0..7], b in [0..4]. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4))
+    {
+        uint64_t addr = 0x87e090001960ll;   /* register base address */
+        addr += ((a) & 0x7) * 0x1000000ll;  /* per-module stride */
+        addr += ((b) & 0x7) * 0x10000ll;    /* per-lane stride */
+        return addr;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_RX_OS_3_BSTS", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata macros for GSERNX_LANEX_RX_OS_3_BSTS(a,b). */
+#define typedef_BDK_GSERNX_LANEX_RX_OS_3_BSTS(a,b) bdk_gsernx_lanex_rx_os_3_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_OS_3_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_OS_3_BSTS(a,b) "GSERNX_LANEX_RX_OS_3_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_OS_3_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_OS_3_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_OS_3_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_os_4_bcfg
+ *
+ * GSER Lane Receiver Offset Control Group 4 Register
+ * Register controls for offset overrides from os12_0 through os15_1. Each
+ * override setting has a corresponding enable bit which will cause the
+ * calibration control logic to use the override register setting instead
+ * of the calibration result.
+ */
+union bdk_gsernx_lanex_rx_os_4_bcfg
+{
+    uint64_t u; /**< Entire 64-bit register value. */
+    struct bdk_gsernx_lanex_rx_os_4_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t os15_1_ovrd_en : 1; /**< [ 63: 63](R/W) Enable use of [OS15_1_OVRD]. */
+        uint64_t reserved_62 : 1;
+        uint64_t os15_1_ovrd : 6; /**< [ 61: 56](R/W) os15_1 offset compensation override bits. */
+        uint64_t os15_0_ovrd_en : 1; /**< [ 55: 55](R/W) Enable use of [OS15_0_OVRD]. */
+        uint64_t reserved_54 : 1;
+        uint64_t os15_0_ovrd : 6; /**< [ 53: 48](R/W) os15_0 offset compensation override bits. */
+        uint64_t os14_1_ovrd_en : 1; /**< [ 47: 47](R/W) Enable use of [OS14_1_OVRD]. */
+        uint64_t reserved_46 : 1;
+        uint64_t os14_1_ovrd : 6; /**< [ 45: 40](R/W) os14_1 offset compensation override bits. */
+        uint64_t os14_0_ovrd_en : 1; /**< [ 39: 39](R/W) Enable use of [OS14_0_OVRD]. */
+        uint64_t reserved_38 : 1;
+        uint64_t os14_0_ovrd : 6; /**< [ 37: 32](R/W) os14_0 offset compensation override bits. */
+        uint64_t os13_1_ovrd_en : 1; /**< [ 31: 31](R/W) Enable use of [OS13_1_OVRD]. */
+        uint64_t reserved_30 : 1;
+        uint64_t os13_1_ovrd : 6; /**< [ 29: 24](R/W) os13_1 offset compensation override bits. */
+        uint64_t os13_0_ovrd_en : 1; /**< [ 23: 23](R/W) Enable use of [OS13_0_OVRD]. */
+        uint64_t reserved_22 : 1;
+        uint64_t os13_0_ovrd : 6; /**< [ 21: 16](R/W) os13_0 offset compensation override bits. */
+        uint64_t os12_1_ovrd_en : 1; /**< [ 15: 15](R/W) Enable use of [OS12_1_OVRD]. */
+        uint64_t reserved_14 : 1;
+        uint64_t os12_1_ovrd : 6; /**< [ 13: 8](R/W) os12_1 offset compensation override bits. */
+        uint64_t os12_0_ovrd_en : 1; /**< [ 7: 7](R/W) Enable use of [OS12_0_OVRD]. */
+        uint64_t reserved_6 : 1;
+        uint64_t os12_0_ovrd : 6; /**< [ 5: 0](R/W) os12_0 offset compensation override bits. */
+#else /* Word 0 - Little Endian */
+        uint64_t os12_0_ovrd : 6; /**< [ 5: 0](R/W) os12_0 offset compensation override bits. */
+        uint64_t reserved_6 : 1;
+        uint64_t os12_0_ovrd_en : 1; /**< [ 7: 7](R/W) Enable use of [OS12_0_OVRD]. */
+        uint64_t os12_1_ovrd : 6; /**< [ 13: 8](R/W) os12_1 offset compensation override bits. */
+        uint64_t reserved_14 : 1;
+        uint64_t os12_1_ovrd_en : 1; /**< [ 15: 15](R/W) Enable use of [OS12_1_OVRD]. */
+        uint64_t os13_0_ovrd : 6; /**< [ 21: 16](R/W) os13_0 offset compensation override bits. */
+        uint64_t reserved_22 : 1;
+        uint64_t os13_0_ovrd_en : 1; /**< [ 23: 23](R/W) Enable use of [OS13_0_OVRD]. */
+        uint64_t os13_1_ovrd : 6; /**< [ 29: 24](R/W) os13_1 offset compensation override bits. */
+        uint64_t reserved_30 : 1;
+        uint64_t os13_1_ovrd_en : 1; /**< [ 31: 31](R/W) Enable use of [OS13_1_OVRD]. */
+        uint64_t os14_0_ovrd : 6; /**< [ 37: 32](R/W) os14_0 offset compensation override bits. */
+        uint64_t reserved_38 : 1;
+        uint64_t os14_0_ovrd_en : 1; /**< [ 39: 39](R/W) Enable use of [OS14_0_OVRD]. */
+        uint64_t os14_1_ovrd : 6; /**< [ 45: 40](R/W) os14_1 offset compensation override bits. */
+        uint64_t reserved_46 : 1;
+        uint64_t os14_1_ovrd_en : 1; /**< [ 47: 47](R/W) Enable use of [OS14_1_OVRD]. */
+        uint64_t os15_0_ovrd : 6; /**< [ 53: 48](R/W) os15_0 offset compensation override bits. */
+        uint64_t reserved_54 : 1;
+        uint64_t os15_0_ovrd_en : 1; /**< [ 55: 55](R/W) Enable use of [OS15_0_OVRD]. */
+        uint64_t os15_1_ovrd : 6; /**< [ 61: 56](R/W) os15_1 offset compensation override bits. */
+        uint64_t reserved_62 : 1;
+        uint64_t os15_1_ovrd_en : 1; /**< [ 63: 63](R/W) Enable use of [OS15_1_OVRD]. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_os_4_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_os_4_bcfg bdk_gsernx_lanex_rx_os_4_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_4_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_4_BCFG(unsigned long a, unsigned long b)
+{
+    /* Address of GSERN(a)_LANE(b)_RX_OS_4_BCFG; valid on CN9XXX for a in [0..7], b in [0..4]. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4))
+    {
+        uint64_t addr = 0x87e090001830ll;   /* register base address */
+        addr += ((a) & 0x7) * 0x1000000ll;  /* per-module stride */
+        addr += ((b) & 0x7) * 0x10000ll;    /* per-lane stride */
+        return addr;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_RX_OS_4_BCFG", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata macros for GSERNX_LANEX_RX_OS_4_BCFG(a,b). */
+#define typedef_BDK_GSERNX_LANEX_RX_OS_4_BCFG(a,b) bdk_gsernx_lanex_rx_os_4_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_OS_4_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_OS_4_BCFG(a,b) "GSERNX_LANEX_RX_OS_4_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_OS_4_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_OS_4_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_OS_4_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_os_4_bsts
+ *
+ * GSER Lane Receiver Offset Status Group 4 Register
+ * Status for offset settings actually in use (either calibration results
+ * or overrides) from os12_0 through os15_1. Results in all fields of this
+ * register are valid only if GSERN()_LANE()_RX_OS_5_BSTS[AFE_OFFSET_STATUS] and
+ * GSERN()_LANE()_RX_OS_5_BSTS[DFE_OFFSET_STATUS] are asserted or if the corresponding
+ * override enable bit is asserted.
+ */
+union bdk_gsernx_lanex_rx_os_4_bsts
+{
+    uint64_t u; /**< Entire 64-bit register value. */
+    struct bdk_gsernx_lanex_rx_os_4_bsts_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_62_63 : 2;
+        uint64_t os15_1 : 6; /**< [ 61: 56](RO/H) os15_1 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_54_55 : 2;
+        uint64_t os15_0 : 6; /**< [ 53: 48](RO/H) os15_0 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_46_47 : 2;
+        uint64_t os14_1 : 6; /**< [ 45: 40](RO/H) os14_1 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_38_39 : 2;
+        uint64_t os14_0 : 6; /**< [ 37: 32](RO/H) os14_0 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_30_31 : 2;
+        uint64_t os13_1 : 6; /**< [ 29: 24](RO/H) os13_1 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_22_23 : 2;
+        uint64_t os13_0 : 6; /**< [ 21: 16](RO/H) os13_0 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_14_15 : 2;
+        uint64_t os12_1 : 6; /**< [ 13: 8](RO/H) os12_1 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_6_7 : 2;
+        uint64_t os12_0 : 6; /**< [ 5: 0](RO/H) os12_0 offset compensation bits in use (calibration result or override). */
+#else /* Word 0 - Little Endian */
+        uint64_t os12_0 : 6; /**< [ 5: 0](RO/H) os12_0 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_6_7 : 2;
+        uint64_t os12_1 : 6; /**< [ 13: 8](RO/H) os12_1 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_14_15 : 2;
+        uint64_t os13_0 : 6; /**< [ 21: 16](RO/H) os13_0 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_22_23 : 2;
+        uint64_t os13_1 : 6; /**< [ 29: 24](RO/H) os13_1 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_30_31 : 2;
+        uint64_t os14_0 : 6; /**< [ 37: 32](RO/H) os14_0 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_38_39 : 2;
+        uint64_t os14_1 : 6; /**< [ 45: 40](RO/H) os14_1 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_46_47 : 2;
+        uint64_t os15_0 : 6; /**< [ 53: 48](RO/H) os15_0 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_54_55 : 2;
+        uint64_t os15_1 : 6; /**< [ 61: 56](RO/H) os15_1 offset compensation bits in use (calibration result or override). */
+        uint64_t reserved_62_63 : 2;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_os_4_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_os_4_bsts bdk_gsernx_lanex_rx_os_4_bsts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_4_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_4_BSTS(unsigned long a, unsigned long b)
+{
+    /* Address of GSERN(a)_LANE(b)_RX_OS_4_BSTS; valid on CN9XXX for a in [0..7], b in [0..4]. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4))
+    {
+        uint64_t addr = 0x87e090001970ll;   /* register base address */
+        addr += ((a) & 0x7) * 0x1000000ll;  /* per-module stride */
+        addr += ((b) & 0x7) * 0x10000ll;    /* per-lane stride */
+        return addr;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_RX_OS_4_BSTS", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata macros for GSERNX_LANEX_RX_OS_4_BSTS(a,b). */
+#define typedef_BDK_GSERNX_LANEX_RX_OS_4_BSTS(a,b) bdk_gsernx_lanex_rx_os_4_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_OS_4_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_OS_4_BSTS(a,b) "GSERNX_LANEX_RX_OS_4_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_OS_4_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_OS_4_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_OS_4_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_os_5_bcfg
+ *
+ * GSER Lane Receiver Offset Control Group 5 Register
+ * This register contains the controls for triggering the RX offset compensation state machines.
+ */
+union bdk_gsernx_lanex_rx_os_5_bcfg
+{
+    uint64_t u; /**< Entire 64-bit register value. */
+    struct bdk_gsernx_lanex_rx_os_5_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_55_63 : 9;
+        uint64_t run_eye_oscal : 1; /**< [ 54: 54](R/W) Enables eye (doute) DFE offset compensation to run at the correct
+ point in the hardware-driven reset sequence if asserted when the eye data path
+ bringup sequence begins. If deasserted when the eye data path bringup sequence
+ is run, this bit may be asserted later under software control prior to
+ performing eye measurements. */
+        uint64_t reserved_53 : 1;
+        uint64_t c1_e_adjust : 5; /**< [ 52: 48](R/W) Adjust value magnitude for the error slice in the E path. */
+        uint64_t reserved_45_47 : 3;
+        uint64_t c1_i_adjust : 5; /**< [ 44: 40](R/W) Adjust value magnitude for the error slice in the I path. */
+        uint64_t reserved_37_39 : 3;
+        uint64_t c1_q_adjust : 5; /**< [ 36: 32](R/W) Adjust value magnitude for the error slice in the Q path. */
+        uint64_t offset_comp_en : 1; /**< [ 31: 31](R/W) Enable AFE and DFE offset compensation to run at the
+ correct point in the hardware-driven reset sequence if asserted when
+ the reset sequence begins. If deasserted when the hardware-driven
+ reset sequence is run, this bit should be asserted later, once,
+ under software control to initiate AFE and DFE offset compensation
+ in a pure software-driven bringup. This bit field affects both AFE
+ and DFE offset compensation training. */
+        uint64_t binsrch_margin : 3; /**< [ 30: 28](R/W) Binary Search Noise Margin. This value is added to the binary search difference
+ count value. This bit field affects the binary search engine for IR TRIM.
+ 0x0 = 13'h000
+ 0x1 = 13'h020
+ 0x2 = 13'h040
+ 0x3 = 13'h080
+ 0x4 = 13'h100
+ 0x5 = 13'h200
+ 0x6 = 13'h400
+ 0x7 = 13'h800 (use with caution, may cause difference count overflow) */
+        uint64_t binsrch_wait : 10; /**< [ 27: 18](R/W) Number of clock cycles to wait after changing the offset code.
+ It is used to allow adjustments in wait time due to changes in the service clock
+ frequency.
+ This bit field affects the binary search engines for DFE/AFE offset and IR TRIM. */
+        uint64_t binsrch_acclen : 2; /**< [ 17: 16](R/W) Number of words to include in the binary search accumulation. This bit field
+ affects the binary search engines for DFE/AFE offset and IR TRIM.
+ 0x0 = 16 words.
+ 0x1 = 32 words.
+ 0x2 = 64 words.
+ 0x3 = 128 words. */
+        uint64_t settle_wait : 4; /**< [ 15: 12](R/W) Number of clock cycles for the DFE adaptation to wait after changing the
+ adjusted C1 values before resuming accumulation. */
+        uint64_t reserved_10_11 : 2;
+        uint64_t ir_trim_early_iter_max : 5; /**< [ 9: 5](R/W) Early IR TRIM Iteration Count Max. Controls the number of iterations
+ to perform during the Early IR trim. If set to 0, no iterations are done
+ and Early IR TRIM is skipped. Valid range 0 to 31. Note that
+ GSERN()_LANE()_RST_CNT4_BCFG[DFE_AFE_OSCAL_WAIT] must be increased to allow for
+ iterations. */
+        uint64_t ir_trim_comp_en : 1; /**< [ 4: 4](R/W) Enable IR TRIM compensation to run at the correct
+ point in the hardware-driven reset sequence if asserted when the
+ reset sequence begins. This bit field affects only IR trim compensation. */
+        uint64_t ir_trim_trigger : 1; /**< [ 3: 3](R/W) Writing this bit to a logic 1 when the previous value was logic 0
+ will cause the IR trim compensation FSM to run. Note that this is
+ a debug-only feature. */
+        uint64_t idle_offset_trigger : 1; /**< [ 2: 2](R/W) Writing this bit to a logic 1 when the previous value was logic 0
+ will cause the IDLE offset compensation training FSM to run. Note
+ that this is a debug-only feature. */
+        uint64_t afe_offset_trigger : 1; /**< [ 1: 1](R/W) Writing this bit to a logic 1 when the previous value was logic 0
+ will cause the AFE offset compensation training FSM to run. Note
+ that this is a debug-only feature and should not be performed while
+ transferring data on the serial link. Note also that only one of the
+ offset compensation training engines can be run at a time. To
+ trigger both DFE offset compensation and AFE offset compensation,
+ they must be run sequentially with the CSR write to trigger the
+ second in the sequence waiting until the first has completed
+ (indicated in GSERN()_LANE()_RX_OS_5_BSTS[DFE_OFFSET_STATUS] or
+ GSERN()_LANE()_RX_OS_5_BSTS[AFE_OFFSET_STATUS]). */
+        uint64_t dfe_offset_trigger : 1; /**< [ 0: 0](R/W) Writing this bit to a logic 1 when the previous value was logic 0
+ will cause the DFE offset compensation training FSM to run. Note
+ that only one of the offset compensation training engines can be run
+ at a time. To trigger both DFE offset compensation and AFE offset
+ compensation, they must be run sequentially with the CSR write to
+ the second in the sequence waiting until the first has completed
+ (indicated in GSERN()_LANE()_RX_OS_5_BSTS[DFE_OFFSET_STATUS] or
+ GSERN()_LANE()_RX_OS_5_BSTS[AFE_OFFSET_STATUS]). */
+#else /* Word 0 - Little Endian */
+        uint64_t dfe_offset_trigger : 1; /**< [ 0: 0](R/W) Writing this bit to a logic 1 when the previous value was logic 0
+ will cause the DFE offset compensation training FSM to run. Note
+ that only one of the offset compensation training engines can be run
+ at a time. To trigger both DFE offset compensation and AFE offset
+ compensation, they must be run sequentially with the CSR write to
+ the second in the sequence waiting until the first has completed
+ (indicated in GSERN()_LANE()_RX_OS_5_BSTS[DFE_OFFSET_STATUS] or
+ GSERN()_LANE()_RX_OS_5_BSTS[AFE_OFFSET_STATUS]). */
+        uint64_t afe_offset_trigger : 1; /**< [ 1: 1](R/W) Writing this bit to a logic 1 when the previous value was logic 0
+ will cause the AFE offset compensation training FSM to run. Note
+ that this is a debug-only feature and should not be performed while
+ transferring data on the serial link. Note also that only one of the
+ offset compensation training engines can be run at a time. To
+ trigger both DFE offset compensation and AFE offset compensation,
+ they must be run sequentially with the CSR write to trigger the
+ second in the sequence waiting until the first has completed
+ (indicated in GSERN()_LANE()_RX_OS_5_BSTS[DFE_OFFSET_STATUS] or
+ GSERN()_LANE()_RX_OS_5_BSTS[AFE_OFFSET_STATUS]). */
+        uint64_t idle_offset_trigger : 1; /**< [ 2: 2](R/W) Writing this bit to a logic 1 when the previous value was logic 0
+ will cause the IDLE offset compensation training FSM to run. Note
+ that this is a debug-only feature. */
+        uint64_t ir_trim_trigger : 1; /**< [ 3: 3](R/W) Writing this bit to a logic 1 when the previous value was logic 0
+ will cause the IR trim compensation FSM to run. Note that this is
+ a debug-only feature. */
+        uint64_t ir_trim_comp_en : 1; /**< [ 4: 4](R/W) Enable IR TRIM compensation to run at the correct
+ point in the hardware-driven reset sequence if asserted when the
+ reset sequence begins. This bit field affects only IR trim compensation. */
+        uint64_t ir_trim_early_iter_max : 5; /**< [ 9: 5](R/W) Early IR TRIM Iteration Count Max. Controls the number of iterations
+ to perform during the Early IR trim. If set to 0, no iterations are done
+ and Early IR TRIM is skipped. Valid range 0 to 31. Note that
+ GSERN()_LANE()_RST_CNT4_BCFG[DFE_AFE_OSCAL_WAIT] must be increased to allow for
+ iterations. */
+        uint64_t reserved_10_11 : 2;
+        uint64_t settle_wait : 4; /**< [ 15: 12](R/W) Number of clock cycles for the DFE adaptation to wait after changing the
+ adjusted C1 values before resuming accumulation. */
+        uint64_t binsrch_acclen : 2; /**< [ 17: 16](R/W) Number of words to include in the binary search accumulation. This bit field
+ affects the binary search engines for DFE/AFE offset and IR TRIM.
+ 0x0 = 16 words.
+ 0x1 = 32 words.
+ 0x2 = 64 words.
+ 0x3 = 128 words. */
+        uint64_t binsrch_wait : 10; /**< [ 27: 18](R/W) Number of clock cycles to wait after changing the offset code.
+ It is used to allow adjustments in wait time due to changes in the service clock
+ frequency.
+ This bit field affects the binary search engines for DFE/AFE offset and IR TRIM. */
+        uint64_t binsrch_margin : 3; /**< [ 30: 28](R/W) Binary Search Noise Margin. This value is added to the binary search difference
+ count value. This bit field affects the binary search engine for IR TRIM.
+ 0x0 = 13'h000
+ 0x1 = 13'h020
+ 0x2 = 13'h040
+ 0x3 = 13'h080
+ 0x4 = 13'h100
+ 0x5 = 13'h200
+ 0x6 = 13'h400
+ 0x7 = 13'h800 (use with caution, may cause difference count overflow) */
+        uint64_t offset_comp_en : 1; /**< [ 31: 31](R/W) Enable AFE and DFE offset compensation to run at the
+ correct point in the hardware-driven reset sequence if asserted when
+ the reset sequence begins. If deasserted when the hardware-driven
+ reset sequence is run, this bit should be asserted later, once,
+ under software control to initiate AFE and DFE offset compensation
+ in a pure software-driven bringup. This bit field affects both AFE
+ and DFE offset compensation training. */
+        uint64_t c1_q_adjust : 5; /**< [ 36: 32](R/W) Adjust value magnitude for the error slice in the Q path. */
+        uint64_t reserved_37_39 : 3;
+        uint64_t c1_i_adjust : 5; /**< [ 44: 40](R/W) Adjust value magnitude for the error slice in the I path. */
+        uint64_t reserved_45_47 : 3;
+        uint64_t c1_e_adjust : 5; /**< [ 52: 48](R/W) Adjust value magnitude for the error slice in the E path. */
+        uint64_t reserved_53 : 1;
+        uint64_t run_eye_oscal : 1; /**< [ 54: 54](R/W) Enables eye (doute) DFE offset compensation to run at the correct
+ point in the hardware-driven reset sequence if asserted when the eye data path
+ bringup sequence begins. If deasserted when the eye data path bringup sequence
+ is run, this bit may be asserted later under software control prior to
+ performing eye measurements. */
+        uint64_t reserved_55_63 : 9;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_os_5_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_os_5_bcfg bdk_gsernx_lanex_rx_os_5_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_5_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_5_BCFG(unsigned long a, unsigned long b)
+{
+    /* Address of GSERN(a)_LANE(b)_RX_OS_5_BCFG; valid on CN9XXX for a in [0..7], b in [0..4]. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4))
+    {
+        uint64_t addr = 0x87e090001840ll;   /* register base address */
+        addr += ((a) & 0x7) * 0x1000000ll;  /* per-module stride */
+        addr += ((b) & 0x7) * 0x10000ll;    /* per-lane stride */
+        return addr;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_RX_OS_5_BCFG", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata macros for GSERNX_LANEX_RX_OS_5_BCFG(a,b). */
+#define typedef_BDK_GSERNX_LANEX_RX_OS_5_BCFG(a,b) bdk_gsernx_lanex_rx_os_5_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_OS_5_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_OS_5_BCFG(a,b) "GSERNX_LANEX_RX_OS_5_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_OS_5_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_OS_5_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_OS_5_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_os_5_bsts
+ *
+ * GSER Lane Receiver Offset Status Group 5 Register
+ * This register reports status for the RX offset compensation state machines.
+ */
+union bdk_gsernx_lanex_rx_os_5_bsts
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_os_5_bsts_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_25_63 : 39;
+        uint64_t idle : 1; /**< [ 24: 24](RO/H) For diagnostic use only.
+            Internal:
+            A copy of GSERN()_LANE()_RX_IDLEDET_BSTS[IDLE] for verification convenience. */
+        uint64_t reserved_18_23 : 6;
+        uint64_t idle_offset_valid : 1; /**< [ 17: 17](R/W1C/H) Valid indicator for the idle offset calibration values. This bit gets set when
+            idle offset calibration
+            completes, and may be cleared by software write to 1.
+            NOTE(review): the original description duplicated [DFE_OFFSETS_VALID]; the
+            idle-offset wording here is inferred from the field name -- confirm against
+            the hardware specification. */
+        uint64_t dfe_offsets_valid : 1; /**< [ 16: 16](R/W1C/H) Valid indicator for the DFE Offset calibration values. This bit gets set when
+            DFE offset calibration
+            completes, and may be cleared by software write to 1. */
+        uint64_t idle_os : 6; /**< [ 15: 10](RO/H) Value for the IDLE detect offset currently in use. This field may differ from
+            [IDLE_OS_CAL] if idle hysteresis is enabled. This field is only valid when the
+            idle detect offset calibration is not running. */
+        uint64_t idle_os_cal : 6; /**< [ 9: 4](RO/H) Result of IDLE detect offset calibration. This field is only valid when the idle
+            detect offset calibration is not running. */
+        uint64_t ir_trim_status : 1; /**< [ 3: 3](RO/H) When 1, indicates that the IR TRIM compensation FSM has completed operations.
+            Cleared to 0 by hardware when the IR TRIM compensation training FSM is triggered by software
+            or state machines. */
+        uint64_t idle_offset_status : 1; /**< [ 2: 2](RO/H) When 1, indicates that the IDLE offset compensation training FSM has completed operations.
+            Cleared to 0 by hardware when the IDLE offset compensation training FSM is triggered by software,
+            hardware timers, or state machines. */
+        uint64_t afe_offset_status : 1; /**< [ 1: 1](RO/H) When 1, indicates that the AFE offset compensation training FSM has completed operations.
+            Cleared to 0 by hardware when the AFE offset compensation training FSM is triggered by software,
+            hardware timers, or state machines. */
+        uint64_t dfe_offset_status : 1; /**< [ 0: 0](RO/H) When 1, indicates that the DFE offset compensation training FSM has completed operations.
+            Cleared to 0 by hardware when the DFE offset compensation training FSM is triggered by software,
+            hardware timers, or state machines. */
+#else /* Word 0 - Little Endian */
+        uint64_t dfe_offset_status : 1; /**< [ 0: 0](RO/H) When 1, indicates that the DFE offset compensation training FSM has completed operations.
+            Cleared to 0 by hardware when the DFE offset compensation training FSM is triggered by software,
+            hardware timers, or state machines. */
+        uint64_t afe_offset_status : 1; /**< [ 1: 1](RO/H) When 1, indicates that the AFE offset compensation training FSM has completed operations.
+            Cleared to 0 by hardware when the AFE offset compensation training FSM is triggered by software,
+            hardware timers, or state machines. */
+        uint64_t idle_offset_status : 1; /**< [ 2: 2](RO/H) When 1, indicates that the IDLE offset compensation training FSM has completed operations.
+            Cleared to 0 by hardware when the IDLE offset compensation training FSM is triggered by software,
+            hardware timers, or state machines. */
+        uint64_t ir_trim_status : 1; /**< [ 3: 3](RO/H) When 1, indicates that the IR TRIM compensation FSM has completed operations.
+            Cleared to 0 by hardware when the IR TRIM compensation training FSM is triggered by software
+            or state machines. */
+        uint64_t idle_os_cal : 6; /**< [ 9: 4](RO/H) Result of IDLE detect offset calibration. This field is only valid when the idle
+            detect offset calibration is not running. */
+        uint64_t idle_os : 6; /**< [ 15: 10](RO/H) Value for the IDLE detect offset currently in use. This field may differ from
+            [IDLE_OS_CAL] if idle hysteresis is enabled. This field is only valid when the
+            idle detect offset calibration is not running. */
+        uint64_t dfe_offsets_valid : 1; /**< [ 16: 16](R/W1C/H) Valid indicator for the DFE Offset calibration values. This bit gets set when
+            DFE offset calibration
+            completes, and may be cleared by software write to 1. */
+        uint64_t idle_offset_valid : 1; /**< [ 17: 17](R/W1C/H) Valid indicator for the idle offset calibration values. This bit gets set when
+            idle offset calibration
+            completes, and may be cleared by software write to 1.
+            NOTE(review): the original description duplicated [DFE_OFFSETS_VALID]; the
+            idle-offset wording here is inferred from the field name -- confirm against
+            the hardware specification. */
+        uint64_t reserved_18_23 : 6;
+        uint64_t idle : 1; /**< [ 24: 24](RO/H) For diagnostic use only.
+            Internal:
+            A copy of GSERN()_LANE()_RX_IDLEDET_BSTS[IDLE] for verification convenience. */
+        uint64_t reserved_25_63 : 39;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_os_5_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_os_5_bsts bdk_gsernx_lanex_rx_os_5_bsts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_5_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_5_BSTS(unsigned long a, unsigned long b)
+{
+    /* Address is defined only on CN9XXX parts for module index a <= 7 and
+       lane index b <= 4; any other combination is a fatal CSR access error. */
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN9XXX) || (a > 7) || (b > 4))
+        __bdk_csr_fatal("GSERNX_LANEX_RX_OS_5_BSTS", 2, a, b, 0, 0);
+    return 0x87e090001980ll + ((a) & 0x7) * 0x1000000ll + ((b) & 0x7) * 0x10000ll;
+}
+
+/* Accessor attribute macros for GSERN(a)_LANE(b)_RX_OS_5_BSTS (RSL register, PF_BAR0). */
+#define typedef_BDK_GSERNX_LANEX_RX_OS_5_BSTS(a,b) bdk_gsernx_lanex_rx_os_5_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_OS_5_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_OS_5_BSTS(a,b) "GSERNX_LANEX_RX_OS_5_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_OS_5_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_OS_5_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_OS_5_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_qac_bcfg
+ *
+ * GSER Lane RX Quadrature Corrector Base Configuration Register
+ * Static controls for the quadrature corrector in the receiver. All fields
+ * must be set prior to exiting reset.
+ */
+union bdk_gsernx_lanex_rx_qac_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_qac_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_42_63 : 22;
+        uint64_t cdr_qac_selq : 1; /**< [ 41: 41](R/W) Enable use of the QAC corrector for the q-path when the reset state
+            machine timing allows it. */
+        uint64_t cdr_qac_sele : 1; /**< [ 40: 40](R/W) Enable use of the QAC corrector for the e-path when the reset state
+            machine timing allows it. */
+        uint64_t reserved_35_39 : 5;
+        uint64_t qac_cntset_q : 3; /**< [ 34: 32](R/W) Programmable counter depth for QAC corrector value for the doutq
+            path. The 3-bit encoding represents an integration time with 12-7 bit
+            counter. The counter stops counting until it saturates or reaches
+            0. If [EN_QAC_Q] is clear, this register is not used. If
+            [EN_QAC_Q] is set, this correction value will be output to the
+            CDR loop. Set this field prior to exiting reset. */
+        uint64_t reserved_27_31 : 5;
+        uint64_t qac_cntset_e : 3; /**< [ 26: 24](R/W) Programmable counter depth for QAC corrector value for the doute
+            path. The 3-bit encoding represents an integration time with 12-7 bit
+            counter. The counter stops counting until it saturates or reaches
+            0. If [EN_QAC_E] is clear, this register is not used. If
+            [EN_QAC_E] is set, this correction value will be output to the
+            CDR loop. Set this field prior to exiting reset. */
+        uint64_t reserved_22_23 : 2;
+        uint64_t qac_ref_qoffs : 6; /**< [ 21: 16](R/W) Target value for the phase relationship between the i-path (leading)
+            and the q-path (trailing). The range is zero to 180 degrees in 64
+            steps, i.e., 2.8571 degrees per step. Used only when the QAC filter
+            is enabled and selected. */
+        uint64_t reserved_14_15 : 2;
+        uint64_t qac_ref_eoffs : 6; /**< [ 13: 8](R/W) Target value for the phase relationship between the i-path (leading)
+            and the e-path (trailing). The range is zero to 180 degrees in 64
+            steps, i.e., 2.8571 degrees per step. Used only when the QAC filter
+            is enabled and selected. */
+        uint64_t reserved_2_7 : 6;
+        uint64_t en_qac_e : 1; /**< [ 1: 1](R/W) Enable use of QAC digital filter in the doute datapath. If the
+            enable is deasserted, the filter will output the [QAC_REFSET]
+            value. If it is asserted, it will determine the current phase and use
+            [QAC_REFSET] & [QAC_CNTSET] to output a correction value. Set prior to
+            exiting reset. */
+        uint64_t en_qac_q : 1; /**< [ 0: 0](R/W) Enable use of QAC digital filter in the doutq datapath. If the
+            enable is deasserted, the filter will output the [QAC_REFSET]
+            value. If it is asserted, it will determine the current phase and use
+            [QAC_REFSET] & [QAC_CNTSET] to output a correction value. Set prior to
+            exiting reset. */
+#else /* Word 0 - Little Endian */
+        uint64_t en_qac_q : 1; /**< [ 0: 0](R/W) Enable use of QAC digital filter in the doutq datapath. If the
+            enable is deasserted, the filter will output the [QAC_REFSET]
+            value. If it is asserted, it will determine the current phase and use
+            [QAC_REFSET] & [QAC_CNTSET] to output a correction value. Set prior to
+            exiting reset. */
+        uint64_t en_qac_e : 1; /**< [ 1: 1](R/W) Enable use of QAC digital filter in the doute datapath. If the
+            enable is deasserted, the filter will output the [QAC_REFSET]
+            value. If it is asserted, it will determine the current phase and use
+            [QAC_REFSET] & [QAC_CNTSET] to output a correction value. Set prior to
+            exiting reset. */
+        uint64_t reserved_2_7 : 6;
+        uint64_t qac_ref_eoffs : 6; /**< [ 13: 8](R/W) Target value for the phase relationship between the i-path (leading)
+            and the e-path (trailing). The range is zero to 180 degrees in 64
+            steps, i.e., 2.8571 degrees per step. Used only when the QAC filter
+            is enabled and selected. */
+        uint64_t reserved_14_15 : 2;
+        uint64_t qac_ref_qoffs : 6; /**< [ 21: 16](R/W) Target value for the phase relationship between the i-path (leading)
+            and the q-path (trailing). The range is zero to 180 degrees in 64
+            steps, i.e., 2.8571 degrees per step. Used only when the QAC filter
+            is enabled and selected. */
+        uint64_t reserved_22_23 : 2;
+        uint64_t qac_cntset_e : 3; /**< [ 26: 24](R/W) Programmable counter depth for QAC corrector value for the doute
+            path. The 3-bit encoding represents an integration time with 12-7 bit
+            counter. The counter stops counting until it saturates or reaches
+            0. If [EN_QAC_E] is clear, this register is not used. If
+            [EN_QAC_E] is set, this correction value will be output to the
+            CDR loop. Set this field prior to exiting reset. */
+        uint64_t reserved_27_31 : 5;
+        uint64_t qac_cntset_q : 3; /**< [ 34: 32](R/W) Programmable counter depth for QAC corrector value for the doutq
+            path. The 3-bit encoding represents an integration time with 12-7 bit
+            counter. The counter stops counting until it saturates or reaches
+            0. If [EN_QAC_Q] is clear, this register is not used. If
+            [EN_QAC_Q] is set, this correction value will be output to the
+            CDR loop. Set this field prior to exiting reset. */
+        uint64_t reserved_35_39 : 5;
+        uint64_t cdr_qac_sele : 1; /**< [ 40: 40](R/W) Enable use of the QAC corrector for the e-path when the reset state
+            machine timing allows it. */
+        uint64_t cdr_qac_selq : 1; /**< [ 41: 41](R/W) Enable use of the QAC corrector for the q-path when the reset state
+            machine timing allows it. */
+        uint64_t reserved_42_63 : 22;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_qac_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_qac_bcfg bdk_gsernx_lanex_rx_qac_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_QAC_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_QAC_BCFG(unsigned long a, unsigned long b)
+{
+    /* Address is defined only on CN9XXX parts for module index a <= 7 and
+       lane index b <= 4; any other combination is a fatal CSR access error. */
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN9XXX) || (a > 7) || (b > 4))
+        __bdk_csr_fatal("GSERNX_LANEX_RX_QAC_BCFG", 2, a, b, 0, 0);
+    return 0x87e090000ee0ll + ((a) & 0x7) * 0x1000000ll + ((b) & 0x7) * 0x10000ll;
+}
+
+/* Accessor attribute macros for GSERN(a)_LANE(b)_RX_QAC_BCFG (RSL register, PF_BAR0). */
+#define typedef_BDK_GSERNX_LANEX_RX_QAC_BCFG(a,b) bdk_gsernx_lanex_rx_qac_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_QAC_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_QAC_BCFG(a,b) "GSERNX_LANEX_RX_QAC_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_QAC_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_QAC_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_QAC_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_qac_bsts
+ *
+ * GSER Lane RX Quadrature Corrector Base Status Register
+ * Quadrature corrector outputs captured in a CSR register; results should be close to
+ * GSERN()_LANE()_RX_QAC_BCFG[QAC_REF_EOFFS] and
+ * GSERN()_LANE()_RX_QAC_BCFG[QAC_REF_QOFFS] when the QAC is in use and stable.
+ */
+union bdk_gsernx_lanex_rx_qac_bsts
+{
+    uint64_t u; /* Whole-register access. */
+    struct bdk_gsernx_lanex_rx_qac_bsts_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_22_63 : 42;
+        uint64_t qac_qoffs : 6; /**< [ 21: 16](RO/H) Quadrature filter control output for the phase relationship between
+            the i-path (leading) and the q-path (trailing). The range is zero
+            to 180 degrees in 64 steps, i.e., 2.8571 degrees per step. Valid only
+            when the QAC filter is enabled and selected. */
+        uint64_t reserved_14_15 : 2;
+        uint64_t qac_eoffs : 6; /**< [ 13: 8](RO/H) Quadrature filter control output for the phase relationship between
+            the i-path (leading) and the e-path (trailing). The range is zero
+            to 180 degrees in 64 steps, i.e., 2.8571 degrees per step. Valid only
+            when the QAC filter is enabled and selected. */
+        uint64_t reserved_0_7 : 8;
+#else /* Word 0 - Little Endian */
+        uint64_t reserved_0_7 : 8;
+        uint64_t qac_eoffs : 6; /**< [ 13: 8](RO/H) Quadrature filter control output for the phase relationship between
+            the i-path (leading) and the e-path (trailing). The range is zero
+            to 180 degrees in 64 steps, i.e., 2.8571 degrees per step. Valid only
+            when the QAC filter is enabled and selected. */
+        uint64_t reserved_14_15 : 2;
+        uint64_t qac_qoffs : 6; /**< [ 21: 16](RO/H) Quadrature filter control output for the phase relationship between
+            the i-path (leading) and the q-path (trailing). The range is zero
+            to 180 degrees in 64 steps, i.e., 2.8571 degrees per step. Valid only
+            when the QAC filter is enabled and selected. */
+        uint64_t reserved_22_63 : 42;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_qac_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_qac_bsts bdk_gsernx_lanex_rx_qac_bsts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_QAC_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_QAC_BSTS(unsigned long a, unsigned long b)
+{
+    /* Address is defined only on CN9XXX parts for module index a <= 7 and
+       lane index b <= 4; any other combination is a fatal CSR access error. */
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN9XXX) || (a > 7) || (b > 4))
+        __bdk_csr_fatal("GSERNX_LANEX_RX_QAC_BSTS", 2, a, b, 0, 0);
+    return 0x87e090000ef0ll + ((a) & 0x7) * 0x1000000ll + ((b) & 0x7) * 0x10000ll;
+}
+
+/* Accessor attribute macros for GSERN(a)_LANE(b)_RX_QAC_BSTS (RSL register, PF_BAR0). */
+#define typedef_BDK_GSERNX_LANEX_RX_QAC_BSTS(a,b) bdk_gsernx_lanex_rx_qac_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_QAC_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_QAC_BSTS(a,b) "GSERNX_LANEX_RX_QAC_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_QAC_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_QAC_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_QAC_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_st_bcfg
+ *
+ * GSER Lane RX Static Base Configuration Register
+ * This register provides static RX settings that do not need FSM overrides.
+ */
+union bdk_gsernx_lanex_rx_st_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_st_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_49_63 : 15;
+        uint64_t rxcdrfsmi : 1; /**< [ 48: 48](R/W) Set to provide the RX interpolator with the RX CDR load I
+            clock (rxcdrldi). deassert (low) to provide the interpolator with
+            the RX CDR load Q clock (rxcdrldq). This bit is ignored when
+            txcdrdfsm is asserted (high), which sets the RX interpolator
+            and CDR FSM to use the TX clock (txcdrld).
+
+            Internal:
+            (For initial testing, assert rxcdrfsmi, but if we have trouble
+            meeting timing, we can deassert this signal to provide some
+            additional timing margin from the last flops in the RX CDR FSM to
+            the flops interpolator.) */
+        uint64_t reserved_42_47 : 6;
+        uint64_t rx_dcc_iboost : 1; /**< [ 41: 41](R/W) Set to assert the iboost control bit of the
+            receiver duty cycle corrector. Should be programmed as desired before
+            sequencing the receiver reset state machine. Differs
+            from [RX_DCC_LOWF] in the data rate range that it is set at. */
+        uint64_t rx_dcc_lowf : 1; /**< [ 40: 40](R/W) Set to put the RX duty-cycle corrector (DCC) into low frequency mode. Set to 1
+            when operating at data rates below 4 Gbaud. */
+        uint64_t reserved_35_39 : 5;
+        uint64_t bstuff : 1; /**< [ 34: 34](R/W) Set to place custom receive pipe in bit-stuffing
+            mode. Only the odd bits in the half-rate DFE outputs are passed to
+            the cdrout* and dout* pipe outputs; the odd bits are duplicated to
+            fill up the expected data path width. */
+        uint64_t rx_idle_lowf : 2; /**< [ 33: 32](R/W) Control for the receiver's idle detector analog filter
+            bandwidth. The two bits apply at different times.
+            \<0\> = Set to 1 for low bandwidth during normal operation.
+            \<1\> = Set to 1 for low bandwidth during idle offset calibration.
+            The default is 1 during normal operation for large filter capacitance and low
+            bandwidth, and 0 during idle offset calibration to provide faster response. */
+        uint64_t idle_os_bitlen : 2; /**< [ 31: 30](R/W) Number of bits to accumulate for IDLE detect offset calibration, measured in
+            cycles of the 100 MHz system service clock.
+            0x0 = 5 cycles.
+            0x1 = 30 cycles.
+            0x2 = 60 cycles.
+            0x3 = 250 cycles. */
+        uint64_t idle_os_ovrd_en : 1; /**< [ 29: 29](R/W) Enable use of [IDLE_OS_OVRD]. */
+        uint64_t refset : 5; /**< [ 28: 24](R/W) Sets the reference voltage swing for idle detection. A voltage swing
+            at the input of the RX less than this amount is defined as
+            idle.
+            0x0 = Threshold (refp-refn) is 23 mV.
+            0x1 = Threshold (refp-refn) is 27.4 mV.
+            0x2 = Threshold (refp-refn) is 31.8 mV.
+            0x3 = Threshold (refp-refn) is 36.2 mV.
+            0x4 = Threshold (refp-refn) is 40.6 mV.
+            0x5 = Threshold (refp-refn) is 45 mV.
+            0x6 = Threshold (refp-refn) is 49.4 mV.
+            0x7 = Threshold (refp-refn) is 53.8 mV.
+            0x8 = Threshold (refp-refn) is 58.2 mV.
+            0x9 = Threshold (refp-refn) is 62.6 mV.
+            0xA = Threshold (refp-refn) is 67 mV.
+            0xB = Threshold (refp-refn) is 71.4 mV.
+            0xC = Threshold (refp-refn) is 75.8 mV.
+            0xD = Threshold (refp-refn) is 80.2 mV.
+            0xE = Threshold (refp-refn) is 84.6 mV.
+            0xF = Threshold (refp-refn) is 89 mV.
+            0x10 = Threshold (refp-refn) is 55 mV.
+            0x11 = Threshold (refp-refn) is 62.9 mV.
+            0x12 = Threshold (refp-refn) is 70.8 mV.
+            0x13 = Threshold (refp-refn) is 78.7 mV.
+            0x14 = Threshold (refp-refn) is 86.6 mV.
+            0x15 = Threshold (refp-refn) is 94.5 mV.
+            0x16 = Threshold (refp-refn) is 102.4 mV.
+            0x17 = Threshold (refp-refn) is 110.3 mV.
+            0x18 = Threshold (refp-refn) is 118.2 mV.
+            0x19 = Threshold (refp-refn) is 126.1 mV.
+            0x1A = Threshold (refp-refn) is 134 mV.
+            0x1B = Threshold (refp-refn) is 141.9 mV.
+            0x1C = Threshold (refp-refn) is 149.8 mV.
+            0x1D = Threshold (refp-refn) is 157.7 mV.
+            0x1E = Threshold (refp-refn) is 165.6 mV.
+            0x1F = Threshold (refp-refn) is 173.5 mV. */
+        uint64_t idle_os_ovrd : 6; /**< [ 23: 18](R/W) Override value for the IDLE detect offset calibration. As with the
+            other offset DACs in the RX, the MSB sets the sign, and the 5 LSBs
+            are binary-encoded magnitudes. */
+        uint64_t en_idle_cal : 1; /**< [ 17: 17](R/W) Set to put the idle detector into calibration mode. */
+        uint64_t rxelecidle : 1; /**< [ 16: 16](R/W) Set to place the CDR finite state machine into a reset state so it does not try
+            to track clock or data and starts from a reset state when the CDR finite state
+            machine begins or resumes operation. deassert (low) to allow the CDR FSM to run. */
+        uint64_t rxcdrhold : 1; /**< [ 15: 15](R/W) Set to place the CDR finite state machine (FSM) into a hold state so it does not
+            try to track clock or data, which would not normally be present during
+            electrical idle. The CDR FSM state is preserved, provided [RXELECIDLE] is not
+            asserted, so the CDR FSM resumes operation with the same settings in effect
+            prior to entering the hold state. deassert (low) to allow the CDR FSM to run. */
+        uint64_t rxcdrramp : 1; /**< [ 14: 14](R/W) For diagnostic use only.
+            Internal:
+            For lab characterization use only. Set to 1 to cause the CDR FSM to ramp the 1st
+            order state by [INC1], independent of voter, & hold the 2nd order state. */
+        uint64_t reserved_13 : 1;
+        uint64_t en_sh_lb : 1; /**< [ 12: 12](R/W) Enable for shallow loopback mode within RX. Used when in shallow loopback
+            mode to mux the CDR receive clock onto the transmit data path clock
+            to ensure that the clock frequencies are matched (to prevent data overrun).
+            This signal should be enabled along with GSERN()_LANE()_PLL_2_BCFG[SHLB_EN] for
+            the PLL. */
+        uint64_t erc : 4; /**< [ 11: 8](R/W) Interpolator edge-rate control. This control is shared between all
+            interpolators in the lane. Set as follows:
+            \<pre\>
+            if (data_period \>= 500ps) erc = 4'h1;
+            else if (data_period \>= 407ps) erc = 4'h2;
+            else if (data_period \>= 333ps) erc = 4'h3;
+            else if (data_period \>= 167ps) erc = 4'h4;
+            else if (data_period \>= 166ps) erc = 4'h5;
+            else if (data_period \>= 100ps) erc = 4'h7;
+            else if (data_period \>= 85ps) erc = 4'h8;
+            else if (data_period \>= 80ps) erc = 4'h9;
+            else if (data_period \>= 62ps) erc = 4'hA;
+            else if (data_period \>= 55ps) erc = 4'hB;
+            else if (data_period \>= 50ps) erc = 4'hC;
+            else if (data_period \>= 45ps) erc = 4'hD;
+            else if (data_period \>= 38ps) erc = 4'hE;
+            else erc = 4'hF;
+            \</pre\> */
+        uint64_t term : 2; /**< [ 7: 6](R/W) Termination voltage control. Setting to 0x1 (VSSA) is typically appropriate for
+            PCIe channels. For channels without a series board capacitor the typical setting
+            would be 0x0 (floating).
+            0x0 = Floating.
+            0x1 = VSSA.
+            0x2 = VDDA.
+            0x3 = VSSA. */
+        uint64_t en_rt85 : 1; /**< [ 5: 5](R/W) Enable 85 Ohm termination in the receiver. */
+        uint64_t en_lb : 1; /**< [ 4: 4](R/W) Enable for near-end TX loopback path. */
+        uint64_t en_rterm : 1; /**< [ 3: 3](R/W) For debug use only. Set to one to enable the receiver's termination circuit
+            during bringup. Setting to zero will turn off receiver termination. */
+        uint64_t reserved_0_2 : 3;
+#else /* Word 0 - Little Endian */
+        uint64_t reserved_0_2 : 3;
+        uint64_t en_rterm : 1; /**< [ 3: 3](R/W) For debug use only. Set to one to enable the receiver's termination circuit
+            during bringup. Setting to zero will turn off receiver termination. */
+        uint64_t en_lb : 1; /**< [ 4: 4](R/W) Enable for near-end TX loopback path. */
+        uint64_t en_rt85 : 1; /**< [ 5: 5](R/W) Enable 85 Ohm termination in the receiver. */
+        uint64_t term : 2; /**< [ 7: 6](R/W) Termination voltage control. Setting to 0x1 (VSSA) is typically appropriate for
+            PCIe channels. For channels without a series board capacitor the typical setting
+            would be 0x0 (floating).
+            0x0 = Floating.
+            0x1 = VSSA.
+            0x2 = VDDA.
+            0x3 = VSSA. */
+        uint64_t erc : 4; /**< [ 11: 8](R/W) Interpolator edge-rate control. This control is shared between all
+            interpolators in the lane. Set as follows:
+            \<pre\>
+            if (data_period \>= 500ps) erc = 4'h1;
+            else if (data_period \>= 407ps) erc = 4'h2;
+            else if (data_period \>= 333ps) erc = 4'h3;
+            else if (data_period \>= 167ps) erc = 4'h4;
+            else if (data_period \>= 166ps) erc = 4'h5;
+            else if (data_period \>= 100ps) erc = 4'h7;
+            else if (data_period \>= 85ps) erc = 4'h8;
+            else if (data_period \>= 80ps) erc = 4'h9;
+            else if (data_period \>= 62ps) erc = 4'hA;
+            else if (data_period \>= 55ps) erc = 4'hB;
+            else if (data_period \>= 50ps) erc = 4'hC;
+            else if (data_period \>= 45ps) erc = 4'hD;
+            else if (data_period \>= 38ps) erc = 4'hE;
+            else erc = 4'hF;
+            \</pre\> */
+        uint64_t en_sh_lb : 1; /**< [ 12: 12](R/W) Enable for shallow loopback mode within RX. Used when in shallow loopback
+            mode to mux the CDR receive clock onto the transmit data path clock
+            to ensure that the clock frequencies are matched (to prevent data overrun).
+            This signal should be enabled along with GSERN()_LANE()_PLL_2_BCFG[SHLB_EN] for
+            the PLL. */
+        uint64_t reserved_13 : 1;
+        uint64_t rxcdrramp : 1; /**< [ 14: 14](R/W) For diagnostic use only.
+            Internal:
+            For lab characterization use only. Set to 1 to cause the CDR FSM to ramp the 1st
+            order state by [INC1], independent of voter, & hold the 2nd order state. */
+        uint64_t rxcdrhold : 1; /**< [ 15: 15](R/W) Set to place the CDR finite state machine (FSM) into a hold state so it does not
+            try to track clock or data, which would not normally be present during
+            electrical idle. The CDR FSM state is preserved, provided [RXELECIDLE] is not
+            asserted, so the CDR FSM resumes operation with the same settings in effect
+            prior to entering the hold state. deassert (low) to allow the CDR FSM to run. */
+        uint64_t rxelecidle : 1; /**< [ 16: 16](R/W) Set to place the CDR finite state machine into a reset state so it does not try
+            to track clock or data and starts from a reset state when the CDR finite state
+            machine begins or resumes operation. deassert (low) to allow the CDR FSM to run. */
+        uint64_t en_idle_cal : 1; /**< [ 17: 17](R/W) Set to put the idle detector into calibration mode. */
+        uint64_t idle_os_ovrd : 6; /**< [ 23: 18](R/W) Override value for the IDLE detect offset calibration. As with the
+            other offset DACs in the RX, the MSB sets the sign, and the 5 LSBs
+            are binary-encoded magnitudes. */
+        uint64_t refset : 5; /**< [ 28: 24](R/W) Sets the reference voltage swing for idle detection. A voltage swing
+            at the input of the RX less than this amount is defined as
+            idle.
+            0x0 = Threshold (refp-refn) is 23 mV.
+            0x1 = Threshold (refp-refn) is 27.4 mV.
+            0x2 = Threshold (refp-refn) is 31.8 mV.
+            0x3 = Threshold (refp-refn) is 36.2 mV.
+            0x4 = Threshold (refp-refn) is 40.6 mV.
+            0x5 = Threshold (refp-refn) is 45 mV.
+            0x6 = Threshold (refp-refn) is 49.4 mV.
+            0x7 = Threshold (refp-refn) is 53.8 mV.
+            0x8 = Threshold (refp-refn) is 58.2 mV.
+            0x9 = Threshold (refp-refn) is 62.6 mV.
+            0xA = Threshold (refp-refn) is 67 mV.
+            0xB = Threshold (refp-refn) is 71.4 mV.
+            0xC = Threshold (refp-refn) is 75.8 mV.
+            0xD = Threshold (refp-refn) is 80.2 mV.
+            0xE = Threshold (refp-refn) is 84.6 mV.
+            0xF = Threshold (refp-refn) is 89 mV.
+            0x10 = Threshold (refp-refn) is 55 mV.
+            0x11 = Threshold (refp-refn) is 62.9 mV.
+            0x12 = Threshold (refp-refn) is 70.8 mV.
+            0x13 = Threshold (refp-refn) is 78.7 mV.
+            0x14 = Threshold (refp-refn) is 86.6 mV.
+            0x15 = Threshold (refp-refn) is 94.5 mV.
+            0x16 = Threshold (refp-refn) is 102.4 mV.
+            0x17 = Threshold (refp-refn) is 110.3 mV.
+            0x18 = Threshold (refp-refn) is 118.2 mV.
+            0x19 = Threshold (refp-refn) is 126.1 mV.
+            0x1A = Threshold (refp-refn) is 134 mV.
+            0x1B = Threshold (refp-refn) is 141.9 mV.
+            0x1C = Threshold (refp-refn) is 149.8 mV.
+            0x1D = Threshold (refp-refn) is 157.7 mV.
+            0x1E = Threshold (refp-refn) is 165.6 mV.
+            0x1F = Threshold (refp-refn) is 173.5 mV. */
+        uint64_t idle_os_ovrd_en : 1; /**< [ 29: 29](R/W) Enable use of [IDLE_OS_OVRD]. */
+        uint64_t idle_os_bitlen : 2; /**< [ 31: 30](R/W) Number of bits to accumulate for IDLE detect offset calibration, measured in
+            cycles of the 100 MHz system service clock.
+            0x0 = 5 cycles.
+            0x1 = 30 cycles.
+            0x2 = 60 cycles.
+            0x3 = 250 cycles. */
+        uint64_t rx_idle_lowf : 2; /**< [ 33: 32](R/W) Control for the receiver's idle detector analog filter
+            bandwidth. The two bits apply at different times.
+            \<0\> = Set to 1 for low bandwidth during normal operation.
+            \<1\> = Set to 1 for low bandwidth during idle offset calibration.
+            The default is 1 during normal operation for large filter capacitance and low
+            bandwidth, and 0 during idle offset calibration to provide faster response. */
+        uint64_t bstuff : 1; /**< [ 34: 34](R/W) Set to place custom receive pipe in bit-stuffing
+            mode. Only the odd bits in the half-rate DFE outputs are passed to
+            the cdrout* and dout* pipe outputs; the odd bits are duplicated to
+            fill up the expected data path width. */
+        uint64_t reserved_35_39 : 5;
+        uint64_t rx_dcc_lowf : 1; /**< [ 40: 40](R/W) Set to put the RX duty-cycle corrector (DCC) into low frequency mode. Set to 1
+            when operating at data rates below 4 Gbaud. */
+        uint64_t rx_dcc_iboost : 1; /**< [ 41: 41](R/W) Set to assert the iboost control bit of the
+            receiver duty cycle corrector. Should be programmed as desired before
+            sequencing the receiver reset state machine. Differs
+            from [RX_DCC_LOWF] in the data rate range that it is set at. */
+        uint64_t reserved_42_47 : 6;
+        uint64_t rxcdrfsmi : 1; /**< [ 48: 48](R/W) Set to provide the RX interpolator with the RX CDR load I
+            clock (rxcdrldi). deassert (low) to provide the interpolator with
+            the RX CDR load Q clock (rxcdrldq). This bit is ignored when
+            txcdrdfsm is asserted (high), which sets the RX interpolator
+            and CDR FSM to use the TX clock (txcdrld).
+
+            Internal:
+            (For initial testing, assert rxcdrfsmi, but if we have trouble
+            meeting timing, we can deassert this signal to provide some
+            additional timing margin from the last flops in the RX CDR FSM to
+            the flops interpolator.) */
+        uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_st_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_st_bcfg bdk_gsernx_lanex_rx_st_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_ST_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_ST_BCFG(unsigned long a, unsigned long b)
+{
+    /* Address is defined only on CN9XXX parts for module index a <= 7 and
+       lane index b <= 4; any other combination is a fatal CSR access error. */
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN9XXX) || (a > 7) || (b > 4))
+        __bdk_csr_fatal("GSERNX_LANEX_RX_ST_BCFG", 2, a, b, 0, 0);
+    return 0x87e090000ff0ll + ((a) & 0x7) * 0x1000000ll + ((b) & 0x7) * 0x10000ll;
+}
+
+/* Accessor attribute macros for GSERN(a)_LANE(b)_RX_ST_BCFG (RSL register, PF_BAR0). */
+#define typedef_BDK_GSERNX_LANEX_RX_ST_BCFG(a,b) bdk_gsernx_lanex_rx_st_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_ST_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_ST_BCFG(a,b) "GSERNX_LANEX_RX_ST_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_ST_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_ST_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_ST_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_phy2_bcfg
+ *
+ * GSER Lane SATA Control 2 Register
+ * Control settings for SATA PHY functionality.
+ */
+union bdk_gsernx_lanex_sata_phy2_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_sata_phy2_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t dev_align_count : 16; /**< [ 63: 48](R/W) Count in service clock cycles representing the duration of ALIGNp primitives
+ received at each speed from the far end Device during the rate negotiation
+ process.
+ Reset value is set to yield a 54.61ns duration. */
+ uint64_t reserved_43_47 : 5;
+ uint64_t cdr_lock_wait : 11; /**< [ 42: 32](R/W) Maximum wait count in service clock cycles required after detecting a received
+ signal or after completing a Receiver reset before the SATA aligner begins to
+ scan for 8B10B symbol alignment.
+ Reset value is set to 5us based on analysis of worst case SSC scenarios. */
+ uint64_t do_afeos_final : 4; /**< [ 31: 28](R/W) Set to one to allow AFEOS adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_SATA_PHY_BCFG[DO_AFEOS_ADPT] is set and the SATA lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA Gen1.
+ \<1\> = SATA Gen2.
+ \<2\> = SATA Gen3.
+ \<3\> = Reserved. */
+ uint64_t do_ctlelte_final : 4; /**< [ 27: 24](R/W) Set to one to allow CTLELTE adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_SATA_PHY_BCFG[DO_CTLELTE_ADPT] is set and the SATA lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA Gen1.
+ \<1\> = SATA Gen2.
+ \<2\> = SATA Gen3.
+ \<3\> = Reserved. */
+ uint64_t do_ctlez_final : 4; /**< [ 23: 20](R/W) Set to one to allow CTLEZ adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_SATA_PHY_BCFG[DO_CTLEZ_ADPT] is set and the SATA lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA Gen1.
+ \<1\> = SATA Gen2.
+ \<2\> = SATA Gen3.
+ \<3\> = Reserved. */
+ uint64_t do_ctle_final : 4; /**< [ 19: 16](R/W) Set to one to allow CTLE adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_SATA_PHY_BCFG[DO_CTLE_ADPT] is set and the SATA lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA Gen1.
+ \<1\> = SATA Gen2.
+ \<2\> = SATA Gen3.
+ \<3\> = Reserved. */
+ uint64_t do_dfe_final : 4; /**< [ 15: 12](R/W) Set to one to allow DFE adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_SATA_PHY_BCFG[DO_DFE_ADPT] is set and the SATA lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA Gen1.
+ \<1\> = SATA Gen2.
+ \<2\> = SATA Gen3.
+ \<3\> = Reserved. */
+ uint64_t do_vga_final : 4; /**< [ 11: 8](R/W) Set to one to allow VGA adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_SATA_PHY_BCFG[DO_VGA_ADPT] is set and the SATA lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA Gen1.
+ \<1\> = SATA Gen2.
+ \<2\> = SATA Gen3.
+ \<3\> = Reserved. */
+ uint64_t do_blwc_final : 4; /**< [ 7: 4](R/W) Set to one to allow BLWC adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_SATA_PHY_BCFG[DO_BLWC_ADPT] is set and the SATA lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA Gen1.
+ \<1\> = SATA Gen2.
+ \<2\> = SATA Gen3.
+ \<3\> = Reserved. */
+ uint64_t do_prevga_gn_final : 4; /**< [ 3: 0](R/W) Set to one to allow PREVGA_GN adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_SATA_PHY_BCFG[DO_PREVGA_GN_ADPT] is set and the SATA lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA Gen1.
+ \<1\> = SATA Gen2.
+ \<2\> = SATA Gen3.
+ \<3\> = Reserved. */
+#else /* Word 0 - Little Endian */
+ uint64_t do_prevga_gn_final : 4; /**< [ 3: 0](R/W) Set to one to allow PREVGA_GN adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_SATA_PHY_BCFG[DO_PREVGA_GN_ADPT] is set and the SATA lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA Gen1.
+ \<1\> = SATA Gen2.
+ \<2\> = SATA Gen3.
+ \<3\> = Reserved. */
+ uint64_t do_blwc_final : 4; /**< [ 7: 4](R/W) Set to one to allow BLWC adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_SATA_PHY_BCFG[DO_BLWC_ADPT] is set and the SATA lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA Gen1.
+ \<1\> = SATA Gen2.
+ \<2\> = SATA Gen3.
+ \<3\> = Reserved. */
+ uint64_t do_vga_final : 4; /**< [ 11: 8](R/W) Set to one to allow VGA adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_SATA_PHY_BCFG[DO_VGA_ADPT] is set and the SATA lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA Gen1.
+ \<1\> = SATA Gen2.
+ \<2\> = SATA Gen3.
+ \<3\> = Reserved. */
+ uint64_t do_dfe_final : 4; /**< [ 15: 12](R/W) Set to one to allow DFE adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_SATA_PHY_BCFG[DO_DFE_ADPT] is set and the SATA lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA Gen1.
+ \<1\> = SATA Gen2.
+ \<2\> = SATA Gen3.
+ \<3\> = Reserved. */
+ uint64_t do_ctle_final : 4; /**< [ 19: 16](R/W) Set to one to allow CTLE adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_SATA_PHY_BCFG[DO_CTLE_ADPT] is set and the SATA lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA Gen1.
+ \<1\> = SATA Gen2.
+ \<2\> = SATA Gen3.
+ \<3\> = Reserved. */
+ uint64_t do_ctlez_final : 4; /**< [ 23: 20](R/W) Set to one to allow CTLEZ adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_SATA_PHY_BCFG[DO_CTLEZ_ADPT] is set and the SATA lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA Gen1.
+ \<1\> = SATA Gen2.
+ \<2\> = SATA Gen3.
+ \<3\> = Reserved. */
+ uint64_t do_ctlelte_final : 4; /**< [ 27: 24](R/W) Set to one to allow CTLELTE adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_SATA_PHY_BCFG[DO_CTLELTE_ADPT] is set and the SATA lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA Gen1.
+ \<1\> = SATA Gen2.
+ \<2\> = SATA Gen3.
+ \<3\> = Reserved. */
+ uint64_t do_afeos_final : 4; /**< [ 31: 28](R/W) Set to one to allow AFEOS adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_SATA_PHY_BCFG[DO_AFEOS_ADPT] is set and the SATA lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA Gen1.
+ \<1\> = SATA Gen2.
+ \<2\> = SATA Gen3.
+ \<3\> = Reserved. */
+ uint64_t cdr_lock_wait : 11; /**< [ 42: 32](R/W) Maximum wait count in service clock cycles required after detecting a received
+ signal or after completing a Receiver reset before the SATA aligner begins to
+ scan for 8B10B symbol alignment.
+ Reset value is set to 5us based on analysis of worst case SSC scenarios. */
+ uint64_t reserved_43_47 : 5;
+ uint64_t dev_align_count : 16; /**< [ 63: 48](R/W) Count in service clock cycles representing the duration of ALIGNp primitives
+ received at each speed from the far end Device during the rate negotiation
+ process.
+ Reset value is set to yield a 54.61ns duration. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_sata_phy2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_phy2_bcfg bdk_gsernx_lanex_sata_phy2_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_SATA_PHY2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_PHY2_BCFG(unsigned long a, unsigned long b)
+{
+ /* CSR address for GSERN(a)_LANE(b)_SATA_PHY2_BCFG; valid only on CN9XXX
+    with module a in [0,7] and lane b in [0,4]. */
+ if (!CAVIUM_IS_MODEL(CAVIUM_CN9XXX) || (a > 7) || (b > 4))
+     __bdk_csr_fatal("GSERNX_LANEX_SATA_PHY2_BCFG", 2, a, b, 0, 0); /* does not return */
+ return 0x87e090002bb0ll + (a & 0x7) * 0x1000000ll + (b & 0x7) * 0x10000ll;
+}
+
+#define typedef_BDK_GSERNX_LANEX_SATA_PHY2_BCFG(a,b) bdk_gsernx_lanex_sata_phy2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_PHY2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_PHY2_BCFG(a,b) "GSERNX_LANEX_SATA_PHY2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_PHY2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_PHY2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_PHY2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_phy_bcfg
+ *
+ * GSER Lane SATA Control Register
+ * Control settings for SATA PHY functionality.
+ */
+union bdk_gsernx_lanex_sata_phy_bcfg
+{
+ uint64_t u; /* Raw 64-bit register value. */
+ /* Bit-field view. The big- and little-endian arms below declare the same
+    fields in opposite order so the in-register layout is identical on
+    either host endianness. */
+ struct bdk_gsernx_lanex_sata_phy_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t do_afeos_adpt : 4; /**< [ 63: 60](R/W) Set to one to allow the adaptation reset state machine to trigger AFEOS adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the SATA lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t do_ctlelte_adpt : 4; /**< [ 59: 56](R/W) Set to one to allow the adaptation reset state machine to trigger CTLELTE adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the SATA lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t do_ctlez_adpt : 4; /**< [ 55: 52](R/W) Set to one to allow the adaptation reset state machine to trigger CTLEZ adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the SATA lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t do_ctle_adpt : 4; /**< [ 51: 48](R/W) Set to one to allow the adaptation reset state machine to trigger CTLE adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the SATA lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t do_dfe_adpt : 4; /**< [ 47: 44](R/W) Set to one to allow the adaptation reset state machine to trigger DFE adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the SATA lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t do_vga_adpt : 4; /**< [ 43: 40](R/W) Set to one to allow the adaptation reset state machine to trigger VGA adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the SATA lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t do_blwc_adpt : 4; /**< [ 39: 36](R/W) Set to one to allow the adaptation reset state machine to trigger BLWC adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the SATA lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t do_prevga_gn_adpt : 4; /**< [ 35: 32](R/W) Set to one to allow the adaptation reset state machine to trigger PREVGA_GN adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the SATA lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t sata_dp_width_sel : 4; /**< [ 31: 28](R/W) Cleared to select a 20 bit and set to select a 40 bit Rx and Tx Data Path Width
+ in the PCS Lite Layer.
+ This value must only be changed while lite layer is in reset.
+ \<0\> = SATA gen1 (default 40 bits).
+ \<1\> = SATA gen2 (default 20 bits).
+ \<2\> = SATA gen3 (default 20 bits).
+ \<3\> = Reserved. */
+ uint64_t reserved_26_27 : 2;
+ uint64_t inhibit_power_change : 1; /**< [ 25: 25](R/W) Inhibit SATA power state changes in response to pX_partial, pX_slumber and
+ pX_phy_devslp inputs. */
+ uint64_t frc_unalgn_rxelecidle : 1; /**< [ 24: 24](R/W) Enables use of negated pX_sig_det to force the RX PHY into unalign state. */
+ uint64_t sata_bitstuff_tx_en : 4; /**< [ 23: 20](R/W) Set to duplicate the first 20 bits of TX data before
+ alignment & ordering for lower data rates. This could be PCS TX
+ data, PRBS data, or shallow-loopback RX data depending on mode.
+ This value must only be changed while lite layer is in reset.
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t sata_bitstuff_rx_drop_even : 4;/**< [ 19: 16](R/W) Tells the PCS lite receive datapath to drop even bits
+ in the vector of received data from the PMA when [SATA_BITSTUFF_RX_EN] is
+ set:
+ 0 = Drop bits 1, 3, 5, 7, ...
+ 1 = Drop bits 0, 2, 4, 6, ...
+
+ This bit is also used in the eye monitor to mask out the dropped
+ bits when counting mismatches.
+ This value must only be changed while lite layer is in reset.
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t sata_bitstuff_rx_en : 4; /**< [ 15: 12](R/W) Set to expect duplicates on the PMA RX data and drop bits after
+ alignment & ordering for PCS layer to consume. The drop ordering is
+ determined by [SATA_BITSTUFF_RX_DROP_EVEN]. This value must only be changed
+ while lite layer is in reset.
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t rx_squelch_on_idle : 1; /**< [ 11: 11](R/W) Receive data squelch on idle. When idle detection is signaled
+ to the SATA control with the negation of phy_sig_det, the parallel
+ receive data will be set to all 0's regardless of the output of the
+ CDR. */
+ uint64_t comma_thr : 7; /**< [ 10: 4](R/W) COMMA detection threshold. The receive aligner must see this many
+ COMMA characters at the same rotation before declaring symbol
+ alignment. */
+ uint64_t error_thr : 4; /**< [ 3: 0](R/W) Error threshold. The receive aligner must see this many COMMA
+ characters at a different rotation than currently in use before
+ declaring loss of symbol alignment. */
+#else /* Word 0 - Little Endian */
+ uint64_t error_thr : 4; /**< [ 3: 0](R/W) Error threshold. The receive aligner must see this many COMMA
+ characters at a different rotation than currently in use before
+ declaring loss of symbol alignment. */
+ uint64_t comma_thr : 7; /**< [ 10: 4](R/W) COMMA detection threshold. The receive aligner must see this many
+ COMMA characters at the same rotation before declaring symbol
+ alignment. */
+ uint64_t rx_squelch_on_idle : 1; /**< [ 11: 11](R/W) Receive data squelch on idle. When idle detection is signaled
+ to the SATA control with the negation of phy_sig_det, the parallel
+ receive data will be set to all 0's regardless of the output of the
+ CDR. */
+ uint64_t sata_bitstuff_rx_en : 4; /**< [ 15: 12](R/W) Set to expect duplicates on the PMA RX data and drop bits after
+ alignment & ordering for PCS layer to consume. The drop ordering is
+ determined by [SATA_BITSTUFF_RX_DROP_EVEN]. This value must only be changed
+ while lite layer is in reset.
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t sata_bitstuff_rx_drop_even : 4;/**< [ 19: 16](R/W) Tells the PCS lite receive datapath to drop even bits
+ in the vector of received data from the PMA when [SATA_BITSTUFF_RX_EN] is
+ set:
+ 0 = Drop bits 1, 3, 5, 7, ...
+ 1 = Drop bits 0, 2, 4, 6, ...
+
+ This bit is also used in the eye monitor to mask out the dropped
+ bits when counting mismatches.
+ This value must only be changed while lite layer is in reset.
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t sata_bitstuff_tx_en : 4; /**< [ 23: 20](R/W) Set to duplicate the first 20 bits of TX data before
+ alignment & ordering for lower data rates. This could be PCS TX
+ data, PRBS data, or shallow-loopback RX data depending on mode.
+ This value must only be changed while lite layer is in reset.
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t frc_unalgn_rxelecidle : 1; /**< [ 24: 24](R/W) Enables use of negated pX_sig_det to force the RX PHY into unalign state. */
+ uint64_t inhibit_power_change : 1; /**< [ 25: 25](R/W) Inhibit SATA power state changes in response to pX_partial, pX_slumber and
+ pX_phy_devslp inputs. */
+ uint64_t reserved_26_27 : 2;
+ uint64_t sata_dp_width_sel : 4; /**< [ 31: 28](R/W) Cleared to select a 20 bit and set to select a 40 bit Rx and Tx Data Path Width
+ in the PCS Lite Layer.
+ This value must only be changed while lite layer is in reset.
+ \<0\> = SATA gen1 (default 40 bits).
+ \<1\> = SATA gen2 (default 20 bits).
+ \<2\> = SATA gen3 (default 20 bits).
+ \<3\> = Reserved. */
+ uint64_t do_prevga_gn_adpt : 4; /**< [ 35: 32](R/W) Set to one to allow the adaptation reset state machine to trigger PREVGA_GN adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the SATA lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t do_blwc_adpt : 4; /**< [ 39: 36](R/W) Set to one to allow the adaptation reset state machine to trigger BLWC adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the SATA lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t do_vga_adpt : 4; /**< [ 43: 40](R/W) Set to one to allow the adaptation reset state machine to trigger VGA adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the SATA lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t do_dfe_adpt : 4; /**< [ 47: 44](R/W) Set to one to allow the adaptation reset state machine to trigger DFE adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the SATA lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t do_ctle_adpt : 4; /**< [ 51: 48](R/W) Set to one to allow the adaptation reset state machine to trigger CTLE adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the SATA lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t do_ctlez_adpt : 4; /**< [ 55: 52](R/W) Set to one to allow the adaptation reset state machine to trigger CTLEZ adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the SATA lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t do_ctlelte_adpt : 4; /**< [ 59: 56](R/W) Set to one to allow the adaptation reset state machine to trigger CTLELTE adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the SATA lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t do_afeos_adpt : 4; /**< [ 63: 60](R/W) Set to one to allow the adaptation reset state machine to trigger AFEOS adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the SATA lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_sata_phy_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_phy_bcfg bdk_gsernx_lanex_sata_phy_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_SATA_PHY_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_PHY_BCFG(unsigned long a, unsigned long b)
+{
+ /* CSR address for GSERN(a)_LANE(b)_SATA_PHY_BCFG; valid only on CN9XXX
+    with module a in [0,7] and lane b in [0,4]. */
+ if (!CAVIUM_IS_MODEL(CAVIUM_CN9XXX) || (a > 7) || (b > 4))
+     __bdk_csr_fatal("GSERNX_LANEX_SATA_PHY_BCFG", 2, a, b, 0, 0); /* does not return */
+ return 0x87e090002b30ll + (a & 0x7) * 0x1000000ll + (b & 0x7) * 0x10000ll;
+}
+
+#define typedef_BDK_GSERNX_LANEX_SATA_PHY_BCFG(a,b) bdk_gsernx_lanex_sata_phy_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_PHY_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_PHY_BCFG(a,b) "GSERNX_LANEX_SATA_PHY_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_PHY_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_PHY_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_PHY_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_phy_bsts
+ *
+ * GSER Lane SATA PCS Status Register
+ * Error Status for SATA PHY functionality.
+ */
+union bdk_gsernx_lanex_sata_phy_bsts
+{
+ uint64_t u; /* Raw 64-bit register value. */
+ /* Bit-field view; the two endianness arms declare the identical layout. */
+ struct bdk_gsernx_lanex_sata_phy_bsts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t align_error : 1; /**< [ 0: 0](R/W1C/H) Alignment error (write 1 to clear).
+ The receive 8B10B aligner has detected an error. An error is
+ declared if GSERN()_LANE()_SATA_PHY_BCFG[ERROR_THR]
+ COMMA characters are detected at a 10 bit rotation that does not match
+ the active rotation. The COMMAs do not have to all be at the same rotation. */
+#else /* Word 0 - Little Endian */
+ uint64_t align_error : 1; /**< [ 0: 0](R/W1C/H) Alignment error (write 1 to clear).
+ The receive 8B10B aligner has detected an error. An error is
+ declared if GSERN()_LANE()_SATA_PHY_BCFG[ERROR_THR]
+ COMMA characters are detected at a 10 bit rotation that does not match
+ the active rotation. The COMMAs do not have to all be at the same rotation. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_sata_phy_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_phy_bsts bdk_gsernx_lanex_sata_phy_bsts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_SATA_PHY_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_PHY_BSTS(unsigned long a, unsigned long b)
+{
+ /* CSR address for GSERN(a)_LANE(b)_SATA_PHY_BSTS; valid only on CN9XXX
+    with module a in [0,7] and lane b in [0,4]. */
+ if (!CAVIUM_IS_MODEL(CAVIUM_CN9XXX) || (a > 7) || (b > 4))
+     __bdk_csr_fatal("GSERNX_LANEX_SATA_PHY_BSTS", 2, a, b, 0, 0); /* does not return */
+ return 0x87e090002fb0ll + (a & 0x7) * 0x1000000ll + (b & 0x7) * 0x10000ll;
+}
+
+#define typedef_BDK_GSERNX_LANEX_SATA_PHY_BSTS(a,b) bdk_gsernx_lanex_sata_phy_bsts_t
+#define bustype_BDK_GSERNX_LANEX_SATA_PHY_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_PHY_BSTS(a,b) "GSERNX_LANEX_SATA_PHY_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_SATA_PHY_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_PHY_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_PHY_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_rxeq1_1_bcfg
+ *
+ * GSER Lane SATA Gen1 RX Equalizer Control Register 1
+ * Parameters controlling the custom receiver equalization during SATA gen1 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'SATA'.
+ */
+union bdk_gsernx_lanex_sata_rxeq1_1_bcfg
+{
+ uint64_t u; /* Raw 64-bit register value. */
+ /* Bit-field view; the two endianness arms declare the identical layout. */
+ struct bdk_gsernx_lanex_sata_rxeq1_1_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_61_63 : 3;
+ uint64_t sata_g1_blwc_deadband : 12; /**< [ 60: 49](R/W) BLWC adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t sata_g1_en_qac_e : 1; /**< [ 48: 48](R/W) Enable use of QAC digital filter in the doute datapath. If the
+ enable is deasserted, the filter will output the [QAC_REFSET]
+ value. If it is asserted, it will determine the current phase and use
+ [QAC_REFSET] & [QAC_CNTSET] to output a correction value. Set prior to
+ exiting reset. */
+ uint64_t sata_g1_en_qac_q : 1; /**< [ 47: 47](R/W) Enable use of QAC digital filter in the doutq datapath. If the
+ enable is deasserted, the filter will output the [QAC_REFSET]
+ value. If it is asserted, it will determine the current phase and use
+ [QAC_REFSET] & [QAC_CNTSET] to output a correction value. Set prior to
+ exiting reset. */
+ uint64_t sata_g1_cdr_qac_selq : 1; /**< [ 46: 46](R/W) Enable use of the QAC corrector for the q-path when the reset state
+ machine timing allows it. */
+ uint64_t sata_g1_cdr_qac_sele : 1; /**< [ 45: 45](R/W) Enable use of the QAC corrector for the e-path when the reset state
+ machine timing allows it. */
+ uint64_t sata_g1_eoffs : 7; /**< [ 44: 38](R/W) E interp state offset. */
+ uint64_t sata_g1_qoffs : 7; /**< [ 37: 31](R/W) Q interp state offset. */
+ uint64_t sata_g1_inc2 : 6; /**< [ 30: 25](R/W) 2nd order loop inc. */
+ uint64_t sata_g1_inc1 : 6; /**< [ 24: 19](R/W) 1st order loop inc. */
+ uint64_t sata_g1_erc : 4; /**< [ 18: 15](R/W) Interpolator edge-rate control. This control is shared between all
+ interpolators in the lane. */
+ uint64_t sata_g1_rx_dcc_lowf : 1; /**< [ 14: 14](R/W) Set to put the RX duty-cycle corrector (DCC) into low frequency mode. Set to 1
+ when operating at data rates below 4 Gbaud. */
+ uint64_t sata_g1_ctle_lte_zero_ovrd_en : 1;/**< [ 13: 13](R/W) CTLE LTE zero frequency override enable.
+ By default, the override should be enabled; otherwise, CTLE_LTE_ZERO
+ will be set equal to CTLE_ZERO within the RX adaptation FSM. */
+ uint64_t sata_g1_ctle_lte_zero_ovrd : 4;/**< [ 12: 9](R/W) CTLE LTE zero frequency override value. */
+ uint64_t reserved_0_8 : 9;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_8 : 9;
+ uint64_t sata_g1_ctle_lte_zero_ovrd : 4;/**< [ 12: 9](R/W) CTLE LTE zero frequency override value. */
+ uint64_t sata_g1_ctle_lte_zero_ovrd_en : 1;/**< [ 13: 13](R/W) CTLE LTE zero frequency override enable.
+ By default, the override should be enabled; otherwise, CTLE_LTE_ZERO
+ will be set equal to CTLE_ZERO within the RX adaptation FSM. */
+ uint64_t sata_g1_rx_dcc_lowf : 1; /**< [ 14: 14](R/W) Set to put the RX duty-cycle corrector (DCC) into low frequency mode. Set to 1
+ when operating at data rates below 4 Gbaud. */
+ uint64_t sata_g1_erc : 4; /**< [ 18: 15](R/W) Interpolator edge-rate control. This control is shared between all
+ interpolators in the lane. */
+ uint64_t sata_g1_inc1 : 6; /**< [ 24: 19](R/W) 1st order loop inc. */
+ uint64_t sata_g1_inc2 : 6; /**< [ 30: 25](R/W) 2nd order loop inc. */
+ uint64_t sata_g1_qoffs : 7; /**< [ 37: 31](R/W) Q interp state offset. */
+ uint64_t sata_g1_eoffs : 7; /**< [ 44: 38](R/W) E interp state offset. */
+ uint64_t sata_g1_cdr_qac_sele : 1; /**< [ 45: 45](R/W) Enable use of the QAC corrector for the e-path when the reset state
+ machine timing allows it. */
+ uint64_t sata_g1_cdr_qac_selq : 1; /**< [ 46: 46](R/W) Enable use of the QAC corrector for the q-path when the reset state
+ machine timing allows it. */
+ uint64_t sata_g1_en_qac_q : 1; /**< [ 47: 47](R/W) Enable use of QAC digital filter in the doutq datapath. If the
+ enable is deasserted, the filter will output the [QAC_REFSET]
+ value. If it is asserted, it will determine the current phase and use
+ [QAC_REFSET] & [QAC_CNTSET] to output a correction value. Set prior to
+ exiting reset. */
+ uint64_t sata_g1_en_qac_e : 1; /**< [ 48: 48](R/W) Enable use of QAC digital filter in the doute datapath. If the
+ enable is deasserted, the filter will output the [QAC_REFSET]
+ value. If it is asserted, it will determine the current phase and use
+ [QAC_REFSET] & [QAC_CNTSET] to output a correction value. Set prior to
+ exiting reset. */
+ uint64_t sata_g1_blwc_deadband : 12; /**< [ 60: 49](R/W) BLWC adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t reserved_61_63 : 3;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_sata_rxeq1_1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_rxeq1_1_bcfg bdk_gsernx_lanex_sata_rxeq1_1_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ1_1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ1_1_BCFG(unsigned long a, unsigned long b)
+{
+ /* CSR address for GSERN(a)_LANE(b)_SATA_RXEQ1_1_BCFG; valid only on CN9XXX
+    with module a in [0,7] and lane b in [0,4]. */
+ if (!CAVIUM_IS_MODEL(CAVIUM_CN9XXX) || (a > 7) || (b > 4))
+     __bdk_csr_fatal("GSERNX_LANEX_SATA_RXEQ1_1_BCFG", 2, a, b, 0, 0); /* does not return */
+ return 0x87e090002e00ll + (a & 0x7) * 0x1000000ll + (b & 0x7) * 0x10000ll;
+}
+
+#define typedef_BDK_GSERNX_LANEX_SATA_RXEQ1_1_BCFG(a,b) bdk_gsernx_lanex_sata_rxeq1_1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_RXEQ1_1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_RXEQ1_1_BCFG(a,b) "GSERNX_LANEX_SATA_RXEQ1_1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_RXEQ1_1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_RXEQ1_1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_RXEQ1_1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_rxeq1_2_bcfg
+ *
+ * GSER Lane SATA Gen1 RX Equalizer Control Register 2
+ * Parameters controlling the custom receiver equalization during SATA gen1 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'SATA'.
+ */
+union bdk_gsernx_lanex_sata_rxeq1_2_bcfg
+{
+ uint64_t u; /* Raw 64-bit register value. */
+ /* Bit-field view; the two endianness arms declare the identical layout. */
+ struct bdk_gsernx_lanex_sata_rxeq1_2_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t sata_g1_prevga_gn_subrate_fin : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g1_prevga_gn_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g1_blwc_subrate_init : 16;/**< [ 31: 16](R/W) Subrate counter initial value. Sets the initial value for the LMS update interval, if
+ subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g1_blwc_subrate_final : 16;/**< [ 15: 0](R/W) Subrate counter final value. Sets the ending value for the LMS update interval, if subrate
+ gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled.
+ Subrate counter final value. */
+#else /* Word 0 - Little Endian */
+ uint64_t sata_g1_blwc_subrate_final : 16;/**< [ 15: 0](R/W) Subrate counter final value. Sets the ending value for the LMS update interval, if subrate
+ gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled.
+ Subrate counter final value. */
+ uint64_t sata_g1_blwc_subrate_init : 16;/**< [ 31: 16](R/W) Subrate counter initial value. Sets the initial value for the LMS update interval, if
+ subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g1_prevga_gn_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g1_prevga_gn_subrate_fin : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_sata_rxeq1_2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_rxeq1_2_bcfg bdk_gsernx_lanex_sata_rxeq1_2_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ1_2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ1_2_BCFG(unsigned long a, unsigned long b)
+{
+ /* CSR address for GSERN(a)_LANE(b)_SATA_RXEQ1_2_BCFG; valid only on CN9XXX
+    with module a in [0,7] and lane b in [0,4]. */
+ if (!CAVIUM_IS_MODEL(CAVIUM_CN9XXX) || (a > 7) || (b > 4))
+     __bdk_csr_fatal("GSERNX_LANEX_SATA_RXEQ1_2_BCFG", 2, a, b, 0, 0); /* does not return */
+ return 0x87e090002e10ll + (a & 0x7) * 0x1000000ll + (b & 0x7) * 0x10000ll;
+}
+
+#define typedef_BDK_GSERNX_LANEX_SATA_RXEQ1_2_BCFG(a,b) bdk_gsernx_lanex_sata_rxeq1_2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_RXEQ1_2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_RXEQ1_2_BCFG(a,b) "GSERNX_LANEX_SATA_RXEQ1_2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_RXEQ1_2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_RXEQ1_2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_RXEQ1_2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_rxeq1_3_bcfg
+ *
+ * GSER Lane SATA Gen1 RX Equalizer Control Register 3
+ * Parameters controlling the custom receiver equalization during SATA Gen1 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'SATA'.
+ */
+union bdk_gsernx_lanex_sata_rxeq1_3_bcfg
+{
+    uint64_t u; /* Whole register as a single 64-bit value. */
+    struct bdk_gsernx_lanex_sata_rxeq1_3_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t sata_g1_afeos_subrate_final : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+                                                                 if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t sata_g1_afeos_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+                                                                 interval, if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t sata_g1_subrate_final : 16; /**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+                                                                 if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t sata_g1_subrate_init : 16; /**< [ 15:  0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+                                                                 interval, if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+#else /* Word 0 - Little Endian */
+        uint64_t sata_g1_subrate_init : 16; /**< [ 15:  0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+                                                                 interval, if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t sata_g1_subrate_final : 16; /**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+                                                                 if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t sata_g1_afeos_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+                                                                 interval, if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t sata_g1_afeos_subrate_final : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+                                                                 if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+#endif /* Word 0 - End */
+    } s; /* Generic bit-field layout; the chip-specific "cn" view aliases it. */
+    /* struct bdk_gsernx_lanex_sata_rxeq1_3_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_rxeq1_3_bcfg bdk_gsernx_lanex_sata_rxeq1_3_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ1_3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ1_3_BCFG(unsigned long a, unsigned long b)
+{
+    /* Address of GSERN(a)_LANE(b)_SATA_RXEQ1_3_BCFG. Valid only on CN9XXX
+       with a in [0..7] and b in [0..4]; anything else is reported as a fatal
+       CSR lookup error (__bdk_csr_fatal is assumed noreturn, as the generated
+       code falls through after calling it). */
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN9XXX) || (a > 7) || (b > 4))
+        __bdk_csr_fatal("GSERNX_LANEX_SATA_RXEQ1_3_BCFG", 2, a, b, 0, 0);
+    uint64_t module_offs = 0x1000000ll * (a & 0x7); /* per-GSERN-module stride */
+    uint64_t lane_offs   = 0x10000ll * (b & 0x7);   /* per-lane stride */
+    return 0x87e090002e20ll + module_offs + lane_offs;
+}
+
+/* Accessor helper macros consumed by the generic BDK_CSR_* wrappers. */
+#define typedef_BDK_GSERNX_LANEX_SATA_RXEQ1_3_BCFG(a,b) bdk_gsernx_lanex_sata_rxeq1_3_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_RXEQ1_3_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_RXEQ1_3_BCFG(a,b) "GSERNX_LANEX_SATA_RXEQ1_3_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_RXEQ1_3_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_RXEQ1_3_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_RXEQ1_3_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_rxeq2_1_bcfg
+ *
+ * GSER Lane SATA Gen2 RX Equalizer Control Register 1
+ * Parameters controlling the custom receiver equalization during SATA gen2 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'SATA'.
+ */
+union bdk_gsernx_lanex_sata_rxeq2_1_bcfg
+{
+    uint64_t u; /* Whole register as a single 64-bit value. */
+    struct bdk_gsernx_lanex_sata_rxeq2_1_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_61_63 : 3;
+        uint64_t sata_g2_blwc_deadband : 12; /**< [ 60: 49](R/W) BLWC adaptation deadband settings.
+                                                                 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+        uint64_t sata_g2_en_qac_e : 1;   /**< [ 48: 48](R/W) Enable use of QAC digital filter in the doute datapath. If the
+                                                                 enable is deasserted, the filter will output the [QAC_REFSET]
+                                                                 value. If its asserted, it will determine the current phase and use
+                                                                 [QAC_REFSET] & [QAC_CNTSET] to output a correction value. Set prior to
+                                                                 exiting reset. */
+        uint64_t sata_g2_en_qac_q : 1;   /**< [ 47: 47](R/W) Enable use of QAC digital filter in the doutq datapath. If the
+                                                                 enable is deasserted, the filter will output the [QAC_REFSET]
+                                                                 value. If its asserted, it will determine the current phase and use
+                                                                 [QAC_REFSET] & [QAC_CNTSET] to output a correction value. Set prior to
+                                                                 exiting reset. */
+        uint64_t sata_g2_cdr_qac_selq : 1; /**< [ 46: 46](R/W) Enable use of the QAC corrector for the q-path when the reset state
+                                                                 machine timing allows it. */
+        uint64_t sata_g2_cdr_qac_sele : 1; /**< [ 45: 45](R/W) Enable use of the QAC corrector for the e-path when the reset state
+                                                                 machine timing allows it. */
+        uint64_t sata_g2_eoffs : 7;      /**< [ 44: 38](R/W) E interp state offset. */
+        uint64_t sata_g2_qoffs : 7;      /**< [ 37: 31](R/W) Q interp state offset. */
+        uint64_t sata_g2_inc2 : 6;       /**< [ 30: 25](R/W) 2nd order loop inc. */
+        uint64_t sata_g2_inc1 : 6;       /**< [ 24: 19](R/W) 1st order loop inc. */
+        uint64_t sata_g2_erc : 4;        /**< [ 18: 15](R/W) Interpolator edge-rate control. This control is shared between all
+                                                                 interpolators in the lane. */
+        uint64_t sata_g2_rx_dcc_lowf : 1; /**< [ 14: 14](R/W) Set to put the RX duty-cycle corrector (DCC) into low frequency mode. Set to 1
+                                                                 when operating at data rates below 4 Gbaud. */
+        uint64_t sata_g2_ctle_lte_zero_ovrd_en : 1;/**< [ 13: 13](R/W) CTLE LTE zero frequency override enable.
+                                                                 By default, the override should be enabled; otherwise, CTLE_LTE_ZERO
+                                                                 will be set equal to CTLE_ZERO within the RX adaptation FSM. */
+        uint64_t sata_g2_ctle_lte_zero_ovrd : 4;/**< [ 12:  9](R/W) CTLE LTE zero frequency override value. */
+        uint64_t reserved_0_8 : 9;
+#else /* Word 0 - Little Endian */
+        uint64_t reserved_0_8 : 9;
+        uint64_t sata_g2_ctle_lte_zero_ovrd : 4;/**< [ 12:  9](R/W) CTLE LTE zero frequency override value. */
+        uint64_t sata_g2_ctle_lte_zero_ovrd_en : 1;/**< [ 13: 13](R/W) CTLE LTE zero frequency override enable.
+                                                                 By default, the override should be enabled; otherwise, CTLE_LTE_ZERO
+                                                                 will be set equal to CTLE_ZERO within the RX adaptation FSM. */
+        uint64_t sata_g2_rx_dcc_lowf : 1; /**< [ 14: 14](R/W) Set to put the RX duty-cycle corrector (DCC) into low frequency mode. Set to 1
+                                                                 when operating at data rates below 4 Gbaud. */
+        uint64_t sata_g2_erc : 4;        /**< [ 18: 15](R/W) Interpolator edge-rate control. This control is shared between all
+                                                                 interpolators in the lane. */
+        uint64_t sata_g2_inc1 : 6;       /**< [ 24: 19](R/W) 1st order loop inc. */
+        uint64_t sata_g2_inc2 : 6;       /**< [ 30: 25](R/W) 2nd order loop inc. */
+        uint64_t sata_g2_qoffs : 7;      /**< [ 37: 31](R/W) Q interp state offset. */
+        uint64_t sata_g2_eoffs : 7;      /**< [ 44: 38](R/W) E interp state offset. */
+        uint64_t sata_g2_cdr_qac_sele : 1; /**< [ 45: 45](R/W) Enable use of the QAC corrector for the e-path when the reset state
+                                                                 machine timing allows it. */
+        uint64_t sata_g2_cdr_qac_selq : 1; /**< [ 46: 46](R/W) Enable use of the QAC corrector for the q-path when the reset state
+                                                                 machine timing allows it. */
+        uint64_t sata_g2_en_qac_q : 1;   /**< [ 47: 47](R/W) Enable use of QAC digital filter in the doutq datapath. If the
+                                                                 enable is deasserted, the filter will output the [QAC_REFSET]
+                                                                 value. If its asserted, it will determine the current phase and use
+                                                                 [QAC_REFSET] & [QAC_CNTSET] to output a correction value. Set prior to
+                                                                 exiting reset. */
+        uint64_t sata_g2_en_qac_e : 1;   /**< [ 48: 48](R/W) Enable use of QAC digital filter in the doute datapath. If the
+                                                                 enable is deasserted, the filter will output the [QAC_REFSET]
+                                                                 value. If its asserted, it will determine the current phase and use
+                                                                 [QAC_REFSET] & [QAC_CNTSET] to output a correction value. Set prior to
+                                                                 exiting reset. */
+        uint64_t sata_g2_blwc_deadband : 12; /**< [ 60: 49](R/W) BLWC adaptation deadband settings.
+                                                                 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+        uint64_t reserved_61_63 : 3;
+#endif /* Word 0 - End */
+    } s; /* Generic bit-field layout; the chip-specific "cn" view aliases it. */
+    /* struct bdk_gsernx_lanex_sata_rxeq2_1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_rxeq2_1_bcfg bdk_gsernx_lanex_sata_rxeq2_1_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ2_1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ2_1_BCFG(unsigned long a, unsigned long b)
+{
+    /* Address of GSERN(a)_LANE(b)_SATA_RXEQ2_1_BCFG. Valid only on CN9XXX
+       with a in [0..7] and b in [0..4]; anything else is reported as a fatal
+       CSR lookup error (__bdk_csr_fatal is assumed noreturn, as the generated
+       code falls through after calling it). */
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN9XXX) || (a > 7) || (b > 4))
+        __bdk_csr_fatal("GSERNX_LANEX_SATA_RXEQ2_1_BCFG", 2, a, b, 0, 0);
+    uint64_t module_offs = 0x1000000ll * (a & 0x7); /* per-GSERN-module stride */
+    uint64_t lane_offs   = 0x10000ll * (b & 0x7);   /* per-lane stride */
+    return 0x87e090002e30ll + module_offs + lane_offs;
+}
+
+/* Accessor helper macros consumed by the generic BDK_CSR_* wrappers. */
+#define typedef_BDK_GSERNX_LANEX_SATA_RXEQ2_1_BCFG(a,b) bdk_gsernx_lanex_sata_rxeq2_1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_RXEQ2_1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_RXEQ2_1_BCFG(a,b) "GSERNX_LANEX_SATA_RXEQ2_1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_RXEQ2_1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_RXEQ2_1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_RXEQ2_1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_rxeq2_2_bcfg
+ *
+ * GSER Lane SATA Gen2 RX Equalizer Control Register 2
+ * Parameters controlling the custom receiver equalization during SATA gen2 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'SATA'.
+ */
+union bdk_gsernx_lanex_sata_rxeq2_2_bcfg
+{
+    uint64_t u; /* Whole register as a single 64-bit value. */
+    struct bdk_gsernx_lanex_sata_rxeq2_2_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t sata_g2_prevga_gn_subrate_fin : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+                                                                 if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled. */
+        uint64_t sata_g2_prevga_gn_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+                                                                 interval, if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t sata_g2_blwc_subrate_init : 16;/**< [ 31: 16](R/W) Subrate counter initial value. Sets the initial value for the LMS update interval, if
+                                                                 subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t sata_g2_blwc_subrate_final : 16;/**< [ 15:  0](R/W) Subrate counter final value. Sets the ending value for the LMS update interval, if subrate
+                                                                 gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled.
+                                                                 Subrate counter final value. */
+#else /* Word 0 - Little Endian */
+        uint64_t sata_g2_blwc_subrate_final : 16;/**< [ 15:  0](R/W) Subrate counter final value. Sets the ending value for the LMS update interval, if subrate
+                                                                 gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled.
+                                                                 Subrate counter final value. */
+        uint64_t sata_g2_blwc_subrate_init : 16;/**< [ 31: 16](R/W) Subrate counter initial value. Sets the initial value for the LMS update interval, if
+                                                                 subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t sata_g2_prevga_gn_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+                                                                 interval, if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t sata_g2_prevga_gn_subrate_fin : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+                                                                 if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled. */
+#endif /* Word 0 - End */
+    } s; /* Generic bit-field layout; the chip-specific "cn" view aliases it. */
+    /* struct bdk_gsernx_lanex_sata_rxeq2_2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_rxeq2_2_bcfg bdk_gsernx_lanex_sata_rxeq2_2_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ2_2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ2_2_BCFG(unsigned long a, unsigned long b)
+{
+    /* Address of GSERN(a)_LANE(b)_SATA_RXEQ2_2_BCFG. Valid only on CN9XXX
+       with a in [0..7] and b in [0..4]; anything else is reported as a fatal
+       CSR lookup error (__bdk_csr_fatal is assumed noreturn, as the generated
+       code falls through after calling it). */
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN9XXX) || (a > 7) || (b > 4))
+        __bdk_csr_fatal("GSERNX_LANEX_SATA_RXEQ2_2_BCFG", 2, a, b, 0, 0);
+    uint64_t module_offs = 0x1000000ll * (a & 0x7); /* per-GSERN-module stride */
+    uint64_t lane_offs   = 0x10000ll * (b & 0x7);   /* per-lane stride */
+    return 0x87e090002e40ll + module_offs + lane_offs;
+}
+
+/* Accessor helper macros consumed by the generic BDK_CSR_* wrappers. */
+#define typedef_BDK_GSERNX_LANEX_SATA_RXEQ2_2_BCFG(a,b) bdk_gsernx_lanex_sata_rxeq2_2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_RXEQ2_2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_RXEQ2_2_BCFG(a,b) "GSERNX_LANEX_SATA_RXEQ2_2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_RXEQ2_2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_RXEQ2_2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_RXEQ2_2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_rxeq2_3_bcfg
+ *
+ * GSER Lane SATA Gen2 RX Equalizer Control Register 3
+ * Parameters controlling the custom receiver equalization during SATA Gen2 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'SATA'.
+ */
+union bdk_gsernx_lanex_sata_rxeq2_3_bcfg
+{
+    uint64_t u; /* Whole register as a single 64-bit value. */
+    struct bdk_gsernx_lanex_sata_rxeq2_3_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t sata_g2_afeos_subrate_final : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+                                                                 if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t sata_g2_afeos_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+                                                                 interval, if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t sata_g2_subrate_final : 16; /**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+                                                                 if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t sata_g2_subrate_init : 16;  /**< [ 15:  0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+                                                                 interval, if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+#else /* Word 0 - Little Endian */
+        uint64_t sata_g2_subrate_init : 16;  /**< [ 15:  0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+                                                                 interval, if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t sata_g2_subrate_final : 16; /**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+                                                                 if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t sata_g2_afeos_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+                                                                 interval, if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t sata_g2_afeos_subrate_final : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+                                                                 if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+#endif /* Word 0 - End */
+    } s; /* Generic bit-field layout; the chip-specific "cn" view aliases it. */
+    /* struct bdk_gsernx_lanex_sata_rxeq2_3_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_rxeq2_3_bcfg bdk_gsernx_lanex_sata_rxeq2_3_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ2_3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ2_3_BCFG(unsigned long a, unsigned long b)
+{
+    /* Address of GSERN(a)_LANE(b)_SATA_RXEQ2_3_BCFG. Valid only on CN9XXX
+       with a in [0..7] and b in [0..4]; anything else is reported as a fatal
+       CSR lookup error (__bdk_csr_fatal is assumed noreturn, as the generated
+       code falls through after calling it). */
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN9XXX) || (a > 7) || (b > 4))
+        __bdk_csr_fatal("GSERNX_LANEX_SATA_RXEQ2_3_BCFG", 2, a, b, 0, 0);
+    uint64_t module_offs = 0x1000000ll * (a & 0x7); /* per-GSERN-module stride */
+    uint64_t lane_offs   = 0x10000ll * (b & 0x7);   /* per-lane stride */
+    return 0x87e090002e50ll + module_offs + lane_offs;
+}
+
+/* Accessor helper macros consumed by the generic BDK_CSR_* wrappers. */
+#define typedef_BDK_GSERNX_LANEX_SATA_RXEQ2_3_BCFG(a,b) bdk_gsernx_lanex_sata_rxeq2_3_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_RXEQ2_3_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_RXEQ2_3_BCFG(a,b) "GSERNX_LANEX_SATA_RXEQ2_3_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_RXEQ2_3_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_RXEQ2_3_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_RXEQ2_3_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_rxeq3_1_bcfg
+ *
+ * GSER Lane SATA Gen3 RX Equalizer Control Register 1
+ * Parameters controlling the custom receiver equalization during SATA gen3 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'SATA'.
+ */
+union bdk_gsernx_lanex_sata_rxeq3_1_bcfg
+{
+    uint64_t u; /* Whole register as a single 64-bit value. */
+    struct bdk_gsernx_lanex_sata_rxeq3_1_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_61_63 : 3;
+        uint64_t sata_g3_blwc_deadband : 12; /**< [ 60: 49](R/W) BLWC adaptation deadband settings.
+                                                                 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+        uint64_t sata_g3_en_qac_e : 1;   /**< [ 48: 48](R/W) Enable use of QAC digital filter in the doute datapath. If the
+                                                                 enable is deasserted, the filter will output the [QAC_REFSET]
+                                                                 value. If its asserted, it will determine the current phase and use
+                                                                 [QAC_REFSET] & [QAC_CNTSET] to output a correction value. Set prior to
+                                                                 exiting reset. */
+        uint64_t sata_g3_en_qac_q : 1;   /**< [ 47: 47](R/W) Enable use of QAC digital filter in the doutq datapath. If the
+                                                                 enable is deasserted, the filter will output the [QAC_REFSET]
+                                                                 value. If its asserted, it will determine the current phase and use
+                                                                 [QAC_REFSET] & [QAC_CNTSET] to output a correction value. Set prior to
+                                                                 exiting reset. */
+        uint64_t sata_g3_cdr_qac_selq : 1; /**< [ 46: 46](R/W) Enable use of the QAC corrector for the q-path when the reset state
+                                                                 machine timing allows it. */
+        uint64_t sata_g3_cdr_qac_sele : 1; /**< [ 45: 45](R/W) Enable use of the QAC corrector for the e-path when the reset state
+                                                                 machine timing allows it. */
+        uint64_t sata_g3_eoffs : 7;      /**< [ 44: 38](R/W) E interp state offset. */
+        uint64_t sata_g3_qoffs : 7;      /**< [ 37: 31](R/W) Q interp state offset. */
+        uint64_t sata_g3_inc2 : 6;       /**< [ 30: 25](R/W) 2nd order loop inc. */
+        uint64_t sata_g3_inc1 : 6;       /**< [ 24: 19](R/W) 1st order loop inc. */
+        uint64_t sata_g3_erc : 4;        /**< [ 18: 15](R/W) Interpolator edge-rate control. This control is shared between all
+                                                                 interpolators in the lane. */
+        uint64_t sata_g3_rx_dcc_lowf : 1; /**< [ 14: 14](R/W) Set to put the RX duty-cycle corrector (DCC) into low frequency mode. Set to 1
+                                                                 when operating at data rates below 4 Gbaud. */
+        uint64_t sata_g3_ctle_lte_zero_ovrd_en : 1;/**< [ 13: 13](R/W) CTLE LTE zero frequency override enable.
+                                                                 By default, the override should be enabled; otherwise, CTLE_LTE_ZERO
+                                                                 will be set equal to CTLE_ZERO within the RX adaptation FSM. */
+        uint64_t sata_g3_ctle_lte_zero_ovrd : 4;/**< [ 12:  9](R/W) CTLE LTE zero frequency override value. */
+        uint64_t reserved_0_8 : 9;
+#else /* Word 0 - Little Endian */
+        uint64_t reserved_0_8 : 9;
+        uint64_t sata_g3_ctle_lte_zero_ovrd : 4;/**< [ 12:  9](R/W) CTLE LTE zero frequency override value. */
+        uint64_t sata_g3_ctle_lte_zero_ovrd_en : 1;/**< [ 13: 13](R/W) CTLE LTE zero frequency override enable.
+                                                                 By default, the override should be enabled; otherwise, CTLE_LTE_ZERO
+                                                                 will be set equal to CTLE_ZERO within the RX adaptation FSM. */
+        uint64_t sata_g3_rx_dcc_lowf : 1; /**< [ 14: 14](R/W) Set to put the RX duty-cycle corrector (DCC) into low frequency mode. Set to 1
+                                                                 when operating at data rates below 4 Gbaud. */
+        uint64_t sata_g3_erc : 4;        /**< [ 18: 15](R/W) Interpolator edge-rate control. This control is shared between all
+                                                                 interpolators in the lane. */
+        uint64_t sata_g3_inc1 : 6;       /**< [ 24: 19](R/W) 1st order loop inc. */
+        uint64_t sata_g3_inc2 : 6;       /**< [ 30: 25](R/W) 2nd order loop inc. */
+        uint64_t sata_g3_qoffs : 7;      /**< [ 37: 31](R/W) Q interp state offset. */
+        uint64_t sata_g3_eoffs : 7;      /**< [ 44: 38](R/W) E interp state offset. */
+        uint64_t sata_g3_cdr_qac_sele : 1; /**< [ 45: 45](R/W) Enable use of the QAC corrector for the e-path when the reset state
+                                                                 machine timing allows it. */
+        uint64_t sata_g3_cdr_qac_selq : 1; /**< [ 46: 46](R/W) Enable use of the QAC corrector for the q-path when the reset state
+                                                                 machine timing allows it. */
+        uint64_t sata_g3_en_qac_q : 1;   /**< [ 47: 47](R/W) Enable use of QAC digital filter in the doutq datapath. If the
+                                                                 enable is deasserted, the filter will output the [QAC_REFSET]
+                                                                 value. If its asserted, it will determine the current phase and use
+                                                                 [QAC_REFSET] & [QAC_CNTSET] to output a correction value. Set prior to
+                                                                 exiting reset. */
+        uint64_t sata_g3_en_qac_e : 1;   /**< [ 48: 48](R/W) Enable use of QAC digital filter in the doute datapath. If the
+                                                                 enable is deasserted, the filter will output the [QAC_REFSET]
+                                                                 value. If its asserted, it will determine the current phase and use
+                                                                 [QAC_REFSET] & [QAC_CNTSET] to output a correction value. Set prior to
+                                                                 exiting reset. */
+        uint64_t sata_g3_blwc_deadband : 12; /**< [ 60: 49](R/W) BLWC adaptation deadband settings.
+                                                                 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+        uint64_t reserved_61_63 : 3;
+#endif /* Word 0 - End */
+    } s; /* Generic bit-field layout; the chip-specific "cn" view aliases it. */
+    /* struct bdk_gsernx_lanex_sata_rxeq3_1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_rxeq3_1_bcfg bdk_gsernx_lanex_sata_rxeq3_1_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ3_1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ3_1_BCFG(unsigned long a, unsigned long b)
+{
+    /* Address of GSERN(a)_LANE(b)_SATA_RXEQ3_1_BCFG. Valid only on CN9XXX
+       with a in [0..7] and b in [0..4]; anything else is reported as a fatal
+       CSR lookup error (__bdk_csr_fatal is assumed noreturn, as the generated
+       code falls through after calling it). */
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN9XXX) || (a > 7) || (b > 4))
+        __bdk_csr_fatal("GSERNX_LANEX_SATA_RXEQ3_1_BCFG", 2, a, b, 0, 0);
+    uint64_t module_offs = 0x1000000ll * (a & 0x7); /* per-GSERN-module stride */
+    uint64_t lane_offs   = 0x10000ll * (b & 0x7);   /* per-lane stride */
+    return 0x87e090002e60ll + module_offs + lane_offs;
+}
+
+/* Accessor helper macros consumed by the generic BDK_CSR_* wrappers. */
+#define typedef_BDK_GSERNX_LANEX_SATA_RXEQ3_1_BCFG(a,b) bdk_gsernx_lanex_sata_rxeq3_1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_RXEQ3_1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_RXEQ3_1_BCFG(a,b) "GSERNX_LANEX_SATA_RXEQ3_1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_RXEQ3_1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_RXEQ3_1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_RXEQ3_1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_rxeq3_2_bcfg
+ *
+ * GSER Lane SATA Gen3 RX Equalizer Control Register 2
+ * Parameters controlling the custom receiver equalization during SATA gen3 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'SATA'.
+ */
+union bdk_gsernx_lanex_sata_rxeq3_2_bcfg
+{
+    uint64_t u; /* Whole register as a single 64-bit value. */
+    struct bdk_gsernx_lanex_sata_rxeq3_2_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t sata_g3_prevga_gn_subrate_fin : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+                                                                 if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled. */
+        uint64_t sata_g3_prevga_gn_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+                                                                 interval, if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t sata_g3_blwc_subrate_init : 16;/**< [ 31: 16](R/W) Subrate counter initial value. Sets the initial value for the LMS update interval, if
+                                                                 subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t sata_g3_blwc_subrate_final : 16;/**< [ 15:  0](R/W) Subrate counter final value. Sets the ending value for the LMS update interval, if subrate
+                                                                 gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled.
+                                                                 Subrate counter final value. */
+#else /* Word 0 - Little Endian */
+        uint64_t sata_g3_blwc_subrate_final : 16;/**< [ 15:  0](R/W) Subrate counter final value. Sets the ending value for the LMS update interval, if subrate
+                                                                 gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled.
+                                                                 Subrate counter final value. */
+        uint64_t sata_g3_blwc_subrate_init : 16;/**< [ 31: 16](R/W) Subrate counter initial value. Sets the initial value for the LMS update interval, if
+                                                                 subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t sata_g3_prevga_gn_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+                                                                 interval, if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t sata_g3_prevga_gn_subrate_fin : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+                                                                 if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled. */
+#endif /* Word 0 - End */
+    } s; /* Generic bit-field layout; the chip-specific "cn" view aliases it. */
+    /* struct bdk_gsernx_lanex_sata_rxeq3_2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_rxeq3_2_bcfg bdk_gsernx_lanex_sata_rxeq3_2_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ3_2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ3_2_BCFG(unsigned long a, unsigned long b)
+{
+    /* Address of GSERN(a)_LANE(b)_SATA_RXEQ3_2_BCFG. Valid only on CN9XXX
+       with a in [0..7] and b in [0..4]; anything else is reported as a fatal
+       CSR lookup error (__bdk_csr_fatal is assumed noreturn, as the generated
+       code falls through after calling it). */
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN9XXX) || (a > 7) || (b > 4))
+        __bdk_csr_fatal("GSERNX_LANEX_SATA_RXEQ3_2_BCFG", 2, a, b, 0, 0);
+    uint64_t module_offs = 0x1000000ll * (a & 0x7); /* per-GSERN-module stride */
+    uint64_t lane_offs   = 0x10000ll * (b & 0x7);   /* per-lane stride */
+    return 0x87e090002e70ll + module_offs + lane_offs;
+}
+
+/* Accessor helper macros consumed by the generic BDK_CSR_* wrappers. */
+#define typedef_BDK_GSERNX_LANEX_SATA_RXEQ3_2_BCFG(a,b) bdk_gsernx_lanex_sata_rxeq3_2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_RXEQ3_2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_RXEQ3_2_BCFG(a,b) "GSERNX_LANEX_SATA_RXEQ3_2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_RXEQ3_2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_RXEQ3_2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_RXEQ3_2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_rxeq3_3_bcfg
+ *
+ * GSER Lane SATA Gen3 RX Equalizer Control Register 3
+ * Parameters controlling the custom receiver equalization during SATA Gen3 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'SATA'.
+ */
+union bdk_gsernx_lanex_sata_rxeq3_3_bcfg
+{
+    uint64_t u; /* Whole register as a single 64-bit value. */
+    struct bdk_gsernx_lanex_sata_rxeq3_3_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t sata_g3_afeos_subrate_final : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+                                                                 if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t sata_g3_afeos_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+                                                                 interval, if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t sata_g3_subrate_final : 16; /**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+                                                                 if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t sata_g3_subrate_init : 16;  /**< [ 15:  0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+                                                                 interval, if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+#else /* Word 0 - Little Endian */
+        uint64_t sata_g3_subrate_init : 16;  /**< [ 15:  0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+                                                                 interval, if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t sata_g3_subrate_final : 16; /**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+                                                                 if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t sata_g3_afeos_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+                                                                 interval, if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+        uint64_t sata_g3_afeos_subrate_final : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+                                                                 if subrate gearshifting is enabled.
+                                                                 Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+#endif /* Word 0 - End */
+    } s; /* Generic bit-field layout; the chip-specific "cn" view aliases it. */
+    /* struct bdk_gsernx_lanex_sata_rxeq3_3_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_rxeq3_3_bcfg bdk_gsernx_lanex_sata_rxeq3_3_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ3_3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ3_3_BCFG(unsigned long a, unsigned long b)
+{
+    /* Address of GSERN(a)_LANE(b)_SATA_RXEQ3_3_BCFG. Valid only on CN9XXX
+       with a in [0..7] and b in [0..4]; anything else is reported as a fatal
+       CSR lookup error (__bdk_csr_fatal is assumed noreturn, as the generated
+       code falls through after calling it). */
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN9XXX) || (a > 7) || (b > 4))
+        __bdk_csr_fatal("GSERNX_LANEX_SATA_RXEQ3_3_BCFG", 2, a, b, 0, 0);
+    uint64_t module_offs = 0x1000000ll * (a & 0x7); /* per-GSERN-module stride */
+    uint64_t lane_offs   = 0x10000ll * (b & 0x7);   /* per-lane stride */
+    return 0x87e090002e80ll + module_offs + lane_offs;
+}
+
+/* Accessor helper macros consumed by the generic BDK_CSR_* wrappers. */
+#define typedef_BDK_GSERNX_LANEX_SATA_RXEQ3_3_BCFG(a,b) bdk_gsernx_lanex_sata_rxeq3_3_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_RXEQ3_3_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_RXEQ3_3_BCFG(a,b) "GSERNX_LANEX_SATA_RXEQ3_3_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_RXEQ3_3_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_RXEQ3_3_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_RXEQ3_3_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_rxidl1a_bcfg
+ *
+ * GSER Lane SATA Gen1 RX Idle Detection Filter Control Register 2
+ * Parameters controlling the analog detection and digital filtering of the receiver's
+ * idle detection logic for SATA GEN1. For the digital filtering, setting all fields to 1,
+ * i.e., N0=N1=I0=I1=L0=L1=1, results in no filtering.
+ * (The N0/N1 thresholds and I0/I1 increments named above live in the companion
+ * register GSERN()_LANE()_SATA_RXIDLE1_BCFG; this register holds the L0/L1 leak
+ * parameters plus the analog REFSET and RX_IDLE_LOWF controls.)
+ */
+union bdk_gsernx_lanex_sata_rxidl1a_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_sata_rxidl1a_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t rx_idle_lowf : 2; /**< [ 63: 62](R/W) Control for the receiver's idle detector analog filter
+ bandwidth. The two bits apply at different times.
+ \<0\> = Set to 1 for low bandwidth during normal operation.
+ \<1\> = Set to 1 for low bandwidth during idle offset calibration.
+ The default is 1 during normal operation for large filter capacitance and low
+ bandwidth, and 0 during idle offset calibration to provide faster response. */
+ uint64_t reserved_61 : 1;
+ uint64_t refset : 5; /**< [ 60: 56](R/W) Sets the reference voltage swing for idle detection. A voltage swing
+ at the input of the RX less than this amount is defined as idle.
+ (See GSERN()_LANE()_RX_ST_BCFG[REFSET] for bit mapping.) */
+ uint64_t reserved_54_55 : 2;
+ uint64_t l1 : 27; /**< [ 53: 27](R/W) Ones count leak parameter. When a zero in the raw idle signal from the custom
+ macro is encountered, the ones count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L1=N1 and I1=1 for a simple run-of-N1 ones to
+ assert the filter output.) The minimum setting for this field is 0x1. */
+ uint64_t l0 : 27; /**< [ 26: 0](R/W) Zeros count leak parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the zeros count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L0=N0 and I0=1 for a simple run-of-N0 zeros to
+ deassert the filter output.) The minimum setting for this field is 0x1. */
+#else /* Word 0 - Little Endian */
+ uint64_t l0 : 27; /**< [ 26: 0](R/W) Zeros count leak parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the zeros count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L0=N0 and I0=1 for a simple run-of-N0 zeros to
+ deassert the filter output.) The minimum setting for this field is 0x1. */
+ uint64_t l1 : 27; /**< [ 53: 27](R/W) Ones count leak parameter. When a zero in the raw idle signal from the custom
+ macro is encountered, the ones count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L1=N1 and I1=1 for a simple run-of-N1 ones to
+ assert the filter output.) The minimum setting for this field is 0x1. */
+ uint64_t reserved_54_55 : 2;
+ uint64_t refset : 5; /**< [ 60: 56](R/W) Sets the reference voltage swing for idle detection. A voltage swing
+ at the input of the RX less than this amount is defined as idle.
+ (See GSERN()_LANE()_RX_ST_BCFG[REFSET] for bit mapping.) */
+ uint64_t reserved_61 : 1;
+ uint64_t rx_idle_lowf : 2; /**< [ 63: 62](R/W) Control for the receiver's idle detector analog filter
+ bandwidth. The two bits apply at different times.
+ \<0\> = Set to 1 for low bandwidth during normal operation.
+ \<1\> = Set to 1 for low bandwidth during idle offset calibration.
+ The default is 1 during normal operation for large filter capacitance and low
+ bandwidth, and 0 during idle offset calibration to provide faster response. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_sata_rxidl1a_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_rxidl1a_bcfg bdk_gsernx_lanex_sata_rxidl1a_bcfg_t;
+
+/* Address accessor: valid only on CN9XXX with module a <= 7 and lane b <= 4;
+   other arguments report a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXIDL1A_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXIDL1A_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002cc0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_SATA_RXIDL1A_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_SATA_RXIDL1A_BCFG(a,b) bdk_gsernx_lanex_sata_rxidl1a_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_RXIDL1A_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_RXIDL1A_BCFG(a,b) "GSERNX_LANEX_SATA_RXIDL1A_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_RXIDL1A_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_RXIDL1A_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_RXIDL1A_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_rxidl2a_bcfg
+ *
+ * GSER Lane SATA Gen2 RX Idle Detection Filter Control Register 2
+ * Parameters controlling the analog detection and digital filtering of the receiver's
+ * idle detection logic for SATA GEN2. For the digital filtering, setting all fields to 1,
+ * i.e., N0=N1=I0=I1=L0=L1=1, results in no filtering.
+ * (The N0/N1 thresholds and I0/I1 increments named above live in the companion
+ * register GSERN()_LANE()_SATA_RXIDLE2_BCFG; this register holds the L0/L1 leak
+ * parameters plus the analog REFSET and RX_IDLE_LOWF controls.)
+ */
+union bdk_gsernx_lanex_sata_rxidl2a_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_sata_rxidl2a_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t rx_idle_lowf : 2; /**< [ 63: 62](R/W) Control for the receiver's idle detector analog filter
+ bandwidth. The two bits apply at different times.
+ \<0\> = Set to 1 for low bandwidth during normal operation.
+ \<1\> = Set to 1 for low bandwidth during idle offset calibration.
+ The default is 1 during normal operation for large filter capacitance and low
+ bandwidth, and 0 during idle offset calibration to provide faster response. */
+ uint64_t reserved_61 : 1;
+ uint64_t refset : 5; /**< [ 60: 56](R/W) Sets the reference voltage swing for idle detection. A voltage swing
+ at the input of the RX less than this amount is defined as idle.
+ (See GSERN()_LANE()_RX_ST_BCFG[REFSET] for bit mapping.) */
+ uint64_t reserved_54_55 : 2;
+ uint64_t l1 : 27; /**< [ 53: 27](R/W) Ones count leak parameter. When a zero in the raw idle signal from the custom
+ macro is encountered, the ones count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L1=N1 and I1=1 for a simple run-of-N1 ones to
+ assert the filter output.) The minimum setting for this field is 0x1. */
+ uint64_t l0 : 27; /**< [ 26: 0](R/W) Zeros count leak parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the zeros count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L0=N0 and I0=1 for a simple run-of-N0 zeros to
+ deassert the filter output.) The minimum setting for this field is 0x1. */
+#else /* Word 0 - Little Endian */
+ uint64_t l0 : 27; /**< [ 26: 0](R/W) Zeros count leak parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the zeros count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L0=N0 and I0=1 for a simple run-of-N0 zeros to
+ deassert the filter output.) The minimum setting for this field is 0x1. */
+ uint64_t l1 : 27; /**< [ 53: 27](R/W) Ones count leak parameter. When a zero in the raw idle signal from the custom
+ macro is encountered, the ones count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L1=N1 and I1=1 for a simple run-of-N1 ones to
+ assert the filter output.) The minimum setting for this field is 0x1. */
+ uint64_t reserved_54_55 : 2;
+ uint64_t refset : 5; /**< [ 60: 56](R/W) Sets the reference voltage swing for idle detection. A voltage swing
+ at the input of the RX less than this amount is defined as idle.
+ (See GSERN()_LANE()_RX_ST_BCFG[REFSET] for bit mapping.) */
+ uint64_t reserved_61 : 1;
+ uint64_t rx_idle_lowf : 2; /**< [ 63: 62](R/W) Control for the receiver's idle detector analog filter
+ bandwidth. The two bits apply at different times.
+ \<0\> = Set to 1 for low bandwidth during normal operation.
+ \<1\> = Set to 1 for low bandwidth during idle offset calibration.
+ The default is 1 during normal operation for large filter capacitance and low
+ bandwidth, and 0 during idle offset calibration to provide faster response. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_sata_rxidl2a_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_rxidl2a_bcfg bdk_gsernx_lanex_sata_rxidl2a_bcfg_t;
+
+/* Address accessor: valid only on CN9XXX with module a <= 7 and lane b <= 4;
+   other arguments report a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXIDL2A_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXIDL2A_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002ce0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_SATA_RXIDL2A_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_SATA_RXIDL2A_BCFG(a,b) bdk_gsernx_lanex_sata_rxidl2a_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_RXIDL2A_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_RXIDL2A_BCFG(a,b) "GSERNX_LANEX_SATA_RXIDL2A_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_RXIDL2A_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_RXIDL2A_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_RXIDL2A_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_rxidl3a_bcfg
+ *
+ * GSER Lane SATA Gen3 RX Idle Detection Filter Control Register 2
+ * Parameters controlling the analog detection and digital filtering of the receiver's
+ * idle detection logic for SATA GEN3. For the digital filtering, setting all fields to 1,
+ * i.e., N0=N1=I0=I1=L0=L1=1, results in no filtering.
+ * (The N0/N1 thresholds and I0/I1 increments named above live in the companion
+ * register GSERN()_LANE()_SATA_RXIDLE3_BCFG; this register holds the L0/L1 leak
+ * parameters plus the analog REFSET and RX_IDLE_LOWF controls.)
+ */
+union bdk_gsernx_lanex_sata_rxidl3a_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_sata_rxidl3a_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t rx_idle_lowf : 2; /**< [ 63: 62](R/W) Control for the receiver's idle detector analog filter
+ bandwidth. The two bits apply at different times.
+ \<0\> = Set to 1 for low bandwidth during normal operation.
+ \<1\> = Set to 1 for low bandwidth during idle offset calibration.
+ The default is 1 during normal operation for large filter capacitance and low
+ bandwidth, and 0 during idle offset calibration to provide faster response. */
+ uint64_t reserved_61 : 1;
+ uint64_t refset : 5; /**< [ 60: 56](R/W) Sets the reference voltage swing for idle detection. A voltage swing
+ at the input of the RX less than this amount is defined as idle.
+ (See GSERN()_LANE()_RX_ST_BCFG[REFSET] for bit mapping.) */
+ uint64_t reserved_54_55 : 2;
+ uint64_t l1 : 27; /**< [ 53: 27](R/W) Ones count leak parameter. When a zero in the raw idle signal from the custom
+ macro is encountered, the ones count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L1=N1 and I1=1 for a simple run-of-N1 ones to
+ assert the filter output.) The minimum setting for this field is 0x1. */
+ uint64_t l0 : 27; /**< [ 26: 0](R/W) Zeros count leak parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the zeros count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L0=N0 and I0=1 for a simple run-of-N0 zeros to
+ deassert the filter output.) The minimum setting for this field is 0x1. */
+#else /* Word 0 - Little Endian */
+ uint64_t l0 : 27; /**< [ 26: 0](R/W) Zeros count leak parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the zeros count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L0=N0 and I0=1 for a simple run-of-N0 zeros to
+ deassert the filter output.) The minimum setting for this field is 0x1. */
+ uint64_t l1 : 27; /**< [ 53: 27](R/W) Ones count leak parameter. When a zero in the raw idle signal from the custom
+ macro is encountered, the ones count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L1=N1 and I1=1 for a simple run-of-N1 ones to
+ assert the filter output.) The minimum setting for this field is 0x1. */
+ uint64_t reserved_54_55 : 2;
+ uint64_t refset : 5; /**< [ 60: 56](R/W) Sets the reference voltage swing for idle detection. A voltage swing
+ at the input of the RX less than this amount is defined as idle.
+ (See GSERN()_LANE()_RX_ST_BCFG[REFSET] for bit mapping.) */
+ uint64_t reserved_61 : 1;
+ uint64_t rx_idle_lowf : 2; /**< [ 63: 62](R/W) Control for the receiver's idle detector analog filter
+ bandwidth. The two bits apply at different times.
+ \<0\> = Set to 1 for low bandwidth during normal operation.
+ \<1\> = Set to 1 for low bandwidth during idle offset calibration.
+ The default is 1 during normal operation for large filter capacitance and low
+ bandwidth, and 0 during idle offset calibration to provide faster response. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_sata_rxidl3a_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_rxidl3a_bcfg bdk_gsernx_lanex_sata_rxidl3a_bcfg_t;
+
+/* Address accessor: valid only on CN9XXX with module a <= 7 and lane b <= 4;
+   other arguments report a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXIDL3A_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXIDL3A_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002d00ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_SATA_RXIDL3A_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_SATA_RXIDL3A_BCFG(a,b) bdk_gsernx_lanex_sata_rxidl3a_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_RXIDL3A_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_RXIDL3A_BCFG(a,b) "GSERNX_LANEX_SATA_RXIDL3A_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_RXIDL3A_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_RXIDL3A_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_RXIDL3A_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_rxidle1_bcfg
+ *
+ * GSER Lane SATA Gen1 RX Idle Detection Filter Control Register
+ * Parameters controlling the analog detection and digital filtering of the receiver's
+ * idle detection logic for SATA GEN1. For the digital filtering, setting all fields to 1,
+ * i.e., N0=N1=I0=I1=L0=L1=1, results in no filtering.
+ * (This register holds the N0/N1 thresholds and I0/I1 increments; the L0/L1 leak
+ * parameters and analog controls live in GSERN()_LANE()_SATA_RXIDL1A_BCFG.)
+ */
+union bdk_gsernx_lanex_sata_rxidle1_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_sata_rxidle1_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_63 : 1;
+ uint64_t i1 : 4; /**< [ 62: 59](R/W) Ones count increment parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the ones count is incremented by this amount, saturating
+ to a maximum of [N1]. */
+ uint64_t i0 : 4; /**< [ 58: 55](R/W) Zeros count increment parameter. When a zero in the raw idle signal from the
+ custom macro is encountered, the zeros count is incremented by this amount,
+ saturating to a maximum count of [N0]. */
+ uint64_t reserved_54 : 1;
+ uint64_t n1 : 27; /**< [ 53: 27](R/W) Threshold for the count of ones in the raw idle signal from the custom macro
+ required to assert the idle filter output. */
+ uint64_t n0 : 27; /**< [ 26: 0](R/W) Threshold for the count of zeros in the raw idle signal from the custom macro
+ required to deassert the idle filter output. */
+#else /* Word 0 - Little Endian */
+ uint64_t n0 : 27; /**< [ 26: 0](R/W) Threshold for the count of zeros in the raw idle signal from the custom macro
+ required to deassert the idle filter output. */
+ uint64_t n1 : 27; /**< [ 53: 27](R/W) Threshold for the count of ones in the raw idle signal from the custom macro
+ required to assert the idle filter output. */
+ uint64_t reserved_54 : 1;
+ uint64_t i0 : 4; /**< [ 58: 55](R/W) Zeros count increment parameter. When a zero in the raw idle signal from the
+ custom macro is encountered, the zeros count is incremented by this amount,
+ saturating to a maximum count of [N0]. */
+ uint64_t i1 : 4; /**< [ 62: 59](R/W) Ones count increment parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the ones count is incremented by this amount, saturating
+ to a maximum of [N1]. */
+ uint64_t reserved_63 : 1;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_sata_rxidle1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_rxidle1_bcfg bdk_gsernx_lanex_sata_rxidle1_bcfg_t;
+
+/* Address accessor: valid only on CN9XXX with module a <= 7 and lane b <= 4;
+   other arguments report a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXIDLE1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXIDLE1_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002cb0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_SATA_RXIDLE1_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_SATA_RXIDLE1_BCFG(a,b) bdk_gsernx_lanex_sata_rxidle1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_RXIDLE1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_RXIDLE1_BCFG(a,b) "GSERNX_LANEX_SATA_RXIDLE1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_RXIDLE1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_RXIDLE1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_RXIDLE1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_rxidle2_bcfg
+ *
+ * GSER Lane SATA Gen2 RX Idle Detection Filter Control Register
+ * Parameters controlling the analog detection and digital filtering of the receiver's
+ * idle detection logic for SATA GEN2. For the digital filtering, setting all fields to 1,
+ * i.e., N0=N1=I0=I1=L0=L1=1, results in no filtering.
+ * (Title corrected: this is the GEN2 variant, as the body text states. This
+ * register holds the N0/N1 thresholds and I0/I1 increments; the L0/L1 leak
+ * parameters and analog controls live in GSERN()_LANE()_SATA_RXIDL2A_BCFG.)
+ */
+union bdk_gsernx_lanex_sata_rxidle2_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_sata_rxidle2_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_63 : 1;
+ uint64_t i1 : 4; /**< [ 62: 59](R/W) Ones count increment parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the ones count is incremented by this amount, saturating
+ to a maximum of [N1]. */
+ uint64_t i0 : 4; /**< [ 58: 55](R/W) Zeros count increment parameter. When a zero in the raw idle signal from the
+ custom macro is encountered, the zeros count is incremented by this amount,
+ saturating to a maximum count of [N0]. */
+ uint64_t reserved_54 : 1;
+ uint64_t n1 : 27; /**< [ 53: 27](R/W) Threshold for the count of ones in the raw idle signal from the custom macro
+ required to assert the idle filter output. */
+ uint64_t n0 : 27; /**< [ 26: 0](R/W) Threshold for the count of zeros in the raw idle signal from the custom macro
+ required to deassert the idle filter output. */
+#else /* Word 0 - Little Endian */
+ uint64_t n0 : 27; /**< [ 26: 0](R/W) Threshold for the count of zeros in the raw idle signal from the custom macro
+ required to deassert the idle filter output. */
+ uint64_t n1 : 27; /**< [ 53: 27](R/W) Threshold for the count of ones in the raw idle signal from the custom macro
+ required to assert the idle filter output. */
+ uint64_t reserved_54 : 1;
+ uint64_t i0 : 4; /**< [ 58: 55](R/W) Zeros count increment parameter. When a zero in the raw idle signal from the
+ custom macro is encountered, the zeros count is incremented by this amount,
+ saturating to a maximum count of [N0]. */
+ uint64_t i1 : 4; /**< [ 62: 59](R/W) Ones count increment parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the ones count is incremented by this amount, saturating
+ to a maximum of [N1]. */
+ uint64_t reserved_63 : 1;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_sata_rxidle2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_rxidle2_bcfg bdk_gsernx_lanex_sata_rxidle2_bcfg_t;
+
+/* Address accessor: valid only on CN9XXX with module a <= 7 and lane b <= 4;
+   other arguments report a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXIDLE2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXIDLE2_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002cd0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_SATA_RXIDLE2_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_SATA_RXIDLE2_BCFG(a,b) bdk_gsernx_lanex_sata_rxidle2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_RXIDLE2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_RXIDLE2_BCFG(a,b) "GSERNX_LANEX_SATA_RXIDLE2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_RXIDLE2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_RXIDLE2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_RXIDLE2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_rxidle3_bcfg
+ *
+ * GSER Lane SATA Gen3 RX Idle Detection Filter Control Register
+ * Parameters controlling the analog detection and digital filtering of the receiver's
+ * idle detection logic for SATA GEN3. For the digital filtering, setting all fields to 1,
+ * i.e., N0=N1=I0=I1=L0=L1=1, results in no filtering.
+ * (Title corrected: this is the GEN3 variant, as the body text states. This
+ * register holds the N0/N1 thresholds and I0/I1 increments; the L0/L1 leak
+ * parameters and analog controls live in GSERN()_LANE()_SATA_RXIDL3A_BCFG.)
+ */
+union bdk_gsernx_lanex_sata_rxidle3_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_sata_rxidle3_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_63 : 1;
+ uint64_t i1 : 4; /**< [ 62: 59](R/W) Ones count increment parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the ones count is incremented by this amount, saturating
+ to a maximum of [N1]. */
+ uint64_t i0 : 4; /**< [ 58: 55](R/W) Zeros count increment parameter. When a zero in the raw idle signal from the
+ custom macro is encountered, the zeros count is incremented by this amount,
+ saturating to a maximum count of [N0]. */
+ uint64_t reserved_54 : 1;
+ uint64_t n1 : 27; /**< [ 53: 27](R/W) Threshold for the count of ones in the raw idle signal from the custom macro
+ required to assert the idle filter output. */
+ uint64_t n0 : 27; /**< [ 26: 0](R/W) Threshold for the count of zeros in the raw idle signal from the custom macro
+ required to deassert the idle filter output. */
+#else /* Word 0 - Little Endian */
+ uint64_t n0 : 27; /**< [ 26: 0](R/W) Threshold for the count of zeros in the raw idle signal from the custom macro
+ required to deassert the idle filter output. */
+ uint64_t n1 : 27; /**< [ 53: 27](R/W) Threshold for the count of ones in the raw idle signal from the custom macro
+ required to assert the idle filter output. */
+ uint64_t reserved_54 : 1;
+ uint64_t i0 : 4; /**< [ 58: 55](R/W) Zeros count increment parameter. When a zero in the raw idle signal from the
+ custom macro is encountered, the zeros count is incremented by this amount,
+ saturating to a maximum count of [N0]. */
+ uint64_t i1 : 4; /**< [ 62: 59](R/W) Ones count increment parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the ones count is incremented by this amount, saturating
+ to a maximum of [N1]. */
+ uint64_t reserved_63 : 1;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_sata_rxidle3_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_rxidle3_bcfg bdk_gsernx_lanex_sata_rxidle3_bcfg_t;
+
+/* Address accessor: valid only on CN9XXX with module a <= 7 and lane b <= 4;
+   other arguments report a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXIDLE3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXIDLE3_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002cf0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_SATA_RXIDLE3_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_SATA_RXIDLE3_BCFG(a,b) bdk_gsernx_lanex_sata_rxidle3_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_RXIDLE3_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_RXIDLE3_BCFG(a,b) "GSERNX_LANEX_SATA_RXIDLE3_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_RXIDLE3_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_RXIDLE3_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_RXIDLE3_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_txdrv1_bcfg
+ *
+ * GSER Lane SATA TX Drive Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values and TX bias/swing for SATA GEN1.
+ * (One of three per-generation drive registers; GEN2 and GEN3 use the same field
+ * layout in the following two registers.)
+ */
+union bdk_gsernx_lanex_sata_txdrv1_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_sata_txdrv1_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_30_63 : 34;
+ uint64_t sata_g1_tx_bias : 6; /**< [ 29: 24](R/W) TX bias/swing selection for SATA GEN1.
+ Typical values would be:
+ 42 = Nominal 1.0V p-p transmit amplitude.
+ 52 = Nominal 1.2V p-p transmit amplitude. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t sata_g1_cpost : 5; /**< [ 20: 16](R/W) SATA GEN1 Cpost value. Combined with the reset values of [SATA_G1_CMAIN] and
+ [SATA_G1_CPRE] this yields 3.5 dB TX deemphasis. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t sata_g1_cmain : 6; /**< [ 13: 8](R/W) SATA GEN1 Cmain value. Combined with the reset values of [SATA_G1_CPOST] and
+ [SATA_G1_CPRE] this yields 3.5 dB TX deemphasis. */
+ uint64_t reserved_5_7 : 3;
+ uint64_t sata_g1_cpre : 5; /**< [ 4: 0](R/W) SATA GEN1 Cpre value. Combined with the reset values of [SATA_G1_CPOST] and
+ [SATA_G1_CMAIN] this yields 3.5 dB TX deemphasis. */
+#else /* Word 0 - Little Endian */
+ uint64_t sata_g1_cpre : 5; /**< [ 4: 0](R/W) SATA GEN1 Cpre value. Combined with the reset values of [SATA_G1_CPOST] and
+ [SATA_G1_CMAIN] this yields 3.5 dB TX deemphasis. */
+ uint64_t reserved_5_7 : 3;
+ uint64_t sata_g1_cmain : 6; /**< [ 13: 8](R/W) SATA GEN1 Cmain value. Combined with the reset values of [SATA_G1_CPOST] and
+ [SATA_G1_CPRE] this yields 3.5 dB TX deemphasis. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t sata_g1_cpost : 5; /**< [ 20: 16](R/W) SATA GEN1 Cpost value. Combined with the reset values of [SATA_G1_CMAIN] and
+ [SATA_G1_CPRE] this yields 3.5 dB TX deemphasis. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t sata_g1_tx_bias : 6; /**< [ 29: 24](R/W) TX bias/swing selection for SATA GEN1.
+ Typical values would be:
+ 42 = Nominal 1.0V p-p transmit amplitude.
+ 52 = Nominal 1.2V p-p transmit amplitude. */
+ uint64_t reserved_30_63 : 34;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_sata_txdrv1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_txdrv1_bcfg bdk_gsernx_lanex_sata_txdrv1_bcfg_t;
+
+/* Address accessor: valid only on CN9XXX with module a <= 7 and lane b <= 4;
+   other arguments report a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_SATA_TXDRV1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_TXDRV1_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002f80ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_SATA_TXDRV1_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_SATA_TXDRV1_BCFG(a,b) bdk_gsernx_lanex_sata_txdrv1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_TXDRV1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_TXDRV1_BCFG(a,b) "GSERNX_LANEX_SATA_TXDRV1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_TXDRV1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_TXDRV1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_TXDRV1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_txdrv2_bcfg
+ *
+ * GSER Lane SATA TX Drive Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values and TX bias/swing for SATA GEN2.
+ * (Same field layout as the GEN1 variant, GSERN()_LANE()_SATA_TXDRV1_BCFG.)
+ */
+union bdk_gsernx_lanex_sata_txdrv2_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_sata_txdrv2_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_30_63 : 34;
+ uint64_t sata_g2_tx_bias : 6; /**< [ 29: 24](R/W) TX bias/swing selection for SATA GEN2.
+ Typical values would be:
+ 42 = Nominal 1.0V p-p transmit amplitude.
+ 52 = Nominal 1.2V p-p transmit amplitude. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t sata_g2_cpost : 5; /**< [ 20: 16](R/W) SATA GEN2 Cpost value. Combined with the reset values of [SATA_G2_CMAIN] and
+ [SATA_G2_CPRE] this yields 3.5 dB TX deemphasis. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t sata_g2_cmain : 6; /**< [ 13: 8](R/W) SATA GEN2 Cmain value. Combined with the reset values of [SATA_G2_CPOST] and
+ [SATA_G2_CPRE] this yields 3.5 dB TX deemphasis. */
+ uint64_t reserved_5_7 : 3;
+ uint64_t sata_g2_cpre : 5; /**< [ 4: 0](R/W) SATA GEN2 Cpre value. Combined with the reset values of [SATA_G2_CPOST] and
+ [SATA_G2_CMAIN] this yields 3.5 dB TX deemphasis. */
+#else /* Word 0 - Little Endian */
+ uint64_t sata_g2_cpre : 5; /**< [ 4: 0](R/W) SATA GEN2 Cpre value. Combined with the reset values of [SATA_G2_CPOST] and
+ [SATA_G2_CMAIN] this yields 3.5 dB TX deemphasis. */
+ uint64_t reserved_5_7 : 3;
+ uint64_t sata_g2_cmain : 6; /**< [ 13: 8](R/W) SATA GEN2 Cmain value. Combined with the reset values of [SATA_G2_CPOST] and
+ [SATA_G2_CPRE] this yields 3.5 dB TX deemphasis. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t sata_g2_cpost : 5; /**< [ 20: 16](R/W) SATA GEN2 Cpost value. Combined with the reset values of [SATA_G2_CMAIN] and
+ [SATA_G2_CPRE] this yields 3.5 dB TX deemphasis. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t sata_g2_tx_bias : 6; /**< [ 29: 24](R/W) TX bias/swing selection for SATA GEN2.
+ Typical values would be:
+ 42 = Nominal 1.0V p-p transmit amplitude.
+ 52 = Nominal 1.2V p-p transmit amplitude. */
+ uint64_t reserved_30_63 : 34;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_sata_txdrv2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_txdrv2_bcfg bdk_gsernx_lanex_sata_txdrv2_bcfg_t;
+
+/* Address accessor: valid only on CN9XXX with module a <= 7 and lane b <= 4;
+   other arguments report a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_SATA_TXDRV2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_TXDRV2_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002f90ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_SATA_TXDRV2_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_SATA_TXDRV2_BCFG(a,b) bdk_gsernx_lanex_sata_txdrv2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_TXDRV2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_TXDRV2_BCFG(a,b) "GSERNX_LANEX_SATA_TXDRV2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_TXDRV2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_TXDRV2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_TXDRV2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_txdrv3_bcfg
+ *
+ * GSER Lane SATA TX Drive Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values and TX bias/swing for SATA GEN3.
+ * (Same field layout as the GEN1/GEN2 variants; note the GEN3 reset coefficients
+ * target 6 dB deemphasis rather than 3.5 dB.)
+ */
+union bdk_gsernx_lanex_sata_txdrv3_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_sata_txdrv3_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_30_63 : 34;
+ uint64_t sata_g3_tx_bias : 6; /**< [ 29: 24](R/W) TX bias/swing selection for SATA GEN3.
+ Typical values would be:
+ 42 = Nominal 1.0V p-p transmit amplitude.
+ 52 = Nominal 1.2V p-p transmit amplitude. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t sata_g3_cpost : 5; /**< [ 20: 16](R/W) SATA GEN3 Cpost value. Combined with the reset values of [SATA_G3_CMAIN] and
+ [SATA_G3_CPRE] this yields 6 dB TX deemphasis. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t sata_g3_cmain : 6; /**< [ 13: 8](R/W) SATA GEN3 Cmain value. Combined with the reset values of [SATA_G3_CPOST] and
+ [SATA_G3_CPRE] this yields 6 dB TX deemphasis. */
+ uint64_t reserved_5_7 : 3;
+ uint64_t sata_g3_cpre : 5; /**< [ 4: 0](R/W) SATA GEN3 Cpre value. Combined with the reset values of [SATA_G3_CPOST] and
+ [SATA_G3_CMAIN] this yields 6 dB TX deemphasis. */
+#else /* Word 0 - Little Endian */
+ uint64_t sata_g3_cpre : 5; /**< [ 4: 0](R/W) SATA GEN3 Cpre value. Combined with the reset values of [SATA_G3_CPOST] and
+ [SATA_G3_CMAIN] this yields 6 dB TX deemphasis. */
+ uint64_t reserved_5_7 : 3;
+ uint64_t sata_g3_cmain : 6; /**< [ 13: 8](R/W) SATA GEN3 Cmain value. Combined with the reset values of [SATA_G3_CPOST] and
+ [SATA_G3_CPRE] this yields 6 dB TX deemphasis. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t sata_g3_cpost : 5; /**< [ 20: 16](R/W) SATA GEN3 Cpost value. Combined with the reset values of [SATA_G3_CMAIN] and
+ [SATA_G3_CPRE] this yields 6 dB TX deemphasis. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t sata_g3_tx_bias : 6; /**< [ 29: 24](R/W) TX bias/swing selection for SATA GEN3.
+ Typical values would be:
+ 42 = Nominal 1.0V p-p transmit amplitude.
+ 52 = Nominal 1.2V p-p transmit amplitude. */
+ uint64_t reserved_30_63 : 34;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_sata_txdrv3_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_txdrv3_bcfg bdk_gsernx_lanex_sata_txdrv3_bcfg_t;
+
+/* Address accessor: valid only on CN9XXX with module a <= 7 and lane b <= 4;
+   other arguments report a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_SATA_TXDRV3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_TXDRV3_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002fa0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_SATA_TXDRV3_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_SATA_TXDRV3_BCFG(a,b) bdk_gsernx_lanex_sata_txdrv3_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_TXDRV3_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_TXDRV3_BCFG(a,b) "GSERNX_LANEX_SATA_TXDRV3_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_TXDRV3_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_TXDRV3_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_TXDRV3_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_scope_0_dat
+ *
+ * GSER Lane PCS Lite Scope Data Gathering Result Register 0
+ */
+union bdk_gsernx_lanex_scope_0_dat
+{
+    uint64_t u; /* Whole 64-bit register value. */
+    struct bdk_gsernx_lanex_scope_0_dat_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_41_63        : 23;
+        uint64_t cnt_done              : 1;  /**< [ 40: 40](RO/H) Indicates when the match counter has counted down from
+                                                                 GSERN()_LANE()_SCOPE_CTL[CNT_LIMIT] to 0x0. The error vector will no longer
+                                                                 be updated once the counter is done. To clear the flag a new
+                                                                 GSERN()_LANE()_SCOPE_CTL[DOUTQ_LD] or GSERN()_LANE()_SCOPE_CTL[CNT_RST_N] toggle
+                                                                 needs to happen. */
+        uint64_t ref_vec               : 40; /**< [ 39:  0](RO/H) Stored doutq that will be used to compare against incoming
+                                                                 doutq. Its value is changed by toggling GSERN()_LANE()_SCOPE_CTL[DOUTQ_LD]
+                                                                 low then high, which will save the next doutq received in the PCS
+                                                                 layer as the new reference vector, or by setting
+                                                                 GSERN()_LANE()_SCOPE_CTL_2[REF_VEC_OVRRIDE] and
+                                                                 GSERN()_LANE()_SCOPE_CTL_2[REF_VEC_OVRRIDE_EN].
+                                                                 This field is only valid when GSERN()_LANE()_SCOPE_0_DAT[CNT_DONE] is asserted. */
+#else /* Word 0 - Little Endian */
+        uint64_t ref_vec               : 40; /**< [ 39:  0](RO/H) Stored doutq that will be used to compare against incoming
+                                                                 doutq. Its value is changed by toggling GSERN()_LANE()_SCOPE_CTL[DOUTQ_LD]
+                                                                 low then high, which will save the next doutq received in the PCS
+                                                                 layer as the new reference vector, or by setting
+                                                                 GSERN()_LANE()_SCOPE_CTL_2[REF_VEC_OVRRIDE] and
+                                                                 GSERN()_LANE()_SCOPE_CTL_2[REF_VEC_OVRRIDE_EN].
+                                                                 This field is only valid when GSERN()_LANE()_SCOPE_0_DAT[CNT_DONE] is asserted. */
+        uint64_t cnt_done              : 1;  /**< [ 40: 40](RO/H) Indicates when the match counter has counted down from
+                                                                 GSERN()_LANE()_SCOPE_CTL[CNT_LIMIT] to 0x0. The error vector will no longer
+                                                                 be updated once the counter is done. To clear the flag a new
+                                                                 GSERN()_LANE()_SCOPE_CTL[DOUTQ_LD] or GSERN()_LANE()_SCOPE_CTL[CNT_RST_N] toggle
+                                                                 needs to happen. */
+        uint64_t reserved_41_63        : 23;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_scope_0_dat_s cn; */
+};
+typedef union bdk_gsernx_lanex_scope_0_dat bdk_gsernx_lanex_scope_0_dat_t;
+
+/* Return the RSL address of GSERN(a)_LANE(b)_SCOPE_0_DAT. Valid only for
+   a<=7 and b<=4 on CN9XXX parts; any other arguments report a fatal CSR error. */
+static inline uint64_t BDK_GSERNX_LANEX_SCOPE_0_DAT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SCOPE_0_DAT(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090000900ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_SCOPE_0_DAT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_SCOPE_0_DAT(a,b) bdk_gsernx_lanex_scope_0_dat_t
+#define bustype_BDK_GSERNX_LANEX_SCOPE_0_DAT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SCOPE_0_DAT(a,b) "GSERNX_LANEX_SCOPE_0_DAT"
+#define device_bar_BDK_GSERNX_LANEX_SCOPE_0_DAT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SCOPE_0_DAT(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SCOPE_0_DAT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_scope_1_dat
+ *
+ * GSER Lane PCS Lite Scope Data Gathering Result Register 1
+ */
+union bdk_gsernx_lanex_scope_1_dat
+{
+    uint64_t u; /* Whole 64-bit register value. */
+    struct bdk_gsernx_lanex_scope_1_dat_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_40_63        : 24;
+        uint64_t err_vec               : 40; /**< [ 39:  0](RO/H) Error vector that maintains status of mismatches between doutq &
+                                                                 doute. It updates every time there is a match between doutq & the
+                                                                 captured GSERN()_LANE()_SCOPE_0_DAT[REF_VEC]. To clear it a toggle to
+                                                                 GSERN()_LANE()_SCOPE_CTL[DOUTQ_LD] or GSERN()_LANE()_SCOPE_CTL[CNT_EN] is
+                                                                 needed. This field is only valid when GSERN()_LANE()_SCOPE_0_DAT[CNT_DONE] is
+                                                                 set. */
+#else /* Word 0 - Little Endian */
+        uint64_t err_vec               : 40; /**< [ 39:  0](RO/H) Error vector that maintains status of mismatches between doutq &
+                                                                 doute. It updates every time there is a match between doutq & the
+                                                                 captured GSERN()_LANE()_SCOPE_0_DAT[REF_VEC]. To clear it a toggle to
+                                                                 GSERN()_LANE()_SCOPE_CTL[DOUTQ_LD] or GSERN()_LANE()_SCOPE_CTL[CNT_EN] is
+                                                                 needed. This field is only valid when GSERN()_LANE()_SCOPE_0_DAT[CNT_DONE] is
+                                                                 set. */
+        uint64_t reserved_40_63        : 24;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_scope_1_dat_s cn; */
+};
+typedef union bdk_gsernx_lanex_scope_1_dat bdk_gsernx_lanex_scope_1_dat_t;
+
+/* Return the RSL address of GSERN(a)_LANE(b)_SCOPE_1_DAT. Valid only for
+   a<=7 and b<=4 on CN9XXX parts; any other arguments report a fatal CSR error. */
+static inline uint64_t BDK_GSERNX_LANEX_SCOPE_1_DAT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SCOPE_1_DAT(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090000910ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_SCOPE_1_DAT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_SCOPE_1_DAT(a,b) bdk_gsernx_lanex_scope_1_dat_t
+#define bustype_BDK_GSERNX_LANEX_SCOPE_1_DAT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SCOPE_1_DAT(a,b) "GSERNX_LANEX_SCOPE_1_DAT"
+#define device_bar_BDK_GSERNX_LANEX_SCOPE_1_DAT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SCOPE_1_DAT(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SCOPE_1_DAT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_scope_ctl
+ *
+ * GSER Lane PCS Lite Scope Data Gathering Control Register
+ * Register controls for the PCS layer scope function. Use of this function
+ * requires enabling the doute eye data path in the analog macro, i.e.,
+ * GSERN()_LANE()_RST2_BCFG[LN_RESET_USE_EYE] should be asserted when the lane
+ * reset state machines bring the lane out of reset.
+ */
+union bdk_gsernx_lanex_scope_ctl
+{
+    uint64_t u; /* Whole 64-bit register value. */
+    struct bdk_gsernx_lanex_scope_ctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_57_63        : 7;
+        uint64_t doutq_ld              : 1;  /**< [ 56: 56](R/W) Set to a doutq value for comparison against incoming
+                                                                 doutq. The incoming stream should guarantee a recurring doutq
+                                                                 pattern to capture valid error vector. This works only on a
+                                                                 positive-edge trigger which means a new value won't be stored until
+                                                                 a 0-\>1 transition happens. Assertion of GSERN()_LANE()_SCOPE_CTL[DOUTQ_LD]
+                                                                 also resets the match counter, GSERN()_LANE()_SCOPE_0_DAT[CNT_DONE] and
+                                                                 GSERN()_LANE()_SCOPE_1_DAT[ERR_VEC]. Deassert [DOUTQ_LD] to
+                                                                 enable the match counter to count down and to enable collection of
+                                                                 new data in the error vector (also requires that
+                                                                 GSERN()_LANE()_SCOPE_CTL[CNT_RST_N] is clear).
+
+                                                                 For diagnostic use only. */
+        uint64_t reserved_50_55        : 6;
+        uint64_t scope_en              : 1;  /**< [ 49: 49](R/W) Set to enable collection of GSERN()_LANE()_SCOPE_1_DAT[ERR_VEC]
+                                                                 data. Deassertion stops collection of new mismatch bits, but does
+                                                                 not reset GSERN()_LANE()_SCOPE_1_DAT[ERR_VEC]. If
+                                                                 GSERN()_LANE()_SCOPE_CTL[CNT_EN] is also asserted, collection will stop
+                                                                 when the GSERN()_LANE()_SCOPE_CTL[CNT_LIMIT] is reached. If not using
+                                                                 GSERN()_LANE()_SCOPE_CTL[CNT_LIMIT], software can control duration of
+                                                                 GSERN()_LANE()_SCOPE_1_DAT[ERR_VEC] data collection through
+                                                                 [SCOPE_EN]. All scope logic is conditionally clocked with the
+                                                                 condition being GSERN()_LANE()_SCOPE_CTL[SCOPE_EN], so deassert this bit
+                                                                 when not used to save power.
+
+                                                                 For diagnostic use only. */
+        uint64_t cnt_rst_n             : 1;  /**< [ 48: 48](R/W) Set low to reset the match counter, the done indicator, and the error
+                                                                 vector. The reset value for the counter is set by
+                                                                 GSERN()_LANE()_SCOPE_CTL[CNT_LIMIT]. GSERN()_LANE()_SCOPE_0_DAT[CNT_DONE] and
+                                                                 the error vector, GSERN()_LANE()_SCOPE_1_DAT[ERR_VEC], reset to all zeros. Set
+                                                                 this bit high to enable the match counter to count down and to enable collection
+                                                                 of new data in the error vector (also requires that
+                                                                 GSERN()_LANE()_SCOPE_CTL[DOUTQ_LD] is not set high). Cycle
+                                                                 GSERN()_LANE()_SCOPE_CTL[CNT_RST_N] (low then high) to clear the counter and the
+                                                                 error vector, leaving GSERN()_LANE()_SCOPE_0_DAT[REF_VEC] unchanged, enabling
+                                                                 collection of a new error vector under updated receiver settings using the same
+                                                                 reference vector match pattern.
+
+                                                                 For diagnostic use only. */
+        uint64_t reserved_41_47        : 7;
+        uint64_t cnt_en                : 1;  /**< [ 40: 40](R/W) Enable use of the match counter to limit the number of doutq to
+                                                                 ref_vec matches over which the doutq to doute mismatch vector is
+                                                                 accumulated. If this bit is not asserted,
+                                                                 GSERN()_LANE()_SCOPE_1_DAT[ERR_VEC] accumulation is limited by
+                                                                 GSERN()_LANE()_SCOPE_CTL[SCOPE_EN] and/or GSERN()_LANE()_SCOPE_CTL[CNT_RST_N].
+
+                                                                 For diagnostic use only. */
+        uint64_t cnt_limit             : 40; /**< [ 39:  0](R/W) Limit value the match counter starts decrementing
+                                                                 from. It gets loaded every time a new doutq load happens or a
+                                                                 counter reset happens.
+
+                                                                 For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+        uint64_t cnt_limit             : 40; /**< [ 39:  0](R/W) Limit value the match counter starts decrementing
+                                                                 from. It gets loaded every time a new doutq load happens or a
+                                                                 counter reset happens.
+
+                                                                 For diagnostic use only. */
+        uint64_t cnt_en                : 1;  /**< [ 40: 40](R/W) Enable use of the match counter to limit the number of doutq to
+                                                                 ref_vec matches over which the doutq to doute mismatch vector is
+                                                                 accumulated. If this bit is not asserted,
+                                                                 GSERN()_LANE()_SCOPE_1_DAT[ERR_VEC] accumulation is limited by
+                                                                 GSERN()_LANE()_SCOPE_CTL[SCOPE_EN] and/or GSERN()_LANE()_SCOPE_CTL[CNT_RST_N].
+
+                                                                 For diagnostic use only. */
+        uint64_t reserved_41_47        : 7;
+        uint64_t cnt_rst_n             : 1;  /**< [ 48: 48](R/W) Set low to reset the match counter, the done indicator, and the error
+                                                                 vector. The reset value for the counter is set by
+                                                                 GSERN()_LANE()_SCOPE_CTL[CNT_LIMIT]. GSERN()_LANE()_SCOPE_0_DAT[CNT_DONE] and
+                                                                 the error vector, GSERN()_LANE()_SCOPE_1_DAT[ERR_VEC], reset to all zeros. Set
+                                                                 this bit high to enable the match counter to count down and to enable collection
+                                                                 of new data in the error vector (also requires that
+                                                                 GSERN()_LANE()_SCOPE_CTL[DOUTQ_LD] is not set high). Cycle
+                                                                 GSERN()_LANE()_SCOPE_CTL[CNT_RST_N] (low then high) to clear the counter and the
+                                                                 error vector, leaving GSERN()_LANE()_SCOPE_0_DAT[REF_VEC] unchanged, enabling
+                                                                 collection of a new error vector under updated receiver settings using the same
+                                                                 reference vector match pattern.
+
+                                                                 For diagnostic use only. */
+        uint64_t scope_en              : 1;  /**< [ 49: 49](R/W) Set to enable collection of GSERN()_LANE()_SCOPE_1_DAT[ERR_VEC]
+                                                                 data. Deassertion stops collection of new mismatch bits, but does
+                                                                 not reset GSERN()_LANE()_SCOPE_1_DAT[ERR_VEC]. If
+                                                                 GSERN()_LANE()_SCOPE_CTL[CNT_EN] is also asserted, collection will stop
+                                                                 when the GSERN()_LANE()_SCOPE_CTL[CNT_LIMIT] is reached. If not using
+                                                                 GSERN()_LANE()_SCOPE_CTL[CNT_LIMIT], software can control duration of
+                                                                 GSERN()_LANE()_SCOPE_1_DAT[ERR_VEC] data collection through
+                                                                 [SCOPE_EN]. All scope logic is conditionally clocked with the
+                                                                 condition being GSERN()_LANE()_SCOPE_CTL[SCOPE_EN], so deassert this bit
+                                                                 when not used to save power.
+
+                                                                 For diagnostic use only. */
+        uint64_t reserved_50_55        : 6;
+        uint64_t doutq_ld              : 1;  /**< [ 56: 56](R/W) Set to a doutq value for comparison against incoming
+                                                                 doutq. The incoming stream should guarantee a recurring doutq
+                                                                 pattern to capture valid error vector. This works only on a
+                                                                 positive-edge trigger which means a new value won't be stored until
+                                                                 a 0-\>1 transition happens. Assertion of GSERN()_LANE()_SCOPE_CTL[DOUTQ_LD]
+                                                                 also resets the match counter, GSERN()_LANE()_SCOPE_0_DAT[CNT_DONE] and
+                                                                 GSERN()_LANE()_SCOPE_1_DAT[ERR_VEC]. Deassert [DOUTQ_LD] to
+                                                                 enable the match counter to count down and to enable collection of
+                                                                 new data in the error vector (also requires that
+                                                                 GSERN()_LANE()_SCOPE_CTL[CNT_RST_N] is clear).
+
+                                                                 For diagnostic use only. */
+        uint64_t reserved_57_63        : 7;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_scope_ctl_s cn; */
+};
+typedef union bdk_gsernx_lanex_scope_ctl bdk_gsernx_lanex_scope_ctl_t;
+
+/* Return the RSL address of GSERN(a)_LANE(b)_SCOPE_CTL. Valid only for
+   a<=7 and b<=4 on CN9XXX parts; any other arguments report a fatal CSR error. */
+static inline uint64_t BDK_GSERNX_LANEX_SCOPE_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SCOPE_CTL(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e0900008d0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_SCOPE_CTL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_SCOPE_CTL(a,b) bdk_gsernx_lanex_scope_ctl_t
+#define bustype_BDK_GSERNX_LANEX_SCOPE_CTL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SCOPE_CTL(a,b) "GSERNX_LANEX_SCOPE_CTL"
+#define device_bar_BDK_GSERNX_LANEX_SCOPE_CTL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SCOPE_CTL(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SCOPE_CTL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_scope_ctl_2
+ *
+ * GSER Lane PCS Lite Scope Data Gathering Control Register 2
+ * This register contains control signals to allow loading a specific reference vector
+ * for use in the scope logic instead of capturing the reference vector from the input
+ * data stream. For diagnostic use only.
+ */
+union bdk_gsernx_lanex_scope_ctl_2
+{
+    uint64_t u; /* Whole 64-bit register value. */
+    struct bdk_gsernx_lanex_scope_ctl_2_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_42_63        : 22;
+        uint64_t use_doute_cal         : 1;  /**< [ 41: 41](R/W) Set to select doute_cal data (receiver eye calibration path) for
+                                                                 scope comparisons with doutq (receiver normal quadrature path). If
+                                                                 clear, the default will be to use doute (receiver eye path) to
+                                                                 compare with doutq. The bit should be programmed as desired before
+                                                                 writing GSERN()_LANE()_SCOPE_CTL[SCOPE_EN] to one.
+
+                                                                 For diagnostic use only. */
+        uint64_t ref_vec_ovrride_en    : 1;  /**< [ 40: 40](R/W) Enable use of [REF_VEC_OVRRIDE] for the scope logic instead
+                                                                 of capturing the reference vector from the input data stream. This
+                                                                 control has priority over
+                                                                 GSERN()_LANE()_SCOPE_CTL[DOUTQ_LD]. This field should be
+                                                                 deasserted when the override value, [REF_VEC_OVRRIDE], is
+                                                                 changed. [REF_VEC_OVRRIDE_EN] may be asserted in the same register
+                                                                 write that changes [REF_VEC_OVRRIDE].
+
+                                                                 For diagnostic use only. */
+        uint64_t ref_vec_ovrride       : 40; /**< [ 39:  0](R/W) Selectable reference vector to use for comparison with doutq and doute for the
+                                                                 scope logic as an alternative to capturing the reference vector from the
+                                                                 incoming data stream. When used, this pattern should be recurring in the
+                                                                 incoming data stream to capture valid error vector data, since errors will only
+                                                                 be accumulated in the error vector when doutq matches the reference
+                                                                 vector. [REF_VEC_OVRRIDE_EN] should be deasserted when [REF_VEC_OVRRIDE] is
+                                                                 changed. [REF_VEC_OVRRIDE_EN] may be written to a one in the same register write
+                                                                 that changes [REF_VEC_OVRRIDE]. Note that the bit pattern in [REF_VEC_OVRRIDE]
+                                                                 must match the format produced by the receiver's deserializer for the data path
+                                                                 width in use.
+
+                                                                 For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+        uint64_t ref_vec_ovrride       : 40; /**< [ 39:  0](R/W) Selectable reference vector to use for comparison with doutq and doute for the
+                                                                 scope logic as an alternative to capturing the reference vector from the
+                                                                 incoming data stream. When used, this pattern should be recurring in the
+                                                                 incoming data stream to capture valid error vector data, since errors will only
+                                                                 be accumulated in the error vector when doutq matches the reference
+                                                                 vector. [REF_VEC_OVRRIDE_EN] should be deasserted when [REF_VEC_OVRRIDE] is
+                                                                 changed. [REF_VEC_OVRRIDE_EN] may be written to a one in the same register write
+                                                                 that changes [REF_VEC_OVRRIDE]. Note that the bit pattern in [REF_VEC_OVRRIDE]
+                                                                 must match the format produced by the receiver's deserializer for the data path
+                                                                 width in use.
+
+                                                                 For diagnostic use only. */
+        uint64_t ref_vec_ovrride_en    : 1;  /**< [ 40: 40](R/W) Enable use of [REF_VEC_OVRRIDE] for the scope logic instead
+                                                                 of capturing the reference vector from the input data stream. This
+                                                                 control has priority over
+                                                                 GSERN()_LANE()_SCOPE_CTL[DOUTQ_LD]. This field should be
+                                                                 deasserted when the override value, [REF_VEC_OVRRIDE], is
+                                                                 changed. [REF_VEC_OVRRIDE_EN] may be asserted in the same register
+                                                                 write that changes [REF_VEC_OVRRIDE].
+
+                                                                 For diagnostic use only. */
+        uint64_t use_doute_cal         : 1;  /**< [ 41: 41](R/W) Set to select doute_cal data (receiver eye calibration path) for
+                                                                 scope comparisons with doutq (receiver normal quadrature path). If
+                                                                 clear, the default will be to use doute (receiver eye path) to
+                                                                 compare with doutq. The bit should be programmed as desired before
+                                                                 writing GSERN()_LANE()_SCOPE_CTL[SCOPE_EN] to one.
+
+                                                                 For diagnostic use only. */
+        uint64_t reserved_42_63        : 22;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_scope_ctl_2_s cn; */
+};
+typedef union bdk_gsernx_lanex_scope_ctl_2 bdk_gsernx_lanex_scope_ctl_2_t;
+
+/* Return the RSL address of GSERN(a)_LANE(b)_SCOPE_CTL_2. Valid only for
+   a<=7 and b<=4 on CN9XXX parts; any other arguments report a fatal CSR error. */
+static inline uint64_t BDK_GSERNX_LANEX_SCOPE_CTL_2(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SCOPE_CTL_2(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e0900008e0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_SCOPE_CTL_2", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_SCOPE_CTL_2(a,b) bdk_gsernx_lanex_scope_ctl_2_t
+#define bustype_BDK_GSERNX_LANEX_SCOPE_CTL_2(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SCOPE_CTL_2(a,b) "GSERNX_LANEX_SCOPE_CTL_2"
+#define device_bar_BDK_GSERNX_LANEX_SCOPE_CTL_2(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SCOPE_CTL_2(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SCOPE_CTL_2(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_scope_ctl_3
+ *
+ * GSER Lane PCS Lite Scope Data Gathering Control Register 3
+ * The four bits in this register allow for shifting either the doutq or
+ * doute_cal data by 1 or 2 UI to allow for an offset in the framing of the
+ * deserialized data between these two data paths in the receiver. Software
+ * will need to iterate eye or scope measurement with identical settings
+ * for the quadurature and eye datapaths, adjusting the shift bits in this
+ * register until no differences are accumulated. (Note that shifting both
+ * doutq and doute_cal would typically not be useful, since the resulting
+ * alignment would be the same as if neither were shifted.)
+ */
+union bdk_gsernx_lanex_scope_ctl_3
+{
+    uint64_t u; /* Whole 64-bit register value. */
+    struct bdk_gsernx_lanex_scope_ctl_3_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_10_63        : 54;
+        uint64_t dbl_shift_doute       : 1;  /**< [  9:  9](R/W) Assert to shift the doute_cal (receiver eye calibration path) data
+                                                                 by 2 UI earlier to align with doutq for eye and scope comparison
+                                                                 logic. Only data captured in the eye or scope logic is impacted by
+                                                                 this setting. When asserted, the double shift control has priority
+                                                                 over the (single) shift control. Program as desired before enabling eye
+                                                                 data capture. */
+        uint64_t shift_doute           : 1;  /**< [  8:  8](R/W) Assert to shift the doute_cal (receiver eye path) data by 1 UI
+                                                                 earlier to align with doutq for eye and scope comparison logic. Only
+                                                                 data captured in the eye or scope logic is impacted by this
+                                                                 setting. Program as desired before enabling eye data capture. */
+        uint64_t reserved_2_7          : 6;
+        uint64_t dbl_shift_doutq       : 1;  /**< [  1:  1](R/W) Assert to shift the doutq (receiver normal quadrature path) data by
+                                                                 2 UI earlier to align with doute_cal for eye and scope comparison
+                                                                 logic. Only data captured in the eye or scope logic is impacted by
+                                                                 this setting. When asserted, the double shift control has priority
+                                                                 over the (single) shift control. Program as desired before enabling eye
+                                                                 data capture. */
+        uint64_t shift_doutq           : 1;  /**< [  0:  0](R/W) Assert to shift the doutq (receiver normal quadrature path) data by
+                                                                 1 UI earlier to align with doute_cal for eye and scope comparison
+                                                                 logic. Only data captured in the eye or scope logic is impacted by
+                                                                 this setting. Program as desired before enabling eye data capture. */
+#else /* Word 0 - Little Endian */
+        uint64_t shift_doutq           : 1;  /**< [  0:  0](R/W) Assert to shift the doutq (receiver normal quadrature path) data by
+                                                                 1 UI earlier to align with doute_cal for eye and scope comparison
+                                                                 logic. Only data captured in the eye or scope logic is impacted by
+                                                                 this setting. Program as desired before enabling eye data capture. */
+        uint64_t dbl_shift_doutq       : 1;  /**< [  1:  1](R/W) Assert to shift the doutq (receiver normal quadrature path) data by
+                                                                 2 UI earlier to align with doute_cal for eye and scope comparison
+                                                                 logic. Only data captured in the eye or scope logic is impacted by
+                                                                 this setting. When asserted, the double shift control has priority
+                                                                 over the (single) shift control. Program as desired before enabling eye
+                                                                 data capture. */
+        uint64_t reserved_2_7          : 6;
+        uint64_t shift_doute           : 1;  /**< [  8:  8](R/W) Assert to shift the doute_cal (receiver eye path) data by 1 UI
+                                                                 earlier to align with doutq for eye and scope comparison logic. Only
+                                                                 data captured in the eye or scope logic is impacted by this
+                                                                 setting. Program as desired before enabling eye data capture. */
+        uint64_t dbl_shift_doute       : 1;  /**< [  9:  9](R/W) Assert to shift the doute_cal (receiver eye calibration path) data
+                                                                 by 2 UI earlier to align with doutq for eye and scope comparison
+                                                                 logic. Only data captured in the eye or scope logic is impacted by
+                                                                 this setting. When asserted, the double shift control has priority
+                                                                 over the (single) shift control. Program as desired before enabling eye
+                                                                 data capture. */
+        uint64_t reserved_10_63        : 54;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_scope_ctl_3_s cn; */
+};
+typedef union bdk_gsernx_lanex_scope_ctl_3 bdk_gsernx_lanex_scope_ctl_3_t;
+
+/* Return the RSL address of GSERN(a)_LANE(b)_SCOPE_CTL_3. Valid only for
+   a<=7 and b<=4 on CN9XXX parts; any other arguments report a fatal CSR error. */
+static inline uint64_t BDK_GSERNX_LANEX_SCOPE_CTL_3(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SCOPE_CTL_3(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e0900008f0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_SCOPE_CTL_3", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_SCOPE_CTL_3(a,b) bdk_gsernx_lanex_scope_ctl_3_t
+#define bustype_BDK_GSERNX_LANEX_SCOPE_CTL_3(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SCOPE_CTL_3(a,b) "GSERNX_LANEX_SCOPE_CTL_3"
+#define device_bar_BDK_GSERNX_LANEX_SCOPE_CTL_3(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SCOPE_CTL_3(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SCOPE_CTL_3(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_srcmx_bcfg
+ *
+ * GSER Lane PCS Source Mux Control Register
+ */
+union bdk_gsernx_lanex_srcmx_bcfg
+{
+    uint64_t u; /* Whole 64-bit register value. */
+    struct bdk_gsernx_lanex_srcmx_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_50_63        : 14;
+        uint64_t en_hldcdrfsm_on_idle  : 1;  /**< [ 49: 49](R/W) Enable holding the CSR finite state machine when the receiver idle filter
+                                                                 detects idle.
+                                                                 For diagnostic use only. */
+        uint64_t en_pauseadpt_on_idle  : 1;  /**< [ 48: 48](R/W) Enable pausing adaptation when the receiver idle filter detects idle.
+                                                                 For diagnostic use only. */
+        uint64_t reserved_44_47        : 4;
+        uint64_t trn_tx_cgt_on         : 1;  /**< [ 43: 43](R/W) Force the clock gate for the training transmit data path clock on.
+                                                                 For diagnostic use only. */
+        uint64_t trn_rx_cgt_on         : 1;  /**< [ 42: 42](R/W) Force the clock gate for the training receive data path clock on.
+                                                                 For diagnostic use only. */
+        uint64_t ocx_tx_cgt_on         : 1;  /**< [ 41: 41](R/W) Force on the clock gate for the OCX interface.
+                                                                 For diagnostic use only. */
+        uint64_t ocx_rx_cgt_on         : 1;  /**< [ 40: 40](R/W) Force on the clock gate for the OCX interface.
+                                                                 For diagnostic use only. */
+        uint64_t sata_tx_cgt_on        : 1;  /**< [ 39: 39](R/W) Force the clock gate for the SATA transmit data path clock on.
+                                                                 For diagnostic use only. */
+        uint64_t sata_rx_cgt_on        : 1;  /**< [ 38: 38](R/W) Force the clock gate for the SATA receive data path clock on.
+                                                                 For diagnostic use only. */
+        uint64_t pcie_tx_cgt_on        : 1;  /**< [ 37: 37](R/W) Force the clock gate for the PCIe transmit data path clock on.
+                                                                 For diagnostic use only. */
+        uint64_t pcie_rx_cgt_on        : 1;  /**< [ 36: 36](R/W) Force the clock gate for the PCIe receive data path clock on.
+                                                                 For diagnostic use only. */
+        uint64_t pat_tx_cgt_on         : 1;  /**< [ 35: 35](R/W) Force the clock gate for the pattern transmit data path clock on.
+                                                                 For diagnostic use only. */
+        uint64_t pat_rx_cgt_on         : 1;  /**< [ 34: 34](R/W) Force the clock gate for the pattern receive data path clock on.
+                                                                 For diagnostic use only. */
+        uint64_t cgx_tx_cgt_on         : 1;  /**< [ 33: 33](R/W) Force the clock gate for the CGX transmit data path clock on.
+                                                                 For diagnostic use only. */
+        uint64_t cgx_rx_cgt_on         : 1;  /**< [ 32: 32](R/W) Force the clock gate for the CGX receive data path clock on.
+                                                                 For diagnostic use only. */
+        uint64_t reserved_24_31        : 8;
+        uint64_t txdivclk_mux_sel_ovrride_en : 1;/**< [ 23: 23](R/W) Mux selection override enable for lane txdivclk mux; enables use of
+                                                                 [TXDIVCLK_MUX_SEL_OVRRIDE]. This must be set to 1 for all lanes in a multi-lane
+                                                                 link.
+                                                                 0 = Use the lane's local txdivclk.
+                                                                 1 = Use [TXDIVCLK_MUX_SEL_OVRRIDE] instead of other sources for control of the
+                                                                 lane txdivclk mux. */
+        uint64_t reserved_19_22        : 4;
+        uint64_t txdivclk_mux_sel_ovrride : 3;/**< [ 18: 16](R/W) Mux selection override control for lane txdivclk mux, when enabled by
+                                                                 [TXDIVCLK_MUX_SEL_OVRRIDE_EN], the following values apply:
+                                                                 0x0 = Use lane internal txdivclk (e.g. for single-lane links).
+                                                                 0x1 = Use txdivclkx2 (e.g. for 2-lane links).
+                                                                 0x2 = Use txdivclkx4 (e.g. for 4-lane links).
+                                                                 0x3 = Use txdivclkx8 (e.g. for 8-lane links).
+                                                                 0x4 = Use txdivclkx16 (e.g. for 16-lane links).
+                                                                 _ else = Reserved. */
+        uint64_t reserved_13_15        : 3;
+        uint64_t tx_ctrl_sel           : 5;  /**< [ 12:  8](R/W) Lite layer transmit control-settings mux control:
+                                                                 0x0 = no source selected; defaults to idle termination unless CSR overrides are
+                                                                 enabled by setting GSERN()_LANE()_TX_DRV_BCFG[EN_TX_DRV].
+                                                                 0x1 = PCIe.
+                                                                 0x2 = CGX.
+                                                                 0x4 = SATA.
+                                                                 0x8 = OCX.
+                                                                 0x10 = Pattern memory generator.
+                                                                 _ else = reserved. */
+        uint64_t reserved_5_7          : 3;
+        uint64_t tx_data_sel           : 5;  /**< [  4:  0](R/W) Lite layer transmit data mux control:
+                                                                 0x0 = No source selected, e.g., for PRBS testing.
+                                                                 0x1 = PCIe.
+                                                                 0x2 = CGX.
+                                                                 0x4 = SATA.
+                                                                 0x8 = OCX.
+                                                                 0x10 = Pattern memory generator.
+                                                                 _ else = reserved. (This is a 1-hot vector.) */
+#else /* Word 0 - Little Endian */
+        uint64_t tx_data_sel           : 5;  /**< [  4:  0](R/W) Lite layer transmit data mux control:
+                                                                 0x0 = No source selected, e.g., for PRBS testing.
+                                                                 0x1 = PCIe.
+                                                                 0x2 = CGX.
+                                                                 0x4 = SATA.
+                                                                 0x8 = OCX.
+                                                                 0x10 = Pattern memory generator.
+                                                                 _ else = reserved. (This is a 1-hot vector.) */
+        uint64_t reserved_5_7          : 3;
+        uint64_t tx_ctrl_sel           : 5;  /**< [ 12:  8](R/W) Lite layer transmit control-settings mux control:
+                                                                 0x0 = no source selected; defaults to idle termination unless CSR overrides are
+                                                                 enabled by setting GSERN()_LANE()_TX_DRV_BCFG[EN_TX_DRV].
+                                                                 0x1 = PCIe.
+                                                                 0x2 = CGX.
+                                                                 0x4 = SATA.
+                                                                 0x8 = OCX.
+                                                                 0x10 = Pattern memory generator.
+                                                                 _ else = reserved. */
+        uint64_t reserved_13_15        : 3;
+        uint64_t txdivclk_mux_sel_ovrride : 3;/**< [ 18: 16](R/W) Mux selection override control for lane txdivclk mux, when enabled by
+                                                                 [TXDIVCLK_MUX_SEL_OVRRIDE_EN], the following values apply:
+                                                                 0x0 = Use lane internal txdivclk (e.g. for single-lane links).
+                                                                 0x1 = Use txdivclkx2 (e.g. for 2-lane links).
+                                                                 0x2 = Use txdivclkx4 (e.g. for 4-lane links).
+                                                                 0x3 = Use txdivclkx8 (e.g. for 8-lane links).
+                                                                 0x4 = Use txdivclkx16 (e.g. for 16-lane links).
+                                                                 _ else = Reserved. */
+        uint64_t reserved_19_22        : 4;
+        uint64_t txdivclk_mux_sel_ovrride_en : 1;/**< [ 23: 23](R/W) Mux selection override enable for lane txdivclk mux; enables use of
+                                                                 [TXDIVCLK_MUX_SEL_OVRRIDE]. This must be set to 1 for all lanes in a multi-lane
+                                                                 link.
+                                                                 0 = Use the lane's local txdivclk.
+                                                                 1 = Use [TXDIVCLK_MUX_SEL_OVRRIDE] instead of other sources for control of the
+                                                                 lane txdivclk mux. */
+        uint64_t reserved_24_31        : 8;
+        uint64_t cgx_rx_cgt_on         : 1;  /**< [ 32: 32](R/W) Force the clock gate for the CGX receive data path clock on.
+                                                                 For diagnostic use only. */
+        uint64_t cgx_tx_cgt_on         : 1;  /**< [ 33: 33](R/W) Force the clock gate for the CGX transmit data path clock on.
+                                                                 For diagnostic use only. */
+        uint64_t pat_rx_cgt_on         : 1;  /**< [ 34: 34](R/W) Force the clock gate for the pattern receive data path clock on.
+                                                                 For diagnostic use only. */
+        uint64_t pat_tx_cgt_on         : 1;  /**< [ 35: 35](R/W) Force the clock gate for the pattern transmit data path clock on.
+                                                                 For diagnostic use only. */
+        uint64_t pcie_rx_cgt_on        : 1;  /**< [ 36: 36](R/W) Force the clock gate for the PCIe receive data path clock on.
+                                                                 For diagnostic use only. */
+        uint64_t pcie_tx_cgt_on        : 1;  /**< [ 37: 37](R/W) Force the clock gate for the PCIe transmit data path clock on.
+                                                                 For diagnostic use only. */
+        uint64_t sata_rx_cgt_on        : 1;  /**< [ 38: 38](R/W) Force the clock gate for the SATA receive data path clock on.
+                                                                 For diagnostic use only. */
+        uint64_t sata_tx_cgt_on        : 1;  /**< [ 39: 39](R/W) Force the clock gate for the SATA transmit data path clock on.
+                                                                 For diagnostic use only. */
+        uint64_t ocx_rx_cgt_on         : 1;  /**< [ 40: 40](R/W) Force on the clock gate for the OCX interface.
+                                                                 For diagnostic use only. */
+        uint64_t ocx_tx_cgt_on         : 1;  /**< [ 41: 41](R/W) Force on the clock gate for the OCX interface.
+                                                                 For diagnostic use only. */
+        uint64_t trn_rx_cgt_on         : 1;  /**< [ 42: 42](R/W) Force the clock gate for the training receive data path clock on.
+                                                                 For diagnostic use only. */
+        uint64_t trn_tx_cgt_on         : 1;  /**< [ 43: 43](R/W) Force the clock gate for the training transmit data path clock on.
+                                                                 For diagnostic use only. */
+        uint64_t reserved_44_47        : 4;
+        uint64_t en_pauseadpt_on_idle  : 1;  /**< [ 48: 48](R/W) Enable pausing adaptation when the receiver idle filter detects idle.
+                                                                 For diagnostic use only. */
+        uint64_t en_hldcdrfsm_on_idle  : 1;  /**< [ 49: 49](R/W) Enable holding the CSR finite state machine when the receiver idle filter
+                                                                 detects idle.
+                                                                 For diagnostic use only. */
+        uint64_t reserved_50_63        : 14;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_srcmx_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_srcmx_bcfg bdk_gsernx_lanex_srcmx_bcfg_t;
+
+/* Return the RSL address of GSERN(a)_LANE(b)_SRCMX_BCFG. Valid only for
+   a<=7 and b<=4 on CN9XXX parts; any other arguments report a fatal CSR error. */
+static inline uint64_t BDK_GSERNX_LANEX_SRCMX_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SRCMX_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090000a10ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_SRCMX_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_SRCMX_BCFG(a,b) bdk_gsernx_lanex_srcmx_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SRCMX_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SRCMX_BCFG(a,b) "GSERNX_LANEX_SRCMX_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SRCMX_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SRCMX_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SRCMX_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_train_0_bcfg
+ *
+ * GSER Lane Training Base Configuration Register 0
+ * This register controls settings for lane training.
+ */
+union bdk_gsernx_lanex_train_0_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_train_0_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t txt_post : 5; /**< [ 63: 59](RO/H) After TX BASE-R link training, this is the resultant POST Tap value that was
+ written to the PHY. This field has no meaning if TX BASE-R link training was
+ not performed.
+ For diagnostic use only. */
+ uint64_t txt_main : 6; /**< [ 58: 53](RO/H) After TX BASE-R link training, this is the resultant MAIN Tap value that was
+ written to the PHY. This field has no meaning if TX BASE-R link training was
+ not performed.
+ For diagnostic use only. */
+ uint64_t txt_pre : 5; /**< [ 52: 48](RO/H) After TX BASE-R link training, this is the resultant PRE Tap value that was
+ written to the PHY. This field has no meaning if TX BASE-R link training was
+ not performed.
+ For diagnostic use only. */
+ uint64_t txt_swm : 1; /**< [ 47: 47](R/W) Set when TX BASE-R link training is to be performed under software control. For diagnostic
+ use only. */
+ uint64_t txt_cur_post : 5; /**< [ 46: 42](R/W) When TX BASE-R link training is being performed under software control,
+ e.g. GSERN()_LANE()_TRAIN_0_BCFG[TXT_SWM] is set, this is the (C+1) coefficient
+ update to be written to the SerDes TX Equalizer.
+ The coefficients are written to the TX equalizer when
+ GSERN()_LANE()_TRAIN_0_BCFG[TXT_CUR_PRG] is set to a one.
+ For diagnostic use only. */
+ uint64_t txt_cur_main : 6; /**< [ 41: 36](R/W) When TX BASE-R link training is being performed under software control,
+ e.g. GSERN()_LANE()_TRAIN_0_BCFG[TXT_SWM] is set, this is the (C0) coefficient
+ update to be written to the SerDes TX Equalizer.
+ The coefficients are written to the TX equalizer when
+ GSERN()_LANE()_TRAIN_0_BCFG[TXT_CUR_PRG] is set to a one.
+ For diagnostic use only. */
+ uint64_t txt_cur_pre : 5; /**< [ 35: 31](R/W) When TX BASE-R link training is being performed under software control,
+ e.g. GSERN()_LANE()_TRAIN_0_BCFG[TXT_SWM] is set, this is the (C-1) coefficient
+ update to be written to the SerDes TX Equalizer.
+ The coefficients are written to the TX equalizer when
+ GSERN()_LANE()_TRAIN_0_BCFG[TXT_CUR_PRG] is set to a one.
+ For diagnostic use only. */
+ uint64_t txt_cur_prg : 1; /**< [ 30: 30](R/W) When TX BASE-R link training is being performed under software control,
+ e.g. GSERN()_LANE()_TRAIN_0_BCFG[TXT_SWM] is set, setting [TXT_CUR_PRG] writes the TX
+ equalizer
+ coefficients in GSERN()_LANE()_TRAIN_0_BCFG[TXT_CUR_PRE],
+ GSERN()_LANE()_TRAIN_0_BCFG[TXT_CUR_MAIN],
+ and GSERN()_LANE()_TRAIN_0_BCFG[TXT_CUR_POST] registers into the GSER TX equalizer.
+ For diagnostic use only. */
+ uint64_t rxt_adtmout_fast : 1; /**< [ 29: 29](R/W) Reserved.
+ Internal:
+ For simulation use only. When set accelerates the link training time-out timer during
+ BASE-R link training. When set shortens the link training time-out timer to time-out
+ after 164 microseconds to facilitate shorter BASE-R training simulations runs.
+ For diagnostic use only. */
+ uint64_t rxt_adtmout_sel : 2; /**< [ 28: 27](R/W) Selects the timeout value for the BASE-R link training time-out timer.
+ This time-out timer value is only valid if
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_DISABLE]
+ is cleared to 0 and BASE-R hardware training is enabled.
+
+ When GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_FAST] is cleared to 0 the link training
+ time-out timer value is set by [RXT_ADTMOUT_SEL] to the values shown.
+ 0x0 = 83.89 milliseconds.
+ 0x1 = 167.77 milliseconds.
+ 0x2 = 335.54 milliseconds.
+ 0x3 = 419.43 milliseconds.
+
+ When GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_FAST] is set to 1 the link training
+ time-out timer value is set by [RXT_ADTMOUT_SEL] to the values shown.
+ 0x0 = 81.92 microseconds.
+ 0x1 = 163.84 microseconds.
+ 0x2 = 327.68 microseconds.
+ 0x3 = 655.36 microseconds. */
+ uint64_t rxt_adtmout_disable : 1; /**< [ 26: 26](R/W) For BASE-R links one of the terminating condition for link training receiver adaptation
+ is a programmable time-out timer. When the receiver adaptation time-out timer
+ expires the link training process is concluded and the link is considered good and
+ the receiver ready status report bit is set in the local device.
+ Note that when BASE-R link training is performed under software control,
+ (GSERN()_LANE()_TRAIN_0_BCFG[RXT_SWM] is set), the receiver adaptation time-out timer is
+ disabled and not used.
+
+ Set this bit to a one to disable the link training receiver adaptation time-out
+ timer during BASE-R link training under hardware control. For diagnostic use only. */
+ uint64_t rxt_eer : 1; /**< [ 25: 25](WO/H) When RX BASE-R link training is being performed under software control,
+ (GSERN()_LANE()_TRAIN_0_BCFG[RXT_SWM] is set), writing this bit initiates an equalization
+ request to the SerDes receiver equalizer. Reading this bit always returns a zero. */
+ uint64_t rxt_esv : 1; /**< [ 24: 24](RO/H) When performing an equalization request ([RXT_EER]), this bit, when set, indicates that
+ the
+ equalization status (RXT_ESM) is valid. When issuing a [RXT_EER] request, it is expected
+ that [RXT_ESV] will get written to zero so that a valid RXT_ESM can be determined. */
+ uint64_t rxt_tx_post_dir : 2; /**< [ 23: 22](RO/H) RX recommended TXPOST direction change.
+
+ Recommended direction change outputs from the PHY for the link partner transmitter
+ coefficients.
+ 0x0 = Hold.
+ 0x1 = Increment.
+ 0x2 = Decrement.
+ 0x3 = Hold. */
+ uint64_t rxt_tx_main_dir : 2; /**< [ 21: 20](RO/H) RX recommended TXMAIN direction change.
+
+ Recommended direction change outputs from the PHY for the link partner transmitter
+ coefficients.
+ 0x0 = Hold.
+ 0x1 = Increment.
+ 0x2 = Decrement.
+ 0x3 = Hold. */
+ uint64_t rxt_tx_pre_dir : 2; /**< [ 19: 18](RO/H) RX recommended TXPRE direction change.
+
+ Recommended direction change outputs from the PHY for the link partner transmitter
+ coefficients.
+ 0x0 = Hold.
+ 0x1 = Increment.
+ 0x2 = Decrement.
+ 0x3 = Hold. */
+ uint64_t trn_short : 1; /**< [ 17: 17](R/W) Train short. Executes an abbreviated BASE-R training session.
+ For diagnostic use only. */
+ uint64_t ld_receiver_rdy : 1; /**< [ 16: 16](RO/H) At the completion of BASE-R training the local device sets receiver ready. This bit
+ reflects the state of the local device receiver ready status. For Debug use only.
+ This bit is only valid during BASE-R link training and at the conclusion of link
+ training. */
+ uint64_t frz_cdr_en : 1; /**< [ 15: 15](R/W) Freeze CDR enable. In CGX mode when set to a one enables the CGX MAC to
+ Freeze the receiver CDR during BASE-R autonegotiation (AN) and KR training
+ to prevent the RX CDR from locking onto the differential manchester encoded
+ AN and KR training frames. CGX asserts the rx cdr coast signal to the GSER
+ block to freeze the RX CDR. Clearing [FRZ_CDR_EN] prevents CGX from freezing
+ the RX CDR.
+ For diagnostic use only. */
+ uint64_t trn_ovrd_en : 1; /**< [ 14: 14](R/W) BASE-R Training Override Enable. Setting [TRN_OVRD_EN] will enable BASE-R training logic
+ for both CGX and OCX. This is a CSR override for the BASE-R training enable signals from
+ the CGX and OCX blocks. Either GSERN()_LANE()_TRAIN_0_BCFG[CFG_CGX] or
+ GSERN()_LANE()_TRAIN_0_BCFG[CFG_OCX] must be set to 1 before [TRN_OVRD_EN] is set to 1. Also
+ GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL] must be programmed to select CGX or OCX mode
+ before [TRN_OVRD_EN] is set to 1.
+ For diagnostic use only. */
+ uint64_t reserved_8_13 : 6;
+ uint64_t cfg_ocx : 1; /**< [ 7: 7](R/W) Configure BASE-R training logic for OCX mode. When [CFG_OCX] is set the
+ Coefficient Update (CU) and Status Report (SR) messaging is reconfigured for
+ the OCX controller. The CU and SR messages must be sent and received in the
+ txdivclk and rxdivclk domains for the OCX controller.
+
+ When [CFG_OCX] is set, the GSERN()_LANE()_TRAIN_0_BCFG[CFG_CGX] field must be
+ cleared to zero. */
+ uint64_t rxt_adjmain : 1; /**< [ 6: 6](R/W) For all link training, this bit determines how the main tap is adjusted at the start
+ of link training. When set the main tap of link partner transmitter peak-to-peak level
+ is adjusted to optimize the AGC of the local device receiver. This is intended to prevent
+ receiver saturation on short or low loss links.
+
+ To perform main tap optimization of the link partner transmitter set this bit prior to
+ enabling link training. */
+ uint64_t rxt_initialize : 1; /**< [ 5: 5](R/W) For all link training, this bit determines how to configure the initialize bit in the
+ coefficient update message that is sent to the far end transmitter of RX training. When
+ set, a request is made that the coefficients be set to its INITIALIZE state. To perform an
+ initialize prior to link training, set this bit prior to performing link training. Note
+ that it is illegal to set both the preset and initialize bits at the same time. */
+ uint64_t rxt_preset : 1; /**< [ 4: 4](R/W) For all link training, this bit determines how to configure the preset bit in the
+ coefficient update message that is sent to the far end transmitter. When set, a one time
+ request is made that the coefficients be set to a state where equalization is turned off.
+
+ To perform a preset, set this bit prior to link training. Link training needs to be
+ disabled to complete the request and get the rxtrain state machine back to idle. Note that
+ it is illegal to set both the preset and initialize bits at the same time. For diagnostic
+ use only. */
+ uint64_t rxt_swm : 1; /**< [ 3: 3](R/W) Set when RX BASE-R link training is to be performed under software control.
+
+ See GSERN()_LANE()_TRAIN_0_BCFG[RXT_EER]. */
+ uint64_t cgx_quad : 1; /**< [ 2: 2](R/W) When set, indicates the QLM is in CGX quad aggregation mode. [CGX_QUAD] must only be
+ set when GSERN()_LANE()_SRCMX_BCFG[TX_DATA_SEL]=CGX is set and
+ GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]=CGX is set and [CGX_DUAL] is clear.
+
+ When [CGX_QUAD] is set, GSER bundles all four lanes for one CGX controller.
+ [CGX_QUAD] must only be set for the XAUI/DXAUI, XLAUI, and CAUI protocols. */
+ uint64_t cgx_dual : 1; /**< [ 1: 1](R/W) When set, indicates the QLM is in CGX dual aggregation mode. [CGX_DUAL] must only be
+ set when GSERN()_LANE()_SRCMX_BCFG[TX_DATA_SEL]=CGX is set and
+ GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]=CGX is set and [CGX_QUAD] is clear.
+
+ When [CGX_DUAL] is set, GSER bundles lanes 0 and 1 for one CGX controller and bundles
+ lanes 2 and 3 for another CGX controller. [CGX_DUAL] must only be set for the RXAUI
+ protocol. */
+ uint64_t cfg_cgx : 1; /**< [ 0: 0](R/W) When set, indicates the BASE-R training logic is in CGX mode. Enables SCLK to the CGX TX
+ and RX
+ data path and the BASE-R TX/RX Training blocks. [CFG_CGX] must be set to one when
+ either GSERN()_LANE()_TRAIN_0_BCFG[CGX_DUAL] or GSERN()_LANE()_TRAIN_0_BCFG[CGX_QUAD]
+ is set.
+
+ When [CFG_CGX] is set, the GSERN()_LANE()_TRAIN_0_BCFG[CFG_OCX] field must be
+ cleared to zero. */
+#else /* Word 0 - Little Endian */
+ uint64_t cfg_cgx : 1; /**< [ 0: 0](R/W) When set, indicates the BASE-R training logic is in CGX mode. Enables SCLK to the CGX TX
+ and RX
+ data path and the BASE-R TX/RX Training blocks. [CFG_CGX] must be set to one when
+ either GSERN()_LANE()_TRAIN_0_BCFG[CGX_DUAL] or GSERN()_LANE()_TRAIN_0_BCFG[CGX_QUAD]
+ is set.
+
+ When [CFG_CGX] is set, the GSERN()_LANE()_TRAIN_0_BCFG[CFG_OCX] field must be
+ cleared to zero. */
+ uint64_t cgx_dual : 1; /**< [ 1: 1](R/W) When set, indicates the QLM is in CGX dual aggregation mode. [CGX_DUAL] must only be
+ set when GSERN()_LANE()_SRCMX_BCFG[TX_DATA_SEL]=CGX is set and
+ GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]=CGX is set and [CGX_QUAD] is clear.
+
+ When [CGX_DUAL] is set, GSER bundles lanes 0 and 1 for one CGX controller and bundles
+ lanes 2 and 3 for another CGX controller. [CGX_DUAL] must only be set for the RXAUI
+ protocol. */
+ uint64_t cgx_quad : 1; /**< [ 2: 2](R/W) When set, indicates the QLM is in CGX quad aggregation mode. [CGX_QUAD] must only be
+ set when GSERN()_LANE()_SRCMX_BCFG[TX_DATA_SEL]=CGX is set and
+ GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]=CGX is set and [CGX_DUAL] is clear.
+
+ When [CGX_QUAD] is set, GSER bundles all four lanes for one CGX controller.
+ [CGX_QUAD] must only be set for the XAUI/DXAUI, XLAUI, and CAUI protocols. */
+ uint64_t rxt_swm : 1; /**< [ 3: 3](R/W) Set when RX BASE-R link training is to be performed under software control.
+
+ See GSERN()_LANE()_TRAIN_0_BCFG[RXT_EER]. */
+ uint64_t rxt_preset : 1; /**< [ 4: 4](R/W) For all link training, this bit determines how to configure the preset bit in the
+ coefficient update message that is sent to the far end transmitter. When set, a one time
+ request is made that the coefficients be set to a state where equalization is turned off.
+
+ To perform a preset, set this bit prior to link training. Link training needs to be
+ disabled to complete the request and get the rxtrain state machine back to idle. Note that
+ it is illegal to set both the preset and initialize bits at the same time. For diagnostic
+ use only. */
+ uint64_t rxt_initialize : 1; /**< [ 5: 5](R/W) For all link training, this bit determines how to configure the initialize bit in the
+ coefficient update message that is sent to the far end transmitter of RX training. When
+ set, a request is made that the coefficients be set to its INITIALIZE state. To perform an
+ initialize prior to link training, set this bit prior to performing link training. Note
+ that it is illegal to set both the preset and initialize bits at the same time. */
+ uint64_t rxt_adjmain : 1; /**< [ 6: 6](R/W) For all link training, this bit determines how the main tap is adjusted at the start
+ of link training. When set the main tap of link partner transmitter peak-to-peak level
+ is adjusted to optimize the AGC of the local device receiver. This is intended to prevent
+ receiver saturation on short or low loss links.
+
+ To perform main tap optimization of the link partner transmitter set this bit prior to
+ enabling link training. */
+ uint64_t cfg_ocx : 1; /**< [ 7: 7](R/W) Configure BASE-R training logic for OCX mode. When [CFG_OCX] is set the
+ Coefficient Update (CU) and Status Report (SR) messaging is reconfigured for
+ the OCX controller. The CU and SR messages must be sent and received in the
+ txdivclk and rxdivclk domains for the OCX controller.
+
+ When [CFG_OCX] is set, the GSERN()_LANE()_TRAIN_0_BCFG[CFG_CGX] field must be
+ cleared to zero. */
+ uint64_t reserved_8_13 : 6;
+ uint64_t trn_ovrd_en : 1; /**< [ 14: 14](R/W) BASE-R Training Override Enable. Setting [TRN_OVRD_EN] will enable BASE-R training logic
+ for both CGX and OCX. This is a CSR override for the BASE-R training enable signals from
+ the CGX and OCX blocks. Either GSERN()_LANE()_TRAIN_0_BCFG[CFG_CGX] or
+ GSERN()_LANE()_TRAIN_0_BCFG[CFG_OCX] must be set to 1 before [TRN_OVRD_EN] is set to 1. Also
+ GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL] must be programmed to select CGX or OCX mode
+ before [TRN_OVRD_EN] is set to 1.
+ For diagnostic use only. */
+ uint64_t frz_cdr_en : 1; /**< [ 15: 15](R/W) Freeze CDR enable. In CGX mode when set to a one enables the CGX MAC to
+ Freeze the receiver CDR during BASE-R autonegotiation (AN) and KR training
+ to prevent the RX CDR from locking onto the differential manchester encoded
+ AN and KR training frames. CGX asserts the rx cdr coast signal to the GSER
+ block to freeze the RX CDR. Clearing [FRZ_CDR_EN] prevents CGX from freezing
+ the RX CDR.
+ For diagnostic use only. */
+ uint64_t ld_receiver_rdy : 1; /**< [ 16: 16](RO/H) At the completion of BASE-R training the local device sets receiver ready. This bit
+ reflects the state of the local device receiver ready status. For Debug use only.
+ This bit is only valid during BASE-R link training and at the conclusion of link
+ training. */
+ uint64_t trn_short : 1; /**< [ 17: 17](R/W) Train short. Executes an abbreviated BASE-R training session.
+ For diagnostic use only. */
+ uint64_t rxt_tx_pre_dir : 2; /**< [ 19: 18](RO/H) RX recommended TXPRE direction change.
+
+ Recommended direction change outputs from the PHY for the link partner transmitter
+ coefficients.
+ 0x0 = Hold.
+ 0x1 = Increment.
+ 0x2 = Decrement.
+ 0x3 = Hold. */
+ uint64_t rxt_tx_main_dir : 2; /**< [ 21: 20](RO/H) RX recommended TXMAIN direction change.
+
+ Recommended direction change outputs from the PHY for the link partner transmitter
+ coefficients.
+ 0x0 = Hold.
+ 0x1 = Increment.
+ 0x2 = Decrement.
+ 0x3 = Hold. */
+ uint64_t rxt_tx_post_dir : 2; /**< [ 23: 22](RO/H) RX recommended TXPOST direction change.
+
+ Recommended direction change outputs from the PHY for the link partner transmitter
+ coefficients.
+ 0x0 = Hold.
+ 0x1 = Increment.
+ 0x2 = Decrement.
+ 0x3 = Hold. */
+ uint64_t rxt_esv : 1; /**< [ 24: 24](RO/H) When performing an equalization request ([RXT_EER]), this bit, when set, indicates that
+ the
+ equalization status (RXT_ESM) is valid. When issuing a [RXT_EER] request, it is expected
+ that [RXT_ESV] will get written to zero so that a valid RXT_ESM can be determined. */
+ uint64_t rxt_eer : 1; /**< [ 25: 25](WO/H) When RX BASE-R link training is being performed under software control,
+ (GSERN()_LANE()_TRAIN_0_BCFG[RXT_SWM] is set), writing this bit initiates an equalization
+ request to the SerDes receiver equalizer. Reading this bit always returns a zero. */
+ uint64_t rxt_adtmout_disable : 1; /**< [ 26: 26](R/W) For BASE-R links one of the terminating condition for link training receiver adaptation
+ is a programmable time-out timer. When the receiver adaptation time-out timer
+ expires the link training process is concluded and the link is considered good and
+ the receiver ready status report bit is set in the local device.
+ Note that when BASE-R link training is performed under software control,
+ (GSERN()_LANE()_TRAIN_0_BCFG[RXT_SWM] is set), the receiver adaptation time-out timer is
+ disabled and not used.
+
+ Set this bit to a one to disable the link training receiver adaptation time-out
+ timer during BASE-R link training under hardware control. For diagnostic use only. */
+ uint64_t rxt_adtmout_sel : 2; /**< [ 28: 27](R/W) Selects the timeout value for the BASE-R link training time-out timer.
+ This time-out timer value is only valid if
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_DISABLE]
+ is cleared to 0 and BASE-R hardware training is enabled.
+
+ When GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_FAST] is cleared to 0 the link training
+ time-out timer value is set by [RXT_ADTMOUT_SEL] to the values shown.
+ 0x0 = 83.89 milliseconds.
+ 0x1 = 167.77 milliseconds.
+ 0x2 = 335.54 milliseconds.
+ 0x3 = 419.43 milliseconds.
+
+ When GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_FAST] is set to 1 the link training
+ time-out timer value is set by [RXT_ADTMOUT_SEL] to the values shown.
+ 0x0 = 81.92 microseconds.
+ 0x1 = 163.84 microseconds.
+ 0x2 = 327.68 microseconds.
+ 0x3 = 655.36 microseconds. */
+ uint64_t rxt_adtmout_fast : 1; /**< [ 29: 29](R/W) Reserved.
+ Internal:
+ For simulation use only. When set accelerates the link training time-out timer during
+ BASE-R link training. When set shortens the link training time-out timer to time-out
+ after 164 microseconds to facilitate shorter BASE-R training simulations runs.
+ For diagnostic use only. */
+ uint64_t txt_cur_prg : 1; /**< [ 30: 30](R/W) When TX BASE-R link training is being performed under software control,
+ e.g. GSERN()_LANE()_TRAIN_0_BCFG[TXT_SWM] is set, setting [TXT_CUR_PRG] writes the TX
+ equalizer
+ coefficients in GSERN()_LANE()_TRAIN_0_BCFG[TXT_CUR_PRE],
+ GSERN()_LANE()_TRAIN_0_BCFG[TXT_CUR_MAIN],
+ and GSERN()_LANE()_TRAIN_0_BCFG[TXT_CUR_POST] registers into the GSER TX equalizer.
+ For diagnostic use only. */
+ uint64_t txt_cur_pre : 5; /**< [ 35: 31](R/W) When TX BASE-R link training is being performed under software control,
+ e.g. GSERN()_LANE()_TRAIN_0_BCFG[TXT_SWM] is set, this is the (C-1) coefficient
+ update to be written to the SerDes TX Equalizer.
+ The coefficients are written to the TX equalizer when
+ GSERN()_LANE()_TRAIN_0_BCFG[TXT_CUR_PRG] is set to a one.
+ For diagnostic use only. */
+ uint64_t txt_cur_main : 6; /**< [ 41: 36](R/W) When TX BASE-R link training is being performed under software control,
+ e.g. GSERN()_LANE()_TRAIN_0_BCFG[TXT_SWM] is set, this is the (C0) coefficient
+ update to be written to the SerDes TX Equalizer.
+ The coefficients are written to the TX equalizer when
+ GSERN()_LANE()_TRAIN_0_BCFG[TXT_CUR_PRG] is set to a one.
+ For diagnostic use only. */
+ uint64_t txt_cur_post : 5; /**< [ 46: 42](R/W) When TX BASE-R link training is being performed under software control,
+ e.g. GSERN()_LANE()_TRAIN_0_BCFG[TXT_SWM] is set, this is the (C+1) coefficient
+ update to be written to the SerDes TX Equalizer.
+ The coefficients are written to the TX equalizer when
+ GSERN()_LANE()_TRAIN_0_BCFG[TXT_CUR_PRG] is set to a one.
+ For diagnostic use only. */
+ uint64_t txt_swm : 1; /**< [ 47: 47](R/W) Set when TX BASE-R link training is to be performed under software control. For diagnostic
+ use only. */
+ uint64_t txt_pre : 5; /**< [ 52: 48](RO/H) After TX BASE-R link training, this is the resultant PRE Tap value that was
+ written to the PHY. This field has no meaning if TX BASE-R link training was
+ not performed.
+ For diagnostic use only. */
+ uint64_t txt_main : 6; /**< [ 58: 53](RO/H) After TX BASE-R link training, this is the resultant MAIN Tap value that was
+ written to the PHY. This field has no meaning if TX BASE-R link training was
+ not performed.
+ For diagnostic use only. */
+ uint64_t txt_post : 5; /**< [ 63: 59](RO/H) After TX BASE-R link training, this is the resultant POST Tap value that was
+ written to the PHY. This field has no meaning if TX BASE-R link training was
+ not performed.
+ For diagnostic use only. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_train_0_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_train_0_bcfg bdk_gsernx_lanex_train_0_bcfg_t;
+
+/* Returns the RSL CSR address of GSERN(a)_LANE(b)_TRAIN_0_BCFG on CN9XXX
+ * parts: base 0x87e0900031b0 plus per-module (a) and per-lane (b) strides.
+ * Any other model, or an out-of-range index, falls through to
+ * __bdk_csr_fatal() and does not return a valid address.
+ * NOTE(review): the guard accepts b<=4 while the address math masks with
+ * 0x7 -- generated convention, same as the other GSERN lane accessors. */
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_0_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_0_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900031b0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_TRAIN_0_BCFG", 2, a, b, 0, 0);
+}
+
+/* Boilerplate accessor macros (value type, bus type, register name, BAR,
+ * bus number, argument list) consumed by the generic BDK_CSR_* machinery. */
+#define typedef_BDK_GSERNX_LANEX_TRAIN_0_BCFG(a,b) bdk_gsernx_lanex_train_0_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_TRAIN_0_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_TRAIN_0_BCFG(a,b) "GSERNX_LANEX_TRAIN_0_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_TRAIN_0_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_TRAIN_0_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_TRAIN_0_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_train_10_bcfg
+ *
+ * GSER Lane Training Base Configuration Register 10
+ * This register controls settings for lane training.
+ */
+union bdk_gsernx_lanex_train_10_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_train_10_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_59_63 : 5;
+ uint64_t l_c1_e_adj_sgn : 1; /**< [ 58: 58](R/W) Sets the lower C1 E sampler adjustment voltage offset sign.
+ 0 = The offset sign is positive
+ positioning the lower C1_E sampler below the eye C1_Q sampler.
+ 1 = The offset sign is negative
+ positioning the lower C1_E sampler above the eye C1_Q sampler.
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_4_BCFG[C1_E_ADJ_STEP] during KR training.
+ For diagnostic use only. */
+ uint64_t u_c1_e_adj_sgn : 1; /**< [ 57: 57](R/W) Sets the upper C1 E sampler adjustment voltage offset sign.
+ 0 = The offset sign is positive
+ positioning the upper C1_E sampler above the eye C1_Q sampler.
+ 1 = The offset sign is negative
+ positioning the upper C1_E sampler below the eye C1_Q sampler.
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_10_BCFG[U_C1_E_ADJ_STEP] for BASE-R training.
+ For diagnostic use only. */
+ uint64_t u_c1_e_adj_step : 5; /**< [ 56: 52](R/W) Sets the C1 E sampler voltage level during eye monitor sampling when
+ GSERN()_LANE()_TRAIN_10_BCFG[FOM_TYPE] is set to one for BASE-R training.
+ Typically [U_C1_E_ADJ_STEP] is set to 0x3 to position the eye monitor
+ error sampler at ~15mv above the C1 Q sampler voltage level when
+ computing the FOM using the two step process, e.g. [FOM_TYPE] set to one,
+ with the error slicer level positioned above and below the data slicer
+ level. The error slicer level and positon relative to the data slicer
+ is controlled by [U_C1_E_ADJ_STEP] and
+ GSERN()_LANE()_TRAIN_10_BCFG[U_C1_E_ADJ_SGN] for BASE-R training.
+ Steps are in units of 5.08 mV per step.
+ For diagnostic use only. */
+ uint64_t l_c1_e_adj_step : 5; /**< [ 51: 47](R/W) Sets the C1 E sampler voltage level during eye monitor sampling when
+ GSERN()_LANE()_TRAIN_10_BCFG[FOM_TYPE] is set to one for BASE-R training.
+ Typically [U_C1_E_ADJ_STEP] is set to 0x3 to position the eye monitor
+ error sampler at ~15mv below the C1 Q sampler voltage level when
+ computing the FOM using the two step process, e.g. [FOM_TYPE] set to one,
+ with the error slicer level positioned above and below the data slicer
+ level. The error slicer level and positon relative to the data slicer
+ is controlled by [U_C1_E_ADJ_STEP] and
+ GSERN()_LANE()_TRAIN_10_BCFG[L_C1_E_ADJ_SGN] for BASE-R training.
+ Steps are in units of 5.08 mV per step.
+ For diagnostic use only. */
+ uint64_t fom_type : 1; /**< [ 46: 46](R/W) BASE-R and PCIE training selects the Figure of Merit (FOM) measurement type. For
+ diagnostic use only.
+ 0 = The raw FOM is measured by setting the eye monitor
+ error slicer below the data slicer nominal level and counting the errors
+ for each of the transition ones, non trasition ones, transition zeros, and
+ non transition zeros then summing the four error counts, convert to ones
+ complement, then normalize to a 12-bit unsigned integer.
+ 1 = The raw FOM calculation follows the steps above however the
+ eye monitor error measurements is a two step process with the error slicer
+ first set both below the nominal data slicer level and then on the second
+ measurement pass set above the data slicer nominal level.
+
+ Internal:
+ The first FOM method can detect a saturated receiver and stop training
+ if the eye is sufficiently open.
+ The second FOM method returns a lower value for overequalized eyes and
+ is useful for driving the training to a more optimal equalization
+ setting on longer links. */
+ uint64_t trn_fom_thrs_en : 1; /**< [ 45: 45](R/W) BASE-R training when set to 1 enables the FOM threshold value in
+ GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL] for training convergence
+ detection. When the measured FOM exceeds the value in
+ GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL] and
+ GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN] is set to 0x1, training
+ will terminate depending on the settings of the training termination
+ condition values set in
+ GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_AND] and
+ GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_OR].
+
+ When BASE-R training converges due the FOM threshold being met or
+ exceeded GSERN()_LANE()_TRAIN_3_BCFG[EXIT_FOM_THRS] will be set to 1
+ if GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN] is set to 1.
+ For diagnostic use only. */
+ uint64_t exit_fom_thrs_val : 12; /**< [ 44: 33](R/W) BASE-R training sets the FOM threshold value used for training convergence
+ detection. When the measured FOM exceeds the value in [EXIT_FOM_THRS_VAL]
+ and GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN] is set to 0x1, training
+ will terminate depending on the settings of the training termination
+ condition values set in
+ GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_AND] and
+ GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_OR].
+
+ Refer to the description for GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN]
+ and GSERN()_LANE()_TRAIN_3_BCFG[EXIT_FOM_THRS].
+ For diagnostic use only. */
+ uint64_t ttrk_array_clr : 1; /**< [ 32: 32](R/W) KR training Local Device Tx Equalizer tracking array clear signal. Used to
+ clear the tracking array after KR training has completed.
+ For diagnostic use only. */
+ uint64_t ttrk_array_rd : 1; /**< [ 31: 31](R/W) KR training Local Device Tx Equalizer tracking array index Read signal. Used to
+ readback tap values from the tracking array after KR training has completed.
+ For diagnostic use only. */
+ uint64_t ttrk_array_addr : 7; /**< [ 30: 24](R/W) KR training Local Device Tx Equalizer tracking array index. Used to
+ readback tap values from the tracking array after KR training has completed.
+ For diagnostic use only.
+
+ Internal:
+ During KR training the local device transmitter tap values (C0,C+1,C-1)
+ are stored in the tap tracking array. The array holds up to 128 locations.
+ After KR training completes the array can be read back to determine the
+ training progression of the transmitter taps. This is helpful in debugging
+ KR training convergence problems of the local device transmitter. */
+ uint64_t ttrk_moves : 8; /**< [ 23: 16](RO/H) KR training Local Device Tx Equalizer number of tap adjustments during KR training.
+ For diagnostic use only. */
+ uint64_t ttrk_pre : 5; /**< [ 15: 11](RO/H) KR training Local Device Tx Equalizer Pre (C-1) value from the tap tracking array.
+ For diagnostic use only. */
+ uint64_t ttrk_main : 6; /**< [ 10: 5](RO/H) KR training Local Device Tx Equalizer Main (C0) value from the tap tracking array.
+ For diagnostic use only. */
+ uint64_t ttrk_post : 5; /**< [ 4: 0](RO/H) KR training Local Device Tx Equalizer Post (C+1) value from the tap tracking array.
+ For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t ttrk_post : 5; /**< [ 4: 0](RO/H) KR training Local Device Tx Equalizer Post (C+1) value from the tap tracking array.
+ For diagnostic use only. */
+ uint64_t ttrk_main : 6; /**< [ 10: 5](RO/H) KR training Local Device Tx Equalizer Main (C0) value from the tap tracking array.
+ For diagnostic use only. */
+ uint64_t ttrk_pre : 5; /**< [ 15: 11](RO/H) KR training Local Device Tx Equalizer Pre (C-1) value from the tap tracking array.
+ For diagnostic use only. */
+ uint64_t ttrk_moves : 8; /**< [ 23: 16](RO/H) KR training Local Device Tx Equalizer number of tap adjustments during KR training.
+ For diagnostic use only. */
+ uint64_t ttrk_array_addr : 7; /**< [ 30: 24](R/W) KR training Local Device Tx Equalizer tracking array index. Used to
+ readback tap values from the tracking array after KR training has completed.
+ For diagnostic use only.
+
+ Internal:
+ During KR training the local device transmitter tap values (C0,C+1,C-1)
+ are stored in the tap tracking array. The array holds up to 128 locations.
+ After KR training completes the array can be read back to determine the
+ training progression of the transmitter taps. This is helpful in debugging
+ KR training convergence problems of the local device transmitter. */
+ uint64_t ttrk_array_rd : 1; /**< [ 31: 31](R/W) KR training Local Device Tx Equalizer tracking array index Read signal. Used to
+ readback tap values from the tracking array after KR training has completed.
+ For diagnostic use only. */
+ uint64_t ttrk_array_clr : 1; /**< [ 32: 32](R/W) KR training Local Device Tx Equalizer tracking array clear signal. Used to
+ clear the tracking array after KR training has completed.
+ For diagnostic use only. */
+ uint64_t exit_fom_thrs_val : 12; /**< [ 44: 33](R/W) BASE-R training sets the FOM threshold value used for training convergence
+ detection. When the measured FOM exceeds the value in [EXIT_FOM_THRS_VAL]
+ and GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN] is set to 0x1, training
+ will terminate depending on the settings of the training termination
+ condition values set in
+ GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_AND] and
+ GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_OR].
+
+ Refer to the description for GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN]
+ and GSERN()_LANE()_TRAIN_3_BCFG[EXIT_FOM_THRS].
+ For diagnostic use only. */
+ uint64_t trn_fom_thrs_en : 1; /**< [ 45: 45](R/W) BASE-R training when set to 1 enables the FOM threshold value in
+ GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL] for training convergence
+ detection. When the measured FOM exceeds the value in
+ GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL] and
+ GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN] is set to 0x1, training
+ will terminate depending on the settings of the training termination
+ condition values set in
+ GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_AND] and
+ GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_OR].
+
+ When BASE-R training converges due the FOM threshold being met or
+ exceeded GSERN()_LANE()_TRAIN_3_BCFG[EXIT_FOM_THRS] will be set to 1
+ if GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN] is set to 1.
+ For diagnostic use only. */
+ uint64_t fom_type : 1; /**< [ 46: 46](R/W) BASE-R and PCIE training selects the Figure of Merit (FOM) measurement type. For
+ diagnostic use only.
+ 0 = The raw FOM is measured by setting the eye monitor
+ error slicer below the data slicer nominal level and counting the errors
+ for each of the transition ones, non trasition ones, transition zeros, and
+ non transition zeros then summing the four error counts, convert to ones
+ complement, then normalize to a 12-bit unsigned integer.
+ 1 = The raw FOM calculation follows the steps above however the
+ eye monitor error measurements is a two step process with the error slicer
+ first set both below the nominal data slicer level and then on the second
+ measurement pass set above the data slicer nominal level.
+
+ Internal:
+ The first FOM method can detect a saturated receiver and stop training
+ if the eye is sufficiently open.
+ The second FOM method returns a lower value for overequalized eyes and
+ is useful for driving the training to a more optimal equalization
+ setting on longer links. */
+ uint64_t l_c1_e_adj_step : 5; /**< [ 51: 47](R/W) Sets the C1 E sampler voltage level during eye monitor sampling when
+ GSERN()_LANE()_TRAIN_10_BCFG[FOM_TYPE] is set to one for BASE-R training.
+ Typically [U_C1_E_ADJ_STEP] is set to 0x3 to position the eye monitor
+ error sampler at ~15mv below the C1 Q sampler voltage level when
+ computing the FOM using the two step process, e.g. [FOM_TYPE] set to one,
+ with the error slicer level positioned above and below the data slicer
+ level. The error slicer level and positon relative to the data slicer
+ is controlled by [U_C1_E_ADJ_STEP] and
+ GSERN()_LANE()_TRAIN_10_BCFG[L_C1_E_ADJ_SGN] for BASE-R training.
+ Steps are in units of 5.08 mV per step.
+ For diagnostic use only. */
+ uint64_t u_c1_e_adj_step : 5; /**< [ 56: 52](R/W) Sets the C1 E sampler voltage level during eye monitor sampling when
+ GSERN()_LANE()_TRAIN_10_BCFG[FOM_TYPE] is set to one for BASE-R training.
+ Typically [U_C1_E_ADJ_STEP] is set to 0x3 to position the eye monitor
+ error sampler at ~15mv above the C1 Q sampler voltage level when
+ computing the FOM using the two step process, e.g. [FOM_TYPE] set to one,
+ with the error slicer level positioned above and below the data slicer
+ level. The error slicer level and positon relative to the data slicer
+ is controlled by [U_C1_E_ADJ_STEP] and
+ GSERN()_LANE()_TRAIN_10_BCFG[U_C1_E_ADJ_SGN] for BASE-R training.
+ Steps are in units of 5.08 mV per step.
+ For diagnostic use only. */
+ uint64_t u_c1_e_adj_sgn : 1; /**< [ 57: 57](R/W) Sets the upper C1 E sampler adjustment voltage offset sign.
+ 0 = The offset sign is positive
+ positioning the upper C1_E sampler above the eye C1_Q sampler.
+ 1 = The offset sign is negative
+ positioning the upper C1_E sampler below the eye C1_Q sampler.
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_10_BCFG[U_C1_E_ADJ_STEP] for BASE-R training.
+ For diagnostic use only. */
+ uint64_t l_c1_e_adj_sgn : 1; /**< [ 58: 58](R/W) Sets the lower C1 E sampler adjustment voltage offset sign.
+ 0 = The offset sign is positive
+ positioning the lower C1_E sampler below the eye C1_Q sampler.
+ 1 = The offset sign is negative
+ positioning the lower C1_E sampler above the eye C1_Q sampler.
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_4_BCFG[C1_E_ADJ_STEP] during KR training.
+ For diagnostic use only. */
+ uint64_t reserved_59_63 : 5;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_train_10_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_train_10_bcfg bdk_gsernx_lanex_train_10_bcfg_t;
+
+/* Return the RSL bus address of GSERN(a)_LANE(b)_TRAIN_10_BCFG.
+ * Valid only on CN9XXX models with module index a <= 7 and lane index
+ * b <= 4; any other combination is reported via __bdk_csr_fatal().
+ * NOTE(review): the address arithmetic masks both indices with 0x7 even
+ * though the guard limits b to 4 -- this is the generator's convention
+ * (mask covers the address stride, guard covers the architectural limit). */
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_10_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_10_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090003250ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_TRAIN_10_BCFG", 2, a, b, 0, 0);
+}
+
+/* Per-register helper macros describing this CSR: C type, bus type,
+ * printable name, device BAR, bus number, and accessor argument list. */
+#define typedef_BDK_GSERNX_LANEX_TRAIN_10_BCFG(a,b) bdk_gsernx_lanex_train_10_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_TRAIN_10_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_TRAIN_10_BCFG(a,b) "GSERNX_LANEX_TRAIN_10_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_TRAIN_10_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_TRAIN_10_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_TRAIN_10_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_train_1_bcfg
+ *
+ * GSER Lane Training Base Configuration Register 1
+ * This register controls settings for lane training.
+ */
+union bdk_gsernx_lanex_train_1_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_train_1_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t rxt_fom : 12; /**< [ 63: 52](RO/H) Figure of merit. A 12-bit output from the PHY indicating the quality of the
+ received data eye. A higher value indicates better link equalization, with 0x0
+ indicating worst equalization setting and 4095 indicating the best equalization
+ setting. */
+ uint64_t train_tx_rule : 8; /**< [ 51: 44](R/W) BASE-R training TX taps coefficient rule. Sets the upper limit of the permissible
+ range of the combined TX equalizer c(0), c(+1), and c(-1) taps so that the TX equalizer
+ operates within range specified in the 10GBASE-KR standard.
+ The TX coefficient rule requires (pre + post + main) \<= [TRAIN_TX_RULE].
+
+ The allowable range for [TRAIN_TX_RULE] is (24 decimal \<= [TRAIN_TX_RULE] \<= 48
+ decimal).
+ For 10GBASE-KR it is recommended to program [TRAIN_TX_RULE] to 0x30 (48 decimal).
+
+ c(-1) pre TX tap range is programmed by GSERN()_LANE()_TRAIN_2_BCFG[PRE_MAX_LIMIT] and
+ GSERN()_LANE()_TRAIN_2_BCFG[PRE_MIN_LIMIT].
+
+ c(0) main TX tap range is programmed by GSERN()_LANE()_TRAIN_2_BCFG[MAIN_MAX_LIMIT] and
+ GSERN()_LANE()_TRAIN_2_BCFG[MAIN_MIN_LIMIT].
+
+ c(+1) post TX tap range is programmed by GSERN()_LANE()_TRAIN_2_BCFG[POST_MAX_LIMIT] and
+ GSERN()_LANE()_TRAIN_2_BCFG[POST_MIN_LIMIT]. */
+ uint64_t trn_rx_nxt_st : 6; /**< [ 43: 38](RO/H) BASE-R training single step next state for the receive training state machine.
+ In single step mode this field holds the value of the next state of the receive
+ training state machine when the GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_SS_SP] bit is
+ set to a one.
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN] must be set to a one to enable single
+ step mode and the GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] must be set to a one
+ to force the receive training state machine to the STOP state.
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST].
+
+ For diagnostic use only. */
+ uint64_t trn_ovrd_st : 6; /**< [ 37: 32](R/W) BASE-R training single step override state for the receive training
+ state machine. In single step mode allows for forcing the receive training
+ state machine to a specific state when exiting the STOP state.
+ Refer to the description for GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_OVRD].
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_OVRD],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_SS_SP].
+
+ For diagnostic use only. */
+ uint64_t trn_ss_ovrd : 1; /**< [ 31: 31](R/W) BASE-R training single step state override control for the receive training
+ state machine.
+ When single step mode is enabled by setting GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN]
+ to 1 and the receive state machine is forced to the STOP state by setting
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] to a 1. When the receive state machine enters
+ the STOP state, indicated by the stop flag GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_SS_SP]
+ set to one, the next state of the receive state machine, prior to entering the STOP
+ state is indicated by the value in the GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_NXT_ST]
+ field. The next state of the receive state machine can be overridden, that is forced
+ to another state other than the next state by setting
+ the desired next state in the GSERN()_LANE()_TRAIN_1_BCFG[TRN_OVRD_ST] field and then
+ clearing the GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_SS_SP] to zero. The receive state
+ machine will exit the STOP state and proceed to state indicated in [TRN_OVRD_ST]
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_OVRD_ST] field.
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_OVRD_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_SS_SP].
+
+ For diagnostic use only. */
+ uint64_t reserved_30 : 1;
+ uint64_t trn_rx_ss_sp : 1; /**< [ 29: 29](RO/H) BASE-R training single step stop flag for the receiver training state machine.
+ When single step mode is enabled by setting GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN]
+ to 1 the receive state machine is forced to the STOP state by setting
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] to a 1. When the receive state machine enters
+ the STOP state, the [TRN_RX_SS_SP] flag will be set. Subsequently, writing
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] to zero will cause the receive state machine
+ to exit the STOP state and jump to the state indicated in the
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_NXT_ST] field.
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_NXT_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_OVRD_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_SS_SP].
+
+ For diagnostic use only. */
+ uint64_t trn_ss_st : 1; /**< [ 28: 28](WO/H) BASE-R training single-step start single-step stop.
+ Refer to the description for GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN].
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_SS_SP],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_NXT_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_OVRD_ST].
+
+ For diagnostic use only. */
+ uint64_t trn_ss_en : 1; /**< [ 27: 27](R/W) BASE-R training single step mode enable. When set to a 1 enables single stepping
+ the BASE-R link training receive state machines.
+
+ When single step mode is enabled by setting GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN]
+ to 1 the receive state machine is forced to the STOP state by setting
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] to a 1. When the receive state machine enters
+ the STOP state, the [TRN_RX_SS_SP] flag will be set. Subsequently, writing
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] to 0 then writing
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] to 1 will cause the receive state machine
+ to exit the STOP state and jump to the state indicated in the
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_NXT_ST] field. Alternatively, the receive
+ state machine can be forced to a different state by writing the state value
+ to the GSERN()_LANE()_TRAIN_1_BCFG[TRN_OVRD_ST] field then set the
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_OVRD] to 1 and then writing
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] to 0 then writing
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] to 1 to force the receive state machine to the
+ override state and then return to the STOP state.
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_NXT_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_OVRD_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_OVRD],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_SS_SP].
+
+ For diagnostic use only. */
+ uint64_t rx_train_fsm : 6; /**< [ 26: 21](RO/H) Value of the BASE-R hardware receiver link training state machine state during
+ link training single step mode. The values in this field are only valid when
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN] is set.
+ For diagnostic use only. */
+ uint64_t tx_train_fsm : 5; /**< [ 20: 16](RO/H) Value of the BASE-R hardware transmitter link training state machine state.
+ For diagnostic use only. */
+ uint64_t txt_post_init : 5; /**< [ 15: 11](R/W) During TX BASE-R link training, the TX posttap value that is used
+ when the initialize coefficients update is received. It is also the TX posttap
+ value used when the BASE-R link training begins.
+ For diagnostic use only. */
+ uint64_t txt_main_init : 6; /**< [ 10: 5](R/W) During TX BASE-R link training, the TX swing-tap value that is used
+ when the initialize coefficients update is received. It is also the TX swing-tap
+ value used when the BASE-R link training begins.
+ For diagnostic use only. */
+ uint64_t txt_pre_init : 5; /**< [ 4: 0](R/W) During TX BASE-R link training, the TX pretap value that is used
+ when the initialize coefficients update is received. It is also the TX pretap
+ value used when the BASE-R link training begins.
+ For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t txt_pre_init : 5; /**< [ 4: 0](R/W) During TX BASE-R link training, the TX pretap value that is used
+ when the initialize coefficients update is received. It is also the TX pretap
+ value used when the BASE-R link training begins.
+ For diagnostic use only. */
+ uint64_t txt_main_init : 6; /**< [ 10: 5](R/W) During TX BASE-R link training, the TX swing-tap value that is used
+ when the initialize coefficients update is received. It is also the TX swing-tap
+ value used when the BASE-R link training begins.
+ For diagnostic use only. */
+ uint64_t txt_post_init : 5; /**< [ 15: 11](R/W) During TX BASE-R link training, the TX posttap value that is used
+ when the initialize coefficients update is received. It is also the TX posttap
+ value used when the BASE-R link training begins.
+ For diagnostic use only. */
+ uint64_t tx_train_fsm : 5; /**< [ 20: 16](RO/H) Value of the BASE-R hardware transmitter link training state machine state.
+ For diagnostic use only. */
+ uint64_t rx_train_fsm : 6; /**< [ 26: 21](RO/H) Value of the BASE-R hardware receiver link training state machine state during
+ link training single step mode. The values in this field are only valid when
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN] is set.
+ For diagnostic use only. */
+ uint64_t trn_ss_en : 1; /**< [ 27: 27](R/W) BASE-R training single step mode enable. When set to a 1 enables single stepping
+ the BASE-R link training receive state machines.
+
+ When single step mode is enabled by setting GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN]
+ to 1 the receive state machine is forced to the STOP state by setting
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] to a 1. When the receive state machine enters
+ the STOP state, the [TRN_RX_SS_SP] flag will be set. Subsequently, writing
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] to 0 then writing
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] to 1 will cause the receive state machine
+ to exit the STOP state and jump to the state indicated in the
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_NXT_ST] field. Alternatively, the receive
+ state machine can be forced to a different state by writing the state value
+ to the GSERN()_LANE()_TRAIN_1_BCFG[TRN_OVRD_ST] field then set the
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_OVRD] to 1 and then writing
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] to 0 then writing
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] to 1 to force the receive state machine to the
+ override state and then return to the STOP state.
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_NXT_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_OVRD_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_OVRD],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_SS_SP].
+
+ For diagnostic use only. */
+ uint64_t trn_ss_st : 1; /**< [ 28: 28](WO/H) BASE-R training single-step start single-step stop.
+ Refer to the description for GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN].
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_SS_SP],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_NXT_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_OVRD_ST].
+
+ For diagnostic use only. */
+ uint64_t trn_rx_ss_sp : 1; /**< [ 29: 29](RO/H) BASE-R training single step stop flag for the receiver training state machine.
+ When single step mode is enabled by setting GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN]
+ to 1 the receive state machine is forced to the STOP state by setting
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] to a 1. When the receive state machine enters
+ the STOP state, the [TRN_RX_SS_SP] flag will be set. Subsequently, writing
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] to zero will cause the receive state machine
+ to exit the STOP state and jump to the state indicated in the
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_NXT_ST] field.
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_NXT_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_OVRD_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_SS_SP].
+
+ For diagnostic use only. */
+ uint64_t reserved_30 : 1;
+ uint64_t trn_ss_ovrd : 1; /**< [ 31: 31](R/W) BASE-R training single step state override control for the receive training
+ state machine.
+ When single step mode is enabled by setting GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN]
+ to 1 and the receive state machine is forced to the STOP state by setting
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] to a 1. When the receive state machine enters
+ the STOP state, indicated by the stop flag GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_SS_SP]
+ set to one, the next state of the receive state machine, prior to entering the STOP
+ state is indicated by the value in the GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_NXT_ST]
+ field. The next state of the receive state machine can be overridden, that is forced
+ to another state other than the next state by setting
+ the desired next state in the GSERN()_LANE()_TRAIN_1_BCFG[TRN_OVRD_ST] field and then
+ clearing the GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_SS_SP] to zero. The receive state
+ machine will exit the STOP state and proceed to state indicated in [TRN_OVRD_ST]
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_OVRD_ST] field.
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_OVRD_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_SS_SP].
+
+ For diagnostic use only. */
+ uint64_t trn_ovrd_st : 6; /**< [ 37: 32](R/W) BASE-R training single step override state for the receive training
+ state machine. In single step mode allows for forcing the receive training
+ state machine to a specific state when exiting the STOP state.
+ Refer to the description for GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_OVRD].
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_OVRD],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_SS_SP].
+
+ For diagnostic use only. */
+ uint64_t trn_rx_nxt_st : 6; /**< [ 43: 38](RO/H) BASE-R training single step next state for the receive training state machine.
+ In single step mode this field holds the value of the next state of the receive
+ training state machine when the GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_SS_SP] bit is
+ set to a one.
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN] must be set to a one to enable single
+ step mode and the GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] must be set to a one
+ to force the receive training state machine to the STOP state.
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST].
+
+ For diagnostic use only. */
+ uint64_t train_tx_rule : 8; /**< [ 51: 44](R/W) BASE-R training TX taps coefficient rule. Sets the upper limit of the permissible
+ range of the combined TX equalizer c(0), c(+1), and c(-1) taps so that the TX equalizer
+ operates within range specified in the 10GBASE-KR standard.
+ The TX coefficient rule requires (pre + post + main) \<= [TRAIN_TX_RULE].
+
+ The allowable range for [TRAIN_TX_RULE] is (24 decimal \<= [TRAIN_TX_RULE] \<= 48
+ decimal).
+ For 10GBASE-KR it is recommended to program [TRAIN_TX_RULE] to 0x30 (48 decimal).
+
+ c(-1) pre TX tap range is programmed by GSERN()_LANE()_TRAIN_2_BCFG[PRE_MAX_LIMIT] and
+ GSERN()_LANE()_TRAIN_2_BCFG[PRE_MIN_LIMIT].
+
+ c(0) main TX tap range is programmed by GSERN()_LANE()_TRAIN_2_BCFG[MAIN_MAX_LIMIT] and
+ GSERN()_LANE()_TRAIN_2_BCFG[MAIN_MIN_LIMIT].
+
+ c(+1) post TX tap range is programmed by GSERN()_LANE()_TRAIN_2_BCFG[POST_MAX_LIMIT] and
+ GSERN()_LANE()_TRAIN_2_BCFG[POST_MIN_LIMIT]. */
+ uint64_t rxt_fom : 12; /**< [ 63: 52](RO/H) Figure of merit. A 12-bit output from the PHY indicating the quality of the
+ received data eye. A higher value indicates better link equalization, with 0x0
+ indicating worst equalization setting and 4095 indicating the best equalization
+ setting. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_train_1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_train_1_bcfg bdk_gsernx_lanex_train_1_bcfg_t;
+
+/* Return the RSL bus address of GSERN(a)_LANE(b)_TRAIN_1_BCFG.
+ * Valid only on CN9XXX models with module index a <= 7 and lane index
+ * b <= 4; any other combination is reported via __bdk_csr_fatal().
+ * NOTE(review): as elsewhere in this generated file, the address math
+ * masks both indices with 0x7 while the guard limits b to 4. */
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_1_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900031c0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_TRAIN_1_BCFG", 2, a, b, 0, 0);
+}
+
+/* Per-register helper macros describing this CSR: C type, bus type,
+ * printable name, device BAR, bus number, and accessor argument list. */
+#define typedef_BDK_GSERNX_LANEX_TRAIN_1_BCFG(a,b) bdk_gsernx_lanex_train_1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_TRAIN_1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_TRAIN_1_BCFG(a,b) "GSERNX_LANEX_TRAIN_1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_TRAIN_1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_TRAIN_1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_TRAIN_1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_train_2_bcfg
+ *
+ * GSER Lane Training Base Configuration Register 2
+ * This register controls settings for lane training.
+ */
+union bdk_gsernx_lanex_train_2_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_train_2_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t trn_sat_mv_lmt : 4; /**< [ 63: 60](R/W) BASE-R training saturated move limit threshold.
+ See GSERN()_LANE()_TRAIN_2_BCFG[TRN_SAT_MV_LMT_EN].
+ For diagnostic use only. */
+ uint64_t trn_sat_mv_lmt_en : 1; /**< [ 59: 59](R/W) BASE-R training saturated move limit threshold enable. During BASE-R training
+ if a consecutive number of saturated tap moves specified by
+ GSERN()_LANE()_TRAIN_2_BCFG[TRN_SAT_MV_LMT] is met or exceeded training will conclude.
+ This is to prevent cases where the FOM can no longer be improved and the
+ link partner TX taps are at their minimum or maximum limits and the algorithm
+ is attempting to repeatedly move the Tx taps beyond their min/max limits.
+ If the threshold limit is met or exceeded and [TRN_SAT_MV_LMT_EN] is set to 1
+ training will terminate and the GSERN()_LANE()_TRAIN_3_BCFG[EXIT_SAT_MV_LMT]
+ flag will set.
+ For diagnostic use only. */
+ uint64_t trn_cfg_use_eye_en : 1; /**< [ 58: 58](R/W) BASE-R and PCIe training when [TRN_CFG_USE_EYE_EN] is set the training state machine
+ will control the eye monitor block while training is active the power down the
+ eye monitor at the conclusion of link training.
+ For diagnostic use only. */
+ uint64_t trn_rrrpt_en : 1; /**< [ 57: 57](R/W) BASE-R training when [TRN_RRRPT_EN] is set the training state machine
+ will repeatedly send Receiver Ready messages to the CGX/OCX MAC every
+ 128 services clocks when training completes. For diagnostic use only. */
+ uint64_t trn_preset_en : 1; /**< [ 56: 56](R/W) BASE-R training when [TRN_PRESET_EN] is set to one preset the link
+ partner TX equalizer when training starts. When [TRN_PRESET_EN]
+ is cleared to zero the link partner TX equalizer will start in the
+ INITIALIZE state. For BASE-R training it is recommended to
+ start link training with [TRN_PRESET_EN] set to one. */
+ uint64_t trn_main_en : 2; /**< [ 55: 54](R/W) BASE-R training decrements the link partner (LP) TX equalizer main (C0) tap
+ at the start of link training after the PRESET coefficient update has been
+ issued to the link partner. Used in conjunction with [TRN_MAIN_VAL].
+
+ 0x0 = Disabled, do not decrement LP main C0 tap following PRESET.
+ 0x1 = Decrement LP main C0 tap following PRESET until vga_gain\<3:0\>
+ is less than or equal to the value in [TRN_MAIN_VAL].
+ 0x2 = Decrement LP main C0 tap following PRESET by the number of
+ steps in the [TRN_MAIN_VAL].
+ 0x3 = Increment LP main C0 tap at the start of training (PRESET disabled)
+ by the number of steps in [TRN_MAIN_VAL]. */
+ uint64_t trn_main_val : 6; /**< [ 53: 48](R/W) BASE-R training decrements the link partner (LP) TX equalizer main (C0) tap
+ at the start of link training after the PRESET coefficient update has been
+ issued to the link partner. Used in conjunction with [TRN_MAIN_EN].
+ See [TRN_MAIN_EN]. */
+ uint64_t max_tap_moves : 8; /**< [ 47: 40](R/W) BASE-R training sets the maximum number of link partner TX Equalizer Tap moves
+ allowed. Exceeding the [MAX_TAP_MOVES] forces training to terminate and local
+ device ready signaled if TRAIN_DONE_MASK[MAX_MOVES] is set.
+
+ Internal:
+ FIXME no such register TRAIN_DONE_MASK[MAX_MOVES], then remove above exempt attribute. */
+ uint64_t min_tap_moves : 8; /**< [ 39: 32](R/W) BASE-R training sets the minimum number of link partner TX Equalizer Tap moves
+ before training completion (local device ready) is permitted. */
+ uint64_t main_max_limit : 6; /**< [ 31: 26](R/W) BASE-R training sets the maximum limit of the local device transmitter main (C0) tap
+ value during KR training. Successive coefficient update message tap increments
+ will increase the main tap value until it reaches the value in this field. At
+ that point the local device TX training state machine will return a status report
+ of maximum for the main (C0) tap value.
+ The allowable range for the main (C0) tap is 0x18 to 0x30. */
+ uint64_t post_max_limit : 5; /**< [ 25: 21](R/W) BASE-R training sets the maximum limit of the local device transmitter post (C+1) tap
+ value during KR training. Successive coefficient update message tap increments
+ will increase the post tap value until it reaches the value in this field. At
+ that point the local device TX training state machine will return a status report
+ of maximum for the post (C+1) tap value.
+ The allowable range for the post (C+1) tap is 0 to 0xC. */
+ uint64_t pre_max_limit : 5; /**< [ 20: 16](R/W) BASE-R training sets the maximum limit of the local device transmitter pre (C-1) tap
+ value during KR training. Successive coefficient update message tap increments
+ will increase the pre tap value until it reaches the value in this field. At
+ that point the local device TX training state machine will return a status report
+ of maximum for the pre (C-1) tap value.
+ The allowable range for the pre (C-1) tap is 0 to 0x10. */
+ uint64_t main_min_limit : 6; /**< [ 15: 10](R/W) BASE-R training sets the minimum limit of the local device transmitter main (C0) tap
+ value during KR training. Successive coefficient update message tap decrements
+ will decrease the main tap value until it reaches the value in this field. At
+ that point the local device TX training state machine will return a status report
+ of minimum for the main (C0) tap value.
+ The allowable range for the main (C0) tap is 0x18 to 0x30. */
+ uint64_t post_min_limit : 5; /**< [ 9: 5](R/W) BASE-R training sets the minimum limit of the local device transmitter post (C+1) tap
+ value during KR training. Successive coefficient update message tap decrements
+ will decrease the post tap value until it reaches the value in this field. At
+ that point the local device TX training state machine will return a status report
+ of minimum for the post (C+1) tap value.
+ The allowable range for the post (C+1) tap is 0 to 0x10. */
+ uint64_t pre_min_limit : 5; /**< [ 4: 0](R/W) BASE-R training sets the minimum limit of the local device transmitter pre (C-1) tap
+ value during KR training. Successive coefficient update message tap decrements
+ will decrease the pre tap value until it reaches the value in this field. At
+ that point the local device TX training state machine will return a status report
+ of minimum for the pre (C-1) tap value.
+ The allowable range for the min (C-1) tap is 0 to 0x10. */
+#else /* Word 0 - Little Endian */
+ uint64_t pre_min_limit : 5; /**< [ 4: 0](R/W) BASE-R training sets the minimum limit of the local device transmitter pre (C-1) tap
+ value during KR training. Successive coefficient update message tap decrements
+ will decrease the pre tap value until it reaches the value in this field. At
+ that point the local device TX training state machine will return a status report
+ of minimum for the pre (C-1) tap value.
+ The allowable range for the min (C-1) tap is 0 to 0x10. */
+ uint64_t post_min_limit : 5; /**< [ 9: 5](R/W) BASE-R training sets the minimum limit of the local device transmitter post (C+1) tap
+ value during KR training. Successive coefficient update message tap decrements
+ will decrease the post tap value until it reaches the value in this field. At
+ that point the local device TX training state machine will return a status report
+ of minimum for the post (C+1) tap value.
+ The allowable range for the post (C+1) tap is 0 to 0x10. */
+ uint64_t main_min_limit : 6; /**< [ 15: 10](R/W) BASE-R training sets the minimum limit of the local device transmitter main (C0) tap
+ value during KR training. Successive coefficient update message tap decrements
+ will decrease the main tap value until it reaches the value in this field. At
+ that point the local device TX training state machine will return a status report
+ of minimum for the main (C0) tap value.
+ The allowable range for the main (C0) tap is 0x18 to 0x30. */
+ uint64_t pre_max_limit : 5; /**< [ 20: 16](R/W) BASE-R training sets the maximum limit of the local device transmitter pre (C-1) tap
+ value during KR training. Successive coefficient update message tap increments
+ will increase the pre tap value until it reaches the value in this field. At
+ that point the local device TX training state machine will return a status report
+ of maximum for the pre (C-1) tap value.
+ The allowable range for the pre (C-1) tap is 0 to 0x10. */
+ uint64_t post_max_limit : 5; /**< [ 25: 21](R/W) BASE-R training sets the maximum limit of the local device transmitter post (C+1) tap
+ value during KR training. Successive coefficient update message tap increments
+ will increase the post tap value until it reaches the value in this field. At
+ that point the local device TX training state machine will return a status report
+ of maximum for the post (C+1) tap value.
+ The allowable range for the post (C+1) tap is 0 to 0xC. */
+ uint64_t main_max_limit : 6; /**< [ 31: 26](R/W) BASE-R training sets the maximum limit of the local device transmitter main (C0) tap
+ value during KR training. Successive coefficient update message tap increments
+ will increase the main tap value until it reaches the value in this field. At
+ that point the local device TX training state machine will return a status report
+ of maximum for the main (C0) tap value.
+ The allowable range for the main (C0) tap is 0x18 to 0x30. */
+ uint64_t min_tap_moves : 8; /**< [ 39: 32](R/W) BASE-R training sets the minimum number of link partner TX Equalizer Tap moves
+ before training completion (local device ready) is permitted. */
+ uint64_t max_tap_moves : 8; /**< [ 47: 40](R/W) BASE-R training sets the maximum number of link partner TX Equalizer Tap moves
+ allowed. Exceeding the [MAX_TAP_MOVES] forces training to terminate and local
+ device ready signaled if TRAIN_DONE_MASK[MAX_MOVES] is set.
+
+ Internal:
+ FIXME no such register TRAIN_DONE_MASK[MAX_MOVES], then remove above exempt attribute. */
+ uint64_t trn_main_val : 6; /**< [ 53: 48](R/W) BASE-R training decrements the link partner (LP) TX equalizer main (C0) tap
+ at the start of link training after the PRESET coefficient update has been
+ issued to the link partner. Used in conjunction with [TRN_MAIN_EN].
+ See [TRN_MAIN_EN]. */
+ uint64_t trn_main_en : 2; /**< [ 55: 54](R/W) BASE-R training decrements the link partner (LP) TX equalizer main (C0) tap
+ at the start of link training after the PRESET coefficient update has been
+ issued to the link partner. Used in conjunction with [TRN_MAIN_VAL].
+
+ 0x0 = Disabled, do not decrement LP main C0 tap following PRESET.
+ 0x1 = Decrement LP main C0 tap following PRESET until vga_gain\<3:0\>
+ is less than or equal to the value in [TRN_MAIN_VAL].
+ 0x2 = Decrement LP main C0 tap following PRESET by the number of
+ steps in the [TRN_MAIN_VAL].
+ 0x3 = Increment LP main C0 tap at the start of training (PRESET disabled)
+ by the number of steps in [TRN_MAIN_VAL]. */
+ uint64_t trn_preset_en : 1; /**< [ 56: 56](R/W) BASE-R training when [TRN_PRESET_EN] is set to one preset the link
+ partner TX equalizer when training starts. When [TRN_PRESET_EN]
+ is cleared to zero the link partner TX equalizer will start in the
+ INITIALIZE state. For BASE-R training it is recommended to
+ start link training with [TRN_PRESET_EN] set to one. */
+ uint64_t trn_rrrpt_en : 1; /**< [ 57: 57](R/W) BASE-R training when [TRN_RRRPT_EN] is set the training state machine
+ will repeatedly send Receiver Ready messages to the CGX/OCX MAC every
+ 128 service clocks when training completes. For diagnostic use only. */
+ uint64_t trn_cfg_use_eye_en : 1; /**< [ 58: 58](R/W) BASE-R and PCIe training when [TRN_CFG_USE_EYE_EN] is set the training state machine
+ will control the eye monitor block while training is active, then power down the
+ eye monitor at the conclusion of link training.
+ For diagnostic use only. */
+ uint64_t trn_sat_mv_lmt_en : 1; /**< [ 59: 59](R/W) BASE-R training saturated move limit threshold enable. During BASE-R training
+ if a consecutive number of saturated tap moves specified by
+ GSERN()_LANE()_TRAIN_2_BCFG[TRN_SAT_MV_LMT] is met or exceeded training will conclude.
+ This is to prevent cases where the FOM can no longer be improved and the
+ link partner TX taps are at their minimum or maximum limits and the algorithm
+ is attempting to repeatedly move the Tx taps beyond their min/max limits.
+ If the threshold limit is met or exceeded and [TRN_SAT_MV_LMT_EN] is set to 1
+ training will terminate and the GSERN()_LANE()_TRAIN_3_BCFG[EXIT_SAT_MV_LMT]
+ flag will set.
+ For diagnostic use only. */
+ uint64_t trn_sat_mv_lmt : 4; /**< [ 63: 60](R/W) BASE-R training saturated move limit threshold.
+ See GSERN()_LANE()_TRAIN_2_BCFG[TRN_SAT_MV_LMT_EN].
+ For diagnostic use only. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_train_2_bcfg_s cn; */
+};
+/* Convenience typedef for the GSERN()_LANE()_TRAIN_2_BCFG register layout. */
+typedef union bdk_gsernx_lanex_train_2_bcfg bdk_gsernx_lanex_train_2_bcfg_t;
+
+/**
+ * Compute the CSR address of GSERN(a)_LANE(b)_TRAIN_2_BCFG.
+ *
+ * Valid only on CN9XXX parts with module index a <= 7 and lane index
+ * b <= 4; any other combination reports a fatal CSR access error via
+ * __bdk_csr_fatal().
+ */
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_2_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4))
+    {
+        /* Base address plus the per-module and per-lane strides. */
+        uint64_t addr = 0x87e0900031d0ll;
+        addr += 0x1000000ll * (a & 0x7); /* GSERN module stride */
+        addr += 0x10000ll   * (b & 0x7); /* lane stride */
+        return addr;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_TRAIN_2_BCFG", 2, a, b, 0, 0);
+}
+
+/* Accessor-attribute macros for GSERNX_LANEX_TRAIN_2_BCFG, consumed by the
+   generic BDK_CSR_* access macros: the C type, bus access type, printable
+   register name, device BAR, bus number, and CSR argument list. */
+#define typedef_BDK_GSERNX_LANEX_TRAIN_2_BCFG(a,b) bdk_gsernx_lanex_train_2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_TRAIN_2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_TRAIN_2_BCFG(a,b) "GSERNX_LANEX_TRAIN_2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_TRAIN_2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_TRAIN_2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_TRAIN_2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_train_3_bcfg
+ *
+ * GSER Lane Training Base Configuration Register 3
+ * This register controls settings for lane training.
+ */
+union bdk_gsernx_lanex_train_3_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_train_3_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t exit_fom_thrs : 1; /**< [ 63: 63](RO/H) BASE-R training exit condition flag indicates the measured FOM
+ was equal to or exceeded the FOM threshold value specified in
+ GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL] when
+ GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN] is set to 1.
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_AND] and
+ GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_OR] to
+ specify the BASE-R training convergence exit criteria. */
+ uint64_t train_tx_min_rule : 8; /**< [ 62: 55](R/W) BASE-R training TX taps minimum coefficient rule. Sets the lower limit of the permissible
+ range of the TX equalizer c(0), c(+1), and c(-1) taps so that the TX equalizer
+ operates within range specified in the IEEE 802.3-2012 Clause 72 10GBASE-KR
+ and IEEE 802.3bj-2014 Clause 93 100GBASE-KR4.
+ The TX coefficient minimum rule requires (main - pre - post) \>= [TRAIN_TX_MIN_RULE].
+
+ The allowable range for [TRAIN_TX_MIN_RULE] is
+ (6 decimal \<= [TRAIN_TX_MIN_RULE] \<= 16 decimal).
+ For 10GBASE-KR, 40GBASE-KR4 and 100GBASE-KR4 it is recommended to
+ program [TRAIN_TX_MIN_RULE] to 0x6.
+
+ c(-1) pre TX tap range is programmed by GSERN()_LANE()_TRAIN_2_BCFG[PRE_MAX_LIMIT] and
+ GSERN()_LANE()_TRAIN_2_BCFG[PRE_MIN_LIMIT].
+
+ c(0) main TX tap range is programmed by GSERN()_LANE()_TRAIN_2_BCFG[MAIN_MAX_LIMIT] and
+ GSERN()_LANE()_TRAIN_2_BCFG[MAIN_MIN_LIMIT].
+
+ c(+1) post TX tap range is programmed by GSERN()_LANE()_TRAIN_2_BCFG[POST_MAX_LIMIT] and
+ GSERN()_LANE()_TRAIN_2_BCFG[POST_MIN_LIMIT]. */
+ uint64_t exit_sat_mv_lmt : 1; /**< [ 54: 54](RO/H) BASE-R training saturated move limit threshold exit flag.
+ See GSERN()_LANE()_TRAIN_2_BCFG[TRN_SAT_MV_LMT_EN].
+ For diagnostic use only. */
+ uint64_t exit_prbs11_ok : 1; /**< [ 53: 53](RO/H) Training exit condition PRBS11 in the BASE-R KR training frame is
+ error free.
+ When BASE-R training is concluded, indicated by
+ GSERN()_LANE()_TRAIN_0_BCFG[LD_RECEIVER_RDY] set to one
+ [EXIT_PRBS11_OK] will be set if the training was terminated
+ because the PRBS11 pattern extracted by the CGX or OCX MAC
+ indicates that the PRBS11 pattern is error free.
+
+ This bit will report the PRBS11 status when BASE-R training
+ completes even if GSERN()_LANE()_TRAIN_3_BCFG[LD_TRAIN_DONE\<21\>
+ or LD_TRAIN_DONE\<26\>] are not set.
+ GSERN()_LANE()_TRAIN_4_BCFG[EN_PRBS11_CHK] must be enabled
+ for the [EXIT_PRBS11_OK] status to be reported.
+
+ This bit will be cleared if BASE-R training is re-enabled.
+ For diagnostic use only.
+
+ Internal:
+ FIXME what does LD_TRAIN_DONE refer to, then remove above exempt attribute. */
+ uint64_t exit_delta_ffom : 1; /**< [ 52: 52](RO/H) Training exit condition due to delta filtered FOM.
+ When BASE-R training is concluded, indicated by
+ GSERN()_LANE()_TRAIN_0_BCFG[LD_RECEIVER_RDY] set to one the
+ [EXIT_DELTA_FFOM] bit will be set if the training was terminated
+ because the Delta Filtered FOM is within the high and low limits set by
+ GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_HI_LMT] and
+ GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_LO_LMT], and
+ GSERN()_LANE()_TRAIN_6_BCFG[DFFOM_EXIT_EN]=1, and
+ the number of consecutive tap move iterations in which
+ the Delta Filtered FOM is within the high/low limits
+ exceeded the count in
+ GSERN()_LANE()_TRAIN_6_BCFG[DELTA_FFOM_CCNT]
+
+ This bit will be cleared if BASE-R training is re-enabled.
+ For diagnostic use only. */
+ uint64_t exit_rep_pattern : 1; /**< [ 51: 51](RO/H) Training exit condition repeating TAP moves pattern detected.
+ When BASE-R training is concluded, indicated by
+ GSERN()_LANE()_TRAIN_0_BCFG[LD_RECEIVER_RDY] set to one
+ [EXIT_REP_PATTERN] will be set if the training was terminated
+ because the training state machine discovered a repeating tap
+ move pattern. The GSERN()_LANE()_TRAIN_5_BCFG[PAT_EXIT_CNT] must
+ be set to a non-zero value and GSERN()_LANE()_TRAIN_5_BCFG[PAT_MATCH_EN]
+ must be set to a one to enable the repeating tap move pattern
+ matching logic which looks for repeating tap moves to signal
+ training convergence.
+
+ This bit will be cleared if BASE-R training is re-enabled.
+ For diagnostic use only. */
+ uint64_t exit_tmt_timer : 1; /**< [ 50: 50](RO/H) Training timeout timer expired.
+ When BASE-R training is concluded, indicated by
+ GSERN()_LANE()_TRAIN_0_BCFG[LD_RECEIVER_RDY] set to one
+ [EXIT_TMT_TIMER] will be set if the training was terminated
+ because the training state machine KR training time-out timer expired.
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_SEL] and
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_FAST] select the
+ timeout time in milliseconds/microseconds and
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_DISABLE] enables
+ the timeout timer when cleared to zero.
+
+ This bit will be cleared if BASE-R training is re-enabled.
+ For diagnostic use only. */
+ uint64_t exit_min_tap_moves : 1; /**< [ 49: 49](RO/H) Training exit condition exceeded minimum number of tap moves.
+ When BASE-R training is concluded, indicated by
+ GSERN()_LANE()_TRAIN_0_BCFG[LD_RECEIVER_RDY] set to one
+ [EXIT_MIN_TAP_MOVES] will be set if the training was terminated
+ because the training state machine exceeded the minimum number of
+ tap moves specified in
+ GSERN()_LANE()_TRAIN_2_BCFG[MIN_TAP_MOVES].
+
+ This bit will be cleared if BASE-R training is re-enabled.
+ For diagnostic use only. */
+ uint64_t exit_max_tap_moves : 1; /**< [ 48: 48](RO/H) Training exit condition exceeded maximum number of tap moves.
+ When BASE-R training is concluded, indicated by
+ GSERN()_LANE()_TRAIN_0_BCFG[LD_RECEIVER_RDY] set to one
+ [EXIT_MAX_TAP_MOVES] will be set if the training was terminated
+ because the training state machine exceeded the maximum number of
+ tap moves specified in
+ GSERN()_LANE()_TRAIN_2_BCFG[MAX_TAP_MOVES].
+
+ This bit will be cleared if BASE-R training is re-enabled.
+ For diagnostic use only. */
+ uint64_t exit_dffom : 13; /**< [ 47: 35](RO/H) Training exit location delta filtered FOM value. Holds the delta filtered FOM
+ value at the completion of BASE-R training. Number represented in offset binary
+ notation. For diagnostic use only. */
+ uint64_t trn_ntap_mvs : 8; /**< [ 34: 27](RO/H) BASE-R training holds the number of link partner tap moves made during
+ link training. */
+ uint64_t term_prbs11_and : 1; /**< [ 26: 26](R/W) BASE-R training KR training PRBS11 pattern check extracted from the
+ KR training frame is error free. Termination AND condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ PRBS11 pattern check extracted from the KR training
+ frame is error free.
+ GSERN()_LANE()_TRAIN_4_BCFG[EN_PRBS11_CHK] must be enabled to
+ enable PRBS11 pattern error checking. */
+ uint64_t term_dffom_and : 1; /**< [ 25: 25](R/W) BASE-R training KR training Delta Filtered FOM is within the high
+ and low limits. Termination AND condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ Delta filtered FOM is within the high and low
+ limits set by
+ GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_HI_LMT] and
+ GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_LO_LMT], and
+ GSERN()_LANE()_TRAIN_6_BCFG[DFFOM_EXIT_EN]=1, and
+ the number of consecutive tap move iterations in which
+ the Delta Filtered FOM is within the high/low limits
+ exceeds the count in
+ GSERN()_LANE()_TRAIN_6_BCFG[DELTA_FFOM_CCNT] */
+ uint64_t term_rep_pat_and : 1; /**< [ 24: 24](R/W) BASE-R training KR training taps move repeating pattern detected.
+ Termination AND condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ GSERN()_LANE()_TRAIN_5_BCFG[PAT_EXIT_CNT] must be set to
+ a non-zero value and GSERN()_LANE()_TRAIN_5_BCFG[PAT_MATCH_EN]
+ must be set to a one to enable the repeating tap move pattern
+ matching logic which looks for repeating tap moves to signal
+ training convergence. */
+ uint64_t term_tmt_tmr_and : 1; /**< [ 23: 23](R/W) BASE-R training KR training time-out timer expired. Termination
+ AND condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_SEL] and
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_FAST] select the
+ timeout time in milliseconds/microseconds and
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_DISABLE] enables
+ the timeout timer when cleared to zero. */
+ uint64_t term_min_mvs_and : 1; /**< [ 22: 22](R/W) BASE-R training termination exceeded minimum number of tap moves.
+ Termination AND condition. See description below.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ Exceeded minimum tap moves iterations.
+ GSERN()_LANE()_TRAIN_2_BCFG[MIN_TAP_MOVES] sets the minimum
+ number of tap moves. */
+ uint64_t term_prbs11_or : 1; /**< [ 21: 21](R/W) BASE-R training KR training PRBS11 pattern check extracted from the
+ KR training frame is error free. Termination OR condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ PRBS11 pattern check extracted from the KR training
+ frame is error free.
+ GSERN()_LANE()_TRAIN_4_BCFG[EN_PRBS11_CHK] must be enabled to
+ enable PRBS11 pattern error checking. */
+ uint64_t term_dffom_or : 1; /**< [ 20: 20](R/W) BASE-R training KR training Delta Filtered FOM is within the high
+ and low limits. Termination OR condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ Delta filtered FOM is within the high and low
+ limits set by
+ GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_HI_LMT] and
+ GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_LO_LMT], and
+ GSERN()_LANE()_TRAIN_6_BCFG[DFFOM_EXIT_EN]=1, and
+ the number of consecutive tap move iterations in which
+ the Delta Filtered FOM is within the high/low limits
+ exceeds the count in
+ GSERN()_LANE()_TRAIN_6_BCFG[DELTA_FFOM_CCNT] */
+ uint64_t term_rep_pat_or : 1; /**< [ 19: 19](R/W) BASE-R training KR training taps move repeating pattern detected.
+ Termination OR condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ GSERN()_LANE()_TRAIN_5_BCFG[PAT_EXIT_CNT] must be set to
+ a non-zero value and GSERN()_LANE()_TRAIN_5_BCFG[PAT_MATCH_EN]
+ must be set to a one to enable the repeating tap move pattern
+ matching logic which looks for repeating tap moves to signal
+ training convergence. */
+ uint64_t term_tmt_tmr_or : 1; /**< [ 18: 18](R/W) BASE-R training KR training time-out timer expired. Termination
+ OR condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_SEL] and
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_FAST] select the
+ timeout time in milliseconds/microseconds and
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_DISABLE] enables
+ the timeout timer when cleared to zero. */
+ uint64_t term_max_mvs_or : 1; /**< [ 17: 17](R/W) BASE-R training termination exceeded maximum number of tap moves.
+ Termination OR condition. See description below.
+
+ BASE-R training termination condition register fields. Selects the conditions
+ used to terminate local device KR link training. Setting the associated
+ bit will enable the training termination condition. An AND-OR
+ tree is used to allow setting conditions that must occur together
+ (AND function) or any single condition (OR function) will trigger the
+ BASE-R training termination. AND and OR conditions can be combined.
+
+ \<page\>
+ OR CONDITIONS. Any condition that is true and has a set condition bit will
+ trigger training termination. Conditions with bits that are not set
+ (cleared to zero) are not used to trigger training termination.
+
+ [TERM_MAX_MVS_OR] = Exceeded maximum tap moves iterations.
+ GSERN()_LANE()_TRAIN_2_BCFG[MAX_TAP_MOVES] sets the maximum
+ number of tap moves.
+
+ [TERM_TMT_TMR_OR] = KR training time-out timer expired.
+ See description in GSERN()_LANE()_TRAIN_3_BCFG[TERM_TMT_TMR_OR].
+
+ [TERM_REP_PAT_OR] = Taps move repeating pattern detected.
+ See description in GSERN()_LANE()_TRAIN_3_BCFG[TERM_REP_PAT_OR].
+
+ [TERM_DFFOM_OR] = Delta Filtered FOM is within the high and low
+ limits.
+ See description in GSERN()_LANE()_TRAIN_3_BCFG[TERM_DFFOM_OR].
+
+ [TERM_PRBS11_OR] = PRBS11 pattern check extracted from the KR training
+ frame is error free.
+ See description in GSERN()_LANE()_TRAIN_3_BCFG[TERM_PRBS11_OR].
+
+ GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_OR] =
+ Measured FOM equal or exceeds the FOM threshold
+ in GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL] during KR
+ training. GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN] must also
+ be set to 1.
+ See description in GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_OR].
+
+ \<page\>
+ AND CONDITIONS. The conditions associated with bits that are set must
+ all be true to trigger training termination. Conditions with bits that
+ are not set (cleared to zero) are not used to trigger training termination.
+
+ [TERM_MIN_MVS_AND] = Exceeded minimum tap moves iterations.
+ GSERN()_LANE()_TRAIN_2_BCFG[MIN_TAP_MOVES] sets the minimum
+ number of tap moves.
+
+ [TERM_TMT_TMR_AND] = KR training time-out timer expired.
+ See description in GSERN()_LANE()_TRAIN_3_BCFG[TERM_TMT_TMR_AND].
+
+ [TERM_REP_PAT_AND] = Taps move repeating pattern detected.
+ See description in GSERN()_LANE()_TRAIN_3_BCFG[TERM_REP_PAT_AND].
+
+ [TERM_DFFOM_AND] = Delta Filtered FOM is within the high and low
+ limits.
+ See description in GSERN()_LANE()_TRAIN_3_BCFG[TERM_DFFOM_AND].
+
+ [TERM_PRBS11_AND] = PRBS11 pattern check extracted from the KR training
+ frame is error free.
+ See description in GSERN()_LANE()_TRAIN_3_BCFG[TERM_PRBS11_AND].
+
+ GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_AND] =
+ Measured FOM equal or exceeds the FOM threshold
+ in GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL] during KR
+ training. GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN] must also
+ be set to 1.
+ See description in GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_AND]. */
+ uint64_t inv_tx_post_dir : 1; /**< [ 16: 16](R/W) BASE-R training when set reverses the direction of the post tap (C+1)
+ direction hint in the local transmitter received from the link partner. */
+ uint64_t inv_tx_main_dir : 1; /**< [ 15: 15](R/W) BASE-R training when set reverses the direction of the main tap (C0)
+ direction hint in the local transmitter received from the link partner. */
+ uint64_t inv_tx_pre_dir : 1; /**< [ 14: 14](R/W) BASE-R training when set reverses the direction of the pre tap (C-1)
+ direction hint in the local transmitter received from the link partner. */
+ uint64_t trn_post_en : 2; /**< [ 13: 12](R/W) BASE-R training decrements the link partner (LP) TX equalizer post (C+1) tap
+ at the start of link training after the PRESET coefficient update has been
+ issued to the link partner. Used in conjunction with [TRN_POST_VAL].
+
+ 0x0 = Disabled, do not decrement LP post C+1 tap following PRESET.
+ 0x1 = Reserved, do not use.
+ 0x2 = Decrement LP post C+1 tap following PRESET by the number of
+ steps in the [TRN_POST_VAL].
+ 0x3 = Increment LP post C+1 tap at the start of training (PRESET disabled)
+ by the number of steps in [TRN_POST_VAL]. */
+ uint64_t trn_post_val : 5; /**< [ 11: 7](R/W) BASE-R training decrements the link partner (LP) TX equalizer post (C+1) tap
+ at the start of link training after the PRESET coefficient update has been
+ issued to the link partner. See [TRN_POST_EN]. */
+ uint64_t trn_pre_en : 2; /**< [ 6: 5](R/W) BASE-R training decrements the link partner (LP) TX equalizer pre (C-1) tap
+ at the start of link training after the PRESET coefficient update has been
+ issued to the link partner. Used in conjunction with [TRN_PRE_VAL].
+
+ 0x0 = Disabled, do not decrement LP pre C-1 tap following PRESET.
+ 0x1 = Reserved, do not use.
+ 0x2 = Decrement LP pre C-1 tap following PRESET by the number of
+ steps in the [TRN_PRE_VAL].
+ 0x3 = Increment LP pre C-1 tap at the start of training (PRESET disabled)
+ by the number of steps in [TRN_PRE_VAL]. */
+ uint64_t trn_pre_val : 5; /**< [ 4: 0](R/W) BASE-R training decrements the link partner (LP) TX equalizer pre (C-1) tap
+ at the start of link training after the PRESET coefficient update has been
+ issued to the link partner. Used in conjunction with [TRN_PRE_EN].
+ See [TRN_PRE_EN]. */
+#else /* Word 0 - Little Endian */
+ uint64_t trn_pre_val : 5; /**< [ 4: 0](R/W) BASE-R training decrements the link partner (LP) TX equalizer pre (C-1) tap
+ at the start of link training after the PRESET coefficient update has been
+ issued to the link partner. Used in conjunction with [TRN_PRE_EN].
+ See [TRN_PRE_EN]. */
+ uint64_t trn_pre_en : 2; /**< [ 6: 5](R/W) BASE-R training decrements the link partner (LP) TX equalizer pre (C-1) tap
+ at the start of link training after the PRESET coefficient update has been
+ issued to the link partner. Used in conjunction with [TRN_PRE_VAL].
+
+ 0x0 = Disabled, do not decrement LP pre C-1 tap following PRESET.
+ 0x1 = Reserved, do not use.
+ 0x2 = Decrement LP pre C-1 tap following PRESET by the number of
+ steps in the [TRN_PRE_VAL].
+ 0x3 = Increment LP pre C-1 tap at the start of training (PRESET disabled)
+ by the number of steps in [TRN_PRE_VAL]. */
+ uint64_t trn_post_val : 5; /**< [ 11: 7](R/W) BASE-R training decrements the link partner (LP) TX equalizer post (C+1) tap
+ at the start of link training after the PRESET coefficient update has been
+ issued to the link partner. See [TRN_POST_EN]. */
+ uint64_t trn_post_en : 2; /**< [ 13: 12](R/W) BASE-R training decrements the link partner (LP) TX equalizer post (C+1) tap
+ at the start of link training after the PRESET coefficient update has been
+ issued to the link partner. Used in conjunction with [TRN_POST_VAL].
+
+ 0x0 = Disabled, do not decrement LP post C+1 tap following PRESET.
+ 0x1 = Reserved, do not use.
+ 0x2 = Decrement LP post C+1 tap following PRESET by the number of
+ steps in the [TRN_POST_VAL].
+ 0x3 = Increment LP post C+1 tap at the start of training (PRESET disabled)
+ by the number of steps in [TRN_POST_VAL]. */
+ uint64_t inv_tx_pre_dir : 1; /**< [ 14: 14](R/W) BASE-R training when set reverses the direction of the pre tap (C-1)
+ direction hint in the local transmitter received from the link partner. */
+ uint64_t inv_tx_main_dir : 1; /**< [ 15: 15](R/W) BASE-R training when set reverses the direction of the main tap (C0)
+ direction hint in the local transmitter received from the link partner. */
+ uint64_t inv_tx_post_dir : 1; /**< [ 16: 16](R/W) BASE-R training when set reverses the direction of the post tap (C+1)
+ direction hint in the local transmitter received from the link partner. */
+ uint64_t term_max_mvs_or : 1; /**< [ 17: 17](R/W) BASE-R training termination exceeded maximum number of tap moves.
+ Termination OR condition. See description below.
+
+ BASE-R training termination condition register fields. Selects the conditions
+ used to terminate local device KR link training. Setting the associated
+ bit will enable the training termination condition. An AND-OR
+ tree is used to allow setting conditions that must occur together
+ (AND function) or any single condition (OR function) will trigger the
+ BASE-R training termination. AND and OR conditions can be combined.
+
+ \<page\>
+ OR CONDITIONS. Any condition that is true and has a set condition bit will
+ trigger training termination. Conditions with bits that are not set
+ (cleared to zero) are not used to trigger training termination.
+
+ [TERM_MAX_MVS_OR] = Exceeded maximum tap moves iterations.
+ GSERN()_LANE()_TRAIN_2_BCFG[MAX_TAP_MOVES] sets the maximum
+ number of tap moves.
+
+ [TERM_TMT_TMR_OR] = KR training time-out timer expired.
+ See description in GSERN()_LANE()_TRAIN_3_BCFG[TERM_TMT_TMR_OR].
+
+ [TERM_REP_PAT_OR] = Taps move repeating pattern detected.
+ See description in GSERN()_LANE()_TRAIN_3_BCFG[TERM_REP_PAT_OR].
+
+ [TERM_DFFOM_OR] = Delta Filtered FOM is within the high and low
+ limits.
+ See description in GSERN()_LANE()_TRAIN_3_BCFG[TERM_DFFOM_OR].
+
+ [TERM_PRBS11_OR] = PRBS11 pattern check extracted from the KR training
+ frame is error free.
+ See description in GSERN()_LANE()_TRAIN_3_BCFG[TERM_PRBS11_OR].
+
+ GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_OR] =
+ Measured FOM equal or exceeds the FOM threshold
+ in GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL] during KR
+ training. GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN] must also
+ be set to 1.
+ See description in GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_OR].
+
+ \<page\>
+ AND CONDITIONS. The conditions associated with bits that are set must
+ all be true to trigger training termination. Conditions with bits that
+ are not set (cleared to zero) are not used to trigger training termination.
+
+ [TERM_MIN_MVS_AND] = Exceeded minimum tap moves iterations.
+ GSERN()_LANE()_TRAIN_2_BCFG[MIN_TAP_MOVES] sets the minimum
+ number of tap moves.
+
+ [TERM_TMT_TMR_AND] = KR training time-out timer expired.
+ See description in GSERN()_LANE()_TRAIN_3_BCFG[TERM_TMT_TMR_AND].
+
+ [TERM_REP_PAT_AND] = Taps move repeating pattern detected.
+ See description in GSERN()_LANE()_TRAIN_3_BCFG[TERM_REP_PAT_AND].
+
+ [TERM_DFFOM_AND] = Delta Filtered FOM is within the high and low
+ limits.
+ See description in GSERN()_LANE()_TRAIN_3_BCFG[TERM_DFFOM_AND].
+
+ [TERM_PRBS11_AND] = PRBS11 pattern check extracted from the KR training
+ frame is error free.
+ See description in GSERN()_LANE()_TRAIN_3_BCFG[TERM_PRBS11_AND].
+
+ GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_AND] =
+ Measured FOM equal or exceeds the FOM threshold
+ in GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL] during KR
+ training. GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN] must also
+ be set to 1.
+ See description in GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_AND]. */
+ uint64_t term_tmt_tmr_or : 1; /**< [ 18: 18](R/W) BASE-R training KR training time-out timer expired. Termination
+ OR condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_SEL] and
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_FAST] select the
+ timeout time in milliseconds/microseconds and
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_DISABLE] enables
+ the timeout timer when cleared to zero. */
+ uint64_t term_rep_pat_or : 1; /**< [ 19: 19](R/W) BASE-R training KR training taps move repeating pattern detected.
+ Termination OR condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ GSERN()_LANE()_TRAIN_5_BCFG[PAT_EXIT_CNT] must be set to
+ a non-zero value and GSERN()_LANE()_TRAIN_5_BCFG[PAT_MATCH_EN]
+ must be set to a one to enable the repeating tap move pattern
+ matching logic which looks for repeating tap moves to signal
+ training convergence. */
+ uint64_t term_dffom_or : 1; /**< [ 20: 20](R/W) BASE-R training KR training Delta Filtered FOM is within the high
+ and low limits. Termination OR condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ Delta filtered FOM is within the high and low
+ limits set by
+ GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_HI_LMT] and
+ GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_LO_LMT], and
+ GSERN()_LANE()_TRAIN_6_BCFG[DFFOM_EXIT_EN]=1, and
+ the number of consecutive tap move iterations in which
+ the Delta Filtered FOM is within the high/low limits
+ exceeds the count in
+ GSERN()_LANE()_TRAIN_6_BCFG[DELTA_FFOM_CCNT] */
+ uint64_t term_prbs11_or : 1; /**< [ 21: 21](R/W) BASE-R training KR training PRBS11 pattern check extracted from the
+ KR training frame is error free. Termination OR condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ PRBS11 pattern check extracted from the KR training
+ frame is error free.
+ GSERN()_LANE()_TRAIN_4_BCFG[EN_PRBS11_CHK] must be enabled to
+ enable PRBS11 pattern error checking. */
+ uint64_t term_min_mvs_and : 1; /**< [ 22: 22](R/W) BASE-R training termination exceeded minimum number of tap moves.
+ Termination AND condition. See description below.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ Exceeded minimum tap moves iterations.
+ GSERN()_LANE()_TRAIN_2_BCFG[MIN_TAP_MOVES] sets the minimum
+ number of tap moves. */
+ uint64_t term_tmt_tmr_and : 1; /**< [ 23: 23](R/W) BASE-R training KR training time-out timer expired. Termination
+ AND condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_SEL] and
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_FAST] select the
+ timeout time in milliseconds/microseconds and
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_DISABLE] enables
+ the timeout timer when cleared to zero. */
+ uint64_t term_rep_pat_and : 1; /**< [ 24: 24](R/W) BASE-R training KR training taps move repeating pattern detected.
+ Termination AND condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ GSERN()_LANE()_TRAIN_5_BCFG[PAT_EXIT_CNT] must be set to
+ a non-zero value and GSERN()_LANE()_TRAIN_5_BCFG[PAT_MATCH_EN]
+ must be set to a one to enable the repeating tap move pattern
+ matching logic which looks for repeating tap moves to signal
+ training convergence. */
+ uint64_t term_dffom_and : 1; /**< [ 25: 25](R/W) BASE-R training KR training Delta Filtered FOM is within the high
+ and low limits. Termination AND condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ Delta filtered FOM is within the high and low
+ limits set by
+ GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_HI_LMT] and
+ GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_LO_LMT], and
+ GSERN()_LANE()_TRAIN_6_BCFG[DFFOM_EXIT_EN]=1, and
+ the number of consecutive tap move iterations in which
+ the Delta Filtered FOM is within the high/low limits
+ exceeds the count in
+ GSERN()_LANE()_TRAIN_6_BCFG[DELTA_FFOM_CCNT] */
+ uint64_t term_prbs11_and : 1; /**< [ 26: 26](R/W) BASE-R training KR training PRBS11 pattern check extracted from the
+ KR training frame is error free. Termination AND condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ PRBS11 pattern check extracted from the KR training
+ frame is error free.
+ GSERN()_LANE()_TRAIN_4_BCFG[EN_PRBS11_CHK] must be enabled to
+ enable PRBS11 pattern error checking. */
+ uint64_t trn_ntap_mvs : 8; /**< [ 34: 27](RO/H) BASE-R training holds the number of link partner tap moves made during
+ link training. */
+ uint64_t exit_dffom : 13; /**< [ 47: 35](RO/H) Training exit location delta filtered FOM value. Holds the delta filtered FOM
+ value at the completion of BASE-R training. Number represented in offset binary
+ notation. For diagnostic use only. */
+ uint64_t exit_max_tap_moves : 1; /**< [ 48: 48](RO/H) Training exit condition exceeded maximum number of tap moves.
+ When BASE-R training is concluded, indicated by
+ GSERN()_LANE()_TRAIN_0_BCFG[LD_RECEIVER_RDY] set to one
+ [EXIT_MAX_TAP_MOVES] will be set if the training was terminated
+ because the training state machine exceeded the maximum number of
+ tap moves specified in
+ GSERN()_LANE()_TRAIN_2_BCFG[MAX_TAP_MOVES].
+
+ This bit will be cleared if BASE-R training is re-enabled.
+ For diagnostic use only. */
+ uint64_t exit_min_tap_moves : 1; /**< [ 49: 49](RO/H) Training exit condition exceeded minimum number of tap moves.
+ When BASE-R training is concluded, indicated by
+ GSERN()_LANE()_TRAIN_0_BCFG[LD_RECEIVER_RDY] set to one
+ [EXIT_MIN_TAP_MOVES] will be set if the training was terminated
+ because the training state machine exceeded the minimum number of
+ tap moves specified in
+ GSERN()_LANE()_TRAIN_2_BCFG[MIN_TAP_MOVES].
+
+ This bit will be cleared if BASE-R training is re-enabled.
+ For diagnostic use only. */
+ uint64_t exit_tmt_timer : 1; /**< [ 50: 50](RO/H) Training timeout timer expired.
+ When BASE-R training is concluded, indicated by
+ GSERN()_LANE()_TRAIN_0_BCFG[LD_RECEIVER_RDY] set to one
+ [EXIT_MAX_TAP_MOVES] will be set if the training was terminated
+ because the training state machine KR training time-out timer expired.
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_SEL] and
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_FAST] select the
+ timeout time in milliseconds/microseconds and
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_DISABLE] enables
+ the timeout timer when cleared to zero.
+
+ This bit will be cleared if BASE-R training is re-enabled.
+ For diagnostic use only. */
+ uint64_t exit_rep_pattern : 1; /**< [ 51: 51](RO/H) Training exit condition repeating TAP moves pattern detected.
+ When BASE-R training is concluded, indicated by
+ GSERN()_LANE()_TRAIN_0_BCFG[LD_RECEIVER_RDY] set to one
+ [EXIT_REP_PATTERN] will be set if the training was terminated
+ because the training state machine discovered a repeating tap
+ move pattern. The GSERN()_LANE()_TRAIN_5_BCFG[PAT_EXIT_CNT] must
+ be set to a non-zero value and GSERN()_LANE()_TRAIN_5_BCFG[PAT_MATCH_EN]
+ must be set to a one to enable the repeating tap move pattern
+ matching logic which looks for repeating tap moves to signal
+ training convergence.
+
+ This bit will be cleared if BASE-R training is re-enabled.
+ For diagnostic use only. */
+ uint64_t exit_delta_ffom : 1; /**< [ 52: 52](RO/H) Training exit condition due to delta filtered FOM.
+ When BASE-R training is concluded, indicated by
+ GSERN()_LANE()_TRAIN_0_BCFG[LD_RECEIVER_RDY] set to one the
+ [EXIT_DELTA_FFOM] bit will be set if the training was terminated
+ because the Delta Filtered FOM is within the high and low limits set by
+ GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_HI_LMT] and
+ GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_LO_LMT], and
+ GSERN()_LANE()_TRAIN_6_BCFG[DFFOM_EXIT_EN]=1, and
+ the number of consecutive tap move iterations in which
+ the Delta Filtered FOM is within the high/low limits
+ exceeded the count in
+ GSERN()_LANE()_TRAIN_6_BCFG[DELTA_FFOM_CCNT]
+
+ This bit will be cleared if BASE-R training is re-enabled.
+ For diagnostic use only. */
+ uint64_t exit_prbs11_ok : 1; /**< [ 53: 53](RO/H) Training exit condition PRBS11 in the BASE-R KR training frame is
+ error free.
+ When BASE-R training is concluded, indicated by
+ GSERN()_LANE()_TRAIN_0_BCFG[LD_RECEIVER_RDY] set to one
+ [EXIT_PRBS11_OK] will be set if the training was terminated
+ because the PRBS11 pattern extracted by the CGX or OCX MAC
+ indicates that the PRBS11 pattern is error free.
+
+ This bit will report the PRBS11 status when BASE-R training
+ completes even if GSERN()_LANE()_TRAIN_3_BCFG[LD_TRAIN_DONE\<21\>
+ or LD_TRAIN_DONE\<26\>] are not set.
+ GSERN()_LANE()_TRAIN_4_BCFG[EN_PRBS11_CHK] must be enabled
+ for the [EXIT_PRBS11_OK] status to be reported.
+
+ This bit will be cleared if BASE-R training is re-enabled.
+ For diagnostic use only.
+
+ Internal:
+ FIXME what does LD_TRAIN_DONE refer to, then remove above exempt attribute. */
+ uint64_t exit_sat_mv_lmt : 1; /**< [ 54: 54](RO/H) BASE-R training saturated move limit threshold exit flag.
+ See GSERN()_LANE()_TRAIN_2_BCFG[TRN_SAT_MV_LMT_EN].
+ For diagnostic use only. */
+ uint64_t train_tx_min_rule : 8; /**< [ 62: 55](R/W) BASE-R training TX taps minimum coefficient rule. Sets the lower limit of the permissible
+ range of the TX equalizer c(0), c(+1), and c(-1) taps so that the TX equalizer
+ operates within range specified in the IEEE 802.3-2012 Clause 72 10GBASE-KR
+ and IEEE 802.3bj-2014 Clause 93 100GBASE-KR4.
+ The TX coefficient minimum rule requires (main - pre - post) \>= [TRAIN_TX_MIN_RULE].
+
+ The allowable range for [TRAIN_TX_MIN_RULE] is
+ (6 decimal \<= [TRAIN_TX_MIN_RULE] \<= 16 decimal).
+ For 10GBASE-KR, 40GBASE-KR4 and 100GBASE-KR4 it is recommended to
+ program [TRAIN_TX_MIN_RULE] to 0x6.
+
+ c(-1) pre TX tap range is programmed by GSERN()_LANE()_TRAIN_2_BCFG[PRE_MAX_LIMIT] and
+ GSERN()_LANE()_TRAIN_2_BCFG[PRE_MIN_LIMIT].
+
+ c(0) main TX tap range is programmed by GSERN()_LANE()_TRAIN_2_BCFG[MAIN_MAX_LIMIT] and
+ GSERN()_LANE()_TRAIN_2_BCFG[MAIN_MIN_LIMIT].
+
+ c(+1) post TX tap range is programmed by GSERN()_LANE()_TRAIN_2_BCFG[POST_MAX_LIMIT] and
+ GSERN()_LANE()_TRAIN_2_BCFG[POST_MIN_LIMIT]. */
+ uint64_t exit_fom_thrs : 1; /**< [ 63: 63](RO/H) BASE-R training exit condition flag indicates the measured FOM
+ was equal to or exceeded the FOM threshold value specified in
+ GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL] when
+ GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN] is set to 1.
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_AND] and
+ GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_OR] to
+ specify the BASE-R training convergence exit criteria. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_train_3_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_train_3_bcfg bdk_gsernx_lanex_train_3_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_3_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900031e0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_TRAIN_3_BCFG", 2, a, b, 0, 0);
+}
+
/* Per-register attribute macros for GSERNX_LANEX_TRAIN_3_BCFG: the C type
   used for its value, the bus access type, the printable register name,
   the device BAR, the bus number, and the register's argument list. */
#define typedef_BDK_GSERNX_LANEX_TRAIN_3_BCFG(a,b) bdk_gsernx_lanex_train_3_bcfg_t
#define bustype_BDK_GSERNX_LANEX_TRAIN_3_BCFG(a,b) BDK_CSR_TYPE_RSL
#define basename_BDK_GSERNX_LANEX_TRAIN_3_BCFG(a,b) "GSERNX_LANEX_TRAIN_3_BCFG"
#define device_bar_BDK_GSERNX_LANEX_TRAIN_3_BCFG(a,b) 0x0 /* PF_BAR0 */
#define busnum_BDK_GSERNX_LANEX_TRAIN_3_BCFG(a,b) (a)
#define arguments_BDK_GSERNX_LANEX_TRAIN_3_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_train_4_bcfg
+ *
+ * GSER Lane Training Base Configuration Register 4
+ * This register controls settings for lane training.
+ */
union bdk_gsernx_lanex_train_4_bcfg
{
    uint64_t u; /**< Whole-register view: read or write all 64 bits at once. */
    struct bdk_gsernx_lanex_train_4_bcfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t term_fom_thrs_and : 1; /**< [ 63: 63](R/W) BASE-R training termination condition measured FOM equal or
                                             exceeds the FOM threshold set in
                                             GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL].
                                             Termination AND condition.
                                             Part of the BASE-R training termination condition register.
                                             See the full description of the training termination conditions
                                             register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].

                                             Exceeded FOM threshold.
                                             GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_VAL] sets the FOM
                                             threshold.

                                             Refer to the description for
                                             GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN] and
                                             GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL] and
                                             GSERN()_LANE()_TRAIN_3_BCFG[EXIT_FOM_THRS].

                                             Internal:
                                             FIXME no such field GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_VAL], then remove
                                             above exempt attribute. */
        uint64_t term_fom_thrs_or : 1;  /**< [ 62: 62](R/W) BASE-R training termination condition measured FOM equal or
                                             exceeds the FOM threshold set in
                                             GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL].
                                             Termination OR condition.
                                             Part of the BASE-R training termination condition register.
                                             See the full description of the training termination conditions
                                             register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].

                                             Exceeded FOM threshold.
                                             GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_VAL] sets the FOM
                                             threshold.

                                             Refer to the description for
                                             GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN] and
                                             GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL] and
                                             GSERN()_LANE()_TRAIN_3_BCFG[EXIT_FOM_THRS].

                                             Internal:
                                             FIXME no such field GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_VAL]. */
        uint64_t en_prbs11_chk : 1;     /**< [ 61: 61](R/W) BASE-R training enables the check for PRBS11 checking for training
                                             convergence.
                                             0 = Disables PRBS11 checking.
                                             1 = Enables PRBS11 checking.

                                             The CGX/OCX MAC extracts the PRBS11 pattern from the KR training frame
                                             and checks the PRBS11 pattern for errors. The CGX/MAC signals to the
                                             KR training frame if the PRBS11 pattern sampled from the KR training
                                             frame is error free or contains errors.

                                             When [EN_PRBS11_CHK] is set the KR training state machine will
                                             sample the PRBS11 status signal from the MAC and if the PRBS11 is
                                             error free will use this to signal training convergence and signal
                                             receiver ready if this condition is enabled in the
                                             GSERN()_LANE()_TRAIN_3_BCFG[LD_TRAIN_DONE\<21\> or LD_TRAIN_DONE\<26\>]
                                             training termination condition fields.

                                             Internal:
                                             FIXME what does LD_TRAIN_DONE refer to? */
        uint64_t en_rev_moves : 1;      /**< [ 60: 60](R/W) BASE-R training controls the receiver adaptation algorithm to reverse previous
                                             tap moves that resulted in a decrease in the receiver figure of merit
                                             (FOM).
                                             0 = Prevents the adaptation algorithm state machine from
                                             reversing previous tap moves that resulted in a lower FOM.
                                             1 = Enables the adaptation algorithm state machine
                                             to reverse a previous tap move that resulted in a lower FOM value.

                                             The receiver adaptation algorithm will not reverse previous tap moves until the
                                             number of tap moves exceeds the minimum number of tap moves specified in
                                             GSERN()_LANE()_TRAIN_2_BCFG[MIN_TAP_MOVES]. [EN_REV_MOVES] is normally enabled to
                                             improve the adaptation convergence time. */
        uint64_t tx_tap_stepsize : 1;   /**< [ 59: 59](R/W) BASE-R training controls the transmitter Pre/Main/Post step size when a Coefficient Update
                                             increment or decrement request is received. When [TX_TAP_STEPSIZE] is zero the
                                             transmitter Pre/Main/Post step size is set to +/- 1. When [TX_TAP_STEPSIZE] is set to one
                                             the
                                             transmitter Pre/Main/Post step size is set to +/- 2. */
        uint64_t train_rst : 1;         /**< [ 58: 58](R/W) Set to force the training engine into reset. Set low to enable link
                                             training. */
        uint64_t train_ovrrd_en : 1;    /**< [ 57: 57](R/W) Training engine eye monitor FOM request override enable.
                                             If not programmed to PCIe, CGX, or OCX mode via GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
                                             then program [TRAIN_OVRRD_EN] to 1 before using
                                             GSERN()_LANE()_TRAIN_4_BCFG[EQ_EVAL_OVRRD_REQ] and
                                             GSERN()_LANE()_TRAIN_4_BCFG[EQ_EVAL_OVRRD_EN] to request an RX equalizer
                                             evaluation to measure the RX equalizer Figure of Merit (FOM). The 8-bit FOM is
                                             returned in GSERN()_LANE()_TRAIN_5_BCFG[FOM] and the raw 12-bit FOM
                                             is returned in GSERN()_LANE()_TRAIN_5_BCFG[RAW_FOM].
                                             For diagnostic use only. */
        uint64_t rxt_rev_dir : 1;       /**< [ 56: 56](R/W) When set, reverses the direction of the
                                             GSERN()_LANE()_TRAIN_0_BCFG[RXT_TX_POST_DIR],
                                             GSERN()_LANE()_TRAIN_0_BCFG[RXT_TX_MAIN_DIR], and
                                             GSERN()_LANE()_TRAIN_0_BCFG[RXT_TX_PRE_DIR]
                                             link partner TX tap direction hints. For diagnostic use only. */
        uint64_t adapt_axis : 3;        /**< [ 55: 53](R/W) Sets the number of adaptation axes to use during receiver adaptation.
                                             Typically set to 0x7 to enable all three adaptation axes. One-hot encoded.

                                             Set to 0x1 to only enable axis 1 and disable axis 2 and axis 3.
                                             Set to 0x3 to enable axis 1 and axis 2 but disable axis 3.
                                             Set to 0x7 to enable axis 1, 2 and 3. (default.)
                                             For diagnostic use only. */
        uint64_t c1_e_adj_step : 5;     /**< [ 52: 48](R/W) Reserved.
                                             Internal:
                                             Functionality moved to GSERN()_LANE()_TRAIN_10_BCFG.L_C1_E_ADJ_STEP */
        uint64_t eq_eval_ovrrd_req : 1; /**< [ 47: 47](R/W) When set issues a receiver equalization evaluation request when
                                             GSERN()_LANE()_TRAIN_4_BCFG[EQ_EVAL_OVRRD_EN] is set.
                                             For diagnostic use only. */
        uint64_t eq_eval_ovrrd_en : 1;  /**< [ 46: 46](R/W) When set the RX equalization evaluation request is controlled by
                                             GSERN()_LANE()_TRAIN_4_BCFG[EQ_EVAL_OVRRD_REQ].
                                             For diagnostic use only. */
        uint64_t err_cnt_div_ovrrd_val : 4; /**< [ 45: 42](R/W) Error counter divider override value. See table below.
                                             Divider is active when the [ERR_CNT_DIV_OVRRD_EN] is set.
                                             For diagnostic use only.

                                             0x0 = No divider.
                                             0x1 = DIV 2.
                                             0x2 = DIV 4.
                                             0x3 = DIV 8.
                                             0x4 = DIV 16.
                                             0x5 = DIV 32.
                                             0x6 = DIV 64.
                                             0x7 = DIV 128.
                                             0x8 = DIV 256.
                                             0x9 = DIV 512.
                                             0xA = DIV 1024.
                                             0xB = DIV 2048.
                                             0xC = DIV 4096.
                                             0xD = DIV 8192.
                                             0xE = DIV 16384.
                                             0xF = DIV 32768. */
        uint64_t err_cnt_div_ovrrd_en : 1; /**< [ 41: 41](R/W) Error counter divider override enable.
                                             For diagnostic use only. */
        uint64_t eye_cnt_ovrrd_en : 1;  /**< [ 40: 40](R/W) Eye Cycle Count Override Enable. When set the number of eye monitor
                                             cycles to sample and count during the BASE-R training
                                             figure of merit (FOM) calculation
                                             is controlled by GSERN()_LANE()_TRAIN_4_BCFG[EYE_CNT_OVRRD_VAL].
                                             For diagnostic use only. */
        uint64_t eye_cnt_ovrrd_val : 40; /**< [ 39: 0](R/W) Sets the number of eye monitor cycles to sample/count during the BASE-R training
                                             figure of merit (FOM) calculation when
                                             GSERN()_LANE()_TRAIN_4_BCFG[EYE_CNT_OVRRD_EN]=1.
                                             For diagnostic use only. */
#else /* Word 0 - Little Endian */
        uint64_t eye_cnt_ovrrd_val : 40; /**< [ 39: 0](R/W) Sets the number of eye monitor cycles to sample/count during the BASE-R training
                                             figure of merit (FOM) calculation when
                                             GSERN()_LANE()_TRAIN_4_BCFG[EYE_CNT_OVRRD_EN]=1.
                                             For diagnostic use only. */
        uint64_t eye_cnt_ovrrd_en : 1;  /**< [ 40: 40](R/W) Eye Cycle Count Override Enable. When set the number of eye monitor
                                             cycles to sample and count during the BASE-R training
                                             figure of merit (FOM) calculation
                                             is controlled by GSERN()_LANE()_TRAIN_4_BCFG[EYE_CNT_OVRRD_VAL].
                                             For diagnostic use only. */
        uint64_t err_cnt_div_ovrrd_en : 1; /**< [ 41: 41](R/W) Error counter divider override enable.
                                             For diagnostic use only. */
        uint64_t err_cnt_div_ovrrd_val : 4; /**< [ 45: 42](R/W) Error counter divider override value. See table below.
                                             Divider is active when the [ERR_CNT_DIV_OVRRD_EN] is set.
                                             For diagnostic use only.

                                             0x0 = No divider.
                                             0x1 = DIV 2.
                                             0x2 = DIV 4.
                                             0x3 = DIV 8.
                                             0x4 = DIV 16.
                                             0x5 = DIV 32.
                                             0x6 = DIV 64.
                                             0x7 = DIV 128.
                                             0x8 = DIV 256.
                                             0x9 = DIV 512.
                                             0xA = DIV 1024.
                                             0xB = DIV 2048.
                                             0xC = DIV 4096.
                                             0xD = DIV 8192.
                                             0xE = DIV 16384.
                                             0xF = DIV 32768. */
        uint64_t eq_eval_ovrrd_en : 1;  /**< [ 46: 46](R/W) When set the RX equalization evaluation request is controlled by
                                             GSERN()_LANE()_TRAIN_4_BCFG[EQ_EVAL_OVRRD_REQ].
                                             For diagnostic use only. */
        uint64_t eq_eval_ovrrd_req : 1; /**< [ 47: 47](R/W) When set issues a receiver equalization evaluation request when
                                             GSERN()_LANE()_TRAIN_4_BCFG[EQ_EVAL_OVRRD_EN] is set.
                                             For diagnostic use only. */
        uint64_t c1_e_adj_step : 5;     /**< [ 52: 48](R/W) Reserved.
                                             Internal:
                                             Functionality moved to GSERN()_LANE()_TRAIN_10_BCFG.L_C1_E_ADJ_STEP */
        uint64_t adapt_axis : 3;        /**< [ 55: 53](R/W) Sets the number of adaptation axes to use during receiver adaptation.
                                             Typically set to 0x7 to enable all three adaptation axes. One-hot encoded.

                                             Set to 0x1 to only enable axis 1 and disable axis 2 and axis 3.
                                             Set to 0x3 to enable axis 1 and axis 2 but disable axis 3.
                                             Set to 0x7 to enable axis 1, 2 and 3. (default.)
                                             For diagnostic use only. */
        uint64_t rxt_rev_dir : 1;       /**< [ 56: 56](R/W) When set, reverses the direction of the
                                             GSERN()_LANE()_TRAIN_0_BCFG[RXT_TX_POST_DIR],
                                             GSERN()_LANE()_TRAIN_0_BCFG[RXT_TX_MAIN_DIR], and
                                             GSERN()_LANE()_TRAIN_0_BCFG[RXT_TX_PRE_DIR]
                                             link partner TX tap direction hints. For diagnostic use only. */
        uint64_t train_ovrrd_en : 1;    /**< [ 57: 57](R/W) Training engine eye monitor FOM request override enable.
                                             If not programmed to PCIe, CGX, or OCX mode via GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
                                             then program [TRAIN_OVRRD_EN] to 1 before using
                                             GSERN()_LANE()_TRAIN_4_BCFG[EQ_EVAL_OVRRD_REQ] and
                                             GSERN()_LANE()_TRAIN_4_BCFG[EQ_EVAL_OVRRD_EN] to request an RX equalizer
                                             evaluation to measure the RX equalizer Figure of Merit (FOM). The 8-bit FOM is
                                             returned in GSERN()_LANE()_TRAIN_5_BCFG[FOM] and the raw 12-bit FOM
                                             is returned in GSERN()_LANE()_TRAIN_5_BCFG[RAW_FOM].
                                             For diagnostic use only. */
        uint64_t train_rst : 1;         /**< [ 58: 58](R/W) Set to force the training engine into reset. Set low to enable link
                                             training. */
        uint64_t tx_tap_stepsize : 1;   /**< [ 59: 59](R/W) BASE-R training controls the transmitter Pre/Main/Post step size when a Coefficient Update
                                             increment or decrement request is received. When [TX_TAP_STEPSIZE] is zero the
                                             transmitter Pre/Main/Post step size is set to +/- 1. When [TX_TAP_STEPSIZE] is set to one
                                             the
                                             transmitter Pre/Main/Post step size is set to +/- 2. */
        uint64_t en_rev_moves : 1;      /**< [ 60: 60](R/W) BASE-R training controls the receiver adaptation algorithm to reverse previous
                                             tap moves that resulted in a decrease in the receiver figure of merit
                                             (FOM).
                                             0 = Prevents the adaptation algorithm state machine from
                                             reversing previous tap moves that resulted in a lower FOM.
                                             1 = Enables the adaptation algorithm state machine
                                             to reverse a previous tap move that resulted in a lower FOM value.

                                             The receiver adaptation algorithm will not reverse previous tap moves until the
                                             number of tap moves exceeds the minimum number of tap moves specified in
                                             GSERN()_LANE()_TRAIN_2_BCFG[MIN_TAP_MOVES]. [EN_REV_MOVES] is normally enabled to
                                             improve the adaptation convergence time. */
        uint64_t en_prbs11_chk : 1;     /**< [ 61: 61](R/W) BASE-R training enables the check for PRBS11 checking for training
                                             convergence.
                                             0 = Disables PRBS11 checking.
                                             1 = Enables PRBS11 checking.

                                             The CGX/OCX MAC extracts the PRBS11 pattern from the KR training frame
                                             and checks the PRBS11 pattern for errors. The CGX/MAC signals to the
                                             KR training frame if the PRBS11 pattern sampled from the KR training
                                             frame is error free or contains errors.

                                             When [EN_PRBS11_CHK] is set the KR training state machine will
                                             sample the PRBS11 status signal from the MAC and if the PRBS11 is
                                             error free will use this to signal training convergence and signal
                                             receiver ready if this condition is enabled in the
                                             GSERN()_LANE()_TRAIN_3_BCFG[LD_TRAIN_DONE\<21\> or LD_TRAIN_DONE\<26\>]
                                             training termination condition fields.

                                             Internal:
                                             FIXME what does LD_TRAIN_DONE refer to? */
        uint64_t term_fom_thrs_or : 1;  /**< [ 62: 62](R/W) BASE-R training termination condition measured FOM equal or
                                             exceeds the FOM threshold set in
                                             GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL].
                                             Termination OR condition.
                                             Part of the BASE-R training termination condition register.
                                             See the full description of the training termination conditions
                                             register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].

                                             Exceeded FOM threshold.
                                             GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_VAL] sets the FOM
                                             threshold.

                                             Refer to the description for
                                             GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN] and
                                             GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL] and
                                             GSERN()_LANE()_TRAIN_3_BCFG[EXIT_FOM_THRS].

                                             Internal:
                                             FIXME no such field GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_VAL]. */
        uint64_t term_fom_thrs_and : 1; /**< [ 63: 63](R/W) BASE-R training termination condition measured FOM equal or
                                             exceeds the FOM threshold set in
                                             GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL].
                                             Termination AND condition.
                                             Part of the BASE-R training termination condition register.
                                             See the full description of the training termination conditions
                                             register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].

                                             Exceeded FOM threshold.
                                             GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_VAL] sets the FOM
                                             threshold.

                                             Refer to the description for
                                             GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN] and
                                             GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL] and
                                             GSERN()_LANE()_TRAIN_3_BCFG[EXIT_FOM_THRS].

                                             Internal:
                                             FIXME no such field GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_VAL], then remove
                                             above exempt attribute. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_train_4_bcfg_s cn; */
};
+typedef union bdk_gsernx_lanex_train_4_bcfg bdk_gsernx_lanex_train_4_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_4_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_4_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900031f0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_TRAIN_4_BCFG", 2, a, b, 0, 0);
+}
+
/* Per-register attribute macros for GSERNX_LANEX_TRAIN_4_BCFG: the C type
   used for its value, the bus access type, the printable register name,
   the device BAR, the bus number, and the register's argument list. */
#define typedef_BDK_GSERNX_LANEX_TRAIN_4_BCFG(a,b) bdk_gsernx_lanex_train_4_bcfg_t
#define bustype_BDK_GSERNX_LANEX_TRAIN_4_BCFG(a,b) BDK_CSR_TYPE_RSL
#define basename_BDK_GSERNX_LANEX_TRAIN_4_BCFG(a,b) "GSERNX_LANEX_TRAIN_4_BCFG"
#define device_bar_BDK_GSERNX_LANEX_TRAIN_4_BCFG(a,b) 0x0 /* PF_BAR0 */
#define busnum_BDK_GSERNX_LANEX_TRAIN_4_BCFG(a,b) (a)
#define arguments_BDK_GSERNX_LANEX_TRAIN_4_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_train_5_bcfg
+ *
+ * GSER Lane Training Base Configuration Register 5
+ * This register controls settings for lane training.
+ */
+union bdk_gsernx_lanex_train_5_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_train_5_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pat_exit_cnt : 4; /**< [ 63: 60](R/W) BASE-R training controls the receiver adaptation algorithm training convergence
+ pattern matching logic. As BASE-R training progresses the Pre/Main/Post tap
+ direction change coefficient updates to the link partner start to dither around the
+ optimal tap values. The pattern matching logic looks for repeating patterns of
+ the tap dithering around the optimal value and is used as one metric to determine
+ that BASE-R training has converged and local device can signal receiver ready.
+
+ The [PAT_EXIT_CNT] variable sets the maximum length of the repeating pattern to search
+ for in the pattern matching array. The pattern matching array has twelve elements
+ therefore the maximum value of [PAT_EXIT_CNT] is 0xC. A value of 0x6 has been
+ found to be optimal for recognizing training tap convergence.
+
+ The GSERN()_LANE()_TRAIN_5_BCFG[PAT_EXIT_CNT] field is used in conjunction with the
+ GSERN()_LANE()_TRAIN_5_BCFG[PAT_MATCH_EN] field to control the training convergence
+ pattern matching logic during BASE-R training. */
+ uint64_t pat_match_en : 1; /**< [ 59: 59](R/W) BASE-R training controls the receiver adaptation algorithm when [PAT_MATCH_EN] is set to
+ one
+ the training convergence pattern matching logic is enabled. The training pattern matching
+ logic tracks the link partner transmitter tap moves and sets a flag when the pattern
+ is found to be repeating in the taps moves tracking array. This is used to help
+ converge training adaptation. When [PAT_MATCH_EN] is cleared to zero the pattern matching
+ logic is disabled and not used to detect training convergence.
+
+ The GSERN()_LANE()_TRAIN_5_BCFG[PAT_MATCH_EN] field is used in conjunction with the
+ GSERN()_LANE()_TRAIN_5_BCFG[PAT_EXIT_CNT] field to control the training convergence
+ pattern matching logic during BASE-R training. */
+ uint64_t fdltfom_hi_lmt : 8; /**< [ 58: 51](R/W) BASE-R training sets the Delta Filtered FOM upper limit for training convergence.
+ Value is a signed twos complement value. */
+ uint64_t fdltfom_lo_lmt : 8; /**< [ 50: 43](R/W) BASE-R training sets the Delta Filtered FOM lower limit for training convergence.
+ Value is a signed twos complement value. */
+ uint64_t inv_post_dir : 1; /**< [ 42: 42](R/W) BASE-R training when set reverses the direction of the post tap (C+1)
+ direction hint from the local device. */
+ uint64_t inv_main_dir : 1; /**< [ 41: 41](R/W) BASE-R training when set reverses the direction of the main tap (C0)
+ direction hint from the local device. */
+ uint64_t inv_pre_dir : 1; /**< [ 40: 40](R/W) BASE-R training when set reverses the direction of the pre tap (C-1)
+ direction hint from the local device. */
+ uint64_t use_ffom : 1; /**< [ 39: 39](R/W) Use filtered figure of merit for BASE-R transmitter adaptation logic.
+ For diagnostic use only.
+ 0 = The BASE-R transmitter adaptation logic use the unfiltered raw figure
+ of merit FOM for BASE-R Inc/Dec direction hint computation.
+ 1 = The BASE-R transmitter adaptation logic use the
+ filtered FOM for Inc/Dec direction hint computation. */
+ uint64_t dfom_tc : 3; /**< [ 38: 36](R/W) Delta filtered figure of merit (DFOM) filter time constant. The DFOM is filtered
+ by a cumulative moving average (CMA) filter. [DFOM_TC] sets the time constant
+ of the CMA filter.
+ Selectable time constant options are in the range 0 to 7 which sets the divider value
+ used to scale the summed DFOM input term and the filtered DFOM feedback term. This
+ provides
+ a smoothed delta filtered figure of merit for use by the BASE-R transmitter adaptation
+ logic.
+
+ For diagnostic use only.
+
+ 0x0 = No scaling.
+ 0x1 = Divide by 2.
+ 0x2 = Divide by 4.
+ 0x3 = Divide by 8.
+ 0x4 = Divide by 16.
+ 0x5 = Divide by 32.
+ 0x6 = Divide by 64.
+ 0x7 = Divide by 128. */
+ uint64_t ffom_tc : 3; /**< [ 35: 33](R/W) Filtered figure of merit (FFOM) filter time constant. The raw figure of merit (raw FOM)
+ is filtered by a cumulative moving average (CMA) filter. [FFOM_TC] sets the time
+ constant of the CMA filter.
+ Selectable time constant options are in the range 0 to 7 which sets the divider value
+ used to scale the raw FOM input term and the filtered FOM feedback term. This provides
+ a smoothed filtered figure of merit for use by the BASE-R transmitter adaptation logic.
+
+ 0x0 = No scaling.
+ 0x1 = Divide by 2.
+ 0x2 = Divide by 4.
+ 0x3 = Divide by 8.
+ 0x4 = Divide by 16.
+ 0x5 = Divide by 32.
+ 0x6 = Divide by 64.
+ 0x7 = Divide by 128.
+
+ For diagnostic use only. */
+ uint64_t eq_eval_ack : 1; /**< [ 32: 32](RO/H) When set indicates a receiver equalization evaluation acknowledgment. Set in
+ response to request when GSERN()_LANE()_TRAIN_4_BCFG[EQ_EVAL_OVRRD_EN] is set
+ and GSERN()_LANE()_TRAIN_4_BCFG[EQ_EVAL_OVRRD_REQ] is set.
+
+ When [EQ_EVAL_ACK] is set, clear GSERN()_LANE()_TRAIN_4_BCFG[EQ_EVAL_OVRRD_REQ]
+ which will in turn clear [EQ_EVAL_ACK] before issuing another RX equalization
+ evaluation request via GSERN()_LANE()_TRAIN_4_BCFG[EQ_EVAL_OVRRD_REQ].
+
+ For diagnostic use only. */
+ uint64_t filtered_fom : 12; /**< [ 31: 20](RO/H) Filtered figure of merit (FOM) from the receiver adaptation logic.
+ For diagnostic use only. */
+ uint64_t raw_fom : 12; /**< [ 19: 8](RO/H) Raw figure of merit (FOM) from the receiver adaptation logic.
+ For diagnostic use only. */
+ uint64_t fom : 8; /**< [ 7: 0](RO/H) Figure of merit (FOM) for PCIe and CGX logic used for link partner TX equalizer
+ adaptation. For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t fom : 8; /**< [ 7: 0](RO/H) Figure of merit (FOM) for PCIe and CGX logic used for link partner TX equalizer
+ adaptation. For diagnostic use only. */
+ uint64_t raw_fom : 12; /**< [ 19: 8](RO/H) Raw figure of merit (FOM) from the receiver adaptation logic.
+ For diagnostic use only. */
+ uint64_t filtered_fom : 12; /**< [ 31: 20](RO/H) Filtered figure of merit (FOM) from the receiver adaptation logic.
+ For diagnostic use only. */
+ uint64_t eq_eval_ack : 1; /**< [ 32: 32](RO/H) When set indicates a receiver equalization evaluation acknowledgment. Set in
+ response to request when GSERN()_LANE()_TRAIN_4_BCFG[EQ_EVAL_OVRRD_EN] is set
+ and GSERN()_LANE()_TRAIN_4_BCFG[EQ_EVAL_OVRRD_REQ] is set.
+
+ When [EQ_EVAL_ACK] is set, clear GSERN()_LANE()_TRAIN_4_BCFG[EQ_EVAL_OVRRD_REQ]
+ which will in turn clear [EQ_EVAL_ACK] before issuing another RX equalization
+ evaluation request via GSERN()_LANE()_TRAIN_4_BCFG[EQ_EVAL_OVRRD_REQ].
+
+ For diagnostic use only. */
+ uint64_t ffom_tc : 3; /**< [ 35: 33](R/W) Filtered figure of merit (FFOM) filter time constant. The raw figure of merit (raw FOM)
+ is filtered by a cumulative moving average (CMA) filter. [FFOM_TC] sets the time
+ constant of the CMA filter.
+ Selectable time constant options are in the range 0 to 7 which sets the divider value
+ used to scale the raw FOM input term and the filtered FOM feedback term. This provides
+ a smoothed filtered figure of merit for use by the BASE-R transmitter adaptation logic.
+
+ 0x0 = No scaling.
+ 0x1 = Divide by 2.
+ 0x2 = Divide by 4.
+ 0x3 = Divide by 8.
+ 0x4 = Divide by 16.
+ 0x5 = Divide by 32.
+ 0x6 = Divide by 64.
+ 0x7 = Divide by 128.
+
+ For diagnostic use only. */
+ uint64_t dfom_tc : 3; /**< [ 38: 36](R/W) Delta filtered figure of merit (DFOM) filter time constant. The DFOM is filtered
+ by a cumulative moving average (CMA) filter. [DFOM_TC] sets the time constant
+ of the CMA filter.
+ Selectable time constant options are in the range 0 to 7 which sets the divider value
+ used to scale the summed DFOM input term and the filtered DFOM feedback term. This
+ provides
+ a smoothed delta filtered figure of merit for use by the BASE-R transmitter adaptation
+ logic.
+
+ For diagnostic use only.
+
+ 0x0 = No scaling.
+ 0x1 = Divide by 2.
+ 0x2 = Divide by 4.
+ 0x3 = Divide by 8.
+ 0x4 = Divide by 16.
+ 0x5 = Divide by 32.
+ 0x6 = Divide by 64.
+ 0x7 = Divide by 128. */
+ uint64_t use_ffom : 1; /**< [ 39: 39](R/W) Use filtered figure of merit for BASE-R transmitter adaptation logic.
+ For diagnostic use only.
+ 0 = The BASE-R transmitter adaptation logic use the unfiltered raw figure
+ of merit FOM for BASE-R Inc/Dec direction hint computation.
+ 1 = The BASE-R transmitter adaptation logic use the
+ filtered FOM for Inc/Dec direction hint computation. */
+ uint64_t inv_pre_dir : 1; /**< [ 40: 40](R/W) BASE-R training when set reverses the direction of the pre tap (C-1)
+ direction hint from the local device. */
+ uint64_t inv_main_dir : 1; /**< [ 41: 41](R/W) BASE-R training when set reverses the direction of the main tap (C0)
+ direction hint from the local device. */
+ uint64_t inv_post_dir : 1; /**< [ 42: 42](R/W) BASE-R training when set reverses the direction of the post tap (C+1)
+ direction hint from the local device. */
+ uint64_t fdltfom_lo_lmt : 8; /**< [ 50: 43](R/W) BASE-R training sets the Delta Filtered FOM lower limit for training convergence.
+ Value is a signed twos complement value. */
+ uint64_t fdltfom_hi_lmt : 8; /**< [ 58: 51](R/W) BASE-R training sets the Delta Filtered FOM upper limit for training convergence.
+ Value is a signed twos complement value. */
+ uint64_t pat_match_en : 1; /**< [ 59: 59](R/W) BASE-R training controls the receiver adaptation algorithm when [PAT_MATCH_EN] is set to
+ one
+ the training convergence pattern matching logic is enabled. The training pattern matching
+ logic tracks the link partner transmitter tap moves and sets a flag when the pattern
+ is found to be repeating in the taps moves tracking array. This is used to help
+ converge training adaptation. When [PAT_MATCH_EN] is cleared to zero the pattern matching
+ logic is disabled and not used to detect training convergence.
+
+ The GSERN()_LANE()_TRAIN_5_BCFG[PAT_MATCH_EN] field is used in conjunction with the
+ GSERN()_LANE()_TRAIN_5_BCFG[PAT_EXIT_CNT] field to control the training convergence
+ pattern matching logic during BASE-R training. */
+ uint64_t pat_exit_cnt : 4; /**< [ 63: 60](R/W) BASE-R training controls the receiver adaptation algorithm training convergence
+ pattern matching logic. As BASE-R training progresses the Pre/Main/Post tap
+ direction change coefficient updates to the link partner start to dither around the
+ optimal tap values. The pattern matching logic looks for repeating patterns of
+ the tap dithering around the optimal value and is used as one metric to determine
+ that BASE-R training has converged and local device can signal receiver ready.
+
+ The [PAT_EXIT_CNT] variable sets the maximum length of the repeating pattern to search
+ for in the pattern matching array. The pattern matching array has twelve elements
+ therefore the maximum value of [PAT_EXIT_CNT] is 0xC. A value of 0x6 has been
+ found to be optimal for recognizing training tap convergence.
+
+ The GSERN()_LANE()_TRAIN_5_BCFG[PAT_EXIT_CNT] field is used in conjunction with the
+ GSERN()_LANE()_TRAIN_5_BCFG[PAT_MATCH_EN] field to control the training convergence
+ pattern matching logic during BASE-R training. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_train_5_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_train_5_bcfg bdk_gsernx_lanex_train_5_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_5_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_5_BCFG(unsigned long a, unsigned long b)
+{
+    /* Address of GSERN(a)_LANE(b)_TRAIN_5_BCFG. Valid only on CN9XXX with
+       module a in 0..7 and lane b in 0..4; anything else is a fatal CSR error. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4)) {
+        const uint64_t module_off = 0x1000000ll * (a & 0x7); /* per-QLM stride */
+        const uint64_t lane_off   = 0x10000ll   * (b & 0x7); /* per-lane stride */
+        return 0x87e090003200ll + module_off + lane_off;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_TRAIN_5_BCFG", 2, a, b, 0, 0);
+}
+
+/* Helper macros consumed by the generic BDK_CSR_* accessors for
+   GSERNX_LANEX_TRAIN_5_BCFG: value type, bus type, printable name,
+   device BAR, bus number, and CSR argument list. */
+#define typedef_BDK_GSERNX_LANEX_TRAIN_5_BCFG(a,b) bdk_gsernx_lanex_train_5_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_TRAIN_5_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_TRAIN_5_BCFG(a,b) "GSERNX_LANEX_TRAIN_5_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_TRAIN_5_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_TRAIN_5_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_TRAIN_5_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_train_6_bcfg
+ *
+ * GSER Lane Training Base Configuration Register 6
+ * This register controls settings for lane training.
+ */
+union bdk_gsernx_lanex_train_6_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_train_6_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t frame_err : 1; /**< [ 63: 63](RO/H) Framing error. When set to a one and the
+ GSERN()_LANE()_TRAIN_6_BCFG[EN_FRMOFFS_CHK] bit is set
+ to a one and the training state machine has completed the framing
+ alignment check indicates that the DOUTE and DOUTQ pipes could
+ not be aligned to produce error free eye monitor data.
+ For diagnostic use only. */
+        uint64_t no_shft_path_gd : 1; /**< [ 62: 62](RO/H) The non-shifted error path completed the framing test without errors.
+ Valid when the GSERN()_LANE()_TRAIN_6_BCFG[EN_FRMOFFS_CHK] bit is set
+ to a one and the training state machine has completed the framing
+ alignment check.
+ For diagnostic use only. */
+        uint64_t shft_path_gd : 1; /**< [ 61: 61](RO/H) The shifted error path completed the framing test without errors.
+ Valid when the GSERN()_LANE()_TRAIN_6_BCFG[EN_FRMOFFS_CHK] bit is set
+ to a one and the training state machine has completed the framing
+ alignment check.
+ For diagnostic use only. */
+        uint64_t en_frmoffs_chk : 1; /**< [ 60: 60](R/W) Enable framing offset check. When [EN_FRMOFFS_CHK] is set to a one the training
+ eye monitor state machine checks if framing offset is needed between the receiver
+ DOUTQ and DOUTE pipes. The framing offset check is performed when BASE-R or PCIe
+ Gen3 training is first enabled.
+ The GSERN()_LANE()_TRAIN_6_BCFG[SHFT_PATH_GD] or
+ GSERN()_LANE()_TRAIN_6_BCFG[NO_SHFT_PATH_GD] flag will be set to indicate which
+ framing offset was required. If no framing offset can be found that produces
+ an error free eye measurement then the GSERN()_LANE()_TRAIN_6_BCFG[FRAME_ERR] flag will
+ be set.
+ For diagnostic use only. */
+        uint64_t en_rxwt_ctr : 1; /**< [ 59: 59](R/W) Enable receiver adaptation wait timer. When [EN_RXWT_CTR] is set to a one the
+ training state machine eye monitor measurement to measure the figure of merit
+ (FOM) is delayed by 10 microseconds to allow the receiver equalizer to adjust
+ to the link partner TX equalizer tap adjustments (BASE-R training and PCIe
+ training) during link training.
+ For diagnostic use only. */
+        uint64_t en_teoffs : 1; /**< [ 58: 58](R/W) Enable E-path QAC time offset adjustment. This is a diagnostic control used
+ to adjust the QAC E-path time offset. Typically the E-path QAC time offset is
+ set to 0.5UI. Setting [EN_TEOFFS] to a one enables the training state machine
+ to adjust the E-path QAC time offset by the value specified in
+ GSERN()_LANE()_TRAIN_6_BCFG[PRG_TEOFFS].
+ For diagnostic use only. */
+        uint64_t prg_teoffs : 6; /**< [ 57: 52](R/W) Programmable E-path QAC time offset. This is a diagnostic control used to set the
+ eye monitor Epath QAC offset. Use to trim the qac_eoffs offset during eye
+ monitor usage when used in BASE-R and PCIE training to measure the RX eye figure of
+ merit (FOM). Typically set to the middle of the eye, e.g. 0.5UI.
+
+ _ Target_eoffs = [PRG_TEOFFS] + (GSERN()_LANE()_RX_QAC_BSTS[QAC_EOFFS]
+ - GSERN()_LANE()_TRAIN_6_BCFG[PRG_TDELTA]).
+ _ [PRG_TEOFFS] = round(0.5UI/(1/63UI) = 6'h20.
+
+ typically but other values can be set for testing purposes.
+ For diagnostic use only.
+
+ Internal:
+ FIXME no such field GSERN()_LANE()_TRAIN_6_BCFG[PRG_TDELTA], then remove above exempt attribute. */
+        uint64_t trn_tst_pat : 2; /**< [ 51: 50](R/W) Training test pattern. This is a diagnostic control used to send a sequence
+ of predetermined cost values to the BASE-R training logic to mimic training of a
+ predetermined channel between the local device and link partner. This is to
+ facilitate BASE-R testing between channels in a manufacturing test environment.
+ When training starts the predetermined set of cost values (raw figure of merit)
+ values will be provided to the BASE-R receiver and used to steer the training
+ logic and tap convergence logic.
+
+ Used only when GSERN()_LANE()_TRAIN_6_BCFG[TRN_TST_PATEN] is set to one.
+ For diagnostic use only.
+
+ 0x0 = Test training pattern with cost cache disabled 32 dB channel.
+ 0x1 = Test training pattern with cost cache enabled 32 dB channel.
+ 0x2 = Test training pattern with cost cache disabled 32 dB channel.
+ (NOTE(review): 0x2 description is identical to 0x0; possibly should
+ read 8 dB channel to pair with 0x3 -- confirm against hardware spec.)
+ 0x3 = Test training pattern with cost cache enabled 8 dB channel. */
+        uint64_t trn_tst_paten : 1; /**< [ 49: 49](R/W) Training test pattern enable. This is a diagnostic control used to send a sequence
+ of predetermined cost values to the BASE-R training logic to mimic training of a
+ predetermined channel between the local device and link partner. This is to
+ facilitate BASE-R testing between channels in a manufacturing test environment.
+ Used in conjunction with GSERN()_LANE()_TRAIN_6_BCFG[TRN_TST_PAT].
+ For diagnostic use only. */
+        uint64_t sav_cost_cache : 1; /**< [ 48: 48](R/W) Save cost cache contents when BASE-R training is completed. This is a diagnostic
+ control used to preserve the cost cache contents after training is complete.
+ When [SAV_COST_CACHE] is set to one the cost cache is not automatically cleared at the
+ completion of BASE-R training. When [SAV_COST_CACHE] is cleared to zero the cost
+ cache is cleared when training is complete so that the BASE-R training logic can
+ process a new request for BASE-R training in cases where training is restarted.
+ Used when GSERN()_LANE()_TRAIN_6_BCFG[COST_CACHE_EN] is set to one.
+ For diagnostic use only. */
+        uint64_t ccache_hits_min : 5; /**< [ 47: 43](R/W) Cost cache hits minimum. When BASE-R training is using the cost average cache to
+ improve the gradient estimation process to get more accurate tap moves during the
+ final stages of training convergence [CCACHE_HITS_MIN] sets the minimum number of
+ cache hits that must be accumulated before the cost cache will be used.
+ Used when GSERN()_LANE()_TRAIN_6_BCFG[COST_CACHE_EN] is set to one.
+ For diagnostic use only. */
+        uint64_t cost_cache_en : 1; /**< [ 42: 42](R/W) Cost cache enable. When set BASE-R training will use the cost average cache to
+ improve the gradient estimation process to get more accurate tap moves during
+ the final stages of training convergence. For diagnostic use only. */
+        uint64_t dffom_exit_en : 1; /**< [ 41: 41](R/W) Delta Filtered FOM Exit Enable. When set to one BASE-R training will conclude and local
+ device will signal ready if the Delta Filtered FOM is within the high and low limits
+ specified in the GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_HI_LMT] and
+ GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_LO_LMT] for the number of tap move iterations
+ specified in the GSERN()_LANE()_TRAIN_6_BCFG[DELTA_FFOM_CCNT] field.
+ For diagnostic use only. */
+        uint64_t delta_ffom_ccnt : 5; /**< [ 40: 36](R/W) Delta Filtered FOM Convergence Count. Used during BASE-R training to specify the
+ number of successive iterations required for the Delta Filtered FOM to be within
+ the high and low limits specified in the GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_HI_LMT] and
+ GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_LO_LMT] to signal that BASE-R training is converged
+ on the Local Device receiver.
+
+ Used when GSERN()_LANE()_TRAIN_6_BCFG[DFFOM_EXIT_EN] is set to a one.
+
+ For diagnostic use only. */
+        uint64_t exit_loc_main : 8; /**< [ 35: 28](RO/H) Training Exit Location Main tap value. Holds the exit location of the LP Main tap
+ at the completion of BASE-R training when training completes.
+ Number represented in offset binary notation.
+ For diagnostic use only. */
+        uint64_t exit_loc_post : 8; /**< [ 27: 20](RO/H) Training Exit Location Post tap value. Holds the exit location of the LP Post tap
+ at the completion of BASE-R training.
+ Number represented in offset binary notation.
+ For diagnostic use only. */
+        uint64_t exit_loc_pre : 8; /**< [ 19: 12](RO/H) Training Exit Location Pre tap value. Holds the exit location of the LP Pre tap
+ at the completion of BASE-R training.
+ Number represented in offset binary notation.
+ For diagnostic use only. */
+        uint64_t exit_fom_val : 12; /**< [ 11: 0](RO/H) Pattern match logic exit value. Holds the Figure of merit (FOM) at the completion of
+ BASE-R
+ training when training is converged using the pattern matching logic.
+ For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+        uint64_t exit_fom_val : 12; /**< [ 11: 0](RO/H) Pattern match logic exit value. Holds the Figure of merit (FOM) at the completion of
+ BASE-R
+ training when training is converged using the pattern matching logic.
+ For diagnostic use only. */
+        uint64_t exit_loc_pre : 8; /**< [ 19: 12](RO/H) Training Exit Location Pre tap value. Holds the exit location of the LP Pre tap
+ at the completion of BASE-R training.
+ Number represented in offset binary notation.
+ For diagnostic use only. */
+        uint64_t exit_loc_post : 8; /**< [ 27: 20](RO/H) Training Exit Location Post tap value. Holds the exit location of the LP Post tap
+ at the completion of BASE-R training.
+ Number represented in offset binary notation.
+ For diagnostic use only. */
+        uint64_t exit_loc_main : 8; /**< [ 35: 28](RO/H) Training Exit Location Main tap value. Holds the exit location of the LP Main tap
+ at the completion of BASE-R training when training completes.
+ Number represented in offset binary notation.
+ For diagnostic use only. */
+        uint64_t delta_ffom_ccnt : 5; /**< [ 40: 36](R/W) Delta Filtered FOM Convergence Count. Used during BASE-R training to specify the
+ number of successive iterations required for the Delta Filtered FOM to be within
+ the high and low limits specified in the GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_HI_LMT] and
+ GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_LO_LMT] to signal that BASE-R training is converged
+ on the Local Device receiver.
+
+ Used when GSERN()_LANE()_TRAIN_6_BCFG[DFFOM_EXIT_EN] is set to a one.
+
+ For diagnostic use only. */
+        uint64_t dffom_exit_en : 1; /**< [ 41: 41](R/W) Delta Filtered FOM Exit Enable. When set to one BASE-R training will conclude and local
+ device will signal ready if the Delta Filtered FOM is within the high and low limits
+ specified in the GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_HI_LMT] and
+ GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_LO_LMT] for the number of tap move iterations
+ specified in the GSERN()_LANE()_TRAIN_6_BCFG[DELTA_FFOM_CCNT] field.
+ For diagnostic use only. */
+        uint64_t cost_cache_en : 1; /**< [ 42: 42](R/W) Cost cache enable. When set BASE-R training will use the cost average cache to
+ improve the gradient estimation process to get more accurate tap moves during
+ the final stages of training convergence. For diagnostic use only. */
+        uint64_t ccache_hits_min : 5; /**< [ 47: 43](R/W) Cost cache hits minimum. When BASE-R training is using the cost average cache to
+ improve the gradient estimation process to get more accurate tap moves during the
+ final stages of training convergence [CCACHE_HITS_MIN] sets the minimum number of
+ cache hits that must be accumulated before the cost cache will be used.
+ Used when GSERN()_LANE()_TRAIN_6_BCFG[COST_CACHE_EN] is set to one.
+ For diagnostic use only. */
+        uint64_t sav_cost_cache : 1; /**< [ 48: 48](R/W) Save cost cache contents when BASE-R training is completed. This is a diagnostic
+ control used to preserve the cost cache contents after training is complete.
+ When [SAV_COST_CACHE] is set to one the cost cache is not automatically cleared at the
+ completion of BASE-R training. When [SAV_COST_CACHE] is cleared to zero the cost
+ cache is cleared when training is complete so that the BASE-R training logic can
+ process a new request for BASE-R training in cases where training is restarted.
+ Used when GSERN()_LANE()_TRAIN_6_BCFG[COST_CACHE_EN] is set to one.
+ For diagnostic use only. */
+        uint64_t trn_tst_paten : 1; /**< [ 49: 49](R/W) Training test pattern enable. This is a diagnostic control used to send a sequence
+ of predetermined cost values to the BASE-R training logic to mimic training of a
+ predetermined channel between the local device and link partner. This is to
+ facilitate BASE-R testing between channels in a manufacturing test environment.
+ Used in conjunction with GSERN()_LANE()_TRAIN_6_BCFG[TRN_TST_PAT].
+ For diagnostic use only. */
+        uint64_t trn_tst_pat : 2; /**< [ 51: 50](R/W) Training test pattern. This is a diagnostic control used to send a sequence
+ of predetermined cost values to the BASE-R training logic to mimic training of a
+ predetermined channel between the local device and link partner. This is to
+ facilitate BASE-R testing between channels in a manufacturing test environment.
+ When training starts the predetermined set of cost values (raw figure of merit)
+ values will be provided to the BASE-R receiver and used to steer the training
+ logic and tap convergence logic.
+
+ Used only when GSERN()_LANE()_TRAIN_6_BCFG[TRN_TST_PATEN] is set to one.
+ For diagnostic use only.
+
+ 0x0 = Test training pattern with cost cache disabled 32 dB channel.
+ 0x1 = Test training pattern with cost cache enabled 32 dB channel.
+ 0x2 = Test training pattern with cost cache disabled 32 dB channel.
+ (NOTE(review): 0x2 description is identical to 0x0; possibly should
+ read 8 dB channel to pair with 0x3 -- confirm against hardware spec.)
+ 0x3 = Test training pattern with cost cache enabled 8 dB channel. */
+        uint64_t prg_teoffs : 6; /**< [ 57: 52](R/W) Programmable E-path QAC time offset. This is a diagnostic control used to set the
+ eye monitor Epath QAC offset. Use to trim the qac_eoffs offset during eye
+ monitor usage when used in BASE-R and PCIE training to measure the RX eye figure of
+ merit (FOM). Typically set to the middle of the eye, e.g. 0.5UI.
+
+ _ Target_eoffs = [PRG_TEOFFS] + (GSERN()_LANE()_RX_QAC_BSTS[QAC_EOFFS]
+ - GSERN()_LANE()_TRAIN_6_BCFG[PRG_TDELTA]).
+ _ [PRG_TEOFFS] = round(0.5UI/(1/63UI) = 6'h20.
+
+ typically but other values can be set for testing purposes.
+ For diagnostic use only.
+
+ Internal:
+ FIXME no such field GSERN()_LANE()_TRAIN_6_BCFG[PRG_TDELTA], then remove above exempt attribute. */
+        uint64_t en_teoffs : 1; /**< [ 58: 58](R/W) Enable E-path QAC time offset adjustment. This is a diagnostic control used
+ to adjust the QAC E-path time offset. Typically the E-path QAC time offset is
+ set to 0.5UI. Setting [EN_TEOFFS] to a one enables the training state machine
+ to adjust the E-path QAC time offset by the value specified in
+ GSERN()_LANE()_TRAIN_6_BCFG[PRG_TEOFFS].
+ For diagnostic use only. */
+        uint64_t en_rxwt_ctr : 1; /**< [ 59: 59](R/W) Enable receiver adaptation wait timer. When [EN_RXWT_CTR] is set to a one the
+ training state machine eye monitor measurement to measure the figure of merit
+ (FOM) is delayed by 10 microseconds to allow the receiver equalizer to adjust
+ to the link partner TX equalizer tap adjustments (BASE-R training and PCIe
+ training) during link training.
+ For diagnostic use only. */
+        uint64_t en_frmoffs_chk : 1; /**< [ 60: 60](R/W) Enable framing offset check. When [EN_FRMOFFS_CHK] is set to a one the training
+ eye monitor state machine checks if framing offset is needed between the receiver
+ DOUTQ and DOUTE pipes. The framing offset check is performed when BASE-R or PCIe
+ Gen3 training is first enabled.
+ The GSERN()_LANE()_TRAIN_6_BCFG[SHFT_PATH_GD] or
+ GSERN()_LANE()_TRAIN_6_BCFG[NO_SHFT_PATH_GD] flag will be set to indicate which
+ framing offset was required. If no framing offset can be found that produces
+ an error free eye measurement then the GSERN()_LANE()_TRAIN_6_BCFG[FRAME_ERR] flag will
+ be set.
+ For diagnostic use only. */
+        uint64_t shft_path_gd : 1; /**< [ 61: 61](RO/H) The shifted error path completed the framing test without errors.
+ Valid when the GSERN()_LANE()_TRAIN_6_BCFG[EN_FRMOFFS_CHK] bit is set
+ to a one and the training state machine has completed the framing
+ alignment check.
+ For diagnostic use only. */
+        uint64_t no_shft_path_gd : 1; /**< [ 62: 62](RO/H) The non-shifted error path completed the framing test without errors.
+ Valid when the GSERN()_LANE()_TRAIN_6_BCFG[EN_FRMOFFS_CHK] bit is set
+ to a one and the training state machine has completed the framing
+ alignment check.
+ For diagnostic use only. */
+        uint64_t frame_err : 1; /**< [ 63: 63](RO/H) Framing error. When set to a one and the
+ GSERN()_LANE()_TRAIN_6_BCFG[EN_FRMOFFS_CHK] bit is set
+ to a one and the training state machine has completed the framing
+ alignment check indicates that the DOUTE and DOUTQ pipes could
+ not be aligned to produce error free eye monitor data.
+ For diagnostic use only. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_train_6_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_train_6_bcfg bdk_gsernx_lanex_train_6_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_6_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_6_BCFG(unsigned long a, unsigned long b)
+{
+    /* Address of GSERN(a)_LANE(b)_TRAIN_6_BCFG. Valid only on CN9XXX with
+       module a in 0..7 and lane b in 0..4; anything else is a fatal CSR error. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4)) {
+        const uint64_t module_off = 0x1000000ll * (a & 0x7); /* per-QLM stride */
+        const uint64_t lane_off   = 0x10000ll   * (b & 0x7); /* per-lane stride */
+        return 0x87e090003210ll + module_off + lane_off;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_TRAIN_6_BCFG", 2, a, b, 0, 0);
+}
+
+/* Helper macros consumed by the generic BDK_CSR_* accessors for
+   GSERNX_LANEX_TRAIN_6_BCFG: value type, bus type, printable name,
+   device BAR, bus number, and CSR argument list. */
+#define typedef_BDK_GSERNX_LANEX_TRAIN_6_BCFG(a,b) bdk_gsernx_lanex_train_6_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_TRAIN_6_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_TRAIN_6_BCFG(a,b) "GSERNX_LANEX_TRAIN_6_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_TRAIN_6_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_TRAIN_6_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_TRAIN_6_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_train_7_bcfg
+ *
+ * GSER Lane Training Base Configuration Register 7
+ * This register controls settings for lane training.
+ */
+union bdk_gsernx_lanex_train_7_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_train_7_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pcie_fasteq_val : 5; /**< [ 63: 59](R/W) Reserved.
+ Internal:
+ PCIe fast equalization delay value for simulation.
+ Used in conjunction with GSERN()_LANE()_TRAIN_7_BCFG[PCIE_FASTEQ]
+ When testing PCIe Gen3/Gen4 equalization in simulation.
+ The default value of 0x6 programs the PCIe equalization FOM and
+ link evaluation direction change request acknowledgement handshake
+ to 1.6 microseconds to accelerate simulation modeling of the PCIe
+ Gen3/Gen4 equalization phases 2 and 3. .
+ For simulation use only. */
+ uint64_t pcie_fasteq : 1; /**< [ 58: 58](R/W) Reserved.
+ Internal:
+ PCIe fast equalization mode for simulation.
+ When testing PCIe Gen3/Gen4 equalization in simulation setting [PCIE_FASTEQ]
+ to 1 will reduce the PCIe equalization response to 1.6 microseconds.
+ Can be used in conjunction with GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_FOM_EN].
+ If the GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_FOM_EN] is not used the raw FOM
+ value returned will be zero. Further the [PCIE_FASTEQ] is set the link evaluation
+ feedback direction change for C(-1), C(0), and C(+1) will indicate no change.
+ For simulation use only. */
+ uint64_t pcie_dir_eq_done : 1; /**< [ 57: 57](RO/H) PCIe direction change equalization done flag. During PCIe Gen3/Gen4
+ direction change equalization reflects the state of the direction
+ equalization done flag. When set to 1 indicates that the current
+ direction change equalization tap adjustment sequence is complete.
+ Reset automatically by hardware when PCIe Gen3/Gen4 equalization is
+ completed. */
+ uint64_t pcie_term_adtmout : 1; /**< [ 56: 56](R/W) PCIe terminate direction change feedback equalization when reached the
+ the equalization timeout specified in
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_SEL].
+ During PCIe Gen3/Gen4 equalization direction change
+ feedback mode the equalization timeout period is controlled by
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_SEL] and
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_FAST].
+ When [PCIE_TERM_ADTMOUT] sets when the equalization timeout timer expires
+ the equalization logic will signal equalization complete on the next
+ equalization request from the PCIe controller.
+ The training logic will signal equalization complete by returning
+ C(-1) TAP direction change set to No Change and C(+1) TAP direction change
+ also set to No Change. This will signal the termination of
+ PCIe Gen3/Gen4 equalization direction change feedback mode. */
+ uint64_t pcie_adtmout_fast : 1; /**< [ 55: 55](R/W) Reserved.
+ Internal:
+ For simulation use only. When set accelerates the PCIe Gen3/Gen4 direction change
+ feedback equalization timeout timer period. When set shortens the direction change
+ equalization time-out timer.
+ See the description for
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_SEL].
+ For diagnostic use only. */
+ uint64_t pcie_adtmout_disable : 1; /**< [ 54: 54](R/W) PCIe Gen3/Gen4 direction change feedback equalization timeout timer disable.
+ When [PCIE_ADTMOUT_DISABLE] is set to 1 the timeout timer that runs during
+ PCIe Gen3/Gen4 direction change feedback equalization is disabled. When
+ [PCIE_ADTMOUT_DISABLE] is cleared to 0 the equalization timeout timer is enabled.
+ The equalization timeout period is controlled by
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_SEL] and
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_FAST].
+ For diagnostic use only. */
+ uint64_t pcie_adtmout_sel : 2; /**< [ 53: 52](R/W) Selects the timeout value for the PCIe Gen3/Gen4 direction change feedback equalization.
+ This time-out timer value is only valid if
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_DISABLE]
+ is cleared to 0.
+
+ When GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_FAST] is cleared to 0 the link training
+ time-out timer value is set by [PCIE_ADTMOUT_SEL] to the values shown.
+ 0x0 = 5.24 milliseconds.
+ 0x1 = 10.49 milliseconds.
+ 0x2 = 13.1 milliseconds.
+ 0x3 = 15.73 milliseconds.
+
+ When GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_FAST] is set to 1 the link training
+ time-out timer value is set by [PCIE_ADTMOUT_SEL] to the values shown.
+ 0x0 = 81.92 microseconds.
+ 0x1 = 163.84 microseconds.
+ 0x2 = 327.68 microseconds.
+ 0x3 = 655.36 microseconds. */
+ uint64_t pcie_term_max_mvs : 1; /**< [ 51: 51](R/W) PCIe terminate direction change feedback equalization when reached the
+ the maximum number of tap moves specified in
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_MAX_MOVES].
+ During PCIe Gen3/Gen4 equalization direction change
+ feedback mode [PCIE_MAX_MOVES] sets the maximum number of tap moves to make
+ before signaling equalization complete. When [PCIE_TERM_MAX_MVS] is set
+ to 1 the training logic will signal equalization complete by returning
+ C(-1) TAP direction change set to No Change and C(+1) TAP direction change
+ also set to No Change. This will signal the termination of
+ PCIe Gen3/Gen4 equalization direction change feedback mode. */
+ uint64_t pcie_term_min_mvs : 1; /**< [ 50: 50](R/W) PCIe terminate direction change feedback equalization when exceeded the
+ the minimum number of tap moves specified in
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_MIN_MOVES].
+ During PCIe Gen3/Gen4 equalization direction change
+ feedback mode [PCIE_MIN_MOVES] sets the minimum number of tap moves to make
+ before signaling equalization complete. When [PCIE_TERM_MIN_MVS] is set
+ to 1 the training logic will signal equalization complete by returning
+ C(-1) TAP direction change set to No Change and C(+1) TAP direction change
+ also set to No Change. This will signal the termination of
+ PCIe Gen3/Gen4 equalization direction change feedback mode. */
+ uint64_t pcie_max_moves : 8; /**< [ 49: 42](R/W) PCIe maximum tap moves. During PCIe Gen3/Gen4 equalization direction change
+ feedback mode [PCIE_MAX_MOVES] sets the maximum number of tap moves to make
+ before signaling equalization complete. */
+ uint64_t pcie_min_moves : 8; /**< [ 41: 34](R/W) PCIe minimum tap moves. During PCIe Gen3/Gen4 equalization direction change
+ feedback mode [PCIE_MIN_MOVES] sets the minimum number of tap moves to make
+ before signaling equalization complete. */
+ uint64_t pcie_rev_dir_hints : 1; /**< [ 33: 33](R/W) When set, reverses the direction of the
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_POST_DIR],
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_MAIN_DIR], and
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_PRE_DIR]
+ Tx tap direction feedback hints. For diagnostic use only. */
+ uint64_t pcie_inv_post_dir : 1; /**< [ 32: 32](R/W) PCIe direction change equalization invert post tap direction.
+ When set reverses the Increment/Decrement direction
+ of the GSERN()_LANE()_TRAIN_7_BCFG[PCIE_POST_DIR]
+ Tx tap direction feedback. For diagnostic use only. */
+ uint64_t pcie_inv_main_dir : 1; /**< [ 31: 31](R/W) PCIe direction change equalization invert main tap direction.
+ When set reverses the Increment/Decrement direction
+ of the GSERN()_LANE()_TRAIN_7_BCFG[PCIE_MAIN_DIR]
+ Tx tap direction feedback. For diagnostic use only. */
+ uint64_t pcie_inv_pre_dir : 1; /**< [ 30: 30](R/W) PCIe direction change equalization invert pre tap direction.
+ When set reverses the Increment/Decrement direction
+ of the GSERN()_LANE()_TRAIN_7_BCFG[PCIE_PRE_DIR]
+ Tx tap direction feedback. For diagnostic use only. */
+ uint64_t pcie_post_dir : 2; /**< [ 29: 28](RO/H) PCIe direction change equalization post (C+1) tap direction.
+ During PCIe Gen3/Gen4 link training using direction change equalization
+ the [PCIE_POST_DIR] field reflects the value of the post (C+1) tap
+ direction for the link evaluation direction feedback.
+ 0x0 = No change.
+ 0x1 = Increment feedback for each coefficient.
+ 0x2 = Decrement feedback for each coefficient.
+ 0x3 = Reserved. */
+ uint64_t pcie_main_dir : 2; /**< [ 27: 26](RO/H) PCIe direction change equalization main (C0) tap direction.
+ During PCIe Gen3/Gen4 link training using direction change equalization
+ the [PCIE_MAIN_DIR] field reflects the value of the main (C0) tap
+ direction for the link evaluation direction feedback.
+ 0x0 = No change.
+ 0x1 = Increment feedback for each coefficient.
+ 0x2 = Decrement feedback for each coefficient.
+ 0x3 = Reserved.
+
+ The main direction will always be 0x0 no change. The PCIe
+ MAC computes the Main (C0) tap direction change. */
+ uint64_t pcie_pre_dir : 2; /**< [ 25: 24](RO/H) PCIe direction change equalization pre (C-1) tap direction.
+ During PCIe Gen3/Gen4 link training using direction change equalization
+ the [PCIE_PRE_DIR] field reflects the value of the pre (C-1) tap
+ direction for the link evaluation direction feedback.
+ 0x0 = No change.
+ 0x1 = Increment feedback for each coefficient.
+ 0x2 = Decrement feedback for each coefficient.
+ 0x3 = Reserved. */
+ uint64_t pcie_tst_array_rdy : 1; /**< [ 23: 23](RO/H) PCIe test FOM array ready. For verification diagnostic use only.
+ See [PCIE_TST_FOM_VAL].
+
+ Internal:
+ PCIe test FOM array ready. For verification diagnostic use only.
+ All entries of the PCIe test FOM array are cleared following release
+ of reset. When [PCIE_TST_ARRAY_RDY] is set to 1 the PCIe test FOM
+ array is ready and can be used for PCIe training testing. Do not
+ read or write the PCIe test FOM array while [PCIE_TST_ARRAY_RDY] is
+ cleared to 0. When the GSER QLM is released from reset the
+ [PCIE_TST_ARRAY_RDY] will transition from 0 to 1 after 128 service
+ clock cycles. */
+ uint64_t pcie_tst_fom_mode : 1; /**< [ 22: 22](R/W) PCIe test FOM array mode. For verification diagnostic use only.
+ See [PCIE_TST_FOM_VAL].
+
+ Internal:
+ PCIe test FOM array mode. For verification diagnostic use only.
+ 0x0 = Test FOM array is used to load and play back test FOMs for PCIe link
+ training.
+ 0x1 = Test FOM array is used to capture raw FOMs during link training for
+ diagnostic verification. */
+ uint64_t pcie_tst_fom_en : 1; /**< [ 21: 21](R/W) PCIe test figure of merit array enable. For verification diagnostic use only.
+ See [PCIE_TST_FOM_VAL]. */
+ uint64_t pcie_tst_fom_rd : 1; /**< [ 20: 20](R/W) PCIe test figure of merit array enable. For verification diagnostic use only.
+ See [PCIE_TST_FOM_VAL]. */
+ uint64_t pcie_tst_fom_ld : 1; /**< [ 19: 19](R/W) PCIe test figure of merit array enable. For verification diagnostic use only.
+ See [PCIE_TST_FOM_VAL]. */
+ uint64_t pcie_tst_fom_addr : 7; /**< [ 18: 12](R/W) PCIe test figure of merit array enable. For verification diagnostic use only.
+ See [PCIE_TST_FOM_VAL]. */
+ uint64_t pcie_tst_fom_val : 12; /**< [ 11: 0](R/W/H) PCIe test figure of merit array enable. For verification diagnostic use only.
+ Internal:
+ Used to load the test raw figure of merit (raw FOM) array with test
+ FOM values to play back during PCIe Gen3/Gen4 training to check the
+ training preset selection logic and PCIE training logic.
+ An 11-bit by 32 word array is used to hold the test raw FOM values.
+ The array FOM values are initialized by writing the
+ [PCIE_TST_FOM_ADDR] field with a value
+ from 0x0 to 0x7F to index a location in the array, then writing the
+ [PCIE_TST_FOM_VAL] with a 12-bit quantity representing the raw
+ FOM value to be written to the array location, then writing the
+ [PCIE_TST_FOM_LD] bit to 1 to write
+ the raw FOM 12-bit value to the array, and the writing the
+ [PCIE_TST_FOM_LD] bit to 0 to complete
+ array write operation.
+ Before writing the array software should poll the
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_ARRAY_RDY] and wait for
+ [PCIE_TST_ARRAY_RDY] field to be set to 1 before reading or writing
+ the test fom array. Also write
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_FOM_MODE] to 0.
+
+ Each array location is written with the desired raw FOM value following
+ the thse steps.
+
+ After all array locations are written, the array locations can be read
+ back. Write the [PCIE_TST_FOM_ADDR] to point
+ to the desired array location, next write
+ [PCIE_TST_FOM_RD] to 1 to enable read back mode.
+ Read the [PCIE_TST_FOM_VAL] field to readback the 12-bit test raw FOM
+ value from the array. Finally write
+ [PCIE_TST_FOM_RD] to 0 to disable read back mode.
+
+ To enable the PCI Express Test FOM array during PCIe Gen3/Gen4 link training
+ write [PCIE_TST_FOM_EN] to 1. Note prior to
+ writing [PCIE_TST_FOM_EN] to 1, ensure that
+ [PCIE_TST_FOM_RD] is cleared to 0 and
+ [PCIE_TST_FOM_LD] is cleared to 0.
+
+ During PCIe Gen3/Gen4 link training each time a Preset receiver evaluation
+ request is received the training logic will return the 12-bit raw FOM
+ from the current test FOM array location to the PIPE PCS logic and then
+ move to the next test FOM array location. The test FOM array always
+ starts at location 0x0 and increments to the next location in the FOM
+ array after each preset evaluation.
+
+ Related Registers
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_FOM_ADDR]
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_FOM_LD]
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_FOM_RD]
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_FOM_EN]
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_FOM_MODE]
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_ARRAY_RDY] */
+#else /* Word 0 - Little Endian */
+ uint64_t pcie_tst_fom_val : 12; /**< [ 11: 0](R/W/H) PCIe test figure of merit array enable. For verification diagnostic use only.
+ Internal:
+ Used to load the test raw figure of merit (raw FOM) array with test
+ FOM values to play back during PCIe Gen3/Gen4 training to check the
+ training preset selection logic and PCIE training logic.
+ An 11-bit by 32 word array is used to hold the test raw FOM values.
+ The array FOM values are initialized by writing the
+ [PCIE_TST_FOM_ADDR] field with a value
+ from 0x0 to 0x7F to index a location in the array, then writing the
+ [PCIE_TST_FOM_VAL] with a 12-bit quantity representing the raw
+ FOM value to be written to the array location, then writing the
+ [PCIE_TST_FOM_LD] bit to 1 to write
+ the raw FOM 12-bit value to the array, and the writing the
+ [PCIE_TST_FOM_LD] bit to 0 to complete
+ array write operation.
+ Before writing the array software should poll the
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_ARRAY_RDY] and wait for
+ [PCIE_TST_ARRAY_RDY] field to be set to 1 before reading or writing
+ the test fom array. Also write
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_FOM_MODE] to 0.
+
+ Each array location is written with the desired raw FOM value following
+ the thse steps.
+
+ After all array locations are written, the array locations can be read
+ back. Write the [PCIE_TST_FOM_ADDR] to point
+ to the desired array location, next write
+ [PCIE_TST_FOM_RD] to 1 to enable read back mode.
+ Read the [PCIE_TST_FOM_VAL] field to readback the 12-bit test raw FOM
+ value from the array. Finally write
+ [PCIE_TST_FOM_RD] to 0 to disable read back mode.
+
+ To enable the PCI Express Test FOM array during PCIe Gen3/Gen4 link training
+ write [PCIE_TST_FOM_EN] to 1. Note prior to
+ writing [PCIE_TST_FOM_EN] to 1, ensure that
+ [PCIE_TST_FOM_RD] is cleared to 0 and
+ [PCIE_TST_FOM_LD] is cleared to 0.
+
+ During PCIe Gen3/Gen4 link training each time a Preset receiver evaluation
+ request is received the training logic will return the 12-bit raw FOM
+ from the current test FOM array location to the PIPE PCS logic and then
+ move to the next test FOM array location. The test FOM array always
+ starts at location 0x0 and increments to the next location in the FOM
+ array after each preset evaluation.
+
+ Related Registers
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_FOM_ADDR]
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_FOM_LD]
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_FOM_RD]
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_FOM_EN]
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_FOM_MODE]
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_ARRAY_RDY] */
+ uint64_t pcie_tst_fom_addr : 7; /**< [ 18: 12](R/W) PCIe test figure of merit array enable. For verification diagnostic use only.
+ See [PCIE_TST_FOM_VAL]. */
+ uint64_t pcie_tst_fom_ld : 1; /**< [ 19: 19](R/W) PCIe test figure of merit array enable. For verification diagnostic use only.
+ See [PCIE_TST_FOM_VAL]. */
+ uint64_t pcie_tst_fom_rd : 1; /**< [ 20: 20](R/W) PCIe test figure of merit array enable. For verification diagnostic use only.
+ See [PCIE_TST_FOM_VAL]. */
+ uint64_t pcie_tst_fom_en : 1; /**< [ 21: 21](R/W) PCIe test figure of merit array enable. For verification diagnostic use only.
+ See [PCIE_TST_FOM_VAL]. */
+ uint64_t pcie_tst_fom_mode : 1; /**< [ 22: 22](R/W) PCIe test FOM array mode. For verification diagnostic use only.
+ See [PCIE_TST_FOM_VAL].
+
+ Internal:
+ PCIe test FOM array mode. For verification diagnostic use only.
+ 0x0 = Test FOM array is used to load and play back test FOMs for PCIe link
+ training.
+ 0x1 = Test FOM array is used to capture raw FOMs during link training for
+ diagnostic verification. */
+ uint64_t pcie_tst_array_rdy : 1; /**< [ 23: 23](RO/H) PCIe test FOM array ready. For verification diagnostic use only.
+ See [PCIE_TST_FOM_VAL].
+
+ Internal:
+ PCIe test FOM array ready. For verification diagnostic use only.
+ All entries of the PCIe test FOM array are cleared following release
+ of reset. When [PCIE_TST_ARRAY_RDY] is set to 1 the PCIe test FOM
+ array is ready and can be used for PCIe training testing. Do not
+ read or write the PCIe test FOM array while [PCIE_TST_ARRAY_RDY] is
+ cleared to 0. When the GSER QLM is released from reset the
+ [PCIE_TST_ARRAY_RDY] will transition from 0 to 1 after 128 service
+ clock cycles. */
+ uint64_t pcie_pre_dir : 2; /**< [ 25: 24](RO/H) PCIe direction change equalization pre (C-1) tap direction.
+ During PCIe Gen3/Gen4 link training using direction change equalization
+ the [PCIE_PRE_DIR] field reflects the value of the pre (C-1) tap
+ direction for the link evaluation direction feedback.
+ 0x0 = No change.
+ 0x1 = Increment feedback for each coefficient.
+ 0x2 = Decrement feedback for each coefficient.
+ 0x3 = Reserved. */
+ uint64_t pcie_main_dir : 2; /**< [ 27: 26](RO/H) PCIe direction change equalization main (C0) tap direction.
+ During PCIe Gen3/Gen4 link training using direction change equalization
+ the [PCIE_MAIN_DIR] field reflects the value of the main (C0) tap
+ direction for the link evaluation direction feedback.
+ 0x0 = No change.
+ 0x1 = Increment feedback for each coefficient.
+ 0x2 = Decrement feedback for each coefficient.
+ 0x3 = Reserved.
+
+ The main direction will always be 0x0 no change. The PCIe
+ MAC computes the Main (C0) tap direction change. */
+ uint64_t pcie_post_dir : 2; /**< [ 29: 28](RO/H) PCIe direction change equalization post (C+1) tap direction.
+ During PCIe Gen3/Gen4 link training using direction change equalization
+ the [PCIE_POST_DIR] field reflects the value of the post (C+1) tap
+ direction for the link evaluation direction feedback.
+ 0x0 = No change.
+ 0x1 = Increment feedback for each coefficient.
+ 0x2 = Decrement feedback for each coefficient.
+ 0x3 = Reserved. */
+ uint64_t pcie_inv_pre_dir : 1; /**< [ 30: 30](R/W) PCIe direction change equalization invert pre tap direction.
+ When set reverses the Increment/Decrement direction
+ of the GSERN()_LANE()_TRAIN_7_BCFG[PCIE_PRE_DIR]
+ Tx tap direction feedback. For diagnostic use only. */
+ uint64_t pcie_inv_main_dir : 1; /**< [ 31: 31](R/W) PCIe direction change equalization invert main tap direction.
+ When set reverses the Increment/Decrement direction
+ of the GSERN()_LANE()_TRAIN_7_BCFG[PCIE_MAIN_DIR]
+ Tx tap direction feedback. For diagnostic use only. */
+ uint64_t pcie_inv_post_dir : 1; /**< [ 32: 32](R/W) PCIe direction change equalization invert post tap direction.
+ When set reverses the Increment/Decrement direction
+ of the GSERN()_LANE()_TRAIN_7_BCFG[PCIE_POST_DIR]
+ Tx tap direction feedback. For diagnostic use only. */
+ uint64_t pcie_rev_dir_hints : 1; /**< [ 33: 33](R/W) When set, reverses the direction of the
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_POST_DIR],
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_MAIN_DIR], and
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_PRE_DIR]
+ Tx tap direction feedback hints. For diagnostic use only. */
+ uint64_t pcie_min_moves : 8; /**< [ 41: 34](R/W) PCIe minimum tap moves. During PCIe Gen3/Gen4 equalization direction change
+ feedback mode [PCIE_MIN_MOVES] sets the minimum number of tap moves to make
+ before signaling equalization complete. */
+ uint64_t pcie_max_moves : 8; /**< [ 49: 42](R/W) PCIe maximum tap moves. During PCIe Gen3/Gen4 equalization direction change
+ feedback mode [PCIE_MIN_MOVES] sets the maximum number of tap moves to make
+ before signaling equalization complete. */
+ uint64_t pcie_term_min_mvs : 1; /**< [ 50: 50](R/W) PCIe terminate direction change feedback equalization when exceeded the
+ the minimum number of tap moves specified in
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_MIN_MOVES].
+ During PCIe Gen3/Gen4 equalization direction change
+ feedback mode [PCIE_MIN_MOVES] sets the minimum number of tap moves to make
+ before signaling equalization complete. When [PCIE_TERM_MIN_MVS] is set
+ to 1 the training logic will signal equalization complete by returning
+ C(-1) TAP direction change set to No Change and C(+1) TAP direction change
+ also set to No Change. This will signal the termination of
+ PCIe Gen3/Gen4 equalization direction change feedback mode. */
+ uint64_t pcie_term_max_mvs : 1; /**< [ 51: 51](R/W) PCIe terminate direction change feedback equalization when reached the
+ the maximum number of tap moves specified in
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_MAX_MOVES].
+ During PCIe Gen3/Gen4 equalization direction change
+ feedback mode [PCIE_MAX_MOVES] sets the maximum number of tap moves to make
+ before signaling equalization complete. When [PCIE_TERM_MAX_MVS] is set
+ to 1 the training logic will signal equalization complete by returning
+ C(-1) TAP direction change set to No Change and C(+1) TAP direction change
+ also set to No Change. This will signal the termination of
+ PCIe Gen3/Gen4 equalization direction change feedback mode. */
+ uint64_t pcie_adtmout_sel : 2; /**< [ 53: 52](R/W) Selects the timeout value for the PCIe Gen3/Gen4 direction change feedback equalization.
+ This time-out timer value is only valid if
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_DISABLE]
+ is cleared to 0.
+
+ When GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_FAST] is cleared to 0 the link training
+ time-out timer value is set by [PCIE_ADTMOUT_SEL] to the values shown.
+ 0x0 = 5.24 milliseconds.
+ 0x1 = 10.49 milliseconds.
+ 0x2 = 13.1 milliseconds.
+ 0x3 = 15.73 milliseconds.
+
+ When GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_FAST] is set to 1 the link training
+ time-out timer value is set by [PCIE_ADTMOUT_SEL] to the values shown.
+ 0x0 = 81.92 microseconds.
+ 0x1 = 163.84 microseconds.
+ 0x2 = 327.68 microseconds.
+ 0x3 = 655.36 microseconds. */
+ uint64_t pcie_adtmout_disable : 1; /**< [ 54: 54](R/W) PCIe Gen3/Gen4 direction change feedback equalization timeout timer disable.
+ When [PCIE_ADTMOUT_DISABLE] is set to 1 the timeout timer that runs during
+ PCIe Gen3/Gen4 direction change feecback equalization is disabled. When
+ [PCIE_ADTMOUT_DISABLE] is cleared to 0 the equalization timeout timer is enabled.
+ The equalization timeout period is controlled by
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_SEL] and
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_FAST].
+ For diagnostic use only. */
+ uint64_t pcie_adtmout_fast : 1; /**< [ 55: 55](R/W) Reserved.
+ Internal:
+ For simulation use only. When set accelerates the PCIe Gen3/Gen4 direction change
+ feedback equalization timeout timer period. When set shortens the direction change
+ equalization time-out timer.
+ See the description for
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_SEL].
+ For diagnostic use only. */
+ uint64_t pcie_term_adtmout : 1; /**< [ 56: 56](R/W) PCIe terminate direction change feedback equalization when reached the
+ the equalization timeout specified in
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_SEL].
+ During PCIe Gen3/Gen4 equalization direction change
+ feedback mode the equalization timeout period is controlled by
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_SEL] and
+ GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_FAST].
+ When [PCIE_TERM_ADTMOUT] sets when the equalization timeout timer expires
+ the equalization logic will signal equalization complete on the next
+ equalization request from the PCIe controller.
+ The training logic will signal equalization complete by returning
+ C(-1) TAP direction change set to No Change and C(+1) TAP direction change
+ also set to No Change. This will signal the termination of
+ PCIe Gen3/Gen4 equalization direction change feedback mode. */
+ uint64_t pcie_dir_eq_done : 1; /**< [ 57: 57](RO/H) PCIe direction change equalization done flag. During PCIe Gen3/Gen4
+ direction change equalization reflects the state of the direction
+ equalization done flag. When set to 1 indicates that the current
+ direction change equalization tap adjustment sequence is complete.
+ Reset automatically by hardware when PCIe Gen3/Gen4 equalization is
+ completed. */
+ uint64_t pcie_fasteq : 1; /**< [ 58: 58](R/W) Reserved.
+ Internal:
+ PCIe fast equalization mode for simulation.
+ When testing PCIe Gen3/Gen4 equalization in simulation setting [PCIE_FASTEQ]
+ to 1 will reduce the PCIe equalization response to 1.6 microseconds.
+ Can be used in conjunction with GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_FOM_EN].
+ If the GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_FOM_EN] is not used the raw FOM
+ value returned will be zero. Further the [PCIE_FASTEQ] is set the link evaluation
+ feedback direction change for C(-1), C(0), and C(+1) will indicate no change.
+ For simulation use only. */
+ uint64_t pcie_fasteq_val : 5; /**< [ 63: 59](R/W) Reserved.
+ Internal:
+ PCIe fast equalization delay value for simulation.
+ Used in conjunction with GSERN()_LANE()_TRAIN_7_BCFG[PCIE_FASTEQ]
+ When testing PCIe Gen3/Gen4 equalization in simulation.
+ The default value of 0x6 programs the PCIe equalization FOM and
+ link evaluation direction change request acknowledgement handshake
+ to 1.6 microseconds to accelerate simulation modeling of the PCIe
+ Gen3/Gen4 equalization phases 2 and 3. .
+ For simulation use only. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_train_7_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_train_7_bcfg bdk_gsernx_lanex_train_7_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_7_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_7_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090003220ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_TRAIN_7_BCFG", 2, a, b, 0, 0);
+}
+
/* Accessor attributes for GSERN(a)_LANE(b)_TRAIN_7_BCFG, consumed by the
   generic BDK_CSR_* macros: C type of the register, bus access type,
   printable name, device BAR, bus number, and the argument list passed to
   the CSR read/write helpers. */
#define typedef_BDK_GSERNX_LANEX_TRAIN_7_BCFG(a,b) bdk_gsernx_lanex_train_7_bcfg_t
#define bustype_BDK_GSERNX_LANEX_TRAIN_7_BCFG(a,b) BDK_CSR_TYPE_RSL
#define basename_BDK_GSERNX_LANEX_TRAIN_7_BCFG(a,b) "GSERNX_LANEX_TRAIN_7_BCFG"
#define device_bar_BDK_GSERNX_LANEX_TRAIN_7_BCFG(a,b) 0x0 /* PF_BAR0 */
#define busnum_BDK_GSERNX_LANEX_TRAIN_7_BCFG(a,b) (a)
#define arguments_BDK_GSERNX_LANEX_TRAIN_7_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_train_8_bcfg
+ *
+ * GSER Lane Training Base Configuration Register 8
+ * This register controls settings for lane training.
+ */
union bdk_gsernx_lanex_train_8_bcfg
{
    uint64_t u;
    struct bdk_gsernx_lanex_train_8_bcfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_61_63        : 3;
        uint64_t pcie_l_c1_e_adj_sgn   : 1;  /**< [ 60: 60](R/W) Sets the lower C1 E sampler adjustment voltage offset sign.
                                                 0 = The offset sign is positive
                                                 positioning the lower C1_E sampler below the eye C1_Q sampler.
                                                 1 = The offset sign is negative
                                                 positioning the lower C1_E sampler above the eye C1_Q sampler.

                                                 Used in conjunction with
                                                 GSERN()_LANE()_TRAIN_8_BCFG[PCIE_L_C1_E_ADJ_STEP] during PCIE training.
                                                 For diagnostic use only. */
        uint64_t pcie_u_c1_e_adj_sgn   : 1;  /**< [ 59: 59](R/W) Sets the upper C1 E sampler adjustment voltage offset sign.
                                                 0 = The offset sign is positive
                                                 positioning the upper C1_E sampler above the eye C1_Q sampler.
                                                 1 = The offset sign is negative
                                                 positioning the upper C1_E sampler below the eye C1_Q sampler.

                                                 Used in conjunction with
                                                 GSERN()_LANE()_TRAIN_8_BCFG[PCIE_U_C1_E_ADJ_STEP] for PCIE training.
                                                 For diagnostic use only. */
        uint64_t pcie_u_c1_e_adj_step  : 5;  /**< [ 58: 54](R/W) Sets the upper C1 E sampler voltage level during eye monitor sampling.
                                                 Typically [PCIE_U_C1_E_ADJ_STEP] is set to 0x3 to position the eye monitor
                                                 error sampler at ~15 mV above the C1 Q sampler voltage level.
                                                 Steps are in units of 5.08 mV per step.
                                                 Used in conjunction with
                                                 GSERN()_LANE()_TRAIN_8_BCFG[PCIE_U_C1_E_ADJ_SGN] for PCIE training.
                                                 For diagnostic use only. */
        uint64_t pcie_adapt_axis       : 3;  /**< [ 53: 51](R/W) Sets the number of adaptation axes to use during receiver adaptation.
                                                 Typically set to 0x7 to enable all three adaptation axes. One-hot encoded.

                                                 Set to 0x1 to only enable axis 1 and disable axis 2 and axis 3.
                                                 Set to 0x3 to enable axis 1 and axis 2 but disable axis 3.
                                                 Set to 0x7 to enable axis 1, 2 and 3. (default.)
                                                 For diagnostic use only. */
        uint64_t pcie_l_c1_e_adj_step  : 5;  /**< [ 50: 46](R/W) Sets the lower C1 E sampler voltage level during eye monitor sampling.
                                                 Typically set to 0x2 to position the eye monitor
                                                 error sampler at ~15 mV below the C1 Q sampler voltage level.
                                                 Steps are in units of 5.08 mV per step.
                                                 Used in conjunction with
                                                 GSERN()_LANE()_TRAIN_8_BCFG[PCIE_L_C1_E_ADJ_SGN] during PCIE training.
                                                 For diagnostic use only. */
        uint64_t pcie_ecnt_div_val     : 4;  /**< [ 45: 42](R/W) Error counter divider override value. See table below.
                                                 Divider is active when the [PCIE_ECNT_DIV_EN] is set.
                                                 For diagnostic use only.

                                                 0x0 = No divider.
                                                 0x1 = Divide by 2.
                                                 0x2 = Divide by 4.
                                                 0x3 = Divide by 8.
                                                 0x4 = Divide by 16.
                                                 0x5 = Divide by 32.
                                                 0x6 = Divide by 64.
                                                 0x7 = Divide by 128.
                                                 0x8 = Divide by 256.
                                                 0x9 = Divide by 512.
                                                 0xA = Divide by 1024.
                                                 0xB = Divide by 2048.
                                                 0xC = Divide by 4096.
                                                 0xD = Divide by 8192.
                                                 0xE = Divide by 16384.
                                                 0xF = Divide by 32768. */
        uint64_t pcie_ecnt_div_en      : 1;  /**< [ 41: 41](R/W) Error counter divider override enable.
                                                 For diagnostic use only. */
        uint64_t pcie_eye_cnt_en       : 1;  /**< [ 40: 40](R/W) Eye cycle count enable. When set the number of eye monitor
                                                 cycles to sample and count during the PCIe Gen3/Gen4 training
                                                 figure of merit (FOM) calculation
                                                 is controlled by GSERN()_LANE()_TRAIN_8_BCFG[PCIE_EYE_CNT_VAL].
                                                 For diagnostic use only. */
        uint64_t pcie_eye_cnt_val      : 40; /**< [ 39:  0](R/W) PCIe eye count value Preset FOM. Sets the number of eye monitor cycles to sample/count
                                                 during the PCIe training figure of merit (FOM) calculation when
                                                 GSERN()_LANE()_TRAIN_8_BCFG[PCIE_EYE_CNT_EN]=1.
                                                 For diagnostic use only. */
#else /* Word 0 - Little Endian */
        uint64_t pcie_eye_cnt_val      : 40; /**< [ 39:  0](R/W) PCIe eye count value Preset FOM. Sets the number of eye monitor cycles to sample/count
                                                 during the PCIe training figure of merit (FOM) calculation when
                                                 GSERN()_LANE()_TRAIN_8_BCFG[PCIE_EYE_CNT_EN]=1.
                                                 For diagnostic use only. */
        uint64_t pcie_eye_cnt_en       : 1;  /**< [ 40: 40](R/W) Eye cycle count enable. When set the number of eye monitor
                                                 cycles to sample and count during the PCIe Gen3/Gen4 training
                                                 figure of merit (FOM) calculation
                                                 is controlled by GSERN()_LANE()_TRAIN_8_BCFG[PCIE_EYE_CNT_VAL].
                                                 For diagnostic use only. */
        uint64_t pcie_ecnt_div_en      : 1;  /**< [ 41: 41](R/W) Error counter divider override enable.
                                                 For diagnostic use only. */
        uint64_t pcie_ecnt_div_val     : 4;  /**< [ 45: 42](R/W) Error counter divider override value. See table below.
                                                 Divider is active when the [PCIE_ECNT_DIV_EN] is set.
                                                 For diagnostic use only.

                                                 0x0 = No divider.
                                                 0x1 = Divide by 2.
                                                 0x2 = Divide by 4.
                                                 0x3 = Divide by 8.
                                                 0x4 = Divide by 16.
                                                 0x5 = Divide by 32.
                                                 0x6 = Divide by 64.
                                                 0x7 = Divide by 128.
                                                 0x8 = Divide by 256.
                                                 0x9 = Divide by 512.
                                                 0xA = Divide by 1024.
                                                 0xB = Divide by 2048.
                                                 0xC = Divide by 4096.
                                                 0xD = Divide by 8192.
                                                 0xE = Divide by 16384.
                                                 0xF = Divide by 32768. */
        uint64_t pcie_l_c1_e_adj_step  : 5;  /**< [ 50: 46](R/W) Sets the lower C1 E sampler voltage level during eye monitor sampling.
                                                 Typically set to 0x2 to position the eye monitor
                                                 error sampler at ~15 mV below the C1 Q sampler voltage level.
                                                 Steps are in units of 5.08 mV per step.
                                                 Used in conjunction with
                                                 GSERN()_LANE()_TRAIN_8_BCFG[PCIE_L_C1_E_ADJ_SGN] during PCIE training.
                                                 For diagnostic use only. */
        uint64_t pcie_adapt_axis       : 3;  /**< [ 53: 51](R/W) Sets the number of adaptation axes to use during receiver adaptation.
                                                 Typically set to 0x7 to enable all three adaptation axes. One-hot encoded.

                                                 Set to 0x1 to only enable axis 1 and disable axis 2 and axis 3.
                                                 Set to 0x3 to enable axis 1 and axis 2 but disable axis 3.
                                                 Set to 0x7 to enable axis 1, 2 and 3. (default.)
                                                 For diagnostic use only. */
        uint64_t pcie_u_c1_e_adj_step  : 5;  /**< [ 58: 54](R/W) Sets the upper C1 E sampler voltage level during eye monitor sampling.
                                                 Typically [PCIE_U_C1_E_ADJ_STEP] is set to 0x3 to position the eye monitor
                                                 error sampler at ~15 mV above the C1 Q sampler voltage level.
                                                 Steps are in units of 5.08 mV per step.
                                                 Used in conjunction with
                                                 GSERN()_LANE()_TRAIN_8_BCFG[PCIE_U_C1_E_ADJ_SGN] for PCIE training.
                                                 For diagnostic use only. */
        uint64_t pcie_u_c1_e_adj_sgn   : 1;  /**< [ 59: 59](R/W) Sets the upper C1 E sampler adjustment voltage offset sign.
                                                 0 = The offset sign is positive
                                                 positioning the upper C1_E sampler above the eye C1_Q sampler.
                                                 1 = The offset sign is negative
                                                 positioning the upper C1_E sampler below the eye C1_Q sampler.

                                                 Used in conjunction with
                                                 GSERN()_LANE()_TRAIN_8_BCFG[PCIE_U_C1_E_ADJ_STEP] for PCIE training.
                                                 For diagnostic use only. */
        uint64_t pcie_l_c1_e_adj_sgn   : 1;  /**< [ 60: 60](R/W) Sets the lower C1 E sampler adjustment voltage offset sign.
                                                 0 = The offset sign is positive
                                                 positioning the lower C1_E sampler below the eye C1_Q sampler.
                                                 1 = The offset sign is negative
                                                 positioning the lower C1_E sampler above the eye C1_Q sampler.

                                                 Used in conjunction with
                                                 GSERN()_LANE()_TRAIN_8_BCFG[PCIE_L_C1_E_ADJ_STEP] during PCIE training.
                                                 For diagnostic use only. */
        uint64_t reserved_61_63        : 3;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_train_8_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_train_8_bcfg bdk_gsernx_lanex_train_8_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_8_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_8_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090003230ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_TRAIN_8_BCFG", 2, a, b, 0, 0);
+}
+
/* Accessor attributes for GSERN(a)_LANE(b)_TRAIN_8_BCFG, consumed by the
   generic BDK_CSR_* macros: C type of the register, bus access type,
   printable name, device BAR, bus number, and the argument list passed to
   the CSR read/write helpers. */
#define typedef_BDK_GSERNX_LANEX_TRAIN_8_BCFG(a,b) bdk_gsernx_lanex_train_8_bcfg_t
#define bustype_BDK_GSERNX_LANEX_TRAIN_8_BCFG(a,b) BDK_CSR_TYPE_RSL
#define basename_BDK_GSERNX_LANEX_TRAIN_8_BCFG(a,b) "GSERNX_LANEX_TRAIN_8_BCFG"
#define device_bar_BDK_GSERNX_LANEX_TRAIN_8_BCFG(a,b) 0x0 /* PF_BAR0 */
#define busnum_BDK_GSERNX_LANEX_TRAIN_8_BCFG(a,b) (a)
#define arguments_BDK_GSERNX_LANEX_TRAIN_8_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_train_9_bcfg
+ *
+ * GSER Lane Training Base Configuration Register 9
+ * This register controls settings for lane training.
+ */
union bdk_gsernx_lanex_train_9_bcfg
{
    uint64_t u;
    struct bdk_gsernx_lanex_train_9_bcfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_59_63        : 5;
        uint64_t pcie_dir_fom_en       : 1;  /**< [ 58: 58](R/W) Enable PCIe Gen3 and Gen4 equalization direction change minimum FOM for termination.
                                                 During PCIe Gen3 and Gen4 equalization using the direction change method
                                                 the GSERN()_LANE()_TRAIN_9_BCFG[PCIE_DIR_FOM_THRS] field sets the minimum threshold
                                                 for the raw 12-bit FOM value that, when exceeded, will terminate direction change
                                                 equalization.
                                                 [PCIE_DIR_FOM_EN] must be set to 1 to allow the direction change state machine
                                                 to terminate equalization when the measured raw FOM has exceeded the value in the
                                                 GSERN()_LANE()_TRAIN_9_BCFG[PCIE_DIR_FOM_THRS] field.
                                                 For diagnostic use only. */
        uint64_t pcie_dir_fom_thrs     : 12; /**< [ 57: 46](R/W) PCIe Gen3 and Gen4 equalization direction change FOM threshold for termination.
                                                 During PCIe Gen3 and Gen4 equalization using the direction change method
                                                 [PCIE_DIR_FOM_THRS] sets the minimum threshold for the raw 12-bit FOM
                                                 value that, when exceeded, will terminate direction change equalization.
                                                 The GSERN()_LANE()_TRAIN_9_BCFG[PCIE_DIR_FOM_EN] field must be set to 1 to
                                                 allow the direction change state machine to terminate equalization when the
                                                 raw FOM has exceeded the value in [PCIE_DIR_FOM_THRS].
                                                 For diagnostic use only. */
        uint64_t pcie_dir_ecnt_div_val : 4;  /**< [ 45: 42](R/W) Error counter divider override value. See table below.
                                                 Divider is active when the [PCIE_DIR_ECNT_DIV_EN] is set.
                                                 Used when direction change equalization is enabled.
                                                 For diagnostic use only.

                                                 0x0 = No divider.
                                                 0x1 = Divide by 2.
                                                 0x2 = Divide by 4.
                                                 0x3 = Divide by 8.
                                                 0x4 = Divide by 16.
                                                 0x5 = Divide by 32.
                                                 0x6 = Divide by 64.
                                                 0x7 = Divide by 128.
                                                 0x8 = Divide by 256.
                                                 0x9 = Divide by 512.
                                                 0xA = Divide by 1024.
                                                 0xB = Divide by 2048.
                                                 0xC = Divide by 4096.
                                                 0xD = Divide by 8192.
                                                 0xE = Divide by 16384.
                                                 0xF = Divide by 32768. */
        uint64_t pcie_dir_ecnt_div_en  : 1;  /**< [ 41: 41](R/W) Error counter divider override enable.
                                                 Used when direction change equalization is enabled.
                                                 For diagnostic use only. */
        uint64_t pcie_dir_eye_cnt_en   : 1;  /**< [ 40: 40](R/W) Eye cycle count enable. When set the number of eye monitor
                                                 cycles to sample and count during the PCIe Gen3/Gen4 training
                                                 figure of merit (FOM) calculation
                                                 is controlled by GSERN()_LANE()_TRAIN_9_BCFG[PCIE_DIR_EYE_CNT_VAL].
                                                 Used when direction change equalization is enabled.
                                                 For diagnostic use only. */
        uint64_t pcie_dir_eye_cnt_val  : 40; /**< [ 39:  0](R/W) PCIe eye count value in direction change mode. Sets the number of eye monitor cycles to
                                                 sample/count during the PCIe training figure of merit (FOM) calculation when
                                                 GSERN()_LANE()_TRAIN_9_BCFG[PCIE_DIR_EYE_CNT_EN]=1.
                                                 See GSERN()_LANE()_TRAIN_8_BCFG[PCIE_EYE_CNT_VAL]. */
#else /* Word 0 - Little Endian */
        uint64_t pcie_dir_eye_cnt_val  : 40; /**< [ 39:  0](R/W) PCIe eye count value in direction change mode. Sets the number of eye monitor cycles to
                                                 sample/count during the PCIe training figure of merit (FOM) calculation when
                                                 GSERN()_LANE()_TRAIN_9_BCFG[PCIE_DIR_EYE_CNT_EN]=1.
                                                 See GSERN()_LANE()_TRAIN_8_BCFG[PCIE_EYE_CNT_VAL]. */
        uint64_t pcie_dir_eye_cnt_en   : 1;  /**< [ 40: 40](R/W) Eye cycle count enable. When set the number of eye monitor
                                                 cycles to sample and count during the PCIe Gen3/Gen4 training
                                                 figure of merit (FOM) calculation
                                                 is controlled by GSERN()_LANE()_TRAIN_9_BCFG[PCIE_DIR_EYE_CNT_VAL].
                                                 Used when direction change equalization is enabled.
                                                 For diagnostic use only. */
        uint64_t pcie_dir_ecnt_div_en  : 1;  /**< [ 41: 41](R/W) Error counter divider override enable.
                                                 Used when direction change equalization is enabled.
                                                 For diagnostic use only. */
        uint64_t pcie_dir_ecnt_div_val : 4;  /**< [ 45: 42](R/W) Error counter divider override value. See table below.
                                                 Divider is active when the [PCIE_DIR_ECNT_DIV_EN] is set.
                                                 Used when direction change equalization is enabled.
                                                 For diagnostic use only.

                                                 0x0 = No divider.
                                                 0x1 = Divide by 2.
                                                 0x2 = Divide by 4.
                                                 0x3 = Divide by 8.
                                                 0x4 = Divide by 16.
                                                 0x5 = Divide by 32.
                                                 0x6 = Divide by 64.
                                                 0x7 = Divide by 128.
                                                 0x8 = Divide by 256.
                                                 0x9 = Divide by 512.
                                                 0xA = Divide by 1024.
                                                 0xB = Divide by 2048.
                                                 0xC = Divide by 4096.
                                                 0xD = Divide by 8192.
                                                 0xE = Divide by 16384.
                                                 0xF = Divide by 32768. */
        uint64_t pcie_dir_fom_thrs     : 12; /**< [ 57: 46](R/W) PCIe Gen3 and Gen4 equalization direction change FOM threshold for termination.
                                                 During PCIe Gen3 and Gen4 equalization using the direction change method
                                                 [PCIE_DIR_FOM_THRS] sets the minimum threshold for the raw 12-bit FOM
                                                 value that, when exceeded, will terminate direction change equalization.
                                                 The GSERN()_LANE()_TRAIN_9_BCFG[PCIE_DIR_FOM_EN] field must be set to 1 to
                                                 allow the direction change state machine to terminate equalization when the
                                                 raw FOM has exceeded the value in [PCIE_DIR_FOM_THRS].
                                                 For diagnostic use only. */
        uint64_t pcie_dir_fom_en       : 1;  /**< [ 58: 58](R/W) Enable PCIe Gen3 and Gen4 equalization direction change minimum FOM for termination.
                                                 During PCIe Gen3 and Gen4 equalization using the direction change method
                                                 the GSERN()_LANE()_TRAIN_9_BCFG[PCIE_DIR_FOM_THRS] field sets the minimum threshold
                                                 for the raw 12-bit FOM value that, when exceeded, will terminate direction change
                                                 equalization.
                                                 [PCIE_DIR_FOM_EN] must be set to 1 to allow the direction change state machine
                                                 to terminate equalization when the measured raw FOM has exceeded the value in the
                                                 GSERN()_LANE()_TRAIN_9_BCFG[PCIE_DIR_FOM_THRS] field.
                                                 For diagnostic use only. */
        uint64_t reserved_59_63        : 5;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_train_9_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_train_9_bcfg bdk_gsernx_lanex_train_9_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_9_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_9_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090003240ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_TRAIN_9_BCFG", 2, a, b, 0, 0);
+}
+
/* Accessor attributes for GSERN(a)_LANE(b)_TRAIN_9_BCFG, consumed by the
   generic BDK_CSR_* macros: C type of the register, bus access type,
   printable name, device BAR, bus number, and the argument list passed to
   the CSR read/write helpers. */
#define typedef_BDK_GSERNX_LANEX_TRAIN_9_BCFG(a,b) bdk_gsernx_lanex_train_9_bcfg_t
#define bustype_BDK_GSERNX_LANEX_TRAIN_9_BCFG(a,b) BDK_CSR_TYPE_RSL
#define basename_BDK_GSERNX_LANEX_TRAIN_9_BCFG(a,b) "GSERNX_LANEX_TRAIN_9_BCFG"
#define device_bar_BDK_GSERNX_LANEX_TRAIN_9_BCFG(a,b) 0x0 /* PF_BAR0 */
#define busnum_BDK_GSERNX_LANEX_TRAIN_9_BCFG(a,b) (a)
#define arguments_BDK_GSERNX_LANEX_TRAIN_9_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_tx_1_bcfg
+ *
+ * GSER Lane TX Base Configuration Register 1
+ * lane transmitter configuration Register 1
+ */
+union bdk_gsernx_lanex_tx_1_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_tx_1_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_57_63 : 7;
+ uint64_t tx_acjtag : 1; /**< [ 56: 56](R/W) TBD */
+ uint64_t tx_dacj : 8; /**< [ 55: 48](R/W) ACJTAG block data bits (some redundant). */
+ uint64_t reserved_41_47 : 7;
+ uint64_t tx_enloop : 1; /**< [ 40: 40](R/W) Set to enable the DDR loopback mux in the custom transmitter to
+ send a copy of transmit data back into the receive path. */
+ uint64_t reserved_33_39 : 7;
+ uint64_t nvlink : 1; /**< [ 32: 32](R/W) Transmitter lower impedance termination control (43 ohm instead of 50 ohm). */
+ uint64_t reserved_26_31 : 6;
+ uint64_t rx_mod4 : 1; /**< [ 25: 25](R/W) Use PCS layer receive data path clock ratio of 16:1 or 32:1
+ (serdes-data-rate:PCS-layer-clock-frequency) when set to 1. When set
+ to 0, use PCS layer receive clock ratio of 20:1 or 40:1.
+
+ GSERN()_LANE()_TX_1_BCFG[RX_POST4] and GSERN()_LANE()_TX_1_BCFG[RX_MOD4]
+ together set the width of the parallel receive data path (pipe) in the
+ custom receiver. GSERN()_LANE()_TX_1_BCFG[RX_POST4] and
+ GSERN()_LANE()_TX_1_BCFG[RX_MOD4] together control the clock ratio of the
+ serializer in the custom receiver.
+
+ GSERN()_LANE()_TX_1_BCFG[RX_MOD4] and GSERN()_LANE()_TX_1_BCFG[MOD4]
+ would normally be set to the same value to use the transmitter and
+ receiver at the same PCS clock ratio. */
+ uint64_t rx_post4 : 1; /**< [ 24: 24](R/W) Use PCS layer receive data path clock ratio of 32:1 or 40:1
+ (serdes-data-rate:PCS-layer-clock-frequency) when set to 1. When set
+ to 0, use PCS layer receive clock ratio of 16:1 or 20:1. (The
+ function is similar to [DIV20] but for the receiver instead of the
+ transmitter.)
+
+ GSERN()_LANE()_TX_1_BCFG[RX_POST4] and GSERN()_LANE()_TX_1_BCFG[RX_MOD4]
+ together set the width of the parallel receive data path (pipe) in the
+ custom receiver. GSERN()_LANE()_TX_1_BCFG[RX_POST4] and
+ GSERN()_LANE()_TX_1_BCFG[RX_MOD4] together control the clock ratio of the
+ serializer in the custom receiver.
+
+ GSERN()_LANE()_TX_1_BCFG[RX_POST4] and GSERN()_LANE()_TX_1_BCFG[DIV20]
+ would normally be set to the same value to use the transmitter and
+ receiver at the same PCS clock ratio. */
+ uint64_t reserved_18_23 : 6;
+ uint64_t mod4 : 1; /**< [ 17: 17](R/W) Use PCS layer transmit data path clock ratio of 16:1 or 32:1
+ (serdes-data-rate:PCS-layer-clock-frequency) when set to 1. When set
+ to 0, use PCS layer transmit clock ratio of 20:1 or 40:1.
+
+ Should be programmed as desired before sequencing the transmitter reset
+ state machine.
+
+ GSERN()_LANE()_TX_1_BCFG[DIV20] and GSERN()_LANE()_TX_1_BCFG[MOD4] together set
+ the width of the parallel transmit data path (pipe) in the custom
+ transmitter. GSERN()_LANE()_TX_1_BCFG[DIV20] and GSERN()_LANE()_TX_1_BCFG[MOD4]
+ together control the clock ratio of the serializer in the custom
+ transmitter.
+
+ GSERN()_LANE()_TX_1_BCFG[RX_MOD4] and GSERN()_LANE()_TX_1_BCFG[MOD4]
+ would normally be set to the same value to use the transmitter and
+ receiver at the same PCS clock ratio. */
+ uint64_t div20 : 1; /**< [ 16: 16](R/W) Use PCS layer transmit data path clock ratio of 32:1 or 40:1
+ (serdes-data-rate:PCS-layer-clock-frequency) when set to 1. When set
+ to 0, use PCS layer transmit clock ratio of 16:1 or 20:1.
+
+ Should be programmed as desired before sequencing the transmitter reset
+ state machine.
+
+ GSERN()_LANE()_TX_1_BCFG[DIV20] and GSERN()_LANE()_TX_1_BCFG[MOD4] together set
+ the width of the parallel transmit data path (pipe) in the custom
+ transmitter. GSERN()_LANE()_TX_1_BCFG[DIV20] and GSERN()_LANE()_TX_1_BCFG[MOD4]
+ together control the clock ratio of the serializer in the custom
+ transmitter.
+
+ GSERN()_LANE()_TX_1_BCFG[RX_POST4] and GSERN()_LANE()_TX_1_BCFG[DIV20]
+ would normally be set to the same value to use the transmitter and
+ receiver at the same PCS clock ratio. */
+ uint64_t reserved_9_15 : 7;
+ uint64_t tx_enfast : 1; /**< [ 8: 8](R/W) Enable fast slew on the TX preamp output. */
+ uint64_t reserved_1_7 : 7;
+ uint64_t tx_encm : 1; /**< [ 0: 0](R/W) Enable common mode correction in the transmitter. */
+#else /* Word 0 - Little Endian */
+ uint64_t tx_encm : 1; /**< [ 0: 0](R/W) Enable common mode correction in the transmitter. */
+ uint64_t reserved_1_7 : 7;
+ uint64_t tx_enfast : 1; /**< [ 8: 8](R/W) Enable fast slew on the TX preamp output. */
+ uint64_t reserved_9_15 : 7;
+ uint64_t div20 : 1; /**< [ 16: 16](R/W) Use PCS layer transmit data path clock ratio of 32:1 or 40:1
+ (serdes-data-rate:PCS-layer-clock-frequency) when set to 1. When set
+ to 0, use PCS layer transmit clock ratio of 16:1 or 20:1.
+
+ Should be programmed as desired before sequencing the transmitter reset
+ state machine.
+
+ GSERN()_LANE()_TX_1_BCFG[DIV20] and GSERN()_LANE()_TX_1_BCFG[MOD4] together set
+ the width of the parallel transmit data path (pipe) in the custom
+ transmitter. GSERN()_LANE()_TX_1_BCFG[DIV20] and GSERN()_LANE()_TX_1_BCFG[MOD4]
+ together control the clock ratio of the serializer in the custom
+ transmitter.
+
+ GSERN()_LANE()_TX_1_BCFG[RX_POST4] and GSERN()_LANE()_TX_1_BCFG[DIV20]
+ would normally be set to the same value to use the transmitter and
+ receiver at the same PCS clock ratio. */
+ uint64_t mod4 : 1; /**< [ 17: 17](R/W) Use PCS layer transmit data path clock ratio of 16:1 or 32:1
+ (serdes-data-rate:PCS-layer-clock-frequency) when set to 1. When set
+ to 0, use PCS layer transmit clock ratio of 20:1 or 40:1.
+
+ Should be programmed as desired before sequencing the transmitter reset
+ state machine.
+
+ GSERN()_LANE()_TX_1_BCFG[DIV20] and GSERN()_LANE()_TX_1_BCFG[MOD4] together set
+ the width of the parallel transmit data path (pipe) in the custom
+ transmitter. GSERN()_LANE()_TX_1_BCFG[DIV20] and GSERN()_LANE()_TX_1_BCFG[MOD4]
+ together control the clock ratio of the serializer in the custom
+ transmitter.
+
+ GSERN()_LANE()_TX_1_BCFG[RX_MOD4] and GSERN()_LANE()_TX_1_BCFG[MOD4]
+ would normally be set to the same value to use the transmitter and
+ receiver at the same PCS clock ratio. */
+ uint64_t reserved_18_23 : 6;
+ uint64_t rx_post4 : 1; /**< [ 24: 24](R/W) Use PCS layer receive data path clock ratio of 32:1 or 40:1
+ (serdes-data-rate:PCS-layer-clock-frequency) when set to 1. When set
+ to 0, use PCS layer receive clock ratio of 16:1 or 20:1. (The
+ function is similar to [DIV20] but for the receiver instead of the
+ transmitter.)
+
+ GSERN()_LANE()_TX_1_BCFG[RX_POST4] and GSERN()_LANE()_TX_1_BCFG[RX_MOD4]
+ together set the width of the parallel receive data path (pipe) in the
+ custom receiver. GSERN()_LANE()_TX_1_BCFG[RX_POST4] and
+ GSERN()_LANE()_TX_1_BCFG[RX_MOD4] together control the clock ratio of the
+ serializer in the custom receiver.
+
+ GSERN()_LANE()_TX_1_BCFG[RX_POST4] and GSERN()_LANE()_TX_1_BCFG[DIV20]
+ would normally be set to the same value to use the transmitter and
+ receiver at the same PCS clock ratio. */
+ uint64_t rx_mod4 : 1; /**< [ 25: 25](R/W) Use PCS layer receive data path clock ratio of 16:1 or 32:1
+ (serdes-data-rate:PCS-layer-clock-frequency) when set to 1. When set
+ to 0, use PCS layer receive clock ratio of 20:1 or 40:1.
+
+ GSERN()_LANE()_TX_1_BCFG[RX_POST4] and GSERN()_LANE()_TX_1_BCFG[RX_MOD4]
+ together set the width of the parallel receive data path (pipe) in the
+ custom receiver. GSERN()_LANE()_TX_1_BCFG[RX_POST4] and
+ GSERN()_LANE()_TX_1_BCFG[RX_MOD4] together control the clock ratio of the
+ serializer in the custom receiver.
+
+ GSERN()_LANE()_TX_1_BCFG[RX_MOD4] and GSERN()_LANE()_TX_1_BCFG[MOD4]
+ would normally be set to the same value to use the transmitter and
+ receiver at the same PCS clock ratio. */
+ uint64_t reserved_26_31 : 6;
+ uint64_t nvlink : 1; /**< [ 32: 32](R/W) Transmitter lower impedance termination control (43 ohm instead of 50 ohm). */
+ uint64_t reserved_33_39 : 7;
+ uint64_t tx_enloop : 1; /**< [ 40: 40](R/W) Set to enable the DDR loopback mux in the custom transmitter to
+ send a copy of transmit data back into the receive path. */
+ uint64_t reserved_41_47 : 7;
+ uint64_t tx_dacj : 8; /**< [ 55: 48](R/W) ACJTAG block data bits (some redundant). */
+ uint64_t tx_acjtag : 1; /**< [ 56: 56](R/W) TBD */
+ uint64_t reserved_57_63 : 7;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_tx_1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_tx_1_bcfg bdk_gsernx_lanex_tx_1_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_TX_1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_TX_1_BCFG(unsigned long a, unsigned long b)
+{
+    /* CSR exists only on CN9XXX; module index a in [0,7], lane index b in [0,4].
+       Per-module stride is 0x1000000, per-lane stride is 0x10000. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4))
+    {
+        uint64_t off = ((a) & 0x7) * 0x1000000ll + ((b) & 0x7) * 0x10000ll;
+        return 0x87e090000b40ll + off;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_TX_1_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_TX_1_BCFG(a,b) bdk_gsernx_lanex_tx_1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_TX_1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_TX_1_BCFG(a,b) "GSERNX_LANEX_TX_1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_TX_1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_TX_1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_TX_1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_tx_bsts
+ *
+ * GSER Lane TX Base Status Register
+ * lane transmitter status
+ */
+union bdk_gsernx_lanex_tx_bsts
+{
+ uint64_t u; /* Raw 64-bit register contents. */
+ struct bdk_gsernx_lanex_tx_bsts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_3_63 : 61;
+ uint64_t rxdetn : 1; /**< [ 2: 2](RO/H) Transmitter block detection of receiver termination presence,
+ low-side. Asserted indicates termination presence was
+ detected. Valid only if [RXDETCOMPLETE] is set. */
+ uint64_t rxdetp : 1; /**< [ 1: 1](RO/H) Transmitter block detection of receiver termination presence,
+ high-side. Asserted indicates termination presence was
+ detected. Valid only if [RXDETCOMPLETE] is set. */
+ uint64_t rxdetcomplete : 1; /**< [ 0: 0](RO/H) Receiver presence detection engine has completed. */
+#else /* Word 0 - Little Endian */
+ uint64_t rxdetcomplete : 1; /**< [ 0: 0](RO/H) Receiver presence detection engine has completed. */
+ uint64_t rxdetp : 1; /**< [ 1: 1](RO/H) Transmitter block detection of receiver termination presence,
+ high-side. Asserted indicates termination presence was
+ detected. Valid only if [RXDETCOMPLETE] is set. */
+ uint64_t rxdetn : 1; /**< [ 2: 2](RO/H) Transmitter block detection of receiver termination presence,
+ low-side. Asserted indicates termination presence was
+ detected. Valid only if [RXDETCOMPLETE] is set. */
+ uint64_t reserved_3_63 : 61;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_tx_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_tx_bsts bdk_gsernx_lanex_tx_bsts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_TX_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_TX_BSTS(unsigned long a, unsigned long b)
+{
+    /* CSR exists only on CN9XXX; module index a in [0,7], lane index b in [0,4].
+       Per-module stride is 0x1000000, per-lane stride is 0x10000. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4))
+    {
+        uint64_t off = ((a) & 0x7) * 0x1000000ll + ((b) & 0x7) * 0x10000ll;
+        return 0x87e090000b60ll + off;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_TX_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_TX_BSTS(a,b) bdk_gsernx_lanex_tx_bsts_t
+#define bustype_BDK_GSERNX_LANEX_TX_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_TX_BSTS(a,b) "GSERNX_LANEX_TX_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_TX_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_TX_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_TX_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_tx_drv2_bcfg
+ *
+ * GSER Lane TX Drive Override Base Configuration Register 2
+ * Upper limits on the allowed preemphasis and postemphasis values before translating to the
+ * raw transmitter control settings.
+ */
+union bdk_gsernx_lanex_tx_drv2_bcfg
+{
+ uint64_t u; /* Raw 64-bit register contents. */
+ struct bdk_gsernx_lanex_tx_drv2_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_13_63 : 51;
+ uint64_t cpost_limit : 5; /**< [ 12: 8](R/W) Upper limit for the postemphasis value. The valid range is 0x0 to 0x10. */
+ uint64_t reserved_5_7 : 3;
+ uint64_t cpre_limit : 5; /**< [ 4: 0](R/W) Upper limit for the preemphasis value. The valid range is 0x0 to 0x10. */
+#else /* Word 0 - Little Endian */
+ uint64_t cpre_limit : 5; /**< [ 4: 0](R/W) Upper limit for the preemphasis value. The valid range is 0x0 to 0x10. */
+ uint64_t reserved_5_7 : 3;
+ uint64_t cpost_limit : 5; /**< [ 12: 8](R/W) Upper limit for the postemphasis value. The valid range is 0x0 to 0x10. */
+ uint64_t reserved_13_63 : 51;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_tx_drv2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_tx_drv2_bcfg bdk_gsernx_lanex_tx_drv2_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_TX_DRV2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_TX_DRV2_BCFG(unsigned long a, unsigned long b)
+{
+    /* CSR exists only on CN9XXX; module index a in [0,7], lane index b in [0,4].
+       Per-module stride is 0x1000000, per-lane stride is 0x10000. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4))
+    {
+        uint64_t off = ((a) & 0x7) * 0x1000000ll + ((b) & 0x7) * 0x10000ll;
+        return 0x87e090000b20ll + off;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_TX_DRV2_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_TX_DRV2_BCFG(a,b) bdk_gsernx_lanex_tx_drv2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_TX_DRV2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_TX_DRV2_BCFG(a,b) "GSERNX_LANEX_TX_DRV2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_TX_DRV2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_TX_DRV2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_TX_DRV2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_tx_drv_bcfg
+ *
+ * GSER Lane TX Drive Override Base Configuration Register
+ * Lane transmitter drive override values and enables configuration
+ * Register. Default values are chosen to provide the "idle" configuration
+ * when the lane reset state machine completes. The transmitter "idle"
+ * configuration drives the output to mid-rail with 2 pull-up and 2
+ * pull-down legs active.
+ *
+ * These value fields in this register are in effect when the
+ * corresponding enable fields ([EN_TX_DRV], [EN_TX_CSPD], and
+ * GSERN()_LANE()_TX_DRV_BCFG[EN_TX_BS]) are set.
+ */
+union bdk_gsernx_lanex_tx_drv_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_tx_drv_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t tx_cspd : 1; /**< [ 63: 63](R/W) Power-down control for a second TX bias/swing leg with the same
+ weight as TX_BS[3]. Normally this field is left deasserted to
+ provide a minimum transmit amplitude. Asserting [TX_CSPD] will turn
+ off all legs of the bias/swing generator for lower standby power. */
+ uint64_t reserved_62 : 1;
+ uint64_t tx_bs : 6; /**< [ 61: 56](R/W) TX bias/swing selection. This setting only takes effect if [EN_TX_BS]
+ is asserted and [TX_CSPD] is deasserted; with [TX_CSPD] asserted the
+ bias/swing control setting seen in the analog bias generator is zero.
+
+ Typical override values would be:
+ 42 = Nominal 1.0V p-p transmit amplitude.
+ 52 = Nominal 1.2V p-p transmit amplitude.
+
+ The maximum usable value without transmitted waveform distortion depends
+ primarily on voltage, secondarily on process corner and temperature, but is at
+ least 52. There is no minimum setting based on transmitter distortion, only
+ that set by the receiver. */
+ uint64_t reserved_51_55 : 5;
+ uint64_t en_tx_cspd : 1; /**< [ 50: 50](R/W) Enables use of [TX_CSPD] as an override to
+ set the current source power down control of the transmitter. */
+ uint64_t en_tx_bs : 1; /**< [ 49: 49](R/W) Enables use of [TX_BS] as an override to
+ set the bias/swing control of the transmitter. */
+ uint64_t en_tx_drv : 1; /**< [ 48: 48](R/W) Enables use of the transmit drive strength fields in this register as overrides
+ to explicitly set the base transmitter controls. (All fields except [TX_BS] and
+ [TX_CSPD], which have separate override enables.) For diagnostic use only. */
+ uint64_t reserved_42_47 : 6;
+ uint64_t muxpost : 2; /**< [ 41: 40](R/W) Postcursor mux controls. */
+ uint64_t cpostb : 3; /**< [ 39: 37](R/W) Post cursor block 1 coefficient. */
+ uint64_t cposta : 3; /**< [ 36: 34](R/W) Post cursor block 0 coefficient. */
+ uint64_t enpost : 2; /**< [ 33: 32](R/W) Postcursor block enables. */
+ uint64_t reserved_27_31 : 5;
+ uint64_t muxmain : 4; /**< [ 26: 23](R/W) Main mux controls (some redundant). */
+ uint64_t cmaind : 3; /**< [ 22: 20](R/W) Main block 3 coefficient. */
+ uint64_t enmain : 4; /**< [ 19: 16](R/W) Main block enables. */
+ uint64_t reserved_10_15 : 6;
+ uint64_t muxpre : 2; /**< [ 9: 8](R/W) Precursor mux controls. */
+ uint64_t cpreb : 3; /**< [ 7: 5](R/W) Precursor Block 1 coefficient. */
+ uint64_t cprea : 3; /**< [ 4: 2](R/W) Precursor Block 0 coefficient. */
+ uint64_t enpre : 2; /**< [ 1: 0](R/W) Precursor block enables. */
+#else /* Word 0 - Little Endian */
+ uint64_t enpre : 2; /**< [ 1: 0](R/W) Precursor block enables. */
+ uint64_t cprea : 3; /**< [ 4: 2](R/W) Precursor Block 0 coefficient. */
+ uint64_t cpreb : 3; /**< [ 7: 5](R/W) Precursor Block 1 coefficient. */
+ uint64_t muxpre : 2; /**< [ 9: 8](R/W) Precursor mux controls. */
+ uint64_t reserved_10_15 : 6;
+ uint64_t enmain : 4; /**< [ 19: 16](R/W) Main block enables. */
+ uint64_t cmaind : 3; /**< [ 22: 20](R/W) Main block 3 coefficient. */
+ uint64_t muxmain : 4; /**< [ 26: 23](R/W) Main mux controls (some redundant). */
+ uint64_t reserved_27_31 : 5;
+ uint64_t enpost : 2; /**< [ 33: 32](R/W) Postcursor block enables. */
+ uint64_t cposta : 3; /**< [ 36: 34](R/W) Post cursor block 0 coefficient. */
+ uint64_t cpostb : 3; /**< [ 39: 37](R/W) Post cursor block 1 coefficient. */
+ uint64_t muxpost : 2; /**< [ 41: 40](R/W) Postcursor mux controls. */
+ uint64_t reserved_42_47 : 6;
+ uint64_t en_tx_drv : 1; /**< [ 48: 48](R/W) Enables use of the transmit drive strength fields in this register as overrides
+ to explicitly set the base transmitter controls. (All fields except [TX_BS] and
+ [TX_CSPD], which have separate override enables.) For diagnostic use only. */
+ uint64_t en_tx_bs : 1; /**< [ 49: 49](R/W) Enables use of [TX_BS] as an override to
+ set the bias/swing control of the transmitter. */
+ uint64_t en_tx_cspd : 1; /**< [ 50: 50](R/W) Enables use of [TX_CSPD] as an override to
+ set the current source power down control of the transmitter. */
+ uint64_t reserved_51_55 : 5;
+ uint64_t tx_bs : 6; /**< [ 61: 56](R/W) TX bias/swing selection. This setting only takes effect if [EN_TX_BS]
+ is asserted and [TX_CSPD] is deasserted; with [TX_CSPD] asserted the
+ bias/swing control setting seen in the analog bias generator is zero.
+
+ Typical override values would be:
+ 42 = Nominal 1.0V p-p transmit amplitude.
+ 52 = Nominal 1.2V p-p transmit amplitude.
+
+ The maximum usable value without transmitted waveform distortion depends
+ primarily on voltage, secondarily on process corner and temperature, but is at
+ least 52. There is no minimum setting based on transmitter distortion, only
+ that set by the receiver. */
+ uint64_t reserved_62 : 1;
+ uint64_t tx_cspd : 1; /**< [ 63: 63](R/W) Power-down control for a second TX bias/swing leg with the same
+ weight as TX_BS[3]. Normally this field is left deasserted to
+ provide a minimum transmit amplitude. Asserting [TX_CSPD] will turn
+ off all legs of the bias/swing generator for lower standby power. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_tx_drv_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_tx_drv_bcfg bdk_gsernx_lanex_tx_drv_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_TX_DRV_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_TX_DRV_BCFG(unsigned long a, unsigned long b)
+{
+    /* CSR exists only on CN9XXX; module index a in [0,7], lane index b in [0,4].
+       Per-module stride is 0x1000000, per-lane stride is 0x10000. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4))
+    {
+        uint64_t off = ((a) & 0x7) * 0x1000000ll + ((b) & 0x7) * 0x10000ll;
+        return 0x87e090000b10ll + off;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_TX_DRV_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_TX_DRV_BCFG(a,b) bdk_gsernx_lanex_tx_drv_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_TX_DRV_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_TX_DRV_BCFG(a,b) "GSERNX_LANEX_TX_DRV_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_TX_DRV_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_TX_DRV_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_TX_DRV_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_tx_drv_bsts
+ *
+ * GSER Lane TX Drive Base Status Register
+ * Lane transmitter drive setup status, i.e., settings which the
+ * transmitter is actually using. During a transmitter receiver presence
+ * detection sequence the fields of this register are not reliable, i.e.,
+ * following a write of GSERN()_LANE()_TX_RXD_BCFG[TRIGGER] to one this register is not
+ * reliable until after GSERN()_LANE()_TX_BSTS[RXDETCOMPLETE] reads as one.
+ */
+union bdk_gsernx_lanex_tx_drv_bsts
+{
+ uint64_t u; /* Raw 64-bit register contents. */
+ struct bdk_gsernx_lanex_tx_drv_bsts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t tx_cspd : 1; /**< [ 63: 63](RO/H) TX current source power down (cspd) setting in use, a second
+ bias/swing leg with the same weight as TX_BS[3], but with opposite
+ polarity for the control signal. */
+ uint64_t reserved_62 : 1;
+ uint64_t tx_bs : 6; /**< [ 61: 56](RO/H) TX bias/swing selection in use. */
+ uint64_t reserved_52_55 : 4;
+ uint64_t tx_invalid : 1; /**< [ 51: 51](RO/H) Invalid status generated by the gser_lane_pnr_txdrv_remap module
+ indicating an invalid combination of (cpre, cpost, cmain, bit-stuff)
+ was requested. */
+ uint64_t reserved_42_50 : 9;
+ uint64_t muxpost : 2; /**< [ 41: 40](RO/H) Postcursor mux controls in use. */
+ uint64_t cpostb : 3; /**< [ 39: 37](RO/H) Post cursor block 1 coefficient in use. */
+ uint64_t cposta : 3; /**< [ 36: 34](RO/H) Post cursor block 0 coefficient in use. */
+ uint64_t enpost : 2; /**< [ 33: 32](RO/H) Postcursor block enables in use. */
+ uint64_t reserved_27_31 : 5;
+ uint64_t muxmain : 4; /**< [ 26: 23](RO/H) Main mux controls (some redundant) in use. */
+ uint64_t cmaind : 3; /**< [ 22: 20](RO/H) Main block 3 coefficient in use. */
+ uint64_t enmain : 4; /**< [ 19: 16](RO/H) Main block enables in use. */
+ uint64_t reserved_10_15 : 6;
+ uint64_t muxpre : 2; /**< [ 9: 8](RO/H) Precursor mux controls in use. */
+ uint64_t cpreb : 3; /**< [ 7: 5](RO/H) Precursor Block 1 coefficient in use. */
+ uint64_t cprea : 3; /**< [ 4: 2](RO/H) Precursor Block 0 coefficient in use. */
+ uint64_t enpre : 2; /**< [ 1: 0](RO/H) Precursor block enables in use. */
+#else /* Word 0 - Little Endian */
+ uint64_t enpre : 2; /**< [ 1: 0](RO/H) Precursor block enables in use. */
+ uint64_t cprea : 3; /**< [ 4: 2](RO/H) Precursor Block 0 coefficient in use. */
+ uint64_t cpreb : 3; /**< [ 7: 5](RO/H) Precursor Block 1 coefficient in use. */
+ uint64_t muxpre : 2; /**< [ 9: 8](RO/H) Precursor mux controls in use. */
+ uint64_t reserved_10_15 : 6;
+ uint64_t enmain : 4; /**< [ 19: 16](RO/H) Main block enables in use. */
+ uint64_t cmaind : 3; /**< [ 22: 20](RO/H) Main block 3 coefficient in use. */
+ uint64_t muxmain : 4; /**< [ 26: 23](RO/H) Main mux controls (some redundant) in use. */
+ uint64_t reserved_27_31 : 5;
+ uint64_t enpost : 2; /**< [ 33: 32](RO/H) Postcursor block enables in use. */
+ uint64_t cposta : 3; /**< [ 36: 34](RO/H) Post cursor block 0 coefficient in use. */
+ uint64_t cpostb : 3; /**< [ 39: 37](RO/H) Post cursor block 1 coefficient in use. */
+ uint64_t muxpost : 2; /**< [ 41: 40](RO/H) Postcursor mux controls in use. */
+ uint64_t reserved_42_50 : 9;
+ uint64_t tx_invalid : 1; /**< [ 51: 51](RO/H) Invalid status generated by the gser_lane_pnr_txdrv_remap module
+ indicating an invalid combination of (cpre, cpost, cmain, bit-stuff)
+ was requested. */
+ uint64_t reserved_52_55 : 4;
+ uint64_t tx_bs : 6; /**< [ 61: 56](RO/H) TX bias/swing selection in use. */
+ uint64_t reserved_62 : 1;
+ uint64_t tx_cspd : 1; /**< [ 63: 63](RO/H) TX current source power down (cspd) setting in use, a second
+ bias/swing leg with the same weight as TX_BS[3], but with opposite
+ polarity for the control signal. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_tx_drv_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_tx_drv_bsts bdk_gsernx_lanex_tx_drv_bsts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_TX_DRV_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_TX_DRV_BSTS(unsigned long a, unsigned long b)
+{
+    /* CSR exists only on CN9XXX; module index a in [0,7], lane index b in [0,4].
+       Per-module stride is 0x1000000, per-lane stride is 0x10000. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4))
+    {
+        uint64_t off = ((a) & 0x7) * 0x1000000ll + ((b) & 0x7) * 0x10000ll;
+        return 0x87e090000b30ll + off;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_TX_DRV_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_TX_DRV_BSTS(a,b) bdk_gsernx_lanex_tx_drv_bsts_t
+#define bustype_BDK_GSERNX_LANEX_TX_DRV_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_TX_DRV_BSTS(a,b) "GSERNX_LANEX_TX_DRV_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_TX_DRV_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_TX_DRV_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_TX_DRV_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_tx_rxd_bcfg
+ *
+ * GSER Lane TX Receive Presence Detector Base Configuration Register
+ * The lane transmitter receiver presence detector controls are in this
+ * register. When the transmitter's receiver presence detection sequencer
+ * is triggered (by asserting [TRIGGER]), the transmitter needs to
+ * be in a weak idle state, i.e., all fields of GSERN()_LANE()_TX_DRV_BSTS
+ * should reflect the reset default values of the same fields in
+ * GSERN()_LANE()_TX_DRV_BCFG.
+ */
+union bdk_gsernx_lanex_tx_rxd_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_tx_rxd_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_34_63 : 30;
+ uint64_t ovrride_det_en : 1; /**< [ 33: 33](R/W) Enable use of the [OVRRIDE_DET] value for the result of PCIe transmitter
+ receiver presence detection instead of the normal measured result.
+
+ Internal:
+ When asserted, this control will also suppress the normal pull-down and release
+ of the transmit signals that takes place during receiver presence detection. */
+ uint64_t ovrride_det : 1; /**< [ 32: 32](R/W) When enabled by [OVRRIDE_DET_EN], the PCIe transmitter receiver presence
+ detector will use this value instead of that measured by the functional
+ circuit. This provides a mechanism to force recognition of a known number of
+ lanes in the link independent of the normal receiver presence detection
+ procedure. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t release_wait : 6; /**< [ 29: 24](R/W) Wait time after asserting rxd_samp and rxd_samn to capture the
+ result before releasing tx_rxd, rxd_samp, and rxd_samn,
+ expressed as a count of txdivclk cycles minus one, e.g., set to 0
+ to get 1 cycle. Typically set for 8 ns, or a count of 1 cycle when
+ using for PCIe gen1 (125 MHz txdivclk). */
+ uint64_t reserved_22_23 : 2;
+ uint64_t sample_wait : 6; /**< [ 21: 16](R/W) Wait time after asserting tx_rxd before asserting rxd_samp and
+ rxd_samn to sample the result, expressed as a count of lane PLL
+ reference clock cycles minus 1, e.g., set to 1 to get 2 cycles.
+ Typically set for 16 ns, or a count of 2 cycles for PCIe gen1
+ (125 MHz txdivclk). */
+ uint64_t reserved_12_15 : 4;
+ uint64_t tx_disable : 1; /**< [ 11: 11](R/W) Disable all transmitter eqdrv blocks during the receiver-present
+ detection sequence. When asserted, this temporarily overrides the
+ enmain, empre, and enpost settings in
+ GSERN()_LANE()_TX_DRV_BCFG, tri-stating the transmitter
+ during the sequence instead of leaving it in weak idle. */
+ uint64_t samn_en : 1; /**< [ 10: 10](R/W) Enable sampling of the transmitter's receiver termination presence
+ detector on the padn output. */
+ uint64_t samp_en : 1; /**< [ 9: 9](R/W) Enable sampling of the transmitter's receiver termination presence
+ detector on the padp output. */
+ uint64_t rxd_en : 1; /**< [ 8: 8](R/W) Enable assertion of the RXD pulldown on the (common) termination
+ point for differential pair prior to sampling the pad voltages. Set
+ to one for the normal detection sequence to work correctly. Setting
+ to zero is a verification hook to allow sampling the pad values
+ without first pulling the pads low. */
+ uint64_t reserved_1_7 : 7;
+ uint64_t trigger : 1; /**< [ 0: 0](R/W/H) Enable the sequencer which exercises the transmitter's receiver
+ termination presence detection. An asserting edge will start the
+ sequencer. This field self-clears when the sequence has completed. */
+#else /* Word 0 - Little Endian */
+ uint64_t trigger : 1; /**< [ 0: 0](R/W/H) Enable the sequencer which exercises the transmitter's receiver
+ termination presence detection. An asserting edge will start the
+ sequencer. This field self-clears when the sequence has completed. */
+ uint64_t reserved_1_7 : 7;
+ uint64_t rxd_en : 1; /**< [ 8: 8](R/W) Enable assertion of the RXD pulldown on the (common) termination
+ point for differential pair prior to sampling the pad voltages. Set
+ to one for the normal detection sequence to work correctly. Setting
+ to zero is a verification hook to allow sampling the pad values
+ without first pulling the pads low. */
+ uint64_t samp_en : 1; /**< [ 9: 9](R/W) Enable sampling of the transmitter's receiver termination presence
+ detector on the padp output. */
+ uint64_t samn_en : 1; /**< [ 10: 10](R/W) Enable sampling of the transmitter's receiver termination presence
+ detector on the padn output. */
+ uint64_t tx_disable : 1; /**< [ 11: 11](R/W) Disable all transmitter eqdrv blocks during the receiver-present
+ detection sequence. When asserted, this temporarily overrides the
+ enmain, empre, and enpost settings in
+ GSERN()_LANE()_TX_DRV_BCFG, tri-stating the transmitter
+ during the sequence instead of leaving it in weak idle. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t sample_wait : 6; /**< [ 21: 16](R/W) Wait time after asserting tx_rxd before asserting rxd_samp and
+ rxd_samn to sample the result, expressed as a count of lane PLL
+ reference clock cycles minus 1, e.g., set to 1 to get 2 cycles.
+ Typically set for 16 ns, or a count of 2 cycles for PCIe gen1
+ (125 MHz txdivclk). */
+ uint64_t reserved_22_23 : 2;
+ uint64_t release_wait : 6; /**< [ 29: 24](R/W) Wait time after asserting rxd_samp and rxd_samn to capture the
+ result before releasing tx_rxd, rxd_samp, and rxd_samn,
+ expressed as a count of txdivclk cycles minus one, e.g., set to 0
+ to get 1 cycle. Typically set for 8 ns, or a count of 1 cycle when
+ using for PCIe gen1 (125 MHz txdivclk). */
+ uint64_t reserved_30_31 : 2;
+ uint64_t ovrride_det : 1; /**< [ 32: 32](R/W) When enabled by [OVRRIDE_DET_EN], the PCIe transmitter receiver presence
+ detector will use this value instead of that measured by the functional
+ circuit. This provides a mechanism to force recognition of a known number of
+ lanes in the link independent of the normal receiver presence detection
+ procedure. */
+ uint64_t ovrride_det_en : 1; /**< [ 33: 33](R/W) Enable use of the [OVRRIDE_DET] value for the result of PCIe transmitter
+ receiver presence detection instead of the normal measured result.
+
+ Internal:
+ When asserted, this control will also suppress the normal pull-down and release
+ of the transmit signals that takes place during receiver presence detection. */
+ uint64_t reserved_34_63 : 30;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_tx_rxd_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_tx_rxd_bcfg bdk_gsernx_lanex_tx_rxd_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_TX_RXD_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_TX_RXD_BCFG(unsigned long a, unsigned long b)
+{
+    /* Compute the CSR address for GSERN(a)_LANE(b)_TX_RXD_BCFG.
+       Valid only on CN9XXX parts with module a in 0..7 and lane b in 0..4;
+       anything else is a fatal (non-returning) indexing error. */
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN9XXX) || (a > 7) || (b > 4))
+        __bdk_csr_fatal("GSERNX_LANEX_TX_RXD_BCFG", 2, a, b, 0, 0);
+    /* Base address plus per-module and per-lane strides. */
+    return 0x87e090000b50ll + 0x1000000ll * (a & 0x7) + 0x10000ll * (b & 0x7);
+}
+
+/* Accessor metadata macros for GSERN()_LANE()_TX_RXD_BCFG used by the
+   generic BDK_CSR_* read/write helpers: C type, bus type, printable name,
+   BAR, bus number, and the argument list for error reporting. */
+#define typedef_BDK_GSERNX_LANEX_TX_RXD_BCFG(a,b) bdk_gsernx_lanex_tx_rxd_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_TX_RXD_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_TX_RXD_BCFG(a,b) "GSERNX_LANEX_TX_RXD_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_TX_RXD_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_TX_RXD_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_TX_RXD_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_txdivclk_ctr
+ *
+ * GSER Lane TX Div Clock Cycle Counter Register
+ * A free-running counter of lane txdivclk cycles to enable rough confirmation of
+ * SerDes transmit data rate. Read the counter; wait some time, e.g., 100ms; read the
+ * counter; calculate frequency based on the difference in values during the known wait
+ * time and the programmed data path width.
+ */
+/* Hardware register layout: a single 64-bit free-running cycle counter.
+   The bit-field layout must match the hardware exactly; since the one
+   field spans the full 64-bit word, the big- and little-endian branches
+   are identical. */
+union bdk_gsernx_lanex_txdivclk_ctr
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_txdivclk_ctr_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t count : 64; /**< [ 63: 0](R/W/H) Running count of txdivclk cycles. */
+#else /* Word 0 - Little Endian */
+        uint64_t count : 64; /**< [ 63: 0](R/W/H) Running count of txdivclk cycles. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_txdivclk_ctr_s cn; */
+};
+typedef union bdk_gsernx_lanex_txdivclk_ctr bdk_gsernx_lanex_txdivclk_ctr_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_TXDIVCLK_CTR(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_TXDIVCLK_CTR(unsigned long a, unsigned long b)
+{
+    /* Compute the CSR address for GSERN(a)_LANE(b)_TXDIVCLK_CTR.
+       Valid only on CN9XXX parts with module a in 0..7 and lane b in 0..4;
+       anything else is a fatal (non-returning) indexing error. */
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN9XXX) || (a > 7) || (b > 4))
+        __bdk_csr_fatal("GSERNX_LANEX_TXDIVCLK_CTR", 2, a, b, 0, 0);
+    /* Base address plus per-module and per-lane strides. */
+    return 0x87e0900030b0ll + 0x1000000ll * (a & 0x7) + 0x10000ll * (b & 0x7);
+}
+
+/* Accessor metadata macros for GSERN()_LANE()_TXDIVCLK_CTR used by the
+   generic BDK_CSR_* read/write helpers: C type, bus type, printable name,
+   BAR, bus number, and the argument list for error reporting. */
+#define typedef_BDK_GSERNX_LANEX_TXDIVCLK_CTR(a,b) bdk_gsernx_lanex_txdivclk_ctr_t
+#define bustype_BDK_GSERNX_LANEX_TXDIVCLK_CTR(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_TXDIVCLK_CTR(a,b) "GSERNX_LANEX_TXDIVCLK_CTR"
+#define device_bar_BDK_GSERNX_LANEX_TXDIVCLK_CTR(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_TXDIVCLK_CTR(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_TXDIVCLK_CTR(a,b) (a),(b),-1,-1
+
+#endif /* __BDK_CSRS_GSERN_H__ */