author     Furquan Shaikh <furquan@google.com>    2020-03-26 15:36:19 -0700
committer  Furquan Shaikh <furquan@google.com>    2020-04-02 16:53:55 +0000
commit     5b1f335ef8aed95e01f040bc7074fb00acc8ab7e (patch)
tree       a119e13f3d7a88955dee92a9de23ab6973f5673e /src/soc
parent     3c57819005af59064ea0397e8b1ed59fab5a8f7c (diff)
soc/intel/tigerlake: Reorganize memory initialization support
This change reorganizes memory initialization code for LPDDR4x on TGL to
allow sharing of code when adding support for other memory types. In
follow-up changes, support for DDR4 will be added.

1. It adds configuration for memory topology, which is currently only
   MEMORY_DOWN; however, DDR4 requires more topologies to be supported.
2. spd_info structure is organized to allow mixed topologies as well.
3. DQ/DQS maps are organized to reflect hardware configuration.

TEST=Verified that volteer still boots and memory initialization is
successful.

Signed-off-by: Furquan Shaikh <furquan@google.com>
Change-Id: Ib625f2ab30a6e1362a310d9abb3f2051f85c3013
Reviewed-on: https://review.coreboot.org/c/coreboot/+/39865
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: EricR Lai <ericr_lai@compal.corp-partner.google.com>
Diffstat (limited to 'src/soc')
-rw-r--r--  src/soc/intel/tigerlake/include/soc/meminit.h    80
-rw-r--r--  src/soc/intel/tigerlake/meminit.c               341
2 files changed, 282 insertions, 139 deletions
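To illustrate the reorganized interface, the sketch below shows how a board port might describe a memory-down LPDDR4x configuration and hand it to the new meminit_lpddr4x(). Only the struct fields and the meminit_lpddr4x() signature come from this change; the helper name, the CBFS index, and the (empty) board_memcfg contents are placeholders for illustration.

/*
 * Hypothetical mainboard glue, sketched against the interface introduced in
 * this change. The CBFS index, the board_memcfg contents and the caller are
 * assumptions, not part of this commit.
 */
#include <soc/meminit.h>

static const struct lpddr4x_cfg board_memcfg = {
	/* .dq_map and .dqs_map describe the CPU<>DRAM wiring; omitted here. */
	.ect = 1,	/* Enable Early Command Training */
};

static void board_meminit(FSP_M_CONFIG *mem_cfg, bool half_populated)
{
	const struct spd_info spd = {
		.topology = MEMORY_DOWN,
		.md_spd_loc = SPD_CBFS,
		.cbfs_index = 0,	/* placeholder index into spd.bin */
	};

	meminit_lpddr4x(mem_cfg, &board_memcfg, &spd, half_populated);
}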
diff --git a/src/soc/intel/tigerlake/include/soc/meminit.h b/src/soc/intel/tigerlake/include/soc/meminit.h
index 2345b2b12d..aab155e43c 100644
--- a/src/soc/intel/tigerlake/include/soc/meminit.h
+++ b/src/soc/intel/tigerlake/include/soc/meminit.h
@@ -12,46 +12,65 @@
#include <stdint.h>
#include <fsp/soc_binding.h>
-#define BYTES_PER_CHANNEL 2
-#define BITS_PER_BYTE 8
-#define DQS_PER_CHANNEL 2
-#define NUM_CHANNELS 8
+#define BITS_PER_BYTE 8
-struct spd_by_pointer {
- size_t spd_data_len;
- uintptr_t spd_data_ptr;
+#define LPDDR4X_CHANNELS 8
+#define LPDDR4X_BYTES_PER_CHANNEL 2
+
+enum mem_topology {
+ MEMORY_DOWN, /* Supports reading SPD from CBFS or in-memory pointer. */
};
-enum mem_info_read_type {
- NOT_EXISTING, /* No memory in this channel */
- READ_SPD_CBFS, /* Find spd file in CBFS. */
- READ_SPD_MEMPTR /* Find spd data from pointer. */
+enum md_spd_loc {
+ /* Read SPD from pointer provided to memory location. */
+ SPD_MEMPTR,
+ /* Read SPD using index into spd.bin in CBFS. */
+ SPD_CBFS,
};
struct spd_info {
- enum mem_info_read_type read_type;
- union spd_data_by {
- /* To identify spd file when read_type is READ_SPD_CBFS. */
- int spd_index;
+ enum mem_topology topology;
+
+ /* SPD info for Memory down topology */
+ enum md_spd_loc md_spd_loc;
+ union {
+ /* Used for SPD_CBFS */
+ uint8_t cbfs_index;
- /* To find spd data when read_type is READ_SPD_MEMPTR. */
- struct spd_by_pointer spd_data_ptr_info;
- } spd_spec;
+ struct {
+ /* Used for SPD_MEMPTR */
+ uintptr_t data_ptr;
+ size_t data_len;
+ };
+ };
};
/* Board-specific memory configuration information */
-struct mb_lpddr4x_cfg {
- /* DQ mapping */
- uint8_t dq_map[NUM_CHANNELS][BYTES_PER_CHANNEL * BITS_PER_BYTE];
+struct lpddr4x_cfg {
+ /*
+ * DQ CPU<>DRAM map:
+ * LPDDR4x memory interface has 2 DQs per channel. Each DQ consists of 8 bits (1
+ * byte). Thus, dq_map is represented as DDR[7-0]_DQ[1-0][7:0], where
+ * DDR[7-0] : LPDDR4x channel #
+ * DQ[1-0] : DQ # within the channel
+ * [7:0] : Bits within the DQ
+ *
+ * Index of the array represents DQ pin# on the CPU, whereas value in
+ * the array represents DQ pin# on the memory part.
+ */
+ uint8_t dq_map[LPDDR4X_CHANNELS][LPDDR4X_BYTES_PER_CHANNEL][BITS_PER_BYTE];
/*
- * DQS CPU<>DRAM map. Each array entry represents a
- * mapping of a dq bit on the CPU to the bit it's connected to on
- * the memory part. The array index represents the dqs bit number
- * on the memory part, and the values in the array represent which
- * pin on the CPU that DRAM pin connects to.
+ * DQS CPU<>DRAM map:
+ * LPDDR4x memory interface has 2 DQS pairs (P/N) per channel. Thus, dqs_map is
+ * represented as DDR[7-0]_DQS[1-0], where
+ * DDR[7-0] : LPDDR4x channel #
+ * DQS[1-0] : DQS # within the channel
+ *
+ * Index of the array represents DQS pin# on the CPU, whereas value in
+ * the array represents DQS pin# on the memory part.
*/
- uint8_t dqs_map[NUM_CHANNELS][DQS_PER_CHANNEL];
+ uint8_t dqs_map[LPDDR4X_CHANNELS][LPDDR4X_BYTES_PER_CHANNEL];
/*
* Early Command Training Enable/Disable Control
@@ -60,10 +79,7 @@ struct mb_lpddr4x_cfg {
uint8_t ect;
};
-/* Initialize default memory configurations for dimm0-only lpddr4x */
-void meminit_lpddr4x_dimm0(FSP_M_CONFIG *mem_cfg,
- const struct mb_lpddr4x_cfg *board_cfg,
- const struct spd_info *spd,
- bool half_populated);
+void meminit_lpddr4x(FSP_M_CONFIG *mem_cfg, const struct lpddr4x_cfg *board_cfg,
+ const struct spd_info *spd, bool half_populated);
#endif /* _SOC_TIGERLAKE_MEMINIT_H_ */
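As an illustration of the dq_map/dqs_map convention documented in the header above (array index = pin number on the CPU, array value = pin number on the memory part), a single-channel entry might look like the sketch below. The numbers are invented board routing, not values taken from this change.

/*
 * Hypothetical wiring for LPDDR4x channel 0 only; all other channels are
 * left zero-initialized for brevity.
 */
#include <soc/meminit.h>

static const struct lpddr4x_cfg example_cfg = {
	.dq_map[0] = {
		{  0,  1,  2,  3,  4,  5,  6,  7 },	/* DDR0_DQ0[7:0]: routed straight through */
		{ 12, 13, 14, 15,  8,  9, 10, 11 },	/* DDR0_DQ1[7:0]: byte 1 swizzled on the board */
	},
	/* Index = DQS # on the CPU, value = DQS # on the memory part. */
	.dqs_map[0] = { 0, 1 },
	.ect = 1,
};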
diff --git a/src/soc/intel/tigerlake/meminit.c b/src/soc/intel/tigerlake/meminit.c
index e6cdae0a30..864f0795e4 100644
--- a/src/soc/intel/tigerlake/meminit.c
+++ b/src/soc/intel/tigerlake/meminit.c
@@ -12,6 +12,10 @@
#include <spd_bin.h>
#include <string.h>
+/* If memory is half-populated, then upper half of the channels need to be left empty. */
+#define LPDDR4X_CHANNEL_UNPOPULATED(ch, half_populated) \
+ ((half_populated) && ((ch) >= (LPDDR4X_CHANNELS / 2)))
+
enum dimm_enable_options {
ENABLE_BOTH_DIMMS = 0,
DISABLE_DIMM0 = 1,
@@ -19,145 +23,268 @@ enum dimm_enable_options {
DISABLE_BOTH_DIMMS = 3
};
-#define MEM_INIT_CH_DQ_DQS_MAP(_mem_cfg, _b_cfg, _ch) \
- do { \
- memcpy(&_mem_cfg->DqMapCpu2DramCh ## _ch, \
- &_b_cfg->dq_map[_ch], \
- sizeof(_b_cfg->dq_map[_ch])); \
- memcpy(&_mem_cfg->DqsMapCpu2DramCh ## _ch, \
- &_b_cfg->dqs_map[_ch], \
- sizeof(_b_cfg->dqs_map[_ch])); \
- } while (0)
+static uint8_t get_dimm_cfg(uintptr_t dimm0, uintptr_t dimm1)
+{
+ if (dimm0 && dimm1)
+ return ENABLE_BOTH_DIMMS;
+ if (!dimm0 && !dimm1)
+ return DISABLE_BOTH_DIMMS;
+ if (!dimm1)
+ return DISABLE_DIMM1;
+ if (!dimm0)
+ die("Disabling of only dimm0 is not supported!\n");
+ return DISABLE_BOTH_DIMMS;
+}
-static void spd_read_from_cbfs(const struct spd_info *spd,
- uintptr_t *spd_data_ptr, size_t *spd_data_len)
+static void init_spd_upds(FSP_M_CONFIG *mem_cfg, int channel, uintptr_t spd_dimm0,
+ uintptr_t spd_dimm1)
{
- struct region_device spd_rdev;
- size_t spd_index = spd->spd_spec.spd_index;
+ mem_cfg->Reserved9[channel] = get_dimm_cfg(spd_dimm0, spd_dimm1);
- printk(BIOS_DEBUG, "SPD INDEX = %lu\n", spd_index);
- if (get_spd_cbfs_rdev(&spd_rdev, spd_index) < 0)
- die("spd.bin not found or incorrect index\n");
+ switch (channel) {
+ case 0:
+ mem_cfg->MemorySpdPtr00 = spd_dimm0;
+ mem_cfg->MemorySpdPtr01 = spd_dimm1;
+ break;
- *spd_data_len = region_device_sz(&spd_rdev);
+ case 1:
+ mem_cfg->MemorySpdPtr02 = spd_dimm0;
+ mem_cfg->MemorySpdPtr03 = spd_dimm1;
+ break;
- /* Memory leak is ok since we have memory mapped boot media */
- assert(CONFIG(BOOT_DEVICE_MEMORY_MAPPED));
+ case 2:
+ mem_cfg->MemorySpdPtr04 = spd_dimm0;
+ mem_cfg->MemorySpdPtr05 = spd_dimm1;
+ break;
+
+ case 3:
+ mem_cfg->MemorySpdPtr06 = spd_dimm0;
+ mem_cfg->MemorySpdPtr07 = spd_dimm1;
+ break;
+
+ case 4:
+ mem_cfg->MemorySpdPtr08 = spd_dimm0;
+ mem_cfg->MemorySpdPtr09 = spd_dimm1;
+ break;
+
+ case 5:
+ mem_cfg->MemorySpdPtr10 = spd_dimm0;
+ mem_cfg->MemorySpdPtr11 = spd_dimm1;
+ break;
+
+ case 6:
+ mem_cfg->MemorySpdPtr12 = spd_dimm0;
+ mem_cfg->MemorySpdPtr13 = spd_dimm1;
+ break;
- *spd_data_ptr = (uintptr_t)rdev_mmap_full(&spd_rdev);
+ case 7:
+ mem_cfg->MemorySpdPtr14 = spd_dimm0;
+ mem_cfg->MemorySpdPtr15 = spd_dimm1;
+ break;
+
+ default:
+ die("Invalid channel: %d\n", channel);
+ }
}
-static void get_spd_data(const struct spd_info *spd,
- uintptr_t *spd_data_ptr, size_t *spd_data_len)
+static inline void init_spd_upds_empty(FSP_M_CONFIG *mem_cfg, int channel)
{
- if (spd->read_type == READ_SPD_MEMPTR) {
- *spd_data_ptr = spd->spd_spec.spd_data_ptr_info.spd_data_ptr;
- *spd_data_len = spd->spd_spec.spd_data_ptr_info.spd_data_len;
- return;
- }
+ init_spd_upds(mem_cfg, channel, 0, 0);
+}
- if (spd->read_type == READ_SPD_CBFS) {
- spd_read_from_cbfs(spd, spd_data_ptr, spd_data_len);
- return;
+static inline void init_spd_upds_dimm0(FSP_M_CONFIG *mem_cfg, int channel, uintptr_t spd_dimm0)
+{
+ init_spd_upds(mem_cfg, channel, spd_dimm0, 0);
+}
+
+static void init_dq_upds(FSP_M_CONFIG *mem_cfg, int byte_pair, const uint8_t *dq_byte0,
+ const uint8_t *dq_byte1)
+{
+ uint8_t *dq_upd;
+
+ switch (byte_pair) {
+ case 0:
+ dq_upd = mem_cfg->DqMapCpu2DramCh0;
+ break;
+ case 1:
+ dq_upd = mem_cfg->DqMapCpu2DramCh1;
+ break;
+ case 2:
+ dq_upd = mem_cfg->DqMapCpu2DramCh2;
+ break;
+ case 3:
+ dq_upd = mem_cfg->DqMapCpu2DramCh3;
+ break;
+ case 4:
+ dq_upd = mem_cfg->DqMapCpu2DramCh4;
+ break;
+ case 5:
+ dq_upd = mem_cfg->DqMapCpu2DramCh5;
+ break;
+ case 6:
+ dq_upd = mem_cfg->DqMapCpu2DramCh6;
+ break;
+ case 7:
+ dq_upd = mem_cfg->DqMapCpu2DramCh7;
+ break;
+ default:
+ die("Invalid byte_pair: %d\n", byte_pair);
}
- die("no valid way to read SPD info");
+ if (dq_byte0 && dq_byte1) {
+ memcpy(dq_upd, dq_byte0, BITS_PER_BYTE);
+ memcpy(dq_upd + BITS_PER_BYTE, dq_byte1, BITS_PER_BYTE);
+ } else {
+ memset(dq_upd, 0, BITS_PER_BYTE * 2);
+ }
}
-static void meminit_dq_dqs_map(FSP_M_CONFIG *mem_cfg,
- const struct mb_lpddr4x_cfg *board_cfg,
- bool half_populated)
+static inline void init_dq_upds_empty(FSP_M_CONFIG *mem_cfg, int byte_pair)
{
- MEM_INIT_CH_DQ_DQS_MAP(mem_cfg, board_cfg, 0);
- MEM_INIT_CH_DQ_DQS_MAP(mem_cfg, board_cfg, 1);
- MEM_INIT_CH_DQ_DQS_MAP(mem_cfg, board_cfg, 2);
- MEM_INIT_CH_DQ_DQS_MAP(mem_cfg, board_cfg, 3);
-
- if (half_populated)
- return;
-
- MEM_INIT_CH_DQ_DQS_MAP(mem_cfg, board_cfg, 4);
- MEM_INIT_CH_DQ_DQS_MAP(mem_cfg, board_cfg, 5);
- MEM_INIT_CH_DQ_DQS_MAP(mem_cfg, board_cfg, 6);
- MEM_INIT_CH_DQ_DQS_MAP(mem_cfg, board_cfg, 7);
+ init_dq_upds(mem_cfg, byte_pair, NULL, NULL);
}
-static void meminit_channels_dimm0(FSP_M_CONFIG *mem_cfg,
- const struct mb_lpddr4x_cfg *board_cfg,
- uintptr_t spd_data_ptr,
- bool half_populated)
+static void init_dqs_upds(FSP_M_CONFIG *mem_cfg, int byte_pair, uint8_t dqs_byte0,
+ uint8_t dqs_byte1)
{
- uint8_t dimm_cfg = DISABLE_DIMM1; /* Use only DIMM0 */
-
- /* Channel 0 */
- mem_cfg->Reserved9[0] = dimm_cfg;
- mem_cfg->MemorySpdPtr00 = spd_data_ptr;
- mem_cfg->MemorySpdPtr01 = 0;
-
- /* Channel 1 */
- mem_cfg->Reserved9[1] = dimm_cfg;
- mem_cfg->MemorySpdPtr02 = spd_data_ptr;
- mem_cfg->MemorySpdPtr03 = 0;
-
- /* Channel 2 */
- mem_cfg->Reserved9[2] = dimm_cfg;
- mem_cfg->MemorySpdPtr04 = spd_data_ptr;
- mem_cfg->MemorySpdPtr05 = 0;
-
- /* Channel 3 */
- mem_cfg->Reserved9[3] = dimm_cfg;
- mem_cfg->MemorySpdPtr06 = spd_data_ptr;
- mem_cfg->MemorySpdPtr07 = 0;
-
- if (half_populated) {
- printk(BIOS_INFO, "%s: DRAM half-populated\n", __func__);
- dimm_cfg = DISABLE_BOTH_DIMMS;
- spd_data_ptr = 0;
+ uint8_t *dqs_upd;
+
+ switch (byte_pair) {
+ case 0:
+ dqs_upd = mem_cfg->DqsMapCpu2DramCh0;
+ break;
+ case 1:
+ dqs_upd = mem_cfg->DqsMapCpu2DramCh1;
+ break;
+ case 2:
+ dqs_upd = mem_cfg->DqsMapCpu2DramCh2;
+ break;
+ case 3:
+ dqs_upd = mem_cfg->DqsMapCpu2DramCh3;
+ break;
+ case 4:
+ dqs_upd = mem_cfg->DqsMapCpu2DramCh4;
+ break;
+ case 5:
+ dqs_upd = mem_cfg->DqsMapCpu2DramCh5;
+ break;
+ case 6:
+ dqs_upd = mem_cfg->DqsMapCpu2DramCh6;
+ break;
+ case 7:
+ dqs_upd = mem_cfg->DqsMapCpu2DramCh7;
+ break;
+ default:
+ die("Invalid byte_pair: %d\n", byte_pair);
}
- /* Channel 4 */
- mem_cfg->Reserved9[4] = dimm_cfg;
- mem_cfg->MemorySpdPtr08 = spd_data_ptr;
- mem_cfg->MemorySpdPtr09 = 0;
+ dqs_upd[0] = dqs_byte0;
+ dqs_upd[1] = dqs_byte1;
+}
- /* Channel 5 */
- mem_cfg->Reserved9[5] = dimm_cfg;
- mem_cfg->MemorySpdPtr10 = spd_data_ptr;
- mem_cfg->MemorySpdPtr11 = 0;
+static inline void init_dqs_upds_empty(FSP_M_CONFIG *mem_cfg, int byte_pair)
+{
+ init_dqs_upds(mem_cfg, byte_pair, 0, 0);
+}
- /* Channel 6 */
- mem_cfg->Reserved9[6] = dimm_cfg;
- mem_cfg->MemorySpdPtr12 = spd_data_ptr;
- mem_cfg->MemorySpdPtr13 = 0;
+static void read_spd_from_cbfs(uint8_t index, uintptr_t *data, size_t *len)
+{
+ struct region_device spd_rdev;
- /* Channel 7 */
- mem_cfg->Reserved9[7] = dimm_cfg;
- mem_cfg->MemorySpdPtr14 = spd_data_ptr;
- mem_cfg->MemorySpdPtr15 = 0;
+ printk(BIOS_DEBUG, "SPD INDEX = %u\n", index);
+ if (get_spd_cbfs_rdev(&spd_rdev, index) < 0)
+ die("spd.bin not found or incorrect index\n");
- meminit_dq_dqs_map(mem_cfg, board_cfg, half_populated);
-}
+ /* Memory leak is ok since we have memory mapped boot media */
+ assert(CONFIG(BOOT_DEVICE_MEMORY_MAPPED));
-/* Initialize onboard memory configurations for lpddr4x */
-void meminit_lpddr4x_dimm0(FSP_M_CONFIG *mem_cfg,
- const struct mb_lpddr4x_cfg *board_cfg,
- const struct spd_info *spd,
- bool half_populated)
+ *len = region_device_sz(&spd_rdev);
+ *data = (uintptr_t)rdev_mmap_full(&spd_rdev);
+}
+static void read_md_spd(const struct spd_info *info, uintptr_t *data, size_t *len)
{
- size_t spd_data_len;
- uintptr_t spd_data_ptr;
+ if (info->md_spd_loc == SPD_MEMPTR) {
+ *data = info->data_ptr;
+ *len = info->data_len;
+ } else if (info->md_spd_loc == SPD_CBFS) {
+ read_spd_from_cbfs(info->cbfs_index, data, len);
+ } else {
+ die("Not a valid location(%d) for Memory-down SPD!\n", info->md_spd_loc);
+ }
- get_spd_data(spd, &spd_data_ptr, &spd_data_len);
- print_spd_info((unsigned char *)spd_data_ptr);
+ print_spd_info((unsigned char *)*data);
+}
+
+void meminit_lpddr4x(FSP_M_CONFIG *mem_cfg, const struct lpddr4x_cfg *board_cfg,
+ const struct spd_info *info, bool half_populated)
+{
+ size_t spd_len;
+ uintptr_t spd_data;
+ int i;
- mem_cfg->MemorySpdDataLen = spd_data_len;
- meminit_channels_dimm0(mem_cfg, board_cfg, spd_data_ptr,
- half_populated);
+ if (info->topology != MEMORY_DOWN)
+ die("LPDDR4x only support memory-down topology.\n");
- /* LPDDR4 does not allow interleaved memory */
+ /* LPDDR4x does not allow interleaved memory */
mem_cfg->DqPinsInterleaved = 0;
mem_cfg->ECT = board_cfg->ect;
mem_cfg->MrcSafeConfig = 0x1;
+
+ read_md_spd(info, &spd_data, &spd_len);
+ mem_cfg->MemorySpdDataLen = spd_len;
+
+ for (i = 0; i < LPDDR4X_CHANNELS; i++) {
+ if (LPDDR4X_CHANNEL_UNPOPULATED(i, half_populated))
+ init_spd_upds_empty(mem_cfg, i);
+ else
+ init_spd_upds_dimm0(mem_cfg, i, spd_data);
+ }
+
+ /*
+ * LPDDR4x memory interface has 2 DQs per channel. Each DQ consists of 8 bits (1
+ * byte). However, FSP UPDs for DQ Map expect a DQ pair (i.e. mapping for 2 bytes) in
+ * each UPD.
+ *
+ * Thus, init_dq_upds() needs to be called for dq pair of each channel.
+ * DqMapCpu2DramCh0 --> dq_map[CHAN=0][0-1]
+ * DqMapCpu2DramCh1 --> dq_map[CHAN=1][0-1]
+ * DqMapCpu2DramCh2 --> dq_map[CHAN=2][0-1]
+ * DqMapCpu2DramCh3 --> dq_map[CHAN=3][0-1]
+ * DqMapCpu2DramCh4 --> dq_map[CHAN=4][0-1]
+ * DqMapCpu2DramCh5 --> dq_map[CHAN=5][0-1]
+ * DqMapCpu2DramCh6 --> dq_map[CHAN=6][0-1]
+ * DqMapCpu2DramCh7 --> dq_map[CHAN=7][0-1]
+ */
+ for (i = 0; i < LPDDR4X_CHANNELS; i++) {
+ if (LPDDR4X_CHANNEL_UNPOPULATED(i, half_populated))
+ init_dq_upds_empty(mem_cfg, i);
+ else
+ init_dq_upds(mem_cfg, i, board_cfg->dq_map[i][0],
+ board_cfg->dq_map[i][1]);
+ }
+
+ /*
+ * LPDDR4x memory interface has 2 DQS pairs per channel. FSP UPDs for DQS Map expect a
+ * pair in each UPD.
+ *
+ * Thus, init_dqs_upds() needs to be called for dqs pair of each channel.
+ * DqsMapCpu2DramCh0 --> dqs_map[CHAN=0][0-1]
+ * DqsMapCpu2DramCh1 --> dqs_map[CHAN=1][0-1]
+ * DqsMapCpu2DramCh2 --> dqs_map[CHAN=2][0-1]
+ * DqsMapCpu2DramCh3 --> dqs_map[CHAN=3][0-1]
+ * DqsMapCpu2DramCh4 --> dqs_map[CHAN=4][0-1]
+ * DqsMapCpu2DramCh5 --> dqs_map[CHAN=5][0-1]
+ * DqsMapCpu2DramCh6 --> dqs_map[CHAN=6][0-1]
+ * DqsMapCpu2DramCh7 --> dqs_map[CHAN=7][0-1]
+ */
+ for (i = 0; i < LPDDR4X_CHANNELS; i++) {
+ if (LPDDR4X_CHANNEL_UNPOPULATED(i, half_populated))
+ init_dqs_upds_empty(mem_cfg, i);
+ else
+ init_dqs_upds(mem_cfg, i, board_cfg->dqs_map[i][0],
+ board_cfg->dqs_map[i][1]);
+ }
}
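For completeness, the SPD_MEMPTR flavor of the new spd_info (using the anonymous-union members data_ptr/data_len instead of cbfs_index) could be used as in the sketch below. The helper name, its arguments, and the origin of the SPD blob are assumptions for illustration only.

/*
 * Hypothetical SPD_MEMPTR usage: the SPD blob is assumed to be already
 * present in memory; only the spd_info fields come from this change.
 */
#include <soc/meminit.h>

static void board_meminit_memptr(FSP_M_CONFIG *mem_cfg,
				 const struct lpddr4x_cfg *board_cfg,
				 const void *spd_blob, size_t spd_blob_len)
{
	const struct spd_info spd = {
		.topology = MEMORY_DOWN,
		.md_spd_loc = SPD_MEMPTR,
		.data_ptr = (uintptr_t)spd_blob,
		.data_len = spd_blob_len,
	};

	meminit_lpddr4x(mem_cfg, board_cfg, &spd, false /* fully populated */);
}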