author     Philipp Hug <philipp@hug.cx>        2019-02-06 06:48:51 +0100
committer  ron minnich <rminnich@gmail.com>    2019-02-13 04:49:14 +0000
commit     b09e5001f3071e82ccf7ec64c9cf9a4768d660b1 (patch)
tree       f1a673efd5da4bcd24886b7c049335f52b8c1c75 /src/arch/riscv
parent     540a66404591ef41e2581df01647e5788ef0c808 (diff)
riscv: Add initial support for 32bit boards
* Add separate build targets for 32-bit and 64-bit QEMU.
* Using the riscv64 toolchain for 32-bit builds requires passing -m elf32lriscv to the linker.
* rv32/rv64 is currently selected globally with ARCH_RISCV_RV32/RV64 rather than per stage.
This should probably be changed later.
TEST=Boots to "Payload not loaded." on 32-bit QEMU using the following commands:
util/riscv/make-spike-elf.sh build/coreboot.rom build/coreboot.elf
qemu-system-riscv32 -M virt -m 1024M -nographic -kernel build/coreboot.elf
Change-Id: I35e59b459d1770df10b51fe9e77dcc474d7c75a0
Signed-off-by: Philipp Hug <philipp@hug.cx>
Reviewed-on: https://review.coreboot.org/c/31253
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: ron minnich <rminnich@gmail.com>
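
The rv32/rv64 split below keys the width-dependent definitions (STORE/LOAD in bits.h, HLS_SIZE in mcall.h) off the compiler-provided __riscv_xlen macro, which is determined by the -march flags the ARCH_RISCV_RV32/RV64 selection ends up passing. A minimal standalone sketch of that dispatch, assuming a RISC-V cross compiler; REG_BYTES is an illustrative name only, not a macro from the coreboot tree:

    /*
     * Sketch only: the same __riscv_xlen dispatch used by the bits.h and
     * mcall.h hunks below, in a standalone program.  REG_BYTES is an
     * illustrative name, not coreboot code.
     */
    #include <stdio.h>

    #if __riscv_xlen == 64
    #define REG_BYTES 8	/* rv64: sd/ld move 8-byte registers */
    #else
    #define REG_BYTES 4	/* rv32: sw/lw move 4-byte registers */
    #endif

    int main(void)
    {
    	printf("register width: %d bytes\n", REG_BYTES);
    	return 0;
    }

Built with a riscv64 compiler this reports 8; built with rv32 flags such as -march=rv32imac -mabi=ilp32 (the kind of flags an RV32 config would presumably select) it reports 4, which is the same property the STORE/LOAD macro substitution in the assembly relies on.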
Diffstat (limited to 'src/arch/riscv')
-rw-r--r--   src/arch/riscv/Makefile.inc                  12
-rw-r--r--   src/arch/riscv/bootblock.S                    3
-rw-r--r--   src/arch/riscv/include/arch/smp/spinlock.h    2
-rw-r--r--   src/arch/riscv/include/bits.h                17
-rw-r--r--   src/arch/riscv/include/mcall.h               11
-rw-r--r--   src/arch/riscv/ramstage.S                     3
-rw-r--r--   src/arch/riscv/smp.c                         14
7 files changed, 46 insertions, 16 deletions
diff --git a/src/arch/riscv/Makefile.inc b/src/arch/riscv/Makefile.inc
index 9d91f0cefe..d5f62954eb 100644
--- a/src/arch/riscv/Makefile.inc
+++ b/src/arch/riscv/Makefile.inc
@@ -86,6 +86,10 @@ $(objcbfs)/bootblock.debug: $$(bootblock-objs)
 bootblock-c-ccopts += $(riscv_flags)
 bootblock-S-ccopts += $(riscv_asm_flags)
 
+ifeq ($(CONFIG_ARCH_RISCV_RV32),y)
+LDFLAGS_bootblock += -m elf32lriscv
+endif #CONFIG_ARCH_RISCV_RV32
+
 endif #CONFIG_ARCH_BOOTBLOCK_RISCV
 
 ################################################################################
@@ -116,6 +120,10 @@ $(objcbfs)/romstage.debug: $$(romstage-objs)
 romstage-c-ccopts += $(riscv_flags)
 romstage-S-ccopts += $(riscv_asm_flags)
 
+ifeq ($(CONFIG_ARCH_RISCV_RV32),y)
+LDFLAGS_romstage += -m elf32lriscv
+endif #CONFIG_ARCH_RISCV_RV32
+
 endif #CONFIG_ARCH_ROMSTAGE_RISCV
 
 ################################################################################
@@ -161,5 +169,9 @@ $(objcbfs)/ramstage.debug: $$(ramstage-objs)
 ramstage-c-ccopts += $(riscv_flags)
 ramstage-S-ccopts += $(riscv_asm_flags)
 
+ifeq ($(CONFIG_ARCH_RISCV_RV32),y)
+LDFLAGS_ramstage += -m elf32lriscv
+endif #CONFIG_ARCH_RISCV_RV32
+
 endif #CONFIG_ARCH_RAMSTAGE_RISCV
 endif #CONFIG_ARCH_RISCV
diff --git a/src/arch/riscv/bootblock.S b/src/arch/riscv/bootblock.S
index d4b8be7c2a..b0796f9fbc 100644
--- a/src/arch/riscv/bootblock.S
+++ b/src/arch/riscv/bootblock.S
@@ -16,6 +16,7 @@
  */
 
 #include <arch/encoding.h>
+#include <bits.h>
 #include <mcall.h>
 
 .section ".text._start", "ax", %progbits
@@ -44,7 +45,7 @@ _start:
 	slli t1, a0, RISCV_PGSHIFT
 	add  t0, t0, t1
 	li   t1, 0xDEADBEEF
-	sd   t1, 0(t0)
+	STORE t1, 0(t0)
 	li   t1, RISCV_PGSIZE - HLS_SIZE
 	add  sp, t0, t1
 
diff --git a/src/arch/riscv/include/arch/smp/spinlock.h b/src/arch/riscv/include/arch/smp/spinlock.h
index dc561d30a1..95e60bfefc 100644
--- a/src/arch/riscv/include/arch/smp/spinlock.h
+++ b/src/arch/riscv/include/arch/smp/spinlock.h
@@ -21,7 +21,7 @@
 #define barrier() { asm volatile ("fence" ::: "memory"); }
 
 typedef struct {
-	volatile atomic_t lock;
+	atomic_t lock;
 } spinlock_t;
 
 static inline void spinlock_lock(spinlock_t *lock)
diff --git a/src/arch/riscv/include/bits.h b/src/arch/riscv/include/bits.h
index f69c7ec0da..d824f3ec98 100644
--- a/src/arch/riscv/include/bits.h
+++ b/src/arch/riscv/include/bits.h
@@ -47,10 +47,19 @@
 #define STR(x) XSTR(x)
 #define XSTR(x) #x
 
-# define SLL32 sllw
-# define STORE sd
-# define LOAD ld
-# define LOG_REGBYTES 3
+#if __riscv_xlen == 64
+#define SLL32 sllw
+#define STORE sd
+#define LOAD ld
+#define LWU lwu
+#define LOG_REGBYTES 3
+#else
+#define SLL32 sll
+#define STORE sw
+#define LOAD lw
+#define LWU lw
+#define LOG_REGBYTES 2
+#endif
 
 #define REGBYTES (1 << LOG_REGBYTES)
 
diff --git a/src/arch/riscv/include/mcall.h b/src/arch/riscv/include/mcall.h
index cd1ed6dc18..192d2b4564 100644
--- a/src/arch/riscv/include/mcall.h
+++ b/src/arch/riscv/include/mcall.h
@@ -18,7 +18,13 @@
 
 // NOTE: this is the size of hls_t below. A static_assert would be
 // nice to have.
+#if __riscv_xlen == 64
 #define HLS_SIZE 88
+#endif
+
+#if __riscv_xlen == 32
+#define HLS_SIZE 52
+#endif
 
 /* We save 37 registers, currently. */
 #define MENTRY_FRAME_SIZE (HLS_SIZE + 37 * 8)
@@ -26,6 +32,7 @@
 #ifndef __ASSEMBLER__
 
 #include <arch/encoding.h>
+#include <arch/smp/atomic.h>
 #include <stdint.h>
 
 typedef struct {
@@ -38,8 +45,8 @@ typedef struct {
 struct blocker {
 	void *arg;
 	void (*fn)(void *arg);
-	uint32_t sync_a;
-	uint32_t sync_b;
+	atomic_t sync_a;
+	atomic_t sync_b;
 };
 
 typedef struct {
diff --git a/src/arch/riscv/ramstage.S b/src/arch/riscv/ramstage.S
index 115a55f290..28183e50e0 100644
--- a/src/arch/riscv/ramstage.S
+++ b/src/arch/riscv/ramstage.S
@@ -14,6 +14,7 @@
  */
 
 #include <arch/encoding.h>
+#include <bits.h>
 #include <mcall.h>
 
 .section ".text._start", "ax", %progbits
@@ -27,7 +28,7 @@ _start:
 	slli t1, a0, RISCV_PGSHIFT
 	add  t0, t0, t1
 	li   t1, 0xDEADBEEF
-	sd   t1, 0(t0)
+	STORE t1, 0(t0)
 	li   t1, RISCV_PGSIZE - HLS_SIZE
 	add  sp, t0, t1
 
diff --git a/src/arch/riscv/smp.c b/src/arch/riscv/smp.c
index 8942ec5b93..b32e4b8694 100644
--- a/src/arch/riscv/smp.c
+++ b/src/arch/riscv/smp.c
@@ -32,13 +32,13 @@ void smp_pause(int working_hartid)
 		/* waiting for work hart */
 		do {
 			barrier();
-		} while (SYNCA != 0x01234567);
+		} while (atomic_read(&SYNCA) != 0x01234567);
 
 		clear_csr(mstatus, MSTATUS_MIE);
 		write_csr(mie, MIP_MSIP);
 
 		/* count how many cores enter the halt */
-		__sync_fetch_and_add(&SYNCB, 1);
+		atomic_add(&SYNCB, 1);
 
 		do {
 			barrier();
@@ -49,17 +49,17 @@ void smp_pause(int working_hartid)
 	} else {
 		/* Initialize the counter and
 		 * mark the work hart into smp_pause */
-		SYNCB = 0;
-		SYNCA = 0x01234567;
+		atomic_set(&SYNCB, 0);
+		atomic_set(&SYNCA, 0x01234567);
 
 		/* waiting for other Hart to enter the halt */
 		do {
 			barrier();
-		} while (SYNCB + 1 < CONFIG_MAX_CPUS);
+		} while (atomic_read(&SYNCB) + 1 < CONFIG_MAX_CPUS);
 
 		/* initialize for the next call */
-		SYNCA = 0;
-		SYNCB = 0;
+		atomic_set(&SYNCA, 0);
+		atomic_set(&SYNCB, 0);
 	}
 #undef SYNCA
 #undef SYNCB
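
The NOTE carried over in mcall.h still asks for a static_assert tying HLS_SIZE to the C structure it mirrors. A rough sketch of how that could look; hls_t is not part of this diff, so the stand-in struct below is an assumption used only to keep the example self-contained, not code from the commit:

    /*
     * Sketch only: one way to address the "static_assert would be nice to
     * have" note in mcall.h.  hls_t is not visible in this diff, so a
     * stand-in struct is used here purely to make the example compile.
     */
    #include <stdint.h>

    #if __riscv_xlen == 64
    #define HLS_SIZE 88
    #else
    #define HLS_SIZE 52
    #endif

    struct example_hls {	/* stand-in for hls_t; layout is an assumption */
    	uintptr_t slots[HLS_SIZE / sizeof(uintptr_t)];
    };

    _Static_assert(sizeof(struct example_hls) == HLS_SIZE,
    	       "HLS_SIZE must match the C structure on both rv32 and rv64");

In the header itself the assert would go after the real hls_t definition, inside the existing #ifndef __ASSEMBLER__ block, so the assembly consumers of HLS_SIZE are unaffected.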