Skip to content

Commit

Permalink
core: make OP-TEE relocatable without S-EL2 SPMC
Browse files Browse the repository at this point in the history
Dissociate CFG_CORE_PHYS_RELOCATABLE from CFG_CORE_SEL2_SPMC by making
OP-TEE use [TRUSTED_DRAM_BASE, TRUSTED_DRAM_BASE + TRUSTED_DRAM_SIZE)
as secure memory when SPMC at EL2 is not present.

Accordingly, the whole secure DRAM is first registered as TA RAM. To
avoid TA RAM and TEE RAM interfering with each other, we allocate a
memory block that corresponds to the overlapping region between TEE RAM
and TA RAM within tee_mm_sec_ddr.

This causes max_allocated to be inaccurate, since there will always be
an entry that represents the overlapped region between TEE RAM and TA
RAM, which cannot be used for regular allocations but still contributes
to that statistic.

Signed-off-by: Seonghyun Park <seonghp@amazon.com>
  • Loading branch information
seonghp committed Apr 3, 2024
1 parent fc57019 commit da31546
Show file tree
Hide file tree
Showing 3 changed files with 55 additions and 15 deletions.
7 changes: 2 additions & 5 deletions core/arch/arm/arm.mk
Original file line number Diff line number Diff line change
Expand Up @@ -99,6 +99,8 @@ ifeq ($(CFG_CORE_LARGE_PHYS_ADDR),y)
$(call force,CFG_WITH_LPAE,y)
endif

CFG_CORE_PHYS_RELOCATABLE ?= n

# SPMC configuration "S-EL1 SPMC" where SPM Core is implemented at S-EL1,
# that is, OP-TEE.
ifeq ($(CFG_CORE_SEL1_SPMC),y)
Expand Down Expand Up @@ -137,11 +139,6 @@ endif
ifeq ($(CFG_CORE_PHYS_RELOCATABLE)-$(CFG_WITH_PAGER),y-y)
$(error CFG_CORE_PHYS_RELOCATABLE and CFG_WITH_PAGER are not compatible)
endif
ifeq ($(CFG_CORE_PHYS_RELOCATABLE),y)
ifneq ($(CFG_CORE_SEL2_SPMC),y)
$(error CFG_CORE_PHYS_RELOCATABLE depends on CFG_CORE_SEL2_SPMC)
endif
endif

ifeq ($(CFG_CORE_FFA)-$(CFG_WITH_PAGER),y-y)
$(error CFG_CORE_FFA and CFG_WITH_PAGER are not compatible)
Expand Down
3 changes: 3 additions & 0 deletions core/arch/arm/kernel/boot.c
Original file line number Diff line number Diff line change
Expand Up @@ -1691,6 +1691,9 @@ void __weak boot_save_args(unsigned long a0, unsigned long a1,
boot_arg_nsec_entry = a4;
#endif
}
if (IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE))
core_mmu_set_secure_memory(TRUSTED_DRAM_BASE,
TRUSTED_DRAM_SIZE);
}
}

Expand Down
60 changes: 50 additions & 10 deletions core/mm/core_mmu.c
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
* Copyright (c) 2022, Arm Limited and Contributors. All rights reserved.
*/

#include "mm/generic_ram_layout.h"
#include <assert.h>
#include <config.h>
#include <kernel/boot.h>
Expand Down Expand Up @@ -153,9 +154,23 @@ void core_mmu_get_ta_range(paddr_t *base, size_t *size)

assert(secure_only[0].size >
load_offs + TEE_RAM_VA_SIZE + TEE_SDP_TEST_MEM_SIZE);
b = secure_only[0].paddr + load_offs + TEE_RAM_VA_SIZE;
s = secure_only[0].size - load_offs - TEE_RAM_VA_SIZE -
TEE_SDP_TEST_MEM_SIZE;
if (IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE) &&
!IS_ENABLED(CFG_CORE_SEL2_SPMC)) {
/*
* When CFG_CORE_PHYS_RELOCATABLE is enabled but
* CFG_CORE_SEL2_SPMC is not, we first register the
* whole secure DRAM as TA RAM so that we can use
* the memory below the physical load offset as
* TA RAM. Overlapped region with TEE RAM will be
* invalidated by `core_mmu_init_ta_ram()`.
*/
b = secure_only[0].paddr;
s = secure_only[0].size - TEE_SDP_TEST_MEM_SIZE;
} else {
b = secure_only[0].paddr + load_offs + TEE_RAM_VA_SIZE;
s = secure_only[0].size - load_offs - TEE_RAM_VA_SIZE -
TEE_SDP_TEST_MEM_SIZE;
}
} else {
assert(secure_only[1].size > TEE_SDP_TEST_MEM_SIZE);
b = secure_only[1].paddr;
Expand Down Expand Up @@ -1005,17 +1020,23 @@ static size_t collect_mem_ranges(struct tee_mmap_region *memory_map,
size_t num_elems)
{
const struct core_mmu_phys_mem *mem = NULL;
vaddr_t ram_start = secure_only[0].paddr;
vaddr_t tee_ram_start;
size_t last = 0;

if (IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE) &&
!IS_ENABLED(CFG_CORE_SEL2_SPMC))
tee_ram_start = secure_only[0].paddr;
else
tee_ram_start = core_mmu_tee_load_pa;


#define ADD_PHYS_MEM(_type, _addr, _size) \
add_phys_mem(memory_map, num_elems, #_addr, (_type), \
(_addr), (_size), &last)

if (IS_ENABLED(CFG_CORE_RWDATA_NOEXEC)) {
ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RO, ram_start,
VCORE_UNPG_RX_PA - ram_start);
ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RO, tee_ram_start,
VCORE_UNPG_RX_PA - tee_ram_start);
ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RX, VCORE_UNPG_RX_PA,
VCORE_UNPG_RX_SZ);
ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RO, VCORE_UNPG_RO_PA,
Expand Down Expand Up @@ -1350,13 +1371,19 @@ static unsigned long init_mem_map(struct tee_mmap_region *memory_map,
*/
vaddr_t id_map_start = (vaddr_t)__identity_map_init_start;
vaddr_t id_map_end = (vaddr_t)__identity_map_init_end;
vaddr_t start_addr = secure_only[0].paddr;
vaddr_t tee_start_addr;
unsigned long offs = 0;
size_t last = 0;

last = collect_mem_ranges(memory_map, num_elems);
assign_mem_granularity(memory_map);

if (IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE) &&
!IS_ENABLED(CFG_CORE_SEL2_SPMC))
tee_start_addr = core_mmu_tee_load_pa;
else
tee_start_addr = secure_only[0].paddr;

/*
* To ease mapping and lower use of xlat tables, sort mapping
* description moving small-page regions after the pgdir regions.
Expand All @@ -1368,7 +1395,7 @@ static unsigned long init_mem_map(struct tee_mmap_region *memory_map,
add_pager_vaspace(memory_map, num_elems, &last);

if (IS_ENABLED(CFG_CORE_ASLR) && seed) {
vaddr_t base_addr = start_addr + seed;
vaddr_t base_addr = tee_start_addr + seed;
const unsigned int va_width = core_mmu_get_va_width();
const vaddr_t va_mask = GENMASK_64(va_width - 1,
SMALL_PAGE_SHIFT);
Expand All @@ -1382,7 +1409,7 @@ static unsigned long init_mem_map(struct tee_mmap_region *memory_map,
if (assign_mem_va(ba, memory_map) &&
mem_map_add_id_map(memory_map, num_elems, &last,
id_map_start, id_map_end)) {
offs = ba - start_addr;
offs = ba - tee_start_addr;
DMSG("Mapping core at %#"PRIxVA" offs %#lx",
ba, offs);
goto out;
Expand All @@ -1393,7 +1420,7 @@ static unsigned long init_mem_map(struct tee_mmap_region *memory_map,
EMSG("Failed to map core with seed %#lx", seed);
}

if (!assign_mem_va(start_addr, memory_map))
if (!assign_mem_va(tee_start_addr, memory_map))
panic();

out:
Expand Down Expand Up @@ -2586,4 +2613,17 @@ void core_mmu_init_ta_ram(void)
tee_mm_final(&tee_mm_sec_ddr);
tee_mm_init(&tee_mm_sec_ddr, ps, size, CORE_MMU_USER_CODE_SHIFT,
TEE_MM_POOL_NO_FLAGS);

/*
* Allocate an entry that corresponds to the overlapped region
* b/w the TEE RAM and TA RAM regions when CFG_CORE_PHYS_RELOCATABLE
* is enabled, but CFG_CORE_SEL2_SPMC is not.
*
* This entry would increase `tee_mm_sec_ddr.max_allocated` value
* by TEE_RAM_VA_SIZE, making it inaccurate.
*/
if (IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE) &&
!IS_ENABLED(CFG_CORE_SEL2_SPMC))
(void)tee_mm_alloc2(&tee_mm_sec_ddr, core_mmu_tee_load_pa,
TEE_RAM_VA_SIZE);
}

0 comments on commit da31546

Please sign in to comment.