[RFC] Make OP-TEE physically relocatable when S-EL2 SPMC is not enabled #6712

Closed · wants to merge 1 commit
core/arch/arm/arm.mk (9 changes: 4 additions & 5 deletions)

@@ -99,6 +99,10 @@ ifeq ($(CFG_CORE_LARGE_PHYS_ADDR),y)
$(call force,CFG_WITH_LPAE,y)
endif

+# Enable or not support in OP-TEE to relocate itself to allow it to run from a
+# physical address that differs from the link address
+CFG_CORE_PHYS_RELOCATABLE ?= n
Contributor:
Could you move the inline description from line 118/119 here, with the first words rephrased?

# Enable or not support in OP-TEE to relocate itself to allow it to run from a
# physical address that differs from the link address

Contributor Author:
Thanks! Applied.


# SPMC configuration "S-EL1 SPMC" where SPM Core is implemented at S-EL1,
# that is, OP-TEE.
ifeq ($(CFG_CORE_SEL1_SPMC),y)
@@ -137,11 +141,6 @@ endif
ifeq ($(CFG_CORE_PHYS_RELOCATABLE)-$(CFG_WITH_PAGER),y-y)
$(error CFG_CORE_PHYS_RELOCATABLE and CFG_WITH_PAGER are not compatible)
endif
-ifeq ($(CFG_CORE_PHYS_RELOCATABLE),y)
-ifneq ($(CFG_CORE_SEL2_SPMC),y)
-$(error CFG_CORE_PHYS_RELOCATABLE depends on CFG_CORE_SEL2_SPMC)
-endif
-endif

ifeq ($(CFG_CORE_FFA)-$(CFG_WITH_PAGER),y-y)
$(error CFG_CORE_FFA and CFG_WITH_PAGER are not compatible)
core/arch/arm/kernel/boot.c (3 changes: 3 additions & 0 deletions)

@@ -1691,6 +1691,9 @@ void __weak boot_save_args(unsigned long a0, unsigned long a1,
boot_arg_nsec_entry = a4;
#endif
}
+	if (IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE))
+		core_mmu_set_secure_memory(TRUSTED_DRAM_BASE,
+					   TRUSTED_DRAM_SIZE);
	}
}

Contributor:
To be complete, we should handle the case with CFG_CORE_FFA=y and !IS_ENABLED(CFG_CORE_SEL2_SPMC) && IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE) above.
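A minimal sketch of the missing case, assuming core_mmu_set_secure_memory() with the same TRUSTED_DRAM_BASE/TRUSTED_DRAM_SIZE arguments is also the right registration on the FF-A path (the condition and placement are assumptions, not the merged code):

	/* Hypothetical: register secure memory on the CFG_CORE_FFA=y path
	 * when the SPMC is not at S-EL2 and the core is relocatable. */
	if (IS_ENABLED(CFG_CORE_FFA) && !IS_ENABLED(CFG_CORE_SEL2_SPMC) &&
	    IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE))
		core_mmu_set_secure_memory(TRUSTED_DRAM_BASE,
					   TRUSTED_DRAM_SIZE);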

core/arch/arm/kernel/secure_partition.c (6 changes: 3 additions & 3 deletions)

@@ -518,7 +518,7 @@ static TEE_Result load_binary_sp(struct ts_session *s,
bin_page_count = bin_size_rounded / SMALL_PAGE_SIZE;

/* Allocate memory */
-	mm = tee_mm_alloc(&tee_mm_sec_ddr, bin_size_rounded);
+	mm = tee_mm_alloc_sec_mem(bin_size_rounded);
if (!mm) {
res = TEE_ERROR_OUT_OF_MEMORY;
goto err;
@@ -901,7 +901,7 @@ static TEE_Result handle_fdt_load_relative_mem_regions(struct sp_ctx *ctx,
struct mobj *m = NULL;
unsigned int idx = 0;

-	mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
+	mm = tee_mm_alloc_sec_mem(size);
if (!mm)
return TEE_ERROR_OUT_OF_MEMORY;

@@ -1225,7 +1225,7 @@ static TEE_Result handle_fdt_mem_regions(struct sp_ctx *ctx, void *fdt)

if (alloc_needed) {
/* Base address is missing, we have to allocate */
-		mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
+		mm = tee_mm_alloc_sec_mem(size);
if (!mm)
return TEE_ERROR_OUT_OF_MEMORY;

core/include/mm/tee_mm.h (6 changes: 6 additions & 0 deletions)

@@ -41,6 +41,9 @@ typedef struct _tee_mm_pool_t tee_mm_pool_t;
/* Physical Secure DDR pool */
extern tee_mm_pool_t tee_mm_sec_ddr;

+/* Extra physical Secure DDR pool */
+extern tee_mm_pool_t tee_mm_sec_ddr_extra;

/* Virtual eSRAM pool */
extern tee_mm_pool_t tee_mm_vcore;

@@ -84,6 +87,9 @@ void tee_mm_final(tee_mm_pool_t *pool);
*/
tee_mm_entry_t *tee_mm_alloc(tee_mm_pool_t *pool, size_t size);

+/* Allocate memory from secure memory */
+tee_mm_entry_t *tee_mm_alloc_sec_mem(size_t size);

/* Allocate supplied memory range if it's free */
tee_mm_entry_t *tee_mm_alloc2(tee_mm_pool_t *pool, paddr_t base, size_t size);

core/kernel/ree_fs_ta.c (2 changes: 1 addition & 1 deletion)

@@ -719,7 +719,7 @@ static TEE_Result buf_ta_open(const TEE_UUID *uuid,
if (res)
goto err;

-	handle->mm = tee_mm_alloc(&tee_mm_sec_ddr, handle->ta_size);
+	handle->mm = tee_mm_alloc_sec_mem(handle->ta_size);
if (!handle->mm) {
res = TEE_ERROR_OUT_OF_MEMORY;
goto err;
core/mm/core_mmu.c (118 changes: 107 additions & 11 deletions)

@@ -153,9 +153,23 @@ void core_mmu_get_ta_range(paddr_t *base, size_t *size)

assert(secure_only[0].size >
load_offs + TEE_RAM_VA_SIZE + TEE_SDP_TEST_MEM_SIZE);
-		b = secure_only[0].paddr + load_offs + TEE_RAM_VA_SIZE;
-		s = secure_only[0].size - load_offs - TEE_RAM_VA_SIZE -
-		    TEE_SDP_TEST_MEM_SIZE;
+		if (IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE) &&
+		    !IS_ENABLED(CFG_CORE_SEL2_SPMC)) {
+			/*
+			 * When CFG_CORE_PHYS_RELOCATABLE is enabled but
+			 * CFG_CORE_SEL2_SPMC is not, we first register the
+			 * whole secure DRAM as TA RAM so that we can use
+			 * the memory below the physical load offset as
+			 * TA RAM. Overlapped region with TEE RAM will be
+			 * invalidated by `core_mmu_init_ta_ram()`.
+			 */
+			b = secure_only[0].paddr;
+			s = secure_only[0].size - TEE_SDP_TEST_MEM_SIZE;
+		} else {
+			b = secure_only[0].paddr + load_offs + TEE_RAM_VA_SIZE;
+			s = secure_only[0].size - load_offs - TEE_RAM_VA_SIZE -
+			    TEE_SDP_TEST_MEM_SIZE;
+		}

Contributor:
Why do we need special treatment for CFG_CORE_SEL2_SPMC? Is it an optimization? If so, please remove it, since we can afford the extra overhead to keep the code easier to manage.

This comment applies to the entire patch.
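For reference, the simplification the reviewer asks for would drop the S-EL2 SPMC test and key the branch on the relocation flag alone; a sketch under that assumption:

	/* Sketch only: treat every CFG_CORE_PHYS_RELOCATABLE=y build the
	 * same way, accepting the small overhead on S-EL2 SPMC configs. */
	if (IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE)) {
		b = secure_only[0].paddr;
		s = secure_only[0].size - TEE_SDP_TEST_MEM_SIZE;
	} else {
		b = secure_only[0].paddr + load_offs + TEE_RAM_VA_SIZE;
		s = secure_only[0].size - load_offs - TEE_RAM_VA_SIZE -
		    TEE_SDP_TEST_MEM_SIZE;
	}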
} else {
assert(secure_only[1].size > TEE_SDP_TEST_MEM_SIZE);
b = secure_only[1].paddr;
@@ -1005,17 +1019,23 @@ static size_t collect_mem_ranges(struct tee_mmap_region *memory_map,
size_t num_elems)
{
const struct core_mmu_phys_mem *mem = NULL;
-	vaddr_t ram_start = secure_only[0].paddr;
+	vaddr_t tee_ram_start = 0;
size_t last = 0;

+	if (IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE) &&
+	    !IS_ENABLED(CFG_CORE_SEL2_SPMC))
+		tee_ram_start = core_mmu_tee_load_pa;
+	else
+		tee_ram_start = secure_only[0].paddr;


#define ADD_PHYS_MEM(_type, _addr, _size) \
add_phys_mem(memory_map, num_elems, #_addr, (_type), \
(_addr), (_size), &last)

if (IS_ENABLED(CFG_CORE_RWDATA_NOEXEC)) {
-		ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RO, ram_start,
-			     VCORE_UNPG_RX_PA - ram_start);
+		ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RO, tee_ram_start,
+			     VCORE_UNPG_RX_PA - tee_ram_start);
ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RX, VCORE_UNPG_RX_PA,
VCORE_UNPG_RX_SZ);
ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RO, VCORE_UNPG_RO_PA,
@@ -1053,7 +1073,31 @@ static size_t collect_mem_ranges(struct tee_mmap_region *memory_map,
size_t ta_size = 0;

core_mmu_get_ta_range(&ta_base, &ta_size);
-		ADD_PHYS_MEM(MEM_AREA_TA_RAM, ta_base, ta_size);
+
+		/*
+		 * When CFG_CORE_PHYS_RELOCATABLE, TEE can be loaded in the
+		 * middle of the secure DRAM, rather than the secure DRAM base.
+		 * As a result, there may be up to two remaining memory regions
+		 * (i.e., one in the front and one in the back), which can be
+		 * used for TA RAM.
+		 *
+		 * When the TEE is loaded at the secure DRAM base or at the top,
+		 * then either one of the below `ADD_PHYS_MEM()` can result in
+		 * an empty memory region.
+		 */
+		if (IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE) &&
+		    !IS_ENABLED(CFG_CORE_SEL2_SPMC)) {
+			ADD_PHYS_MEM(MEM_AREA_TA_RAM, ta_base,
+				     core_mmu_tee_load_pa - ta_base);
+
+			ADD_PHYS_MEM(MEM_AREA_TA_RAM,
+				     core_mmu_tee_load_pa + TEE_RAM_VA_SIZE,
+				     ta_size -
+				     (core_mmu_tee_load_pa - ta_base) -
+				     TEE_RAM_VA_SIZE);
+		} else {
+			ADD_PHYS_MEM(MEM_AREA_TA_RAM, ta_base, ta_size);
+		}

Contributor:
It doesn't make sense to add empty regions. Please add a check against that.
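One way to add the requested guard, sketched with hypothetical front_sz/back_sz locals (the patch itself does not name these):

	/* Sketch: only register the regions that are actually non-empty,
	 * i.e. skip the front region when the core sits at the DRAM base
	 * and the back region when it sits at the top. */
	size_t front_sz = core_mmu_tee_load_pa - ta_base;
	size_t back_sz = ta_size - front_sz - TEE_RAM_VA_SIZE;

	if (front_sz)
		ADD_PHYS_MEM(MEM_AREA_TA_RAM, ta_base, front_sz);
	if (back_sz)
		ADD_PHYS_MEM(MEM_AREA_TA_RAM,
			     core_mmu_tee_load_pa + TEE_RAM_VA_SIZE,
			     back_sz);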
}

if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS) &&
@@ -1350,13 +1394,19 @@ static unsigned long init_mem_map(struct tee_mmap_region *memory_map,
*/
vaddr_t id_map_start = (vaddr_t)__identity_map_init_start;
vaddr_t id_map_end = (vaddr_t)__identity_map_init_end;
-	vaddr_t start_addr = secure_only[0].paddr;
+	vaddr_t tee_start_addr = 0;
unsigned long offs = 0;
size_t last = 0;

last = collect_mem_ranges(memory_map, num_elems);
assign_mem_granularity(memory_map);

+	if (IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE) &&
+	    !IS_ENABLED(CFG_CORE_SEL2_SPMC))
+		tee_start_addr = core_mmu_tee_load_pa;
+	else
+		tee_start_addr = secure_only[0].paddr;

/*
* To ease mapping and lower use of xlat tables, sort mapping
* description moving small-page regions after the pgdir regions.
@@ -1368,7 +1418,7 @@ static unsigned long init_mem_map(struct tee_mmap_region *memory_map,
add_pager_vaspace(memory_map, num_elems, &last);

if (IS_ENABLED(CFG_CORE_ASLR) && seed) {
-		vaddr_t base_addr = start_addr + seed;
+		vaddr_t base_addr = tee_start_addr + seed;
const unsigned int va_width = core_mmu_get_va_width();
const vaddr_t va_mask = GENMASK_64(va_width - 1,
SMALL_PAGE_SHIFT);
@@ -1382,7 +1432,7 @@
if (assign_mem_va(ba, memory_map) &&
mem_map_add_id_map(memory_map, num_elems, &last,
id_map_start, id_map_end)) {
-			offs = ba - start_addr;
+			offs = ba - tee_start_addr;
DMSG("Mapping core at %#"PRIxVA" offs %#lx",
ba, offs);
goto out;
@@ -1393,7 +1443,7 @@
EMSG("Failed to map core with seed %#lx", seed);
}

-	if (!assign_mem_va(start_addr, memory_map))
+	if (!assign_mem_va(tee_start_addr, memory_map))
panic();

out:
@@ -2552,6 +2602,27 @@ static TEE_Result teecore_init_pub_ram(void)
early_init(teecore_init_pub_ram);
#endif /*CFG_CORE_RESERVED_SHM*/

+static bool get_extra_ta_ram(vaddr_t *s, vaddr_t *e)
+{
+	struct tee_mmap_region *map = NULL;
+	struct tee_mmap_region *lower_ta_ram = NULL;
+
+	for (map = get_memory_map(); !core_mmap_is_end_of_table(map); map++) {
+		if (map->type == MEM_AREA_TA_RAM) {
+			if (!lower_ta_ram) {
+				lower_ta_ram = map;
+				continue;
+			} else {
+				*s = map->va;
+				*e = map->va + map->size;
+				break;
+			}
+		}
+	}
+
+	return lower_ta_ram != map && map->type == MEM_AREA_TA_RAM;
+}

void core_mmu_init_ta_ram(void)
{
vaddr_t s = 0;
@@ -2586,4 +2657,29 @@ void core_mmu_init_ta_ram(void)
tee_mm_final(&tee_mm_sec_ddr);
tee_mm_init(&tee_mm_sec_ddr, ps, size, CORE_MMU_USER_CODE_SHIFT,
TEE_MM_POOL_NO_FLAGS);

+	if (IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE) &&
+	    !IS_ENABLED(CFG_CORE_SEL2_SPMC)) {
+		if (!tee_mm_is_empty(&tee_mm_sec_ddr_extra))
+			panic("Extra TA RAM pool is not empty");
+
+		if (!get_extra_ta_ram(&s, &e))
+			return;
+
+		ps = virt_to_phys((void *)s);
+		size = e - s;
+
+		if (!ps || (ps & CORE_MMU_USER_CODE_MASK) ||
+		    !size || (size & CORE_MMU_USER_CODE_MASK))
+			panic("invalid Extra TA RAM");
+
+		/* extra check: we could rely on core_mmu_get_mem_by_type() */
+		if (!tee_pbuf_is_sec(ps, size))
+			panic("Extra TA RAM is not secure");
+
+		/* remove previous config and init Extra TA ddr memory pool */
+		tee_mm_final(&tee_mm_sec_ddr_extra);
+		tee_mm_init(&tee_mm_sec_ddr_extra, ps, size,
+			    CORE_MMU_USER_CODE_SHIFT, TEE_MM_POOL_NO_FLAGS);
+	}
}

Contributor (on the panic("Extra TA RAM pool is not empty")):
This "can't happen", so an assert() should be enough.

Contributor (on the early return when get_extra_ta_ram() fails):
We should make sure that nothing can be allocated from tee_mm_sec_ddr_extra if this happens.

Contributor (on tee_mm_final(&tee_mm_sec_ddr_extra)):
tee_mm_final() isn't needed, since the pool is empty.
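Folding the three comments above into the block would give roughly the following shape; a sketch that assumes tee_mm_alloc() returns NULL for a pool that was never passed to tee_mm_init(), so the early return leaves tee_mm_sec_ddr_extra unusable:

	if (IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE) &&
	    !IS_ENABLED(CFG_CORE_SEL2_SPMC)) {
		/* "Can't happen": the extra pool is set up exactly once. */
		assert(tee_mm_is_empty(&tee_mm_sec_ddr_extra));

		/* No second TA RAM region: leave the extra pool
		 * zero-initialized so allocations from it fail. */
		if (!get_extra_ta_ram(&s, &e))
			return;

		ps = virt_to_phys((void *)s);
		size = e - s;

		if (!ps || (ps & CORE_MMU_USER_CODE_MASK) ||
		    !size || (size & CORE_MMU_USER_CODE_MASK))
			panic("invalid Extra TA RAM");

		if (!tee_pbuf_is_sec(ps, size))
			panic("Extra TA RAM is not secure");

		/* No tee_mm_final() needed: the pool is still empty. */
		tee_mm_init(&tee_mm_sec_ddr_extra, ps, size,
			    CORE_MMU_USER_CODE_SHIFT, TEE_MM_POOL_NO_FLAGS);
	}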
core/mm/fobj.c (2 changes: 1 addition & 1 deletion)

@@ -759,7 +759,7 @@ struct fobj *fobj_sec_mem_alloc(unsigned int num_pages)
if (MUL_OVERFLOW(num_pages, SMALL_PAGE_SIZE, &size))
goto err;

-	f->mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
+	f->mm = tee_mm_alloc_sec_mem(size);
if (!f->mm)
goto err;

core/mm/mobj.c (10 changes: 10 additions & 0 deletions)

@@ -22,6 +22,7 @@
#include <util.h>

struct mobj *mobj_sec_ddr;
+struct mobj *mobj_sec_ddr_extra;
struct mobj *mobj_tee_ram_rx;
struct mobj *mobj_tee_ram_rw;

@@ -505,6 +506,15 @@ static TEE_Result mobj_init(void)
if (!mobj_sec_ddr)
panic("Failed to register secure ta ram");

+	if (tee_mm_sec_ddr_extra.lo) {
+		mobj_sec_ddr_extra = mobj_phys_alloc(tee_mm_sec_ddr_extra.lo,
+						     tee_mm_sec_ddr_extra.size,
+						     TEE_MATTR_MEM_TYPE_CACHED,
+						     CORE_MEM_TA_RAM);
+		if (!mobj_sec_ddr_extra)
+			panic("Failed to register extra TA ram");
+	}

if (IS_ENABLED(CFG_CORE_RWDATA_NOEXEC)) {
mobj_tee_ram_rx = mobj_phys_init(0,
VCORE_UNPG_RX_SZ,
core/mm/tee_mm.c (14 changes: 14 additions & 0 deletions)

@@ -225,6 +225,17 @@ tee_mm_entry_t *tee_mm_alloc(tee_mm_pool_t *pool, size_t size)
return NULL;
}

+tee_mm_entry_t* tee_mm_alloc_sec_mem(size_t size)
+{
+	tee_mm_entry_t *mm = NULL;
+
+	mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
+	if (!mm)
+		return tee_mm_alloc(&tee_mm_sec_ddr_extra, size);
+
+	return mm;
+}

Contributor:
I'd prefer the more direct logic:

	if (mm)
		return mm;

	return tee_mm_alloc(&tee_mm_sec_ddr_extra, size);
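Applied to the new helper, that suggestion would read as below (a sketch; the merged version may differ):

	tee_mm_entry_t *tee_mm_alloc_sec_mem(size_t size)
	{
		/* Try the primary secure DDR pool first. */
		tee_mm_entry_t *mm = tee_mm_alloc(&tee_mm_sec_ddr, size);

		if (mm)
			return mm;

		/* Fall back to the extra pool carved out below the
		 * relocated core, when one exists. */
		return tee_mm_alloc(&tee_mm_sec_ddr_extra, size);
	}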

static inline bool fit_in_gap(tee_mm_pool_t *pool, tee_mm_entry_t *e,
paddr_t offslo, paddr_t offshi)
{
@@ -357,6 +368,9 @@ bool tee_mm_is_empty(tee_mm_pool_t *pool)
/* Physical Secure DDR pool */
tee_mm_pool_t tee_mm_sec_ddr;

+/* Extra physical Secure DDR pool */
+tee_mm_pool_t tee_mm_sec_ddr_extra;

/* Virtual eSRAM pool */
tee_mm_pool_t tee_mm_vcore;
