//******************************************************************************
// Copyright (c) 2018, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE for license details.
//------------------------------------------------------------------------------
#include "enclave.h"
#include "mprv.h"
#include "pmp.h"
#include "page.h"
#include "cpu.h"
#include "platform-hook.h"
#include <sbi/sbi_string.h>
#include <sbi/riscv_asm.h>
#include <sbi/riscv_locks.h>
#include <sbi/sbi_console.h>
#define ENCL_MAX 16
struct enclave enclaves[ENCL_MAX];
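// an eid refers to a live enclave only while it is in range and its slot's
// state is non-negative (INVALID is the negative state set at init)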
#define ENCLAVE_EXISTS(eid) (eid >= 0 && eid < ENCL_MAX && enclaves[eid].state >= 0)
static spinlock_t encl_lock = SPIN_LOCK_INITIALIZER;
extern void save_host_regs(void);
extern void restore_host_regs(void);
extern byte dev_public_key[PUBLIC_KEY_SIZE];
/****************************
*
* Enclave utility functions
* Internal use by SBI calls
*
****************************/
/* Internal function containing the core of the context switching
* code to the enclave.
*
* Used by resume_enclave and run_enclave.
*
* Expects that eid has already been validated, and it is OK to run this enclave
*/
static inline void context_switch_to_enclave(struct sbi_trap_regs* regs,
enclave_id eid,
int load_parameters){
/* save host context */
swap_prev_state(&enclaves[eid].threads[0], regs, 1);
swap_prev_mepc(&enclaves[eid].threads[0], regs, regs->mepc);
swap_prev_mstatus(&enclaves[eid].threads[0], regs, regs->mstatus);
enclaves[eid].threads[0].prev_mideleg = csr_read(mideleg);
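// while the enclave runs, delegate no interrupts to S-mode; they all trap to the SM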
uintptr_t interrupts = 0;
csr_write(mideleg, interrupts);
if(load_parameters) {
// passing parameters for a first run
csr_write(sepc, (uintptr_t) enclaves[eid].params.user_entry);
regs->mepc = (uintptr_t) enclaves[eid].params.runtime_entry - 4; // regs->mepc is incremented by 4 before sbi_ecall_handler returns
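// MPP = 1, so the mret into the enclave lands in S-mode, where the runtime executes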
regs->mstatus = (1 << MSTATUS_MPP_SHIFT);
// $a1: (PA) DRAM base,
regs->a1 = (uintptr_t) enclaves[eid].pa_params.dram_base;
// $a2: (PA) DRAM size,
regs->a2 = (uintptr_t) enclaves[eid].pa_params.dram_size;
// $a3: (PA) kernel location,
regs->a3 = (uintptr_t) enclaves[eid].pa_params.runtime_base;
// $a4: (PA) user location,
regs->a4 = (uintptr_t) enclaves[eid].pa_params.user_base;
// $a5: (PA) freemem location,
regs->a5 = (uintptr_t) enclaves[eid].pa_params.free_base;
// $a6: (VA) utm base,
regs->a6 = (uintptr_t) enclaves[eid].params.untrusted_ptr;
// $a7: (size_t) utm size
regs->a7 = (uintptr_t) enclaves[eid].params.untrusted_size;
// switch to the initial enclave page table
csr_write(satp, enclaves[eid].encl_satp);
}
switch_vector_enclave();
// set PMP
//osm_pmp_set(PMP_NO_PERM);
int memid;
for(memid=0; memid < ENCLAVE_REGIONS_MAX; memid++) {
if(enclaves[eid].regions[memid].type != REGION_INVALID) {
pmp_set_keystone(enclaves[eid].regions[memid].pmp_rid, PMP_ALL_PERM);
}
}
// Setup any platform specific defenses
platform_switch_to_enclave(&(enclaves[eid]));
cpu_enter_enclave_context(eid);
}
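/* Illustrative sketch (not part of the SM; hypothetical prototype): on a
 * first run, the registers staged above form the boot ABI seen by the
 * enclave runtime, which could bind them as
 *
 *   void runtime_boot(uintptr_t a0,            // unused, not set above
 *                     uintptr_t dram_base,     // a1: EPM base (PA)
 *                     uintptr_t dram_size,     // a2: EPM size
 *                     uintptr_t runtime_base,  // a3: kernel location (PA)
 *                     uintptr_t user_base,     // a4: user location (PA)
 *                     uintptr_t free_base,     // a5: freemem location (PA)
 *                     uintptr_t utm_base,      // a6: UTM base (VA)
 *                     size_t    utm_size);     // a7: UTM size
 *
 * The real runtime's entry prototype may differ.
 */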
static inline void context_switch_to_host(struct sbi_trap_regs *regs,
enclave_id eid,
int return_on_resume){
// set PMP
int memid;
for(memid=0; memid < ENCLAVE_REGIONS_MAX; memid++) {
if(enclaves[eid].regions[memid].type != REGION_INVALID) {
pmp_set_keystone(enclaves[eid].regions[memid].pmp_rid, PMP_NO_PERM);
}
}
osm_pmp_set(PMP_ALL_PERM);
/* restore host context */
csr_write(mideleg, enclaves[eid].threads[0].prev_mideleg);
swap_prev_state(&enclaves[eid].threads[0], regs, return_on_resume);
swap_prev_mepc(&enclaves[eid].threads[0], regs, regs->mepc);
swap_prev_mstatus(&enclaves[eid].threads[0], regs, regs->mstatus);
switch_vector_host();
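// interrupts that pended in M-mode while the enclave ran (mideleg was
// cleared) are reflected to their S-mode equivalents so the host can take them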
uintptr_t pending = csr_read(mip);
if (pending & MIP_MTIP) {
csr_clear(mip, MIP_MTIP);
csr_set(mip, MIP_STIP);
}
if (pending & MIP_MSIP) {
csr_clear(mip, MIP_MSIP);
csr_set(mip, MIP_SSIP);
}
if (pending & MIP_MEIP) {
csr_clear(mip, MIP_MEIP);
csr_set(mip, MIP_SEIP);
}
// Reconfigure platform specific defenses
platform_switch_from_enclave(&(enclaves[eid]));
cpu_exit_enclave_context();
return;
}
// TODO: This function is externally used.
// refactoring needed
/*
* Init all metadata as needed for keeping track of enclaves
* Called once by the SM on startup
*/
void enclave_init_metadata(){
enclave_id eid;
int i=0;
/* Assumes eids are incrementing values, which they are for now */
for(eid=0; eid < ENCL_MAX; eid++){
enclaves[eid].state = INVALID;
// Clear out regions
for(i=0; i < ENCLAVE_REGIONS_MAX; i++){
enclaves[eid].regions[i].type = REGION_INVALID;
}
/* Fire all platform specific init for each enclave */
platform_init_enclave(&(enclaves[eid]));
}
}
static unsigned long clean_enclave_memory(uintptr_t utbase, uintptr_t utsize)
{
// This function is quite temporary. See issue #38
// Zero out the untrusted memory region, since it may be in
// indeterminate state.
sbi_memset((void*)utbase, 0, utsize);
return SBI_ERR_SM_ENCLAVE_SUCCESS;
}
static unsigned long encl_alloc_eid(enclave_id* _eid)
{
enclave_id eid;
spin_lock(&encl_lock);
for(eid=0; eid<ENCL_MAX; eid++)
{
if(enclaves[eid].state == INVALID){
break;
}
}
if(eid != ENCL_MAX)
enclaves[eid].state = ALLOCATED;
spin_unlock(&encl_lock);
if(eid != ENCL_MAX){
*_eid = eid;
return SBI_ERR_SM_ENCLAVE_SUCCESS;
}
else{
return SBI_ERR_SM_ENCLAVE_NO_FREE_RESOURCE;
}
}
static unsigned long encl_free_eid(enclave_id eid)
{
spin_lock(&encl_lock);
enclaves[eid].state = INVALID;
spin_unlock(&encl_lock);
return SBI_ERR_SM_ENCLAVE_SUCCESS;
}
int get_enclave_region_index(enclave_id eid, enum enclave_region_type type){
size_t i;
for(i = 0;i < ENCLAVE_REGIONS_MAX; i++){
if(enclaves[eid].regions[i].type == type){
return i;
}
}
// No such region for this enclave
return -1;
}
uintptr_t get_enclave_region_size(enclave_id eid, int memid)
{
if (0 <= memid && memid < ENCLAVE_REGIONS_MAX)
return pmp_region_get_size(enclaves[eid].regions[memid].pmp_rid);
return 0;
}
uintptr_t get_enclave_region_base(enclave_id eid, int memid)
{
if (0 <= memid && memid < ENCLAVE_REGIONS_MAX)
return pmp_region_get_addr(enclaves[eid].regions[memid].pmp_rid);
return 0;
}
// TODO: This function is externally used by sm-sbi.c.
// Change it to be internal (remove from the enclave.h and make static)
/* Internal function enforcing that the copy source lies in the untrusted world.
 * Does NOT verify dest; the caller is responsible for it.
 * dest should point inside SM memory.
*/
unsigned long copy_enclave_create_args(uintptr_t src, struct keystone_sbi_create* dest){
int region_overlap = copy_to_sm(dest, src, sizeof(struct keystone_sbi_create));
if (region_overlap)
return SBI_ERR_SM_ENCLAVE_REGION_OVERLAPS;
else
return SBI_ERR_SM_ENCLAVE_SUCCESS;
}
/* copies data from enclave, source must be inside EPM */
static unsigned long copy_enclave_data(struct enclave* enclave,
void* dest, uintptr_t source, size_t size) {
int illegal = copy_to_sm(dest, source, size);
if(illegal)
return SBI_ERR_SM_ENCLAVE_ILLEGAL_ARGUMENT;
else
return SBI_ERR_SM_ENCLAVE_SUCCESS;
}
/* copies data into enclave, destination must be inside EPM */
static unsigned long copy_enclave_report(struct enclave* enclave,
uintptr_t dest, struct report* source) {
int illegal = copy_from_sm(dest, source, sizeof(struct report));
if(illegal)
return SBI_ERR_SM_ENCLAVE_ILLEGAL_ARGUMENT;
else
return SBI_ERR_SM_ENCLAVE_SUCCESS;
}
static int is_create_args_valid(struct keystone_sbi_create* args)
{
uintptr_t epm_start, epm_end;
/* printm("[create args info]: \r\n\tepm_addr: %llx\r\n\tepmsize: %llx\r\n\tutm_addr: %llx\r\n\tutmsize: %llx\r\n\truntime_addr: %llx\r\n\tuser_addr: %llx\r\n\tfree_addr: %llx\r\n", */
/* args->epm_region.paddr, */
/* args->epm_region.size, */
/* args->utm_region.paddr, */
/* args->utm_region.size, */
/* args->runtime_paddr, */
/* args->user_paddr, */
/* args->free_paddr); */
// check if physical addresses are valid
if (args->epm_region.size <= 0)
return 0;
// check if overflow
if (args->epm_region.paddr >=
args->epm_region.paddr + args->epm_region.size)
return 0;
if (args->utm_region.paddr >=
args->utm_region.paddr + args->utm_region.size)
return 0;
epm_start = args->epm_region.paddr;
epm_end = args->epm_region.paddr + args->epm_region.size;
// check if physical addresses are in the range
if (args->runtime_paddr < epm_start ||
args->runtime_paddr >= epm_end)
return 0;
if (args->user_paddr < epm_start ||
args->user_paddr >= epm_end)
return 0;
if (args->free_paddr < epm_start ||
args->free_paddr > epm_end)
// note: free_paddr == epm_end if there's no free memory
return 0;
// check the order of physical addresses
if (args->runtime_paddr > args->user_paddr)
return 0;
if (args->user_paddr > args->free_paddr)
return 0;
return 1;
}
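/* Illustrative layout accepted by the checks above (addresses increase to
 * the right); free_paddr == epm_end is allowed when there is no free memory:
 *
 *   epm_start                                               epm_end
 *   |---[runtime_paddr]---[user_paddr]---[free_paddr]----------|
 */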
/*********************************
*
* Enclave SBI functions
* These are exposed to S-mode via the sm-sbi interface
*
*********************************/
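/* Lifecycle implemented by the guards in the handlers below:
 *   create_enclave:  ALLOCATED -> FRESH (on successful validation/hashing)
 *   run_enclave:     FRESH -> RUNNING (first thread enters)
 *   stop/exit:       RUNNING -> STOPPED once n_thread drops to zero
 *   resume_enclave:  STOPPED (or RUNNING) -> RUNNING
 *   destroy_enclave: any state up to STOPPED -> DESTROYING -> freed
 */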
/* This handles creation of a new enclave, based on arguments provided
* by the untrusted host.
*
* This may fail if it cannot allocate PMP regions, EIDs, etc
*/
unsigned long create_enclave(unsigned long *eidptr, struct keystone_sbi_create create_args)
{
/* EPM and UTM parameters */
uintptr_t base = create_args.epm_region.paddr;
size_t size = create_args.epm_region.size;
uintptr_t utbase = create_args.utm_region.paddr;
size_t utsize = create_args.utm_region.size;
enclave_id eid;
unsigned long ret;
int region, shared_region;
/* Runtime parameters */
if(!is_create_args_valid(&create_args))
return SBI_ERR_SM_ENCLAVE_ILLEGAL_ARGUMENT;
/* set va params */
struct runtime_va_params_t params = create_args.params;
struct runtime_pa_params pa_params;
pa_params.dram_base = base;
pa_params.dram_size = size;
pa_params.runtime_base = create_args.runtime_paddr;
pa_params.user_base = create_args.user_paddr;
pa_params.free_base = create_args.free_paddr;
// allocate eid
ret = SBI_ERR_SM_ENCLAVE_NO_FREE_RESOURCE;
if (encl_alloc_eid(&eid) != SBI_ERR_SM_ENCLAVE_SUCCESS)
goto error;
// create a PMP region bound to the enclave
ret = SBI_ERR_SM_ENCLAVE_PMP_FAILURE;
if(pmp_region_init_atomic(base, size, PMP_PRI_ANY, &region, 0))
goto free_encl_idx;
// create PMP region for shared memory
if(pmp_region_init_atomic(0, -1UL, PMP_PRI_BOTTOM, &shared_region, 1))
goto free_region;
// set pmp registers for private region (not shared)
if(pmp_set_global(region, PMP_NO_PERM))
goto free_shared_region;
// cleanup some memory regions for sanity See issue #38
clean_enclave_memory(utbase, utsize);
// initialize enclave metadata
enclaves[eid].eid = eid;
enclaves[eid].regions[0].pmp_rid = region;
enclaves[eid].regions[0].type = REGION_EPM;
enclaves[eid].regions[1].pmp_rid = shared_region;
enclaves[eid].regions[1].type = REGION_UTM;
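// the enclave's initial page-table root is the first page of the EPM
// (its PPN is base >> RISCV_PGSHIFT)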
#if __riscv_xlen == 32
enclaves[eid].encl_satp = ((base >> RISCV_PGSHIFT) | (SATP_MODE_SV32 << HGATP_MODE_SHIFT));
#else
enclaves[eid].encl_satp = ((base >> RISCV_PGSHIFT) | (SATP_MODE_SV39 << HGATP_MODE_SHIFT));
#endif
enclaves[eid].n_thread = 0;
enclaves[eid].params = params;
enclaves[eid].pa_params = pa_params;
/* Init enclave state (regs etc) */
clean_state(&enclaves[eid].threads[0]);
/* Platform create happens as the last thing before hashing/etc since
it may modify the enclave struct */
ret = platform_create_enclave(&enclaves[eid]);
if (ret)
goto unset_region;
/* Validate memory, prepare hash and signature for attestation */
spin_lock(&encl_lock); // FIXME: this should error on a second entry.
ret = validate_and_hash_enclave(&enclaves[eid]);
/* The enclave is fresh if it has been validated and hashed but not run yet. */
if (ret)
goto unlock;
enclaves[eid].state = FRESH;
/* EIDs fit in an unsigned long, so a plain assignment suffices */
*eidptr = eid;
spin_unlock(&encl_lock);
return SBI_ERR_SM_ENCLAVE_SUCCESS;
unlock:
spin_unlock(&encl_lock);
// free_platform:
platform_destroy_enclave(&enclaves[eid]);
unset_region:
pmp_unset_global(region);
free_shared_region:
pmp_region_free_atomic(shared_region);
free_region:
pmp_region_free_atomic(region);
free_encl_idx:
encl_free_eid(eid);
error:
return ret;
}
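/* Illustrative caller (hypothetical dispatcher code; the actual sm-sbi.c
 * handler may differ): the host passes the physical address of its argument
 * struct, which is first copied into SM memory, then consumed here.
 *
 *   struct keystone_sbi_create create_args;
 *   unsigned long eid;
 *   ret = copy_enclave_create_args(host_args_paddr, &create_args);
 *   if (!ret)
 *     ret = create_enclave(&eid, create_args);
 */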
/*
* Fully destroys an enclave
* Deallocates EID, clears epm, etc
* Fails if the enclave does not exist or is still running.
*/
unsigned long destroy_enclave(enclave_id eid)
{
int destroyable;
spin_lock(&encl_lock);
destroyable = (ENCLAVE_EXISTS(eid)
&& enclaves[eid].state <= STOPPED);
/* update the enclave state first so that
* no SM can run the enclave any longer */
if(destroyable)
enclaves[eid].state = DESTROYING;
spin_unlock(&encl_lock);
if(!destroyable)
return SBI_ERR_SM_ENCLAVE_NOT_DESTROYABLE;
// 0. Let the platform specifics do cleanup/modifications
platform_destroy_enclave(&enclaves[eid]);
// 1. clear all the data in the enclave pages
// requires no lock (single runner)
int i;
void* base;
size_t size;
region_id rid;
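// UTM pages belong to the host and are not zeroed here; the UTM PMP
// region is released separately in step 2 below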
for(i = 0; i < ENCLAVE_REGIONS_MAX; i++){
if(enclaves[eid].regions[i].type == REGION_INVALID ||
enclaves[eid].regions[i].type == REGION_UTM)
continue;
//1.a Clear all pages
rid = enclaves[eid].regions[i].pmp_rid;
base = (void*) pmp_region_get_addr(rid);
size = (size_t) pmp_region_get_size(rid);
sbi_memset((void*) base, 0, size);
//1.b free pmp region
pmp_unset_global(rid);
pmp_region_free_atomic(rid);
}
// 2. free pmp region for UTM
rid = get_enclave_region_index(eid, REGION_UTM);
if(rid != -1)
pmp_region_free_atomic(enclaves[eid].regions[rid].pmp_rid);
enclaves[eid].encl_satp = 0;
enclaves[eid].n_thread = 0;
enclaves[eid].params = (struct runtime_va_params_t) {0};
enclaves[eid].pa_params = (struct runtime_pa_params) {0};
for(i=0; i < ENCLAVE_REGIONS_MAX; i++){
enclaves[eid].regions[i].type = REGION_INVALID;
}
// 3. release eid
encl_free_eid(eid);
return SBI_ERR_SM_ENCLAVE_SUCCESS;
}
unsigned long run_enclave(struct sbi_trap_regs *regs, enclave_id eid)
{
int runable;
spin_lock(&encl_lock);
runable = (ENCLAVE_EXISTS(eid)
&& enclaves[eid].state == FRESH);
if(runable) {
enclaves[eid].state = RUNNING;
enclaves[eid].n_thread++;
}
spin_unlock(&encl_lock);
if(!runable) {
return SBI_ERR_SM_ENCLAVE_NOT_FRESH;
}
// Enclave is OK to run, context switch to it
context_switch_to_enclave(regs, eid, 1);
return SBI_ERR_SM_ENCLAVE_SUCCESS;
}
unsigned long exit_enclave(struct sbi_trap_regs *regs, enclave_id eid)
{
int exitable;
spin_lock(&encl_lock);
exitable = enclaves[eid].state == RUNNING;
if (exitable) {
enclaves[eid].n_thread--;
if(enclaves[eid].n_thread == 0)
enclaves[eid].state = STOPPED;
}
spin_unlock(&encl_lock);
if(!exitable)
return SBI_ERR_SM_ENCLAVE_NOT_RUNNING;
context_switch_to_host(regs, eid, 0);
return SBI_ERR_SM_ENCLAVE_SUCCESS;
}
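/* Stops a running enclave so the host can service the given request,
 * either a pending timer interrupt or an edge call into the host; the
 * distinct return codes let the host-side driver tell the two apart. */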
unsigned long stop_enclave(struct sbi_trap_regs *regs, uint64_t request, enclave_id eid)
{
int stoppable;
spin_lock(&encl_lock);
stoppable = enclaves[eid].state == RUNNING;
if (stoppable) {
enclaves[eid].n_thread--;
if(enclaves[eid].n_thread == 0)
enclaves[eid].state = STOPPED;
}
spin_unlock(&encl_lock);
if(!stoppable)
return SBI_ERR_SM_ENCLAVE_NOT_RUNNING;
context_switch_to_host(regs, eid, request == STOP_EDGE_CALL_HOST);
switch(request) {
case(STOP_TIMER_INTERRUPT):
return SBI_ERR_SM_ENCLAVE_INTERRUPTED;
case(STOP_EDGE_CALL_HOST):
return SBI_ERR_SM_ENCLAVE_EDGE_CALL_HOST;
default:
return SBI_ERR_SM_ENCLAVE_UNKNOWN_ERROR;
}
}
unsigned long resume_enclave(struct sbi_trap_regs *regs, enclave_id eid)
{
int resumable;
spin_lock(&encl_lock);
resumable = (ENCLAVE_EXISTS(eid)
&& (enclaves[eid].state == RUNNING || enclaves[eid].state == STOPPED)
&& enclaves[eid].n_thread < MAX_ENCL_THREADS);
if(!resumable) {
spin_unlock(&encl_lock);
return SBI_ERR_SM_ENCLAVE_NOT_RESUMABLE;
} else {
enclaves[eid].n_thread++;
enclaves[eid].state = RUNNING;
}
spin_unlock(&encl_lock);
// Enclave is OK to resume, context switch to it
context_switch_to_enclave(regs, eid, 0);
return SBI_ERR_SM_ENCLAVE_SUCCESS;
}
unsigned long attest_enclave(uintptr_t report_ptr, uintptr_t data, uintptr_t size, enclave_id eid)
{
int attestable;
struct report report;
int ret;
if (size > ATTEST_DATA_MAXLEN)
return SBI_ERR_SM_ENCLAVE_ILLEGAL_ARGUMENT;
spin_lock(&encl_lock);
attestable = (ENCLAVE_EXISTS(eid)
&& (enclaves[eid].state >= FRESH));
if(!attestable) {
ret = SBI_ERR_SM_ENCLAVE_NOT_INITIALIZED;
goto err_unlock;
}
/* copy data to be signed */
ret = copy_enclave_data(&enclaves[eid], report.enclave.data,
data, size);
report.enclave.data_len = size;
if (ret) {
ret = SBI_ERR_SM_ENCLAVE_NOT_ACCESSIBLE;
goto err_unlock;
}
spin_unlock(&encl_lock); // release the lock while signing, which may take a while, so others are not blocked
sbi_memcpy(report.dev_public_key, dev_public_key, PUBLIC_KEY_SIZE);
sbi_memcpy(report.sm.hash, sm_hash, MDSIZE);
sbi_memcpy(report.sm.public_key, sm_public_key, PUBLIC_KEY_SIZE);
sbi_memcpy(report.sm.signature, sm_signature, SIGNATURE_SIZE);
sbi_memcpy(report.enclave.hash, enclaves[eid].hash, MDSIZE);
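/* Sign only the populated prefix of the report: the enclave_report minus
 * its trailing signature field and the unused tail of the data buffer
 * beyond the caller's `size` bytes. */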
sm_sign(report.enclave.signature,
&report.enclave,
sizeof(struct enclave_report)
- SIGNATURE_SIZE
- ATTEST_DATA_MAXLEN + size);
spin_lock(&encl_lock);
/* copy report to the enclave */
ret = copy_enclave_report(&enclaves[eid],
report_ptr,
&report);
if (ret) {
ret = SBI_ERR_SM_ENCLAVE_ILLEGAL_ARGUMENT;
goto err_unlock;
}
ret = SBI_ERR_SM_ENCLAVE_SUCCESS;
err_unlock:
spin_unlock(&encl_lock);
return ret;
}
unsigned long get_sealing_key(uintptr_t sealing_key, uintptr_t key_ident,
size_t key_ident_size, enclave_id eid)
{
struct sealing_key *key_struct = (struct sealing_key *)sealing_key;
int ret;
/* derive key */
ret = sm_derive_sealing_key((unsigned char *)key_struct->key,
(const unsigned char *)key_ident, key_ident_size,
(const unsigned char *)enclaves[eid].hash);
if (ret)
return SBI_ERR_SM_ENCLAVE_UNKNOWN_ERROR;
/* sign derived key */
sm_sign((void *)key_struct->signature, (void *)key_struct->key,
SEALING_KEY_SIZE);
return SBI_ERR_SM_ENCLAVE_SUCCESS;
}
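/* Illustrative enclave-side use (hypothetical wrapper and call number; the
 * real runtime interface may differ): the derived key is bound to both the
 * caller-chosen identifier and this enclave's measurement, so distinct
 * enclaves requesting the same identifier obtain distinct keys.
 *
 *   struct sealing_key key;
 *   const char ident[] = "file-store-v1";
 *   sbi_call(SBI_SM_GET_SEALING_KEY, (uintptr_t)&key,
 *            (uintptr_t)ident, sizeof(ident));
 */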