diff --git a/includes/spec-wasm2c-prefix.c b/includes/spec-wasm2c-prefix.c
index bc70c57..cace450 100644
--- a/includes/spec-wasm2c-prefix.c
+++ b/includes/spec-wasm2c-prefix.c
@@ -222,7 +222,7 @@ uint32_t* Z_spectestZ_global_i32Z_i = &spectest_global_i32;
  */
 static void init_spectest_module(void) {
-  wasm_rt_allocate_memory(&spectest_memory_memory, 1, 2);
+  wasm_rt_allocate_memory(&spectest_memory_memory, 1, 2, true);
   wasm_rt_allocate_table(&spectest_table_table, 10, 20);
 }
 
diff --git a/src/lib/cmmcompile/cmm_rts.ml b/src/lib/cmmcompile/cmm_rts.ml
index f1dd22c..df167ba 100644
--- a/src/lib/cmmcompile/cmm_rts.ml
+++ b/src/lib/cmmcompile/cmm_rts.ml
@@ -147,18 +147,22 @@ module Memory = struct
             [root; eo], nodbg)
         else base_expr in
-    if is_value eo then
-      with_mem_check
-        ~root
-        ~effective_offset:eo
-        ~chunk ~expr:(expr eo)
-    else
-      Clet (eo_ident, eo,
-        with_mem_check
-          ~root
-          ~effective_offset:eo_var
-          ~chunk
-          ~expr:(expr eo_var))
+    if Util.Command_line.explicit_bounds_checks () then
+      begin
+        if is_value eo then
+          with_mem_check
+            ~root
+            ~effective_offset:eo
+            ~chunk ~expr:(expr eo)
+        else
+          Clet (eo_ident, eo,
+            with_mem_check
+              ~root
+              ~effective_offset:eo_var
+              ~chunk
+              ~expr:(expr eo_var))
+      end
+    else expr eo
 
   let store ~root ~dynamic_pointer ~(op:Libwasm.Ast.storeop) ~to_store =
     let open Libwasm.Types in
@@ -176,18 +180,22 @@ module Memory = struct
       Cop (Cstore (chunk, Assignment),
        [effective_address root eo; to_store], nodbg) in
-    if is_value eo then
-      with_mem_check
-        ~root
-        ~effective_offset:eo
-        ~chunk ~expr:(expr eo)
-    else
-      Clet (eo_ident, eo,
-        with_mem_check
-          ~root
-          ~effective_offset:eo_var
-          ~chunk
-          ~expr:(expr eo_var))
+    if Util.Command_line.explicit_bounds_checks () then
+      begin
+        if is_value eo then
+          with_mem_check
+            ~root
+            ~effective_offset:eo
+            ~chunk ~expr:(expr eo)
+        else
+          Clet (eo_ident, eo,
+            with_mem_check
+              ~root
+              ~effective_offset:eo_var
+              ~chunk
+              ~expr:(expr eo_var))
+      end
+    else expr eo
 
   let grow root pages =
     (* I *think* it's safe to put false as allocation flag here, since
diff --git a/src/lib/cmmcompile/dce.ml b/src/lib/cmmcompile/dce.ml
index ee03cc7..0fc0824 100644
--- a/src/lib/cmmcompile/dce.ml
+++ b/src/lib/cmmcompile/dce.ml
@@ -17,11 +17,6 @@ let empty_dce_info usages pure expr = {
   expr
 }
 
-(* An identifier used where we need to perform an action, but don't need
- * the result. This means we have fewer variables for reg allocation, but
- * still perform the necessary side-effects *)
-let impure_ident = Ident.create "impure"
-
 let increment_usages ht ident =
   (* Function parameters aren't in the hashtable. *)
   let ident_name = Ident.name ident in
@@ -92,6 +87,8 @@ let populate_info ht expr =
       | Cexit (_, es) -> all_pure es
       | Ctrywith (e1, _, e2) -> all_pure [e1; e2]
       | Cop (Capply _, es, _)
+      | Cop (Cdivi, es, _)
+      | Cop (Cdiviu, es, _)
       | Cop (Cextcall _, es, _)
       | Cop (Cextcall_indirect _, es, _)
      | Cop (Cload _, es, _)
@@ -134,10 +131,6 @@ let dce ht expr =
         (* Only used once, so reasonable to inline. We can kill
          * the binding. *)
        go e2
-      else if dce_info.usages = 0 && (dce_info.pure = Impure) then
-        (* Redundant load / store, but we still need to perform it.
-         * Bind it to the "impure" identifier. *)
-        Clet (impure_ident, go e1, go e2)
      else
        (* Otherwise, we need the binding. *)
        Clet (ident, go e1, go e2)
diff --git a/src/lib/cmmcompile/gencmm.ml b/src/lib/cmmcompile/gencmm.ml
index 1d94bb8..73abb2c 100644
--- a/src/lib/cmmcompile/gencmm.ml
+++ b/src/lib/cmmcompile/gencmm.ml
@@ -241,27 +241,12 @@ let compile_binop env op v1 v2 =
   let open Libwasm.Values in
   let is_32 = match Var.type_ v1 with | I32Type -> true | _ -> false in
   let (e1, e2) = (rv env v1, rv env v2) in
+  let to_natint x =
+    match x with
+      | Cconst_int y -> Cconst_natint (Nativeint.of_int y)
+      | x -> x in
 
-  let division_operation normalise div_f =
-    let ty = Var.type_ v2 in
-    let cmp =
-      match ty with
-        | I32Type -> Cconst_int 0
-        | I64Type -> Cconst_natint Nativeint.zero
-        | _ -> assert false in
-    let normal_e1 = normalise ty e1 in
-    let normal_e2 = normalise ty e2 in
-    let norm_e1_ident = Ident.create "_norme1" in
-    let norm_e2_ident = Ident.create "_norme2" in
-    Clet (norm_e1_ident, normal_e1,
-      Clet (norm_e2_ident, normal_e2,
-        Cifthenelse (
-          Cop (Ccmpi Ceq, [Cvar norm_e2_ident; cmp], nodbg),
-          trap TrapDivZero,
-          div_f (Cvar norm_e1_ident) (Cvar norm_e2_ident)
-        )
-      )
-    ) in
+(* Will add these back in. *)
 
   let overflow_check_1 =
     if is_32 then
@@ -271,90 +256,161 @@ let compile_binop env op v1 v2 =
   let overflow_check_2 = Nativeint.minus_one in
 
-  let div_overflow_check signed norm_e1 norm_e2 operation overflow =
-    if signed then
-      Cifthenelse (
-        Cop (Ccmpi Cne,
-          [(Cop (Cand, [
-            Cop (Ccmpi Ceq, [norm_e1; Cconst_natint overflow_check_1], nodbg);
-            Cop (Ccmpi Ceq, [norm_e2; Cconst_natint overflow_check_2], nodbg)], nodbg));
-          Cconst_natint 0n], nodbg),
-        overflow,
-        operation)
-    else
-      operation in
+  let op_and e1 e2 =
+    Cop (Ccmpa Cne, [Cop (Cand, [e1; e2], nodbg);
+      Cconst_natint 0n], nodbg) in
 
-  let to_natint x =
-    match x with
-      | Cconst_int y -> Cconst_natint (Nativeint.of_int y)
-      | x -> x in
+  let op_eq e1 e2 = Cop (Ccmpa Ceq, [e1; e2], nodbg) in
 
-  let divide signed =
-    let norm_fn = if signed then normalise_signed else normalise_unsigned in
-    let div_op = if signed then Cdivi else Cdiviu in
+  let op_or e1 e2 =
+    Cop (Ccmpa Cne, [Cop (Cor, [e1; e2], nodbg); Cconst_natint 0n], nodbg) in
+
+  (* Check if integer division would overflow.
+   * If so, on_overflow is returned; if not, on_no_overflow is returned. *)
+  let check_overflow e1 e2 on_overflow on_no_overflow =
+    Cifthenelse (op_and
+      (op_eq e1 (Cconst_natint overflow_check_1))
+      (op_eq e2 (Cconst_natint overflow_check_2)),
+      (* If so, return the supplied overflow result. *)
+      on_overflow,
+      (* Otherwise, run the normal operation. *)
+      on_no_overflow) in
+
+  (* Checks division by zero. If so, traps. *)
+  let check_division_by_zero divisor e =
+    Cifthenelse (
+      op_eq divisor (Cconst_int 0),
+      trap TrapDivZero,
+      e) in
 
-    division_operation
-      norm_fn
-      (fun norm_e1 norm_e2 ->
-        let nat_e1 = to_natint norm_e1 in
-        let nat_e2 = to_natint norm_e2 in
-        match nat_e1, nat_e2 with
-          | Cconst_natint i1, Cconst_natint i2 when signed ->
-              if i2 = 0n then trap TrapDivZero else
-              if i1 = overflow_check_1 && i2 = overflow_check_2 then
-                trap TrapIntOverflow else
-              Cconst_natint (Nativeint.div i1 i2)
-          | _ ->
-            let div = Cop (div_op, [norm_e1; norm_e2], nodbg) in
-            div_overflow_check
-              signed norm_e1 norm_e2 div (trap TrapIntOverflow)) in
   let rem signed =
-    let norm_fn = if signed then normalise_signed else normalise_unsigned in
+    let ty = Var.type_ v1 in
+    let normalise =
+      if signed then normalise_signed ty else normalise_unsigned ty in
     let div_op = if signed then Cdivi else Cdiviu in
-    division_operation
-      norm_fn
-      (fun norm_e1 norm_e2 ->
-        let nat_e1 = to_natint norm_e1 in
-        let nat_e2 = to_natint norm_e2 in
-        match nat_e1, nat_e2 with
-          | Cconst_natint i1, Cconst_natint i2 when signed ->
-              if i2 = 0n then trap TrapDivZero else
-              if i1 = overflow_check_1 && i2 = overflow_check_2 then
-                trap TrapIntOverflow else
-              Cconst_natint (Nativeint.rem i1 i2)
-          | _ ->
-            let op =
-              Cop (Csubi, [norm_e1;
+    let nat_e1 = to_natint (normalise e1) in
+    let nat_e2 = to_natint (normalise e2) in
+    match nat_e1, nat_e2 with
+      | Cconst_natint i1, Cconst_natint i2 when signed ->
+          if i2 = 0n then trap TrapDivZero else
+          Cconst_natint (Nativeint.rem i1 i2)
+      | _, Cconst_natint i2 when i2 = 0n -> trap TrapDivZero
+      | _ ->
+        (* In the case of remainder, we still need to special-case the overflow
+         * in the underlying division, since the remainder itself doesn't actually overflow (it is 0). *)
+        let remainder_op =
+          Cop (Csubi, [nat_e1;
            Cop (Cmuli, [
-              Cop (div_op, [norm_e1; norm_e2], nodbg);
-              norm_e2], nodbg)], nodbg) in
-        div_overflow_check signed norm_e1 norm_e2 op (Cconst_int 0)) in
+              Cop (div_op, [nat_e1; nat_e2], nodbg);
+              nat_e2], nodbg)], nodbg) in
+
+        if signed then
+          remainder_op
+          |> check_division_by_zero nat_e2
+          |> check_overflow nat_e1 nat_e2 (Cconst_int 0)
+        else check_division_by_zero nat_e2 remainder_op in
+
+  let div signed =
+    let ty = Var.type_ v1 in
+    let normalise =
+      if signed then normalise_signed ty else normalise_unsigned ty in
+    let nat_e1 = to_natint (normalise e1) in
+    let nat_e2 = to_natint (normalise e2) in
+    let divide_op =
+      let op = if signed then Cdivi else Cdiviu in
+      Cop (op, [nat_e1; nat_e2], nodbg) in
+    match nat_e1, nat_e2 with
+      | Cconst_natint i1, Cconst_natint i2 when signed ->
+          if i2 = 0n then trap TrapDivZero else
+          if i1 = overflow_check_1 && i2 = overflow_check_2 then
+            trap TrapIntOverflow else
+          Cconst_natint (Nativeint.div i1 i2)
+      | _, Cconst_natint 0n -> trap TrapDivZero
+      | e1, Cconst_natint i when i = overflow_check_2 && signed ->
+          Cifthenelse (op_eq e1 (Cconst_natint overflow_check_1),
+            trap TrapIntOverflow,
+            divide_op)
+      | Cconst_natint i, e2 when i = overflow_check_1 && signed ->
+          (* In the case that the numerator could lead to a possible
+           * overflow, we need to check whether the divisor is either
+           * the second overflow constant, or zero. *)
+          Cifthenelse (
+            op_or
+              (op_eq e2 (Cconst_natint overflow_check_2))
+              (op_eq e2 (Cconst_int 0)),
+            trap TrapDivZero, (* Slightly inaccurate for the overflow case, but eh *)
+            divide_op
+          )
+      | Cconst_natint _, e2 ->
+          (* Otherwise we just have to check for division by zero *)
+          Cifthenelse (
+            op_eq e2 (Cconst_int 0),
+            trap TrapDivZero,
+            divide_op
+          )
+      | _ ->
+          if signed then
+            divide_op
+            |> check_division_by_zero nat_e2
+            |> check_overflow nat_e1 nat_e2 (trap TrapIntOverflow)
+          else check_division_by_zero nat_e2 divide_op in
+
+
+  let nativeint_mod n1 n2 =
+    Nativeint.(sub n1 (mul (div n1 n2) n2)) in
+
   (* shift_left: need to normalise and mod RHS, but not LHS *)
   let shift_left =
     let ty = Var.type_ v1 in
     (* Must normalise e2 and then get e2 % int width before doing the shift *)
-    let normalised_rhs = normalise_unsigned ty e2 in
-    let mod_base = if is_32 then Cconst_int 32 else Cconst_int 64 in
-    (Cop (Clsl, ([e1;
-      Cop (Cmodi, [normalised_rhs; mod_base], nodbg)]), nodbg)) in
+    let e2 = normalise_unsigned ty e2 in
+
+    let nat_e1 = to_natint e1 in
+    let nat_e2 = to_natint e2 in
+    let mod_base = if is_32 then 32n else 64n in
+
+    let shift_length =
+      match nat_e2 with
+        | Cconst_natint i ->
+            Cconst_natint (nativeint_mod i mod_base)
+        | _ -> Cop (Cmodi, [e2; Cconst_natint mod_base], nodbg) in
+
+    match nat_e1, shift_length with
+      | Cconst_natint i1, Cconst_natint i2 ->
+          Cconst_natint (Nativeint.(shift_left i1 (to_int i2)))
+      | _ -> Cop (Clsl, ([e1; shift_length]), nodbg) in
 
   (* shift_right: need to normalise LHS, and both normalise and mod RHS. *)
   let shift_right signed =
     let ty = Var.type_ v1 in
     (* Normalise LHS *)
-    let normalised_lhs =
+    let e1 =
       if signed then normalise_signed ty e1 else normalise_unsigned ty e1 in
-    (* Must normalise e2 and then get e2 % int width before doing the shift *)
-    let normalised_rhs = normalise_unsigned ty e2 in
-    let mod_base = if is_32 then Cconst_int 32 else Cconst_int 64 in
+    let e2 = normalise_unsigned ty e2 in
+
+    let nat_e1 = to_natint e1 in
+    let nat_e2 = to_natint e2 in
+    let mod_base = if is_32 then 32n else 64n in
 
     let op = if signed then Casr else Clsr in
-    (Cop (op, ([normalised_lhs;
-      Cop (Cmodi, [normalised_rhs; mod_base], nodbg)]), nodbg)) in
+
+    let shift_length =
+      match nat_e2 with
+        | Cconst_natint i ->
+            Cconst_natint (nativeint_mod i mod_base)
+        | _ -> Cop (Cmodi, [e2; Cconst_natint mod_base], nodbg) in
+
+    match nat_e1, shift_length with
+      | Cconst_natint i1, Cconst_natint i2 ->
+          if signed then
+            Cconst_natint (Nativeint.(shift_right i1 (to_int i2)))
+          else
+            Cconst_natint (Nativeint.(shift_right_logical i1 (to_int i2)))
+      | _ -> Cop (op, ([e1; shift_length]), nodbg) in
 
   (* Rotation as two shifts and an or, Hacker's delight, p.37 *)
@@ -392,7 +448,7 @@ let compile_binop env op v1 v2 =
     Clet (distance_ident, distance,
       rotate_left e1 (Cvar distance_ident) width) in
 
-  let rotate_right_i32 =
+  let rotate_right_i32 =
     let width = 32 in
     let low_set_ident = Ident.create "ror32_low_set" in
     let low_set = unset_high_32 e1 in
@@ -412,7 +468,6 @@ let compile_binop env op v1 v2 =
     Clet (distance_ident, distance,
       rotate_right e1 (Cvar distance_ident) width) in
-
   let rotate_left = if is_32 then rotate_left_i32 else rotate_left_i64 in
   let rotate_right = if is_32 then rotate_right_i32 else rotate_right_i64 in
 
@@ -420,6 +475,9 @@ let compile_binop env op v1 v2 =
   let cf32 op = compile_f32_op env op v1 v2 in
   (* let cn op signed = compile_op_normalised signed env op v1 v2 in *)
+
+  (* TODO: This is all so brittle (and requires RTS calls anyway) that I think
+   * it would be better to implement it in C. *)
   let min_or_max ~is_f32 ~is_min =
     let (e1, e2) = (rv env v1, rv env v2) in
     let op = if is_min then CFle else CFge in
@@ -468,8 +526,8 @@ let compile_binop env op v1 v2 =
       | Add -> cs Caddi
       | Sub -> cs Csubi
      | Mul -> cs Cmuli
-      | DivS -> divide true
-      | DivU -> divide false
+      | DivS -> div true
+      | DivU -> div false
      | RemS -> rem true
      | RemU -> rem false
      | And -> cs Cand
@@ -1154,14 +1212,25 @@ let init_function module_name env (ir_mod: Stackless.module_) data_info =
          | Some x -> Cconst_natint (Nativeint.of_int32 x)
          | None -> Cconst_natint (Nativeint.of_int max_addressable_pages)
        end in
-    (* Perform allocation, then initialise data *)
-    Csequence (
-      Cop (
-        Cextcall ("wasm_rt_allocate_memory", typ_void, false, None),
-        [memory_symbol; min_pages; max_pages],
-        nodbg
-      ),
-      Clet (root_ident, root, init_data)) in
+    (* Perform allocation, then initialise data *)
+
+    let calls =
+      if Util.Command_line.explicit_bounds_checks () then
+        [
+          Cop (
+            Cextcall ("wasm_rt_allocate_memory", typ_void, false, None),
+            [memory_symbol; min_pages; max_pages; Cconst_int 0],
+            nodbg
+          );
+        ]
+      else
+        [
+          Cop (
+            Cextcall ("wasm_rt_allocate_memory", typ_void, false, None),
+            [memory_symbol; min_pages; max_pages; Cconst_int 1],
+            nodbg
+          )] in
+    Csequence (call_seq calls, Clet (root_ident, root, init_data)) in
 
   (* Allocate table if required, then initialise the table with elements *)
   let table_body =
@@ -1205,6 +1274,11 @@ let init_function module_name env (ir_mod: Stackless.module_) data_info =
        let fuel = Cconst_int (Util.Command_line.initial_fuel ()) in
        Cop (Capply typ_void, [fn_symbol; fuel], nodbg)
    | _ -> Ctuple [] in
 
+  let setup_signal_handlers =
+    Cop (
+      Cextcall ("wasm_rt_setup_signal_handlers", typ_void, false, None),
+      [], nodbg) in
+
   (* Sequence all instructions. Later passes perform the `() ; M ~~> M`
    * and `M ; () ~~> M` translations, so we don't have to worry about unit
@@ -1213,6 +1287,7 @@ let init_function module_name env (ir_mod: Stackless.module_) data_info =
      global_body;
      memory_body;
      table_body;
+      setup_signal_handlers;
      start_body
    ] in
@@ -1320,11 +1395,11 @@ let module_memory env (ir_mod: Stackless.module_) export_info =
        [Cglobal_symbol name; Cdefine_symbol name])
      export_info.memory_symbols |> List.concat in
 
-  (* Memory symbol: needs 3 words of space to store struct created by RTS *)
+  (* Memory symbol: needs 4 words of space to store struct created by RTS *)
   (* Must only define a memory symbol for an internal memory symbol! *)
   match ir_mod.memory_metadata with
     | Some (LocalMemory _) ->
-        let struct_size = Arch.size_int * 3 in
+        let struct_size = Arch.size_int * 4 in
        let memory_symb =
          [Cdefine_symbol (Compile_env.memory_symbol env); Cskip struct_size] in
        memory_exports @ memory_symb
diff --git a/src/lib/util/command_line.ml b/src/lib/util/command_line.ml
index 295994b..ec0cf9e 100644
--- a/src/lib/util/command_line.ml
+++ b/src/lib/util/command_line.ml
@@ -15,6 +15,7 @@ module Refs = struct
   let keep_temp = ref false
   let prefix = ref None
   let colouring_allocator = ref false
+  let explicit_bounds_checks = ref false
 end
 
 let options =
@@ -32,7 +33,8 @@ let options =
     (noshort, "fuel", None, Some (fun i -> Refs.initial_fuel := (int_of_string i)));
     ('t', "keep-temp", Some (fun () -> Refs.keep_temp := true), None);
     ('p', "prefix", None, Some (fun s -> Refs.prefix := (Some s)));
-    (noshort, "colouring", Some (fun () -> Refs.colouring_allocator := true), None)
+    (noshort, "colouring", Some (fun () -> Refs.colouring_allocator := true), None);
+    (noshort, "explicit-bounds-checks", Some (fun () -> Refs.explicit_bounds_checks := true), None)
   ]
 
 let set_filename fn = Refs.filename := fn
@@ -98,3 +100,6 @@ let keep_temp () = !Refs.keep_temp
 let prefix () = !Refs.prefix
 
 let colouring_allocator () = !Refs.colouring_allocator
+
+let explicit_bounds_checks () = !Refs.explicit_bounds_checks
+
diff --git a/src/lib/util/command_line.mli b/src/lib/util/command_line.mli
index fd9d181..831626c 100644
--- a/src/lib/util/command_line.mli
+++ b/src/lib/util/command_line.mli
@@ -15,3 +15,4 @@ val initial_fuel : unit -> int
 val keep_temp : unit -> bool
 val prefix : unit -> string option
 val colouring_allocator : unit -> bool
+val explicit_bounds_checks : unit -> bool
diff --git a/src/rts/wasm-rt-impl.c b/src/rts/wasm-rt-impl.c
index 038d682..ac9dc5d 100644
--- a/src/rts/wasm-rt-impl.c
+++ b/src/rts/wasm-rt-impl.c
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
+#define _GNU_SOURCE
 #include "wasm-rt-impl.h"
-
 #include
 #include
 #include
@@ -25,6 +25,10 @@
 #include
 #include
 #include
+#include
+#include
+#include
+#include
 
 #define PAGE_SIZE 65536
 
@@ -37,27 +41,102 @@ void wasm_rt_trap(wasm_rt_trap_t code) {
   longjmp(g_jmp_buf, code);
 }
 
+
+// Handles segmentation faults when using mmap'ed memory,
+// allowing memory violations to be reported gracefully
+// as traps.
+void wasm_rt_sigsegv_handler(int sig, siginfo_t* info, void* something_else) {
+  longjmp(g_jmp_buf, WASM_RT_TRAP_OOB);
+}
+
+
+void wasm_rt_setup_signal_handlers() {
+  struct sigaction sa;
+  memset(&sa, 0, sizeof(struct sigaction));
+  sigemptyset(&(sa.sa_mask));
+
+  sa.sa_flags = SA_NODEFER;
+  sa.sa_sigaction = wasm_rt_sigsegv_handler;
+
+  sigaction(SIGSEGV, &sa, NULL);
+}
+
+void wasm_rt_malloc_memory(wasm_rt_memory_t* memory) {
+  memory->data = calloc(memory->size, 1);
+}
+
+// mmap allows us to allocate an area of memory such that we are
+// guaranteed to raise a segmentation fault should the access
+// exceed the memory bounds.
+void wasm_rt_mmap_memory(wasm_rt_memory_t* memory) {
+  // Reserve (but don't allocate!) 8GB.
+  uint64_t size = 0x00000001FFFFFFFF;
+
+  uint8_t* ptr =
+    (uint8_t*)(mmap(NULL, size, PROT_NONE,
+                    MAP_PRIVATE|MAP_ANONYMOUS, -1, 0));
+
+  if (ptr == MAP_FAILED) {
+    err(1, "mmap");
+    exit(1);
+  }
+
+  // Mark the accessible pages as read/write enabled
+  if (mprotect(ptr, memory->size, PROT_READ|PROT_WRITE) == -1) {
+    err(1, "mprotect");
+    exit(1);
+  }
+
+  memory->data = ptr;
+}
+
 void wasm_rt_allocate_memory(wasm_rt_memory_t* memory,
-                             uint32_t initial_pages,
-                             uint32_t max_pages) {
+                             uint32_t initial_pages,
+                             uint32_t max_pages,
+                             bool use_mmap) {
   memory->pages = initial_pages;
   memory->max_pages = max_pages;
-  memory->size = initial_pages * PAGE_SIZE;
-  memory->data = calloc(memory->size, 1);
+  uint32_t size = initial_pages * PAGE_SIZE;
+  memory->size = size;
+  if (use_mmap) {
+    wasm_rt_mmap_memory(memory);
+    memory->use_mmap = true;
+  } else {
+    wasm_rt_malloc_memory(memory);
+    memory->use_mmap = false;
+  }
 }
+
 uint32_t wasm_rt_grow_memory(wasm_rt_memory_t* memory, uint32_t delta) {
   uint32_t old_pages = memory->pages;
   uint32_t new_pages = memory->pages + delta;
+  // Check whether we can grow
   if (new_pages < old_pages || new_pages > memory->max_pages) {
-    return (uint32_t)-1;
+    return (uint32_t) -1;
   }
-  memory->data = realloc(memory->data, new_pages);
+
+  if (new_pages == 0) {
+    return (uint32_t) 0;
+  }
+
+  // If so, set new pages and new size
+  uint32_t old_size = memory->size;
+  uint32_t new_size = new_pages * PAGE_SIZE;
+  memory->size = new_size;
   memory->pages = new_pages;
-  memory->size = new_pages * PAGE_SIZE;
+
+  // If we're using mmap'ed memory, widen the accessible region with
+  // mprotect; otherwise realloc.
+  if (memory->use_mmap) {
+    mprotect(memory->data, new_size, PROT_READ|PROT_WRITE);
+  } else {
+    memory->data = realloc(memory->data, new_size);
+  }
+
   return old_pages;
 }
+
 void wasm_rt_allocate_table(wasm_rt_table_t* table,
                             uint32_t elements,
                             uint32_t max_elements) {
diff --git a/src/rts/wasm-rt.h b/src/rts/wasm-rt.h
index e110358..265c189 100644
--- a/src/rts/wasm-rt.h
+++ b/src/rts/wasm-rt.h
@@ -18,6 +18,7 @@
 #define WASM_RT_H_
 
 #include <stdint.h>
+#include <stdbool.h>
 
 #ifdef __cplusplus
 extern "C" {
@@ -72,6 +73,8 @@ typedef struct {
   uint32_t pages, max_pages;
   /** The current size of the linear memory, in bytes. */
   uint32_t size;
+  /** Whether the memory is using mmap for allocation. */
+  uint32_t use_mmap;
 } wasm_rt_memory_t;
 
 /** A Table object. */
@@ -94,6 +97,9 @@ extern void wasm_rt_trap(wasm_rt_trap_t) __attribute__((noreturn));
 /** Initialize a Memory object with an initial page size of `initial_pages` and
  * a maximum page size of `max_pages`.
  *
+ * If `use_mmap` is true, then the memory is allocated using `mmap` with
+ * appropriate page protection. Otherwise, it is allocated using `malloc`.
+ *
  * ```
  *   wasm_rt_memory_t my_memory;
  *   // 1 initial page (65536 bytes), and a maximum of 2 pages.
 * ```
 */
@@ -101,7 +107,8 @@
 extern void wasm_rt_allocate_memory(wasm_rt_memory_t*, uint32_t initial_pages,
-                                    uint32_t max_pages);
+                                    uint32_t max_pages,
+                                    bool use_mmap);
 
 /** Grow a Memory object by `pages`, and return the previous page count. If
  * this new page count is greater than the maximum page count, the grow fails
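
The runtime portion of this patch replaces explicit bounds checks with a virtual-memory scheme: linear memory lives inside a large mmap'ed PROT_NONE reservation, only the pages that currently back the Wasm memory are made readable and writable, and a SIGSEGV handler turns any access outside that window into a longjmp back to the trap machinery (wasm_rt_trap / WASM_RT_TRAP_OOB above). The sketch below is a minimal, self-contained illustration of that idea; it is not taken from this repository, the names (trap_buf, segv_handler, WASM_PAGE, RESERVATION) are invented for the example, and it assumes Linux/glibc, where setjmp does not save the signal mask and longjmp out of a signal handler behaves as used here.

#define _GNU_SOURCE
#include <setjmp.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define WASM_PAGE   65536
#define RESERVATION (1ULL << 33)  /* ~8 GiB of address space, mostly PROT_NONE */

static jmp_buf trap_buf;

/* Convert the page fault into a trap by jumping back to the setjmp point. */
static void segv_handler(int sig, siginfo_t* info, void* ctx) {
  (void)sig; (void)info; (void)ctx;
  longjmp(trap_buf, 1);
}

int main(void) {
  /* Install the SIGSEGV handler (compare wasm_rt_setup_signal_handlers). */
  struct sigaction sa;
  memset(&sa, 0, sizeof sa);
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_SIGINFO | SA_NODEFER;
  sa.sa_sigaction = segv_handler;
  sigaction(SIGSEGV, &sa, NULL);

  /* Reserve a large inaccessible region, then enable one Wasm page of it
   * (compare wasm_rt_mmap_memory). */
  uint8_t* mem = mmap(NULL, RESERVATION, PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mem == MAP_FAILED) return 1;
  mprotect(mem, 1 * WASM_PAGE, PROT_READ | PROT_WRITE);

  if (setjmp(trap_buf) == 0) {
    mem[10] = 42;              /* in bounds: plain store, no bounds check */
    mem[5 * WASM_PAGE] = 42;   /* out of bounds: SIGSEGV, handler longjmps */
    puts("unreachable");
  } else {
    puts("trap: out-of-bounds memory access");
  }
  return 0;
}

The roughly 8 GiB reservation in wasm_rt_mmap_memory presumably exists so that a 32-bit index plus a 32-bit static offset can never reach past the reserved region, which is what lets the compiled loads and stores omit the explicit check; the new --explicit-bounds-checks flag keeps the old behaviour (malloc-backed memory plus compiled-in checks) for cases where the mmap/SIGSEGV scheme is not wanted.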