diff --git a/example-code/qemu-aarch64v8a/.cargo/config.toml b/example-code/qemu-aarch64v8a/.cargo/config.toml index 353a64b..65cd244 100644 --- a/example-code/qemu-aarch64v8a/.cargo/config.toml +++ b/example-code/qemu-aarch64v8a/.cargo/config.toml @@ -2,6 +2,8 @@ rustflags = [ "-Clink-arg=-Tlinker.ld", ] +# QEMU Aarch64 boots in EL1 by default +# Add "-machine virtualization=on" to boot in EL2 runner = "qemu-system-aarch64 -machine virt -cpu cortex-a57 -semihosting -nographic -kernel" [build] diff --git a/example-code/qemu-aarch64v8a/Cargo.lock b/example-code/qemu-aarch64v8a/Cargo.lock index 1d0a256..ca340f1 100644 --- a/example-code/qemu-aarch64v8a/Cargo.lock +++ b/example-code/qemu-aarch64v8a/Cargo.lock @@ -3,5 +3,31 @@ version = 3 [[package]] -name = "basic-rust" +name = "critical-section" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7059fff8937831a9ae6f0fe4d658ffabf58f2ca96aa9dec1c889f936f705f216" + +[[package]] +name = "embedded-alloc" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddae17915accbac2cfbc64ea0ae6e3b330e6ea124ba108dada63646fd3c6f815" +dependencies = [ + "critical-section", + "linked_list_allocator", +] + +[[package]] +name = "linked_list_allocator" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9afa463f5405ee81cdb9cc2baf37e08ec7e4c8209442b5d72c04cfb2cd6e6286" + +[[package]] +name = "qemu-aarch64v8a" version = "0.1.0" +dependencies = [ + "critical-section", + "embedded-alloc", +] diff --git a/example-code/qemu-aarch64v8a/Cargo.toml b/example-code/qemu-aarch64v8a/Cargo.toml index 4cdd6be..cf513e2 100644 --- a/example-code/qemu-aarch64v8a/Cargo.toml +++ b/example-code/qemu-aarch64v8a/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "basic-rust" +name = "qemu-aarch64v8a" version = "0.1.0" edition = "2021" authors = ["Ferrous Systems"] @@ -7,6 +7,14 @@ license = "MIT OR Apache-2.0" description = "A simple Aarch64 demo application that runs in QEMU and compiles with Ferrocene" [dependencies] +critical-section = { version = "1.1.2", features = ["restore-state-bool"] } +embedded-alloc = "0.5.1" [profile.release] opt-level = "s" + +[source.crates-io] +replace-with = "vendored-sources" + +[source.vendored-sources] +directory = "vendor" diff --git a/example-code/qemu-aarch64v8a/README.md b/example-code/qemu-aarch64v8a/README.md index 0029814..2747c75 100644 --- a/example-code/qemu-aarch64v8a/README.md +++ b/example-code/qemu-aarch64v8a/README.md @@ -16,17 +16,34 @@ You must first install Ferrocene by executing `criticalup install` inside this folder. This will require a valid CriticalUp token - please see the [CriticalUp documentation](https://criticalup.ferrocene.dev). +To view the project inside VS Code, set your `RUSTC` environment variable to +point at your `criticalup` rustc proxy. On macOS, that can be done with: + +```bash +RUSTC=~/Library/Application\ Support/criticalup/bin/rustc code . +``` + ## Demo contents -This demo is a simple application designed to run inside a QEMU virtual machine. +This demo provides a few simple applications, designed to run inside a QEMU +virtual machine that is emulating an Aarch64 Arm Cortex-A system. Both demos: -1. It prints "Hello, world!" to the first QEMU UART, which is typically +1. Print "Hello, world!" to the first QEMU UART, which is typically connected to the console when you run QEMU. -2. It then causes a `panic!` which causes the custom panic handler to execute. -3. 
The the panic handler also prints to the same UART. -4. The panic handler exits QEMU using a semihosting operation that QEMU +2. Print some floating point numbers in a grid (the 1 though 10 times tables). +3. Causes a `panic!` which causes the custom panic handler to execute. +4. The the panic handler also prints to the same UART. +5. The panic handler exits QEMU using a semihosting operation that QEMU understands to mean "exit QEMU". +There are two binaries in `./src/bin`: + +* `no_heap` runs with no heap +* `with_heap` sets up a heap allocator and uses the `format!` macro to generate + heap-allocated strings, which it then prints. + +Both binaries should produce the same output. + ## Building and Running with `cargo` Ferrocene compiles standard Rust source code, and so this project has also been @@ -40,28 +57,26 @@ the linker script to the `cargo` temporary output directory where the linker will look for it. The compiled outputs will go into `./target/aarch64-none-eabi/`, where -`` is `debug` or `release`. The binary is called `basic-rust`, because +`` is `debug` or `release`. The binary is called `qemu-aarch64v8a`, because that's the name given in the `Cargo.toml` file. ```console -$ criticalup run cargo build --release - Finished release [optimized] target(s) in 0.00s -$ criticalup run cargo run --release - Compiling basic-rust v0.1.0 (/Users/jonathan/work/basic-rust) +$ criticalup run cargo run --release --bin no_heap + Compiling qemu-aarch64v8a v0.1.0 (/Users/jonathan/work/qemu-aarch64v8a) Finished release [optimized] target(s) in 0.16s - Running `qemu-system-aarch64 -machine virt -cpu cortex-a57 -semihosting -nographic -kernel target/aarch64-unknown-none/release/basic-rust` + Running `qemu-system-aarch64 -machine virt -cpu cortex-a57 -semihosting -nographic -kernel target/aarch64-unknown-none/release/qemu-aarch64v8a` Hello, this is Rust! - 1.00 2.00 3.00 4.00 5.00 6.00 7.00 8.00 9.00 10.00 - 2.00 4.00 6.00 8.00 10.00 12.00 14.00 16.00 18.00 20.00 - 3.00 6.00 9.00 12.00 15.00 18.00 21.00 24.00 27.00 30.00 - 4.00 8.00 12.00 16.00 20.00 24.00 28.00 32.00 36.00 40.00 - 5.00 10.00 15.00 20.00 25.00 30.00 35.00 40.00 45.00 50.00 - 6.00 12.00 18.00 24.00 30.00 36.00 42.00 48.00 54.00 60.00 - 7.00 14.00 21.00 28.00 35.00 42.00 49.00 56.00 63.00 70.00 - 8.00 16.00 24.00 32.00 40.00 48.00 56.00 64.00 72.00 80.00 - 9.00 18.00 27.00 36.00 45.00 54.00 63.00 72.00 81.00 90.00 - 10.00 20.00 30.00 40.00 50.00 60.00 70.00 80.00 90.00 100.00 -PANIC: PanicInfo { payload: Any { .. }, message: Some(I am a panic), location: Location { file: "src/main.rs", line: 40, col: 5 }, can_unwind: true, force_no_backtrace: false } + 1.00 2.00 3.00 4.00 5.00 6.00 7.00 8.00 9.00 10.00 + 2.00 4.00 6.00 8.00 10.00 12.00 14.00 16.00 18.00 20.00 + 3.00 6.00 9.00 12.00 15.00 18.00 21.00 24.00 27.00 30.00 + 4.00 8.00 12.00 16.00 20.00 24.00 28.00 32.00 36.00 40.00 + 5.00 10.00 15.00 20.00 25.00 30.00 35.00 40.00 45.00 50.00 + 6.00 12.00 18.00 24.00 30.00 36.00 42.00 48.00 54.00 60.00 + 7.00 14.00 21.00 28.00 35.00 42.00 49.00 56.00 63.00 70.00 + 8.00 16.00 24.00 32.00 40.00 48.00 56.00 64.00 72.00 80.00 + 9.00 18.00 27.00 36.00 45.00 54.00 63.00 72.00 81.00 90.00 + 10.00 20.00 30.00 40.00 50.00 60.00 70.00 80.00 90.00 100.00 +PANIC: PanicInfo { payload: Any { .. 
}, message: Some(I am a panic), location: Location { file: "src/bin/with_heap.rs", line: 61, col: 5 }, can_unwind: true, force_no_backtrace: false } ``` ## Building and Running without `cargo` @@ -73,37 +88,66 @@ This demo includes a [`build.sh`](./build.sh) shell script to build our binary by calling `rustc` directly. This script will: 1. Find the location of the tools it needs -2. Call `criticalup run rustc` to compile `src/main.rs` into `/basic-rust` -3. Generate `asm` and `map` files from the `/basic-rust` using LLVM +2. Call `criticalup run rustc --crate-type=lib` repeatedly, to compile all the + various dependencies (from the `./vendor` folder) +3. Call `criticalup run rustc --crate-type=bin` to compile `src/bin/no_heap.rs` + into `/no_heap` +4. Generate `asm` and `map` files from the `/no_heap` binary using LLVM tools shipped with Ferrocene +5. Compile the `with_heap` binary in the same fashion -The outputs will go into `./target/production` and the binary is called -`basic-rust`. You can choose any suitable directory, but avoid clashing with -anything you do using `cargo`. +The outputs will go into `./target/production` and the binaries are called +`no_heap` and `with_heap`. You can choose any suitable directory, but avoid +clashing with anything you do using `cargo`. ```console $ ./build.sh -Running rustc... -Generating asm... -Generating map... +Running rustc for critical-section +Running rustc for linked-list-allocator +Running rustc for embedded-alloc +Running rustc for lib... +Running rustc for no_heap... +Generating asm for no_heap... +Generating map for no_heap... +Running rustc for with_heap... +Generating asm for with_heap... +Generating map for with_heap... $ qemu-system-aarch64 \ -machine virt \ -cpu cortex-a57 \ -semihosting \ -nographic \ - -kernel ./target/production/basic-rust + -kernel target/production/with_heap +Hello, this is Rust! + 1.00 2.00 3.00 4.00 5.00 6.00 7.00 8.00 9.00 10.00 + 2.00 4.00 6.00 8.00 10.00 12.00 14.00 16.00 18.00 20.00 + 3.00 6.00 9.00 12.00 15.00 18.00 21.00 24.00 27.00 30.00 + 4.00 8.00 12.00 16.00 20.00 24.00 28.00 32.00 36.00 40.00 + 5.00 10.00 15.00 20.00 25.00 30.00 35.00 40.00 45.00 50.00 + 6.00 12.00 18.00 24.00 30.00 36.00 42.00 48.00 54.00 60.00 + 7.00 14.00 21.00 28.00 35.00 42.00 49.00 56.00 63.00 70.00 + 8.00 16.00 24.00 32.00 40.00 48.00 56.00 64.00 72.00 80.00 + 9.00 18.00 27.00 36.00 45.00 54.00 63.00 72.00 81.00 90.00 + 10.00 20.00 30.00 40.00 50.00 60.00 70.00 80.00 90.00 100.00 +PANIC: PanicInfo { payload: Any { .. }, message: Some(I am a panic), location: Location { file: "src/bin/with_heap.rs", line: 61, col: 5 }, can_unwind: true, force_no_backtrace: false } +``` + +Rather than type out the full QEMU command line, you can also use `qemu.sh`: + +```console +$ ./qemu.sh ./target/production/with_heap Hello, this is Rust! - 1.00 2.00 3.00 4.00 5.00 6.00 7.00 8.00 9.00 10.00 - 2.00 4.00 6.00 8.00 10.00 12.00 14.00 16.00 18.00 20.00 - 3.00 6.00 9.00 12.00 15.00 18.00 21.00 24.00 27.00 30.00 - 4.00 8.00 12.00 16.00 20.00 24.00 28.00 32.00 36.00 40.00 - 5.00 10.00 15.00 20.00 25.00 30.00 35.00 40.00 45.00 50.00 - 6.00 12.00 18.00 24.00 30.00 36.00 42.00 48.00 54.00 60.00 - 7.00 14.00 21.00 28.00 35.00 42.00 49.00 56.00 63.00 70.00 - 8.00 16.00 24.00 32.00 40.00 48.00 56.00 64.00 72.00 80.00 - 9.00 18.00 27.00 36.00 45.00 54.00 63.00 72.00 81.00 90.00 - 10.00 20.00 30.00 40.00 50.00 60.00 70.00 80.00 90.00 100.00 -PANIC: PanicInfo { payload: Any { .. 
}, message: Some(I am a panic), location: Location { file: "src/main.rs", line: 40, col: 5 }, can_unwind: true, force_no_backtrace: false } + 1.00 2.00 3.00 4.00 5.00 6.00 7.00 8.00 9.00 10.00 + 2.00 4.00 6.00 8.00 10.00 12.00 14.00 16.00 18.00 20.00 + 3.00 6.00 9.00 12.00 15.00 18.00 21.00 24.00 27.00 30.00 + 4.00 8.00 12.00 16.00 20.00 24.00 28.00 32.00 36.00 40.00 + 5.00 10.00 15.00 20.00 25.00 30.00 35.00 40.00 45.00 50.00 + 6.00 12.00 18.00 24.00 30.00 36.00 42.00 48.00 54.00 60.00 + 7.00 14.00 21.00 28.00 35.00 42.00 49.00 56.00 63.00 70.00 + 8.00 16.00 24.00 32.00 40.00 48.00 56.00 64.00 72.00 80.00 + 9.00 18.00 27.00 36.00 45.00 54.00 63.00 72.00 81.00 90.00 + 10.00 20.00 30.00 40.00 50.00 60.00 70.00 80.00 90.00 100.00 +PANIC: PanicInfo { payload: Any { .. }, message: Some(I am a panic), location: Location { file: "src/bin/with_heap.rs", line: 61, col: 5 }, can_unwind: true, force_no_backtrace: false } ``` ## License diff --git a/example-code/qemu-aarch64v8a/build.sh b/example-code/qemu-aarch64v8a/build.sh index 34b8906..aee7c36 100755 --- a/example-code/qemu-aarch64v8a/build.sh +++ b/example-code/qemu-aarch64v8a/build.sh @@ -3,23 +3,102 @@ set -euo pipefail TARGET_DIR=target/production -OUTPUT_BINARY=${TARGET_DIR}/basic-rust RUSTC=$(criticalup which rustc) SYSROOT=$(criticalup run rustc --print sysroot) OBJDUMP=$(ls "${SYSROOT}"/lib/rustlib/*/bin/llvm-objdump) -OUTPUT_MAP=${TARGET_DIR}/basic-rust.map -OUTPUT_ASM=${TARGET_DIR}/basic-rust.asm +RUSTC_FLAGS="--target aarch64-unknown-none -Copt-level=s" + +WITH_HEAP_OUTPUT_BINARY=${TARGET_DIR}/with_heap +WITH_HEAP_OUTPUT_MAP=${TARGET_DIR}/with_heap.map +WITH_HEAP_OUTPUT_ASM=${TARGET_DIR}/with_heap.asm +NO_HEAP_OUTPUT_BINARY=${TARGET_DIR}/no_heap +NO_HEAP_OUTPUT_MAP=${TARGET_DIR}/no_heap.map +NO_HEAP_OUTPUT_ASM=${TARGET_DIR}/no_heap.asm rm -rf ${TARGET_DIR} mkdir -p ${TARGET_DIR} -echo Running rustc... -"${RUSTC}" --target aarch64-unknown-none \ + +# ############################################################################ +echo "Running rustc for critical-section" +# ############################################################################ +"${RUSTC}" ${RUSTC_FLAGS} \ + --crate-type=lib \ + --crate-name=critical_section \ + --emit=dep-info,metadata,link \ + --out-dir ${TARGET_DIR} \ + --cfg 'feature="restore-state-bool"' \ + --edition 2021 \ + vendor/critical-section/src/lib.rs + +# ############################################################################ +echo "Running rustc for linked-list-allocator" +# ############################################################################ +"${RUSTC}" ${RUSTC_FLAGS} \ + --crate-type=lib \ + --crate-name=linked_list_allocator \ + --emit=dep-info,metadata,link \ + --out-dir ${TARGET_DIR} \ + -L ${TARGET_DIR} \ + --edition 2018 \ + vendor/linked_list_allocator/src/lib.rs + +# ############################################################################ +echo "Running rustc for embedded-alloc" +# ############################################################################ +"${RUSTC}" ${RUSTC_FLAGS} \ + --crate-type=lib \ + --crate-name=embedded_alloc \ + --emit=dep-info,metadata,link \ + --out-dir ${TARGET_DIR} \ + -L ${TARGET_DIR} \ + --extern critical_section=${TARGET_DIR}/libcritical_section.rmeta \ + --extern linked_list_allocator=${TARGET_DIR}/liblinked_list_allocator.rmeta \ + --edition 2018 \ + vendor/embedded-alloc/src/lib.rs + +# ############################################################################ +echo Running rustc for lib... 
+# ############################################################################ +"${RUSTC}" ${RUSTC_FLAGS} \ + --crate-type=lib \ + --crate-name=qemu_aarch64v8a \ + --emit=dep-info,metadata,link \ + --out-dir ${TARGET_DIR} \ + -L ${TARGET_DIR} \ + --edition 2021 \ + --extern critical_section=${TARGET_DIR}/libcritical_section.rmeta \ + --extern embedded_alloc=${TARGET_DIR}/libembedded_alloc.rmeta \ + src/lib.rs + +# ############################################################################ +echo Running rustc for no_heap... +# ############################################################################ +"${RUSTC}" ${RUSTC_FLAGS} \ + --crate-type=bin \ + -Clink-arg=-Tlinker.ld \ + --edition 2021 \ + -L ${TARGET_DIR} \ + --extern qemu_aarch64v8a=${TARGET_DIR}/libqemu_aarch64v8a.rlib \ + -o ${NO_HEAP_OUTPUT_BINARY} \ + src/bin/no_heap.rs +echo Generating asm for no_heap... +"${OBJDUMP}" -Cd ${NO_HEAP_OUTPUT_BINARY} > ${NO_HEAP_OUTPUT_ASM} +echo Generating map for no_heap... +"${OBJDUMP}" -Ct ${NO_HEAP_OUTPUT_BINARY} > ${NO_HEAP_OUTPUT_MAP} + +# ############################################################################ +echo Running rustc for with_heap... +# ############################################################################ +"${RUSTC}" ${RUSTC_FLAGS} \ -Clink-arg=-Tlinker.ld \ - -Copt-level=s \ --edition 2021 \ - -o ${OUTPUT_BINARY} \ - src/main.rs -echo Generating asm... -"${OBJDUMP}" -Cd ${OUTPUT_BINARY} > ${OUTPUT_ASM} -echo Generating map... -"${OBJDUMP}" -Ct ${OUTPUT_BINARY} > ${OUTPUT_MAP} + -L ${TARGET_DIR} \ + --extern qemu_aarch64v8a=${TARGET_DIR}/libqemu_aarch64v8a.rlib \ + --extern embedded_alloc=${TARGET_DIR}/libembedded_alloc.rlib \ + -o ${WITH_HEAP_OUTPUT_BINARY} \ + src/bin/with_heap.rs +echo Generating asm for with_heap... +"${OBJDUMP}" -Cd ${WITH_HEAP_OUTPUT_BINARY} > ${WITH_HEAP_OUTPUT_ASM} +echo Generating map for with_heap... +"${OBJDUMP}" -Ct ${WITH_HEAP_OUTPUT_BINARY} > ${WITH_HEAP_OUTPUT_MAP} + diff --git a/example-code/qemu-aarch64v8a/qemu.sh b/example-code/qemu-aarch64v8a/qemu.sh index 9b50949..efc23be 100755 --- a/example-code/qemu-aarch64v8a/qemu.sh +++ b/example-code/qemu-aarch64v8a/qemu.sh @@ -1,5 +1,5 @@ #!/bin/sh TARGET_DIR=target/production -BINARY=${TARGET_DIR}/basic-rust +BINARY=${1:-${TARGET_DIR}/no_heap} qemu-system-aarch64 -machine virt -cpu cortex-a57 -semihosting -nographic -kernel ${BINARY} diff --git a/example-code/qemu-aarch64v8a/src/main.rs b/example-code/qemu-aarch64v8a/src/bin/no_heap.rs similarity index 68% rename from example-code/qemu-aarch64v8a/src/main.rs rename to example-code/qemu-aarch64v8a/src/bin/no_heap.rs index f66f40e..6802d8e 100644 --- a/example-code/qemu-aarch64v8a/src/main.rs +++ b/example-code/qemu-aarch64v8a/src/bin/no_heap.rs @@ -10,13 +10,11 @@ #![no_main] use core::fmt::Write; - -mod virt_uart; +use qemu_aarch64v8a::{exception_level, virt_uart}; /// The entry-point to the Rust application. /// -/// It is called by the start-up code in [`boot.S`](./boot.S) and thus exported -/// as a C-compatible symbol. +/// It is called by the start-up code in `lib.rs`. #[no_mangle] pub extern "C" fn kmain() { if let Err(e) = main() { @@ -29,7 +27,7 @@ pub extern "C" fn kmain() { /// Called by [`kmain`]. 
fn main() -> Result<(), core::fmt::Error> { let mut uart0 = unsafe { virt_uart::Uart::new_uart0() }; - writeln!(uart0, "Hello, this is Rust!")?; + writeln!(uart0, "Hello, this is Rust @ {:?}", exception_level())?; for x in 1..=10 { for y in 1..=10 { let z = f64::from(x) * f64::from(y); @@ -60,29 +58,4 @@ fn panic(info: &core::panic::PanicInfo) -> ! { } } -core::arch::global_asm!( - r#" - -.section .text.startup -.global _start - -// Assumes we are in EL1 - -_start: - // Set stack pointer - ldr x30, =stack_top - mov sp, x30 - // Set FPEN bits [21:20] to 0b11 to prevent trapping. - mov x0, #3 << 20 - msr cpacr_el1, x0 - // Clear interrupt bit - msr daifclr, #0x4 - // Jump to application - bl kmain - // In case the application returns, loop forever - b . - -"# -); - // End of file diff --git a/example-code/qemu-aarch64v8a/src/bin/with_heap.rs b/example-code/qemu-aarch64v8a/src/bin/with_heap.rs new file mode 100644 index 0000000..120a2fe --- /dev/null +++ b/example-code/qemu-aarch64v8a/src/bin/with_heap.rs @@ -0,0 +1,76 @@ +//! An example program for QEMU's Aarch64 Virtual Machine +//! +//! Written by Jonathan Pallant at Ferrous Systems +//! +//! Copyright (c) Ferrous Systems, 2024 + +#![no_std] +#![no_main] + +extern crate alloc; + +use core::{fmt::Write, ptr::addr_of_mut}; +use embedded_alloc::Heap; +use qemu_aarch64v8a::{exception_level, virt_uart}; + +#[global_allocator] +static HEAP: Heap = Heap::empty(); + +/// The entry-point to the Rust application. +/// +/// It is called by the start-up code in `lib.rs`. +#[no_mangle] +pub extern "C" fn kmain() { + // Initialize the allocator BEFORE you use it + { + const HEAP_SIZE: usize = 1024; + static mut HEAP_MEM: [u8; HEAP_SIZE] = [0u8; HEAP_SIZE]; + unsafe { + let heap_start = addr_of_mut!(HEAP_MEM); + HEAP.init(heap_start as usize, HEAP_SIZE); + } + } + + if let Err(e) = main() { + panic!("main returned {:?}", e); + } +} + +/// The main function of our Rust application. +/// +/// Called by [`kmain`]. +fn main() -> Result<(), core::fmt::Error> { + let mut uart0 = unsafe { virt_uart::Uart::new_uart0() }; + writeln!(uart0, "Hello, this is Rust @ {:?}", exception_level())?; + for x in 1..=10 { + for y in 1..=10 { + let z = f64::from(x) * f64::from(y); + let msg = alloc::format!("{z:>8.2} "); + write!(uart0, "{}", msg)?; + } + writeln!(uart0)?; + } + panic!("I am a panic"); +} + +/// Called when the application raises an unrecoverable `panic!`. +/// +/// Prints the panic to the console and then exits QEMU using a semihosting +/// breakpoint. +#[panic_handler] +fn panic(info: &core::panic::PanicInfo) -> ! { + const SYS_REPORTEXC: u64 = 0x18; + let mut c = unsafe { virt_uart::Uart::new_uart0() }; + let _ = writeln!(c, "PANIC: {:?}", info); + loop { + // Exit, using semihosting + unsafe { + core::arch::asm!( + "hlt 0xf000", + in("x0") SYS_REPORTEXC + ) + } + } +} + +// End of file diff --git a/example-code/qemu-aarch64v8a/src/critical_section.rs b/example-code/qemu-aarch64v8a/src/critical_section.rs new file mode 100644 index 0000000..be1b10c --- /dev/null +++ b/example-code/qemu-aarch64v8a/src/critical_section.rs @@ -0,0 +1,36 @@ +//! Code that implements the `critical-section` traits on 64-bit Aarch64. + +struct SingleCoreCriticalSection; +critical_section::set_impl!(SingleCoreCriticalSection); + +/// Reads the CPU interrupt status bit from DAIF +/// +/// Returns true if interrupts enabled. 
+#[inline]
+pub fn interrupts_enabled() -> bool {
+    const DAIF_I_BIT: u32 = 1 << 7;
+    let r: u32;
+    unsafe {
+        core::arch::asm!("mrs {0:x}, DAIF", out(reg) r, options(nomem, nostack, preserves_flags))
+    };
+    r & DAIF_I_BIT == 0
+}
+
+unsafe impl critical_section::Impl for SingleCoreCriticalSection {
+    unsafe fn acquire() -> critical_section::RawRestoreState {
+        let was_active = interrupts_enabled();
+        // Disable interrupts by masking IRQ, FIQ and SError
+        core::arch::asm!("msr DAIFset, #7", options(nomem, nostack, preserves_flags));
+        core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst);
+        was_active
+    }
+
+    unsafe fn release(was_active: critical_section::RawRestoreState) {
+        // Only re-enable interrupts if they were enabled before the critical section.
+        if was_active {
+            core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst);
+            // Enable interrupts by unmasking IRQ, FIQ and SError
+            core::arch::asm!("msr DAIFclr, #7", options(nomem, nostack, preserves_flags));
+        }
+    }
+}
diff --git a/example-code/qemu-aarch64v8a/src/lib.rs b/example-code/qemu-aarch64v8a/src/lib.rs
new file mode 100644
index 0000000..61d5ead
--- /dev/null
+++ b/example-code/qemu-aarch64v8a/src/lib.rs
@@ -0,0 +1,56 @@
+#![no_std]
+
+pub mod critical_section;
+pub mod virt_uart;
+
+/// An Aarch64 Exception Level
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub enum ExceptionLevel {
+    /// User code
+    EL0,
+    /// Kernel code
+    EL1,
+    /// Hypervisor code
+    EL2,
+    /// Secure Kernel code
+    EL3,
+}
+
+/// Reads the CPU Exception Level from `CurrentEL`
+#[inline]
+pub fn exception_level() -> ExceptionLevel {
+    let r: u32;
+    unsafe {
+        core::arch::asm!("mrs {0:x}, CurrentEL", out(reg) r, options(nomem, nostack, preserves_flags))
+    };
+    match (r >> 2) & 0b11 {
+        0 => ExceptionLevel::EL0,
+        1 => ExceptionLevel::EL1,
+        2 => ExceptionLevel::EL2,
+        _ => ExceptionLevel::EL3,
+    }
+}
+
+core::arch::global_asm!(
+    r#"
+
+.section .text.startup
+.global _start
+
+// Assumes we are in EL1
+
+_start:
+    // Set stack pointer
+    ldr x30, =stack_top
+    mov sp, x30
+    // Set FPEN bits [21:20] to 0b11 to prevent trapping.
+    mov x0, #3 << 20
+    msr CPACR_EL1, x0
+    // Clear interrupt mask bits to enable interrupts
+    msr DAIFclr, #0x7
+    // Jump to application
+    bl kmain
+    // In case the application returns, loop forever
+    b .
+"#, +); diff --git a/example-code/qemu-aarch64v8a/vendor/critical-section/.cargo-checksum.json b/example-code/qemu-aarch64v8a/vendor/critical-section/.cargo-checksum.json new file mode 100644 index 0000000..bc6171b --- /dev/null +++ b/example-code/qemu-aarch64v8a/vendor/critical-section/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"cbd8b81564f893cdb91bea255013af9dada13a0cde91aeaed0002701fb7a020e","CODE_OF_CONDUCT.md":"8e25e95078b1a582086587adf8e1d907d43aacee6a072b8630d54a6289e5e0b9","Cargo.toml":"af0123f9e52fa1482936f1143924dc22891cba334031b5a55c19bc69b804916e","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"fee6bdac4b917351332567b9fd5013c2ef3c847dd113fd6216cf28e96637a157","README.md":"5e6c551ffe891fd5b9a3b6196d8961bbd5e92b1c764999d7e426a83fd23ac623","docs/msrv.md":"690a70e9ed6764198e4a5c0933a4124fc382091754d8bafb82241483ae2d89b9","src/lib.rs":"43a8804ae04c98c323e307c3f018078dd6d6ea5316f7c9bf5b8add24f5eb6000","src/mutex.rs":"4c239da3d73a13689e5c09d248ea2cb5d2c5bff4c87ba73bd405a12c02e85b66","src/std.rs":"4f04dde8d9528d7b6676fc2ea929f9f095e34c613efee23ad1c9d63329eff8ca"},"package":"7059fff8937831a9ae6f0fe4d658ffabf58f2ca96aa9dec1c889f936f705f216"} \ No newline at end of file diff --git a/example-code/qemu-aarch64v8a/vendor/critical-section/CHANGELOG.md b/example-code/qemu-aarch64v8a/vendor/critical-section/CHANGELOG.md new file mode 100644 index 0000000..afd47a3 --- /dev/null +++ b/example-code/qemu-aarch64v8a/vendor/critical-section/CHANGELOG.md @@ -0,0 +1,137 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +No unreleased changes yet + +## [v1.1.2] - 2023-08-09 + +- Clarified that `acquire()` must provide ordering guarantees +- Updated atomic-polyfill reference to point to portable-atomic instead +- Improved documentation for `Mutex` example +- Added list of some known implementations + +## [v1.1.1] - 2022-09-13 + +- On the `std` implementation, panicking inside the `critical_section::with()` closure no longer accidentally leaves the critical section locked (#26). + +## [v1.1.0] - 2022-08-17 + +- Added built-in critical section implementation using `std::sync::Mutex`, enabled by the `std` Cargo feature. +- MSRV changed to `1.54` when `std` feature is disabled, `1.63` when enabled. + +## [v1.0.0] - 2022-08-10 + +- Improved docs. + +## [v1.0.0-alpha.2] - 2022-07-28 + +- Change name of the `extern fn`s to avoid clash with critical-section 0.2. + +## [v1.0.0-alpha.1] - 2022-07-28 + +Breaking changes: + +- Removed all builtin impls. These are going to be provided by platform-support crates now. +- Renamed `custom_impl!` to `set_impl!`. +- RestoreState is now an opaque struct for the user, and a transparent `RawRestoreState` type alias for impl writers. +- RestoreState type is now configurable with Cargo features. Default is `()`. (previously it was fixed to `u8`.) +- Added own `CriticalSection` and `Mutex` types, instead of reexporting them from `bare_metal`. + +## [v0.2.8] - 2022-11-29 + +- Implemented critical-section by forwarding to version 1.1.1 + +Breaking changes: + +- `acquire` and `release` are only implemented if the restore-state used by + version 1.1.1 is an u8 or smaller. +- No default critical-section implementation is provided. 
+ +Those breaking changes are necessary because versions <= 0.2.7 were unsound, and that +was impossible to fix without a breaking change. + +This version is meant to minimize that breaking change. However, all +users are encouraged to upgrade to critical-section 1.1. + +If you're seeing a linker error like `undefined symbol: _critical_section_1_0_acquire`, you're affected. To fix it: + +- If your target supports `std`: Add the `critical-section` dependency to `Cargo.toml` enabling the `std` feature. + + ```toml + [dependencies] + critical-section = { version = "1.1", features = ["std"]} + ``` + +- For single-core Cortex-M targets in privileged mode: + ```toml + [dependencies] + cortex-m = { version = "0.7.6", features = ["critical-section-single-core"]} + ``` + +- For single-hart RISC-V targets in privileged mode: + ```toml + [dependencies] + riscv = { version = "0.10", features = ["critical-section-single-hart"]} + ``` + +- For other targets: check if your HAL or architecture-support crate has a `critical-section 1.0` implementation available. Otherwise, [provide your own](https://github.com/rust-embedded/critical-section#providing-an-implementation). + + +## [v0.2.7] - 2022-04-08 + +- Add support for AVR targets. + +## [v0.2.6] - 2022-04-02 + +- Improved docs. + +## [v0.2.5] - 2021-11-02 + +- Fix `std` implementation to allow reentrant (nested) critical sections. This would previously deadlock. + +## [v0.2.4] - 2021-09-24 + +- Add support for 32bit RISC-V targets. + +## [v0.2.3] - 2021-09-13 + +- Use correct `#[vcfg]` for `wasm` targets. + +## [v0.2.2] - 2021-09-13 + +- Added support for `wasm` targets. + +## [v0.2.1] - 2021-05-11 + +- Added critical section implementation for `std`, based on a global Mutex. + +## [v0.2.0] - 2021-05-10 + +- Breaking change: use `CriticalSection<'_>` instead of `&CriticalSection<'_>` + +## v0.1.0 - 2021-05-10 + +- First release + +[Unreleased]: https://github.com/rust-embedded/critical-section/compare/v1.1.2...HEAD +[v1.1.2]: https://github.com/rust-embedded/critical-section/compare/v1.1.1...v1.1.2 +[v1.1.1]: https://github.com/rust-embedded/critical-section/compare/v1.1.0...v1.1.1 +[v1.1.0]: https://github.com/rust-embedded/critical-section/compare/v1.0.0...v1.1.0 +[v1.0.0]: https://github.com/rust-embedded/critical-section/compare/v1.0.0-alpha.2...v1.0.0 +[v1.0.0-alpha.2]: https://github.com/rust-embedded/critical-section/compare/v1.0.0-alpha.1...v1.0.0-alpha.2 +[v1.0.0-alpha.1]: https://github.com/rust-embedded/critical-section/compare/v0.2.7...v1.0.0-alpha.1 +[v0.2.8]: https://github.com/rust-embedded/critical-section/compare/v0.2.7...v0.2.8 +[v0.2.7]: https://github.com/rust-embedded/critical-section/compare/v0.2.6...v0.2.7 +[v0.2.6]: https://github.com/rust-embedded/critical-section/compare/v0.2.5...v0.2.6 +[v0.2.5]: https://github.com/rust-embedded/critical-section/compare/v0.2.4...v0.2.5 +[v0.2.4]: https://github.com/rust-embedded/critical-section/compare/v0.2.3...v0.2.4 +[v0.2.3]: https://github.com/rust-embedded/critical-section/compare/v0.2.2...v0.2.3 +[v0.2.2]: https://github.com/rust-embedded/critical-section/compare/v0.2.1...v0.2.2 +[v0.2.1]: https://github.com/rust-embedded/critical-section/compare/v0.2.0...v0.2.1 +[v0.2.0]: https://github.com/rust-embedded/critical-section/compare/v0.1.0...v0.2.0 diff --git a/example-code/qemu-aarch64v8a/vendor/critical-section/CODE_OF_CONDUCT.md b/example-code/qemu-aarch64v8a/vendor/critical-section/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..bcefc6a --- /dev/null +++ 
b/example-code/qemu-aarch64v8a/vendor/critical-section/CODE_OF_CONDUCT.md @@ -0,0 +1,37 @@ +# The Rust Code of Conduct + +## Conduct + +**Contact**: [HAL team][team] + +* We are committed to providing a friendly, safe and welcoming environment for all, regardless of level of experience, gender identity and expression, sexual orientation, disability, personal appearance, body size, race, ethnicity, age, religion, nationality, or other similar characteristic. +* On IRC, please avoid using overtly sexual nicknames or other nicknames that might detract from a friendly, safe and welcoming environment for all. +* Please be kind and courteous. There's no need to be mean or rude. +* Respect that people have differences of opinion and that every design or implementation choice carries a trade-off and numerous costs. There is seldom a right answer. +* Please keep unstructured critique to a minimum. If you have solid ideas you want to experiment with, make a fork and see how it works. +* We will exclude you from interaction if you insult, demean or harass anyone. That is not welcome behavior. We interpret the term "harassment" as including the definition in the [Citizen Code of Conduct](http://citizencodeofconduct.org/); if you have any lack of clarity about what might be included in that concept, please read their definition. In particular, we don't tolerate behavior that excludes people in socially marginalized groups. +* Private harassment is also unacceptable. No matter who you are, if you feel you have been or are being harassed or made uncomfortable by a community member, please contact one of the channel ops or any of the [HAL team][team] immediately. Whether you're a regular contributor or a newcomer, we care about making this community a safe place for you and we've got your back. +* Likewise any spamming, trolling, flaming, baiting or other attention-stealing behavior is not welcome. + +## Moderation + +These are the policies for upholding our community's standards of conduct. + +1. Remarks that violate the Rust standards of conduct, including hateful, hurtful, oppressive, or exclusionary remarks, are not allowed. (Cursing is allowed, but never targeting another user, and never in a hateful manner.) +2. Remarks that moderators find inappropriate, whether listed in the code of conduct or not, are also not allowed. +3. Moderators will first respond to such remarks with a warning. +4. If the warning is unheeded, the user will be "kicked," i.e., kicked out of the communication channel to cool off. +5. If the user comes back and continues to make trouble, they will be banned, i.e., indefinitely excluded. +6. Moderators may choose at their discretion to un-ban the user if it was a first offense and they offer the offended party a genuine apology. +7. If a moderator bans someone and you think it was unjustified, please take it up with that moderator, or with a different moderator, **in private**. Complaints about bans in-channel are not allowed. +8. Moderators are held to a higher standard than other community members. If a moderator creates an inappropriate situation, they should expect less leeway than others. + +In the Rust community we strive to go the extra step to look out for each other. Don't just aim to be technically unimpeachable, try to be your best self. 
In particular, avoid flirting with offensive or sensitive issues, particularly if they're off-topic; this all too often leads to unnecessary fights, hurt feelings, and damaged trust; worse, it can drive people away from the community entirely. + +And if someone takes issue with something you said or did, resist the urge to be defensive. Just stop doing what it was they complained about and apologize. Even if you feel you were misinterpreted or unfairly accused, chances are good there was something you could've communicated better — remember that it's your responsibility to make your fellow Rustaceans comfortable. Everyone wants to get along and we are all here first and foremost because we want to talk about cool technology. You will find that people will be eager to assume good intent and forgive as long as you earn their trust. + +The enforcement policies listed above apply to all official embedded WG venues; including official IRC channels (#rust-embedded); GitHub repositories under rust-embedded; and all forums under rust-embedded.org (forum.rust-embedded.org). + +*Adapted from the [Node.js Policy on Trolling](http://blog.izs.me/post/30036893703/policy-on-trolling) as well as the [Contributor Covenant v1.3.0](https://www.contributor-covenant.org/version/1/3/0/).* + +[team]: https://github.com/rust-embedded/wg#the-hal-team diff --git a/example-code/qemu-aarch64v8a/vendor/critical-section/Cargo.toml b/example-code/qemu-aarch64v8a/vendor/critical-section/Cargo.toml new file mode 100644 index 0000000..1108b05 --- /dev/null +++ b/example-code/qemu-aarch64v8a/vendor/critical-section/Cargo.toml @@ -0,0 +1,33 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +name = "critical-section" +version = "1.1.2" +description = "Cross-platform critical section" +readme = "README.md" +categories = [ + "embedded", + "no-std", + "concurrency", +] +license = "MIT OR Apache-2.0" +repository = "https://github.com/rust-embedded/critical-section" + +[features] +restore-state-bool = [] +restore-state-none = [] +restore-state-u16 = [] +restore-state-u32 = [] +restore-state-u64 = [] +restore-state-u8 = [] +std = ["restore-state-bool"] diff --git a/example-code/qemu-aarch64v8a/vendor/critical-section/LICENSE-APACHE b/example-code/qemu-aarch64v8a/vendor/critical-section/LICENSE-APACHE new file mode 100644 index 0000000..16fe87b --- /dev/null +++ b/example-code/qemu-aarch64v8a/vendor/critical-section/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/example-code/qemu-aarch64v8a/vendor/critical-section/LICENSE-MIT b/example-code/qemu-aarch64v8a/vendor/critical-section/LICENSE-MIT new file mode 100644 index 0000000..78bced9 --- /dev/null +++ b/example-code/qemu-aarch64v8a/vendor/critical-section/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2022 The critical-section authors + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/example-code/qemu-aarch64v8a/vendor/critical-section/README.md b/example-code/qemu-aarch64v8a/vendor/critical-section/README.md new file mode 100644 index 0000000..32f1678 --- /dev/null +++ b/example-code/qemu-aarch64v8a/vendor/critical-section/README.md @@ -0,0 +1,250 @@ +# critical-section +[![crates.io](https://img.shields.io/crates/d/critical-section.svg)](https://crates.io/crates/critical-section) +[![crates.io](https://img.shields.io/crates/v/critical-section.svg)](https://crates.io/crates/critical-section) +[![Documentation](https://docs.rs/critical-section/badge.svg)](https://docs.rs/critical-section) + +This project is developed and maintained by the [HAL team][team]. + +A critical section that works everywhere! + +When writing software for embedded systems, it's common to use a "critical section" +as a basic primitive to control concurrency. A critical section is essentially a +mutex global to the whole process, that can be acquired by only one thread at a time. +This can be used to protect data behind mutexes, to [emulate atomics](https://crates.io/crates/portable-atomic) in +targets that don't support them, etc. + +There's a wide range of possible implementations depending on the execution environment: +- For bare-metal single core, disabling interrupts in the current (only) core. +- For bare-metal multicore, disabling interrupts in the current core and acquiring a hardware spinlock to prevent other cores from entering a critical section concurrently. +- For bare-metal using a RTOS, using library functions for acquiring a critical section, often named "scheduler lock" or "kernel lock". +- For bare-metal running in non-privileged mode, calling some system call is usually needed. +- For `std` targets, acquiring a global `std::sync::Mutex`. + +Libraries often need to use critical sections, but there's no universal API for this in `core`. This leads +library authors to hard-code them for their target, or at best add some `cfg`s to support a few targets. 
+This doesn't scale since there are many targets out there, and in the general case it's impossible to know +which critical section implementation is needed from the Rust target alone. For example, the `thumbv7em-none-eabi` target +could be cases 1-4 from the above list. + +This crate solves the problem by providing this missing universal API. + +- It provides functions `acquire`, `release` and `with` that libraries can directly use. +- It provides a way for any crate to supply an implementation. This allows "target support" crates such as architecture crates (`cortex-m`, `riscv`), RTOS bindings, or HALs for multicore chips to supply the correct implementation so that all the crates in the dependency tree automatically use it. + +## Usage in `no-std` binaries. + +First, add a dependency on a crate providing a critical section implementation. Enable the `critical-section-*` Cargo feature if required by the crate. + +Implementations are typically provided by either architecture-support crates, HAL crates, and OS/RTOS bindings, including: + +* The [`cortex-m`] crate provides an implementation for all single-core Cortex-M microcontrollers via its `critical-section-single-core` feature +* The [`riscv`] crate provides an implementation for all single-hart RISC-V microcontrollers via its `critical-section-single-hart` feature +* The [`msp430`] crate provides an implementation for all MSP430 microcontrollers via its `critical-section-single-core` feature +* The [`rp2040-hal`] crate provides a multi-core-safe critical section for the RP2040 microcontroller via its `critical-section-impl` feature +* The [`avr-device`] crate provides an implementation for all AVR microcontrollers via its `critical-section-impl` feature +* The [`esp-hal-common`] crate provides an implementation for ESP32 microcontrollers which is used by the ESP HALs +* The [`embassy-rp`] crate provides a multi-core-safe critical section for the RP2040 microcontroller via its `critical-section-impl` feature +* The [`nrf-softdevice`] crate provides a critical section that's compatible with the nRF soft-device firmware via its `critical-section-impl` feature + +[`cortex-m`]: https://crates.io/crates/cortex-m +[`riscv`]: https://crates.io/crates/riscv +[`msp430`]: https://crates.io/crates/msp430 +[`rp2040-hal`]: https://crates.io/crates/rp2040-hal +[`avr-device`]: https://crates.io/crates/avr-device +[`esp-hal-common`]: https://crates.io/crates/esp-hal-common +[`embassy-rp`]: https://docs.embassy.dev/embassy-rp +[`nrf-softdevice`]: https://docs.embassy.dev/nrf-softdevice + +For example, for single-core Cortex-M targets, you can use: + +```toml +[dependencies] +cortex-m = { version = "0.7.6", features = ["critical-section-single-core"]} +``` + +Then you can use `critical_section::with()`. + +```rust +use core::cell::Cell; +use critical_section::Mutex; + +static MY_VALUE: Mutex> = Mutex::new(Cell::new(0)); + +critical_section::with(|cs| { + // This code runs within a critical section. + + // `cs` is a token that you can use to "prove" that to some API, + // for example to a `Mutex`: + MY_VALUE.borrow(cs).set(42); +}); + +# #[cfg(not(feature = "std"))] // needed for `cargo test --features std` +# mod no_std { +# struct MyCriticalSection; +# critical_section::set_impl!(MyCriticalSection); +# unsafe impl critical_section::Impl for MyCriticalSection { +# unsafe fn acquire() -> () {} +# unsafe fn release(token: ()) {} +# } +# } +``` + +## Usage in `std` binaries. 
+ +Add the `critical-section` dependency to `Cargo.toml` enabling the `std` feature. This makes the `critical-section` crate itself +provide an implementation based on `std::sync::Mutex`, so you don't have to add any other dependency. + +```toml +[dependencies] +critical-section = { version = "1.1", features = ["std"]} +``` + +## Usage in libraries + +If you're writing a library intended to be portable across many targets, simply add a dependency on `critical-section` +and use `critical_section::free` and/or `Mutex` as usual. + +**Do not** add any dependency supplying a critical section implementation. Do not enable any `critical-section-*` Cargo feature. +This has to be done by the end user, enabling the correct implementation for their target. + +**Do not** enable any Cargo feature in `critical-section`. + +## Usage in `std` tests for `no-std` libraries. + +If you want to run `std`-using tests in otherwise `no-std` libraries, enable the `std` feature in `dev-dependencies` only. +This way the main target will use the `no-std` implementation chosen by the end-user's binary, and only the test targets +will use the `std` implementation. + +```toml +[dependencies] +critical-section = "1.1" + +[dev-dependencies] +critical-section = { version = "1.1", features = ["std"]} +``` + +## Providing an implementation + +Crates adding support for a particular architecture, chip or operating system should provide a critical section implementation. +It is **strongly recommended** to gate the implementation behind a feature, so the user can still use another implementation +if needed (having two implementations in the same binary will cause linking to fail). + +Add the dependency, and a `critical-section-*` feature to your `Cargo.toml`: + +```toml +[features] +# Enable critical section implementation that does "foo" +critical-section-foo = ["critical-section/restore-state-bool"] + +[dependencies] +critical-section = { version = "1.0", optional = true } +``` + +Then, provide the critical implementation like this: + +```rust +# #[cfg(not(feature = "std"))] // needed for `cargo test --features std` +# mod no_std { +// This is a type alias for the enabled `restore-state-*` feature. +// For example, it is `bool` if you enable `restore-state-bool`. +use critical_section::RawRestoreState; + +struct MyCriticalSection; +critical_section::set_impl!(MyCriticalSection); + +unsafe impl critical_section::Impl for MyCriticalSection { + unsafe fn acquire() -> RawRestoreState { + // TODO + } + + unsafe fn release(token: RawRestoreState) { + // TODO + } +} +# } +``` + +## Troubleshooting + +### Undefined reference errors + +If you get an error like these: + +```not_rust +undefined reference to `_critical_section_1_0_acquire' +undefined reference to `_critical_section_1_0_release' +``` + +it is because you (or a library) are using `critical_section::with` without providing a critical section implementation. +Make sure you're depending on a crate providing the implementation, and have enabled the `critical-section-*` feature in it if required. See the `Usage` section above. + +The error can also be caused by having the dependency but never `use`ing it. This can be fixed by adding a dummy `use`: + +```rust,ignore +use the_cs_impl_crate as _; +``` + +### Duplicate symbol errors + +If you get errors like these: + +```not_rust +error: symbol `_critical_section_1_0_acquire` is already defined +``` + +it is because you have two crates trying to provide a critical section implementation. 
You can only
+have one implementation in a program.
+
+You can use `cargo tree --format '{p} {f}'` to view all dependencies and their enabled features. Make sure
+that in the whole dependency tree, exactly one implementation is provided.
+
+Check for multiple versions of the same crate as well. For example, check that the `critical-section-single-core`
+feature is not enabled for both `cortex-m` 0.7 and 0.8.
+
+## Why not generics?
+
+An alternative solution would be to use a `CriticalSection` trait, and make all
+code that needs acquiring the critical section generic over it. This has a few problems:
+
+- It would require passing it as a generic param to a very large amount of code, which
+would be quite unergonomic.
+- It's common to put `Mutex`es in `static` variables, and `static`s can't
+be generic.
+- It would allow mixing different critical section implementations in the same program,
+which would be unsound.
+
+## Minimum Supported Rust Version (MSRV)
+
+This crate is guaranteed to compile on the following Rust versions:
+
+- If the `std` feature is not enabled: stable Rust 1.54 and up.
+- If the `std` feature is enabled: stable Rust 1.63 and up.
+
+It might compile with older versions but that may change in any new patch release.
+
+See [here](docs/msrv.md) for details on how the MSRV may be upgraded.
+
+## License
+
+This work is licensed under either of
+
+- Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or
+  <http://www.apache.org/licenses/LICENSE-2.0>)
+- MIT license ([LICENSE-MIT](LICENSE-MIT) or <http://opensource.org/licenses/MIT>)
+
+at your option.
+
+## Contribution
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
+dual licensed as above, without any additional terms or conditions.
+
+## Code of Conduct
+
+Contribution to this crate is organized under the terms of the [Rust Code of
+Conduct][CoC]; the maintainer of this crate, the [HAL team][team], promises
+to intervene to uphold that code of conduct.
+
+[CoC]: CODE_OF_CONDUCT.md
+[team]: https://github.com/rust-embedded/wg#the-hal-team
diff --git a/example-code/qemu-aarch64v8a/vendor/critical-section/docs/msrv.md b/example-code/qemu-aarch64v8a/vendor/critical-section/docs/msrv.md
new file mode 100644
index 0000000..ec0056a
--- /dev/null
+++ b/example-code/qemu-aarch64v8a/vendor/critical-section/docs/msrv.md
@@ -0,0 +1,39 @@
+# Minimum Supported Rust Version (MSRV)
+
+This crate is guaranteed to compile on all stable Rust versions going back to
+the version stated as MSRV in the README. It *might* compile with even older versions but
+that may change in any new patch release.
+
+## How the MSRV will be upgraded
+
+For `critical-section`, we do not consider upgrading the MSRV a strictly breaking change as defined by
+[SemVer](https://semver.org).
+
+We follow these rules when upgrading it:
+
+- We will not update the MSRV on any patch release: \_.\_.*Z*.
+- We may upgrade the MSRV on any *major* or *minor* release: *X*.*Y*.\_.
+- We may upgrade the MSRV in any preliminary version release (e.g. an `-alpha` release) as
+  these serve as preparation for the final release.
+- MSRV upgrades will be clearly stated in the changelog.
+
+This applies both to `0._._` releases as well as `>=1._._` releases.
+
+For example:
+
+For a given `x.y.z` release, we may upgrade the MSRV on `x` and `y` releases but not on `z` releases.
+
+If your MSRV upgrade policy differs from this, you are advised to specify the
+`critical-section` dependency in your `Cargo.toml` accordingly.
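+
+For example, a minimal sketch of such a constraint (illustrative only; choose
+bounds that match your own policy):
+
+```toml
+[dependencies]
+# Accept 1.1.x patch releases, but not a 1.2 minor release that might raise the MSRV.
+critical-section = ">=1.1, <1.2"
+```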
+ +See the [Rust Embedded Working Group MSRV RFC](https://github.com/rust-embedded/wg/blob/master/rfcs/0523-msrv-2020.md) +for more background information and reasoning. diff --git a/example-code/qemu-aarch64v8a/vendor/critical-section/src/lib.rs b/example-code/qemu-aarch64v8a/vendor/critical-section/src/lib.rs new file mode 100644 index 0000000..6b26725 --- /dev/null +++ b/example-code/qemu-aarch64v8a/vendor/critical-section/src/lib.rs @@ -0,0 +1,289 @@ +#![cfg_attr(not(feature = "std"), no_std)] +#![doc = include_str!("../README.md")] + +mod mutex; +#[cfg(feature = "std")] +mod std; + +use core::marker::PhantomData; + +pub use self::mutex::Mutex; + +/// Critical section token. +/// +/// An instance of this type indicates that the current thread is executing code within a critical +/// section. +#[derive(Clone, Copy, Debug)] +pub struct CriticalSection<'cs> { + _private: PhantomData<&'cs ()>, +} + +impl<'cs> CriticalSection<'cs> { + /// Creates a critical section token. + /// + /// This method is meant to be used to create safe abstractions rather than being directly used + /// in applications. + /// + /// # Safety + /// + /// This must only be called when the current thread is in a critical section. The caller must + /// ensure that the returned instance will not live beyond the end of the critical section. + /// + /// The caller must use adequate fences to prevent the compiler from moving the + /// instructions inside the critical section to the outside of it. Sequentially consistent fences are + /// suggested immediately after entry and immediately before exit from the critical section. + /// + /// Note that the lifetime `'cs` of the returned instance is unconstrained. User code must not + /// be able to influence the lifetime picked for this type, since that might cause it to be + /// inferred to `'static`. 
+ #[inline(always)] + pub unsafe fn new() -> Self { + CriticalSection { + _private: PhantomData, + } + } +} + +#[cfg(any( + all(feature = "restore-state-none", feature = "restore-state-bool"), + all(feature = "restore-state-none", feature = "restore-state-u8"), + all(feature = "restore-state-none", feature = "restore-state-u16"), + all(feature = "restore-state-none", feature = "restore-state-u32"), + all(feature = "restore-state-none", feature = "restore-state-u64"), + all(feature = "restore-state-bool", feature = "restore-state-u8"), + all(feature = "restore-state-bool", feature = "restore-state-u16"), + all(feature = "restore-state-bool", feature = "restore-state-u32"), + all(feature = "restore-state-bool", feature = "restore-state-u64"), + all(feature = "restore-state-u8", feature = "restore-state-u16"), + all(feature = "restore-state-u8", feature = "restore-state-u32"), + all(feature = "restore-state-u8", feature = "restore-state-u64"), + all(feature = "restore-state-u16", feature = "restore-state-u32"), + all(feature = "restore-state-u16", feature = "restore-state-u64"), + all(feature = "restore-state-u32", feature = "restore-state-u64"), +))] +compile_error!("You must set at most one of these Cargo features: restore-state-none, restore-state-bool, restore-state-u8, restore-state-u16, restore-state-u32, restore-state-u64"); + +#[cfg(not(any( + feature = "restore-state-bool", + feature = "restore-state-u8", + feature = "restore-state-u16", + feature = "restore-state-u32", + feature = "restore-state-u64" +)))] +type RawRestoreStateInner = (); + +#[cfg(feature = "restore-state-bool")] +type RawRestoreStateInner = bool; + +#[cfg(feature = "restore-state-u8")] +type RawRestoreStateInner = u8; + +#[cfg(feature = "restore-state-u16")] +type RawRestoreStateInner = u16; + +#[cfg(feature = "restore-state-u32")] +type RawRestoreStateInner = u32; + +#[cfg(feature = "restore-state-u64")] +type RawRestoreStateInner = u64; + +// We have RawRestoreStateInner and RawRestoreState so that we don't have to copypaste the docs 5 times. +// In the docs this shows as `pub type RawRestoreState = u8` or whatever the selected type is, because +// the "inner" type alias is private. + +/// Raw, transparent "restore state". +/// +/// This type changes based on which Cargo feature is selected, out of +/// - `restore-state-none` (default, makes the type be `()`) +/// - `restore-state-bool` +/// - `restore-state-u8` +/// - `restore-state-u16` +/// - `restore-state-u32` +/// - `restore-state-u64` +/// +/// See [`RestoreState`]. +/// +/// User code uses [`RestoreState`] opaquely, critical section implementations +/// use [`RawRestoreState`] so that they can use the inner value. +pub type RawRestoreState = RawRestoreStateInner; + +/// Opaque "restore state". +/// +/// Implementations use this to "carry over" information between acquiring and releasing +/// a critical section. For example, when nesting two critical sections of an +/// implementation that disables interrupts globally, acquiring the inner one won't disable +/// the interrupts since they're already disabled. The impl would use the restore state to "tell" +/// the corresponding release that it does *not* have to reenable interrupts yet, only the +/// outer release should do so. +/// +/// User code uses [`RestoreState`] opaquely, critical section implementations +/// use [`RawRestoreState`] so that they can use the inner value. 
+#[derive(Clone, Copy, Debug)] +pub struct RestoreState(RawRestoreState); + +impl RestoreState { + /// Create an invalid, dummy `RestoreState`. + /// + /// This can be useful to avoid `Option` when storing a `RestoreState` in a + /// struct field, or a `static`. + /// + /// Note that due to the safety contract of [`acquire`]/[`release`], you must not pass + /// a `RestoreState` obtained from this method to [`release`]. + pub const fn invalid() -> Self { + #[cfg(not(any( + feature = "restore-state-bool", + feature = "restore-state-u8", + feature = "restore-state-u16", + feature = "restore-state-u32", + feature = "restore-state-u64" + )))] + return Self(()); + + #[cfg(feature = "restore-state-bool")] + return Self(false); + + #[cfg(feature = "restore-state-u8")] + return Self(0); + + #[cfg(feature = "restore-state-u16")] + return Self(0); + + #[cfg(feature = "restore-state-u32")] + return Self(0); + + #[cfg(feature = "restore-state-u64")] + return Self(0); + } +} + +/// Acquire a critical section in the current thread. +/// +/// This function is extremely low level. Strongly prefer using [`with`] instead. +/// +/// Nesting critical sections is allowed. The inner critical sections +/// are mostly no-ops since they're already protected by the outer one. +/// +/// # Safety +/// +/// - Each `acquire` call must be paired with exactly one `release` call in the same thread. +/// - `acquire` returns a "restore state" that you must pass to the corresponding `release` call. +/// - `acquire`/`release` pairs must be "properly nested", ie it's not OK to do `a=acquire(); b=acquire(); release(a); release(b);`. +/// - It is UB to call `release` if the critical section is not acquired in the current thread. +/// - It is UB to call `release` with a "restore state" that does not come from the corresponding `acquire` call. +/// - It must provide ordering guarantees at least equivalent to a [`core::sync::atomic::Ordering::Acquire`] +/// on a memory location shared by all critical sections, on which the `release` call will do a +/// [`core::sync::atomic::Ordering::Release`] operation. +#[inline(always)] +pub unsafe fn acquire() -> RestoreState { + extern "Rust" { + fn _critical_section_1_0_acquire() -> RawRestoreState; + } + + #[allow(clippy::unit_arg)] + RestoreState(_critical_section_1_0_acquire()) +} + +/// Release the critical section. +/// +/// This function is extremely low level. Strongly prefer using [`with`] instead. +/// +/// # Safety +/// +/// See [`acquire`] for the safety contract description. +#[inline(always)] +pub unsafe fn release(restore_state: RestoreState) { + extern "Rust" { + fn _critical_section_1_0_release(restore_state: RawRestoreState); + } + + #[allow(clippy::unit_arg)] + _critical_section_1_0_release(restore_state.0) +} + +/// Execute closure `f` in a critical section. +/// +/// Nesting critical sections is allowed. The inner critical sections +/// are mostly no-ops since they're already protected by the outer one. +/// +/// # Panics +/// +/// This function panics if the given closure `f` panics. In this case +/// the critical section is released before unwinding. +#[inline] +pub fn with(f: impl FnOnce(CriticalSection) -> R) -> R { + // Helper for making sure `release` is called even if `f` panics. 
+ struct Guard { + state: RestoreState, + } + + impl Drop for Guard { + #[inline(always)] + fn drop(&mut self) { + unsafe { release(self.state) } + } + } + + let state = unsafe { acquire() }; + let _guard = Guard { state }; + + unsafe { f(CriticalSection::new()) } +} + +/// Methods required for a critical section implementation. +/// +/// This trait is not intended to be used except when implementing a critical section. +/// +/// # Safety +/// +/// Implementations must uphold the contract specified in [`crate::acquire`] and [`crate::release`]. +pub unsafe trait Impl { + /// Acquire the critical section. + /// + /// # Safety + /// + /// Callers must uphold the contract specified in [`crate::acquire`] and [`crate::release`]. + unsafe fn acquire() -> RawRestoreState; + + /// Release the critical section. + /// + /// # Safety + /// + /// Callers must uphold the contract specified in [`crate::acquire`] and [`crate::release`]. + unsafe fn release(restore_state: RawRestoreState); +} + +/// Set the critical section implementation. +/// +/// # Example +/// +/// ``` +/// # #[cfg(not(feature = "std"))] // needed for `cargo test --features std` +/// # mod no_std { +/// use critical_section::RawRestoreState; +/// +/// struct MyCriticalSection; +/// critical_section::set_impl!(MyCriticalSection); +/// +/// unsafe impl critical_section::Impl for MyCriticalSection { +/// unsafe fn acquire() -> RawRestoreState { +/// // ... +/// } +/// +/// unsafe fn release(restore_state: RawRestoreState) { +/// // ... +/// } +/// } +/// # } +#[macro_export] +macro_rules! set_impl { + ($t: ty) => { + #[no_mangle] + unsafe fn _critical_section_1_0_acquire() -> $crate::RawRestoreState { + <$t as $crate::Impl>::acquire() + } + #[no_mangle] + unsafe fn _critical_section_1_0_release(restore_state: $crate::RawRestoreState) { + <$t as $crate::Impl>::release(restore_state) + } + }; +} diff --git a/example-code/qemu-aarch64v8a/vendor/critical-section/src/mutex.rs b/example-code/qemu-aarch64v8a/vendor/critical-section/src/mutex.rs new file mode 100644 index 0000000..c9ea6ff --- /dev/null +++ b/example-code/qemu-aarch64v8a/vendor/critical-section/src/mutex.rs @@ -0,0 +1,200 @@ +use super::CriticalSection; +use core::cell::{Ref, RefCell, RefMut, UnsafeCell}; + +/// A mutex based on critical sections. +/// +/// # Example +/// +/// ```no_run +/// # use critical_section::Mutex; +/// # use std::cell::Cell; +/// +/// static FOO: Mutex> = Mutex::new(Cell::new(42)); +/// +/// fn main() { +/// critical_section::with(|cs| { +/// FOO.borrow(cs).set(43); +/// }); +/// } +/// +/// fn interrupt_handler() { +/// let _x = critical_section::with(|cs| FOO.borrow(cs).get()); +/// } +/// ``` +/// +/// +/// # Design +/// +/// [`std::sync::Mutex`] has two purposes. It converts types that are [`Send`] +/// but not [`Sync`] into types that are both; and it provides +/// [interior mutability]. `critical_section::Mutex`, on the other hand, only adds +/// `Sync`. It does *not* provide interior mutability. +/// +/// This was a conscious design choice. It is possible to create multiple +/// [`CriticalSection`] tokens, either by nesting critical sections or `Copy`ing +/// an existing token. As a result, it would not be sound for [`Mutex::borrow`] +/// to return `&mut T`, because there would be nothing to prevent calling +/// `borrow` multiple times to create aliased `&mut T` references. +/// +/// The solution is to include a runtime check to ensure that each resource is +/// borrowed only once. This is what `std::sync::Mutex` does. 
However, this is +/// a runtime cost that may not be required in all circumstances. For instance, +/// `Mutex>` never needs to create `&mut T` or equivalent. +/// +/// If `&mut T` is needed, the simplest solution is to use `Mutex>`, +/// which is the closest analogy to `std::sync::Mutex`. [`RefCell`] inserts the +/// exact runtime check necessary to guarantee that the `&mut T` reference is +/// unique. +/// +/// To reduce verbosity when using `Mutex>`, we reimplement some of +/// `RefCell`'s methods on it directly. +/// +/// ```no_run +/// # use critical_section::Mutex; +/// # use std::cell::RefCell; +/// +/// static FOO: Mutex> = Mutex::new(RefCell::new(42)); +/// +/// fn main() { +/// critical_section::with(|cs| { +/// // Instead of calling this +/// let _ = FOO.borrow(cs).take(); +/// // Call this +/// let _ = FOO.take(cs); +/// // `RefCell::borrow` and `RefCell::borrow_mut` are renamed to +/// // `borrow_ref` and `borrow_ref_mut` to avoid name collisions +/// let _: &mut i32 = &mut *FOO.borrow_ref_mut(cs); +/// }) +/// } +/// ``` +/// +/// [`std::sync::Mutex`]: https://doc.rust-lang.org/std/sync/struct.Mutex.html +/// [interior mutability]: https://doc.rust-lang.org/reference/interior-mutability.html +#[derive(Debug)] +pub struct Mutex { + inner: UnsafeCell, +} + +impl Mutex { + /// Creates a new mutex. + #[inline] + pub const fn new(value: T) -> Self { + Mutex { + inner: UnsafeCell::new(value), + } + } + + /// Gets a mutable reference to the contained value when the mutex is already uniquely borrowed. + /// + /// This does not require locking or a critical section since it takes `&mut self`, which + /// guarantees unique ownership already. Care must be taken when using this method to + /// **unsafely** access `static mut` variables, appropriate fences must be used to prevent + /// unwanted optimizations. + #[inline] + pub fn get_mut(&mut self) -> &mut T { + unsafe { &mut *self.inner.get() } + } + + /// Unwraps the contained value, consuming the mutex. + #[inline] + pub fn into_inner(self) -> T { + self.inner.into_inner() + } + + /// Borrows the data for the duration of the critical section. + #[inline] + pub fn borrow<'cs>(&'cs self, _cs: CriticalSection<'cs>) -> &'cs T { + unsafe { &*self.inner.get() } + } +} + +impl Mutex> { + /// Borrow the data and call [`RefCell::replace`] + /// + /// This is equivalent to `self.borrow(cs).replace(t)` + /// + /// # Panics + /// + /// This call could panic. See the documentation for [`RefCell::replace`] + /// for more details. + #[inline] + #[track_caller] + pub fn replace<'cs>(&'cs self, cs: CriticalSection<'cs>, t: T) -> T { + self.borrow(cs).replace(t) + } + + /// Borrow the data and call [`RefCell::replace_with`] + /// + /// This is equivalent to `self.borrow(cs).replace_with(f)` + /// + /// # Panics + /// + /// This call could panic. See the documentation for + /// [`RefCell::replace_with`] for more details. + #[inline] + #[track_caller] + pub fn replace_with<'cs, F>(&'cs self, cs: CriticalSection<'cs>, f: F) -> T + where + F: FnOnce(&mut T) -> T, + { + self.borrow(cs).replace_with(f) + } + + /// Borrow the data and call [`RefCell::borrow`] + /// + /// This is equivalent to `self.borrow(cs).borrow()` + /// + /// # Panics + /// + /// This call could panic. See the documentation for [`RefCell::borrow`] + /// for more details. 
+ #[inline] + #[track_caller] + pub fn borrow_ref<'cs>(&'cs self, cs: CriticalSection<'cs>) -> Ref<'cs, T> { + self.borrow(cs).borrow() + } + + /// Borrow the data and call [`RefCell::borrow_mut`] + /// + /// This is equivalent to `self.borrow(cs).borrow_mut()` + /// + /// # Panics + /// + /// This call could panic. See the documentation for [`RefCell::borrow_mut`] + /// for more details. + #[inline] + #[track_caller] + pub fn borrow_ref_mut<'cs>(&'cs self, cs: CriticalSection<'cs>) -> RefMut<'cs, T> { + self.borrow(cs).borrow_mut() + } +} + +impl Mutex> { + /// Borrow the data and call [`RefCell::take`] + /// + /// This is equivalent to `self.borrow(cs).take()` + /// + /// # Panics + /// + /// This call could panic. See the documentation for [`RefCell::take`] + /// for more details. + #[inline] + #[track_caller] + pub fn take<'cs>(&'cs self, cs: CriticalSection<'cs>) -> T { + self.borrow(cs).take() + } +} + +// NOTE A `Mutex` can be used as a channel so the protected data must be `Send` +// to prevent sending non-Sendable stuff (e.g. access tokens) across different +// threads. +unsafe impl Sync for Mutex where T: Send {} + +/// ``` compile_fail +/// fn bad(cs: critical_section::CriticalSection) -> &u32 { +/// let x = critical_section::Mutex::new(42u32); +/// x.borrow(cs) +/// } +/// ``` +#[cfg(doctest)] +const BorrowMustNotOutliveMutexTest: () = (); diff --git a/example-code/qemu-aarch64v8a/vendor/critical-section/src/std.rs b/example-code/qemu-aarch64v8a/vendor/critical-section/src/std.rs new file mode 100644 index 0000000..40df429 --- /dev/null +++ b/example-code/qemu-aarch64v8a/vendor/critical-section/src/std.rs @@ -0,0 +1,80 @@ +use std::cell::Cell; +use std::mem::MaybeUninit; +use std::sync::{Mutex, MutexGuard}; + +static GLOBAL_MUTEX: Mutex<()> = Mutex::new(()); + +// This is initialized if a thread has acquired the CS, uninitialized otherwise. +static mut GLOBAL_GUARD: MaybeUninit> = MaybeUninit::uninit(); + +std::thread_local!(static IS_LOCKED: Cell = Cell::new(false)); + +struct StdCriticalSection; +crate::set_impl!(StdCriticalSection); + +unsafe impl crate::Impl for StdCriticalSection { + unsafe fn acquire() -> bool { + // Allow reentrancy by checking thread local state + IS_LOCKED.with(|l| { + if l.get() { + // CS already acquired in the current thread. + return true; + } + + // Note: it is fine to set this flag *before* acquiring the mutex because it's thread local. + // No other thread can see its value, there's no potential for races. + // This way, we hold the mutex for slightly less time. + l.set(true); + + // Not acquired in the current thread, acquire it. + let guard = match GLOBAL_MUTEX.lock() { + Ok(guard) => guard, + Err(err) => { + // Ignore poison on the global mutex in case a panic occurred + // while the mutex was held. + err.into_inner() + } + }; + GLOBAL_GUARD.write(guard); + + false + }) + } + + unsafe fn release(nested_cs: bool) { + if !nested_cs { + // SAFETY: As per the acquire/release safety contract, release can only be called + // if the critical section is acquired in the current thread, + // in which case we know the GLOBAL_GUARD is initialized. + GLOBAL_GUARD.assume_init_drop(); + + // Note: it is fine to clear this flag *after* releasing the mutex because it's thread local. + // No other thread can see its value, there's no potential for races. + // This way, we hold the mutex for slightly less time. 
+ IS_LOCKED.with(|l| l.set(false)); + } + } +} + +#[cfg(test)] +mod tests { + use std::thread; + + use crate as critical_section; + + #[cfg(feature = "std")] + #[test] + #[should_panic(expected = "Not a PoisonError!")] + fn reusable_after_panic() { + let _ = thread::spawn(|| { + critical_section::with(|_| { + panic!("Boom!"); + }) + }) + .join(); + + critical_section::with(|_| { + panic!("Not a PoisonError!"); + }) + } +} diff --git a/example-code/qemu-aarch64v8a/vendor/embedded-alloc/.cargo-checksum.json b/example-code/qemu-aarch64v8a/vendor/embedded-alloc/.cargo-checksum.json new file mode 100644 index 0000000..fbedf4c --- /dev/null +++ b/example-code/qemu-aarch64v8a/vendor/embedded-alloc/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"a1651e7a8c9cfda6f35d5ec2be828184500ec356f1ab385f504fa8e23fe3f259","CODE_OF_CONDUCT.md":"fba7846e321b6ac7f74932cbf5831d89a7116c71763f1b263ba1c49ac7c2f382","Cargo.lock":"3acb6c437c9ccded007b5d311f5742e51f3a0b699adfa2825f67512f08e0742e","Cargo.toml":"7f0a60553a1853dc3f95e125ef99e88d8f43a8f87e9351579b35ee0c0d71e89e","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"d4560bb506ccd1f67d6aa8560ab331a16af512f4571abc9a4889681bf289e5c8","README.md":"16f095bdd08f2f672f6ae05601afb05048353f958ea730346671f4d7e0ca4de6","examples/allocator_api.rs":"67693b7ee69ca735f8ff4954b1007fef87a91bd6f9c1cd757ec5936457b8169c","examples/global_alloc.rs":"13408e68ab8674c18400f28b852d8800928b787ad2e9b4bc32cacbed1b8d3303","examples/integration_test.rs":"faafc89a02ffc068cbdafd831ca47ec1bbbc638b18c373f3762f3afbacb62cbf","memory.x":"84c6db55a88fed665966dc22708590c6f5eb826e5ddc7349cbb17eb043ec615c","src/lib.rs":"0853df773271dc5474221821418e71125a9cb67864ccc037cd6e3f419df4efef","triagebot.toml":"a135e10c777cd13459559bdf74fb704c1379af7c9b0f70bc49fa6f5a837daa81"},"package":"ddae17915accbac2cfbc64ea0ae6e3b330e6ea124ba108dada63646fd3c6f815"} \ No newline at end of file diff --git a/example-code/qemu-aarch64v8a/vendor/embedded-alloc/CHANGELOG.md b/example-code/qemu-aarch64v8a/vendor/embedded-alloc/CHANGELOG.md new file mode 100644 index 0000000..2c8bb34 --- /dev/null +++ b/example-code/qemu-aarch64v8a/vendor/embedded-alloc/CHANGELOG.md @@ -0,0 +1,138 @@ +# Change Log + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](http://keepachangelog.com/) +and this project adheres to [Semantic Versioning](http://semver.org/). + +## [v0.5.1] - 2023-11-04 + +### Added + +- Implemented [`Allocator`] for `Heap` with the `allocator_api` crate feature. + This feature requires a nightly toolchain for the unstable [`allocator_api`] + compiler feature. + +[`Allocator`]: https://doc.rust-lang.org/core/alloc/trait.Allocator.html +[`allocator_api`]: https://doc.rust-lang.org/beta/unstable-book/library-features/allocator-api.html + +### Changed + +- Updated `linked_list_allocator` dependency to 0.10.5, which allows + compiling with stable rust. + +## [v0.5.0] - 2022-12-06 + +### Changed + +- Renamed crate from `alloc-cortex-m` to `embedded-alloc`. +- Renamed `CortexMHeap` to `Heap`. +- Use `critical-section` to lock the heap, instead of `cortex_m::interrupt::free()`. + This allows using this crate on non-Cortex-M systems, or on + Cortex-M systems that require a custom critical section implementation. + +## [v0.4.3] - 2022-11-03 + +### Changed + +- Updated `linked_list_allocator` dependency to 0.10.4, which fixes + CVE-2022-36086/RUSTSEC-2022-0063. 
+ +## [v0.4.2] - 2022-01-04 + +### Changed + +- Updated `cortex-m` dependency to 0.7.2. + +## [v0.4.1] - 2021-01-02 + +### Added + +- `const_mut_refs` feature to the dependency `linked_list_allocator` crate. + +### Changed + +- Bumped the dependency of the `linked_list_allocator` crate to v0.8.11. + +## [v0.4.0] - 2020-06-05 + +- Bumped the `cortex-m` dependency to v0.6.2. +- Bumped the dependency of the `linked_list_allocator` crate to v0.8.1. +- Removed `#![feature(alloc)]` to supress compiler warning about stability for alloc. + +## [v0.3.5] - 2018-06-19 + +### Fixed + +- To work with recent nightly + +## [v0.3.4] - 2018-05-12 + +### Changed + +- Update the example in the crate level documentation to show how to define the new `oom` lang item. + +## [v0.3.3] - 2018-04-23 + +- Bumped the dependency of the `linked_list_allocator` crate to v0.6.0. + +## [v0.3.2] - 2018-02-26 + +### Changed + +- Bumped the dependency of the `linked_list_allocator` crate to v0.5.0. + +## [v0.3.1] - 2017-10-07 + +### Fixed + +- The example in the documentation. + +## [v0.3.0] - 2017-10-07 + +### Changed + +- [breaking-change] Switched to the new allocator system. See documentation for details. + +## [v0.2.2] - 2017-04-29 + +### Added + +- a `__rust_allocate_zeroed` symbol as it's needed on recent nightlies. + +## [v0.2.1] - 2016-11-27 + +### Fixed + +- The heap size is `end_addr` - `start_addr`. Previously, it was wrongly + calculated as `end_addr - start_addr - 1`. + +## [v0.2.0] - 2016-11-19 + +### Changed + +- [breaking-change] Hid the HEAP variable. We now only expose an `init` function to + initialize the allocator. + +## v0.1.0 - 2016-11-19 + +### Added + +- Initial version of the allocator + +[Unreleased]: https://github.com/rust-embedded/embedded-alloc/compare/v0.5.1...HEAD +[v0.5.1]: https://github.com/rust-embedded/embedded-alloc/compare/v0.5.0...v0.5.1 +[v0.5.0]: https://github.com/rust-embedded/embedded-alloc/compare/v0.4.3...v0.5.0 +[v0.4.3]: https://github.com/rust-embedded/embedded-alloc/compare/v0.4.2...v0.4.3 +[v0.4.2]: https://github.com/rust-embedded/embedded-alloc/compare/v0.4.1...v0.4.2 +[v0.4.1]: https://github.com/rust-embedded/embedded-alloc/compare/v0.4.0...v0.4.1 +[v0.4.0]: https://github.com/rust-embedded/embedded-alloc/compare/v0.3.5...v0.4.0 +[v0.3.5]: https://github.com/rust-embedded/embedded-alloc/compare/v0.3.4...v0.3.5 +[v0.3.4]: https://github.com/rust-embedded/embedded-alloc/compare/v0.3.3...v0.3.4 +[v0.3.3]: https://github.com/rust-embedded/embedded-alloc/compare/v0.3.2...v0.3.3 +[v0.3.2]: https://github.com/rust-embedded/embedded-alloc/compare/v0.3.1...v0.3.2 +[v0.3.1]: https://github.com/rust-embedded/embedded-alloc/compare/v0.3.0...v0.3.1 +[v0.3.0]: https://github.com/rust-embedded/embedded-alloc/compare/v0.2.2...v0.3.0 +[v0.2.2]: https://github.com/rust-embedded/embedded-alloc/compare/v0.2.1...v0.2.2 +[v0.2.1]: https://github.com/rust-embedded/embedded-alloc/compare/v0.2.0...v0.2.1 +[v0.2.0]: https://github.com/rust-embedded/embedded-alloc/compare/v0.1.0...v0.2.0 diff --git a/example-code/qemu-aarch64v8a/vendor/embedded-alloc/CODE_OF_CONDUCT.md b/example-code/qemu-aarch64v8a/vendor/embedded-alloc/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..7a47646 --- /dev/null +++ b/example-code/qemu-aarch64v8a/vendor/embedded-alloc/CODE_OF_CONDUCT.md @@ -0,0 +1,37 @@ +# The Rust Code of Conduct + +## Conduct + +**Contact**: [Libs team](https://github.com/rust-embedded/wg#the-libs-team) + +* We are committed to providing a friendly, safe and welcoming environment for all, 
regardless of level of experience, gender identity and expression, sexual orientation, disability, personal appearance, body size, race, ethnicity, age, religion, nationality, or other similar characteristic. +* On IRC, please avoid using overtly sexual nicknames or other nicknames that might detract from a friendly, safe and welcoming environment for all. +* Please be kind and courteous. There's no need to be mean or rude. +* Respect that people have differences of opinion and that every design or implementation choice carries a trade-off and numerous costs. There is seldom a right answer. +* Please keep unstructured critique to a minimum. If you have solid ideas you want to experiment with, make a fork and see how it works. +* We will exclude you from interaction if you insult, demean or harass anyone. That is not welcome behavior. We interpret the term "harassment" as including the definition in the [Citizen Code of Conduct](http://citizencodeofconduct.org/); if you have any lack of clarity about what might be included in that concept, please read their definition. In particular, we don't tolerate behavior that excludes people in socially marginalized groups. +* Private harassment is also unacceptable. No matter who you are, if you feel you have been or are being harassed or made uncomfortable by a community member, please contact one of the channel ops or any of the [Libs team][team] immediately. Whether you're a regular contributor or a newcomer, we care about making this community a safe place for you and we've got your back. +* Likewise any spamming, trolling, flaming, baiting or other attention-stealing behavior is not welcome. + +## Moderation + +These are the policies for upholding our community's standards of conduct. + +1. Remarks that violate the Rust standards of conduct, including hateful, hurtful, oppressive, or exclusionary remarks, are not allowed. (Cursing is allowed, but never targeting another user, and never in a hateful manner.) +2. Remarks that moderators find inappropriate, whether listed in the code of conduct or not, are also not allowed. +3. Moderators will first respond to such remarks with a warning. +4. If the warning is unheeded, the user will be "kicked," i.e., kicked out of the communication channel to cool off. +5. If the user comes back and continues to make trouble, they will be banned, i.e., indefinitely excluded. +6. Moderators may choose at their discretion to un-ban the user if it was a first offense and they offer the offended party a genuine apology. +7. If a moderator bans someone and you think it was unjustified, please take it up with that moderator, or with a different moderator, **in private**. Complaints about bans in-channel are not allowed. +8. Moderators are held to a higher standard than other community members. If a moderator creates an inappropriate situation, they should expect less leeway than others. + +In the Rust community we strive to go the extra step to look out for each other. Don't just aim to be technically unimpeachable, try to be your best self. In particular, avoid flirting with offensive or sensitive issues, particularly if they're off-topic; this all too often leads to unnecessary fights, hurt feelings, and damaged trust; worse, it can drive people away from the community entirely. + +And if someone takes issue with something you said or did, resist the urge to be defensive. Just stop doing what it was they complained about and apologize. 
Even if you feel you were misinterpreted or unfairly accused, chances are good there was something you could've communicated better — remember that it's your responsibility to make your fellow Rustaceans comfortable. Everyone wants to get along and we are all here first and foremost because we want to talk about cool technology. You will find that people will be eager to assume good intent and forgive as long as you earn their trust. + +The enforcement policies listed above apply to all official embedded WG venues; including official IRC channels (#rust-embedded); GitHub repositories under rust-embedded; and all forums under rust-embedded.org (forum.rust-embedded.org). + +*Adapted from the [Node.js Policy on Trolling](http://blog.izs.me/post/30036893703/policy-on-trolling) as well as the [Contributor Covenant v1.3.0](https://www.contributor-covenant.org/version/1/3/0/).* + +[team]: https://github.com/rust-embedded/wg#the-libs-team diff --git a/example-code/qemu-aarch64v8a/vendor/embedded-alloc/Cargo.lock b/example-code/qemu-aarch64v8a/vendor/embedded-alloc/Cargo.lock new file mode 100644 index 0000000..6cb9362 --- /dev/null +++ b/example-code/qemu-aarch64v8a/vendor/embedded-alloc/Cargo.lock @@ -0,0 +1,199 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "bare-metal" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5deb64efa5bd81e31fcd1938615a6d98c82eafcbcd787162b6f63b91d6bac5b3" +dependencies = [ + "rustc_version", +] + +[[package]] +name = "bitfield" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46afbd2983a5d5a7bd740ccb198caf5b82f45c40c09c0eed36052d91cb92e719" + +[[package]] +name = "cortex-m" +version = "0.7.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ec610d8f49840a5b376c69663b6369e71f4b34484b9b2eb29fb918d92516cb9" +dependencies = [ + "bare-metal", + "bitfield", + "critical-section", + "embedded-hal", + "volatile-register", +] + +[[package]] +name = "cortex-m-rt" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee84e813d593101b1723e13ec38b6ab6abbdbaaa4546553f5395ed274079ddb1" +dependencies = [ + "cortex-m-rt-macros", +] + +[[package]] +name = "cortex-m-rt-macros" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0f6f3e36f203cfedbc78b357fb28730aa2c6dc1ab060ee5c2405e843988d3c7" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "cortex-m-semihosting" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c23234600452033cc77e4b761e740e02d2c4168e11dbf36ab14a0f58973592b0" +dependencies = [ + "cortex-m", +] + +[[package]] +name = "critical-section" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7059fff8937831a9ae6f0fe4d658ffabf58f2ca96aa9dec1c889f936f705f216" + +[[package]] +name = "embedded-alloc" +version = "0.5.1" +dependencies = [ + "cortex-m", + "cortex-m-rt", + "cortex-m-semihosting", + "critical-section", + "linked_list_allocator", + "panic-semihosting", +] + +[[package]] +name = "embedded-hal" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35949884794ad573cf46071e41c9b60efb0cb311e3ca01f7af807af1debc66ff" +dependencies = [ + "nb 0.1.3", + "void", +] + +[[package]] +name = "linked_list_allocator" 
+version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9afa463f5405ee81cdb9cc2baf37e08ec7e4c8209442b5d72c04cfb2cd6e6286" + +[[package]] +name = "nb" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "801d31da0513b6ec5214e9bf433a77966320625a37860f910be265be6e18d06f" +dependencies = [ + "nb 1.1.0", +] + +[[package]] +name = "nb" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d5439c4ad607c3c23abf66de8c8bf57ba8adcd1f129e699851a6e43935d339d" + +[[package]] +name = "panic-semihosting" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee8a3e1233d9073d76a870223512ce4eeea43c067a94a445c13bd6d792d7b1ab" +dependencies = [ + "cortex-m", + "cortex-m-semihosting", +] + +[[package]] +name = "proc-macro2" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rustc_version" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +dependencies = [ + "semver", +] + +[[package]] +name = "semver" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +dependencies = [ + "semver-parser", +] + +[[package]] +name = "semver-parser" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "unicode-ident" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" + +[[package]] +name = "vcell" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77439c1b53d2303b20d9459b1ade71a83c716e3f9c34f3228c00e6f185d6c002" + +[[package]] +name = "void" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" + +[[package]] +name = "volatile-register" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de437e2a6208b014ab52972a27e59b33fa2920d3e00fe05026167a1c509d19cc" +dependencies = [ + "vcell", +] diff --git a/example-code/qemu-aarch64v8a/vendor/embedded-alloc/Cargo.toml b/example-code/qemu-aarch64v8a/vendor/embedded-alloc/Cargo.toml new file mode 100644 index 0000000..b914bcb --- /dev/null +++ b/example-code/qemu-aarch64v8a/vendor/embedded-alloc/Cargo.toml @@ -0,0 +1,57 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal 
compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +name = "embedded-alloc" +version = "0.5.1" +authors = [ + "The Cortex-M Team ", + "Jonathan Pallant ", + "Jorge Aparicio ", + "Sébastien Béchet ", +] +description = "A heap allocator for embedded systems" +documentation = "https://docs.rs/embedded-alloc" +readme = "README.md" +keywords = [ + "allocator", + "embedded", + "arm", + "riscv", + "cortex-m", +] +license = "MIT OR Apache-2.0" +repository = "https://github.com/rust-embedded/embedded-alloc" + +[dependencies.critical-section] +version = "1.0" + +[dependencies.linked_list_allocator] +version = "0.10.5" +default-features = false + +[dev-dependencies.cortex-m] +version = "0.7.6" +features = ["critical-section-single-core"] + +[dev-dependencies.cortex-m-rt] +version = "0.7" + +[dev-dependencies.cortex-m-semihosting] +version = "0.5" + +[dev-dependencies.panic-semihosting] +version = "0.6" +features = ["exit"] + +[features] +allocator_api = [] diff --git a/example-code/qemu-aarch64v8a/vendor/embedded-alloc/LICENSE-APACHE b/example-code/qemu-aarch64v8a/vendor/embedded-alloc/LICENSE-APACHE new file mode 100644 index 0000000..16fe87b --- /dev/null +++ b/example-code/qemu-aarch64v8a/vendor/embedded-alloc/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/example-code/qemu-aarch64v8a/vendor/embedded-alloc/LICENSE-MIT b/example-code/qemu-aarch64v8a/vendor/embedded-alloc/LICENSE-MIT new file mode 100644 index 0000000..57535f3 --- /dev/null +++ b/example-code/qemu-aarch64v8a/vendor/embedded-alloc/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2016-2018 Jorge Aparicio + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/example-code/qemu-aarch64v8a/vendor/embedded-alloc/README.md b/example-code/qemu-aarch64v8a/vendor/embedded-alloc/README.md new file mode 100644 index 0000000..5fd93d7 --- /dev/null +++ b/example-code/qemu-aarch64v8a/vendor/embedded-alloc/README.md @@ -0,0 +1,78 @@ +[![crates.io](https://img.shields.io/crates/d/embedded-alloc.svg)](https://crates.io/crates/embedded-alloc) +[![crates.io](https://img.shields.io/crates/v/embedded-alloc.svg)](https://crates.io/crates/embedded-alloc) +![Minimum Supported Rust Version](https://img.shields.io/badge/rustc-1.68+-blue.svg) - + [Documentation](https://docs.rs/embedded-alloc) - [Change log](https://github.com/rust-embedded/embedded-alloc/blob/master/CHANGELOG.md) + +# `embedded-alloc` + +> A heap allocator for embedded systems. + +Note that using this as your global allocator requires Rust 1.68 or later. +(With earlier versions, you need the unstable feature `#![feature(default_alloc_error_handler)]`) + +This project is developed and maintained by the [libs team][team]. + +## Example + +Starting with Rust 1.68, this crate can be used as a global allocator on stable Rust: + +```rust +#![no_std] +#![no_main] + +extern crate alloc; + +use cortex_m_rt::entry; +use embedded_alloc::Heap; + +#[global_allocator] +static HEAP: Heap = Heap::empty(); + +#[entry] +fn main() -> ! { + // Initialize the allocator BEFORE you use it + { + use core::mem::MaybeUninit; + const HEAP_SIZE: usize = 1024; + static mut HEAP_MEM: [MaybeUninit; HEAP_SIZE] = [MaybeUninit::uninit(); HEAP_SIZE]; + unsafe { HEAP.init(HEAP_MEM.as_ptr() as usize, HEAP_SIZE) } + } + + // now the allocator is ready types like Box, Vec can be used. + + loop { /* .. */ } +} +``` + +For a full usage example, see [`examples/global_alloc.rs`](https://github.com/rust-embedded/embedded-alloc/blob/master/examples/global_alloc.rs). + +For this to work, an implementation of [`critical-section`](https://github.com/rust-embedded/critical-section) must be provided. + +For simple use cases you may enable the `critical-section-single-core` feature in the [cortex-m](https://github.com/rust-embedded/cortex-m) crate. +Please refer to the documentation of [`critical-section`](https://docs.rs/critical-section) for further guidance. + + +## License + +Licensed under either of + +- Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or + ) +- MIT license ([LICENSE-MIT](LICENSE-MIT) or ) + +at your option. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions. + +## Code of Conduct + +Contribution to this crate is organized under the terms of the [Rust Code of +Conduct][CoC], the maintainer of this crate, the [libs team][team], promises +to intervene to uphold that code of conduct. 
+ +[CoC]: CODE_OF_CONDUCT.md +[team]: https://github.com/rust-embedded/wg#the-libs-team diff --git a/example-code/qemu-aarch64v8a/vendor/embedded-alloc/examples/allocator_api.rs b/example-code/qemu-aarch64v8a/vendor/embedded-alloc/examples/allocator_api.rs new file mode 100644 index 0000000..95ab702 --- /dev/null +++ b/example-code/qemu-aarch64v8a/vendor/embedded-alloc/examples/allocator_api.rs @@ -0,0 +1,35 @@ +#![feature(allocator_api)] +#![no_std] +#![no_main] + +extern crate alloc; + +use alloc::vec::Vec; +use core::mem::MaybeUninit; +use core::panic::PanicInfo; +use cortex_m_rt::entry; +use embedded_alloc::Heap; + +// This is not used, but as of 2023-10-29 allocator_api cannot be used without +// a global heap +#[global_allocator] +static HEAP: Heap = Heap::empty(); + +#[entry] +fn main() -> ! { + const HEAP_SIZE: usize = 16; + static mut HEAP_MEM: [MaybeUninit; HEAP_SIZE] = [MaybeUninit::uninit(); HEAP_SIZE]; + let heap: Heap = Heap::empty(); + unsafe { heap.init(HEAP_MEM.as_ptr() as usize, HEAP_SIZE) } + + let mut xs = Vec::new_in(heap); + xs.push(1); + + #[allow(clippy::empty_loop)] + loop { /* .. */ } +} + +#[panic_handler] +fn panic(_: &PanicInfo) -> ! { + loop {} +} diff --git a/example-code/qemu-aarch64v8a/vendor/embedded-alloc/examples/global_alloc.rs b/example-code/qemu-aarch64v8a/vendor/embedded-alloc/examples/global_alloc.rs new file mode 100644 index 0000000..812c9f1 --- /dev/null +++ b/example-code/qemu-aarch64v8a/vendor/embedded-alloc/examples/global_alloc.rs @@ -0,0 +1,34 @@ +#![no_std] +#![no_main] + +extern crate alloc; + +use alloc::vec::Vec; +use core::panic::PanicInfo; +use cortex_m_rt::entry; +use embedded_alloc::Heap; + +#[global_allocator] +static HEAP: Heap = Heap::empty(); + +#[entry] +fn main() -> ! { + // Initialize the allocator BEFORE you use it + { + use core::mem::MaybeUninit; + const HEAP_SIZE: usize = 1024; + static mut HEAP_MEM: [MaybeUninit; HEAP_SIZE] = [MaybeUninit::uninit(); HEAP_SIZE]; + unsafe { HEAP.init(HEAP_MEM.as_ptr() as usize, HEAP_SIZE) } + } + + let mut xs = Vec::new(); + xs.push(1); + + #[allow(clippy::empty_loop)] + loop { /* .. */ } +} + +#[panic_handler] +fn panic(_: &PanicInfo) -> ! { + loop {} +} diff --git a/example-code/qemu-aarch64v8a/vendor/embedded-alloc/examples/integration_test.rs b/example-code/qemu-aarch64v8a/vendor/embedded-alloc/examples/integration_test.rs new file mode 100644 index 0000000..a45ba87 --- /dev/null +++ b/example-code/qemu-aarch64v8a/vendor/embedded-alloc/examples/integration_test.rs @@ -0,0 +1,88 @@ +//! This is a very basic smoke test that runs in QEMU +//! Reference the QEMU section of the [Embedded Rust Book] for more information +//! +//! This only tests integration of the allocator on an embedded target. +//! Comprehensive allocator tests are located in the allocator dependency. +//! +//! After toolchain installation this test can be run with: +//! +//! ```bash +//! cargo +nightly run --target thumbv7m-none-eabi --example integration_test --all-features +//! ``` +//! +//! 
[Embedded Rust Book]: https://docs.rust-embedded.org/book/intro/index.html + +#![feature(allocator_api)] +#![no_main] +#![no_std] + +extern crate alloc; +extern crate panic_semihosting; + +use alloc::vec::Vec; +use core::mem::{size_of, MaybeUninit}; +use cortex_m_rt::entry; +use cortex_m_semihosting::{debug, hprintln}; +use embedded_alloc::Heap; + +#[global_allocator] +static HEAP: Heap = Heap::empty(); + +fn test_global_heap() { + assert_eq!(HEAP.used(), 0); + + let mut xs: Vec = alloc::vec![1]; + xs.push(2); + xs.extend(&[3, 4]); + + // do not optimize xs + core::hint::black_box(&mut xs); + + assert_eq!(xs.as_slice(), &[1, 2, 3, 4]); + assert_eq!(HEAP.used(), size_of::() * xs.len()); +} + +fn test_allocator_api() { + // small local heap + const HEAP_SIZE: usize = 16; + let heap_mem: [MaybeUninit; HEAP_SIZE] = [MaybeUninit::uninit(); HEAP_SIZE]; + let local_heap: Heap = Heap::empty(); + unsafe { local_heap.init(heap_mem.as_ptr() as usize, HEAP_SIZE) } + + assert_eq!(local_heap.used(), 0); + + let mut v: Vec = Vec::new_in(local_heap); + v.push(0xCAFE); + v.extend(&[0xDEAD, 0xFEED]); + + // do not optimize v + core::hint::black_box(&mut v); + + assert_eq!(v.as_slice(), &[0xCAFE, 0xDEAD, 0xFEED]); +} + +#[entry] +fn main() -> ! { + { + const HEAP_SIZE: usize = 1024; + static mut HEAP_MEM: [MaybeUninit; HEAP_SIZE] = [MaybeUninit::uninit(); HEAP_SIZE]; + unsafe { HEAP.init(HEAP_MEM.as_ptr() as usize, HEAP_SIZE) } + } + + #[allow(clippy::type_complexity)] + let tests: &[(fn() -> (), &'static str)] = &[ + (test_global_heap, "test_global_heap"), + (test_allocator_api, "test_allocator_api"), + ]; + + for (test_fn, test_name) in tests { + hprintln!("{}: start", test_name); + test_fn(); + hprintln!("{}: pass", test_name); + } + + // exit QEMU with a success status + debug::exit(debug::EXIT_SUCCESS); + #[allow(clippy::empty_loop)] + loop {} +} diff --git a/example-code/qemu-aarch64v8a/vendor/embedded-alloc/memory.x b/example-code/qemu-aarch64v8a/vendor/embedded-alloc/memory.x new file mode 100644 index 0000000..367c5c8 --- /dev/null +++ b/example-code/qemu-aarch64v8a/vendor/embedded-alloc/memory.x @@ -0,0 +1,6 @@ +MEMORY +{ + /* These values correspond to the LM3S6965, one of the few devices QEMU can emulate */ + FLASH : ORIGIN = 0x00000000, LENGTH = 256K + RAM : ORIGIN = 0x20000000, LENGTH = 64K +} diff --git a/example-code/qemu-aarch64v8a/vendor/embedded-alloc/src/lib.rs b/example-code/qemu-aarch64v8a/vendor/embedded-alloc/src/lib.rs new file mode 100644 index 0000000..cc9fe1e --- /dev/null +++ b/example-code/qemu-aarch64v8a/vendor/embedded-alloc/src/lib.rs @@ -0,0 +1,114 @@ +#![doc = include_str!("../README.md")] +#![no_std] +#![cfg_attr(feature = "allocator_api", feature(allocator_api, alloc_layout_extra))] + +use core::alloc::{GlobalAlloc, Layout}; +use core::cell::RefCell; +use core::ptr::{self, NonNull}; + +use critical_section::Mutex; +use linked_list_allocator::Heap as LLHeap; + +pub struct Heap { + heap: Mutex>, +} + +impl Heap { + /// Crate a new UNINITIALIZED heap allocator + /// + /// You must initialize this heap using the + /// [`init`](Self::init) method before using the allocator. + pub const fn empty() -> Heap { + Heap { + heap: Mutex::new(RefCell::new(LLHeap::empty())), + } + } + + /// Initializes the heap + /// + /// This function must be called BEFORE you run any code that makes use of the + /// allocator. + /// + /// `start_addr` is the address where the heap will be located. + /// + /// `size` is the size of the heap in bytes. 
+ /// + /// Note that: + /// + /// - The heap grows "upwards", towards larger addresses. Thus `start_addr` will + /// be the smallest address used. + /// + /// - The largest address used is `start_addr + size - 1`, so if `start_addr` is + /// `0x1000` and `size` is `0x30000` then the allocator won't use memory at + /// addresses `0x31000` and larger. + /// + /// # Safety + /// + /// Obey these or Bad Stuff will happen. + /// + /// - This function must be called exactly ONCE. + /// - `size > 0` + pub unsafe fn init(&self, start_addr: usize, size: usize) { + critical_section::with(|cs| { + self.heap + .borrow(cs) + .borrow_mut() + .init(start_addr as *mut u8, size); + }); + } + + /// Returns an estimate of the amount of bytes in use. + pub fn used(&self) -> usize { + critical_section::with(|cs| self.heap.borrow(cs).borrow_mut().used()) + } + + /// Returns an estimate of the amount of bytes available. + pub fn free(&self) -> usize { + critical_section::with(|cs| self.heap.borrow(cs).borrow_mut().free()) + } + + fn alloc_first_fit(&self, layout: Layout) -> Result, ()> { + critical_section::with(|cs| self.heap.borrow(cs).borrow_mut().allocate_first_fit(layout)) + } +} + +unsafe impl GlobalAlloc for Heap { + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + self.alloc_first_fit(layout) + .ok() + .map_or(ptr::null_mut(), |allocation| allocation.as_ptr()) + } + + unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { + critical_section::with(|cs| { + self.heap + .borrow(cs) + .borrow_mut() + .deallocate(NonNull::new_unchecked(ptr), layout) + }); + } +} + +#[cfg(feature = "allocator_api")] +mod allocator_api { + use core::alloc::{AllocError, Allocator, GlobalAlloc, Layout}; + use core::ptr::NonNull; + + unsafe impl Allocator for crate::Heap { + fn allocate(&self, layout: Layout) -> Result, AllocError> { + match layout.size() { + 0 => Ok(NonNull::slice_from_raw_parts(layout.dangling(), 0)), + size => self + .alloc_first_fit(layout) + .map(|allocation| NonNull::slice_from_raw_parts(allocation, size)) + .map_err(|_| AllocError), + } + } + + unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { + if layout.size() != 0 { + self.dealloc(ptr.as_ptr(), layout); + } + } + } +} diff --git a/example-code/qemu-aarch64v8a/vendor/embedded-alloc/triagebot.toml b/example-code/qemu-aarch64v8a/vendor/embedded-alloc/triagebot.toml new file mode 100644 index 0000000..fa0824a --- /dev/null +++ b/example-code/qemu-aarch64v8a/vendor/embedded-alloc/triagebot.toml @@ -0,0 +1 @@ +[assign] diff --git a/example-code/qemu-aarch64v8a/vendor/linked_list_allocator/.cargo-checksum.json b/example-code/qemu-aarch64v8a/vendor/linked_list_allocator/.cargo-checksum.json new file mode 100644 index 0000000..377cc11 --- /dev/null +++ b/example-code/qemu-aarch64v8a/vendor/linked_list_allocator/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{"Cargo.toml":"55feafda6839a4ac9074b8f01c4db915defe1b758247d87e78ea7b9e13bcc6b1","Changelog.md":"275a38e7aca5972dd336dd90197bb6b8d21b6a3ee7daa41162e9783ddb692022","LICENSE-APACHE":"4d10fe5f3aa176b05b229a248866bad70b834c173f1252a814ff4748d8a13837","LICENSE-MIT":"9b34cf73fe71998b241ae7084adce6a84f3a65e01e1b9c52696b5e34b763b108","README.md":"696315eb554713d6f938ec9451d1a9fa0ddcb11cee89ccf8c82cfdc602066210","rust-toolchain":"58bea07cb6d97f9cfcd5c8f98b1feca0fb81cce5b0bf29a8e70ed2641956e9a6","src/hole.rs":"821f5d5f60641e944ddbd43c0ddeacece7049bdb81db8a3b70dabba5e1a39f11","src/lib.rs":"a1f28cd938ea6980c91a92a7163d5424790560154795169a363f6544888da84a","src/test.rs":"54eea4eacc7ff55c9d0aa2aeb657f147bdd21fc130a479ebba211c91b9a42ab1"},"package":"9afa463f5405ee81cdb9cc2baf37e08ec7e4c8209442b5d72c04cfb2cd6e6286"} \ No newline at end of file diff --git a/example-code/qemu-aarch64v8a/vendor/linked_list_allocator/Cargo.toml b/example-code/qemu-aarch64v8a/vendor/linked_list_allocator/Cargo.toml new file mode 100644 index 0000000..a575dfc --- /dev/null +++ b/example-code/qemu-aarch64v8a/vendor/linked_list_allocator/Cargo.toml @@ -0,0 +1,53 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +rust-version = "1.61" +name = "linked_list_allocator" +version = "0.10.5" +authors = ["Philipp Oppermann "] +description = "Simple allocator usable for no_std systems. It builds a linked list from the freed blocks and thus needs no additional data structures." +homepage = "http://os.phil-opp.com/kernel-heap.html#a-better-allocator" +documentation = "https://docs.rs/crate/linked_list_allocator" +readme = "README.md" +keywords = [ + "allocator", + "no_std", + "malloc", + "heap", + "kernel", +] +license = "Apache-2.0/MIT" +repository = "https://github.com/phil-opp/linked-list-allocator" + +[package.metadata.release] +dev-version = false +pre-release-commit-message = "Release version {{version}}" + +[[package.metadata.release.pre-release-replacements]] +exactly = 1 +file = "Changelog.md" +replace = """ +# Unreleased + +# {{version}} – {{date}}""" +search = "# Unreleased" + +[dependencies.spinning_top] +version = "0.2.5" +optional = true + +[features] +alloc_ref = [] +const_mut_refs = [] +default = ["use_spin"] +use_spin = ["spinning_top"] +use_spin_nightly = ["use_spin"] diff --git a/example-code/qemu-aarch64v8a/vendor/linked_list_allocator/Changelog.md b/example-code/qemu-aarch64v8a/vendor/linked_list_allocator/Changelog.md new file mode 100644 index 0000000..111d012 --- /dev/null +++ b/example-code/qemu-aarch64v8a/vendor/linked_list_allocator/Changelog.md @@ -0,0 +1,117 @@ +# Unreleased + +# 0.10.5 – 2023-03-04 + +- Remove features `const_mut_refs` and `use_spin_nightly`. + + Since rust 1.61, the [required const features](https://github.com/rust-lang/rust/pull/93827) are available in stable rust, and `lock_api` >= 0.4.7 automatically uses them. + To avoid a breaking change, the features are still listed in Cargo.toml, but have no effect and are marked as deprecated. + This bumps the minimum supported rust version to 1.61. 
+ +# 0.10.4 – 2022-10-10 + +- Fix [memory leak of small back paddings](https://github.com/rust-osdev/linked-list-allocator/issues/66) by considering regions that would result in such small back paddings as unsuitable ([#71](https://github.com/rust-osdev/linked-list-allocator/pull/71)) + +# 0.10.3 – 2022-09-06 + +- Fix build on stable by adding missing field in `HoleList` initializer ([#68](https://github.com/rust-osdev/linked-list-allocator/pull/68)) + - Fixes a bug introduced in `v0.10.2`. + +# 0.10.2 – 2022-09-06 + +Fix for potential out-of-bound writes that were possible on `Heap` initialization and `Heap::extend`. See the [security advisory](https://github.com/rust-osdev/linked-list-allocator/security/advisories/GHSA-xg8p-34w2-j49j) for details. The issues were fixed in the following way: + +- The initialization functions now panic if the given size is not large enough to store the necessary metadata. Depending on the alignment of the heap bottom pointer, the minimum size is between `2 * size_of::` and `3 * size_of::`. +- The `extend` method now panics when trying to extend an unitialized heap. +- Extend calls with a size smaller than `size_of::() * 2` are now buffered internally and not added to the list directly. The buffered region will be merged with future `extend` calls. +- The `size()` method now returns the _usable_ size of the heap, which might be slightly smaller than the `top() - bottom()` difference because of alignment constraints. + +# 0.10.1 – 2022-07-07 + +- Fixed logic for freeing nodes ([#64]) + +[#64]: https://github.com/rust-osdev/linked-list-allocator/pull/64 + +# 0.10.0 – 2022-06-27 + +- Changed constructor to take `*mut u8` instead of `usize` ([#62]) + - NOTE: Breaking API change +- Reworked internals to pass Miri tests ([#62]) + +[#62]: https://github.com/phil-opp/linked-list-allocator/pull/62 + +# 0.9.1 – 2021-10-17 + +- Add safe constructor and initialization for `Heap` ([#55](https://github.com/phil-opp/linked-list-allocator/pull/55)) +- Merge front/back padding after allocate current hole ([#54](https://github.com/phil-opp/linked-list-allocator/pull/54)) + +# 0.9.0 – 2021-05-01 + +- Update `spinning_top` dependency to `v0.2.3` ([#50](https://github.com/phil-opp/linked-list-allocator/pull/50)) + +# 0.8.11 – 2021-01-02 + +- Add new `use_spin_nightly` feature, which, together with `const_mut_refs`, makes the `empty` method of `LockedHeap` const ([#49](https://github.com/phil-opp/linked-list-allocator/pull/49)) + +# 0.8.10 – 2020-12-28 + +- Made hole module public for external uses ([#47](https://github.com/phil-opp/linked-list-allocator/pull/47)) + +# 0.8.9 – 2020-12-27 + +- Don't require nightly for `use_spin` feature ([#46](https://github.com/phil-opp/linked-list-allocator/pull/46)) + +# 0.8.8 – 2020-12-16 + +- Do not require alloc crate ([#44](https://github.com/phil-opp/linked-list-allocator/pull/44)) + +# 0.8.7 – 2020-12-10 + +- _Unstable Breakage:_ fix(alloc_ref): Use new nightly Allocator trait [#42](https://github.com/phil-opp/linked-list-allocator/pull/42) +- Build on stable without features [#43](https://github.com/phil-opp/linked-list-allocator/pull/43) + - Adds a new `const_mut_refs` crate feature (enabled by default). + - By disabling this feature, it's possible to build the crate on stable Rust. 
+ +# 0.8.6 – 2020-09-24 + +- Fix build error on latest nightly ([#35](https://github.com/phil-opp/linked-list-allocator/pull/35)) + +# 0.8.5 – 2020-08-13 + +- Update AllocRef implementation for latest API changes ([#33](https://github.com/phil-opp/linked-list-allocator/pull/33)) + +# 0.8.4 + +- Add function to get used and free heap size ([#32](https://github.com/phil-opp/linked-list-allocator/pull/32)) + +# 0.8.3 + +- Prevent writing to heap memory range when size too small ([#31](https://github.com/phil-opp/linked-list-allocator/pull/31)) + +# 0.8.2 + +- Update AllocRef implementation for latest API changes ([#30](https://github.com/phil-opp/linked-list-allocator/pull/30)) + +# 0.8.1 + +- AllocRef::alloc is now safe and allows zero-sized allocations ([#28](https://github.com/phil-opp/linked-list-allocator/pull/28)) + - This is technically a **breaking change** for the unstable `alloc_ref` feature of this crate because it now requires a newer nightly version of Rust. + +# 0.8.0 + +- **Breaking**: Make AllocRef implementation optional behind new `alloc_ref` feature + - To enable the `AllocRef` implementation again, enable the `alloc_ref` feature of this crate in your Cargo.toml +- Fix build on nightly 1.43.0 (05-03-2020) ([#25](https://github.com/phil-opp/linked-list-allocator/pull/25)) + +# 0.7.0 + +- Use new spinning_top crate instead of `spin` ([#23](https://github.com/phil-opp/linked-list-allocator/pull/23)) + +# 0.6.6 + +- The `Alloc` trait was renamed to `AllocRef` ([#20](https://github.com/phil-opp/linked-list-allocator/pull/20)) + +# 0.6.5 + +- Align up the Hole initialization address ([#18](https://github.com/phil-opp/linked-list-allocator/pull/18)) +- Remove `alloc` feature gate, which is now stable diff --git a/example-code/qemu-aarch64v8a/vendor/linked_list_allocator/LICENSE-APACHE b/example-code/qemu-aarch64v8a/vendor/linked_list_allocator/LICENSE-APACHE new file mode 100644 index 0000000..cd482d8 --- /dev/null +++ b/example-code/qemu-aarch64v8a/vendor/linked_list_allocator/LICENSE-APACHE @@ -0,0 +1,201 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, +and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by +the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all +other entities that control, are controlled by, or are under common +control with that entity. For the purposes of this definition, +"control" means (i) the power, direct or indirect, to cause the +direction or management of such entity, whether by contract or +otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity +exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, +including but not limited to software source code, documentation +source, and configuration files. + +"Object" form shall mean any form resulting from mechanical +transformation or translation of a Source form, including but +not limited to compiled object code, generated documentation, +and conversions to other media types. 
+ +"Work" shall mean the work of authorship, whether in Source or +Object form, made available under the License, as indicated by a +copyright notice that is included in or attached to the work +(an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object +form, that is based on (or derived from) the Work and for which the +editorial revisions, annotations, elaborations, or other modifications +represent, as a whole, an original work of authorship. For the purposes +of this License, Derivative Works shall not include works that remain +separable from, or merely link (or bind by name) to the interfaces of, +the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including +the original version of the Work and any modifications or additions +to that Work or Derivative Works thereof, that is intentionally +submitted to Licensor for inclusion in the Work by the copyright owner +or by an individual or Legal Entity authorized to submit on behalf of +the copyright owner. For the purposes of this definition, "submitted" +means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, +and issue tracking systems that are managed by, or on behalf of, the +Licensor for the purpose of discussing and improving the Work, but +excluding communication that is conspicuously marked or otherwise +designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity +on behalf of whom a Contribution has been received by Licensor and +subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the +Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +(except as stated in this section) patent license to make, have made, +use, offer to sell, sell, import, and otherwise transfer the Work, +where such license applies only to those patent claims licensable +by such Contributor that are necessarily infringed by their +Contribution(s) alone or by combination of their Contribution(s) +with the Work to which such Contribution(s) was submitted. If You +institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work +or a Contribution incorporated within the Work constitutes direct +or contributory patent infringement, then any patent licenses +granted to You under this License for that Work shall terminate +as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the +Work or Derivative Works thereof in any medium, with or without +modifications, and in Source or Object form, provided that You +meet the following conditions: + +(a) You must give any other recipients of the Work or +Derivative Works a copy of this License; and + +(b) You must cause any modified files to carry prominent notices +stating that You changed the files; and + +(c) You must retain, in the Source form of any Derivative Works +that You distribute, all copyright, patent, trademark, and +attribution notices from the Source form of the Work, +excluding those notices that do not pertain to any part of +the Derivative Works; and + +(d) If the Work includes a "NOTICE" text file as part of its +distribution, then any Derivative Works that You distribute must +include a readable copy of the attribution notices contained +within such NOTICE file, excluding those notices that do not +pertain to any part of the Derivative Works, in at least one +of the following places: within a NOTICE text file distributed +as part of the Derivative Works; within the Source form or +documentation, if provided along with the Derivative Works; or, +within a display generated by the Derivative Works, if and +wherever such third-party notices normally appear. The contents +of the NOTICE file are for informational purposes only and +do not modify the License. You may add Your own attribution +notices within Derivative Works that You distribute, alongside +or as an addendum to the NOTICE text from the Work, provided +that such additional attribution notices cannot be construed +as modifying the License. + +You may add Your own copyright statement to Your modifications and +may provide additional or different license terms and conditions +for use, reproduction, or distribution of Your modifications, or +for any such Derivative Works as a whole, provided Your use, +reproduction, and distribution of the Work otherwise complies with +the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, +any Contribution intentionally submitted for inclusion in the Work +by You to the Licensor shall be under the terms and conditions of +this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify +the terms of any separate license agreement you may have executed +with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade +names, trademarks, service marks, or product names of the Licensor, +except as required for reasonable and customary use in describing the +origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or +agreed to in writing, Licensor provides the Work (and each +Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied, including, without limitation, any warranties or conditions +of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A +PARTICULAR PURPOSE. You are solely responsible for determining the +appropriateness of using or redistributing the Work and assume any +risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, +whether in tort (including negligence), contract, or otherwise, +unless required by applicable law (such as deliberate and grossly +negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, +incidental, or consequential damages of any character arising as a +result of this License or out of the use or inability to use the +Work (including but not limited to damages for loss of goodwill, +work stoppage, computer failure or malfunction, or any and all +other commercial damages or losses), even if such Contributor +has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing +the Work or Derivative Works thereof, You may choose to offer, +and charge a fee for, acceptance of support, warranty, indemnity, +or other liability obligations and/or rights consistent with this +License. However, in accepting such obligations, You may act only +on Your own behalf and on Your sole responsibility, not on behalf +of any other Contributor, and only if You agree to indemnify, +defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason +of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following +boilerplate notice, with the fields enclosed by brackets "[]" +replaced with your own identifying information. (Don't include +the brackets!) The text should be enclosed in the appropriate +comment syntax for the file format. We also recommend that a +file or class name and description of purpose be included on the +same "printed page" as the copyright notice for easier +identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/example-code/qemu-aarch64v8a/vendor/linked_list_allocator/LICENSE-MIT b/example-code/qemu-aarch64v8a/vendor/linked_list_allocator/LICENSE-MIT new file mode 100644 index 0000000..2e0e7b1 --- /dev/null +++ b/example-code/qemu-aarch64v8a/vendor/linked_list_allocator/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2016 Philipp Oppermann + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/example-code/qemu-aarch64v8a/vendor/linked_list_allocator/README.md b/example-code/qemu-aarch64v8a/vendor/linked_list_allocator/README.md new file mode 100644 index 0000000..56ba834 --- /dev/null +++ b/example-code/qemu-aarch64v8a/vendor/linked_list_allocator/README.md @@ -0,0 +1,41 @@ +# linked-list-allocator + +[![Crates.io](https://img.shields.io/crates/v/linked-list-allocator)](https://crates.io/crates/linked-list-allocator) +[![Build Status](https://github.com/rust-osdev/linked-list-allocator/workflows/Build/badge.svg)](https://github.com/rust-osdev/linked-list-allocator/actions?query=workflow%3ABuild) +[![docs.rs](https://img.shields.io/badge/docs.rs-documentation-green.svg)](https://docs.rs/linked-list-allocator) + +## Usage + +Create a static allocator in your root module: + +```rust +use linked_list_allocator::LockedHeap; + +#[global_allocator] +static ALLOCATOR: LockedHeap = LockedHeap::empty(); +``` + +Before using this allocator, you need to init it: + +```rust +pub fn init_heap() { + let heap_start = …; + let heap_end = …; + let heap_size = heap_end - heap_start; + unsafe { + ALLOCATOR.lock().init(heap_start, heap_size); + } +} +``` + +## Features + +- **`use_spin`** (default): Provide a `LockedHeap` type that implements the [`GlobalAlloc`] trait by using a spinlock. +- **`alloc_ref`**: Provide an implementation of the unstable [`AllocRef`] trait; requires nightly Rust. + - Warning: The `AllocRef` trait is still regularly changed on the Rust side, so expect some regular breakage when using this feature. + +[`GlobalAlloc`]: https://doc.rust-lang.org/nightly/core/alloc/trait.GlobalAlloc.html +[`AllocRef`]: https://doc.rust-lang.org/nightly/core/alloc/trait.AllocRef.html + +## License +This crate is dual-licensed under MIT or the Apache License (Version 2.0). See LICENSE-APACHE and LICENSE-MIT for details. diff --git a/example-code/qemu-aarch64v8a/vendor/linked_list_allocator/rust-toolchain b/example-code/qemu-aarch64v8a/vendor/linked_list_allocator/rust-toolchain new file mode 100644 index 0000000..bf867e0 --- /dev/null +++ b/example-code/qemu-aarch64v8a/vendor/linked_list_allocator/rust-toolchain @@ -0,0 +1 @@ +nightly diff --git a/example-code/qemu-aarch64v8a/vendor/linked_list_allocator/src/hole.rs b/example-code/qemu-aarch64v8a/vendor/linked_list_allocator/src/hole.rs new file mode 100644 index 0000000..f7481bf --- /dev/null +++ b/example-code/qemu-aarch64v8a/vendor/linked_list_allocator/src/hole.rs @@ -0,0 +1,783 @@ +use core::alloc::Layout; +use core::mem; +use core::mem::{align_of, size_of}; +use core::ptr::null_mut; +use core::ptr::NonNull; + +use crate::{align_down_size, align_up_size}; + +use super::align_up; + +/// A sorted list of holes. It uses the the holes itself to store its nodes. +pub struct HoleList { + pub(crate) first: Hole, // dummy + pub(crate) bottom: *mut u8, + pub(crate) top: *mut u8, + pub(crate) pending_extend: u8, +} + +pub(crate) struct Cursor { + prev: NonNull, + hole: NonNull, + top: *mut u8, +} + +/// A block containing free memory. 
It points to the next hole and thus forms a linked list. +pub(crate) struct Hole { + pub size: usize, + pub next: Option>, +} + +/// Basic information about a hole. +#[derive(Debug, Clone, Copy)] +struct HoleInfo { + addr: *mut u8, + size: usize, +} + +impl Cursor { + fn next(mut self) -> Option { + unsafe { + self.hole.as_mut().next.map(|nhole| Cursor { + prev: self.hole, + hole: nhole, + top: self.top, + }) + } + } + + fn current(&self) -> &Hole { + unsafe { self.hole.as_ref() } + } + + fn previous(&self) -> &Hole { + unsafe { self.prev.as_ref() } + } + + // On success, it returns the new allocation, and the linked list has been updated + // to accomodate any new holes and allocation. On error, it returns the cursor + // unmodified, and has made no changes to the linked list of holes. + fn split_current(self, required_layout: Layout) -> Result<(*mut u8, usize), Self> { + let front_padding; + let alloc_ptr; + let alloc_size; + let back_padding; + + // Here we create a scope, JUST to make sure that any created references do not + // live to the point where we start doing pointer surgery below. + { + let hole_size = self.current().size; + let hole_addr_u8 = self.hole.as_ptr().cast::(); + let required_size = required_layout.size(); + let required_align = required_layout.align(); + + // Quick check: If the new item is larger than the current hole, it's never gunna + // work. Go ahead and bail early to save ourselves some math. + if hole_size < required_size { + return Err(self); + } + + // Attempt to fracture the current hole into the following parts: + // ([front_padding], allocation, [back_padding]) + // + // The paddings are optional, and only placed if required. + // + // First, figure out if front padding is necessary. This would be necessary if the new + // allocation has a larger alignment requirement than the current hole, and we didn't get + // lucky that the current position was well-aligned enough for the new item. + let aligned_addr = if hole_addr_u8 == align_up(hole_addr_u8, required_align) { + // hole has already the required alignment, no front padding is needed. + front_padding = None; + hole_addr_u8 + } else { + // Unfortunately, we did not get lucky. Instead: Push the "starting location" FORWARD the size + // of a hole node, to guarantee there is at least enough room for the hole header, and + // potentially additional space. + let new_start = hole_addr_u8.wrapping_add(HoleList::min_size()); + + let aligned_addr = align_up(new_start, required_align); + front_padding = Some(HoleInfo { + // Our new front padding will exist at the same location as the previous hole, + // it will just have a smaller size after we have chopped off the "tail" for + // the allocation. + addr: hole_addr_u8, + size: (aligned_addr as usize) - (hole_addr_u8 as usize), + }); + aligned_addr + }; + + // Okay, now that we found space, we need to see if the decisions we just made + // ACTUALLY fit in the previous hole space + let allocation_end = aligned_addr.wrapping_add(required_size); + let hole_end = hole_addr_u8.wrapping_add(hole_size); + + if allocation_end > hole_end { + // hole is too small + return Err(self); + } + + // Yes! We have successfully placed our allocation as well. + alloc_ptr = aligned_addr; + alloc_size = required_size; + + // Okay, time to move onto the back padding. 
+ let back_padding_size = hole_end as usize - allocation_end as usize; + back_padding = if back_padding_size == 0 { + None + } else { + // NOTE: Because we always use `HoleList::align_layout`, the size of + // the new allocation is always "rounded up" to cover any partial gaps that + // would have occurred. For this reason, we DON'T need to "round up" + // to account for an unaligned hole spot. + let hole_layout = Layout::new::(); + let back_padding_start = align_up(allocation_end, hole_layout.align()); + let back_padding_end = back_padding_start.wrapping_add(hole_layout.size()); + + // Will the proposed new back padding actually fit in the old hole slot? + if back_padding_end <= hole_end { + // Yes, it does! Place a back padding node + Some(HoleInfo { + addr: back_padding_start, + size: back_padding_size, + }) + } else { + // No, it does not. We don't want to leak any heap bytes, so we + // consider this hole unsuitable for the requested allocation. + return Err(self); + } + }; + } + + //////////////////////////////////////////////////////////////////////////// + // This is where we actually perform surgery on the linked list. + //////////////////////////////////////////////////////////////////////////// + let Cursor { + mut prev, mut hole, .. + } = self; + // Remove the current location from the previous node + unsafe { + prev.as_mut().next = None; + } + // Take the next node out of our current node + let maybe_next_addr: Option> = unsafe { hole.as_mut().next.take() }; + + // As of now, the old `Hole` is no more. We are about to replace it with one or more of + // the front padding, the allocation, and the back padding. + + match (front_padding, back_padding) { + (None, None) => { + // No padding at all, how lucky! We still need to connect the PREVIOUS node + // to the NEXT node, if there was one + unsafe { + prev.as_mut().next = maybe_next_addr; + } + } + (None, Some(singlepad)) | (Some(singlepad), None) => unsafe { + // We have front padding OR back padding, but not both. + // + // Replace the old node with the new single node. We need to stitch the new node + // into the linked list. Start by writing the padding into the proper location + let singlepad_ptr = singlepad.addr.cast::(); + singlepad_ptr.write(Hole { + size: singlepad.size, + // If the old hole had a next pointer, the single padding now takes + // "ownership" of that link + next: maybe_next_addr, + }); + + // Then connect the OLD previous to the NEW single padding + prev.as_mut().next = Some(NonNull::new_unchecked(singlepad_ptr)); + }, + (Some(frontpad), Some(backpad)) => unsafe { + // We have front padding AND back padding. + // + // We need to stich them together as two nodes where there used to + // only be one. Start with the back padding. + let backpad_ptr = backpad.addr.cast::(); + backpad_ptr.write(Hole { + size: backpad.size, + // If the old hole had a next pointer, the BACK padding now takes + // "ownership" of that link + next: maybe_next_addr, + }); + + // Now we emplace the front padding, and link it to both the back padding, + // and the old previous + let frontpad_ptr = frontpad.addr.cast::(); + frontpad_ptr.write(Hole { + size: frontpad.size, + // We now connect the FRONT padding to the BACK padding + next: Some(NonNull::new_unchecked(backpad_ptr)), + }); + + // Then connect the OLD previous to the NEW FRONT padding + prev.as_mut().next = Some(NonNull::new_unchecked(frontpad_ptr)); + }, + } + + // Well that went swimmingly! Hand off the allocation, with surgery performed successfully! 
+ Ok((alloc_ptr, alloc_size)) + } +} + +// See if we can extend this hole towards the end of the allocation region +// If so: increase the size of the node. If no: keep the node as-is +fn check_merge_top(mut node: NonNull, top: *mut u8) { + let node_u8 = node.as_ptr().cast::(); + let node_sz = unsafe { node.as_ref().size }; + + // If this is the last node, we need to see if we need to merge to the end + let end = node_u8.wrapping_add(node_sz); + let hole_layout = Layout::new::(); + if end < top { + let next_hole_end = align_up(end, hole_layout.align()).wrapping_add(hole_layout.size()); + + if next_hole_end > top { + let offset = (top as usize) - (end as usize); + unsafe { + node.as_mut().size += offset; + } + } + } +} + +// See if we can scoot this hole back to the bottom of the allocation region +// If so: create and return the new hole. If not: return the existing hole +fn check_merge_bottom(node: NonNull, bottom: *mut u8) -> NonNull { + debug_assert_eq!(bottom as usize % align_of::(), 0); + + if bottom.wrapping_add(core::mem::size_of::()) > node.as_ptr().cast::() { + let offset = (node.as_ptr() as usize) - (bottom as usize); + let size = unsafe { node.as_ref() }.size + offset; + unsafe { make_hole(bottom, size) } + } else { + node + } +} + +impl HoleList { + /// Creates an empty `HoleList`. + pub const fn empty() -> HoleList { + HoleList { + first: Hole { + size: 0, + next: None, + }, + bottom: null_mut(), + top: null_mut(), + pending_extend: 0, + } + } + + pub(crate) fn cursor(&mut self) -> Option { + if let Some(hole) = self.first.next { + Some(Cursor { + hole, + prev: NonNull::new(&mut self.first)?, + top: self.top, + }) + } else { + None + } + } + + #[cfg(any(test, fuzzing))] + #[allow(dead_code)] + pub(crate) fn debug(&mut self) { + if let Some(cursor) = self.cursor() { + let mut cursor = cursor; + loop { + println!( + "prev: {:?}[{}], hole: {:?}[{}]", + cursor.previous() as *const Hole, + cursor.previous().size, + cursor.current() as *const Hole, + cursor.current().size, + ); + if let Some(c) = cursor.next() { + cursor = c; + } else { + println!("Done!"); + return; + } + } + } else { + println!("No holes"); + } + } + + /// Creates a `HoleList` that contains the given hole. + /// + /// The `hole_addr` pointer is automatically aligned, so the `bottom` + /// field might be larger than the given `hole_addr`. + /// + /// The given `hole_size` must be large enough to store the required + /// metadata, otherwise this function will panic. Depending on the + /// alignment of the `hole_addr` pointer, the minimum size is between + /// `2 * size_of::` and `3 * size_of::`. + /// + /// The usable size for allocations will be truncated to the nearest + /// alignment of `align_of::`. Any extra bytes left at the end + /// will be reclaimed once sufficient additional space is given to + /// [`extend`][crate::Heap::extend]. + /// + /// # Safety + /// + /// This function is unsafe because it creates a hole at the given `hole_addr`. + /// This can cause undefined behavior if this address is invalid or if memory from the + /// `[hole_addr, hole_addr+size)` range is used somewhere else. 
+ pub unsafe fn new(hole_addr: *mut u8, hole_size: usize) -> HoleList { + assert_eq!(size_of::(), Self::min_size()); + assert!(hole_size >= size_of::()); + + let aligned_hole_addr = align_up(hole_addr, align_of::()); + let requested_hole_size = hole_size - ((aligned_hole_addr as usize) - (hole_addr as usize)); + let aligned_hole_size = align_down_size(requested_hole_size, align_of::()); + assert!(aligned_hole_size >= size_of::()); + + let ptr = aligned_hole_addr as *mut Hole; + ptr.write(Hole { + size: aligned_hole_size, + next: None, + }); + + assert_eq!( + hole_addr.wrapping_add(hole_size), + aligned_hole_addr.wrapping_add(requested_hole_size) + ); + + HoleList { + first: Hole { + size: 0, + next: Some(NonNull::new_unchecked(ptr)), + }, + bottom: aligned_hole_addr, + top: aligned_hole_addr.wrapping_add(aligned_hole_size), + pending_extend: (requested_hole_size - aligned_hole_size) as u8, + } + } + + /// Aligns the given layout for use with `HoleList`. + /// + /// Returns a layout with size increased to fit at least `HoleList::min_size` and proper + /// alignment of a `Hole`. + /// + /// The [`allocate_first_fit`][HoleList::allocate_first_fit] and + /// [`deallocate`][HoleList::deallocate] methods perform the required alignment + /// themselves, so calling this function manually is not necessary. + pub fn align_layout(layout: Layout) -> Layout { + let mut size = layout.size(); + if size < Self::min_size() { + size = Self::min_size(); + } + let size = align_up_size(size, mem::align_of::()); + Layout::from_size_align(size, layout.align()).unwrap() + } + + /// Searches the list for a big enough hole. + /// + /// A hole is big enough if it can hold an allocation of `layout.size()` bytes with + /// the given `layout.align()`. If such a hole is found in the list, a block of the + /// required size is allocated from it. Then the start address of that + /// block and the aligned layout are returned. The automatic layout alignment is required + /// because the `HoleList` has some additional layout requirements for each memory block. + /// + /// This function uses the “first fit” strategy, so it uses the first hole that is big + /// enough. Thus the runtime is in O(n) but it should be reasonably fast for small allocations. + // + // NOTE: We could probably replace this with an `Option` instead of a `Result` in a later + // release to remove this clippy warning + #[allow(clippy::result_unit_err)] + pub fn allocate_first_fit(&mut self, layout: Layout) -> Result<(NonNull, Layout), ()> { + let aligned_layout = Self::align_layout(layout); + let mut cursor = self.cursor().ok_or(())?; + + loop { + match cursor.split_current(aligned_layout) { + Ok((ptr, _len)) => { + return Ok((NonNull::new(ptr).ok_or(())?, aligned_layout)); + } + Err(curs) => { + cursor = curs.next().ok_or(())?; + } + } + } + } + + /// Frees the allocation given by `ptr` and `layout`. + /// + /// This function walks the list and inserts the given block at the correct place. If the freed + /// block is adjacent to another free block, the blocks are merged again. + /// This operation is in `O(n)` since the list needs to be sorted by address. + /// + /// [`allocate_first_fit`]: HoleList::allocate_first_fit + /// + /// # Safety + /// + /// `ptr` must be a pointer returned by a call to the [`allocate_first_fit`] function with + /// identical layout. Undefined behavior may occur for invalid arguments. + /// The function performs exactly the same layout adjustments as [`allocate_first_fit`] and + /// returns the aligned layout. 
+ pub unsafe fn deallocate(&mut self, ptr: NonNull, layout: Layout) -> Layout { + let aligned_layout = Self::align_layout(layout); + deallocate(self, ptr.as_ptr(), aligned_layout.size()); + aligned_layout + } + + /// Returns the minimal allocation size. Smaller allocations or deallocations are not allowed. + pub fn min_size() -> usize { + size_of::() * 2 + } + + /// Returns information about the first hole for test purposes. + #[cfg(test)] + pub fn first_hole(&self) -> Option<(*const u8, usize)> { + self.first.next.as_ref().map(|hole| { + (hole.as_ptr() as *mut u8 as *const u8, unsafe { + hole.as_ref().size + }) + }) + } + + pub(crate) unsafe fn extend(&mut self, by: usize) { + assert!(!self.top.is_null(), "tried to extend an empty heap"); + + let top = self.top; + + let dead_space = top.align_offset(align_of::()); + debug_assert_eq!( + 0, dead_space, + "dead space detected during extend: {} bytes. This means top was unaligned", + dead_space + ); + + debug_assert!( + (self.pending_extend as usize) < Self::min_size(), + "pending extend was larger than expected" + ); + + // join this extend request with any pending (but not yet acted on) extension + let extend_by = self.pending_extend as usize + by; + + let minimum_extend = Self::min_size(); + if extend_by < minimum_extend { + self.pending_extend = extend_by as u8; + return; + } + + // only extend up to another valid boundary + let new_hole_size = align_down_size(extend_by, align_of::()); + let layout = Layout::from_size_align(new_hole_size, 1).unwrap(); + + // instantiate the hole by forcing a deallocation on the new memory + self.deallocate(NonNull::new_unchecked(top as *mut u8), layout); + self.top = top.add(new_hole_size); + + // save extra bytes given to extend that weren't aligned to the hole size + self.pending_extend = (extend_by - new_hole_size) as u8; + } +} + +unsafe fn make_hole(addr: *mut u8, size: usize) -> NonNull { + let hole_addr = addr.cast::(); + debug_assert_eq!( + addr as usize % align_of::(), + 0, + "Hole address not aligned!", + ); + hole_addr.write(Hole { size, next: None }); + NonNull::new_unchecked(hole_addr) +} + +impl Cursor { + fn try_insert_back(self, node: NonNull, bottom: *mut u8) -> Result { + // Covers the case where the new hole exists BEFORE the current pointer, + // which only happens when previous is the stub pointer + if node < self.hole { + let node_u8 = node.as_ptr().cast::(); + let node_size = unsafe { node.as_ref().size }; + let hole_u8 = self.hole.as_ptr().cast::(); + + assert!( + node_u8.wrapping_add(node_size) <= hole_u8, + "Freed node aliases existing hole! Bad free?", + ); + debug_assert_eq!(self.previous().size, 0); + + let Cursor { + mut prev, + hole, + top, + } = self; + unsafe { + let mut node = check_merge_bottom(node, bottom); + prev.as_mut().next = Some(node); + node.as_mut().next = Some(hole); + } + Ok(Cursor { + prev, + hole: node, + top, + }) + } else { + Err(self) + } + } + + fn try_insert_after(&mut self, mut node: NonNull) -> Result<(), ()> { + let node_u8 = node.as_ptr().cast::(); + let node_size = unsafe { node.as_ref().size }; + + // If we have a next, does the node overlap next? + if let Some(next) = self.current().next.as_ref() { + if node < *next { + let node_u8 = node_u8 as *const u8; + assert!( + node_u8.wrapping_add(node_size) <= next.as_ptr().cast::(), + "Freed node aliases existing hole! Bad free?", + ); + } else { + // The new hole isn't between current and next. 
+ return Err(()); + } + } + + // At this point, we either have no "next" pointer, or the hole is + // between current and "next". The following assert can only trigger + // if we've gotten our list out of order. + debug_assert!(self.hole < node, "Hole list out of order?"); + + let hole_u8 = self.hole.as_ptr().cast::(); + let hole_size = self.current().size; + + // Does hole overlap node? + assert!( + hole_u8.wrapping_add(hole_size) <= node_u8, + "Freed node ({:?}) aliases existing hole ({:?}[{}])! Bad free?", + node_u8, + hole_u8, + hole_size, + ); + + // All good! Let's insert that after. + unsafe { + let maybe_next = self.hole.as_mut().next.replace(node); + node.as_mut().next = maybe_next; + } + + Ok(()) + } + + // Merge the current node with up to n following nodes + fn try_merge_next_n(self, max: usize) { + let Cursor { + prev: _, + mut hole, + top, + .. + } = self; + + for _ in 0..max { + // Is there a next node? + let mut next = if let Some(next) = unsafe { hole.as_mut() }.next.as_ref() { + *next + } else { + // Since there is no NEXT node, we need to check whether the current + // hole SHOULD extend to the end, but doesn't. This would happen when + // there isn't enough remaining space to place a hole after the current + // node's placement. + check_merge_top(hole, top); + return; + }; + + // Can we directly merge these? e.g. are they touching? + // + // NOTE: Because we always use `HoleList::align_layout`, the size of + // the new hole is always "rounded up" to cover any partial gaps that + // would have occurred. For this reason, we DON'T need to "round up" + // to account for an unaligned hole spot. + let hole_u8 = hole.as_ptr().cast::(); + let hole_sz = unsafe { hole.as_ref().size }; + let next_u8 = next.as_ptr().cast::(); + let end = hole_u8.wrapping_add(hole_sz); + + let touching = end == next_u8; + + if touching { + let next_sz; + let next_next; + unsafe { + let next_mut = next.as_mut(); + next_sz = next_mut.size; + next_next = next_mut.next.take(); + } + unsafe { + let hole_mut = hole.as_mut(); + hole_mut.next = next_next; + hole_mut.size += next_sz; + } + // Okay, we just merged the next item. DON'T move the cursor, as we can + // just try to merge the next_next, which is now our next. + } else { + // Welp, not touching, can't merge. Move to the next node. + hole = next; + } + } + } +} + +/// Frees the allocation given by `(addr, size)`. It starts at the given hole and walks the list to +/// find the correct place (the list is sorted by address). +fn deallocate(list: &mut HoleList, addr: *mut u8, size: usize) { + // Start off by just making this allocation a hole where it stands. + // We'll attempt to merge it with other nodes once we figure out where + // it should live + let hole = unsafe { make_hole(addr, size) }; + + // Now, try to get a cursor to the list - this only works if we have at least + // one non-"dummy" hole in the list + let cursor = if let Some(cursor) = list.cursor() { + cursor + } else { + // Oh hey, there are no "real" holes at all. That means this just + // becomes the only "real" hole! Check if this is touching the end + // or the beginning of the allocation range + let hole = check_merge_bottom(hole, list.bottom); + check_merge_top(hole, list.top); + list.first.next = Some(hole); + return; + }; + + // First, check if we can just insert this node at the top of the list. If the + // insertion succeeded, then our cursor now points to the NEW node, behind the + // previous location the cursor was pointing to. 
+ // + // Otherwise, our cursor will point at the current non-"dummy" head of the list + let (cursor, n) = match cursor.try_insert_back(hole, list.bottom) { + Ok(cursor) => { + // Yup! It lives at the front of the list. Hooray! Attempt to merge + // it with just ONE next node, since it is at the front of the list + (cursor, 1) + } + Err(mut cursor) => { + // Nope. It lives somewhere else. Advance the list until we find its home + while let Err(()) = cursor.try_insert_after(hole) { + cursor = cursor + .next() + .expect("Reached end of holes without finding deallocation hole!"); + } + // Great! We found a home for it, our cursor is now JUST BEFORE the new + // node we inserted, so we need to try to merge up to twice: One to combine + // the current node to the new node, then once more to combine the new node + // with the node after that. + (cursor, 2) + } + }; + + // We now need to merge up to two times to combine the current node with the next + // two nodes. + cursor.try_merge_next_n(n); +} + +#[cfg(test)] +pub mod test { + use super::HoleList; + use crate::{align_down_size, test::new_heap}; + use core::mem::size_of; + use std::{alloc::Layout, convert::TryInto, prelude::v1::*, ptr::NonNull}; + + #[test] + fn cursor() { + let mut heap = new_heap(); + let curs = heap.holes.cursor().unwrap(); + // This is the "dummy" node + assert_eq!(curs.previous().size, 0); + // This is the "full" heap + assert_eq!( + curs.current().size, + align_down_size(1000, size_of::()) + ); + // There is no other hole + assert!(curs.next().is_none()); + } + + #[test] + fn aff() { + let mut heap = new_heap(); + let reqd = Layout::from_size_align(256, 1).unwrap(); + let _ = heap.allocate_first_fit(reqd).unwrap(); + } + + /// Tests `HoleList::new` with the minimal allowed `hole_size`. + #[test] + fn hole_list_new_min_size() { + // define an array of `u64` instead of `u8` for alignment + static mut HEAP: [u64; 2] = [0; 2]; + let heap_start = unsafe { HEAP.as_ptr() as usize }; + let heap = + unsafe { HoleList::new(HEAP.as_mut_ptr().cast(), 2 * core::mem::size_of::()) }; + assert_eq!(heap.bottom as usize, heap_start); + assert_eq!(heap.top as usize, heap_start + 2 * size_of::()); + assert_eq!(heap.first.size, 0); // dummy + assert_eq!( + heap.first.next, + Some(NonNull::new(heap.bottom.cast())).unwrap() + ); + assert_eq!( + unsafe { heap.first.next.as_ref().unwrap().as_ref() }.size, + 2 * core::mem::size_of::() + ); + assert_eq!(unsafe { &*(heap.first.next.unwrap().as_ptr()) }.next, None); + } + + /// Tests that `HoleList::new` aligns the `hole_addr` correctly and adjusts the size + /// accordingly. 
+ #[test] + fn hole_list_new_align() { + // define an array of `u64` instead of `u8` for alignment + static mut HEAP: [u64; 3] = [0; 3]; + + let heap_start: *mut u8 = unsafe { HEAP.as_mut_ptr().add(1) }.cast(); + // initialize the HoleList with a hole_addr one byte before `heap_start` + // -> the function should align it up to `heap_start` + let heap = + unsafe { HoleList::new(heap_start.sub(1), 2 * core::mem::size_of::() + 1) }; + assert_eq!(heap.bottom, heap_start); + assert_eq!(heap.top.cast(), unsafe { + // one byte less than the `hole_size` given to `new` because of alignment + heap_start.add(2 * core::mem::size_of::()) + }); + + assert_eq!(heap.first.size, 0); // dummy + assert_eq!( + heap.first.next, + Some(NonNull::new(heap.bottom.cast())).unwrap() + ); + assert_eq!( + unsafe { &*(heap.first.next.unwrap().as_ptr()) }.size, + unsafe { heap.top.offset_from(heap.bottom) } + .try_into() + .unwrap() + ); + assert_eq!(unsafe { &*(heap.first.next.unwrap().as_ptr()) }.next, None); + } + + #[test] + #[should_panic] + fn hole_list_new_too_small() { + // define an array of `u64` instead of `u8` for alignment + static mut HEAP: [u64; 3] = [0; 3]; + + let heap_start: *mut u8 = unsafe { HEAP.as_mut_ptr().add(1) }.cast(); + // initialize the HoleList with a hole_addr one byte before `heap_start` + // -> the function should align it up to `heap_start`, but then the + // available size is too small to store a hole -> it should panic + unsafe { HoleList::new(heap_start.sub(1), 2 * core::mem::size_of::()) }; + } + + #[test] + #[should_panic] + fn extend_empty() { + unsafe { HoleList::empty().extend(16) }; + } +} diff --git a/example-code/qemu-aarch64v8a/vendor/linked_list_allocator/src/lib.rs b/example-code/qemu-aarch64v8a/vendor/linked_list_allocator/src/lib.rs new file mode 100644 index 0000000..9e85882 --- /dev/null +++ b/example-code/qemu-aarch64v8a/vendor/linked_list_allocator/src/lib.rs @@ -0,0 +1,370 @@ +#![cfg_attr( + feature = "alloc_ref", + feature(allocator_api, alloc_layout_extra, nonnull_slice_from_raw_parts) +)] +#![no_std] + +#[cfg(any(test, fuzzing))] +#[macro_use] +extern crate std; + +#[cfg(feature = "use_spin")] +extern crate spinning_top; + +#[cfg(feature = "use_spin")] +use core::alloc::GlobalAlloc; +use core::alloc::Layout; +#[cfg(feature = "alloc_ref")] +use core::alloc::{AllocError, Allocator}; +use core::mem::MaybeUninit; +#[cfg(feature = "use_spin")] +use core::ops::Deref; +use core::ptr::NonNull; +#[cfg(test)] +use hole::Hole; +use hole::HoleList; +#[cfg(feature = "use_spin")] +use spinning_top::Spinlock; + +pub mod hole; +#[cfg(test)] +mod test; + +/// A fixed size heap backed by a linked list of free memory blocks. +pub struct Heap { + used: usize, + holes: HoleList, +} + +#[cfg(fuzzing)] +impl Heap { + pub fn debug(&mut self) { + println!( + "bottom: {:?}, top: {:?}, size: {}, pending: {}", + self.bottom(), + self.top(), + self.size(), + self.holes.first.size, + ); + self.holes.debug(); + } +} + +unsafe impl Send for Heap {} + +impl Heap { + /// Creates an empty heap. All allocate calls will return `None`. + pub const fn empty() -> Heap { + Heap { + used: 0, + holes: HoleList::empty(), + } + } + + /// Initializes an empty heap + /// + /// The `heap_bottom` pointer is automatically aligned, so the [`bottom()`][Self::bottom] + /// method might return a pointer that is larger than `heap_bottom` after construction. + /// + /// The given `heap_size` must be large enough to store the required + /// metadata, otherwise this function will panic. 
Depending on the + /// alignment of the `hole_addr` pointer, the minimum size is between + /// `2 * size_of::` and `3 * size_of::`. + /// + /// The usable size for allocations will be truncated to the nearest + /// alignment of `align_of::`. Any extra bytes left at the end + /// will be reclaimed once sufficient additional space is given to + /// [`extend`][Heap::extend]. + /// + /// # Safety + /// + /// This function must be called at most once and must only be used on an + /// empty heap. + /// + /// The bottom address must be valid and the memory in the + /// `[heap_bottom, heap_bottom + heap_size)` range must not be used for anything else. + /// This function is unsafe because it can cause undefined behavior if the given address + /// is invalid. + /// + /// The provided memory range must be valid for the `'static` lifetime. + pub unsafe fn init(&mut self, heap_bottom: *mut u8, heap_size: usize) { + self.used = 0; + self.holes = HoleList::new(heap_bottom, heap_size); + } + + /// Initialize an empty heap with provided memory. + /// + /// The caller is responsible for procuring a region of raw memory that may be utilized by the + /// allocator. This might be done via any method such as (unsafely) taking a region from the + /// program's memory, from a mutable static, or by allocating and leaking such memory from + /// another allocator. + /// + /// The latter approach may be especially useful if the underlying allocator does not perform + /// deallocation (e.g. a simple bump allocator). Then the overlaid linked-list-allocator can + /// provide memory reclamation. + /// + /// The usable size for allocations will be truncated to the nearest + /// alignment of `align_of::`. Any extra bytes left at the end + /// will be reclaimed once sufficient additional space is given to + /// [`extend`][Heap::extend]. + /// + /// # Panics + /// + /// This method panics if the heap is already initialized. + /// + /// It also panics when the length of the given `mem` slice is not large enough to + /// store the required metadata. Depending on the alignment of the slice, the minimum + /// size is between `2 * size_of::` and `3 * size_of::`. + pub fn init_from_slice(&mut self, mem: &'static mut [MaybeUninit]) { + assert!( + self.bottom().is_null(), + "The heap has already been initialized." + ); + let size = mem.len(); + let address = mem.as_mut_ptr().cast(); + // SAFETY: All initialization requires the bottom address to be valid, which implies it + // must not be 0. Initially the address is 0. The assertion above ensures that no + // initialization had been called before. + // The given address and size is valid according to the safety invariants of the mutable + // reference handed to us by the caller. + unsafe { self.init(address, size) } + } + + /// Creates a new heap with the given `bottom` and `size`. + /// + /// The `heap_bottom` pointer is automatically aligned, so the [`bottom()`][Self::bottom] + /// method might return a pointer that is larger than `heap_bottom` after construction. + /// + /// The given `heap_size` must be large enough to store the required + /// metadata, otherwise this function will panic. Depending on the + /// alignment of the `hole_addr` pointer, the minimum size is between + /// `2 * size_of::` and `3 * size_of::`. + /// + /// The usable size for allocations will be truncated to the nearest + /// alignment of `align_of::`. Any extra bytes left at the end + /// will be reclaimed once sufficient additional space is given to + /// [`extend`][Heap::extend]. 
+ /// + /// # Safety + /// + /// The bottom address must be valid and the memory in the + /// `[heap_bottom, heap_bottom + heap_size)` range must not be used for anything else. + /// This function is unsafe because it can cause undefined behavior if the given address + /// is invalid. + /// + /// The provided memory range must be valid for the `'static` lifetime. + pub unsafe fn new(heap_bottom: *mut u8, heap_size: usize) -> Heap { + Heap { + used: 0, + holes: HoleList::new(heap_bottom, heap_size), + } + } + + /// Creates a new heap from a slice of raw memory. + /// + /// This is a convenience function that has the same effect as calling + /// [`init_from_slice`] on an empty heap. All the requirements of `init_from_slice` + /// apply to this function as well. + pub fn from_slice(mem: &'static mut [MaybeUninit]) -> Heap { + let size = mem.len(); + let address = mem.as_mut_ptr().cast(); + // SAFETY: The given address and size is valid according to the safety invariants of the + // mutable reference handed to us by the caller. + unsafe { Self::new(address, size) } + } + + /// Allocates a chunk of the given size with the given alignment. Returns a pointer to the + /// beginning of that chunk if it was successful. Else it returns `None`. + /// This function scans the list of free memory blocks and uses the first block that is big + /// enough. The runtime is in O(n) where n is the number of free blocks, but it should be + /// reasonably fast for small allocations. + // + // NOTE: We could probably replace this with an `Option` instead of a `Result` in a later + // release to remove this clippy warning + #[allow(clippy::result_unit_err)] + pub fn allocate_first_fit(&mut self, layout: Layout) -> Result, ()> { + match self.holes.allocate_first_fit(layout) { + Ok((ptr, aligned_layout)) => { + self.used += aligned_layout.size(); + Ok(ptr) + } + Err(err) => Err(err), + } + } + + /// Frees the given allocation. `ptr` must be a pointer returned + /// by a call to the `allocate_first_fit` function with identical size and alignment. + /// + /// This function walks the list of free memory blocks and inserts the freed block at the + /// correct place. If the freed block is adjacent to another free block, the blocks are merged + /// again. This operation is in `O(n)` since the list needs to be sorted by address. + /// + /// # Safety + /// + /// `ptr` must be a pointer returned by a call to the [`allocate_first_fit`] function with + /// identical layout. Undefined behavior may occur for invalid arguments. + pub unsafe fn deallocate(&mut self, ptr: NonNull, layout: Layout) { + self.used -= self.holes.deallocate(ptr, layout).size(); + } + + /// Returns the bottom address of the heap. + /// + /// The bottom pointer is automatically aligned, so the returned pointer + /// might be larger than the bottom pointer used for initialization. + pub fn bottom(&self) -> *mut u8 { + self.holes.bottom + } + + /// Returns the size of the heap. + /// + /// This is the size the heap is using for allocations, not necessarily the + /// total amount of bytes given to the heap. To determine the exact memory + /// boundaries, use [`bottom`][Self::bottom] and [`top`][Self::top]. + pub fn size(&self) -> usize { + unsafe { self.holes.top.offset_from(self.holes.bottom) as usize } + } + + /// Return the top address of the heap. 
+ /// + /// Note: The heap may choose to not use bytes at the end for allocations + /// until there is enough room for metadata, but it still retains ownership + /// over memory from [`bottom`][Self::bottom] to the address returned. + pub fn top(&self) -> *mut u8 { + unsafe { self.holes.top.add(self.holes.pending_extend as usize) } + } + + /// Returns the size of the used part of the heap + pub fn used(&self) -> usize { + self.used + } + + /// Returns the size of the free part of the heap + pub fn free(&self) -> usize { + self.size() - self.used + } + + /// Extends the size of the heap by creating a new hole at the end. + /// + /// Small extensions are not guaranteed to grow the usable size of + /// the heap. In order to grow the Heap most effectively, extend by + /// at least `2 * size_of::`, keeping the amount a multiple of + /// `size_of::`. + /// + /// Calling this method on an uninitialized Heap will panic. + /// + /// # Safety + /// + /// The amount of data given in `by` MUST exist directly after the original + /// range of data provided when constructing the [Heap]. The additional data + /// must have the same lifetime of the original range of data. + /// + /// Even if this operation doesn't increase the [usable size][`Self::size`] + /// by exactly `by` bytes, those bytes are still owned by the Heap for + /// later use. + pub unsafe fn extend(&mut self, by: usize) { + self.holes.extend(by); + } +} + +#[cfg(all(feature = "alloc_ref", feature = "use_spin"))] +unsafe impl Allocator for LockedHeap { + fn allocate(&self, layout: Layout) -> Result, AllocError> { + if layout.size() == 0 { + return Ok(NonNull::slice_from_raw_parts(layout.dangling(), 0)); + } + match self.0.lock().allocate_first_fit(layout) { + Ok(ptr) => Ok(NonNull::slice_from_raw_parts(ptr, layout.size())), + Err(()) => Err(AllocError), + } + } + + unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { + if layout.size() != 0 { + self.0.lock().deallocate(ptr, layout); + } + } +} + +#[cfg(feature = "use_spin")] +pub struct LockedHeap(Spinlock); + +#[cfg(feature = "use_spin")] +impl LockedHeap { + pub const fn empty() -> LockedHeap { + LockedHeap(Spinlock::new(Heap::empty())) + } + + /// Creates a new heap with the given `bottom` and `size`. + /// + /// The `heap_bottom` pointer is automatically aligned, so the [`bottom()`][Heap::bottom] + /// method might return a pointer that is larger than `heap_bottom` after construction. + /// + /// The given `heap_size` must be large enough to store the required + /// metadata, otherwise this function will panic. Depending on the + /// alignment of the `hole_addr` pointer, the minimum size is between + /// `2 * size_of::` and `3 * size_of::`. + /// + /// # Safety + /// + /// The bottom address must be valid and the memory in the + /// `[heap_bottom, heap_bottom + heap_size)` range must not be used for anything else. + /// This function is unsafe because it can cause undefined behavior if the given address + /// is invalid. + /// + /// The provided memory range must be valid for the `'static` lifetime. 
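+    ///
+    /// # Example (illustrative)
+    ///
+    /// A sketch of the usual global-allocator pattern, not upstream
+    /// documentation; `heap_bottom` and `heap_size` stand for a memory
+    /// region you have reserved yourself:
+    ///
+    /// ```ignore
+    /// use linked_list_allocator::LockedHeap;
+    ///
+    /// #[global_allocator]
+    /// static ALLOCATOR: LockedHeap = LockedHeap::empty();
+    ///
+    /// fn init_heap(heap_bottom: *mut u8, heap_size: usize) {
+    ///     // `LockedHeap` derefs to a spinlock around `Heap`, so `lock()`
+    ///     // gives exclusive access while the heap is initialised.
+    ///     unsafe { ALLOCATOR.lock().init(heap_bottom, heap_size) };
+    /// }
+    /// ```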
+ pub unsafe fn new(heap_bottom: *mut u8, heap_size: usize) -> LockedHeap { + LockedHeap(Spinlock::new(Heap { + used: 0, + holes: HoleList::new(heap_bottom, heap_size), + })) + } +} + +#[cfg(feature = "use_spin")] +impl Deref for LockedHeap { + type Target = Spinlock; + + fn deref(&self) -> &Spinlock { + &self.0 + } +} + +#[cfg(feature = "use_spin")] +unsafe impl GlobalAlloc for LockedHeap { + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + self.0 + .lock() + .allocate_first_fit(layout) + .ok() + .map_or(core::ptr::null_mut(), |allocation| allocation.as_ptr()) + } + + unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { + self.0 + .lock() + .deallocate(NonNull::new_unchecked(ptr), layout) + } +} + +/// Align downwards. Returns the greatest x with alignment `align` +/// so that x <= addr. The alignment must be a power of 2. +pub fn align_down_size(size: usize, align: usize) -> usize { + if align.is_power_of_two() { + size & !(align - 1) + } else if align == 0 { + size + } else { + panic!("`align` must be a power of 2"); + } +} + +pub fn align_up_size(size: usize, align: usize) -> usize { + align_down_size(size + align - 1, align) +} + +/// Align upwards. Returns the smallest x with alignment `align` +/// so that x >= addr. The alignment must be a power of 2. +pub fn align_up(addr: *mut u8, align: usize) -> *mut u8 { + let offset = addr.align_offset(align); + addr.wrapping_add(offset) +} diff --git a/example-code/qemu-aarch64v8a/vendor/linked_list_allocator/src/test.rs b/example-code/qemu-aarch64v8a/vendor/linked_list_allocator/src/test.rs new file mode 100644 index 0000000..3ff0514 --- /dev/null +++ b/example-code/qemu-aarch64v8a/vendor/linked_list_allocator/src/test.rs @@ -0,0 +1,617 @@ +use super::*; +use core::{ + alloc::Layout, + ops::{Deref, DerefMut}, +}; +use std::{ + mem::{align_of, size_of, MaybeUninit}, + prelude::v1::*, +}; + +#[repr(align(128))] +struct Chonk { + data: MaybeUninit<[u8; N]>, +} + +impl Chonk { + /// Returns (almost certainly aliasing) pointers to the Chonk + /// as well as the data payload. + /// + /// MUST be freed with a matching call to `Chonk::unleak` + pub fn new() -> (*mut Chonk, *mut u8) { + let heap_space_ptr: *mut Chonk = { + let owned_box = Box::new(Self { + data: MaybeUninit::uninit(), + }); + let mutref = Box::leak(owned_box); + mutref + }; + let data_ptr: *mut u8 = unsafe { core::ptr::addr_of_mut!((*heap_space_ptr).data).cast() }; + (heap_space_ptr, data_ptr) + } + + pub unsafe fn unleak(putter: *mut Chonk) { + drop(Box::from_raw(putter)) + } +} + +pub struct Dropper { + putter: *mut Chonk, +} + +impl Dropper { + fn new(putter: *mut Chonk) -> Self { + Self { putter } + } +} + +impl Drop for Dropper { + fn drop(&mut self) { + unsafe { Chonk::unleak(self.putter) } + } +} + +pub struct OwnedHeap { + heap: Heap, + // /!\ SAFETY /!\: Load bearing drop order! `_drop` MUST be dropped AFTER + // `heap` is dropped. This is enforced by rust's built-in drop ordering, as + // long as `_drop` is declared after `heap`. 
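+    //
+    // `Dropper` owns the leaked `Chonk` allocation and returns it to the
+    // test allocator (via `Chonk::unleak`) once the `OwnedHeap` is dropped.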
+ _drop: Dropper, +} + +impl Deref for OwnedHeap { + type Target = Heap; + + fn deref(&self) -> &Self::Target { + &self.heap + } +} + +impl DerefMut for OwnedHeap { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.heap + } +} + +pub fn new_heap() -> OwnedHeap<1000> { + const HEAP_SIZE: usize = 1000; + let (heap_space_ptr, data_ptr) = Chonk::::new(); + + let heap = unsafe { Heap::new(data_ptr, HEAP_SIZE) }; + assert_eq!(heap.bottom(), data_ptr); + assert_eq!(heap.size(), align_down_size(HEAP_SIZE, size_of::())); + OwnedHeap { + heap, + _drop: Dropper::new(heap_space_ptr), + } +} + +fn new_max_heap() -> OwnedHeap<2048> { + const HEAP_SIZE: usize = 1024; + const HEAP_SIZE_MAX: usize = 2048; + let (heap_space_ptr, data_ptr) = Chonk::::new(); + + // Unsafe so that we have provenance over the whole allocation. + let heap = unsafe { Heap::new(data_ptr, HEAP_SIZE) }; + assert_eq!(heap.bottom(), data_ptr); + assert_eq!(heap.size(), HEAP_SIZE); + + OwnedHeap { + heap, + _drop: Dropper::new(heap_space_ptr), + } +} + +fn new_heap_skip(ct: usize) -> OwnedHeap<1000> { + const HEAP_SIZE: usize = 1000; + let (heap_space_ptr, data_ptr) = Chonk::::new(); + + let heap = unsafe { Heap::new(data_ptr.add(ct), HEAP_SIZE - ct) }; + OwnedHeap { + heap, + _drop: Dropper::new(heap_space_ptr), + } +} + +#[test] +fn empty() { + let mut heap = Heap::empty(); + let layout = Layout::from_size_align(1, 1).unwrap(); + assert!(heap.allocate_first_fit(layout.clone()).is_err()); +} + +#[test] +fn oom() { + const HEAP_SIZE: usize = 1000; + let (heap_space_ptr, data_ptr) = Chonk::::new(); + + let mut heap = unsafe { Heap::new(data_ptr, HEAP_SIZE) }; + assert_eq!(heap.bottom(), data_ptr); + assert_eq!(heap.size(), align_down_size(HEAP_SIZE, size_of::())); + + let layout = Layout::from_size_align(heap.size() + 1, align_of::()); + let addr = heap.allocate_first_fit(layout.unwrap()); + assert!(addr.is_err()); + + // Explicitly unleak the heap allocation + unsafe { Chonk::unleak(heap_space_ptr) }; +} + +#[test] +fn allocate_double_usize() { + let mut heap = new_heap(); + let size = size_of::() * 2; + let layout = Layout::from_size_align(size, align_of::()); + let addr = heap.allocate_first_fit(layout.unwrap()); + assert!(addr.is_ok()); + let addr = addr.unwrap().as_ptr(); + assert!(addr == heap.bottom()); + let (hole_addr, hole_size) = heap.holes.first_hole().expect("ERROR: no hole left"); + assert!(hole_addr == heap.bottom().wrapping_add(size)); + assert!(hole_size == heap.size() - size); + + unsafe { + assert_eq!( + (*((addr.wrapping_add(size)) as *const Hole)).size, + heap.size() - size + ); + } +} + +#[test] +fn allocate_and_free_double_usize() { + let mut heap = new_heap(); + + let layout = Layout::from_size_align(size_of::() * 2, align_of::()).unwrap(); + let x = heap.allocate_first_fit(layout.clone()).unwrap(); + unsafe { + *(x.as_ptr() as *mut (usize, usize)) = (0xdeafdeadbeafbabe, 0xdeafdeadbeafbabe); + + heap.deallocate(x, layout.clone()); + let real_first = heap.holes.first.next.as_ref().unwrap().as_ref(); + + assert_eq!(real_first.size, heap.size()); + assert!(real_first.next.is_none()); + } +} + +#[test] +fn deallocate_right_before() { + let mut heap = new_heap(); + let layout = Layout::from_size_align(size_of::() * 5, 1).unwrap(); + + let x = heap.allocate_first_fit(layout.clone()).unwrap(); + let y = heap.allocate_first_fit(layout.clone()).unwrap(); + let z = heap.allocate_first_fit(layout.clone()).unwrap(); + + unsafe { + heap.deallocate(y, layout.clone()); + assert_eq!((*(y.as_ptr() as *const 
Hole)).size, layout.size()); + heap.deallocate(x, layout.clone()); + assert_eq!((*(x.as_ptr() as *const Hole)).size, layout.size() * 2); + heap.deallocate(z, layout.clone()); + assert_eq!((*(x.as_ptr() as *const Hole)).size, heap.size()); + } +} + +#[test] +fn deallocate_right_behind() { + let mut heap = new_heap(); + let size = size_of::() * 5; + let layout = Layout::from_size_align(size, 1).unwrap(); + + let x = heap.allocate_first_fit(layout.clone()).unwrap(); + let y = heap.allocate_first_fit(layout.clone()).unwrap(); + let z = heap.allocate_first_fit(layout.clone()).unwrap(); + + unsafe { + heap.deallocate(x, layout.clone()); + assert_eq!((*(x.as_ptr() as *const Hole)).size, size); + heap.deallocate(y, layout.clone()); + assert_eq!((*(x.as_ptr() as *const Hole)).size, size * 2); + heap.deallocate(z, layout.clone()); + assert_eq!((*(x.as_ptr() as *const Hole)).size, heap.size()); + } +} + +#[test] +fn deallocate_middle() { + let mut heap = new_heap(); + let size = size_of::() * 5; + let layout = Layout::from_size_align(size, 1).unwrap(); + + let x = heap.allocate_first_fit(layout.clone()).unwrap(); + let y = heap.allocate_first_fit(layout.clone()).unwrap(); + let z = heap.allocate_first_fit(layout.clone()).unwrap(); + let a = heap.allocate_first_fit(layout.clone()).unwrap(); + + unsafe { + heap.deallocate(x, layout.clone()); + assert_eq!((*(x.as_ptr() as *const Hole)).size, size); + heap.deallocate(z, layout.clone()); + assert_eq!((*(x.as_ptr() as *const Hole)).size, size); + assert_eq!((*(z.as_ptr() as *const Hole)).size, size); + heap.deallocate(y, layout.clone()); + assert_eq!((*(x.as_ptr() as *const Hole)).size, size * 3); + heap.deallocate(a, layout.clone()); + assert_eq!((*(x.as_ptr() as *const Hole)).size, heap.size()); + } +} + +#[test] +fn reallocate_double_usize() { + let mut heap = new_heap(); + + let layout = Layout::from_size_align(size_of::() * 2, align_of::()).unwrap(); + + let x = heap.allocate_first_fit(layout.clone()).unwrap(); + unsafe { + heap.deallocate(x, layout.clone()); + } + + let y = heap.allocate_first_fit(layout.clone()).unwrap(); + unsafe { + heap.deallocate(y, layout.clone()); + } + + assert_eq!(x, y); +} + +#[test] +fn allocate_many_size_aligns() { + use core::ops::{Range, RangeInclusive}; + + #[cfg(not(miri))] + const SIZE: RangeInclusive = 1..=512; + + #[cfg(miri)] + const SIZE: RangeInclusive = 256..=(256 + core::mem::size_of::()); + + #[cfg(not(miri))] + const ALIGN: Range = 0..10; + + #[cfg(miri)] + const ALIGN: Range = 1..4; + + const STRATS: Range = 0..4; + + let mut heap = new_heap(); + let aligned_heap_size = align_down_size(1000, size_of::()); + assert_eq!(heap.size(), aligned_heap_size); + + heap.holes.debug(); + + let max_alloc = Layout::from_size_align(aligned_heap_size, 1).unwrap(); + let full = heap.allocate_first_fit(max_alloc).unwrap(); + unsafe { + heap.deallocate(full, max_alloc); + } + + heap.holes.debug(); + + struct Alloc { + alloc: NonNull, + layout: Layout, + } + + // NOTE: Printing to the console SIGNIFICANTLY slows down miri. 
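+    // Sweep every (size, alignment) combination: fill the heap with
+    // allocations of that layout, then free them in one of four orders
+    // (forward, backward, or one of the two interleaved patterns below).
+    // After each pass, the "MAX CHECK" allocation of the whole heap verifies
+    // that the freed holes merged back into a single block.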
+ + for strat in STRATS { + for align in ALIGN { + for size in SIZE { + #[cfg(not(miri))] + { + println!("========================================================="); + println!("Align: {}", 1 << align); + println!("Size: {}", size); + println!("Free Pattern: {}/0..4", strat); + println!(); + } + let mut allocs = vec![]; + + let layout = Layout::from_size_align(size, 1 << align).unwrap(); + while let Ok(alloc) = heap.allocate_first_fit(layout) { + #[cfg(not(miri))] + heap.holes.debug(); + allocs.push(Alloc { alloc, layout }); + } + + #[cfg(not(miri))] + println!("Allocs: {} - {} bytes", allocs.len(), allocs.len() * size); + + match strat { + 0 => { + // Forward + allocs.drain(..).for_each(|a| unsafe { + heap.deallocate(a.alloc, a.layout); + #[cfg(not(miri))] + heap.holes.debug(); + }); + } + 1 => { + // Backwards + allocs.drain(..).rev().for_each(|a| unsafe { + heap.deallocate(a.alloc, a.layout); + #[cfg(not(miri))] + heap.holes.debug(); + }); + } + 2 => { + // Interleaved forwards + let mut a = Vec::new(); + let mut b = Vec::new(); + for (i, alloc) in allocs.drain(..).enumerate() { + if (i % 2) == 0 { + a.push(alloc); + } else { + b.push(alloc); + } + } + a.drain(..).for_each(|a| unsafe { + heap.deallocate(a.alloc, a.layout); + #[cfg(not(miri))] + heap.holes.debug(); + }); + b.drain(..).for_each(|a| unsafe { + heap.deallocate(a.alloc, a.layout); + #[cfg(not(miri))] + heap.holes.debug(); + }); + } + 3 => { + // Interleaved backwards + let mut a = Vec::new(); + let mut b = Vec::new(); + for (i, alloc) in allocs.drain(..).rev().enumerate() { + if (i % 2) == 0 { + a.push(alloc); + } else { + b.push(alloc); + } + } + a.drain(..).for_each(|a| unsafe { + heap.deallocate(a.alloc, a.layout); + #[cfg(not(miri))] + heap.holes.debug(); + }); + b.drain(..).for_each(|a| unsafe { + heap.deallocate(a.alloc, a.layout); + #[cfg(not(miri))] + heap.holes.debug(); + }); + } + _ => panic!(), + } + + #[cfg(not(miri))] + println!("MAX CHECK"); + + let full = heap.allocate_first_fit(max_alloc).unwrap(); + unsafe { + heap.deallocate(full, max_alloc); + } + + #[cfg(not(miri))] + println!(); + } + } + } +} + +#[test] +fn allocate_multiple_sizes() { + let mut heap = new_heap(); + let base_size = size_of::(); + let base_align = align_of::(); + + let layout_1 = Layout::from_size_align(base_size * 2, base_align).unwrap(); + let layout_2 = Layout::from_size_align(base_size * 7, base_align).unwrap(); + let layout_3 = Layout::from_size_align(base_size * 3, base_align * 4).unwrap(); + let layout_4 = Layout::from_size_align(base_size * 4, base_align).unwrap(); + + let x = heap.allocate_first_fit(layout_1.clone()).unwrap(); + let y = heap.allocate_first_fit(layout_2.clone()).unwrap(); + assert_eq!(y.as_ptr() as usize, x.as_ptr() as usize + base_size * 2); + let z = heap.allocate_first_fit(layout_3.clone()).unwrap(); + assert_eq!(z.as_ptr() as usize % (base_size * 4), 0); + + unsafe { + heap.deallocate(x, layout_1.clone()); + } + + let a = heap.allocate_first_fit(layout_4.clone()).unwrap(); + let b = heap.allocate_first_fit(layout_1.clone()).unwrap(); + assert_eq!(b, x); + + unsafe { + heap.deallocate(y, layout_2); + heap.deallocate(z, layout_3); + heap.deallocate(a, layout_4); + heap.deallocate(b, layout_1); + } +} + +// This test makes sure that the heap works correctly when the input slice has +// a variety of non-Hole aligned starting addresses +#[test] +fn allocate_multiple_unaligned() { + for offset in 0..=Layout::new::().size() { + let mut heap = new_heap_skip(offset); + let base_size = size_of::(); + let base_align = 
align_of::(); + + let layout_1 = Layout::from_size_align(base_size * 2, base_align).unwrap(); + let layout_2 = Layout::from_size_align(base_size * 7, base_align).unwrap(); + let layout_3 = Layout::from_size_align(base_size * 3, base_align * 4).unwrap(); + let layout_4 = Layout::from_size_align(base_size * 4, base_align).unwrap(); + + let x = heap.allocate_first_fit(layout_1.clone()).unwrap(); + let y = heap.allocate_first_fit(layout_2.clone()).unwrap(); + assert_eq!(y.as_ptr() as usize, x.as_ptr() as usize + base_size * 2); + let z = heap.allocate_first_fit(layout_3.clone()).unwrap(); + assert_eq!(z.as_ptr() as usize % (base_size * 4), 0); + + unsafe { + heap.deallocate(x, layout_1.clone()); + } + + let a = heap.allocate_first_fit(layout_4.clone()).unwrap(); + let b = heap.allocate_first_fit(layout_1.clone()).unwrap(); + assert_eq!(b, x); + + unsafe { + heap.deallocate(y, layout_2); + heap.deallocate(z, layout_3); + heap.deallocate(a, layout_4); + heap.deallocate(b, layout_1); + } + } +} + +#[test] +fn allocate_usize() { + let mut heap = new_heap(); + + let layout = Layout::from_size_align(size_of::(), 1).unwrap(); + + assert!(heap.allocate_first_fit(layout.clone()).is_ok()); +} + +#[test] +fn allocate_usize_in_bigger_block() { + let mut heap = new_heap(); + + let layout_1 = Layout::from_size_align(size_of::() * 2, 1).unwrap(); + let layout_2 = Layout::from_size_align(size_of::(), 1).unwrap(); + + let x = heap.allocate_first_fit(layout_1.clone()).unwrap(); + let y = heap.allocate_first_fit(layout_1.clone()).unwrap(); + unsafe { + heap.deallocate(x, layout_1.clone()); + } + + let z = heap.allocate_first_fit(layout_2.clone()); + assert!(z.is_ok()); + let z = z.unwrap(); + assert_eq!(x, z); + + unsafe { + heap.deallocate(y, layout_1.clone()); + heap.deallocate(z, layout_2); + } +} + +#[test] +// see https://github.com/phil-opp/blog_os/issues/160 +fn align_from_small_to_big() { + let mut heap = new_heap(); + + let layout_1 = Layout::from_size_align(28, 4).unwrap(); + let layout_2 = Layout::from_size_align(8, 8).unwrap(); + + // allocate 28 bytes so that the heap end is only 4 byte aligned + assert!(heap.allocate_first_fit(layout_1.clone()).is_ok()); + // try to allocate a 8 byte aligned block + assert!(heap.allocate_first_fit(layout_2.clone()).is_ok()); +} + +#[test] +fn extend_empty_heap() { + let mut heap = new_max_heap(); + + unsafe { + heap.extend(1024); + } + + // Try to allocate full heap after extend + let layout = Layout::from_size_align(2048, 1).unwrap(); + assert!(heap.allocate_first_fit(layout.clone()).is_ok()); +} + +#[test] +fn extend_full_heap() { + let mut heap = new_max_heap(); + + let layout = Layout::from_size_align(1024, 1).unwrap(); + + // Allocate full heap, extend and allocate again to the max + assert!(heap.allocate_first_fit(layout.clone()).is_ok()); + unsafe { + heap.extend(1024); + } + assert!(heap.allocate_first_fit(layout.clone()).is_ok()); +} + +#[test] +fn extend_fragmented_heap() { + let mut heap = new_max_heap(); + + let layout_1 = Layout::from_size_align(512, 1).unwrap(); + let layout_2 = Layout::from_size_align(1024, 1).unwrap(); + + let alloc1 = heap.allocate_first_fit(layout_1.clone()); + let alloc2 = heap.allocate_first_fit(layout_1.clone()); + + assert!(alloc1.is_ok()); + assert!(alloc2.is_ok()); + + unsafe { + // Create a hole at the beginning of the heap + heap.deallocate(alloc1.unwrap(), layout_1.clone()); + } + + unsafe { + heap.extend(1024); + } + + // We got additional 1024 bytes hole at the end of the heap + // Try to allocate there + 
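+    // (The 512-byte hole left at the bottom of the heap is too small for
+    // `layout_2`, so a successful allocation here must come from the newly
+    // extended region.)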
assert!(heap.allocate_first_fit(layout_2.clone()).is_ok()); +} + +/// Ensures that `Heap::extend` fails for very small sizes. +/// +/// The size needs to be big enough to hold a hole, otherwise +/// the hole write would result in an out of bounds write. +#[test] +fn small_heap_extension() { + // define an array of `u64` instead of `u8` for alignment + static mut HEAP: [u64; 5] = [0; 5]; + unsafe { + let mut heap = Heap::new(HEAP.as_mut_ptr().cast(), 32); + heap.extend(1); + assert_eq!(1, heap.holes.pending_extend); + } +} + +/// Ensures that `Heap::extend` fails for sizes that are not a multiple of the hole size. +#[test] +fn oddly_sized_heap_extension() { + // define an array of `u64` instead of `u8` for alignment + static mut HEAP: [u64; 5] = [0; 5]; + unsafe { + let mut heap = Heap::new(HEAP.as_mut_ptr().cast(), 16); + heap.extend(17); + assert_eq!(1, heap.holes.pending_extend); + assert_eq!(16 + 16, heap.size()); + } +} + +/// Ensures that heap extension fails when trying to extend an oddly-sized heap. +/// +/// To extend the heap, we need to place a hole at the old top of the heap. This +/// only works if the top pointer is sufficiently aligned. +#[test] +fn extend_odd_size() { + // define an array of `u64` instead of `u8` for alignment + static mut HEAP: [u64; 6] = [0; 6]; + unsafe { + let mut heap = Heap::new(HEAP.as_mut_ptr().cast(), 17); + assert_eq!(1, heap.holes.pending_extend); + heap.extend(16); + assert_eq!(1, heap.holes.pending_extend); + heap.extend(15); + assert_eq!(0, heap.holes.pending_extend); + assert_eq!(17 + 16 + 15, heap.size()); + } +} diff --git a/example-code/qemu-armv8r/Cargo.lock b/example-code/qemu-armv8r/Cargo.lock index d760b9e..d2faf97 100644 --- a/example-code/qemu-armv8r/Cargo.lock +++ b/example-code/qemu-armv8r/Cargo.lock @@ -25,7 +25,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9afa463f5405ee81cdb9cc2baf37e08ec7e4c8209442b5d72c04cfb2cd6e6286" [[package]] -name = "qemu-demo" +name = "qemu-armv8r" version = "0.1.0" dependencies = [ "critical-section", diff --git a/example-code/qemu-armv8r/Cargo.toml b/example-code/qemu-armv8r/Cargo.toml index 0464638..9d32b91 100644 --- a/example-code/qemu-armv8r/Cargo.toml +++ b/example-code/qemu-armv8r/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "qemu-demo" +name = "qemu-armv8r" version = "0.1.0" edition = "2021" authors = ["Ferrous Systems"] diff --git a/example-code/qemu-armv8r/README.md b/example-code/qemu-armv8r/README.md index 235892e..10b5d8b 100644 --- a/example-code/qemu-armv8r/README.md +++ b/example-code/qemu-armv8r/README.md @@ -58,12 +58,12 @@ the linker script to the `cargo` temporary output directory where the linker will look for it. The compiled outputs will go into `./target/armv8r-none-eabihf/`, where -`` is `debug` or `release`. The binary is called `qemu-demo`, because +`` is `debug` or `release`. The binary is called `qemu-armv8r`, because that's the name given in the `Cargo.toml` file. ```console $ criticalup run cargo run --release -bin no_heap - Compiling qemu-demo v0.1.0 (/Users/jonathan/work/qemu-demo) + Compiling qemu-armv8r v0.1.0 (/Users/jonathan/work/qemu-armv8r) Finished release [optimized] target(s) in 0.16s Running `qemu-system-arm -machine mps3-an536 -cpu cortex-r52 -semihosting -nographic -kernel target/armv8r-none-eabihf/release/no_heap` Hello, this is Rust! @@ -133,6 +133,24 @@ Hello, this is Rust! PANIC: PanicInfo { payload: Any { .. 
}, message: Some(I am a panic), location: Location { file: "src/bin/with_heap.rs", line: 61, col: 5 }, can_unwind: true, force_no_backtrace: false } ``` +Rather than type out the full QEMU command line, you can also use `qemu.sh`: + +```console +$ ./qemu.sh ./target/production/with_heap +Hello, this is Rust! + 1.00 2.00 3.00 4.00 5.00 6.00 7.00 8.00 9.00 10.00 + 2.00 4.00 6.00 8.00 10.00 12.00 14.00 16.00 18.00 20.00 + 3.00 6.00 9.00 12.00 15.00 18.00 21.00 24.00 27.00 30.00 + 4.00 8.00 12.00 16.00 20.00 24.00 28.00 32.00 36.00 40.00 + 5.00 10.00 15.00 20.00 25.00 30.00 35.00 40.00 45.00 50.00 + 6.00 12.00 18.00 24.00 30.00 36.00 42.00 48.00 54.00 60.00 + 7.00 14.00 21.00 28.00 35.00 42.00 49.00 56.00 63.00 70.00 + 8.00 16.00 24.00 32.00 40.00 48.00 56.00 64.00 72.00 80.00 + 9.00 18.00 27.00 36.00 45.00 54.00 63.00 72.00 81.00 90.00 + 10.00 20.00 30.00 40.00 50.00 60.00 70.00 80.00 90.00 100.00 +PANIC: PanicInfo { payload: Any { .. }, message: Some(I am a panic), location: Location { file: "src/bin/with_heap.rs", line: 61, col: 5 }, can_unwind: true, force_no_backtrace: false } +``` + ## License Licensed under either of diff --git a/example-code/qemu-armv8r/build.sh b/example-code/qemu-armv8r/build.sh index 8188829..762d140 100755 --- a/example-code/qemu-armv8r/build.sh +++ b/example-code/qemu-armv8r/build.sh @@ -61,7 +61,7 @@ echo Running rustc for lib... # ############################################################################ "${RUSTC}" ${RUSTC_FLAGS} \ --crate-type=lib \ - --crate-name=qemu_demo \ + --crate-name=qemu_armv8r \ --emit=dep-info,metadata,link \ --out-dir ${TARGET_DIR} \ -L ${TARGET_DIR} \ @@ -78,7 +78,7 @@ echo Running rustc for no_heap... -Clink-arg=-Tlinker.ld \ --edition 2021 \ -L ${TARGET_DIR} \ - --extern qemu_demo=${TARGET_DIR}/libqemu_demo.rlib \ + --extern qemu_armv8r=${TARGET_DIR}/libqemu_armv8r.rlib \ -o ${NO_HEAP_OUTPUT_BINARY} \ src/bin/no_heap.rs echo Generating asm for no_heap... @@ -93,7 +93,7 @@ echo Running rustc for with_heap... -Clink-arg=-Tlinker.ld \ --edition 2021 \ -L ${TARGET_DIR} \ - --extern qemu_demo=${TARGET_DIR}/libqemu_demo.rlib \ + --extern qemu_armv8r=${TARGET_DIR}/libqemu_armv8r.rlib \ --extern embedded_alloc=${TARGET_DIR}/libembedded_alloc.rlib \ -o ${WITH_HEAP_OUTPUT_BINARY} \ src/bin/with_heap.rs diff --git a/example-code/qemu-armv8r/qemu.sh b/example-code/qemu-armv8r/qemu.sh index 7233fed..ba10797 100755 --- a/example-code/qemu-armv8r/qemu.sh +++ b/example-code/qemu-armv8r/qemu.sh @@ -1,5 +1,5 @@ #!/bin/sh TARGET_DIR=target/production -BINARY=${1:-${TARGET_DIR}/no-heap} +BINARY=${1:-${TARGET_DIR}/no_heap} qemu-system-arm -machine mps3-an536 -cpu cortex-r52 -semihosting -nographic -kernel ${BINARY} diff --git a/example-code/qemu-armv8r/src/bin/no_heap.rs b/example-code/qemu-armv8r/src/bin/no_heap.rs index fedf855..ae40b78 100644 --- a/example-code/qemu-armv8r/src/bin/no_heap.rs +++ b/example-code/qemu-armv8r/src/bin/no_heap.rs @@ -8,7 +8,7 @@ #![no_main] use core::fmt::Write; -use qemu_demo::cmsdk_uart; +use qemu_armv8r::cmsdk_uart; /// The clock speed of the peripheral subsystem on an SSE-300 SoC an on MPS3 board. /// @@ -17,8 +17,7 @@ const PERIPHERAL_CLOCK: u32 = 25_000_000; /// The entry-point to the Rust application. /// -/// It is called by the start-up code in [`boot.S`](./boot.S) and thus exported -/// as a C-compatible symbol. +/// It is called by the start-up code in `lib.rs`. #[no_mangle] pub extern "C" fn kmain() { if let Err(e) = main() { @@ -65,31 +64,4 @@ fn panic(info: &core::panic::PanicInfo) -> ! 
{ } } -core::arch::global_asm!( - r#" - -.section .text.startup -.global _start -.code 32 -.align 0 - -_start: - // Set stack pointer - ldr r3, =stack_top - mov sp, r3 - // Allow VFP coprocessor access - mrc p15, 0, r0, c1, c0, 2 - orr r0, r0, #0xF00000 - mcr p15, 0, r0, c1, c0, 2 - // Enable VFP - mov r0, #0x40000000 - vmsr fpexc, r0 - // Jump to application - bl kmain - // In case the application returns, loop forever - b . - -"# -); - // End of file diff --git a/example-code/qemu-armv8r/src/bin/with_heap.rs b/example-code/qemu-armv8r/src/bin/with_heap.rs index 1ae91d1..b726375 100644 --- a/example-code/qemu-armv8r/src/bin/with_heap.rs +++ b/example-code/qemu-armv8r/src/bin/with_heap.rs @@ -11,7 +11,7 @@ extern crate alloc; use core::{fmt::Write, ptr::addr_of_mut}; use embedded_alloc::Heap; -use qemu_demo::cmsdk_uart; +use qemu_armv8r::cmsdk_uart; #[global_allocator] static HEAP: Heap = Heap::empty(); @@ -23,8 +23,7 @@ const PERIPHERAL_CLOCK: u32 = 25_000_000; /// The entry-point to the Rust application. /// -/// It is called by the start-up code in [`boot.S`](./boot.S) and thus exported -/// as a C-compatible symbol. +/// It is called by the start-up code in `lib.rs`. #[no_mangle] pub extern "C" fn kmain() { // Initialize the allocator BEFORE you use it @@ -83,31 +82,4 @@ fn panic(info: &core::panic::PanicInfo) -> ! { } } -core::arch::global_asm!( - r#" - -.section .text.startup -.global _start -.code 32 -.align 0 - -_start: - // Set stack pointer - ldr r3, =stack_top - mov sp, r3 - // Allow VFP coprocessor access - mrc p15, 0, r0, c1, c0, 2 - orr r0, r0, #0xF00000 - mcr p15, 0, r0, c1, c0, 2 - // Enable VFP - mov r0, #0x40000000 - vmsr fpexc, r0 - // Jump to application - bl kmain - // In case the application returns, loop forever - b . - -"# -); - // End of file diff --git a/example-code/qemu-armv8r/src/critical_section.rs b/example-code/qemu-armv8r/src/critical_section.rs index 7559072..2961d92 100644 --- a/example-code/qemu-armv8r/src/critical_section.rs +++ b/example-code/qemu-armv8r/src/critical_section.rs @@ -19,7 +19,6 @@ pub fn interrupts_enabled() -> bool { unsafe impl critical_section::Impl for SingleCoreCriticalSection { unsafe fn acquire() -> critical_section::RawRestoreState { let was_active = interrupts_enabled(); - // NOTE: Fence guarantees are provided by interrupt::disable(), which performs a `compiler_fence(SeqCst)`. core::arch::asm!("cpsid i", options(nomem, nostack, preserves_flags)); core::sync::atomic::compiler_fence(core::sync::atomic::Ordering::SeqCst); was_active diff --git a/example-code/qemu-armv8r/src/lib.rs b/example-code/qemu-armv8r/src/lib.rs index 62cdaff..7112a16 100644 --- a/example-code/qemu-armv8r/src/lib.rs +++ b/example-code/qemu-armv8r/src/lib.rs @@ -2,3 +2,30 @@ pub mod cmsdk_uart; pub mod critical_section; + +core::arch::global_asm!( + r#" + +.section .text.startup +.global _start +.code 32 +.align 0 + +_start: + // Set stack pointer + ldr r3, =stack_top + mov sp, r3 + // Allow VFP coprocessor access + mrc p15, 0, r0, c1, c0, 2 + orr r0, r0, #0xF00000 + mcr p15, 0, r0, c1, c0, 2 + // Enable VFP + mov r0, #0x40000000 + vmsr fpexc, r0 + // Jump to application + bl kmain + // In case the application returns, loop forever + b . + +"# +);
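+
+// Boot flow, for readers of this crate: the `_start` routine above runs
+// before any Rust code. It installs the stack pointer, enables the VFP, and
+// then branches to `kmain`, which each binary provides as a C-compatible
+// symbol, e.g.:
+//
+// ```rust,ignore
+// #[no_mangle]
+// pub extern "C" fn kmain() {
+//     /* application entry point */
+// }
+// ```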