axlibc/setjmp.rs

use core::ffi::c_int;

use crate::ctypes;

/// `setjmp` implementation
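///
/// Saves the callee-saved registers, the stack pointer, and the return address
/// of the caller into `_buf`, then returns `0` in the architecture's return
/// register. A later call to [`longjmp`] with the same buffer resumes execution
/// at this call site with a non-zero return value. The exact register set
/// depends on the target architecture and on the `fp_simd` feature.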
#[unsafe(naked)]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn setjmp(_buf: *mut ctypes::__jmp_buf_tag) {
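    // AArch64: save the callee-saved registers x19..x30 and sp; with `fp_simd`,
    // also the callee-saved SIMD registers d8..d15.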
    #[cfg(all(target_arch = "aarch64", feature = "fp_simd"))]
    core::arch::naked_asm!(
        "
        stp x19, x20, [x0,#0]
        stp x21, x22, [x0,#16]
        stp x23, x24, [x0,#32]
        stp x25, x26, [x0,#48]
        stp x27, x28, [x0,#64]
        stp x29, x30, [x0,#80]
        mov x2, sp
        str x2, [x0,#104]
        stp  d8,  d9, [x0,#112]
        stp d10, d11, [x0,#128]
        stp d12, d13, [x0,#144]
        stp d14, d15, [x0,#160]
        mov x0, #0
        ret",
    );
    #[cfg(all(target_arch = "aarch64", not(feature = "fp_simd")))]
    core::arch::naked_asm!(
        "
        stp x19, x20, [x0,#0]
        stp x21, x22, [x0,#16]
        stp x23, x24, [x0,#32]
        stp x25, x26, [x0,#48]
        stp x27, x28, [x0,#64]
        stp x29, x30, [x0,#80]
        mov x2, sp
        str x2, [x0,#104]
        mov x0, #0
        ret",
    );
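    // x86_64 (System V ABI): save the callee-saved registers rbx, rbp, r12..r15,
    // the post-return stack pointer (rsp + 8), and the return address at [rsp].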
    #[cfg(target_arch = "x86_64")]
    core::arch::naked_asm!(
        "mov [rdi], rbx
        mov [rdi + 8], rbp
        mov [rdi + 16], r12
        mov [rdi + 24], r13
        mov [rdi + 32], r14
        mov [rdi + 40], r15
        lea rdx, [rsp + 8]
        mov [rdi + 48], rdx
        mov rdx, [rsp]
        mov [rdi + 56], rdx
        xor rax, rax
        ret",
    );
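    // RISC-V 64: save the callee-saved registers s0..s11, sp, and ra; with
    // `fp_simd`, also the callee-saved FP registers fs0..fs11.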
    #[cfg(all(target_arch = "riscv64", feature = "fp_simd"))]
    core::arch::naked_asm!(
        "sd s0,    0(a0)
        sd s1,    8(a0)
        sd s2,    16(a0)
        sd s3,    24(a0)
        sd s4,    32(a0)
        sd s5,    40(a0)
        sd s6,    48(a0)
        sd s7,    56(a0)
        sd s8,    64(a0)
        sd s9,    72(a0)
        sd s10,   80(a0)
        sd s11,   88(a0)
        sd sp,    96(a0)
        sd ra,    104(a0)

        fsd fs0,  112(a0)
        fsd fs1,  120(a0)
        fsd fs2,  128(a0)
        fsd fs3,  136(a0)
        fsd fs4,  144(a0)
        fsd fs5,  152(a0)
        fsd fs6,  160(a0)
        fsd fs7,  168(a0)
        fsd fs8,  176(a0)
        fsd fs9,  184(a0)
        fsd fs10, 192(a0)
        fsd fs11, 200(a0)

        li a0, 0
        ret",
    );
    #[cfg(all(target_arch = "riscv64", not(feature = "fp_simd")))]
    core::arch::naked_asm!(
        "sd s0,    0(a0)
        sd s1,    8(a0)
        sd s2,    16(a0)
        sd s3,    24(a0)
        sd s4,    32(a0)
        sd s5,    40(a0)
        sd s6,    48(a0)
        sd s7,    56(a0)
        sd s8,    64(a0)
        sd s9,    72(a0)
        sd s10,   80(a0)
        sd s11,   88(a0)
        sd sp,    96(a0)
        sd ra,    104(a0)

        li a0, 0
        ret",
    );
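    // LoongArch64: save ra, sp, the callee-saved registers s0..s8, fp, and r21;
    // with `fp_simd`, also the callee-saved FP registers f24..f31.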
    #[cfg(all(target_arch = "loongarch64", feature = "fp_simd"))]
    core::arch::naked_asm!(
        "
        st.d     $ra, $a0, 0
        st.d     $sp, $a0, 1 * 8
        st.d     $s0, $a0, 2 * 8
        st.d     $s1, $a0, 3 * 8
        st.d     $s2, $a0, 4 * 8
        st.d     $s3, $a0, 5 * 8
        st.d     $s4, $a0, 6 * 8
        st.d     $s5, $a0, 7 * 8
        st.d     $s6, $a0, 8 * 8
        st.d     $s7, $a0, 9 * 8
        st.d     $s8, $a0, 10 * 8
        st.d     $fp, $a0, 11 * 8
        st.d     $r21, $a0, 12 * 8
        fst.d    $f24, $a0, 13 * 8
        fst.d    $f25, $a0, 14 * 8
        fst.d    $f26, $a0, 15 * 8
        fst.d    $f27, $a0, 16 * 8
        fst.d    $f28, $a0, 17 * 8
        fst.d    $f29, $a0, 18 * 8
        fst.d    $f30, $a0, 19 * 8
        fst.d    $f31, $a0, 20 * 8
        li.w  $a0, 0
        ret",
    );
    #[cfg(all(target_arch = "loongarch64", not(feature = "fp_simd")))]
    core::arch::naked_asm!(
        "
        st.d     $ra, $a0, 0
        st.d     $sp, $a0, 1 * 8
        st.d     $s0, $a0, 2 * 8
        st.d     $s1, $a0, 3 * 8
        st.d     $s2, $a0, 4 * 8
        st.d     $s3, $a0, 5 * 8
        st.d     $s4, $a0, 6 * 8
        st.d     $s5, $a0, 7 * 8
        st.d     $s6, $a0, 8 * 8
        st.d     $s7, $a0, 9 * 8
        st.d     $s8, $a0, 10 * 8
        st.d     $fp, $a0, 11 * 8
        st.d     $r21, $a0, 12 * 8
        li.w  $a0, 0
        ret",
    );
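    // Fallback for other targets: nothing is saved; `setjmp` simply returns.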
    #[cfg(not(any(
        target_arch = "aarch64",
        target_arch = "x86_64",
        target_arch = "riscv64",
        target_arch = "loongarch64"
    )))]
    core::arch::naked_asm!("ret")
}

/// `longjmp` implementation
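///
/// Restores the register state saved in `_buf` by [`setjmp`] and resumes
/// execution at the corresponding `setjmp` call site, which then appears to
/// return `_val` (or `1` if `_val` is `0`).
///
/// The sketch below is illustrative only: these are C-ABI routines normally
/// reached from C via `<setjmp.h>`, and the hypothetical `extern` declarations
/// assume the usual C prototypes rather than this file's naked-function
/// signatures.
///
/// ```ignore
/// use core::ffi::{c_int, c_void};
///
/// unsafe extern "C" {
///     fn setjmp(buf: *mut c_void) -> c_int;
///     fn longjmp(buf: *mut c_void, val: c_int) -> !;
/// }
///
/// unsafe {
///     // Sized generously; the largest layout in this file needs 208 bytes.
///     let mut buf = [0u64; 32];
///     if setjmp(buf.as_mut_ptr().cast()) == 0 {
///         // Direct return: `setjmp` yielded 0.
///         longjmp(buf.as_mut_ptr().cast(), 42);
///     } else {
///         // Second return, via `longjmp`: `setjmp` yielded 42.
///     }
/// }
/// ```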
#[unsafe(naked)]
#[unsafe(no_mangle)]
pub unsafe extern "C" fn longjmp(_buf: *mut ctypes::__jmp_buf_tag, _val: c_int) -> ! {
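    // AArch64: restore x19..x30 and sp (plus d8..d15 with `fp_simd`), set the
    // return value to `_val` (or 1 if it is 0), and branch to the saved x30.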
    #[cfg(all(target_arch = "aarch64", feature = "fp_simd"))]
    core::arch::naked_asm!(
        "ldp x19, x20, [x0,#0]
        ldp x21, x22, [x0,#16]
        ldp x23, x24, [x0,#32]
        ldp x25, x26, [x0,#48]
        ldp x27, x28, [x0,#64]
        ldp x29, x30, [x0,#80]
        ldr x2, [x0,#104]
        mov sp, x2
        ldp  d8,  d9, [x0,#112]
        ldp d10, d11, [x0,#128]
        ldp d12, d13, [x0,#144]
        ldp d14, d15, [x0,#160]

        cmp w1, 0
        csinc w0, w1, wzr, ne
        br x30",
    );
    #[cfg(all(target_arch = "aarch64", not(feature = "fp_simd")))]
    core::arch::naked_asm!(
        "ldp x19, x20, [x0,#0]
        ldp x21, x22, [x0,#16]
        ldp x23, x24, [x0,#32]
        ldp x25, x26, [x0,#48]
        ldp x27, x28, [x0,#64]
        ldp x29, x30, [x0,#80]
        ldr x2, [x0,#104]
        mov sp, x2

        cmp w1, 0
        csinc w0, w1, wzr, ne
        br x30",
    );
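    // x86_64: set rax to `_val` (or 1 if it is 0), restore the callee-saved
    // registers and rsp, then jump to the saved return address.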
    #[cfg(target_arch = "x86_64")]
    core::arch::naked_asm!(
        "mov rax, rsi
        test rax, rax
        jnz 2f
        inc rax
    2:
        mov rbx, [rdi]
        mov rbp, [rdi + 8]
        mov r12, [rdi + 16]
        mov r13, [rdi + 24]
        mov r14, [rdi + 32]
        mov r15, [rdi + 40]
        mov rdx, [rdi + 48]
        mov rsp, rdx
        mov rdx, [rdi + 56]
        jmp rdx",
    );
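    // RISC-V 64: restore s0..s11, sp, and ra (plus fs0..fs11 with `fp_simd`),
    // then return `_val` (or 1 if it is 0) through the saved ra.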
    #[cfg(all(target_arch = "riscv64", feature = "fp_simd"))]
    core::arch::naked_asm!(
        "ld s0,    0(a0)
        ld s1,    8(a0)
        ld s2,    16(a0)
        ld s3,    24(a0)
        ld s4,    32(a0)
        ld s5,    40(a0)
        ld s6,    48(a0)
        ld s7,    56(a0)
        ld s8,    64(a0)
        ld s9,    72(a0)
        ld s10,   80(a0)
        ld s11,   88(a0)
        ld sp,    96(a0)
        ld ra,    104(a0)

        fld fs0,  112(a0)
        fld fs1,  120(a0)
        fld fs2,  128(a0)
        fld fs3,  136(a0)
        fld fs4,  144(a0)
        fld fs5,  152(a0)
        fld fs6,  160(a0)
        fld fs7,  168(a0)
        fld fs8,  176(a0)
        fld fs9,  184(a0)
        fld fs10, 192(a0)
        fld fs11, 200(a0)

        seqz a0, a1
        add a0, a0, a1
        ret",
    );
    #[cfg(all(target_arch = "riscv64", not(feature = "fp_simd")))]
    core::arch::naked_asm!(
        "ld s0,    0(a0)
        ld s1,    8(a0)
        ld s2,    16(a0)
        ld s3,    24(a0)
        ld s4,    32(a0)
        ld s5,    40(a0)
        ld s6,    48(a0)
        ld s7,    56(a0)
        ld s8,    64(a0)
        ld s9,    72(a0)
        ld s10,   80(a0)
        ld s11,   88(a0)
        ld sp,    96(a0)
        ld ra,    104(a0)

        seqz a0, a1
        add a0, a0, a1
        ret",
    );

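    // LoongArch64: restore ra, sp, s0..s8, fp, and r21 from the buffer in `$a0`
    // (plus f24..f31 with `fp_simd`), then return `_val` (or 1 if it is 0).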
    #[cfg(all(target_arch = "loongarch64", feature = "fp_simd"))]
    core::arch::naked_asm!(
        "
        ld.d     $ra, $a0, 0
        ld.d     $s0, $a0, 2 * 8
        ld.d     $s1, $a0, 3 * 8
        ld.d     $s2, $a0, 4 * 8
        ld.d     $s3, $a0, 5 * 8
        ld.d     $s4, $a0, 6 * 8
        ld.d     $s5, $a0, 7 * 8
        ld.d     $s6, $a0, 8 * 8
        ld.d     $s7, $a0, 9 * 8
        ld.d     $s8, $a0, 10 * 8
        ld.d     $fp, $a0, 11 * 8
        ld.d     $sp, $a0, 1 * 8
        ld.d     $r21, $a0, 12 * 8
        fld.d    $f24, $a0, 13 * 8
        fld.d    $f25, $a0, 14 * 8
        fld.d    $f26, $a0, 15 * 8
        fld.d    $f27, $a0, 16 * 8
        fld.d    $f28, $a0, 17 * 8
        fld.d    $f29, $a0, 18 * 8
        fld.d    $f30, $a0, 19 * 8
        fld.d    $f31, $a0, 20 * 8
        sltui    $a0, $a1, 1
        add.d    $a0, $a0, $a1
        jirl     $zero, $ra, 0",
    );
    #[cfg(all(target_arch = "loongarch64", not(feature = "fp_simd")))]
    core::arch::naked_asm!(
        "
        ld.d     $ra, $a0, 0
        ld.d     $s0, $a0, 2 * 8
        ld.d     $s1, $a0, 3 * 8
        ld.d     $s2, $a0, 4 * 8
        ld.d     $s3, $a0, 5 * 8
        ld.d     $s4, $a0, 6 * 8
        ld.d     $s5, $a0, 7 * 8
        ld.d     $s6, $a0, 8 * 8
        ld.d     $s7, $a0, 9 * 8
        ld.d     $s8, $a0, 10 * 8
        ld.d     $fp, $a0, 11 * 8
        ld.d     $sp, $a0, 1 * 8
        ld.d     $r21, $a0, 12 * 8
        sltui    $a0, $a1, 1
        add.d    $a0, $a0, $a1
        jirl     $zero, $ra, 0",
    );
}