x86: Reinstate hand-written unwind info for sysv.S

This commit is contained in:
Richard Henderson
2014-12-10 13:37:36 -08:00
parent 6cedf81ca7
commit b7f6d7aa9b

View File

@@ -30,7 +30,6 @@
#define LIBFFI_ASM
#include <fficonfig.h>
#include <ffi.h>
#include <ffi_cfi.h>
#include "internal.h"
#define C2(X, Y) X ## Y
@@ -89,7 +88,8 @@
*/
ffi_call_i386:
cfi_startproc
L(UW0):
# cfi_startproc
movl (%esp), %eax /* move the return address */
movl %ebp, (%ecx) /* store %ebp into local frame */
movl %eax, 4(%ecx) /* store retaddr into local frame */
@@ -102,8 +102,9 @@ ffi_call_i386:
moved the return address is (the new) CFA-4, so from the
perspective of the unwind info, it hasn't moved. */
movl %ecx, %ebp
cfi_def_cfa(%ebp, 8)
cfi_rel_offset(%ebp, 0)
L(UW1):
# cfi_def_cfa(%ebp, 8)
# cfi_rel_offset(%ebp, 0)
movl %edx, %esp /* set outgoing argument stack */
movl 20+R_EAX*4(%ebp), %eax /* set register arguments */
@@ -114,7 +115,8 @@ ffi_call_i386:
movl 12(%ebp), %ecx /* load return type code */
movl %ebx, 8(%ebp) /* preserve %ebx */
cfi_rel_offset(%ebx, 8)
L(UW2):
# cfi_rel_offset(%ebx, 8)
andl $X86_RET_TYPE_MASK, %ecx
#ifdef __PIC__
@@ -165,12 +167,14 @@ L(e1):
movl 8(%ebp), %ebx
movl %ebp, %esp
popl %ebp
cfi_remember_state
cfi_def_cfa(%esp, 4)
cfi_restore(%ebx)
cfi_restore(%ebp)
L(UW3):
# cfi_remember_state
# cfi_def_cfa(%esp, 4)
# cfi_restore(%ebx)
# cfi_restore(%ebp)
ret
cfi_restore_state
L(UW4):
# cfi_restore_state
E(L(store_table), X86_RET_STRUCTPOP)
jmp L(e1)
@@ -189,7 +193,8 @@ E(L(store_table), X86_RET_UNUSED14)
E(L(store_table), X86_RET_UNUSED15)
ud2
cfi_endproc
L(UW5):
# cfi_endproc
ENDF(ffi_call_i386)
/* The inner helper is declared as
@@ -220,11 +225,11 @@ ENDF(ffi_call_i386)
movl %ecx, 32(%esp); \
movl %eax, 36(%esp)
# define FFI_CLOSURE_CALL_INNER \
# define FFI_CLOSURE_CALL_INNER(UW) \
movl %esp, %ecx; /* load closure_data */ \
leal closure_FS+4(%esp), %edx; /* load incoming stack */ \
call ffi_closure_inner
#define FFI_CLOSURE_MASK_AND_JUMP(N) \
#define FFI_CLOSURE_MASK_AND_JUMP(N, UW) \
andl $X86_RET_TYPE_MASK, %eax; \
leal L(C1(load_table,N))(, %eax, 8), %eax; \
jmp *%eax
@@ -232,28 +237,31 @@ ENDF(ffi_call_i386)
#ifdef __PIC__
# if defined X86_DARWIN || defined HAVE_HIDDEN_VISIBILITY_ATTRIBUTE
# undef FFI_CLOSURE_MASK_AND_JUMP
# define FFI_CLOSURE_MASK_AND_JUMP(N) \
# define FFI_CLOSURE_MASK_AND_JUMP(N, UW) \
andl $X86_RET_TYPE_MASK, %eax; \
call C(__x86.get_pc_thunk.dx); \
L(C1(pc,N)): \
leal L(C1(load_table,N))-L(C1(pc,N))(%edx, %eax, 8), %eax; \
jmp *%eax
# else
# define FFI_CLOSURE_CALL_INNER_SAVE_EBX
# undef FFI_CLOSURE_CALL_INNER
# define FFI_CLOSURE_CALL_INNER \
# define FFI_CLOSURE_CALL_INNER(UWN) \
movl %esp, %ecx; /* load closure_data */ \
leal closure_FS+4(%esp), %edx; /* load incoming stack */ \
movl %ebx, 40(%esp); /* save ebx */ \
cfi_rel_offset(%ebx, 40); \
L(C1(UW,UWN)): \
# cfi_rel_offset(%ebx, 40); \
call C(__x86.get_pc_thunk.bx); /* load got register */ \
addl $C(_GLOBAL_OFFSET_TABLE_), %ebx; \
call ffi_closure_inner@PLT
# undef FFI_CLOSURE_MASK_AND_JUMP
# define FFI_CLOSURE_MASK_AND_JUMP(N) \
# define FFI_CLOSURE_MASK_AND_JUMP(N, UWN) \
andl $X86_RET_TYPE_MASK, %eax; \
leal L(C1(load_table,N))@GOTOFF(%ebx, %eax, 8), %eax; \
movl 40(%esp), %ebx; /* restore ebx */ \
cfi_restore(%ebx); \
L(C1(UW,UWN)): \
# cfi_restore(%ebx); \
jmp *%eax
# endif /* DARWIN || HIDDEN */
#endif /* __PIC__ */
@@ -262,9 +270,11 @@ L(C1(pc,N)): \
.globl C(ffi_go_closure_EAX)
FFI_HIDDEN(C(ffi_go_closure_EAX))
C(ffi_go_closure_EAX):
cfi_startproc
L(UW6):
# cfi_startproc
subl $closure_FS, %esp
cfi_def_cfa_offset(closure_FS + 4)
L(UW7):
# cfi_def_cfa_offset(closure_FS + 4)
FFI_CLOSURE_SAVE_REGS
movl 4(%eax), %edx /* copy cif */
movl 8(%eax), %ecx /* copy fun */
@@ -272,16 +282,19 @@ C(ffi_go_closure_EAX):
movl %ecx, 32(%esp)
movl %eax, 36(%esp) /* closure is user_data */
jmp L(do_closure_i386)
cfi_endproc
L(UW8):
# cfi_endproc
ENDF(C(ffi_go_closure_EAX))
.balign 16
.globl C(ffi_go_closure_ECX)
FFI_HIDDEN(C(ffi_go_closure_ECX))
C(ffi_go_closure_ECX):
cfi_startproc
L(UW9):
# cfi_startproc
subl $closure_FS, %esp
cfi_def_cfa_offset(closure_FS + 4)
L(UW10):
# cfi_def_cfa_offset(closure_FS + 4)
FFI_CLOSURE_SAVE_REGS
movl 4(%ecx), %edx /* copy cif */
movl 8(%ecx), %eax /* copy fun */
@@ -289,7 +302,8 @@ C(ffi_go_closure_ECX):
movl %eax, 32(%esp)
movl %ecx, 36(%esp) /* closure is user_data */
jmp L(do_closure_i386)
cfi_endproc
L(UW11):
# cfi_endproc
ENDF(C(ffi_go_closure_ECX))
/* The closure entry points are reached from the ffi_closure trampoline.
@@ -300,10 +314,11 @@ ENDF(C(ffi_go_closure_ECX))
FFI_HIDDEN(C(ffi_closure_i386))
C(ffi_closure_i386):
cfi_startproc
L(UW12):
# cfi_startproc
subl $closure_FS, %esp
/* Note clang bug 21515: adjust_cfa_offset error across endproc. */
cfi_def_cfa_offset(closure_FS + 4)
L(UW13):
# cfi_def_cfa_offset(closure_FS + 4)
FFI_CLOSURE_SAVE_REGS
FFI_CLOSURE_COPY_TRAMP_DATA
@@ -311,8 +326,8 @@ C(ffi_closure_i386):
/* Entry point from preceding Go closures. */
L(do_closure_i386):
FFI_CLOSURE_CALL_INNER
FFI_CLOSURE_MASK_AND_JUMP(2)
FFI_CLOSURE_CALL_INNER(14)
FFI_CLOSURE_MASK_AND_JUMP(2, 15)
.balign 8
L(load_table2):
@@ -346,14 +361,18 @@ E(L(load_table2), X86_RET_INT32)
E(L(load_table2), X86_RET_VOID)
L(e2):
addl $closure_FS, %esp
cfi_adjust_cfa_offset(-closure_FS)
L(UW16):
# cfi_adjust_cfa_offset(-closure_FS)
ret
cfi_adjust_cfa_offset(closure_FS)
L(UW17):
# cfi_adjust_cfa_offset(closure_FS)
E(L(load_table2), X86_RET_STRUCTPOP)
addl $closure_FS, %esp
cfi_adjust_cfa_offset(-closure_FS)
L(UW18):
# cfi_adjust_cfa_offset(-closure_FS)
ret $4
cfi_adjust_cfa_offset(closure_FS)
L(UW19):
# cfi_adjust_cfa_offset(closure_FS)
E(L(load_table2), X86_RET_STRUCTARG)
movl (%esp), %eax
jmp L(e2)
@@ -370,16 +389,19 @@ E(L(load_table2), X86_RET_UNUSED14)
E(L(load_table2), X86_RET_UNUSED15)
ud2
cfi_endproc
L(UW20):
# cfi_endproc
ENDF(C(ffi_closure_i386))
.balign 16
.globl C(ffi_go_closure_STDCALL)
FFI_HIDDEN(C(ffi_go_closure_STDCALL))
C(ffi_go_closure_STDCALL):
cfi_startproc
L(UW21):
# cfi_startproc
subl $closure_FS, %esp
cfi_def_cfa_offset(closure_FS + 4)
L(UW22):
# cfi_def_cfa_offset(closure_FS + 4)
FFI_CLOSURE_SAVE_REGS
movl 4(%ecx), %edx /* copy cif */
movl 8(%ecx), %eax /* copy fun */
@@ -387,7 +409,8 @@ C(ffi_go_closure_STDCALL):
movl %eax, 32(%esp)
movl %ecx, 36(%esp) /* closure is user_data */
jmp L(do_closure_STDCALL)
cfi_endproc
L(UW23):
# cfi_endproc
ENDF(C(ffi_go_closure_STDCALL))
/* For REGISTER, we have no available parameter registers, and so we
@@ -397,21 +420,20 @@ ENDF(C(ffi_go_closure_STDCALL))
.globl C(ffi_closure_REGISTER)
FFI_HIDDEN(C(ffi_closure_REGISTER))
C(ffi_closure_REGISTER):
cfi_startproc
cfi_def_cfa(%esp, 8)
cfi_offset(%eip, -8)
L(UW24):
# cfi_startproc
# cfi_def_cfa(%esp, 8)
# cfi_offset(%eip, -8)
subl $closure_FS-4, %esp
/* Note clang bug 21515: adjust_cfa_offset error across endproc. */
cfi_def_cfa_offset(closure_FS + 4)
L(UW25):
# cfi_def_cfa_offset(closure_FS + 4)
FFI_CLOSURE_SAVE_REGS
movl closure_FS-4(%esp), %ecx /* load retaddr */
movl closure_FS(%esp), %eax /* load closure */
movl %ecx, closure_FS(%esp) /* move retaddr */
jmp L(do_closure_REGISTER)
cfi_endproc
L(UW26):
# cfi_endproc
ENDF(C(ffi_closure_REGISTER))
/* For STDCALL (and others), we need to pop N bytes of arguments off
@@ -422,10 +444,11 @@ ENDF(C(ffi_closure_REGISTER))
.globl C(ffi_closure_STDCALL)
FFI_HIDDEN(C(ffi_closure_STDCALL))
C(ffi_closure_STDCALL):
cfi_startproc
L(UW27):
# cfi_startproc
subl $closure_FS, %esp
/* Note clang bug 21515: adjust_cfa_offset error across endproc. */
cfi_def_cfa_offset(closure_FS + 4)
L(UW28):
# cfi_def_cfa_offset(closure_FS + 4)
FFI_CLOSURE_SAVE_REGS
@@ -437,7 +460,7 @@ L(do_closure_REGISTER):
/* Entry point from preceding Go closure. */
L(do_closure_STDCALL):
FFI_CLOSURE_CALL_INNER
FFI_CLOSURE_CALL_INNER(29)
movl %eax, %ecx
shrl $X86_RET_POP_SHIFT, %ecx /* isolate pop count */
@@ -451,7 +474,7 @@ L(do_closure_STDCALL):
there is always a window between the mov and the ret which
will be wrong from one point of view or another. */
FFI_CLOSURE_MASK_AND_JUMP(3)
FFI_CLOSURE_MASK_AND_JUMP(3, 30)
.balign 8
L(load_table3):
@@ -517,7 +540,8 @@ E(L(load_table3), X86_RET_UNUSED14)
E(L(load_table3), X86_RET_UNUSED15)
ud2
cfi_endproc
L(UW31):
# cfi_endproc
ENDF(C(ffi_closure_STDCALL))
#if !FFI_NO_RAW_API
@@ -528,12 +552,14 @@ ENDF(C(ffi_closure_STDCALL))
.globl C(ffi_closure_raw_SYSV)
FFI_HIDDEN(C(ffi_closure_raw_SYSV))
C(ffi_closure_raw_SYSV):
cfi_startproc
L(UW32):
# cfi_startproc
subl $raw_closure_S_FS, %esp
/* Note clang bug 21515: adjust_cfa_offset error across endproc. */
cfi_def_cfa_offset(raw_closure_S_FS + 4)
L(UW33):
# cfi_def_cfa_offset(raw_closure_S_FS + 4)
movl %ebx, raw_closure_S_FS-4(%esp)
cfi_rel_offset(%ebx, raw_closure_S_FS-4)
L(UW34):
# cfi_rel_offset(%ebx, raw_closure_S_FS-4)
movl FFI_TRAMPOLINE_SIZE+8(%eax), %edx /* load cl->user_data */
movl %edx, 12(%esp)
@@ -555,7 +581,8 @@ L(pc4):
leal L(load_table4)(,%eax, 8), %eax
#endif
movl raw_closure_S_FS-4(%esp), %ebx
cfi_restore(%ebx)
L(UW35):
# cfi_restore(%ebx)
jmp *%eax
.balign 8
@@ -590,14 +617,18 @@ E(L(load_table4), X86_RET_INT32)
E(L(load_table4), X86_RET_VOID)
L(e4):
addl $raw_closure_S_FS, %esp
cfi_adjust_cfa_offset(-raw_closure_S_FS)
L(UW36):
# cfi_adjust_cfa_offset(-raw_closure_S_FS)
ret
cfi_adjust_cfa_offset(raw_closure_S_FS)
L(UW37):
# cfi_adjust_cfa_offset(raw_closure_S_FS)
E(L(load_table4), X86_RET_STRUCTPOP)
addl $raw_closure_S_FS, %esp
cfi_adjust_cfa_offset(-raw_closure_S_FS)
L(UW38):
# cfi_adjust_cfa_offset(-raw_closure_S_FS)
ret $4
cfi_adjust_cfa_offset(raw_closure_S_FS)
L(UW39):
# cfi_adjust_cfa_offset(raw_closure_S_FS)
E(L(load_table4), X86_RET_STRUCTARG)
movl 16(%esp), %eax
jmp L(e4)
@@ -614,32 +645,37 @@ E(L(load_table4), X86_RET_UNUSED14)
E(L(load_table4), X86_RET_UNUSED15)
ud2
cfi_endproc
L(UW40):
# cfi_endproc
ENDF(C(ffi_closure_raw_SYSV))
#undef raw_closure_S_FS
#define raw_closure_T_FS (16+16+8)
.balign 16
.globl C(ffi_closure_raw_THISCALL)
FFI_HIDDEN(C(ffi_closure_raw_THISCALL))
C(ffi_closure_raw_THISCALL):
cfi_startproc
L(UW41):
# cfi_startproc
/* Rearrange the stack such that %ecx is the first argument.
This means moving the return address. */
popl %edx
/* Note clang bug 21515: adjust_cfa_offset error across endproc. */
cfi_def_cfa_offset(0)
cfi_register(%eip, %edx)
L(UW42):
# cfi_def_cfa_offset(0)
# cfi_register(%eip, %edx)
pushl %ecx
cfi_adjust_cfa_offset(4)
L(UW43):
# cfi_adjust_cfa_offset(4)
pushl %edx
cfi_adjust_cfa_offset(4)
cfi_rel_offset(%eip, 0)
L(UW44):
# cfi_adjust_cfa_offset(4)
# cfi_rel_offset(%eip, 0)
subl $raw_closure_T_FS, %esp
cfi_adjust_cfa_offset(raw_closure_T_FS)
L(UW45):
# cfi_adjust_cfa_offset(raw_closure_T_FS)
movl %ebx, raw_closure_T_FS-4(%esp)
cfi_rel_offset(%ebx, raw_closure_T_FS-4)
L(UW46):
# cfi_rel_offset(%ebx, raw_closure_T_FS-4)
movl FFI_TRAMPOLINE_SIZE+8(%eax), %edx /* load cl->user_data */
movl %edx, 12(%esp)
@@ -661,7 +697,8 @@ L(pc5):
leal L(load_table5)(,%eax, 8), %eax
#endif
movl raw_closure_T_FS-4(%esp), %ebx
cfi_restore(%ebx)
L(UW47):
# cfi_restore(%ebx)
jmp *%eax
.balign 8
@@ -696,15 +733,19 @@ E(L(load_table5), X86_RET_INT32)
E(L(load_table5), X86_RET_VOID)
L(e5):
addl $raw_closure_T_FS, %esp
cfi_adjust_cfa_offset(-raw_closure_T_FS)
L(UW48):
# cfi_adjust_cfa_offset(-raw_closure_T_FS)
/* Remove the extra %ecx argument we pushed. */
ret $4
cfi_adjust_cfa_offset(raw_closure_T_FS)
L(UW49):
# cfi_adjust_cfa_offset(raw_closure_T_FS)
E(L(load_table5), X86_RET_STRUCTPOP)
addl $raw_closure_T_FS, %esp
cfi_adjust_cfa_offset(-raw_closure_T_FS)
L(UW50):
# cfi_adjust_cfa_offset(-raw_closure_T_FS)
ret $8
cfi_adjust_cfa_offset(raw_closure_T_FS)
L(UW51):
# cfi_adjust_cfa_offset(raw_closure_T_FS)
E(L(load_table5), X86_RET_STRUCTARG)
movl 16(%esp), %eax
jmp L(e5)
@@ -721,7 +762,8 @@ E(L(load_table5), X86_RET_UNUSED14)
E(L(load_table5), X86_RET_UNUSED15)
ud2
cfi_endproc
L(UW52):
# cfi_endproc
ENDF(C(ffi_closure_raw_THISCALL))
#endif /* !FFI_NO_RAW_API */
@@ -743,22 +785,232 @@ ENDF(C(ffi_closure_raw_THISCALL))
#if defined(__PIC__)
/* PC thunk for PIC code: load this call's return address into %ebx.
   A caller does `call __x86.get_pc_thunk.bx`, after which %ebx holds
   the address of the instruction following that call, giving the
   caller a PC-relative base (typically then biased by
   $_GLOBAL_OFFSET_TABLE_ to address the GOT).  Clobbers only %ebx. */
COMDAT(C(__x86.get_pc_thunk.bx))
C(__x86.get_pc_thunk.bx):
cfi_startproc
movl (%esp), %ebx /* %ebx = return address pushed by the call */
ret
cfi_endproc
ENDF(C(__x86.get_pc_thunk.bx))
# if defined X86_DARWIN || defined HAVE_HIDDEN_VISIBILITY_ATTRIBUTE
/* PC thunk variant returning in %edx instead of %ebx, for paths that
   must keep %ebx live (see FFI_CLOSURE_MASK_AND_JUMP on Darwin/hidden
   builds).  %edx = address of the instruction after the call.
   Clobbers only %edx. */
COMDAT(C(__x86.get_pc_thunk.dx))
C(__x86.get_pc_thunk.dx):
cfi_startproc
movl (%esp), %edx /* %edx = return address pushed by the call */
ret
cfi_endproc
ENDF(C(__x86.get_pc_thunk.dx))
#endif /* DARWIN || HIDDEN */
#endif /* __PIC__ */
/* Sadly, OSX cctools-as doesn't understand .cfi directives at all. */
#ifdef __APPLE__
.section __TEXT,__eh_frame,coalesced,no_toc+strip_static_syms+live_support
EHFrame0:
#elif defined(HAVE_AS_X86_64_UNWIND_SECTION_TYPE)
.section .eh_frame,"a",@unwind
#else
.section .eh_frame,"a",@progbits
#endif
#ifdef HAVE_AS_X86_PCREL
# define PCREL(X) X - .
#else
# define PCREL(X) X@rel
#endif
/* Simplify advancing between labels. Assume DW_CFA_advance_loc1 fits. */
#define ADV(N, P) .byte 2, L(N)-L(P)
.balign 4
L(CIE):
.set L(set0),L(ECIE)-L(SCIE)
.long L(set0) /* CIE Length */
L(SCIE):
.long 0 /* CIE Identifier Tag */
.byte 1 /* CIE Version */
.ascii "zR\0" /* CIE Augmentation */
.byte 1 /* CIE Code Alignment Factor */
.byte 0x7c /* CIE Data Alignment Factor */
.byte 0x8 /* CIE RA Column */
.byte 1 /* Augmentation size */
.byte 0x1b /* FDE Encoding (pcrel sdata4) */
.byte 0xc, 4, 4 /* DW_CFA_def_cfa, %esp offset 4 */
.byte 0x80+8, 1 /* DW_CFA_offset, %eip offset 1*-4 */
.balign 4
L(ECIE):
.set L(set1),L(EFDE1)-L(SFDE1)
.long L(set1) /* FDE Length */
L(SFDE1):
.long L(SFDE1)-L(CIE) /* FDE CIE offset */
.long PCREL(L(UW0)) /* Initial location */
.long L(UW5)-L(UW0) /* Address range */
.byte 0 /* Augmentation size */
ADV(UW1, UW0)
.byte 0xc, 5, 8 /* DW_CFA_def_cfa, %ebp 8 */
.byte 0x80+5, 2 /* DW_CFA_offset, %ebp 2*-4 */
ADV(UW2, UW1)
.byte 0x80+3, 0 /* DW_CFA_offset, %ebx 0*-4 */
ADV(UW3, UW2)
.byte 0xa /* DW_CFA_remember_state */
.byte 0xc, 4, 4 /* DW_CFA_def_cfa, %esp 4 */
.byte 0xc0+3 /* DW_CFA_restore, %ebx */
.byte 0xc0+5 /* DW_CFA_restore, %ebp */
ADV(UW4, UW3)
.byte 0xb /* DW_CFA_restore_state */
.balign 4
L(EFDE1):
.set L(set2),L(EFDE2)-L(SFDE2)
.long L(set2) /* FDE Length */
L(SFDE2):
.long L(SFDE2)-L(CIE) /* FDE CIE offset */
.long PCREL(L(UW6)) /* Initial location */
.long L(UW8)-L(UW6) /* Address range */
.byte 0 /* Augmentation size */
ADV(UW7, UW6)
.byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */
.balign 4
L(EFDE2):
.set L(set3),L(EFDE3)-L(SFDE3)
.long L(set3) /* FDE Length */
L(SFDE3):
.long L(SFDE3)-L(CIE) /* FDE CIE offset */
.long PCREL(L(UW9)) /* Initial location */
.long L(UW11)-L(UW9) /* Address range */
.byte 0 /* Augmentation size */
ADV(UW10, UW9)
.byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */
.balign 4
L(EFDE3):
.set L(set4),L(EFDE4)-L(SFDE4)
.long L(set4) /* FDE Length */
L(SFDE4):
.long L(SFDE4)-L(CIE) /* FDE CIE offset */
.long PCREL(L(UW12)) /* Initial location */
.long L(UW20)-L(UW12) /* Address range */
.byte 0 /* Augmentation size */
ADV(UW13, UW12)
.byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */
#ifdef FFI_CLOSURE_CALL_INNER_SAVE_EBX
ADV(UW14, UW13)
.byte 0x80+3, (40-(closure_FS+4))/-4 /* DW_CFA_offset %ebx */
ADV(UW15, UW14)
.byte 0xc0+3 /* DW_CFA_restore %ebx */
ADV(UW16, UW15)
#else
ADV(UW16, UW13)
#endif
.byte 0xe, 4 /* DW_CFA_def_cfa_offset */
ADV(UW17, UW16)
.byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */
ADV(UW18, UW17)
.byte 0xe, 4 /* DW_CFA_def_cfa_offset */
ADV(UW19, UW18)
.byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */
.balign 4
L(EFDE4):
.set L(set5),L(EFDE5)-L(SFDE5)
.long L(set5) /* FDE Length */
L(SFDE5):
.long L(SFDE5)-L(CIE) /* FDE CIE offset */
.long PCREL(L(UW21)) /* Initial location */
.long L(UW23)-L(UW21) /* Address range */
.byte 0 /* Augmentation size */
ADV(UW22, UW21)
.byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */
.balign 4
L(EFDE5):
.set L(set6),L(EFDE6)-L(SFDE6)
.long L(set6) /* FDE Length */
L(SFDE6):
.long L(SFDE6)-L(CIE) /* FDE CIE offset */
.long PCREL(L(UW24)) /* Initial location */
.long L(UW26)-L(UW24) /* Address range */
.byte 0 /* Augmentation size */
.byte 0xe, 8 /* DW_CFA_def_cfa_offset */
.byte 0x80+8, 2 /* DW_CFA_offset %eip, 2*-4 */
ADV(UW25, UW24)
.byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */
.balign 4
L(EFDE6):
.set L(set7),L(EFDE7)-L(SFDE7)
.long L(set7) /* FDE Length */
L(SFDE7):
.long L(SFDE7)-L(CIE) /* FDE CIE offset */
.long PCREL(L(UW27)) /* Initial location */
.long L(UW31)-L(UW27) /* Address range */
.byte 0 /* Augmentation size */
ADV(UW28, UW27)
.byte 0xe, closure_FS+4 /* DW_CFA_def_cfa_offset */
#ifdef FFI_CLOSURE_CALL_INNER_SAVE_EBX
ADV(UW29, UW28)
.byte 0x80+3, (40-(closure_FS+4))/-4 /* DW_CFA_offset %ebx */
ADV(UW30, UW29)
.byte 0xc0+3 /* DW_CFA_restore %ebx */
#endif
.balign 4
L(EFDE7):
#if !FFI_NO_RAW_API
.set L(set8),L(EFDE8)-L(SFDE8)
.long L(set8) /* FDE Length */
L(SFDE8):
.long L(SFDE8)-L(CIE) /* FDE CIE offset */
.long PCREL(L(UW32)) /* Initial location */
.long L(UW40)-L(UW32) /* Address range */
.byte 0 /* Augmentation size */
ADV(UW33, UW32)
.byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */
ADV(UW34, UW33)
.byte 0x80+3, 2 /* DW_CFA_offset %ebx 2*-4 */
ADV(UW35, UW34)
.byte 0xc0+3 /* DW_CFA_restore %ebx */
ADV(UW36, UW35)
.byte 0xe, 4 /* DW_CFA_def_cfa_offset */
ADV(UW37, UW36)
.byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */
ADV(UW38, UW37)
.byte 0xe, 4 /* DW_CFA_def_cfa_offset */
ADV(UW39, UW38)
.byte 0xe, raw_closure_S_FS+4 /* DW_CFA_def_cfa_offset */
.balign 4
L(EFDE8):
.set L(set9),L(EFDE9)-L(SFDE9)
.long L(set9) /* FDE Length */
L(SFDE9):
.long L(SFDE9)-L(CIE) /* FDE CIE offset */
.long PCREL(L(UW41)) /* Initial location */
.long L(UW52)-L(UW41) /* Address range */
.byte 0 /* Augmentation size */
ADV(UW42, UW41)
.byte 0xe, 0 /* DW_CFA_def_cfa_offset */
.byte 0x9, 8, 2 /* DW_CFA_register %eip, %edx */
ADV(UW43, UW42)
.byte 0xe, 4 /* DW_CFA_def_cfa_offset */
ADV(UW44, UW43)
.byte 0xe, 8 /* DW_CFA_def_cfa_offset */
.byte 0x80+8, 2 /* DW_CFA_offset %eip 2*-4 */
ADV(UW45, UW44)
.byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */
ADV(UW46, UW45)
.byte 0x80+3, 3 /* DW_CFA_offset %ebx 3*-4 */
ADV(UW47, UW46)
.byte 0xc0+3 /* DW_CFA_restore %ebx */
ADV(UW48, UW47)
.byte 0xe, 8 /* DW_CFA_def_cfa_offset */
ADV(UW49, UW48)
.byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */
ADV(UW50, UW49)
.byte 0xe, 8 /* DW_CFA_def_cfa_offset */
ADV(UW51, UW50)
.byte 0xe, raw_closure_T_FS+8 /* DW_CFA_def_cfa_offset */
.balign 4
L(EFDE9):
#endif /* !FFI_NO_RAW_API */
#endif /* ifndef __x86_64__ */
#if defined __ELF__ && defined __linux__