Column summary (from the dataset viewer header):

idx: int64, values 0 to 2.11M
name: string, lengths 1 to 118k
code: string, lengths 6 to 516k
asm: string, lengths 21 to 4.64M
file: string, lengths 39 to 143
opt: string, 1 class (every row below is "O0")
path: string, lengths 20 to 133

Each record below lists its fields in that order: idx, name, code, asm, file, opt, path.
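For orientation, one row of this dump can be modeled as a plain struct. This is a minimal sketch: the struct and its member names are hypothetical, and the field meanings are inferred from the rows below rather than taken from any published schema.

#include <cstdint>
#include <string>

// Hypothetical layout of one record; types follow the column summary
// above, comments follow the rows that appear below.
struct AsmSrcRecord
{
    std::int64_t idx;  // row id, 0 .. ~2.11M
    std::string name;  // demangled C++ function name
    std::string code;  // C++ source of that function
    std::string asm_;  // x86-64 disassembly ("asm" is reserved in C++)
    std::string file;  // originating json, e.g. ...scrfd.asm_src.json
    std::string opt;   // optimization level; a single class, "O0"
    std::string path;  // source path inside the ncnn build tree
};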
2,113,300
ncnn::UnaryOp_x86_avx512_functor::unary_op_tan::func(float const&) const
float func(const float& x) const { return (float)tan(x); }
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq %rsi, 0x8(%rsp) movq 0x8(%rsp), %rax vmovss (%rax), %xmm0 callq 0x163a300 addq $0x18, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,301
ncnn::UnaryOp_x86_avx512_functor::unary_op_asin::func_pack16(float vector[16] const&) const
__m512 func_pack16(const __m512& x) const
{
    //TODO avx512 optimize
    float tmp[16];
    _mm512_storeu_ps(tmp, x);
    for (int i = 0; i < 16; i++)
        tmp[i] = asin(tmp[i]);
    return _mm512_loadu_ps(tmp);
}
pushq %rbp movq %rsp, %rbp andq $-0x40, %rsp subq $0x100, %rsp # imm = 0x100 movq %rdi, 0x70(%rsp) movq %rsi, 0x68(%rsp) leaq 0x20(%rsp), %rax movq 0x68(%rsp), %rcx vmovaps (%rcx), %zmm0 movq %rax, 0xe8(%rsp) vmovaps %zmm0, 0x80(%rsp) vmovaps 0x80(%rsp), %zmm0 movq 0xe8(%rsp), %rax vmovups %zmm0, (%rax) movl $0x0, 0x1c(%rsp) cmpl $0x10, 0x1c(%rsp) jge 0x1657f69 movslq 0x1c(%rsp), %rax vmovss 0x20(%rsp,%rax,4), %xmm0 vzeroupper callq 0x163a350 movslq 0x1c(%rsp), %rax vmovss %xmm0, 0x20(%rsp,%rax,4) movl 0x1c(%rsp), %eax addl $0x1, %eax movl %eax, 0x1c(%rsp) jmp 0x1657f37 leaq 0x20(%rsp), %rax movq %rax, 0x78(%rsp) movq 0x78(%rsp), %rax vmovups (%rax), %zmm0 movq %rbp, %rsp popq %rbp retq nopw %cs:(%rax,%rax) nopl (%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,302
ncnn::UnaryOp_x86_avx512_functor::unary_op_asin::func_pack8(float vector[8] const&) const
__m256 func_pack8(const __m256& x) const
{
    //TODO avx optimize
    float tmp[8];
    _mm256_storeu_ps(tmp, x);
    tmp[0] = asin(tmp[0]);
    tmp[1] = asin(tmp[1]);
    tmp[2] = asin(tmp[2]);
    tmp[3] = asin(tmp[3]);
    tmp[4] = asin(tmp[4]);
    tmp[5] = asin(tmp[5]);
    tmp[6] = asin(tmp[6]);
    tmp[7] = asin(tmp[7]);
    return _mm256_loadu_ps(tmp);
}
pushq %rbp movq %rsp, %rbp andq $-0x20, %rsp subq $0x80, %rsp movq %rdi, 0x30(%rsp) movq %rsi, 0x28(%rsp) movq %rsp, %rax movq 0x28(%rsp), %rcx vmovaps (%rcx), %ymm0 movq %rax, 0x68(%rsp) vmovaps %ymm0, 0x40(%rsp) vmovaps 0x40(%rsp), %ymm0 movq 0x68(%rsp), %rax vmovups %ymm0, (%rax) vmovss (%rsp), %xmm0 vzeroupper callq 0x163a350 vmovss %xmm0, (%rsp) vmovss 0x4(%rsp), %xmm0 callq 0x163a350 vmovss %xmm0, 0x4(%rsp) vmovss 0x8(%rsp), %xmm0 callq 0x163a350 vmovss %xmm0, 0x8(%rsp) vmovss 0xc(%rsp), %xmm0 callq 0x163a350 vmovss %xmm0, 0xc(%rsp) vmovss 0x10(%rsp), %xmm0 callq 0x163a350 vmovss %xmm0, 0x10(%rsp) vmovss 0x14(%rsp), %xmm0 callq 0x163a350 vmovss %xmm0, 0x14(%rsp) vmovss 0x18(%rsp), %xmm0 callq 0x163a350 vmovss %xmm0, 0x18(%rsp) vmovss 0x1c(%rsp), %xmm0 callq 0x163a350 vmovss %xmm0, 0x1c(%rsp) movq %rsp, %rax movq %rax, 0x38(%rsp) movq 0x38(%rsp), %rax vmovups (%rax), %ymm0 movq %rbp, %rsp popq %rbp retq nop
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,303
ncnn::UnaryOp_x86_avx512_functor::unary_op_asin::func_pack4(float vector[4] const&) const
__m128 func_pack4(const __m128& x) const
{
    //TODO sse optimize
    float tmp[4];
    _mm_storeu_ps(tmp, x);
    tmp[0] = asin(tmp[0]);
    tmp[1] = asin(tmp[1]);
    tmp[2] = asin(tmp[2]);
    tmp[3] = asin(tmp[3]);
    return _mm_loadu_ps(tmp);
}
subq $0x48, %rsp movq %rdi, 0x18(%rsp) movq %rsi, 0x10(%rsp) movq %rsp, %rax movq 0x10(%rsp), %rcx vmovaps (%rcx), %xmm0 movq %rax, 0x38(%rsp) vmovaps %xmm0, 0x20(%rsp) vmovaps 0x20(%rsp), %xmm0 movq 0x38(%rsp), %rax vmovups %xmm0, (%rax) vmovss (%rsp), %xmm0 callq 0x163a350 vmovss %xmm0, (%rsp) vmovss 0x4(%rsp), %xmm0 callq 0x163a350 vmovss %xmm0, 0x4(%rsp) vmovss 0x8(%rsp), %xmm0 callq 0x163a350 vmovss %xmm0, 0x8(%rsp) vmovss 0xc(%rsp), %xmm0 callq 0x163a350 vmovss %xmm0, 0xc(%rsp) movq %rsp, %rax movq %rax, 0x40(%rsp) movq 0x40(%rsp), %rax vmovups (%rax), %xmm0 addq $0x48, %rsp retq nopl (%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,304
ncnn::UnaryOp_x86_avx512_functor::unary_op_asin::func(float const&) const
float func(const float& x) const { return (float)asin(x); }
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq %rsi, 0x8(%rsp) movq 0x8(%rsp), %rax vmovss (%rax), %xmm0 callq 0x163a350 addq $0x18, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,305
ncnn::UnaryOp_x86_avx512_functor::unary_op_acos::func_pack16(float vector[16] const&) const
__m512 func_pack16(const __m512& x) const
{
    //TODO avx512 optimize
    float tmp[16];
    _mm512_storeu_ps(tmp, x);
    for (int i = 0; i < 16; i++)
        tmp[i] = acos(tmp[i]);
    return _mm512_loadu_ps(tmp);
}
pushq %rbp movq %rsp, %rbp andq $-0x40, %rsp subq $0x100, %rsp # imm = 0x100 movq %rdi, 0x70(%rsp) movq %rsi, 0x68(%rsp) leaq 0x20(%rsp), %rax movq 0x68(%rsp), %rcx vmovaps (%rcx), %zmm0 movq %rax, 0xe8(%rsp) vmovaps %zmm0, 0x80(%rsp) vmovaps 0x80(%rsp), %zmm0 movq 0xe8(%rsp), %rax vmovups %zmm0, (%rax) movl $0x0, 0x1c(%rsp) cmpl $0x10, 0x1c(%rsp) jge 0x16581b9 movslq 0x1c(%rsp), %rax vmovss 0x20(%rsp,%rax,4), %xmm0 vzeroupper callq 0x163a3a0 movslq 0x1c(%rsp), %rax vmovss %xmm0, 0x20(%rsp,%rax,4) movl 0x1c(%rsp), %eax addl $0x1, %eax movl %eax, 0x1c(%rsp) jmp 0x1658187 leaq 0x20(%rsp), %rax movq %rax, 0x78(%rsp) movq 0x78(%rsp), %rax vmovups (%rax), %zmm0 movq %rbp, %rsp popq %rbp retq nopw %cs:(%rax,%rax) nopl (%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,306
ncnn::UnaryOp_x86_avx512_functor::unary_op_acos::func_pack8(float vector[8] const&) const
__m256 func_pack8(const __m256& x) const
{
    //TODO avx optimize
    float tmp[8];
    _mm256_storeu_ps(tmp, x);
    tmp[0] = acos(tmp[0]);
    tmp[1] = acos(tmp[1]);
    tmp[2] = acos(tmp[2]);
    tmp[3] = acos(tmp[3]);
    tmp[4] = acos(tmp[4]);
    tmp[5] = acos(tmp[5]);
    tmp[6] = acos(tmp[6]);
    tmp[7] = acos(tmp[7]);
    return _mm256_loadu_ps(tmp);
}
pushq %rbp movq %rsp, %rbp andq $-0x20, %rsp subq $0x80, %rsp movq %rdi, 0x30(%rsp) movq %rsi, 0x28(%rsp) movq %rsp, %rax movq 0x28(%rsp), %rcx vmovaps (%rcx), %ymm0 movq %rax, 0x68(%rsp) vmovaps %ymm0, 0x40(%rsp) vmovaps 0x40(%rsp), %ymm0 movq 0x68(%rsp), %rax vmovups %ymm0, (%rax) vmovss (%rsp), %xmm0 vzeroupper callq 0x163a3a0 vmovss %xmm0, (%rsp) vmovss 0x4(%rsp), %xmm0 callq 0x163a3a0 vmovss %xmm0, 0x4(%rsp) vmovss 0x8(%rsp), %xmm0 callq 0x163a3a0 vmovss %xmm0, 0x8(%rsp) vmovss 0xc(%rsp), %xmm0 callq 0x163a3a0 vmovss %xmm0, 0xc(%rsp) vmovss 0x10(%rsp), %xmm0 callq 0x163a3a0 vmovss %xmm0, 0x10(%rsp) vmovss 0x14(%rsp), %xmm0 callq 0x163a3a0 vmovss %xmm0, 0x14(%rsp) vmovss 0x18(%rsp), %xmm0 callq 0x163a3a0 vmovss %xmm0, 0x18(%rsp) vmovss 0x1c(%rsp), %xmm0 callq 0x163a3a0 vmovss %xmm0, 0x1c(%rsp) movq %rsp, %rax movq %rax, 0x38(%rsp) movq 0x38(%rsp), %rax vmovups (%rax), %ymm0 movq %rbp, %rsp popq %rbp retq nop
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,307
ncnn::UnaryOp_x86_avx512_functor::unary_op_acos::func_pack4(float vector[4] const&) const
__m128 func_pack4(const __m128& x) const
{
    //TODO sse optimize
    float tmp[4];
    _mm_storeu_ps(tmp, x);
    tmp[0] = acos(tmp[0]);
    tmp[1] = acos(tmp[1]);
    tmp[2] = acos(tmp[2]);
    tmp[3] = acos(tmp[3]);
    return _mm_loadu_ps(tmp);
}
subq $0x48, %rsp movq %rdi, 0x18(%rsp) movq %rsi, 0x10(%rsp) movq %rsp, %rax movq 0x10(%rsp), %rcx vmovaps (%rcx), %xmm0 movq %rax, 0x38(%rsp) vmovaps %xmm0, 0x20(%rsp) vmovaps 0x20(%rsp), %xmm0 movq 0x38(%rsp), %rax vmovups %xmm0, (%rax) vmovss (%rsp), %xmm0 callq 0x163a3a0 vmovss %xmm0, (%rsp) vmovss 0x4(%rsp), %xmm0 callq 0x163a3a0 vmovss %xmm0, 0x4(%rsp) vmovss 0x8(%rsp), %xmm0 callq 0x163a3a0 vmovss %xmm0, 0x8(%rsp) vmovss 0xc(%rsp), %xmm0 callq 0x163a3a0 vmovss %xmm0, 0xc(%rsp) movq %rsp, %rax movq %rax, 0x40(%rsp) movq 0x40(%rsp), %rax vmovups (%rax), %xmm0 addq $0x48, %rsp retq nopl (%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,308
ncnn::UnaryOp_x86_avx512_functor::unary_op_acos::func(float const&) const
float func(const float& x) const { return (float)acos(x); }
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq %rsi, 0x8(%rsp) movq 0x8(%rsp), %rax vmovss (%rax), %xmm0 callq 0x163a3a0 addq $0x18, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,309
ncnn::UnaryOp_x86_avx512_functor::unary_op_atan::func_pack16(float vector[16] const&) const
__m512 func_pack16(const __m512& x) const
{
    //TODO avx512 optimize
    float tmp[16];
    _mm512_storeu_ps(tmp, x);
    for (int i = 0; i < 16; i++)
        tmp[i] = atan(tmp[i]);
    return _mm512_loadu_ps(tmp);
}
pushq %rbp movq %rsp, %rbp andq $-0x40, %rsp subq $0x100, %rsp # imm = 0x100 movq %rdi, 0x70(%rsp) movq %rsi, 0x68(%rsp) leaq 0x20(%rsp), %rax movq 0x68(%rsp), %rcx vmovaps (%rcx), %zmm0 movq %rax, 0xe8(%rsp) vmovaps %zmm0, 0x80(%rsp) vmovaps 0x80(%rsp), %zmm0 movq 0xe8(%rsp), %rax vmovups %zmm0, (%rax) movl $0x0, 0x1c(%rsp) cmpl $0x10, 0x1c(%rsp) jge 0x1658409 movslq 0x1c(%rsp), %rax vmovss 0x20(%rsp,%rax,4), %xmm0 vzeroupper callq 0x163a3f0 movslq 0x1c(%rsp), %rax vmovss %xmm0, 0x20(%rsp,%rax,4) movl 0x1c(%rsp), %eax addl $0x1, %eax movl %eax, 0x1c(%rsp) jmp 0x16583d7 leaq 0x20(%rsp), %rax movq %rax, 0x78(%rsp) movq 0x78(%rsp), %rax vmovups (%rax), %zmm0 movq %rbp, %rsp popq %rbp retq nopw %cs:(%rax,%rax) nopl (%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,310
ncnn::UnaryOp_x86_avx512_functor::unary_op_atan::func_pack8(float vector[8] const&) const
__m256 func_pack8(const __m256& x) const
{
    //TODO avx optimize
    float tmp[8];
    _mm256_storeu_ps(tmp, x);
    tmp[0] = atan(tmp[0]);
    tmp[1] = atan(tmp[1]);
    tmp[2] = atan(tmp[2]);
    tmp[3] = atan(tmp[3]);
    tmp[4] = atan(tmp[4]);
    tmp[5] = atan(tmp[5]);
    tmp[6] = atan(tmp[6]);
    tmp[7] = atan(tmp[7]);
    return _mm256_loadu_ps(tmp);
}
pushq %rbp movq %rsp, %rbp andq $-0x20, %rsp subq $0x80, %rsp movq %rdi, 0x30(%rsp) movq %rsi, 0x28(%rsp) movq %rsp, %rax movq 0x28(%rsp), %rcx vmovaps (%rcx), %ymm0 movq %rax, 0x68(%rsp) vmovaps %ymm0, 0x40(%rsp) vmovaps 0x40(%rsp), %ymm0 movq 0x68(%rsp), %rax vmovups %ymm0, (%rax) vmovss (%rsp), %xmm0 vzeroupper callq 0x163a3f0 vmovss %xmm0, (%rsp) vmovss 0x4(%rsp), %xmm0 callq 0x163a3f0 vmovss %xmm0, 0x4(%rsp) vmovss 0x8(%rsp), %xmm0 callq 0x163a3f0 vmovss %xmm0, 0x8(%rsp) vmovss 0xc(%rsp), %xmm0 callq 0x163a3f0 vmovss %xmm0, 0xc(%rsp) vmovss 0x10(%rsp), %xmm0 callq 0x163a3f0 vmovss %xmm0, 0x10(%rsp) vmovss 0x14(%rsp), %xmm0 callq 0x163a3f0 vmovss %xmm0, 0x14(%rsp) vmovss 0x18(%rsp), %xmm0 callq 0x163a3f0 vmovss %xmm0, 0x18(%rsp) vmovss 0x1c(%rsp), %xmm0 callq 0x163a3f0 vmovss %xmm0, 0x1c(%rsp) movq %rsp, %rax movq %rax, 0x38(%rsp) movq 0x38(%rsp), %rax vmovups (%rax), %ymm0 movq %rbp, %rsp popq %rbp retq nop
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,311
ncnn::UnaryOp_x86_avx512_functor::unary_op_atan::func_pack4(float vector[4] const&) const
__m128 func_pack4(const __m128& x) const
{
    //TODO sse optimize
    float tmp[4];
    _mm_storeu_ps(tmp, x);
    tmp[0] = atan(tmp[0]);
    tmp[1] = atan(tmp[1]);
    tmp[2] = atan(tmp[2]);
    tmp[3] = atan(tmp[3]);
    return _mm_loadu_ps(tmp);
}
subq $0x48, %rsp movq %rdi, 0x18(%rsp) movq %rsi, 0x10(%rsp) movq %rsp, %rax movq 0x10(%rsp), %rcx vmovaps (%rcx), %xmm0 movq %rax, 0x38(%rsp) vmovaps %xmm0, 0x20(%rsp) vmovaps 0x20(%rsp), %xmm0 movq 0x38(%rsp), %rax vmovups %xmm0, (%rax) vmovss (%rsp), %xmm0 callq 0x163a3f0 vmovss %xmm0, (%rsp) vmovss 0x4(%rsp), %xmm0 callq 0x163a3f0 vmovss %xmm0, 0x4(%rsp) vmovss 0x8(%rsp), %xmm0 callq 0x163a3f0 vmovss %xmm0, 0x8(%rsp) vmovss 0xc(%rsp), %xmm0 callq 0x163a3f0 vmovss %xmm0, 0xc(%rsp) movq %rsp, %rax movq %rax, 0x40(%rsp) movq 0x40(%rsp), %rax vmovups (%rax), %xmm0 addq $0x48, %rsp retq nopl (%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,312
ncnn::UnaryOp_x86_avx512_functor::unary_op_atan::func(float const&) const
float func(const float& x) const { return (float)atan(x); }
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq %rsi, 0x8(%rsp) movq 0x8(%rsp), %rax vmovss (%rax), %xmm0 callq 0x163a3f0 addq $0x18, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,313
ncnn::UnaryOp_x86_avx512_functor::unary_op_reciprocal::func_pack16(float vector[16] const&) const
__m512 func_pack16(const __m512& x) const { return _mm512_div_ps(*(__m512*)_ps512_1, x); }
pushq %rbp movq %rsp, %rbp andq $-0x40, %rsp subq $0x100, %rsp # imm = 0x100 movq %rdi, 0x38(%rsp) movq %rsi, 0x30(%rsp) vmovaps 0x7c484d(%rip), %zmm1 # 0x1e1ce40 movq 0x30(%rsp), %rax vmovaps (%rax), %zmm0 vmovaps %zmm1, 0x80(%rsp) vmovaps %zmm0, 0x40(%rsp) vmovaps 0x80(%rsp), %zmm0 vdivps 0x40(%rsp), %zmm0, %zmm0 movq %rbp, %rsp popq %rbp retq nopw %cs:(%rax,%rax) nopl (%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,314
ncnn::UnaryOp_x86_avx512_functor::unary_op_reciprocal::func_pack8(float vector[8] const&) const
__m256 func_pack8(const __m256& x) const { return _mm256_div_ps(*(__m256*)_ps256_1, x); }
pushq %rbp movq %rsp, %rbp andq $-0x20, %rsp subq $0x80, %rsp movq %rdi, 0x18(%rsp) movq %rsi, 0x10(%rsp) vmovaps 0x7c4b6f(%rip), %ymm1 # 0x1e1d1c0 movq 0x10(%rsp), %rax vmovaps (%rax), %ymm0 vmovaps %ymm1, 0x40(%rsp) vmovaps %ymm0, 0x20(%rsp) vmovaps 0x40(%rsp), %ymm0 vdivps 0x20(%rsp), %ymm0, %ymm0 movq %rbp, %rsp popq %rbp retq nopw (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,315
ncnn::UnaryOp_x86_avx512_functor::unary_op_reciprocal::func_pack4(float vector[4] const&) const
__m128 func_pack4(const __m128& x) const { return _mm_div_ps(*(__m128*)_ps_1, x); }
movq %rdi, -0x30(%rsp) movq %rsi, -0x38(%rsp) vmovaps 0x7c4cee(%rip), %xmm1 # 0x1e1d380 movq -0x38(%rsp), %rax vmovaps (%rax), %xmm0 vmovaps %xmm1, -0x18(%rsp) vmovaps %xmm0, -0x28(%rsp) vmovaps -0x18(%rsp), %xmm0 vdivps -0x28(%rsp), %xmm0, %xmm0 retq nopw %cs:(%rax,%rax) nop
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,316
ncnn::UnaryOp_x86_avx512_functor::unary_op_reciprocal::func(float const&) const
float func(const float& x) const { return 1.f / x; }
movq %rdi, -0x8(%rsp) movq %rsi, -0x10(%rsp) movq -0x10(%rsp), %rax vmovss 0x7b092d(%rip), %xmm0 # 0x1e09004 vdivss (%rax), %xmm0, %xmm0 retq nopl (%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,317
ncnn::UnaryOp_x86_avx512_functor::unary_op_tanh::func_pack16(float vector[16] const&) const
__m512 func_pack16(const __m512& x) const { return tanh_avx512(x); }
pushq %rbp movq %rsp, %rbp andq $-0x40, %rsp subq $0x1500, %rsp # imm = 0x1500 movq %rdi, 0x38(%rsp) movq %rsi, 0x30(%rsp) movq 0x30(%rsp), %rax vmovaps (%rax), %zmm0 vmovaps %zmm0, 0xc0(%rsp) movl $0x3f800000, 0x27c(%rsp) # imm = 0x3F800000 vbroadcastss 0x27c(%rsp), %zmm0 vmovaps %zmm0, 0x200(%rsp) vmovaps 0x200(%rsp), %zmm0 vmovaps %zmm0, 0x80(%rsp) movl $0x40000000, 0x1fc(%rsp) # imm = 0x40000000 vbroadcastss 0x1fc(%rsp), %zmm0 vmovaps %zmm0, 0x180(%rsp) vmovaps 0x180(%rsp), %zmm0 vmovaps %zmm0, 0x40(%rsp) vmovaps 0xc0(%rsp), %zmm1 vmovaps 0x40(%rsp), %zmm0 vmovaps %zmm1, 0x140(%rsp) vmovaps %zmm0, 0x100(%rsp) vmovaps 0x140(%rsp), %zmm0 vmovaps 0x100(%rsp), %zmm1 vmulps %zmm1, %zmm0, %zmm0 vmovaps %zmm0, 0x380(%rsp) movl $0x3f800000, 0x143c(%rsp) # imm = 0x3F800000 vbroadcastss 0x143c(%rsp), %zmm0 vmovaps %zmm0, 0x13c0(%rsp) vmovaps 0x13c0(%rsp), %zmm0 vmovaps %zmm0, 0x340(%rsp) vmovaps 0x340(%rsp), %zmm1 vpxor %xmm0, %xmm0, %xmm0 vmovaps %zmm0, 0x440(%rsp) vmovaps 0x440(%rsp), %zmm3 vmovaps 0x380(%rsp), %zmm2 vmovaps %zmm3, 0x400(%rsp) vmovaps %zmm2, 0x3c0(%rsp) vmovaps 0x400(%rsp), %zmm2 vmovaps 0x3c0(%rsp), %zmm3 vsubps %zmm3, %zmm2, %zmm2 vmovaps %zmm2, 0x640(%rsp) vmovaps %zmm0, 0x6c0(%rsp) vmovaps 0x6c0(%rsp), %zmm0 vmovaps %zmm0, 0x600(%rsp) vmovaps 0x7c45fb(%rip), %zmm0 # 0x1e1ce40 vmovaps %zmm0, 0x540(%rsp) vmovaps 0x640(%rsp), %zmm2 vmovaps 0x7c4621(%rip), %zmm0 # 0x1e1ce80 vmovaps %zmm2, 0x840(%rsp) vmovaps %zmm0, 0x800(%rsp) vmovaps 0x840(%rsp), %zmm0 vmovaps 0x800(%rsp), %zmm2 vminps %zmm2, %zmm0, %zmm0 vmovaps %zmm0, 0x640(%rsp) vmovaps 0x640(%rsp), %zmm2 vmovaps 0x7c4621(%rip), %zmm0 # 0x1e1cec0 vmovaps %zmm2, 0x8c0(%rsp) vmovaps %zmm0, 0x880(%rsp) vmovaps 0x8c0(%rsp), %zmm0 vmovaps 0x880(%rsp), %zmm2 vmaxps %zmm2, %zmm0, %zmm0 vmovaps %zmm0, 0x640(%rsp) vmovaps 0x640(%rsp), %zmm3 vmovaps 0x7c4621(%rip), %zmm2 # 0x1e1cf00 vmovaps 0x7c4657(%rip), %zmm0 # 0x1e1cf40 vmovaps %zmm3, 0xe00(%rsp) vmovaps %zmm2, 0xdc0(%rsp) vmovaps %zmm0, 0xd80(%rsp) vmovaps 0xe00(%rsp), %zmm2 vmovaps 0xdc0(%rsp), %zmm0 vmovaps 0xd80(%rsp), %zmm3 vfmadd213ps %zmm3, %zmm2, %zmm0 # zmm0 = (zmm2 * zmm0) + zmm3 vmovaps %zmm0, 0x5c0(%rsp) vmovaps 0x5c0(%rsp), %zmm0 vrndscaleps $0x1, %zmm0, %zmm0 vmovaps %zmm0, 0x600(%rsp) vmovaps 0x600(%rsp), %zmm2 vmovaps 0x5c0(%rsp), %zmm0 vcmpltps %zmm2, %zmm0, %k0 kmovw %k0, 0x53e(%rsp) vmovaps 0x600(%rsp), %zmm2 movw 0x53e(%rsp), %ax vmovaps 0x540(%rsp), %zmm0 vmovaps %zmm2, 0xf00(%rsp) movw %ax, 0xefe(%rsp) vmovaps %zmm2, 0xe80(%rsp) vmovaps %zmm0, 0xe40(%rsp) vmovaps 0xe80(%rsp), %zmm2 vmovaps 0xe40(%rsp), %zmm0 kmovw 0xefe(%rsp), %k1 vmovaps %zmm2, 0xf80(%rsp) vmovaps %zmm0, 0xf40(%rsp) vmovaps 0xf80(%rsp), %zmm2 vmovaps 0xf40(%rsp), %zmm3 vmovaps 0xf00(%rsp), %zmm0 vsubps %zmm3, %zmm2, %zmm0 {%k1} vmovaps %zmm0, 0x5c0(%rsp) vmovaps 0x5c0(%rsp), %zmm3 vmovaps 0x7c4589(%rip), %zmm2 # 0x1e1cf80 vmovaps 0x640(%rsp), %zmm0 vmovaps %zmm3, 0x1100(%rsp) vmovaps %zmm2, 0x10c0(%rsp) vmovaps %zmm0, 0x1080(%rsp) vmovaps 0x1100(%rsp), %zmm2 vmovaps 0x10c0(%rsp), %zmm0 vmovaps 0x1080(%rsp), %zmm3 vfnmadd213ps %zmm3, %zmm2, %zmm0 # zmm0 = -(zmm2 * zmm0) + zmm3 vmovaps %zmm0, 0x640(%rsp) vmovaps 0x5c0(%rsp), %zmm3 vmovaps 0x7c4571(%rip), %zmm2 # 0x1e1cfc0 vmovaps 0x640(%rsp), %zmm0 vmovaps %zmm3, 0x1040(%rsp) vmovaps %zmm2, 0x1000(%rsp) vmovaps %zmm0, 0xfc0(%rsp) vmovaps 0x1040(%rsp), %zmm2 vmovaps 0x1000(%rsp), %zmm0 vmovaps 0xfc0(%rsp), %zmm3 vfnmadd213ps %zmm3, %zmm2, %zmm0 # zmm0 = -(zmm2 * zmm0) + zmm3 vmovaps %zmm0, 0x640(%rsp) vmovaps 0x640(%rsp), %zmm0 vmovaps %zmm0, 
0x7c0(%rsp) vmovaps %zmm0, 0x780(%rsp) vmovaps 0x7c0(%rsp), %zmm0 vmovaps 0x780(%rsp), %zmm2 vmulps %zmm2, %zmm0, %zmm0 vmovaps %zmm0, 0x600(%rsp) vmovaps 0x7c452b(%rip), %zmm0 # 0x1e1d000 vmovaps %zmm0, 0x4c0(%rsp) vmovaps 0x4c0(%rsp), %zmm3 vmovaps 0x640(%rsp), %zmm2 vmovaps 0x7c4549(%rip), %zmm0 # 0x1e1d040 vmovaps %zmm3, 0xd40(%rsp) vmovaps %zmm2, 0xd00(%rsp) vmovaps %zmm0, 0xcc0(%rsp) vmovaps 0xd40(%rsp), %zmm2 vmovaps 0xd00(%rsp), %zmm0 vmovaps 0xcc0(%rsp), %zmm3 vfmadd213ps %zmm3, %zmm2, %zmm0 # zmm0 = (zmm2 * zmm0) + zmm3 vmovaps %zmm0, 0x4c0(%rsp) vmovaps 0x4c0(%rsp), %zmm3 vmovaps 0x640(%rsp), %zmm2 vmovaps 0x7c4531(%rip), %zmm0 # 0x1e1d080 vmovaps %zmm3, 0xc80(%rsp) vmovaps %zmm2, 0xc40(%rsp) vmovaps %zmm0, 0xc00(%rsp) vmovaps 0xc80(%rsp), %zmm2 vmovaps 0xc40(%rsp), %zmm0 vmovaps 0xc00(%rsp), %zmm3 vfmadd213ps %zmm3, %zmm2, %zmm0 # zmm0 = (zmm2 * zmm0) + zmm3 vmovaps %zmm0, 0x4c0(%rsp) vmovaps 0x4c0(%rsp), %zmm3 vmovaps 0x640(%rsp), %zmm2 vmovaps 0x7c4519(%rip), %zmm0 # 0x1e1d0c0 vmovaps %zmm3, 0xbc0(%rsp) vmovaps %zmm2, 0xb80(%rsp) vmovaps %zmm0, 0xb40(%rsp) vmovaps 0xbc0(%rsp), %zmm2 vmovaps 0xb80(%rsp), %zmm0 vmovaps 0xb40(%rsp), %zmm3 vfmadd213ps %zmm3, %zmm2, %zmm0 # zmm0 = (zmm2 * zmm0) + zmm3 vmovaps %zmm0, 0x4c0(%rsp) vmovaps 0x4c0(%rsp), %zmm3 vmovaps 0x640(%rsp), %zmm2 vmovaps 0x7c4501(%rip), %zmm0 # 0x1e1d100 vmovaps %zmm3, 0xb00(%rsp) vmovaps %zmm2, 0xac0(%rsp) vmovaps %zmm0, 0xa80(%rsp) vmovaps 0xb00(%rsp), %zmm2 vmovaps 0xac0(%rsp), %zmm0 vmovaps 0xa80(%rsp), %zmm3 vfmadd213ps %zmm3, %zmm2, %zmm0 # zmm0 = (zmm2 * zmm0) + zmm3 vmovaps %zmm0, 0x4c0(%rsp) vmovaps 0x4c0(%rsp), %zmm3 vmovaps 0x640(%rsp), %zmm2 vmovaps 0x7c44e9(%rip), %zmm0 # 0x1e1d140 vmovaps %zmm3, 0xa40(%rsp) vmovaps %zmm2, 0xa00(%rsp) vmovaps %zmm0, 0x9c0(%rsp) vmovaps 0xa40(%rsp), %zmm2 vmovaps 0xa00(%rsp), %zmm0 vmovaps 0x9c0(%rsp), %zmm3 vfmadd213ps %zmm3, %zmm2, %zmm0 # zmm0 = (zmm2 * zmm0) + zmm3 vmovaps %zmm0, 0x4c0(%rsp) vmovaps 0x4c0(%rsp), %zmm3 vmovaps 0x600(%rsp), %zmm2 vmovaps 0x640(%rsp), %zmm0 vmovaps %zmm3, 0x980(%rsp) vmovaps %zmm2, 0x940(%rsp) vmovaps %zmm0, 0x900(%rsp) vmovaps 0x980(%rsp), %zmm2 vmovaps 0x940(%rsp), %zmm0 vmovaps 0x900(%rsp), %zmm3 vfmadd213ps %zmm3, %zmm2, %zmm0 # zmm0 = (zmm2 * zmm0) + zmm3 vmovaps %zmm0, 0x4c0(%rsp) vmovaps 0x4c0(%rsp), %zmm2 vmovaps 0x540(%rsp), %zmm0 vmovaps %zmm2, 0x1180(%rsp) vmovaps %zmm0, 0x1140(%rsp) vmovaps 0x1180(%rsp), %zmm0 vmovaps 0x1140(%rsp), %zmm2 vaddps %zmm2, %zmm0, %zmm0 vmovaps %zmm0, 0x4c0(%rsp) vmovaps 0x5c0(%rsp), %zmm0 vmovaps %zmm0, 0x1240(%rsp) vpxor %xmm2, %xmm2, %xmm2 vcvttps2dq 0x1240(%rsp), %zmm0 vmovdqa64 %zmm2, 0x1380(%rsp) vmovdqa64 %zmm0, 0x580(%rsp) vmovdqa64 0x580(%rsp), %zmm2 vmovdqa64 0x7c4419(%rip), %zmm0 # 0x1e1d180 vmovdqa64 %zmm2, 0x12c0(%rsp) vmovdqa64 %zmm0, 0x1280(%rsp) vmovdqa64 0x12c0(%rsp), %zmm0 vmovdqa64 0x1280(%rsp), %zmm2 vpaddd %zmm2, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x580(%rsp) vmovdqa64 0x580(%rsp), %zmm0 vmovdqa64 %zmm0, 0x1340(%rsp) movl $0x17, 0x133c(%rsp) vmovdqa64 0x1340(%rsp), %zmm0 vmovd 0x133c(%rsp), %xmm2 vpslld %xmm2, %zmm0, %zmm0 vmovdqa64 %zmm0, 0x580(%rsp) vmovdqa64 0x580(%rsp), %zmm0 vmovdqa64 %zmm0, 0x680(%rsp) vmovdqa64 0x680(%rsp), %zmm0 vmovdqa64 %zmm0, 0x480(%rsp) vmovaps 0x4c0(%rsp), %zmm2 vmovaps 0x480(%rsp), %zmm0 vmovaps %zmm2, 0x740(%rsp) vmovaps %zmm0, 0x700(%rsp) vmovaps 0x740(%rsp), %zmm0 vmovaps 0x700(%rsp), %zmm2 vmulps %zmm2, %zmm0, %zmm0 vmovaps %zmm0, 0x4c0(%rsp) vmovaps 0x4c0(%rsp), %zmm0 vmovaps %zmm1, 0x1200(%rsp) vmovaps %zmm0, 0x11c0(%rsp) vmovaps 
0x1200(%rsp), %zmm0 vmovaps 0x11c0(%rsp), %zmm2 vaddps %zmm2, %zmm0, %zmm0 vmovaps %zmm1, 0x1480(%rsp) vmovaps %zmm0, 0x1440(%rsp) vmovaps 0x1480(%rsp), %zmm4 vmovaps 0x1440(%rsp), %zmm3 vrcp14ps %zmm3, %zmm0 vmulps %zmm0, %zmm4, %zmm1 vmovaps %zmm1, %zmm2 vfmsub213ps %zmm4, %zmm3, %zmm2 # zmm2 = (zmm3 * zmm2) - zmm4 vfnmadd213ps %zmm1, %zmm0, %zmm2 # zmm2 = -(zmm0 * zmm2) + zmm1 vmovaps 0x40(%rsp), %zmm1 vmovaps 0x80(%rsp), %zmm0 vmovaps %zmm2, 0x300(%rsp) vmovaps %zmm1, 0x2c0(%rsp) vmovaps %zmm0, 0x280(%rsp) vmovaps 0x300(%rsp), %zmm1 vmovaps 0x2c0(%rsp), %zmm0 vmovaps 0x280(%rsp), %zmm2 vpbroadcastd 0x7b4fd5(%rip), %zmm3 # 0x1e0deb8 vpxord %zmm3, %zmm2, %zmm2 vfmadd213ps %zmm2, %zmm1, %zmm0 # zmm0 = (zmm1 * zmm0) + zmm2 movq %rbp, %rsp popq %rbp retq nopw %cs:(%rax,%rax) nop
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
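The AVX-512 tanh listing above never calls libm per lane: it forms -2x, clamps it, evaluates a polynomial exp, adds 1.0f, takes a Newton-refined reciprocal (vrcp14ps followed by vfmsub213ps/vfnmadd213ps), and finishes with one fused multiply-add against the broadcast constants 2.0f and 1.0f, the addend's sign bit flipped by vpxord. That shape matches the sigmoid identity for tanh. A minimal scalar sketch of that reading of the listing, not ncnn's tanh_avx512 source:

#include <cmath>
#include <cstdio>

// Scalar model of the identity the vector kernel appears to implement:
//   tanh(x) = 2 / (1 + exp(-2x)) - 1
static float tanh_sigmoid_model(float x)
{
    return 2.0f / (1.0f + std::exp(-2.0f * x)) - 1.0f;
}

int main()
{
    const float xs[] = { -2.0f, -0.5f, 0.0f, 0.5f, 2.0f };
    for (float x : xs)
        std::printf("%+4.1f: model %+.6f  std::tanh %+.6f\n",
                    x, tanh_sigmoid_model(x), std::tanh(x));
    return 0;
}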
2,113,318
ncnn::UnaryOp_x86_avx512_functor::unary_op_tanh::func_pack8(float vector[8] const&) const
__m256 func_pack8(const __m256& x) const { return tanh_avx(x); }
pushq %rbp movq %rsp, %rbp andq $-0x20, %rsp subq $0xe40, %rsp # imm = 0xE40 movq %rdi, 0x18(%rsp) movq %rsi, 0x10(%rsp) movq 0x10(%rsp), %rax vmovaps (%rax), %ymm0 vmovaps %ymm0, 0x60(%rsp) movl $0x3f800000, 0xdc(%rsp) # imm = 0x3F800000 vmovss 0xdc(%rsp), %xmm0 vmovss %xmm0, 0x11c(%rsp) vmovss %xmm0, 0x118(%rsp) vmovss %xmm0, 0x114(%rsp) vmovss %xmm0, 0x110(%rsp) vmovss %xmm0, 0x10c(%rsp) vmovss %xmm0, 0x108(%rsp) vmovss %xmm0, 0x104(%rsp) vmovss %xmm0, 0x100(%rsp) vmovss 0x104(%rsp), %xmm1 vmovss 0x100(%rsp), %xmm0 vinsertps $0x10, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[2,3] vmovss 0x108(%rsp), %xmm1 vinsertps $0x20, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0,1],xmm1[0],xmm0[3] vmovss 0x10c(%rsp), %xmm1 vinsertps $0x30, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2],xmm1[0] vmovss 0x114(%rsp), %xmm2 vmovss 0x110(%rsp), %xmm1 vinsertps $0x10, %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[2,3] vmovss 0x118(%rsp), %xmm2 vinsertps $0x20, %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0,1],xmm2[0],xmm1[3] vmovss 0x11c(%rsp), %xmm2 vinsertps $0x30, %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0,1,2],xmm2[0] vmovaps %xmm1, 0xf0(%rsp) vmovaps %xmm0, 0xe0(%rsp) vmovaps 0xe0(%rsp), %ymm0 vmovaps %ymm0, 0x40(%rsp) movl $0x40000000, 0xd8(%rsp) # imm = 0x40000000 vmovss 0xd8(%rsp), %xmm0 vmovss %xmm0, 0x15c(%rsp) vmovss %xmm0, 0x158(%rsp) vmovss %xmm0, 0x154(%rsp) vmovss %xmm0, 0x150(%rsp) vmovss %xmm0, 0x14c(%rsp) vmovss %xmm0, 0x148(%rsp) vmovss %xmm0, 0x144(%rsp) vmovss %xmm0, 0x140(%rsp) vmovss 0x144(%rsp), %xmm1 vmovss 0x140(%rsp), %xmm0 vinsertps $0x10, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[2,3] vmovss 0x148(%rsp), %xmm1 vinsertps $0x20, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0,1],xmm1[0],xmm0[3] vmovss 0x14c(%rsp), %xmm1 vinsertps $0x30, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2],xmm1[0] vmovss 0x154(%rsp), %xmm2 vmovss 0x150(%rsp), %xmm1 vinsertps $0x10, %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[2,3] vmovss 0x158(%rsp), %xmm2 vinsertps $0x20, %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0,1],xmm2[0],xmm1[3] vmovss 0x15c(%rsp), %xmm2 vinsertps $0x30, %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0,1,2],xmm2[0] vmovaps %xmm1, 0x130(%rsp) vmovaps %xmm0, 0x120(%rsp) vmovaps 0x120(%rsp), %ymm0 vmovaps %ymm0, 0x20(%rsp) vmovaps 0x60(%rsp), %ymm1 vmovaps 0x20(%rsp), %ymm0 vmovaps %ymm1, 0xa0(%rsp) vmovaps %ymm0, 0x80(%rsp) vmovaps 0xa0(%rsp), %ymm0 vmovaps 0x80(%rsp), %ymm1 vmulps %ymm1, %ymm0, %ymm0 vmovaps %ymm0, 0x1e0(%rsp) movl $0x3f800000, 0xd9c(%rsp) # imm = 0x3F800000 vmovss 0xd9c(%rsp), %xmm0 vmovss %xmm0, 0xe2c(%rsp) vmovss %xmm0, 0xe28(%rsp) vmovss %xmm0, 0xe24(%rsp) vmovss %xmm0, 0xe20(%rsp) vmovss %xmm0, 0xe1c(%rsp) vmovss %xmm0, 0xe18(%rsp) vmovss %xmm0, 0xe14(%rsp) vmovss %xmm0, 0xe10(%rsp) vmovss 0xe14(%rsp), %xmm1 vmovss 0xe10(%rsp), %xmm0 vinsertps $0x10, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[2,3] vmovss 0xe18(%rsp), %xmm1 vinsertps $0x20, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0,1],xmm1[0],xmm0[3] vmovss 0xe1c(%rsp), %xmm1 vinsertps $0x30, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2],xmm1[0] vmovss 0xe24(%rsp), %xmm2 vmovss 0xe20(%rsp), %xmm1 vinsertps $0x10, %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[2,3] vmovss 0xe28(%rsp), %xmm2 vinsertps $0x20, %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0,1],xmm2[0],xmm1[3] vmovss 0xe2c(%rsp), %xmm2 vinsertps $0x30, %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0,1,2],xmm2[0] vmovaps %xmm1, 0xdf0(%rsp) vmovaps %xmm0, 0xde0(%rsp) vmovaps 0xde0(%rsp), %ymm0 vmovaps %ymm0, 0x1c0(%rsp) vmovaps 0x1c0(%rsp), %ymm1 vpxor %xmm0, %xmm0, %xmm0 vmovaps %ymm0, 0x240(%rsp) vmovaps 0x240(%rsp), 
%ymm3 vmovaps 0x1e0(%rsp), %ymm2 vmovaps %ymm3, 0x220(%rsp) vmovaps %ymm2, 0x200(%rsp) vmovaps 0x220(%rsp), %ymm2 vmovaps 0x200(%rsp), %ymm3 vsubps %ymm3, %ymm2, %ymm2 vmovaps %ymm2, 0x340(%rsp) vmovaps %ymm0, 0x3e0(%rsp) vmovaps 0x3e0(%rsp), %ymm0 vmovaps %ymm0, 0x320(%rsp) vbroadcastss 0x7afd64(%rip), %ymm0 # 0x1e09004 vmovaps %ymm0, 0x2c0(%rsp) vmovaps 0x340(%rsp), %ymm0 vmovaps %ymm0, 0x4a0(%rsp) vbroadcastss 0x7b2afc(%rip), %ymm0 # 0x1e0bdc0 vmovaps %ymm0, 0x480(%rsp) vmovaps 0x4a0(%rsp), %ymm0 vmovaps 0x480(%rsp), %ymm2 vminps %ymm2, %ymm0, %ymm0 vmovaps %ymm0, 0x340(%rsp) vmovaps 0x340(%rsp), %ymm0 vmovaps %ymm0, 0x380(%rsp) vbroadcastss 0x7b2abd(%rip), %ymm0 # 0x1e0bdc4 vmovaps %ymm0, 0x360(%rsp) vmovaps 0x380(%rsp), %ymm0 vmovaps 0x360(%rsp), %ymm2 vmaxps %ymm2, %ymm0, %ymm0 vmovaps %ymm0, 0x340(%rsp) vmovaps 0x340(%rsp), %ymm0 vmovaps %ymm0, 0x740(%rsp) vbroadcastss 0x7b2a7e(%rip), %ymm0 # 0x1e0bdc8 vmovaps %ymm0, 0x720(%rsp) vbroadcastss 0x7afcc0(%rip), %ymm0 # 0x1e0901c vmovaps %ymm0, 0x700(%rsp) vmovaps 0x740(%rsp), %ymm4 vmovaps 0x720(%rsp), %ymm3 vmovaps 0x700(%rsp), %ymm2 vmovaps %ymm4, 0x9e0(%rsp) vmovaps %ymm3, 0x9c0(%rsp) vmovaps %ymm2, 0x9a0(%rsp) vmovaps 0x9e0(%rsp), %ymm3 vmovaps 0x9c0(%rsp), %ymm2 vmovaps 0x9a0(%rsp), %ymm4 vfmadd213ps %ymm4, %ymm3, %ymm2 # ymm2 = (ymm3 * ymm2) + ymm4 vmovaps %ymm2, 0x300(%rsp) vmovaps 0x300(%rsp), %ymm2 vroundps $0x1, %ymm2, %ymm2 vmovaps %ymm2, 0x320(%rsp) vmovaps 0x320(%rsp), %ymm3 vmovaps 0x300(%rsp), %ymm2 vcmpltps %ymm3, %ymm2, %ymm2 vmovdqa %ymm2, 0x2a0(%rsp) vmovaps 0x2a0(%rsp), %ymm3 vmovaps 0x2c0(%rsp), %ymm2 vmovaps %ymm3, 0x780(%rsp) vmovaps %ymm2, 0x760(%rsp) vmovdqa 0x780(%rsp), %ymm2 vmovdqa 0x760(%rsp), %ymm3 vpand %ymm3, %ymm2, %ymm2 vmovdqa %ymm2, 0x2a0(%rsp) vmovaps 0x320(%rsp), %ymm3 vmovaps 0x2a0(%rsp), %ymm2 vmovaps %ymm3, 0x3c0(%rsp) vmovaps %ymm2, 0x3a0(%rsp) vmovaps 0x3c0(%rsp), %ymm2 vmovaps 0x3a0(%rsp), %ymm3 vsubps %ymm3, %ymm2, %ymm2 vmovaps %ymm2, 0x300(%rsp) vmovaps 0x300(%rsp), %ymm3 vmovaps 0x340(%rsp), %ymm2 vmovaps %ymm3, 0x840(%rsp) vbroadcastss 0x7b2926(%rip), %ymm3 # 0x1e0bdcc vmovaps %ymm3, 0x820(%rsp) vmovaps %ymm2, 0x800(%rsp) vmovaps 0x840(%rsp), %ymm4 vmovaps 0x820(%rsp), %ymm3 vmovaps 0x800(%rsp), %ymm2 vmovaps %ymm4, 0xc80(%rsp) vmovaps %ymm3, 0xc60(%rsp) vmovaps %ymm2, 0xc40(%rsp) vmovaps 0xc80(%rsp), %ymm3 vmovaps 0xc60(%rsp), %ymm2 vmovaps 0xc40(%rsp), %ymm4 vfnmadd213ps %ymm4, %ymm3, %ymm2 # ymm2 = -(ymm3 * ymm2) + ymm4 vmovaps %ymm2, 0x340(%rsp) vmovaps 0x300(%rsp), %ymm3 vmovaps 0x340(%rsp), %ymm2 vmovaps %ymm3, 0x7e0(%rsp) vbroadcastss 0x7b2895(%rip), %ymm3 # 0x1e0bdd0 vmovaps %ymm3, 0x7c0(%rsp) vmovaps %ymm2, 0x7a0(%rsp) vmovaps 0x7e0(%rsp), %ymm4 vmovaps 0x7c0(%rsp), %ymm3 vmovaps 0x7a0(%rsp), %ymm2 vmovaps %ymm4, 0xce0(%rsp) vmovaps %ymm3, 0xcc0(%rsp) vmovaps %ymm2, 0xca0(%rsp) vmovaps 0xce0(%rsp), %ymm3 vmovaps 0xcc0(%rsp), %ymm2 vmovaps 0xca0(%rsp), %ymm4 vfnmadd213ps %ymm4, %ymm3, %ymm2 # ymm2 = -(ymm3 * ymm2) + ymm4 vmovaps %ymm2, 0x340(%rsp) vmovaps 0x340(%rsp), %ymm2 vmovaps %ymm2, 0x460(%rsp) vmovaps %ymm2, 0x440(%rsp) vmovaps 0x460(%rsp), %ymm2 vmovaps 0x440(%rsp), %ymm3 vmulps %ymm3, %ymm2, %ymm2 vmovaps %ymm2, 0x320(%rsp) vbroadcastss 0x7b27e5(%rip), %ymm2 # 0x1e0bdd4 vmovaps %ymm2, 0x280(%rsp) vmovaps 0x280(%rsp), %ymm3 vmovaps 0x340(%rsp), %ymm2 vmovaps %ymm3, 0x6e0(%rsp) vmovaps %ymm2, 0x6c0(%rsp) vbroadcastss 0x7b27b3(%rip), %ymm2 # 0x1e0bdd8 vmovaps %ymm2, 0x6a0(%rsp) vmovaps 0x6e0(%rsp), %ymm4 vmovaps 0x6c0(%rsp), %ymm3 vmovaps 0x6a0(%rsp), %ymm2 vmovaps %ymm4, 
0xa40(%rsp) vmovaps %ymm3, 0xa20(%rsp) vmovaps %ymm2, 0xa00(%rsp) vmovaps 0xa40(%rsp), %ymm3 vmovaps 0xa20(%rsp), %ymm2 vmovaps 0xa00(%rsp), %ymm4 vfmadd213ps %ymm4, %ymm3, %ymm2 # ymm2 = (ymm3 * ymm2) + ymm4 vmovaps %ymm2, 0x280(%rsp) vmovaps 0x280(%rsp), %ymm3 vmovaps 0x340(%rsp), %ymm2 vmovaps %ymm3, 0x680(%rsp) vmovaps %ymm2, 0x660(%rsp) vbroadcastss 0x7b2722(%rip), %ymm2 # 0x1e0bddc vmovaps %ymm2, 0x640(%rsp) vmovaps 0x680(%rsp), %ymm4 vmovaps 0x660(%rsp), %ymm3 vmovaps 0x640(%rsp), %ymm2 vmovaps %ymm4, 0xaa0(%rsp) vmovaps %ymm3, 0xa80(%rsp) vmovaps %ymm2, 0xa60(%rsp) vmovaps 0xaa0(%rsp), %ymm3 vmovaps 0xa80(%rsp), %ymm2 vmovaps 0xa60(%rsp), %ymm4 vfmadd213ps %ymm4, %ymm3, %ymm2 # ymm2 = (ymm3 * ymm2) + ymm4 vmovaps %ymm2, 0x280(%rsp) vmovaps 0x280(%rsp), %ymm3 vmovaps 0x340(%rsp), %ymm2 vmovaps %ymm3, 0x620(%rsp) vmovaps %ymm2, 0x600(%rsp) vbroadcastss 0x7b2691(%rip), %ymm2 # 0x1e0bde0 vmovaps %ymm2, 0x5e0(%rsp) vmovaps 0x620(%rsp), %ymm4 vmovaps 0x600(%rsp), %ymm3 vmovaps 0x5e0(%rsp), %ymm2 vmovaps %ymm4, 0xb00(%rsp) vmovaps %ymm3, 0xae0(%rsp) vmovaps %ymm2, 0xac0(%rsp) vmovaps 0xb00(%rsp), %ymm3 vmovaps 0xae0(%rsp), %ymm2 vmovaps 0xac0(%rsp), %ymm4 vfmadd213ps %ymm4, %ymm3, %ymm2 # ymm2 = (ymm3 * ymm2) + ymm4 vmovaps %ymm2, 0x280(%rsp) vmovaps 0x280(%rsp), %ymm3 vmovaps 0x340(%rsp), %ymm2 vmovaps %ymm3, 0x5c0(%rsp) vmovaps %ymm2, 0x5a0(%rsp) vbroadcastss 0x7b2600(%rip), %ymm2 # 0x1e0bde4 vmovaps %ymm2, 0x580(%rsp) vmovaps 0x5c0(%rsp), %ymm4 vmovaps 0x5a0(%rsp), %ymm3 vmovaps 0x580(%rsp), %ymm2 vmovaps %ymm4, 0xb60(%rsp) vmovaps %ymm3, 0xb40(%rsp) vmovaps %ymm2, 0xb20(%rsp) vmovaps 0xb60(%rsp), %ymm3 vmovaps 0xb40(%rsp), %ymm2 vmovaps 0xb20(%rsp), %ymm4 vfmadd213ps %ymm4, %ymm3, %ymm2 # ymm2 = (ymm3 * ymm2) + ymm4 vmovaps %ymm2, 0x280(%rsp) vmovaps 0x280(%rsp), %ymm3 vmovaps 0x340(%rsp), %ymm2 vmovaps %ymm3, 0x560(%rsp) vmovaps %ymm2, 0x540(%rsp) vmovaps %ymm0, 0x520(%rsp) vmovaps 0x560(%rsp), %ymm3 vmovaps 0x540(%rsp), %ymm2 vmovaps 0x520(%rsp), %ymm0 vmovaps %ymm3, 0xbc0(%rsp) vmovaps %ymm2, 0xba0(%rsp) vmovaps %ymm0, 0xb80(%rsp) vmovaps 0xbc0(%rsp), %ymm2 vmovaps 0xba0(%rsp), %ymm0 vmovaps 0xb80(%rsp), %ymm3 vfmadd213ps %ymm3, %ymm2, %ymm0 # ymm0 = (ymm2 * ymm0) + ymm3 vmovaps %ymm0, 0x280(%rsp) vmovaps 0x280(%rsp), %ymm3 vmovaps 0x320(%rsp), %ymm2 vmovaps 0x340(%rsp), %ymm0 vmovaps %ymm3, 0x500(%rsp) vmovaps %ymm2, 0x4e0(%rsp) vmovaps %ymm0, 0x4c0(%rsp) vmovaps 0x500(%rsp), %ymm3 vmovaps 0x4e0(%rsp), %ymm2 vmovaps 0x4c0(%rsp), %ymm0 vmovaps %ymm3, 0xc20(%rsp) vmovaps %ymm2, 0xc00(%rsp) vmovaps %ymm0, 0xbe0(%rsp) vmovaps 0xc20(%rsp), %ymm2 vmovaps 0xc00(%rsp), %ymm0 vmovaps 0xbe0(%rsp), %ymm3 vfmadd213ps %ymm3, %ymm2, %ymm0 # ymm0 = (ymm2 * ymm0) + ymm3 vmovaps %ymm0, 0x280(%rsp) vmovaps 0x280(%rsp), %ymm2 vmovaps 0x2c0(%rsp), %ymm0 vmovaps %ymm2, 0x880(%rsp) vmovaps %ymm0, 0x860(%rsp) vmovaps 0x880(%rsp), %ymm0 vmovaps 0x860(%rsp), %ymm2 vaddps %ymm2, %ymm0, %ymm0 vmovaps %ymm0, 0x280(%rsp) vmovaps 0x300(%rsp), %ymm0 vmovaps %ymm0, 0x8e0(%rsp) vcvttps2dq 0x8e0(%rsp), %ymm0 vmovdqa %ymm0, 0x2e0(%rsp) vmovdqa 0x2e0(%rsp), %ymm0 vmovdqa %ymm0, 0x920(%rsp) vpbroadcastq 0x7b2429(%rip), %ymm0 # 0x1e0be18 vmovdqa %ymm0, 0x900(%rsp) vmovdqa 0x920(%rsp), %ymm2 vmovdqa 0x900(%rsp), %ymm0 vmovdqa %ymm2, 0xd20(%rsp) vmovdqa %ymm0, 0xd00(%rsp) vmovdqa 0xd20(%rsp), %ymm0 vmovdqa 0xd00(%rsp), %ymm2 vpaddd %ymm2, %ymm0, %ymm0 vmovdqa %ymm0, 0x2e0(%rsp) vmovdqa 0x2e0(%rsp), %ymm0 vmovdqa %ymm0, 0x960(%rsp) movl $0x17, 0x95c(%rsp) vmovdqa 0x960(%rsp), %ymm0 movl 0x95c(%rsp), %eax vmovdqa %ymm0, 
0xd60(%rsp) movl %eax, 0xd5c(%rsp) vmovdqa 0xd60(%rsp), %ymm0 vmovd 0xd5c(%rsp), %xmm2 vpslld %xmm2, %ymm0, %ymm0 vmovdqa %ymm0, 0x2e0(%rsp) vmovdqa 0x2e0(%rsp), %ymm0 vmovdqa %ymm0, 0x980(%rsp) vmovdqa 0x980(%rsp), %ymm0 vmovdqa %ymm0, 0x260(%rsp) vmovaps 0x280(%rsp), %ymm2 vmovaps 0x260(%rsp), %ymm0 vmovaps %ymm2, 0x420(%rsp) vmovaps %ymm0, 0x400(%rsp) vmovaps 0x420(%rsp), %ymm0 vmovaps 0x400(%rsp), %ymm2 vmulps %ymm2, %ymm0, %ymm0 vmovaps %ymm0, 0x280(%rsp) vmovaps 0x280(%rsp), %ymm0 vmovaps %ymm1, 0x8c0(%rsp) vmovaps %ymm0, 0x8a0(%rsp) vmovaps 0x8c0(%rsp), %ymm0 vmovaps 0x8a0(%rsp), %ymm2 vaddps %ymm2, %ymm0, %ymm0 vmovaps %ymm1, 0xdc0(%rsp) vmovaps %ymm0, 0xda0(%rsp) vmovaps 0xdc0(%rsp), %ymm4 vmovaps 0xda0(%rsp), %ymm3 vrcpps %ymm3, %ymm0 vmulps %ymm0, %ymm4, %ymm1 vmovaps %ymm1, %ymm2 vfmsub213ps %ymm4, %ymm3, %ymm2 # ymm2 = (ymm3 * ymm2) - ymm4 vfnmadd213ps %ymm1, %ymm0, %ymm2 # ymm2 = -(ymm0 * ymm2) + ymm1 vmovaps 0x20(%rsp), %ymm1 vmovaps 0x40(%rsp), %ymm0 vmovaps %ymm2, 0x1a0(%rsp) vmovaps %ymm1, 0x180(%rsp) vmovaps %ymm0, 0x160(%rsp) vmovaps 0x1a0(%rsp), %ymm1 vmovaps 0x180(%rsp), %ymm0 vmovaps 0x160(%rsp), %ymm2 vpbroadcastd 0x7b4304(%rip), %ymm3 # 0x1e0deb8 vpxor %ymm3, %ymm2, %ymm2 vfmadd213ps %ymm2, %ymm1, %ymm0 # ymm0 = (ymm1 * ymm0) + ymm2 movq %rbp, %rsp popq %rbp retq nopw %cs:(%rax,%rax) nopl (%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,319
ncnn::UnaryOp_x86_avx512_functor::unary_op_tanh::func_pack4(float vector[4] const&) const
__m128 func_pack4(const __m128& x) const { return tanh_sse(x); }
subq $0x668, %rsp # imm = 0x668 movq %rdi, -0x78(%rsp) movq %rsi, -0x80(%rsp) movq -0x80(%rsp), %rax vmovaps (%rax), %xmm0 vmovaps %xmm0, -0x50(%rsp) movl $0x3f800000, -0x4(%rsp) # imm = 0x3F800000 vbroadcastss -0x4(%rsp), %xmm0 vmovaps %xmm0, -0x20(%rsp) vmovaps -0x20(%rsp), %xmm0 vmovaps %xmm0, -0x60(%rsp) movl $0x40000000, -0x24(%rsp) # imm = 0x40000000 vbroadcastss -0x24(%rsp), %xmm0 vmovaps %xmm0, -0x40(%rsp) vmovaps -0x40(%rsp), %xmm0 vmovaps %xmm0, -0x70(%rsp) vmovaps -0x50(%rsp), %xmm1 vmovaps -0x70(%rsp), %xmm0 vmovaps %xmm1, 0x50(%rsp) vmovaps %xmm0, 0x40(%rsp) vmovaps 0x50(%rsp), %xmm0 vmovaps 0x40(%rsp), %xmm1 vmulps %xmm1, %xmm0, %xmm0 vmovaps %xmm0, 0x70(%rsp) movl $0x3f800000, 0x9c(%rsp) # imm = 0x3F800000 vbroadcastss 0x9c(%rsp), %xmm0 vmovaps %xmm0, 0x80(%rsp) vmovaps 0x80(%rsp), %xmm0 vmovaps %xmm0, 0x60(%rsp) vmovaps 0x60(%rsp), %xmm2 vmovaps %xmm2, %xmm1 vpxor %xmm0, %xmm0, %xmm0 vmovaps %xmm0, 0xc0(%rsp) vmovaps 0xc0(%rsp), %xmm4 vmovaps 0x70(%rsp), %xmm3 vmovaps %xmm4, 0xb0(%rsp) vmovaps %xmm3, 0xa0(%rsp) vmovaps 0xb0(%rsp), %xmm3 vmovaps 0xa0(%rsp), %xmm4 vsubps %xmm4, %xmm3, %xmm3 vmovaps %xmm3, 0x140(%rsp) vmovaps %xmm0, 0x170(%rsp) vmovaps 0x170(%rsp), %xmm0 vmovaps %xmm0, 0x130(%rsp) vbroadcastss 0x7af2fc(%rip), %xmm0 # 0x1e09004 vmovaps %xmm0, 0x100(%rsp) vmovaps 0x140(%rsp), %xmm0 vmovaps %xmm0, 0x1f0(%rsp) vbroadcastss 0x7b2094(%rip), %xmm0 # 0x1e0bdc0 vmovaps %xmm0, 0x1e0(%rsp) vmovaps 0x1f0(%rsp), %xmm0 vmovaps 0x1e0(%rsp), %xmm3 vminps %xmm3, %xmm0, %xmm0 vmovaps %xmm0, 0x140(%rsp) vmovaps 0x140(%rsp), %xmm0 vmovaps %xmm0, 0x210(%rsp) vbroadcastss 0x7b2055(%rip), %xmm0 # 0x1e0bdc4 vmovaps %xmm0, 0x200(%rsp) vmovaps 0x210(%rsp), %xmm0 vmovaps 0x200(%rsp), %xmm3 vmaxps %xmm3, %xmm0, %xmm0 vmovaps %xmm0, 0x140(%rsp) vmovaps 0x140(%rsp), %xmm0 vmovaps %xmm0, 0x1d0(%rsp) vbroadcastss 0x7b2016(%rip), %xmm0 # 0x1e0bdc8 vmovaps %xmm0, 0x1c0(%rsp) vmovaps 0x1d0(%rsp), %xmm0 vmovaps 0x1c0(%rsp), %xmm3 vmulps %xmm3, %xmm0, %xmm0 vmovaps %xmm0, 0x120(%rsp) vmovaps 0x120(%rsp), %xmm0 vmovaps %xmm0, 0x250(%rsp) vbroadcastss 0x7af227(%rip), %xmm0 # 0x1e0901c vmovaps %xmm0, 0x240(%rsp) vmovaps 0x250(%rsp), %xmm3 vmovaps 0x240(%rsp), %xmm4 vaddps %xmm4, %xmm3, %xmm3 vmovaps %xmm3, 0x120(%rsp) vmovaps 0x120(%rsp), %xmm3 vmovaps %xmm3, 0x290(%rsp) vcvttps2dq 0x290(%rsp), %xmm3 vmovdqa %xmm3, 0x110(%rsp) vmovdqa 0x110(%rsp), %xmm3 vmovdqa %xmm3, 0x2a0(%rsp) vcvtdq2ps 0x2a0(%rsp), %xmm3 vmovaps %xmm3, 0x130(%rsp) vmovaps 0x130(%rsp), %xmm4 vmovaps 0x120(%rsp), %xmm3 vmovaps %xmm4, 0x2c0(%rsp) vmovaps %xmm3, 0x2b0(%rsp) vmovaps 0x2b0(%rsp), %xmm3 vmovaps 0x2c0(%rsp), %xmm4 vcmpltps %xmm4, %xmm3, %xmm3 vmovaps %xmm3, 0xf0(%rsp) vmovaps 0xf0(%rsp), %xmm4 vmovaps 0x100(%rsp), %xmm3 vmovaps %xmm4, 0x2e0(%rsp) vmovaps %xmm3, 0x2d0(%rsp) vmovdqa 0x2e0(%rsp), %xmm3 vmovdqa 0x2d0(%rsp), %xmm4 vpand %xmm4, %xmm3, %xmm3 vmovdqa %xmm3, 0xf0(%rsp) vmovaps 0x130(%rsp), %xmm4 vmovaps 0xf0(%rsp), %xmm3 vmovaps %xmm4, 0x160(%rsp) vmovaps %xmm3, 0x150(%rsp) vmovaps 0x160(%rsp), %xmm3 vmovaps 0x150(%rsp), %xmm4 vsubps %xmm4, %xmm3, %xmm3 vmovaps %xmm3, 0x120(%rsp) vmovaps 0x120(%rsp), %xmm4 vmovaps 0x140(%rsp), %xmm3 vmovaps %xmm4, 0x340(%rsp) vbroadcastss 0x7b1e79(%rip), %xmm4 # 0x1e0bdcc vmovaps %xmm4, 0x330(%rsp) vmovaps %xmm3, 0x320(%rsp) vmovaps 0x340(%rsp), %xmm5 vmovaps 0x330(%rsp), %xmm4 vmovaps 0x320(%rsp), %xmm3 vmovaps %xmm5, 0x4e0(%rsp) vmovaps %xmm4, 0x4d0(%rsp) vmovaps %xmm3, 0x4c0(%rsp) vmovaps 0x4e0(%rsp), %xmm4 vmovaps 0x4d0(%rsp), %xmm3 vmovaps 0x4c0(%rsp), %xmm5 vfnmadd213ps 
%xmm5, %xmm4, %xmm3 # xmm3 = -(xmm4 * xmm3) + xmm5 vmovaps %xmm3, 0x140(%rsp) vmovaps 0x120(%rsp), %xmm4 vmovaps 0x140(%rsp), %xmm3 vmovaps %xmm4, 0x310(%rsp) vbroadcastss 0x7b1de8(%rip), %xmm4 # 0x1e0bdd0 vmovaps %xmm4, 0x300(%rsp) vmovaps %xmm3, 0x2f0(%rsp) vmovaps 0x310(%rsp), %xmm5 vmovaps 0x300(%rsp), %xmm4 vmovaps 0x2f0(%rsp), %xmm3 vmovaps %xmm5, 0x510(%rsp) vmovaps %xmm4, 0x500(%rsp) vmovaps %xmm3, 0x4f0(%rsp) vmovaps 0x510(%rsp), %xmm4 vmovaps 0x500(%rsp), %xmm3 vmovaps 0x4f0(%rsp), %xmm5 vfnmadd213ps %xmm5, %xmm4, %xmm3 # xmm3 = -(xmm4 * xmm3) + xmm5 vmovaps %xmm3, 0x140(%rsp) vmovaps 0x140(%rsp), %xmm3 vmovaps %xmm3, 0x1b0(%rsp) vmovaps %xmm3, 0x1a0(%rsp) vmovaps 0x1b0(%rsp), %xmm3 vmovaps 0x1a0(%rsp), %xmm4 vmulps %xmm4, %xmm3, %xmm3 vmovaps %xmm3, 0x130(%rsp) vbroadcastss 0x7b1d38(%rip), %xmm3 # 0x1e0bdd4 vmovaps %xmm3, 0xe0(%rsp) vmovaps 0xe0(%rsp), %xmm4 vmovaps 0x140(%rsp), %xmm3 vmovaps %xmm4, 0x460(%rsp) vmovaps %xmm3, 0x450(%rsp) vbroadcastss 0x7b1d06(%rip), %xmm3 # 0x1e0bdd8 vmovaps %xmm3, 0x440(%rsp) vmovaps 0x460(%rsp), %xmm5 vmovaps 0x450(%rsp), %xmm4 vmovaps 0x440(%rsp), %xmm3 vmovaps %xmm5, 0x540(%rsp) vmovaps %xmm4, 0x530(%rsp) vmovaps %xmm3, 0x520(%rsp) vmovaps 0x540(%rsp), %xmm4 vmovaps 0x530(%rsp), %xmm3 vmovaps 0x520(%rsp), %xmm5 vfmadd213ps %xmm5, %xmm4, %xmm3 # xmm3 = (xmm4 * xmm3) + xmm5 vmovaps %xmm3, 0xe0(%rsp) vmovaps 0xe0(%rsp), %xmm4 vmovaps 0x140(%rsp), %xmm3 vmovaps %xmm4, 0x430(%rsp) vmovaps %xmm3, 0x420(%rsp) vbroadcastss 0x7b1c75(%rip), %xmm3 # 0x1e0bddc vmovaps %xmm3, 0x410(%rsp) vmovaps 0x430(%rsp), %xmm5 vmovaps 0x420(%rsp), %xmm4 vmovaps 0x410(%rsp), %xmm3 vmovaps %xmm5, 0x570(%rsp) vmovaps %xmm4, 0x560(%rsp) vmovaps %xmm3, 0x550(%rsp) vmovaps 0x570(%rsp), %xmm4 vmovaps 0x560(%rsp), %xmm3 vmovaps 0x550(%rsp), %xmm5 vfmadd213ps %xmm5, %xmm4, %xmm3 # xmm3 = (xmm4 * xmm3) + xmm5 vmovaps %xmm3, 0xe0(%rsp) vmovaps 0xe0(%rsp), %xmm4 vmovaps 0x140(%rsp), %xmm3 vmovaps %xmm4, 0x400(%rsp) vmovaps %xmm3, 0x3f0(%rsp) vbroadcastss 0x7b1be4(%rip), %xmm3 # 0x1e0bde0 vmovaps %xmm3, 0x3e0(%rsp) vmovaps 0x400(%rsp), %xmm5 vmovaps 0x3f0(%rsp), %xmm4 vmovaps 0x3e0(%rsp), %xmm3 vmovaps %xmm5, 0x5a0(%rsp) vmovaps %xmm4, 0x590(%rsp) vmovaps %xmm3, 0x580(%rsp) vmovaps 0x5a0(%rsp), %xmm4 vmovaps 0x590(%rsp), %xmm3 vmovaps 0x580(%rsp), %xmm5 vfmadd213ps %xmm5, %xmm4, %xmm3 # xmm3 = (xmm4 * xmm3) + xmm5 vmovaps %xmm3, 0xe0(%rsp) vmovaps 0xe0(%rsp), %xmm4 vmovaps 0x140(%rsp), %xmm3 vmovaps %xmm4, 0x3d0(%rsp) vmovaps %xmm3, 0x3c0(%rsp) vbroadcastss 0x7b1b53(%rip), %xmm3 # 0x1e0bde4 vmovaps %xmm3, 0x3b0(%rsp) vmovaps 0x3d0(%rsp), %xmm5 vmovaps 0x3c0(%rsp), %xmm4 vmovaps 0x3b0(%rsp), %xmm3 vmovaps %xmm5, 0x5d0(%rsp) vmovaps %xmm4, 0x5c0(%rsp) vmovaps %xmm3, 0x5b0(%rsp) vmovaps 0x5d0(%rsp), %xmm4 vmovaps 0x5c0(%rsp), %xmm3 vmovaps 0x5b0(%rsp), %xmm5 vfmadd213ps %xmm5, %xmm4, %xmm3 # xmm3 = (xmm4 * xmm3) + xmm5 vmovaps %xmm3, 0xe0(%rsp) vmovaps 0xe0(%rsp), %xmm4 vmovaps 0x140(%rsp), %xmm3 vmovaps %xmm4, 0x3a0(%rsp) vmovaps %xmm3, 0x390(%rsp) vmovaps %xmm0, 0x380(%rsp) vmovaps 0x3a0(%rsp), %xmm4 vmovaps 0x390(%rsp), %xmm3 vmovaps 0x380(%rsp), %xmm0 vmovaps %xmm4, 0x600(%rsp) vmovaps %xmm3, 0x5f0(%rsp) vmovaps %xmm0, 0x5e0(%rsp) vmovaps 0x600(%rsp), %xmm3 vmovaps 0x5f0(%rsp), %xmm0 vmovaps 0x5e0(%rsp), %xmm4 vfmadd213ps %xmm4, %xmm3, %xmm0 # xmm0 = (xmm3 * xmm0) + xmm4 vmovaps %xmm0, 0xe0(%rsp) vmovaps 0xe0(%rsp), %xmm4 vmovaps 0x130(%rsp), %xmm3 vmovaps 0x140(%rsp), %xmm0 vmovaps %xmm4, 0x370(%rsp) vmovaps %xmm3, 0x360(%rsp) vmovaps %xmm0, 0x350(%rsp) vmovaps 0x370(%rsp), 
%xmm4 vmovaps 0x360(%rsp), %xmm3 vmovaps 0x350(%rsp), %xmm0 vmovaps %xmm4, 0x630(%rsp) vmovaps %xmm3, 0x620(%rsp) vmovaps %xmm0, 0x610(%rsp) vmovaps 0x630(%rsp), %xmm3 vmovaps 0x620(%rsp), %xmm0 vmovaps 0x610(%rsp), %xmm4 vfmadd213ps %xmm4, %xmm3, %xmm0 # xmm0 = (xmm3 * xmm0) + xmm4 vmovaps %xmm0, 0xe0(%rsp) vmovaps 0xe0(%rsp), %xmm3 vmovaps 0x100(%rsp), %xmm0 vmovaps %xmm3, 0x230(%rsp) vmovaps %xmm0, 0x220(%rsp) vmovaps 0x230(%rsp), %xmm0 vmovaps 0x220(%rsp), %xmm3 vaddps %xmm3, %xmm0, %xmm0 vmovaps %xmm0, 0xe0(%rsp) vmovaps 0x120(%rsp), %xmm0 vmovaps %xmm0, 0x280(%rsp) vcvttps2dq 0x280(%rsp), %xmm0 vmovdqa %xmm0, 0x110(%rsp) vmovdqa 0x110(%rsp), %xmm0 vmovdqa %xmm0, 0x480(%rsp) vpbroadcastq 0x7b197c(%rip), %xmm0 # 0x1e0be18 vmovdqa %xmm0, 0x470(%rsp) vmovdqa 0x480(%rsp), %xmm0 vmovdqa 0x470(%rsp), %xmm3 vpaddd %xmm3, %xmm0, %xmm0 vmovdqa %xmm0, 0x110(%rsp) vmovdqa 0x110(%rsp), %xmm0 vmovdqa %xmm0, 0x4a0(%rsp) movl $0x17, 0x49c(%rsp) vmovdqa 0x4a0(%rsp), %xmm0 movl 0x49c(%rsp), %eax vmovd %eax, %xmm3 vpslld %xmm3, %xmm0, %xmm0 vmovdqa %xmm0, 0x110(%rsp) vmovdqa 0x110(%rsp), %xmm0 vmovdqa %xmm0, 0x4b0(%rsp) vmovdqa 0x4b0(%rsp), %xmm0 vmovaps %xmm0, 0xd0(%rsp) vmovaps 0xe0(%rsp), %xmm3 vmovaps 0xd0(%rsp), %xmm0 vmovaps %xmm3, 0x190(%rsp) vmovaps %xmm0, 0x180(%rsp) vmovaps 0x190(%rsp), %xmm0 vmulps 0x180(%rsp), %xmm0, %xmm0 vmovaps %xmm0, 0xe0(%rsp) vmovaps 0xe0(%rsp), %xmm0 vmovaps %xmm2, 0x270(%rsp) vmovaps %xmm0, 0x260(%rsp) vmovaps 0x270(%rsp), %xmm0 vaddps 0x260(%rsp), %xmm0, %xmm0 vmovaps %xmm1, 0x650(%rsp) vmovaps %xmm0, 0x640(%rsp) vmovaps 0x650(%rsp), %xmm0 vdivps 0x640(%rsp), %xmm0, %xmm1 vmovaps -0x70(%rsp), %xmm0 vmovaps %xmm1, 0x30(%rsp) vmovaps %xmm0, 0x20(%rsp) vmovaps 0x30(%rsp), %xmm0 vmulps 0x20(%rsp), %xmm0, %xmm1 vmovaps -0x60(%rsp), %xmm0 vmovaps %xmm1, 0x10(%rsp) vmovaps %xmm0, (%rsp) vmovaps 0x10(%rsp), %xmm0 vsubps (%rsp), %xmm0, %xmm0 addq $0x668, %rsp # imm = 0x668 retq nopl (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,320
ncnn::UnaryOp_x86_avx512_functor::unary_op_tanh::func(float const&) const
float func(const float& x) const { return (float)tanh(x); }
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq %rsi, 0x8(%rsp) movq 0x8(%rsp), %rax vmovss (%rax), %xmm0 callq 0x140cc0 addq $0x18, %rsp retq nopw %cs:(%rax,%rax) nopl (%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_avx512.cpp
2,113,321
ncnn::UnaryOp_x86_fma::UnaryOp_x86_fma()
UnaryOp_x86_fma::UnaryOp_x86_fma()
{
#if __SSE2__
    support_packing = true;
#endif // __SSE2__
}
movq %rdi, -0x8(%rsp) movq %rsi, -0x10(%rsp) movq -0x8(%rsp), %rax movq -0x10(%rsp), %rcx movq (%rcx), %rdx movq %rdx, (%rax) movq 0x8(%rcx), %rdx movq (%rax), %rcx movq -0x18(%rcx), %rcx movq %rdx, (%rax,%rcx) movq (%rax), %rcx movq -0x18(%rcx), %rcx movb $0x1, 0xb(%rax,%rcx) retq nopw %cs:(%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_fma.cpp
2,113,322
ncnn::UnaryOp_x86_fma::UnaryOp_x86_fma()
UnaryOp_x86_fma::UnaryOp_x86_fma()
{
#if __SSE2__
    support_packing = true;
#endif // __SSE2__
}
subq $0x18, %rsp movq %rdi, 0x10(%rsp) movq 0x10(%rsp), %rdi movq %rdi, 0x8(%rsp) addq $0x8, %rdi callq 0x1639190 movq 0x8(%rsp), %rax leaq 0x8775f0(%rip), %rcx # 0x1ed1c88 addq $0x18, %rcx movq %rcx, (%rax) leaq 0x8775e2(%rip), %rcx # 0x1ed1c88 addq $0x88, %rcx movq %rcx, 0x8(%rax) movq (%rax), %rcx movq -0x18(%rcx), %rcx movb $0x1, 0xb(%rax,%rcx) addq $0x18, %rsp retq nopw %cs:(%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_fma.cpp
2,113,323
ncnn::UnaryOp_x86_fma::forward_inplace(ncnn::Mat&, ncnn::Option const&) const
int UnaryOp_x86_fma::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
    using namespace UnaryOp_x86_fma_functor;

    if (op_type == Operation_ABS)
        return unary_op_inplace<unary_op_abs>(bottom_top_blob, opt);
    if (op_type == Operation_NEG)
        return unary_op_inplace<unary_op_neg>(bottom_top_blob, opt);
    if (op_type == Operation_FLOOR)
        return unary_op_inplace<unary_op_floor>(bottom_top_blob, opt);
    if (op_type == Operation_CEIL)
        return unary_op_inplace<unary_op_ceil>(bottom_top_blob, opt);
    if (op_type == Operation_SQUARE)
        return unary_op_inplace<unary_op_square>(bottom_top_blob, opt);
    if (op_type == Operation_SQRT)
        return unary_op_inplace<unary_op_sqrt>(bottom_top_blob, opt);
    if (op_type == Operation_RSQRT)
        return unary_op_inplace<unary_op_rsqrt>(bottom_top_blob, opt);
    if (op_type == Operation_EXP)
        return unary_op_inplace<unary_op_exp>(bottom_top_blob, opt);
    if (op_type == Operation_LOG)
        return unary_op_inplace<unary_op_log>(bottom_top_blob, opt);
    if (op_type == Operation_SIN)
        return unary_op_inplace<unary_op_sin>(bottom_top_blob, opt);
    if (op_type == Operation_COS)
        return unary_op_inplace<unary_op_cos>(bottom_top_blob, opt);
    if (op_type == Operation_TAN)
        return unary_op_inplace<unary_op_tan>(bottom_top_blob, opt);
    if (op_type == Operation_ASIN)
        return unary_op_inplace<unary_op_asin>(bottom_top_blob, opt);
    if (op_type == Operation_ACOS)
        return unary_op_inplace<unary_op_acos>(bottom_top_blob, opt);
    if (op_type == Operation_ATAN)
        return unary_op_inplace<unary_op_atan>(bottom_top_blob, opt);
    if (op_type == Operation_RECIPROCAL)
        return unary_op_inplace<unary_op_reciprocal>(bottom_top_blob, opt);
    if (op_type == Operation_TANH)
        return unary_op_inplace<unary_op_tanh>(bottom_top_blob, opt);

    return 0;
}
subq $0x28, %rsp movq %rdi, 0x18(%rsp) movq %rsi, 0x10(%rsp) movq %rdx, 0x8(%rsp) movq 0x18(%rsp), %rax movq %rax, (%rsp) movq (%rax), %rcx movq -0x18(%rcx), %rcx cmpl $0x0, 0xd0(%rax,%rcx) jne 0x165a715 movq 0x10(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0x165a9f0 movl %eax, 0x24(%rsp) jmp 0x165a9e4 movq (%rsp), %rax movq (%rax), %rcx movq -0x18(%rcx), %rcx cmpl $0x1, 0xd0(%rax,%rcx) jne 0x165a742 movq 0x10(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0x165b0a0 movl %eax, 0x24(%rsp) jmp 0x165a9e4 movq (%rsp), %rax movq (%rax), %rcx movq -0x18(%rcx), %rcx cmpl $0x2, 0xd0(%rax,%rcx) jne 0x165a76f movq 0x10(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0x165b750 movl %eax, 0x24(%rsp) jmp 0x165a9e4 movq (%rsp), %rax movq (%rax), %rcx movq -0x18(%rcx), %rcx cmpl $0x3, 0xd0(%rax,%rcx) jne 0x165a79c movq 0x10(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0x165be00 movl %eax, 0x24(%rsp) jmp 0x165a9e4 movq (%rsp), %rax movq (%rax), %rcx movq -0x18(%rcx), %rcx cmpl $0x4, 0xd0(%rax,%rcx) jne 0x165a7c9 movq 0x10(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0x165c4b0 movl %eax, 0x24(%rsp) jmp 0x165a9e4 movq (%rsp), %rax movq (%rax), %rcx movq -0x18(%rcx), %rcx cmpl $0x5, 0xd0(%rax,%rcx) jne 0x165a7f6 movq 0x10(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0x165cb60 movl %eax, 0x24(%rsp) jmp 0x165a9e4 movq (%rsp), %rax movq (%rax), %rcx movq -0x18(%rcx), %rcx cmpl $0x6, 0xd0(%rax,%rcx) jne 0x165a823 movq 0x10(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0x165d210 movl %eax, 0x24(%rsp) jmp 0x165a9e4 movq (%rsp), %rax movq (%rax), %rcx movq -0x18(%rcx), %rcx cmpl $0x7, 0xd0(%rax,%rcx) jne 0x165a850 movq 0x10(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0x165d8c0 movl %eax, 0x24(%rsp) jmp 0x165a9e4 movq (%rsp), %rax movq (%rax), %rcx movq -0x18(%rcx), %rcx cmpl $0x8, 0xd0(%rax,%rcx) jne 0x165a87d movq 0x10(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0x165df70 movl %eax, 0x24(%rsp) jmp 0x165a9e4 movq (%rsp), %rax movq (%rax), %rcx movq -0x18(%rcx), %rcx cmpl $0x9, 0xd0(%rax,%rcx) jne 0x165a8aa movq 0x10(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0x165e620 movl %eax, 0x24(%rsp) jmp 0x165a9e4 movq (%rsp), %rax movq (%rax), %rcx movq -0x18(%rcx), %rcx cmpl $0xa, 0xd0(%rax,%rcx) jne 0x165a8d7 movq 0x10(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0x165ecd0 movl %eax, 0x24(%rsp) jmp 0x165a9e4 movq (%rsp), %rax movq (%rax), %rcx movq -0x18(%rcx), %rcx cmpl $0xb, 0xd0(%rax,%rcx) jne 0x165a904 movq 0x10(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0x165f380 movl %eax, 0x24(%rsp) jmp 0x165a9e4 movq (%rsp), %rax movq (%rax), %rcx movq -0x18(%rcx), %rcx cmpl $0xc, 0xd0(%rax,%rcx) jne 0x165a931 movq 0x10(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0x165fa30 movl %eax, 0x24(%rsp) jmp 0x165a9e4 movq (%rsp), %rax movq (%rax), %rcx movq -0x18(%rcx), %rcx cmpl $0xd, 0xd0(%rax,%rcx) jne 0x165a95e movq 0x10(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0x16600e0 movl %eax, 0x24(%rsp) jmp 0x165a9e4 movq (%rsp), %rax movq (%rax), %rcx movq -0x18(%rcx), %rcx cmpl $0xe, 0xd0(%rax,%rcx) jne 0x165a988 movq 0x10(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0x1660790 movl %eax, 0x24(%rsp) jmp 0x165a9e4 movq (%rsp), %rax movq (%rax), %rcx movq -0x18(%rcx), %rcx cmpl $0xf, 0xd0(%rax,%rcx) jne 0x165a9b2 movq 0x10(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0x1660e40 movl %eax, 0x24(%rsp) jmp 0x165a9e4 movq (%rsp), %rax movq (%rax), %rcx movq -0x18(%rcx), %rcx cmpl $0x10, 0xd0(%rax,%rcx) jne 0x165a9dc movq 0x10(%rsp), %rdi movq 0x8(%rsp), %rsi callq 0x16614f0 movl %eax, 0x24(%rsp) jmp 0x165a9e4 movl $0x0, 0x24(%rsp) movl 0x24(%rsp), %eax addq $0x28, %rsp retq nopl (%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_fma.cpp
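The if-chain compiles to the run of cmpl $0x0 through cmpl $0x10 tests against the member at offset 0xd0 in the listing above, consistent with the operation codes taking values 0 through 16 in declaration order. Restated below as a reading aid for the dispatch; a sketch inferred from the listing, not a quote of ncnn's header:

// Each cmpl immediate pairs with the unary_op_inplace<...> call after it.
enum OperationType
{
    Operation_ABS = 0,
    Operation_NEG = 1,
    Operation_FLOOR = 2,
    Operation_CEIL = 3,
    Operation_SQUARE = 4,
    Operation_SQRT = 5,
    Operation_RSQRT = 6,
    Operation_EXP = 7,
    Operation_LOG = 8,
    Operation_SIN = 9,
    Operation_COS = 10,
    Operation_TAN = 11,
    Operation_ASIN = 12,
    Operation_ACOS = 13,
    Operation_ATAN = 14,
    Operation_RECIPROCAL = 15,
    Operation_TANH = 16 // 0x10, the last test before the default return 0
};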
2,113,324
int ncnn::unary_op_inplace<ncnn::UnaryOp_x86_fma_functor::unary_op_abs>(ncnn::Mat&, ncnn::Option const&)
static int unary_op_inplace(Mat& a, const Option& opt)
{
    Op op;

    int w = a.w;
    int h = a.h;
    int d = a.d;
    int channels = a.c;
    int elempack = a.elempack;
    int size = w * h * d * elempack;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < channels; q++)
    {
        float* ptr = a.channel(q);

        int i = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
        for (; i + 15 < size; i += 16)
        {
            __m512 _p = _mm512_loadu_ps(ptr);
            _p = op.func_pack16(_p);
            _mm512_storeu_ps(ptr, _p);
            ptr += 16;
        }
#endif // __AVX512F__
        for (; i + 7 < size; i += 8)
        {
            __m256 _p = _mm256_loadu_ps(ptr);
            _p = op.func_pack8(_p);
            _mm256_storeu_ps(ptr, _p);
            ptr += 8;
        }
#endif // __AVX__
        for (; i + 3 < size; i += 4)
        {
            __m128 _p = _mm_load_ps(ptr);
            _p = op.func_pack4(_p);
            _mm_store_ps(ptr, _p);
            ptr += 4;
        }
#endif // __SSE2__
        for (; i < size; i++)
        {
            *ptr = op.func(*ptr);
            ptr++;
        }
    }

    return 0;
}
pushq %rbp movq %rsp, %rbp andq $-0x20, %rsp subq $0x240, %rsp # imm = 0x240 movq %rdi, 0xf8(%rsp) movq %rsi, 0xf0(%rsp) movq 0xf8(%rsp), %rax movl 0x2c(%rax), %eax movl %eax, 0xe8(%rsp) movq 0xf8(%rsp), %rax movl 0x30(%rax), %eax movl %eax, 0xe4(%rsp) movq 0xf8(%rsp), %rax movl 0x34(%rax), %eax movl %eax, 0xe0(%rsp) movq 0xf8(%rsp), %rax movl 0x38(%rax), %eax movl %eax, 0xdc(%rsp) movq 0xf8(%rsp), %rax movl 0x18(%rax), %eax movl %eax, 0xd8(%rsp) movl 0xe8(%rsp), %eax imull 0xe4(%rsp), %eax imull 0xe0(%rsp), %eax imull 0xd8(%rsp), %eax movl %eax, 0xd4(%rsp) movl $0x0, 0xd0(%rsp) movl 0xd0(%rsp), %eax cmpl 0xdc(%rsp), %eax jge 0x165b096 movq 0xf8(%rsp), %rcx movl 0xd0(%rsp), %eax leaq 0x80(%rsp), %rdx movq %rdx, 0x110(%rsp) movq %rcx, 0x108(%rsp) movl %eax, 0x104(%rsp) movq 0x108(%rsp), %rax movq %rax, 0x28(%rsp) movb $0x0, 0x103(%rsp) movl 0x2c(%rax), %r9d movl 0x30(%rax), %r8d movl 0x34(%rax), %edi movq (%rax), %rsi movq 0x40(%rax), %rcx movslq 0x104(%rsp), %rdx imulq %rdx, %rcx imulq 0x10(%rax), %rcx addq %rcx, %rsi movq 0x10(%rax), %rdx movl 0x18(%rax), %ecx movq 0x20(%rax), %rax leaq 0x80(%rsp), %r10 movq %r10, 0x1d0(%rsp) movl %r9d, 0x1cc(%rsp) movl %r8d, 0x1c8(%rsp) movl %edi, 0x1c4(%rsp) movq %rsi, 0x1b8(%rsp) movq %rdx, 0x1b0(%rsp) movl %ecx, 0x1ac(%rsp) movq %rax, 0x1a0(%rsp) movq 0x1d0(%rsp), %rcx movq %rcx, 0x20(%rsp) movq 0x1b8(%rsp), %rax movq %rax, (%rcx) movq $0x0, 0x8(%rcx) movq 0x1b0(%rsp), %rax movq %rax, 0x10(%rcx) movl 0x1ac(%rsp), %eax movl %eax, 0x18(%rcx) movq 0x1a0(%rsp), %rax movq %rax, 0x20(%rcx) movl $0x3, 0x28(%rcx) movl 0x1cc(%rsp), %eax movl %eax, 0x2c(%rcx) movl 0x1c8(%rsp), %eax movl %eax, 0x30(%rcx) movl $0x1, 0x34(%rcx) movl 0x1c4(%rsp), %eax movl %eax, 0x38(%rcx) movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rdx imulq %rdx, %rax imulq 0x10(%rcx), %rax movq %rax, 0x1e0(%rsp) movl $0x10, 0x1dc(%rsp) movq 0x1e0(%rsp), %rax movslq 0x1dc(%rsp), %rdx addq %rdx, %rax subq $0x1, %rax xorl %edx, %edx subl 0x1dc(%rsp), %edx movslq %edx, %rdx andq %rdx, %rax xorl %edx, %edx divq 0x10(%rcx) movq %rax, %rdx movq 0x28(%rsp), %rax movq %rdx, 0x40(%rcx) movl 0x28(%rax), %ecx subl $0x1, %ecx movl %ecx, 0xa8(%rsp) cmpl $0x4, 0x28(%rax) jne 0x165ac5e movq 0x28(%rsp), %rcx movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rcx imulq %rcx, %rax movq %rax, 0xc0(%rsp) movb $0x1, 0x103(%rsp) testb $0x1, 0x103(%rsp) jne 0x165ad8d leaq 0x80(%rsp), %rax movq %rax, 0x120(%rsp) movq 0x120(%rsp), %rax movq %rax, 0x210(%rsp) movq 0x210(%rsp), %rax movq %rax, 0x18(%rsp) cmpq $0x0, 0x8(%rax) je 0x165ad33 movq 0x18(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x20c(%rsp) # imm = 0xFFFFFFFF movl 0x20c(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x208(%rsp) cmpl $0x1, 0x208(%rsp) jne 0x165ad33 movq 0x18(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x165ad04 movq 0x18(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x165ad02 jmp 0x165ad31 movq 0x18(%rsp), %rax movq (%rax), %rax movq %rax, 0x218(%rsp) cmpq $0x0, 0x218(%rsp) je 0x165ad2f movq 0x218(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x165ad31 jmp 0x165ad33 movq 0x18(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x165ad8b movq %rax, %rdi callq 0x678a0 jmp 0x165ad8d leaq 0x80(%rsp), %rax movq %rax, 0x118(%rsp) movq 0x118(%rsp), %rax movq (%rax), %rax movq %rax, 0x10(%rsp) leaq 0x80(%rsp), %rax 
movq %rax, 0x128(%rsp) movq 0x128(%rsp), %rax movq %rax, 0x200(%rsp) movq 0x200(%rsp), %rax movq %rax, 0x8(%rsp) cmpq $0x0, 0x8(%rax) je 0x165ae6c movq 0x8(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x1fc(%rsp) # imm = 0xFFFFFFFF movl 0x1fc(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x1f8(%rsp) cmpl $0x1, 0x1f8(%rsp) jne 0x165ae6c movq 0x8(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x165ae3d movq 0x8(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x165ae3b jmp 0x165ae6a movq 0x8(%rsp), %rax movq (%rax), %rax movq %rax, 0x220(%rsp) cmpq $0x0, 0x220(%rsp) je 0x165ae68 movq 0x220(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x165ae6a jmp 0x165ae6c movq 0x8(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x165aec4 movq %rax, %rdi callq 0x678a0 movq 0x10(%rsp), %rax movq %rax, 0xc8(%rsp) movl $0x0, 0x70(%rsp) movl 0x70(%rsp), %eax addl $0x7, %eax cmpl 0xd4(%rsp), %eax jge 0x165af7f movq 0xc8(%rsp), %rax movq %rax, 0x138(%rsp) movq 0x138(%rsp), %rax vmovups (%rax), %ymm0 vmovaps %ymm0, 0x40(%rsp) leaq 0xef(%rsp), %rdi leaq 0x40(%rsp), %rsi callq 0x1661bd0 vmovaps %ymm0, 0x40(%rsp) movq 0xc8(%rsp), %rax vmovaps 0x40(%rsp), %ymm0 movq %rax, 0x170(%rsp) vmovaps %ymm0, 0x140(%rsp) vmovaps 0x140(%rsp), %ymm0 movq 0x170(%rsp), %rax vmovups %ymm0, (%rax) movq 0xc8(%rsp), %rax addq $0x20, %rax movq %rax, 0xc8(%rsp) movl 0x70(%rsp), %eax addl $0x8, %eax movl %eax, 0x70(%rsp) jmp 0x165aed9 jmp 0x165af81 movl 0x70(%rsp), %eax addl $0x3, %eax cmpl 0xd4(%rsp), %eax jge 0x165b02a movq 0xc8(%rsp), %rax movq %rax, 0x178(%rsp) movq 0x178(%rsp), %rax vmovaps (%rax), %xmm0 vmovaps %xmm0, 0x30(%rsp) leaq 0xef(%rsp), %rdi leaq 0x30(%rsp), %rsi vzeroupper callq 0x1661c60 vmovaps %xmm0, 0x30(%rsp) movq 0xc8(%rsp), %rax vmovaps 0x30(%rsp), %xmm0 movq %rax, 0x198(%rsp) vmovaps %xmm0, 0x180(%rsp) vmovaps 0x180(%rsp), %xmm0 movq 0x198(%rsp), %rax vmovaps %xmm0, (%rax) movq 0xc8(%rsp), %rax addq $0x10, %rax movq %rax, 0xc8(%rsp) movl 0x70(%rsp), %eax addl $0x4, %eax movl %eax, 0x70(%rsp) jmp 0x165af81 jmp 0x165b02c movl 0x70(%rsp), %eax cmpl 0xd4(%rsp), %eax jge 0x165b07e movq 0xc8(%rsp), %rsi leaq 0xef(%rsp), %rdi vzeroupper callq 0x1661ce0 movq 0xc8(%rsp), %rax vmovss %xmm0, (%rax) movq 0xc8(%rsp), %rax addq $0x4, %rax movq %rax, 0xc8(%rsp) movl 0x70(%rsp), %eax addl $0x1, %eax movl %eax, 0x70(%rsp) jmp 0x165b02c jmp 0x165b080 movl 0xd0(%rsp), %eax addl $0x1, %eax movl %eax, 0xd0(%rsp) jmp 0x165aa9a xorl %eax, %eax movq %rbp, %rsp popq %rbp vzeroupper retq
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_fma.cpp
2,113,325
int ncnn::unary_op_inplace<ncnn::UnaryOp_x86_fma_functor::unary_op_neg>(ncnn::Mat&, ncnn::Option const&)
static int unary_op_inplace(Mat& a, const Option& opt)
{
    Op op;

    int w = a.w;
    int h = a.h;
    int d = a.d;
    int channels = a.c;
    int elempack = a.elempack;
    int size = w * h * d * elempack;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < channels; q++)
    {
        float* ptr = a.channel(q);

        int i = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
        for (; i + 15 < size; i += 16)
        {
            __m512 _p = _mm512_loadu_ps(ptr);
            _p = op.func_pack16(_p);
            _mm512_storeu_ps(ptr, _p);
            ptr += 16;
        }
#endif // __AVX512F__
        for (; i + 7 < size; i += 8)
        {
            __m256 _p = _mm256_loadu_ps(ptr);
            _p = op.func_pack8(_p);
            _mm256_storeu_ps(ptr, _p);
            ptr += 8;
        }
#endif // __AVX__
        for (; i + 3 < size; i += 4)
        {
            __m128 _p = _mm_load_ps(ptr);
            _p = op.func_pack4(_p);
            _mm_store_ps(ptr, _p);
            ptr += 4;
        }
#endif // __SSE2__
        for (; i < size; i++)
        {
            *ptr = op.func(*ptr);
            ptr++;
        }
    }

    return 0;
}
pushq %rbp movq %rsp, %rbp andq $-0x20, %rsp subq $0x200, %rsp # imm = 0x200 movq %rdi, 0xe0(%rsp) movq %rsi, 0xd8(%rsp) movq 0xe0(%rsp), %rax movl 0x2c(%rax), %eax movl %eax, 0xd0(%rsp) movq 0xe0(%rsp), %rax movl 0x30(%rax), %eax movl %eax, 0xcc(%rsp) movq 0xe0(%rsp), %rax movl 0x34(%rax), %eax movl %eax, 0xc8(%rsp) movq 0xe0(%rsp), %rax movl 0x38(%rax), %eax movl %eax, 0xc4(%rsp) movq 0xe0(%rsp), %rax movl 0x18(%rax), %eax movl %eax, 0xc0(%rsp) movl 0xd0(%rsp), %eax imull 0xcc(%rsp), %eax imull 0xc8(%rsp), %eax imull 0xc0(%rsp), %eax movl %eax, 0xbc(%rsp) movl $0x0, 0xb8(%rsp) movl 0xb8(%rsp), %eax cmpl 0xc4(%rsp), %eax jge 0x165b737 movq 0xe0(%rsp), %rcx movl 0xb8(%rsp), %eax leaq 0x68(%rsp), %rdx movq %rdx, 0xf8(%rsp) movq %rcx, 0xf0(%rsp) movl %eax, 0xec(%rsp) movq 0xf0(%rsp), %rax movq %rax, 0x28(%rsp) movb $0x0, 0xeb(%rsp) movl 0x2c(%rax), %r9d movl 0x30(%rax), %r8d movl 0x34(%rax), %edi movq (%rax), %rsi movq 0x40(%rax), %rcx movslq 0xec(%rsp), %rdx imulq %rdx, %rcx imulq 0x10(%rax), %rcx addq %rcx, %rsi movq 0x10(%rax), %rdx movl 0x18(%rax), %ecx movq 0x20(%rax), %rax leaq 0x68(%rsp), %r10 movq %r10, 0x1a8(%rsp) movl %r9d, 0x1a4(%rsp) movl %r8d, 0x1a0(%rsp) movl %edi, 0x19c(%rsp) movq %rsi, 0x190(%rsp) movq %rdx, 0x188(%rsp) movl %ecx, 0x184(%rsp) movq %rax, 0x178(%rsp) movq 0x1a8(%rsp), %rcx movq %rcx, 0x20(%rsp) movq 0x190(%rsp), %rax movq %rax, (%rcx) movq $0x0, 0x8(%rcx) movq 0x188(%rsp), %rax movq %rax, 0x10(%rcx) movl 0x184(%rsp), %eax movl %eax, 0x18(%rcx) movq 0x178(%rsp), %rax movq %rax, 0x20(%rcx) movl $0x3, 0x28(%rcx) movl 0x1a4(%rsp), %eax movl %eax, 0x2c(%rcx) movl 0x1a0(%rsp), %eax movl %eax, 0x30(%rcx) movl $0x1, 0x34(%rcx) movl 0x19c(%rsp), %eax movl %eax, 0x38(%rcx) movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rdx imulq %rdx, %rax imulq 0x10(%rcx), %rax movq %rax, 0x1b8(%rsp) movl $0x10, 0x1b4(%rsp) movq 0x1b8(%rsp), %rax movslq 0x1b4(%rsp), %rdx addq %rdx, %rax subq $0x1, %rax xorl %edx, %edx subl 0x1b4(%rsp), %edx movslq %edx, %rdx andq %rdx, %rax xorl %edx, %edx divq 0x10(%rcx) movq %rax, %rdx movq 0x28(%rsp), %rax movq %rdx, 0x40(%rcx) movl 0x28(%rax), %ecx subl $0x1, %ecx movl %ecx, 0x90(%rsp) cmpl $0x4, 0x28(%rax) jne 0x165b308 movq 0x28(%rsp), %rcx movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rcx imulq %rcx, %rax movq %rax, 0xa8(%rsp) movb $0x1, 0xeb(%rsp) testb $0x1, 0xeb(%rsp) jne 0x165b434 leaq 0x68(%rsp), %rax movq %rax, 0x108(%rsp) movq 0x108(%rsp), %rax movq %rax, 0x1d8(%rsp) movq 0x1d8(%rsp), %rax movq %rax, 0x18(%rsp) cmpq $0x0, 0x8(%rax) je 0x165b3da movq 0x18(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x1d4(%rsp) # imm = 0xFFFFFFFF movl 0x1d4(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x1d0(%rsp) cmpl $0x1, 0x1d0(%rsp) jne 0x165b3da movq 0x18(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x165b3ab movq 0x18(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x165b3a9 jmp 0x165b3d8 movq 0x18(%rsp), %rax movq (%rax), %rax movq %rax, 0x1e0(%rsp) cmpq $0x0, 0x1e0(%rsp) je 0x165b3d6 movq 0x1e0(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x165b3d8 jmp 0x165b3da movq 0x18(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x165b432 movq %rax, %rdi callq 0x678a0 jmp 0x165b434 leaq 0x68(%rsp), %rax movq %rax, 0x100(%rsp) movq 0x100(%rsp), %rax movq (%rax), %rax movq %rax, 0x8(%rsp) leaq 0x68(%rsp), %rax movq %rax, 
0x110(%rsp) movq 0x110(%rsp), %rax movq %rax, 0x1c8(%rsp) movq 0x1c8(%rsp), %rax movq %rax, 0x10(%rsp) cmpq $0x0, 0x8(%rax) je 0x165b50d movq 0x10(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x1c4(%rsp) # imm = 0xFFFFFFFF movl 0x1c4(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x1c0(%rsp) cmpl $0x1, 0x1c0(%rsp) jne 0x165b50d movq 0x10(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x165b4de movq 0x10(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x165b4dc jmp 0x165b50b movq 0x10(%rsp), %rax movq (%rax), %rax movq %rax, 0x1e8(%rsp) cmpq $0x0, 0x1e8(%rsp) je 0x165b509 movq 0x1e8(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x165b50b jmp 0x165b50d movq 0x10(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x165b565 movq %rax, %rdi callq 0x678a0 movq 0x8(%rsp), %rax movq %rax, 0xb0(%rsp) movl $0x0, 0x64(%rsp) movl 0x64(%rsp), %eax addl $0x7, %eax cmpl 0xbc(%rsp), %eax jge 0x165b620 movq 0xb0(%rsp), %rax movq %rax, 0x118(%rsp) movq 0x118(%rsp), %rax vmovups (%rax), %ymm0 vmovaps %ymm0, 0x40(%rsp) leaq 0xd7(%rsp), %rdi leaq 0x40(%rsp), %rsi callq 0x1661d10 vmovaps %ymm0, 0x40(%rsp) movq 0xb0(%rsp), %rax vmovaps 0x40(%rsp), %ymm0 movq %rax, 0x150(%rsp) vmovaps %ymm0, 0x120(%rsp) vmovaps 0x120(%rsp), %ymm0 movq 0x150(%rsp), %rax vmovups %ymm0, (%rax) movq 0xb0(%rsp), %rax addq $0x20, %rax movq %rax, 0xb0(%rsp) movl 0x64(%rsp), %eax addl $0x8, %eax movl %eax, 0x64(%rsp) jmp 0x165b57a jmp 0x165b622 movl 0x64(%rsp), %eax addl $0x3, %eax cmpl 0xbc(%rsp), %eax jge 0x165b6cb movq 0xb0(%rsp), %rax movq %rax, 0x158(%rsp) movq 0x158(%rsp), %rax vmovaps (%rax), %xmm0 vmovaps %xmm0, 0x30(%rsp) leaq 0xd7(%rsp), %rdi leaq 0x30(%rsp), %rsi vzeroupper callq 0x1661d60 vmovaps %xmm0, 0x30(%rsp) movq 0xb0(%rsp), %rax vmovaps 0x30(%rsp), %xmm0 movq %rax, 0x170(%rsp) vmovaps %xmm0, 0x160(%rsp) vmovaps 0x160(%rsp), %xmm0 movq 0x170(%rsp), %rax vmovaps %xmm0, (%rax) movq 0xb0(%rsp), %rax addq $0x10, %rax movq %rax, 0xb0(%rsp) movl 0x64(%rsp), %eax addl $0x4, %eax movl %eax, 0x64(%rsp) jmp 0x165b622 jmp 0x165b6cd movl 0x64(%rsp), %eax cmpl 0xbc(%rsp), %eax jge 0x165b71f movq 0xb0(%rsp), %rsi leaq 0xd7(%rsp), %rdi vzeroupper callq 0x1661da0 movq 0xb0(%rsp), %rax vmovss %xmm0, (%rax) movq 0xb0(%rsp), %rax addq $0x4, %rax movq %rax, 0xb0(%rsp) movl 0x64(%rsp), %eax addl $0x1, %eax movl %eax, 0x64(%rsp) jmp 0x165b6cd jmp 0x165b721 movl 0xb8(%rsp), %eax addl $0x1, %eax movl %eax, 0xb8(%rsp) jmp 0x165b14a xorl %eax, %eax movq %rbp, %rsp popq %rbp vzeroupper retq nopw %cs:(%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_fma.cpp
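The record above pairs the unary_op_neg instantiation with its O0 assembly; the functor body itself is not part of the record. Below is a minimal sketch of a negation functor matching the pack8/pack4/scalar call sites in the loop — the struct name and the zero-minus-x intrinsic choice are assumptions, not taken from this dump, and ncnn's actual functor in unaryop_x86_fma.cpp may differ in detail.

// Hypothetical negation functor; mirrors the three call sites that
// unary_op_inplace uses on an FMA/AVX build (no AVX512 pack16 path,
// consistent with the 8/4/1-wide loops in the assembly above).
#include <immintrin.h>

struct unary_op_neg_sketch
{
    float func(const float& x) const
    {
        return -x; // scalar tail, one element per iteration
    }

    __m128 func_pack4(const __m128& x) const
    {
        return _mm_sub_ps(_mm_setzero_ps(), x); // negate 4 lanes
    }

    __m256 func_pack8(const __m256& x) const
    {
        return _mm256_sub_ps(_mm256_setzero_ps(), x); // negate 8 lanes
    }
};

Subtracting from zero is one common way to negate a vector of floats; flipping the sign bit with _mm256_xor_ps against a broadcast -0.0f mask is an equivalent alternative.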
2,113,326
int ncnn::unary_op_inplace<ncnn::UnaryOp_x86_fma_functor::unary_op_floor>(ncnn::Mat&, ncnn::Option const&)
static int unary_op_inplace(Mat& a, const Option& opt)
{
    Op op;

    int w = a.w;
    int h = a.h;
    int d = a.d;
    int channels = a.c;
    int elempack = a.elempack;
    int size = w * h * d * elempack;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < channels; q++)
    {
        float* ptr = a.channel(q);

        int i = 0;
#if __SSE2__
#if __AVX__
#if __AVX512F__
        for (; i + 15 < size; i += 16)
        {
            __m512 _p = _mm512_loadu_ps(ptr);
            _p = op.func_pack16(_p);
            _mm512_storeu_ps(ptr, _p);
            ptr += 16;
        }
#endif // __AVX512F__
        for (; i + 7 < size; i += 8)
        {
            __m256 _p = _mm256_loadu_ps(ptr);
            _p = op.func_pack8(_p);
            _mm256_storeu_ps(ptr, _p);
            ptr += 8;
        }
#endif // __AVX__
        for (; i + 3 < size; i += 4)
        {
            __m128 _p = _mm_load_ps(ptr);
            _p = op.func_pack4(_p);
            _mm_store_ps(ptr, _p);
            ptr += 4;
        }
#endif // __SSE2__
        for (; i < size; i++)
        {
            *ptr = op.func(*ptr);
            ptr++;
        }
    }

    return 0;
}
pushq %rbp movq %rsp, %rbp andq $-0x20, %rsp subq $0x200, %rsp # imm = 0x200 movq %rdi, 0xe0(%rsp) movq %rsi, 0xd8(%rsp) movq 0xe0(%rsp), %rax movl 0x2c(%rax), %eax movl %eax, 0xd0(%rsp) movq 0xe0(%rsp), %rax movl 0x30(%rax), %eax movl %eax, 0xcc(%rsp) movq 0xe0(%rsp), %rax movl 0x34(%rax), %eax movl %eax, 0xc8(%rsp) movq 0xe0(%rsp), %rax movl 0x38(%rax), %eax movl %eax, 0xc4(%rsp) movq 0xe0(%rsp), %rax movl 0x18(%rax), %eax movl %eax, 0xc0(%rsp) movl 0xd0(%rsp), %eax imull 0xcc(%rsp), %eax imull 0xc8(%rsp), %eax imull 0xc0(%rsp), %eax movl %eax, 0xbc(%rsp) movl $0x0, 0xb8(%rsp) movl 0xb8(%rsp), %eax cmpl 0xc4(%rsp), %eax jge 0x165bde7 movq 0xe0(%rsp), %rcx movl 0xb8(%rsp), %eax leaq 0x68(%rsp), %rdx movq %rdx, 0xf8(%rsp) movq %rcx, 0xf0(%rsp) movl %eax, 0xec(%rsp) movq 0xf0(%rsp), %rax movq %rax, 0x28(%rsp) movb $0x0, 0xeb(%rsp) movl 0x2c(%rax), %r9d movl 0x30(%rax), %r8d movl 0x34(%rax), %edi movq (%rax), %rsi movq 0x40(%rax), %rcx movslq 0xec(%rsp), %rdx imulq %rdx, %rcx imulq 0x10(%rax), %rcx addq %rcx, %rsi movq 0x10(%rax), %rdx movl 0x18(%rax), %ecx movq 0x20(%rax), %rax leaq 0x68(%rsp), %r10 movq %r10, 0x1a8(%rsp) movl %r9d, 0x1a4(%rsp) movl %r8d, 0x1a0(%rsp) movl %edi, 0x19c(%rsp) movq %rsi, 0x190(%rsp) movq %rdx, 0x188(%rsp) movl %ecx, 0x184(%rsp) movq %rax, 0x178(%rsp) movq 0x1a8(%rsp), %rcx movq %rcx, 0x20(%rsp) movq 0x190(%rsp), %rax movq %rax, (%rcx) movq $0x0, 0x8(%rcx) movq 0x188(%rsp), %rax movq %rax, 0x10(%rcx) movl 0x184(%rsp), %eax movl %eax, 0x18(%rcx) movq 0x178(%rsp), %rax movq %rax, 0x20(%rcx) movl $0x3, 0x28(%rcx) movl 0x1a4(%rsp), %eax movl %eax, 0x2c(%rcx) movl 0x1a0(%rsp), %eax movl %eax, 0x30(%rcx) movl $0x1, 0x34(%rcx) movl 0x19c(%rsp), %eax movl %eax, 0x38(%rcx) movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rdx imulq %rdx, %rax imulq 0x10(%rcx), %rax movq %rax, 0x1b8(%rsp) movl $0x10, 0x1b4(%rsp) movq 0x1b8(%rsp), %rax movslq 0x1b4(%rsp), %rdx addq %rdx, %rax subq $0x1, %rax xorl %edx, %edx subl 0x1b4(%rsp), %edx movslq %edx, %rdx andq %rdx, %rax xorl %edx, %edx divq 0x10(%rcx) movq %rax, %rdx movq 0x28(%rsp), %rax movq %rdx, 0x40(%rcx) movl 0x28(%rax), %ecx subl $0x1, %ecx movl %ecx, 0x90(%rsp) cmpl $0x4, 0x28(%rax) jne 0x165b9b8 movq 0x28(%rsp), %rcx movslq 0x2c(%rcx), %rax movslq 0x30(%rcx), %rcx imulq %rcx, %rax movq %rax, 0xa8(%rsp) movb $0x1, 0xeb(%rsp) testb $0x1, 0xeb(%rsp) jne 0x165bae4 leaq 0x68(%rsp), %rax movq %rax, 0x108(%rsp) movq 0x108(%rsp), %rax movq %rax, 0x1d8(%rsp) movq 0x1d8(%rsp), %rax movq %rax, 0x18(%rsp) cmpq $0x0, 0x8(%rax) je 0x165ba8a movq 0x18(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x1d4(%rsp) # imm = 0xFFFFFFFF movl 0x1d4(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x1d0(%rsp) cmpl $0x1, 0x1d0(%rsp) jne 0x165ba8a movq 0x18(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x165ba5b movq 0x18(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x165ba59 jmp 0x165ba88 movq 0x18(%rsp), %rax movq (%rax), %rax movq %rax, 0x1e0(%rsp) cmpq $0x0, 0x1e0(%rsp) je 0x165ba86 movq 0x1e0(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x165ba88 jmp 0x165ba8a movq 0x18(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x165bae2 movq %rax, %rdi callq 0x678a0 jmp 0x165bae4 leaq 0x68(%rsp), %rax movq %rax, 0x100(%rsp) movq 0x100(%rsp), %rax movq (%rax), %rax movq %rax, 0x8(%rsp) leaq 0x68(%rsp), %rax movq %rax, 
0x110(%rsp) movq 0x110(%rsp), %rax movq %rax, 0x1c8(%rsp) movq 0x1c8(%rsp), %rax movq %rax, 0x10(%rsp) cmpq $0x0, 0x8(%rax) je 0x165bbbd movq 0x10(%rsp), %rax movq 0x8(%rax), %rcx movl $0xffffffff, 0x1c4(%rsp) # imm = 0xFFFFFFFF movl 0x1c4(%rsp), %eax lock xaddl %eax, (%rcx) movl %eax, 0x1c0(%rsp) cmpl $0x1, 0x1c0(%rsp) jne 0x165bbbd movq 0x10(%rsp), %rax cmpq $0x0, 0x20(%rax) je 0x165bb8e movq 0x10(%rsp), %rax movq (%rax), %rsi movq 0x20(%rax), %rdi movq (%rdi), %rax movq 0x18(%rax), %rax vzeroupper callq *%rax jmp 0x165bb8c jmp 0x165bbbb movq 0x10(%rsp), %rax movq (%rax), %rax movq %rax, 0x1e8(%rsp) cmpq $0x0, 0x1e8(%rsp) je 0x165bbb9 movq 0x1e8(%rsp), %rdi vzeroupper callq 0x5f480 jmp 0x165bbbb jmp 0x165bbbd movq 0x10(%rsp), %rax movq $0x0, (%rax) movq $0x0, 0x10(%rax) movl $0x0, 0x18(%rax) movl $0x0, 0x28(%rax) movl $0x0, 0x2c(%rax) movl $0x0, 0x30(%rax) movl $0x0, 0x34(%rax) movl $0x0, 0x38(%rax) movq $0x0, 0x40(%rax) movq $0x0, 0x8(%rax) jmp 0x165bc15 movq %rax, %rdi callq 0x678a0 movq 0x8(%rsp), %rax movq %rax, 0xb0(%rsp) movl $0x0, 0x64(%rsp) movl 0x64(%rsp), %eax addl $0x7, %eax cmpl 0xbc(%rsp), %eax jge 0x165bcd0 movq 0xb0(%rsp), %rax movq %rax, 0x118(%rsp) movq 0x118(%rsp), %rax vmovups (%rax), %ymm0 vmovaps %ymm0, 0x40(%rsp) leaq 0xd7(%rsp), %rdi leaq 0x40(%rsp), %rsi callq 0x1661dd0 vmovaps %ymm0, 0x40(%rsp) movq 0xb0(%rsp), %rax vmovaps 0x40(%rsp), %ymm0 movq %rax, 0x150(%rsp) vmovaps %ymm0, 0x120(%rsp) vmovaps 0x120(%rsp), %ymm0 movq 0x150(%rsp), %rax vmovups %ymm0, (%rax) movq 0xb0(%rsp), %rax addq $0x20, %rax movq %rax, 0xb0(%rsp) movl 0x64(%rsp), %eax addl $0x8, %eax movl %eax, 0x64(%rsp) jmp 0x165bc2a jmp 0x165bcd2 movl 0x64(%rsp), %eax addl $0x3, %eax cmpl 0xbc(%rsp), %eax jge 0x165bd7b movq 0xb0(%rsp), %rax movq %rax, 0x158(%rsp) movq 0x158(%rsp), %rax vmovaps (%rax), %xmm0 vmovaps %xmm0, 0x30(%rsp) leaq 0xd7(%rsp), %rdi leaq 0x30(%rsp), %rsi vzeroupper callq 0x1661df0 vmovaps %xmm0, 0x30(%rsp) movq 0xb0(%rsp), %rax vmovaps 0x30(%rsp), %xmm0 movq %rax, 0x170(%rsp) vmovaps %xmm0, 0x160(%rsp) vmovaps 0x160(%rsp), %xmm0 movq 0x170(%rsp), %rax vmovaps %xmm0, (%rax) movq 0xb0(%rsp), %rax addq $0x10, %rax movq %rax, 0xb0(%rsp) movl 0x64(%rsp), %eax addl $0x4, %eax movl %eax, 0x64(%rsp) jmp 0x165bcd2 jmp 0x165bd7d movl 0x64(%rsp), %eax cmpl 0xbc(%rsp), %eax jge 0x165bdcf movq 0xb0(%rsp), %rsi leaq 0xd7(%rsp), %rdi vzeroupper callq 0x1661e20 movq 0xb0(%rsp), %rax vmovss %xmm0, (%rax) movq 0xb0(%rsp), %rax addq $0x4, %rax movq %rax, 0xb0(%rsp) movl 0x64(%rsp), %eax addl $0x1, %eax movl %eax, 0x64(%rsp) jmp 0x165bd7d jmp 0x165bdd1 movl 0xb8(%rsp), %eax addl $0x1, %eax movl %eax, 0xb8(%rsp) jmp 0x165b7fa xorl %eax, %eax movq %rbp, %rsp popq %rbp vzeroupper retq nopw %cs:(%rax,%rax)
ysh329[P]ncnn[P]build_O0[P]examples[P]scrfd.asm_src.json
O0
ysh329[P]ncnn/build_O0/src/layer/x86/unaryop_x86_fma.cpp
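As with the previous record, the unary_op_floor functor body is not included in the dump. Below is a minimal sketch under the assumption that the vector paths use the SSE4.1/AVX round-down intrinsics and the scalar tail calls floor — the struct name is hypothetical and the real ncnn functor may be written differently.

// Hypothetical floor functor for the same pack8/pack4/scalar call sites.
#include <immintrin.h>
#include <math.h>

struct unary_op_floor_sketch
{
    float func(const float& x) const
    {
        return (float)floor(x); // scalar tail
    }

    __m128 func_pack4(const __m128& x) const
    {
        return _mm_floor_ps(x); // SSE4.1: round 4 lanes toward -inf
    }

    __m256 func_pack8(const __m256& x) const
    {
        return _mm256_floor_ps(x); // AVX: round 8 lanes toward -inf
    }
};

Both intrinsics compile to a single roundps/vroundps instruction, so the vector body needs no library call, unlike the scalar tail.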