Loop Id: 66 | Module: exec | Source: advec_cell_kernel.f90:110-155 [...] | Coverage: 3.43%
0x4196a0 LEA -0x1(%RAX),%R13 |
0x4196a4 LEA -0x2(%RAX),%R15D |
0x4196a8 MOVSXD %R15D,%RCX |
0x4196ab MOV %R13,%RDX |
0x4196ae MOV 0xe8(%RSP),%RDI [11] |
0x4196b6 VMOVSD (%RBX,%RAX,8),%XMM28 [9] |
0x4196bd VANDPD %XMM6,%XMM27,%XMM1 |
0x4196c3 LEA (%RDI,%R13,1),%R15 |
0x4196c7 MOV 0xd0(%RSP),%RDI [11] |
0x4196cf VDIVSD (%RSI,%R15,8),%XMM1,%XMM2 [3] |
0x4196d5 VADDSD %XMM4,%XMM2,%XMM12 |
0x4196d9 VSUBSD %XMM2,%XMM11,%XMM16 |
0x4196df ADD %RDI,%RDX |
0x4196e2 VMOVQ %XMM9,%RDI |
0x4196e7 VDIVSD (%RDI,%RDX,8),%XMM28,%XMM29 [4] |
0x4196ee LEA (%R10,%RCX,1),%RDI |
0x4196f2 LEA (%R10,%R13,1),%RDX |
0x4196f6 VMULSD %XMM12,%XMM29,%XMM23 |
0x4196fc VMOVSD (%R12,%RDX,8),%XMM17 [12] |
0x419703 VSUBSD (%R12,%RDI,8),%XMM17,%XMM0 [7] |
0x41970a LEA (%R10,%R8,1),%RDI |
0x41970e VMOVSD (%R12,%RDI,8),%XMM15 [5] |
0x419714 VSUBSD %XMM17,%XMM15,%XMM1 |
0x41971a VCMPSD $0x6,%XMM3,%XMM1,%XMM12 |
0x41971f VMULSD %XMM0,%XMM1,%XMM18 |
0x419725 VBLENDVPD %XMM12,%XMM4,%XMM8,%XMM15 |
0x41972b VCOMISD %XMM3,%XMM18 |
0x419731 JBE 419766 |
0x419733 VANDPD %XMM6,%XMM1,%XMM1 |
0x419737 VANDPD %XMM6,%XMM0,%XMM0 |
0x41973b VSUBSD %XMM2,%XMM4,%XMM2 |
0x41973f VMINSD %XMM1,%XMM0,%XMM19 |
0x419745 VMULSD %XMM16,%XMM1,%XMM12 |
0x41974b VFMADD132SD %XMM23,%XMM12,%XMM0 |
0x419751 VMULSD %XMM10,%XMM0,%XMM1 |
0x419756 VMINSD %XMM19,%XMM1,%XMM0 |
0x41975c VMULSD %XMM2,%XMM0,%XMM12 |
0x419760 VFMADD231SD %XMM15,%XMM12,%XMM17 |
0x419766 MOV 0xd8(%RSP),%RDI [11] |
0x41976e ADD %R9,%R13 |
0x419771 ADD %R9,%R8 |
0x419774 ADD %R9,%RCX |
0x419777 VMULSD %XMM27,%XMM17,%XMM0 |
0x41977d VMOVSD %XMM0,(%RDI,%RAX,8) [13] |
0x419782 VMOVSD (%R11,%R13,8),%XMM22 [10] |
0x419789 VMOVSD (%R11,%R8,8),%XMM15 [1] |
0x41978f VSUBSD %XMM22,%XMM15,%XMM1 |
0x419795 VCMPSD $0x6,%XMM3,%XMM1,%XMM12 |
0x41979a VSUBSD (%R11,%RCX,8),%XMM22,%XMM2 [8] |
0x4197a1 VMOVSD (%R12,%RDX,8),%XMM26 [12] |
0x4197a8 VMULSD %XMM2,%XMM1,%XMM15 |
0x4197ac VBLENDVPD %XMM12,%XMM4,%XMM8,%XMM12 |
0x4197b2 VMOVSD (%RSI,%R15,8),%XMM21 [3] |
0x4197b9 VCOMISD %XMM3,%XMM15 |
0x4197bd JBE 419808 |
0x4197bf VANDPD %XMM6,%XMM1,%XMM1 |
0x4197c3 VANDPD %XMM6,%XMM2,%XMM2 |
0x4197c7 VANDPD %XMM6,%XMM0,%XMM24 |
0x4197cd VMULSD %XMM26,%XMM21,%XMM30 |
0x4197d3 VMINSD %XMM1,%XMM2,%XMM20 |
0x4197d9 VMULSD %XMM16,%XMM1,%XMM15 |
0x4197df VFMADD132SD %XMM23,%XMM15,%XMM2 |
0x4197e5 VDIVSD %XMM30,%XMM24,%XMM27 |
0x4197eb VSUBSD %XMM27,%XMM4,%XMM28 |
0x4197f1 VMULSD %XMM10,%XMM2,%XMM1 |
0x4197f6 VMINSD %XMM20,%XMM1,%XMM2 |
0x4197fc VMULSD %XMM2,%XMM28,%XMM15 |
0x419802 VFMADD231SD %XMM12,%XMM15,%XMM22 |
0x419808 MOV 0xe0(%RSP),%R8 [11] |
0x419810 VMULSD %XMM0,%XMM22,%XMM0 |
0x419816 VMOVSD %XMM0,(%R8,%RAX,8) [6] |
0x41981c INC %RAX |
0x41981f CMP %EAX,%R14D |
0x419822 JL 419860 |
0x419824 MOV 0xf0(%RSP),%R13 [11] |
0x41982c MOV %RAX,%R8 |
0x41982f VMOVSD (%R13,%RAX,8),%XMM27 [2] |
0x419837 VCOMISD %XMM3,%XMM27 |
0x41983d JA 4196a0 |
0x419843 LEA 0x1(%RAX),%EDX |
0x419846 MOV %RAX,%R13 |
0x419849 LEA -0x1(%RAX),%R8 |
0x41984d CMP %R14D,%EDX |
0x419850 CMOVG %R14D,%EDX |
0x419854 MOVSXD %EDX,%RDX |
0x419857 MOV %RDX,%RCX |
0x41985a JMP 4196ae |
/beegfs/hackathon/users/eoseret/qaas_runs/170-861-0321/intel/CloverLeafFC/build/CloverLeafFC/CloverLeaf_ref/kernels/advec_cell_kernel.f90: 110 - 155 |
-------------------------------------------------------------------------------- |
110: DO j=x_min,x_max+2 |
111: |
112: IF(vol_flux_x(j,k).GT.0.0)THEN |
113: upwind =j-2 |
114: donor =j-1 |
115: downwind =j |
116: dif =donor |
117: ELSE |
118: upwind =MIN(j+1,x_max+2) |
[...] |
124: sigmat=ABS(vol_flux_x(j,k))/pre_vol(donor,k) |
125: sigma3=(1.0_8+sigmat)*(vertexdx(j)/vertexdx(dif)) |
126: sigma4=2.0_8-sigmat |
127: |
128: sigma=sigmat |
129: sigmav=sigmat |
130: |
131: diffuw=density1(donor,k)-density1(upwind,k) |
132: diffdw=density1(downwind,k)-density1(donor,k) |
133: wind=1.0_8 |
134: IF(diffdw.LE.0.0) wind=-1.0_8 |
135: IF(diffuw*diffdw.GT.0.0)THEN |
136: limiter=(1.0_8-sigmav)*wind*MIN(ABS(diffuw),ABS(diffdw)& |
137: ,one_by_six*(sigma3*ABS(diffuw)+sigma4*ABS(diffdw))) |
138: ELSE |
139: limiter=0.0 |
140: ENDIF |
141: mass_flux_x(j,k)=vol_flux_x(j,k)*(density1(donor,k)+limiter) |
142: |
143: sigmam=ABS(mass_flux_x(j,k))/(density1(donor,k)*pre_vol(donor,k)) |
144: diffuw=energy1(donor,k)-energy1(upwind,k) |
145: diffdw=energy1(downwind,k)-energy1(donor,k) |
146: wind=1.0_8 |
147: IF(diffdw.LE.0.0) wind=-1.0_8 |
148: IF(diffuw*diffdw.GT.0.0)THEN |
149: limiter=(1.0_8-sigmam)*wind*MIN(ABS(diffuw),ABS(diffdw)& |
150: ,one_by_six*(sigma3*ABS(diffuw)+sigma4*ABS(diffdw))) |
151: ELSE |
152: limiter=0.0 |
153: ENDIF |
154: |
155: ener_flux(j,k)=mass_flux_x(j,k)*(energy1(donor,k)+limiter) |
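The two limiter blocks (lines 133-140 and 146-153) are where the compiler keeps scalar control flow: in the listing above the wind selection already becomes a VCMPSD/VBLENDVPD mask, but each limiter expression sits behind a VCOMISD + JBE branch, which is one likely reason the loop stays scalar (the CQA table below projects 2.73x if the FP arithmetic were vectorized). Below is a minimal sketch, not taken from CloverLeaf, of the same limiter written branch-free with SIGN/MERGE so a vectorizer can lower the whole body to masked blends; the argument names mirror the kernel and one_by_six is assumed to equal 1/6.

PURE FUNCTION van_leer_limiter(diffuw, diffdw, sigma, sigma3, sigma4) RESULT(limiter)
  ! Hypothetical helper, not part of the CloverLeaf source.
  REAL(KIND=8), INTENT(IN) :: diffuw, diffdw, sigma, sigma3, sigma4
  REAL(KIND=8) :: limiter, wind
  REAL(KIND=8), PARAMETER :: one_by_six = 1.0_8/6.0_8

  ! SIGN reproduces the wind flag of lines 133-134 / 146-147; it differs from the
  ! original only at diffdw == 0.0, where the MERGE mask forces limiter to zero anyway.
  wind = SIGN(1.0_8, diffdw)

  ! MERGE evaluates the limited slope unconditionally and keeps it only where
  ! diffuw*diffdw > 0, mirroring lines 135-140 / 148-153 without a branch.
  limiter = MERGE((1.0_8-sigma)*wind*MIN(ABS(diffuw), ABS(diffdw), &
                  one_by_six*(sigma3*ABS(diffuw)+sigma4*ABS(diffdw))), &
                  0.0_8, diffuw*diffdw > 0.0_8)
END FUNCTION van_leer_limiter

With a helper of this shape, the bodies at lines 131-141 and 144-155 reduce to straight-line arithmetic plus one call each (e.g. limiter = van_leer_limiter(diffuw, diffdw, sigmav, sigma3, sigma4), and the sigmam variant for the energy flux), leaving only the upwind/donor index selection at lines 112-123 as control flow inside the j loop.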
Path / |
Metric | Value |
---|---|
CQA speedup if no scalar integer | 1.00 |
CQA speedup if FP arith vectorized | 2.73 |
CQA speedup if fully vectorized | 4.00 |
CQA speedup if no inter-iteration dependency | NA |
CQA speedup if next bottleneck killed | 1.36 |
Bottlenecks | micro-operation queue, P8, P9 |
Function | __advec_cell_kernel_module_MOD_advec_cell_kernel._omp_fn.0 |
Source | advec_cell_kernel.f90:110-118,advec_cell_kernel.f90:124-155 |
Source loop unroll info | not unrolled or unrolled with no peel/tail loop |
Source loop unroll confidence level | max |
Unroll/vectorization loop type | NA |
Unroll factor | NA |
CQA cycles | 15.00 |
CQA cycles if no scalar integer | 15.00 |
CQA cycles if FP arith vectorized | 5.50 |
CQA cycles if fully vectorized | 3.75 |
Front-end cycles | 15.00 |
DIV/SQRT cycles | 5.25 |
P0 cycles | 5.25 |
P1 cycles | 5.00 |
P2 cycles | 5.00 |
P3 cycles | 2.50 |
P4 cycles | 6.33 |
P5 cycles | 6.33 |
P6 cycles | 6.33 |
P7 cycles | 11.00 |
P8 cycles | 11.00 |
P9 cycles | 11.00 |
P10 cycles | 11.00 |
P11 cycles | 3.00 |
P12 cycles | 3.00 |
P13 cycles | 15.00 |
Inter-iter dependencies cycles | NA |
FE+BE cycles (UFS) | NA |
Stall cycles (UFS) | NA |
Nb insns | 87.00 |
Nb uops | 90.00 |
Nb loads | 17.00 |
Nb stores | 2.00 |
Nb stack references | 5.00 |
FLOP/cycle | 2.07 |
Nb FLOP add-sub | 8.00 |
Nb FLOP mul | 12.00 |
Nb FLOP fma | 4.00 |
Nb FLOP div | 3.00 |
Nb FLOP rcp | 0.00 |
Nb FLOP sqrt | 0.00 |
Nb FLOP rsqrt | 0.00 |
Bytes/cycle | 10.13 |
Bytes prefetched | 0.00 |
Bytes loaded | 136.00 |
Bytes stored | 16.00 |
Stride 0 | NA |
Stride 1 | NA |
Stride n | NA |
Stride unknown | NA |
Stride indirect | NA |
Vectorization ratio all | 14.29 |
Vectorization ratio load | 0.00 |
Vectorization ratio store | 0.00 |
Vectorization ratio mul | 0.00 |
Vectorization ratio add_sub | 0.00 |
Vectorization ratio fma | 0.00 |
Vectorization ratio div_sqrt | 0.00 |
Vectorization ratio other | 42.11 |
Vector-efficiency ratio all | 14.17 |
Vector-efficiency ratio load | 12.50 |
Vector-efficiency ratio store | 12.50 |
Vector-efficiency ratio mul | 12.50 |
Vector-efficiency ratio add_sub | 12.50 |
Vector-efficiency ratio fma | 12.50 |
Vector-efficiency ratio div_sqrt | 12.50 |
Vector-efficiency ratio other | 17.43 |
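The speedup projections above are the cycle estimates a few rows down folded back into the 15.00-cycle baseline: 15.00/5.50 ≈ 2.73 if the FP arithmetic were vectorized, 15.00/3.75 = 4.00 if loads and stores were vectorized as well, and 15.00/11.00 ≈ 1.36 for "next bottleneck killed", i.e. once the 15.00-cycle micro-operation-queue cap is lifted and the 11.00 cycles of dispatch pressure on P7-P10 become the limit.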
Path / |
Function | __advec_cell_kernel_module_MOD_advec_cell_kernel._omp_fn.0 |
Source file and lines | advec_cell_kernel.f90:110-155 |
Module | exec |
nb instructions | 87 |
nb uops | 90 |
loop length | 447 |
used x86 registers | 15 |
used mmx registers | 0 |
used xmm registers | 26 |
used ymm registers | 0 |
used zmm registers | 0 |
nb stack references | 5 |
ADD-SUB / MUL ratio | 0.67 |
micro-operation queue | 15.00 cycles |
front end | 15.00 cycles |
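The micro-operation queue figure follows from the counts just above: 90 uops issued at the six-per-cycle width the model assumes gives the 15.00-cycle front-end bound (90/6 = 15.00), which is higher than the 11.00 cycles of pressure on the busiest FP pipes in the port table below and is why the micro-operation queue leads the bottleneck list.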
Port | ALU0/BRU0 | ALU1 | ALU2 | ALU3 | BRU1 | AGU0 | AGU1 | AGU2 | FP0 | FP1 | FP2 | FP3 | FP4 | FP5 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
uops | 5.25 | 5.25 | 5.00 | 5.00 | 2.50 | 6.33 | 6.33 | 6.33 | 11.00 | 11.00 | 11.00 | 11.00 | 3.00 | 3.00 |
cycles | 5.25 | 5.25 | 5.00 | 5.00 | 2.50 | 6.33 | 6.33 | 6.33 | 11.00 | 11.00 | 11.00 | 11.00 | 3.00 | 3.00 |
Cycles executing div or sqrt instructions | 15.00 |
Front-end | 15.00 |
Dispatch | 11.00 |
DIV/SQRT | 15.00 |
Overall L1 | 15.00 |
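The DIV/SQRT line above sits at the same 15.00 cycles as the front end, and the instruction-level table further down prices each of the three VDIVSD instructions at a reciprocal throughput of 5 cycles on FP0/FP1. Two of those divides (source lines 124 and 143) share the factor pre_vol(donor,k); the hypothetical scalar helper below, not taken from CloverLeaf and only valid where a slightly different rounding of sigmat/sigmam is acceptable, shows how they could share a single reciprocal.

PURE SUBROUTINE shared_recip_sigmas(vol_flux, mass_flux, donor_density, donor_pre_vol, sigmat, sigmam)
  ! Hypothetical helper, not part of the CloverLeaf source.
  REAL(KIND=8), INTENT(IN)  :: vol_flux, mass_flux, donor_density, donor_pre_vol
  REAL(KIND=8), INTENT(OUT) :: sigmat, sigmam
  REAL(KIND=8) :: recip

  recip  = 1.0_8/(donor_density*donor_pre_vol)  ! one VDIVSD instead of two
  sigmat = ABS(vol_flux)*donor_density*recip    ! == ABS(vol_flux_x(j,k))/pre_vol(donor,k)
  sigmam = ABS(mass_flux)*recip                 ! == ABS(mass_flux_x(j,k))/(density1(donor,k)*pre_vol(donor,k))
END SUBROUTINE shared_recip_sigmas

In the kernel itself sigmat is needed before mass_flux_x exists (line 141), so in practice only recip would be hoisted above line 124 and the two multiplies placed where the divides now stand; the third divide, vertexdx(j)/vertexdx(dif) on line 125, is left untouched.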
Vectorization ratios (INT)
all | 0% |
load | NA (no load vectorizable/vectorized instructions) |
store | NA (no store vectorizable/vectorized instructions) |
mul | NA (no mul vectorizable/vectorized instructions) |
add-sub | NA (no add-sub vectorizable/vectorized instructions) |
fma | NA (no fma vectorizable/vectorized instructions) |
other | 0% |
Vectorization ratios (FP)
all | 14% |
load | 0% |
store | 0% |
mul | 0% |
add-sub | 0% |
fma | 0% |
div/sqrt | 0% |
other | 47% |
Vectorization ratios (INT+FP)
all | 14% |
load | 0% |
store | 0% |
mul | 0% |
add-sub | 0% |
fma | 0% |
div/sqrt | 0% |
other | 42% |
Vector-efficiency ratios (INT)
all | 9% |
load | NA (no load vectorizable/vectorized instructions) |
store | NA (no store vectorizable/vectorized instructions) |
mul | NA (no mul vectorizable/vectorized instructions) |
add-sub | NA (no add-sub vectorizable/vectorized instructions) |
fma | NA (no fma vectorizable/vectorized instructions) |
other | 9% |
Vector-efficiency ratios (FP)
all | 14% |
load | 12% |
store | 12% |
mul | 12% |
add-sub | 12% |
fma | 12% |
div/sqrt | 12% |
other | 18% |
Vector-efficiency ratios (INT+FP)
all | 14% |
load | 12% |
store | 12% |
mul | 12% |
add-sub | 12% |
fma | 12% |
div/sqrt | 12% |
other | 17% |
Instruction | Nb FU | ALU0/BRU0 | ALU1 | ALU2 | ALU3 | BRU1 | AGU0 | AGU1 | AGU2 | FP0 | FP1 | FP2 | FP3 | FP4 | FP5 | Latency | Recip. throughput |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
LEA -0x1(%RAX),%R13 | 1 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 |
LEA -0x2(%RAX),%R15D | 1 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 |
MOVSXD %R15D,%RCX | 1 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 |
MOV %R13,%RDX | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
MOV 0xe8(%RSP),%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.33 |
VMOVSD (%RBX,%RAX,8),%XMM28 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VANDPD %XMM6,%XMM27,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 |
LEA (%RDI,%R13,1),%R15 | 1 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 |
MOV 0xd0(%RSP),%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.33 |
VDIVSD (%RSI,%R15,8),%XMM1,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 13 | 5 |
VADDSD %XMM4,%XMM2,%XMM12 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 |
VSUBSD %XMM2,%XMM11,%XMM16 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 |
ADD %RDI,%RDX | 1 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 |
VMOVQ %XMM9,%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 |
VDIVSD (%RDI,%RDX,8),%XMM28,%XMM29 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 13 | 5 |
LEA (%R10,%RCX,1),%RDI | 1 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 |
LEA (%R10,%R13,1),%RDX | 1 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 |
VMULSD %XMM12,%XMM29,%XMM23 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VMOVSD (%R12,%RDX,8),%XMM17 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VSUBSD (%R12,%RDI,8),%XMM17,%XMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 |
LEA (%R10,%R8,1),%RDI | 1 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 |
VMOVSD (%R12,%RDI,8),%XMM15 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VSUBSD %XMM17,%XMM15,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 |
VCMPSD $0x6,%XMM3,%XMM1,%XMM12 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 2 | 0.50 |
VMULSD %XMM0,%XMM1,%XMM18 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VBLENDVPD %XMM12,%XMM4,%XMM8,%XMM15 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VCOMISD %XMM3,%XMM18 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0.50 | 6 | 1 |
JBE 419766 <__advec_cell_kernel_module_MOD_advec_cell_kernel._omp_fn.0+0x1376> | 1 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50-1 |
VANDPD %XMM6,%XMM1,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 |
VANDPD %XMM6,%XMM0,%XMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 |
VSUBSD %XMM2,%XMM4,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 |
VMINSD %XMM1,%XMM0,%XMM19 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 2 | 0.50 |
VMULSD %XMM16,%XMM1,%XMM12 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VFMADD132SD %XMM23,%XMM12,%XMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VMULSD %XMM10,%XMM0,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VMINSD %XMM19,%XMM1,%XMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 2 | 0.50 |
VMULSD %XMM2,%XMM0,%XMM12 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VFMADD231SD %XMM15,%XMM12,%XMM17 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 0.50 |
MOV 0xd8(%RSP),%RDI | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.33 |
ADD %R9,%R13 | 1 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 |
ADD %R9,%R8 | 1 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 |
ADD %R9,%RCX | 1 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 |
VMULSD %XMM27,%XMM17,%XMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VMOVSD %XMM0,(%RDI,%RAX,8) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 |
VMOVSD (%R11,%R13,8),%XMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VMOVSD (%R11,%R8,8),%XMM15 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VSUBSD %XMM22,%XMM15,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 |
VCMPSD $0x6,%XMM3,%XMM1,%XMM12 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 2 | 0.50 |
VSUBSD (%R11,%RCX,8),%XMM22,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 |
VMOVSD (%R12,%RDX,8),%XMM26 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VMULSD %XMM2,%XMM1,%XMM15 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VBLENDVPD %XMM12,%XMM4,%XMM8,%XMM12 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VMOVSD (%RSI,%R15,8),%XMM21 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VCOMISD %XMM3,%XMM15 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0.50 | 6 | 1 |
JBE 419808 <__advec_cell_kernel_module_MOD_advec_cell_kernel._omp_fn.0+0x1418> | 1 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50-1 |
VANDPD %XMM6,%XMM1,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 |
VANDPD %XMM6,%XMM2,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 |
VANDPD %XMM6,%XMM0,%XMM24 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 1 | 0.25 |
VMULSD %XMM26,%XMM21,%XMM30 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VMINSD %XMM1,%XMM2,%XMM20 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 2 | 0.50 |
VMULSD %XMM16,%XMM1,%XMM15 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VFMADD132SD %XMM23,%XMM15,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 0.50 |
VDIVSD %XMM30,%XMM24,%XMM27 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 13 | 5 |
VSUBSD %XMM27,%XMM4,%XMM28 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 3 | 0.50 |
VMULSD %XMM10,%XMM2,%XMM1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VMINSD %XMM20,%XMM1,%XMM2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 2 | 0.50 |
VMULSD %XMM2,%XMM28,%XMM15 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VFMADD231SD %XMM12,%XMM15,%XMM22 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 4 | 0.50 |
MOV 0xe0(%RSP),%R8 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.33 |
VMULSD %XMM0,%XMM22,%XMM0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0 | 0 | 0 | 0 | 3 | 0.50 |
VMOVSD %XMM0,(%R8,%RAX,8) | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 1 | 1 |
INC %RAX | 1 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 |
CMP %EAX,%R14D | 1 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 |
JL 419860 <__advec_cell_kernel_module_MOD_advec_cell_kernel._omp_fn.0+0x1470> | 1 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50-1 |
MOV 0xf0(%RSP),%R13 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.33 |
MOV %RAX,%R8 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
VMOVSD (%R13,%RAX,8),%XMM27 | 1 | 0 | 0 | 0 | 0 | 0 | 0.33 | 0.33 | 0.33 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
VCOMISD %XMM3,%XMM27 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.50 | 0.50 | 0.50 | 0.50 | 6 | 1 |
JA 4196a0 <__advec_cell_kernel_module_MOD_advec_cell_kernel._omp_fn.0+0x12b0> | 1 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50-1 |
LEA 0x1(%RAX),%EDX | 1 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 |
MOV %RAX,%R13 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
LEA -0x1(%RAX),%R8 | 1 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 |
CMP %R14D,%EDX | 1 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 |
CMOVG %R14D,%EDX | 1 | 0.50 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.50 |
MOVSXD %EDX,%RDX | 1 | 0.25 | 0.25 | 0.25 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 |
MOV %RDX,%RCX | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.17 |
JMP 4196ae <__advec_cell_kernel_module_MOD_advec_cell_kernel._omp_fn.0+0x12be> | 1 | 0.50 | 0 | 0 | 0 | 0.50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |