Lines Matching refs:r13

481         cmp     $128, %r13
1146 # clobbering r10, r11, r12, r13, r14, r15
1151 push %r13
1166 mov arg4, %r13 # save the number of bytes of plaintext/ciphertext
1167 and $-16, %r13 # r13 = r13 - (r13 mod 16)
1169 mov %r13, %r12
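
Lines 1166-1169 set up r13 as the count of bytes that fill complete 16-byte blocks: "and $-16" clears the low four bits of the total length, and r12 then gets a copy of that rounded value for the surrounding block-count bookkeeping. A minimal C sketch of the same arithmetic (variable names are hypothetical, chosen only to mirror the registers; this is not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t arg4 = 100;                 /* total plaintext/ciphertext bytes (example value) */

        uint64_t r13 = arg4 & ~(uint64_t)15; /* and $-16, %r13: bytes belonging to whole 16-byte blocks */
        uint64_t r12 = r13;                  /* mov %r13, %r12: keep a copy of the rounded length */
        uint64_t tail = arg4 & 15;           /* and $15 (used later at 1281): leftover partial-block bytes */

        printf("full-block bytes = %llu, tail = %llu\n",
               (unsigned long long)r12, (unsigned long long)tail);
        return 0;
    }
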
1191 sub $16*7, %r13
1196 sub $16*6, %r13
1201 sub $16*5, %r13
1206 sub $16*4, %r13
1211 sub $16*3, %r13
1216 sub $16*2, %r13
1221 sub $16*1, %r13
1229 cmp $0, %r13
1232 sub $128, %r13
1252 sub $128, %r13
1264 sub $128, %r13
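
The "sub $16*7" through "sub $16*1" lines and the following "sub $128" lines show r13 being used as a running count of unprocessed whole-block bytes: one of the 16*N subtractions peels off the odd 1-7 blocks (apparently so that what remains is a multiple of 8 blocks), and the main loop then consumes 128 bytes, i.e. 8 blocks, per iteration until r13 reaches zero. A C sketch of that counter bookkeeping, under the assumption that r13 starts as the rounded-down byte count (the function name is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical model of the r13 bookkeeping: peel off the odd 1..7 blocks,
     * then process 8 blocks (128 bytes) per main-loop iteration. */
    static void process_blocks(uint64_t full_block_bytes)
    {
        uint64_t r13 = full_block_bytes;          /* bytes still to process (multiple of 16) */
        uint64_t initial = (r13 / 16) % 8;        /* blocks handled before the 8-block loop */

        r13 -= initial * 16;                      /* one of the sub $16*N, %r13 lines */
        printf("initial blocks: %llu\n", (unsigned long long)initial);

        while (r13 != 0) {                        /* cmp $0, %r13 */
            /* the real code encrypts and GHASHes 8 blocks here */
            r13 -= 128;                           /* sub $128, %r13 */
            printf("8-block iteration, %llu bytes left\n", (unsigned long long)r13);
        }
    }

    int main(void)
    {
        process_blocks(160);   /* example: 10 whole blocks -> 2 initial blocks + one 8-block pass */
        return 0;
    }
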
1280 mov arg4, %r13
1281 and $15, %r13 # r13 = (arg4 mod 16)
1293 add %r13, %r11
1297 sub %r13, %r12 # adjust the shuffle mask pointer to be
1298 # able to shift 16-r13 bytes (r13 is the
1301 vpshufb %xmm2, %xmm1, %xmm1 # shift right 16-r13 bytes
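
Lines 1280-1301 start the handling of the final partial block: r13 becomes arg4 mod 16, the data offset in r11 is adjusted by r13, and the shuffle-mask pointer in r12 is moved back by r13 bytes so that the vpshufb at 1301 performs a byte-wise right shift of a 16-byte block by 16-r13 positions, zero-filling the vacated top bytes. A C sketch of the effect of that shift, written as a plain byte loop rather than vpshufb (the function name is hypothetical):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* Emulate "shift right 16-r13 bytes": move the top r13 bytes of the block
     * down to the low positions and zero-fill the rest. This is the effect the
     * vpshufb with the adjusted shuffle-mask pointer achieves. */
    static void shift_right_bytes(uint8_t out[16], const uint8_t in[16], unsigned r13)
    {
        unsigned shift = 16 - r13;                /* number of byte positions to shift */

        memset(out, 0, 16);
        for (unsigned i = 0; i + shift < 16; i++)
            out[i] = in[i + shift];               /* byte i <- byte i + (16 - r13) */
    }

    int main(void)
    {
        uint8_t in[16], out[16];

        for (unsigned i = 0; i < 16; i++)
            in[i] = (uint8_t)i;

        shift_right_bytes(out, in, 5);            /* e.g. 5 leftover bytes in the last block */
        for (unsigned i = 0; i < 16; i++)
            printf("%02x ", out[i]);
        printf("\n");
        return 0;
    }
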
1306 mov arg4, %r13
1307 and $15, %r13 # r13 = (arg4 mod 16)
1320 sub %r13, %r12 # adjust the shuffle mask pointer to be
1321 # able to shift 16-r13 bytes (r13 is the
1328 cmp %r13, %r11
1340 # mask out top 16-r13 bytes of xmm9
1341 vpand %xmm1, %xmm9, %xmm9 # mask out top 16-r13 bytes of xmm9
1347 sub %r13, %r11
1352 # mask out top 16-r13 bytes of xmm9
1353 vpand %xmm1, %xmm9, %xmm9 # mask out top 16-r13 bytes of xmm9
1358 sub %r13, %r11
1365 # output r13 Bytes
1367 cmp $8, %r13
1374 sub $8, %r13
1380 sub $1, %r13
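
Lines 1365-1380 emit the final r13 bytes of output: if at least 8 bytes remain they are written as one 8-byte store and r13 is reduced by 8, then the rest is written one byte at a time, decrementing r13 until it reaches zero. A C sketch of that store pattern, assuming the partial output block sits in a small buffer (the helper name is hypothetical):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* Hypothetical model: write the low r13 bytes of a 16-byte block to dst,
     * 8 bytes at a time first, then single bytes, as the tail code does. */
    static void output_r13_bytes(uint8_t *dst, const uint8_t block[16], unsigned r13)
    {
        unsigned off = 0;

        if (r13 >= 8) {                    /* cmp $8, %r13 */
            memcpy(dst, block, 8);         /* one 8-byte store */
            off += 8;
            r13 -= 8;                      /* sub $8, %r13 */
        }
        while (r13 > 0) {                  /* byte-at-a-time loop */
            dst[off] = block[off];         /* single-byte store */
            off++;
            r13--;                         /* sub $1, %r13 */
        }
    }

    int main(void)
    {
        uint8_t block[16], out[16] = { 0 };

        for (unsigned i = 0; i < 16; i++)
            block[i] = (uint8_t)(0xA0 + i);

        output_r13_bytes(out, block, 11);  /* e.g. 11 trailing bytes */
        for (unsigned i = 0; i < 11; i++)
            printf("%02x ", out[i]);
        printf("\n");
        return 0;
    }
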
1437 pop %r13
1450 push %r13
1487 pop %r13
1741 cmp $128, %r13
2423 # clobbering r10, r11, r12, r13, r14, r15
2428 push %r13
2443 … mov arg4, %r13 # save the number of bytes of plaintext/ciphertext
2444 and $-16, %r13 # r13 = r13 - (r13 mod 16)
2446 mov %r13, %r12
2468 sub $16*7, %r13
2473 sub $16*6, %r13
2478 sub $16*5, %r13
2483 sub $16*4, %r13
2488 sub $16*3, %r13
2493 sub $16*2, %r13
2498 sub $16*1, %r13
2506 cmp $0, %r13
2509 sub $128, %r13
2529 sub $128, %r13
2541 sub $128, %r13
2557 mov arg4, %r13
2558 and $15, %r13 # r13 = (arg4 mod 16)
2570 add %r13, %r11
2574 sub %r13, %r12 # adjust the shuffle mask pointer
2575 # to be able to shift 16-r13 bytes
2576 # (r13 is the number of bytes in plaintext mod 16)
2578 vpshufb %xmm2, %xmm1, %xmm1 # shift right 16-r13 bytes
2583 mov arg4, %r13
2584 and $15, %r13 # r13 = (arg4 mod 16)
2597 sub %r13, %r12 # adjust the shuffle mask pointer to be
2598 # able to shift 16-r13 bytes (r13 is the
2605 cmp %r13, %r11
2616 vmovdqu ALL_F-SHIFT_MASK(%r12), %xmm1 # get the appropriate mask to mask out top 16-r13 bytes of xmm9
2617 vpand %xmm1, %xmm9, %xmm9 # mask out top 16-r13 bytes of xmm9
2623 sub %r13, %r11
2627 vmovdqu ALL_F-SHIFT_MASK(%r12), %xmm1 # get the appropriate mask to mask out top 16-r13 bytes of xmm9
2628 vpand %xmm1, %xmm9, %xmm9 # mask out top 16-r13 bytes of xmm9
2633 sub %r13, %r11
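
The ALL_F-SHIFT_MASK loads at 2616 and 2627 show the second use of the adjusted pointer in r12: the same "sub %r13, %r12" that selects the right-shift shuffle mask at offset 0 also selects, at the fixed offset ALL_F-SHIFT_MASK, a byte mask with exactly r13 bytes of 0xff in the low lanes, which vpand then uses to clear the top 16-r13 bytes of xmm9. A C sketch of that single-table trick; the table layout (16 index bytes 0..15, then 16 bytes of 0xff, then zero bytes) is inferred from the offset arithmetic rather than quoted from the file, and the r12 base is an assumption, since the listing only shows the subtraction:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* One contiguous table: byte indices 0..15, then 16 x 0xff, then 16 x 0x00
     * (assumed layout, mirroring what SHIFT_MASK / ALL_F / a zero block provide). */
    static const uint8_t mask_table[48] = {
         0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    };

    int main(void)
    {
        unsigned r13 = 5;                              /* leftover bytes, 1..15 */
        const uint8_t *r12 = mask_table + 16 - r13;    /* base assumed at table+16; listing shows only sub %r13, %r12 */

        uint8_t shuffle_mask[16], and_mask[16];
        memcpy(shuffle_mask, r12, 16);                 /* (%r12): shuffle mask that shifts right by 16-r13 */
        memcpy(and_mask, r12 + 16, 16);                /* ALL_F-SHIFT_MASK(%r12): r13 bytes of 0xff, then zeros */

        /* shuffle_mask holds indices 16-r13..15 followed by 0xff bytes
         * (pshufb writes zero for indices with the high bit set);
         * and_mask keeps the low r13 bytes and clears the top 16-r13. */
        for (unsigned i = 0; i < 16; i++)
            printf("%02x ", and_mask[i]);
        printf("\n");
        return 0;
    }
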
2640 # output r13 Bytes
2642 cmp $8, %r13
2649 sub $8, %r13
2655 sub $1, %r13
2712 pop %r13
2726 push %r13
2763 pop %r13