
 * \brief Multi-precision integer library
 * SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
 * . IA-32 (386+)       . AMD64 / EM64T
 * . IA-32 (SSE2)       . Motorola 68000
 * . PowerPC, 32-bit    . MicroBlaze
 * . PowerPC, 64-bit    . TriCore
#else /* 64-bits */
/* *INDENT-OFF* */

/* armcc5 --gnu defines __GNUC__ but doesn't support GNU's extended asm */

/* ... instead of using a fixed register, was implemented on x86/x86-64 ... */

/*
 * Disable use of the i386 assembly code below if option -O0, to disable all
 * compiler optimisations, is passed, detected with __OPTIMIZE__.
 * This is done as the number of registers used in the assembly code doesn't
 * work with the -O0 option.
 */
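/* Illustration (an assumption, not the file's actual condition, which is not
 * part of this listing): such a gate can be expressed by requiring
 * __OPTIMIZE__, which GCC defines for any -Ox with x > 0 but not for -O0: */
#if defined(__GNUC__) && defined(__i386__) && defined(__OPTIMIZE__)
#define MULADDC_I386_USABLE 1   /* hypothetical macro name */
#endif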
/* i386 */
{ mbedtls_mpi_uint t; \
    "movl %%ebx, %0 \n\t" \
    "movl %5, %%esi \n\t" \
    "movl %6, %%edi \n\t" \
    "movl %7, %%ecx \n\t" \
    "movl %8, %%ebx \n\t"

    "lodsl \n\t" \
    "mull %%ebx \n\t" \
    "addl %%ecx, %%eax \n\t" \
    "adcl $0, %%edx \n\t" \
    "addl (%%edi), %%eax \n\t" \
    "adcl $0, %%edx \n\t" \
    "movl %%edx, %%ecx \n\t" \
    "stosl \n\t"

    "movl %4, %%ebx \n\t" \
    "movl %%ecx, %1 \n\t" \
    "movl %%edi, %2 \n\t" \
    "movl %%esi, %3 \n\t" \
    : "=m" (t), "=m" (c), "=m" (d), "=m" (s) \
    : "m" (t), "m" (s), "m" (d), "m" (c), "m" (b) \
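/* Illustration (not part of bn_mul.h): what one MULADDC step above computes,
 * written in portable C for 32-bit limbs, assuming a 64-bit double-width type
 * is available. The names limb and muladdc_step are hypothetical. */
#include <stdint.h>
typedef uint32_t limb;
static inline void muladdc_step(const limb **s, limb **d, limb *c, limb b)
{
    /* t = *s++ * b + c + *d, computed in double precision; the sum cannot
     * overflow: (2^32-1)^2 + 2*(2^32-1) == 2^64-1 */
    uint64_t t = (uint64_t) *(*s)++ * b + *c + **d;
    *(*d)++ = (limb) t;       /* low word goes back to the destination */
    *c = (limb) (t >> 32);    /* high word becomes the carry for the next step */
}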
136 "movd %%ecx, %%mm1 \n\t" \
137 "movd %%ebx, %%mm0 \n\t" \
138 "movd (%%edi), %%mm3 \n\t" \
139 "paddq %%mm3, %%mm1 \n\t" \
140 "movd (%%esi), %%mm2 \n\t" \
141 "pmuludq %%mm0, %%mm2 \n\t" \
142 "movd 4(%%esi), %%mm4 \n\t" \
143 "pmuludq %%mm0, %%mm4 \n\t" \
144 "movd 8(%%esi), %%mm6 \n\t" \
145 "pmuludq %%mm0, %%mm6 \n\t" \
146 "movd 12(%%esi), %%mm7 \n\t" \
147 "pmuludq %%mm0, %%mm7 \n\t" \
148 "paddq %%mm2, %%mm1 \n\t" \
149 "movd 4(%%edi), %%mm3 \n\t" \
150 "paddq %%mm4, %%mm3 \n\t" \
151 "movd 8(%%edi), %%mm5 \n\t" \
152 "paddq %%mm6, %%mm5 \n\t" \
153 "movd 12(%%edi), %%mm4 \n\t" \
154 "paddq %%mm4, %%mm7 \n\t" \
155 "movd %%mm1, (%%edi) \n\t" \
156 "movd 16(%%esi), %%mm2 \n\t" \
157 "pmuludq %%mm0, %%mm2 \n\t" \
158 "psrlq $32, %%mm1 \n\t" \
159 "movd 20(%%esi), %%mm4 \n\t" \
160 "pmuludq %%mm0, %%mm4 \n\t" \
161 "paddq %%mm3, %%mm1 \n\t" \
162 "movd 24(%%esi), %%mm6 \n\t" \
163 "pmuludq %%mm0, %%mm6 \n\t" \
164 "movd %%mm1, 4(%%edi) \n\t" \
165 "psrlq $32, %%mm1 \n\t" \
166 "movd 28(%%esi), %%mm3 \n\t" \
167 "pmuludq %%mm0, %%mm3 \n\t" \
168 "paddq %%mm5, %%mm1 \n\t" \
169 "movd 16(%%edi), %%mm5 \n\t" \
170 "paddq %%mm5, %%mm2 \n\t" \
171 "movd %%mm1, 8(%%edi) \n\t" \
172 "psrlq $32, %%mm1 \n\t" \
173 "paddq %%mm7, %%mm1 \n\t" \
174 "movd 20(%%edi), %%mm5 \n\t" \
175 "paddq %%mm5, %%mm4 \n\t" \
176 "movd %%mm1, 12(%%edi) \n\t" \
177 "psrlq $32, %%mm1 \n\t" \
178 "paddq %%mm2, %%mm1 \n\t" \
179 "movd 24(%%edi), %%mm5 \n\t" \
180 "paddq %%mm5, %%mm6 \n\t" \
181 "movd %%mm1, 16(%%edi) \n\t" \
182 "psrlq $32, %%mm1 \n\t" \
183 "paddq %%mm4, %%mm1 \n\t" \
184 "movd 28(%%edi), %%mm5 \n\t" \
185 "paddq %%mm5, %%mm3 \n\t" \
186 "movd %%mm1, 20(%%edi) \n\t" \
187 "psrlq $32, %%mm1 \n\t" \
188 "paddq %%mm6, %%mm1 \n\t" \
189 "movd %%mm1, 24(%%edi) \n\t" \
190 "psrlq $32, %%mm1 \n\t" \
191 "paddq %%mm3, %%mm1 \n\t" \
192 "movd %%mm1, 28(%%edi) \n\t" \
193 "addl $32, %%edi \n\t" \
194 "addl $32, %%esi \n\t" \
195 "psrlq $32, %%mm1 \n\t" \
196 "movd %%mm1, %%ecx \n\t"
199 "emms \n\t" \
200 "movl %4, %%ebx \n\t" \
201 "movl %%ecx, %1 \n\t" \
202 "movl %%edi, %2 \n\t" \
203 "movl %%esi, %3 \n\t" \
204 : "=m" (t), "=m" (c), "=m" (d), "=m" (s) \
205 : "m" (t), "m" (s), "m" (d), "m" (c), "m" (b) \
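/* Illustration (not part of bn_mul.h): the effect of the SSE2 block above in
 * C. %mm1 plays the role of the 64-bit running sum acc; pmuludq supplies the
 * 32x32 -> 64 products. The C function name is hypothetical. */
#include <stdint.h>
static inline void muladdc_eight(const uint32_t **s, uint32_t **d,
                                 uint32_t *c, uint32_t b)
{
    uint64_t acc = *c;                       /* incoming carry */
    for (int i = 0; i < 8; i++) {
        acc += (uint64_t) (*s)[i] * b + (*d)[i];
        (*d)[i] = (uint32_t) acc;            /* store low word */
        acc >>= 32;                          /* keep high word as carry */
    }
    *s += 8;
    *d += 8;
    *c = (uint32_t) acc;                     /* outgoing carry */
}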
// The following assembly code assumes that a pointer will fit in a 64-bit register
// (including ILP32 __aarch64__ ABIs such as on watchOS, hence the 2^32 - 1)

/* AArch64 */
    "ldr x4, [%x2], #8 \n\t" \
    "ldr x5, [%x1] \n\t" \
    "mul x6, x4, %4 \n\t" \
    "umulh x7, x4, %4 \n\t" \
    "adds x5, x5, x6 \n\t" \
    "adc x7, x7, xzr \n\t" \
    "adds x5, x5, %0 \n\t" \
    "adc %0, x7, xzr \n\t" \
    "str x5, [%x1], #8 \n\t"
279 "movl %3, %%a2 \n\t" \
280 "movl %4, %%a3 \n\t" \
281 "movl %5, %%d3 \n\t" \
282 "movl %6, %%d2 \n\t" \
283 "moveq #0, %%d0 \n\t"
286 "movel %%a2@+, %%d1 \n\t" \
287 "mulul %%d2, %%d4:%%d1 \n\t" \
288 "addl %%d3, %%d1 \n\t" \
289 "addxl %%d0, %%d4 \n\t" \
290 "moveq #0, %%d3 \n\t" \
291 "addl %%d1, %%a3@+ \n\t" \
292 "addxl %%d4, %%d3 \n\t"
295 "movl %%d3, %0 \n\t" \
296 "movl %%a3, %1 \n\t" \
297 "movl %%a2, %2 \n\t" \
306 "movel %%a2@+, %%d1 \n\t" \
307 "mulul %%d2, %%d4:%%d1 \n\t" \
308 "addxl %%d3, %%d1 \n\t" \
309 "addxl %%d0, %%d4 \n\t" \
310 "addl %%d1, %%a3@+ \n\t" \
311 "movel %%a2@+, %%d1 \n\t" \
312 "mulul %%d2, %%d3:%%d1 \n\t" \
313 "addxl %%d4, %%d1 \n\t" \
314 "addxl %%d0, %%d3 \n\t" \
315 "addl %%d1, %%a3@+ \n\t" \
316 "movel %%a2@+, %%d1 \n\t" \
317 "mulul %%d2, %%d4:%%d1 \n\t" \
318 "addxl %%d3, %%d1 \n\t" \
319 "addxl %%d0, %%d4 \n\t" \
320 "addl %%d1, %%a3@+ \n\t" \
321 "movel %%a2@+, %%d1 \n\t" \
322 "mulul %%d2, %%d3:%%d1 \n\t" \
323 "addxl %%d4, %%d1 \n\t" \
324 "addxl %%d0, %%d3 \n\t" \
325 "addl %%d1, %%a3@+ \n\t" \
326 "movel %%a2@+, %%d1 \n\t" \
327 "mulul %%d2, %%d4:%%d1 \n\t" \
328 "addxl %%d3, %%d1 \n\t" \
329 "addxl %%d0, %%d4 \n\t" \
330 "addl %%d1, %%a3@+ \n\t" \
331 "movel %%a2@+, %%d1 \n\t" \
332 "mulul %%d2, %%d3:%%d1 \n\t" \
333 "addxl %%d4, %%d1 \n\t" \
334 "addxl %%d0, %%d3 \n\t" \
335 "addl %%d1, %%a3@+ \n\t" \
336 "movel %%a2@+, %%d1 \n\t" \
337 "mulul %%d2, %%d4:%%d1 \n\t" \
338 "addxl %%d3, %%d1 \n\t" \
339 "addxl %%d0, %%d4 \n\t" \
340 "addl %%d1, %%a3@+ \n\t" \
341 "movel %%a2@+, %%d1 \n\t" \
342 "mulul %%d2, %%d3:%%d1 \n\t" \
343 "addxl %%d4, %%d1 \n\t" \
344 "addxl %%d0, %%d3 \n\t" \
345 "addl %%d1, %%a3@+ \n\t" \
346 "addxl %%d0, %%d3 \n\t"
358 "ld r3, %3 \n\t" \
359 "ld r4, %4 \n\t" \
360 "ld r5, %5 \n\t" \
361 "ld r6, %6 \n\t" \
362 "addi r3, r3, -8 \n\t" \
363 "addi r4, r4, -8 \n\t" \
364 "addic r5, r5, 0 \n\t"
367 "ldu r7, 8(r3) \n\t" \
368 "mulld r8, r7, r6 \n\t" \
369 "mulhdu r9, r7, r6 \n\t" \
370 "adde r8, r8, r5 \n\t" \
371 "ld r7, 8(r4) \n\t" \
372 "addze r5, r9 \n\t" \
373 "addc r8, r8, r7 \n\t" \
374 "stdu r8, 8(r4) \n\t"
377 "addze r5, r5 \n\t" \
378 "addi r4, r4, 8 \n\t" \
379 "addi r3, r3, 8 \n\t" \
380 "std r5, %0 \n\t" \
381 "std r4, %1 \n\t" \
382 "std r3, %2 \n\t" \
393 "ld %%r3, %3 \n\t" \
394 "ld %%r4, %4 \n\t" \
395 "ld %%r5, %5 \n\t" \
396 "ld %%r6, %6 \n\t" \
397 "addi %%r3, %%r3, -8 \n\t" \
398 "addi %%r4, %%r4, -8 \n\t" \
399 "addic %%r5, %%r5, 0 \n\t"
402 "ldu %%r7, 8(%%r3) \n\t" \
403 "mulld %%r8, %%r7, %%r6 \n\t" \
404 "mulhdu %%r9, %%r7, %%r6 \n\t" \
405 "adde %%r8, %%r8, %%r5 \n\t" \
406 "ld %%r7, 8(%%r4) \n\t" \
407 "addze %%r5, %%r9 \n\t" \
408 "addc %%r8, %%r8, %%r7 \n\t" \
409 "stdu %%r8, 8(%%r4) \n\t"
412 "addze %%r5, %%r5 \n\t" \
413 "addi %%r4, %%r4, 8 \n\t" \
414 "addi %%r3, %%r3, 8 \n\t" \
415 "std %%r5, %0 \n\t" \
416 "std %%r4, %1 \n\t" \
417 "std %%r3, %2 \n\t" \
431 "lwz r3, %3 \n\t" \
432 "lwz r4, %4 \n\t" \
433 "lwz r5, %5 \n\t" \
434 "lwz r6, %6 \n\t" \
435 "addi r3, r3, -4 \n\t" \
436 "addi r4, r4, -4 \n\t" \
437 "addic r5, r5, 0 \n\t"
440 "lwzu r7, 4(r3) \n\t" \
441 "mullw r8, r7, r6 \n\t" \
442 "mulhwu r9, r7, r6 \n\t" \
443 "adde r8, r8, r5 \n\t" \
444 "lwz r7, 4(r4) \n\t" \
445 "addze r5, r9 \n\t" \
446 "addc r8, r8, r7 \n\t" \
447 "stwu r8, 4(r4) \n\t"
450 "addze r5, r5 \n\t" \
451 "addi r4, r4, 4 \n\t" \
452 "addi r3, r3, 4 \n\t" \
453 "stw r5, %0 \n\t" \
454 "stw r4, %1 \n\t" \
455 "stw r3, %2 \n\t" \
465 "lwz %%r3, %3 \n\t" \
466 "lwz %%r4, %4 \n\t" \
467 "lwz %%r5, %5 \n\t" \
468 "lwz %%r6, %6 \n\t" \
469 "addi %%r3, %%r3, -4 \n\t" \
470 "addi %%r4, %%r4, -4 \n\t" \
471 "addic %%r5, %%r5, 0 \n\t"
474 "lwzu %%r7, 4(%%r3) \n\t" \
475 "mullw %%r8, %%r7, %%r6 \n\t" \
476 "mulhwu %%r9, %%r7, %%r6 \n\t" \
477 "adde %%r8, %%r8, %%r5 \n\t" \
478 "lwz %%r7, 4(%%r4) \n\t" \
479 "addze %%r5, %%r9 \n\t" \
480 "addc %%r8, %%r8, %%r7 \n\t" \
481 "stwu %%r8, 4(%%r4) \n\t"
484 "addze %%r5, %%r5 \n\t" \
485 "addi %%r4, %%r4, 4 \n\t" \
486 "addi %%r3, %%r3, 4 \n\t" \
487 "stw %%r5, %0 \n\t" \
488 "stw %%r4, %1 \n\t" \
489 "stw %%r3, %2 \n\t" \
508 "ldx %3, %%o0 \n\t" \
509 "ldx %4, %%o1 \n\t" \
510 "ld %5, %%o2 \n\t" \
511 "ld %6, %%o3 \n\t"
514 "ld [%%o0], %%o4 \n\t" \
515 "inc 4, %%o0 \n\t" \
516 "ld [%%o1], %%o5 \n\t" \
517 "umul %%o3, %%o4, %%o4 \n\t" \
518 "addcc %%o4, %%o2, %%o4 \n\t" \
519 "rd %%y, %%g1 \n\t" \
520 "addx %%g1, 0, %%g1 \n\t" \
521 "addcc %%o4, %%o5, %%o4 \n\t" \
522 "st %%o4, [%%o1] \n\t" \
523 "addx %%g1, 0, %%o2 \n\t" \
524 "inc 4, %%o1 \n\t"
527 "st %%o2, %0 \n\t" \
528 "stx %%o1, %1 \n\t" \
529 "stx %%o0, %2 \n\t" \
540 "ld %3, %%o0 \n\t" \
541 "ld %4, %%o1 \n\t" \
542 "ld %5, %%o2 \n\t" \
543 "ld %6, %%o3 \n\t"
546 "ld [%%o0], %%o4 \n\t" \
547 "inc 4, %%o0 \n\t" \
548 "ld [%%o1], %%o5 \n\t" \
549 "umul %%o3, %%o4, %%o4 \n\t" \
550 "addcc %%o4, %%o2, %%o4 \n\t" \
551 "rd %%y, %%g1 \n\t" \
552 "addx %%g1, 0, %%g1 \n\t" \
553 "addcc %%o4, %%o5, %%o4 \n\t" \
554 "st %%o4, [%%o1] \n\t" \
555 "addx %%g1, 0, %%o2 \n\t" \
556 "inc 4, %%o1 \n\t"
559 "st %%o2, %0 \n\t" \
560 "st %%o1, %1 \n\t" \
561 "st %%o0, %2 \n\t" \
575 "lwi r3, %3 \n\t" \
576 "lwi r4, %4 \n\t" \
577 "lwi r5, %5 \n\t" \
578 "lwi r6, %6 \n\t" \
579 "andi r7, r6, 0xffff \n\t" \
580 "bsrli r6, r6, 16 \n\t"
584 "lhui r9, r3, 0 \n\t" \
585 "addi r3, r3, 2 \n\t" \
586 "lhui r8, r3, 0 \n\t"
589 "lhui r8, r3, 0 \n\t" \
590 "addi r3, r3, 2 \n\t" \
591 "lhui r9, r3, 0 \n\t"
596 "addi r3, r3, 2 \n\t" \
597 "mul r10, r9, r6 \n\t" \
598 "mul r11, r8, r7 \n\t" \
599 "mul r12, r9, r7 \n\t" \
600 "mul r13, r8, r6 \n\t" \
601 "bsrli r8, r10, 16 \n\t" \
602 "bsrli r9, r11, 16 \n\t" \
603 "add r13, r13, r8 \n\t" \
604 "add r13, r13, r9 \n\t" \
605 "bslli r10, r10, 16 \n\t" \
606 "bslli r11, r11, 16 \n\t" \
607 "add r12, r12, r10 \n\t" \
608 "addc r13, r13, r0 \n\t" \
609 "add r12, r12, r11 \n\t" \
610 "addc r13, r13, r0 \n\t" \
611 "lwi r10, r4, 0 \n\t" \
612 "add r12, r12, r10 \n\t" \
613 "addc r13, r13, r0 \n\t" \
614 "add r12, r12, r5 \n\t" \
615 "addc r5, r13, r0 \n\t" \
616 "swi r12, r4, 0 \n\t" \
617 "addi r4, r4, 4 \n\t"
620 "swi r5, %0 \n\t" \
621 "swi r4, %1 \n\t" \
622 "swi r3, %2 \n\t" \
635 "ld.a %%a2, %3 \n\t" \
636 "ld.a %%a3, %4 \n\t" \
637 "ld.w %%d4, %5 \n\t" \
638 "ld.w %%d1, %6 \n\t" \
639 "xor %%d5, %%d5 \n\t"
642 "ld.w %%d0, [%%a2+] \n\t" \
643 "madd.u %%e2, %%e4, %%d0, %%d1 \n\t" \
644 "ld.w %%d0, [%%a3] \n\t" \
645 "addx %%d2, %%d2, %%d0 \n\t" \
646 "addc %%d3, %%d3, 0 \n\t" \
647 "mov %%d4, %%d3 \n\t" \
648 "st.w [%%a3+], %%d2 \n\t"
651 "st.w %0, %%d4 \n\t" \
652 "st.a %1, %%a3 \n\t" \
653 "st.a %2, %%a2 \n\t" \
/* Thumb 1 */

/*
 * Note, gcc -O0 by default uses r7 for the frame pointer, so it complains about
 * our use of r7 below, unless -fomit-frame-pointer is passed.
 *
 * On the other hand, -fomit-frame-pointer is implied by any -Ox options with
 * x > 0, so the workaround is only needed for unoptimised builds.
 */
#define MULADDC_SCRATCH "RS .req r1 \n\t"
#define MULADDC_PRESERVE_SCRATCH "mov r10, r1 \n\t"
#define MULADDC_RESTORE_SCRATCH "mov r1, r10 \n\t"

/* With optimisation enabled, r7 is free to serve as the scratch register: */
#define MULADDC_SCRATCH "RS .req r7 \n\t"

    "ldr r0, %3 \n\t" \
    "ldr r1, %4 \n\t" \
    "ldr r2, %5 \n\t" \
    "ldr r3, %6 \n\t" \
    "lsr r4, r3, #16 \n\t" \
    "mov r9, r4 \n\t" \
    "lsl r4, r3, #16 \n\t" \
    "lsr r4, r4, #16 \n\t" \
    "mov r8, r4 \n\t" \

    "ldmia r0!, {r6} \n\t" \
    "lsr RS, r6, #16 \n\t" \
    "lsl r6, r6, #16 \n\t" \
    "lsr r6, r6, #16 \n\t" \
    "mov r4, r8 \n\t" \
    "mul r4, r6 \n\t" \
    "mov r3, r9 \n\t" \
    "mul r6, r3 \n\t" \
    "mov r5, r9 \n\t" \
    "mul r5, RS \n\t" \
    "mov r3, r8 \n\t" \
    "mul RS, r3 \n\t" \
    "lsr r3, r6, #16 \n\t" \
    "add r5, r5, r3 \n\t" \
    "lsr r3, RS, #16 \n\t" \
    "add r5, r5, r3 \n\t" \
    "add r4, r4, r2 \n\t" \
    "mov r2, #0 \n\t" \
    "adc r5, r2 \n\t" \
    "lsl r3, r6, #16 \n\t" \
    "add r4, r4, r3 \n\t" \
    "adc r5, r2 \n\t" \
    "lsl r3, RS, #16 \n\t" \
    "add r4, r4, r3 \n\t" \
    "adc r5, r2 \n\t" \

    "ldr r3, [r1] \n\t" \
    "add r4, r4, r3 \n\t" \
    "adc r2, r5 \n\t" \
    "stmia r1!, {r4} \n\t"

    "str r2, %0 \n\t" \
    "str r1, %1 \n\t" \
    "str r0, %2 \n\t" \
/* Armv6-M (or later) with DSP Instruction Set Extensions. ... */
    ".p2align 2 \n\t" \
    "ldr %[a], [%[in]], #4 \n\t" \
    "ldr %[b], [%[acc]] \n\t" \
    "umaal %[b], %[carry], %[scalar], %[a] \n\t" \
    "str %[b], [%[acc]], #4 \n\t"
/* - Make sure the loop is 4-byte aligned to avoid stalls
 *   upon repeated non-word aligned instructions in
 *   some microarchitectures.
 * - Don't use ldm with post-increment or back-to-back
 *   loads with post-increment and same address register.
 * - Bunch loads and stores to reduce latency on some
 *   microarchitectures. E.g., on Cortex-M4, the first
 *   load/store of a sequence takes 2 cycles,
 *   while subsequent loads/stores are single-cycle. */
    ".p2align 2 \n\t" \
    "ldr %[a0], [%[in]], #+8 \n\t" \
    "ldr %[b0], [%[acc]], #+8 \n\t" \
    "ldr %[a1], [%[in], #-4] \n\t" \
    "ldr %[b1], [%[acc], #-4] \n\t" \
    "umaal %[b0], %[carry], %[scalar], %[a0] \n\t" \
    "umaal %[b1], %[carry], %[scalar], %[a1] \n\t" \
    "str %[b0], [%[acc], #-8] \n\t" \
    "str %[b1], [%[acc], #-4] \n\t"
825 "ldr r0, %3 \n\t" \
826 "ldr r1, %4 \n\t" \
827 "ldr r2, %5 \n\t" \
828 "ldr r3, %6 \n\t"
831 "ldr r4, [r0], #4 \n\t" \
832 "mov r5, #0 \n\t" \
833 "ldr r6, [r1] \n\t" \
834 "umlal r2, r5, r3, r4 \n\t" \
835 "adds r4, r6, r2 \n\t" \
836 "adc r2, r5, #0 \n\t" \
837 "str r4, [r1], #4 \n\t"
840 "str r2, %0 \n\t" \
841 "str r1, %1 \n\t" \
842 "str r0, %2 \n\t" \
857 "ldq $1, %3 \n\t" \
858 "ldq $2, %4 \n\t" \
859 "ldq $3, %5 \n\t" \
860 "ldq $4, %6 \n\t"
863 "ldq $6, 0($1) \n\t" \
864 "addq $1, 8, $1 \n\t" \
865 "mulq $6, $4, $7 \n\t" \
866 "umulh $6, $4, $6 \n\t" \
867 "addq $7, $3, $7 \n\t" \
868 "cmpult $7, $3, $3 \n\t" \
869 "ldq $5, 0($2) \n\t" \
870 "addq $7, $5, $7 \n\t" \
871 "cmpult $7, $5, $5 \n\t" \
872 "stq $7, 0($2) \n\t" \
873 "addq $2, 8, $2 \n\t" \
874 "addq $6, $3, $3 \n\t" \
875 "addq $5, $3, $3 \n\t"
878 "stq $3, %0 \n\t" \
879 "stq $2, %1 \n\t" \
880 "stq $1, %2 \n\t" \
891 "lw $10, %3 \n\t" \
892 "lw $11, %4 \n\t" \
893 "lw $12, %5 \n\t" \
894 "lw $13, %6 \n\t"
897 "lw $14, 0($10) \n\t" \
898 "multu $13, $14 \n\t" \
899 "addi $10, $10, 4 \n\t" \
900 "mflo $14 \n\t" \
901 "mfhi $9 \n\t" \
902 "addu $14, $12, $14 \n\t" \
903 "lw $15, 0($11) \n\t" \
904 "sltu $12, $14, $12 \n\t" \
905 "addu $15, $14, $15 \n\t" \
906 "sltu $14, $15, $14 \n\t" \
907 "addu $12, $12, $9 \n\t" \
908 "sw $15, 0($11) \n\t" \
909 "addu $12, $12, $14 \n\t" \
910 "addi $11, $11, 4 \n\t"
913 "sw $12, %0 \n\t" \
914 "sw $11, %1 \n\t" \
915 "sw $10, %2 \n\t" \
/* *INDENT-ON* */