/*
 *  bn_mul.h  (PolarSSL v1.1.5)
 */

/*
 * Multiply the source vector [s] by b, add the result
 * to the destination vector [d], and set the carry c.
 *
 * Currently supports:
 *
 *    . IA-32 (386+)        . AMD64 / EM64T
 *    . IA-32 (SSE2)        . Motorola 68000
 *    . PowerPC, 32-bit     . MicroBlaze
 *    . PowerPC, 64-bit     . TriCore
 *    . SPARC v8            . ARM v3+
 *    . Alpha               . MIPS32
 *    . C, longlong         . C, generic
 */
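
/*
 * A minimal C sketch of what one MULADDC step computes (illustrative
 * only, not part of this header): each step multiplies one limb of s
 * by b, adds the running carry c and the current limb of d, stores the
 * low half of the result back into d, and carries the high half forward:
 *
 *     t_udbl p = (t_udbl) *s++ * b + c + *d;   // double-width product
 *     *d++ = (t_uint) p;                       // low limb back into d
 *     c    = (t_uint)( p >> biL );             // high limb is the carry
 *
 * The bignum code expands MULADDC_INIT once, then MULADDC_CORE once per
 * limb (or MULADDC_HUIT for eight limbs at a time), then MULADDC_STOP.
 */
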
#ifndef POLARSSL_BN_MUL_H
#define POLARSSL_BN_MUL_H

#include "bignum.h"

#if defined(POLARSSL_HAVE_ASM)

#if defined(__GNUC__)
#if defined(__i386__)

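/*
 * Register use: esi walks s, edi walks d, ecx holds the carry and ebx
 * holds b. ebx is saved to t first and restored in MULADDC_STOP, most
 * likely because ebx may be reserved as the PIC register.
 */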
#define MULADDC_INIT \
    asm( " \
        movl %%ebx, %0; \
        movl %5, %%esi; \
        movl %6, %%edi; \
        movl %7, %%ecx; \
        movl %8, %%ebx; \
        "

#define MULADDC_CORE \
        " \
        lodsl; \
        mull %%ebx; \
        addl %%ecx, %%eax; \
        adcl $0, %%edx; \
        addl (%%edi), %%eax; \
        adcl $0, %%edx; \
        movl %%edx, %%ecx; \
        stosl; \
        "

#if defined(POLARSSL_HAVE_SSE2)

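/*
 * MULADDC_HUIT ("huit": eight) is an unrolled SSE2 variant that handles
 * eight limbs per expansion: pmuludq forms the 32x32->64 products in MMX
 * registers, and psrlq shifts each high half down to become the carry
 * for the next limb.
 */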
#define MULADDC_HUIT \
        " \
        movd %%ecx, %%mm1; \
        movd %%ebx, %%mm0; \
        movd (%%edi), %%mm3; \
        paddq %%mm3, %%mm1; \
        movd (%%esi), %%mm2; \
        pmuludq %%mm0, %%mm2; \
        movd 4(%%esi), %%mm4; \
        pmuludq %%mm0, %%mm4; \
        movd 8(%%esi), %%mm6; \
        pmuludq %%mm0, %%mm6; \
        movd 12(%%esi), %%mm7; \
        pmuludq %%mm0, %%mm7; \
        paddq %%mm2, %%mm1; \
        movd 4(%%edi), %%mm3; \
        paddq %%mm4, %%mm3; \
        movd 8(%%edi), %%mm5; \
        paddq %%mm6, %%mm5; \
        movd 12(%%edi), %%mm4; \
        paddq %%mm4, %%mm7; \
        movd %%mm1, (%%edi); \
        movd 16(%%esi), %%mm2; \
        pmuludq %%mm0, %%mm2; \
        psrlq $32, %%mm1; \
        movd 20(%%esi), %%mm4; \
        pmuludq %%mm0, %%mm4; \
        paddq %%mm3, %%mm1; \
        movd 24(%%esi), %%mm6; \
        pmuludq %%mm0, %%mm6; \
        movd %%mm1, 4(%%edi); \
        psrlq $32, %%mm1; \
        movd 28(%%esi), %%mm3; \
        pmuludq %%mm0, %%mm3; \
        paddq %%mm5, %%mm1; \
        movd 16(%%edi), %%mm5; \
        paddq %%mm5, %%mm2; \
        movd %%mm1, 8(%%edi); \
        psrlq $32, %%mm1; \
        paddq %%mm7, %%mm1; \
        movd 20(%%edi), %%mm5; \
        paddq %%mm5, %%mm4; \
        movd %%mm1, 12(%%edi); \
        psrlq $32, %%mm1; \
        paddq %%mm2, %%mm1; \
        movd 24(%%edi), %%mm5; \
        paddq %%mm5, %%mm6; \
        movd %%mm1, 16(%%edi); \
        psrlq $32, %%mm1; \
        paddq %%mm4, %%mm1; \
        movd 28(%%edi), %%mm5; \
        paddq %%mm5, %%mm3; \
        movd %%mm1, 20(%%edi); \
        psrlq $32, %%mm1; \
        paddq %%mm6, %%mm1; \
        movd %%mm1, 24(%%edi); \
        psrlq $32, %%mm1; \
        paddq %%mm3, %%mm1; \
        movd %%mm1, 28(%%edi); \
        addl $32, %%edi; \
        addl $32, %%esi; \
        psrlq $32, %%mm1; \
        movd %%mm1, %%ecx; \
        "

#define MULADDC_STOP \
        " \
        emms; \
        movl %4, %%ebx; \
        movl %%ecx, %1; \
        movl %%edi, %2; \
        movl %%esi, %3; \
        " \
        : "=m" (t), "=m" (c), "=m" (d), "=m" (s) \
        : "m" (t), "m" (s), "m" (d), "m" (c), "m" (b) \
        : "eax", "ecx", "edx", "esi", "edi" \
    );

#else

#define MULADDC_STOP \
        " \
        movl %4, %%ebx; \
        movl %%ecx, %1; \
        movl %%edi, %2; \
        movl %%esi, %3; \
        " \
        : "=m" (t), "=m" (c), "=m" (d), "=m" (s) \
        : "m" (t), "m" (s), "m" (d), "m" (c), "m" (b) \
        : "eax", "ecx", "edx", "esi", "edi" \
    );
#endif /* SSE2 */
#endif /* i386 */

#if defined(__amd64__) || defined(__x86_64__)

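/*
 * Register use: rsi walks s, rdi walks d, rcx holds the carry, rbx holds
 * b, and r8 is kept at zero as a convenient source for carry propagation.
 */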
#define MULADDC_INIT \
    asm( "movq %0, %%rsi " :: "m" (s)); \
    asm( "movq %0, %%rdi " :: "m" (d)); \
    asm( "movq %0, %%rcx " :: "m" (c)); \
    asm( "movq %0, %%rbx " :: "m" (b)); \
    asm( "xorq %r8, %r8 " );

#define MULADDC_CORE \
    asm( "movq (%rsi),%rax " ); \
    asm( "mulq %rbx " ); \
    asm( "addq $8, %rsi " ); \
    asm( "addq %rcx, %rax " ); \
    asm( "movq %r8, %rcx " ); \
    asm( "adcq $0, %rdx " ); \
    asm( "nop " ); \
    asm( "addq %rax, (%rdi) " ); \
    asm( "adcq %rdx, %rcx " ); \
    asm( "addq $8, %rdi " );

#define MULADDC_STOP \
    asm( "movq %%rcx, %0 " : "=m" (c)); \
    asm( "movq %%rdi, %0 " : "=m" (d)); \
    asm( "movq %%rsi, %0 " : "=m" (s) :: \
    "rax", "rcx", "rdx", "rbx", "rsi", "rdi", "r8" );

#endif /* AMD64 */

#if defined(__mc68020__) || defined(__mcpu32__)

#define MULADDC_INIT \
    asm( "movl %0, %%a2 " :: "m" (s)); \
    asm( "movl %0, %%a3 " :: "m" (d)); \
    asm( "movl %0, %%d3 " :: "m" (c)); \
    asm( "movl %0, %%d2 " :: "m" (b)); \
    asm( "moveq #0, %d0 " );

#define MULADDC_CORE \
    asm( "movel %a2@+, %d1 " ); \
    asm( "mulul %d2, %d4:%d1 " ); \
    asm( "addl %d3, %d1 " ); \
    asm( "addxl %d0, %d4 " ); \
    asm( "moveq #0, %d3 " ); \
    asm( "addl %d1, %a3@+ " ); \
    asm( "addxl %d4, %d3 " );

#define MULADDC_STOP \
    asm( "movl %%d3, %0 " : "=m" (c)); \
    asm( "movl %%a3, %0 " : "=m" (d)); \
    asm( "movl %%a2, %0 " : "=m" (s) :: \
    "d0", "d1", "d2", "d3", "d4", "a2", "a3" );

#define MULADDC_HUIT \
    asm( "movel %a2@+, %d1 " ); \
    asm( "mulul %d2, %d4:%d1 " ); \
    asm( "addxl %d3, %d1 " ); \
    asm( "addxl %d0, %d4 " ); \
    asm( "addl %d1, %a3@+ " ); \
    asm( "movel %a2@+, %d1 " ); \
    asm( "mulul %d2, %d3:%d1 " ); \
    asm( "addxl %d4, %d1 " ); \
    asm( "addxl %d0, %d3 " ); \
    asm( "addl %d1, %a3@+ " ); \
    asm( "movel %a2@+, %d1 " ); \
    asm( "mulul %d2, %d4:%d1 " ); \
    asm( "addxl %d3, %d1 " ); \
    asm( "addxl %d0, %d4 " ); \
    asm( "addl %d1, %a3@+ " ); \
    asm( "movel %a2@+, %d1 " ); \
    asm( "mulul %d2, %d3:%d1 " ); \
    asm( "addxl %d4, %d1 " ); \
    asm( "addxl %d0, %d3 " ); \
    asm( "addl %d1, %a3@+ " ); \
    asm( "movel %a2@+, %d1 " ); \
    asm( "mulul %d2, %d4:%d1 " ); \
    asm( "addxl %d3, %d1 " ); \
    asm( "addxl %d0, %d4 " ); \
    asm( "addl %d1, %a3@+ " ); \
    asm( "movel %a2@+, %d1 " ); \
    asm( "mulul %d2, %d3:%d1 " ); \
    asm( "addxl %d4, %d1 " ); \
    asm( "addxl %d0, %d3 " ); \
    asm( "addl %d1, %a3@+ " ); \
    asm( "movel %a2@+, %d1 " ); \
    asm( "mulul %d2, %d4:%d1 " ); \
    asm( "addxl %d3, %d1 " ); \
    asm( "addxl %d0, %d4 " ); \
    asm( "addl %d1, %a3@+ " ); \
    asm( "movel %a2@+, %d1 " ); \
    asm( "mulul %d2, %d3:%d1 " ); \
    asm( "addxl %d4, %d1 " ); \
    asm( "addxl %d0, %d3 " ); \
    asm( "addl %d1, %a3@+ " ); \
    asm( "addxl %d0, %d3 " );

#endif /* MC68000 */

#if defined(__powerpc__) || defined(__ppc__)
#if defined(__powerpc64__) || defined(__ppc64__)

#if defined(__MACH__) && defined(__APPLE__)

#define MULADDC_INIT \
    asm( "ld r3, %0 " :: "m" (s)); \
    asm( "ld r4, %0 " :: "m" (d)); \
    asm( "ld r5, %0 " :: "m" (c)); \
    asm( "ld r6, %0 " :: "m" (b)); \
    asm( "addi r3, r3, -8 " ); \
    asm( "addi r4, r4, -8 " ); \
    asm( "addic r5, r5, 0 " );

#define MULADDC_CORE \
    asm( "ldu r7, 8(r3) " ); \
    asm( "mulld r8, r7, r6 " ); \
    asm( "mulhdu r9, r7, r6 " ); \
    asm( "adde r8, r8, r5 " ); \
    asm( "ld r7, 8(r4) " ); \
    asm( "addze r5, r9 " ); \
    asm( "addc r8, r8, r7 " ); \
    asm( "stdu r8, 8(r4) " );

#define MULADDC_STOP \
    asm( "addze r5, r5 " ); \
    asm( "addi r4, r4, 8 " ); \
    asm( "addi r3, r3, 8 " ); \
    asm( "std r5, %0 " : "=m" (c)); \
    asm( "std r4, %0 " : "=m" (d)); \
    asm( "std r3, %0 " : "=m" (s) :: \
    "r3", "r4", "r5", "r6", "r7", "r8", "r9" );

#else

#define MULADDC_INIT \
    asm( "ld %%r3, %0 " :: "m" (s)); \
    asm( "ld %%r4, %0 " :: "m" (d)); \
    asm( "ld %%r5, %0 " :: "m" (c)); \
    asm( "ld %%r6, %0 " :: "m" (b)); \
    asm( "addi %r3, %r3, -8 " ); \
    asm( "addi %r4, %r4, -8 " ); \
    asm( "addic %r5, %r5, 0 " );

#define MULADDC_CORE \
    asm( "ldu %r7, 8(%r3) " ); \
    asm( "mulld %r8, %r7, %r6 " ); \
    asm( "mulhdu %r9, %r7, %r6 " ); \
    asm( "adde %r8, %r8, %r5 " ); \
    asm( "ld %r7, 8(%r4) " ); \
    asm( "addze %r5, %r9 " ); \
    asm( "addc %r8, %r8, %r7 " ); \
    asm( "stdu %r8, 8(%r4) " );

#define MULADDC_STOP \
    asm( "addze %r5, %r5 " ); \
    asm( "addi %r4, %r4, 8 " ); \
    asm( "addi %r3, %r3, 8 " ); \
    asm( "std %%r5, %0 " : "=m" (c)); \
    asm( "std %%r4, %0 " : "=m" (d)); \
    asm( "std %%r3, %0 " : "=m" (s) :: \
    "r3", "r4", "r5", "r6", "r7", "r8", "r9" );

#endif

#else /* PPC32 */

#if defined(__MACH__) && defined(__APPLE__)

#define MULADDC_INIT \
    asm( "lwz r3, %0 " :: "m" (s)); \
    asm( "lwz r4, %0 " :: "m" (d)); \
    asm( "lwz r5, %0 " :: "m" (c)); \
    asm( "lwz r6, %0 " :: "m" (b)); \
    asm( "addi r3, r3, -4 " ); \
    asm( "addi r4, r4, -4 " ); \
    asm( "addic r5, r5, 0 " );

#define MULADDC_CORE \
    asm( "lwzu r7, 4(r3) " ); \
    asm( "mullw r8, r7, r6 " ); \
    asm( "mulhwu r9, r7, r6 " ); \
    asm( "adde r8, r8, r5 " ); \
    asm( "lwz r7, 4(r4) " ); \
    asm( "addze r5, r9 " ); \
    asm( "addc r8, r8, r7 " ); \
    asm( "stwu r8, 4(r4) " );

#define MULADDC_STOP \
    asm( "addze r5, r5 " ); \
    asm( "addi r4, r4, 4 " ); \
    asm( "addi r3, r3, 4 " ); \
    asm( "stw r5, %0 " : "=m" (c)); \
    asm( "stw r4, %0 " : "=m" (d)); \
    asm( "stw r3, %0 " : "=m" (s) :: \
    "r3", "r4", "r5", "r6", "r7", "r8", "r9" );

#else

#define MULADDC_INIT \
    asm( "lwz %%r3, %0 " :: "m" (s)); \
    asm( "lwz %%r4, %0 " :: "m" (d)); \
    asm( "lwz %%r5, %0 " :: "m" (c)); \
    asm( "lwz %%r6, %0 " :: "m" (b)); \
    asm( "addi %r3, %r3, -4 " ); \
    asm( "addi %r4, %r4, -4 " ); \
    asm( "addic %r5, %r5, 0 " );

#define MULADDC_CORE \
    asm( "lwzu %r7, 4(%r3) " ); \
    asm( "mullw %r8, %r7, %r6 " ); \
    asm( "mulhwu %r9, %r7, %r6 " ); \
    asm( "adde %r8, %r8, %r5 " ); \
    asm( "lwz %r7, 4(%r4) " ); \
    asm( "addze %r5, %r9 " ); \
    asm( "addc %r8, %r8, %r7 " ); \
    asm( "stwu %r8, 4(%r4) " );

#define MULADDC_STOP \
    asm( "addze %r5, %r5 " ); \
    asm( "addi %r4, %r4, 4 " ); \
    asm( "addi %r3, %r3, 4 " ); \
    asm( "stw %%r5, %0 " : "=m" (c)); \
    asm( "stw %%r4, %0 " : "=m" (d)); \
    asm( "stw %%r3, %0 " : "=m" (s) :: \
    "r3", "r4", "r5", "r6", "r7", "r8", "r9" );

#endif

#endif /* PPC32 */
#endif /* PPC64 */

#if defined(__sparc__) && defined(__sparc64__)

#define MULADDC_INIT \
    asm( \
        " \
        ldx %3, %%o0; \
        ldx %4, %%o1; \
        ld %5, %%o2; \
        ld %6, %%o3; \
        "

#define MULADDC_CORE \
        " \
        ld [%%o0], %%o4; \
        inc 4, %%o0; \
        ld [%%o1], %%o5; \
        umul %%o3, %%o4, %%o4; \
        addcc %%o4, %%o2, %%o4; \
        rd %%y, %%g1; \
        addx %%g1, 0, %%g1; \
        addcc %%o4, %%o5, %%o4; \
        st %%o4, [%%o1]; \
        addx %%g1, 0, %%o2; \
        inc 4, %%o1; \
        "

#define MULADDC_STOP \
        " \
        st %%o2, %0; \
        stx %%o1, %1; \
        stx %%o0, %2; \
        " \
        : "=m" (c), "=m" (d), "=m" (s) \
        : "m" (s), "m" (d), "m" (c), "m" (b) \
        : "g1", "o0", "o1", "o2", "o3", "o4", \
          "o5" \
    );
#endif /* SPARCv9 */

#if defined(__sparc__) && !defined(__sparc64__)

#define MULADDC_INIT \
    asm( \
        " \
        ld %3, %%o0; \
        ld %4, %%o1; \
        ld %5, %%o2; \
        ld %6, %%o3; \
        "

#define MULADDC_CORE \
        " \
        ld [%%o0], %%o4; \
        inc 4, %%o0; \
        ld [%%o1], %%o5; \
        umul %%o3, %%o4, %%o4; \
        addcc %%o4, %%o2, %%o4; \
        rd %%y, %%g1; \
        addx %%g1, 0, %%g1; \
        addcc %%o4, %%o5, %%o4; \
        st %%o4, [%%o1]; \
        addx %%g1, 0, %%o2; \
        inc 4, %%o1; \
        "

#define MULADDC_STOP \
        " \
        st %%o2, %0; \
        st %%o1, %1; \
        st %%o0, %2; \
        " \
        : "=m" (c), "=m" (d), "=m" (s) \
        : "m" (s), "m" (d), "m" (c), "m" (b) \
        : "g1", "o0", "o1", "o2", "o3", "o4", \
          "o5" \
    );

#endif /* SPARCv8 */

#if defined(__microblaze__) || defined(microblaze)

#define MULADDC_INIT \
    asm( "lwi r3, %0 " :: "m" (s)); \
    asm( "lwi r4, %0 " :: "m" (d)); \
    asm( "lwi r5, %0 " :: "m" (c)); \
    asm( "lwi r6, %0 " :: "m" (b)); \
    asm( "andi r7, r6, 0xffff" ); \
    asm( "bsrli r6, r6, 16 " );

#define MULADDC_CORE \
    asm( "lhui r8, r3, 0 " ); \
    asm( "addi r3, r3, 2 " ); \
    asm( "lhui r9, r3, 0 " ); \
    asm( "addi r3, r3, 2 " ); \
    asm( "mul r10, r9, r6 " ); \
    asm( "mul r11, r8, r7 " ); \
    asm( "mul r12, r9, r7 " ); \
    asm( "mul r13, r8, r6 " ); \
    asm( "bsrli r8, r10, 16 " ); \
    asm( "bsrli r9, r11, 16 " ); \
    asm( "add r13, r13, r8 " ); \
    asm( "add r13, r13, r9 " ); \
    asm( "bslli r10, r10, 16 " ); \
    asm( "bslli r11, r11, 16 " ); \
    asm( "add r12, r12, r10 " ); \
    asm( "addc r13, r13, r0 " ); \
    asm( "add r12, r12, r11 " ); \
    asm( "addc r13, r13, r0 " ); \
    asm( "lwi r10, r4, 0 " ); \
    asm( "add r12, r12, r10 " ); \
    asm( "addc r13, r13, r0 " ); \
    asm( "add r12, r12, r5 " ); \
    asm( "addc r5, r13, r0 " ); \
    asm( "swi r12, r4, 0 " ); \
    asm( "addi r4, r4, 4 " );

#define MULADDC_STOP \
    asm( "swi r5, %0 " : "=m" (c)); \
    asm( "swi r4, %0 " : "=m" (d)); \
    asm( "swi r3, %0 " : "=m" (s) :: \
    "r3", "r4", "r5", "r6", "r7", "r8", \
    "r9", "r10", "r11", "r12", "r13" );

#endif /* MicroBlaze */

#if defined(__tricore__)

#define MULADDC_INIT \
    asm( "ld.a %%a2, %0 " :: "m" (s)); \
    asm( "ld.a %%a3, %0 " :: "m" (d)); \
    asm( "ld.w %%d4, %0 " :: "m" (c)); \
    asm( "ld.w %%d1, %0 " :: "m" (b)); \
    asm( "xor %d5, %d5 " );

#define MULADDC_CORE \
    asm( "ld.w %d0, [%a2+] " ); \
    asm( "madd.u %e2, %e4, %d0, %d1 " ); \
    asm( "ld.w %d0, [%a3] " ); \
    asm( "addx %d2, %d2, %d0 " ); \
    asm( "addc %d3, %d3, 0 " ); \
    asm( "mov %d4, %d3 " ); \
    asm( "st.w [%a3+], %d2 " );

#define MULADDC_STOP \
    asm( "st.w %0, %%d4 " : "=m" (c)); \
    asm( "st.a %0, %%a3 " : "=m" (d)); \
    asm( "st.a %0, %%a2 " : "=m" (s) :: \
    "d0", "d1", "e2", "d4", "a2", "a3" );

#endif /* TriCore */

#if defined(__arm__)

#if !defined(__thumb__)

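/*
 * In MULADDC_CORE, umlal accumulates the 64-bit product r3 * r4 into the
 * register pair r5:r2, so after it runs r2 holds the low word of
 * b * s[i] + c and r5 holds the high word.
 */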
#define MULADDC_INIT \
    asm( "ldr r0, %0 " :: "m" (s)); \
    asm( "ldr r1, %0 " :: "m" (d)); \
    asm( "ldr r2, %0 " :: "m" (c)); \
    asm( "ldr r3, %0 " :: "m" (b));

#define MULADDC_CORE \
    asm( "ldr r4, [r0], #4 " ); \
    asm( "mov r5, #0 " ); \
    asm( "ldr r6, [r1] " ); \
    asm( "umlal r2, r5, r3, r4 " ); \
    asm( "adds r7, r6, r2 " ); \
    asm( "adc r2, r5, #0 " ); \
    asm( "str r7, [r1], #4 " );

#define MULADDC_STOP \
    asm( "str r2, %0 " : "=m" (c)); \
    asm( "str r1, %0 " : "=m" (d)); \
    asm( "str r0, %0 " : "=m" (s) :: \
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7" );

#endif /* Thumb */

#endif /* ARMv3 */

#if defined(__alpha__)

#define MULADDC_INIT \
    asm( "ldq $1, %0 " :: "m" (s)); \
    asm( "ldq $2, %0 " :: "m" (d)); \
    asm( "ldq $3, %0 " :: "m" (c)); \
    asm( "ldq $4, %0 " :: "m" (b));

#define MULADDC_CORE \
    asm( "ldq $6, 0($1) " ); \
    asm( "addq $1, 8, $1 " ); \
    asm( "mulq $6, $4, $7 " ); \
    asm( "umulh $6, $4, $6 " ); \
    asm( "addq $7, $3, $7 " ); \
    asm( "cmpult $7, $3, $3 " ); \
    asm( "ldq $5, 0($2) " ); \
    asm( "addq $7, $5, $7 " ); \
    asm( "cmpult $7, $5, $5 " ); \
    asm( "stq $7, 0($2) " ); \
    asm( "addq $2, 8, $2 " ); \
    asm( "addq $6, $3, $3 " ); \
    asm( "addq $5, $3, $3 " );

#define MULADDC_STOP \
    asm( "stq $3, %0 " : "=m" (c)); \
    asm( "stq $2, %0 " : "=m" (d)); \
    asm( "stq $1, %0 " : "=m" (s) :: \
    "$1", "$2", "$3", "$4", "$5", "$6", "$7" );

#endif /* Alpha */

#if defined(__mips__)

#define MULADDC_INIT \
    asm( "lw $10, %0 " :: "m" (s)); \
    asm( "lw $11, %0 " :: "m" (d)); \
    asm( "lw $12, %0 " :: "m" (c)); \
    asm( "lw $13, %0 " :: "m" (b));

#define MULADDC_CORE \
    asm( "lw $14, 0($10) " ); \
    asm( "multu $13, $14 " ); \
    asm( "addi $10, $10, 4 " ); \
    asm( "mflo $14 " ); \
    asm( "mfhi $9 " ); \
    asm( "addu $14, $12, $14 " ); \
    asm( "lw $15, 0($11) " ); \
    asm( "sltu $12, $14, $12 " ); \
    asm( "addu $15, $14, $15 " ); \
    asm( "sltu $14, $15, $14 " ); \
    asm( "addu $12, $12, $9 " ); \
    asm( "sw $15, 0($11) " ); \
    asm( "addu $12, $12, $14 " ); \
    asm( "addi $11, $11, 4 " );

#define MULADDC_STOP \
    asm( "sw $12, %0 " : "=m" (c)); \
    asm( "sw $11, %0 " : "=m" (d)); \
    asm( "sw $10, %0 " : "=m" (s) :: \
    "$9", "$10", "$11", "$12", "$13", "$14", "$15" );

#endif /* MIPS */
#endif /* GNUC */

#if (defined(_MSC_VER) && defined(_M_IX86)) || defined(__WATCOMC__)

#define MULADDC_INIT \
    __asm mov esi, s \
    __asm mov edi, d \
    __asm mov ecx, c \
    __asm mov ebx, b

#define MULADDC_CORE \
    __asm lodsd \
    __asm mul ebx \
    __asm add eax, ecx \
    __asm adc edx, 0 \
    __asm add eax, [edi] \
    __asm adc edx, 0 \
    __asm mov ecx, edx \
    __asm stosd

#if defined(POLARSSL_HAVE_SSE2)

#define EMIT __asm _emit

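/*
 * MULADDC_HUIT is emitted as raw opcode bytes because older MSVC inline
 * assemblers do not accept SSE2 mnemonics; the byte stream mirrors the
 * movd/paddq/pmuludq/psrlq sequence of the GCC version above, and the
 * 0x0F 0x77 pair in MULADDC_STOP is emms.
 */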
#define MULADDC_HUIT \
    EMIT 0x0F EMIT 0x6E EMIT 0xC9 \
    EMIT 0x0F EMIT 0x6E EMIT 0xC3 \
    EMIT 0x0F EMIT 0x6E EMIT 0x1F \
    EMIT 0x0F EMIT 0xD4 EMIT 0xCB \
    EMIT 0x0F EMIT 0x6E EMIT 0x16 \
    EMIT 0x0F EMIT 0xF4 EMIT 0xD0 \
    EMIT 0x0F EMIT 0x6E EMIT 0x66 EMIT 0x04 \
    EMIT 0x0F EMIT 0xF4 EMIT 0xE0 \
    EMIT 0x0F EMIT 0x6E EMIT 0x76 EMIT 0x08 \
    EMIT 0x0F EMIT 0xF4 EMIT 0xF0 \
    EMIT 0x0F EMIT 0x6E EMIT 0x7E EMIT 0x0C \
    EMIT 0x0F EMIT 0xF4 EMIT 0xF8 \
    EMIT 0x0F EMIT 0xD4 EMIT 0xCA \
    EMIT 0x0F EMIT 0x6E EMIT 0x5F EMIT 0x04 \
    EMIT 0x0F EMIT 0xD4 EMIT 0xDC \
    EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x08 \
    EMIT 0x0F EMIT 0xD4 EMIT 0xEE \
    EMIT 0x0F EMIT 0x6E EMIT 0x67 EMIT 0x0C \
    EMIT 0x0F EMIT 0xD4 EMIT 0xFC \
    EMIT 0x0F EMIT 0x7E EMIT 0x0F \
    EMIT 0x0F EMIT 0x6E EMIT 0x56 EMIT 0x10 \
    EMIT 0x0F EMIT 0xF4 EMIT 0xD0 \
    EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
    EMIT 0x0F EMIT 0x6E EMIT 0x66 EMIT 0x14 \
    EMIT 0x0F EMIT 0xF4 EMIT 0xE0 \
    EMIT 0x0F EMIT 0xD4 EMIT 0xCB \
    EMIT 0x0F EMIT 0x6E EMIT 0x76 EMIT 0x18 \
    EMIT 0x0F EMIT 0xF4 EMIT 0xF0 \
    EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x04 \
    EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
    EMIT 0x0F EMIT 0x6E EMIT 0x5E EMIT 0x1C \
    EMIT 0x0F EMIT 0xF4 EMIT 0xD8 \
    EMIT 0x0F EMIT 0xD4 EMIT 0xCD \
    EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x10 \
    EMIT 0x0F EMIT 0xD4 EMIT 0xD5 \
    EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x08 \
    EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
    EMIT 0x0F EMIT 0xD4 EMIT 0xCF \
    EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x14 \
    EMIT 0x0F EMIT 0xD4 EMIT 0xE5 \
    EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x0C \
    EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
    EMIT 0x0F EMIT 0xD4 EMIT 0xCA \
    EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x18 \
    EMIT 0x0F EMIT 0xD4 EMIT 0xF5 \
    EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x10 \
    EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
    EMIT 0x0F EMIT 0xD4 EMIT 0xCC \
    EMIT 0x0F EMIT 0x6E EMIT 0x6F EMIT 0x1C \
    EMIT 0x0F EMIT 0xD4 EMIT 0xDD \
    EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x14 \
    EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
    EMIT 0x0F EMIT 0xD4 EMIT 0xCE \
    EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x18 \
    EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
    EMIT 0x0F EMIT 0xD4 EMIT 0xCB \
    EMIT 0x0F EMIT 0x7E EMIT 0x4F EMIT 0x1C \
    EMIT 0x83 EMIT 0xC7 EMIT 0x20 \
    EMIT 0x83 EMIT 0xC6 EMIT 0x20 \
    EMIT 0x0F EMIT 0x73 EMIT 0xD1 EMIT 0x20 \
    EMIT 0x0F EMIT 0x7E EMIT 0xC9

#define MULADDC_STOP \
    EMIT 0x0F EMIT 0x77 \
    __asm mov c, ecx \
    __asm mov d, edi \
    __asm mov s, esi \

#else

#define MULADDC_STOP \
    __asm mov c, ecx \
    __asm mov d, edi \
    __asm mov s, esi \

#endif /* SSE2 */
#endif /* MSVC */

#endif /* POLARSSL_HAVE_ASM */

#if !defined(MULADDC_CORE)
#if defined(POLARSSL_HAVE_LONGLONG)

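/*
 * Portable fallback using a double-width type: the full limb product
 * fits in a t_udbl, and each subsequent addition detects wrap-around
 * with an unsigned compare (r0 < c) to propagate a carry into r1.
 */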
#define MULADDC_INIT \
{ \
    t_udbl r; \
    t_uint r0, r1;

#define MULADDC_CORE \
    r = *(s++) * (t_udbl) b; \
    r0 = r; \
    r1 = r >> biL; \
    r0 += c; r1 += (r0 < c); \
    r0 += *d; r1 += (r0 < *d); \
    c = r1; *(d++) = r0;

#define MULADDC_STOP \
}

#else
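
/*
 * Generic C fallback without a double-width type: each limb is split
 * into two biH-bit halves, the four partial products are recombined by
 * hand, and carries are again detected with unsigned compares.
 */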
#define MULADDC_INIT \
{ \
    t_uint s0, s1, b0, b1; \
    t_uint r0, r1, rx, ry; \
    b0 = ( b << biH ) >> biH; \
    b1 = ( b >> biH );

#define MULADDC_CORE \
    s0 = ( *s << biH ) >> biH; \
    s1 = ( *s >> biH ); s++; \
    rx = s0 * b1; r0 = s0 * b0; \
    ry = s1 * b0; r1 = s1 * b1; \
    r1 += ( rx >> biH ); \
    r1 += ( ry >> biH ); \
    rx <<= biH; ry <<= biH; \
    r0 += rx; r1 += (r0 < rx); \
    r0 += ry; r1 += (r0 < ry); \
    r0 += c; r1 += (r0 < c); \
    r0 += *d; r1 += (r0 < *d); \
    c = r1; *(d++) = r0;

#define MULADDC_STOP \
}

#endif /* C (generic) */
#endif /* C (longlong) */

#endif /* bn_mul.h */