Browse Subversion Repository
Contents of /branches/mty-makai/crypt64.S
Parent Directory
| Revision Log
Revision 192 -
( show annotations)
( download)
Mon May 26 06:29:35 2008 UTC
(15 years, 10 months ago)
by notanpe
File size: 7501 byte(s)
シーズン 2 に向けて
/******************************************************-*-fundamental-*-
 *
 * $Id$
 *
 * crypt64.S -- bitslice crypt kernel for GNU as (AT&T syntax), driven
 * through the C preprocessor.  One of four register models is chosen
 * at preprocess time:
 *   USE_MMX    : 32-bit x86, MMX registers   (W = 8 bytes per slice word)
 *   USE_64     : x86-64, general registers   (W = 8)
 *   USE_64_XMM : x86-64, XMM registers       (W = 16)
 *   default    : 32-bit x86, XMM registers   (W = 16)
 * NOTE(review): overall purpose inferred from the S1..S8 S-box macros
 * and the "use John" key schedule below (DES-style bitslicing?) --
 * confirm against the project documentation.
 */

/* DEBUG defaults to off (disables the rdtsc timing scaffolding below)
 * unless the build system defines it. */
#ifndef DEBUG
#define DEBUG 0
#endif
#if defined(USE_MMX)
/* ---- model 1: 32-bit x86 with MMX registers ------------------------
 * One bitslice word per MMX register, W = 8 bytes per word.
 */

#define USE_T 1 /* scratch words live in memory addressed via T */

.if !USE_T // to suppress code generated
#include "x86-mmx.S"
.endif
#undef K /* the include may define K; the register alias follows below */

#define PTR_T .long /* pointer-sized data directive (32-bit) */

#define W 8      /* bytes per bitslice word */
#define O 1      /* offset unit used by the los/loe patch table */
#define R(n) %mm##n
#define MOV movq
#define AND pand
#define ANDN pandn
#define OR por
#define XOR pxor
#define OX 0x00  /* extra LOADI displacement (disp8 suffices here) */

/* Register roles for this model. */
#define AX %eax
#define CNT %ax           /* loop counter */
#define KSI %dl           /* key-schedule index byte */
#define PARAM %edx        /* parameter block base */
#define TX %ebx           /* temp used for the LR/RL swap */
#define K %ecx            /* key pointer */
#define T /* +16 */ %esi  /* scratch base, biased by 16 words */
#define LR /* +16 */ %ebp /* L half base, biased by 16 words */
#define RL /* +16 */ %edi /* R half base, biased by 16 words */
#define H %ecx            /* result pointer near the epilogue */
#elif defined(USE_64)
/* ---- model 2: x86-64 with general-purpose registers ----------------
 * One bitslice word per 64-bit register, W = 8.  The only model with
 * a real `not` instruction (see RNOT below) and with CNT/KSI kept in
 * stack slots, since all GP registers are spoken for.
 */

#define USE_T 0 /* no dedicated scratch pointer register */

#define PTR_T .quad /* pointer-sized data directive (64-bit) */

#define W 8
#define O 1
#define R(n) %r##n /* combined with the r0..r7 aliases defined below */
#define MOV mov
#define NOT not /* defined => RNOT uses a real `not` instruction */
#define AND and
#define OR or
#define XOR xor
#define OX 0x00

#define AX %rax
#define CNT 4(%rsp) /* counter lives in a stack slot in this model */
#define KSI 6(%rsp) /* key-schedule index lives in a stack slot too */
#define PARAM %rdx
#define TX %rdx
#define K %rcx
#define T /* +16 */ %rsi
#define LR /* +16 */ %rbp
#define RL /* +16 */ %rdi
#define H %rcx

/* Scratch word n lives in the 0x110-byte frame set up in crypt64_pro
 * (the +16+8 bias skips the CNT/KSI slots and saved registers). */
#define WK(n) W*(n+16+8)(%rsp)

/* Map the logical slice registers r0..r7 onto physical registers,
 * so R(0)..R(7) expand to %r8..%r11 / %rax/%rbx/%rdx/%rsi. */
#define r4 rax
#define r5 rbx
#define r6 rdx
#define r7 rsi
#define r0 r8
#define r1 r9
#define r2 r10
#define r3 r11
#elif defined(USE_64_XMM)
/* ---- model 3: x86-64 with XMM registers ----------------------------
 * One bitslice word per XMM register, W = 16 bytes per word.
 */

#define USE_T 1

.if !USE_T // to suppress code generated
//#include "x64-sse.S"
.endif
#undef K

#define PTR_T .quad

#define W 16
#define O 4
#define R(n) %xmm##n
#define MOV movdqa
#define AND pand
#define ANDN pandn
#define OR por
#define XOR pxor
/* disp8 cannot reach these W=16 offsets, so force a disp32 encoding
 * (reconstructed from a mojibake Shift-JIS comment -- TODO confirm). */
#define OX 0x100000

#define AX %rax
#define CNT %ax
#define KSI %dl
#define PARAM %rdx
#define TX %rbx
#define K %rcx
#define T /* +16 */ %rsi
#define LR /* +16 */ %rbp
#define RL /* +16 */ %rdi
#define H %rcx
#else /* XMM */
/* ---- default model: 32-bit x86 with XMM registers ------------------
 * One bitslice word per XMM register, W = 16 bytes per word.
 */

#define USE_T 1 /* (original Shift-JIS comment garbled; it mentions TLS) */

.if 0 // to suppress code generated
#include "x86-sse.S"
.endif
#undef K
#undef tmp_at
/* tmp_at(n): scratch word n inside the (_)param64 area; the leading
 * underscore depends on the platform's C symbol mangling. */
#ifdef USE_REGPARM
#define tmp_at(n) param64+W*(((n+16)&31)+64)
#else /* USE_REGPARM */
#define tmp_at(n) _param64+W*(((n+16)&31)+64)
#endif /* USE_REGPARM */

#define PTR_T .long

#define W 16
#define O 4
#define R(n) %xmm##n
#define MOV movdqa
#define AND pand
#define ANDN pandn
#define OR por
#define XOR pxor
/* disp8 cannot reach these W=16 offsets, so force a disp32 encoding
 * (reconstructed from a mojibake Shift-JIS comment -- TODO confirm). */
#define OX 0x100000

#define AX %eax
#define CNT %ax
#define KSI %dl
#define PARAM %edx
#define TX %ebx
#define K %ecx
#define T /* +16 */ %esi
#define LR /* +16 */ %ebp
#define RL /* +16 */ %edi
#define H %ecx

#endif

/* Default scratch-word accessor: word n at W*(n)(T) when a scratch
 * pointer register exists.  USE_64 already defined its stack-based WK. */
#ifndef WK
#if USE_T
#define WK(n) W*(n)(T)
#endif
#endif

/* LOADI: load block word s (16-biased) from LR into R(d), then drop
 * the local label Ln<n> recorded in the los/loe offset table below.
 * The first argument v is bookkeeping only in all macros here. */
#define LOADI(v,n,d,s) MOV W*((s)-16)+OX(LR),R(d);Ln##n:
/* XORK: mix key word s (K-relative) into R(d). */
#define XORK(v,n,d,s) XOR W*s(K),R(d)

/* Register-to-register primitives. */
#define RMOVE(v,n,d,s) MOV R(s),R(d)
#define RANDN(v,n,d,s) ANDN R(s),R(d)
#define RAND(v,n,d,s) AND R(s),R(d)
#define ROR(v,n,d,s) OR R(s),R(d)
#define RXOR(v,n,d,s) XOR R(s),R(d)

#define NOP
#define RXORN RXOR /* "xor-not" variants collapse to plain xor here */
#define MXORN MXOR

/* Block-memory primitives (RL-relative, 16-biased). */
#define XORR(v,n,d,s) XOR W*((s)-16)(RL),R(d)
#define STORR(v,n,d,s) MOV R(d),W*((s)-16)(RL)

/* Memory-destination xor (register into block word). */
#define MXORR(v,n,d,s) XOR R(d),W*((s)-16)(RL)

/* Store a register into scratch word (o)-16. */
#define STORE(v,n,d,o) MOV R(d),WK((o)-16)

/* Scratch-memory (WK) source primitives. */
#define MMOVE(v,n,d,o) MOV WK((o)-16),R(d)
#define MXOR(v,n,d,o) XOR WK((o)-16),R(d)
#define MOR(v,n,d,o) OR WK((o)-16),R(d)
#define MANDN(v,n,d,o) ANDN WK((o)-16),R(d)
#define MAND(v,n,d,o) AND WK((o)-16),R(d)

/* RNOT: bitwise complement of R(d).  The GP-register model has a real
 * `not`; the SIMD models xor against memory at (T) -- presumably an
 * all-ones mask stored there (cf. pnot below) -- TODO confirm. */
#ifdef NOT
#define RNOT(v,n,d,x) not R(d)
#else
#define RNOT(v,n,d,x) XOR (T),R(d)
#endif
// Entry register contract, shown for the default 32-bit model
// (see the per-model #defines above for the 64-bit equivalents):
// (ecx, edx)

// ecx k       key pointer (K)
// edx t+0     parameter block base (PARAM)
// esi t+16    scratch area (T)
// ebp lr+16   L half (LR)
// edi rl+16   R half (RL)

.text
.align 16
// crypt64_desc: descriptor table exported to the caller -- the six
// stage entry points, then geometry: 16/W (slice words per 16 bytes),
// the offset unit O, and the number of 48-entry rows in the los table.
.globl crypt64_desc
crypt64_desc:
.globl _crypt64_desc
_crypt64_desc:
PTR_T crypt64_pro
PTR_T crypt64_crypt
PTR_T crypt64_cmp_pro
PTR_T crypt64_cmp_ep
PTR_T crypt64_ep
PTR_T crypt64_ep_end
.byte 16/W
.byte O
.word (loe-los)/(2*48)

/* Table of the Ln## label offsets dropped by LOADI, relative to
 * crypt64_crypt and measured in O units -- presumably used by the
 * caller to patch the disp(%reg) displacements of the load
 * instructions.  (Original Shift-JIS comment garbled; it mentions
 * "disp(%reg): disp is a pointer" -- TODO confirm intent.) */
#define LOF(n) (Ln##n-crypt64_crypt-O)
los:
.word LOF(00),LOF(10),LOF(20),LOF(30),LOF(40),LOF(50)
.word LOF(01),LOF(11),LOF(21),LOF(31),LOF(41),LOF(51)
.word LOF(02),LOF(12),LOF(22),LOF(32),LOF(42),LOF(52)
.word LOF(03),LOF(13),LOF(23),LOF(33),LOF(43),LOF(53)
.word LOF(04),LOF(14),LOF(24),LOF(34),LOF(44),LOF(54)
.word LOF(05),LOF(15),LOF(25),LOF(35),LOF(45),LOF(55)
.word LOF(06),LOF(16),LOF(26),LOF(36),LOF(46),LOF(56)
.word LOF(07),LOF(17),LOF(27),LOF(37),LOF(47),LOF(57)
loe:
.align 64

// ---------------------------------------------------------------------
// _crypt64 -- run the bitslice rounds over the caller's parameter
// block.  Entry registers are documented above crypt64_desc; the
// crypt64_pro/crypt64_crypt/crypt64_cmp_*/crypt64_ep labels are the
// stage entry points exported through the descriptor table.
// ---------------------------------------------------------------------
.globl _crypt64
_crypt64:
crypt64_pro:
// Save the registers this kernel uses (model-specific aliases).
push LR
push RL
push TX
push T
#ifdef USE_64
// Extra callee-saved registers plus a 0x110-byte frame holding the
// stack-resident CNT/KSI slots and the WK scratch words.
push %rbx
push %r12
push %r13
push %r14
push %r15
sub $0x110,%rsp
#endif

#if 0&&DEBUG>=1
rdtsc // t0: cycle count at entry (disabled by the 0&& guard)
#endif
push AX // t0 slot -- just a dummy when timing is off (SJIS comment reconstructed)

crypt64_crypt:

// Zero W*64 bytes ending at PARAM by walking AX from -W*64 up to 0.
// (Reconstructed from garbled SJIS comments; one appears to remark
// that using memset(3) would be safer.)
XOR R(7),R(7)
mov $-W*64,AX
clr_lr:
MOV R(7),W*64(PARAM,AX)
add $W,AX
jne clr_lr

// Derive the 16-word-biased L/R/scratch base pointers from PARAM
// (original SJIS comment garbled -- intent inferred from the layout).
lea W*(16)(PARAM),LR /* XXX */
lea W*(32+16)(PARAM),RL
#ifndef USE_64
lea W*(64+16)(PARAM),T
#endif

movb $0,KSI     // key-schedule index starts at 0
movw $16*25,CNT // 16*25 = 400 passes of the round body below

.align 64
loo:
// Advance K by the self-relative delta stored at offset W*2*56 of
// the current key block (chained key schedule).
add W*2*56(K),K

#ifdef S1
// Inline S-box path: the S1..S8 macros are supplied by the includer.

#if USE_T
#undef tmp_at
#define tmp_at(n) W*(n)(T) // scratch words addressed via T here
#endif

#undef pnot
#define pnot (T) // presumably an all-ones mask at (T) -- confirm

// The a6_p build of the S-box macros takes one extra (empty) argument.
#ifdef a6_p
#define S_1(o1,o2,o3,o4) S1(o1,o2,o3,o4,)
#define S_2(o1,o2,o3,o4) S2(o1,o2,o3,o4,)
#define S_3(o1,o2,o3,o4) S3(o1,o2,o3,o4,)
#define S_4(o1,o2,o3,o4) S4(o1,o2,o3,o4,)
#define S_5(o1,o2,o3,o4) S5(o1,o2,o3,o4,)
#define S_6(o1,o2,o3,o4) S6(o1,o2,o3,o4,)
#define S_7(o1,o2,o3,o4) S7(o1,o2,o3,o4,)
#define S_8(o1,o2,o3,o4) S8(o1,o2,o3,o4,)
#else
#define S_1(o1,o2,o3,o4) S1(o1,o2,o3,o4)
#define S_2(o1,o2,o3,o4) S2(o1,o2,o3,o4)
#define S_3(o1,o2,o3,o4) S3(o1,o2,o3,o4)
#define S_4(o1,o2,o3,o4) S4(o1,o2,o3,o4)
#define S_5(o1,o2,o3,o4) S5(o1,o2,o3,o4)
#define S_6(o1,o2,o3,o4) S6(o1,o2,o3,o4)
#define S_7(o1,o2,o3,o4) S7(o1,o2,o3,o4)
#define S_8(o1,o2,o3,o4) S8(o1,o2,o3,o4)
#endif

// B(n): block word n (16-biased) relative to RL.
#undef B
#define B(n) W*(n-16)(RL)

// EK(e,k1..k6): load the six expansion words for S-box group e (each
// LOADI also drops an Ln## patch label) and xor in six key words.
#define EK(e,k1,k2,k3,k4,k5,k6) \
LOADI(1,0##e,0,(31&(4*(e)-1))); XORK(1,0##e,0,k1); \
LOADI(2,1##e,1,(31&(4*(e)+0))); XORK(2,1##e,1,k2); \
LOADI(3,2##e,2,(31&(4*(e)+1))); XORK(3,2##e,2,k3); \
LOADI(4,3##e,3,(31&(4*(e)+2))); XORK(4,3##e,3,k4); \
LOADI(5,4##e,4,(31&(4*(e)+3))); XORK(5,4##e,4,k5); \
LOADI(6,5##e,5,(31&(4*(e)+4))); XORK(6,5##e,5,k6)

/* use John -- presumably John the Ripper's DES S-box expressions */
EK(0,13,16,10,23,0,4);S_1(B(8),B(16),B(22),B(30))
EK(1,2,27,14,5,20,9);S_2(B(12),B(27),B(1),B(17))
EK(2,22,18,11,3,25,7);S_3(B(23),B(15),B(29),B(5))
EK(3,15,6,26,19,12,1);S_4(B(25),B(19),B(9),B(0))
EK(4,68,79,58,64,74,82);S_5(B(7),B(13),B(24),B(2))
EK(5,57,67,78,72,60,75);S_6(B(3),B(28),B(10),B(18))
EK(6,71,76,66,83,61,80);S_7(B(31),B(11),B(21),B(6))
EK(7,73,69,77,63,56,59);S_8(B(4),B(26),B(14),B(20))

#elif defined(USE_64_XMM)
#include "x64-xmm.inc"
#elif defined(USE_64)
#include "x64-alu.inc"
#else
#include "crypt64.inc"
#endif

// Swap the L and R halves via TX -- except when KSI wraps to zero,
// which happens every 16th pass (adding 256/16 overflows the byte).
mov LR,TX
addb $(256/16),KSI
cmovne RL,LR
cmovne TX,RL
decw CNT
jne loo

// H = base just past the final LR half's area
// (original SJIS comment garbled -- intent inferred).
lea W*(32+16)(LR),H

// Rebase PARAM and T for the comparison stage.  (The garbled SJIS
// comments here appear to discuss how the compare code is expanded
// in place -- reconstructed, TODO confirm.)
lea W*(-16)(RL),PARAM
lea W*(-16)(H),T

crypt64_cmp_pro:

#if 0&&DEBUG>=1
// timing: sample the cycle counter between crypt and compare
rdtsc
push AX // t1
#endif
// Rebuild the stage base pointers from PARAM for the compare phase
// (SJIS comment garbled -- reconstructed).
lea W*(16)(PARAM),LR
lea W*(32+16)(PARAM),RL
lea W*(64+16)(PARAM),T

// (comparison code is expanded between cmp_pro and cmp_ep; the
// original garbled comment appears to say so)

crypt64_cmp_ep:

crypt64_ep:

pop LR // discard the t0/t1 timing slot (a dummy when timing is off)

#if 0&&DEBUG>=1
// timing: fold (t2-t1) and (t1-t0) into the return value
rdtsc // t2
mov AX,PARAM
sub LR,PARAM // t2 - t1
mov LR,AX
pop LR // t0
sub LR,AX // t1 - t0
#if defined(USE_64) || defined(USE_64_XMM)
// pack both deltas into one 64-bit result: high = t2-t1, low = t1-t0
shl $32,PARAM
and $0xFFFFFFFF,%rax
or PARAM,%rax
#endif
#endif

#ifdef USE_64
// Tear down the USE_64 frame and restore the extra registers.
add $0x110,%rsp
pop %r15
pop %r14
pop %r13
pop %r12
pop %rbx
#endif
pop T
pop TX
pop RL
pop LR

ret

crypt64_ep_end:

//EOF
Properties
| svn:eol-style |
native
|
| svn:keywords |
Author Date Id Rev URL
|
| |