├── .gitignore
├── graphs
│   ├── chain2a.dot
│   ├── chain2.dot
│   └── chain1.dot
├── Makefile
├── chain2.svg
├── chain2a.svg
├── chain1.svg
├── csum_partial.c
├── LICENSE
└── README.md
/.gitignore:
--------------------------------------------------------------------------------
1 | *~
2 | csum_partial
--------------------------------------------------------------------------------
/graphs/chain2a.dot:
--------------------------------------------------------------------------------
1 | digraph G {
2 | node [shape=record];
3 | start -> A -> "+ B" -> "+ C" -> "+ D" -> end;
4 | start [shape=Mdiamond];
5 | end [shape=Msquare];
6 | }
7 |
8 |
--------------------------------------------------------------------------------
/graphs/chain2.dot:
--------------------------------------------------------------------------------
1 | digraph G {
2 | node [shape=record];
3 | start -> "(A + B)";
4 | start -> "(C + D)";
5 | "(A + B)" -> "+";
6 | "(C + D)" -> "+";
7 |
8 | "+" -> end;
9 |
10 | start [shape=Mdiamond];
11 | end [shape=Msquare];
12 | }
13 |
14 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | all: csum_partial chain1.svg chain2.svg chain2a.svg
2 |
3 | csum_partial: csum_partial.c Makefile
4 | gcc $(CFLAGS) -O2 -gdwarf-4 -g2 -march=skylake csum_partial.c -o csum_partial
5 |
6 |
7 | chain1.svg: graphs/chain1.dot
8 | dot -Tsvg -O graphs/chain1.dot
9 | mv graphs/chain1.dot.svg chain1.svg
10 |
11 |
12 | chain2.svg: graphs/chain2.dot
13 | dot -Tsvg -O graphs/chain2.dot
14 | mv graphs/chain2.dot.svg chain2.svg
15 |
16 |
17 | chain2a.svg: graphs/chain2a.dot
18 | dot -Tsvg -O graphs/chain2a.dot
19 | mv graphs/chain2a.dot.svg chain2a.svg
20 |
21 |
22 |
--------------------------------------------------------------------------------
/graphs/chain1.dot:
--------------------------------------------------------------------------------
1 | digraph G {
2 | node [shape=record];
3 | subgraph cluster_0 {
4 | node [style=filled];
5 | "add (%rdi),%rdx" -> "adc 0x8(%rdi),%rdx" -> "adc 0x10(%rdi),%rdx" -> "adc 0x18(%rdi),%rdx" -> "adc 0x20(%rdi),%rdx";
6 | label = "ASM block";
7 | }
8 |
9 | subgraph cluster_1 {
10 | node [style=filled];
11 | "mov %rdx,%rax" -> "shr $0x20,%rax"-> "add %edx,%eax" -> "adc $0x0,%eax";
12 | label = "add32_with_carry";
13 | color=blue;
14 | }
15 | start -> "add (%rdi),%rdx";
16 | "adc 0x20(%rdi),%rdx" -> "mov %rdx,%rax";
17 | "adc $0x0,%eax" -> end;
18 |
19 | start [shape=Mdiamond];
20 | end [shape=Msquare];
21 | }
22 |
23 |
--------------------------------------------------------------------------------
/chain2.svg:
--------------------------------------------------------------------------------
(SVG content omitted; chain2.svg is the Graphviz rendering of graphs/chain2.dot, see the Makefile)
--------------------------------------------------------------------------------
/chain2a.svg:
--------------------------------------------------------------------------------
(SVG content omitted; chain2a.svg is the Graphviz rendering of graphs/chain2a.dot, see the Makefile)
--------------------------------------------------------------------------------
/chain1.svg:
--------------------------------------------------------------------------------
(SVG content omitted; chain1.svg is the Graphviz rendering of graphs/chain1.dot, see the Makefile)
--------------------------------------------------------------------------------
/csum_partial.c:
--------------------------------------------------------------------------------
1 | // SPDX-License-Identifier: GPL-2.0
2 | /*
3 | * arch/x86_64/lib/csum-partial.c
4 | *
5 | * This file contains network checksum routines that are better done
6 | * in an architecture-specific manner due to speed.
7 | */
8 |
9 |
10 | #include <stdint.h>
11 | #include <stdio.h>
12 | #include <stdlib.h>
13 | #include <string.h>
14 | #include <time.h>
15 |
16 | typedef uint32_t __wsum;
17 | typedef uint64_t u64;
18 | typedef uint32_t u32;
19 | # define unlikely(x) __builtin_expect(!!(x), 0)
20 |
21 | #define LOOPCOUNT 102400
22 | #define PACKETSIZE 40
23 |
24 | static inline unsigned long load_unaligned_zeropad(const void *addr)
25 | {
26 | unsigned long ret, dummy;
27 |
28 | asm(
29 | "1:\tmov %2,%0\n"
30 | "2:\n"
31 | ".section .fixup,\"ax\"\n"
32 | "3:\t"
33 | "lea %2,%1\n\t"
34 | "and %3,%1\n\t"
35 | "mov (%1),%0\n\t"
36 | "leal %2,%%ecx\n\t"
37 | "andl %4,%%ecx\n\t"
38 | "shll $3,%%ecx\n\t"
39 | "shr %%cl,%0\n\t"
40 | "jmp 2b\n"
41 | ".previous\n"
42 | :"=&r" (ret),"=&c" (dummy)
43 | :"m" (*(unsigned long *)addr),
44 | "i" (-sizeof(unsigned long)),
45 | "i" (sizeof(unsigned long)-1));
46 | return ret;
47 | }
48 |
49 | static inline unsigned add32_with_carry(unsigned a, unsigned b)
50 | {
51 | asm("addl %2,%0\n\t"
52 | "adcl $0,%0"
53 | : "=r" (a)
54 | : "0" (a), "rm" (b));
55 | return a;
56 | }
57 |
58 | static inline unsigned short from32to16(unsigned a)
59 | {
60 | unsigned short b = a >> 16;
61 | asm("addw %w2,%w0\n\t"
62 | "adcw $0,%w0\n"
63 | : "=r" (b)
64 | : "0" (b), "r" (a));
65 | return b;
66 | }
67 |
68 | /*
69 | * Do a checksum on an arbitrary memory area.
70 | * Returns a 32bit checksum.
71 | *
72 | * This isn't as time critical as it used to be because many NICs
73 | * do hardware checksumming these days.
74 | *
75 | * Still, with CHECKSUM_COMPLETE this is called to compute
76 | * checksums on IPv6 headers (40 bytes) and other small parts.
77 | * it's best to have buff aligned on a 64-bit boundary
78 | */
79 | __wsum csum_partial(const void *buff, int len, __wsum sum)
80 | {
81 | u64 temp64 = (u64)sum;
82 | unsigned odd, result;
83 |
84 | odd = 1 & (unsigned long) buff;
85 | if (unlikely(odd)) {
86 | if (unlikely(len == 0))
87 | return sum;
88 | temp64 += (*(unsigned char *)buff << 8);
89 | len--;
90 | buff++;
91 | }
92 |
93 | while (unlikely(len >= 64)) {
94 | asm("addq 0*8(%[src]),%[res]\n\t"
95 | "adcq 1*8(%[src]),%[res]\n\t"
96 | "adcq 2*8(%[src]),%[res]\n\t"
97 | "adcq 3*8(%[src]),%[res]\n\t"
98 | "adcq 4*8(%[src]),%[res]\n\t"
99 | "adcq 5*8(%[src]),%[res]\n\t"
100 | "adcq 6*8(%[src]),%[res]\n\t"
101 | "adcq 7*8(%[src]),%[res]\n\t"
102 | "adcq $0,%[res]"
103 | : [res] "+r" (temp64)
104 | : [src] "r" (buff)
105 | : "memory");
106 | buff += 64;
107 | len -= 64;
108 | }
109 |
110 | if (len & 32) {
111 | asm("addq 0*8(%[src]),%[res]\n\t"
112 | "adcq 1*8(%[src]),%[res]\n\t"
113 | "adcq 2*8(%[src]),%[res]\n\t"
114 | "adcq 3*8(%[src]),%[res]\n\t"
115 | "adcq $0,%[res]"
116 | : [res] "+r" (temp64)
117 | : [src] "r" (buff)
118 | : "memory");
119 | buff += 32;
120 | }
121 | if (len & 16) {
122 | asm("addq 0*8(%[src]),%[res]\n\t"
123 | "adcq 1*8(%[src]),%[res]\n\t"
124 | "adcq $0,%[res]"
125 | : [res] "+r" (temp64)
126 | : [src] "r" (buff)
127 | : "memory");
128 | buff += 16;
129 | }
130 | if (len & 8) {
131 | asm("addq 0*8(%[src]),%[res]\n\t"
132 | "adcq $0,%[res]"
133 | : [res] "+r" (temp64)
134 | : [src] "r" (buff)
135 | : "memory");
136 | buff += 8;
137 | }
138 | if (len & 7) {
139 | unsigned int shift = (8 - (len & 7)) * 8;
140 | unsigned long trail;
141 |
142 | trail = (load_unaligned_zeropad(buff) << shift) >> shift;
143 |
144 | asm("addq %[trail],%[res]\n\t"
145 | "adcq $0,%[res]"
146 | : [res] "+r" (temp64)
147 | : [trail] "r" (trail));
148 | }
149 | result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
150 | if (unlikely(odd)) {
151 | result = from32to16(result);
152 | result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
153 | }
154 | return (__wsum)result;
155 | }
156 |
157 |
158 | __wsum __csum_partial(const void *buff, int len, __wsum sum)
159 | {
160 | u64 temp64 = (u64)sum;
161 | unsigned odd, result;
162 |
163 | odd = 1 & (unsigned long) buff;
164 | if (unlikely(odd)) {
165 | if (unlikely(len == 0))
166 | return sum;
167 | temp64 += (*(unsigned char *)buff << 8);
168 | len--;
169 | buff++;
170 | }
171 |
172 | while (unlikely(len >= 64)) {
173 | asm("addq 0*8(%[src]),%[res]\n\t"
174 | "adcq 1*8(%[src]),%[res]\n\t"
175 | "adcq 2*8(%[src]),%[res]\n\t"
176 | "adcq 3*8(%[src]),%[res]\n\t"
177 | "adcq 4*8(%[src]),%[res]\n\t"
178 | "adcq 5*8(%[src]),%[res]\n\t"
179 | "adcq 6*8(%[src]),%[res]\n\t"
180 | "adcq 7*8(%[src]),%[res]\n\t"
181 | "adcq $0,%[res]"
182 | : [res] "+r" (temp64)
183 | : [src] "r" (buff)
184 | : "memory");
185 | buff += 64;
186 | len -= 64;
187 | }
188 |
189 | if (len & 32) {
190 | asm("addq 0*8(%[src]),%[res]\n\t"
191 | "adcq 1*8(%[src]),%[res]\n\t"
192 | "adcq 2*8(%[src]),%[res]\n\t"
193 | "adcq 3*8(%[src]),%[res]\n\t"
194 | "adcq $0,%[res]"
195 | : [res] "+r" (temp64)
196 | : [src] "r" (buff)
197 | : "memory");
198 | buff += 32;
199 | }
200 | if (len & 16) {
201 | asm("addq 0*8(%[src]),%[res]\n\t"
202 | "adcq 1*8(%[src]),%[res]\n\t"
203 | "adcq $0,%[res]"
204 | : [res] "+r" (temp64)
205 | : [src] "r" (buff)
206 | : "memory");
207 | buff += 16;
208 | }
209 | if (len & 8) {
210 | asm("addq 0*8(%[src]),%[res]\n\t"
211 | "adcq $0,%[res]"
212 | : [res] "+r" (temp64)
213 | : [src] "r" (buff)
214 | : "memory");
215 | buff += 8;
216 | }
217 | if (len & 7) {
218 | unsigned int shift = (8 - (len & 7)) * 8;
219 | unsigned long trail;
220 |
221 | trail = (load_unaligned_zeropad(buff) << shift) >> shift;
222 |
223 | asm("addq %[trail],%[res]\n\t"
224 | "adcq $0,%[res]"
225 | : [res] "+r" (temp64)
226 | : [trail] "r" (trail));
227 | }
228 | result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
229 | if (unlikely(odd)) {
230 | result = from32to16(result);
231 | result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
232 | }
233 | return (__wsum)result;
234 | }
235 |
236 |
237 | __wsum csum_partial40_no_odd(const void *buff, int len, __wsum sum)
238 | {
239 | u64 temp64 = (u64)sum;
240 | unsigned result;
241 |
242 | while (unlikely(len >= 64)) {
243 | asm("addq 0*8(%[src]),%[res]\n\t"
244 | "adcq 1*8(%[src]),%[res]\n\t"
245 | "adcq 2*8(%[src]),%[res]\n\t"
246 | "adcq 3*8(%[src]),%[res]\n\t"
247 | "adcq 4*8(%[src]),%[res]\n\t"
248 | "adcq 5*8(%[src]),%[res]\n\t"
249 | "adcq 6*8(%[src]),%[res]\n\t"
250 | "adcq 7*8(%[src]),%[res]\n\t"
251 | "adcq $0,%[res]"
252 | : [res] "+r" (temp64)
253 | : [src] "r" (buff)
254 | : "memory");
255 | buff += 64;
256 | len -= 64;
257 | }
258 |
259 | if (len & 32) {
260 | asm("addq 0*8(%[src]),%[res]\n\t"
261 | "adcq 1*8(%[src]),%[res]\n\t"
262 | "adcq 2*8(%[src]),%[res]\n\t"
263 | "adcq 3*8(%[src]),%[res]\n\t"
264 | "adcq $0,%[res]"
265 | : [res] "+r" (temp64)
266 | : [src] "r" (buff)
267 | : "memory");
268 | buff += 32;
269 | }
270 | if (len & 16) {
271 | asm("addq 0*8(%[src]),%[res]\n\t"
272 | "adcq 1*8(%[src]),%[res]\n\t"
273 | "adcq $0,%[res]"
274 | : [res] "+r" (temp64)
275 | : [src] "r" (buff)
276 | : "memory");
277 | buff += 16;
278 | }
279 | if (len & 8) {
280 | asm("addq 0*8(%[src]),%[res]\n\t"
281 | "adcq $0,%[res]"
282 | : [res] "+r" (temp64)
283 | : [src] "r" (buff)
284 | : "memory");
285 | buff += 8;
286 | }
287 | if (len & 7) {
288 | unsigned int shift = (8 - (len & 7)) * 8;
289 | unsigned long trail;
290 |
291 | trail = (load_unaligned_zeropad(buff) << shift) >> shift;
292 |
293 | asm("addq %[trail],%[res]\n\t"
294 | "adcq $0,%[res]"
295 | : [res] "+r" (temp64)
296 | : [trail] "r" (trail));
297 | }
298 | result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
299 | return (__wsum)result;
300 | }
301 |
302 |
303 | __wsum csum_partial40_dead_code(const void *buff, int len, __wsum sum)
304 | {
305 | u64 temp64 = (u64)sum;
306 | unsigned result;
307 |
308 | asm("addq 0*8(%[src]),%[res]\n\t"
309 | "adcq 1*8(%[src]),%[res]\n\t"
310 | "adcq 2*8(%[src]),%[res]\n\t"
311 | "adcq 3*8(%[src]),%[res]\n\t"
312 | "adcq 4*8(%[src]),%[res]\n\t"
313 | "adcq $0,%[res]"
314 | : [res] "+r" (temp64)
315 | : [src] "r" (buff)
316 | : "memory");
317 | result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
318 |
319 | return (__wsum)result;
320 | }
321 |
322 | __wsum csum_partial40_ACX(const void *buff, int len, __wsum sum)
323 | {
324 | u64 temp64 = (u64)sum;
325 | unsigned result;
326 |
327 | /*
328 | * the xorq not only zeroes r9, it also clears CF and OF so that
329 | * the first adcx/adox work as expected getting no input carry
330 | * while setting the output carry in the correct flags
331 | */
332 | asm("xorq %%r9, %%r9 \n\t"
333 | "movq 0*8(%[src]), %%rcx \n\t"
334 | "adcx 1*8(%[src]), %%rcx \n\t"
335 | "adcx 2*8(%[src]), %%rcx \n\t"
336 | "adcx 3*8(%[src]), %%rcx \n\t"
337 | "adcx %%r9, %%rcx \n\t"
338 | "adox 4*8(%[src]), %[res]\n\t"
339 | "adox %%rcx, %[res]\n\t"
340 | "adox %%r9, %[res]"
341 | : [res] "+d" (temp64)
342 | : [src] "r" (buff)
343 | : "memory", "rcx", "r9");
344 | result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
345 |
346 | return (__wsum)result;
347 | }
348 |
349 | __wsum csum_partial40_2_streams(const void *buff, int len, __wsum sum)
350 | {
351 | u64 temp64 = (u64)sum;
352 | unsigned result;
353 |
354 | asm("xorq %%r9, %%r9\n\t"
355 | "movq 0*8(%[src]),%%rcx\n\t"
356 | "addq 1*8(%[src]),%%rcx\n\t"
357 | "adcq 2*8(%[src]),%%rcx\n\t"
358 | "adcq 3*8(%[src]),%%rcx\n\t"
359 | "adcq %%r9, %%rcx\n\t"
360 | "addq 4*8(%[src]),%[res]\n\t"
361 | "adcq %%rcx,%[res]\n\t"
362 | "adcq %%r9,%[res]"
363 | : [res] "+r" (temp64)
364 | : [src] "r" (buff)
365 | : "memory", "rcx", "r9");
366 | result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
367 |
368 | return (__wsum)result;
369 | }
370 |
371 | __wsum csum_partial40_zero_sum(const void *buff, int len, __wsum sum)
372 | {
373 | u64 temp64 = (u64)sum;
374 | unsigned result;
375 |
376 | asm("movq 0*8(%[src]),%%rcx\n\t"
377 | "addq 1*8(%[src]),%%rcx\n\t"
378 | "adcq 2*8(%[src]),%%rcx\n\t"
379 | "adcq $0, %%rcx\n\t"
380 | "movq 3*8(%[src]),%[res]\n\t"
381 | "addq 4*8(%[src]),%[res]\n\t"
382 | "adcq %%rcx,%[res]\n\t"
383 | "adcq $0,%[res]"
384 | : [res] "=&r" (temp64)
385 | : [src] "r" (buff)
386 | : "memory", "rcx");
387 | result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
388 |
389 | return (__wsum)result;
390 | }
391 |
392 | __wsum csum_partial40_32bit(const void *buff, int len, __wsum sum)
393 | {
394 | __wsum temp32 = sum;
395 |
396 | asm("movl 0*4(%[src]), %%r9d \n\t"
397 | "movl 1*4(%[src]), %%r11d \n\t"
398 | "movl 2*4(%[src]), %%ecx \n\t"
399 |
400 | "addl 3*4(%[src]), %%r9d \n\t"
401 | "adcl 4*4(%[src]), %%r9d \n\t"
402 | "adcl $0, %%r9d \n\t"
403 |
404 | "addl 5*4(%[src]), %%r11d \n\t"
405 | "adcl 6*4(%[src]), %%r11d \n\t"
406 | "adcl $0, %%r11d \n\t"
407 |
408 | "addl 7*4(%[src]), %%ecx \n\t"
409 | "adcl 8*4(%[src]), %%ecx \n\t"
410 | "adcl $0, %%ecx \n\t"
411 |
412 | "addl 9*4(%[src]), %%edx \n\t"
413 | "adcl %%r9d, %%edx \n\t"
414 | "adcl %%r11d, %%edx \n\t"
415 | "adcl %%ecx, %%edx \n\t"
416 | "adcl $0, %%edx \n\t"
417 | : [res] "+d" (temp32)
418 | : [src] "r" (buff)
419 | : "memory", "rcx", "r9", "r11");
420 | return temp32;
421 | }
422 |
423 | static inline __wsum csum_partial_no_odd(const void *buff, int len, __wsum sum)
424 | {
425 | if (__builtin_constant_p(len) && len == 40) {
426 | return csum_partial40_no_odd(buff, len, sum);
427 | } else {
428 | return __csum_partial(buff, len, sum);
429 | }
430 | }
431 |
432 | static inline __wsum csum_specialized(const void *buff, int len, __wsum sum)
433 | {
434 | if (__builtin_constant_p(len) && len == 40) {
435 | return __csum_partial(buff, len, sum);
436 | } else {
437 | return __csum_partial(buff, len, sum);
438 | }
439 | }
440 |
441 | static inline __wsum csum_partial_dead_code(const void *buff, int len, __wsum sum)
442 | {
443 | if (__builtin_constant_p(len) && len == 40) {
444 | return csum_partial40_dead_code(buff, len, sum);
445 | } else {
446 | return __csum_partial(buff, len, sum);
447 | }
448 | }
449 | static inline __wsum csum_partial_ACX(const void *buff, int len, __wsum sum)
450 | {
451 | if (__builtin_constant_p(len) && len == 40) {
452 | return csum_partial40_ACX(buff, len, sum);
453 | } else {
454 | return __csum_partial(buff, len, sum);
455 | }
456 | }
457 | static inline __wsum csum_partial_2_streams(const void *buff, int len, __wsum sum)
458 | {
459 | if (__builtin_constant_p(len) && len == 40) {
460 | return csum_partial40_2_streams(buff, len, sum);
461 | } else {
462 | return __csum_partial(buff, len, sum);
463 | }
464 | }
465 | static inline __wsum csum_partial_32bit(const void *buff, int len, __wsum sum)
466 | {
467 | if (__builtin_constant_p(len) && len == 40) {
468 | return csum_partial40_32bit(buff, len, sum);
469 | } else {
470 | return __csum_partial(buff, len, sum);
471 | }
472 | }
473 |
474 | static inline __wsum csum_partial_zero_sum(const void *buff, int len, __wsum sum)
475 | {
476 | if (__builtin_constant_p(len) && len == 40) {
477 | return csum_partial40_zero_sum(buff, len, sum);
478 | } else {
479 | return __csum_partial(buff, len, sum);
480 | }
481 | }
482 |
483 | static inline __wsum nulltest(const void *buff, int len, __wsum sum)
484 | {
485 | return 2;
486 | }
487 |
488 |
489 | double cycles[64];
490 | int cyclecount[64];
491 | double cycles2[64];
492 | int cyclecount2[64];
493 | __wsum sum[64];
494 | char *names[64];
495 |
496 | void reset_data(void)
497 | {
498 | memset(cycles, 0, sizeof(cycles));
499 | memset(cyclecount, 0, sizeof(cyclecount));
500 | memset(names, 0, sizeof(names));
501 | }
502 |
503 | void decay_data(void)
504 | {
505 | int i;
506 | for (i = 0; i < 64; i++) {
507 |
508 | if (cyclecount[i] > 1024) {
509 | cyclecount[i] /= 2;
510 | cycles[i] /= 2.0;
511 | }
512 | }
513 | for (i = 0; i < 64; i++) {
514 |
515 | if (cyclecount2[i] > 1024) {
516 | cyclecount2[i] /= 2;
517 | cycles2[i] /= 2.0;
518 | }
519 | }
520 | }
521 |
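/*
 * MEASURE: run `func` LOOPCOUNT times against an even-aligned buffer
 * (slot `index`) and an odd-aligned buffer (slot `index`+1), once
 * free-running and once with an lfence on each side of the call, and
 * accumulate the average cycles per call into cycles[]/cycles2[] for
 * report() to print.
 */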
522 | #define MEASURE(index, func, name) \
523 | sum[index] = 0; \
524 | start = __builtin_ia32_rdtscp(&A); \
525 | for (i = 0; i < LOOPCOUNT; i++) \
526 | sum[index] = func(buffer + 2 * i, PACKETSIZE, sum[index]); \
527 | end = __builtin_ia32_rdtscp(&A); \
528 | cycles[index] += 1.0 * (end - start)/LOOPCOUNT; \
529 | cyclecount[index]++; \
530 | names[index] = name; \
531 | sum[index+1] = 0; \
532 | start = __builtin_ia32_rdtscp(&A); \
533 | for (i = 0; i < LOOPCOUNT; i++) \
534 | sum[index+1] = func(buffer+1 + 2 * i, PACKETSIZE, sum[index+1]);\
535 | end = __builtin_ia32_rdtscp(&A); \
536 | cycles[index+1] += 1.0 * (end - start)/LOOPCOUNT; \
537 | cyclecount[index+1]++; \
538 | names[index+1] = name; \
539 | \
540 | sum[index] = 0; \
541 | start = __builtin_ia32_rdtscp(&A); \
542 | for (i = 0; i < LOOPCOUNT; i++) { \
543 | asm volatile ("lfence\n\t.align 64\n\t" : : : "memory", "rcx"); \
544 | sum[index] = func(buffer + 2 * i, PACKETSIZE, sum[index]); \
545 | asm volatile (".align 64\n\tlfence\n\t" : : : "memory", "rcx"); \
546 | } \
547 | end = __builtin_ia32_rdtscp(&A); \
548 | cycles2[index] += 1.0 * (end - start)/LOOPCOUNT; \
549 | cyclecount2[index]++; \
550 | names[index] = name; \
551 | sum[index+1] = 0; \
552 | start = __builtin_ia32_rdtscp(&A); \
553 | for (i = 0; i < LOOPCOUNT; i++) { \
554 | asm volatile ("lfence\n\t.align 64\n\t" : : : "memory", "rcx"); \
555 | sum[index+1] = func(buffer+1 + 2 * i, PACKETSIZE, sum[index+1]);\
556 | asm volatile (".align 64\n\tlfence\n\t" : : : "memory", "rcx"); \
557 | } \
558 | end = __builtin_ia32_rdtscp(&A); \
559 | cycles2[index+1] += 1.0 * (end - start)/LOOPCOUNT; \
560 | cyclecount2[index+1]++; \
561 | names[index+1] = name; \
562 |
563 |
564 |
565 | static void report(void)
566 | {
567 | static time_t prevtime;
568 | int i;
569 |
570 | if (time(NULL) - prevtime >= 1) {
571 | printf("\033[H");
572 | for (i = 2; i < 64; i+=2) {
573 | if (names[i]) {
574 | printf("%02i:\t%5.1f / %5.1f cycles\t(%08x)\t%s \n", i, cycles[i]/cyclecount[i], cycles2[i]/cyclecount2[i] - cycles2[0]/cyclecount2[0], sum[i], names[i]);
575 | }
576 | }
577 | printf("------- odd alignment ----- \n");
578 | for (i = 3; i < 64; i+=2) {
579 | if (names[i]) {
580 | printf("%02i:\t%5.1f / %5.1f cycles\t(%08x)\t%s \n", i, cycles[i]/cyclecount[i], cycles2[i]/cyclecount2[i] - cycles2[0]/cyclecount2[0], sum[i], names[i]);
581 | }
582 | }
583 | prevtime = time(NULL);
584 | }
585 |
586 | decay_data();
587 | }
588 |
589 |
590 | int main(int argc, char **argv)
591 | {
592 | char buffer[LOOPCOUNT * 4];
593 | int i;
594 | unsigned int A;
595 | uint32_t start, end;
596 |
597 | printf("\033[H\033[J");
598 |
599 | for (i = 0; i < LOOPCOUNT * 4; i++)
600 | buffer[i] = rand() & 255;
601 |
602 | /* power management warmup */
603 | for (i = 0; i < 5000; i++) {
604 | MEASURE(0, csum_partial, "Upcoming linux kernel version");
605 | }
606 |
607 |
608 | reset_data();
609 |
610 | while (1) {
611 | MEASURE(0, nulltest, "NULL test");
612 |
613 | MEASURE(2, csum_partial, "Upcoming linux kernel version");
614 | MEASURE(4, csum_specialized, "Specialized to size 40");
615 | MEASURE(22, csum_partial_no_odd, "Odd-alignment handling removed");
616 | MEASURE(24, csum_partial_dead_code, "Dead code elimination ");
617 | MEASURE(28, csum_partial_ACX, "ADX interleaved ");
618 | MEASURE(32, csum_partial_2_streams, "Work in progress non-ADX interleave ");
619 | MEASURE(34, csum_partial_32bit, "32 bit train ");
620 | MEASURE(36, csum_partial_zero_sum, "Assume zero input sum");
621 |
622 |
623 | report();
624 | }
625 | }
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 2, June 1991
3 |
4 | Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
5 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
6 | Everyone is permitted to copy and distribute verbatim copies
7 | of this license document, but changing it is not allowed.
8 |
9 | Preamble
10 |
11 | The licenses for most software are designed to take away your
12 | freedom to share and change it. By contrast, the GNU General Public
13 | License is intended to guarantee your freedom to share and change free
14 | software--to make sure the software is free for all its users. This
15 | General Public License applies to most of the Free Software
16 | Foundation's software and to any other program whose authors commit to
17 | using it. (Some other Free Software Foundation software is covered by
18 | the GNU Lesser General Public License instead.) You can apply it to
19 | your programs, too.
20 |
21 | When we speak of free software, we are referring to freedom, not
22 | price. Our General Public Licenses are designed to make sure that you
23 | have the freedom to distribute copies of free software (and charge for
24 | this service if you wish), that you receive source code or can get it
25 | if you want it, that you can change the software or use pieces of it
26 | in new free programs; and that you know you can do these things.
27 |
28 | To protect your rights, we need to make restrictions that forbid
29 | anyone to deny you these rights or to ask you to surrender the rights.
30 | These restrictions translate to certain responsibilities for you if you
31 | distribute copies of the software, or if you modify it.
32 |
33 | For example, if you distribute copies of such a program, whether
34 | gratis or for a fee, you must give the recipients all the rights that
35 | you have. You must make sure that they, too, receive or can get the
36 | source code. And you must show them these terms so they know their
37 | rights.
38 |
39 | We protect your rights with two steps: (1) copyright the software, and
40 | (2) offer you this license which gives you legal permission to copy,
41 | distribute and/or modify the software.
42 |
43 | Also, for each author's protection and ours, we want to make certain
44 | that everyone understands that there is no warranty for this free
45 | software. If the software is modified by someone else and passed on, we
46 | want its recipients to know that what they have is not the original, so
47 | that any problems introduced by others will not reflect on the original
48 | authors' reputations.
49 |
50 | Finally, any free program is threatened constantly by software
51 | patents. We wish to avoid the danger that redistributors of a free
52 | program will individually obtain patent licenses, in effect making the
53 | program proprietary. To prevent this, we have made it clear that any
54 | patent must be licensed for everyone's free use or not licensed at all.
55 |
56 | The precise terms and conditions for copying, distribution and
57 | modification follow.
58 |
59 | GNU GENERAL PUBLIC LICENSE
60 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
61 |
62 | 0. This License applies to any program or other work which contains
63 | a notice placed by the copyright holder saying it may be distributed
64 | under the terms of this General Public License. The "Program", below,
65 | refers to any such program or work, and a "work based on the Program"
66 | means either the Program or any derivative work under copyright law:
67 | that is to say, a work containing the Program or a portion of it,
68 | either verbatim or with modifications and/or translated into another
69 | language. (Hereinafter, translation is included without limitation in
70 | the term "modification".) Each licensee is addressed as "you".
71 |
72 | Activities other than copying, distribution and modification are not
73 | covered by this License; they are outside its scope. The act of
74 | running the Program is not restricted, and the output from the Program
75 | is covered only if its contents constitute a work based on the
76 | Program (independent of having been made by running the Program).
77 | Whether that is true depends on what the Program does.
78 |
79 | 1. You may copy and distribute verbatim copies of the Program's
80 | source code as you receive it, in any medium, provided that you
81 | conspicuously and appropriately publish on each copy an appropriate
82 | copyright notice and disclaimer of warranty; keep intact all the
83 | notices that refer to this License and to the absence of any warranty;
84 | and give any other recipients of the Program a copy of this License
85 | along with the Program.
86 |
87 | You may charge a fee for the physical act of transferring a copy, and
88 | you may at your option offer warranty protection in exchange for a fee.
89 |
90 | 2. You may modify your copy or copies of the Program or any portion
91 | of it, thus forming a work based on the Program, and copy and
92 | distribute such modifications or work under the terms of Section 1
93 | above, provided that you also meet all of these conditions:
94 |
95 | a) You must cause the modified files to carry prominent notices
96 | stating that you changed the files and the date of any change.
97 |
98 | b) You must cause any work that you distribute or publish, that in
99 | whole or in part contains or is derived from the Program or any
100 | part thereof, to be licensed as a whole at no charge to all third
101 | parties under the terms of this License.
102 |
103 | c) If the modified program normally reads commands interactively
104 | when run, you must cause it, when started running for such
105 | interactive use in the most ordinary way, to print or display an
106 | announcement including an appropriate copyright notice and a
107 | notice that there is no warranty (or else, saying that you provide
108 | a warranty) and that users may redistribute the program under
109 | these conditions, and telling the user how to view a copy of this
110 | License. (Exception: if the Program itself is interactive but
111 | does not normally print such an announcement, your work based on
112 | the Program is not required to print an announcement.)
113 |
114 | These requirements apply to the modified work as a whole. If
115 | identifiable sections of that work are not derived from the Program,
116 | and can be reasonably considered independent and separate works in
117 | themselves, then this License, and its terms, do not apply to those
118 | sections when you distribute them as separate works. But when you
119 | distribute the same sections as part of a whole which is a work based
120 | on the Program, the distribution of the whole must be on the terms of
121 | this License, whose permissions for other licensees extend to the
122 | entire whole, and thus to each and every part regardless of who wrote it.
123 |
124 | Thus, it is not the intent of this section to claim rights or contest
125 | your rights to work written entirely by you; rather, the intent is to
126 | exercise the right to control the distribution of derivative or
127 | collective works based on the Program.
128 |
129 | In addition, mere aggregation of another work not based on the Program
130 | with the Program (or with a work based on the Program) on a volume of
131 | a storage or distribution medium does not bring the other work under
132 | the scope of this License.
133 |
134 | 3. You may copy and distribute the Program (or a work based on it,
135 | under Section 2) in object code or executable form under the terms of
136 | Sections 1 and 2 above provided that you also do one of the following:
137 |
138 | a) Accompany it with the complete corresponding machine-readable
139 | source code, which must be distributed under the terms of Sections
140 | 1 and 2 above on a medium customarily used for software interchange; or,
141 |
142 | b) Accompany it with a written offer, valid for at least three
143 | years, to give any third party, for a charge no more than your
144 | cost of physically performing source distribution, a complete
145 | machine-readable copy of the corresponding source code, to be
146 | distributed under the terms of Sections 1 and 2 above on a medium
147 | customarily used for software interchange; or,
148 |
149 | c) Accompany it with the information you received as to the offer
150 | to distribute corresponding source code. (This alternative is
151 | allowed only for noncommercial distribution and only if you
152 | received the program in object code or executable form with such
153 | an offer, in accord with Subsection b above.)
154 |
155 | The source code for a work means the preferred form of the work for
156 | making modifications to it. For an executable work, complete source
157 | code means all the source code for all modules it contains, plus any
158 | associated interface definition files, plus the scripts used to
159 | control compilation and installation of the executable. However, as a
160 | special exception, the source code distributed need not include
161 | anything that is normally distributed (in either source or binary
162 | form) with the major components (compiler, kernel, and so on) of the
163 | operating system on which the executable runs, unless that component
164 | itself accompanies the executable.
165 |
166 | If distribution of executable or object code is made by offering
167 | access to copy from a designated place, then offering equivalent
168 | access to copy the source code from the same place counts as
169 | distribution of the source code, even though third parties are not
170 | compelled to copy the source along with the object code.
171 |
172 | 4. You may not copy, modify, sublicense, or distribute the Program
173 | except as expressly provided under this License. Any attempt
174 | otherwise to copy, modify, sublicense or distribute the Program is
175 | void, and will automatically terminate your rights under this License.
176 | However, parties who have received copies, or rights, from you under
177 | this License will not have their licenses terminated so long as such
178 | parties remain in full compliance.
179 |
180 | 5. You are not required to accept this License, since you have not
181 | signed it. However, nothing else grants you permission to modify or
182 | distribute the Program or its derivative works. These actions are
183 | prohibited by law if you do not accept this License. Therefore, by
184 | modifying or distributing the Program (or any work based on the
185 | Program), you indicate your acceptance of this License to do so, and
186 | all its terms and conditions for copying, distributing or modifying
187 | the Program or works based on it.
188 |
189 | 6. Each time you redistribute the Program (or any work based on the
190 | Program), the recipient automatically receives a license from the
191 | original licensor to copy, distribute or modify the Program subject to
192 | these terms and conditions. You may not impose any further
193 | restrictions on the recipients' exercise of the rights granted herein.
194 | You are not responsible for enforcing compliance by third parties to
195 | this License.
196 |
197 | 7. If, as a consequence of a court judgment or allegation of patent
198 | infringement or for any other reason (not limited to patent issues),
199 | conditions are imposed on you (whether by court order, agreement or
200 | otherwise) that contradict the conditions of this License, they do not
201 | excuse you from the conditions of this License. If you cannot
202 | distribute so as to satisfy simultaneously your obligations under this
203 | License and any other pertinent obligations, then as a consequence you
204 | may not distribute the Program at all. For example, if a patent
205 | license would not permit royalty-free redistribution of the Program by
206 | all those who receive copies directly or indirectly through you, then
207 | the only way you could satisfy both it and this License would be to
208 | refrain entirely from distribution of the Program.
209 |
210 | If any portion of this section is held invalid or unenforceable under
211 | any particular circumstance, the balance of the section is intended to
212 | apply and the section as a whole is intended to apply in other
213 | circumstances.
214 |
215 | It is not the purpose of this section to induce you to infringe any
216 | patents or other property right claims or to contest validity of any
217 | such claims; this section has the sole purpose of protecting the
218 | integrity of the free software distribution system, which is
219 | implemented by public license practices. Many people have made
220 | generous contributions to the wide range of software distributed
221 | through that system in reliance on consistent application of that
222 | system; it is up to the author/donor to decide if he or she is willing
223 | to distribute software through any other system and a licensee cannot
224 | impose that choice.
225 |
226 | This section is intended to make thoroughly clear what is believed to
227 | be a consequence of the rest of this License.
228 |
229 | 8. If the distribution and/or use of the Program is restricted in
230 | certain countries either by patents or by copyrighted interfaces, the
231 | original copyright holder who places the Program under this License
232 | may add an explicit geographical distribution limitation excluding
233 | those countries, so that distribution is permitted only in or among
234 | countries not thus excluded. In such case, this License incorporates
235 | the limitation as if written in the body of this License.
236 |
237 | 9. The Free Software Foundation may publish revised and/or new versions
238 | of the General Public License from time to time. Such new versions will
239 | be similar in spirit to the present version, but may differ in detail to
240 | address new problems or concerns.
241 |
242 | Each version is given a distinguishing version number. If the Program
243 | specifies a version number of this License which applies to it and "any
244 | later version", you have the option of following the terms and conditions
245 | either of that version or of any later version published by the Free
246 | Software Foundation. If the Program does not specify a version number of
247 | this License, you may choose any version ever published by the Free Software
248 | Foundation.
249 |
250 | 10. If you wish to incorporate parts of the Program into other free
251 | programs whose distribution conditions are different, write to the author
252 | to ask for permission. For software which is copyrighted by the Free
253 | Software Foundation, write to the Free Software Foundation; we sometimes
254 | make exceptions for this. Our decision will be guided by the two goals
255 | of preserving the free status of all derivatives of our free software and
256 | of promoting the sharing and reuse of software generally.
257 |
258 | NO WARRANTY
259 |
260 | 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
261 | FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
262 | OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
263 | PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
264 | OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
265 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
266 | TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
267 | PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
268 | REPAIR OR CORRECTION.
269 |
270 | 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
271 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
272 | REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
273 | INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
274 | OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
275 | TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
276 | YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
277 | PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
278 | POSSIBILITY OF SUCH DAMAGES.
279 |
280 | END OF TERMS AND CONDITIONS
281 |
282 | How to Apply These Terms to Your New Programs
283 |
284 | If you develop a new program, and you want it to be of the greatest
285 | possible use to the public, the best way to achieve this is to make it
286 | free software which everyone can redistribute and change under these terms.
287 |
288 | To do so, attach the following notices to the program. It is safest
289 | to attach them to the start of each source file to most effectively
290 | convey the exclusion of warranty; and each file should have at least
291 | the "copyright" line and a pointer to where the full notice is found.
292 |
293 |
294 | Copyright (C)
295 |
296 | This program is free software; you can redistribute it and/or modify
297 | it under the terms of the GNU General Public License as published by
298 | the Free Software Foundation; either version 2 of the License, or
299 | (at your option) any later version.
300 |
301 | This program is distributed in the hope that it will be useful,
302 | but WITHOUT ANY WARRANTY; without even the implied warranty of
303 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
304 | GNU General Public License for more details.
305 |
306 | You should have received a copy of the GNU General Public License along
307 | with this program; if not, write to the Free Software Foundation, Inc.,
308 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
309 |
310 | Also add information on how to contact you by electronic and paper mail.
311 |
312 | If the program is interactive, make it output a short notice like this
313 | when it starts in an interactive mode:
314 |
315 | Gnomovision version 69, Copyright (C) year name of author
316 | Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
317 | This is free software, and you are welcome to redistribute it
318 | under certain conditions; type `show c' for details.
319 |
320 | The hypothetical commands `show w' and `show c' should show the appropriate
321 | parts of the General Public License. Of course, the commands you use may
322 | be called something other than `show w' and `show c'; they could even be
323 | mouse-clicks or menu items--whatever suits your program.
324 |
325 | You should also get your employer (if you work as a programmer) or your
326 | school, if any, to sign a "copyright disclaimer" for the program, if
327 | necessary. Here is a sample; alter the names:
328 |
329 | Yoyodyne, Inc., hereby disclaims all copyright interest in the program
330 | `Gnomovision' (which makes passes at compilers) written by James Hacker.
331 |
332 | , 1 April 1989
333 | Ty Coon, President of Vice
334 |
335 | This General Public License does not permit incorporating your program into
336 | proprietary programs. If your program is a subroutine library, you may
337 | consider it more useful to permit linking proprietary applications with the
338 | library. If this is what you want to do, use the GNU Lesser General
339 | Public License instead of this License.
340 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## Intro
2 |
3 | Optimizing software for performance is fun. Loads of fun. And sometimes
4 | incredibly frustrating. It's also something to do on a long intercontinental
5 | flight to keep from watching that same movie again while being incredibly
6 | bored..
7 |
8 | In this post I'm going to show the steps I went through to optimize a
9 | specific Linux kernel function, and how I got to a set of final results. As
10 | with all performance work, the path from starting point to end point isn't a
11 | straight line but rather a roundabout meandering path of discovery and
12 | experiment.
13 |
14 | The code for the framework and the various steps lives together with this
15 | writeup in a [GitHub repository](https://github.com/fenrus75/csum_partial).
16 |
17 | ## Introduction to the problem
18 |
19 | In a [recent kernel commit](https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git/commit/?h=x86/core&id=d31c3c683ee668ba5d87c0730610442fd672525f),
20 | Eric Dumazet optimized the x86-64 architecture version of the `csum_partial`
21 | function.
22 | In his commit message, Eric noted that the use of this function has
23 | effectively shifted from doing checksums for whole packets (which the
24 | hardware checksums nowadays) to primarily doing a checksum for the 40 byte IPv6
25 | header. He then provides an optimization for this function, and shows
26 | that its CPU usage was significant and drops considerably
27 | with his change.
28 |
29 | In this writeup, I'm going to take a deeper look at this function, and
30 | see if further optimizations are possible (spoiler: they are).
31 |
32 |
33 | ## What `csum_partial` does
34 |
35 | The function calculates a 32 bit checksum of a block of data. A checksum is
36 | basically a simple addition function, but where the outgoing carry feeds
37 | back into the checksum. Because addition is a very gentle mathematical
38 | function where the order of operations is completely unimportant (addition is
39 | commutative, i.e. A + B equals B + A), this gives a few key freedoms. The
40 | most important one (and used by the current `csum_partial`) is that you can
41 | calculate a 64 bit checksum and then "fold" it into a 32 bit checksum by just
42 | adding the upper and lower 32 bits (and then adding again the remaining
43 | carry from this addition). Likewise, if one wants a 16 bit checksum, you can
44 | "fold" the two halves of a 32 bit checksum together.
45 |
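As a minimal illustration in plain C (using the `<stdint.h>` types; these helper
names are made up for this writeup and are not the kernel code), the folding
looks like this:

    /* illustrative only: fold a wider sum into a narrower one by adding
       the two halves and re-adding the carry of that addition */
    static inline uint32_t fold64to32(uint64_t sum)
    {
            uint32_t lo = (uint32_t)sum, hi = (uint32_t)(sum >> 32);
            uint32_t r = lo + hi;
            return r + (r < lo);    /* r < lo means the addition carried */
    }

    static inline uint16_t fold32to16(uint32_t sum)
    {
            uint16_t lo = (uint16_t)sum, hi = (uint16_t)(sum >> 16);
            uint16_t r = lo + hi;
            return r + (r < lo);
    }
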
46 | There really are only two messy parts in `csum_partial`:
47 |
48 | * coping with the "tail" of the buffer for buffers where the size isn't a
49 | nice multiple of 8 or 4.
50 | * coping with "weird alignment" in case the address of the buffer does not
51 | start at a multiple of two.
52 |
53 |
54 |
55 | ## The optimization logistics and overall strategy
56 |
57 | For convenience and speed of development I ported Eric's `csum_partial`
58 | function to a userspace testbench. This involved providing some of the basic
59 | helper functions used by the function and removing a few kernel-isms
60 | that exist only to help static analysis. Neither the functionality
61 | nor the performance of the function is impacted by this port.
62 |
63 | I also added the obvious glue logic for measuring and reporting cycle count
64 | averages of a large number of loops of the functions.
65 |
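The core of the measurement boils down to the following sketch (a simplified
version of the MEASURE macro in csum_partial.c; the odd-aligned run, the
lfence variant and the averaging over many runs are left out here):

    unsigned int aux;
    unsigned long long start, end;
    __wsum sum = 0;

    start = __builtin_ia32_rdtscp(&aux);
    for (int i = 0; i < LOOPCOUNT; i++)
            sum = csum_partial(buffer + 2 * i, PACKETSIZE, sum);
    end = __builtin_ia32_rdtscp(&aux);

    double cycles_per_call = (double)(end - start) / LOOPCOUNT;
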
66 | In terms of strategy, I'm going to focus on the statement that the 40 byte
67 | input is the most common case and will specialize the code for it.
68 |
69 |
70 | ## Performance baseline
71 |
72 | For this article, I will only be measuring a buffer size of 40 bytes, even though
73 | the code of course still has to work correctly for arbitrary buffer sizes.
74 |
75 | | Scenario | Even aligned buffer | Odd aligned buffer |
76 | | ----------------- | ------------------- | ------------------ |
77 | | Baseline | 11.1 cycles | 19.2 cycles |
78 |
79 | Nothing much to say about a baseline, other than that the "odd aligned
80 | buffer" case is shockingly more expensive.
81 |
82 |
83 |
84 |
85 | ## First step: Specializing
86 |
87 | As a very first step, we're going to make the size of 40 bytes a special
88 | case.
89 |
90 | static inline __wsum csum_partial(const void *buff, int len, __wsum sum)
91 | {
92 | if (__builtin_constant_p(len) && len == 40) {
93 | return __csum_partial(buff, len, sum);
94 | } else {
95 | return __csum_partial(buff, len, sum);
96 | }
97 | }
98 |
99 | In this first step, we still call the original `csum_partial()` function (now
100 | renamed to `__csum_partial`) but with an if statement that checks for a
101 | compile-time constant `len` parameter of 40 (this will obviously only work in
102 | an inline function in a header).
103 |
104 | | Scenario | Even aligned buffer | Odd aligned buffer |
105 | | ----------------- | ------------------- | ------------------ |
106 | | Baseline | 11.1 cycles | 19.2 cycles |
107 | | Specialized | 11.1 cycles | 19.2 cycles |
108 |
109 | As you can see in the results table, nothing has improved yet.
110 |
111 |
112 | ## Next step: Getting rid of the "Odd alignment" handling
113 |
114 | The data shows that the handling of odd-aligned buffers is very slow. It
115 | also is going to hurt further specialization, since it means sometimes we
116 | process the buffer as 40 bytes, and sometimes as 1 + 38 + 1.
117 | So let's see how bad the performance really is in the unaligned case by just
118 | removing the special case:
119 |
120 | __wsum csum_partial40_no_odd(const void *buff, int len, __wsum sum)
121 | {
122 | u64 temp64 = (u64)sum;
123 | unsigned result;
124 |
125 | while (unlikely(len >= 64)) {
126 | asm("addq 0*8(%[src]),%[res]\n\t"
127 | "adcq 1*8(%[src]),%[res]\n\t"
128 | "adcq 2*8(%[src]),%[res]\n\t"
129 | "adcq 3*8(%[src]),%[res]\n\t"
130 | "adcq 4*8(%[src]),%[res]\n\t"
131 | "adcq 5*8(%[src]),%[res]\n\t"
132 | "adcq 6*8(%[src]),%[res]\n\t"
133 | "adcq 7*8(%[src]),%[res]\n\t"
134 | "adcq $0,%[res]"
135 | : [res] "+r" (temp64)
136 | : [src] "r" (buff)
137 | : "memory");
138 | buff += 64;
139 | len -= 64;
140 | }
141 |
142 | if (len & 32) {
143 | asm("addq 0*8(%[src]),%[res]\n\t"
144 | "adcq 1*8(%[src]),%[res]\n\t"
145 | "adcq 2*8(%[src]),%[res]\n\t"
146 | "adcq 3*8(%[src]),%[res]\n\t"
147 | "adcq $0,%[res]"
148 | : [res] "+r" (temp64)
149 | : [src] "r" (buff)
150 | : "memory");
151 | buff += 32;
152 | }
153 | if (len & 16) {
154 | asm("addq 0*8(%[src]),%[res]\n\t"
155 | "adcq 1*8(%[src]),%[res]\n\t"
156 | "adcq $0,%[res]"
157 | : [res] "+r" (temp64)
158 | : [src] "r" (buff)
159 | : "memory");
160 | buff += 16;
161 | }
162 | if (len & 8) {
163 | asm("addq 0*8(%[src]),%[res]\n\t"
164 | "adcq $0,%[res]"
165 | : [res] "+r" (temp64)
166 | : [src] "r" (buff)
167 | : "memory");
168 | buff += 8;
169 | }
170 | if (len & 7) {
171 | unsigned int shift = (8 - (len & 7)) * 8;
172 | unsigned long trail;
173 |
174 | trail = (load_unaligned_zeropad(buff) << shift) >> shift;
175 |
176 | asm("addq %[trail],%[res]\n\t"
177 | "adcq $0,%[res]"
178 | : [res] "+r" (temp64)
179 | : [trail] "r" (trail));
180 | }
181 | result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
182 | return (__wsum)result;
183 | }
184 |
185 |
186 | | Scenario | Even aligned buffer | Odd aligned buffer |
187 | | ----------------- | ------------------- | ------------------ |
188 | | Baseline | 11.1 cycles | 19.2 cycles |
189 | | Specialized | 11.1 cycles | 19.2 cycles |
190 | | Unaligned removed | 11.1 cycles | 11.1 cycles |
191 |
192 | Well, the data speaks for itself and shows that the special casing of the
193 | odd-aligned buffer is completely pointless and only damages performance.
194 |
195 |
196 | ## And now: Removing dead code
197 |
198 | Now that we only ever have to deal with 40 bytes (and not 38 or 39) we can remove
199 | the while loop (for sizes >= 64), as well as the code dealing with a
200 | remainder of 16 and remainders of 7 or less. The compiler would have done this
201 | as well, so this by itself is not going to be any win. However, we can now
202 | fold the one extra "adcq" statement that deals with the "8 bytes" remaining case into
203 | the code that deals with the first 32, effectively turning the code from
204 | 32 + 8 bytes into just doing 40 bytes. This will save one key operation since after each
205 | block the remaining carry has to be folded back into the sum, and by doing
206 | this optimization we go from 2 blocks of 32 + 8 -- and thus two folding
207 | operations -- to 1 block of 40 with only one folding operation.
208 |
209 | The resulting code now looks like this:
210 |
211 | __wsum csum_partial40_dead_code(const void *buff, int len, __wsum sum)
212 | {
213 | u64 temp64 = (u64)sum;
214 | unsigned result;
215 |
216 | asm("addq 0*8(%[src]),%[res]\n\t"
217 | "adcq 1*8(%[src]),%[res]\n\t"
218 | "adcq 2*8(%[src]),%[res]\n\t"
219 | "adcq 3*8(%[src]),%[res]\n\t"
220 | "adcq 4*8(%[src]),%[res]\n\t"
221 | "adcq $0,%[res]"
222 | : [res] "+r" (temp64)
223 | : [src] "r" (buff)
224 | : "memory");
225 | result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
226 |
227 | return (__wsum)result;
228 | }
229 |
230 | As you can see, the code is starting to look much simpler now, small and
231 | simple enough to just be an inline function defined in a header.
232 |
233 | This is also the first time we gained some performance for the even-aligned case:
234 |
235 | | Scenario | Even aligned buffer | Odd aligned buffer |
236 | | ----------------- | ------------------- | ------------------ |
237 | | Baseline | 11.1 cycles | 19.2 cycles |
238 | | Specialized | 11.1 cycles | 19.2 cycles |
239 | | Unaligned removed | 11.1 cycles | 11.1 cycles |
240 | | Dead Code Removed | 9.1 cycles | 9.1 cycles |
241 |
242 |
243 | ## Side track: Critical chain analysis
244 |
245 | At this point it's interesting to analyze the code to see what the
246 | fundamental performance floor of this code will be.
247 | In the diagram below, I've drawn the essential parts of the compiler generated code (including the
248 | `add32_with_carry`), where the arrows show the dependency graph
249 | of the instructions.
250 |
251 | ![](chain1.svg)
252 |
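Roughly, the chain in that diagram looks like this (the nodes of
graphs/chain1.dot plus the trailing `adc $0` of the asm block):

    # the asm block
    add     (%rdi),%rdx
    adc  0x8(%rdi),%rdx
    adc 0x10(%rdi),%rdx
    adc 0x18(%rdi),%rdx
    adc 0x20(%rdi),%rdx
    adc   $0x0,%rdx
    # add32_with_carry
    mov %rdx,%rax          # the mov is the one not counted below
    shr $0x20,%rax
    add %edx,%eax
    adc $0x0,%eax
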
253 | The Cascade Lake CPU that Eric used can execute up to 4
254 | instructions each clock cycle, but as you can see in the diagram, there is a
255 | chain of "add with carry" instructions that each depend on the previous
256 | instruction to be completed. Or in other words, in reality the CPU will not
257 | execute 4, but only 1 instruction each cycle. The critical chain is
258 | 9 instructions long (not counting the mov instruction). The "add", "adc" and
259 | "shr" instructions all have a latency of 1 clock cycle. This means that any implementation
260 | that uses this chain has a lower bound of 9 cycles.
261 |
262 | Since our measured performance was 9.1 cycles, which includes the various
263 | setup and loop overheads... it means that there really
264 | isn't any more blood to squeeze out of this stone.
265 |
266 | Or is there...
267 |
268 |
269 | ## Rethinking the problem
270 |
271 | We can admire the problem of this chain all day long, or try to polish the
272 | code a bit more, but neither is going to give us any step change in performance.
273 |
274 | Having reached a dead end, it's time to take a step back. The reason we're
275 | at 9 cycles is that we have one long chain. To break through this barrier
276 | we therefore need to find a way to split the chain into separate pieces that
277 | can execute in parallel.
278 |
279 | Earlier I wrote that addition is a very gentle, commutative and associative mathematical
280 | function, which means that one could transform a sequential computation such as
281 |
282 | R = A + B + C + D
283 |
284 | into
285 |
286 | R = (A + B) + (C + D)
287 |
288 | where (A + B) and (C + D) can be computed in parallel, turning a dependency
289 | chain of 3 cycles into a chain of 2 cycles.
290 |
291 | ![](chain2a.svg) ![](chain2.svg)
292 |
293 | Since our problem actually has 5 + 1 (the final carry) additions, we should
294 | be able to use this trick!
295 | The only complication is the final carry that we need to absorb back into
296 | our sum; in the next few sections we'll see what limits this imposes.
297 |
298 |
299 | ## Tangent: ADX instruction set extensions
300 |
301 | In certain types of cryptographic code, pairs of such long "add with carry" chains
302 | are common, and having only one carry flag ended up being a huge performance
303 | limiter. Because of this, Intel added 2 instructions (together called ADX)
304 | to the Broadwell generation of CPUs in 2014. One instruction (ADCX) will use and
305 | set ONLY the carry flag, while the other instruction (ADOX) will use and set ONLY
306 | the overflow flag. Because these two instructions use and set a disjoint set
307 | of CPU flags, they can be interleaved in a code stream without having
308 | dependencies between them.
309 |
310 | For more information, Wikipedia has a page: https://en.wikipedia.org/wiki/Intel_ADX
311 |
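As a tiny illustration of the idea (this is not the checksum code, just two
independent accumulations interleaved in one instruction stream):

    xor  %r9d,%r9d         # xor clears both CF and OF
    adcx   (%rsi),%rax     # chain 1: consumes and produces only CF
    adox  8(%rsi),%rdx     # chain 2: consumes and produces only OF
    adcx 16(%rsi),%rax     # continues chain 1, independent of chain 2
    adox 24(%rsi),%rdx     # continues chain 2, independent of chain 1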
312 |
313 | ## Using ADX for `csum_partial`
314 |
315 | We can split our chain of adds into two separate streams that can each be
316 | computed in parallel and that are added together at the end.
317 | The code for this looks like this:
318 |
319 |
320 | __wsum csum_partial40_ACX(const void *buff, int len, __wsum sum)
321 | {
322 | u64 temp64 = (u64)sum;
323 | unsigned result;
324 |
325 | /*
326 | * the xorq not only zeroes r9, it also clears CF and OF so that
327 | * the first adcx/adox work as expected getting no input carry
328 | * while setting the output carry in the correct flags
329 | */
330 | asm("xorq %%r9, %%r9 \n\t"
331 | "movq 0*8(%[src]), %%rcx \n\t"
332 | "adcx 1*8(%[src]), %%rcx \n\t"
333 | "adcx 2*8(%[src]), %%rcx \n\t"
334 | "adcx 3*8(%[src]), %%rcx \n\t"
335 | "adcx %%r9, %%rcx \n\t"
336 | "adox 4*8(%[src]), %[res]\n\t"
337 | "adox %%rcx, %[res]\n\t"
338 | "adox %%r9, %[res]"
339 | : [res] "+d" (temp64)
340 | : [src] "r" (buff)
341 | : "memory", "rcx", "r9");
342 | result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
343 |
344 | return (__wsum)result;
345 | }
346 |
347 |
348 | | Scenario | Even aligned buffer | Odd aligned buffer |
349 | | ----------------- | ------------------- | ------------------ |
350 | | Baseline | 11.1 cycles | 19.2 cycles |
351 | | Specialized | 11.1 cycles | 19.2 cycles |
352 | | Unaligned removed | 11.1 cycles | 11.1 cycles |
353 | | Dead Code Removed | 9.1 cycles | 9.1 cycles |
354 | | Using ADX | 6.1 cycles | 6.1 cycles |
355 |
356 | Even though this codepath has one extra add (to fold the carry into the sum
357 | for the "adcx" side of the flow), the overall performance is a win!
358 |
359 |
360 | ## 2 streams without ADX
361 |
362 | In the ADX example, you might notice that the code doesn't actually
363 | interleave ADCX and ADOX, but that it depends on the out of order engine for
364 | the parallel execution. This implies it should be possible to also do something similar
365 | using straight add-with-carry `adc` instructions. Since ADX is somewhat
366 | recent (not even an entire decade old) it'll be useful to explore this path
367 | as well and see how close we can get.
368 |
369 | The code ends up looking like this:
370 |
371 | __wsum csum_partial40_2_streams(const void *buff, int len, __wsum sum)
372 | {
373 | u64 temp64 = (u64)sum;
374 | unsigned result;
375 |
376 | asm("movq 0*8(%[src]), %%rcx\n\t"
377 | "addq 1*8(%[src]), %%rcx\n\t"
378 | "adcq 2*8(%[src]), %%rcx\n\t"
379 |         "adcq 3*8(%[src]), %%rcx\n\t"
380 | "adcq $0, %%rcx\n\t"
381 | "addq 4*8(%[src]), %[res]\n\t"
382 | "adcq %%rcx, %[res]\n\t"
383 | "adcq $0, %[res]"
384 | : [res] "+r" (temp64)
385 | : [src] "r" (buff)
386 |         : "memory", "rcx");
387 | result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
388 |
389 | return (__wsum)result;
390 | }
391 |
392 | And the data shows that we don't actually need ADX for this purpose.. we can
393 | get the same performance using ubiquitous 40 year old instructions.
394 |
395 | | Scenario | Even aligned buffer | Odd aligned buffer |
396 | | ----------------- | ------------------- | ------------------ |
397 | | Baseline | 11.1 cycles | 19.2 cycles |
398 | | Specialized | 11.1 cycles | 19.2 cycles |
399 | | Unaligned removed | 11.1 cycles | 11.1 cycles |
400 | | Dead Code Removed | 9.1 cycles | 9.1 cycles |
401 | | Using ADX | 6.1 cycles | 6.1 cycles |
402 | | Two Streams | 6.1 cycles | 6.1 cycles |
403 |
404 |
405 |
406 | ## Back to the drawing board
407 |
408 | Even with this 2 way interleaving, we're not yet at a 2x improvement over
409 | Eric's original code that is slated for the 5.17 kernel. So it's time to go
410 | back to our virtual whiteboard that still has the original dependency chain
411 | diagram on it.
412 |
413 | So far, we've focused on the first half of this dependency chain,
414 | turning it into 2 streams of parallel adds. But there is also a second part!
415 | The second part does the `add32_with_carry` operation, which is a `shr`, an `add`
416 | and an `adc` instruction that are each dependent on the previous one,
417 | so these are good for a 3 cycle cost.
418 |
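For reference, `add32_with_carry` is this small helper (as ported into
csum_partial.c in this repository):

    static inline unsigned add32_with_carry(unsigned a, unsigned b)
    {
            asm("addl %2,%0\n\t"
                "adcl $0,%0"
                : "=r" (a)
                : "0" (a), "rm" (b));
            return a;
    }
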
419 | In general, doing 64 bit math and folding the result into 32 bits at the end
420 | should be a win, but at a size of 40 bytes? If we were to downgrade to only 32 bit
421 | math, we can save those 3 cycles for the folding, but have to do 10 instead of 5 additions.
422 | A quick guess would be that those 5 extra additions -- when done at 2 per
423 | cycle -- would be a 2.5 cycle cost. So on the back of the napkin, there is a
424 | potential half cycle win by just doing all operations at 32 bit granularity.
425 |
426 | In order to make this perform well, we'll need to use 4 instead of 2 parallel
427 | streams of addition, which is practical once you have 10 items. With modern
428 | CPUs being able to do 4 additions per cycle, this finally reaches the full
429 | CPU capability.
430 |
431 | The code will then look like this:
432 |
433 | __wsum csum_partial40_32bit(const void *buff, int len, __wsum sum)
434 | {
435 | __wsum temp32 = sum;
436 |
437 | asm("movl 0*4(%[src]), %%r9d \n\t"
438 | "movl 1*4(%[src]), %%r11d \n\t"
439 | "movl 2*4(%[src]), %%ecx \n\t"
440 |
441 | "addl 3*4(%[src]), %%r9d \n\t"
442 | "adcl 4*4(%[src]), %%r9d \n\t"
443 | "adcl $0, %%r9d \n\t"
444 |
445 | "addl 5*4(%[src]), %%r11d \n\t"
446 | "adcl 6*4(%[src]), %%r11d \n\t"
447 | "adcl $0, %%r11d \n\t"
448 |
449 | "addl 7*4(%[src]), %%ecx \n\t"
450 | "adcl 8*4(%[src]), %%ecx \n\t"
451 | "adcl $0, %%ecx \n\t"
452 |
453 | "addl 9*4(%[src]), %%edx \n\t"
454 | "adcl %%r9d, %%edx \n\t"
455 | "adcl %%r11d, %%edx \n\t"
456 | "adcl %%ecx, %%edx \n\t"
457 | "adcl $0, %%edx \n\t"
458 | : [res] "+d" (temp32)
459 | : [src] "r" (buff)
460 | : "memory", "rcx", "r9", "r11");
461 | return temp32;
462 | }
463 |
464 |
465 | The result is unfortunately slightly shy of the half cycle win we were
466 | hoping for, but a win nevertheless:
467 |
468 |
469 | | Scenario | Even aligned buffer | Odd aligned buffer |
470 | | ----------------- | ------------------- | ------------------ |
471 | | Baseline | 11.1 cycles | 19.2 cycles |
472 | | Specialized | 11.1 cycles | 19.2 cycles |
473 | | Unaligned removed | 11.1 cycles | 11.1 cycles |
474 | | Dead Code Removed | 9.1 cycles | 9.1 cycles |
475 | | Using ADX | 6.1 cycles | 6.1 cycles |
476 | | Two Streams | 6.1 cycles | 6.1 cycles |
477 | | 32 bit only | 5.7 cycles | 5.8 cycles |
478 |
479 |
480 |
481 | ## The final potential step
482 |
483 | So this was a fun poking session but my flight is starting its descent and my
484 | internal goal of beating Eric's code by 2x has not been achieved yet.
485 |
486 | The only thing I can think of right now to push the algorithm over the edge
487 | is another specialization, and that is setting the input checksum to zero.
488 | While this may sound like a useless thing, in reality headers and other small
489 | pieces are most likely the only things still checksummed in software, and for
490 | those a value of zero is going to be plugged in as the input sum.
491 | By doing this, we can skip one add, which also allows us to have two more
492 | balanced halves of the tree... giving a potential saving of 2 cycles.
493 |
494 | The code now looks like this
495 |
496 |     __wsum csum_partial40_zero_sum(const void *buff, int len, __wsum sum)
497 | {
498 | u64 temp64 = (u64)sum;
499 | unsigned result;
500 |
501 | asm("movq 0*8(%[src]),%%rcx\n\t"
502 | "addq 1*8(%[src]),%%rcx\n\t"
503 | "adcq 2*8(%[src]),%%rcx\n\t"
504 | "adcq $0, %%rcx\n\t"
505 | "movq 3*8(%[src]),%[res]\n\t"
506 | "addq 4*8(%[src]),%[res]\n\t"
507 | "adcq %%rcx,%[res]\n\t"
508 | "adcq $0,%[res]"
509 | : [res] "=&r" (temp64)
510 | : [src] "r" (buff)
511 | : "memory", "rcx");
512 | result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
513 |
514 | return (__wsum)result;
515 | }
516 |
517 |
518 | Now this optimization exposed some funky things in the test framework,
519 | where gcc was a little too clever and managed to optimize the loop
520 | away, until I tweaked the framework code to stop it from doing that.
521 |
522 |
523 | | Scenario | Even aligned buffer | Odd aligned buffer |
524 | | ----------------- | ------------------- | ------------------ |
525 | | Baseline | 11.1 cycles | 19.2 cycles |
526 | | Specialized | 11.1 cycles | 19.2 cycles |
527 | | Unaligned removed | 11.1 cycles | 11.1 cycles |
528 | | Dead Code Removed | 9.1 cycles | 9.1 cycles |
529 | | Using ADX | 6.1 cycles | 6.1 cycles |
530 | | Two Streams | 6.1 cycles | 6.1 cycles |
531 | | 32 bit only | 5.7 cycles | 5.8 cycles |
532 | | Assume Zero Input | 4.0 cycles | 4.0 cycles |
533 |
534 | Either way, the final goal has been realized: a more-than-2x performance
535 | increase over the baseline.
536 |
537 |
538 |
539 |
540 | # Bonus section
541 |
542 | Some of my coworkers at Intel Corporation and others who look at the intersection of low level
543 | software and CPU microarchitecture will realize that the CPU's Out of Order engine
544 | is hiding latency in the examples and numbers above. One can debate if that
545 | is valid or not for this case. For now, I'm leaning towards it being valid
546 | since in a real world code flow, the Out of Order engine will always
547 | hide latencies -- that is its primary function.
548 |
549 | But just for interest, I made a set of measurements where I put an `lfence`
550 | instruction (which effectively fences the OOO engine) on either side of the
551 | call to the checksum function to measure a worst-case end-to-end latency.
552 |
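Simplified, the fenced measurement loop in the test framework (the second
half of the MEASURE macro) looks like this:

    for (i = 0; i < LOOPCOUNT; i++) {
            /* keep the out-of-order engine from overlapping iterations */
            asm volatile ("lfence" : : : "memory");
            sum = csum_partial(buffer + 2 * i, PACKETSIZE, sum);
            asm volatile ("lfence" : : : "memory");
    }
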
553 | The data of this experiment is in the table below:
554 |
555 | Latency measurement with OOO fenced off
556 |
557 | | Scenario | Even aligned buffer | Odd aligned buffer |
558 | | ----------------- | ------------------- | ------------------ |
559 | | Baseline | 19.1 cycles | 26.9 cycles |
560 | | Specialized | 18.2 cycles | 26.9 cycles |
561 | | Unaligned removed | 18.4 cycles | 18.8 cycles |
562 | | Dead Code Removed | 14.0 cycles | 15.2 cycles |
563 | | Using ADX | 15.8 cycles | 17.8 cycles |
564 | | Two Streams | 16.3 cycles | 16.5 cycles |
565 | | Assume Zero Input | 14.4 cycles | 14.1 cycles |
566 |
567 | In playing with the code, it's clear that it is often possible to reduce these
568 | "worst case" latencies by one or two cycles at the expense of the "with OOO"
569 | performance.
--------------------------------------------------------------------------------