@@ -4,86 +4,144 @@
 #include <stdio.h>
 
 #include "combinations.h"
-#include "prob_desc.h"
 #include "checker_helper.h"
 #include "checker.h"
+#include "gray.h"
 
 #ifdef VECT
 
-int check_support(const int k, const int nb_internal, __m256i probes_a_curr,
-                  __m256i probes_b_curr, uint64_t probes_r_curr, __m256i **probes_a_all,
-                  __m256i **probes_b_all, int check_sni)
+int check_support(const struct comb_t comb_struct, const int nb_internal, __m256i probes_a_curr,
+                  __m256i probes_b_curr, uint64_t probes_r_curr, __m256i **probes_a_all,
+                  __m256i **probes_b_all, uint64_t *probes_r_all[NB_PR], int check_sni)
 {
+
+#ifdef GLITCH
+
+    uint64_t *counters = malloc(comb_struct.k * sizeof(uint64_t));
+    int *local_radices = malloc(comb_struct.k * sizeof(int));
+    int *corres = malloc(comb_struct.k * sizeof(int));
+
+    int nb_nontrivials = 0;
+    uint64_t c = 0;
+    uint64_t i;
+    int to_incr, to_incr_bin, p;
+    int attack = 0;
+
+    for (i = 0; i < comb_struct.k; i++) {
+        if (radices[comb_struct.combination[i]] > 1) {
+            //printf("nb: %d\n", nb_nontrivials);
+            counters[nb_nontrivials] = 1;
+            local_radices[nb_nontrivials] = radices[comb_struct.combination[i]];
+            corres[nb_nontrivials] = i;
+            nb_nontrivials++;
+        }
+    }
+
+
+    while ((to_incr = next_increment(&c, local_radices, nb_nontrivials)) != -1) {
+
+        // Increment lower-level Gray code
+        to_incr_bin = next_increment_bin(&(counters[to_incr]), local_radices[to_incr]);
+        p = comb_struct.combination[corres[to_incr]];
+        probes_a_curr = _mm256_xor_si256(probes_a_curr, probes_a_all[p][to_incr_bin]);
+        probes_b_curr = _mm256_xor_si256(probes_b_curr, probes_b_all[p][to_incr_bin]);
+        probes_r_curr ^= probes_r_all[p][to_incr_bin];
+
+        if (counters[to_incr] == 0) {
+            to_incr_bin = next_increment_bin(&(counters[to_incr]), local_radices[to_incr]);
+            p = comb_struct.combination[corres[to_incr]];
+            probes_a_curr = _mm256_xor_si256(probes_a_curr, probes_a_all[p][to_incr_bin]);
+            probes_b_curr = _mm256_xor_si256(probes_b_curr, probes_b_all[p][to_incr_bin]);
+            probes_r_curr ^= probes_r_all[p][to_incr_bin];
+        }
+
+
+        if (check_sni) {
+            attack |= check_attack_sni(comb_struct.k, nb_internal, probes_r_curr, probes_a_curr, probes_b_curr);
+        } else {
+            attack |= check_attack_ni(comb_struct.k, probes_r_curr, probes_a_curr, probes_b_curr);
+        }
+
+    }
+
+    free(counters);
+    free(local_radices);
+    free(corres);
+
+    return attack;
+#endif
+
     if (check_sni) {
-        return check_attack_sni(k, nb_internal, probes_r_curr, probes_a_curr, probes_b_curr);
+        return check_attack_sni(comb_struct.k, nb_internal, probes_r_curr, probes_a_curr, probes_b_curr);
     } else {
-        return check_attack_ni(k, probes_r_curr, probes_a_curr, probes_b_curr);
+        return check_attack_ni(comb_struct.k, probes_r_curr, probes_a_curr, probes_b_curr);
     }
 
 }
 
 int next_support(struct comb_t *comb_struct, struct comb_diff_t *comb_diff,
                  __m256i *probes_a_curr, __m256i *probes_b_curr, uint64_t *probes_r_curr,
-                 __m256i **probes_a_all, __m256i **probes_b_all,
+                 __m256i *probes_a_all[NB_PR], __m256i *probes_b_all[NB_PR], uint64_t *probes_r_all[NB_PR],
                  uint64_t *nb_internal, int check_sni)
 {
-  next_combination(comb_struct, comb_diff);
-  if (comb_struct->done) return -1;
+    next_combination(comb_struct, comb_diff);
+    if (comb_struct->done) return -1;
 
-  // Adjust the number of internal probes
-  if (check_sni) {
-    if (comb_diff->to_del < NB_INT) nb_internal--;
-    if (comb_diff->to_add < NB_INT) nb_internal++;
-  }
+    // Adjust the number of internal probes (nb_internal is a pointer, so dereference it)
+    if (check_sni) {
+        if (comb_diff->to_del < NB_INT) (*nb_internal)--;
+        if (comb_diff->to_add < NB_INT) (*nb_internal)++;
+    }
 
-  *probes_a_curr = _mm256_xor_si256(*probes_a_curr, probes_a_all[comb_diff->to_del][0]);
-  *probes_a_curr = _mm256_xor_si256(*probes_a_curr, probes_a_all[comb_diff->to_add][0]);
+    *probes_a_curr = _mm256_xor_si256(*probes_a_curr, probes_a_all[comb_diff->to_del][0]);
+    *probes_a_curr = _mm256_xor_si256(*probes_a_curr, probes_a_all[comb_diff->to_add][0]);
 
-  *probes_b_curr = _mm256_xor_si256(*probes_b_curr, probes_b_all[comb_diff->to_del][0]);
-  *probes_b_curr = _mm256_xor_si256(*probes_b_curr, probes_b_all[comb_diff->to_add][0]);
+    *probes_b_curr = _mm256_xor_si256(*probes_b_curr, probes_b_all[comb_diff->to_del][0]);
+    *probes_b_curr = _mm256_xor_si256(*probes_b_curr, probes_b_all[comb_diff->to_add][0]);
 
-  *probes_r_curr ^= probes_r[comb_diff->to_del] ^ probes_r[comb_diff->to_add];
+    *probes_r_curr ^= probes_r_all[comb_diff->to_del][0] ^ probes_r_all[comb_diff->to_add][0];
 
-  return 0;
+    return 0;
 }
 
 int check_partial(struct comb_t comb_struct, uint64_t nb, int check_sni)
 {
     uint64_t nb_internal;
     struct comb_diff_t comb_diff;
     uint64_t probes_r_curr;
+    uint64_t *probes_r_all[NB_PR];
     int attack = 0;
     uint64_t c = 0;
     __m256i probes_a_curr;
     __m256i probes_b_curr;
     __m256i *probes_a_all[NB_PR];
     __m256i *probes_b_all[NB_PR];
-    init_sh_all(probes_a_all, probes_b_all);
+    init_all(probes_a_all, probes_b_all, probes_r_all);
 
     init_sh_curr(&probes_a_curr, probes_a_all, comb_struct.combination, comb_struct.k);
     init_sh_curr(&probes_b_curr, probes_b_all, comb_struct.combination, comb_struct.k);
-    init_r_curr(&probes_r_curr, comb_struct.combination, comb_struct.k);
+    init_r_curr(&probes_r_curr, probes_r_all, comb_struct.combination, comb_struct.k);
 
     c = 0;
-  nb_internal = 0;
-  for (uint64_t i = 0; i < comb_struct.k; i++)
-    nb_internal += comb_struct.combination[i] < NB_INT ? 1 : 0;
+    nb_internal = 0;
+    for (uint64_t i = 0; i < comb_struct.k; i++)
+        nb_internal += comb_struct.combination[i] < NB_INT ? 1 : 0;
 
     while (!attack && c < nb) {
         c++;
 
 
-        attack = check_support(comb_struct.k, nb_internal, probes_a_curr,
+        attack = check_support(comb_struct, nb_internal, probes_a_curr,
                                probes_b_curr, probes_r_curr, probes_a_all, probes_b_all,
-                               check_sni);
+                               probes_r_all, check_sni);
         if (next_support(&comb_struct, &comb_diff, &probes_a_curr, &probes_b_curr,
-                         &probes_r_curr, probes_a_all, probes_b_all, &nb_internal,
-                         check_sni)) {
+                         &probes_r_curr, probes_a_all, probes_b_all, probes_r_all,
+                         &nb_internal, check_sni)) {
             break;
         }
     }
 
-    free_sh_all(probes_a_all, probes_b_all);
+    free_all(probes_a_all, probes_b_all, probes_r_all);
     if (attack) {
         printf("\n");
         print_combination(comb_struct);
@@ -119,11 +177,11 @@ int check_partial(struct comb_t comb_struct, uint64_t nb, int check_sni)
     init_r_curr(probes_r_curr, comb_struct.combination, comb_struct.k);
 
     c = 0;
-  nb_internal = 0;
-  for (uint64_t i = 0; i < comb_struct.k; i++)
-    nb_internal += comb_struct.combination[i] < NB_INT ? 1 : 0;
+    nb_internal = 0;
+    for (uint64_t i = 0; i < comb_struct.k; i++)
+        nb_internal += comb_struct.combination[i] < NB_INT ? 1 : 0;
 
-  while (!attack_ni && !attack_sni && c < nb) {
+    while (!attack_ni && !attack_sni && c < nb) {
         c++;
 
         if (check_sni) {
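Note on the GLITCH path added to check_support(): it enumerates every sub-assignment of the selected probes with a two-level Gray code (next_increment() / next_increment_bin() from the new gray.h), so consecutive supports differ in a single probe expression and the running probes_a / probes_b / probes_r masks are updated with one XOR per step instead of being rebuilt from scratch. gray.h itself is not part of this diff, so the sketch below is only one standard way to generate such a sequence -- the loopless reflected mixed-radix Gray code (Knuth, TAOCP 7.2.1.1, Algorithm H) -- under the assumption that next_increment() behaves like gray_next() here: it reports the index of the single digit that changed, or -1 once every word over the given radices has been visited. All names in the sketch (gray_t, gray_init, gray_next) are illustrative, not part of the checker.

#include <stdio.h>
#include <stdlib.h>

/* One digit per non-trivial probe; digit j takes values 0 .. m[j]-1. */
struct gray_t {
    int n;      /* number of digits */
    int *m;     /* radix of each digit (all >= 2, like the radices > 1 filter above) */
    int *a;     /* current digit values */
    int *f;     /* focus pointers */
    int *o;     /* direction of each digit, +1 or -1 */
};

static void gray_init(struct gray_t *g, int n, const int *radices)
{
    g->n = n;
    g->m = malloc(n * sizeof(int));
    g->a = calloc(n, sizeof(int));
    g->f = malloc((n + 1) * sizeof(int));
    g->o = malloc(n * sizeof(int));
    for (int j = 0; j < n; j++) {
        g->m[j] = radices[j];
        g->f[j] = j;
        g->o[j] = 1;
    }
    g->f[n] = n;
}

/* Advance to the next word and return the index of the one digit that
 * changed, or -1 when all m[0]*...*m[n-1] words have been visited. */
static int gray_next(struct gray_t *g)
{
    int j = g->f[0];
    g->f[0] = 0;
    if (j == g->n) return -1;
    g->a[j] += g->o[j];
    /* Reverse direction at either end of the digit's range. */
    if (g->a[j] == 0 || g->a[j] == g->m[j] - 1) {
        g->o[j] = -g->o[j];
        g->f[j] = g->f[j + 1];
        g->f[j + 1] = j + 1;
    }
    return j;
}

int main(void)
{
    int radices[3] = { 3, 2, 4 };    /* example radices only */
    struct gray_t g;
    gray_init(&g, 3, radices);
    int j, steps = 0;
    while ((j = gray_next(&g)) != -1) {
        steps++;                     /* exactly one digit (index j) changed */
    }
    printf("%d transitions for %d words\n", steps, 3 * 2 * 4);  /* 23 for 24 */
    free(g.m); free(g.a); free(g.f); free(g.o);
    return 0;
}

The point of the design is the same as in the diff: because each step changes one digit, the caller can maintain its current XOR masks incrementally, which keeps the per-support cost constant in the number of probes k.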