Major rework of hf mf nested:

- PM: used GetCountMifare in MifareNested() for improved timing accuracy and better-quality nonces
- PM: MifareNested now delivers exactly two different nonces, avoiding time-consuming multiple lfsr_recovery32() runs on the client side
- Client: replaced quicksort with a bucket sort in crapto1.c, which is faster
- Client: use multithreading (two parallel calls to lfsr_recovery32()); see the sketch after this list
- Client: fixed a small bug in mfnested() (it always showed trgkey=0)
- Client: introduced a mutex for PrintAndLog() to avoid interleaved printing
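
A minimal sketch of the client-side threading described above (two parallel lfsr_recovery32() calls guarded by a print mutex), assuming POSIX threads; recovery_arg, recovery_thread and print_lock are illustrative names, not code from this commit:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include "crapto1.h"

// illustrative only: serialize PrintAndLog-style output so the two recovery
// threads do not interleave their lines
static pthread_mutex_t print_lock = PTHREAD_MUTEX_INITIALIZER;

struct recovery_arg {               // hypothetical argument struct
    uint32_t ks2;                   // 32 keystream bits derived from one nonce
    uint32_t in;                    // feed-in value for lfsr_recovery32()
    struct Crypto1State *result;    // candidate state list, filled by the thread
};

static void *recovery_thread(void *p)
{
    struct recovery_arg *a = p;
    a->result = lfsr_recovery32(a->ks2, a->in);   // the expensive call
    pthread_mutex_lock(&print_lock);
    printf("state list for one nonce recovered\n");
    pthread_mutex_unlock(&print_lock);
    return NULL;
}

// run the two recoveries (one per delivered nonce) in parallel
static void recover_both(struct recovery_arg *a, struct recovery_arg *b)
{
    pthread_t t1, t2;
    pthread_create(&t1, NULL, recovery_thread, a);
    pthread_create(&t2, NULL, recovery_thread, b);
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
}
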
Minor rework of hf mf chk:
- Avoid time-consuming off/on cycles; send a "halt" instead (see the sketch below).
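
One way to issue such a halt is the ISO 14443-3 HLTA frame (0x50 0x00 plus CRC_A); a halted tag stays silent until it is reselected. A minimal sketch, assuming the firmware's AppendCrc14443a() CRC helper (signature assumed) and a hypothetical reader_transmit_raw() wrapper; the actual commit may implement the halt differently:

#include <stdint.h>
#include <stddef.h>

// hypothetical low-level wrapper, standing in for the firmware's actual
// ISO 14443A reader transmit routine
void reader_transmit_raw(const uint8_t *frame, size_t len);

// CRC_A helper from the firmware (assumed signature)
void AppendCrc14443a(uint8_t *data, int len);

// Sketch only: end the session with HLTA instead of an RF field off/on cycle
static void halt_tag(void)
{
    uint8_t halt[4] = {0x50, 0x00, 0x00, 0x00};   // HLTA command, CRC appended below
    AppendCrc14443a(halt, 2);                     // fills halt[2] and halt[3] with CRC_A
    reader_transmit_raw(halt, sizeof(halt));      // per ISO 14443-3, a halted tag does not answer
}
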
Commit 9492e0b098 by micki.held@gmx.de, 2013-09-15 09:33:17 +00:00
14 changed files with 728 additions and 630 deletions

crapto1.c:

@@ -31,6 +31,71 @@ static void __attribute__((constructor)) fill_lut()
#define filter(x) (filterlut[(x) & 0xfffff])
#endif
typedef struct bucket {
uint32_t *head;
uint32_t *bp;
} bucket_t;
typedef bucket_t bucket_array_t[2][0x100];
typedef struct bucket_info {
struct {
uint32_t *head, *tail;
} bucket_info[2][0x100];
uint32_t numbuckets;
} bucket_info_t;
static void bucket_sort_intersect(uint32_t* const estart, uint32_t* const estop,
uint32_t* const ostart, uint32_t* const ostop,
bucket_info_t *bucket_info, bucket_array_t bucket)
{
uint32_t *p1, *p2;
uint32_t *start[2];
uint32_t *stop[2];
start[0] = estart;
stop[0] = estop;
start[1] = ostart;
stop[1] = ostop;
// init buckets to be empty
for (uint32_t i = 0; i < 2; i++) {
for (uint32_t j = 0x00; j <= 0xff; j++) {
bucket[i][j].bp = bucket[i][j].head;
}
}
// sort the lists into the buckets based on the MSB (contribution bits)
for (uint32_t i = 0; i < 2; i++) {
for (p1 = start[i]; p1 <= stop[i]; p1++) {
uint32_t bucket_index = (*p1 & 0xff000000) >> 24;
*(bucket[i][bucket_index].bp++) = *p1;
}
}
// write back intersecting buckets as sorted list.
// fill in bucket_info with the head and tail of each bucket's contents in the list, and the number of non-empty buckets.
uint32_t nonempty_bucket;
for (uint32_t i = 0; i < 2; i++) {
p1 = start[i];
nonempty_bucket = 0;
for (uint32_t j = 0x00; j <= 0xff; j++) {
if (bucket[0][j].bp != bucket[0][j].head && bucket[1][j].bp != bucket[1][j].head) { // non-empty intersecting buckets only
bucket_info->bucket_info[i][nonempty_bucket].head = p1;
for (p2 = bucket[i][j].head; p2 < bucket[i][j].bp; *p1++ = *p2++);
bucket_info->bucket_info[i][nonempty_bucket].tail = p1 - 1;
nonempty_bucket++;
}
}
bucket_info->numbuckets = nonempty_bucket;
}
}
static void quicksort(uint32_t* const start, uint32_t* const stop)
{
uint32_t *it = start + 1, *rit = stop;
@@ -54,6 +119,8 @@ static void quicksort(uint32_t* const start, uint32_t* const stop)
quicksort(start, rit - 1);
quicksort(rit + 1, stop);
}
/** binsearch
* Binary search for the first occurrence of *stop's MSB in sorted [start,stop]
*/
@@ -90,45 +157,55 @@ static inline void
extend_table(uint32_t *tbl, uint32_t **end, int bit, int m1, int m2, uint32_t in)
{
in <<= 24;
for(*tbl <<= 1; tbl <= *end; *++tbl <<= 1)
if(filter(*tbl) ^ filter(*tbl | 1)) {
*tbl |= filter(*tbl) ^ bit;
update_contribution(tbl, m1, m2);
*tbl ^= in;
} else if(filter(*tbl) == bit) {
*++*end = tbl[1];
tbl[1] = tbl[0] | 1;
update_contribution(tbl, m1, m2);
*tbl++ ^= in;
update_contribution(tbl, m1, m2);
*tbl ^= in;
} else
*tbl-- = *(*end)--;
for(uint32_t *p = tbl; p <= *end; p++) {
*p <<= 1;
if(filter(*p) != filter(*p | 1)) { // replace
*p |= filter(*p) ^ bit;
update_contribution(p, m1, m2);
*p ^= in;
} else if(filter(*p) == bit) { // insert
*++*end = p[1];
p[1] = p[0] | 1;
update_contribution(p, m1, m2);
*p++ ^= in;
update_contribution(p, m1, m2);
*p ^= in;
} else { // drop
*p-- = *(*end)--;
}
}
}
/** extend_table_simple
* using a bit of the keystream extend the table of possible lfsr states
*/
static inline void
extend_table_simple(uint32_t *tbl, uint32_t **end, int bit)
{
for(*tbl <<= 1; tbl <= *end; *++tbl <<= 1)
if(filter(*tbl) ^ filter(*tbl | 1)) {
for(*tbl <<= 1; tbl <= *end; *++tbl <<= 1)
if(filter(*tbl) ^ filter(*tbl | 1)) { // replace
*tbl |= filter(*tbl) ^ bit;
} else if(filter(*tbl) == bit) {
} else if(filter(*tbl) == bit) { // insert
*++*end = *++tbl;
*tbl = tbl[-1] | 1;
} else
} else // drop
*tbl-- = *(*end)--;
}
/** recover
* recursively narrow down the search space, 4 bits of keystream at a time
*/
static struct Crypto1State*
recover(uint32_t *o_head, uint32_t *o_tail, uint32_t oks,
uint32_t *e_head, uint32_t *e_tail, uint32_t eks, int rem,
struct Crypto1State *sl, uint32_t in)
struct Crypto1State *sl, uint32_t in, bucket_array_t bucket)
{
uint32_t *o, *e, i;
uint32_t *o, *e;
bucket_info_t bucket_info;
if(rem == -1) {
for(e = e_head; e <= e_tail; ++e) {
@@ -136,13 +213,13 @@ recover(uint32_t *o_head, uint32_t *o_tail, uint32_t oks,
for(o = o_head; o <= o_tail; ++o, ++sl) {
sl->even = *o;
sl->odd = *e ^ parity(*o & LF_POLY_ODD);
sl[1].odd = sl[1].even = 0;
}
}
sl->odd = sl->even = 0;
return sl;
}
for(i = 0; i < 4 && rem--; i++) {
for(uint32_t i = 0; i < 4 && rem--; i++) {
extend_table(o_head, &o_tail, (oks >>= 1) & 1,
LF_POLY_EVEN << 1 | 1, LF_POLY_ODD << 1, 0);
if(o_head > o_tail)
@@ -154,21 +231,14 @@ recover(uint32_t *o_head, uint32_t *o_tail, uint32_t oks,
return sl;
}
quicksort(o_head, o_tail);
quicksort(e_head, e_tail);
while(o_tail >= o_head && e_tail >= e_head)
if(((*o_tail ^ *e_tail) >> 24) == 0) {
o_tail = binsearch(o_head, o = o_tail);
e_tail = binsearch(e_head, e = e_tail);
sl = recover(o_tail--, o, oks,
e_tail--, e, eks, rem, sl, in);
}
else if(*o_tail > *e_tail)
o_tail = binsearch(o_head, o_tail) - 1;
else
e_tail = binsearch(e_head, e_tail) - 1;
bucket_sort_intersect(e_head, e_tail, o_head, o_tail, &bucket_info, bucket);
for (int i = bucket_info.numbuckets - 1; i >= 0; i--) {
sl = recover(bucket_info.bucket_info[1][i].head, bucket_info.bucket_info[1][i].tail, oks,
bucket_info.bucket_info[0][i].head, bucket_info.bucket_info[0][i].tail, eks,
rem, sl, in, bucket);
}
return sl;
}
/** lfsr_recovery
@@ -183,6 +253,7 @@ struct Crypto1State* lfsr_recovery32(uint32_t ks2, uint32_t in)
uint32_t *even_head = 0, *even_tail = 0, eks = 0;
int i;
// split the keystream into an odd and even part
for(i = 31; i >= 0; i -= 2)
oks = oks << 1 | BEBIT(ks2, i);
for(i = 30; i >= 0; i -= 2)
@@ -191,11 +262,23 @@ struct Crypto1State* lfsr_recovery32(uint32_t ks2, uint32_t in)
odd_head = odd_tail = malloc(sizeof(uint32_t) << 21);
even_head = even_tail = malloc(sizeof(uint32_t) << 21);
statelist = malloc(sizeof(struct Crypto1State) << 18);
if(!odd_tail-- || !even_tail-- || !statelist)
if(!odd_tail-- || !even_tail-- || !statelist) {
goto out;
}
statelist->odd = statelist->even = 0;
// allocate memory for out of place bucket_sort
bucket_array_t bucket;
for (uint32_t i = 0; i < 2; i++)
for (uint32_t j = 0; j <= 0xff; j++) {
bucket[i][j].head = malloc(sizeof(uint32_t)<<14);
if (!bucket[i][j].head) {
goto out;
}
}
// initialize statelists: add all possible states which would result in the rightmost 2 bits of the keystream
for(i = 1 << 20; i >= 0; --i) {
if(filter(i) == (oks & 1))
*++odd_tail = i;
@@ -203,18 +286,29 @@ struct Crypto1State* lfsr_recovery32(uint32_t ks2, uint32_t in)
*++even_tail = i;
}
// extend the statelists. Look at the next 8 bits of the keystream (4 bits each for odd and even):
for(i = 0; i < 4; i++) {
extend_table_simple(odd_head, &odd_tail, (oks >>= 1) & 1);
extend_table_simple(even_head, &even_tail, (eks >>= 1) & 1);
}
in = (in >> 16 & 0xff) | (in << 16) | (in & 0xff00);
// the statelists now contain all states which could have generated the last 10 bits of the keystream.
// 22 bits to go to recover 32 bits in total. From now on, we need to take the "in"
// parameter into account.
in = (in >> 16 & 0xff) | (in << 16) | (in & 0xff00); // Byte swapping
recover(odd_head, odd_tail, oks,
even_head, even_tail, eks, 11, statelist, in << 1);
even_head, even_tail, eks, 11, statelist, in << 1, bucket);
out:
free(odd_head);
free(even_head);
for (uint32_t i = 0; i < 2; i++)
for (uint32_t j = 0; j <= 0xff; j++)
free(bucket[i][j].head);
return statelist;
}
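
For context, a minimal caller-side sketch (an assumption, not code from this commit) of consuming the list returned by lfsr_recovery32(): the list is zero-terminated, as set up in the code above, and crypto1_get_lfsr() is an existing crapto1 helper whose signature is assumed here:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include "crapto1.h"

// Sketch only: walk the candidate list returned by lfsr_recovery32().
// The list lives in one malloc'd block and is terminated by a state with
// odd == even == 0 (see lfsr_recovery32 above), so it is freed as a whole.
static void dump_candidates(uint32_t ks2, uint32_t in)
{
    struct Crypto1State *list = lfsr_recovery32(ks2, in);
    if (!list)
        return;
    for (struct Crypto1State *s = list; s->odd | s->even; s++) {
        uint64_t lfsr = 0;
        // to turn a candidate into a sector key, the caller first rolls the
        // cipher back over the authentication exchange with the
        // lfsr_rollback_* helpers; that sequence is attack-specific and
        // omitted here
        crypto1_get_lfsr(s, &lfsr);   // read the 48-bit LFSR state (signature assumed from crapto1.h)
        printf("candidate state: %012" PRIx64 "\n", lfsr);
    }
    free(list);
}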