/*
 * Block matching used by the file-transfer code.
 *
 * Copyright (C) 1996 Andrew Tridgell
 * Copyright (C) 1996 Paul Mackerras
 * Copyright (C) 2003-2009 Wayne Davison
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, visit the http://fsf.org website.
 */

#include "rsync.h"

extern int verbose;
extern int do_progress;
extern int checksum_seed;
extern int append_mode;

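/* Nonzero when the destination file is being updated in place; this
 * restricts which blocks hash_search() is allowed to match (see the
 * in-place checks below). */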
int updating_basis_file;

static int false_alarms;
static int hash_hits;
static int matches;
static int64 data_transfer;

static int total_false_alarms;
static int total_hash_hits;
static int total_matches;

extern struct stats stats;

#define TRADITIONAL_TABLESIZE (1<<16)

static uint32 tablesize;
static int32 *hash_table;

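/* Map a 32-bit weak checksum onto a hash bucket. The traditional 64K
 * table folds s1+s2 down to 16 bits; larger tables use the full sum
 * modulo tablesize. Each bucket holds the index of the first block in
 * a chain (-1 when empty, courtesy of the 0xFF memset below), and the
 * chain continues through sums[i].chain. */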
#define SUM2HASH2(s1,s2) (((s1) + (s2)) & 0xFFFF)
#define SUM2HASH(sum) SUM2HASH2((sum)&0xFFFF,(sum)>>16)

#define BIG_SUM2HASH(sum) ((sum)%tablesize)

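/* Index every block in the generator's checksum list by its weak
 * checksum so that hash_search() can find candidate blocks without
 * scanning the whole list; colliding blocks are linked via chain. */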
static void build_hash_table(struct sum_struct *s)
{
	static uint32 alloc_size;
	int32 i;

	/* Dynamically calculate the hash table size so that the hash load
	 * for big files is about 80%. A number greater than the traditional
	 * size must be odd or s2 will not be able to span the entire set. */
	tablesize = (uint32)(s->count/8) * 10 + 11;
	if (tablesize < TRADITIONAL_TABLESIZE)
		tablesize = TRADITIONAL_TABLESIZE;
	if (tablesize > alloc_size || tablesize < alloc_size - 16*1024) {
		if (hash_table)
			free(hash_table);
		hash_table = new_array(int32, tablesize);
		if (!hash_table)
			out_of_memory("build_hash_table");
		alloc_size = tablesize;
	}

	memset(hash_table, 0xFF, tablesize * sizeof hash_table[0]);

	if (tablesize == TRADITIONAL_TABLESIZE) {
		for (i = 0; i < s->count; i++) {
			uint32 t = SUM2HASH(s->sums[i].sum1);
			s->sums[i].chain = hash_table[t];
			hash_table[t] = i;
		}
	} else {
		for (i = 0; i < s->count; i++) {
			uint32 t = BIG_SUM2HASH(s->sums[i].sum1);
			s->sums[i].chain = hash_table[t];
			hash_table[t] = i;
		}
	}
}


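/* Absolute offset just past the end of the last matched (or flushed)
 * data. Everything between last_match and the current offset is
 * pending literal data. */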
static OFF_T last_match;


/* Transmit a literal and/or match token.
 *
 * This delightfully-named function is called either when we find a
 * match and need to transmit all the unmatched data leading up to it,
 * or when we get bored of accumulating literal data and just need to
 * transmit it. As a result of this second case, it is called even if
 * we have not matched at all!
 *
 * If i >= 0, the number of a matched token. If < 0, indicates we have
 * only literal data. A -1 will send a 0-token-int too, and a -2 sends
 * only literal data, w/o any token-int. */
static void matched(int f, struct sum_struct *s, struct map_struct *buf,
		    OFF_T offset, int32 i)
{
	int32 n = (int32)(offset - last_match); /* max value: block_size (int32) */
	int32 j;

	if (verbose > 2 && i >= 0) {
		rprintf(FINFO,
			"match at %.0f last_match=%.0f j=%d len=%ld n=%ld\n",
			(double)offset, (double)last_match, i,
			(long)s->sums[i].len, (long)n);
	}

	send_token(f, i, buf, last_match, n, i < 0 ? 0 : s->sums[i].len);
	data_transfer += n;

	if (i >= 0) {
		stats.matched_data += s->sums[i].len;
		n += s->sums[i].len;
	}

	for (j = 0; j < n; j += CHUNK_SIZE) {
		int32 n1 = MIN(CHUNK_SIZE, n - j);
		sum_update(map_ptr(buf, last_match + j, n1), n1);
	}

	if (i >= 0)
		last_match = offset + s->sums[i].len;
	else
		last_match = offset;

	if (buf && do_progress)
		show_progress(last_match, buf->file_size);
}


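/* Slide a one-block window across the file, rolling the weak checksum
 * forward one byte at a time. Each window position is looked up in the
 * hash table; candidate blocks are confirmed with the strong checksum
 * (sum2) before matched() emits the token plus any pending literal data. */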
static void hash_search(int f,struct sum_struct *s,
			struct map_struct *buf, OFF_T len)
{
	OFF_T offset, aligned_offset, end;
	int32 k, want_i, aligned_i, backup;
	char sum2[SUM_LENGTH];
	uint32 s1, s2, sum;
	int more;
	schar *map;

	/* want_i is used to encourage adjacent matches, allowing the RLL
	 * coding of the output to work more efficiently. */
	want_i = 0;

	if (verbose > 2) {
		rprintf(FINFO, "hash search b=%ld len=%.0f\n",
			(long)s->blength, (double)len);
	}

	k = (int32)MIN(len, (OFF_T)s->blength);

	map = (schar *)map_ptr(buf, 0, k);

	sum = get_checksum1((char *)map, k);
	s1 = sum & 0xFFFF;
	s2 = sum >> 16;
	if (verbose > 3)
		rprintf(FINFO, "sum=%.8x k=%ld\n", sum, (long)k);

	offset = aligned_offset = aligned_i = 0;

	end = len + 1 - s->sums[s->count-1].len;

	if (verbose > 3) {
		rprintf(FINFO, "hash search s->blength=%ld len=%.0f count=%.0f\n",
			(long)s->blength, (double)len, (double)s->count);
	}

	do {
		int done_csum2 = 0;
		int32 i;

		if (verbose > 4) {
			rprintf(FINFO, "offset=%.0f sum=%04x%04x\n",
				(double)offset, s2 & 0xFFFF, s1 & 0xFFFF);
		}

		if (tablesize == TRADITIONAL_TABLESIZE) {
			if ((i = hash_table[SUM2HASH2(s1,s2)]) < 0)
				goto null_hash;
			sum = (s1 & 0xffff) | (s2 << 16);
		} else {
			sum = (s1 & 0xffff) | (s2 << 16);
			if ((i = hash_table[BIG_SUM2HASH(sum)]) < 0)
				goto null_hash;
		}

		hash_hits++;
		do {
			int32 l;

			if (sum != s->sums[i].sum1)
				continue;

			/* also make sure the two blocks are the same length */
			l = (int32)MIN((OFF_T)s->blength, len-offset);
			if (l != s->sums[i].len)
				continue;

			/* in-place: ensure chunk's offset is either >= our
			 * offset or that the data didn't move. */
			if (updating_basis_file && s->sums[i].offset < offset
			    && !(s->sums[i].flags & SUMFLG_SAME_OFFSET))
				continue;

			if (verbose > 3) {
				rprintf(FINFO,
					"potential match at %.0f i=%ld sum=%08x\n",
					(double)offset, (long)i, sum);
			}

			if (!done_csum2) {
				map = (schar *)map_ptr(buf,offset,l);
				get_checksum2((char *)map,l,sum2);
				done_csum2 = 1;
			}

			if (memcmp(sum2,s->sums[i].sum2,s->s2length) != 0) {
				false_alarms++;
				continue;
			}

			/* When updating in-place, the best possible match is
			 * one with an identical offset, so we prefer that over
			 * the adjacent want_i optimization. */
			if (updating_basis_file) {
				/* All the generator's chunks start at blength boundaries. */
				while (aligned_offset < offset) {
					aligned_offset += s->blength;
					aligned_i++;
				}
				if (offset == aligned_offset && aligned_i < s->count) {
					if (i != aligned_i) {
						if (sum != s->sums[aligned_i].sum1
						 || l != s->sums[aligned_i].len
						 || memcmp(sum2, s->sums[aligned_i].sum2, s->s2length) != 0)
							goto check_want_i;
						i = aligned_i;
					}
					/* This identical chunk is in the same spot in the old and new file. */
					s->sums[i].flags |= SUMFLG_SAME_OFFSET;
					want_i = i;
				}
			}

		  check_want_i:
			/* we've found a match, but now check to see
			 * if want_i can hint at a better match. */
			if (i != want_i && want_i < s->count
			    && (!updating_basis_file || s->sums[want_i].offset >= offset
			     || s->sums[want_i].flags & SUMFLG_SAME_OFFSET)
			    && sum == s->sums[want_i].sum1
			    && memcmp(sum2, s->sums[want_i].sum2, s->s2length) == 0) {
				/* we've found an adjacent match - the RLL coder
				 * will be happy */
				i = want_i;
			}
			want_i = i + 1;

			matched(f,s,buf,offset,i);
			offset += s->sums[i].len - 1;
			k = (int32)MIN((OFF_T)s->blength, len-offset);
			map = (schar *)map_ptr(buf, offset, k);
			sum = get_checksum1((char *)map, k);
			s1 = sum & 0xFFFF;
			s2 = sum >> 16;
			matches++;
			break;
		} while ((i = s->sums[i].chain) >= 0);

	  null_hash:
		backup = (int32)(offset - last_match);
		/* We sometimes read 1 byte prior to last_match... */
		if (backup < 0)
			backup = 0;

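		/* Roll the weak checksum forward by one byte: with
		 * a = map[0]+CHAR_OFFSET (outgoing byte) and
		 * b = map[k]+CHAR_OFFSET (incoming byte), the combined update
		 * is s1' = s1 - a + b and s2' = s2 - k*a + s1', keeping
		 * (s1,s2) consistent with get_checksum1() over the new window.
		 * At EOF only the leading byte is dropped and the window shrinks. */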
		/* Trim off the first byte from the checksum */
		more = offset + k < len;
		map = (schar *)map_ptr(buf, offset - backup, k + more + backup)
		    + backup;
		s1 -= map[0] + CHAR_OFFSET;
		s2 -= k * (map[0]+CHAR_OFFSET);

		/* Add on the next byte (if there is one) to the checksum */
		if (more) {
			s1 += map[k] + CHAR_OFFSET;
			s2 += s1;
		} else
			--k;

		/* By matching early we avoid re-reading the
		   data 3 times in the case where a token
		   match comes a long way after last
		   match. The 3 reads are caused by the
		   running match, the checksum update and the
		   literal send. */
		if (backup >= s->blength+CHUNK_SIZE && end-offset > CHUNK_SIZE)
			matched(f, s, buf, offset - s->blength, -2);
	} while (++offset < end);

	matched(f, s, buf, len, -1);
	map_ptr(buf, len-1, 1);
}


/**
 * Scan through an origin file, looking for sections that match
 * checksums from the generator, and transmit either literal or token
 * data.
 *
 * Also calculates the MD4 checksum of the whole file, using the md
 * accumulator. This is transmitted with the file as protection
 * against corruption on the wire.
 *
 * @param s Checksums received from the generator. If <tt>s->count ==
 * 0</tt>, then there are actually no checksums for this file.
 *
 * @param len Length of the file to send.
 **/
void match_sums(int f, struct sum_struct *s, struct map_struct *buf, OFF_T len)
{
	char file_sum[MAX_DIGEST_LEN];
	int sum_len;

	last_match = 0;
	false_alarms = 0;
	hash_hits = 0;
	matches = 0;
	data_transfer = 0;

	sum_init(checksum_seed);

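	/* In append mode the first s->flength bytes are treated as already
	 * matched; when append_mode == 2 that existing data is also folded
	 * into the whole-file checksum so the receiver can verify it. */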
	if (append_mode > 0) {
		if (append_mode == 2) {
			OFF_T j = 0;
			for (j = CHUNK_SIZE; j < s->flength; j += CHUNK_SIZE) {
				if (buf && do_progress)
					show_progress(last_match, buf->file_size);
				sum_update(map_ptr(buf, last_match, CHUNK_SIZE),
					   CHUNK_SIZE);
				last_match = j;
			}
			if (last_match < s->flength) {
				int32 n = (int32)(s->flength - last_match);
				if (buf && do_progress)
					show_progress(last_match, buf->file_size);
				sum_update(map_ptr(buf, last_match, n), n);
			}
		}
		last_match = s->flength;
		s->count = 0;
	}

	if (len > 0 && s->count > 0) {
		build_hash_table(s);

		if (verbose > 2)
			rprintf(FINFO,"built hash table\n");

		hash_search(f, s, buf, len);

		if (verbose > 2)
			rprintf(FINFO,"done hash search\n");
	} else {
		OFF_T j;
		/* by doing this in pieces we avoid too many seeks */
		for (j = last_match + CHUNK_SIZE; j < len; j += CHUNK_SIZE)
			matched(f, s, buf, j, -2);
		matched(f, s, buf, len, -1);
	}

	sum_len = sum_end(file_sum);
	/* If we had a read error, send a bad checksum. */
	if (buf && buf->status != 0)
		file_sum[0]++;

	if (verbose > 2)
		rprintf(FINFO,"sending file_sum\n");
	write_buf(f, file_sum, sum_len);

	if (verbose > 2)
		rprintf(FINFO, "false_alarms=%d hash_hits=%d matches=%d\n",
			false_alarms, hash_hits, matches);

	total_hash_hits += hash_hits;
	total_false_alarms += false_alarms;
	total_matches += matches;
	stats.literal_data += data_transfer;
}

void match_report(void)
{
	if (verbose <= 1)
		return;

	rprintf(FINFO,
		"total: matches=%d hash_hits=%d false_alarms=%d data=%.0f\n",
		total_matches, total_hash_hits, total_false_alarms,
		(double)stats.literal_data);
}