embedaddon/rsync/match.c (ELWIX - Embedded LightWeight unIX)
Revision 1.1.1.4 (vendor branch), Wed Mar 17 00:32:36 2021 UTC, by misho
Branches: rsync, MAIN
CVS tags: v3_2_3, HEAD
rsync 3.2.3

/*
 * Block matching used by the file-transfer code.
 *
 * Copyright (C) 1996 Andrew Tridgell
 * Copyright (C) 1996 Paul Mackerras
 * Copyright (C) 2003-2020 Wayne Davison
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, visit the http://fsf.org website.
 */

#include "rsync.h"
#include "inums.h"

extern int checksum_seed;
extern int append_mode;
extern int xfersum_type;

int updating_basis_file;
char sender_file_sum[MAX_DIGEST_LEN];

static int false_alarms;
static int hash_hits;
static int matches;
static int64 data_transfer;

static int total_false_alarms;
static int total_hash_hits;
static int total_matches;

extern struct stats stats;

#define TRADITIONAL_TABLESIZE (1<<16)

static uint32 tablesize;
static int32 *hash_table;

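/* The weak (rolling) checksum is folded into a hash-table slot in one of
 * two ways: the traditional 64K table simply adds the two 16-bit halves
 * (s1 + s2), while the dynamically sized table used for big files takes
 * the full 32-bit sum modulo the (odd) table size.  Buckets are chained
 * through sums[i].chain, with -1 marking the end of a chain. */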
#define SUM2HASH2(s1,s2) (((s1) + (s2)) & 0xFFFF)
#define SUM2HASH(sum) SUM2HASH2((sum)&0xFFFF,(sum)>>16)

#define BIG_SUM2HASH(sum) ((sum)%tablesize)

static void build_hash_table(struct sum_struct *s)
{
	static uint32 alloc_size;
	int32 i;

	/* Dynamically calculate the hash table size so that the hash load
	 * for big files is about 80%.  A number greater than the traditional
	 * size must be odd or s2 will not be able to span the entire set. */
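	/* For example, 80,000 blocks yield 80000/8*10 + 11 = 100,011 slots
	 * (an odd number), i.e. a load factor of roughly 0.8; smaller files
	 * fall back to the traditional 64K table below. */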
	tablesize = (uint32)(s->count/8) * 10 + 11;
	if (tablesize < TRADITIONAL_TABLESIZE)
		tablesize = TRADITIONAL_TABLESIZE;
	if (tablesize > alloc_size || tablesize < alloc_size - 16*1024) {
		if (hash_table)
			free(hash_table);
		hash_table = new_array(int32, tablesize);
		alloc_size = tablesize;
	}

	memset(hash_table, 0xFF, tablesize * sizeof hash_table[0]);

	if (tablesize == TRADITIONAL_TABLESIZE) {
		for (i = 0; i < s->count; i++) {
			uint32 t = SUM2HASH(s->sums[i].sum1);
			s->sums[i].chain = hash_table[t];
			hash_table[t] = i;
		}
	} else {
		for (i = 0; i < s->count; i++) {
			uint32 t = BIG_SUM2HASH(s->sums[i].sum1);
			s->sums[i].chain = hash_table[t];
			hash_table[t] = i;
		}
	}
}


static OFF_T last_match;

/* Transmit a literal and/or match token.
 *
 * This delightfully-named function is called either when we find a
 * match and need to transmit all the unmatched data leading up to it,
 * or when we get bored of accumulating literal data and just need to
 * transmit it.  As a result of this second case, it is called even if
 * we have not matched at all!
 *
 * If i >= 0, it is the number of the matched token.  If i < 0, we have
 * only literal data: a -1 sends a 0 token-int as well, while a -2 sends
 * only the literal data, without any token-int. */
static void matched(int f, struct sum_struct *s, struct map_struct *buf, OFF_T offset, int32 i)
{
	int32 n = (int32)(offset - last_match); /* max value: block_size (int32) */
	int32 j;

	if (DEBUG_GTE(DELTASUM, 2) && i >= 0) {
		rprintf(FINFO,
			"match at %s last_match=%s j=%d len=%ld n=%ld\n",
			big_num(offset), big_num(last_match), i,
			(long)s->sums[i].len, (long)n);
	}

	send_token(f, i, buf, last_match, n, i < 0 ? 0 : s->sums[i].len);
	data_transfer += n;

	if (i >= 0) {
		stats.matched_data += s->sums[i].len;
		n += s->sums[i].len;
	}

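	/* Fold the literal run (and the matched block, if any) into the
	 * whole-file checksum, CHUNK_SIZE bytes at a time (presumably to
	 * keep each map_ptr() window small). */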
	for (j = 0; j < n; j += CHUNK_SIZE) {
		int32 n1 = MIN(CHUNK_SIZE, n - j);
		sum_update(map_ptr(buf, last_match + j, n1), n1);
	}

	if (i >= 0)
		last_match = offset + s->sums[i].len;
	else
		last_match = offset;

	if (buf && INFO_GTE(PROGRESS, 1))
		show_progress(last_match, buf->file_size);
}

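/* Slide a byte-granularity window across the sender's file, using the
 * rolling checksum as a cheap hash-table probe and the strong checksum
 * (sum2) to confirm candidate blocks; matched() emits the intervening
 * literal data and the match tokens as we go. */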
static void hash_search(int f,struct sum_struct *s,
			struct map_struct *buf, OFF_T len)
{
	OFF_T offset, aligned_offset, end;
	int32 k, want_i, aligned_i, backup;
	char sum2[SUM_LENGTH];
	uint32 s1, s2, sum;
	int more;
	schar *map;

	/* want_i is used to encourage adjacent matches, allowing the RLL
	 * coding of the output to work more efficiently. */
	want_i = 0;

	if (DEBUG_GTE(DELTASUM, 2)) {
		rprintf(FINFO, "hash search b=%ld len=%s\n",
			(long)s->blength, big_num(len));
	}

	k = (int32)MIN(len, (OFF_T)s->blength);

	map = (schar *)map_ptr(buf, 0, k);

	sum = get_checksum1((char *)map, k);
	s1 = sum & 0xFFFF;
	s2 = sum >> 16;
	if (DEBUG_GTE(DELTASUM, 3))
		rprintf(FINFO, "sum=%.8x k=%ld\n", sum, (long)k);

	checksum2_enable_prefetch(buf, len, s->blength);

	offset = aligned_offset = aligned_i = 0;

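	/* The last block in the sum list may be shorter than blength, so the
	 * search can only run over offsets that still leave room for it. */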
	end = len + 1 - s->sums[s->count-1].len;

	if (DEBUG_GTE(DELTASUM, 3)) {
		rprintf(FINFO, "hash search s->blength=%ld len=%s count=%s\n",
			(long)s->blength, big_num(len), big_num(s->count));
	}

	do {
		int done_csum2 = 0;
		uint32 hash_entry;
		int32 i, *prev;

		if (DEBUG_GTE(DELTASUM, 4)) {
			rprintf(FINFO, "offset=%s sum=%04x%04x\n",
				big_num(offset), s2 & 0xFFFF, s1 & 0xFFFF);
		}

		if (tablesize == TRADITIONAL_TABLESIZE) {
			hash_entry = SUM2HASH2(s1,s2);
			if ((i = hash_table[hash_entry]) < 0)
				goto null_hash;
			sum = (s1 & 0xffff) | (s2 << 16);
		} else {
			sum = (s1 & 0xffff) | (s2 << 16);
			hash_entry = BIG_SUM2HASH(sum);
			if ((i = hash_table[hash_entry]) < 0)
				goto null_hash;
		}
		prev = &hash_table[hash_entry];

		hash_hits++;
		do {
			int32 l;

			/* When updating in-place, a chunk is only usable if its
			 * offset is >= our offset or it holds identical data at
			 * that offset.  Remove bypassed entries we can never use. */
			if (updating_basis_file && s->sums[i].offset < offset
			 && !(s->sums[i].flags & SUMFLG_SAME_OFFSET)) {
				*prev = s->sums[i].chain;
				continue;
			}
			prev = &s->sums[i].chain;

			if (sum != s->sums[i].sum1)
				continue;

			/* also make sure the two blocks are the same length */
			l = (int32)MIN((OFF_T)s->blength, len-offset);
			if (l != s->sums[i].len)
				continue;

			if (DEBUG_GTE(DELTASUM, 3)) {
				rprintf(FINFO,
					"potential match at %s i=%ld sum=%08x\n",
					big_num(offset), (long)i, sum);
			}

			if (!done_csum2) {
				map = (schar *)map_ptr(buf,offset,l);
				get_checksum2((char *)map, l, sum2, offset);
				done_csum2 = 1;
			}

			if (memcmp(sum2,s->sums[i].sum2,s->s2length) != 0) {
				false_alarms++;
				continue;
			}

			/* When updating in-place, the best possible match is
			 * one with an identical offset, so we prefer that over
			 * the adjacent want_i optimization. */
			if (updating_basis_file) {
				/* All the generator's chunks start at blength boundaries. */
				while (aligned_offset < offset) {
					aligned_offset += s->blength;
					aligned_i++;
				}
				if ((offset == aligned_offset
				  || (sum == 0 && l == s->blength && aligned_offset + l <= len))
				 && aligned_i < s->count) {
					if (i != aligned_i) {
						if (sum != s->sums[aligned_i].sum1
						 || l != s->sums[aligned_i].len
						 || memcmp(sum2, s->sums[aligned_i].sum2, s->s2length) != 0)
							goto check_want_i;
						i = aligned_i;
					}
					if (offset != aligned_offset) {
						/* We've matched some zeros in a spot that is also zeros
						 * further along in the basis file.  If we find zeros ahead
						 * in the sender's file, we'll output enough literal data
						 * to re-align with the basis file and get back to seeking
						 * instead of writing. */
						backup = (int32)(aligned_offset - last_match);
						if (backup < 0)
							backup = 0;
						map = (schar *)map_ptr(buf, aligned_offset - backup, l + backup)
						    + backup;
						sum = get_checksum1((char *)map, l);
						if (sum != s->sums[i].sum1)
							goto check_want_i;
						get_checksum2((char *)map, l, sum2, aligned_offset);
						if (memcmp(sum2, s->sums[i].sum2, s->s2length) != 0)
							goto check_want_i;
						/* OK, we have a re-alignment match.  Bump the offset
						 * forward to the new match point. */
						offset = aligned_offset;
					}
					/* This identical chunk is in the same spot in the old and new file. */
					s->sums[i].flags |= SUMFLG_SAME_OFFSET;
					want_i = i;
				}
			}

		  check_want_i:
			/* we've found a match, but now check to see
			 * if want_i can hint at a better match. */
			if (i != want_i && want_i < s->count
			 && (!updating_basis_file || s->sums[want_i].offset >= offset
			  || s->sums[want_i].flags & SUMFLG_SAME_OFFSET)
			 && sum == s->sums[want_i].sum1
			 && memcmp(sum2, s->sums[want_i].sum2, s->s2length) == 0) {
				/* we've found an adjacent match - the RLL coder
				 * will be happy */
				i = want_i;
			}
			want_i = i + 1;

			matched(f,s,buf,offset,i);
			offset += s->sums[i].len - 1;
			k = (int32)MIN((OFF_T)s->blength, len-offset);
			map = (schar *)map_ptr(buf, offset, k);
			sum = get_checksum1((char *)map, k);
			s1 = sum & 0xFFFF;
			s2 = sum >> 16;
			matches++;
			break;
		} while ((i = s->sums[i].chain) >= 0);

	  null_hash:
		backup = (int32)(offset - last_match);
		/* We sometimes read 1 byte prior to last_match... */
		if (backup < 0)
			backup = 0;

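		/* get_checksum1() defines s1 as the sum of (byte + CHAR_OFFSET)
		 * over the k-byte window and s2 as the sum of the running s1
		 * values, so the window can be slid one byte at a time: drop the
		 * leading byte's contribution from both sums, then (if another
		 * byte exists) add the new trailing byte to s1 and fold the new
		 * s1 into s2. */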
		/* Trim off the first byte from the checksum */
		more = offset + k < len;
		map = (schar *)map_ptr(buf, offset - backup, k + more + backup) + backup;
		s1 -= map[0] + CHAR_OFFSET;
		s2 -= k * (map[0]+CHAR_OFFSET);

		/* Add on the next byte (if there is one) to the checksum */
		if (more) {
			s1 += map[k] + CHAR_OFFSET;
			s2 += s1;
		} else
			--k;

		/* By matching early we avoid re-reading the
		   data 3 times in the case where a token
		   match comes a long way after last
		   match. The 3 reads are caused by the
		   running match, the checksum update and the
		   literal send. */
		if (backup >= s->blength+CHUNK_SIZE && end-offset > CHUNK_SIZE)
			matched(f, s, buf, offset - s->blength, -2);
	} while (++offset < end);

	checksum2_disable_prefetch();

	matched(f, s, buf, len, -1);
	map_ptr(buf, len-1, 1);
}

/**
 * Scan through an origin file, looking for sections that match
 * checksums from the generator, and transmit either literal or token
 * data.
 *
 * Also calculates the whole-file checksum (of the negotiated transfer
 * checksum type) via the sum_update() accumulator.  This is transmitted
 * with the file as protection against corruption on the wire.
 *
 * @param s Checksums received from the generator.  If <tt>s->count ==
 * 0</tt>, then there are actually no checksums for this file.
 *
 * @param len Length of the file to send.
 **/
void match_sums(int f, struct sum_struct *s, struct map_struct *buf, OFF_T len)
{
	int sum_len;

	last_match = 0;
	false_alarms = 0;
	hash_hits = 0;
	matches = 0;
	data_transfer = 0;

	sum_init(xfersum_type, checksum_seed);

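	/* In append mode the existing destination data is never searched for
	 * matches: it is simply treated as already transferred.  With
	 * append_mode == 2 (--append-verify) that data is still folded into
	 * the whole-file checksum; with plain --append it is skipped outright. */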
	if (append_mode > 0) {
		if (append_mode == 2) {
			OFF_T j = 0;
			for (j = CHUNK_SIZE; j < s->flength; j += CHUNK_SIZE) {
				if (buf && INFO_GTE(PROGRESS, 1))
					show_progress(last_match, buf->file_size);
				sum_update(map_ptr(buf, last_match, CHUNK_SIZE),
					   CHUNK_SIZE);
				last_match = j;
			}
			if (last_match < s->flength) {
				int32 n = (int32)(s->flength - last_match);
				if (buf && INFO_GTE(PROGRESS, 1))
					show_progress(last_match, buf->file_size);
				sum_update(map_ptr(buf, last_match, n), n);
			}
		}
		last_match = s->flength;
		s->count = 0;
	}

	if (len > 0 && s->count > 0) {
		build_hash_table(s);

		if (DEBUG_GTE(DELTASUM, 2))
			rprintf(FINFO,"built hash table\n");

		hash_search(f, s, buf, len);

		if (DEBUG_GTE(DELTASUM, 2))
			rprintf(FINFO,"done hash search\n");
	} else {
		OFF_T j;
		/* by doing this in pieces we avoid too many seeks */
		for (j = last_match + CHUNK_SIZE; j < len; j += CHUNK_SIZE)
			matched(f, s, buf, j, -2);
		matched(f, s, buf, len, -1);
	}

	sum_len = sum_end(sender_file_sum);

	/* If we had a read error, send a bad checksum.  We use all bits
	 * off as long as the checksum doesn't happen to be that, in
	 * which case we turn the last 0 bit into a 1. */
	if (buf && buf->status != 0) {
		int i;
		for (i = 0; i < sum_len && sender_file_sum[i] == 0; i++) {}
		memset(sender_file_sum, 0, sum_len);
		if (i == sum_len)
			sender_file_sum[i-1]++;
	}

	if (DEBUG_GTE(DELTASUM, 2))
		rprintf(FINFO,"sending file_sum\n");
	write_buf(f, sender_file_sum, sum_len);

	if (DEBUG_GTE(DELTASUM, 2)) {
		rprintf(FINFO, "false_alarms=%d hash_hits=%d matches=%d\n",
			false_alarms, hash_hits, matches);
	}

	total_hash_hits += hash_hits;
	total_false_alarms += false_alarms;
	total_matches += matches;
	stats.literal_data += data_transfer;
}

void match_report(void)
{
	if (!DEBUG_GTE(DELTASUM, 1))
		return;

	rprintf(FINFO,
		"total: matches=%d  hash_hits=%d  false_alarms=%d data=%s\n",
		total_matches, total_hash_hits, total_false_alarms,
		big_num(stats.literal_data));
}
